| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Converts data from external to internal indexing.
"""
import sys
from numpy import arange, concatenate
from scipy.sparse import issparse, vstack, hstack
from pypower.get_reorder import get_reorder
def e2i_data(ppc, val, ordering, dim=0):
"""Converts data from external to internal indexing.
When given a case dict that has already been converted to
internal indexing, this function can be used to convert other data
structures as well by passing in 2 or 3 extra parameters in
addition to the case dict. If the value passed in the 2nd
argument is a column vector, it will be converted according to the
C{ordering} specified by the 3rd argument (described below). If C{val}
is an n-dimensional matrix, then the optional 4th argument (C{dim},
default = 0) can be used to specify which dimension to reorder.
The return value in this case is the value passed in, converted
to internal indexing.
The 3rd argument, C{ordering}, is used to indicate whether the data
corresponds to bus-, gen- or branch-ordered data. It can be one
of the following three strings: 'bus', 'gen' or 'branch'. For
data structures with multiple blocks of data, ordered by bus,
gen or branch, they can be converted with a single call by
specifying C{ordering} as a list of strings.
Any extra elements, rows, columns, etc. beyond those indicated
in C{ordering}, are not disturbed.
Examples:
A_int = e2i_data(ppc, A_ext, ['bus','bus','gen','gen'], 1)
Converts an A matrix for user-supplied OPF constraints from
external to internal ordering, where the columns of the A
matrix correspond to bus voltage angles, then voltage
magnitudes, then generator real power injections and finally
generator reactive power injections.
gencost_int = e2i_data(ppc, gencost_ext, ['gen','gen'], 0)
Converts a GENCOST matrix that has both real and reactive power
costs (in rows 1--ng and ng+1--2*ng, respectively).
"""
if 'order' not in ppc:
sys.stderr.write('e2i_data: ppc does not have the \'order\' field '
'required to convert from external to internal numbering.\n')
return
o = ppc['order']
if o['state'] != 'i':
sys.stderr.write('e2i_data: ppc does not have internal ordering '
'data available, call ext2int first\n')
return
if isinstance(ordering, str): ## single set
if ordering == 'gen':
idx = o[ordering]["status"]["on"][ o[ordering]["e2i"] ]
else:
idx = o[ordering]["status"]["on"]
val = get_reorder(val, idx, dim)
    else:  ## multiple sets
b = 0 ## base
new_v = []
for ordr in ordering:
n = o["ext"][ordr].shape[0]
v = get_reorder(val, b + arange(n), dim)
new_v.append( e2i_data(ppc, v, ordr, dim) )
b = b + n
n = val.shape[dim]
if n > b: ## the rest
v = get_reorder(val, arange(b, n), dim)
new_v.append(v)
        if issparse(new_v[0]):
            ## assign the stacked result so the sparse case is actually returned
            if dim == 0:
                val = vstack(new_v, 'csr')
            elif dim == 1:
                val = hstack(new_v, 'csr')
            else:
                raise ValueError('dim (%d) may be 0 or 1' % dim)
        else:
            val = concatenate(new_v, dim)
return val
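## ---------------------------------------------------------------------------
## Hedged illustration (not part of PYPOWER): a minimal sketch of what the
## 'gen' branch above computes. The arrays status_on and e2i stand in for
## o['gen']['status']['on'] and o['gen']['e2i']; the values are invented.
## ---------------------------------------------------------------------------
def _example_gen_reorder():
    from numpy import array
    ext_gen_data = array([[10.0], [20.0], [30.0]])  ## one row per external generator
    status_on = array([0, 2])      ## external generators 0 and 2 are in service
    e2i = array([1, 0])            ## internal ordering of the on-line generators
    idx = status_on[e2i]           ## -> array([2, 0])
    return ext_gen_data[idx, :]    ## off-line rows dropped, rest reordered internally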
| praba230890/PYPOWER | pypower/e2i_data.py | Python | bsd-3-clause | 3,601 |
import os
import sys
import skybase.schemas
from skybase.utils.schema import read_yaml_from_file
from skybase.utils import simple_error_format
from skybase.exceptions import SkyBaseConfigurationError
from yaml.scanner import ScannerError
from yaml.parser import ParserError
# Application CONSTANTS
# Configuration
DEFAULT_CONFIG_DIR = '/etc/skybase'
CONFIG_DIR = DEFAULT_CONFIG_DIR
# TODO: verify directory existence or error
# if the CLI runtime --config option is present, use it to override the default application config directory
if '--config' in sys.argv:
cfg_pos = sys.argv.index('--config')
CONFIG_DIR = sys.argv[cfg_pos + 1]
# RestAPI
API_SERVER = 'http://localhost:8880'
API_ROOT = 'api'
API_VERSION = '0.1'
API_ROUTES = {
'task': 'task',
}
API_HTTP_HEADER_SIGNATURE = 'Skybase-Request-Signature'
API_HTTP_HEADER_ACCESS_KEY = 'Skybase-Access-Key'
API_STATUS_SUCCESS = 'success'
API_STATUS_FAIL = 'fail'
# Client
DEFAULT_PLANET = 'dev-aws-us-west-1'
# User Authentication DB
skybase.schemas.set_indicators()
# client config should contain url and credentials for the Skybase REST API and client logging settings
# "default" section should contain default settings for the command line options (currently only planet_name)
# location of the config is $HOME/.skybase/ (unless overridden on the command line with the --config_dir=... option)
#
# client_config_dir contains 2 files: a) client.yaml; b) credentials.yaml
config_schemas = {
'client': [
[['restapi_server_url'], []],
[['log_file'], []],
[['log_level'], []],
[['defaults'], []],
[['defaults', 'planet'], []]
],
'restapi': [
[['queues'], []],
[['roles'], []],
],
'runner': [
[['planet_data_dir'], ['/srv/skybase/data/planets']],
[['artiball_data_dir'], ['/srv/skybase/data/artiballs']],
[['std_templates_dir'], ['/srv/skybase/data/templates']],
[['runner_credentials_dir'], ['/etc/skybase/credentials']],
[['package_depot_s3_bucket'], ['skybase-artiball-cache']],
[['package_depot_aws_profile'], ['lithiumdev']]
],
'worker': [],
'credentials': [
[['user_id'], []],
[['key'], []],
],
}
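# Hedged reading of the schema format above (inferred from the 'runner' entries,
# not from skybase documentation): each entry pairs a key path with a default,
#   [['defaults', 'planet'], []]                          # nested key, no default
#   [['planet_data_dir'], ['/srv/skybase/data/planets']]  # key with a default value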
class SkyConfig(object):
    # reminder for different types of configs: client_data, restapi_data, worker_data, topology_data
def __init__(self, schema_name, config_data=None):
self.schema = config_schemas[schema_name]
self.data = config_data
def get_data_value(self, data_key, data_default=None):
'''
attempt to retrieve configuration value by key. return default if not found.
'''
if self.data and self.data.get(data_key):
data_value = self.data.get(data_key)
else:
data_value = data_default
return data_value
@classmethod
def init_from_file(cls, schema_name, config_dir=CONFIG_DIR):
# prepare configuration filename
config_file_name = '/'.join([config_dir, schema_name + '.yaml'])
config_file = os.path.expanduser(config_file_name)
        # read in target configuration file and attempt to init class
try:
runner_config_data = read_yaml_from_file(config_file)
except (IOError, ScannerError, ParserError) as e:
# wrap all expected errors as SkyBaseError type
raise SkyBaseConfigurationError(simple_error_format(e))
cfg = cls(schema_name, runner_config_data)
return cfg
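# Hedged usage sketch (not part of skybase): how SkyConfig is typically driven.
# The key names below come from the 'client' schema above; the fallback values
# and the call pattern are assumptions, not verified against a real deployment.
def _example_load_client_config():
    cfg = SkyConfig.init_from_file('client')  # reads CONFIG_DIR/client.yaml
    api_url = cfg.get_data_value('restapi_server_url', API_SERVER)
    defaults = cfg.get_data_value('defaults', {})
    planet = defaults.get('planet', DEFAULT_PLANET)
    return api_url, planet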
| lithiumtech/skybase.io | skybase/config/__init__.py | Python | apache-2.0 | 3,382 |
# encoding: utf-8
from distutils.core import setup
import codecs
from swatch import __version__ as VERSION
README = codecs.open('README.rst', encoding='utf-8').read()
LICENSE = codecs.open('LICENSE', encoding='utf-8').read()
setup(
name='swatch',
version=VERSION,
author='Marcos A Ojeda',
author_email='marcos@generic.cx',
url='http://github.com/nsfmc/swatch',
packages=['swatch'],
license=LICENSE,
description='a parser for adobe swatch exchange files',
long_description=README,
platforms=['any'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Artistic Software',
'Topic :: Multimedia :: Graphics',
'Topic :: Multimedia :: Graphics :: Graphics Conversion',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
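# Hedged usage note (not part of the package): a setup.py like this one is
# usually exercised from the project root with commands such as
#     python setup.py sdist      # build a source distribution
#     pip install .              # install from the checkout
# whether this particular project documents both workflows is an assumption.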
| nsfmc/swatch | setup.py | Python | mit | 1,011 |
""" basic collect and runtest protocol implementations """
from __future__ import absolute_import, division, print_function
import bdb
import os
import sys
from time import time
import py
from _pytest._code.code import TerminalRepr, ExceptionInfo
from _pytest.outcomes import skip, Skipped, TEST_OUTCOME
#
# pytest plugin hooks
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group.addoption('--durations',
action="store", type=int, default=None, metavar="N",
help="show N slowest setup/test durations (N=0 for all)."),
def pytest_terminal_summary(terminalreporter):
durations = terminalreporter.config.option.durations
if durations is None:
return
tr = terminalreporter
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, 'duration'):
dlist.append(rep)
if not dlist:
return
dlist.sort(key=lambda x: x.duration)
dlist.reverse()
if not durations:
tr.write_sep("=", "slowest test durations")
else:
tr.write_sep("=", "slowest %s test durations" % durations)
dlist = dlist[:durations]
for rep in dlist:
nodeid = rep.nodeid.replace("::()::", "::")
tr.write_line("%02.2fs %-8s %s" %
(rep.duration, rep.when, nodeid))
def pytest_sessionstart(session):
session._setupstate = SetupState()
def pytest_sessionfinish(session):
session._setupstate.teardown_all()
def pytest_runtest_protocol(item, nextitem):
item.ihook.pytest_runtest_logstart(
nodeid=item.nodeid, location=item.location,
)
runtestprotocol(item, nextitem=nextitem)
item.ihook.pytest_runtest_logfinish(
nodeid=item.nodeid, location=item.location,
)
return True
def runtestprotocol(item, log=True, nextitem=None):
hasrequest = hasattr(item, "_request")
if hasrequest and not item._request:
item._initrequest()
rep = call_and_report(item, "setup", log)
reports = [rep]
if rep.passed:
if item.config.option.setupshow:
show_test_item(item)
if not item.config.option.setuponly:
reports.append(call_and_report(item, "call", log))
reports.append(call_and_report(item, "teardown", log,
nextitem=nextitem))
# after all teardown hooks have been called
# want funcargs and request info to go away
if hasrequest:
item._request = False
item.funcargs = None
return reports
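# Hedged illustration (not from the pytest docs): for a passing test,
# runtestprotocol() above returns three reports, one per phase, roughly
#   [<TestReport ... when='setup'>, <TestReport ... when='call'>,
#    <TestReport ... when='teardown'>]
# while a setup failure skips the 'call' phase and yields only the setup and
# teardown reports.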
def show_test_item(item):
"""Show test function, parameters and the fixtures of the test item."""
tw = item.config.get_terminal_writer()
tw.line()
tw.write(' ' * 8)
tw.write(item._nodeid)
used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys())
if used_fixtures:
tw.write(' (fixtures used: {0})'.format(', '.join(used_fixtures)))
def pytest_runtest_setup(item):
_update_current_test_var(item, 'setup')
item.session._setupstate.prepare(item)
def pytest_runtest_call(item):
_update_current_test_var(item, 'call')
try:
item.runtest()
except Exception:
# Store trace info to allow postmortem debugging
type, value, tb = sys.exc_info()
tb = tb.tb_next # Skip *this* frame
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
del tb # Get rid of it in this namespace
raise
def pytest_runtest_teardown(item, nextitem):
_update_current_test_var(item, 'teardown')
item.session._setupstate.teardown_exact(item, nextitem)
_update_current_test_var(item, None)
def _update_current_test_var(item, when):
"""
Update PYTEST_CURRENT_TEST to reflect the current item and stage.
If ``when`` is None, delete PYTEST_CURRENT_TEST from the environment.
"""
var_name = 'PYTEST_CURRENT_TEST'
if when:
value = '{0} ({1})'.format(item.nodeid, when)
# don't allow null bytes on environment variables (see #2644, #2957)
value = value.replace('\x00', '(null)')
os.environ[var_name] = value
else:
os.environ.pop(var_name)
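# Hedged example (format taken from the code above, the node id is invented):
# during the 'call' phase of test_foo in test_mod.py, PYTEST_CURRENT_TEST is
# set to 'test_mod.py::test_foo (call)'.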
def pytest_report_teststatus(report):
if report.when in ("setup", "teardown"):
if report.failed:
# category, shortletter, verbose-word
return "error", "E", "ERROR"
elif report.skipped:
return "skipped", "s", "SKIPPED"
else:
return "", "", ""
#
# Implementation
def call_and_report(item, when, log=True, **kwds):
call = call_runtest_hook(item, when, **kwds)
hook = item.ihook
report = hook.pytest_runtest_makereport(item=item, call=call)
if log:
hook.pytest_runtest_logreport(report=report)
if check_interactive_exception(call, report):
hook.pytest_exception_interact(node=item, call=call, report=report)
return report
def check_interactive_exception(call, report):
return call.excinfo and not (
hasattr(report, "wasxfail") or
call.excinfo.errisinstance(skip.Exception) or
call.excinfo.errisinstance(bdb.BdbQuit))
def call_runtest_hook(item, when, **kwds):
hookname = "pytest_runtest_" + when
ihook = getattr(item.ihook, hookname)
return CallInfo(lambda: ihook(item=item, **kwds), when=when)
class CallInfo(object):
""" Result/Exception info a function invocation. """
#: None or ExceptionInfo object.
excinfo = None
def __init__(self, func, when):
#: context of invocation: one of "setup", "call",
#: "teardown", "memocollect"
self.when = when
self.start = time()
try:
self.result = func()
except KeyboardInterrupt:
self.stop = time()
raise
except: # noqa
self.excinfo = ExceptionInfo()
self.stop = time()
def __repr__(self):
if self.excinfo:
status = "exception: %s" % str(self.excinfo.value)
else:
status = "result: %r" % (self.result,)
return "<CallInfo when=%r %s>" % (self.when, status)
def getslaveinfoline(node):
try:
return node._slaveinfocache
except AttributeError:
d = node.slaveinfo
ver = "%s.%s.%s" % d['version_info'][:3]
node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
d['id'], d['sysplatform'], ver, d['executable'])
return s
class BaseReport(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def toterminal(self, out):
if hasattr(self, 'node'):
out.line(getslaveinfoline(self.node))
longrepr = self.longrepr
if longrepr is None:
return
if hasattr(longrepr, 'toterminal'):
longrepr.toterminal(out)
else:
try:
out.line(longrepr)
except UnicodeEncodeError:
out.line("<unprintable longrepr>")
def get_sections(self, prefix):
for name, content in self.sections:
if name.startswith(prefix):
yield prefix, content
@property
def longreprtext(self):
"""
Read-only property that returns the full string representation
of ``longrepr``.
.. versionadded:: 3.0
"""
tw = py.io.TerminalWriter(stringio=True)
tw.hasmarkup = False
self.toterminal(tw)
exc = tw.stringio.getvalue()
return exc.strip()
@property
def capstdout(self):
"""Return captured text from stdout, if capturing is enabled
.. versionadded:: 3.0
"""
return ''.join(content for (prefix, content) in self.get_sections('Captured stdout'))
@property
def capstderr(self):
"""Return captured text from stderr, if capturing is enabled
.. versionadded:: 3.0
"""
return ''.join(content for (prefix, content) in self.get_sections('Captured stderr'))
passed = property(lambda x: x.outcome == "passed")
failed = property(lambda x: x.outcome == "failed")
skipped = property(lambda x: x.outcome == "skipped")
@property
def fspath(self):
return self.nodeid.split("::")[0]
def pytest_runtest_makereport(item, call):
when = call.when
duration = call.stop - call.start
keywords = dict([(x, 1) for x in item.keywords])
excinfo = call.excinfo
sections = []
if not call.excinfo:
outcome = "passed"
longrepr = None
else:
if not isinstance(excinfo, ExceptionInfo):
outcome = "failed"
longrepr = excinfo
elif excinfo.errisinstance(skip.Exception):
outcome = "skipped"
r = excinfo._getreprcrash()
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
if call.when == "call":
longrepr = item.repr_failure(excinfo)
else: # exception in setup or teardown
longrepr = item._repr_failure_py(excinfo,
style=item.config.option.tbstyle)
for rwhen, key, content in item._report_sections:
sections.append(("Captured %s %s" % (key, rwhen), content))
return TestReport(item.nodeid, item.location,
keywords, outcome, longrepr, when,
sections, duration)
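# Hedged summary (not from the pytest docs) of the outcome mapping implemented above:
#   no exception            -> outcome 'passed',  longrepr None
#   skip.Exception raised   -> outcome 'skipped', longrepr (path, lineno, message)
#   any other exception     -> outcome 'failed',  longrepr built by repr_failure()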
class TestReport(BaseReport):
""" Basic test report object (also used for setup and teardown calls if
they fail).
"""
def __init__(self, nodeid, location, keywords, outcome,
longrepr, when, sections=(), duration=0, **extra):
#: normalized collection node id
self.nodeid = nodeid
#: a (filesystempath, lineno, domaininfo) tuple indicating the
#: actual location of a test item - it might be different from the
#: collected one e.g. if a method is inherited from a different module.
self.location = location
#: a name -> value dictionary containing all keywords and
#: markers associated with a test invocation.
self.keywords = keywords
#: test outcome, always one of "passed", "failed", "skipped".
self.outcome = outcome
#: None or a failure representation.
self.longrepr = longrepr
#: one of 'setup', 'call', 'teardown' to indicate runtest phase.
self.when = when
        #: list of pairs ``(str, str)`` of extra information which needs to
        #: be marshallable. Used by pytest to add captured text
#: from ``stdout`` and ``stderr``, but may be used by other plugins
#: to add arbitrary information to reports.
self.sections = list(sections)
#: time it took to run just the test
self.duration = duration
self.__dict__.update(extra)
def __repr__(self):
return "<TestReport %r when=%r outcome=%r>" % (
self.nodeid, self.when, self.outcome)
class TeardownErrorReport(BaseReport):
outcome = "failed"
when = "teardown"
def __init__(self, longrepr, **extra):
self.longrepr = longrepr
self.sections = []
self.__dict__.update(extra)
def pytest_make_collect_report(collector):
call = CallInfo(
lambda: list(collector.collect()),
'collect')
longrepr = None
if not call.excinfo:
outcome = "passed"
else:
from _pytest import nose
skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
if call.excinfo.errisinstance(skip_exceptions):
outcome = "skipped"
r = collector._repr_failure_py(call.excinfo, "line").reprcrash
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
errorinfo = collector.repr_failure(call.excinfo)
if not hasattr(errorinfo, "toterminal"):
errorinfo = CollectErrorRepr(errorinfo)
longrepr = errorinfo
rep = CollectReport(collector.nodeid, outcome, longrepr,
getattr(call, 'result', None))
rep.call = call # see collect_one_node
return rep
class CollectReport(BaseReport):
def __init__(self, nodeid, outcome, longrepr, result,
sections=(), **extra):
self.nodeid = nodeid
self.outcome = outcome
self.longrepr = longrepr
self.result = result or []
self.sections = list(sections)
self.__dict__.update(extra)
@property
def location(self):
return (self.fspath, None, self.fspath)
def __repr__(self):
return "<CollectReport %r lenresult=%s outcome=%r>" % (
self.nodeid, len(self.result), self.outcome)
class CollectErrorRepr(TerminalRepr):
def __init__(self, msg):
self.longrepr = msg
def toterminal(self, out):
out.line(self.longrepr, red=True)
class SetupState(object):
""" shared state for setting up/tearing down test items or collectors. """
def __init__(self):
self.stack = []
self._finalizers = {}
def addfinalizer(self, finalizer, colitem):
""" attach a finalizer to the given colitem.
if colitem is None, this will add a finalizer that
is called at the end of teardown_all().
"""
assert colitem and not isinstance(colitem, tuple)
assert callable(finalizer)
# assert colitem in self.stack # some unit tests don't setup stack :/
self._finalizers.setdefault(colitem, []).append(finalizer)
def _pop_and_teardown(self):
colitem = self.stack.pop()
self._teardown_with_finalization(colitem)
def _callfinalizers(self, colitem):
finalizers = self._finalizers.pop(colitem, None)
exc = None
while finalizers:
fin = finalizers.pop()
try:
fin()
except TEST_OUTCOME:
# XXX Only first exception will be seen by user,
# ideally all should be reported.
if exc is None:
exc = sys.exc_info()
if exc:
py.builtin._reraise(*exc)
def _teardown_with_finalization(self, colitem):
self._callfinalizers(colitem)
if hasattr(colitem, "teardown"):
colitem.teardown()
for colitem in self._finalizers:
assert colitem is None or colitem in self.stack \
or isinstance(colitem, tuple)
def teardown_all(self):
while self.stack:
self._pop_and_teardown()
for key in list(self._finalizers):
self._teardown_with_finalization(key)
assert not self._finalizers
def teardown_exact(self, item, nextitem):
needed_collectors = nextitem and nextitem.listchain() or []
self._teardown_towards(needed_collectors)
def _teardown_towards(self, needed_collectors):
while self.stack:
if self.stack == needed_collectors[:len(self.stack)]:
break
self._pop_and_teardown()
def prepare(self, colitem):
""" setup objects along the collector chain to the test-method
and teardown previously setup objects."""
needed_collectors = colitem.listchain()
self._teardown_towards(needed_collectors)
# check if the last collection node has raised an error
for col in self.stack:
if hasattr(col, '_prepare_exc'):
py.builtin._reraise(*col._prepare_exc)
for col in needed_collectors[len(self.stack):]:
self.stack.append(col)
try:
col.setup()
except TEST_OUTCOME:
col._prepare_exc = sys.exc_info()
raise
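# Hedged illustration (not from the pytest docs): for an item whose listchain()
# is Session -> Module -> Class -> Function, prepare() above pushes each
# collector onto self.stack and calls setup() on those not yet prepared, while
# teardown_exact()/_teardown_towards() later pop and tear down everything that
# the next item does not share with the current one.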
def collect_one_node(collector):
ihook = collector.ihook
ihook.pytest_collectstart(collector=collector)
rep = ihook.pytest_make_collect_report(collector=collector)
call = rep.__dict__.pop("call", None)
if call and check_interactive_exception(call, rep):
ihook.pytest_exception_interact(node=collector, call=call, report=rep)
return rep
| tareqalayan/pytest | _pytest/runner.py | Python | mit | 16,205 |
import unittest
from test import test_support
from itertools import *
from weakref import proxy
from decimal import Decimal
from fractions import Fraction
import sys
import operator
import random
import copy
import pickle
from functools import reduce
maxsize = test_support.MAX_Py_ssize_t
minsize = -maxsize-1
def onearg(x):
'Test function of one argument'
return 2*x
def errfunc(*args):
'Test function that raises an error'
raise ValueError
def gen3():
'Non-restartable source sequence'
for i in (0, 1, 2):
yield i
def isEven(x):
'Test predicate'
return x%2==0
def isOdd(x):
'Test predicate'
return x%2==1
class StopNow:
'Class emulating an empty iterable.'
def __iter__(self):
return self
def next(self):
raise StopIteration
def take(n, seq):
    'Convenience function for partially consuming a long or infinite iterable'
return list(islice(seq, n))
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def fact(n):
'Factorial'
return prod(range(1, n+1))
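# Hedged examples (not part of the test suite) of the helpers above:
#   take(3, count(10)) == [10, 11, 12]
#   prod([2, 3, 4]) == 24
#   fact(4) == 24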
class TestBasicOps(unittest.TestCase):
def test_chain(self):
def chain2(*iterables):
'Pure python version in the docs'
for it in iterables:
for element in it:
yield element
for c in (chain, chain2):
self.assertEqual(list(c('abc', 'def')), list('abcdef'))
self.assertEqual(list(c('abc')), list('abc'))
self.assertEqual(list(c('')), [])
self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
self.assertRaises(TypeError, list,c(2, 3))
def test_chain_from_iterable(self):
self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
self.assertEqual(list(chain.from_iterable([''])), [])
self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
def test_combinations(self):
self.assertRaises(TypeError, combinations, 'abc') # missing r argument
self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, combinations, None) # pool is not iterable
self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative
self.assertEqual(list(combinations('abc', 32)), []) # r > n
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def combinations1(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = range(r)
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
def combinations2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in permutations(range(n), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations3(iterable, r):
'Pure python version from cwr()'
pool = tuple(iterable)
n = len(pool)
for indices in combinations_with_replacement(range(n), r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(combinations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) // fact(r) // fact(n-r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for c in result:
self.assertEqual(len(c), r) # r-length combinations
self.assertEqual(len(set(c)), r) # no duplicate elements
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(list(c),
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version
self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version
                self.assertEqual(result, list(combinations3(values, r))) # matches third pure python version
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_tuple_reuse(self):
self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1)
def test_combinations_with_replacement(self):
cwr = combinations_with_replacement
self.assertRaises(TypeError, cwr, 'abc') # missing r argument
self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, cwr, None) # pool is not iterable
self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative
self.assertEqual(list(cwr('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def cwr1(iterable, r):
'Pure python version shown in the docs'
# number items returned: (n+r-1)! / r! / (n-1)! when n>0
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
def cwr2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in product(range(n), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def numcombs(n, r):
if not n:
return 0 if r else 1
return fact(n+r-1) // fact(r) // fact(n-1)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(cwr(values, r))
self.assertEqual(len(result), numcombs(n, r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
regular_combs = list(combinations(values, r)) # compare to combs without replacement
if n == 0 or r <= 1:
self.assertEqual(result, regular_combs) # cases that should be identical
else:
self.assertTrue(set(result) >= set(regular_combs)) # rest should be supersets of regular combs
for c in result:
self.assertEqual(len(c), r) # r-length combinations
noruns = [k for k,v in groupby(c)] # combo without consecutive repeats
self.assertEqual(len(noruns), len(set(noruns))) # no repeats other than consecutive
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(noruns,
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version
self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_with_replacement_tuple_reuse(self):
cwr = combinations_with_replacement
self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1)
def test_permutations(self):
self.assertRaises(TypeError, permutations) # too few arguments
self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, permutations, None) # pool is not iterable
self.assertRaises(ValueError, permutations, 'abc', -2) # r is negative
self.assertEqual(list(permutations('abc', 32)), []) # r > n
self.assertRaises(TypeError, permutations, 'abc', 's') # r is not an int or None
self.assertEqual(list(permutations(range(3), 2)),
[(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])
def permutations1(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = range(n)
cycles = range(n, n-r, -1)
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
def permutations2(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(range(n), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(permutations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) // fact(n-r)) # right number of perms
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for p in result:
self.assertEqual(len(p), r) # r-length permutations
self.assertEqual(len(set(p)), r) # no duplicate elements
self.assertTrue(all(e in values for e in p)) # elements taken from input iterable
self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version
self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version
if r == n:
self.assertEqual(result, list(permutations(values, None))) # test r as None
self.assertEqual(result, list(permutations(values))) # test default r
@test_support.impl_detail("tuple resuse is CPython specific")
def test_permutations_tuple_reuse(self):
self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1)
def test_combinatorics(self):
# Test relationships between product(), permutations(),
# combinations() and combinations_with_replacement().
for n in range(6):
s = 'ABCDEFG'[:n]
for r in range(8):
prod = list(product(s, repeat=r))
cwr = list(combinations_with_replacement(s, r))
perm = list(permutations(s, r))
comb = list(combinations(s, r))
# Check size
self.assertEqual(len(prod), n**r)
self.assertEqual(len(cwr), (fact(n+r-1) // fact(r) // fact(n-1)) if n else (not r))
self.assertEqual(len(perm), 0 if r>n else fact(n) // fact(n-r))
self.assertEqual(len(comb), 0 if r>n else fact(n) // fact(r) // fact(n-r))
# Check lexicographic order without repeated tuples
self.assertEqual(prod, sorted(set(prod)))
self.assertEqual(cwr, sorted(set(cwr)))
self.assertEqual(perm, sorted(set(perm)))
self.assertEqual(comb, sorted(set(comb)))
# Check interrelationships
self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted
self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups
self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted
self.assertEqual(comb, [t for t in cwr if len(set(t))==r]) # comb: cwrs without dups
self.assertEqual(comb, filter(set(cwr).__contains__, perm)) # comb: perm that is a cwr
self.assertEqual(comb, filter(set(perm).__contains__, cwr)) # comb: cwr that is a perm
self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm
def test_compress(self):
self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list(''))
self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC'))
self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC'))
n = 10000
data = chain.from_iterable(repeat(range(6), n))
selectors = chain.from_iterable(repeat((0, 1)))
self.assertEqual(list(compress(data, selectors)), [1,3,5] * n)
self.assertRaises(TypeError, compress, None, range(6)) # 1st arg not iterable
self.assertRaises(TypeError, compress, range(6), None) # 2nd arg not iterable
self.assertRaises(TypeError, compress, range(6)) # too few args
self.assertRaises(TypeError, compress, range(6), None) # too many args
def test_count(self):
self.assertEqual(zip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(zip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
self.assertEqual(take(2, zip('abc',count(3))), [('a', 3), ('b', 4)])
self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
self.assertRaises(TypeError, count, 2, 3, 4)
self.assertRaises(TypeError, count, 'a')
self.assertEqual(list(islice(count(maxsize-5), 10)), range(maxsize-5, maxsize+5))
self.assertEqual(list(islice(count(-maxsize-5), 10)), range(-maxsize-5, -maxsize+5))
c = count(3)
self.assertEqual(repr(c), 'count(3)')
c.next()
self.assertEqual(repr(c), 'count(4)')
c = count(-9)
self.assertEqual(repr(c), 'count(-9)')
c.next()
self.assertEqual(repr(count(10.25)), 'count(10.25)')
self.assertEqual(c.next(), -8)
for i in (-sys.maxint-5, -sys.maxint+5 ,-10, -1, 0, 10, sys.maxint-5, sys.maxint+5):
# Test repr (ignoring the L in longs)
r1 = repr(count(i)).replace('L', '')
r2 = 'count(%r)'.__mod__(i).replace('L', '')
self.assertEqual(r1, r2)
# check copy, deepcopy, pickle
for value in -3, 3, sys.maxint-5, sys.maxint+5:
c = count(value)
self.assertEqual(next(copy.copy(c)), value)
self.assertEqual(next(copy.deepcopy(c)), value)
self.assertEqual(next(pickle.loads(pickle.dumps(c))), value)
def test_count_with_stride(self):
self.assertEqual(zip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(zip('abc',count(start=2,step=3)),
[('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(zip('abc',count(step=-1)),
[('a', 0), ('b', -1), ('c', -2)])
self.assertEqual(zip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)])
self.assertEqual(zip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)])
self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3)))
self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3)))
self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j])
self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))),
[Decimal('1.1'), Decimal('1.2'), Decimal('1.3')])
self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))),
[Fraction(2,3), Fraction(17,21), Fraction(20,21)])
self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0]))
c = count(3, 5)
self.assertEqual(repr(c), 'count(3, 5)')
c.next()
self.assertEqual(repr(c), 'count(8, 5)')
c = count(-9, 0)
self.assertEqual(repr(c), 'count(-9, 0)')
c.next()
self.assertEqual(repr(c), 'count(-9, 0)')
c = count(-9, -3)
self.assertEqual(repr(c), 'count(-9, -3)')
c.next()
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)')
self.assertEqual(repr(count(10.5, 1)), 'count(10.5)') # suppress step=1 when it's an int
        self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)') # do show float values like 1.0
for i in (-sys.maxint-5, -sys.maxint+5 ,-10, -1, 0, 10, sys.maxint-5, sys.maxint+5):
for j in (-sys.maxint-5, -sys.maxint+5 ,-10, -1, 0, 1, 10, sys.maxint-5, sys.maxint+5):
# Test repr (ignoring the L in longs)
r1 = repr(count(i, j)).replace('L', '')
if j == 1:
r2 = ('count(%r)' % i).replace('L', '')
else:
r2 = ('count(%r, %r)' % (i, j)).replace('L', '')
self.assertEqual(r1, r2)
def test_cycle(self):
self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
self.assertEqual(list(cycle('')), [])
self.assertRaises(TypeError, cycle)
self.assertRaises(TypeError, cycle, 5)
self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])
def test_groupby(self):
# Check whether it accepts arguments correctly
self.assertEqual([], list(groupby([])))
self.assertEqual([], list(groupby([], key=id)))
self.assertRaises(TypeError, list, groupby('abc', []))
self.assertRaises(TypeError, groupby, None)
self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)
# Check normal input
s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
(2,15,22), (3,16,23), (3,17,23)]
dup = []
for k, g in groupby(s, lambda r:r[0]):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested case
dup = []
for k, g in groupby(s, lambda r:r[0]):
for ik, ig in groupby(g, lambda r:r[2]):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check case where inner iterator is not used
keys = [k for k, g in groupby(s, lambda r:r[0])]
expectedkeys = set([r[0] for r in s])
self.assertEqual(set(keys), expectedkeys)
self.assertEqual(len(keys), len(expectedkeys))
# Exercise pipes and filters style
s = 'abracadabra'
# sort s | uniq
r = [k for k, g in groupby(sorted(s))]
self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
# sort s | uniq -d
r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
self.assertEqual(r, ['a', 'b', 'r'])
# sort s | uniq -c
r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
# sort s | uniq -c | sort -rn | head -3
r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])
# iter.next failure
class ExpectedError(Exception):
pass
def delayed_raise(n=0):
for i in range(n):
yield 'yo'
raise ExpectedError
def gulp(iterable, keyp=None, func=list):
return [func(g) for k, g in groupby(iterable, keyp)]
# iter.next failure on outer object
self.assertRaises(ExpectedError, gulp, delayed_raise(0))
# iter.next failure on inner object
self.assertRaises(ExpectedError, gulp, delayed_raise(1))
# __cmp__ failure
class DummyCmp:
def __cmp__(self, dst):
raise ExpectedError
s = [DummyCmp(), DummyCmp(), None]
# __cmp__ failure on outer object
self.assertRaises(ExpectedError, gulp, s, func=id)
# __cmp__ failure on inner object
self.assertRaises(ExpectedError, gulp, s)
# keyfunc failure
def keyfunc(obj):
if keyfunc.skip > 0:
keyfunc.skip -= 1
return obj
else:
raise ExpectedError
# keyfunc failure on outer object
keyfunc.skip = 0
self.assertRaises(ExpectedError, gulp, [None], keyfunc)
keyfunc.skip = 1
self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
def test_ifilter(self):
self.assertEqual(list(ifilter(isEven, range(6))), [0,2,4])
self.assertEqual(list(ifilter(None, [0,1,0,2,0])), [1,2])
self.assertEqual(list(ifilter(bool, [0,1,0,2,0])), [1,2])
self.assertEqual(take(4, ifilter(isEven, count())), [0,2,4,6])
self.assertRaises(TypeError, ifilter)
self.assertRaises(TypeError, ifilter, lambda x:x)
self.assertRaises(TypeError, ifilter, lambda x:x, range(6), 7)
self.assertRaises(TypeError, ifilter, isEven, 3)
self.assertRaises(TypeError, ifilter(range(6), range(6)).next)
def test_ifilterfalse(self):
self.assertEqual(list(ifilterfalse(isEven, range(6))), [1,3,5])
self.assertEqual(list(ifilterfalse(None, [0,1,0,2,0])), [0,0,0])
self.assertEqual(list(ifilterfalse(bool, [0,1,0,2,0])), [0,0,0])
self.assertEqual(take(4, ifilterfalse(isEven, count())), [1,3,5,7])
self.assertRaises(TypeError, ifilterfalse)
self.assertRaises(TypeError, ifilterfalse, lambda x:x)
self.assertRaises(TypeError, ifilterfalse, lambda x:x, range(6), 7)
self.assertRaises(TypeError, ifilterfalse, isEven, 3)
self.assertRaises(TypeError, ifilterfalse(range(6), range(6)).next)
def test_izip(self):
ans = [(x,y) for x, y in izip('abc',count())]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(list(izip('abc', range(6))), zip('abc', range(6)))
self.assertEqual(list(izip('abcdef', range(3))), zip('abcdef', range(3)))
self.assertEqual(take(3,izip('abcdef', count())), zip('abcdef', range(3)))
self.assertEqual(list(izip('abcdef')), zip('abcdef'))
self.assertEqual(list(izip()), zip())
self.assertRaises(TypeError, izip, 3)
self.assertRaises(TypeError, izip, range(3), 3)
self.assertEqual([tuple(list(pair)) for pair in izip('abc', 'def')],
zip('abc', 'def'))
self.assertEqual([pair for pair in izip('abc', 'def')],
zip('abc', 'def'))
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_izip_tuple_resuse(self):
ids = map(id, izip('abc', 'def'))
self.assertEqual(min(ids), max(ids))
ids = map(id, list(izip('abc', 'def')))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_iziplongest(self):
for args in [
['abc', range(6)],
[range(6), 'abc'],
[range(1000), range(2000,2100), range(3000,3050)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
]:
# target = map(None, *args) <- this raises a py3k warning
# this is the replacement:
target = [tuple([arg[i] if i < len(arg) else None for arg in args])
for i in range(max(map(len, args)))]
self.assertEqual(list(izip_longest(*args)), target)
self.assertEqual(list(izip_longest(*args, **{})), target)
target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X'
self.assertEqual(list(izip_longest(*args, **dict(fillvalue='X'))), target)
self.assertEqual(take(3,izip_longest('abcdef', count())), zip('abcdef', range(3))) # take 3 from infinite input
self.assertEqual(list(izip_longest()), zip())
self.assertEqual(list(izip_longest([])), zip([]))
self.assertEqual(list(izip_longest('abcdef')), zip('abcdef'))
self.assertEqual(list(izip_longest('abc', 'defg', **{})),
zip(list('abc') + [None], 'defg')) # empty keyword dict
self.assertRaises(TypeError, izip_longest, 3)
self.assertRaises(TypeError, izip_longest, range(3), 3)
for stmt in [
"izip_longest('abc', fv=1)",
"izip_longest('abc', fillvalue=1, bogus_keyword=None)",
]:
try:
eval(stmt, globals(), locals())
except TypeError:
pass
else:
                self.fail('Did not raise TypeError in: ' + stmt)
self.assertEqual([tuple(list(pair)) for pair in izip_longest('abc', 'def')],
zip('abc', 'def'))
self.assertEqual([pair for pair in izip_longest('abc', 'def')],
zip('abc', 'def'))
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_izip_longest_tuple_reuse(self):
ids = map(id, izip_longest('abc', 'def'))
self.assertEqual(min(ids), max(ids))
ids = map(id, list(izip_longest('abc', 'def')))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_bug_7244(self):
class Repeater(object):
# this class is similar to itertools.repeat
def __init__(self, o, t, e):
self.o = o
self.t = int(t)
self.e = e
def __iter__(self): # its iterator is itself
return self
def next(self):
if self.t > 0:
self.t -= 1
return self.o
else:
raise self.e
        # Formerly this code would fail in debug mode
# with Undetected Error and Stop Iteration
r1 = Repeater(1, 3, StopIteration)
r2 = Repeater(2, 4, StopIteration)
def run(r1, r2):
result = []
for i, j in izip_longest(r1, r2, fillvalue=0):
with test_support.captured_output('stdout'):
print (i, j)
result.append((i, j))
return result
self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)])
# Formerly, the RuntimeError would be lost
# and StopIteration would stop as expected
r1 = Repeater(1, 3, RuntimeError)
r2 = Repeater(2, 4, StopIteration)
it = izip_longest(r1, r2, fillvalue=0)
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertRaises(RuntimeError, next, it)
def test_product(self):
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(product(*args)), result)
for r in range(4):
self.assertEqual(list(product(*(args*r))),
list(product(*args, **dict(repeat=r))))
self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
self.assertRaises(TypeError, product, range(6), None)
def product1(*args, **kwds):
pools = map(tuple, args) * kwds.get('repeat', 1)
n = len(pools)
if n == 0:
yield ()
return
if any(len(pool) == 0 for pool in pools):
return
indices = [0] * n
yield tuple(pool[i] for pool, i in zip(pools, indices))
while 1:
for i in reversed(range(n)): # right to left
if indices[i] == len(pools[i]) - 1:
continue
indices[i] += 1
for j in range(i+1, n):
indices[j] = 0
yield tuple(pool[i] for pool, i in zip(pools, indices))
break
else:
return
def product2(*args, **kwds):
'Pure python version used in docs'
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
argtypes = ['', 'abc', '', xrange(0), xrange(4), dict(a=1, b=2, c=3),
set('abcdefg'), range(11), tuple(range(13))]
for i in range(100):
args = [random.choice(argtypes) for j in range(random.randrange(5))]
expected_len = prod(map(len, args))
self.assertEqual(len(list(product(*args))), expected_len)
self.assertEqual(list(product(*args)), list(product1(*args)))
self.assertEqual(list(product(*args)), list(product2(*args)))
args = map(iter, args)
self.assertEqual(len(list(product(*args))), expected_len)
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_product_tuple_reuse(self):
self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
def test_repeat(self):
self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
self.assertEqual(zip(xrange(3),repeat('a')),
[(0, 'a'), (1, 'a'), (2, 'a')])
self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
self.assertEqual(list(repeat('a', 0)), [])
self.assertEqual(list(repeat('a', -3)), [])
self.assertRaises(TypeError, repeat)
self.assertRaises(TypeError, repeat, None, 3, 4)
self.assertRaises(TypeError, repeat, None, 'a')
r = repeat(1+0j)
self.assertEqual(repr(r), 'repeat((1+0j))')
r = repeat(1+0j, 5)
self.assertEqual(repr(r), 'repeat((1+0j), 5)')
list(r)
self.assertEqual(repr(r), 'repeat((1+0j), 0)')
def test_imap(self):
self.assertEqual(list(imap(operator.pow, range(3), range(1,7))),
[0**1, 1**2, 2**3])
self.assertEqual(list(imap(None, 'abc', range(5))),
[('a',0),('b',1),('c',2)])
self.assertEqual(list(imap(None, 'abc', count())),
[('a',0),('b',1),('c',2)])
self.assertEqual(take(2,imap(None, 'abc', count())),
[('a',0),('b',1)])
self.assertEqual(list(imap(operator.pow, [])), [])
self.assertRaises(TypeError, imap)
self.assertRaises(TypeError, imap, operator.neg)
self.assertRaises(TypeError, imap(10, range(5)).next)
self.assertRaises(ValueError, imap(errfunc, [4], [5]).next)
self.assertRaises(TypeError, imap(onearg, [4], [5]).next)
def test_starmap(self):
self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
[0**1, 1**2, 2**3])
self.assertEqual(take(3, starmap(operator.pow, izip(count(), count(1)))),
[0**1, 1**2, 2**3])
self.assertEqual(list(starmap(operator.pow, [])), [])
self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
self.assertRaises(TypeError, starmap)
self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, starmap(10, [(4,5)]).next)
self.assertRaises(ValueError, starmap(errfunc, [(4,5)]).next)
self.assertRaises(TypeError, starmap(onearg, [(4,5)]).next)
def test_islice(self):
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 3),
(20,)
]:
self.assertEqual(list(islice(xrange(100), *args)), range(*args))
for args, tgtargs in [ # Stop when seqn is exhausted
((10, 110, 3), ((10, 100, 3))),
((10, 110), ((10, 100))),
((110,), (100,))
]:
self.assertEqual(list(islice(xrange(100), *args)), range(*tgtargs))
# Test stop=None
self.assertEqual(list(islice(xrange(10), None)), range(10))
self.assertEqual(list(islice(xrange(10), None, None)), range(10))
self.assertEqual(list(islice(xrange(10), None, None, None)), range(10))
self.assertEqual(list(islice(xrange(10), 2, None)), range(2, 10))
self.assertEqual(list(islice(xrange(10), 1, None, 2)), range(1, 10, 2))
# Test number of items consumed SF #1171417
it = iter(range(10))
self.assertEqual(list(islice(it, 3)), range(3))
self.assertEqual(list(it), range(3, 10))
# Test invalid arguments
self.assertRaises(TypeError, islice, xrange(10))
self.assertRaises(TypeError, islice, xrange(10), 1, 2, 3, 4)
self.assertRaises(ValueError, islice, xrange(10), -5, 10, 1)
self.assertRaises(ValueError, islice, xrange(10), 1, -5, -1)
self.assertRaises(ValueError, islice, xrange(10), 1, 10, -1)
self.assertRaises(ValueError, islice, xrange(10), 1, 10, 0)
self.assertRaises(ValueError, islice, xrange(10), 'a')
self.assertRaises(ValueError, islice, xrange(10), 'a', 1)
self.assertRaises(ValueError, islice, xrange(10), 1, 'a')
self.assertRaises(ValueError, islice, xrange(10), 'a', 1, 1)
self.assertRaises(ValueError, islice, xrange(10), 1, 'a', 1)
self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)
        # Issue #10323: Leave islice in a predictable state
c = count()
self.assertEqual(list(islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
def test_takewhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
underten = lambda x: x<10
self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
self.assertEqual(list(takewhile(underten, [])), [])
self.assertRaises(TypeError, takewhile)
self.assertRaises(TypeError, takewhile, operator.pow)
self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, takewhile(10, [(4,5)]).next)
self.assertRaises(ValueError, takewhile(errfunc, [(4,5)]).next)
t = takewhile(bool, [1, 1, 1, 0, 0, 0])
self.assertEqual(list(t), [1, 1, 1])
self.assertRaises(StopIteration, t.next)
def test_dropwhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
underten = lambda x: x<10
self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
self.assertEqual(list(dropwhile(underten, [])), [])
self.assertRaises(TypeError, dropwhile)
self.assertRaises(TypeError, dropwhile, operator.pow)
self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, dropwhile(10, [(4,5)]).next)
self.assertRaises(ValueError, dropwhile(errfunc, [(4,5)]).next)
def test_tee(self):
n = 200
def irange(n):
for i in xrange(n):
yield i
a, b = tee([]) # test empty iterator
self.assertEqual(list(a), [])
self.assertEqual(list(b), [])
a, b = tee(irange(n)) # test 100% interleaved
self.assertEqual(zip(a,b), zip(range(n),range(n)))
a, b = tee(irange(n)) # test 0% interleaved
self.assertEqual(list(a), range(n))
self.assertEqual(list(b), range(n))
a, b = tee(irange(n)) # test dealloc of leading iterator
for i in xrange(100):
self.assertEqual(a.next(), i)
del a
self.assertEqual(list(b), range(n))
a, b = tee(irange(n)) # test dealloc of trailing iterator
for i in xrange(100):
self.assertEqual(a.next(), i)
del b
self.assertEqual(list(a), range(100, n))
for j in xrange(5): # test randomly interleaved
order = [0]*n + [1]*n
random.shuffle(order)
lists = ([], [])
its = tee(irange(n))
for i in order:
value = its[i].next()
lists[i].append(value)
self.assertEqual(lists[0], range(n))
self.assertEqual(lists[1], range(n))
# test argument format checking
self.assertRaises(TypeError, tee)
self.assertRaises(TypeError, tee, 3)
self.assertRaises(TypeError, tee, [1,2], 'x')
self.assertRaises(TypeError, tee, [1,2], 3, 'x')
# tee object should be instantiable
a, b = tee('abc')
c = type(a)('def')
self.assertEqual(list(c), list('def'))
# test long-lagged and multi-way split
a, b, c = tee(xrange(2000), 3)
for i in xrange(100):
self.assertEqual(a.next(), i)
self.assertEqual(list(b), range(2000))
self.assertEqual([c.next(), c.next()], range(2))
self.assertEqual(list(a), range(100,2000))
self.assertEqual(list(c), range(2,2000))
# test values of n
self.assertRaises(TypeError, tee, 'abc', 'invalid')
self.assertRaises(ValueError, tee, [], -1)
for n in xrange(5):
result = tee('abc', n)
self.assertEqual(type(result), tuple)
self.assertEqual(len(result), n)
self.assertEqual(map(list, result), [list('abc')]*n)
# tee pass-through to copyable iterator
a, b = tee('abc')
c, d = tee(a)
self.assertTrue(a is c)
# test tee_new
t1, t2 = tee('abc')
tnew = type(t1)
self.assertRaises(TypeError, tnew)
self.assertRaises(TypeError, tnew, 10)
t3 = tnew(t1)
self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc'))
# test that tee objects are weak referencable
a, b = tee(xrange(10))
p = proxy(a)
self.assertEqual(getattr(p, '__class__'), type(b))
del a
self.assertRaises(ReferenceError, getattr, p, '__class__')
# Issue 13454: Crash when deleting backward iterator from tee()
def test_tee_del_backward(self):
forward, backward = tee(repeat(None, 20000000))
any(forward) # exhaust the iterator
del backward
def test_StopIteration(self):
self.assertRaises(StopIteration, izip().next)
for f in (chain, cycle, izip, groupby):
self.assertRaises(StopIteration, f([]).next)
self.assertRaises(StopIteration, f(StopNow()).next)
self.assertRaises(StopIteration, islice([], None).next)
self.assertRaises(StopIteration, islice(StopNow(), None).next)
p, q = tee([])
self.assertRaises(StopIteration, p.next)
self.assertRaises(StopIteration, q.next)
p, q = tee(StopNow())
self.assertRaises(StopIteration, p.next)
self.assertRaises(StopIteration, q.next)
self.assertRaises(StopIteration, repeat(None, 0).next)
for f in (ifilter, ifilterfalse, imap, takewhile, dropwhile, starmap):
self.assertRaises(StopIteration, f(lambda x:x, []).next)
self.assertRaises(StopIteration, f(lambda x:x, StopNow()).next)
class TestExamples(unittest.TestCase):
def test_chain(self):
self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')
def test_chain_from_iterable(self):
self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF')
def test_combinations(self):
self.assertEqual(list(combinations('ABCD', 2)),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def test_combinations_with_replacement(self):
self.assertEqual(list(combinations_with_replacement('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def test_compress(self):
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
def test_count(self):
self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])
def test_cycle(self):
self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))
def test_dropwhile(self):
self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1])
def test_groupby(self):
self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')],
list('ABCDAB'))
self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')],
[list('AAAA'), list('BBB'), list('CC'), list('D')])
def test_ifilter(self):
self.assertEqual(list(ifilter(lambda x: x%2, range(10))), [1,3,5,7,9])
def test_ifilterfalse(self):
self.assertEqual(list(ifilterfalse(lambda x: x%2, range(10))), [0,2,4,6,8])
def test_imap(self):
self.assertEqual(list(imap(pow, (2,3,10), (5,2,3))), [32, 9, 1000])
def test_islice(self):
self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))
def test_izip(self):
self.assertEqual(list(izip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])
def test_izip_longest(self):
self.assertEqual(list(izip_longest('ABCD', 'xy', fillvalue='-')),
[('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])
def test_permutations(self):
self.assertEqual(list(permutations('ABCD', 2)),
map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split()))
self.assertEqual(list(permutations(range(3))),
[(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])
def test_product(self):
self.assertEqual(list(product('ABCD', 'xy')),
map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split()))
self.assertEqual(list(product(range(2), repeat=3)),
[(0,0,0), (0,0,1), (0,1,0), (0,1,1),
(1,0,0), (1,0,1), (1,1,0), (1,1,1)])
def test_repeat(self):
self.assertEqual(list(repeat(10, 3)), [10, 10, 10])
    def test_starmap(self):
self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])),
[32, 9, 1000])
def test_takewhile(self):
self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
class TestGC(unittest.TestCase):
def makecycle(self, iterator, container):
container.append(iterator)
iterator.next()
del container, iterator
def test_chain(self):
a = []
self.makecycle(chain(a), a)
def test_chain_from_iterable(self):
a = []
self.makecycle(chain.from_iterable([a]), a)
def test_combinations(self):
a = []
self.makecycle(combinations([1,2,a,3], 3), a)
def test_combinations_with_replacement(self):
a = []
self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)
def test_compress(self):
a = []
self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)
def test_count(self):
a = []
Int = type('Int', (int,), dict(x=a))
self.makecycle(count(Int(0), Int(1)), a)
def test_cycle(self):
a = []
self.makecycle(cycle([a]*2), a)
def test_dropwhile(self):
a = []
self.makecycle(dropwhile(bool, [0, a, a]), a)
def test_groupby(self):
a = []
self.makecycle(groupby([a]*2, lambda x:x), a)
def test_issue2246(self):
# Issue 2246 -- the _grouper iterator was not included in GC
n = 10
keyfunc = lambda x: x
for i, j in groupby(xrange(n), key=keyfunc):
keyfunc.__dict__.setdefault('x',[]).append(j)
def test_ifilter(self):
a = []
self.makecycle(ifilter(lambda x:True, [a]*2), a)
def test_ifilterfalse(self):
a = []
self.makecycle(ifilterfalse(lambda x:False, a), a)
def test_izip(self):
a = []
self.makecycle(izip([a]*2, [a]*3), a)
def test_izip_longest(self):
a = []
self.makecycle(izip_longest([a]*2, [a]*3), a)
b = [a, None]
self.makecycle(izip_longest([a]*2, [a]*3, fillvalue=b), a)
def test_imap(self):
a = []
self.makecycle(imap(lambda x:x, [a]*2), a)
def test_islice(self):
a = []
self.makecycle(islice([a]*2, None), a)
def test_permutations(self):
a = []
self.makecycle(permutations([1,2,a,3], 3), a)
def test_product(self):
a = []
self.makecycle(product([1,2,a,3], repeat=3), a)
def test_repeat(self):
a = []
self.makecycle(repeat(a), a)
def test_starmap(self):
a = []
self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
def test_takewhile(self):
a = []
self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing next()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def next(self):
raise StopIteration
def L(seqn):
'Test multiple tiers of iterators'
return chain(imap(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_chain(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(chain(g(s))), list(g(s)))
self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
self.assertRaises(TypeError, list, chain(X(s)))
self.assertRaises(TypeError, list, chain(N(s)))
self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_compress(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
n = len(s)
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
self.assertRaises(TypeError, compress, X(s), repeat(1))
self.assertRaises(TypeError, list, compress(N(s), repeat(1)))
self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))
def test_product(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
self.assertRaises(TypeError, product, X(s))
self.assertRaises(TypeError, product, N(s))
self.assertRaises(ZeroDivisionError, product, E(s))
def test_cycle(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgtlen = len(s) * 3
expected = list(g(s))*3
actual = list(islice(cycle(g(s)), tgtlen))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, cycle, X(s))
self.assertRaises(TypeError, list, cycle(N(s)))
self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
def test_groupby(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
self.assertRaises(TypeError, groupby, X(s))
self.assertRaises(TypeError, list, groupby(N(s)))
self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
def test_ifilter(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(ifilter(isEven, g(s))), filter(isEven, g(s)))
self.assertRaises(TypeError, ifilter, isEven, X(s))
self.assertRaises(TypeError, list, ifilter(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, ifilter(isEven, E(s)))
def test_ifilterfalse(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(ifilterfalse(isEven, g(s))), filter(isOdd, g(s)))
self.assertRaises(TypeError, ifilterfalse, isEven, X(s))
self.assertRaises(TypeError, list, ifilterfalse(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, ifilterfalse(isEven, E(s)))
def test_izip(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(izip(g(s))), zip(g(s)))
self.assertEqual(list(izip(g(s), g(s))), zip(g(s), g(s)))
self.assertRaises(TypeError, izip, X(s))
self.assertRaises(TypeError, list, izip(N(s)))
self.assertRaises(ZeroDivisionError, list, izip(E(s)))
def test_iziplongest(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(izip_longest(g(s))), zip(g(s)))
self.assertEqual(list(izip_longest(g(s), g(s))), zip(g(s), g(s)))
self.assertRaises(TypeError, izip_longest, X(s))
self.assertRaises(TypeError, list, izip_longest(N(s)))
self.assertRaises(ZeroDivisionError, list, izip_longest(E(s)))
def test_imap(self):
for s in (range(10), range(0), range(100), (7,11), xrange(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(imap(onearg, g(s))), map(onearg, g(s)))
self.assertEqual(list(imap(operator.pow, g(s), g(s))), map(operator.pow, g(s), g(s)))
self.assertRaises(TypeError, imap, onearg, X(s))
self.assertRaises(TypeError, list, imap(onearg, N(s)))
self.assertRaises(ZeroDivisionError, list, imap(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, list, islice(N(s), 10))
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), xrange(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = zip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))), map(operator.pow, g(s), g(s)))
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, list, starmap(operator.pow, N(ss)))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, list, takewhile(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, list, dropwhile(isOdd, N(s)))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
it1, it2 = tee(g(s))
self.assertEqual(list(it1), list(g(s)))
self.assertEqual(list(it2), list(g(s)))
self.assertRaises(TypeError, tee, X(s))
self.assertRaises(TypeError, list, tee(N(s))[0])
self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
class LengthTransparency(unittest.TestCase):
def test_repeat(self):
from test.test_iterlen import len
self.assertEqual(len(repeat(None, 50)), 50)
self.assertRaises(TypeError, len, repeat(None))
class RegressionTests(unittest.TestCase):
def test_sf_793826(self):
# Fix Armin Rigo's successful efforts to wreak havoc
def mutatingtuple(tuple1, f, tuple2):
# this builds a tuple t which is a copy of tuple1,
# then calls f(t), then mutates t to be equal to tuple2
# (needs len(tuple1) == len(tuple2)).
def g(value, first=[1]):
if first:
del first[:]
f(z.next())
return value
items = list(tuple2)
items[1:1] = list(tuple1)
gen = imap(g, items)
z = izip(*[gen]*len(tuple1))
z.next()
def f(t):
global T
T = t
first[:] = list(T)
first = []
mutatingtuple((1,2,3), f, (4,5,6))
second = list(T)
self.assertEqual(first, second)
def test_sf_950057(self):
# Make sure that chain() and cycle() catch exceptions immediately
# rather than when shifting between input sources
def gen1():
hist.append(0)
yield 1
hist.append(1)
raise AssertionError
hist.append(2)
def gen2(x):
hist.append(3)
yield 2
hist.append(4)
if x:
raise StopIteration
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, cycle(gen1()))
self.assertEqual(hist, [0,1])
class SubclassWithKwargsTest(unittest.TestCase):
def test_keywords_in_subclass(self):
# count is not subclassable...
for cls in (repeat, izip, ifilter, ifilterfalse, chain, imap,
starmap, islice, takewhile, dropwhile, cycle, compress):
class Subclass(cls):
def __init__(self, newarg=None, *args):
cls.__init__(self, *args)
try:
Subclass(newarg=1)
except TypeError, err:
# we expect type errors because of wrong argument count
self.assertNotIn("does not take keyword arguments", err.args[0])
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in izip(count(1200), amounts):
... print 'Check %d is for $%.2f' % (checknum, amount)
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in imap(operator.pow, xrange(1,4), repeat(3)):
... print cube
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print name.title()
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.iteritems()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print k, map(itemgetter(0), g)
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# the same group.
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]):
... print map(operator.itemgetter(1), g)
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, iterable):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
>>> def enumerate(iterable, start=0):
... return izip(count(start), iterable)
>>> def tabulate(function, start=0):
... "Return function(0), function(1), ..."
... return imap(function, count(start))
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
>>> def quantify(iterable, pred=bool):
... "Count how many times the predicate is true"
... return sum(imap(pred, iterable))
>>> def padnone(iterable):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(iterable, repeat(None))
>>> def ncycles(iterable, n):
... "Returns the sequence elements n times"
... return chain(*repeat(iterable, n))
>>> def dotproduct(vec1, vec2):
... return sum(imap(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain.from_iterable(listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def pairwise(iterable):
... "s -> (s0,s1), (s1,s2), (s2, s3), ..."
... a, b = tee(iterable)
... for elem in b:
... break
... return izip(a, b)
>>> def grouper(n, iterable, fillvalue=None):
... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
... args = [iter(iterable)] * n
... return izip_longest(fillvalue=fillvalue, *args)
>>> def roundrobin(*iterables):
... "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
... # Recipe credited to George Sakkis
... pending = len(iterables)
... nexts = cycle(iter(it).next for it in iterables)
... while pending:
... try:
... for next in nexts:
... yield next()
... except StopIteration:
... pending -= 1
... nexts = cycle(islice(nexts, pending))
>>> def powerset(iterable):
... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
... s = list(iterable)
... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
>>> def unique_everseen(iterable, key=None):
... "List unique elements, preserving order. Remember all elements ever seen."
... # unique_everseen('AAAABBBCCDAABBB') --> A B C D
... # unique_everseen('ABBCcAD', str.lower) --> A B C D
... seen = set()
... seen_add = seen.add
... if key is None:
... for element in iterable:
... if element not in seen:
... seen_add(element)
... yield element
... else:
... for element in iterable:
... k = key(element)
... if k not in seen:
... seen_add(k)
... yield element
>>> def unique_justseen(iterable, key=None):
... "List unique elements, preserving order. Remember only the element just seen."
... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
... # unique_justseen('ABBCcAD', str.lower) --> A B C A D
... return imap(next, imap(itemgetter(1), groupby(iterable, key)))
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> nth('abcde', 3)
'd'
>>> nth('abcde', 9) is None
True
>>> quantify(xrange(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, imap(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
>>> list(pairwise([]))
[]
>>> list(pairwise('a'))
[]
>>> list(islice(padnone('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]
>>> list(roundrobin('abc', 'd', 'ef'))
['a', 'd', 'e', 'b', 'f', 'c']
>>> list(powerset([1,2,3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
>>> all(len(list(powerset(range(n)))) == 2**n for n in range(18))
True
>>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len)
True
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
"""
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
RegressionTests, LengthTransparency,
SubclassWithKwargsTest, TestExamples)
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
# doctest the examples in the library reference
test_support.run_doctest(sys.modules[__name__], verbose)
if __name__ == "__main__":
test_main(verbose=True)
|
2uller/LotF
|
App/Lib/test/test_itertools.py
|
Python
|
gpl-2.0
| 69,023
|
print(10)
|
mjpatter88/mjpython
|
test/acc/scripts/print_ten.py
|
Python
|
mit
| 10
|
# -*- coding: utf-8 -*-
# Copyright 2013 Vincent Jacques
# vincent@vincent-jacques.net
# This file is part of AnotherPyGraphvizAgain. http://jacquev6.github.com/AnotherPyGraphvizAgain
# AnotherPyGraphvizAgain is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# AnotherPyGraphvizAgain is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with AnotherPyGraphvizAgain. If not, see <http://www.gnu.org/licenses/>.
import unittest
from . import AllTests
if __name__ == "__main__":
unittest.main(module=AllTests)
|
Titulacion-Sistemas/PythonTitulacion-EV
|
Lib/site-packages/AnotherPyGraphvizAgain/tests/__main__.py
|
Python
|
mit
| 964
|
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.
from django.conf.urls import url
from awx.api.views import (
RoleList,
RoleDetail,
RoleUsersList,
RoleTeamsList,
RoleParentsList,
RoleChildrenList,
)
urls = [
url(r'^$', RoleList.as_view(), name='role_list'),
url(r'^(?P<pk>[0-9]+)/$', RoleDetail.as_view(), name='role_detail'),
url(r'^(?P<pk>[0-9]+)/users/$', RoleUsersList.as_view(), name='role_users_list'),
url(r'^(?P<pk>[0-9]+)/teams/$', RoleTeamsList.as_view(), name='role_teams_list'),
url(r'^(?P<pk>[0-9]+)/parents/$', RoleParentsList.as_view(), name='role_parents_list'),
url(r'^(?P<pk>[0-9]+)/children/$', RoleChildrenList.as_view(), name='role_children_list'),
]
__all__ = ['urls']
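# Hedged usage sketch (added; not from the AWX source). One of the named
# routes above can be resolved with Django's URL reversing; the namespace and
# prefix under which this ``urls`` list is mounted are assumptions here, so
# the exact path may differ in a real install:
#
#     from django.urls import reverse
#     reverse('role_detail', kwargs={'pk': 42})   # e.g. ".../roles/42/"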
|
GoogleCloudPlatform/sap-deployment-automation
|
third_party/github.com/ansible/awx/awx/api/urls/role.py
|
Python
|
apache-2.0
| 753
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8638")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8638")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a GingerCoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a GingerCoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
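# Illustrative invocation (added note; the prompts come from the command
# handlers above, while the account, confirmations and balance shown are made
# up, not real output):
#
#     $ python bitrpc.py getbalance
#     Enter an account (optional):
#     Minimum confirmations (optional):
#     12.34567890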
|
dzsmining/GingerCoin
|
contrib/bitrpc/bitrpc.py
|
Python
|
mit
| 7,842
|
"""Class to perform under-sampling based on one-sided selection method."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from collections import Counter
import numpy as np
from sklearn.base import clone
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils import check_random_state, _safe_indexing
from ..base import BaseCleaningSampler
from ._tomek_links import TomekLinks
from ...utils import Substitution
from ...utils._docstring import _n_jobs_docstring
from ...utils._docstring import _random_state_docstring
from ...utils._validation import _deprecate_positional_args
@Substitution(
sampling_strategy=BaseCleaningSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
random_state=_random_state_docstring,
)
class OneSidedSelection(BaseCleaningSampler):
"""Class to perform under-sampling based on one-sided selection method.
Read more in the :ref:`User Guide <condensed_nearest_neighbors>`.
Parameters
----------
{sampling_strategy}
{random_state}
n_neighbors : int or estimator object, default=None
If ``int``, size of the neighbourhood to consider to compute the
nearest neighbors. If object, an estimator that inherits from
:class:`~sklearn.neighbors.base.KNeighborsMixin` that will be used to
find the nearest-neighbors. If `None`, a
        :class:`~sklearn.neighbors.KNeighborsClassifier` with a 1-NN rule will
be used.
n_seeds_S : int, default=1
Number of samples to extract in order to build the set S.
{n_jobs}
Attributes
----------
sampling_strategy_ : dict
Dictionary containing the information to sample the dataset. The keys
corresponds to the class labels from which to sample and the values
are the number of samples to sample.
estimator_ : estimator object
Validated K-nearest neighbors estimator created from parameter `n_neighbors`.
sample_indices_ : ndarray of shape (n_new_samples,)
Indices of the samples selected.
.. versionadded:: 0.4
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
See Also
--------
EditedNearestNeighbours : Undersample by editing noisy samples.
Notes
-----
The method is based on [1]_.
Supports multi-class resampling. A one-vs.-one scheme is used when sampling
a class as proposed in [1]_. For each class to be sampled, all samples of
this class and the minority class are used during the sampling procedure.
References
----------
.. [1] M. Kubat, S. Matwin, "Addressing the curse of imbalanced training
sets: one-sided selection," In ICML, vol. 97, pp. 179-186, 1997.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import \
OneSidedSelection # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> oss = OneSidedSelection(random_state=42)
>>> X_res, y_res = oss.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{1: 496, 0: 100}})
"""
@_deprecate_positional_args
def __init__(
self,
*,
sampling_strategy="auto",
random_state=None,
n_neighbors=None,
n_seeds_S=1,
n_jobs=None,
):
super().__init__(sampling_strategy=sampling_strategy)
self.random_state = random_state
self.n_neighbors = n_neighbors
self.n_seeds_S = n_seeds_S
self.n_jobs = n_jobs
def _validate_estimator(self):
"""Private function to create the NN estimator"""
if self.n_neighbors is None:
self.estimator_ = KNeighborsClassifier(n_neighbors=1, n_jobs=self.n_jobs)
elif isinstance(self.n_neighbors, int):
self.estimator_ = KNeighborsClassifier(
n_neighbors=self.n_neighbors, n_jobs=self.n_jobs
)
elif isinstance(self.n_neighbors, KNeighborsClassifier):
self.estimator_ = clone(self.n_neighbors)
else:
raise ValueError(
                f"`n_neighbors` has to be an int or an object"
f" inherited from KNeighborsClassifier."
f" Got {type(self.n_neighbors)} instead."
)
def _fit_resample(self, X, y):
self._validate_estimator()
random_state = check_random_state(self.random_state)
target_stats = Counter(y)
class_minority = min(target_stats, key=target_stats.get)
idx_under = np.empty((0,), dtype=int)
for target_class in np.unique(y):
if target_class in self.sampling_strategy_.keys():
# select a sample from the current class
idx_maj = np.flatnonzero(y == target_class)
sel_idx_maj = random_state.randint(
low=0, high=target_stats[target_class], size=self.n_seeds_S
)
idx_maj_sample = idx_maj[sel_idx_maj]
minority_class_indices = np.flatnonzero(y == class_minority)
C_indices = np.append(minority_class_indices, idx_maj_sample)
# create the set composed of all minority samples and one
# sample from the current class.
C_x = _safe_indexing(X, C_indices)
C_y = _safe_indexing(y, C_indices)
                # create the set S by removing the seed from it,
                # since the seed will be added back anyway
idx_maj_extracted = np.delete(idx_maj, sel_idx_maj, axis=0)
S_x = _safe_indexing(X, idx_maj_extracted)
S_y = _safe_indexing(y, idx_maj_extracted)
self.estimator_.fit(C_x, C_y)
pred_S_y = self.estimator_.predict(S_x)
S_misclassified_indices = np.flatnonzero(pred_S_y != S_y)
idx_tmp = idx_maj_extracted[S_misclassified_indices]
idx_under = np.concatenate((idx_under, idx_maj_sample, idx_tmp), axis=0)
else:
idx_under = np.concatenate(
(idx_under, np.flatnonzero(y == target_class)), axis=0
)
X_resampled = _safe_indexing(X, idx_under)
y_resampled = _safe_indexing(y, idx_under)
# apply Tomek cleaning
tl = TomekLinks(sampling_strategy=list(self.sampling_strategy_.keys()))
X_cleaned, y_cleaned = tl.fit_resample(X_resampled, y_resampled)
self.sample_indices_ = _safe_indexing(idx_under, tl.sample_indices_)
return X_cleaned, y_cleaned
def _more_tags(self):
return {"sample_indices": True}
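# --- Hedged illustration (added; not part of imbalanced-learn) --------------
# A minimal sketch of the core selection step performed in `_fit_resample`
# above: fit a 1-NN classifier on all minority samples plus one majority
# "seed", then keep only the majority samples that this classifier
# misclassifies (the rest are treated as redundant). The toy data below is
# made up purely for illustration.
if __name__ == "__main__":
    import numpy as np
    from sklearn.neighbors import KNeighborsClassifier

    X_toy = np.array([[0.0], [0.1], [2.0], [2.1], [2.2], [5.0]])
    y_toy = np.array([0, 0, 1, 1, 1, 1])   # class 1 is the majority class
    C_idx = np.array([0, 1, 2])            # minority samples + one majority seed
    S_idx = np.array([3, 4, 5])            # remaining majority samples

    knn = KNeighborsClassifier(n_neighbors=1).fit(X_toy[C_idx], y_toy[C_idx])
    kept = S_idx[knn.predict(X_toy[S_idx]) != y_toy[S_idx]]
    # Misclassified majority samples are kept alongside the seed; correctly
    # classified ones are dropped as redundant.
    print("extra majority samples kept:", kept)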
|
scikit-learn-contrib/imbalanced-learn
|
imblearn/under_sampling/_prototype_selection/_one_sided_selection.py
|
Python
|
mit
| 7,093
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import formatdate
from erpnext.controllers.website_list_for_contact import (get_customers_suppliers,
get_party_details)
def get_context(context):
context.no_cache = 1
context.show_sidebar = True
context.doc = frappe.get_doc(frappe.form_dict.doctype, frappe.form_dict.name)
context.parents = frappe.form_dict.parents
context.doc.supplier = get_supplier()
context.doc.rfq_links = get_link_quotation(context.doc.supplier, context.doc.name)
unauthorized_user(context.doc.supplier)
update_supplier_details(context)
context["title"] = frappe.form_dict.name
def get_supplier():
doctype = frappe.form_dict.doctype
parties_doctype = 'Request for Quotation Supplier' if doctype == 'Request for Quotation' else doctype
customers, suppliers = get_customers_suppliers(parties_doctype, frappe.session.user)
key, parties = get_party_details(customers, suppliers)
return parties[0] if key == 'supplier' else ''
def check_supplier_has_docname_access(supplier):
status = True
if frappe.form_dict.name not in frappe.db.sql_list("""select parent from `tabRequest for Quotation Supplier`
where supplier = %s""", (supplier,)):
status = False
return status
def unauthorized_user(supplier):
status = check_supplier_has_docname_access(supplier) or False
if status == False:
frappe.throw(_("Not Permitted"), frappe.PermissionError)
def update_supplier_details(context):
supplier_doc = frappe.get_doc("Supplier", context.doc.supplier)
context.doc.currency = supplier_doc.default_currency or frappe.get_cached_value('Company', context.doc.company, "default_currency")
context.doc.currency_symbol = frappe.db.get_value("Currency", context.doc.currency, "symbol", cache=True)
context.doc.number_format = frappe.db.get_value("Currency", context.doc.currency, "number_format", cache=True)
context.doc.buying_price_list = supplier_doc.default_price_list or ''
def get_link_quotation(supplier, rfq):
quotation = frappe.db.sql(""" select distinct `tabSupplier Quotation Item`.parent as name,
`tabSupplier Quotation`.status, `tabSupplier Quotation`.transaction_date from
`tabSupplier Quotation Item`, `tabSupplier Quotation` where `tabSupplier Quotation`.docstatus < 2 and
`tabSupplier Quotation Item`.request_for_quotation =%(name)s and
`tabSupplier Quotation Item`.parent = `tabSupplier Quotation`.name and
`tabSupplier Quotation`.supplier = %(supplier)s order by `tabSupplier Quotation`.creation desc""",
{'name': rfq, 'supplier': supplier}, as_dict=1)
for data in quotation:
data.transaction_date = formatdate(data.transaction_date)
return quotation or None
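# Descriptive note (added): each entry returned above is a dict holding the
# supplier quotation ``name``, its ``status`` and a formatted
# ``transaction_date``; the function returns None when no linked quotation
# exists for the supplier and RFQ.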
|
Zlash65/erpnext
|
erpnext/templates/pages/rfq.py
|
Python
|
gpl-3.0
| 2,809
|
#!/usr/bin/env python
#==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
# This examples shows how to do streamed processing of an image read with the
# IOOpenSlide module to reduce memory consumption.
import sys
import itk
if len(sys.argv) != 4:
print("Usage: " + sys.argv[0] + " <inputImage> <outputImage> <radius>")
sys.exit(1)
inputImage = sys.argv[1]
outputImage = sys.argv[2]
radiusValue = int(sys.argv[3])
PixelType = itk.ctype('unsigned char')
Dimension = 2
ImageType = itk.Image[PixelType, Dimension]
reader = itk.ImageFileReader[ImageType].New(FileName=inputImage)
imageio = itk.OpenSlideImageIO.New()
reader.SetImageIO(imageio)
median = itk.MedianImageFilter[ImageType,ImageType].New(Input=reader.GetOutput())
median.SetRadius(radiusValue)
writer = itk.ImageFileWriter[ImageType].New(Input=median.GetOutput(),
FileName=outputImage)
# Process the image in three chunks
writer.SetNumberOfStreamDivisions(3)
writer.Update()
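# Note (added for clarity, not part of the original example): with three
# stream divisions the writer requests the median filter's output one region
# at a time, so peak memory is roughly one third of the full image plus the
# overlap needed for the filter radius, rather than the whole slide at once.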
|
InsightSoftwareConsortium/ITKIOOpenSlide
|
examples/StreamProcessing.py
|
Python
|
apache-2.0
| 1,655
|
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2013 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.contrib.auth.decorators import login_required,\
permission_required
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.db.models import Sum, Avg, Count
from django.conf import settings
from dialer_campaign.function_def import user_dialer_setting_msg
from dialer_cdr.models import VoIPCall
from dialer_cdr.constants import CDR_REPORT_COLUMN_NAME
from dialer_cdr.forms import VoipSearchForm
from common.common_functions import current_view, ceil_strdate,\
get_pagination_vars
from datetime import datetime
import csv
def get_voipcall_daily_data(voipcall_list):
"""Get voipcall daily data"""
select_data = {"starting_date": "SUBSTR(CAST(starting_date as CHAR(30)),1,10)"}
    # Get total records from the VoIPCall report table for the daily call report
total_data = voipcall_list.extra(select=select_data)\
.values('starting_date')\
.annotate(Count('starting_date'))\
.annotate(Sum('duration'))\
.annotate(Avg('duration'))\
.order_by('-starting_date')
# Following code will count total voip calls, duration
if total_data:
max_duration = max([x['duration__sum'] for x in total_data])
total_duration = sum([x['duration__sum'] for x in total_data])
total_calls = sum([x['starting_date__count'] for x in total_data])
total_avg_duration = (sum([x['duration__avg'] for x in total_data])) / total_calls
else:
max_duration = 0
total_duration = 0
total_calls = 0
total_avg_duration = 0
data = {
'total_data': total_data,
'total_duration': total_duration,
'total_calls': total_calls,
'total_avg_duration': total_avg_duration,
'max_duration': max_duration,
}
return data
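# Illustrative shape of the dict returned above (the numbers are made up):
#
#     {
#         'total_data': <queryset rows with starting_date, starting_date__count,
#                        duration__sum and duration__avg>,
#         'total_duration': 3600,
#         'total_calls': 120,
#         'total_avg_duration': 30.0,
#         'max_duration': 240,
#     }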
@permission_required('dialer_cdr.view_call_detail_report', login_url='/')
@login_required
def voipcall_report(request):
"""VoIP Call Report
**Attributes**:
* ``form`` - VoipSearchForm
* ``template`` - frontend/report/voipcall_report.html
**Logic Description**:
        * Get VoIP call list according to search parameters for the logged-in user
**Important variable**:
* ``request.session['voipcall_record_kwargs']`` - stores voipcall kwargs
"""
sort_col_field_list = ['starting_date', 'leg_type', 'disposition',
'used_gateway', 'callerid', 'callid', 'phone_number',
'duration', 'billsec', 'amd_status']
default_sort_field = 'starting_date'
pagination_data =\
get_pagination_vars(request, sort_col_field_list, default_sort_field)
PAGE_SIZE = pagination_data['PAGE_SIZE']
sort_order = pagination_data['sort_order']
start_page = pagination_data['start_page']
end_page = pagination_data['end_page']
search_tag = 1
action = 'tabs-1'
if request.method == 'POST':
form = VoipSearchForm(request.user, request.POST)
if form.is_valid():
request.session['session_start_date'] = ''
request.session['session_end_date'] = ''
request.session['session_disposition'] = ''
request.session['session_campaign_id'] = ''
if request.POST.get('from_date'):
# From
from_date = request.POST['from_date']
start_date = ceil_strdate(from_date, 'start')
request.session['session_start_date'] = start_date
if request.POST.get('to_date'):
# To
to_date = request.POST['to_date']
end_date = ceil_strdate(to_date, 'end')
request.session['session_end_date'] = end_date
disposition = request.POST.get('status')
if disposition != 'all':
request.session['session_disposition'] = disposition
campaign_id = request.POST.get('campaign')
if campaign_id and int(campaign_id) != 0:
request.session['session_campaign_id'] = int(campaign_id)
post_var_with_page = 0
try:
if request.GET.get('page') or request.GET.get('sort_by'):
post_var_with_page = 1
start_date = request.session.get('session_start_date')
end_date = request.session.get('session_end_date')
disposition = request.session.get('session_disposition')
campaign_id = request.session.get('session_campaign_id')
form = VoipSearchForm(request.user,
initial={'from_date': start_date.strftime('%Y-%m-%d'),
'to_date': end_date.strftime('%Y-%m-%d'),
'status': disposition,
'campaign': campaign_id})
else:
post_var_with_page = 1
if request.method == 'GET':
post_var_with_page = 0
except:
pass
if post_var_with_page == 0:
# default
tday = datetime.today()
from_date = tday.strftime('%Y-%m-%d')
to_date = tday.strftime('%Y-%m-%d')
start_date = datetime(tday.year, tday.month, tday.day, 0, 0, 0, 0)
end_date = datetime(tday.year, tday.month, tday.day, 23, 59, 59, 999999)
disposition = 'all'
campaign_id = 0
form = VoipSearchForm(request.user,
initial={'from_date': from_date, 'to_date': to_date,
'status': disposition, 'campaign': campaign_id})
# unset session var
request.session['session_start_date'] = start_date
request.session['session_end_date'] = end_date
request.session['session_disposition'] = disposition
request.session['session_campaign_id'] = ''
kwargs = {}
if start_date and end_date:
kwargs['starting_date__range'] = (start_date, end_date)
if start_date and end_date == '':
kwargs['starting_date__gte'] = start_date
if start_date == '' and end_date:
kwargs['starting_date__lte'] = end_date
if disposition and disposition != 'all':
kwargs['disposition__exact'] = disposition
if campaign_id and int(campaign_id) != 0:
kwargs['callrequest__campaign_id'] = campaign_id
if not request.user.is_superuser:
kwargs['user'] = request.user
voipcall_list = VoIPCall.objects.filter(**kwargs)
all_voipcall_list = voipcall_list.values_list('id', flat=True)
    # The search filters are stored in a session variable so that the export
    # view can rebuild the same record set
request.session['voipcall_record_kwargs'] = kwargs
if request.GET.get('page') or request.GET.get('sort_by'):
daily_data = request.session['voipcall_daily_data']
else:
if not voipcall_list:
request.session['voipcall_daily_data'] = ''
daily_data = get_voipcall_daily_data(voipcall_list)
request.session['voipcall_daily_data'] = daily_data
voipcall_list = voipcall_list.order_by(sort_order)[start_page:end_page]
template = 'frontend/report/voipcall_report.html'
data = {
'form': form,
'total_data': daily_data['total_data'],
'total_duration': daily_data['total_duration'],
'total_calls': daily_data['total_calls'],
'total_avg_duration': daily_data['total_avg_duration'],
'max_duration': daily_data['max_duration'],
'module': current_view(request),
'dialer_setting_msg': user_dialer_setting_msg(request.user),
'all_voipcall_list': all_voipcall_list,
'voipcall_list': voipcall_list,
'PAGE_SIZE': PAGE_SIZE,
'CDR_REPORT_COLUMN_NAME': CDR_REPORT_COLUMN_NAME,
'col_name_with_order': pagination_data['col_name_with_order'],
'search_tag': search_tag,
'start_date': start_date,
'end_date': end_date,
'action': action,
'AMD': settings.AMD,
}
request.session['msg'] = ''
request.session['error_msg'] = ''
return render_to_response(template, data,
context_instance=RequestContext(request))
@login_required
def export_voipcall_report(request):
"""Export CSV file of VoIP call record
**Important variable**:
* ``request.session['voipcall_record_kwargs']`` - stores voipcall kwargs
**Exported fields**: [user, callid, callerid, phone_number, starting_date,
duration, disposition, used_gateway]
"""
# get the response object, this can be used as a stream.
response = HttpResponse(mimetype='text/csv')
# force download.
response['Content-Disposition'] = 'attachment;filename=export.csv'
# the csv writer
writer = csv.writer(response)
# super(VoIPCall_ReportAdmin, self).queryset(request)
if request.session.get('voipcall_record_kwargs'):
kwargs = request.session['voipcall_record_kwargs']
qs = VoIPCall.objects.filter(**kwargs)
amd_status = ''
if settings.AMD:
amd_status = 'amd_status'
writer.writerow(['user', 'callid', 'callerid', 'phone_number',
'starting_date', 'duration', 'billsec',
'disposition', 'hangup_cause', 'hangup_cause_q850',
'used_gateway', amd_status])
for i in qs:
gateway_used = i.used_gateway.name if i.used_gateway else ''
amd_status = i.amd_status if settings.AMD else ''
writer.writerow([
i.user,
i.callid,
i.callerid,
i.phone_number,
i.starting_date,
i.duration,
i.billsec,
i.disposition,
i.hangup_cause,
i.hangup_cause_q850,
gateway_used,
amd_status,
])
return response
|
berinhard/newfies-dialer
|
newfies/dialer_cdr/views.py
|
Python
|
mpl-2.0
| 10,337
|
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from mo_json import scrub
from mo_times.dates import unix2datetime
import jx_elasticsearch
from bugzilla_etl import extract_bugzilla, alias_analysis, parse_bug_history
from bugzilla_etl.alias_analysis import AliasAnalyzer
from bugzilla_etl.extract_bugzilla import get_comments, get_current_time, MIN_TIMESTAMP, get_private_bugs_for_delete, get_recent_private_bugs, get_recent_private_attachments, get_recent_private_comments, get_comments_by_id, get_bugs, \
get_dependencies, get_flags, get_new_activities, get_bug_see_also, get_attachments, get_tracking_flags, get_keywords, get_tags, get_cc, get_bug_groups, get_duplicates
from bugzilla_etl.parse_bug_history import BugHistoryParser
from jx_python import jx
from mo_dots import wrap, coalesce, listwrap, Data
from mo_files import File
from mo_future import text_type, long
from mo_kwargs import override
from mo_logs import Log, startup, constants
from mo_math import Math
from mo_threads import Lock, Queue, Thread, THREAD_STOP
from mo_threads.threads import AllThread, MAIN_THREAD
from mo_times.timer import Timer
from pyLibrary import convert
from pyLibrary.env.elasticsearch import Cluster
from pyLibrary.sql.mysql import MySQL
NUM_CONNECTIONS = 4
db_cache_lock = Lock()
db_cache = []
comment_db_cache_lock = Lock()
comment_db_cache = None
# HERE ARE ALL THE FUNCTIONS WE WANT TO RUN, IN PARALLEL
get_stuff_from_bugzilla = [
get_bugs,
get_dependencies,
get_flags,
get_new_activities,
get_bug_see_also,
get_attachments,
get_tracking_flags,
get_keywords,
get_tags,
get_cc,
get_bug_groups,
get_duplicates
]
def etl_comments(db, output_queue, param, please_stop):
# CONNECTIONS ARE EXPENSIVE, CACHE HERE
global comment_db_cache
with comment_db_cache_lock:
if not comment_db_cache:
comment_db = MySQL(db.settings)
comment_db_cache = comment_db
with comment_db_cache_lock:
comments = get_comments(comment_db_cache, param)
for g, block_of_comments in jx.groupby(comments, size=500):
output_queue.extend({"id": text_type(comment.comment_id), "value": scrub(comment)} for comment in block_of_comments)
def etl(db, bug_output_queue, param, alias_analyzer, please_stop):
"""
PROCESS RANGE, AS SPECIFIED IN param AND PUSH
BUG VERSION RECORDS TO output_queue
"""
    # MAKING CONNECTIONS IS EXPENSIVE, CACHE HERE
with db_cache_lock:
if not db_cache:
with Timer("open {{num}} connections to db", {"num": NUM_CONNECTIONS}):
for i in range(NUM_CONNECTIONS):
db_cache.append(MySQL(db.settings))
db_results = Queue(name="db results", max=2**30)
def get_records_from_bugzilla(db, param, please_stop):
with db.transaction():
for get_stuff in get_stuff_from_bugzilla:
if please_stop:
break
db_results.extend(get_stuff(db, param))
with AllThread() as all:
with db_cache_lock:
# SPLIT TASK EVENLY, HAVE EACH BUG USE SAME CONNECTION FOR ALL DATA
size = Math.ceiling(len(param.bug_list)/len(db_cache))
for g, bug_ids in jx.groupby(param.bug_list, size=size):
param = param.copy()
param.bug_list = bug_ids
all.add(get_records_from_bugzilla, db_cache[g], param)
db_results.add(THREAD_STOP)
sorted = jx.sort(db_results, [
"bug_id",
"_merge_order",
{"modified_ts": "desc"},
"modified_by",
{"id": "desc"}
])
process = BugHistoryParser(param, alias_analyzer, bug_output_queue)
for i, s in enumerate(sorted):
process.processRow(s)
process.processRow(wrap({"bug_id": parse_bug_history.STOP_BUG, "_merge_order": 1}))
process.alias_analyzer.save_aliases()
def run_both_etl(db, bug_output_queue, comment_output_queue, param, alias_analyzer):
comment_thread = Thread.run("etl comments", etl_comments, db, comment_output_queue, param)
process_thread = Thread.run("etl", etl, db, bug_output_queue, param, alias_analyzer)
result = comment_thread.join()
if result.exception:
Log.error("etl_comments had problems", cause=result.exception)
result = process_thread.join()
if result.exception:
Log.error("etl had problems", cause=result.exception)
def setup_es(settings, db):
"""
SETUP ES CONNECTIONS TO REFLECT IF WE ARE RESUMING, INCREMENTAL, OR STARTING OVER
"""
current_run_time = get_current_time(db)
if File(settings.param.first_run_time).exists and File(settings.param.last_run_time).exists:
# INCREMENTAL UPDATE; DO NOT MAKE NEW INDEX
last_run_time = long(File(settings.param.last_run_time).read())
esq = jx_elasticsearch.new_instance(read_only=False, kwargs=settings.es)
esq_comments = jx_elasticsearch.new_instance(read_only=False, kwargs=settings.es_comments)
elif File(settings.param.first_run_time).exists:
# DO NOT MAKE NEW INDEX, CONTINUE INITIAL FILL
try:
last_run_time = MIN_TIMESTAMP
current_run_time = unix2datetime(long(File(settings.param.first_run_time).read())/1000)
bugs = Cluster(settings.es).get_best_matching_index(settings.es.index)
esq = jx_elasticsearch.new_instance(index=bugs.index, read_only=False, kwargs=settings.es)
comments = Cluster(settings.es_comments).get_best_matching_index(settings.es_comments.index)
esq_comments = jx_elasticsearch.new_instance(index=comments.index, read_only=False, kwargs=settings.es_comments)
esq.es.set_refresh_interval(1) #REQUIRED SO WE CAN SEE WHAT BUGS HAVE BEEN LOADED ALREADY
except Exception as e:
Log.warning("can not resume ETL, restarting", cause=e)
File(settings.param.first_run_time).delete()
return setup_es(settings, db)
else:
# START ETL FROM BEGINNING, MAKE NEW INDEX
last_run_time = MIN_TIMESTAMP
File(settings.param.first_run_time).write(text_type(convert.datetime2milli(current_run_time)))
cluster = Cluster(settings.es)
es = cluster.create_index(kwargs=settings.es, limit_replicas=True)
es_comments = cluster.create_index(kwargs=settings.es_comments, limit_replicas=True)
esq = jx_elasticsearch.new_instance(read_only=False, index=es.settings.index, kwargs=settings.es)
esq_comments = jx_elasticsearch.new_instance(read_only=False, index=es_comments.settings.index, kwargs=settings.es_comments)
return current_run_time, esq, esq_comments, last_run_time
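# Summary of the three setup_es() branches above (descriptive note added for
# clarity):
#   * first_run_time and last_run_time both exist -> incremental update,
#     reuse the existing bug and comment indexes.
#   * only first_run_time exists -> resume the initial fill against the
#     best-matching existing indexes (restart from scratch if that fails).
#   * neither file exists -> record first_run_time and create fresh bug and
#     comment indexes.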
@override
def incremental_etl(param, db, esq, esq_comments, bug_output_queue, comment_output_queue, kwargs):
####################################################################
## ES TAKES TIME TO DELETE RECORDS, DO DELETE FIRST WITH HOPE THE
## INDEX GETS A REWRITE DURING ADD OF NEW RECORDS
####################################################################
# REMOVE PRIVATE BUGS
private_bugs = get_private_bugs_for_delete(db, param)
alias_analyzer = AliasAnalyzer(kwargs.alias)
Log.note("Ensure the following private bugs are deleted:\n{{private_bugs|indent}}", private_bugs=sorted(private_bugs))
for g, delete_bugs in jx.groupby(private_bugs, size=1000):
still_existing = get_bug_ids(esq, {"terms": {"bug_id": delete_bugs}})
if still_existing:
Log.note("Ensure the following existing private bugs are deleted:\n{{private_bugs|indent}}", private_bugs=sorted(still_existing))
esq.es.delete_record({"terms": {"bug_id.~n~": delete_bugs}})
esq_comments.es.delete_record({"terms": {"bug_id.~n~": delete_bugs}})
# RECENT PUBLIC BUGS
possible_public_bugs = get_recent_private_bugs(db, param)
if param.allow_private_bugs:
#PRIVATE BUGS
# A CHANGE IN PRIVACY INDICATOR MEANS THE WHITEBOARD IS AFFECTED, REDO
esq.es.delete_record({"terms": {"bug_id.~n~": possible_public_bugs}})
else:
#PUBLIC BUGS
# IF ADDING GROUP THEN private_bugs ALREADY DID THIS
# IF REMOVING GROUP THEN NO RECORDS TO DELETE
pass
# REMOVE **RECENT** PRIVATE ATTACHMENTS
private_attachments = get_recent_private_attachments(db, param)
bugs_to_refresh = set(jx.select(private_attachments, "bug_id"))
esq.es.delete_record({"terms": {"bug_id.~n~": bugs_to_refresh}})
# REBUILD BUGS THAT GOT REMOVED
bug_list = jx.sort((possible_public_bugs | bugs_to_refresh) - private_bugs) # REMOVE PRIVATE BUGS
if bug_list:
refresh_param = param.copy()
refresh_param.bug_list = bug_list
refresh_param.start_time = MIN_TIMESTAMP
refresh_param.start_time_str = extract_bugzilla.milli2string(db, MIN_TIMESTAMP)
try:
etl(db, bug_output_queue, refresh_param.copy(), alias_analyzer, please_stop=None)
etl_comments(db, esq_comments.es, refresh_param.copy(), please_stop=None)
except Exception as e:
Log.error(
"Problem with etl using parameters {{parameters}}",
parameters=refresh_param,
cause=e
)
# REFRESH COMMENTS WITH PRIVACY CHANGE
private_comments = get_recent_private_comments(db, param)
comment_list = set(jx.select(private_comments, "comment_id")) | {0}
esq_comments.es.delete_record({"terms": {"comment_id.~n~": comment_list}})
changed_comments = get_comments_by_id(db, comment_list, param)
esq_comments.es.extend({"id": c.comment_id, "value": c} for c in changed_comments)
# GET LIST OF CHANGED BUGS
with Timer("time to get changed bug list"):
if param.allow_private_bugs:
bug_list = jx.select(db.query("""
SELECT
b.bug_id
FROM
bugs b
WHERE
delta_ts >= {{start_time_str}}
""", {
"start_time_str": param.start_time_str
}), u"bug_id")
else:
bug_list = jx.select(db.query("""
SELECT
b.bug_id
FROM
bugs b
LEFT JOIN
bug_group_map m ON m.bug_id=b.bug_id
WHERE
delta_ts >= {{start_time_str}} AND
m.bug_id IS NULL
""", {
"start_time_str": param.start_time_str
}), u"bug_id")
if not bug_list:
return
with Thread.run("alias analysis", alias_analysis.full_analysis, kwargs=kwargs, bug_list=bug_list):
Log.note(
"Updating {{num}} bugs:\n{{bug_list|indent}}",
num=len(bug_list),
bug_list=bug_list
)
param.bug_list = bug_list
run_both_etl(
db=db,
bug_output_queue=bug_output_queue,
comment_output_queue=comment_output_queue,
param=param.copy(),
alias_analyzer=alias_analyzer
)
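# Recap of the incremental pass above (descriptive note added for clarity):
# private bugs are deleted first, bugs whose privacy or attachments changed
# are rebuilt, comments with privacy changes are refreshed, and only then are
# the bugs modified since the last run re-processed.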
@override
def full_etl(resume_from_last_run, param, db, esq, esq_comments, bug_output_queue, comment_output_queue, kwargs):
end = coalesce(param.end, db.query("SELECT max(bug_id) bug_id FROM bugs")[0].bug_id)
start = coalesce(param.start, 0)
alias_analyzer = AliasAnalyzer(kwargs=kwargs.alias)
if resume_from_last_run:
# FIND THE LAST GOOD BUG NUMBER PROCESSED (WE GO BACKWARDS, SO LOOK FOR MINIMUM BUG, AND ROUND UP)
end = coalesce(param.end, Math.ceiling(get_min_bug_id(esq), param.increment), end)
Log.note("full etl from {{min}} to {{max}}", min=start, max=end)
#############################################################
## MAIN ETL LOOP
#############################################################
for min, max in jx.reverse(jx.intervals(start, end, param.increment)):
with Timer("etl block {{min}}..{{max}}", param={"min":min, "max":max}, silent=not param.debug):
if kwargs.args.quick and min < end - param.increment and min != 0:
#--quick ONLY DOES FIRST AND LAST BLOCKS
continue
try:
#GET LIST OF CHANGED BUGS
with Timer("time to get {{min}}..{{max}} bug list", {"min":min, "max":max}):
if param.allow_private_bugs:
bug_list = jx.select(db.query("""
SELECT
b.bug_id
FROM
bugs b
WHERE
delta_ts >= {{start_time_str}} AND
({{min}} <= b.bug_id AND b.bug_id < {{max}})
""", {
"min": min,
"max": max,
"start_time_str": param.start_time_str
}), u"bug_id")
else:
bug_list = jx.select(db.query("""
SELECT
b.bug_id
FROM
bugs b
LEFT JOIN
bug_group_map m ON m.bug_id=b.bug_id
WHERE
delta_ts >= {{start_time_str}} AND
({{min}} <= b.bug_id AND b.bug_id < {{max}}) AND
m.bug_id IS NULL
""", {
"min": min,
"max": max,
"start_time_str": param.start_time_str
}), u"bug_id")
if not bug_list:
continue
param.bug_list = bug_list
run_both_etl(
db,
bug_output_queue,
comment_output_queue,
param.copy(),
alias_analyzer=alias_analyzer
)
except Exception as e:
Log.error(
"Problem with dispatch loop in range [{{min}}, {{max}})",
min=min,
max=max,
cause=e
)
@override
def main(param, es, es_comments, bugzilla, kwargs):
param.allow_private_bugs = param.allow_private_bugs in [True, "true"]
if not param.allow_private_bugs and es and not es_comments:
Log.error("Must have ES for comments")
resume_from_last_run = File(param.first_run_time).exists and not File(param.last_run_time).exists
# MAKE HANDLES TO CONTAINERS
try:
with MySQL(kwargs=bugzilla, readonly=True) as db:
current_run_time, esq, esq_comments, last_run_time = setup_es(kwargs, db)
with esq.es.threaded_queue(max_size=500, silent=True) as output_queue:
# def _add(value):
# if not isinstance(value, text_type):
# value = wrap(value)
# if value.value.bug_id==1877:
# Log.note("{{group}}", group= value.value.bug_group)
# _output_queue.add(value)
# output_queue = Data(add=_add)
#SETUP RUN PARAMETERS
param_new = Data()
param_new.end_time = convert.datetime2milli(get_current_time(db))
                # MySQL WRITES ARE DELAYED, RESULTING IN UNORDERED bug_when IN bugs_activity (AS IS ASSUMED FOR bugs(delta_ts))
                # THIS JITTER IS USUALLY NO MORE THAN ONE SECOND, BUT WE GO BACK FURTHER (5 MINUTES BY DEFAULT), JUST IN CASE.
# THERE ARE OCCASIONAL WRITES THAT ARE IN GMT, BUT SINCE THEY LOOK LIKE THE FUTURE, WE CAPTURE THEM
param_new.start_time = last_run_time - coalesce(param.look_back, 5 * 60 * 1000) # 5 MINUTE LOOK_BACK
param_new.start_time_str = extract_bugzilla.milli2string(db, param_new.start_time)
param_new.alias = param.alias
param_new.allow_private_bugs = param.allow_private_bugs
param_new.increment = param.increment
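                # AN EXISTING last_run_time MEANS ONLY THE DELTA SINCE THEN IS NEEDED; OTHERWISE RE-ETL EVERYTHING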
if last_run_time > MIN_TIMESTAMP:
with Timer("run incremental etl"):
incremental_etl(
param=param_new,
db=db,
esq=esq,
esq_comments=esq_comments,
bug_output_queue=output_queue,
comment_output_queue=esq_comments.es,
kwargs=kwargs
)
else:
with Timer("run full etl"):
full_etl(
resume_from_last_run=resume_from_last_run,
param=param_new,
db=db,
esq=esq,
esq_comments=esq_comments,
bug_output_queue=output_queue,
comment_output_queue=esq_comments.es,
kwargs=kwargs
)
output_queue.add(THREAD_STOP)
s = Data(alias=es.index, index=esq.es.settings.index)
if s.alias:
esq.es.cluster.delete_all_but(s.alias, s.index)
esq.es.add_alias(s.alias)
s = Data(alias=es_comments.index, index=esq_comments.es.settings.index)
if s.alias:
esq.es.cluster.delete_all_but(s.alias, s.index)
esq_comments.es.add_alias(s.alias)
File(param.last_run_time).write(text_type(convert.datetime2milli(current_run_time)))
except Exception as e:
Log.error("Problem with main ETL loop", cause=e)
finally:
try:
close_db_connections()
except Exception as e:
pass
try:
es.set_refresh_interval(1)
except Exception as e:
pass
def get_bug_ids(esq, filter):
try:
result = esq.query({"from": esq.name, "select": "bug_id", "where": filter, "limit": 20000, "format": "list"})
return set(result.data) - {None}
except Exception as e:
Log.error(
"Can not get_max_bug from {{host}}/{{index}}",
host=esq.settings.host,
index=esq.settings.index,
cause=e
)
def get_min_bug_id(esq):
try:
result = esq.query({"from": esq.name, "select": {"value": "bug_id", "aggregate": "min"}, "format": "list"})
return result.data
except Exception as e:
Log.error(
"Can not get_max_bug from {{host}}/{{index}}",
host=esq.settings.host,
index=esq.settings.index,
cause=e
)
def close_db_connections():
global db_cache, comment_db_cache
db_cache, temp = [], db_cache
for db in temp:
db.close()
comment_db_cache, temp = [], comment_db_cache
if temp:
temp.close()
def setup():
try:
settings = startup.read_settings(defs=[{
"name": ["--quick", "--fast"],
"help": "use this to process the first and last block, useful for testing the config settings before doing a full run",
"action": "store_true",
"dest": "quick"
}, {
"name": ["--restart", "--reset", "--redo"],
"help": "use this to force a reprocessing of all data",
"action": "store_true",
"dest": "restart"
}])
constants.set(settings.constants)
with startup.SingleInstance(flavor_id=settings.args.filename):
if settings.args.restart:
for l in listwrap(settings.debug.log):
if l.filename:
File(l.filename).delete()
File(settings.param.first_run_time).delete()
File(settings.param.last_run_time).delete()
Log.start(settings.debug)
main(settings)
except Exception as e:
Log.fatal("Can not start", e)
finally:
MAIN_THREAD.stop()
if __name__ == "__main__":
setup()
|
klahnakoski/Bugzilla-ETL
|
bugzilla_etl/bz_etl.py
|
Python
|
mpl-2.0
| 20,680
|
import abc
import six
import time
import random
import itertools
from functools import wraps
from ..translators import DefaultTranslator
from modularodm.exceptions import KeyExistsException
class Logger(object):
def __init__(self):
self.listening = False
self.events = []
self.xtra = []
def listen(self, xtra=None):
self.xtra.append(xtra)
if self.listening:
return False
self.listening = True
self.events = []
return True
def record_event(self, event):
if self.listening:
self.events.append(event)
def report(self, sort_func=None):
out = {}
if sort_func is None:
sort_func = lambda e: e.func.__name__
heard = sorted(self.events, key=sort_func)
for key, group in itertools.groupby(heard, sort_func):
group = list(group)
num_events = len(group)
total_time = sum([event.elapsed_time for event in group])
out[key] = (num_events, total_time)
return out
def pop(self):
self.xtra.pop()
def clear(self):
self.listening = False
self.events = []
class LogEvent(object):
def __init__(self, func, start_time, stop_time, xtra=None):
self.func = func
self.start_time = start_time
self.stop_time = stop_time
self.elapsed_time = stop_time - start_time
self.xtra = xtra
def __repr__(self):
return 'LogEvent("{func}", {start_time}, {stop_time}, {xtra})'.format(
**self.__dict__
)
def logify(func):
@wraps(func)
def wrapped(this, *args, **kwargs):
# Note: Copy value of `this.logger.listening` here in the event that
# this value is changed externally during the decorated function call.
# TODO: Verify that this produces valid output for concurrent requests
listening = this.logger.listening
if listening:
start_time = time.time()
out = func(this, *args, **kwargs)
if listening:
stop_time = time.time()
# TODO: This is a temporary fix for a suspected concurrency issue.
xtra = this.logger.xtra[-1] if this.logger.xtra else None
this.logger.record_event(
LogEvent(
func,
start_time,
stop_time,
xtra
)
)
return out
return wrapped
class StorageMeta(abc.ABCMeta):
def __new__(mcs, name, bases, dct):
# Decorate methods
for key, value in dct.items():
if hasattr(value, '__call__') \
and not isinstance(value, type) \
and not key.startswith('_'):
dct[key] = logify(value)
# Run super-metaclass __new__
return super(StorageMeta, mcs).__new__(mcs, name, bases, dct)
@six.add_metaclass(StorageMeta)
class Storage(object):
"""Abstract base class for storage objects. Subclasses (e.g.
:class:`~modularodm.storage.picklestorage.PickleStorage`,
:class:`~modularodm.storage.mongostorage.MongoStorage`, etc.)
    must define insert, update, get, remove, flush, find_one, and find methods.
"""
translator = DefaultTranslator()
logger = Logger()
def _ensure_index(self, key):
pass
# todo allow custom id generator
# todo increment n on repeated failures
def _generate_random_id(self, n=5):
"""Generated random alphanumeric key.
:param n: Number of characters in random key
"""
alphabet = '23456789abcdefghijkmnpqrstuvwxyz'
return ''.join(random.sample(alphabet, n))
def _optimistic_insert(self, primary_name, value, n=5):
"""Attempt to insert with randomly generated key until insert
is successful.
:param str primary_name: The name of the primary key.
:param dict value: The dictionary representation of the record.
:param n: Number of characters in random key
"""
while True:
try:
key = self._generate_random_id(n)
value[primary_name] = key
self.insert(primary_name, key, value)
break
except KeyExistsException:
pass
return key
@abc.abstractmethod
def insert(self, primary_name, key, value):
"""Insert a new record.
:param str primary_name: Name of primary key
:param key: The value of the primary key
:param dict value: The dictionary of attribute:value pairs
"""
pass
@abc.abstractmethod
def update(self, query, data):
"""Update multiple records with new data.
:param query: A query object.
:param dict data: Dictionary of key:value pairs.
"""
pass
@abc.abstractmethod
def get(self, primary_name, key):
"""Get a single record.
:param str primary_name: The name of the primary key.
:param key: The value of the primary key.
"""
pass
@abc.abstractmethod
def remove(self, query=None):
"""Remove records.
"""
pass
@abc.abstractmethod
def flush(self):
"""Flush the database."""
pass
@abc.abstractmethod
def find_one(self, query=None, **kwargs):
""" Gets a single object from the collection.
If no matching documents are found, raises `NoResultsFound`.
If >1 matching documents are found, raises `MultipleResultsFound`.
:params: One or more `Query` or `QuerySet` objects may be passed
:returns: The selected document
"""
pass
@abc.abstractmethod
def find(self, query=None, **kwargs):
"""
Return a generator of query results. Takes optional `by_pk` keyword
argument; if true, return keys rather than
values.
:param query:
:return: a generator of :class:`~.storedobject.StoredObject` instances
"""
pass
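# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal in-memory
# Storage backend. Query handling is a deliberate simplification -- it assumes
# `query` objects expose `attribute` and `argument` for a single equality test,
# and it raises a plain exception instead of modularodm's NoResultsFound /
# MultipleResultsFound. Real backends (PickleStorage, MongoStorage, ...) are
# considerably richer.
# ---------------------------------------------------------------------------
class EphemeralStorage(Storage):

    def __init__(self):
        self.store = {}

    def insert(self, primary_name, key, value):
        if key in self.store:
            raise KeyExistsException('{0} already exists'.format(key))
        self.store[key] = dict(value)

    def update(self, query, data):
        # `find` yields the stored dicts themselves, so updating them in place persists.
        for record in self.find(query):
            record.update(data)

    def get(self, primary_name, key):
        return self.store.get(key)

    def remove(self, query=None):
        doomed = [k for k, v in self.store.items()
                  if query is None or self._matches(v, query)]
        for key in doomed:
            del self.store[key]

    def flush(self):
        pass  # nothing to persist

    def find_one(self, query=None, **kwargs):
        results = list(self.find(query))
        if len(results) != 1:
            raise Exception('expected exactly one result, found {0}'.format(len(results)))
        return results[0]

    def find(self, query=None, **kwargs):
        return (v for v in self.store.values()
                if query is None or self._matches(v, query))

    @staticmethod
    def _matches(record, query):
        # Assumed query shape: a single equality comparison.
        return record.get(query.attribute) == query.argument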
|
chrisseto/modular-odm
|
modularodm/storage/base.py
|
Python
|
apache-2.0
| 6,109
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
left = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
'B': ['B0', 'B1', 'B2']},
index=['K0', 'K1', 'K2'])
right = pd.DataFrame({'C': ['C0', 'C2', 'C3'],
'D': ['D0', 'D2', 'D3']},
index=['K0', 'K2', 'K3'])
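# The joins below align the two frames on their indexes; `how` controls which index
# labels survive (join() defaults to 'left', then 'outer' and 'inner' variants,
# followed by the equivalent pd.merge(..., left_index=True, right_index=True) calls).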
result = left.join(right)
print(result)
result = left.join(right, how='outer')
print(result)
result = left.join(right, how='inner')
print(result)
result = pd.merge(left, right, left_index=True, right_index=True, how='outer')
print(result)
result = pd.merge(left, right, left_index=True, right_index=True, how='inner')
left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3'],
'key': ['K0', 'K1', 'K0', 'K1']})
right = pd.DataFrame({'C': ['C0', 'C1'],
'D': ['D0', 'D1']},
index=['K0', 'K1'])
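# join(on='key') matches the left frame's 'key' column against the right frame's index.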
result = left.join(right, on='key')
print(result)
result = pd.merge(left, right, left_on='key', right_index=True,
how='left', sort=False);
print(result)
left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3'],
'key1': ['K0', 'K0', 'K1', 'K2'],
'key2': ['K0', 'K1', 'K0', 'K1']})
index = pd.MultiIndex.from_tuples([('K0', 'K0'), ('K1', 'K0'),
('K2', 'K0'), ('K2', 'K1')])
right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']},
index=index)
result = left.join(right, on=['key1', 'key2'])
print(result)
result = left.join(right, on=['key1', 'key2'], how='inner')
print(result)
left = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
'B': ['B0', 'B1', 'B2']},
index=pd.Index(['K0', 'K1', 'K2'], name='key'))
index = pd.MultiIndex.from_tuples([('K0', 'Y0'), ('K1', 'Y1'),
('K2', 'Y2'), ('K2', 'Y3')],
names=['key', 'Y'])
right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']},
index=index)
result = left.join(right, how='inner')
print(result)
left = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']})
right = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']})
result = pd.merge(left, right, on='key')
print(result)
left = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'],
'key2': ['K0', 'K1', 'K0', 'K1'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']})
right = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'],
'key2': ['K0', 'K0', 'K0', 'K0'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']})
result = pd.merge(left, right, on=['key1', 'key2'])
print(result)
result = pd.merge(left, right, how='left', on=['key1', 'key2'])
print(result)
result = pd.merge(left, right, how='right', on=['key1', 'key2'])
print(result)
result = pd.merge(left, right, how='outer', on=['key1', 'key2'])
print(result)
result = pd.merge(left, right, how='inner', on=['key1', 'key2'])
left = pd.DataFrame({'A': [1, 2], 'B': [2, 2]})
right = pd.DataFrame({'A': [4, 5, 6], 'B': [2, 2, 2]})
result = pd.merge(left, right, on='B', how='outer')
print(result)
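# indicator=True adds a '_merge' column ('left_only', 'right_only' or 'both') showing
# which frame each row came from; passing a string instead renames that column.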
df1 = pd.DataFrame({'col1': [0, 1], 'col_left': ['a', 'b']})
df2 = pd.DataFrame({'col1': [1, 2, 2], 'col_right': [2, 2, 2]})
print(pd.merge(df1, df2, on='col1', how='outer', indicator=True))
print(pd.merge(df1, df2, on='col1', how='outer', indicator='indicator_column'))
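# Added illustration: when non-key column names overlap, merge disambiguates them
# with suffixes; suffixes=('_l', '_r') overrides the default ('_x', '_y').
left = pd.DataFrame({'k': ['K0', 'K1'], 'v': [1, 2]})
right = pd.DataFrame({'k': ['K0', 'K0'], 'v': [3, 4]})
print(pd.merge(left, right, on='k', suffixes=('_l', '_r')))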
left = pd.DataFrame({'key': [1], 'v1': [10]})
print(left)
right = pd.DataFrame({'key': [1, 2], 'v1': [20, 30]})
print(right)
print(pd.merge(left, right, how='outer'))
print(pd.merge(left, right, how='outer').dtypes)
print(pd.merge(left, right, how='outer', on='key'))
|
davidam/python-examples
|
pandas/pandas-join.py
|
Python
|
gpl-3.0
| 4,244
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import armid
import PersonaCharacteristic
from PersonaCharacteristicDialog import PersonaCharacteristicDialog
from PersonaCharacteristicDialogParameters import PersonaCharacteristicDialogParameters
from TaskCharacteristicDialog import TaskCharacteristicDialog
from TaskCharacteristicDialogParameters import TaskCharacteristicDialogParameters
import ARM
from DimensionBaseDialog import DimensionBaseDialog
from Borg import Borg
class BehaviouralCharacteristicsDialog(DimensionBaseDialog):
def __init__(self,parent,aName,bvName = ''):
b = Borg()
self.dbProxy = b.dbProxy
windowLabel = 'Persona Characteristics'
windowIcon = 'persona.png'
getFn = self.dbProxy.getPersonaBehaviouralCharacteristics
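    # An empty behavioural variable name means this dialog manages task characteristics rather than persona characteristics.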
if (bvName == ''):
windowLabel = 'Task Characteristics'
windowIcon = 'task.png'
getFn = self.dbProxy.getTaskSpecificCharacteristics
DimensionBaseDialog.__init__(self,parent,armid.PERSONACHARACTERISTICS_ID,windowLabel,(930,300),windowIcon)
self.theMainWindow = parent
self.theName = aName
self.theBehaviouralVariable = bvName
idList = [armid.PERSONACHARACTERISTICS_CHARLIST_ID,armid.PERSONACHARACTERISTICS_BUTTONADD_ID,armid.PERSONACHARACTERISTICS_BUTTONDELETE_ID]
columnList = ['Characteristic']
self.buildControls(idList,columnList,getFn,'behavioural_characteristic')
listCtrl = self.FindWindowById(armid.PERSONACHARACTERISTICS_CHARLIST_ID)
listCtrl.SetColumnWidth(0,700)
def addObjectRow(self,listCtrl,listRow,objt):
listCtrl.InsertStringItem(listRow,objt.characteristic())
def onAdd(self,evt):
try:
if (self.theBehaviouralVariable != ''):
addParameters = PersonaCharacteristicDialogParameters(armid.PERSONACHARACTERISTIC_ID,'Add Persona Characteristic',PersonaCharacteristicDialog,armid.PERSONACHARACTERISTIC_BUTTONCOMMIT_ID,self.dbProxy.addPersonaCharacteristic,True,self.theName,self.theBehaviouralVariable)
else:
addParameters = TaskCharacteristicDialogParameters(armid.TASKCHARACTERISTIC_ID,'Add Task Characteristic',TaskCharacteristicDialog,armid.TASKCHARACTERISTIC_BUTTONCOMMIT_ID,self.dbProxy.addTaskCharacteristic,True,self.theName,False)
self.addObject(addParameters)
except ARM.ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Add characteristic',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
def onUpdate(self,evt):
selectedObjt = self.objts[self.deprecatedLabel()]
objtId = selectedObjt.id()
try:
if (self.theBehaviouralVariable != ''):
updateParameters = PersonaCharacteristicDialogParameters(armid.PERSONACHARACTERISTIC_ID,'Edit Persona Characteristic',PersonaCharacteristicDialog,armid.PERSONACHARACTERISTIC_BUTTONCOMMIT_ID,self.dbProxy.updatePersonaCharacteristic,False,self.theName,self.theBehaviouralVariable)
else:
updateParameters = TaskCharacteristicDialogParameters(armid.TASKCHARACTERISTIC_ID,'Edit Task Characteristic',TaskCharacteristicDialog,armid.TASKCHARACTERISTIC_BUTTONCOMMIT_ID,self.dbProxy.updateTaskCharacteristic,False,self.theName,False)
self.updateObject(selectedObjt,updateParameters)
except ARM.ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Edit characteristic',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
      dlg.Destroy()
def onDelete(self,evt):
try:
if (self.theBehaviouralVariable != ''):
self.deleteObject('No persona characteristic','Delete persona characteristic',self.dbProxy.deletePersonaCharacteristic)
else:
self.deleteObject('No task characteristic','Delete task characteristic',self.dbProxy.deleteTaskCharacteristic)
except ARM.ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Delete task characteristic',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
      dlg.Destroy()
def deprecatedLabel(self):
listCtrl = self.FindWindowById(armid.PERSONACHARACTERISTICS_CHARLIST_ID)
charItem = listCtrl.GetItem(listCtrl.theSelectedIdx,0)
charTxt = charItem.GetText()
if (self.theBehaviouralVariable != ''):
pcLabel = self.theName + '/' + self.theBehaviouralVariable + '/' + charTxt
else:
pcLabel = self.theName + '/' + charTxt
return pcLabel
|
RobinQuetin/CAIRIS-web
|
cairis/cairis/BehaviouralCharacteristicsDialog.py
|
Python
|
apache-2.0
| 5,061
|
# -*- coding: utf-8 -*-
#Copyright (C) 2011 Seán Hayes
#Python imports
from datetime import datetime
#Django imports
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test.client import Client
#App imports
from ..models import Feedback
#Test imports
from .util import BaseTestCase
class FeedbackTestCase(BaseTestCase):
def test_post_success(self):
post_data = {
'text': 'This is a test!'
}
self.assertEqual(Feedback.objects.count(), 0)
response = self.client.post(reverse('feedback'), post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(Feedback.objects.count(), 1)
f = Feedback.objects.all()[0]
self.assertEqual(f.user, self.user1)
self.assertNotEqual(f.date, None)
self.assertEqual(f.page, '')
self.assertEqual(f.text, post_data['text'])
self.assertEqual(f.archived, False)
|
SeanHayes/django-basic-feedback
|
django_basic_feedback/tests/test_urls.py
|
Python
|
bsd-3-clause
| 1,010
|
"""APyCON URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from API import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^register/', views.MyView.as_view()),
url(r'^login/', views.Login.as_view()),
]
|
stefaniapazabarca/APYCON
|
APyCON/APyCON/urls.py
|
Python
|
gpl-3.0
| 929
|
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for Packagemanager."""
from script import Script
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/orca/scripts/apps/packagemanager/__init__.py
|
Python
|
gpl-3.0
| 850
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
================================
Iterating Over A Predefined List
================================
The Chooser component iterates (steps) forwards and backwards through a list of
items. Request the next or previous item and Chooser will return it.
The ForwardIteratingChooser component only steps forwards, but can therefore
handle more than just lists - for example: infinite sequences.
Example Usage
-------------
A simple slideshow::
items=[ "image1.png", "image2.png", "image3.png", ... ]
Graphline( CHOOSER = Chooser(items=imagefiles),
FORWARD = Button(position=(300,16), msg="NEXT", caption="Next"),
BACKWARD = Button(position=(16,16), msg="PREV", caption="Previous"),
DISPLAY = Image(position=(16,64), size=(640,480)),
linkages = { ("FORWARD" ,"outbox") : ("CHOOSER","inbox"),
("BACKWARD","outbox") : ("CHOOSER","inbox"),
("CHOOSER" ,"outbox") : ("DISPLAY","inbox"),
}
).run()
The chooser is driven by the 'next' and 'previous' Button components. Chooser
then sends filenames to an Image component to display them.
Another example: a forever looping carousel of files, read at 1MBit/s::
def filenames():
while 1:
yield "file 1"
yield "file 2"
yield "file 3"
JoinChooserToCarousel( chooser = InfiniteChooser(items=filenames),
carousel = FixedRateControlledReusableFilereader("byte",rate=131072,chunksize=1024),
)
How does it work?
-----------------
When creating it, pass the component a set of items for it to iterate over.
Chooser will only accept finite length datasets. InfiniteChooser will accept
any iterable sequence, even one that never ends from a generator.
Once activated, the component will emit the first item from the list from its
"outbox" outbox.
If the list/sequence is empty, then nothing is emitted, even in response to
messages sent to the "inbox" inbox described now.
Send commands to the "inbox" inbox to move onto another item of data and cause
it to be emitted. This behaviour is very much like a database cursor or file
pointer - you are issuing commands to step through a dataset.
Send "SAME" and the component will emit the same item of data that was last
emitted last time. Both Chooser and InfiniteChooser respond to this request.
Send "NEXT" and the component will emit the next item from the list or sequence.
If there is no 'next' item (because we are already at the end of the
list/sequence) then nothing is emitted. Both Chooser and InfiniteChooser respond
to this request.
With InfiniteChooser, if there is no 'next' item then, additionally, a
producerFinished message will be sent out of its "signal" outbox to signal that
the end of the sequence has been reached. The component will then terminate.
All requests described from now are only supported by the Chooser component.
InfiniteChooser will ignore them.
Send "PREV" and the previous item from the list or sequence will be emitted. If
there is no previous item (because we are already at the front of the
list/sequence) then nothing is emitted.
Send "FIRST" or "LAST" and the first or last item from the list or sequence will
be emitted, respectively. The item will be emitted even if we are already at the
first/last item.
Send "RANDOM" and a random item will be emitted.
If Chooser or InfiniteChooser receive a shutdownMicroprocess message on the
"control" inbox, they will pass it on out of the "signal" outbox. The component
will then terminate.
"""
import Axon
from Axon.Ipc import producerFinished, shutdownMicroprocess
import random
class Chooser(Axon.Component.component):
"""\
Chooser([items]) -> new Chooser component.
Iterates through a finite list of items. Step by sending "NEXT", "PREV",
"FIRST" or "LAST" messages to its "inbox" inbox.
Keyword arguments:
- items -- list of items to be chosen from, must be type 'list' (default=[])
"""
Inboxes = { "inbox" : "receive commands",
"control" : "shutdown messages"
}
Outboxes = { "outbox" : "emits chosen items",
"signal" : "shutdown messages"
}
def __init__(self, items = []):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(Chooser,self).__init__()
self.items = list(items)
self.useditems = []
def shutdown(self):
"""
Returns True if a shutdownMicroprocess message was received.
"""
if self.dataReady("control"):
message = self.recv("control")
if isinstance(message, shutdownMicroprocess):
self.send(message, "signal")
return True
return False
def main(self):
"""Main loop."""
try:
self.send( self.getCurrentChoice(), "outbox")
except IndexError:
pass
done = False
while not done:
yield 1
while self.dataReady("inbox"):
send = True
msg = self.recv("inbox")
if msg == "SAME":
pass
elif msg == "NEXT":
send = self.gotoNext()
elif msg == "PREV":
send = self.gotoPrev()
elif msg == "FIRST":
send = self.gotoFirst()
elif msg == "LAST":
send = self.gotoLast()
elif msg == "RANDOM":
pass
send = self.gotoRandom()
else:
send = False
if send:
try:
self.send( self.getCurrentChoice(), "outbox")
except IndexError:
pass
done = self.shutdown()
def getCurrentChoice(self):
"""Return the current choice to the outbox"""
return self.items[0]
def gotoNext(self):
"""\
Advance the choice forwards one.
Returns True if successful or False if unable to (eg. already at end).
"""
if len(self.items) > 1:
self.useditems.append(self.items[0])
del(self.items[0])
return True
return False
def gotoPrev(self):
"""\
        Step the choice backwards one.
Returns True if successful or False if unable to (eg. already at start).
"""
#why not use the same form as above, with an if statement that tests len(self.useditems)?
try:
self.items.insert(0, self.useditems[-1])
del(self.useditems[-1])
return True
except IndexError: #if self.useditems[-1] doesn't exist
return False
def gotoLast(self):
"""Goto the last item in the set. Returns True."""
self.useditems.extend(self.items[:-1])
self.items = [self.items[-1]]
return True
def gotoFirst(self):
"""Goto the first item in the set. Returns True."""
self.useditems.extend(self.items)
self.items = self.useditems
self.useditems = []
return True
def gotoRandom(self):
"""Goto a random item."""
numItems = len(self.useditems) + len(self.items)
index = int(random.random()*numItems)
if index < len(self.useditems): #index is one of the useditems
newItems = self.useditems[index:]
newItems.extend(self.items)
self.items = newItems
self.useditems = self.useditems[:index]
return True
else: #index is greater than the breakoff
offsetIndex = index - len(self.useditems)
self.useditems.extend(self.items[:offsetIndex])
self.items = self.items[offsetIndex:]
return True
class ForwardIteratingChooser(Axon.Component.component):
"""\
Chooser([items]) -> new Chooser component.
Iterates through an iterable set of items. Step by sending "NEXT" messages to
its "inbox" inbox.
Keyword arguments:
- items -- iterable source of items to be chosen from (default=[])
"""
Inboxes = { "inbox" : "receive commands",
"control" : "shutdown messages"
}
Outboxes = { "outbox" : "emits chosen items",
"signal" : "shutdown messages"
}
def __init__(self, items = []):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(ForwardIteratingChooser,self).__init__()
self.items = iter(items)
self.gotoNext()
def shutdown(self):
"""
Returns True if a shutdownMicroprocess message was received.
"""
if self.dataReady("control"):
message = self.recv("control")
if isinstance(message, shutdownMicroprocess):
self.send(message, "signal")
return True
return False
def main(self):
"""Main loop."""
try:
self.send( self.getCurrentChoice(), "outbox")
except IndexError:
pass
done = False
while not done:
yield 1
while self.dataReady("inbox"):
send = True
msg = self.recv("inbox")
if msg == "SAME":
pass
elif msg == "NEXT":
send = self.gotoNext()
if not send:
done = True
self.send( producerFinished(self), "signal")
else:
send = False
if send:
try:
self.send( self.getCurrentChoice(), "outbox")
except IndexError:
pass
done = done or self.shutdown()
if not done:
self.pause()
def getCurrentChoice(self):
"""Return the current choice"""
try:
return self.currentitem
except AttributeError:
raise IndexError()
def gotoNext(self):
"""\
Advance the choice forwards one.
Returns True if successful or False if unable to (eg. already at end).
"""
try:
self.currentitem = self.items.next()
return True
except StopIteration:
return False
__kamaelia_components__ = ( Chooser, ForwardIteratingChooser, )
|
sparkslabs/kamaelia_
|
Sketches/JL/Slideshows/Chooser_jlei.py
|
Python
|
apache-2.0
| 11,189
|
from flask import Flask, redirect, abort, url_for
app = Flask(__name__)
app.debug = True
@app.route('/')
def index():
return redirect(url_for('login'))
@app.route('/login')
def login():
abort(401)
this_is_never_executed()
if __name__ == '__main__':
app.run()
|
schanezon/webapp
|
test.py
|
Python
|
apache-2.0
| 278
|
########################################
# Read radiosonde files from Integrated Global RadiosondeArchive
#, separate soundings and save in python
#
# http://www1.ncdc.noaa.gov/pub/data/igra
# Created by: Peter Willetts
# Created on: 25/06/2014
#
########################################
#
#
###################################################
import glob
rad_flist = glob.glob ('/nfs/a90/eepdw/Data/Observations/Radiosonde_downloaded_from_NOAA_GUAN/derived_parameters/*.dat')
|
peterwilletts24/Python-Scripts
|
Radiosonde_Data/Rough_Station_Variable_Plot.py
|
Python
|
mit
| 479
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-labservices"
PACKAGE_PPRINT_NAME = "Lab Services"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py')
if os.path.exists(os.path.join(package_folder_path, 'version.py'))
else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
keywords="azure, azure sdk", # update with search keywords relevant to the azure service / product
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.mgmt',
]),
install_requires=[
'msrest>=0.6.21',
'azure-common~=1.1',
'azure-mgmt-core>=1.3.0,<2.0.0',
],
python_requires=">=3.6"
)
|
Azure/azure-sdk-for-python
|
sdk/labservices/azure-mgmt-labservices/setup.py
|
Python
|
mit
| 2,668
|
from base.decorators import anonymous_required
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.auth.password_validation import password_validators_help_texts
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView
from registration.backends.hmac.views import RegistrationView as ActivationRegistrationView
from registration.backends.simple.views import RegistrationView
from .forms import UserRegisterForm
User = get_user_model()
@method_decorator(anonymous_required(redirect_url='index'), name='dispatch')
class UserActivationRegisterView(ActivationRegistrationView):
form_class = UserRegisterForm
def get_context_data(self, **kwargs):
context = super(UserActivationRegisterView, self).get_context_data(**kwargs)
context["password_rules"] = password_validators_help_texts()
return context
@method_decorator(anonymous_required(redirect_url='index'), name='dispatch')
class UserNormalRegisterView(RegistrationView):
form_class = UserRegisterForm
@method_decorator(login_required(login_url='index'), name='dispatch')
class HomeView(TemplateView):
"""
Home view
"""
template_name = 'users/home.html'
def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
context['authenticated'] = self.request.user.is_authenticated()
if context['authenticated']:
context['user_detail'] = User.objects.get(username=self.request.user)
return context
|
iPablo/starter-python-sevilla
|
python_sevilla/users/views.py
|
Python
|
mit
| 1,604
|
# -*- coding: utf-8 -*-
INBOUND_POST_REQUEST = {
'text': '',
'html': '',
'from': '',
'to': '',
'cc': '',
'subject': '',
'dkim': '',
'SPF': '',
'envelopeemail': '',
'charsets': '',
'spam_score': '',
'spam_report': '',
'attachments': '',
'attachment-info': '',
'attachmentX': ''
}
STANDARD_WEBHOOK_EVENT = [
{
"email": "john.doe@sendgrid.com",
"timestamp": 1337197600,
"smtp-id": "<4FB4041F.6080505@sendgrid.com>",
"event": "processed"
},
{
"email": "john.doe@sendgrid.com",
"timestamp": 1337966815,
"category": "newuser",
"event": "click",
"url": "http://sendgrid.com"
},
{
"email": "john.doe@sendgrid.com",
"timestamp": 1337969592,
"smtp-id": "<20120525181309.C1A9B40405B3@Example-Mac.local>",
"event": "processed"
}
]
from .test_views import *
|
rosscdh/django-sendgrid
|
dj_sendgrid/tests/__init__.py
|
Python
|
mit
| 870
|
#!/usr/bin/env python
"""
A toolkit for identifying and advertising service resources.
Uses a specific naming convention for the Task Definition of services. If you
name the Task Definition ending with "-service", no configuration is needed.
This also requires that you not use that naming convention for task definitions
that are not services.
For example:
A Task Definition with the family name of 'cache-service' will have its
hosting Container Instance's internal ip added to a Route53 private Zone as
cache.local and other machines on the same subnet can address it that way.
"""
import argparse
import logging
import os
import re
import json
import boto
import boto.ec2
import boto.route53
import requests
from etcd.client import Client
from time import sleep
region = os.environ.get('ECS_REGION', 'us-east-1')
ecs = boto.connect_ec2containerservice(
host='ecs.{0}.amazonaws.com'.format(region))
ec2 = boto.ec2.connect_to_region(region)
route53 = boto.route53.connect_to_region(region)
logging.basicConfig(format='%(asctime)s %(message)s',
datefmt='%Y/%m/%d/ %I:%M:%S %p')
if 'ECS_CLUSTER' in os.environ:
cluster = os.environ['ECS_CLUSTER']
elif os.path.exists('/etc/ecs/ecs.config'):
pat = re.compile(r'\bECS_CLUSTER\b\s*=\s*(\w*)')
cluster = pat.findall(open('/etc/ecs/ecs.config').read())[-1]
else:
cluster = None
def get_task_arns(family):
"""
    Get the ARNs of running tasks, given the family name.
"""
response = ecs.list_tasks(cluster=cluster, family=family)
arns = response['ListTasksResponse']['ListTasksResult']['taskArns']
if len(arns) == 0:
return None
return arns
def get_ec2_interface(container_instance_arn):
"""
Get the ec2 interface from an container instance ARN.
"""
response = ecs.describe_container_instances(container_instance_arn, cluster=cluster)
ec2_instance_id = response['DescribeContainerInstancesResponse'] \
['DescribeContainerInstancesResult']['containerInstances'] \
[0]['ec2InstanceId']
response = ec2.get_all_instances(filters={'instance-id': ec2_instance_id})
return response[0].instances[0].interfaces[0]
def get_zone_for_vpc(vpc_id):
"""
Identify the Hosted Zone for the given VPC.
Assumes a 1 to 1 relationship.
NOTE: There is an existing bug.
https://github.com/boto/boto/issues/3061
When that changes, I expect to have to search ['VPCs'] as a list of
dictionaries rather than a dictionary. This has the unfortunate side
effect of not working for Hosted Zones that are associated with more than
one VPC. (But, why would you expect internal DNS for 2 different private
networks to be the same anyway?)
"""
response = route53.get_all_hosted_zones()['ListHostedZonesResponse']
for zone in response['HostedZones']:
zone_id = zone['Id'].split('/')[-1]
detail = route53.get_hosted_zone(zone_id)['GetHostedZoneResponse']
try:
if detail['VPCs']['VPC']['VPCId'] == vpc_id:
return {'zone_id': zone_id, 'zone_name': zone['Name']}
except KeyError:
pass
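# Collect the private IPs and tcp port mappings of every running task in the given
# task-definition family; a trailing '-service' suffix is stripped from the advertised name.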
def get_service_info(service_name):
info = {
"name": service_name,
"tasks": []
}
if service_name[-8:] == '-service':
info['name'] = service_name[:-8]
task_arns = get_task_arns(service_name)
if not task_arns:
logging.info('{0} is NOT RUNNING'.format(service_name))
return None
else:
logging.info('{0} is RUNNING'.format(service_name))
data = ecs.describe_tasks(task_arns, cluster=cluster)
tasks = data['DescribeTasksResponse']['DescribeTasksResult']['tasks']
for task in tasks:
interface = get_ec2_interface(task['containerInstanceArn'])
task_info = {
'ip': interface.private_ip_address,
'ports': {}
}
for container in task['containers']:
if container['networkBindings']:
for port in container['networkBindings']:
if port['protocol'] == 'tcp':
task_info['ports'][port['containerPort']] = port['hostPort']
info['tasks'].append(task_info)
info['vpc_id'] = interface.vpc_id
return info
def update_dns(zone_id, zone_name, service_name, service_ips, ttl=20):
"""
Insert or update DNS record.
"""
host_name = '.'.join([service_name, zone_name])
record_set = boto.route53.record.ResourceRecordSets(route53, zone_id)
record = record_set.add_change('UPSERT', host_name, 'A', ttl)
for service_ip in service_ips:
record.add_value(service_ip)
record_set.commit()
return record_set
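# update_service() ties the pieces together: look up the running tasks, then publish
# them either as a Route53 A record (method == 'dns') or as a JSON document in etcd
# under /tasks[/<prefix>]/<service name> (method == 'etcd').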
def update_service(service_name, method, prefix):
"""
    Update DNS or etcd to allow discovery of properly named task definitions.
"""
info = get_service_info(service_name)
if not info:
return None
if method == 'dns':
network = get_zone_for_vpc(info["vpc_id"])
ips = [t['ip'] for t in info['tasks']]
logging.info('Registering {0}.{1} as {2}'.format(
info['name'], network['zone_name'], ','.join(ips)))
update_dns(network['zone_id'], network['zone_name'],
info['name'], ips)
elif method == 'etcd':
data = json.dumps(info['tasks'])
logging.info('Registering {0} as {1}'.format(
info['name'], data))
host = requests.get("http://169.254.169.254/latest/meta-data/local-ipv4").content
client = Client(host=host, port=4001)
key = '/' + '/'.join([i for i in ['tasks', prefix, info['name']] if i])
client.node.set(key, data)
def main():
"""
Main function that handles running the command.
"""
parser = argparse.ArgumentParser()
parser.add_argument('service_name', nargs=1,
help='list of services to start')
parser.add_argument('method', nargs=1,
help='method of registering service')
parser.add_argument('-p', '--prefix', action='store', default=False,
help='prefix when saving to etcd')
parser.add_argument('-q', '--quiet', action='store_true',
help='suppress output')
parser.add_argument('-r', '--rerun', action='store_true',
help='run again after a 60 second pause')
args = parser.parse_args()
if not args.quiet:
logging.getLogger().setLevel(logging.INFO)
update_service(args.service_name[0], args.method[0], args.prefix)
if args.rerun:
sleep(60)
update_service(args.service_name[0], args.method[0], args.prefix)
if __name__ == '__main__':
main()
|
simonluijk/aws-ecs-service-discovery
|
register.py
|
Python
|
mit
| 6,803
|
from typing_extensions import Final
class A:
def __init__(self):
self.a: Final[int] = 1
class B:
b: Final[int]
def __init__(self):
self.b = 1
|
smmribeiro/intellij-community
|
python/testData/inspections/PyFinalInspection/ImportedInstanceFinalReassignment/b.py
|
Python
|
apache-2.0
| 172
|
import sys
import datetime
def get_time_stamp():
return datetime.datetime.now().strftime("%Y%m%d%H%M%S")
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
|
bahamoot/Association-Study
|
python/pana/mylib.py
|
Python
|
artistic-2.0
| 204
|
import errno
import json.decoder as json_decoder
import logging
import os
import platform
import random
import re
import socket
import sys
import tempfile
import time
from dateutil import tz
from .exceptions import PatroniException
from .version import __version__
tzutc = tz.tzutc()
logger = logging.getLogger(__name__)
USER_AGENT = 'Patroni/{0} Python/{1} {2}'.format(__version__, platform.python_version(), platform.system())
OCT_RE = re.compile(r'^[-+]?0[0-7]*')
DEC_RE = re.compile(r'^[-+]?(0|[1-9][0-9]*)')
HEX_RE = re.compile(r'^[-+]?0x[0-9a-fA-F]+')
DBL_RE = re.compile(r'^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?')
def deep_compare(obj1, obj2):
"""
>>> deep_compare({'1': None}, {})
False
>>> deep_compare({'1': {}}, {'1': None})
False
>>> deep_compare({'1': [1]}, {'1': [2]})
False
>>> deep_compare({'1': 2}, {'1': '2'})
True
>>> deep_compare({'1': {'2': [3, 4]}}, {'1': {'2': [3, 4]}})
True
"""
if set(list(obj1.keys())) != set(list(obj2.keys())): # Objects have different sets of keys
return False
for key, value in obj1.items():
if isinstance(value, dict):
if not (isinstance(obj2[key], dict) and deep_compare(value, obj2[key])):
return False
elif str(value) != str(obj2[key]):
return False
return True
def patch_config(config, data):
"""recursively 'patch' `config` with `data`
:returns: `!True` if the `config` was changed"""
is_changed = False
for name, value in data.items():
if value is None:
if config.pop(name, None) is not None:
is_changed = True
elif name in config:
if isinstance(value, dict):
if isinstance(config[name], dict):
if patch_config(config[name], value):
is_changed = True
else:
config[name] = value
is_changed = True
elif str(config[name]) != str(value):
config[name] = value
is_changed = True
else:
config[name] = value
is_changed = True
return is_changed
def parse_bool(value):
"""
>>> parse_bool(1)
True
>>> parse_bool('off')
False
>>> parse_bool('foo')
"""
value = str(value).lower()
if value in ('on', 'true', 'yes', '1'):
return True
if value in ('off', 'false', 'no', '0'):
return False
def strtol(value, strict=True):
"""As most as possible close equivalent of strtol(3) function (with base=0),
used by postgres to parse parameter values.
>>> strtol(0) == (0, '')
True
>>> strtol(1) == (1, '')
True
>>> strtol(9) == (9, '')
True
>>> strtol(' +0x400MB') == (1024, 'MB')
True
>>> strtol(' -070d') == (-56, 'd')
True
>>> strtol(' d ') == (None, 'd')
True
>>> strtol(' 1 d ') == (1, ' d')
True
>>> strtol('9s', False) == (9, 's')
True
>>> strtol(' s ', False) == (1, 's')
True
"""
value = str(value).strip()
for regex, base in ((HEX_RE, 16), (OCT_RE, 8), (DEC_RE, 10)):
match = regex.match(value)
if match:
end = match.end()
return int(value[:end], base), value[end:]
return (None if strict else 1), value
def strtod(value):
"""As most as possible close equivalent of strtod(3) function used by postgres to parse parameter values.
>>> strtod(' A ') == (None, 'A')
True
"""
value = str(value).strip()
match = DBL_RE.match(value)
if match:
end = match.end()
return float(value[:end]), value[end:]
return None, value
def rint(value):
"""
>>> rint(0.5) == 0
True
>>> rint(0.501) == 1
True
>>> rint(1.5) == 2
True
"""
ret = round(value)
return 2.0 * round(value / 2.0) if abs(ret - value) == 0.5 else ret
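# Convert `value`, expressed in `unit`, into multiples of `base_unit` (memory or time
# units), rounding to the resolution of the next-smaller unit, mirroring how postgres
# itself parses such settings.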
def convert_to_base_unit(value, unit, base_unit):
convert = {
'B': {'B': 1, 'kB': 1024, 'MB': 1024 * 1024, 'GB': 1024 * 1024 * 1024, 'TB': 1024 * 1024 * 1024 * 1024},
'kB': {'B': 1.0 / 1024, 'kB': 1, 'MB': 1024, 'GB': 1024 * 1024, 'TB': 1024 * 1024 * 1024},
'MB': {'B': 1.0 / (1024 * 1024), 'kB': 1.0 / 1024, 'MB': 1, 'GB': 1024, 'TB': 1024 * 1024},
'ms': {'us': 1.0 / 1000, 'ms': 1, 's': 1000, 'min': 1000 * 60, 'h': 1000 * 60 * 60, 'd': 1000 * 60 * 60 * 24},
's': {'us': 1.0 / (1000 * 1000), 'ms': 1.0 / 1000, 's': 1, 'min': 60, 'h': 60 * 60, 'd': 60 * 60 * 24},
'min': {'us': 1.0 / (1000 * 1000 * 60), 'ms': 1.0 / (1000 * 60), 's': 1.0 / 60, 'min': 1, 'h': 60, 'd': 60 * 24}
}
round_order = {
'TB': 'GB', 'GB': 'MB', 'MB': 'kB', 'kB': 'B',
'd': 'h', 'h': 'min', 'min': 's', 's': 'ms', 'ms': 'us'
}
if base_unit and base_unit not in convert:
base_value, base_unit = strtol(base_unit, False)
else:
base_value = 1
if base_unit in convert and unit in convert[base_unit]:
value *= convert[base_unit][unit] / float(base_value)
if unit in round_order:
multiplier = convert[base_unit][round_order[unit]]
value = rint(value / float(multiplier)) * multiplier
return value
def parse_int(value, base_unit=None):
"""
>>> parse_int('1') == 1
True
>>> parse_int(' 0x400 MB ', '16384kB') == 64
True
>>> parse_int('1MB', 'kB') == 1024
True
>>> parse_int('1000 ms', 's') == 1
True
>>> parse_int('1TB', 'GB') is None
True
>>> parse_int(0) == 0
True
>>> parse_int('6GB', '16MB') == 384
True
>>> parse_int('4097.4kB', 'kB') == 4097
True
>>> parse_int('4097.5kB', 'kB') == 4098
True
"""
val, unit = strtol(value)
if val is None and unit.startswith('.') or unit and unit[0] in ('.', 'e', 'E'):
val, unit = strtod(value)
if val is not None:
unit = unit.strip()
if not unit:
return int(rint(val))
val = convert_to_base_unit(val, unit, base_unit)
if val is not None:
return int(rint(val))
def parse_real(value, base_unit=None):
"""
>>> parse_real(' +0.0005 ') == 0.0005
True
>>> parse_real('0.0005ms', 'ms') == 0.0
True
>>> parse_real('0.00051ms', 'ms') == 0.001
True
"""
val, unit = strtod(value)
if val is not None:
unit = unit.strip()
if not unit:
return val
return convert_to_base_unit(val, unit, base_unit)
def compare_values(vartype, unit, old_value, new_value):
"""
>>> compare_values('enum', None, 'remote_write', 'REMOTE_WRITE')
True
>>> compare_values('real', None, '1e-06', 0.000001)
True
"""
converters = {
'bool': lambda v1, v2: parse_bool(v1),
'integer': parse_int,
'real': parse_real,
'enum': lambda v1, v2: str(v1).lower(),
'string': lambda v1, v2: str(v1)
}
convert = converters.get(vartype) or converters['string']
old_value = convert(old_value, None)
new_value = convert(new_value, unit)
return old_value is not None and new_value is not None and old_value == new_value
def _sleep(interval):
time.sleep(interval)
class RetryFailedError(PatroniException):
"""Raised when retrying an operation ultimately failed, after retrying the maximum number of attempts."""
class Retry(object):
"""Helper for retrying a method in the face of retry-able exceptions"""
def __init__(self, max_tries=1, delay=0.1, backoff=2, max_jitter=0.8, max_delay=3600,
sleep_func=_sleep, deadline=None, retry_exceptions=PatroniException):
"""Create a :class:`Retry` instance for retrying function calls
:param max_tries: How many times to retry the command. -1 means infinite tries.
:param delay: Initial delay between retry attempts.
:param backoff: Backoff multiplier between retry attempts. Defaults to 2 for exponential backoff.
:param max_jitter: Additional max jitter period to wait between retry attempts to avoid slamming the server.
:param max_delay: Maximum delay in seconds, regardless of other backoff settings. Defaults to one hour.
:param retry_exceptions: single exception or tuple"""
self.max_tries = max_tries
self.delay = delay
self.backoff = backoff
self.max_jitter = int(max_jitter * 100)
self.max_delay = float(max_delay)
self._attempts = 0
self._cur_delay = delay
self.deadline = deadline
self._cur_stoptime = None
self.sleep_func = sleep_func
self.retry_exceptions = retry_exceptions
def reset(self):
"""Reset the attempt counter"""
self._attempts = 0
self._cur_delay = self.delay
self._cur_stoptime = None
def copy(self):
"""Return a clone of this retry manager"""
return Retry(max_tries=self.max_tries, delay=self.delay, backoff=self.backoff,
max_jitter=self.max_jitter / 100.0, max_delay=self.max_delay, sleep_func=self.sleep_func,
deadline=self.deadline, retry_exceptions=self.retry_exceptions)
@property
def sleeptime(self):
return self._cur_delay + (random.randint(0, self.max_jitter) / 100.0)
def update_delay(self):
self._cur_delay = min(self._cur_delay * self.backoff, self.max_delay)
@property
def stoptime(self):
return self._cur_stoptime
def __call__(self, func, *args, **kwargs):
"""Call a function with arguments until it completes without throwing a `retry_exceptions`
:param func: Function to call
:param args: Positional arguments to call the function with
:params kwargs: Keyword arguments to call the function with
The function will be called until it doesn't throw one of the retryable exceptions"""
self.reset()
while True:
try:
if self.deadline is not None and self._cur_stoptime is None:
self._cur_stoptime = time.time() + self.deadline
return func(*args, **kwargs)
except self.retry_exceptions as e:
# Note: max_tries == -1 means infinite tries.
if self._attempts == self.max_tries:
logger.warning('Retry got exception: %s', e)
raise RetryFailedError("Too many retry attempts")
self._attempts += 1
sleeptime = hasattr(e, 'sleeptime') and e.sleeptime or self.sleeptime
if self._cur_stoptime is not None and time.time() + sleeptime >= self._cur_stoptime:
logger.warning('Retry got exception: %s', e)
raise RetryFailedError("Exceeded retry deadline")
logger.debug('Retry got exception: %s', e)
self.sleep_func(sleeptime)
self.update_delay()
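# Illustrative use only (the names below are placeholders, not part of this module):
#   retry = Retry(max_tries=3, retry_exceptions=(PatroniException,))
#   result = retry(flaky_call, some_arg, keyword=value)
# This retries `flaky_call` with exponential backoff and jitter until it succeeds
# or RetryFailedError is raised.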
def polling_loop(timeout, interval=1):
"""Returns an iterator that returns values until timeout has passed. Timeout is measured from start of iteration."""
start_time = time.time()
iteration = 0
end_time = start_time + timeout
while time.time() < end_time:
yield iteration
iteration += 1
time.sleep(interval)
def split_host_port(value, default_port):
t = value.rsplit(':', 1)
if ':' in t[0]:
t[0] = t[0].strip('[]')
t.append(default_port)
return t[0], int(t[1])
def uri(proto, netloc, path='', user=None):
host, port = netloc if isinstance(netloc, (list, tuple)) else split_host_port(netloc, 0)
if host and ':' in host and host[0] != '[' and host[-1] != ']':
host = '[{0}]'.format(host)
port = ':{0}'.format(port) if port else ''
path = '/{0}'.format(path) if path and not path.startswith('/') else path
user = '{0}@'.format(user) if user else ''
return '{0}://{1}{2}{3}{4}'.format(proto, user, host, port, path)
def iter_response_objects(response):
prev = ''
decoder = json_decoder.JSONDecoder()
for chunk in response.read_chunked(decode_content=False):
if isinstance(chunk, bytes):
chunk = chunk.decode('utf-8')
chunk = prev + chunk
length = len(chunk)
idx = json_decoder.WHITESPACE.match(chunk, 0).end()
while idx < length:
try:
message, idx = decoder.raw_decode(chunk, idx)
except ValueError: # malformed or incomplete JSON, unlikely to happen
break
else:
yield message
idx = json_decoder.WHITESPACE.match(chunk, idx).end()
prev = chunk[idx:]
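# Yields each JSON document from a chunked HTTP response as soon as it can be decoded,
# carrying any trailing partial document over to the next chunk via `prev`.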
def is_standby_cluster(config):
# Check whether or not provided configuration describes a standby cluster
return isinstance(config, dict) and (config.get('host') or config.get('port') or config.get('restore_command'))
def cluster_as_json(cluster):
leader_name = cluster.leader.name if cluster.leader else None
cluster_lsn = cluster.last_lsn or 0
ret = {'members': []}
for m in cluster.members:
if m.name == leader_name:
config = cluster.config.data if cluster.config and cluster.config.modify_index else {}
role = 'standby_leader' if is_standby_cluster(config.get('standby_cluster')) else 'leader'
elif m.name in cluster.sync.members:
role = 'sync_standby'
else:
role = 'replica'
member = {'name': m.name, 'role': role, 'state': m.data.get('state', ''), 'api_url': m.api_url}
conn_kwargs = m.conn_kwargs()
if conn_kwargs.get('host'):
member['host'] = conn_kwargs['host']
if conn_kwargs.get('port'):
member['port'] = int(conn_kwargs['port'])
optional_attributes = ('timeline', 'pending_restart', 'scheduled_restart', 'tags')
member.update({n: m.data[n] for n in optional_attributes if n in m.data})
if m.name != leader_name:
lsn = m.data.get('xlog_location')
if lsn is None:
member['lag'] = 'unknown'
elif cluster_lsn >= lsn:
member['lag'] = cluster_lsn - lsn
else:
member['lag'] = 0
ret['members'].append(member)
# sort members by name for consistency
ret['members'].sort(key=lambda m: m['name'])
if cluster.is_paused():
ret['pause'] = True
if cluster.failover and cluster.failover.scheduled_at:
ret['scheduled_switchover'] = {'at': cluster.failover.scheduled_at.isoformat()}
if cluster.failover.leader:
ret['scheduled_switchover']['from'] = cluster.failover.leader
if cluster.failover.candidate:
ret['scheduled_switchover']['to'] = cluster.failover.candidate
return ret
def is_subpath(d1, d2):
real_d1 = os.path.realpath(d1) + os.path.sep
real_d2 = os.path.realpath(os.path.join(real_d1, d2))
return os.path.commonprefix([real_d1, real_d2 + os.path.sep]) == real_d1
def validate_directory(d, msg="{} {}"):
if not os.path.exists(d):
try:
os.makedirs(d)
except OSError as e:
logger.error(e)
if e.errno != errno.EEXIST:
raise PatroniException(msg.format(d, "couldn't create the directory"))
elif os.path.isdir(d):
try:
fd, tmpfile = tempfile.mkstemp(dir=d)
os.close(fd)
os.remove(tmpfile)
except OSError:
raise PatroniException(msg.format(d, "the directory is not writable"))
else:
raise PatroniException(msg.format(d, "is not a directory"))
def data_directory_is_empty(data_dir):
if not os.path.exists(data_dir):
return True
return all(os.name != 'nt' and (n.startswith('.') or n == 'lost+found') for n in os.listdir(data_dir))
def keepalive_intvl(timeout, idle, cnt=3):
return max(1, int(float(timeout - idle) / cnt))
def keepalive_socket_options(timeout, idle, cnt=3):
yield (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if sys.platform.startswith('linux'):
yield (socket.SOL_TCP, 18, int(timeout * 1000)) # TCP_USER_TIMEOUT
TCP_KEEPIDLE = getattr(socket, 'TCP_KEEPIDLE', None)
TCP_KEEPINTVL = getattr(socket, 'TCP_KEEPINTVL', None)
TCP_KEEPCNT = getattr(socket, 'TCP_KEEPCNT', None)
elif sys.platform.startswith('darwin'):
TCP_KEEPIDLE = 0x10 # (named "TCP_KEEPALIVE" in C)
TCP_KEEPINTVL = 0x101
TCP_KEEPCNT = 0x102
else:
return
intvl = keepalive_intvl(timeout, idle, cnt)
yield (socket.IPPROTO_TCP, TCP_KEEPIDLE, idle)
yield (socket.IPPROTO_TCP, TCP_KEEPINTVL, intvl)
yield (socket.IPPROTO_TCP, TCP_KEEPCNT, cnt)
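# enable_keepalive() below applies these options, falling back to
# sock.ioctl(SIO_KEEPALIVE_VALS, ...) on Windows.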
def enable_keepalive(sock, timeout, idle, cnt=3):
SIO_KEEPALIVE_VALS = getattr(socket, 'SIO_KEEPALIVE_VALS', None)
if SIO_KEEPALIVE_VALS is not None: # Windows
intvl = keepalive_intvl(timeout, idle, cnt)
return sock.ioctl(SIO_KEEPALIVE_VALS, (1, idle * 1000, intvl * 1000))
for opt in keepalive_socket_options(timeout, idle, cnt):
sock.setsockopt(*opt)
def find_executable(executable, path=None):
_, ext = os.path.splitext(executable)
if (sys.platform == 'win32') and (ext != '.exe'):
executable = executable + '.exe'
if os.path.isfile(executable):
return executable
if path is None:
path = os.environ.get('PATH', os.defpath)
for p in path.split(os.pathsep):
f = os.path.join(p, executable)
if os.path.isfile(f):
return f
|
zalando/patroni
|
patroni/utils.py
|
Python
|
mit
| 17,617
|
#-*- coding: UTF-8 -*-
from numpy import *
import operator
def classifyPerson():
resultList = ['not at all','in small doses','in large doses']
percentTats = float(raw_input("percentage of time spent playing video games?"))
    ffMiles = float(raw_input("frequent flier miles earned per year?"))
iceCream = float(raw_input("liters of ice cream consumed per year?"))
datingDataMat,datingLables = file2matrix('datingTestSet2.txt')
normMat,ranges,minVals = autoNorm(datingDataMat)
inArr = array([ffMiles,percentTats,iceCream])
classifierResult = classify0((inArr-minVals)/ranges,normMat,datingLables,3)
print "You will probably like this person:", resultList[classifierResult-1]
def datingClassTest():
hoRatio = 0.10
datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')
normMat,ranges,minVals = autoNorm(datingDataMat)
m = normMat.shape[0]
numTestVecs = int(m*hoRatio)
errorCount = 0.0
for i in range(numTestVecs):
classifierResult = classify0(normMat[i,:],normMat[numTestVecs:m,:],datingLabels[numTestVecs:m],3)
print "the classifier came back with: %d, the real answer is: %d" %(classifierResult,datingLabels[i])
if(classifierResult != datingLabels[i]) : errorCount+=1.0
print "total error rate is : %f " %(errorCount/float(numTestVecs))
def autoNorm(dataSet):
    minValue = dataSet.min(0) # column-wise minimum
    maxValue = dataSet.max(0) # column-wise maximum
    ranges = maxValue - minValue # max - min per column
    normDataSet = zeros(shape(dataSet))
    m = dataSet.shape[0]
    normDataSet = dataSet - tile(minValue,(m,1))
    normDataSet = normDataSet/tile(ranges,(m,1)) # (value - min) / (max - min)
    return normDataSet, ranges, minValue
def file2matrix(filename):
fr = open(filename)
arrayOLines = fr.readlines()
    numberOfLines = len(arrayOLines) # total number of records
    returnMat = zeros((numberOfLines,3)) # initialize the return matrix with zeros
classLabelVector = []
index = 0
for line in arrayOLines:
line = line.strip()
listFromLine = line.split('\t')
returnMat[index,:] = listFromLine[0:3]
classLabelVector.append(int(listFromLine[-1]))
index += 1
    # returnMat holds all the feature values
    # classLabelVector holds the label for each record
return returnMat,classLabelVector
def createDataSet():
group = array([[1.0,1.1],[1.0,1.0],[0,0],[0,0.1]])
labels = ['A','A','B','B']
return group,labels
def classify0(inX, dataSet, labels, k):
    # get the size of the dataset (4 rows in the sample data)
dataSetSize = dataSet.shape[0]
    # tile the input vector to the dataset's shape, then subtract
diffMat = tile(inX, (dataSetSize,1)) - dataSet
# print diffMat
sqDiffMat = diffMat**2
sqDistances = sqDiffMat.sum(axis=1)
distances = sqDistances**0.5
sortedDistIndices = distances.argsort()
classCount = {}
for i in range(k):
voteIlable = labels[sortedDistIndices[i]]
classCount[voteIlable] = classCount.get(voteIlable,0)+1
sortedClassCount = sorted(classCount.iteritems(),key=operator.itemgetter(1),reverse=True)
return sortedClassCount[0][0]
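# Usage sketch (not part of the original script): classify a new point against the
# tiny dataset from createDataSet() with k=3, as used throughout this file.
if __name__ == '__main__':
    group, labels = createDataSet()
    print classify0(array([0.2, 0.3]), group, labels, 3)  # prints 'B' for this toy data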
|
xinghalo/DMInAction
|
src/mlearning/chap02-knn/dating/knn.py
|
Python
|
apache-2.0
| 2,943
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import varnish
# Globals
varnish.__salt__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class VarnishTestCase(TestCase):
'''
Test cases for salt.modules.varnish
'''
def test_version(self):
'''
Test to return server version from varnishd -V
'''
with patch.dict(varnish.__salt__,
{'cmd.run': MagicMock(return_value='(varnish-2.0)')}):
self.assertEqual(varnish.version(), '2.0')
def test_ban(self):
'''
Test to add ban to the varnish cache
'''
with patch.object(varnish, '_run_varnishadm',
return_value={'retcode': 0}):
self.assertTrue(varnish.ban('ban_expression'))
def test_ban_list(self):
'''
Test to list varnish cache current bans
'''
with patch.object(varnish, '_run_varnishadm',
return_value={'retcode': True}):
self.assertFalse(varnish.ban_list())
with patch.object(varnish, '_run_varnishadm',
return_value={'retcode': False,
'stdout': 'A\nB\nC'}):
self.assertEqual(varnish.ban_list(), ['B', 'C'])
def test_purge(self):
'''
Test to purge the varnish cache
'''
with patch.object(varnish, 'ban', return_value=True):
self.assertTrue(varnish.purge())
def test_param_set(self):
'''
Test to set a param in varnish cache
'''
with patch.object(varnish, '_run_varnishadm',
return_value={'retcode': 0}):
self.assertTrue(varnish.param_set('param', 'value'))
def test_param_show(self):
'''
Test to show params of varnish cache
'''
with patch.object(varnish, '_run_varnishadm',
return_value={'retcode': True,
'stdout': 'A\nB\nC'}):
self.assertFalse(varnish.param_show('param'))
with patch.object(varnish, '_run_varnishadm',
return_value={'retcode': False,
'stdout': 'A .1\nB .2\n'}):
self.assertEqual(varnish.param_show('param'), {'A': '.1'})
if __name__ == '__main__':
from integration import run_tests
run_tests(VarnishTestCase, needs_daemon=False)
|
stephane-martin/salt-debian-packaging
|
salt-2016.3.3/tests/unit/modules/varnish_test.py
|
Python
|
apache-2.0
| 2,828
|
import time
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import (
QFileDialog,
QFontDialog,
QMessageBox,
)
from wcwidth import wcwidth
from plover import system
from plover.gui_qt.paper_tape_ui import Ui_PaperTape
from plover.gui_qt.i18n import get_gettext
from plover.gui_qt.utils import ToolBar
from plover.gui_qt.tool import Tool
_ = get_gettext()
class PaperTape(Tool, Ui_PaperTape):
''' Paper tape display of strokes. '''
TITLE = _('Paper Tape')
ICON = ':/tape.svg'
ROLE = 'paper_tape'
SHORTCUT = 'Ctrl+T'
STYLE_PAPER, STYLE_RAW = (_('Paper'), _('Raw'))
STYLES = (STYLE_PAPER, STYLE_RAW)
def __init__(self, engine):
super().__init__(engine)
self.setupUi(self)
self._strokes = []
self._all_keys = None
self._all_keys_filler = None
self._formatter = None
self._history_size = 2000000
self.styles.addItems(self.STYLES)
# Toolbar.
self.layout().addWidget(ToolBar(
self.action_ToggleOnTop,
self.action_SelectFont,
self.action_Clear,
self.action_Save,
))
self.action_Clear.setEnabled(False)
self.action_Save.setEnabled(False)
engine.signal_connect('config_changed', self.on_config_changed)
self.on_config_changed(engine.config)
engine.signal_connect('stroked', self.on_stroke)
self.tape.setFocus()
self.restore_state()
self.finished.connect(self.save_state)
def _restore_state(self, settings):
style = settings.value('style', None, int)
if style is not None:
self.styles.setCurrentText(self.STYLES[style])
self.on_style_changed(self.STYLES[style])
font_string = settings.value('font')
if font_string is not None:
font = QFont()
if font.fromString(font_string):
self.header.setFont(font)
self.tape.setFont(font)
ontop = settings.value('ontop', None, bool)
if ontop is not None:
self.action_ToggleOnTop.setChecked(ontop)
self.on_toggle_ontop(ontop)
def _save_state(self, settings):
settings.setValue('style', self.STYLES.index(self._style))
settings.setValue('font', self.header.font().toString())
ontop = bool(self.windowFlags() & Qt.WindowStaysOnTopHint)
settings.setValue('ontop', ontop)
def on_config_changed(self, config):
if 'system_name' in config:
self._strokes = []
self._all_keys = ''.join(key.strip('-') for key in system.KEYS)
self._all_keys_filler = [
' ' * wcwidth(k)
for k in self._all_keys
]
self._numbers = set(system.NUMBERS.values())
self.header.setText(self._all_keys)
self.on_style_changed(self._style)
@property
def _style(self):
return self.styles.currentText()
def _paper_format(self, stroke):
        text = self._all_keys_filler * 1  # shallow copy of the blank-key filler list
keys = stroke.steno_keys[:]
if any(key in self._numbers for key in keys):
keys.append('#')
for key in keys:
index = system.KEY_ORDER[key]
text[index] = self._all_keys[index]
return ''.join(text)
def _raw_format(self, stroke):
return stroke.rtfcre
def _show_stroke(self, stroke):
text = self._formatter(stroke)
self.tape.appendPlainText(text)
def on_stroke(self, stroke):
assert len(self._strokes) <= self._history_size
if len(self._strokes) == self._history_size:
self._strokes.pop(0)
self._strokes.append(stroke)
self._show_stroke(stroke)
self.action_Clear.setEnabled(True)
self.action_Save.setEnabled(True)
def on_style_changed(self, style):
assert style in self.STYLES
if style == self.STYLE_PAPER:
self.header.show()
self._formatter = self._paper_format
elif style == self.STYLE_RAW:
self.header.hide()
self._formatter = self._raw_format
self.tape.clear()
for stroke in self._strokes:
self._show_stroke(stroke)
def on_select_font(self):
font, ok = QFontDialog.getFont(self.header.font(), self, '',
QFontDialog.MonospacedFonts)
if ok:
self.header.setFont(font)
self.tape.setFont(font)
def on_toggle_ontop(self, ontop):
flags = self.windowFlags()
if ontop:
flags |= Qt.WindowStaysOnTopHint
else:
flags &= ~Qt.WindowStaysOnTopHint
self.setWindowFlags(flags)
self.show()
def on_clear(self):
flags = self.windowFlags()
msgbox = QMessageBox()
msgbox.setText(_("Do you want to clear the paper tape?"))
msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
# Make sure the message box ends up above the paper tape!
msgbox.setWindowFlags(msgbox.windowFlags() | (flags & Qt.WindowStaysOnTopHint))
if QMessageBox.Yes != msgbox.exec_():
return
self._strokes = []
self.action_Clear.setEnabled(False)
self.action_Save.setEnabled(False)
self.tape.clear()
def on_save(self):
filename_suggestion = 'steno-notes-%s.txt' % time.strftime('%Y-%m-%d-%H-%M')
filename = QFileDialog.getSaveFileName(
self, _('Save Paper Tape'), filename_suggestion,
_('Text files') + ' (*.txt)',
)[0]
if not filename:
return
with open(filename, 'w') as fp:
fp.write(self.tape.toPlainText())
|
nimble0/plover
|
plover/gui_qt/paper_tape.py
|
Python
|
gpl-2.0
| 5,775
|
"""
.. todo::
WRITEME
"""
import numpy as np
from pylearn2.datasets import dense_design_matrix
from pylearn2.utils.serial import load
from pylearn2.utils.rng import make_np_rng
from pylearn2.utils import contains_nan
class TFD(dense_design_matrix.DenseDesignMatrix):
"""
Pylearn2 wrapper for the Toronto Face Dataset.
http://aclab.ca/users/josh/TFD.html
Parameters
----------
which_set : str
Dataset to load. One of ['train','valid','test','unlabeled'].
fold : int in {0,1,2,3,4}
TFD contains 5 official folds for train, valid and test.
image_size : int in [48,96]
Load smaller or larger dataset variant.
example_range : array_like or None, optional
Load only examples in range [example_range[0]:example_range[1]].
center : bool, optional
Move data from range [0., 255.] to [-127.5, 127.5]
False by default.
scale : bool, optional
Move data from range [0., 255.] to [0., 1.], or
from range [-127.5, 127.5] to [-1., 1.] if center is True
False by default.
shuffle : WRITEME
rng : WRITEME
seed : WRITEME
preprocessor : WRITEME
axes : WRITEME
"""
mapper = {'unlabeled': 0, 'train': 1, 'valid': 2, 'test': 3,
'full_train': 4}
def __init__(self, which_set, fold=0, image_size=48,
example_range=None, center=False, scale=False,
shuffle=False, rng=None, seed=132987,
preprocessor=None, axes=('b', 0, 1, 'c')):
if which_set not in self.mapper.keys():
raise ValueError("Unrecognized which_set value: %s. Valid values" +
"are %s." % (str(which_set),
str(self.mapper.keys())))
assert (fold >= 0) and (fold < 5)
self.args = locals()
# load data
path = '${PYLEARN2_DATA_PATH}/faces/TFD/'
if image_size == 48:
data = load(path + 'TFD_48x48.mat')
elif image_size == 96:
data = load(path + 'TFD_96x96.mat')
else:
raise ValueError("image_size should be either 48 or 96.")
# retrieve indices corresponding to `which_set` and fold number
if self.mapper[which_set] == 4:
set_indices = (data['folds'][:, fold] == 1) + \
(data['folds'][:, fold] == 2)
else:
set_indices = data['folds'][:, fold] == self.mapper[which_set]
assert set_indices.sum() > 0
# limit examples returned to `example_range`
if example_range:
ex_range = slice(example_range[0], example_range[1])
else:
ex_range = slice(None)
# get images and cast to float32
data_x = data['images'][set_indices]
data_x = np.cast['float32'](data_x)
data_x = data_x[ex_range]
# create dense design matrix from topological view
data_x = data_x.reshape(data_x.shape[0], image_size ** 2)
if center and scale:
data_x[:] -= 127.5
data_x[:] /= 127.5
elif center:
data_x[:] -= 127.5
elif scale:
data_x[:] /= 255.
if shuffle:
rng = make_np_rng(rng, seed, which_method='permutation')
rand_idx = rng.permutation(len(data_x))
data_x = data_x[rand_idx]
# get labels
if which_set != 'unlabeled':
data_y = data['labs_ex'][set_indices]
data_y = data_y[ex_range] - 1
data_y_identity = data['labs_id'][set_indices]
data_y_identity = data_y_identity[ex_range]
if shuffle:
data_y = data_y[rand_idx]
data_y_identity = data_y_identity[rand_idx]
y_labels = 7
else:
data_y = None
data_y_identity = None
y_labels = None
# create view converting for retrieving topological view
view_converter = dense_design_matrix.DefaultViewConverter((image_size,
image_size,
1),
axes)
# init the super class
super(TFD, self).__init__(X=data_x, y=data_y, y_labels=y_labels,
view_converter=view_converter)
assert not contains_nan(self.X)
self.y_identity = data_y_identity
self.axes = axes
if preprocessor is not None:
preprocessor.apply(self)
def get_test_set(self, fold=None):
"""
Return the test set
"""
args = {}
args.update(self.args)
del args['self']
args['which_set'] = 'test'
if fold is not None:
args['fold'] = fold
return TFD(**args)
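# Usage sketch (not part of the original module): loading the first training fold.
# It assumes the TFD .mat files are installed under ${PYLEARN2_DATA_PATH}/faces/TFD/.
#
#     train_set = TFD(which_set='train', fold=0, image_size=48, scale=True)
#     print(train_set.X.shape)   # (n_examples, 48 * 48)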
|
JazzeYoung/VeryDeepAutoEncoder
|
pylearn2/pylearn2/datasets/tfd.py
|
Python
|
bsd-3-clause
| 4,912
|
# encoding:utf-8
__author__ = 'Hope6537'
# Unit testing
import unittest
class Dict(dict):
def __init__(self, **kw):
super(Dict, self).__init__(**kw)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
# When writing unit tests, we write a test class that inherits from unittest.TestCase.
# Methods whose names start with 'test' are test methods; other methods are not treated as tests and will not be executed.
class TestDict(unittest.TestCase):
    # A test case may define the two special methods setUp() and tearDown(); they run before and after every test method, respectively.
    # Equivalent to JUnit's @Before and @After.
def setUp(self):
print 'setUp...'
def tearDown(self):
print 'tearDown...'
def test_init(self):
d = Dict(a=1, b='test')
self.assertEquals(d.a, 1)
self.assertEquals(d.b, 'test')
self.assertTrue(isinstance(d, dict))
def test_key(self):
d = Dict()
d['key'] = 'value'
self.assertEquals(d.key, 'value')
def test_attr(self):
d = Dict()
d.key = 'value'
self.assertTrue('key' in d)
self.assertEquals(d['key'], 'value')
def test_keyerror(self):
d = Dict()
        # assert that the expected exception is raised
with self.assertRaises(KeyError):
value = d['empty']
def test_attrerror(self):
d = Dict()
with self.assertRaises(AttributeError):
value = d.empty
if __name__ == '__main__':
    unittest.main() # run the unit tests
# Doctests
def abs(n):
    # (in an IDE you can run the doctest for abs directly from here)
'''
Function to get absolute value of number.
Example:
>>> abs(1)
1
>>> abs(-1)
1
>>> abs(0)
0
'''
return n if n >= 0 else (-n)
class Dict(dict):
'''
Simple dict but also support access as x.y style.
>>> d1 = Dict()
>>> d1['x'] = 100
>>> d1.x
100
>>> d1.y = 200
>>> d1['y']
200
>>> d2 = Dict(a=1, b=2, c='3')
>>> d2.c
'3'
>>> d2['empty']
Traceback (most recent call last):
...
KeyError: 'empty'
>>> d2.empty
Traceback (most recent call last):
...
AttributeError: 'Dict' object has no attribute 'empty'
'''
def __init__(self, **kw):
super(Dict, self).__init__(**kw)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
if __name__ == '__main__':
import doctest
doctest.testmod()
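# Usage sketch (not part of the original notes): both kinds of tests above can also
# be run from the command line without an IDE:
#
#     python -m doctest -v Chapter5.py        # -v prints every doctest example as it runs
#     python -m unittest Chapter5.TestDict    # run only the unittest test case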
|
Hope6537/hope-tactical-equipment
|
hope-note-module/hope-python-2.7-note/Chapter5.py
|
Python
|
apache-2.0
| 2,880
|
import logging
import registration.backends.default.urls
import registration.views
from django.conf import settings
from django.conf.urls import url
from django.contrib.auth.models import User
from django.contrib.auth.views import PasswordResetView, PasswordResetDoneView
from django.contrib.sites.requests import RequestSite
from django.urls import reverse_lazy
from django.views.generic import TemplateView
from registration import signals
from registration.backends.default.views import (
RegistrationView as DefaultRegistrationView,
)
from registration.models import RegistrationProfile
from oioioi.base.forms import OioioiPasswordResetForm, RegistrationFormWithNames
from oioioi.base.models import PreferencesSaved
from oioioi.base.preferences import PreferencesFactory
auditLogger = logging.getLogger(__name__ + '.audit')
class RegistrationView(DefaultRegistrationView):
def form_class(self, instance=None, *args, **kwargs):
return PreferencesFactory().create_form(
RegistrationFormWithNames, instance, *args, **kwargs
)
def register(self, form):
data = form.cleaned_data
request = self.request
user = User.objects.create_user(
data['username'], data['email'], data['password1']
)
user.first_name = data['first_name']
user.last_name = data['last_name']
user.is_active = not settings.SEND_USER_ACTIVATION_EMAIL
user.save()
auditLogger.info(
"User %d (%s) created account from IP %s UA: %s",
user.id,
user.username,
request.META.get('REMOTE_ADDR', '?'),
request.META.get('HTTP_USER_AGENT', '?'),
)
registration_profile = RegistrationProfile.objects.create_profile(user)
signals.user_registered.send(sender=self.__class__, user=user, request=request)
PreferencesSaved.send(form, user=user)
if settings.SEND_USER_ACTIVATION_EMAIL:
registration_profile.send_activation_email(RequestSite(request))
else:
signals.user_activated.send(
sender=self.__class__, user=user, request=request
)
return user
urlpatterns = [
url(r'^register/$', RegistrationView.as_view(), name='registration_register'),
]
if not settings.SEND_USER_ACTIVATION_EMAIL:
urlpatterns += [
url(
r'^register/complete/$',
TemplateView.as_view(
template_name='registration/'
'registration_and_activation_complete.html'
),
name='registration_complete',
)
]
urlpatterns += [
url(
r'^password/reset/$',
PasswordResetView.as_view(
form_class=OioioiPasswordResetForm,
success_url=reverse_lazy('auth_password_reset_done'),
),
name="auth_password_reset",
),
]
urlpatterns += registration.backends.default.urls.urlpatterns
|
sio2project/oioioi
|
oioioi/base/registration_backend.py
|
Python
|
gpl-3.0
| 2,955
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.conf import settings
from django.urls import reverse
# Models for home directories, information about shared directories,
# and the item model are defined here.
class CustomUserManager(BaseUserManager):
def create_user(self, email, home_dir, password=None):
"""
        Creates and saves a User with the given email, home directory
        and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
home_dir=home_dir,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, home_dir, password):
"""
        Creates and saves a superuser with the given email, home directory
        and password.
"""
user = self.create_user(
email,
password=password,
home_dir=home_dir
)
user.is_admin = True
user.save(using=self._db)
return user
def get_by_natural_key(self, username):
return CustomUser.objects.get(email=username)
class CustomUser(AbstractBaseUser):
email = models.EmailField(
verbose_name='Email address',
max_length=255,
unique=True,
)
home_dir = models.CharField("Home Directory", max_length=90)
is_active = models.BooleanField("Is active", default=True)
is_admin = models.BooleanField("Is admin", default=False)
date_joined = models.DateTimeField("Date joined", auto_now=True)
objects = CustomUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['home_dir',]
def get_username(self):
return self.email
@property
def username(self):
return self.email
def get_full_name(self):
# The user is identified by their email address
return self.email
def get_short_name(self):
# The user is identified by their email address
return self.email
def __str__(self): # __unicode__ on Python 2
return self.email
def has_perm(self, perm, obj=None):
"Does the user have a specific permission?"
# Simplest possible answer: Yes, always
return True
def has_module_perms(self, app_label):
"Does the user have permissions to view the app `app_label`?"
# Simplest possible answer: Yes, always
return True
@property
def is_staff(self):
"Is the user a member of staff?"
# Simplest possible answer: All admins are staff
return self.is_admin
@property
def home_dir_item(self):
from .items import UserItemFactory
from .item_reps import get_representation
return get_representation(UserItemFactory(self).get_item('/'))
@property
def shared_items_with_me(self):
from .items import SharedItemItemFactory
from .item_reps import get_representation
items = []
for sharing in self.shared_with_me.all():
items.append(get_representation(SharedItemItemFactory(sharing).get_item('/')))
return items
# We need to store information about directory sharing.
class Sharing (models.Model):
# owner of shared dir
owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="my_sharings")
# which item is shared
item = models.CharField(max_length=1024, blank=True, null=False)
# with who it is shared
shared_with = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='shared_with_me')
    # bitmask - what the owner allows the other user to do
permissions = models.IntegerField()
def __str__(self):
return self.owner.username + " shared " + self.item + " with: " + self.shared_with.username
class SharedLink(models.Model):
owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="my_shared_links")
item = models.CharField(max_length=1024, blank=True, null=False)
link_id = models.CharField(max_length=256, blank=False, null=False, unique=True)
permissions = models.IntegerField()
def __str__(self):
return self.owner.username + " shared " + self.item
@property
def url(self):
return reverse('link_handler', kwargs={'link_id': self.link_id, 'relative_path': ''})
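# Illustrative sketch (not part of the original project): the `permissions` fields on
# Sharing and SharedLink are described as bitmasks, so a hypothetical encoding could
# look like this; the actual bit values used by Memorandum may differ.
#
#     PERM_READ   = 0b001   # may list/download the shared item
#     PERM_WRITE  = 0b010   # may upload/modify
#     PERM_DELETE = 0b100   # may delete
#
#     def has_permission(mask, perm):
#         return bool(mask & perm)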
|
bshishov/Memorandum
|
main/models.py
|
Python
|
gpl-3.0
| 4,408
|
# project: a User Folder specialised for Zope
# NOTE: import added for completeness; BasicUserFolder is assumed to come from
# Zope 2's AccessControl package in the ScoDoc environment.
from AccessControl.User import BasicUserFolder
class ScoUserFolder(BasicUserFolder):
"""ScoDoc User Folder
"""
id ='acl_users'
title ='ScoDoc User Folder'
def __init__(self):
pass # should create db connexion ???
def getUserNames(self):
"""Return a list of usernames"""
# TODO
def getUsers(self):
"""Return a list of user objects"""
# TODO
def getUser(self, name):
"""Return the named user object or None"""
# TODO
def hasUsers(self):
return True # we lie
# validate (in super) calls
# in turn: identify, authenticate, authorize (wrt object and roles)
def identify(self, auth):
"""Identify the username and password.
Use only our cookie mode
"""
        # see exUserFolder decodeAdvancedCookie
        # it is that code which does:
        # raise 'LoginRequired', self.docLogin(self, request)
        # TODO: extract the credentials from the cookie; stub return for now
        return None, None
    # authenticate(name, password) retrieves the user with getUser()
    # and calls user.authenticate(password, request)
|
denys-duchier/Scolar
|
ZScoUF.py
|
Python
|
gpl-2.0
| 1,098
|
# harness_beaker.py
#
# Copyright (C) 2011 Jan Stancek <jstancek@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# started by Jan Stancek <jstancek@redhat.com> 2011
"""
The harness interface
The interface between the client and beaker lab controller.
"""
__author__ = """Don Zickus 2013"""
import os
import logging
import harness
import time
import re
import sys
from autotest.client.shared import utils, error
from autotest.client.bkr_xml import BeakerXMLParser
from autotest.client.bkr_proxy import BkrProxy
'''Use 5 minutes for console heartbeat'''
BEAKER_CONSOLE_HEARTBEAT = 60 * 5
class HarnessException(Exception):
def __init__(self, text):
Exception.__init__(self, text)
class harness_beaker(harness.harness):
def __init__(self, job, harness_args):
logging.debug('harness_beaker __init__')
super(harness_beaker, self).__init__(job)
# temporary hack until BEAKER_RECIPE_ID and BEAKER_LAB_CONTROLLER_URL is setup in beaker
os.environ['BEAKER_RECIPE_ID'] = open('/root/RECIPE.TXT', 'r').read().strip()
os.environ['BEAKER_LAB_CONTROLLER_URL'] = re.sub("/bkr/", ":8000", os.environ['BEAKER'])
# control whether bootstrap environment remotely connects or stays offline
# cheap hack to support flexible debug environment
# the bootstrap job object is just a stub and won't have the '_state' attribute
if hasattr(job, '_state'):
is_bootstrap = False
recipe_id = os.environ.get('RECIPE_ID') or '0'
else:
is_bootstrap = True
recipe_id = os.environ.get('BEAKER_RECIPE_ID')
os.environ['RECIPE_ID'] = recipe_id
self.state_file = os.path.join(os.path.dirname(__file__), 'harness_beaker.state')
self.recipe_id = recipe_id
self.labc_url = os.environ.get('BEAKER_LAB_CONTROLLER_URL')
self.hostname = os.environ.get('HOSTNAME')
self.tests = self.get_processed_tests()
self.watchdog_pid = None
self.offline = False
self.cmd = None
# handle legacy rhts scripts called from inside tests
os.environ['PATH'] = "%s:%s" % ('/var/cache/autotest', os.environ['PATH'])
if harness_args:
logging.info('harness_args: %s' % harness_args)
os.environ['AUTOTEST_HARNESS_ARGS'] = harness_args
self.args = self.parse_args(harness_args, is_bootstrap)
logging.debug('harness_beaker: state_file: <%s>', self.state_file)
logging.debug('harness_beaker: hostname: <%s>', self.hostname)
logging.debug('harness_beaker: labc_url: <%s>', self.labc_url)
if not self.hostname:
raise error.HarnessError('Need valid hostname')
# hack for flexible debug environment
labc = not self.offline and self.labc_url or None
self.bkr_proxy = BkrProxy(self.recipe_id, labc)
self.setupInitSymlink()
def parse_args(self, args, is_bootstrap):
if not args:
return
for a in args.split(','):
if a == 'offline':
# use cached recipe and stay offline whole time
self.offline = True
elif a[:5] == 'cache':
if len(a) > 5 and a[5] == '=':
# cache a different recipe instead
self.recipe_id = a[6:]
# remotely retrieve recipe, but stay offline during run
if not is_bootstrap:
self.offline = True
elif a[:8] == 'quickcmd':
if len(a) < 8 or a[8] != '=':
raise error.HarnessError("Bad use of 'quickcmd'")
self.cmd = a[9:]
else:
raise error.HarnessError("Unknown beaker harness arg: %s" % a)
def parse_quickcmd(self, args):
# hack allow tests to quickly submit feedback through harness
if not args:
return
if 'BEAKER_TASK_ID' not in os.environ:
raise error.HarnessError("No BEAKER_TASK_ID set")
task_id = os.environ['BEAKER_TASK_ID']
# Commands are from tests and should be reported as results
cmd, q_args = args.split(':')
if cmd == 'submit_log':
try:
# rhts_submit_log has as args: -S -T -l
# we just care about -l
f = None
arg_list = q_args.split(' ')
while arg_list:
arg = arg_list.pop(0)
if arg == '-l':
f = arg_list.pop(0)
break
if not f:
raise
self.bkr_proxy.task_upload_file(task_id, f)
except Exception:
logging.critical('ERROR: Failed to process quick cmd %s' % cmd)
elif cmd == 'submit_result':
def init_args(testname='Need/a/testname/here', status="None", logfile=None, score="0"):
return testname, status, logfile, score
try:
# report_result has TESTNAME STATUS LOGFILE SCORE
arg_list = q_args.split(' ')
testname, status, logfile, score = init_args(*arg_list)
resultid = self.bkr_proxy.task_result(task_id, status,
testname, score, '')
if (logfile and os.path.isfile(logfile) and
os.path.getsize(logfile) != 0):
self.bkr_proxy.result_upload_file(task_id, resultid, logfile)
# save the dmesg file
dfile = '/tmp/beaker.dmesg'
utils.system('dmesg -c > %s' % dfile)
if os.path.getsize(dfile) != 0:
self.bkr_proxy.result_upload_file(task_id, resultid, dfile)
# os.remove(dfile)
except Exception:
logging.critical('ERROR: Failed to process quick cmd %s' % cmd)
elif cmd == 'reboot':
# we are in a stub job. Can't use self.job.reboot() :-(
utils.system("sync; sync; reboot")
self.run_pause()
raise error.JobContinue("more to come")
else:
raise error.HarnessError("Bad sub-quickcmd: %s" % cmd)
def bootstrap(self, fetchdir):
'''How to kickstart autotest when you have no control file?
You download the beaker XML, convert it to a control file
and pass it back to autotest. Much like bootstrapping.. :-)
'''
# hack to sneakily pass results back to beaker without running
# autotest. Need to avoid calling get_recipe below
if self.cmd:
self.parse_quickcmd(self.cmd)
return None
recipe = self.init_recipe_from_beaker()
# remove stale file
if os.path.isfile(self.state_file):
os.remove(self.state_file)
self.tests = {}
# sanity check
if self.recipe_id != recipe.id:
raise error.HarnessError('Recipe mismatch: machine %s.. != XML %s..' %
(self.recipe_id, recipe.id))
# create unique name
control_file_name = recipe.job_id + '_' + recipe.id + '.control'
control_file_path = fetchdir + '/' + control_file_name
logging.debug('setting up control file - %s' % control_file_path)
control_file = open(control_file_path, 'w')
try:
# convert recipe xml into control file
for task in recipe.tasks:
self.convert_task_to_control(fetchdir, control_file, task)
# getting the task id later, will be hard, store it in file/memory
self.write_processed_tests(self.get_test_name(task), task.id)
control_file.close()
except HarnessException:
# hook to bail out on reservesys systems and not run autotest
return None
except Exception, ex:
os.remove(control_file_path)
raise error.HarnessError('beaker_harness: convert failed with -> %s' % ex)
# autotest should find this under FETCHDIRTEST because it is unique
return control_file_path
def init_recipe_from_beaker(self):
logging.debug('Contacting beaker to get task details')
bxp = BeakerXMLParser()
recipe_xml = self.get_recipe_from_LC()
recipes_dict = bxp.parse_xml(recipe_xml)
return self.find_recipe(recipes_dict)
def init_task_params(self, task):
logging.debug('PrepareTaskParams')
if task is None:
raise error.HarnessError('No valid task')
for (name, value) in task.params.items():
logging.debug('adding to os.environ: <%s=%s>', name, value)
os.environ[name] = value
def get_recipe_from_LC(self):
logging.debug('trying to get recipe from LC:')
try:
recipe = self.bkr_proxy.get_recipe()
except Exception, exc:
raise error.HarnessError('Failed to retrieve xml: %s' % exc)
return recipe
def find_recipe(self, recipes_dict):
if self.hostname in recipes_dict:
return recipes_dict[self.hostname]
for h in recipes_dict:
if self.recipe_id == recipes_dict[h].id:
return recipes_dict[h]
raise error.HarnessError('No valid recipe for host %s' % self.hostname)
# the block below was taken from standalone harness
def setupInitSymlink(self):
logging.debug('Symlinking init scripts')
autodir = os.environ.get('AUTODIR')
rc = os.path.join(autodir, 'tools/autotest')
if os.path.isfile(rc) and os.path.islink(rc):
# nothing to do
return
# see if system supports event.d versus inittab
if os.path.exists('/etc/event.d'):
# NB: assuming current runlevel is default
initdefault = utils.system_output('/sbin/runlevel').split()[1]
elif os.path.exists('/etc/inittab'):
initdefault = utils.system_output('grep :initdefault: /etc/inittab')
initdefault = initdefault.split(':')[1]
else:
initdefault = '2'
try:
utils.system('ln -sf %s /etc/init.d/autotest' % rc)
utils.system('ln -sf %s /etc/rc%s.d/S99autotest' % (rc, initdefault))
logging.debug('Labeling init scripts with unconfined_exec_t')
utils.system('chcon -h system_u:object_r:unconfined_exec_t:s0 /etc/init.d/autotest')
utils.system('chcon -h system_u:object_r:unconfined_exec_t:s0 /etc/rc%s.d/S99autotest' % initdefault)
autotest_init = os.path.join(autodir, 'tools/autotest')
ret = os.system('chcon system_u:object_r:unconfined_exec_t:s0 %s' % autotest_init)
logging.debug('chcon returned <%s>', ret)
except:
logging.warning('Linking init scripts failed')
def get_test_name(self, task):
name = re.sub('-', '_', task.rpmName)
return re.sub('\.', '_', name)
def convert_task_to_control(self, fetchdir, control, task):
"""Tasks are really just:
# yum install $TEST
# cd /mnt/tests/$TEST
# make run
Convert that into a test module with a control file
"""
timeout = ''
if task.timeout:
timeout = ", timeout=%s" % task.timeout
# python doesn't like '-' in its class names
rpm_name = self.get_test_name(task)
rpm_dir = fetchdir + '/' + rpm_name
rpm_file = rpm_dir + '/' + rpm_name + '.py'
if task.status == 'Completed' and not self.offline:
logging.debug("SKIP Completed test %s" % rpm_name)
return
if task.status == 'Running' and not self.offline:
if re.search('reservesys', task.rpmName):
logging.debug("Found reservesys, skipping execution")
raise HarnessException('executing under a reservesys')
else:
logging.warning("Found Running test %s that isn't reservesys" % task.rpmName)
# append test name to control file
logging.debug('adding test %s to control file' % rpm_name)
# Trick to avoid downloading XML all the time
# statically update each TASK_ID
control.write("os.environ['BEAKER_TASK_ID']='%s'\n" % task.id)
control.write("job.run_test('%s'%s)\n" % (rpm_name, timeout))
# TODO check for git commands in task.params
# create the test itself
logging.debug('setting up test %s' % (rpm_file))
if not os.path.exists(rpm_dir):
os.mkdir(rpm_dir)
test = open(rpm_file, 'w')
test.write("import os\n")
test.write("from autotest.client import test, utils\n\n")
test.write("class %s(test.test):\n" % rpm_name)
test.write(" version=1\n\n")
test.write(" def initialize(self):\n")
test.write(" utils.system('yum install -y %s')\n" % task.rpmName)
for param in task.params:
test.write(" os.environ['%s']='%s'\n" % (param, task.params[param]))
test.write(" def run_once(self):\n")
test.write(" os.chdir('%s')\n" % task.rpmPath)
test.write(" raw_output = utils.system_output('make run', retain_output=True)\n")
test.write(" self.results = raw_output\n")
test.close()
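    # Illustrative sketch (not from the original source): for a task whose rpmName is
    # e.g. "beaker-distribution-install", convert_task_to_control() would append roughly
    # the following (task id, timeout and paths depend on the recipe XML):
    #
    #   control file:
    #       os.environ['BEAKER_TASK_ID']='12345'
    #       job.run_test('beaker_distribution_install', timeout=3600)
    #
    #   beaker_distribution_install.py:
    #       class beaker_distribution_install(test.test):
    #           version = 1
    #           def initialize(self):
    #               utils.system('yum install -y beaker-distribution-install')
    #           def run_once(self):
    #               os.chdir('<task.rpmPath>')
    #               self.results = utils.system_output('make run', retain_output=True)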
def run_start(self):
"""A run within this job is starting"""
logging.debug('run_start')
try:
self.start_watchdog(BEAKER_CONSOLE_HEARTBEAT)
except Exception:
logging.critical('ERROR: Failed to start watchdog')
def run_pause(self):
"""A run within this job is completing (expect continue)"""
logging.debug('run_pause')
def run_reboot(self):
"""A run within this job is performing a reboot
(expect continue following reboot)
"""
logging.debug('run_reboot')
def run_abort(self):
"""A run within this job is aborting. It all went wrong"""
logging.debug('run_abort')
self.bkr_proxy.recipe_abort()
self.tear_down()
def run_complete(self):
"""A run within this job is completing (all done)"""
logging.debug('run_complete')
self.tear_down()
def run_test_complete(self):
"""A test run by this job is complete. Note that if multiple
tests are run in parallel, this will only be called when all
of the parallel runs complete."""
logging.debug('run_test_complete')
def test_status(self, status, tag):
"""A test within this job is completing"""
logging.debug('test_status ' + status + ' / ' + tag)
def test_status_detail(self, code, subdir, operation, status, tag,
optional_fields):
"""A test within this job is completing (detail)"""
logging.debug('test_status_detail %s / %s / %s / %s / %s / %s',
code, subdir, operation, status, tag, str(optional_fields))
if not subdir:
# recipes - covered by run_start/complete/abort
return
"""The mapping between beaker tasks and non-beaker tasks is not easy to
separate. Therefore we use the START and END markers along with the
environment variable BEAKER_TASK_ID to help us.
We keep an on-disk-file that stores the tests we have seen (or will run
[add by the conversion function above]). If the test is expected, it
will have a task id associated with it and we can communicate with beaker
about it. Otherwise if no 'id' is found, assume this is a sub-task that
beaker doesn't care about and keep all the results contained to the
beaker results directory.
"""
if code.startswith('START'):
if subdir in self.tests and self.tests[subdir] != '0':
# predefined beaker task
self.bkr_proxy.task_start(self.tests[subdir])
else:
# some random sub-task, save for cleanup purposes
self.write_processed_tests(subdir)
return
elif code.startswith('END'):
if subdir in self.tests and self.tests[subdir] != '0':
# predefined beaker task
self.upload_task_files(self.tests[subdir], subdir)
self.bkr_proxy.task_stop(self.tests[subdir])
return
else:
if subdir in self.tests and self.tests[subdir] != '0':
# predefine beaker tasks, will upload on END
task_id = self.tests[subdir]
task_upload = False
else:
# some random sub-task, save upload as task result
# because there is no beaker task to add them too
# task id was not saved in dictionary, get it from env
if 'BEAKER_TASK_ID' not in os.environ:
raise error.HarnessError("No BEAKER_TASK_ID set")
task_id = os.environ['BEAKER_TASK_ID']
task_upload = True
bkr_status = get_beaker_code(code)
try:
resultid = self.bkr_proxy.task_result(task_id, bkr_status,
subdir, 1, '')
if task_upload:
self.upload_result_files(task_id, resultid, subdir)
except Exception:
logging.critical('ERROR: Failed to process test results')
def tear_down(self):
'''called from complete and abort. clean up and shutdown'''
self.kill_watchdog()
if self.recipe_id != '0':
self.upload_recipe_files()
self.bkr_proxy.recipe_stop()
os.remove(self.state_file)
def start_watchdog(self, heartbeat):
logging.debug('harness: Starting watchdog process, heartbeat: %d' % heartbeat)
try:
pid = os.fork()
if pid == 0:
self.watchdog_loop(heartbeat)
else:
self.watchdog_pid = pid
logging.debug('harness: Watchdog process started, pid: %d', self.watchdog_pid)
except OSError, e:
logging.error('harness: fork in start_watchdog failed: %d (%s)\n' % (e.errno, e.strerror))
def kill_watchdog(self):
logging.debug('harness: Killing watchdog, pid: %d', self.watchdog_pid)
utils.nuke_pid(self.watchdog_pid)
self.watchdog_pid = None
def watchdog_loop(self, heartbeat):
while True:
time.sleep(heartbeat)
logging.info('[-- MARK -- %s]' % time.asctime(time.localtime(time.time())))
sys.exit()
def get_processed_tests(self):
tests = {}
if not os.path.isfile(self.state_file):
return tests
f = open(self.state_file, 'r')
lines = f.readlines()
f.close()
for line in lines:
subdir, t_id = line.strip().split()
# duplicates result from multiple writers
# once during the conversion and then again
# during an update of a test run
# former has task ids, latter will not
if subdir not in tests:
tests[subdir] = t_id
return tests
def write_processed_tests(self, subdir, t_id='0'):
f = open(self.state_file, 'a')
f.write(subdir + ' ' + t_id + '\n')
f.close()
def upload_recipe_files(self):
path = self.job.resultdir
# refresh latest executed tests
tests = self.get_processed_tests()
logging.debug("Recipe filtering following tests: %s" % tests)
for root, dirnames, files in os.walk(path):
'''do not upload previously uploaded results files'''
for d in dirnames:
if d in tests:
dirnames.remove(d)
for name in files:
# strip full path
remotepath = re.sub(path, "", root)
# The localfile has the full path
localfile = os.path.join(root, name)
if os.path.getsize(localfile) == 0:
continue # skip empty files
# Upload the file
self.bkr_proxy.recipe_upload_file(localfile, remotepath)
def upload_task_files(self, task_id, subdir):
path = os.path.join(self.job.resultdir, subdir)
for root, _, files in os.walk(path):
for name in files:
# strip full path
remotepath = re.sub(path, "", root)
# The localfile has the full path
localfile = os.path.join(root, name)
if os.path.getsize(localfile) == 0:
continue # skip empty files
# Upload the file
self.bkr_proxy.task_upload_file(task_id, localfile,
remotepath)
def upload_result_files(self, task_id, resultid, subdir):
path = os.path.join(self.job.resultdir, subdir)
for root, _, files in os.walk(path):
for name in files:
# strip full path
remotepath = re.sub(path, "", root)
# The localfile has the full path
localfile = os.path.join(root, name)
if os.path.getsize(localfile) == 0:
continue # skip empty files
# Upload the file
self.bkr_proxy.result_upload_file(task_id, resultid, localfile,
remotepath)
def get_beaker_code(at_code):
bkr_status = 'Warn'
if at_code == 'GOOD':
bkr_status = 'Pass'
if at_code in ['WARN', 'FAIL', 'ERROR', 'ABORT', 'TEST_NA']:
bkr_status = 'Fail'
return bkr_status
if __name__ == '__main__':
pass
|
uni-peter-zheng/autotest
|
client/harness_beaker.py
|
Python
|
gpl-2.0
| 22,692
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import errno
import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
import warnings
from datetime import datetime, timedelta
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation, SuspiciousOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
from django.utils import six
from django.utils._os import upath
from django.utils.six.moves.urllib.request import urlopen
from .models import Storage, temp_storage, temp_storage_location
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
"""
get_storage_class returns the class for a storage backend name/path.
"""
self.assertEqual(
get_storage_class('django.core.files.storage.FileSystemStorage'),
FileSystemStorage)
def test_get_invalid_storage_module(self):
"""
        get_storage_class raises an error if the requested import doesn't exist.
"""
with six.assertRaisesRegex(self, ImportError, "No module named '?storage'?"):
get_storage_class('storage.NonExistingStorage')
def test_get_nonexisting_storage_class(self):
"""
        get_storage_class raises an error if the requested class doesn't exist.
"""
self.assertRaises(ImportError, get_storage_class,
'django.core.files.storage.NonExistingStorage')
def test_get_nonexisting_storage_module(self):
"""
        get_storage_class raises an error if the requested module doesn't exist.
"""
# Error message may or may not be the fully qualified path.
with six.assertRaisesRegex(self, ImportError,
"No module named '?(django.core.files.)?non_existing_storage'?"):
get_storage_class(
'django.core.files.non_existing_storage.NonExistingStorage')
class FileStorageDeconstructionTests(unittest.TestCase):
def test_deconstruction(self):
path, args, kwargs = temp_storage.deconstruct()
self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
self.assertEqual(args, tuple())
self.assertEqual(kwargs, {'location': temp_storage_location})
kwargs_orig = {
'location': temp_storage_location,
'base_url': 'http://myfiles.example.com/'
}
storage = FileSystemStorage(**kwargs_orig)
path, args, kwargs = storage.deconstruct()
self.assertEqual(kwargs, kwargs_orig)
class FileStorageTests(unittest.TestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(location=self.temp_dir,
base_url='/test_media_url/')
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
"""
        Makes sure the current working directory is used if the location is empty
"""
storage = self.storage_class(location='')
self.assertEqual(storage.base_location, '')
self.assertEqual(storage.location, upath(os.getcwd()))
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assertTrue(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.assertFalse(self.storage.exists('storage_test'))
def test_file_accessed_time(self):
"""
File storage returns a Datetime object for the last accessed time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
atime = self.storage.accessed_time(f_name)
self.assertEqual(atime, datetime.fromtimestamp(
os.path.getatime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.accessed_time(f_name), timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_created_time(self):
"""
File storage returns a Datetime object for the creation time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
ctime = self.storage.created_time(f_name)
self.assertEqual(ctime, datetime.fromtimestamp(
os.path.getctime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.created_time(f_name), timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_modified_time(self):
"""
File storage returns a Datetime object for the last modified time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
mtime = self.storage.modified_time(f_name)
self.assertEqual(mtime, datetime.fromtimestamp(
os.path.getmtime(self.storage.path(f_name))))
self.assertLess(datetime.now() - self.storage.modified_time(f_name), timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_save_without_name(self):
"""
File storage extracts the filename from the content object if no
name is given explicitly.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f.name = 'test.file'
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
"""
Saving a pathname should create intermediate directories as necessary.
"""
self.assertFalse(self.storage.exists('path/to'))
self.storage.save('path/to/test.file',
ContentFile('file saved with path'))
self.assertTrue(self.storage.exists('path/to'))
with self.storage.open('path/to/test.file') as f:
self.assertEqual(f.read(), b'file saved with path')
self.assertTrue(os.path.exists(
os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
self.storage.delete('path/to/test.file')
def test_save_doesnt_close(self):
with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file:
file.write(b'1')
file.seek(0)
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
file = InMemoryUploadedFile(six.StringIO('1'), '', 'test',
'text/plain', 1, 'utf8')
with file:
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
def test_file_path(self):
"""
File storage returns the full path of a file
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assertEqual(self.storage.path(f_name),
os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
"""
File storage returns a url to access a given file from the Web.
"""
self.assertEqual(self.storage.url('test.file'),
'%s%s' % (self.storage.base_url, 'test.file'))
# should encode special chars except ~!*()'
# like encodeURIComponent() JavaScript function do
self.assertEqual(self.storage.url(r"""~!*()'@#$%^&*abc`+ =.file"""),
"""/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file""")
# should translate os path separator(s) to the url path separator
self.assertEqual(self.storage.url("""a/b\\c.file"""),
"""/test_media_url/a/b/c.file""")
self.storage.base_url = None
self.assertRaises(ValueError, self.storage.url, 'test.file')
# #22717: missing ending slash in base_url should be auto-corrected
storage = self.storage_class(location=self.temp_dir,
base_url='/no_ending_slash')
self.assertEqual(
storage.url('test.file'),
'%s%s' % (storage.base_url, 'test.file')
)
def test_listdir(self):
"""
File storage returns a tuple containing directories and files.
"""
self.assertFalse(self.storage.exists('storage_test_1'))
self.assertFalse(self.storage.exists('storage_test_2'))
self.assertFalse(self.storage.exists('storage_dir_1'))
self.storage.save('storage_test_1', ContentFile('custom content'))
self.storage.save('storage_test_2', ContentFile('custom content'))
os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
dirs, files = self.storage.listdir('')
self.assertEqual(set(dirs), {'storage_dir_1'})
self.assertEqual(set(files),
{'storage_test_1', 'storage_test_2'})
self.storage.delete('storage_test_1')
self.storage.delete('storage_test_2')
os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
self.assertRaises(SuspiciousOperation, self.storage.exists, '..')
self.assertRaises(SuspiciousOperation, self.storage.exists, '/etc/passwd')
def test_file_storage_preserves_filename_case(self):
"""The storage backend should preserve case of filenames."""
# Create a storage backend associated with the mixed case name
# directory.
other_temp_storage = self.storage_class(location=self.temp_dir2)
# Ask that storage backend to store a file with a mixed case filename.
mixed_case = 'CaSe_SeNsItIvE'
file = other_temp_storage.open(mixed_case, 'w')
file.write('storage contents')
file.close()
self.assertEqual(os.path.join(self.temp_dir2, mixed_case),
other_temp_storage.path(mixed_case))
other_temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
"""
File storage should be robust against directory creation race conditions.
"""
real_makedirs = os.makedirs
# Monkey-patch os.makedirs, to simulate a normal call, a raced call,
# and an error.
def fake_makedirs(path):
if path == os.path.join(self.temp_dir, 'normal'):
real_makedirs(path)
elif path == os.path.join(self.temp_dir, 'raced'):
real_makedirs(path)
raise OSError(errno.EEXIST, 'simulated EEXIST')
elif path == os.path.join(self.temp_dir, 'error'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.makedirs = fake_makedirs
self.storage.save('normal/test.file',
ContentFile('saved normally'))
with self.storage.open('normal/test.file') as f:
self.assertEqual(f.read(), b'saved normally')
self.storage.save('raced/test.file',
ContentFile('saved with race'))
with self.storage.open('raced/test.file') as f:
self.assertEqual(f.read(), b'saved with race')
# Check that OSErrors aside from EEXIST are still raised.
self.assertRaises(OSError,
self.storage.save, 'error/test.file', ContentFile('not saved'))
finally:
os.makedirs = real_makedirs
def test_remove_race_handling(self):
"""
File storage should be robust against file removal race conditions.
"""
real_remove = os.remove
# Monkey-patch os.remove, to simulate a normal call, a raced call,
# and an error.
def fake_remove(path):
if path == os.path.join(self.temp_dir, 'normal.file'):
real_remove(path)
elif path == os.path.join(self.temp_dir, 'raced.file'):
real_remove(path)
raise OSError(errno.ENOENT, 'simulated ENOENT')
elif path == os.path.join(self.temp_dir, 'error.file'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.remove = fake_remove
self.storage.save('normal.file', ContentFile('delete normally'))
self.storage.delete('normal.file')
self.assertFalse(self.storage.exists('normal.file'))
self.storage.save('raced.file', ContentFile('delete with race'))
self.storage.delete('raced.file')
self.assertFalse(self.storage.exists('normal.file'))
# Check that OSErrors aside from ENOENT are still raised.
self.storage.save('error.file', ContentFile('delete with error'))
self.assertRaises(OSError, self.storage.delete, 'error.file')
finally:
os.remove = real_remove
def test_file_chunks_error(self):
"""
Test behavior when file.chunks() is raising an error
"""
f1 = ContentFile('chunks fails')
def failing_chunks():
raise IOError
f1.chunks = failing_chunks
with self.assertRaises(IOError):
self.storage.save('error.file', f1)
def test_delete_no_name(self):
"""
Calling delete with an empty name should not try to remove the base
storage directory, but fail loudly (#20660).
"""
with self.assertRaises(AssertionError):
self.storage.delete('')
class CustomStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
"""
Append numbers to duplicate files rather than underscores, like Trac.
"""
parts = name.split('.')
basename, ext = parts[0], parts[1:]
number = 2
while self.exists(name):
name = '.'.join([basename, str(number)] + ext)
number += 1
return name
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save('custom_storage', ContentFile('custom contents'))
self.assertEqual(first, 'custom_storage')
second = self.storage.save('custom_storage', ContentFile('more contents'))
self.assertEqual(second, 'custom_storage.2')
self.storage.delete(first)
self.storage.delete(second)
class FileFieldStorageTests(TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def test_files(self):
# Attempting to access a FileField from the class raises a descriptive
# error
self.assertRaises(AttributeError, lambda: Storage.normal)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
self.assertRaises(ValueError, lambda: obj1.normal.size)
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
# File objects can be assigned to FileField attributes, but shouldn't
# get committed until the model it's attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertNotIn("assignment.txt", files)
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
obj2_name = obj2.normal.name
six.assertRegex(self, obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
self.assertEqual(obj2.normal.size, 12)
obj2.normal.close()
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertNotEqual(obj2_name, obj2.normal.name)
six.assertRegex(self, obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
obj2.normal.close()
def test_filefield_read(self):
# Files can be read in a little at a time, if necessary.
obj = Storage.objects.create(
normal=SimpleUploadedFile("assignment.txt", b"content"))
obj.normal.open()
self.assertEqual(obj.normal.read(3), b"con")
self.assertEqual(obj.normal.read(), b"tent")
self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
obj.normal.close()
def test_duplicate_filename(self):
# Multiple files with the same name get _(7 random chars) appended to them.
objs = [Storage() for i in range(2)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
try:
names = [o.normal.name for o in objs]
self.assertEqual(names[0], "tests/multiple_files.txt")
six.assertRegex(self, names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
finally:
for o in objs:
o.delete()
def test_file_truncation(self):
        # Given that max_length is limited, when multiple files get uploaded
        # under the same name, the filename gets truncated in order to fit
        # in _(7 random chars). When most of the max_length is taken by
        # dirname + extension and there are not enough characters in the
        # filename to truncate, an exception should be raised.
objs = [Storage() for i in range(2)]
filename = 'filename.ext'
for o in objs:
o.limited_length.save(filename, ContentFile('Same Content'))
try:
# Testing truncation.
names = [o.limited_length.name for o in objs]
self.assertEqual(names[0], 'tests/%s' % filename)
six.assertRegex(self, names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX)
# Testing exception is raised when filename is too short to truncate.
filename = 'short.longext'
objs[0].limited_length.save(filename, ContentFile('Same Content'))
self.assertRaisesMessage(
SuspiciousFileOperation, 'Storage can not find an available filename',
objs[1].limited_length.save, *(filename, ContentFile('Same Content'))
)
finally:
for o in objs:
o.delete()
@unittest.skipIf(
sys.platform.startswith('win'),
"Windows supports at most 260 characters in a path.",
)
def test_extended_length_storage(self):
# Testing FileField with max_length > 255. Most systems have filename
# length limitation of 255. Path takes extra chars.
filename = 251 * 'a' # 4 chars for extension.
obj = Storage()
obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
self.assertEqual(obj.extended_length.read(), b'Same Content')
obj.extended_length.close()
def test_old_style_storage(self):
# Testing backward-compatibility with old-style storage backends that
# don't take ``max_length`` parameter in ``get_available_name()``
# and save(). A deprecation warning should be raised.
obj = Storage()
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
obj.old_style.save('deprecated_storage_test.txt', ContentFile('Same Content'))
self.assertEqual(len(warns), 2)
self.assertEqual(
str(warns[0].message),
'Backwards compatibility for storage backends without support for '
'the `max_length` argument in Storage.save() will be removed in '
'Django 2.0.'
)
self.assertEqual(
str(warns[1].message),
'Backwards compatibility for storage backends without support for '
'the `max_length` argument in Storage.get_available_name() will '
'be removed in Django 2.0.'
)
self.assertEqual(obj.old_style.name, 'tests/deprecated_storage_test.txt')
self.assertEqual(obj.old_style.read(), b'Same Content')
obj.old_style.close()
def test_filefield_default(self):
# Default values allow an object to access a single file.
temp_storage.save('tests/default.txt', ContentFile('default content'))
obj = Storage.objects.create()
self.assertEqual(obj.default.name, "tests/default.txt")
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj.delete()
obj = Storage()
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
def test_empty_upload_to(self):
# upload_to can be empty, meaning it does not use subdirectory.
obj = Storage()
obj.empty.save('django_test.txt', ContentFile('more content'))
self.assertEqual(obj.empty.name, "./django_test.txt")
self.assertEqual(obj.empty.read(), b"more content")
obj.empty.close()
def test_random_upload_to(self):
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj = Storage()
obj.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj.random.name.endswith("/random_file"))
obj.random.close()
def test_filefield_pickling(self):
# Push an object into the cache to make sure it pickles properly
obj = Storage()
obj.normal.save("django_test.txt", ContentFile("more content"))
obj.normal.close()
cache.set("obj", obj)
self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
def test_file_object(self):
# Create sample file
temp_storage.save('tests/example.txt', ContentFile('some content'))
# Load it as python file object
with open(temp_storage.path('tests/example.txt')) as file_obj:
# Save it using storage and read its content
temp_storage.save('tests/file_obj', file_obj)
self.assertTrue(temp_storage.exists('tests/file_obj'))
with temp_storage.open('tests/file_obj') as f:
self.assertEqual(f.read(), b'some content')
def test_stringio(self):
# Test passing StringIO instance as content argument to save
output = six.StringIO()
output.write('content')
output.seek(0)
# Save it and read written file
temp_storage.save('tests/stringio', output)
self.assertTrue(temp_storage.exists('tests/stringio'))
with temp_storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
        return super(SlowFile, self).chunks()
class FileSaveRaceConditionTest(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=['conflict'])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile(b"Data"))
def test_race_condition(self):
self.thread.start()
self.save_file('conflict')
self.thread.join()
files = sorted(os.listdir(self.storage_dir))
self.assertEqual(files[0], 'conflict')
six.assertRegex(self, files[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
self.storage_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.storage_dir)
os.umask(self.old_umask)
@override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
def test_file_upload_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0o777
self.assertEqual(actual_mode, 0o654)
@override_settings(FILE_UPLOAD_PERMISSIONS=None)
def test_file_upload_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
fname = self.storage.save("some_file", ContentFile("data"))
mode = os.stat(self.storage.path(fname))[0] & 0o777
self.assertEqual(mode, 0o666 & ~self.umask)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
def test_file_upload_directory_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o765)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
def test_file_upload_directory_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o777 & ~self.umask)
class FileStoragePathParsing(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
"""Regression test for #9610.
If the directory name contains a dot and the file name doesn't, make
sure we still mangle the file name instead of the directory name.
"""
self.storage.save('dotted.path/test', ContentFile("1"))
self.storage.save('dotted.path/test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], 'test')
six.assertRegex(self, files[1], 'test_%s' % FILE_SUFFIX_REGEX)
def test_first_character_dot(self):
"""
File names with a dot as their first character don't have an extension,
and the underscore should get added to the end.
"""
self.storage.save('dotted.path/.test', ContentFile("1"))
self.storage.save('dotted.path/.test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], '.test')
six.assertRegex(self, files[1], '.test_%s' % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_content_saving(self):
"""
Test that ContentFile can be saved correctly with the filesystem storage,
        whether it was initialized with bytes or unicode content."""
self.storage.save('bytes.txt', ContentFile(b"content"))
self.storage.save('unicode.txt', ContentFile("español"))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
"""
Test file-like objects (#15644).
"""
available_apps = []
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(location=self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_urllib2_urlopen(self):
"""
Test the File storage API with a file like object coming from urllib2.urlopen()
"""
file_like_object = urlopen(self.live_server_url + '/')
f = File(file_like_object)
stored_filename = self.storage.save("remote_file.html", f)
remote_file = urlopen(self.live_server_url + '/')
with self.storage.open(stored_filename) as stored_file:
self.assertEqual(stored_file.read(), remote_file.read())
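# ---------------------------------------------------------------------------
# Standalone usage sketch (not part of the test module above): saving two
# files under the same name with FileSystemStorage keeps the first name and
# gives the second a generated suffix, which is what the duplicate-filename
# tests above assert. The minimal settings.configure() call and the temporary
# directory are assumptions for running this outside the Django test suite.
# ---------------------------------------------------------------------------
import tempfile

from django.conf import settings

if not settings.configured:
    settings.configure()  # default settings so storage defaults resolve

from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage

storage = FileSystemStorage(location=tempfile.mkdtemp())
first = storage.save('report.txt', ContentFile(b'first'))
second = storage.save('report.txt', ContentFile(b'second'))
print(first)   # 'report.txt'
print(second)  # 'report_<generated suffix>.txt', so both files coexist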
|
abhattad4/Digi-Menu
|
tests/file_storage/tests.py
|
Python
|
bsd-3-clause
| 30,670
|
#!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
"""
Post GitHub Issues
------------------
Take the stdout and stderr passed to catch_output_email and create an issue
on GitHub, tagged with a label corresponding to the script in which the error
was detected.
Example Usage:
python post_github_issues.py -o sibis-platform -r ncanda-issues \
-t "NCANDA: Laptop Data (update_visit_date)" \
-b /tmp/test.txt -v
"""
import os
import sys
import json
import hashlib
import ConfigParser
import github
from github.GithubException import UnknownObjectException
def create_connection(cfg, verbose=None):
"""Get a connection to github api.
Args:
cfg (str): Path to configuration file.
verbose (bool): True turns on verbose.
Returns:
object: A github.MainClass.Github.
"""
if verbose:
print "Parsing config: {0}".format(cfg)
# Get the redcap mysql configuration
config = ConfigParser.RawConfigParser()
config_path = os.path.expanduser(cfg)
config.read(config_path)
user = config.get('github', 'user')
passwd = config.get('github', 'password')
g = github.Github(user, passwd)
if verbose:
print "Connected to GitHub..."
return g
def get_label(repo, title, verbose=None):
"""Get a label object to tag the issue.
Args:
repo (object): A github.Repository object.
title (str): Title of posting.
verbose (bool): True turns on verbose.
Returns:
object: A github.Label.
"""
if verbose:
print "Checking for label..."
label = None
label_text = None
try:
label_start = 1 + title.index('(')
label_end = title.index(')')
label_text = title[label_start:label_end]
except ValueError, e:
print "Warning: This tile has no embeded label. {0}".format(e)
if label_text:
try:
label = [repo.get_label(label_text)]
if verbose:
print "Found label: {0}".format(label)
except UnknownObjectException, e:
print "Error: The label '{0}' does not exist on " \
"Github. {1}".format(label_text, e)
return label
def is_open_issue(repo, subject, verbose=None):
"""Verify if issue already exists, if the issue is closed, reopen it.
Args:
repo (object): a github.Repository.
subject (str): Subject line.
verbose (bool): True turns on verbose.
Returns:
bool: True if issue is already open.
"""
if verbose:
print "Checking for open issue: {0}".format(subject)
for issue in repo.get_issues(state='all'):
if issue.title == subject and issue.state == 'open':
if verbose:
print "Open issue already exists... See: {0}".format(issue.url)
return True
if issue.title == subject and issue.state == 'closed':
if verbose:
print "Closed issue already exists, reopening... " \
"See: {0}".format(issue.url)
issue.edit(state='open')
return True
if verbose:
print "Issue does not already exist... Creating.".format(subject)
return False
def generate_body(issue):
"""Generate Markdown for body of issue.
Args:
issue (dict): Keys for title and others.
Returns:
str: Markdown text.
"""
markdown = "### {}\n".format(issue.pop('title'))
for k, v in issue.iteritems():
markdown += "- {}: {}\n".format(k, v)
return markdown
def get_valid_title(title):
"""Ensure that the title isn't over 255 chars.
Args:
title (str): Title to be used in issue report.
Returns:
str: Less than 255 chars long.
"""
if len(title) >= 254:
title = title[:254]
return title
def create_issues(repo, title, body, verbose=None):
"""Create a GitHub issue for the provided repository with a label
Args:
repo: github.Repository
title (str): Contains label on github in parentheses.
body (str):
verbose (bool): True turns on verbose
Returns:
None
"""
label = get_label(repo, title)
if not label:
err = "A label embedded in parentheses is currently required. For " \
"example 'Title of Error (title_tag).' You provided: {0}"
raise NotImplementedError(err.format(title))
# get stdout written to file
with open(body) as fi:
issues = fi.readlines()
fi.close()
# Handle empty body
if not issues:
raise RuntimeWarning("The body text is empty and no issue will be "
"created for file: {}.".format(body))
# Handle multiline error messages.
if 'Traceback' in ''.join(issues):
if verbose:
print "Issue is a Traceback..."
string = "".join(issues)
sha = hashlib.sha1(string).hexdigest()[0:6]
error = dict(experiment_site_id="Traceback:{}".format(sha),
error="Traceback",
message=string)
issues = [json.dumps(error, sort_keys=True)]
for issue in issues:
# Check for new format
try:
issue_dict = json.loads(issue)
issue_dict.update({'title': get_valid_title(title)})
error_msg = issue_dict.get('error')
experiment_site_id = issue_dict.get('experiment_site_id')
subject = "{}, {}".format(experiment_site_id, error_msg)
body = generate_body(issue_dict)
except:
if verbose:
print("Falling back to old issue formatting.")
# Old error handling approach.
# Create a unique id.
sha1 = hashlib.sha1(issue).hexdigest()[0:6]
subject_base = title[0:title.index(' (')]
subject = subject_base + ": {0}".format(sha1)
body = issue
if is_open_issue(repo, subject, verbose=verbose):
pass
else:
github_issue = repo.create_issue(subject, body=body, labels=label)
if verbose:
print "Created issue... See: {0}".format(github_issue.url)
return None
def main(args=None):
if args.verbose:
print "Initializing..."
g = create_connection(args.config, verbose=args.verbose)
organization = g.get_organization(args.org)
repo = organization.get_repo(args.repo)
create_issues(repo, args.title, args.body, verbose=args.verbose)
if args.verbose:
print "Finished!"
if __name__ == "__main__":
import argparse
formatter = argparse.RawDescriptionHelpFormatter
default = 'default: %(default)s'
parser = argparse.ArgumentParser(prog="post_github_issues.py",
description=__doc__,
formatter_class=formatter)
default_cfg = '~/.server_config/github.cfg'
parser.add_argument("-c", "--config", dest="config",
default=os.path.expanduser(default_cfg),
help="GitHub authentication info.".format(default))
parser.add_argument("-o", "--org", dest="org", required=True,
help="GitHub organization.")
parser.add_argument("-r", "--repo", dest="repo", required=True,
help="GitHub repo.")
parser.add_argument("-t", "--title", dest="title", required=True,
help="GitHub issue title with label in parentheses.")
parser.add_argument("-b", "--body", dest="body", required=True,
help="GitHub issue body.")
parser.add_argument("-v", "--verbose", dest="verbose", action='store_true',
help="Turn on verbose.")
argv = parser.parse_args()
sys.exit(main(args=argv))
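# ---------------------------------------------------------------------------
# Standalone usage sketch (not part of the script above): calling the same
# helpers programmatically instead of through the CLI. The organization and
# repository names and the body file are placeholders, not values from this
# repository; the config path is the script's documented default.
# ---------------------------------------------------------------------------
gh = create_connection('~/.server_config/github.cfg', verbose=True)
example_repo = gh.get_organization('example-org').get_repo('example-issues')
# The title must embed a label in parentheses, otherwise create_issues raises
# NotImplementedError (see get_label above).
create_issues(example_repo, 'Nightly import failed (import_visits)',
              '/tmp/import_visits.log', verbose=True)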
|
nicholsn/ncanda-data-integration
|
scripts/crond/post_github_issues.py
|
Python
|
bsd-3-clause
| 7,944
|
#!/usr/bin/python
# This file is part of pulseaudio-dlna.
# pulseaudio-dlna is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pulseaudio-dlna is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pulseaudio-dlna. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import logging
import threading
import traceback
import pulseaudio_dlna.plugins
import pulseaudio_dlna.plugins.dlna.ssdp
import pulseaudio_dlna.plugins.dlna.ssdp.listener
import pulseaudio_dlna.plugins.dlna.ssdp.discover
from pulseaudio_dlna.plugins.dlna.renderer import DLNAMediaRendererFactory
logger = logging.getLogger('pulseaudio_dlna.plugins.dlna')
class DLNAPlugin(pulseaudio_dlna.plugins.BasePlugin):
NOTIFICATION_TYPES = [
'urn:schemas-upnp-org:device:MediaRenderer:1',
'urn:schemas-upnp-org:device:MediaRenderer:2',
]
def __init__(self, *args):
pulseaudio_dlna.plugins.BasePlugin.__init__(self, *args)
def lookup(self, url, xml):
return DLNAMediaRendererFactory.from_xml(url, xml)
def discover(self, holder, ttl=None, host=None):
self.holder = holder
def launch_discover():
discover = pulseaudio_dlna.plugins.dlna.ssdp.discover\
.SSDPDiscover(
cb_on_device_response=self._on_device_response,
host=host,
)
discover.search(ssdp_ttl=ttl)
def launch_listener():
ssdp = pulseaudio_dlna.plugins.dlna.ssdp.listener\
.ThreadedSSDPListener(
cb_on_device_alive=self._on_device_added,
cb_on_device_byebye=self._on_device_removed,
host=host,
)
ssdp.run(ttl=ttl)
threads = []
for func in [launch_discover, launch_listener]:
thread = threading.Thread(target=func)
thread.daemon = True
threads.append(thread)
try:
for thread in threads:
thread.start()
for thread in threads:
thread.join()
except:
traceback.print_exc()
logger.info('DLNAPlugin.discover()')
@pulseaudio_dlna.plugins.BasePlugin.add_device_after
def _on_device_response(self, header, address):
st_header = header.get('st', None)
if st_header and st_header in self.NOTIFICATION_TYPES:
return DLNAMediaRendererFactory.from_header(header)
@pulseaudio_dlna.plugins.BasePlugin.add_device_after
def _on_device_added(self, header):
nt_header = header.get('nt', None)
if nt_header and nt_header in self.NOTIFICATION_TYPES:
return DLNAMediaRendererFactory.from_header(header)
@pulseaudio_dlna.plugins.BasePlugin.remove_device_after
def _on_device_removed(self, header):
nt_header = header.get('nt', None)
if nt_header and nt_header in self.NOTIFICATION_TYPES:
device_id = pulseaudio_dlna.plugins.dlna.ssdp._get_device_id(
header)
return device_id
|
super7ramp/pulseaudio-dlna
|
pulseaudio_dlna/plugins/dlna/__init__.py
|
Python
|
gpl-3.0
| 3,522
|
import logging
log = logging.getLogger(__name__)
import botologist.plugin
class TestPlugin(botologist.plugin.Plugin):
@botologist.plugin.command('ping')
def ping(self, cmd):
return 'pong!'
@botologist.plugin.command('test_err')
def test_err(self, cmd):
raise RuntimeError('test exception')
@botologist.plugin.command('test_err_threaded', threaded=True)
def test_err_threaded(self, cmd):
raise RuntimeError('test exception')
@botologist.plugin.ticker()
def test_err_ticker(self):
raise RuntimeError('test exception')
|
moopie/botologist
|
plugins/test.py
|
Python
|
mit
| 538
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper optimizer for Elastic Average SGD """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import optimizer
from tensorflow.python.training import session_run_hook
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import constant_op
LOCAL_VARIABLE_NAME = 'local_center_variable'
GLOBAL_VARIABLE_NAME = 'global_center_variable'
class ElasticAverageCustomGetter(object):
"""Custom_getter class is used to do:
1. Change trainable variables to local collection and place them at worker
device
  2. Generate global variables (global center variables)
  3. Generate local variables (local center variables) which record the global
variables and place them at worker device
Notice that the class should be used with tf.replica_device_setter,
so that the global center variables and global step variable can be placed
at ps device. Besides, use 'tf.get_variable' instead of 'tf.Variable' to
use this custom getter.
For example,
ea_custom_getter = ElasticAverageCustomGetter(worker_device)
with tf.device(
tf.train.replica_device_setter(
worker_device=worker_device,
ps_device="/job:ps/cpu:0",
cluster=cluster)),
tf.variable_scope('',custom_getter=ea_custom_getter):
hid_w = tf.get_variable(
initializer=tf.truncated_normal(
[IMAGE_PIXELS * IMAGE_PIXELS, FLAGS.hidden_units],
stddev=1.0 / IMAGE_PIXELS),
name="hid_w")
hid_b = tf.get_variable(initializer=tf.zeros([FLAGS.hidden_units]),
name="hid_b")
"""
def __init__(self, worker_device):
"""Create a new `ElasticAverageCustomGetter`.
Args:
worker_device: String. Name of the `worker` job.
"""
self._worker_device = worker_device
self._local_map = {}
self._global_map = {}
def __call__(self, getter, name, trainable, collections, *args, **kwargs):
if trainable:
with ops.device(self._worker_device):
local_var = getter(name, trainable=True,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
*args, **kwargs)
global_center_variable = variable_scope.variable(
name='%s/%s' %
(GLOBAL_VARIABLE_NAME,
name),
initial_value=local_var.initialized_value(),
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
with ops.device(self._worker_device):
local_center_variable = variable_scope.variable(
name='%s/%s' % (LOCAL_VARIABLE_NAME, name),
initial_value=local_var.initialized_value(),
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
self._local_map[local_var] = local_center_variable
self._global_map[local_var] = global_center_variable
return local_var
else:
return getter(name, trainable, collections, *args, **kwargs)
class ElasticAverageOptimizer(optimizer.Optimizer):
"""Wrapper optimizer that implements the Elastic Average SGD algorithm.
  This is an async optimizer. During training, each worker updates the
  local variables and maintains its own local_step, which starts from 0
  and is incremented by 1 after each update of the local variables. Whenever
  the communication period divides the local step, the worker requests
  the current global center variables, computes the elastic difference
  between the global center variables and the local variables, and uses that
  difference to update both the local variables and the global variables.
"""
# Default value as paper described
BETA = 0.9
def __init__(
self,
opt,
num_worker,
ea_custom_getter,
communication_period=10,
moving_rate=None,
rho=None,
use_locking=True,
name="ElasticAverageOptimizer"):
"""Construct a new gradient descent optimizer.
Args:
opt: The actual optimizer that will be used to update local variables.
Must be one of the Optimizer classes.
num_worker: The number of workers
ea_custom_getter: The ElasticAverageCustomGetter
      communication_period: An int value that controls the frequency
of the communication between every worker and the ps.
moving_rate: A floating point value to control the elastic difference.
      rho: the amount of exploration we allow in the model. The default
value is moving_rate/learning_rate
use_locking: If True use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "ElasticAverageOptimizer".
"""
super(ElasticAverageOptimizer, self).__init__(use_locking, name)
self._opt = opt
self._num_worker = num_worker
self._period = communication_period
self._local_map = ea_custom_getter._local_map
self._global_map = ea_custom_getter._global_map
if moving_rate is None:
      self._moving_rate = self.BETA / communication_period / num_worker
else:
self._moving_rate = moving_rate
if rho is None:
self._rho = self._moving_rate / self._opt._learning_rate
else:
self._rho = rho
self._local_step = variable_scope.get_variable(
initializer=0,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="local_step")
self._opt._prepare()
def compute_gradients(self, loss, var_list=None,
gate_gradients=optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
"""Compute gradients of `loss` for the variables in `var_list`.
Add rho*elastic_difference to loss to control the exploration
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: A Tensor containing the value to minimize.
var_list: Optional list or tuple of `tf.Variable` to update to minimize
`loss`. Defaults to the list of variables collected in the graph
under the key `GraphKey.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
Raises:
TypeError: If `var_list` contains anything else than `Variable` objects.
ValueError: If some arguments are invalid.
"""
if not var_list:
var_list = variables.trainable_variables()
elastic_difference = [math_ops.subtract(v, lv) for v, lv in zip(
variables.trainable_variables(),
[self._local_map[var] for var in var_list])]
distance_loss = self._rho * math_ops.add_n(
[gen_nn_ops.l2_loss(ed) for ed in elastic_difference])
total_loss = loss + distance_loss
return self._opt.compute_gradients(total_loss, var_list,
gate_gradients, aggregation_method,
colocate_gradients_with_ops, grad_loss)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to global variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients. If `global_step`
was not None, that operation also increments `global_step`.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
"""
apply_updates = self._opt.apply_gradients(grads_and_vars)
with ops.control_dependencies([apply_updates]):
local_update = state_ops.assign_add(
self._local_step, 1, name='local_step_update').op
# update global variables.
def _Update_global_variables():
local_vars = [v for g, v in grads_and_vars if g is not None]
global_center_vars = [self._global_map[var] for var in local_vars]
local_center_vars = [self._local_map[var] for var in local_vars]
local_center_vars_update = []
for lvar, var in zip(local_center_vars, global_center_vars):
local_center_vars_update.append(lvar.assign(var))
update_ops = []
differences = []
with ops.control_dependencies(local_center_vars_update):
for v, lv in zip(local_vars, local_center_vars):
with ops.device(v.device):
differences.append(math_ops.subtract(v, lv))
for lvar, diff in zip(local_vars, differences):
with ops.device(lvar.device):
update_ops.append(state_ops.assign_sub(lvar, math_ops.multiply(
self._moving_rate, diff)))
for var, diff in zip(global_center_vars, differences):
with ops.device(var.device):
update_ops.append(state_ops.assign_add(var, math_ops.multiply(
self._moving_rate, diff)))
if global_step:
with ops.colocate_with(global_step):
update_ops.append(state_ops.assign_add(global_step, 1))
variable_update = control_flow_ops.group(*(update_ops))
return variable_update
with ops.control_dependencies([local_update]):
condition = math_ops.equal(math_ops.mod(
self._local_step, self._period), 0)
conditional_update = control_flow_ops.cond(
condition, _Update_global_variables, control_flow_ops.no_op)
return conditional_update
def get_init_op(self, task_index):
"""Returns the op to let all the local variables and local center
variables equal to the global center variables before the training begins"""
def _Add_sync_queues_and_barrier(enqueue_after_list):
"""Adds ops to enqueu on all worker queues"""
sync_queues = [
data_flow_ops.FIFOQueue(self._num_worker, [dtypes.bool], shapes=[[]],
shared_name='%s%s' % (
'variable_init_sync_queue', i)) for i in
range(self._num_worker)]
queue_ops = []
# For each other worker, add an entry in a queue
token = constant_op.constant(False)
with ops.control_dependencies(enqueue_after_list):
for i, q in enumerate(sync_queues):
if i == task_index:
queue_ops.append(control_flow_ops.no_op())
else:
queue_ops.append(q.enqueue(token))
queue_ops.append(
sync_queues[task_index].dequeue_many(len(sync_queues) - 1))
return control_flow_ops.group(*queue_ops)
init_ops = []
local_vars = variables.trainable_variables()
global_center_vars = [self._global_map[var] for var in local_vars]
local_center_vars = [self._local_map[var] for var in local_vars]
if not (local_vars and global_center_vars and local_center_vars):
raise ValueError(
'The lists of local_variables, global_center_variables, '
'local_center_variables should not be empty ')
for lvar, gc_var, lc_var in zip(
local_vars, global_center_vars, local_center_vars):
init_ops.append(state_ops.assign(lvar, gc_var))
init_ops.append(state_ops.assign(lc_var, gc_var))
init_op = control_flow_ops.group(*(init_ops))
sync_queue_op = _Add_sync_queues_and_barrier([init_op])
return sync_queue_op
def make_session_run_hook(self, is_chief, task_index):
"""Creates a hook to handle ElasticAverageOptimizerHook ops such as initialization."""
return _ElasticAverageOptimizerHook(self, is_chief, task_index)
class _ElasticAverageOptimizerHook(session_run_hook.SessionRunHook):
def __init__(self, ea_optimizer, is_chief, task_index):
"""Creates hook to handle ElasticAverageOptimizer initialization ops.
Args:
ea_optimizer: `ElasticAverageOptimizer` which this hook will initialize.
is_chief: `Bool`, whether is this a chief replica or not.
"""
self._ea_optimizer = ea_optimizer
self._is_chief = is_chief
self._task_index = task_index
def begin(self):
self._local_init_op = variables.local_variables_initializer()
self._global_init_op = None
if self._is_chief:
self._global_init_op = variables.global_variables_initializer()
self._variable_init_op = self._ea_optimizer.get_init_op(self._task_index)
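# ---------------------------------------------------------------------------
# Standalone wiring sketch (not part of the module above): how the custom
# getter, the wrapper optimizer and the session-run hook fit together on a
# single worker, following the docstring of ElasticAverageCustomGetter. The
# cluster addresses, device strings, model and hyperparameters are
# illustrative assumptions; only the class APIs come from the code above.
# ---------------------------------------------------------------------------
import tensorflow as tf

cluster_spec = tf.train.ClusterSpec({
    'ps': ['localhost:2222'],
    'worker': ['localhost:2223', 'localhost:2224'],
})
worker_device = '/job:worker/task:0'
ea_getter = ElasticAverageCustomGetter(worker_device=worker_device)

with tf.device(tf.train.replica_device_setter(
        worker_device=worker_device, ps_device='/job:ps/cpu:0',
        cluster=cluster_spec)), \
        tf.variable_scope('', custom_getter=ea_getter):
  w = tf.get_variable('w', initializer=tf.constant(1.0))
  loss = tf.square(w - 3.0)
  global_step = tf.train.get_or_create_global_step()

ea_opt = ElasticAverageOptimizer(
    opt=tf.train.GradientDescentOptimizer(0.1),
    num_worker=2,
    ea_custom_getter=ea_getter,
    communication_period=10)
train_op = ea_opt.minimize(loss, global_step=global_step)
hook = ea_opt.make_session_run_hook(is_chief=True, task_index=0)
# `hook` is then passed to tf.train.MonitoredTrainingSession(hooks=[hook], ...)
# once the ps/worker tf.train.Server processes are running.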
|
Kongsea/tensorflow
|
tensorflow/contrib/opt/python/training/elastic_average_optimizer.py
|
Python
|
apache-2.0
| 14,340
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 5, transform = "None", sigma = 0.0, exog_count = 100, ar_order = 0);
|
antoinecarme/pyaf
|
tests/artificial/transf_None/trend_Lag1Trend/cycle_5/ar_/test_artificial_128_None_Lag1Trend_5__100.py
|
Python
|
bsd-3-clause
| 260
|
input = """
c num blocks = 1
c num vars = 100
c minblockids[0] = 1
c maxblockids[0] = 100
p cnf 100 405
15 -27 78 0
97 -3 87 0
-100 -13 60 0
-25 -97 -83 0
-92 -40 28 0
71 -96 -30 0
4 -61 36 0
-44 100 7 0
-91 43 -52 0
89 -70 -97 0
11 59 60 0
82 -51 -16 0
-94 55 42 0
68 -86 -20 0
79 -40 15 0
71 -4 -54 0
-33 -87 -61 0
54 -64 -63 0
-41 -47 -81 0
-66 4 -70 0
-32 -14 -61 0
-28 -40 67 0
2 -59 -57 0
-68 -3 -18 0
-93 49 -96 0
24 49 -31 0
-18 32 91 0
62 65 -57 0
-64 78 10 0
66 -88 34 0
53 -30 -64 0
31 -22 -93 0
-34 61 47 0
-20 85 -64 0
29 -39 -49 0
15 -7 90 0
71 -97 -42 0
-32 71 1 0
-24 -81 7 0
-90 -62 -84 0
-75 2 -49 0
-4 92 91 0
97 -29 -11 0
12 -44 -29 0
-38 60 11 0
-56 -5 25 0
-69 -13 -56 0
-65 86 78 0
-74 65 -35 0
-22 -68 -55 0
50 65 -14 0
10 -5 -87 0
-16 23 59 0
-75 -70 64 0
72 -44 28 0
58 -86 33 0
-96 -67 4 0
55 1 -77 0
-14 41 67 0
-97 88 92 0
97 77 -55 0
-64 37 -98 0
63 -64 -11 0
-3 34 -8 0
-49 5 -68 0
-96 -51 24 0
-100 47 -54 0
68 -55 6 0
-13 -17 -52 0
-52 5 22 0
-94 -73 -75 0
4 -1 96 0
-14 37 71 0
86 43 -33 0
-27 97 -9 0
-3 4 12 0
50 90 46 0
46 97 -55 0
88 97 77 0
-79 -97 31 0
-28 87 -56 0
-99 -52 49 0
-12 -65 33 0
-29 -44 -38 0
-52 -73 -75 0
9 -44 46 0
61 11 -57 0
93 -23 17 0
71 -44 73 0
37 20 -24 0
-81 -45 -57 0
-81 52 33 0
87 -19 -27 0
64 -87 -51 0
41 -12 -51 0
29 4 14 0
-14 -65 8 0
-54 -6 12 0
-9 23 -11 0
-45 25 23 0
-57 -9 30 0
-44 -24 67 0
-60 -76 30 0
44 -26 51 0
-18 -52 -15 0
11 97 29 0
-61 89 -28 0
38 13 -64 0
30 7 -84 0
-36 -45 69 0
-74 -89 -19 0
-21 99 65 0
97 80 22 0
35 78 63 0
-81 33 -8 0
-27 -77 94 0
-92 2 -11 0
-24 86 97 0
18 57 -25 0
12 6 76 0
-62 -70 83 0
-7 -71 100 0
-19 71 95 0
-32 -71 -23 0
11 -18 82 0
-35 -92 -1 0
-16 -78 -88 0
-6 98 39 0
-87 -74 -47 0
30 16 86 0
43 34 87 0
24 100 -83 0
13 44 -60 0
100 -33 -94 0
61 -32 -24 0
55 -16 -51 0
-70 22 95 0
-1 -10 5 0
75 3 35 0
-38 11 81 0
-39 64 -99 0
-3 -50 49 0
79 32 -26 0
84 -44 -60 0
94 -32 61 0
-14 -81 46 0
31 85 -42 0
-37 14 82 0
89 -18 -68 0
80 -77 -39 0
33 -27 -88 0
-56 -67 -20 0
-61 84 -60 0
-62 14 -10 0
-94 88 -65 0
-80 61 -70 0
37 -22 70 0
25 29 -2 0
-21 66 -26 0
-42 -61 36 0
83 -38 -56 0
37 -18 9 0
97 -79 -30 0
-15 42 2 0
-5 -90 -8 0
-10 -81 79 0
-74 -96 -60 0
-22 18 47 0
45 -5 84 0
51 -87 53 0
-60 -11 70 0
45 64 18 0
15 -78 56 0
9 -85 -92 0
43 -89 82 0
-2 63 9 0
58 45 -16 0
78 53 87 0
87 47 -89 0
66 -43 94 0
76 29 -34 0
-24 -34 43 0
66 -68 -12 0
79 61 48 0
-31 -72 75 0
93 8 -73 0
76 90 53 0
65 -60 -63 0
-78 51 -57 0
-36 57 -76 0
67 -83 15 0
-36 94 67 0
11 -68 -30 0
-54 -40 -73 0
30 50 94 0
35 31 43 0
-92 -50 97 0
-1 -55 -4 0
-5 -15 55 0
19 46 -78 0
89 -18 -19 0
-21 -41 -62 0
81 30 59 0
-91 58 35 0
-36 26 -55 0
15 -80 -87 0
89 -47 66 0
60 38 47 0
-39 -86 -34 0
-23 43 -31 0
24 -79 -98 0
-86 91 -90 0
-99 64 -100 0
-22 -81 65 0
13 -15 -51 0
-80 -52 -1 0
-22 72 -13 0
87 -58 -19 0
25 3 19 0
-87 21 90 0
53 -80 -14 0
-30 -46 -3 0
-81 51 -24 0
10 65 44 0
62 4 74 0
-11 77 15 0
67 17 -12 0
49 -48 -20 0
43 95 65 0
30 -25 -8 0
75 -34 89 0
98 -90 -4 0
77 31 74 0
-66 -100 -87 0
8 92 68 0
63 -29 -5 0
-93 45 71 0
-13 -15 -52 0
-29 76 1 0
-71 50 -82 0
-17 61 21 0
-72 17 -85 0
94 45 72 0
18 1 -56 0
-6 -1 69 0
-26 -11 -96 0
-72 -38 -94 0
-1 -96 18 0
29 100 -10 0
26 -46 6 0
-55 48 -90 0
76 -75 9 0
-24 84 -36 0
19 29 -48 0
52 45 -51 0
-36 46 96 0
-74 -85 -96 0
50 -92 -31 0
-38 48 -61 0
81 -89 -49 0
-87 93 48 0
90 -46 87 0
83 -59 -15 0
-46 -78 69 0
-47 82 -79 0
-31 -62 -79 0
55 53 10 0
84 28 -68 0
-33 -78 -14 0
-94 -33 -67 0
-2 7 -84 0
-88 -93 -97 0
-73 13 63 0
-45 -6 4 0
-38 18 45 0
48 -47 -99 0
-76 -92 -13 0
-91 -99 100 0
19 88 83 0
-1 98 -24 0
-77 72 40 0
32 -41 -77 0
-45 94 34 0
98 28 29 0
55 -75 93 0
-4 29 -42 0
73 -99 43 0
87 -36 -90 0
-45 -47 61 0
52 86 7 0
-1 13 9 0
43 10 -93 0
13 -37 26 0
-41 47 -27 0
9 69 68 0
81 82 41 0
18 -57 64 0
-82 -73 30 0
14 31 88 0
30 -17 49 0
27 4 22 0
-61 -22 -10 0
1 -62 20 0
50 79 -29 0
-62 32 -61 0
-35 -74 41 0
49 42 -81 0
20 7 6 0
33 80 -50 0
14 25 41 0
5 -56 -19 0
-33 70 32 0
-54 92 -33 0
-57 -58 -66 0
29 -50 -31 0
-42 -35 -49 0
45 36 -55 0
25 -2 60 0
76 -22 47 0
-23 27 -85 0
-32 -17 -2 0
-40 69 -66 0
77 -5 61 0
17 46 -38 0
-50 -75 -98 0
70 -43 -81 0
47 61 8 0
-59 68 -75 0
11 34 -69 0
-18 -27 -54 0
-9 -29 59 0
51 30 88 0
24 -61 60 0
70 -11 36 0
-69 57 63 0
58 57 93 0
22 -13 77 0
42 79 -50 0
26 82 -78 0
91 57 15 0
-97 44 16 0
85 -3 -89 0
-17 -59 84 0
-60 34 97 0
6 -73 41 0
42 82 -66 0
-86 -77 -16 0
-58 71 56 0
19 -61 39 0
87 98 -26 0
-3 16 96 0
-93 86 77 0
-84 74 52 0
-78 8 -1 0
-12 -87 -97 0
-26 -99 95 0
-47 -66 -95 0
-1 -77 45 0
74 27 15 0
18 -4 -94 0
26 11 43 0
82 77 -7 0
-53 93 32 0
-64 -21 -26 0
-15 -33 -41 0
24 -34 -47 0
15 -94 80 0
76 3 28 0
4 -95 90 0
53 -68 62 0
-85 97 22 0
13 -87 79 0
-51 -6 44 0
-60 -16 -42 0
-20 -80 -28 0
57 -2 -52 0
52 -38 12 0
14 -29 -85 0
-64 -80 35 0
48 -24 32 0
12 13 -49 0
2 26 22 0
92 -99 10 0
75 7 25 0
-87 44 -25 0
18 -6 91 0
15 -8 20 0
98 -42 -13 0
63 -64 -3 0
84 -35 -88 0
-51 -29 31 0
14 11 -35 0
34 99 7 0
-71 25 -52 0
-7 -13 -78 0
45 -83 9 0
94 2 84 0
8 12 1 0
-98 -26 -61 0
67 -8 71 0
72 -9 60 0
-29 6 19 0
99 -29 -81 0
40 -24 -75 0
-78 -46 -50 0
"""
output = "SAT"
|
alviano/aspino
|
tests/sat/Models/c405.100.SAT.dimacs.test.py
|
Python
|
apache-2.0
| 5,091
|
#------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
# <<< imports
# @generated
from dynamics.dynamics.excitation_systems.excitation_system import ExcitationSystem
from dynamics.domain import Seconds
from google.appengine.ext import db
# >>> imports
class ExcDC1A(ExcitationSystem):
""" IEEE (1992/2005) DC1A Model This model is used to represent field-controlled dc commutator exciters with continuously acting voltage regulators (especially the direct-acting rheostatic, rotating amplifier, and magnetic amplifier types). Because this model has been widely implemented by the industry, it is sometimes used to represent other types of systems when detailed data for them are not available or when a simplified model is required.
"""
# <<< exc_dc1_a.attributes
# @generated
# If not 0, apply lower limit of 0. to exciter output
exclim = db.FloatProperty()
# Minimum controller output (< 0.)
vrmin = db.FloatProperty()
# Lead time constant
tc = Seconds
# Time constant (> 0.)
ta = Seconds
# Lag time constant (>= 0.)
tb = Seconds
# Gain (> 0.)
ka = db.FloatProperty()
# Exciter field resistance line slope
ke = db.FloatProperty()
# Saturation factor at e2 (>= 0.)
se2 = db.FloatProperty()
# Exciter time constant (> 0.)
te = Seconds
# Saturation factor at e1 (>= 0.)
se1 = db.FloatProperty()
# Rate feedback time constant, sec. (> 0.)
tf = Seconds
# Rate feedback gain (>= 0.)
kf = db.FloatProperty()
# UEL input: if < 2, HV gate; if = 2, add to error signal
uelin = db.FloatProperty()
# Filter time constant (>= 0.)
tr = Seconds
# Field voltage value 1 (> 0.)
e1 = db.FloatProperty()
# Maximum controller output
vrmax = db.FloatProperty()
# Field voltage value 2. (> 0.)
e2 = db.FloatProperty()
# >>> exc_dc1_a.attributes
# <<< exc_dc1_a.references
# @generated
# >>> exc_dc1_a.references
# <<< exc_dc1_a.operations
# @generated
# >>> exc_dc1_a.operations
# EOF -------------------------------------------------------------------------
|
rwl/openpowersystem
|
dynamics/dynamics/excitation_systems/exc_dc1_a.py
|
Python
|
agpl-3.0
| 3,021
|
# import the libraries that you need
import requests
import csv
# make a GET request to the OneSearch X-Service API
response = requests.get('http://onesearch.cuny.edu/PrimoWebServices'
'/xservice/search/brief?'
'&institution=KB'
'&query=any,contains,obama'
'&query=facet_rtype,exact,books'
'&loc=adaptor,primo_central_multiple_fe'
'&loc=local,scope:(KB,AL,CUNY_BEPRESS)'
'&json=true')
# take the JSON from the response
# and store it in a variable called alldata
alldata = response.json()
# drill down into a smaller subset of the json
# and print this smaller bit of json
somedata = alldata['SEGMENTS']['JAGROOT']['RESULT']['FACETLIST']['FACET']\
[1]['FACET_VALUES']
print(somedata)
# open a file called mycsv.csv, then loop through the data
# and write to that file
with open('mycsv.csv', 'wb') as f:
writer = csv.writer(f)
for x in somedata:
writer.writerow([x['@KEY'], x['@VALUE']])
|
MarkEEaton/api-workshop
|
4-json-to-csv.py
|
Python
|
mit
| 1,093
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras convolution layers and image transformation layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import InputSpec
from tensorflow.python.keras.engine.base_layer import Layer
# imports for backwards namespace compatibility
# pylint: disable=unused-import
from tensorflow.python.keras.layers.pooling import AveragePooling1D
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.python.keras.layers.pooling import AveragePooling3D
from tensorflow.python.keras.layers.pooling import MaxPooling1D
from tensorflow.python.keras.layers.pooling import MaxPooling2D
from tensorflow.python.keras.layers.pooling import MaxPooling3D
# pylint: enable=unused-import
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.tf_export import tf_export
class Conv(Layer):
"""Abstract nD convolution layer (private, used as implementation base).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
length of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
def __init__(self, rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(Conv, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs)
self.rank = rank
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(
kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if (self.padding == 'causal' and not isinstance(self,
(Conv1D, SeparableConv1D))):
      raise ValueError('Causal padding is only supported for `Conv1D` '
                       'and `SeparableConv1D`.')
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(
dilation_rate, rank, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=self.rank + 2)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
self._convolution_op = nn_ops.Convolution(
input_shape,
filter_shape=self.kernel.get_shape(),
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=op_padding.upper(),
data_format=conv_utils.convert_data_format(self.data_format,
self.rank + 2))
self.built = True
def call(self, inputs):
outputs = self._convolution_op(inputs, self.kernel)
if self.use_bias:
if self.data_format == 'channels_first':
if self.rank == 1:
# nn.bias_add does not accept a 1D input tensor.
bias = array_ops.reshape(self.bias, (1, self.filters, 1))
outputs += bias
if self.rank == 2:
outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
if self.rank == 3:
# As of Mar 2017, direct addition is significantly slower than
# bias_add when computing gradients. To use bias_add, we collapse Z
# and Y into a single dimension to obtain a 4D input tensor.
outputs_shape = outputs.shape.as_list()
if outputs_shape[0] is None:
outputs_shape[0] = -1
outputs_4d = array_ops.reshape(outputs,
[outputs_shape[0], outputs_shape[1],
outputs_shape[2] * outputs_shape[3],
outputs_shape[4]])
outputs_4d = nn.bias_add(outputs_4d, self.bias, data_format='NCHW')
outputs = array_ops.reshape(outputs_4d, outputs_shape)
else:
outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tensor_shape.TensorShape([input_shape[0]] + new_space +
[self.filters])
else:
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tensor_shape.TensorShape([input_shape[0], self.filters] +
new_space)
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Conv, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _compute_causal_padding(self):
"""Calculates padding for 'causal' option for 1-d conv layers."""
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
if self.data_format == 'channels_last':
causal_padding = [[0, 0], [left_pad, 0], [0, 0]]
else:
causal_padding = [[0, 0], [0, 0], [left_pad, 0]]
return causal_padding
@tf_export('keras.layers.Conv1D', 'keras.layers.Convolution1D')
class Conv1D(Conv):
"""1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved
with the layer input over a single spatial (or temporal) dimension
to produce a tensor of outputs.
If `use_bias` is True, a bias vector is created and added to the outputs.
Finally, if `activation` is not `None`,
it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide an `input_shape` argument
(tuple of integers or `None`, e.g.
`(10, 128)` for sequences of 10 vectors of 128-dimensional vectors,
  or `(None, 128)` for variable-length sequences of 128-dimensional vectors).
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer,
specifying the length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"causal"` or `"same"` (case-insensitive).
`"causal"` results in causal (dilated) convolutions, e.g. output[t]
does not depend on input[t+1:]. Useful when modeling temporal data
where the model should not violate the temporal order.
See [WaveNet: A Generative Model for Raw Audio, section
2.1](https://arxiv.org/abs/1609.03499).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
dilation_rate: an integer or tuple/list of a single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
3D tensor with shape: `(batch_size, steps, input_dim)`
Output shape:
3D tensor with shape: `(batch_size, new_steps, filters)`
`steps` value might have changed due to padding or strides.
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv1D, self).__init__(
rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def call(self, inputs):
if self.padding == 'causal':
inputs = array_ops.pad(inputs, self._compute_causal_padding())
return super(Conv1D, self).call(inputs)
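# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module; kept as
# comments so the module body above stays unchanged): the shape behaviour
# documented for Conv1D. With `padding='causal'` the `steps` dimension is
# preserved and output[t] depends only on input[:t + 1]. The layer sizes are
# arbitrary assumptions.
#
#   import tensorflow as tf
#   inputs = tf.keras.Input(shape=(10, 128))      # 10 timesteps, 128 features
#   outputs = tf.keras.layers.Conv1D(32, 3, padding='causal')(inputs)
#   print(outputs.shape)                          # (None, 10, 32)
# ---------------------------------------------------------------------------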
@tf_export('keras.layers.Conv2D', 'keras.layers.Convolution2D')
class Conv2D(Conv):
"""2D convolution layer (e.g. spatial convolution over images).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
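  Example (an illustrative usage sketch; the layer path matches the
  `tf.keras.layers.Conv2D` export above, the 28x28x1 input is arbitrary):
  ```python
  import tensorflow as tf
  model = tf.keras.Sequential([
      tf.keras.layers.Conv2D(32, (3, 3), activation='relu',
                             input_shape=(28, 28, 1)),   # -> (26, 26, 32)
      tf.keras.layers.Conv2D(64, (3, 3), strides=2, padding='same'),
  ])                                                     # -> (13, 13, 64)
  ```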
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
@tf_export('keras.layers.Conv3D', 'keras.layers.Convolution3D')
class Conv3D(Conv):
"""3D convolution layer (e.g. spatial convolution over volumes).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes
with a single channel,
in `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along each spatial
dimension.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
5D tensor with shape:
`(samples, channels, conv_dim1, conv_dim2, conv_dim3)` if
data_format='channels_first'
or 5D tensor with shape:
`(samples, conv_dim1, conv_dim2, conv_dim3, channels)` if
data_format='channels_last'.
Output shape:
5D tensor with shape:
`(samples, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if
data_format='channels_first'
or 5D tensor with shape:
`(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` if
data_format='channels_last'.
`new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have
changed due to padding.
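  Example (an illustrative usage sketch; the 16x16x16 single-channel volume is
  arbitrary):
  ```python
  import tensorflow as tf
  model = tf.keras.Sequential([
      tf.keras.layers.Conv3D(8, (3, 3, 3), activation='relu',
                             input_shape=(16, 16, 16, 1)),
  ])
  # With 'valid' padding each spatial dim shrinks by kernel_size - 1,
  # so the output shape is (batch, 14, 14, 14, 8).
  ```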
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv3D, self).__init__(
rank=3,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
@tf_export('keras.layers.Conv2DTranspose',
'keras.layers.Convolution2DTranspose')
class Conv2DTranspose(Conv2D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
output_padding: An integer or tuple/list of 2 integers,
specifying the amount of padding along the height and width
of the output tensor.
Can be a single integer to specify the same value for all
spatial dimensions.
The amount of output padding along a given dimension must be
lower than the stride along that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
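  Example (an illustrative usage sketch; the 7x7x64 feature map is arbitrary):
  ```python
  import tensorflow as tf
  layer = tf.keras.layers.Conv2DTranspose(32, (3, 3), strides=2,
                                          padding='same')
  x = tf.zeros((1, 7, 7, 64))
  y = layer(x)
  # y has shape (1, 14, 14, 32): with 'same' padding and no output_padding,
  # new_rows = rows * stride.
  ```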
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
output_padding=None,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 2, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Stride ' + str(self.strides) + ' must be '
'greater than output padding ' +
str(self.output_padding))
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if len(input_shape) != 4:
raise ValueError('Inputs should have rank 4. Received input shape: ' +
str(input_shape))
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
h_axis, w_axis = 2, 3
else:
h_axis, w_axis = 1, 2
height, width = inputs_shape[h_axis], inputs_shape[w_axis]
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
# Infer the dynamic output shape:
out_height = conv_utils.deconv_output_length(height,
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
out_width = conv_utils.deconv_output_length(width,
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_height, out_width)
else:
output_shape = (batch_size, out_height, out_width, self.filters)
output_shape_tensor = array_ops.stack(output_shape)
outputs = backend.conv2d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, h_axis, w_axis = 1, 2, 3
else:
c_axis, h_axis, w_axis = 3, 1, 2
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Conv2DTranspose, self).get_config()
config['output_padding'] = self.output_padding
return config
@tf_export('keras.layers.Conv3DTranspose',
'keras.layers.Convolution3DTranspose')
class Conv3DTranspose(Conv3D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels
if `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along the depth, height
and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
output_padding: An integer or tuple/list of 3 integers,
specifying the amount of padding along the depth, height, and
width.
Can be a single integer to specify the same value for all
spatial dimensions.
The amount of output padding along a given dimension must be
lower than the stride along that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use
(see [activations](../activations.md)).
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to the kernel matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
Input shape:
5D tensor with shape:
`(batch, channels, depth, rows, cols)` if data_format='channels_first'
or 5D tensor with shape:
`(batch, depth, rows, cols, channels)` if data_format='channels_last'.
Output shape:
5D tensor with shape:
`(batch, filters, new_depth, new_rows, new_cols)` if
data_format='channels_first'
or 5D tensor with shape:
`(batch, new_depth, new_rows, new_cols, filters)` if
data_format='channels_last'.
    `depth`, `rows` and `cols` values might have changed due to padding.
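  Example (an illustrative usage sketch; the 4x4x4x32 input volume is
  arbitrary):
  ```python
  import tensorflow as tf
  layer = tf.keras.layers.Conv3DTranspose(16, (3, 3, 3), strides=(2, 2, 2),
                                          padding='same')
  y = layer(tf.zeros((1, 4, 4, 4, 32)))
  # y has shape (1, 8, 8, 8, 16): each spatial dimension is doubled.
  ```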
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
output_padding=None,
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv3DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 3, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Stride ' + str(self.strides) + ' must be '
'greater than output padding ' +
str(self.output_padding))
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if len(input_shape) != 5:
raise ValueError('Inputs should have rank 5, received input shape:',
str(input_shape))
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined, found None: ' + str(input_shape))
input_dim = int(input_shape[channel_axis])
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.input_spec = InputSpec(ndim=5, axes={channel_axis: input_dim})
self.kernel = self.add_weight(
'kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
'bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
d_axis, h_axis, w_axis = 2, 3, 4
else:
d_axis, h_axis, w_axis = 1, 2, 3
depth = inputs_shape[d_axis]
height = inputs_shape[h_axis]
width = inputs_shape[w_axis]
kernel_d, kernel_h, kernel_w = self.kernel_size
stride_d, stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_d = out_pad_h = out_pad_w = None
else:
out_pad_d, out_pad_h, out_pad_w = self.output_padding
# Infer the dynamic output shape:
out_depth = conv_utils.deconv_output_length(depth,
kernel_d,
padding=self.padding,
output_padding=out_pad_d,
stride=stride_d)
out_height = conv_utils.deconv_output_length(height,
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h)
out_width = conv_utils.deconv_output_length(width,
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w)
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_depth, out_height,
out_width)
strides = (1, 1, stride_d, stride_h, stride_w)
else:
output_shape = (batch_size, out_depth, out_height, out_width,
self.filters)
strides = (1, stride_d, stride_h, stride_w, 1)
output_shape_tensor = array_ops.stack(output_shape)
outputs = nn.conv3d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides,
data_format=conv_utils.convert_data_format(self.data_format, ndim=5),
padding=self.padding.upper())
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs_shape = outputs.shape.as_list()
if outputs_shape[0] is None:
outputs_shape[0] = -1
if self.data_format == 'channels_first':
outputs_4d = array_ops.reshape(outputs, [
outputs_shape[0], outputs_shape[1],
outputs_shape[2] * outputs_shape[3], outputs_shape[4]
])
else:
outputs_4d = array_ops.reshape(outputs, [
outputs_shape[0], outputs_shape[1] * outputs_shape[2],
outputs_shape[3], outputs_shape[4]
])
outputs_4d = nn.bias_add(
outputs_4d,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
outputs = array_ops.reshape(outputs_4d, outputs_shape)
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, d_axis, h_axis, w_axis = 1, 2, 3, 4
else:
c_axis, d_axis, h_axis, w_axis = 4, 1, 2, 3
kernel_d, kernel_h, kernel_w = self.kernel_size
stride_d, stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_d = out_pad_h = out_pad_w = None
else:
out_pad_d, out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[d_axis] = conv_utils.deconv_output_length(
output_shape[d_axis],
kernel_d,
padding=self.padding,
output_padding=out_pad_d,
stride=stride_d)
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h)
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w)
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Conv3DTranspose, self).get_config()
config.pop('dilation_rate')
config['output_padding'] = self.output_padding
return config
class SeparableConv(Conv):
"""Abstract base layer for separable nD convolution.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
  If `use_bias` is True, a bias vector is created and added to the outputs.
It then optionally applies an activation function to produce the final output.
Arguments:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of integers specifying the strides
of the convolution. Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution kernel.
pointwise_initializer: An initializer for the pointwise convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
def __init__(self,
rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(SeparableConv, self).__init__(
rank=rank,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
bias_initializer=initializers.get(bias_initializer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
self.depth_multiplier = depth_multiplier
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.pointwise_initializer = initializers.get(pointwise_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.pointwise_regularizer = regularizers.get(pointwise_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.pointwise_constraint = constraints.get(pointwise_constraint)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
depthwise_kernel_shape = self.kernel_size + (input_dim,
self.depth_multiplier)
pointwise_kernel_shape = (
1,) * self.rank + (self.depth_multiplier * input_dim, self.filters)
self.depthwise_kernel = self.add_weight(
name='depthwise_kernel',
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint,
trainable=True,
dtype=self.dtype)
self.pointwise_kernel = self.add_weight(
name='pointwise_kernel',
shape=pointwise_kernel_shape,
initializer=self.pointwise_initializer,
regularizer=self.pointwise_regularizer,
constraint=self.pointwise_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'depth_multiplier':
self.depth_multiplier,
'dilation_rate':
self.dilation_rate,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'depthwise_initializer':
initializers.serialize(self.depthwise_initializer),
'pointwise_initializer':
initializers.serialize(self.pointwise_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'depthwise_regularizer':
regularizers.serialize(self.depthwise_regularizer),
'pointwise_regularizer':
regularizers.serialize(self.pointwise_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'depthwise_constraint':
constraints.serialize(self.depthwise_constraint),
'pointwise_constraint':
constraints.serialize(self.pointwise_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint)
}
base_config = super(SeparableConv, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.SeparableConv1D',
'keras.layers.SeparableConvolution1D')
class SeparableConv1D(SeparableConv):
"""Depthwise separable 1D convolution.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
  If `use_bias` is True, a bias vector is created and added to the outputs.
It then optionally applies an activation function to produce the final output.
Arguments:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A single integer specifying the spatial
dimensions of the filters.
strides: A single integer specifying the strides
of the convolution.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
dilation_rate: A single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution kernel.
pointwise_initializer: An initializer for the pointwise convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
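  Example (an illustrative usage sketch; the input shape, filter count and
  `depth_multiplier` are arbitrary):
  ```python
  import tensorflow as tf
  layer = tf.keras.layers.SeparableConv1D(32, 3, padding='same',
                                          depth_multiplier=2)
  y = layer(tf.zeros((4, 100, 8)))  # shape (4, 100, 32)
  # Weights: depthwise kernel (3, 8, 2) = 48, pointwise kernel (1, 16, 32) = 512,
  # plus a bias vector of 32.
  ```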
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
**kwargs):
super(SeparableConv1D, self).__init__(
rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activations.get(activation),
use_bias=use_bias,
depthwise_initializer=initializers.get(depthwise_initializer),
pointwise_initializer=initializers.get(pointwise_initializer),
bias_initializer=initializers.get(bias_initializer),
depthwise_regularizer=regularizers.get(depthwise_regularizer),
pointwise_regularizer=regularizers.get(pointwise_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
depthwise_constraint=constraints.get(depthwise_constraint),
pointwise_constraint=constraints.get(pointwise_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def call(self, inputs):
if self.padding == 'causal':
inputs = array_ops.pad(inputs, self._compute_causal_padding())
if self.data_format == 'channels_last':
strides = (1,) + self.strides * 2 + (1,)
spatial_start_dim = 1
else:
strides = (1, 1) + self.strides * 2
spatial_start_dim = 2
# Explicitly broadcast inputs and kernels to 4D.
# TODO(fchollet): refactor when a native separable_conv1d op is available.
inputs = array_ops.expand_dims(inputs, spatial_start_dim)
depthwise_kernel = array_ops.expand_dims(self.depthwise_kernel, 0)
pointwise_kernel = array_ops.expand_dims(self.pointwise_kernel, 0)
dilation_rate = (1,) + self.dilation_rate
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
outputs = nn.separable_conv2d(
inputs,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=op_padding.upper(),
rate=dilation_rate,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
outputs = array_ops.squeeze(outputs, [spatial_start_dim])
if self.activation is not None:
return self.activation(outputs)
return outputs
@tf_export('keras.layers.SeparableConv2D',
'keras.layers.SeparableConvolution2D')
class SeparableConv2D(SeparableConv):
"""Depthwise separable 2D convolution.
  Separable convolutions consist of first performing
a depthwise spatial convolution
(which acts on each input channel separately)
followed by a pointwise convolution which mixes together the resulting
output channels. The `depth_multiplier` argument controls how many
output channels are generated per input channel in the depthwise step.
Intuitively, separable convolutions can be understood as
a way to factorize a convolution kernel into two smaller kernels,
or as an extreme version of an Inception block.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: Initializer for the depthwise kernel matrix.
pointwise_initializer: Initializer for the pointwise kernel matrix.
bias_initializer: Initializer for the bias vector.
depthwise_regularizer: Regularizer function applied to
the depthwise kernel matrix.
pointwise_regularizer: Regularizer function applied to
the pointwise kernel matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
depthwise_constraint: Constraint function applied to
the depthwise kernel matrix.
pointwise_constraint: Constraint function applied to
the pointwise kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
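  Example (an illustrative usage sketch; the 32x32 RGB input is arbitrary):
  ```python
  import tensorflow as tf
  model = tf.keras.Sequential([
      tf.keras.layers.SeparableConv2D(64, (3, 3), padding='same',
                                      activation='relu',
                                      input_shape=(32, 32, 3)),
  ])
  # Weights: depthwise kernel 3*3*3*1 = 27, pointwise kernel 1*1*3*64 = 192,
  # bias 64 -- far fewer than the 3*3*3*64 = 1728 kernel weights of a
  # regular Conv2D with the same output size.
  ```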
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
**kwargs):
super(SeparableConv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activations.get(activation),
use_bias=use_bias,
depthwise_initializer=initializers.get(depthwise_initializer),
pointwise_initializer=initializers.get(pointwise_initializer),
bias_initializer=initializers.get(bias_initializer),
depthwise_regularizer=regularizers.get(depthwise_regularizer),
pointwise_regularizer=regularizers.get(pointwise_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
depthwise_constraint=constraints.get(depthwise_constraint),
pointwise_constraint=constraints.get(pointwise_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def call(self, inputs):
# Apply the actual ops.
if self.data_format == 'channels_last':
strides = (1,) + self.strides + (1,)
else:
strides = (1, 1) + self.strides
outputs = nn.separable_conv2d(
inputs,
self.depthwise_kernel,
self.pointwise_kernel,
strides=strides,
padding=self.padding.upper(),
rate=self.dilation_rate,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
@tf_export('keras.layers.DepthwiseConv2D')
class DepthwiseConv2D(Conv2D):
"""Depthwise separable 2D convolution.
  Depthwise convolution performs just the first step of a depthwise separable
  convolution: a spatial convolution applied to each input channel separately.
The `depth_multiplier` argument controls how many
output channels are generated per input channel in the depthwise step.
Arguments:
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `'valid'` or `'same'` (case-insensitive).
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be 'channels_last'.
activation: Activation function to use.
If you don't specify anything, no activation is applied
      (i.e. 'linear' activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: Initializer for the depthwise kernel matrix.
bias_initializer: Initializer for the bias vector.
depthwise_regularizer: Regularizer function applied to
the depthwise kernel matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its 'activation').
depthwise_constraint: Constraint function applied to
the depthwise kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`[batch, channels, rows, cols]` if data_format='channels_first'
or 4D tensor with shape:
`[batch, rows, cols, channels]` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`[batch, filters, new_rows, new_cols]` if data_format='channels_first'
or 4D tensor with shape:
`[batch, new_rows, new_cols, filters]` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
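  Example (an illustrative usage sketch; the 28x28x4 input is arbitrary):
  ```python
  import tensorflow as tf
  layer = tf.keras.layers.DepthwiseConv2D((3, 3), depth_multiplier=2,
                                          padding='same')
  y = layer(tf.zeros((1, 28, 28, 4)))
  # shape (1, 28, 28, 8): output channels = input channels * depth_multiplier
  ```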
"""
def __init__(self,
kernel_size,
strides=(1, 1),
padding='valid',
depth_multiplier=1,
data_format=None,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
bias_constraint=None,
**kwargs):
super(DepthwiseConv2D, self).__init__(
filters=None,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
bias_constraint=bias_constraint,
**kwargs)
self.depth_multiplier = depth_multiplier
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.bias_initializer = initializers.get(bias_initializer)
def build(self, input_shape):
if len(input_shape) < 4:
raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. '
'Received input shape:', str(input_shape))
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = 3
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs to '
'`DepthwiseConv2D` '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
depthwise_kernel_shape = (self.kernel_size[0],
self.kernel_size[1],
input_dim,
self.depth_multiplier)
self.depthwise_kernel = self.add_weight(
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
name='depthwise_kernel',
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(input_dim * self.depth_multiplier,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
# Set input spec.
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
self.built = True
def call(self, inputs, training=None):
outputs = backend.depthwise_conv2d(
inputs,
self.depthwise_kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate,
data_format=self.data_format)
if self.use_bias:
outputs = backend.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
out_filters = input_shape[1] * self.depth_multiplier
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
out_filters = input_shape[3] * self.depth_multiplier
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding,
self.strides[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding,
self.strides[1])
if self.data_format == 'channels_first':
return (input_shape[0], out_filters, rows, cols)
elif self.data_format == 'channels_last':
return (input_shape[0], rows, cols, out_filters)
def get_config(self):
config = super(DepthwiseConv2D, self).get_config()
config.pop('filters')
config.pop('kernel_initializer')
config.pop('kernel_regularizer')
config.pop('kernel_constraint')
config['depth_multiplier'] = self.depth_multiplier
config['depthwise_initializer'] = initializers.serialize(
self.depthwise_initializer)
config['depthwise_regularizer'] = regularizers.serialize(
self.depthwise_regularizer)
config['depthwise_constraint'] = constraints.serialize(
self.depthwise_constraint)
return config
@tf_export('keras.layers.UpSampling1D')
class UpSampling1D(Layer):
"""Upsampling layer for 1D inputs.
Repeats each temporal step `size` times along the time axis.
Arguments:
size: integer. Upsampling factor.
Input shape:
3D tensor with shape: `(batch, steps, features)`.
Output shape:
3D tensor with shape: `(batch, upsampled_steps, features)`.
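  Example (an illustrative sketch with a tiny hand-built input):
  ```python
  import tensorflow as tf
  x = tf.reshape(tf.range(6, dtype=tf.float32), (1, 3, 2))
  y = tf.keras.layers.UpSampling1D(size=2)(x)
  # y has shape (1, 6, 2); each timestep appears twice in a row:
  # [[0, 1], [2, 3], [4, 5]] -> [[0, 1], [0, 1], [2, 3], [2, 3], [4, 5], [4, 5]]
  ```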
"""
def __init__(self, size=2, **kwargs):
super(UpSampling1D, self).__init__(**kwargs)
self.size = int(size)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
size = self.size * input_shape[1] if input_shape[1] is not None else None
return tensor_shape.TensorShape([input_shape[0], size, input_shape[2]])
def call(self, inputs):
output = backend.repeat_elements(inputs, self.size, axis=1)
return output
def get_config(self):
config = {'size': self.size}
base_config = super(UpSampling1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.UpSampling2D')
class UpSampling2D(Layer):
"""Upsampling layer for 2D inputs.
Repeats the rows and columns of the data
by size[0] and size[1] respectively.
Arguments:
size: int, or tuple of 2 integers.
The upsampling factors for rows and columns.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
interpolation: A string, one of `nearest` or `bilinear`.
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, upsampled_rows, upsampled_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, upsampled_rows, upsampled_cols)`
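  Example (an illustrative usage sketch; the 4x4x3 input is arbitrary):
  ```python
  import tensorflow as tf
  layer = tf.keras.layers.UpSampling2D(size=(2, 2), interpolation='nearest')
  y = layer(tf.zeros((1, 4, 4, 3)))  # shape (1, 8, 8, 3)
  ```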
"""
def __init__(self,
size=(2, 2),
data_format=None,
interpolation='nearest',
**kwargs):
super(UpSampling2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
if interpolation not in {'nearest', 'bilinear'}:
raise ValueError('`interpolation` argument should be one of `"nearest"` '
'or `"bilinear"`.')
self.interpolation = interpolation
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
height = self.size[0] * input_shape[
2] if input_shape[2] is not None else None
width = self.size[1] * input_shape[
3] if input_shape[3] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], height, width])
else:
height = self.size[0] * input_shape[
1] if input_shape[1] is not None else None
width = self.size[1] * input_shape[
2] if input_shape[2] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], height, width, input_shape[3]])
def call(self, inputs):
return backend.resize_images(
inputs, self.size[0], self.size[1], self.data_format,
interpolation=self.interpolation)
def get_config(self):
    config = {
        'size': self.size,
        'data_format': self.data_format,
        'interpolation': self.interpolation
    }
base_config = super(UpSampling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.UpSampling3D')
class UpSampling3D(Layer):
"""Upsampling layer for 3D inputs.
Repeats the 1st, 2nd and 3rd dimensions
of the data by size[0], size[1] and size[2] respectively.
Arguments:
size: int, or tuple of 3 integers.
The upsampling factors for dim1, dim2 and dim3.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, dim1, dim2, dim3, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, dim1, dim2, dim3)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`
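  Example (an illustrative usage sketch; the input dimensions are arbitrary):
  ```python
  import tensorflow as tf
  layer = tf.keras.layers.UpSampling3D(size=(2, 2, 2))
  y = layer(tf.zeros((1, 2, 3, 4, 5)))  # shape (1, 4, 6, 8, 5)
  ```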
"""
def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 3, 'size')
self.input_spec = InputSpec(ndim=5)
super(UpSampling3D, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
dim1 = self.size[0] * input_shape[
2] if input_shape[2] is not None else None
dim2 = self.size[1] * input_shape[
3] if input_shape[3] is not None else None
dim3 = self.size[2] * input_shape[
4] if input_shape[4] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
else:
dim1 = self.size[0] * input_shape[
1] if input_shape[1] is not None else None
dim2 = self.size[1] * input_shape[
2] if input_shape[2] is not None else None
dim3 = self.size[2] * input_shape[
3] if input_shape[3] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
def call(self, inputs):
return backend.resize_volumes(
inputs, self.size[0], self.size[1], self.size[2], self.data_format)
def get_config(self):
config = {'size': self.size, 'data_format': self.data_format}
base_config = super(UpSampling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.ZeroPadding1D')
class ZeroPadding1D(Layer):
"""Zero-padding layer for 1D input (e.g. temporal sequence).
Arguments:
padding: int, or tuple of int (length 2), or dictionary.
- If int:
How many zeros to add at the beginning and end of
the padding dimension (axis 1).
- If tuple of int (length 2):
How many zeros to add at the beginning and at the end of
the padding dimension (`(left_pad, right_pad)`).
Input shape:
3D tensor with shape `(batch, axis_to_pad, features)`
Output shape:
3D tensor with shape `(batch, padded_axis, features)`
"""
def __init__(self, padding=1, **kwargs):
super(ZeroPadding1D, self).__init__(**kwargs)
self.padding = conv_utils.normalize_tuple(padding, 2, 'padding')
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
if input_shape[1] is not None:
length = input_shape[1] + self.padding[0] + self.padding[1]
else:
length = None
return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
return backend.temporal_padding(inputs, padding=self.padding)
def get_config(self):
config = {'padding': self.padding}
base_config = super(ZeroPadding1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
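# --- Illustrative sketch (editor's addition, not part of the original file). ---
# ZeroPadding1D adds `padding[0]` zeros before and `padding[1]` zeros after axis 1;
# the helper name below is hypothetical and only exercises compute_output_shape.
def _zero_padding1d_shape_example():
  layer = ZeroPadding1D(padding=(2, 3))
  # (batch, steps, features) -> steps grows by 2 + 3 = 5.
  return layer.compute_output_shape((None, 10, 4)).as_list()  # [None, 15, 4]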
@tf_export('keras.layers.ZeroPadding2D')
class ZeroPadding2D(Layer):
"""Zero-padding layer for 2D input (e.g. picture).
This layer can add rows and columns of zeros
at the top, bottom, left and right side of an image tensor.
Arguments:
padding: int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric padding
is applied to height and width.
- If tuple of 2 ints:
interpreted as two different
symmetric padding values for height and width:
`(symmetric_height_pad, symmetric_width_pad)`.
- If tuple of 2 tuples of 2 ints:
interpreted as
`((top_pad, bottom_pad), (left_pad, right_pad))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, padded_rows, padded_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, padded_rows, padded_cols)`
"""
def __init__(self, padding=(1, 1), data_format=None, **kwargs):
super(ZeroPadding2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding))
elif hasattr(padding, '__len__'):
if len(padding) != 2:
raise ValueError('`padding` should have two elements. '
'Found: ' + str(padding))
height_padding = conv_utils.normalize_tuple(padding[0], 2,
'1st entry of padding')
width_padding = conv_utils.normalize_tuple(padding[1], 2,
'2nd entry of padding')
self.padding = (height_padding, width_padding)
else:
raise ValueError('`padding` should be either an int, '
'a tuple of 2 ints '
'(symmetric_height_pad, symmetric_width_pad), '
'or a tuple of 2 tuples of 2 ints '
'((top_pad, bottom_pad), (left_pad, right_pad)). '
'Found: ' + str(padding))
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if input_shape[2] is not None:
rows = input_shape[2] + self.padding[0][0] + self.padding[0][1]
else:
rows = None
if input_shape[3] is not None:
cols = input_shape[3] + self.padding[1][0] + self.padding[1][1]
else:
cols = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], rows, cols])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
rows = input_shape[1] + self.padding[0][0] + self.padding[0][1]
else:
rows = None
if input_shape[2] is not None:
cols = input_shape[2] + self.padding[1][0] + self.padding[1][1]
else:
cols = None
return tensor_shape.TensorShape(
[input_shape[0], rows, cols, input_shape[3]])
def call(self, inputs):
return backend.spatial_2d_padding(
inputs, padding=self.padding, data_format=self.data_format)
def get_config(self):
config = {'padding': self.padding, 'data_format': self.data_format}
base_config = super(ZeroPadding2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
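# --- Illustrative sketch (editor's addition, not part of the original file). ---
# ZeroPadding2D accepts per-side amounts as ((top, bottom), (left, right)); the
# helper name below is hypothetical and only checks the resulting static shape.
def _zero_padding2d_shape_example():
  layer = ZeroPadding2D(padding=((1, 2), (3, 4)), data_format='channels_last')
  # rows: 10 + 1 + 2 = 13, cols: 10 + 3 + 4 = 17
  return layer.compute_output_shape((None, 10, 10, 3)).as_list()  # [None, 13, 17, 3]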
@tf_export('keras.layers.ZeroPadding3D')
class ZeroPadding3D(Layer):
"""Zero-padding layer for 3D data (spatial or spatio-temporal).
Arguments:
padding: int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
      - If int: the same symmetric padding
        is applied to all three spatial dimensions.
      - If tuple of 3 ints:
        interpreted as three different
        symmetric padding values for dim1, dim2 and dim3:
        `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
- If tuple of 3 tuples of 2 ints:
interpreted as
`((left_dim1_pad, right_dim1_pad), (left_dim2_pad,
right_dim2_pad), (left_dim3_pad, right_dim3_pad))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_axis_to_pad, second_axis_to_pad,
third_axis_to_pad)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_padded_axis, second_padded_axis, third_axis_to_pad,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_padded_axis, second_padded_axis,
third_axis_to_pad)`
"""
def __init__(self, padding=(1, 1, 1), data_format=None, **kwargs):
super(ZeroPadding3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding), (padding,
padding))
elif hasattr(padding, '__len__'):
if len(padding) != 3:
raise ValueError('`padding` should have 3 elements. '
'Found: ' + str(padding))
dim1_padding = conv_utils.normalize_tuple(padding[0], 2,
'1st entry of padding')
dim2_padding = conv_utils.normalize_tuple(padding[1], 2,
'2nd entry of padding')
dim3_padding = conv_utils.normalize_tuple(padding[2], 2,
'3rd entry of padding')
self.padding = (dim1_padding, dim2_padding, dim3_padding)
else:
raise ValueError(
'`padding` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_pad, right_dim1_pad),'
' (left_dim2_pad, right_dim2_pad),'
          ' (left_dim3_pad, right_dim3_pad)). '
'Found: ' + str(padding))
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if input_shape[2] is not None:
dim1 = input_shape[2] + 2 * self.padding[0][0]
else:
dim1 = None
if input_shape[3] is not None:
dim2 = input_shape[3] + 2 * self.padding[1][0]
else:
dim2 = None
if input_shape[4] is not None:
dim3 = input_shape[4] + 2 * self.padding[2][0]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
dim1 = input_shape[1] + 2 * self.padding[0][1]
else:
dim1 = None
if input_shape[2] is not None:
dim2 = input_shape[2] + 2 * self.padding[1][1]
else:
dim2 = None
if input_shape[3] is not None:
dim3 = input_shape[3] + 2 * self.padding[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
def call(self, inputs):
return backend.spatial_3d_padding(
inputs, padding=self.padding, data_format=self.data_format)
def get_config(self):
config = {'padding': self.padding, 'data_format': self.data_format}
base_config = super(ZeroPadding3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.Cropping1D')
class Cropping1D(Layer):
"""Cropping layer for 1D input (e.g. temporal sequence).
It crops along the time dimension (axis 1).
Arguments:
cropping: int or tuple of int (length 2)
How many units should be trimmed off at the beginning and end of
the cropping dimension (axis 1).
If a single int is provided,
the same value will be used for both.
Input shape:
3D tensor with shape `(batch, axis_to_crop, features)`
Output shape:
3D tensor with shape `(batch, cropped_axis, features)`
"""
def __init__(self, cropping=(1, 1), **kwargs):
super(Cropping1D, self).__init__(**kwargs)
self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping')
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if input_shape[1] is not None:
length = input_shape[1] - self.cropping[0] - self.cropping[1]
else:
length = None
return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
if self.cropping[1] == 0:
return inputs[:, self.cropping[0]:, :]
else:
return inputs[:, self.cropping[0]:-self.cropping[1], :]
def get_config(self):
config = {'cropping': self.cropping}
base_config = super(Cropping1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
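# --- Illustrative sketch (editor's addition, not part of the original file). ---
# Cropping1D trims `cropping[0]` steps from the start and `cropping[1]` steps from
# the end of axis 1; the helper name below is hypothetical.
def _cropping1d_shape_example():
  layer = Cropping1D(cropping=(1, 2))
  # (batch, steps, features) -> steps shrinks by 1 + 2 = 3.
  return layer.compute_output_shape((None, 10, 4)).as_list()  # [None, 7, 4]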
@tf_export('keras.layers.Cropping2D')
class Cropping2D(Layer):
"""Cropping layer for 2D input (e.g. picture).
It crops along spatial dimensions, i.e. height and width.
Arguments:
cropping: int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to height and width.
- If tuple of 2 ints:
interpreted as two different
symmetric cropping values for height and width:
`(symmetric_height_crop, symmetric_width_crop)`.
- If tuple of 2 tuples of 2 ints:
interpreted as
`((top_crop, bottom_crop), (left_crop, right_crop))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, cropped_rows, cropped_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, cropped_rows, cropped_cols)`
Examples:
```python
# Crop the input 2D images or feature maps
model = Sequential()
model.add(Cropping2D(cropping=((2, 2), (4, 4)),
input_shape=(28, 28, 3)))
# now model.output_shape == (None, 24, 20, 3)
  model.add(Conv2D(64, (3, 3), padding='same'))
  model.add(Cropping2D(cropping=((2, 2), (2, 2))))
  # now model.output_shape == (None, 20, 16, 64)
```
"""
def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):
super(Cropping2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 2:
raise ValueError('`cropping` should have two elements. '
'Found: ' + str(cropping))
height_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
width_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
self.cropping = (height_cropping, width_cropping)
else:
raise ValueError('`cropping` should be either an int, '
'a tuple of 2 ints '
'(symmetric_height_crop, symmetric_width_crop), '
'or a tuple of 2 tuples of 2 ints '
'((top_crop, bottom_crop), (left_crop, right_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
return tensor_shape.TensorShape([
input_shape[0], input_shape[1],
input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[2] else None,
input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[3] else None
])
else:
return tensor_shape.TensorShape([
input_shape[0],
input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[1] else None,
input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[2] else None, input_shape[3]
])
# pylint: enable=invalid-unary-operand-type
def call(self, inputs):
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:]
elif self.cropping[0][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1]]
elif self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:]
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, :]
elif self.cropping[0][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], :]
elif self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, :]
return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
1][0]:-self.cropping[1][1], :] # pylint: disable=invalid-unary-operand-type
# pylint: enable=invalid-unary-operand-type
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.Cropping3D')
class Cropping3D(Layer):
"""Cropping layer for 3D data (e.g.
spatial or spatio-temporal).
Arguments:
    cropping: int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to depth, height, and width.
- If tuple of 3 ints:
        interpreted as three different
symmetric cropping values for depth, height, and width:
`(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.
- If tuple of 3 tuples of 2 ints:
interpreted as
`((left_dim1_crop, right_dim1_crop), (left_dim2_crop,
right_dim2_crop), (left_dim3_crop, right_dim3_crop))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_axis_to_crop, second_axis_to_crop,
third_axis_to_crop)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_cropped_axis, second_cropped_axis, third_cropped_axis,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_cropped_axis, second_cropped_axis,
third_cropped_axis)`
"""
def __init__(self,
cropping=((1, 1), (1, 1), (1, 1)),
data_format=None,
**kwargs):
super(Cropping3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping), (cropping,
cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 3:
raise ValueError('`cropping` should have 3 elements. '
'Found: ' + str(cropping))
dim1_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
dim2_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
dim3_cropping = conv_utils.normalize_tuple(cropping[2], 2,
'3rd entry of cropping')
self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
else:
raise ValueError(
'`cropping` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_crop, right_dim1_crop),'
' (left_dim2_crop, right_dim2_crop),'
          ' (left_dim3_crop, right_dim3_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if input_shape[2] is not None:
dim1 = input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
else:
dim1 = None
if input_shape[3] is not None:
dim2 = input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
else:
dim2 = None
if input_shape[4] is not None:
dim3 = input_shape[4] - self.cropping[2][0] - self.cropping[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
dim1 = input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
else:
dim1 = None
if input_shape[2] is not None:
dim2 = input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
else:
dim2 = None
if input_shape[3] is not None:
dim3 = input_shape[3] - self.cropping[2][0] - self.cropping[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
# pylint: enable=invalid-unary-operand-type
def call(self, inputs):
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:]
elif self.cropping[0][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][
0]:-self.cropping[1][1], self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.
cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.
cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:]
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][
0]:-self.cropping[2][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1], :]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:, :]
elif self.cropping[0][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][
0]:-self.cropping[1][1], self.cropping[2][0]:
-self.cropping[2][1], :]
elif self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][
0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:
-self.cropping[2][1], :]
elif self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1], self.cropping[
2][0]:, :]
return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
1][0]:-self.cropping[1][1], self.cropping[2][0]: # pylint: disable=invalid-unary-operand-type
-self.cropping[2][1], :] # pylint: disable=invalid-unary-operand-type
# pylint: enable=invalid-unary-operand-type
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
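# --- Illustrative sketch (editor's addition, not part of the original file). ---
# Cropping3D with per-side amounts ((d1_l, d1_r), (d2_l, d2_r), (d3_l, d3_r)); the
# helper name below is hypothetical and only checks the resulting static shape.
def _cropping3d_shape_example():
  layer = Cropping3D(cropping=((1, 1), (2, 2), (0, 3)), data_format='channels_last')
  # dim1: 10 - 2 = 8, dim2: 10 - 4 = 6, dim3: 10 - 3 = 7
  return layer.compute_output_shape((None, 10, 10, 10, 2)).as_list()  # [None, 8, 6, 7, 2]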
# Aliases
Convolution1D = Conv1D
Convolution2D = Conv2D
Convolution3D = Conv3D
SeparableConvolution1D = SeparableConv1D
SeparableConvolution2D = SeparableConv2D
Convolution2DTranspose = Conv2DTranspose
Convolution3DTranspose = Conv3DTranspose
Deconvolution2D = Deconv2D = Conv2DTranspose
Deconvolution3D = Deconv3D = Conv3DTranspose
|
girving/tensorflow
|
tensorflow/python/keras/layers/convolutional.py
|
Python
|
apache-2.0
| 115,802
|
"""
pybench2.py: Test speed of one or more Pythons on a set of simple
code-string benchmarks, implemented as a function so the set of statements can vary per call.
This system itself runs on both 2.X and 3.X, and may spawn both.
Version 2: adds setup statement support, with extra stmts list slot.
Uses timeit to test either the Python running this script by API
calls, or a set of Pythons by reading spawned command-line outputs
(os.popen) with Python's -m flag to find timeit on module search path.
Replaces $listif3 with a list() around generators for 3.X and an
empty string for 2.X, so 3.X does same work as 2.X. In command-line
mode only, must split multiline statements into one separate quoted
argument per line so all will be run (else might run/time first line
only), and replace all \t in indentation with 4 spaces for uniformity.
Caveats: command-line mode (only) may fail if test stmt embeds double
quotes, quoted stmt string is incompatible with shell in general, or
command-line exceeds a length limit on platform's shell--use API call
mode or homegrown timer in such cases.
"""
import sys, os, timeit
defnum, defrep = 1000, 5    # may vary per stmt
def runner(stmts, pythons=None, tracecmd=False):
"""
Main logic: run tests per input lists, caller handles usage modes.
stmts: [(number?, repeat?, setup-string, stmt-string)], replaces $listif3 in stmt
pythons: None=this python only, or [(ispy3?, python-executable-path)]
"""
print(sys.version)
for (number, repeat, setup, stmt) in stmts:
number = number or defnum
repeat = repeat or defrep # 0=default
if not pythons:
# run stmt on this python: API call
# no need to split lines or quote here
ispy3 = sys.version[0] == '3'
stmt = stmt.replace('$listif3', 'list' if ispy3 else '')
best = min(timeit.repeat(setup=setup, stmt=stmt, number=number, repeat=repeat))
print('%.4f [%r]' % (best, stmt[:70]))
else:
# run stmt on all pythons: command line
# split lines into quoted arguments
print('-' * 80)
print('[%r]' % stmt)
# setup handled like stmt, but no $listif3: not timed
setup = setup.replace('\t', ' ' * 4)
setup = ' '.join('-s "%s"' % line for line in setup.split('\n'))
for (ispy3, python) in pythons:
stmt1 = stmt.replace('$listif3', 'list' if ispy3 else '')
stmt1 = stmt1.replace('\t', ' ' * 4)
lines = stmt1.split('\n')
args = ' '.join('"%s"' % line for line in lines)
cmd = '%s -m timeit -n %s -r %s %s %s' % (python, number, repeat, setup, args)
print(python)
if tracecmd: print(cmd)
print('\t' + os.popen(cmd).read().rstrip())
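# --- Illustrative sketch (editor's addition, not part of the original file). ---
# A hedged example of how runner() might be invoked directly; the statements below
# are made up for illustration and use the defaults declared above.
if __name__ == '__main__':
    example_stmts = [
        # (number, repeat, setup, stmt); 0 selects defnum/defrep
        (0, 0, '', 'x = [i * 2 for i in range(1000)]'),
        (0, 0, 'import math', 'y = $listif3(map(math.sqrt, range(1000)))'),
    ]
    # API-call mode: time the Python running this script
    runner(example_stmts)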
|
simontakite/sysadmin
|
pythonscripts/learningPython/pybench2.py
|
Python
|
gpl-2.0
| 2,909
|
# -----------------------------------------------------------------------------
# Vakhshour - Event and Message layer application
# Copyright (C) 2012 Yellowen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------------
import json
import logging
import logging.handlers
from argparse import ArgumentParser
from servers import PublisherServer
class Vakhshour(object):
"""
Vakhshour main class.
"""
DESC = "Vakhshour - Event and Message layer application"
FORMAT = '[%(asctime)s] %(module)s - %(lineno)d [%(levelname)s]: %(message)s'
def __init__(self):
self._setup_arguments()
self._parse_config()
self._setup_logger()
def _setup_arguments(self):
"""
Setup command line parser.
"""
self.parser = ArgumentParser(
description=self.DESC)
self.parser.add_argument("-d", "--debug",
action="store_true",
default=False,
help="Turn on debug mode."
)
self.parser.add_argument("-c", "--config",
default="/etc/vakhshour/vakhshour.json",
dest="config",
help="Use CONFIG as configuration file."
)
self.args = self.parser.parse_args()
return
def _setup_logger(self):
"""
Setup logger.
"""
if self.args.debug:
level = 0
else:
level = self.config.get("log_level", 40)
logging.basicConfig(format=self.FORMAT,
level=level)
self.logger = logging.getLogger("vakhshour")
filename = self.config.get("log_file", None)
if filename:
hdlr = logging.handlers.RotatingFileHandler(filename,
mode='w+',
maxBytes=10485760, # 10MB
backupCount=5)
hdlr.setFormatter(logging.Formatter(self.FORMAT))
self.logger.addHandler(hdlr)
return
def _parse_config(self):
"""
Parse the json configuration file.
"""
try:
fd = open(self.args.config)
except IOError:
print("Config file '%s' does not exists." % (
self.args.config))
exit(1)
try:
self.config = json.load(fd)
except ValueError:
fd.close()
print("Config file '%s' is not a json file." % (
self.args.config))
exit(1)
fd.close()
def run(self):
self.logger.info("Hi Vakhshour is here.")
app = PublisherServer(config=self.config)
try:
app.run()
except KeyboardInterrupt:
self.logger.info("Bye bye")
|
Yellowen/Vakhshour
|
vakhshour/application.py
|
Python
|
gpl-2.0
| 3,748
|
import ctypes
import os
import types
from platform_utils import paths
def load_library(libname):
if paths.is_frozen():
libfile = os.path.join(paths.embedded_data_path(), 'accessible_output2', 'lib', libname)
else:
libfile = os.path.join(paths.module_path(), 'lib', libname)
return ctypes.windll[libfile]
def get_output_classes():
import outputs
module_type = types.ModuleType
classes = [m.output_class for m in outputs.__dict__.itervalues() if type(m) == module_type and hasattr(m, 'output_class')]
return sorted(classes, key=lambda c: c.priority)
def find_datafiles():
import os
import platform
from glob import glob
import accessible_output2
if platform.system() != 'Windows':
return []
path = os.path.join(accessible_output2.__path__[0], 'lib', '*.dll')
results = glob(path)
dest_dir = os.path.join('accessible_output2', 'lib')
return [(dest_dir, results)]
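# --- Illustrative note (editor's addition, not part of the original file). ---
# find_datafiles() returns (dest_dir, [dll paths]) pairs in the shape expected by
# a distutils-style `data_files` argument; a hedged packaging usage might be:
#
#     # setup.py (hypothetical)
#     from accessible_output2 import find_datafiles
#     setup(..., data_files=find_datafiles())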
|
codeofdusk/ProjectMagenta
|
src/accessible_output2/__init__.py
|
Python
|
gpl-2.0
| 885
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the PipelineRunner and DirectRunner classes.
Note that PipelineRunner and DirectRunner functionality is tested in all
the other unit tests. In this file we choose to test only aspects related to
caching and clearing values that are not tested elsewhere.
"""
import unittest
import hamcrest as hc
import apache_beam as beam
import apache_beam.transforms as ptransform
from apache_beam.metrics.cells import DistributionData
from apache_beam.metrics.cells import DistributionResult
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricResult
from apache_beam.metrics.metric import Metrics
from apache_beam.metrics.metricbase import MetricName
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.pipeline import Pipeline
from apache_beam.runners import DirectRunner
from apache_beam.runners import create_runner
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class RunnerTest(unittest.TestCase):
default_properties = [
'--dataflow_endpoint=ignored',
'--job_name=test-job',
'--project=test-project',
'--staging_location=ignored',
'--temp_location=/dev/null',
'--no_auth=True']
def test_create_runner(self):
self.assertTrue(
isinstance(create_runner('DirectRunner'), DirectRunner))
self.assertRaises(ValueError, create_runner, 'xyz')
def test_create_runner_shorthand(self):
self.assertTrue(
isinstance(create_runner('DiReCtRuNnEr'), DirectRunner))
self.assertTrue(
isinstance(create_runner('directrunner'), DirectRunner))
self.assertTrue(
isinstance(create_runner('direct'), DirectRunner))
self.assertTrue(
isinstance(create_runner('DiReCt'), DirectRunner))
self.assertTrue(
isinstance(create_runner('Direct'), DirectRunner))
def test_direct_runner_metrics(self):
class MyDoFn(beam.DoFn):
def start_bundle(self):
count = Metrics.counter(self.__class__, 'bundles')
count.inc()
def finish_bundle(self):
count = Metrics.counter(self.__class__, 'finished_bundles')
count.inc()
def process(self, element):
count = Metrics.counter(self.__class__, 'elements')
count.inc()
distro = Metrics.distribution(self.__class__, 'element_dist')
distro.update(element)
return [element]
runner = DirectRunner()
p = Pipeline(runner,
options=PipelineOptions(self.default_properties))
pcoll = (p | ptransform.Create([1, 2, 3, 4, 5])
| 'Do' >> beam.ParDo(MyDoFn()))
assert_that(pcoll, equal_to([1, 2, 3, 4, 5]))
result = p.run()
result.wait_until_finish()
metrics = result.metrics().query()
namespace = '{}.{}'.format(MyDoFn.__module__,
MyDoFn.__name__)
hc.assert_that(
metrics['counters'],
hc.contains_inanyorder(
MetricResult(
MetricKey('Do', MetricName(namespace, 'elements')),
5, 5),
MetricResult(
MetricKey('Do', MetricName(namespace, 'bundles')),
1, 1),
MetricResult(
MetricKey('Do', MetricName(namespace, 'finished_bundles')),
1, 1)))
hc.assert_that(
metrics['distributions'],
hc.contains_inanyorder(
MetricResult(
MetricKey('Do', MetricName(namespace, 'element_dist')),
DistributionResult(DistributionData(15, 5, 1, 5)),
DistributionResult(DistributionData(15, 5, 1, 5)))))
def test_run_api(self):
my_metric = Metrics.counter('namespace', 'my_metric')
runner = DirectRunner()
result = runner.run(
beam.Create([1, 10, 100]) | beam.Map(lambda x: my_metric.inc(x)))
result.wait_until_finish()
# Use counters to assert the pipeline actually ran.
my_metric_value = result.metrics().query()['counters'][0].committed
self.assertEqual(my_metric_value, 111)
if __name__ == '__main__':
unittest.main()
|
jbonofre/beam
|
sdks/python/apache_beam/runners/runner_test.py
|
Python
|
apache-2.0
| 4,894
|
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
},
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'legal',
)
AUTH_USER_MODEL = 'auth.User'
SECRET_KEY = 'abcde12345'
ROOT_URLCONF = 'test_urls'
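# --- Illustrative note (editor's addition, not part of the original file). ---
# A minimal settings module like this is typically selected via
# DJANGO_SETTINGS_MODULE when running the app's test suite, e.g.:
#
#     DJANGO_SETTINGS_MODULE=test_settings django-admin test legal
#
# The exact invocation depends on the project's test runner and is an assumption.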
|
Kyruus/django-legal
|
test_settings.py
|
Python
|
mit
| 299
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2022-02-17 23:04
from __future__ import unicode_literals
from django.db import migrations, models
import theme.utils
class Migration(migrations.Migration):
dependencies = [
('hs_access_control', '0032_auto_20210607_2027'),
]
operations = [
migrations.AlterField(
model_name='community',
name='picture',
field=models.ImageField(blank=True, null=True, upload_to=theme.utils.get_upload_path_community),
),
migrations.AlterField(
model_name='groupaccess',
name='picture',
field=models.ImageField(blank=True, null=True, upload_to=theme.utils.get_upload_path_group),
),
]
|
hydroshare/hydroshare
|
hs_access_control/migrations/0033_auto_20220217_2304.py
|
Python
|
bsd-3-clause
| 756
|
import os
import os.path
import pytest
pytest_plugins = ['helpers_namespace']
@pytest.helpers.register
def data_path(fn):
return os.path.join(os.path.abspath(os.path.dirname(__file__)),
'seqlib/tests/data/' + fn)
@pytest.helpers.register
def check_file(test_file, result_file):
'''
    Check that both files exist and have the same content
'''
print('Check %s file...' % test_file)
assert os.path.isfile(test_file), 'No %s file' % test_file
assert compare_file(test_file, result_file), 'Difference in %s' % test_file
def compare_file(fn1, fn2):
'''
    Test whether two files have the same content
'''
with open(fn1, 'r') as f1, open(fn2, 'r') as f2:
content1 = set(f1.readlines())
content2 = set(f2.readlines())
return content1 == content2
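# --- Illustrative sketch (editor's addition, not part of the original file). ---
# With the helpers registered above, a test module can resolve bundled data files
# and compare outputs; the file names below are hypothetical.
#
#     import pytest
#
#     def test_output_matches_expected(tmpdir):
#         expected = pytest.helpers.data_path('expected.txt')
#         produced = str(tmpdir.join('produced.txt'))
#         # ... run the code under test so that it writes `produced` ...
#         pytest.helpers.check_file(produced, expected)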
|
kepbod/seqlib
|
conftest.py
|
Python
|
mit
| 796
|
# -*- coding: utf-8 -*-
# Copyright 2019 Juca Crispim <juca@poraodojuca.net>
# This file is part of toxicbulid.
# toxicbulid is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# toxicbulid is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with toxicbulid. If not, see <http://www.gnu.org/licenses/>.
from behave import when, then, given
from toxicbuild.integrations import settings
@when('clicks in the bitbucket import repos button')
def click_bitbucket_import(context):
browser = context.browser
btn = browser.wait_element_become_present(
lambda: browser.find_element_by_class_name('fa-bitbucket'))
btn.click()
@then('he is sent to the bitbucket login page')
@given('the user is in the bitbucket login page')
def is_in_bitbucket_login_page(context):
browser = context.browser
el = browser.find_element_by_id('username')
assert el
@when('he fills the bitbucket username field')
def fill_username(context):
browser = context.browser
el = browser.find_element_by_id('username')
el.send_keys(settings.BITBUCKET_USER)
btn = browser.find_element_by_id('login-submit')
btn.click()
@when('fills the bitbucket password field')
def fill_passwd(context):
browser = context.browser
el = browser.wait_element_become_present(
lambda: browser.find_element_by_id('password'))
el.send_keys(settings.BITBUCKET_PASSWD)
@when('clicks in the bitbucket login button')
def click_login_btn(context):
browser = context.browser
btn = browser.find_element_by_id('login-submit')
btn.click()
@then('his repositories beeing imported from bitbucket')
def user_sees_repositories_imported(context):
browser = context.browser
def fn():
repo_row = browser.wait_text_become_present('toxic-bbtest',
timeout=1)
return bool(repo_row)
r = browser.refresh_until(fn)
assert r
browser.get(settings.TOXICUI_URL + 'logout')
|
jucacrispim/toxicbuild
|
tests/integrations_functional/steps/bitbucket_steps.py
|
Python
|
agpl-3.0
| 2,423
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
import gym
import logging
import pickle
import ray
from ray.rllib.env.atari_wrappers import wrap_deepmind, is_atari
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.env.env_context import EnvContext
from ray.rllib.env.external_env import ExternalEnv
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.env.external_multi_agent_env import ExternalMultiAgentEnv
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.evaluation.interface import EvaluatorInterface
from ray.rllib.evaluation.sampler import AsyncSampler, SyncSampler
from ray.rllib.policy.sample_batch import MultiAgentBatch, DEFAULT_POLICY_ID
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.offline import NoopOutput, IOContext, OutputWriter, InputReader
from ray.rllib.offline.is_estimator import ImportanceSamplingEstimator
from ray.rllib.offline.wis_estimator import WeightedImportanceSamplingEstimator
from ray.rllib.models import ModelCatalog
from ray.rllib.models.preprocessors import NoPreprocessor
from ray.rllib.utils import merge_dicts
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils.debug import disable_log_once_globally, log_once, \
summarize, enable_periodic_logging
from ray.rllib.utils.filter import get_filter
from ray.rllib.utils.tf_run_builder import TFRunBuilder
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
logger = logging.getLogger(__name__)
# Handle to the current rollout worker, which will be set to the most recently
# created RolloutWorker in this process. This can be helpful to access in
# custom env or policy classes for debugging or advanced use cases.
_global_worker = None
@DeveloperAPI
def get_global_worker():
"""Returns a handle to the active rollout worker in this process."""
global _global_worker
return _global_worker
@DeveloperAPI
class RolloutWorker(EvaluatorInterface):
"""Common experience collection class.
This class wraps a policy instance and an environment class to
collect experiences from the environment. You can create many replicas of
this class as Ray actors to scale RL training.
This class supports vectorized and multi-agent policy evaluation (e.g.,
VectorEnv, MultiAgentEnv, etc.)
Examples:
>>> # Create a rollout worker and using it to collect experiences.
>>> worker = RolloutWorker(
... env_creator=lambda _: gym.make("CartPole-v0"),
... policy=PGTFPolicy)
>>> print(worker.sample())
SampleBatch({
"obs": [[...]], "actions": [[...]], "rewards": [[...]],
"dones": [[...]], "new_obs": [[...]]})
>>> # Creating a multi-agent rollout worker
>>> worker = RolloutWorker(
... env_creator=lambda _: MultiAgentTrafficGrid(num_cars=25),
... policies={
... # Use an ensemble of two policies for car agents
... "car_policy1":
... (PGTFPolicy, Box(...), Discrete(...), {"gamma": 0.99}),
... "car_policy2":
... (PGTFPolicy, Box(...), Discrete(...), {"gamma": 0.95}),
... # Use a single shared policy for all traffic lights
... "traffic_light_policy":
... (PGTFPolicy, Box(...), Discrete(...), {}),
... },
... policy_mapping_fn=lambda agent_id:
... random.choice(["car_policy1", "car_policy2"])
... if agent_id.startswith("car_") else "traffic_light_policy")
>>> print(worker.sample())
MultiAgentBatch({
"car_policy1": SampleBatch(...),
"car_policy2": SampleBatch(...),
"traffic_light_policy": SampleBatch(...)})
"""
@DeveloperAPI
@classmethod
def as_remote(cls,
num_cpus=None,
num_gpus=None,
memory=None,
object_store_memory=None,
resources=None):
return ray.remote(
num_cpus=num_cpus,
num_gpus=num_gpus,
memory=memory,
object_store_memory=object_store_memory,
resources=resources)(cls)
@DeveloperAPI
def __init__(self,
env_creator,
policy,
policy_mapping_fn=None,
policies_to_train=None,
tf_session_creator=None,
batch_steps=100,
batch_mode="truncate_episodes",
episode_horizon=None,
preprocessor_pref="deepmind",
sample_async=False,
compress_observations=False,
num_envs=1,
observation_filter="NoFilter",
clip_rewards=None,
clip_actions=True,
env_config=None,
model_config=None,
policy_config=None,
worker_index=0,
monitor_path=None,
log_dir=None,
log_level=None,
callbacks=None,
input_creator=lambda ioctx: ioctx.default_sampler_input(),
input_evaluation=frozenset([]),
output_creator=lambda ioctx: NoopOutput(),
remote_worker_envs=False,
remote_env_batch_wait_ms=0,
soft_horizon=False,
no_done_at_end=False,
seed=None,
_fake_sampler=False):
"""Initialize a rollout worker.
Arguments:
env_creator (func): Function that returns a gym.Env given an
EnvContext wrapped configuration.
policy (class|dict): Either a class implementing
Policy, or a dictionary of policy id strings to
(Policy, obs_space, action_space, config) tuples. If a
dict is specified, then we are in multi-agent mode and a
policy_mapping_fn should also be set.
policy_mapping_fn (func): A function that maps agent ids to
policy ids in multi-agent mode. This function will be called
each time a new agent appears in an episode, to bind that agent
to a policy for the duration of the episode.
policies_to_train (list): Optional whitelist of policies to train,
or None for all policies.
tf_session_creator (func): A function that returns a TF session.
This is optional and only useful with TFPolicy.
batch_steps (int): The target number of env transitions to include
in each sample batch returned from this worker.
batch_mode (str): One of the following batch modes:
"truncate_episodes": Each call to sample() will return a batch
of at most `batch_steps * num_envs` in size. The batch will
be exactly `batch_steps * num_envs` in size if
postprocessing does not change batch sizes. Episodes may be
truncated in order to meet this size requirement.
"complete_episodes": Each call to sample() will return a batch
of at least `batch_steps * num_envs` in size. Episodes will
not be truncated, but multiple episodes may be packed
within one batch to meet the batch size. Note that when
`num_envs > 1`, episode steps will be buffered until the
episode completes, and hence batches may contain
significant amounts of off-policy data.
episode_horizon (int): Whether to stop episodes at this horizon.
preprocessor_pref (str): Whether to prefer RLlib preprocessors
("rllib") or deepmind ("deepmind") when applicable.
sample_async (bool): Whether to compute samples asynchronously in
the background, which improves throughput but can cause samples
to be slightly off-policy.
compress_observations (bool): If true, compress the observations.
They can be decompressed with rllib/utils/compression.
num_envs (int): If more than one, will create multiple envs
and vectorize the computation of actions. This has no effect if
                the env already implements VectorEnv.
observation_filter (str): Name of observation filter to use.
clip_rewards (bool): Whether to clip rewards to [-1, 1] prior to
experience postprocessing. Setting to None means clip for Atari
only.
clip_actions (bool): Whether to clip action values to the range
specified by the policy action space.
env_config (dict): Config to pass to the env creator.
model_config (dict): Config to use when creating the policy model.
policy_config (dict): Config to pass to the policy. In the
multi-agent case, this config will be merged with the
per-policy configs specified by `policy`.
worker_index (int): For remote workers, this should be set to a
non-zero and unique value. This index is passed to created envs
through EnvContext so that envs can be configured per worker.
monitor_path (str): Write out episode stats and videos to this
directory if specified.
log_dir (str): Directory where logs can be placed.
log_level (str): Set the root log level on creation.
callbacks (dict): Dict of custom debug callbacks.
input_creator (func): Function that returns an InputReader object
for loading previous generated experiences.
input_evaluation (list): How to evaluate the policy performance.
This only makes sense to set when the input is reading offline
data. The possible values include:
- "is": the step-wise importance sampling estimator.
- "wis": the weighted step-wise is estimator.
- "simulation": run the environment in the background, but
use this data for evaluation only and never for learning.
output_creator (func): Function that returns an OutputWriter object
for saving generated experiences.
remote_worker_envs (bool): If using num_envs > 1, whether to create
those new envs in remote processes instead of in the current
                process. This adds overheads, but can make sense if your envs
                are expensive to step or reset.
remote_env_batch_wait_ms (float): Timeout that remote workers
are waiting when polling environments. 0 (continue when at
least one env is ready) is a reasonable default, but optimal
value could be obtained by measuring your environment
step / reset and model inference perf.
soft_horizon (bool): Calculate rewards but don't reset the
environment when the horizon is hit.
no_done_at_end (bool): Ignore the done=True at the end of the
episode and instead record done=False.
seed (int): Set the seed of both np and tf to this value to
                ensure each remote worker has unique exploration behavior.
_fake_sampler (bool): Use a fake (inf speed) sampler for testing.
"""
global _global_worker
_global_worker = self
policy_config = policy_config or {}
if (tf and policy_config.get("eager")
and not policy_config.get("no_eager_on_workers")):
tf.enable_eager_execution()
if log_level:
logging.getLogger("ray.rllib").setLevel(log_level)
if worker_index > 1:
disable_log_once_globally() # only need 1 worker to log
elif log_level == "DEBUG":
enable_periodic_logging()
env_context = EnvContext(env_config or {}, worker_index)
self.policy_config = policy_config
self.callbacks = callbacks or {}
self.worker_index = worker_index
model_config = model_config or {}
policy_mapping_fn = (policy_mapping_fn
or (lambda agent_id: DEFAULT_POLICY_ID))
if not callable(policy_mapping_fn):
raise ValueError("Policy mapping function not callable?")
self.env_creator = env_creator
self.sample_batch_size = batch_steps * num_envs
self.batch_mode = batch_mode
self.compress_observations = compress_observations
self.preprocessing_enabled = True
self.last_batch = None
self._fake_sampler = _fake_sampler
self.env = _validate_env(env_creator(env_context))
if isinstance(self.env, MultiAgentEnv) or \
isinstance(self.env, BaseEnv):
def wrap(env):
return env # we can't auto-wrap these env types
elif is_atari(self.env) and \
not model_config.get("custom_preprocessor") and \
preprocessor_pref == "deepmind":
# Deepmind wrappers already handle all preprocessing
self.preprocessing_enabled = False
if clip_rewards is None:
clip_rewards = True
def wrap(env):
env = wrap_deepmind(
env,
dim=model_config.get("dim"),
framestack=model_config.get("framestack"))
if monitor_path:
env = gym.wrappers.Monitor(env, monitor_path, resume=True)
return env
else:
def wrap(env):
if monitor_path:
env = gym.wrappers.Monitor(env, monitor_path, resume=True)
return env
self.env = wrap(self.env)
def make_env(vector_index):
return wrap(
env_creator(
env_context.copy_with_overrides(
vector_index=vector_index, remote=remote_worker_envs)))
self.tf_sess = None
policy_dict = _validate_and_canonicalize(policy, self.env)
self.policies_to_train = policies_to_train or list(policy_dict.keys())
# set numpy and python seed
if seed is not None:
np.random.seed(seed)
random.seed(seed)
if not hasattr(self.env, "seed"):
raise ValueError("Env doesn't support env.seed(): {}".format(
self.env))
self.env.seed(seed)
try:
import torch
torch.manual_seed(seed)
except ImportError:
logger.info("Could not seed torch")
if _has_tensorflow_graph(policy_dict) and not (tf and
tf.executing_eagerly()):
if (ray.is_initialized()
and ray.worker._mode() != ray.worker.LOCAL_MODE
and not ray.get_gpu_ids()):
logger.debug("Creating policy evaluation worker {}".format(
worker_index) +
" on CPU (please ignore any CUDA init errors)")
if not tf:
raise ImportError("Could not import tensorflow")
with tf.Graph().as_default():
if tf_session_creator:
self.tf_sess = tf_session_creator()
else:
self.tf_sess = tf.Session(
config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=True)))
with self.tf_sess.as_default():
# set graph-level seed
if seed is not None:
tf.set_random_seed(seed)
self.policy_map, self.preprocessors = \
self._build_policy_map(policy_dict, policy_config)
else:
self.policy_map, self.preprocessors = self._build_policy_map(
policy_dict, policy_config)
self.multiagent = set(self.policy_map.keys()) != {DEFAULT_POLICY_ID}
if self.multiagent:
if not ((isinstance(self.env, MultiAgentEnv)
or isinstance(self.env, ExternalMultiAgentEnv))
or isinstance(self.env, BaseEnv)):
raise ValueError(
"Have multiple policies {}, but the env ".format(
self.policy_map) +
"{} is not a subclass of BaseEnv, MultiAgentEnv or "
"ExternalMultiAgentEnv?".format(self.env))
self.filters = {
policy_id: get_filter(observation_filter,
policy.observation_space.shape)
for (policy_id, policy) in self.policy_map.items()
}
if self.worker_index == 0:
logger.info("Built filter map: {}".format(self.filters))
# Always use vector env for consistency even if num_envs = 1
self.async_env = BaseEnv.to_base_env(
self.env,
make_env=make_env,
num_envs=num_envs,
remote_envs=remote_worker_envs,
remote_env_batch_wait_ms=remote_env_batch_wait_ms)
self.num_envs = num_envs
if self.batch_mode == "truncate_episodes":
unroll_length = batch_steps
pack_episodes = True
elif self.batch_mode == "complete_episodes":
unroll_length = float("inf") # never cut episodes
pack_episodes = False # sampler will return 1 episode per poll
else:
raise ValueError("Unsupported batch mode: {}".format(
self.batch_mode))
self.io_context = IOContext(log_dir, policy_config, worker_index, self)
self.reward_estimators = []
for method in input_evaluation:
if method == "simulation":
logger.warning(
"Requested 'simulation' input evaluation method: "
"will discard all sampler outputs and keep only metrics.")
sample_async = True
elif method == "is":
ise = ImportanceSamplingEstimator.create(self.io_context)
self.reward_estimators.append(ise)
elif method == "wis":
wise = WeightedImportanceSamplingEstimator.create(
self.io_context)
self.reward_estimators.append(wise)
else:
raise ValueError(
"Unknown evaluation method: {}".format(method))
if sample_async:
self.sampler = AsyncSampler(
self.async_env,
self.policy_map,
policy_mapping_fn,
self.preprocessors,
self.filters,
clip_rewards,
unroll_length,
self.callbacks,
horizon=episode_horizon,
pack=pack_episodes,
tf_sess=self.tf_sess,
clip_actions=clip_actions,
blackhole_outputs="simulation" in input_evaluation,
soft_horizon=soft_horizon,
no_done_at_end=no_done_at_end)
self.sampler.start()
else:
self.sampler = SyncSampler(
self.async_env,
self.policy_map,
policy_mapping_fn,
self.preprocessors,
self.filters,
clip_rewards,
unroll_length,
self.callbacks,
horizon=episode_horizon,
pack=pack_episodes,
tf_sess=self.tf_sess,
clip_actions=clip_actions,
soft_horizon=soft_horizon,
no_done_at_end=no_done_at_end)
self.input_reader = input_creator(self.io_context)
assert isinstance(self.input_reader, InputReader), self.input_reader
self.output_writer = output_creator(self.io_context)
assert isinstance(self.output_writer, OutputWriter), self.output_writer
logger.debug(
"Created rollout worker with env {} ({}), policies {}".format(
self.async_env, self.env, self.policy_map))
@override(EvaluatorInterface)
def sample(self):
"""Evaluate the current policies and return a batch of experiences.
Return:
SampleBatch|MultiAgentBatch from evaluating the current policies.
"""
if self._fake_sampler and self.last_batch is not None:
return self.last_batch
if log_once("sample_start"):
logger.info("Generating sample batch of size {}".format(
self.sample_batch_size))
batches = [self.input_reader.next()]
steps_so_far = batches[0].count
# In truncate_episodes mode, never pull more than 1 batch per env.
# This avoids over-running the target batch size.
if self.batch_mode == "truncate_episodes":
max_batches = self.num_envs
else:
max_batches = float("inf")
while steps_so_far < self.sample_batch_size and len(
batches) < max_batches:
batch = self.input_reader.next()
steps_so_far += batch.count
batches.append(batch)
batch = batches[0].concat_samples(batches)
if self.callbacks.get("on_sample_end"):
self.callbacks["on_sample_end"]({"worker": self, "samples": batch})
# Always do writes prior to compression for consistency and to allow
# for better compression inside the writer.
self.output_writer.write(batch)
# Do off-policy estimation if needed
if self.reward_estimators:
for sub_batch in batch.split_by_episode():
for estimator in self.reward_estimators:
estimator.process(sub_batch)
if log_once("sample_end"):
logger.info("Completed sample batch:\n\n{}\n".format(
summarize(batch)))
if self.compress_observations == "bulk":
batch.compress(bulk=True)
elif self.compress_observations:
batch.compress()
if self._fake_sampler:
self.last_batch = batch
return batch
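  # Editor's note (illustrative, not part of the original file): with the default
  # batch_mode="truncate_episodes", batch_steps=100 and num_envs=4, sample() keeps
  # pulling from the input reader until it has collected ~400 env steps, but never
  # more than 4 batches, so the returned batch is roughly batch_steps * num_envs
  # steps; in "complete_episodes" mode the step target is the same but episodes
  # are never truncated to meet it.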
@DeveloperAPI
@ray.method(num_return_vals=2)
def sample_with_count(self):
"""Same as sample() but returns the count as a separate future."""
batch = self.sample()
return batch, batch.count
@override(EvaluatorInterface)
def get_weights(self, policies=None):
if policies is None:
policies = self.policy_map.keys()
return {
pid: policy.get_weights()
for pid, policy in self.policy_map.items() if pid in policies
}
@override(EvaluatorInterface)
def set_weights(self, weights):
for pid, w in weights.items():
self.policy_map[pid].set_weights(w)
@override(EvaluatorInterface)
def compute_gradients(self, samples):
if log_once("compute_gradients"):
logger.info("Compute gradients on:\n\n{}\n".format(
summarize(samples)))
if isinstance(samples, MultiAgentBatch):
grad_out, info_out = {}, {}
if self.tf_sess is not None:
builder = TFRunBuilder(self.tf_sess, "compute_gradients")
for pid, batch in samples.policy_batches.items():
if pid not in self.policies_to_train:
continue
grad_out[pid], info_out[pid] = (
self.policy_map[pid]._build_compute_gradients(
builder, batch))
grad_out = {k: builder.get(v) for k, v in grad_out.items()}
info_out = {k: builder.get(v) for k, v in info_out.items()}
else:
for pid, batch in samples.policy_batches.items():
if pid not in self.policies_to_train:
continue
grad_out[pid], info_out[pid] = (
self.policy_map[pid].compute_gradients(batch))
else:
grad_out, info_out = (
self.policy_map[DEFAULT_POLICY_ID].compute_gradients(samples))
info_out["batch_count"] = samples.count
if log_once("grad_out"):
logger.info("Compute grad info:\n\n{}\n".format(
summarize(info_out)))
return grad_out, info_out
@override(EvaluatorInterface)
def apply_gradients(self, grads):
if log_once("apply_gradients"):
logger.info("Apply gradients:\n\n{}\n".format(summarize(grads)))
if isinstance(grads, dict):
if self.tf_sess is not None:
builder = TFRunBuilder(self.tf_sess, "apply_gradients")
outputs = {
pid: self.policy_map[pid]._build_apply_gradients(
builder, grad)
for pid, grad in grads.items()
}
return {k: builder.get(v) for k, v in outputs.items()}
else:
return {
pid: self.policy_map[pid].apply_gradients(g)
for pid, g in grads.items()
}
else:
return self.policy_map[DEFAULT_POLICY_ID].apply_gradients(grads)
@override(EvaluatorInterface)
def learn_on_batch(self, samples):
if log_once("learn_on_batch"):
logger.info(
"Training on concatenated sample batches:\n\n{}\n".format(
summarize(samples)))
if isinstance(samples, MultiAgentBatch):
info_out = {}
to_fetch = {}
if self.tf_sess is not None:
builder = TFRunBuilder(self.tf_sess, "learn_on_batch")
else:
builder = None
for pid, batch in samples.policy_batches.items():
if pid not in self.policies_to_train:
continue
policy = self.policy_map[pid]
if builder and hasattr(policy, "_build_learn_on_batch"):
to_fetch[pid] = policy._build_learn_on_batch(
builder, batch)
else:
info_out[pid] = policy.learn_on_batch(batch)
info_out.update({k: builder.get(v) for k, v in to_fetch.items()})
else:
info_out = self.policy_map[DEFAULT_POLICY_ID].learn_on_batch(
samples)
if log_once("learn_out"):
logger.debug("Training out:\n\n{}\n".format(summarize(info_out)))
return info_out
@DeveloperAPI
def get_metrics(self):
"""Returns a list of new RolloutMetric objects from evaluation."""
out = self.sampler.get_metrics()
for m in self.reward_estimators:
out.extend(m.get_metrics())
return out
@DeveloperAPI
def foreach_env(self, func):
"""Apply the given function to each underlying env instance."""
envs = self.async_env.get_unwrapped()
if not envs:
return [func(self.async_env)]
else:
return [func(e) for e in envs]
@DeveloperAPI
def get_policy(self, policy_id=DEFAULT_POLICY_ID):
"""Return policy for the specified id, or None.
Arguments:
policy_id (str): id of policy to return.
"""
return self.policy_map.get(policy_id)
@DeveloperAPI
def for_policy(self, func, policy_id=DEFAULT_POLICY_ID):
"""Apply the given function to the specified policy."""
return func(self.policy_map[policy_id])
@DeveloperAPI
def foreach_policy(self, func):
"""Apply the given function to each (policy, policy_id) tuple."""
return [func(policy, pid) for pid, policy in self.policy_map.items()]
@DeveloperAPI
def foreach_trainable_policy(self, func):
"""Apply the given function to each (policy, policy_id) tuple.
This only applies func to policies in `self.policies_to_train`."""
return [
func(policy, pid) for pid, policy in self.policy_map.items()
if pid in self.policies_to_train
]
@DeveloperAPI
def sync_filters(self, new_filters):
"""Changes self's filter to given and rebases any accumulated delta.
Args:
new_filters (dict): Filters with new state to update local copy.
"""
assert all(k in new_filters for k in self.filters)
for k in self.filters:
self.filters[k].sync(new_filters[k])
@DeveloperAPI
def get_filters(self, flush_after=False):
"""Returns a snapshot of filters.
Args:
flush_after (bool): Clears the filter buffer state.
Returns:
return_filters (dict): Dict for serializable filters
"""
return_filters = {}
for k, f in self.filters.items():
return_filters[k] = f.as_serializable()
if flush_after:
f.clear_buffer()
return return_filters
@DeveloperAPI
def save(self):
filters = self.get_filters(flush_after=True)
state = {
pid: self.policy_map[pid].get_state()
for pid in self.policy_map
}
return pickle.dumps({"filters": filters, "state": state})
@DeveloperAPI
def restore(self, objs):
objs = pickle.loads(objs)
self.sync_filters(objs["filters"])
for pid, state in objs["state"].items():
self.policy_map[pid].set_state(state)
@DeveloperAPI
def set_global_vars(self, global_vars):
self.foreach_policy(lambda p, _: p.on_global_var_update(global_vars))
@DeveloperAPI
def export_policy_model(self, export_dir, policy_id=DEFAULT_POLICY_ID):
self.policy_map[policy_id].export_model(export_dir)
@DeveloperAPI
def export_policy_checkpoint(self,
export_dir,
filename_prefix="model",
policy_id=DEFAULT_POLICY_ID):
self.policy_map[policy_id].export_checkpoint(export_dir,
filename_prefix)
@DeveloperAPI
def stop(self):
self.async_env.stop()
def _build_policy_map(self, policy_dict, policy_config):
policy_map = {}
preprocessors = {}
for name, (cls, obs_space, act_space,
conf) in sorted(policy_dict.items()):
logger.debug("Creating policy for {}".format(name))
merged_conf = merge_dicts(policy_config, conf)
if self.preprocessing_enabled:
preprocessor = ModelCatalog.get_preprocessor_for_space(
obs_space, merged_conf.get("model"))
preprocessors[name] = preprocessor
obs_space = preprocessor.observation_space
else:
preprocessors[name] = NoPreprocessor(obs_space)
if isinstance(obs_space, gym.spaces.Dict) or \
isinstance(obs_space, gym.spaces.Tuple):
raise ValueError(
"Found raw Tuple|Dict space as input to policy. "
"Please preprocess these observations with a "
"Tuple|DictFlatteningPreprocessor.")
if tf and tf.executing_eagerly():
if hasattr(cls, "as_eager"):
cls = cls.as_eager()
if policy_config["eager_tracing"]:
cls = cls.with_tracing()
elif not issubclass(cls, TFPolicy):
pass # could be some other type of policy
else:
raise ValueError("This policy does not support eager "
"execution: {}".format(cls))
if tf:
with tf.variable_scope(name):
policy_map[name] = cls(obs_space, act_space, merged_conf)
else:
policy_map[name] = cls(obs_space, act_space, merged_conf)
if self.worker_index == 0:
logger.info("Built policy map: {}".format(policy_map))
logger.info("Built preprocessor map: {}".format(preprocessors))
return policy_map, preprocessors
def __del__(self):
if hasattr(self, "sampler") and isinstance(self.sampler, AsyncSampler):
self.sampler.shutdown = True
def _validate_and_canonicalize(policy, env):
if isinstance(policy, dict):
_validate_multiagent_config(policy)
return policy
elif not issubclass(policy, Policy):
raise ValueError("policy must be a rllib.Policy class")
else:
if (isinstance(env, MultiAgentEnv)
and not hasattr(env, "observation_space")):
raise ValueError(
"MultiAgentEnv must have observation_space defined if run "
"in a single-agent configuration.")
return {
DEFAULT_POLICY_ID: (policy, env.observation_space,
env.action_space, {})
}
def _validate_multiagent_config(policy, allow_none_graph=False):
for k, v in policy.items():
if not isinstance(k, str):
raise ValueError("policy keys must be strs, got {}".format(
type(k)))
if not isinstance(v, (tuple, list)) or len(v) != 4:
raise ValueError(
"policy values must be tuples/lists of "
"(cls or None, obs_space, action_space, config), got {}".
format(v))
if allow_none_graph and v[0] is None:
pass
elif not issubclass(v[0], Policy):
raise ValueError("policy tuple value 0 must be a rllib.Policy "
"class or None, got {}".format(v[0]))
if not isinstance(v[1], gym.Space):
raise ValueError(
"policy tuple value 1 (observation_space) must be a "
"gym.Space, got {}".format(type(v[1])))
if not isinstance(v[2], gym.Space):
raise ValueError("policy tuple value 2 (action_space) must be a "
"gym.Space, got {}".format(type(v[2])))
if not isinstance(v[3], dict):
raise ValueError("policy tuple value 3 (config) must be a dict, "
"got {}".format(type(v[3])))
def _validate_env(env):
# allow this as a special case (assumed gym.Env)
if hasattr(env, "observation_space") and hasattr(env, "action_space"):
return env
allowed_types = [gym.Env, MultiAgentEnv, ExternalEnv, VectorEnv, BaseEnv]
if not any(isinstance(env, tpe) for tpe in allowed_types):
raise ValueError(
"Returned env should be an instance of gym.Env, MultiAgentEnv, "
"ExternalEnv, VectorEnv, or BaseEnv. The provided env creator "
"function returned {} ({}).".format(env, type(env)))
return env
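# Illustrative sketch only (never invoked): any object that quacks like a gym.Env,
# i.e. exposes observation_space and action_space, is accepted by the duck-typing
# branch above. The fake class below is invented for the example.
def _example_duck_typed_env_passes():
    class _FakeEnv(object):
        observation_space = None
        action_space = None
    return _validate_env(_FakeEnv())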
def _has_tensorflow_graph(policy_dict):
for policy, _, _, _ in policy_dict.values():
if issubclass(policy, TFPolicy):
return True
return False
|
ujvl/ray-ng
|
rllib/evaluation/rollout_worker.py
|
Python
|
apache-2.0
| 35,580
|
import numpy as np
class Chart(object):
def __init__(self) -> None:
self.plot = None
self.curve = None
self.downsampling = 'peak'
self.clipToView = True
self.line_color = 'r'
self.ptr = 0
self.x = np.zeros(9000)
self.y = np.zeros(9000)
self.left_label = ''
self.left_label_units = ''
self.bottom_label = ''
self.bottom_label_units = ''
class DataStreamWindow(object):
def __init__(self) -> None:
self.qapp = None
self.win = None
self.charts_list = list()
self.columns_display = 1
self.background_color = 'w'
self.coordinate_system_color = 'b'
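# Illustrative wiring of the two plain data holders above; the label text and the
# final print are made up for the example and only run when executed directly.
if __name__ == "__main__":
    window = DataStreamWindow()
    chart = Chart()
    chart.left_label, chart.left_label_units = 'Signal', 'V'
    window.charts_list.append(chart)
    print(len(window.charts_list), chart.x.shape)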
|
Canicio/graphtiny
|
graphtiny/domain.py
|
Python
|
mit
| 701
|
#!/usr/bin/env python
# Generate overlay images in PNG format with transparency which can be
# used to label Splotch frames. This script can be called as a
# standalone program, see below for details. To label an entire
# directory of Splotch frames, use the driver script <splotchLabelFrames.sh>.
#
# (Klaus Reuter, RZG, Sep 2011)
def splotchColormap(time=-1.0, # for time>0, a time stamp is printed in the upper left corner
redshift=-1.0, # for redshift>0, a redshift stamp is printed
valMin=0.1, # minimum value for the log colorscale
valMax=1.e4, # maximum value for the log colorscale
outfile="overlay.png", # default file name of the overlay to be created
xinches=12, # width of the image | at 100 DPI, this corresponds to
yinches=8, # height of the image | the dimensions 1200x800
myFontSize="large",
myFontColor="white",
putMinerva=False): # place the MPG minerva logo in the top right corner
# import necessary modules
import numpy as np
from matplotlib import pyplot
import matplotlib as mpl
from subprocess import call
from math import pow
# *** set font properties for annotations ***
fprops=mpl.font_manager.FontProperties()
fprops.set_size(myFontSize)
#fprops.set_weight("bold")
# *** set up the matplotlib colormap based on a Splotch colormap ***
#$ cat OldSplotch.pal
#OldSplotch
#0100
#3
# 0 0 255
#128 255 128
#255 0 0
# See <http://matplotlib.sourceforge.net/api/colors_api.html>
# to understand what's going on ...
# <OldSplotch.pal> corresponds to:
OldSplotch = {'red': ((0.0, 0.0, 0.0), (0.5, 0.5, 0.5), (1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 0.0, 0.0)),
'blue': ((0.0, 1.0, 1.0), (0.5, 0.5, 0.5), (1.0, 0.0, 0.0))}
colormap = mpl.colors.LinearSegmentedColormap('colormap', OldSplotch)
# TODO implement a reader for Splotch palette files
# *** set up the figure ***
fig = pyplot.figure(figsize=(xinches,yinches))
# *** set up the colorbar ***
ax1 = fig.add_axes([0.90, 0.05, 0.02, 0.5])
norm = mpl.colors.LogNorm(vmin=valMin, vmax=valMax)
form = mpl.ticker.LogFormatterMathtext()
cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=colormap, norm=norm,
format=form, orientation='vertical')
# manipulate the style of the ticklabels, which requires a loop
for tl in cb1.ax.get_yticklabels():
tl.set_fontsize(myFontSize)
tl.set_color(myFontColor)
cb1.set_label('Temperature [K]', fontproperties=fprops, color=myFontColor)
# *** set up the time/redshift variable ***
if (time>=0.0):
timeString="age of universe=%.3f" % (time, )
timeString=timeString+" Gyr"
pyplot.figtext(x=0.025, y=0.950, s=timeString, fontdict=None,
fontproperties=fprops, color=myFontColor)
#
if (redshift>0):
timeString="redshift=%.3f" % (redshift, )
pyplot.figtext(x=0.025, y=0.910, s=timeString, fontdict=None,
fontproperties=fprops, color=myFontColor)
# Minerva needs an intermediate call of the ImageMagick tools
if putMinerva:
plotFile="./splotchColormapTmp.png"
else:
plotFile=outfile
# *** finally, plot the image and write it to a png file ***
pyplot.plot()
F=pyplot.gcf()
myDPI=100
F.savefig(plotFile, transparent=True, dpi=myDPI)
# *** put a logo (e.g. MPG Minerva) on top using ImageMagick convert ***
if putMinerva:
minervaFile="__INSERT_VALID_PATH__/minerva-white-96.png"
xoffset=str(int( (xinches*myDPI)*0.895 ))
yoffset=str(int( (yinches*myDPI)*0.005 ))
#print (xoffset, yoffset)
convertCommand="/usr/bin/env convert "+plotFile+" "+minervaFile+" -geometry +"+xoffset+"+"+yoffset+" -composite -format png "+outfile
call(convertCommand, shell=True)
# *** END SplotchColormap() ***
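# Illustrative, never-invoked sketch of a direct programmatic call, mirroring the
# CLI driver below; the output file name, time stamp and colour-scale bounds are
# made up and only shown to document the keyword interface.
def splotchColormapExample():
    splotchColormap(time=1.25, valMin=1.0e2, valMax=1.0e6,
                    outfile="frame_0001_overlay.png")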
#
# *** Allow this Python module to be run as a standalone script. ***
#
if __name__ == "__main__":
import sys
import getopt
#
try:
opts, args = getopt.getopt(sys.argv[1:],
"t:r:c:d:o:", # the "-" options, below are the "--" options
["time=", "redshift=", "colormin=", "colormax=", "outfile="])
except getopt.GetoptError, err:
print str(err)
sys.exit(2)
#
myOutFile = "overlay.png"
myTime = -1.0
myRedshift = -1.0
myMinVal = 1
myMaxVal = 100
#
for o, a in opts:
# print (o,a)
if o in ("-t", "--time"):
myTime = float(a)
elif o in ("-r", "--redshift"):
myRedshift = float(a)
elif o in ("-c", "--colormin"):
myMinVal = pow(10.0, float(a))
elif o in ("-d", "--colormax"):
myMaxVal = pow(10.0, float(a))
elif o in ("-o", "--outfile"):
myOutFile = a
else:
assert False, "unhandled option"
#
splotchColormap(outfile=myOutFile,
time=myTime,
redshift=myRedshift,
valMin=myMinVal,
valMax=myMaxVal)
# EOF
|
deusconsortium/splotch
|
labeltool/splotchColormap.py
|
Python
|
gpl-2.0
| 5,402
|
"""Trivial test program."""
def helloworld():
print "hello world"
|
pombredanne/pytype
|
pytype/test_data/simple.py
|
Python
|
apache-2.0
| 70
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from wtforms.fields import TextAreaField
from wtforms.fields.html5 import URLField
from wtforms.validators import URL, Optional
from indico.util.i18n import _
from indico.web.forms.base import IndicoForm
from indico.web.forms.widgets import CKEditorWidget
class LegalMessagesForm(IndicoForm):
network_protected_disclaimer = TextAreaField(_("Network-protected information disclaimer"), widget=CKEditorWidget())
restricted_disclaimer = TextAreaField(_("Restricted information disclaimer"), widget=CKEditorWidget())
tos_url = URLField(_('URL'), [Optional(), URL()],
description=_("The URL to an external page with terms and conditions"))
tos = TextAreaField(_("Text"), widget=CKEditorWidget(),
description=_('Only used if no URL is provided'))
privacy_policy_url = URLField(_('URL'), [Optional(), URL()],
description=_("The URL to an external page with the privacy policy"))
privacy_policy = TextAreaField(_("Text"), widget=CKEditorWidget(),
description=_('Only used if no URL is provided'))
|
mic4ael/indico
|
indico/modules/legal/forms.py
|
Python
|
mit
| 1,387
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import urllib
import uuid
import logging
import re
from urlparse import urlsplit, urlunsplit
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.conf import settings
from django.template import RequestContext, loader
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext as _
from django.utils import simplejson as json
from django.shortcuts import get_object_or_404
from owslib.wms import WebMapService
from owslib.wfs import WebFeatureService
from owslib.tms import TileMapService
from owslib.csw import CatalogueServiceWeb
from arcrest import Folder as ArcFolder, MapService as ArcMapService
from geoserver.catalog import Catalog
from geonode.services.models import Service, Layer, ServiceLayer, WebServiceHarvestLayersJob
from geonode.security.views import _perms_info_json
from geonode.utils import bbox_to_wkt
from geonode.services.forms import CreateServiceForm, ServiceForm
from geonode.utils import mercator_to_llbbox
from geonode.layers.utils import create_thumbnail
from geonode.geoserver.helpers import set_attributes
from geonode.base.models import Link
logger = logging.getLogger("geonode.core.layers.views")
_user = settings.OGC_SERVER['default']['USER']
_password = settings.OGC_SERVER['default']['PASSWORD']
OGP_ABSTRACT = _("""
The Open Geoportal is a consortium comprised of contributions of several universities and organizations to help
facilitate the discovery and acquisition of geospatial data across many organizations and platforms. Current partners
include: Harvard, MIT, MassGIS, Princeton, Columbia, Stanford, UC Berkeley, UCLA, Yale, and UConn. Built on open source
technology, The Open Geoportal provides organizations the opportunity to share thousands of geospatial data layers,
maps, metadata, and development resources through a single common interface.
""")
@login_required
def services(request):
"""
This view shows the list of all registered services
"""
services = Service.objects.all()
return render_to_response("services/service_list.html", RequestContext(request, {
'services': services,
}))
@login_required
def register_service(request):
"""
This view is used for manually registering a new service, with only URL as a
parameter.
"""
if request.method == "GET":
service_form = CreateServiceForm()
return render_to_response('services/service_register.html',
RequestContext(request, {
'create_service_form': service_form
}))
elif request.method == 'POST':
# Register a new Service
service_form = CreateServiceForm(request.POST)
if service_form.is_valid():
url = _clean_url(service_form.cleaned_data['url'])
# method = request.POST.get('method')
# type = request.POST.get('type')
# name = slugify(request.POST.get('name'))
type = service_form.cleaned_data["type"]
server = None
if type == "AUTO":
type, server = _verify_service_type(url)
if type is None:
return HttpResponse('Could not determine server type', status=400)
if "user" in request.POST and "password" in request.POST:
user = request.POST.get('user')
password = request.POST.get('password')
else:
user = None
password = None
if type in ["WMS", "OWS"]:
return _process_wms_service(url, type, user, password, wms=server, owner=request.user)
elif type == "REST":
return _register_arcgis_url(url, user, password, owner=request.user)
elif type == "CSW":
return _register_harvested_service(url, user, password, owner=request.user)
elif type == "OGP":
return _register_ogp_service(url, owner=request.user)
else:
return HttpResponse('Not Implemented (Yet)', status=501)
elif request.method == 'PUT':
# Update a previously registered Service
return HttpResponse('Not Implemented (Yet)', status=501)
elif request.method == 'DELETE':
# Delete a previously registered Service
return HttpResponse('Not Implemented (Yet)', status=501)
else:
return HttpResponse('Invalid Request', status=400)
def register_service_by_type(request):
"""
Register a service based on a specified type
"""
url = request.POST.get("url")
type = request.POST.get("type")
url = _clean_url(url)
services = Service.objects.filter(base_url=url)
if services.count() > 0:
return
type, server = _verify_service_type(url, type)
if type == "WMS" or type == "OWS":
return _process_wms_service(url, type, None, None, wms=server)
elif type == "REST":
return _register_arcgis_url(url, None, None)
def _is_unique(url):
"""
Determine if a service is already registered based on matching url
"""
return Service.objects.filter(base_url=url).count() == 0
def _clean_url(base_url):
"""
Remove all parameters from a URL
"""
urlprop = urlsplit(base_url)
url = urlunsplit(
(urlprop.scheme, urlprop.netloc, urlprop.path, None, None))
return url
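# Illustrative, never-invoked check of the helper above; the URL is made up.
# The query string and fragment are dropped, scheme/host/path are kept untouched.
def _clean_url_example():
    cleaned = _clean_url(
        "http://example.com/geoserver/wms?service=WMS&request=GetCapabilities")
    assert cleaned == "http://example.com/geoserver/wms"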
def _get_valid_name(proposed_name):
"""
Return a unique slug name for a service
"""
slug_name = slugify(proposed_name)
name = slug_name
if len(slug_name) > 40:
name = slug_name[:40]
existing_service = Service.objects.filter(name=name)
iter = 1
while existing_service.count() > 0:
name = slug_name + str(iter)
existing_service = Service.objects.filter(name=name)
iter += 1
return name
def _verify_service_type(base_url, service_type=None):
"""
Try to determine service type by process of elimination
"""
if service_type in ['WMS', 'OWS', None]:
try:
service = WebMapService(base_url)
except:
pass
else:
return ['WMS', service]
if service_type in ['WFS', 'OWS', None]:
try:
servicewfs = WebFeatureService(base_url)
except:
pass
else:
return ['WFS', servicewfs]
if service_type in ['TMS', None]:
try:
service = TileMapService(base_url)
except:
pass
else:
return ['TMS', service]
if service_type in ['REST', None]:
try:
service = ArcFolder(base_url)
except:
pass
else:
service.services
return ['REST', service]
if service_type in ['CSW', None]:
try:
service = CatalogueServiceWeb(base_url)
except:
raise
else:
return ['CSW', service]
if service_type in ['OGP', None]:
# Just use a specific OGP URL for now
if base_url == settings.OGP_URL:
return ["OGP", None]
return [None, None]
def _process_wms_service(url, type, username, password, wms=None, owner=None, parent=None):
"""
Create a new WMS/OWS service, cascade it if necessary (i.e. if Web Mercator not available)
"""
if wms is None:
wms = WebMapService(url)
try:
base_url = _clean_url(
wms.getOperationByName('GetMap').methods['Get']['url'])
if base_url and base_url != url:
url = base_url
wms = WebMapService(base_url)
except:
logger.info(
"Could not retrieve GetMap url, using originally supplied URL %s" % url)
pass
try:
service = Service.objects.get(base_url=url)
return_dict = [{'status': 'ok',
'msg': _("This is an existing service"),
'service_id': service.pk,
'service_name': service.name,
'service_title': service.title
}]
return HttpResponse(json.dumps(return_dict),
mimetype='application/json',
status=200)
except:
pass
title = wms.identification.title
if title:
name = _get_valid_name(title)
else:
name = _get_valid_name(urlsplit(url).netloc)
try:
supported_crs = ','.join(wms.contents.itervalues().next().crsOptions)
except:
supported_crs = None
if supported_crs and re.search('EPSG:900913|EPSG:3857|EPSG:102100', supported_crs):
return _register_indexed_service(type, url, name, username, password, wms=wms, owner=owner, parent=parent)
else:
return _register_cascaded_service(url, type, name, username, password, wms=wms, owner=owner, parent=parent)
def _register_cascaded_service(url, type, name, username, password, wms=None, owner=None, parent=None):
"""
Register a service as cascading WMS
"""
try:
service = Service.objects.get(base_url=url)
return_dict = {}
return_dict['service_id'] = service.pk
return_dict['msg'] = "This is an existing Service"
return HttpResponse(json.dumps(return_dict),
mimetype='application/json',
status=200)
except:
# TODO: Handle this error properly
pass
if wms is None:
wms = WebMapService(url)
# TODO: Make sure we are parsing all service level metadata
    # TODO: Handle for setting ServiceProfileRole
service = Service.objects.create(base_url=url,
type=type,
method='C',
name=name,
version=wms.identification.version,
title=wms.identification.title,
abstract=wms.identification.abstract,
online_resource=wms.provider.url,
owner=owner,
parent=parent)
service.keywords = ','.join(wms.identification.keywords)
service.save()
service.set_default_permissions()
if type in ['WMS', 'OWS']:
# Register the Service with GeoServer to be cascaded
cat = Catalog(settings.OGC_SERVER['default']['LOCATION'] + "rest",
_user, _password)
cascade_ws = cat.get_workspace(name)
if cascade_ws is None:
cascade_ws = cat.create_workspace(
name, "http://geonode.org/cascade")
# TODO: Make sure there isn't an existing store with that name, and
# deal with it if there is
try:
cascade_store = cat.get_store(name, cascade_ws)
except:
cascade_store = cat.create_wmsstore(
name, cascade_ws, username, password)
cascade_store.capabilitiesURL = url
cascade_store.type = "WMS"
cat.save(cascade_store)
available_resources = cascade_store.get_resources(available=True)
elif type == 'WFS':
# Register the Service with GeoServer to be cascaded
cat = Catalog(settings.OGC_SERVER['default']['LOCATION'] + "rest",
_user, _password)
# Can we always assume that it is geonode?
cascade_ws = cat.get_workspace(settings.CASCADE_WORKSPACE)
if cascade_ws is None:
cascade_ws = cat.create_workspace(
settings.CASCADE_WORKSPACE, "http://geonode.org/cascade")
try:
wfs_ds = cat.get_store(name, cascade_ws)
except:
wfs_ds = cat.create_datastore(name, cascade_ws)
connection_params = {
"WFSDataStoreFactory:MAXFEATURES": "0",
"WFSDataStoreFactory:TRY_GZIP": "true",
"WFSDataStoreFactory:PROTOCOL": "false",
"WFSDataStoreFactory:LENIENT": "true",
"WFSDataStoreFactory:TIMEOUT": "3000",
"WFSDataStoreFactory:BUFFER_SIZE": "10",
"WFSDataStoreFactory:ENCODING": "UTF-8",
"WFSDataStoreFactory:WFS_STRATEGY": "nonstrict",
"WFSDataStoreFactory:GET_CAPABILITIES_URL": url,
}
if username and password:
connection_params["WFSDataStoreFactory:USERNAME"] = username
connection_params["WFSDataStoreFactory:PASSWORD"] = password
wfs_ds.connection_parameters = connection_params
cat.save(wfs_ds)
available_resources = wfs_ds.get_resources(available=True)
# Save the Service record
service, created = Service.objects.get_or_create(type=type,
method='C',
base_url=url,
name=name,
owner=owner)
service.save()
service.set_default_permissions()
elif type == 'WCS':
return HttpResponse('Not Implemented (Yet)', status=501)
else:
return HttpResponse(
'Invalid Method / Type combo: ' +
'Only Cascaded WMS, WFS and WCS supported',
mimetype="text/plain",
status=400)
message = "Service %s registered" % service.name
return_dict = [{'status': 'ok',
'msg': message,
'service_id': service.pk,
'service_name': service.name,
'service_title': service.title,
'available_layers': available_resources
}]
if settings.USE_QUEUE:
# Create a layer import job
WebServiceHarvestLayersJob.objects.get_or_create(service=service)
else:
_register_cascaded_layers(service)
return HttpResponse(json.dumps(return_dict),
mimetype='application/json',
status=200)
def _register_cascaded_layers(service, owner=None):
"""
Register layers for a cascading WMS
"""
if service.type == 'WMS' or service.type == "OWS":
cat = Catalog(settings.OGC_SERVER['default']['LOCATION'] + "rest",
_user, _password)
# Can we always assume that it is geonode?
# Should cascading layers have a separate workspace?
cascade_ws = cat.get_workspace(service.name)
if cascade_ws is None:
cascade_ws = cat.create_workspace(service.name, 'cascade')
try:
store = cat.get_store(service.name, cascade_ws)
except Exception:
store = cat.create_wmsstore(service.name, cascade_ws)
cat.save(store)
wms = WebMapService(service.base_url)
layers = list(wms.contents)
count = 0
for layer in layers:
lyr = cat.get_resource(layer, store, cascade_ws)
if lyr is None:
if service.type in ["WMS", "OWS"]:
resource = cat.create_wmslayer(cascade_ws, store, layer)
elif service.type == "WFS":
resource = cat.create_wfslayer(cascade_ws, store, layer)
if resource:
bbox = resource.latlon_bbox
cascaded_layer, created = Layer.objects.get_or_create(
typename="%s:%s" % (cascade_ws.name, resource.name),
service=service,
defaults={
"name": resource.name,
"workspace": cascade_ws.name,
"store": store.name,
"storeType": store.resource_type,
"title": resource.title or 'No title provided',
"abstract": resource.abstract or 'No abstract provided',
"owner": None,
"uuid": str(uuid.uuid4()),
"bbox_x0": bbox[0],
"bbox_x1": bbox[1],
"bbox_y0": bbox[2],
"bbox_y1": bbox[3],
})
if created:
cascaded_layer.save()
if cascaded_layer is not None and cascaded_layer.bbox is None:
cascaded_layer._populate_from_gs(
gs_resource=resource)
cascaded_layer.set_default_permissions()
service_layer, created = ServiceLayer.objects.get_or_create(
service=service,
typename=cascaded_layer.name
)
service_layer.layer = cascaded_layer
                    service_layer.title = cascaded_layer.title
                    service_layer.description = cascaded_layer.abstract
service_layer.styles = cascaded_layer.styles
service_layer.save()
count += 1
else:
logger.error(
"Resource %s from store %s could not be saved as layer" % (layer, store.name))
message = "%d Layers Registered" % count
return_dict = {'status': 'ok', 'msg': message}
return HttpResponse(json.dumps(return_dict),
mimetype='application/json',
status=200)
elif service.type == 'WCS':
return HttpResponse('Not Implemented (Yet)', status=501)
else:
return HttpResponse('Invalid Service Type', status=400)
def _register_indexed_service(type, url, name, username, password, verbosity=False, wms=None, owner=None, parent=None):
"""
Register a service - WMS or OWS currently supported
"""
if type in ['WMS', "OWS", "HGL"]:
# TODO: Handle for errors from owslib
if wms is None:
wms = WebMapService(url)
# TODO: Make sure we are parsing all service level metadata
# TODO: Handle for setting ServiceProfileRole
try:
service = Service.objects.get(base_url=url)
return_dict = {}
return_dict['service_id'] = service.pk
return_dict['msg'] = "This is an existing Service"
return HttpResponse(json.dumps(return_dict),
mimetype='application/json',
status=200)
except:
pass
service = Service.objects.create(base_url=url,
type=type,
method='I',
name=name,
version=wms.identification.version,
title=wms.identification.title or name,
abstract=wms.identification.abstract or _(
"Not provided"),
online_resource=wms.provider.url,
owner=owner,
parent=parent)
service.keywords = ','.join(wms.identification.keywords)
service.save()
service.set_default_permissions()
available_resources = []
for layer in list(wms.contents):
available_resources.append([wms[layer].name, wms[layer].title])
if settings.USE_QUEUE:
# Create a layer import job
WebServiceHarvestLayersJob.objects.get_or_create(service=service)
else:
_register_indexed_layers(service, wms=wms)
message = "Service %s registered" % service.name
return_dict = [{'status': 'ok',
'msg': message,
'service_id': service.pk,
'service_name': service.name,
'service_title': service.title,
'available_layers': available_resources
}]
return HttpResponse(json.dumps(return_dict),
mimetype='application/json',
status=200)
elif type == 'WFS':
return HttpResponse('Not Implemented (Yet)', status=501)
elif type == 'WCS':
return HttpResponse('Not Implemented (Yet)', status=501)
else:
return HttpResponse(
'Invalid Method / Type combo: ' +
'Only Indexed WMS, WFS and WCS supported',
mimetype="text/plain",
status=400)
def _register_indexed_layers(service, wms=None, verbosity=False):
"""
Register layers for an indexed service (only WMS/OWS currently supported)
"""
logger.info("Registering layers for %s" % service.base_url)
if re.match("WMS|OWS", service.type):
wms = wms or WebMapService(service.base_url)
count = 0
for layer in list(wms.contents):
wms_layer = wms[layer]
if wms_layer is None or wms_layer.name is None:
continue
logger.info("Registering layer %s" % wms_layer.name)
if verbosity:
print "Importing layer %s" % layer
layer_uuid = str(uuid.uuid1())
try:
keywords = map(lambda x: x[:100], wms_layer.keywords)
except:
keywords = []
if not wms_layer.abstract:
abstract = ""
else:
abstract = wms_layer.abstract
srid = None
# Some ArcGIS WMSServers indicate they support 900913 but really
# don't
if 'EPSG:900913' in wms_layer.crsOptions and "MapServer/WmsServer" not in service.base_url:
srid = 'EPSG:900913'
elif len(wms_layer.crsOptions) > 0:
matches = re.findall(
'EPSG\:(3857|102100|102113)', ' '.join(wms_layer.crsOptions))
if matches:
srid = 'EPSG:%s' % matches[0]
if srid is None:
message = "%d Incompatible projection - try setting the service as cascaded" % count
return_dict = {'status': 'ok', 'msg': message}
return HttpResponse(json.dumps(return_dict),
mimetype='application/json',
status=200)
bbox = list(
wms_layer.boundingBoxWGS84 or (-179.0, -89.0, 179.0, 89.0))
# Need to check if layer already exists??
saved_layer, created = Layer.objects.get_or_create(
typename=wms_layer.name,
service=service,
defaults=dict(
name=wms_layer.name,
store=service.name, # ??
storeType="remoteStore",
workspace="remoteWorkspace",
title=wms_layer.title or wms_layer.name,
abstract=abstract or _("Not provided"),
uuid=layer_uuid,
owner=None,
srid=srid,
bbox_x0=bbox[0],
bbox_x1=bbox[2],
bbox_y0=bbox[1],
bbox_y1=bbox[3]
)
)
if created:
saved_layer.save()
saved_layer.set_default_permissions()
saved_layer.keywords.add(*keywords)
set_attributes(saved_layer)
service_layer, created = ServiceLayer.objects.get_or_create(
typename=wms_layer.name,
service=service
)
service_layer.layer = saved_layer
service_layer.title = wms_layer.title
service_layer.description = wms_layer.abstract
service_layer.styles = wms_layer.styles
service_layer.save()
count += 1
message = "%d Layers Registered" % count
return_dict = {'status': 'ok', 'msg': message}
return HttpResponse(json.dumps(return_dict),
mimetype='application/json',
status=200)
elif service.type == 'WFS':
return HttpResponse('Not Implemented (Yet)', status=501)
elif service.type == 'WCS':
return HttpResponse('Not Implemented (Yet)', status=501)
else:
return HttpResponse('Invalid Service Type', status=400)
def _register_harvested_service(url, username, password, csw=None, owner=None):
"""
Register a CSW service, then step through results (or queue for asynchronous harvesting)
"""
try:
service = Service.objects.get(base_url=url)
return_dict = [{
'status': 'ok',
'service_id': service.pk,
'service_name': service.name,
'service_title': service.title,
'msg': 'This is an existing Service'
}]
return HttpResponse(json.dumps(return_dict),
mimetype='application/json',
status=200)
except:
pass
if csw is None:
csw = CatalogueServiceWeb(url)
service = Service.objects.create(base_url=url,
type='CSW',
method='H',
name=_get_valid_name(
csw.identification.title or url),
title=csw.identification.title,
version=csw.identification.version,
abstract=csw.identification.abstract,
owner=owner)
service.keywords = ','.join(csw.identification.keywords)
    service.save()
service.set_default_permissions()
message = "Service %s registered" % service.name
return_dict = [{'status': 'ok',
'msg': message,
'service_id': service.pk,
'service_name': service.name,
'service_title': service.title
}]
if settings.USE_QUEUE:
# Create a layer import job
WebServiceHarvestLayersJob.objects.get_or_create(service=service)
else:
_harvest_csw(service)
return HttpResponse(json.dumps(return_dict),
mimetype='application/json',
status=200)
def _harvest_csw(csw, maxrecords=10, totalrecords=float('inf')):
"""
Step through CSW results, and if one seems to be a WMS or Arc REST service then register it
"""
stop = 0
flag = 0
src = CatalogueServiceWeb(csw.base_url)
while stop == 0:
if flag == 0: # first run, start from 0
startposition = 0
else: # subsequent run, startposition is now paged
startposition = src.results['nextrecord']
src.getrecords(
esn='summary', startposition=startposition, maxrecords=maxrecords)
max = min(src.results['matches'], totalrecords)
if src.results['nextrecord'] == 0 \
or src.results['returned'] == 0 \
or src.results['nextrecord'] > max: # end the loop, exhausted all records or max records to process
stop = 1
break
# harvest each record to destination CSW
for record in list(src.records):
record = src.records[record]
known_types = {}
for ref in record.references:
if ref["scheme"] == "OGC:WMS" or \
"service=wms&request=getcapabilities" in urllib.unquote(ref["url"]).lower():
print "WMS:%s" % ref["url"]
known_types["WMS"] = ref["url"]
if ref["scheme"] == "OGC:WFS" or \
"service=wfs&request=getcapabilities" in urllib.unquote(ref["url"]).lower():
print "WFS:%s" % ref["url"]
known_types["WFS"] = ref["url"]
if ref["scheme"] == "ESRI":
print "ESRI:%s" % ref["url"]
known_types["REST"] = ref["url"]
if "WMS" in known_types:
type = "OWS" if "WFS" in known_types else "WMS"
try:
_process_wms_service(
known_types["WMS"], type, None, None, parent=csw)
except Exception, e:
logger.error("Error registering %s:%s" %
(known_types["WMS"], str(e)))
elif "REST" in known_types:
try:
_register_arcgis_url(ref["url"], None, None, parent=csw)
except Exception, e:
logger.error("Error registering %s:%s" %
(known_types["REST"], str(e)))
flag = 1
stop = 0
def _register_arcgis_url(url, username, password, owner=None, parent=None):
"""
Register an ArcGIS REST service URL
"""
# http://maps1.arcgisonline.com/ArcGIS/rest/services
baseurl = _clean_url(url)
if re.search("\/MapServer\/*(f=json)*", baseurl):
# This is a MapService
arcserver = ArcMapService(baseurl)
if isinstance(arcserver, ArcMapService) and arcserver.spatialReference.wkid in [102100, 3857, 900913]:
return_json = [_process_arcgis_service(arcserver, owner=owner, parent=parent)]
else:
return_json = [{'msg': _("Could not find any layers in a compatible projection.")}]
else:
# This is a Folder
arcserver = ArcFolder(baseurl)
return_json = _process_arcgis_folder(
arcserver, services=[], owner=owner, parent=parent)
return HttpResponse(json.dumps(return_json),
mimetype='application/json',
status=200)
def _register_arcgis_layers(service, arc=None):
"""
Register layers from an ArcGIS REST service
"""
arc = arc or ArcMapService(service.base_url)
for layer in arc.layers:
valid_name = slugify(layer.name)
count = 0
layer_uuid = str(uuid.uuid1())
bbox = [layer.extent.xmin, layer.extent.ymin,
layer.extent.xmax, layer.extent.ymax]
typename = layer.id
existing_layer = None
try:
existing_layer = Layer.objects.get(
typename=typename, service=service)
except Layer.DoesNotExist:
pass
llbbox = mercator_to_llbbox(bbox)
if existing_layer is None:
# Need to check if layer already exists??
saved_layer, created = Layer.objects.get_or_create(
typename=typename,
service=service,
defaults=dict(
name=valid_name,
store=service.name, # ??
storeType="remoteStore",
workspace="remoteWorkspace",
title=layer.name,
abstract=layer._json_struct[
'description'] or _("Not provided"),
uuid=layer_uuid,
owner=None,
srid="EPSG:%s" % layer.extent.spatialReference.wkid,
bbox_x0=llbbox[0],
bbox_x1=llbbox[2],
bbox_y0=llbbox[1],
bbox_y1=llbbox[3],
)
)
saved_layer.set_default_permissions()
saved_layer.save()
service_layer, created = ServiceLayer.objects.get_or_create(
service=service,
typename=layer.id
)
service_layer.layer = saved_layer
            service_layer.title = layer.name
            service_layer.description = saved_layer.abstract
service_layer.styles = None
service_layer.save()
create_arcgis_links(saved_layer)
count += 1
message = "%d Layers Registered" % count
return_dict = {'status': 'ok', 'msg': message}
return return_dict
def _process_arcgis_service(arcserver, owner=None, parent=None):
"""
Create a Service model instance for an ArcGIS REST service
"""
arc_url = _clean_url(arcserver.url)
services = Service.objects.filter(base_url=arc_url)
if services.count() > 0:
service = services[0]
return_dict = [{
'status': 'ok',
'service_id': service.pk,
'service_name': service.name,
'service_title': service.title,
'msg': 'This is an existing Service'
}]
return return_dict
name = _get_valid_name(arcserver.mapName or arc_url)
service = Service.objects.create(base_url=arc_url, name=name,
type='REST',
method='I',
title=arcserver.mapName,
abstract=arcserver.serviceDescription,
online_resource=arc_url,
owner=owner,
parent=parent)
service.set_default_permissions()
available_resources = []
for layer in list(arcserver.layers):
available_resources.append([layer.id, layer.name])
if settings.USE_QUEUE:
# Create a layer import job
WebServiceHarvestLayersJob.objects.get_or_create(service=service)
else:
_register_arcgis_layers(service, arc=arcserver)
message = "Service %s registered" % service.name
return_dict = {'status': 'ok',
'msg': message,
'service_id': service.pk,
'service_name': service.name,
'service_title': service.title,
'available_layers': available_resources
}
return return_dict
def _process_arcgis_folder(folder, services=None, owner=None, parent=None):
    """
    Iterate through folders and services in an ArcGIS REST service folder
    """
    if services is None:
        services = []
for service in folder.services:
return_dict = {}
if not isinstance(service, ArcMapService):
return_dict[
'msg'] = 'Service could not be identified as an ArcMapService, URL: %s' % service.url
else:
if service.spatialReference.wkid in [102100, 3857, 900913]:
return_dict = _process_arcgis_service(
service, owner, parent=parent)
else:
return_dict['msg'] = _("Could not find any layers in a compatible projection: \
The spatial id was: %s and the url %s" % (service.spatialReference.wkid, service.url))
services.append(return_dict)
for subfolder in folder.folders:
_process_arcgis_folder(subfolder, services, owner)
return services
def _register_ogp_service(url, owner=None):
"""
Register OpenGeoPortal as a service
"""
services = Service.objects.filter(base_url=url)
if services.count() > 0:
service = services[0]
return_dict = [{
'status': 'ok',
'service_id': service.pk,
'service_name': service.name,
'service_title': service.title,
'msg': 'This is an existing Service'
}]
return return_dict
service = Service.objects.create(base_url=url,
type="OGP",
method='H',
name="OpenGeoPortal",
title="OpenGeoPortal",
abstract=OGP_ABSTRACT,
owner=owner)
service.set_default_permissions()
if settings.USE_QUEUE:
# Create a layer import job
WebServiceHarvestLayersJob.objects.get_or_create(service=service)
else:
_harvest_ogp_layers(service, owner=owner)
message = "Service %s registered" % service.name
return_dict = [{'status': 'ok',
'msg': message,
'service_id': service.pk,
'service_name': service.name,
'service_title': service.title
}]
return HttpResponse(json.dumps(return_dict),
mimetype='application/json',
status=200)
def _harvest_ogp_layers(service, maxrecords=10, start=0, totalrecords=float('inf'), owner=None, institution=None):
"""
Query OpenGeoPortal's solr instance for layers.
"""
query = "?q=_val_:%22sum(sum(product(9.0,map(sum(map(MinX,-180.0,180,1,0)," + \
"map(MaxX,-180.0,180.0,1,0),map(MinY,-90.0,90.0,1,0),map(MaxY,-90.0,90.0,1,0)),4,4,1,0))),0,0)%22" + \
"&debugQuery=false&&fq={!frange+l%3D1+u%3D10}product(2.0,map(sum(map(sub(abs(sub(0,CenterX))," + \
"sum(171.03515625,HalfWidth)),0,400000,1,0),map(sub(abs(sub(0,CenterY)),sum(75.84516854027,HalfHeight))" + \
",0,400000,1,0)),0,0,1,0))&wt=json&fl=Name,CollectionId,Institution,Access,DataType,Availability," + \
"LayerDisplayName,Publisher,GeoReferenced,Originator,Location,MinX,MaxX,MinY,MaxY,ContentDate,LayerId," + \
"score,WorkspaceName,SrsProjectionCode&sort=score+desc&fq=DataType%3APoint+OR+DataType%3ALine+OR+" + \
"DataType%3APolygon+OR+DataType%3ARaster+OR+DataType%3APaper+Map&fq=Access:Public"
if institution:
query += "&fq=%s" % urllib.urlencode(institution)
fullurl = service.base_url + query + \
("&rows=%d&start=%d" % (maxrecords, start))
response = urllib.urlopen(fullurl).read()
json_response = json.loads(response)
process_ogp_results(service, json_response)
max = min(json_response["response"]["numFound"], totalrecords)
while start < max:
start = start + maxrecords
_harvest_ogp_layers(
service, maxrecords, start, totalrecords=totalrecords, owner=owner, institution=institution)
def process_ogp_results(ogp, result_json, owner=None):
"""
Create WMS services and layers from OGP results
"""
for doc in result_json["response"]["docs"]:
try:
locations = json.loads(doc["Location"])
except:
continue
if "tilecache" in locations:
service_url = locations["tilecache"][0]
service_type = "WMS"
elif "wms" in locations:
service_url = locations["wms"][0]
if "wfs" in locations:
service_type = "OWS"
else:
service_type = "WMS"
else:
            # No usable WMS/tilecache endpoint for this record, skip it
            continue
"""
Harvard Geospatial Library is a special case, requires an activation request
to prepare the layer before WMS requests can be successful.
"""
if doc["Institution"] == "Harvard":
service_type = "HGL"
service = None
try:
service = Service.objects.get(base_url=service_url)
except Service.DoesNotExist:
if service_type in ["WMS", "OWS", "HGL"]:
try:
response = _process_wms_service(
service_url, service_type, None, None, parent=ogp)
r_json = json.loads(response.content)
service = Service.objects.get(id=r_json[0]["service_id"])
except Exception, e:
print str(e)
if service:
typename = doc["Name"]
if service_type == "HGL":
typename = typename.replace("SDE.", "")
elif doc["WorkspaceName"]:
typename = doc["WorkspaceName"] + ":" + typename
bbox = (
float(doc['MinX']),
float(doc['MinY']),
float(doc['MaxX']),
float(doc['MaxY']),
)
layer_uuid = str(uuid.uuid1())
saved_layer, created = Layer.objects.get_or_create(typename=typename,
service=service,
defaults=dict(
name=doc["Name"],
uuid=layer_uuid,
store=service.name,
storeType="remoteStore",
workspace=doc["WorkspaceName"],
title=doc["LayerDisplayName"],
owner=None,
# Assumption
srid="EPSG:900913",
bbox=list(bbox),
geographic_bounding_box=bbox_to_wkt(
str(bbox[0]), str(bbox[1]),
str(bbox[2]), str(bbox[3]), srid="EPSG:4326")
)
)
saved_layer.set_default_permissions()
saved_layer.save()
service_layer, created = ServiceLayer.objects.get_or_create(service=service, typename=typename,
defaults=dict(
title=doc[
"LayerDisplayName"]
)
)
if service_layer.layer is None:
service_layer.layer = saved_layer
service_layer.save()
def service_detail(request, service_id):
'''
This view shows the details of a service
'''
service = get_object_or_404(Service, pk=service_id)
layer_list = service.layer_set.all()
service_list = service.service_set.all()
# Show 25 services per page
service_paginator = Paginator(service_list, 25)
layer_paginator = Paginator(layer_list, 25) # Show 25 services per page
page = request.GET.get('page')
try:
layers = layer_paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
layers = layer_paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
layers = layer_paginator.page(layer_paginator.num_pages)
try:
services = service_paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
services = service_paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
services = service_paginator.page(service_paginator.num_pages)
return render_to_response("services/service_detail.html", RequestContext(request, {
'service': service,
'layers': layers,
'services': services,
'permissions_json': _perms_info_json(service)
}))
@login_required
def edit_service(request, service_id):
"""
Edit an existing Service
"""
service_obj = get_object_or_404(Service, pk=service_id)
if request.method == "POST":
service_form = ServiceForm(
request.POST, instance=service_obj, prefix="service")
if service_form.is_valid():
service_obj = service_form.save(commit=False)
service_obj.keywords.clear()
service_obj.keywords.add(*service_form.cleaned_data['keywords'])
service_obj.save()
return HttpResponseRedirect(service_obj.get_absolute_url())
else:
service_form = ServiceForm(instance=service_obj, prefix="service")
return render_to_response("services/service_edit.html", RequestContext(request, {
"service": service_obj,
"service_form": service_form
}))
def update_layers(service):
"""
Import/update layers for an existing service
"""
if service.method == "C":
_register_cascaded_layers(service)
elif service.type in ["WMS", "OWS"]:
_register_indexed_layers(service)
elif service.type == "REST":
_register_arcgis_layers(service)
elif service.type == "CSW":
_harvest_csw(service)
elif service.type == "OGP":
_harvest_ogp_layers(service, 25)
@login_required
def remove_service(request, service_id):
"""
Delete a service, and its constituent layers.
"""
service_obj = get_object_or_404(Service, pk=service_id)
if not request.user.has_perm('maps.delete_service', obj=service_obj):
return HttpResponse(loader.render_to_string('401.html',
RequestContext(request, {
'error_message':
_("You are not permitted to remove this service.")
})), status=401)
if request.method == 'GET':
return render_to_response("services/service_remove.html", RequestContext(request, {
"service": service_obj
}))
elif request.method == 'POST':
service_obj.delete()
return HttpResponseRedirect(reverse("services"))
@login_required
def ajax_service_permissions(request, service_id):
service = get_object_or_404(Service, pk=service_id)
if not request.user.has_perm("maps.change_service_permissions", obj=service):
return HttpResponse(
'You are not allowed to change permissions for this service',
status=401,
mimetype='text/plain'
)
if not request.method == 'POST':
return HttpResponse(
'You must use POST for editing service permissions',
status=405,
mimetype='text/plain'
)
spec = json.loads(request.body)
service.set_permissions(spec)
return HttpResponse(
"Permissions updated",
status=200,
mimetype='text/plain')
def create_arcgis_links(instance):
kmz_link = instance.ows_url + '?f=kmz'
Link.objects.get_or_create(resource=instance.get_self_resource(),
url=kmz_link,
defaults=dict(
extension='kml',
name="View in Google Earth",
mime='text/xml',
link_type='data',
)
)
# Create legend.
legend_url = instance.ows_url + 'legend?f=json'
Link.objects.get_or_create(resource=instance.get_self_resource(),
url=legend_url,
defaults=dict(
extension='json',
name=_('Legend'),
url=legend_url,
mime='application/json',
link_type='json',
)
)
# Create thumbnails.
bbox = urllib.pathname2url('%s,%s,%s,%s' % (instance.bbox_x0, instance.bbox_y0, instance.bbox_x1, instance.bbox_y1))
thumbnail_remote_url = instance.ows_url + 'export?LAYERS=show%3A' + str(instance.typename) + \
'&TRANSPARENT=true&FORMAT=png&BBOX=' + bbox + '&SIZE=200%2C150&F=image&BBOXSR=4326&IMAGESR=3857'
create_thumbnail(instance, thumbnail_remote_url)
|
isralopez/geonode
|
geonode/services/views.py
|
Python
|
gpl-3.0
| 49,300
|
# -*- coding: utf-8 -*-
"""Pibooth picture regeneration module.
"""
import os
from os import path as osp
from PIL import Image
from pibooth.utils import LOGGER, configure_logging
from pibooth.config import PiConfigParser
from pibooth.pictures import get_picture_factory
def get_captures(images_folder):
"""Get a list of images from the folder given in input
"""
captures_paths = os.listdir(images_folder)
captures = []
for capture_path in captures_paths:
try:
image = Image.open(osp.join(images_folder, capture_path))
captures.append(image)
except OSError:
LOGGER.info("File %s doesn't seem to be an image", capture_path)
return captures
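# Illustrative, never-invoked sketch; the folder path is made up. get_captures()
# simply returns every file in the folder that Pillow can open as an image.
def example_count_captures(folder="/tmp/pibooth/raw/2019-01-01-00-00-00"):
    captures = get_captures(folder)
    LOGGER.info("found %d usable captures in %s", len(captures), folder)
    return captures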
def regenerate_all_images(config):
"""Regenerate the pibboth images from the raw images and the config
"""
captures_folders = config.getpath('GENERAL', 'directory')
capture_choices = config.gettuple('PICTURE', 'captures', int, 2)
backgrounds = config.gettuple('PICTURE', 'backgrounds', ('color', 'path'), 2)
overlays = config.gettuple('PICTURE', 'overlays', 'path', 2)
texts = [config.get('PICTURE', 'footer_text1').strip('"'),
config.get('PICTURE', 'footer_text2').strip('"')]
colors = config.gettuple('PICTURE', 'text_colors', 'color', len(texts))
text_fonts = config.gettuple('PICTURE', 'text_fonts', str, len(texts))
alignments = config.gettuple('PICTURE', 'text_alignments', str, len(texts))
# Part that fetch the captures
for captures_folder in os.listdir(osp.join(captures_folders, 'raw')):
captures_folder_path = osp.join(captures_folders, 'raw', captures_folder)
if not osp.isdir(captures_folder_path):
continue
captures = get_captures(captures_folder_path)
LOGGER.info("Generating image from raws in folder %s", captures_folder_path)
if len(captures) == capture_choices[0]:
overlay = overlays[0]
background = backgrounds[0]
elif len(captures) == capture_choices[1]:
overlay = overlays[1]
background = backgrounds[1]
else:
LOGGER.warning("Folder %s doesn't contain the correct number of pictures", captures_folder_path)
continue
factory = get_picture_factory(captures, config.get('PICTURE', 'orientation'))
factory.set_background(background)
if any(elem != '' for elem in texts):
for params in zip(texts, text_fonts, colors, alignments):
factory.add_text(*params)
if config.getboolean('PICTURE', 'captures_cropping'):
factory.set_cropping()
if overlay:
factory.set_overlay(overlay)
picture_file = osp.join(captures_folders, captures_folder + "_pibooth.jpg")
factory.save(picture_file)
def main():
"""Application entry point.
"""
configure_logging()
config = PiConfigParser("~/.config/pibooth/pibooth.cfg")
regenerate_all_images(config)
if __name__ == "__main__":
main()
|
werdeil/pibooth
|
pibooth/scripts/regenerate.py
|
Python
|
mit
| 3,045
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from federatedml.feature.instance import Instance
from federatedml.util import consts
from federatedml.statistic.statics import MultivariateStatisticalSummary
class LeastSquaredErrorLoss(object):
@staticmethod
def initialize(y):
y_inst = y.mapValues(lambda label: Instance(features=np.asarray([label])))
y_inst.schema = {"header": ["label"]}
statistics = MultivariateStatisticalSummary(y_inst, -1)
mean = statistics.get_mean()["label"]
return y.mapValues(lambda x: np.asarray([mean])), np.asarray([mean])
@staticmethod
def predict(value):
return value
@staticmethod
def compute_loss(y, y_pred):
lse_loss = y.join(y_pred, lambda y, yp: ((y - yp) * (y - yp), 1))
lse_sum, sample_num = lse_loss.reduce(lambda tuple1, tuple2: (tuple1[0] + tuple2[0], tuple1[1] + tuple2[1]))
return lse_sum / sample_num
@staticmethod
def compute_grad(y, y_pred):
return 2 * (y_pred - y)
@staticmethod
def compute_hess(y, y_pred):
if type(y).__name__ == "ndarray" or type(y_pred).__name__ == "ndarray":
shape = (y - y_pred).shape
return np.full(shape, 2)
else:
return 2
class LeastAbsoluteErrorLoss(object):
@staticmethod
def initialize(y):
y_inst = y.mapValues(lambda label: Instance(features=np.asarray([label])))
y_inst.schema = {"header": ["label"]}
statistics = MultivariateStatisticalSummary(y_inst, -1)
median = statistics.get_median()["label"]
return y.mapValues(lambda x: np.asarray([median])), np.asarray([median])
@staticmethod
def predict(value):
return value
@staticmethod
def compute_loss(y, y_pred):
lae = y.join(y_pred, lambda y, yp: (np.abs(y - yp), 1))
lae_sum, sample_num = lae.reduce(lambda tuple1, tuple2: (tuple1[0] + tuple2[0], tuple1[1] + tuple2[1]))
return lae_sum / sample_num
@staticmethod
def compute_grad(y, y_pred):
if type(y).__name__ == "ndarray" or type(y_pred).__name__ == "ndarray":
diff = y_pred - y
diff[diff > consts.FLOAT_ZERO] = 1
diff[diff < consts.FLOAT_ZERO] = -1
diff[np.abs(diff) <= consts.FLOAT_ZERO] = 0
return diff
else:
diff = y_pred - y
if diff > consts.FLOAT_ZERO:
return 1
elif diff < consts.FLOAT_ZERO:
return -1
else:
return 0
@staticmethod
def compute_hess(y, y_pred):
if type(y).__name__ == "ndarray" or type(y_pred).__name__ == "ndarray":
shape = (y - y_pred).shape
return np.full(shape, 1)
else:
return 1
class HuberLoss(object):
@staticmethod
def initialize(y):
y_inst = y.mapValues(lambda label: Instance(features=np.asarray([label])))
y_inst.schema = {"header": ["label"]}
statistics = MultivariateStatisticalSummary(y_inst, -1)
mean = statistics.get_mean()["label"]
return y.mapValues(lambda x: np.asarray([mean])), np.asarray([mean])
def __init__(self, delta):
if delta is None:
self.delta = consts.FLOAT_ZERO
else:
self.delta = delta
if np.abs(self.delta) < consts.FLOAT_ZERO:
self.delta = consts.FLOAT_ZERO
def compute_loss(self, y, y_pred):
huber_loss = y.join(y_pred, lambda y, yp:
(self.delta ** 2 * (np.sqrt(1 + ((yp - y) / self.delta) ** 2) - 1), 1))
huber_sum, sample_num = huber_loss.reduce(lambda tuple1, tuple2: (tuple1[0] + tuple2[0], tuple1[1] + tuple2[1]))
return huber_sum / sample_num
@staticmethod
def predict(value):
return value
def compute_grad(self, y, y_pred):
diff = y_pred - y
return diff / np.sqrt(1.0 + diff * diff / (self.delta ** 2))
def compute_hess(self, y, y_pred):
diff = y_pred - y
return 1.0 / (1.0 + diff * diff / (self.delta ** 2)) ** 1.5
class FairLoss(object):
@staticmethod
def initialize(y):
y_inst = y.mapValues(lambda label: Instance(features=np.asarray([label])))
y_inst.schema = {"header": ["label"]}
statistics = MultivariateStatisticalSummary(y_inst, -1)
mean = statistics.get_mean()["label"]
return y.mapValues(lambda x: np.asarray([mean])), np.asarray([mean])
def __init__(self, c):
if c is None:
self.c = consts.FLOAT_ZERO
else:
self.c = c
if np.abs(self.c) < consts.FLOAT_ZERO:
self.c = consts.FLOAT_ZERO
@staticmethod
def predict(value):
return value
def compute_loss(self, y, y_pred):
fair_loss = y.join(y_pred, lambda y, yp:
(self.c * np.abs(yp - y) - self.c ** 2 * np.log(np.abs(yp - y) / self.c + 1), 1))
fair_loss_sum, sample_num = fair_loss.reduce(
lambda tuple1, tuple2: (tuple1[0] + tuple2[0], tuple1[1] + tuple2[1]))
return fair_loss_sum / sample_num
def compute_grad(self, y, y_pred):
diff = y_pred - y
return self.c * diff / (np.abs(diff) + self.c)
def compute_hess(self, y, y_pred):
diff = y_pred - y
return self.c ** 2 / (np.abs(diff) + self.c) ** 2
class LogCoshLoss(object):
@staticmethod
def initialize(y):
y_inst = y.mapValues(lambda label: Instance(features=np.asarray([label])))
y_inst.schema = {"header": ["label"]}
statistics = MultivariateStatisticalSummary(y_inst, -1)
mean = statistics.get_mean()["label"]
return y.mapValues(lambda x: np.asarray([mean])), np.asarray([mean])
@staticmethod
def predict(value):
return value
@staticmethod
def compute_loss(y, y_pred):
log_cosh_loss = y.join(y_pred, lambda y, yp: (np.log(np.cosh(yp - y)), 1))
log_cosh_sum, sample_num = log_cosh_loss.reduce(
lambda tuple1, tuple2: (tuple1[0] + tuple2[0], tuple1[1] + tuple2[1]))
return log_cosh_sum / sample_num
@staticmethod
def compute_grad(y, y_pred):
return np.tanh(y_pred - y)
@staticmethod
def compute_hess(y, y_pred):
return 1 - np.tanh(y_pred - y) ** 2
class TweedieLoss(object):
@staticmethod
def initialize(y):
y_inst = y.mapValues(lambda label: Instance(features=np.asarray([label])))
y_inst.schema = {"header": ["label"]}
statistics = MultivariateStatisticalSummary(y_inst, -1)
mean = statistics.get_mean()["label"]
return y.mapValues(lambda x: np.asarray([mean])), np.asarray([mean])
def __init__(self, rho=None):
if rho is None:
self.rho = consts.FLOAT_ZERO
else:
self.rho = rho
@staticmethod
def predict(value):
return value
def compute_loss(self, y, y_pred):
tweedie_loss = y.join(y_pred,
lambda y, yp:
(-y * np.exp(1 - self.rho) * np.log(max(yp, consts.FLOAT_ZERO)) / (1 - self.rho) +
np.exp(2 - self.rho) * np.log(max(consts.FLOAT_ZERO, yp)) / (2 - self.rho), 1))
tweedie_loss_sum, sample_num = tweedie_loss.reduce(lambda tuple1, tuple2:
(tuple1[0] + tuple2[0], tuple1[1] + tuple2[1]))
return tweedie_loss_sum / sample_num
def compute_grad(self, y, y_pred):
return -y * np.exp(1 - self.rho) * y_pred + np.exp(2 - self.rho) * y_pred
def compute_hess(self, y, y_pred):
return -y * (1 - self.rho) * np.exp(1 - self.rho) * y_pred + (2 - self.rho) * np.exp(2 - self.rho) * y_pred
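# ---------------------------------------------------------------------------
# Editorial sketch, not part of the original FATE module: a quick numerical
# sanity check that LogCoshLoss.compute_grad / compute_hess agree with the
# derivatives of the per-sample loss log(cosh(y_pred - y)). Only the static
# grad/hess helpers are exercised here; initialize() and compute_loss() need
# a FATE table session and are left alone.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
    rng = np.random.RandomState(0)
    y_true = rng.normal(size=16)
    y_score = rng.normal(size=16)
    eps = 1e-5

    def per_sample_loss(yp):
        return np.log(np.cosh(yp - y_true))

    # central finite differences of the per-sample loss
    num_grad = (per_sample_loss(y_score + eps) - per_sample_loss(y_score - eps)) / (2 * eps)
    num_hess = (per_sample_loss(y_score + eps) - 2 * per_sample_loss(y_score)
                + per_sample_loss(y_score - eps)) / eps ** 2

    assert np.allclose(LogCoshLoss.compute_grad(y_true, y_score), num_grad, atol=1e-6)
    assert np.allclose(LogCoshLoss.compute_hess(y_true, y_score), num_hess, atol=1e-4)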
|
FederatedAI/FATE
|
python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/loss/regression_loss.py
|
Python
|
apache-2.0
| 8,346
|
from marshmallow_jsonapi import Schema, fields
from marshmallow import validate
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.exc import SQLAlchemyError
db = SQLAlchemy(session_options={"autoflush": False})
class CRUD():
def add(self, resource):
db.session.add(resource)
return db.session.commit()
def update(self):
return db.session.commit()
def delete(self, resource):
db.session.delete(resource)
return db.session.commit()
class Materials(db.Model, CRUD):
__tablename__ = 'materials'
MATERIAL_ID = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), unique=True, nullable=False)
cn_id = db.Column(db.Integer)
pt_id = db.Column(db.Integer)
class MaterialsSchema(Schema):
not_blank = validate.Length(min=1, error='Field cannot be blank')
id = fields.Integer()
MATERIAL_ID = fields.Integer(primary_key=True)
name = fields.String(validate=not_blank)
#self links
def get_top_level_links(self, data, many):
self_link = ''
if many:
self_link = "/materials/"
else:
if 'attributes' in data:
self_link = "/materials/{}".format(data['attributes']['MATERIAL_ID'])
return {'self': self_link}
class Meta:
type_ = 'materials'
class MaterialsSalvage(db.Model, CRUD):
__tablename__ = 'materials_salvage'
MATERIAL_SALVAGE_ID = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), unique=True, nullable=False)
class MaterialsSalvageSchema(Schema):
not_blank = validate.Length(min=1, error='Field cannot be blank')
id = fields.Integer()
MATERIAL_SALVAGE_ID = fields.Integer(primary_key=True)
name = fields.String(validate=not_blank)
#self links
def get_top_level_links(self, data, many):
self_link = ''
if many:
self_link = "/materials/salvage/"
else:
if 'attributes' in data:
self_link = "/materials/salvage/{}".format(data['attributes']['MATERIAL_SALVAGE_ID'])
return {'self': self_link}
class Meta:
type_ = 'materials_salvage'
|
konstantinKim/vd-backend
|
app/materials/models.py
|
Python
|
mit
| 2,427
|
import os
inputs = ["","input1.txt","input2.txt","input3.txt","input4.txt","input5.txt","input6.txt","input7.txt","input8.txt","input9.txt","input10.txt","input11.txt"]
outputs = ["","output1.txt","output2.txt","output3.txt","output4.txt","output5.txt","output6.txt","output7.txt","output8.txt","output9.txt","output10.txt","output11.txt"]
failed = False
for h in range(1,len(inputs)):
outputs_file = "output_program{0}.txt".format(h)
os.system("python Lexer-Script.py < {0} > {1}".format(inputs[h],outputs_file))
my_output = open(outputs_file)
output = open(outputs[h])
val1 = my_output.read()
val2 = output.read()
if val1 != val2:
print("Input case #{0} failed".format(h))
failed = True
break
if not failed:
print("================::::::::::::::::==================")
print("================Congratulation==================")
print("================Passed All Tests==================")
print("================::::::::::::::::==================")
|
NeillGiraldo/QB64-Lexer
|
judge.py
|
Python
|
mit
| 1,018
|
from weppy import response
from weppy_mvc_demo import app, auth
@app.route("/")
def welcome():
response.meta.title = "Weppy Mvc Demo"
return dict()
@app.route("/health-check")
def health_check():
return "Status OK"
@app.route('/account(/<str:f>)?(/<str:k>)?')
def account(f, k):
response.meta.title = "Weppy Mvc Demo | Account"
form = auth(f, k)
return dict(req=f, form=form)
@app.route()
def tour():
response.meta.title = "Weppy Mvc Demo | Tour"
return dict()
@app.route()
def my_ajaxf():
return "$('#target').html('<p>something</p>');"
|
mijdavis2/weppy-mvc-demo
|
weppy_mvc_demo/controllers/main.py
|
Python
|
mit
| 585
|
"""
Serialize data to/from JSON
"""
from django.utils import simplejson
from python import Serializer as PythonSerializer
from django.core.serializers.json import Deserializer as JSONDeserializer, \
DjangoJSONEncoder
class Serializer(PythonSerializer):
"""
Convert a queryset to JSON.
"""
def end_serialization(self):
"""Output a JSON encoded queryset."""
simplejson.dump(self.objects, self.stream, cls=DjangoJSONEncoder,
**self.options)
def getvalue(self):
"""
Return the fully serialized queryset (or None if the output stream
is not seekable).
"""
if callable(getattr(self.stream, 'getvalue', None)):
return self.stream.getvalue()
Deserializer = JSONDeserializer
|
mjtorn/wadofstuff-django-serializers
|
wadofstuff/django/serializers/json.py
|
Python
|
bsd-3-clause
| 773
|
# encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib/python3/dist-packages/PyQt4/QtGui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
class QDesktopServices(): # skipped bases: <class 'sip.simplewrapper'>
"""
QDesktopServices()
QDesktopServices(QDesktopServices)
"""
def displayName(self, QDesktopServices_StandardLocation): # real signature unknown; restored from __doc__
""" QDesktopServices.displayName(QDesktopServices.StandardLocation) -> str """
return ""
def openUrl(self, QUrl): # real signature unknown; restored from __doc__
""" QDesktopServices.openUrl(QUrl) -> bool """
return False
def setUrlHandler(self, p_str, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QDesktopServices.setUrlHandler(str, QObject, str)
QDesktopServices.setUrlHandler(str, callable)
"""
pass
def storageLocation(self, QDesktopServices_StandardLocation): # real signature unknown; restored from __doc__
""" QDesktopServices.storageLocation(QDesktopServices.StandardLocation) -> str """
return ""
def unsetUrlHandler(self, p_str): # real signature unknown; restored from __doc__
""" QDesktopServices.unsetUrlHandler(str) """
pass
def __init__(self, QDesktopServices=None): # real signature unknown; restored from __doc__ with multiple overloads
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
ApplicationsLocation = 3
CacheLocation = 10
DataLocation = 9
DesktopLocation = 0
DocumentsLocation = 1
FontsLocation = 2
HomeLocation = 8
MoviesLocation = 5
MusicLocation = 4
PicturesLocation = 6
StandardLocation = None # (!) real value is ''
TempLocation = 7
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247971765/PyQt4/QtGui/QDesktopServices.py
|
Python
|
gpl-2.0
| 1,974
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from cms.toolbar_pool import toolbar_pool
from cms.toolbar_base import CMSToolbar
from . import request_post_identifier
from .models import Post
@toolbar_pool.register
class BlogToolbar(CMSToolbar):
watch_models = (Post, )
def populate(self):
if not (self.is_current_app and self.request.user.has_perm('aldryn_blog.add_post')):
return
menu = self.toolbar.get_or_create_menu('blog-app', _('Blog'))
menu.add_modal_item(_('Add Blog Post'), reverse('admin:aldryn_blog_post_add'))
blog_entry = getattr(self.request, request_post_identifier, None)
if blog_entry and self.request.user.has_perm('aldryn_blog.change_post'):
menu.add_modal_item(_('Edit Blog Post'), reverse('admin:aldryn_blog_post_change', args=(blog_entry.pk,)),
active=True)
|
mikemitr/business_directory
|
business_directory/aldryn_blog/cms_toolbar.py
|
Python
|
bsd-3-clause
| 969
|
"""Test flop calculation"""
import tvm
import numpy as np
from tvm.autotvm.task.task import compute_flop
def test_conv():
for i in range(5):
N, H, W, CO, CI, KH, KW = [np.random.randint(10, 32) for _ in range(7)]
D = tvm.placeholder((N, CI, H, W))
K = tvm.placeholder((CO, CI, KH, KW))
KH = min(H, KH)
KW = min(W, KW)
ci = tvm.reduce_axis((0, CI))
kh = tvm.reduce_axis((0, KH))
kw = tvm.reduce_axis((0, KW))
OH = (H - KH) + 1
OW = (W - KW) + 1
C = tvm.compute((N, CO, OH, OW), lambda n, co, h, w:
tvm.sum(D[n][ci][h][w] * K[co][ci][h][w], axis=[ci, kh, kw]))
s = tvm.create_schedule([C.op])
assert compute_flop(s) == 2 * N * CO * OH * OW * CI * KH * KW
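# Editorial note (illustrative): the expected value above counts one multiply
# plus one add per accumulation step, i.e. 2 FLOPs for each of the
# N * CO * OH * OW output elements times the CI * KH * KW reduction size.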
def test_pack_gemm():
for i in range(5):
N, L, M = [np.random.randint(10, 128) * 4 for _ in range(3)]
A = tvm.placeholder((N, L))
B = tvm.placeholder((M, L))
k = tvm.reduce_axis((0, L))
bn = 4
A_pack = tvm.compute((N // bn, L, bn), lambda i, j, k: A[i * bn + k][j])
B_pack = tvm.compute((M // bn, L, bn), lambda i, j, k: B[i * bn + k][j])
C_pack = tvm.compute((N // bn, M // bn, bn, bn), lambda i, j, ii, jj:
tvm.sum(A_pack[i, k, ii] * B_pack[j, k, jj], axis=[k]))
C = tvm.compute((N, M), lambda i, j: C_pack[i // bn][j // bn][i % bn][j % bn])
s = tvm.create_schedule([C.op])
assert compute_flop(s) == 2 * N * L * M
def test_outer_dot():
for i in range(5):
N, M = [np.random.randint(10, 128) * 4 for _ in range(2)]
A = tvm.placeholder((N,))
B = tvm.placeholder((M,))
C = tvm.compute((N, M), lambda i, j: A[i] * B[j])
s = tvm.create_schedule([C.op])
assert compute_flop(s) == N * M
def test_move():
"""No float number operation in simple move. So the estimator should raise an error """
N = 1024
A = tvm.placeholder((N,))
C = tvm.compute((N,), lambda i: A[i])
s = tvm.create_schedule([C.op])
try:
compute_flop(s)
assert False
except RuntimeError:
pass
if __name__ == '__main__':
test_conv()
test_pack_gemm()
test_outer_dot()
test_move()
|
mlperf/training_results_v0.6
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/tests/python/unittest/test_autotvm_flop_calculator.py
|
Python
|
apache-2.0
| 2,244
|
"""
IPTables configuration
======================
Module for processing output of the ``iptables-save`` and ``ip6tables-save``
commands. Parsers included are:
IPTables - command ``iptables-save``
------------------------------------
IP6Tables - command ``ip6tables-save``
--------------------------------------
IPTabPermanent - file ``/etc/sysconfig/iptables``
-------------------------------------------------
IP6TabPermanent - file ``/etc/sysconfig/ip6tables``
---------------------------------------------------
Sample input data looks like::
# Generated by iptables-save v1.4.7 on Tue Aug 16 10:18:43 2016
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [769:196899]
:REJECT-LOG - [0:0]
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -s 192.168.0.0/24 -j ACCEPT
-A INPUT -p icmp -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A REJECT-LOG -p tcp -j REJECT --reject-with tcp-reset
COMMIT
# Completed on Tue Aug 16 10:18:43 2016
# Generated by iptables-save v1.4.7 on Tue Aug 16 10:18:43 2016
*mangle
:PREROUTING ACCEPT [451:22060]
:INPUT ACCEPT [451:22060]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [594:47151]
:POSTROUTING ACCEPT [594:47151]
COMMIT
# Completed on Tue Aug 16 10:18:43 2016
# Generated by iptables-save v1.4.7 on Tue Aug 16 10:18:43 2016
*nat
:PREROUTING ACCEPT [0:0]
:POSTROUTING ACCEPT [3:450]
:OUTPUT ACCEPT [3:450]
COMMIT
# Completed on Tue Aug 16 10:18:43 2016
* Each table of iptables starts with a ``# Generated by ...`` line.
* Each table starts with ``*<table-name>``, for example ``*filter``.
* Each chain specifications starts with a ``:`` sign.
* A chain specification looks like ``:<chain-name> <chain-policy> [<packet-counter>:<byte-counter>]``
* The chain-name may be for example ``INPUT``.
* Each ``iptables`` rule starts with a `-` sign.
Examples:
>>> ipt.rules[0] == {'target': 'ACCEPT', 'chain': 'INPUT', 'rule': '-m state --state RELATED,ESTABLISHED -j ACCEPT', 'table': 'filter', 'target_options': None, 'target_action': 'jump', 'constraints': '-m state --state RELATED,ESTABLISHED'}
True
>>> ipt.get_chain('INPUT')[1] == {'target': 'ACCEPT', 'chain': 'INPUT', 'rule': '-s 192.168.0.0/24 -j ACCEPT', 'table': 'filter', 'target_options': None, 'target_action': 'jump', 'constraints': '-s 192.168.0.0/24'}
True
>>> ipt.table_chains('mangle') == {'FORWARD': [], 'INPUT': [], 'POSTROUTING': [], 'PREROUTING': [], 'OUTPUT': []}
True
>>> ipt.get_table('nat')[-1] == {'policy': 'ACCEPT', 'table': 'nat', 'byte_counter': 450, 'name': 'OUTPUT', 'packet_counter': 3}
True
"""
from .. import Parser, parser, get_active_lines, CommandParser
from insights.specs import Specs
class IPTablesConfiguration(Parser):
"""
A general class for parsing iptables configuration in the
``iptables-save``-like format.
"""
def parse_content(self, content):
self.chains = []
self.rules = []
current_table = None
for line in get_active_lines(content):
if line.startswith("*"):
current_table = line[1:].strip()
elif line.startswith(":"):
name, policy, counter = line[1:].split()
packet_counter, byte_counter = counter.strip("[]").split(":")
self.chains.append({
"policy": policy if policy != "-" else None,
"table": current_table,
"name": name,
"packet_counter": int(packet_counter),
"byte_counter": int(byte_counter),
})
elif line.startswith("-"):
line_spl = line[3:].split(None, 1)
if not line_spl:
continue
chain_name = line_spl[0]
rule = line_spl[1] if len(line_spl) == 2 else ''
target_option = [i for i in (' -j', '-j ', ' -g', '-g ') if i in rule]
if target_option:
constraints, target = [i.strip() for i in rule.split(target_option[-1])]
if " " in target:
target, target_options = target.split(None, 1)
else:
target_options = None
self.rules.append({
"table": current_table,
"chain": chain_name,
"rule": rule,
"target_action": "jump" if target_option[-1].strip() == "-j" else "goto",
"constraints": constraints,
"target": target,
"target_options": target_options
})
else:
self.rules.append({
"table": current_table,
"chain": chain_name,
"rule": rule
})
def get_chain(self, name, table="filter"):
"""
Get the list of rules for a particular chain. Chain order is kept intact.
Args:
            name (str): chain name, e.g. ``INPUT``
table (str): table name, defaults to ``filter``
Returns:
list: rules
"""
return [r for r in self.rules if r["table"] == table and r["chain"] == name]
def get_table(self, name="filter"):
"""
Get the list of chains for a particular table.
Args:
name (str): table name, defaults to ``filter``
Returns:
list: chains
"""
return [c for c in self.chains if c["table"] == name]
def table_chains(self, table="filter"):
"""
Get a dict where the keys are all the chains for the given table
and each value is the set of rules defined for the given chain.
Args:
table (str): table name, defaults to ``filter``
Returns:
dict: chains with set of defined rules
"""
return dict((c["name"], self.get_chain(c["name"], table)) for c in self.get_table(table))
def get_rule(self, s):
"""
Get the list of rules that contain the given string.
Args:
s (str): string to look for in iptables rules
Returns:
list: rules containing given string
"""
return [r for r in self.rules if s in r["rule"]]
def __contains__(self, s):
return any(s in r["rule"] for r in self.rules)
@parser(Specs.iptables)
class IPTables(CommandParser, IPTablesConfiguration):
"""
Process output of the ``iptables-save`` command.
See the :py:class:`insights.parsers.iptables.IPTablesConfiguration` base
class for additional information.
"""
pass
@parser(Specs.ip6tables)
class IP6Tables(CommandParser, IPTablesConfiguration):
"""
Process output of the ``ip6tables-save`` command.
See the :py:class:`insights.parsers.iptables.IPTablesConfiguration` base
class for additional information.
"""
pass
@parser(Specs.iptables_permanent)
class IPTabPermanent(IPTablesConfiguration):
"""
Process ``iptables`` configuration saved in file ``/etc/sysconfig/iptables``.
The configuration in this file is loaded by the ``iptables`` service when the system boots.
New configuration is saved by using the ``service iptables save`` command. This configuration
file is not available on a system with ``firewalld`` service.
See the :py:class:`insights.parsers.iptables.IPTablesConfiguration` base
class for additional information.
"""
pass
@parser(Specs.ip6tables_permanent)
class IP6TabPermanent(IPTablesConfiguration):
"""
Process ``ip6tables`` configuration saved in file ``/etc/sysconfig/ip6tables``.
The configuration in this file is loaded by the ``ip6tables`` service when the system boots.
New configuration is saved by using the ``service ip6tables save`` command. This configuration
file is not available on a system with ``firewalld`` service.
See the :py:class:`insights.parsers.iptables.IPTablesConfiguration` base
class for additional information.
"""
pass
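# ---------------------------------------------------------------------------
# Editorial usage sketch, not part of the shipped parser: feeding a literal
# ``iptables-save`` fragment through the parser. The ``context_wrap`` helper
# is the one insights-core uses in its own parser tests; treat its import
# path (and the sample rules below) as assumptions of this sketch.
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover
    from insights.tests import context_wrap

    SAMPLE = """
*filter
:INPUT ACCEPT [0:0]
-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
COMMIT
""".strip()

    ipt = IPTables(context_wrap(SAMPLE))
    print(ipt.get_chain('INPUT'))       # both rules, in original order
    print(ipt.get_rule('--dport 22'))   # rules mentioning port 22
    print('--dport 22' in ipt)          # True, via __contains__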
|
RedHatInsights/insights-core
|
insights/parsers/iptables.py
|
Python
|
apache-2.0
| 8,316
|
#!/usr/bin/env python
"""
@file binary2plain.py
@author Michael Behrisch
@date 2012-03-11
@version $Id: binary2plain.py 13811 2013-05-01 20:31:43Z behrisch $
Converter between SUMO's binary XML and plain XML
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2012-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import sys, struct
BYTE = 0
INTEGER = 1
FLOAT = 2
STRING = 3
LIST = 4
XML_TAG_START = 5
XML_TAG_END = 6
XML_ATTRIBUTE = 7
EDGE = 8
LANE = 9
POSITION_2D = 10
POSITION_3D = 11
BOUNDARY = 12
COLOR = 13
NODE_TYPE = 14
EDGE_FUNCTION = 15
ROUTE = 16
SCALED2INT = 17
SCALED2INT_POSITION_2D = 18
SCALED2INT_POSITION_3D = 19
def read(content, format):
return struct.unpack(format, content.read(struct.calcsize(format)))
def readByte(content):
return read(content, "B")[0]
def readInt(content):
return read(content, "i")[0]
def readDouble(content):
return read(content, "d")[0]
def readString(content):
length = readInt(content)
return read(content, "%ss" % length)[0]
def readStringList(content):
n = readInt(content)
list = []
for i in range(n):
read(content, "B") #type
list.append(readString(content))
return list
def readIntListList(content):
n = readInt(content)
list = []
for i in range(n):
read(content, "B") #type
n1 = readInt(content)
list.append([])
for j in range(n1):
read(content, "B") #type
list[-1].append(readInt(content))
return list
def readRoute(content):
n = readInt(content)
list = []
first = readInt(content)
if first < 0:
bits = -first
numFields = 8 * 4 / bits
mask = (1 << bits) - 1
edge = readInt(content)
list.append(edges[edge])
n -= 1
field = numFields
while n > 0:
if field == numFields:
data = readInt(content)
field = 0
followIndex = (data >> ((numFields - field - 1) * bits)) & mask;
edge = followers[edge][followIndex]
list.append(edges[edge])
field += 1
n -= 1
else:
list.append(edges[first])
n -= 1
while n > 0:
list.append(edges[readInt(content)])
n -= 1
return list
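# ---------------------------------------------------------------------------
# Editorial note on the packed-route encoding handled by readRoute above
# (illustrative only, nothing here is executed): a negative first int marks a
# follower-index encoded route and its absolute value is the bit width of
# each index. The first edge id is then read as a full int; the remaining
# entries are follower indices packed 8*4/bits per 32-bit word, each selecting
# the next edge via followers[edge][followIndex]. For example, with bits = 4
# there are 8 fields per word, mask = 0xF, and a data word of 0x12345678
# decodes to the indices 1, 2, 3, 4, 5, 6, 7, 8. A non-negative first value
# simply means the route is stored as plain edge ids.
# ---------------------------------------------------------------------------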
def typedValueStr(content):
valType = readByte(content)
if valType == BYTE:
return str(readByte(content))
elif valType == INTEGER:
return str(readInt(content))
elif valType == FLOAT:
return '%.2f' % readDouble(content)
elif valType == STRING:
return readString(content)
elif valType == LIST:
l = []
for i in range(readInt(content)):
l.append(typedValueStr(content))
return " ".join(l)
elif valType == EDGE:
return edges[readInt(content)]
elif valType == LANE:
return '%s_%s' % (edges[readInt(content)], readByte(content))
elif valType == POSITION_2D:
return '%.2f,%.2f' % (readDouble(content),readDouble(content))
elif valType == POSITION_3D:
return '%.2f,%.2f,%.2f' % (readDouble(content),readDouble(content),readDouble(content))
elif valType == BOUNDARY:
return '%.2f,%.2f,%.2f,%.2f' % (readDouble(content),readDouble(content),
readDouble(content),readDouble(content))
elif valType == COLOR:
val = read(content, "BBBB")
return '%.2f,%.2f,%.2f' % (val[0]/255.,val[1]/255.,val[2]/255.)
elif valType == NODE_TYPE:
return nodeTypes[readByte(content)]
elif valType == EDGE_FUNCTION:
return edgeTypes[readByte(content)]
elif valType == ROUTE:
return " ".join(readRoute(content))
elif valType == SCALED2INT:
return '%.2f' % (readInt(content)/100.)
elif valType == SCALED2INT_POSITION_2D:
return '%.2f,%.2f' % (readInt(content)/100.,readInt(content)/100.)
elif valType == SCALED2INT_POSITION_3D:
return '%.2f,%.2f,%.2f' % (readInt(content)/100.,readInt(content)/100.,readInt(content)/100.)
out = sys.stdout
content = open(sys.argv[1], 'rb')
read(content, "BBB") #type, sbx version, type
readString(content) #sumo version
read(content, "B") #type
elements = readStringList(content)
read(content, "B") #type
attributes = readStringList(content)
read(content, "B") #type
nodeTypes = readStringList(content)
read(content, "B") #type
edgeTypes = readStringList(content)
read(content, "B") #type
edges = readStringList(content)
read(content, "B") #type
followers = readIntListList(content)
stack = []
startOpen = False
while True:
typ = readByte(content)
if typ == XML_TAG_START:
if startOpen:
out.write(">\n")
out.write(" " * len(stack))
stack.append(readByte(content))
out.write("<" + elements[stack[-1]])
startOpen = True
elif typ == XML_TAG_END:
if startOpen:
out.write("/>\n")
stack.pop()
startOpen = False
else:
out.write(" " * (len(stack)-1))
out.write("</%s>\n" % elements[stack.pop()])
readByte(content)
if len(stack) == 0:
break
elif typ == XML_ATTRIBUTE:
out.write(' %s="%s"' % (attributes[readByte(content)], typedValueStr(content)))
else:
print >> sys.stderr, "Unknown type %s" % typ
|
rudhir-upretee/Sumo17_With_Netsim
|
tools/xml/binary2plain.py
|
Python
|
gpl-3.0
| 5,415
|
# file existdb/db.py
#
# Copyright 2010 Emory University General Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Connect to an eXist XML database and query it.
This module provides :class:`ExistDB` and related classes for connecting to
an eXist-db_ database and executing XQuery_ queries against it.
.. _XQuery: http://www.w3.org/TR/xquery/
.. _eXist-db: http://exist.sourceforge.net/
"""
from functools import wraps
import httplib
import logging
import socket
from urllib import unquote_plus, splittype
import xmlrpclib
from eulcore import xmlmap
from eulcore.existdb.exceptions import ExistDBException, ExistDBTimeout
__all__ = ['ExistDB', 'QueryResult', 'ExistDBException', 'EXISTDB_NAMESPACE']
logger = logging.getLogger(__name__)
EXISTDB_NAMESPACE = 'http://exist.sourceforge.net/NS/exist'
def _wrap_xmlrpc_fault(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except socket.timeout as e:
raise ExistDBTimeout(e)
except (socket.error, xmlrpclib.Fault, \
xmlrpclib.ProtocolError, xmlrpclib.ResponseError) as e:
raise ExistDBException(e)
# FIXME: could we catch IOerror (connection reset) and try again ?
# occasionally getting this error (so far exclusively in unit tests)
# error: [Errno 104] Connection reset by peer
return wrapper
class ExistDB:
"""Connect to an eXist database, and manipulate and query it.
Construction doesn't initiate server communication, only store
information about where the server is, to be used in later
communications.
:param server_url: The XML-RPC endpoint of the server, typically
``/xmlrpc`` within the server root.
:param resultType: The class to use for returning :meth:`query` results;
defaults to :class:`QueryResult`
:param encoding: The encoding used to communicate with the server;
defaults to "UTF-8"
:param verbose: When True, print XML-RPC debugging messages to stdout
:param timeout: Specify a timeout for xmlrpc connection
        requests. If not specified, the global default socket timeout
value will be used.
"""
def __init__(self, server_url, resultType=None, encoding='UTF-8', verbose=False,
timeout=None):
# FIXME: Will encoding ever be anything but UTF-8? Does this really
# need to be part of our public interface?
self.resultType = resultType or QueryResult
datetime_opt = {'use_datetime': True}
# determine if we need http or https transport
# (duplicates some logic in xmlrpclib)
type, uri = splittype(server_url)
if type not in ("http", "https"):
raise IOError, "unsupported XML-RPC protocol"
if type == 'https':
transport = TimeoutSafeTransport(timeout=timeout, **datetime_opt)
else:
transport = TimeoutTransport(timeout=timeout, **datetime_opt)
self.server = xmlrpclib.ServerProxy(
uri="%s/xmlrpc" % server_url.rstrip('/'),
transport=transport,
encoding=encoding,
verbose=verbose,
allow_none=True,
**datetime_opt
)
def getDocument(self, name, **kwargs):
"""Retrieve a document from the database.
:param name: database document path to retrieve
:rtype: string contents of the document
"""
logger.debug('getDocumentAsString %s options=%s' % (name, kwargs))
return self.server.getDocumentAsString(name, kwargs)
def getDoc(self, name, **kwargs):
"Alias for :meth:`getDocument`."
return self.getDocument(name, **kwargs)
def createCollection(self, collection_name, overwrite=False):
"""Create a new collection in the database.
:param collection_name: string name of collection
:param overwrite: overwrite existing document?
:rtype: boolean indicating success
"""
if not overwrite and self.hasCollection(collection_name):
raise ExistDBException(collection_name + " exists")
logger.debug('createCollection %s' % collection_name)
return self.server.createCollection(collection_name)
@_wrap_xmlrpc_fault
def removeCollection(self, collection_name):
"""Remove the named collection from the database.
:param collection_name: string name of collection
:rtype: boolean indicating success
"""
if (not self.hasCollection(collection_name)):
raise ExistDBException(collection_name + " does not exist")
logger.debug('removeCollection %s' % collection_name)
return self.server.removeCollection(collection_name)
def hasCollection(self, collection_name):
"""Check if a collection exists.
:param collection_name: string name of collection
:rtype: boolean
"""
try:
logger.debug('describeCollection %s' % collection_name)
self.server.describeCollection(collection_name)
return True
except xmlrpclib.Fault, e:
s = "collection " + collection_name + " not found"
if (e.faultCode == 0 and s in e.faultString):
return False
else:
raise ExistDBException(e)
def reindexCollection(self, collection_name):
"""Reindex a collection.
Reindex will fail if the eXist user does not have the correct permissions
within eXist (must be a member of the DBA group).
:param collection_name: string name of collection
:rtype: boolean success
"""
if (not self.hasCollection(collection_name)):
raise ExistDBException(collection_name + " does not exist")
# xquery reindex function requires that collection name begin with /db/
if collection_name[0:3] != '/db':
collection_name = '/db/' + collection_name.strip('/')
result = self.query("xmldb:reindex('%s')" % collection_name)
return result.values[0] == 'true'
@_wrap_xmlrpc_fault
def hasDocument(self, document_path):
"""Check if a document is present in eXist.
:param document_path: string full path to document in eXist
:rtype: boolean
"""
if self.describeDocument(document_path) == {}:
return False
else:
return True
@_wrap_xmlrpc_fault
def describeDocument(self, document_path):
"""Return information about a document in eXist.
Includes name, owner, group, created date, permissions, mime-type,
type, content-length.
Returns an empty dictionary if document is not found.
:param document_path: string full path to document in eXist
:rtype: dictionary
"""
logger.debug('describeResource %s' % document_path)
return self.server.describeResource(document_path)
@_wrap_xmlrpc_fault
def getCollectionDescription(self, collection_name):
"""Retrieve information about a collection.
:param collection_name: string name of collection
:rtype: boolean
"""
logger.debug('getCollectionDesc %s' % collection_name)
return self.server.getCollectionDesc(collection_name)
@_wrap_xmlrpc_fault
def load(self, xml, path, overwrite=False):
"""Insert or overwrite a document in the database.
:param xml: string or file object with the document contents
:param path: destination location in the database
:param overwrite: True to allow overwriting an existing document
:rtype: boolean indicating success
"""
if hasattr(xml, 'read'):
xml = xml.read()
logger.debug('parse %s overwrite=%s' % (path, overwrite))
return self.server.parse(xml, path, int(overwrite))
@_wrap_xmlrpc_fault
def removeDocument(self, name):
"""Remove a document from the database.
:param name: full eXist path to the database document to be removed
:rtype: boolean indicating success
"""
logger.debug('remove %s' % name)
return self.server.remove(name)
@_wrap_xmlrpc_fault
def moveDocument(self, from_collection, to_collection, document):
"""Move a document in eXist from one collection to another.
:param from_collection: collection where the document currently exists
:param to_collection: collection where the document should be moved
:param document: name of the document in eXist
:rtype: boolean
"""
self.query("xmldb:move('%s', '%s', '%s')" % \
(from_collection, to_collection, document))
# query result does not return any meaningful content,
# but any failure (missing collection, document, etc) should result in
# an exception, so return true if the query completed successfully
return True
@_wrap_xmlrpc_fault
def query(self, xquery, start=1, how_many=10, **kwargs):
"""Execute an XQuery query, returning the results directly.
:param xquery: a string XQuery query
:param start: first index to return (1-based)
:param how_many: maximum number of items to return
:rtype: the resultType specified at the creation of this ExistDB;
defaults to :class:`QueryResult`.
"""
logger.debug('query how_many=%d start=%d args=%s\n%s' % (how_many, start, kwargs, xquery))
xml_s = self.server.query(xquery, how_many, start, kwargs)
# xmlrpclib tries to guess whether the result is a string or
# unicode, returning whichever it deems most appropriate.
# Unfortunately, :meth:`~eulcore.xmlmap.load_xmlobject_from_string`
# requires a byte string. This means that if xmlrpclib gave us a
# unicode, we need to encode it:
if isinstance(xml_s, unicode):
xml_s = xml_s.encode("UTF-8")
return xmlmap.load_xmlobject_from_string(xml_s, self.resultType)
@_wrap_xmlrpc_fault
def executeQuery(self, xquery):
"""Execute an XQuery query, returning a server-provided result
handle.
:param xquery: a string XQuery query
:rtype: an integer handle identifying the query result for future calls
"""
# NOTE: eXist's xmlrpc interface requires a dictionary parameter.
# This parameter is not documented in the eXist docs at
# http://demo.exist-db.org/exist/devguide_xmlrpc.xml
# so it's not clear what we can pass there.
logger.debug('executeQuery\n%s' % xquery)
result_id = self.server.executeQuery(xquery, {})
logger.debug('result id is %s' % result_id)
return result_id
@_wrap_xmlrpc_fault
def querySummary(self, result_id):
"""Retrieve results summary from a past query.
:param result_id: an integer handle returned by :meth:`executeQuery`
:rtype: a dict describing the results
The returned dict has four fields:
* *queryTime*: processing time in milliseconds
* *hits*: number of hits in the result set
* *documents*: a list of lists. Each identifies a document and
takes the form [`doc_id`, `doc_name`, `hits`], where:
* *doc_id*: an internal integer identifier for the document
* *doc_name*: the name of the document as a string
* *hits*: the number of hits within that document
* *doctype*: a list of lists. Each contains a doctype public
identifier and the number of hits found for this
doctype.
"""
# FIXME: This just exposes the existdb xmlrpc querySummary function.
# Frankly, this return is just plain ugly. We should come up with
# something more meaningful.
summary = self.server.querySummary(result_id)
logger.debug('querySummary result id %d : ' % result_id + \
'%(hits)s hits, query took %(queryTime)s ms' % summary)
return summary
@_wrap_xmlrpc_fault
def getHits(self, result_id):
"""Get the number of hits in a query result.
:param result_id: an integer handle returned by :meth:`executeQuery`
:rtype: integer representing the number of hits
"""
hits = self.server.getHits(result_id)
logger.debug('getHits result id %d : %s' % (result_id, hits))
return hits
@_wrap_xmlrpc_fault
def retrieve(self, result_id, position, highlight=False, **options):
"""Retrieve a single result fragment.
:param result_id: an integer handle returned by :meth:`executeQuery`
:param position: the result index to return
:param highlight: enable search term highlighting in result; optional,
defaults to False
:rtype: the query result item as a string
"""
if highlight:
# eXist highlight modes: attributes, elements, or both
# using elements because it seems most reasonable default
options['highlight-matches'] = 'elements'
# pretty-printing with eXist matches can introduce unwanted whitespace
if 'indent' not in options:
options['indent'] = 'no'
logger.debug('retrieve result id %d position=%d options=%s' % (result_id, position, options))
return self.server.retrieve(result_id, position, options)
@_wrap_xmlrpc_fault
def releaseQueryResult(self, result_id):
"""Release a result set handle in the server.
:param result_id: an integer handle returned by :meth:`executeQuery`
"""
logger.debug('releaseQueryResult result id %d' % result_id)
self.server.releaseQueryResult(result_id)
@_wrap_xmlrpc_fault
def setPermissions(self, resource, permissions):
"""Set permissions on a resource in eXist.
:param resource: full path to a collection or document in eXist
:param permissions: int or string permissions statement
"""
# TODO: support setting owner, group ?
logger.debug('setPermissions %s %s' % (resource, permissions))
self.server.setPermissions(resource, permissions)
@_wrap_xmlrpc_fault
def getPermissions(self, resource):
"""Retrieve permissions for a resource in eXist.
:param resource: full path to a collection or document in eXist
:rtype: ExistPermissions
"""
return ExistPermissions(self.server.getPermissions(resource))
def loadCollectionIndex(self, collection_name, index, overwrite=True):
"""Load an index configuration for the specified collection.
Creates the eXist system config collection if it is not already there,
and loads the specified index config file, as per eXist collection and
index naming conventions.
:param collection_name: name of the collection to be indexed
:param index: string or file object with the document contents (as used by :meth:`load`)
:param overwrite: set to False to disallow overwriting current index (overwrite allowed by default)
:rtype: boolean indicating success
"""
index_collection = self._configCollectionName(collection_name)
# FIXME: what error handling should be done at this level?
# create config collection if it does not exist
if not self.hasCollection(index_collection):
self.createCollection(index_collection)
# load index content as the collection index configuration file
return self.load(index, self._collectionIndexPath(collection_name), overwrite)
def removeCollectionIndex(self, collection_name):
"""Remove index configuration for the specified collection.
If index collection has no documents or subcollections after the index
file is removed, the configuration collection will also be removed.
:param collection: name of the collection with an index to be removed
:rtype: boolean indicating success
"""
# collection indexes information must be stored under system/config/db/collection_name
index_collection = self._configCollectionName(collection_name)
# remove collection.xconf in the configuration collection
self.removeDocument(self._collectionIndexPath(collection_name))
desc = self.getCollectionDescription(index_collection)
# no documents and no sub-collections - safe to remove index collection
if desc['collections'] == [] and desc['documents'] == []:
self.removeCollection(index_collection)
return True
def hasCollectionIndex(self, collection_name):
"""Check if the specified collection has an index configuration in eXist.
Note: according to eXist documentation, index config file does not *have*
to be named *collection.xconf* for reasons of backward compatibility.
This function assumes that the recommended naming conventions are followed.
:param collection: name of the collection with an index to be removed
:rtype: boolean indicating collection index is present
"""
return self.hasCollection(self._configCollectionName(collection_name)) \
and self.hasDocument(self._collectionIndexPath(collection_name))
def _configCollectionName(self, collection_name):
"""Generate eXist db path to the configuration collection for a specified collection
according to eXist collection naming conventions.
"""
# collection indexes information must be stored under system/config/db/collection_name
return "/db/system/config/db/" + collection_name.strip('/')
def _collectionIndexPath(self, collection_name):
"""Generate full eXist db path to the index configuration file for a specified
collection according to eXist collection naming conventions.
"""
# collection indexes information must be stored under system/config/db/collection_name
return self._configCollectionName(collection_name) + "/collection.xconf"
class ExistPermissions:
"Permissions for an eXist resource - owner, group, and active permissions."
def __init__(self, data):
self.owner = data['owner']
self.group = data['group']
self.permissions = data['permissions']
def __str__(self):
return "owner: %s; group: %s; permissions: %s" % (self.owner, self.group, self.permissions)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, str(self))
class QueryResult(xmlmap.XmlObject):
"""The results of an eXist XQuery query"""
start = xmlmap.IntegerField("@start")
"""The index of the first result returned"""
values = xmlmap.StringListField("exist:value")
"Generic value (*exist:value*) returned from an exist xquery"
_raw_count = xmlmap.IntegerField("@count")
@property
def count(self):
"""The number of results returned in this chunk"""
return self._raw_count or 0
_raw_hits = xmlmap.IntegerField("@hits")
@property
def hits(self):
"""The total number of hits found by the search"""
return self._raw_hits or 0
@property
def results(self):
"""The result documents themselves as nodes, starting at
:attr:`start` and containing :attr:`count` members"""
return self.node.xpath('*')
# FIXME: Why do we have two properties here with the same value?
# start == show_from. We should pick one and deprecate the other.
@property
def show_from(self):
"""The index of first object in this result chunk.
Equivalent to :attr:`start`."""
return self.start
# FIXME: Not sure how we're using this, but it feels wonky. If we're
# using it for chunking or paging then we should probably follow the
# slice convention of returning the index past the last one. If we're
# using it for pretty-printing results ranges then the rVal < 0 branch
# sounds like an exception condition that should be handled at a higher
# level. Regardless, shouldn't some system invariant handle the rVal >
# self.hits branch for us? This whole method just *feels* weird. It
# warrants some examination.
@property
def show_to(self):
"""The index of last object in this result chunk"""
rVal = (self.start - 1) + self.count
if rVal > self.hits:
#show_to can not exceed total hits
return self.hits
elif rVal < 0:
return 0
else:
return rVal
# FIXME: This, too, feels like it checks a number of things that should
# probably be system invariants. We should coordinate what this does
# with how it's actually used.
def hasMore(self):
"""Are there more matches after this one?"""
if not self.hits or not self.start or not self.count:
return False
return self.hits > (self.start + self.count)
# Custom xmlrpclib Transport classes for configurable timeout
# Adapted from code found here:
# http://stackoverflow.com/questions/372365/set-timeout-for-xmlrpclib-serverproxy
class TimeoutHTTP(httplib.HTTP):
def __init__(self, host='', port=None, strict=None, timeout=None):
if port == 0:
port = None
self._setup(self._connection_class(host, port, strict, timeout))
class TimeoutHTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, strict=None, timeout=None):
if port == 0:
port = None
self._setup(self._connection_class(host, port, strict, timeout))
class TimeoutTransport(xmlrpclib.Transport):
'''Extend the default :class:`xmlrpclib.Transport` to expose a
connection timeout parameter. Uses :class:`TimeoutHTTP` for http
connections.'''
_http_connection = TimeoutHTTP
def __init__(self, timeout=None, *args, **kwargs):
if timeout is None:
timeout = socket._GLOBAL_DEFAULT_TIMEOUT
xmlrpclib.Transport.__init__(self, *args, **kwargs)
self.timeout = timeout
def make_connection(self, host):
host, extra_headers, x509 = self.get_host_info(host)
conn = self._http_connection(host, timeout=self.timeout)
return conn
class TimeoutSafeTransport(TimeoutTransport):
'''Extend class:`TimeoutTransport` but use :class:`TimeoutHTTPS`
for the http connections; timeout-enabled equivalent to
:class:`xmlrpclib.SafeTransport`.'''
_http_connection = TimeoutHTTPS
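# ---------------------------------------------------------------------------
# Editorial usage sketch, not part of the original module: a minimal round
# trip against a local eXist instance. The server URL, collection name and
# document content are assumptions, and a running eXist server is required
# for this to do anything useful.
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover
    db = ExistDB('http://localhost:8080/exist')
    db.createCollection('/db/demo', overwrite=True)
    db.load('<doc><title>hello</title></doc>', '/db/demo/hello.xml', overwrite=True)
    result = db.query('collection("/db/demo")//title', how_many=5)
    print("%d hit(s), showing %d-%d" % (result.hits, result.show_from, result.show_to))
    db.removeCollection('/db/demo')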
|
emory-libraries/eulcore-history
|
src/eulcore/existdb/db.py
|
Python
|
apache-2.0
| 23,396
|
def set_direction(self):
if self.direction == 'left' and self.tower['tower_id'] == 't2' and self.tower['current_pos'] == 'left':
if self.steps == 3:
self.tower['tower_id'] = 't4'
self.tower['current_pos'] = "right"
if self.steps == 2:
self.tower['tower_id'] = 't1'
self.tower['current_pos'] = "left"
if self.steps == 1:
self.tower['tower_id'] = 't1'
self.tower['current_pos'] = 'right'
elif self.direction == 'right' and self.tower['tower_id'] == 't2' and self.tower['current_pos'] == 'left':
if self.steps == 3:
self.tower['tower_id'] = 't3'
self.tower['current_pos'] = "right"
if self.steps == 2:
self.tower['tower_id'] = 't3'
self.tower['current_pos'] = "left"
if self.steps == 1:
self.tower['tower_id'] = 't2'
self.tower['current_pos'] = 'right'
elif self.direction == 'left' and self.tower['tower_id'] == 't2' and self.tower['current_pos'] == 'right':
if self.steps == 3:
self.tower['tower_id'] = 't1'
self.tower['current_pos'] = "left"
if self.steps == 2:
self.tower['tower_id'] = 't1'
self.tower['current_pos'] = "right"
if self.steps == 1:
self.tower['tower_id'] = 't2'
self.tower['current_pos'] = 'left'
elif self.direction == 'right' and self.tower['tower_id'] == 't2' and self.tower['current_pos'] == 'right':
if self.steps == 3:
self.tower['tower_id'] = 't4'
self.tower['current_pos'] = "left"
if self.steps == 2:
self.tower['tower_id'] = 't3'
self.tower['current_pos'] = "right"
if self.steps == 1:
self.tower['tower_id'] = 't3'
self.tower['current_pos'] = 'left'
elif self.direction == 'right' and self.tower['tower_id'] == 't2' and self.tower['current_pos'] == 'middle':
if self.steps == 3:
self.tower['tower_id'] = 't1'
if self.steps == 2:
self.tower['tower_id'] = 't4'
if self.steps == 1:
self.tower['tower_id'] = 't3'
elif self.direction == 'left' and self.tower['tower_id'] == 't2' and self.tower['current_pos'] == 'middle':
if self.steps == 3:
self.tower['tower_id'] = 't3'
if self.steps == 2:
self.tower['tower_id'] = 't4'
if self.steps == 1:
self.tower['tower_id'] = 't1'
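# ---------------------------------------------------------------------------
# Editorial alternative sketch, not used by set_direction() above: the same
# t2 transitions expressed as a lookup table keyed by
# (direction, current_pos, steps), derived branch-by-branch from the code
# above. It only applies while tower_id == 't2'; unknown keys leave the
# tower untouched, matching the original's fall-through behaviour.
# ---------------------------------------------------------------------------
T2_TRANSITIONS = {
    ('left', 'left', 3): ('t4', 'right'),
    ('left', 'left', 2): ('t1', 'left'),
    ('left', 'left', 1): ('t1', 'right'),
    ('right', 'left', 3): ('t3', 'right'),
    ('right', 'left', 2): ('t3', 'left'),
    ('right', 'left', 1): ('t2', 'right'),
    ('left', 'right', 3): ('t1', 'left'),
    ('left', 'right', 2): ('t1', 'right'),
    ('left', 'right', 1): ('t2', 'left'),
    ('right', 'right', 3): ('t4', 'left'),
    ('right', 'right', 2): ('t3', 'right'),
    ('right', 'right', 1): ('t3', 'left'),
    ('right', 'middle', 3): ('t1', 'middle'),
    ('right', 'middle', 2): ('t4', 'middle'),
    ('right', 'middle', 1): ('t3', 'middle'),
    ('left', 'middle', 3): ('t3', 'middle'),
    ('left', 'middle', 2): ('t4', 'middle'),
    ('left', 'middle', 1): ('t1', 'middle'),
}


def set_direction_via_table(self):
    """Equivalent sketch of set_direction() driven by T2_TRANSITIONS."""
    if self.tower['tower_id'] != 't2':
        return
    key = (self.direction, self.tower['current_pos'], self.steps)
    if key in T2_TRANSITIONS:
        self.tower['tower_id'], self.tower['current_pos'] = T2_TRANSITIONS[key]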
|
daniellinye/HRINFG3
|
euromast/components/t2.py
|
Python
|
mit
| 2,527
|
from django.test import TestCase
from ..models import Contact, Consent, Locator
class TestContact(TestCase):
def setUp(self):
self.consent = Consent.objects.create(
subject_identifier='12345',
first_name='test_firstname',
last_name='test_surname')
self.locator = Locator.objects.create(
subject_identifier='12345',
subject_cell='26771522602',
subject_cell_alt='26771883071')
def test_send_message(self):
"""Test creation of a outgoing message after sending sms.
"""
contact = Contact.objects.all()
self.assertEqual(contact.count(), 1)
|
botswana-harvard/edc-sms
|
edc_sms/tests/test_create_contact.py
|
Python
|
gpl-2.0
| 667
|
from core import Machine, Node
from core.conditions import Condition as C
from pprint import pprint
class A(Node):
on = False
foo = 'Wibble'
def get_conditions(self):
conds = [
# No state provided defaults State.CHANGED
C('B', 'on', True),
# Also condition should be set False
C('X', 'on', False),
]
return conds
class B(Node):
on = False
def get_conditions(self):
'''
Returns the conditions to be set for the node.
When this state is completely true, the Node is
completed and a state is performed.
In this case. A.foo = 'bar' would validate this
condition, validating this Node.
'''
return [
C('A', 'foo', 'bar')
]
class D(Node):
on = False
class E(Node):
on = False
class X(Node):
on = False
def cond_B_on_True(self, node):
'''
This node condition is set to
change. B<C:0> should react. B should be
valid.
'''
self.log('callback X cond_B_on_True', node)
# a = self.nodes.get('A')
# a.foo = 'bar'
def get_conditions(self):
return [
C('B', 'on', True, callback=self.cond_B_on_True),
C('F', 'on', True)
]
class G(Node):
on = False
def get_conditions(self):
'''
        G simply reacts to F.on being False.
        Since False is the default state, G
        should activate as soon as F is exposed.
'''
return [
C('F', 'on', False)
]
class F(Node):
on = False
class H(Node):
on = True
_conditions = (
C('A', 'on', True),
C('B', 'on', True),
C('D', 'on', True),
C('E', 'on', True),
C('F', 'on', True),
C('G', 'on', True),
# C('H', 'on', True),
)
def expose(*args, **kw):
print 'got expose', kw.get('node')
def valid(*args, **kw):
print 'Valid: ', kw.get('node')
def run():
machine = Machine('example')
nodes = [A(),B(), X(), G()]
machine.events.listen('expose', expose)
machine.events.listen('valid', valid)
machine.start(nodes)
# B:True
# A<C:0>
# X<C:0> A.foo = 'bar'
# B<C:0> +
b = machine.nodes.get('B')
b.set('on',True)
b = machine.nodes.get('B')
b.set('red','dwarf')
machine.activate_node( F() )
f = machine.nodes.get('F')
f.set('on', True)
x2 = machine.activate_node( X() )
x2.set('on',True)
machine.activate_node( D() )
h = machine.activate_node( H() )
machine.activate_node( E() )
machine.activate_node( G() )
# [<demo.example.A object at 0xb7181bac>, <demo.example.B object at 0xb7181c4c>, <demo.example.X object at 0xb7181d4c>]
print 'register.nodes'
pprint(machine.register.nodes)
# {'A': {}, 'X': {}, 'B': {}}
print 'register.conditions'
pprint( machine.register.conditions )
# {'B.on:changed': ['A', 'X'], 'A.foo:bar': ['B'], 'X.on:False': ['A']}
import pdb; pdb.set_trace()
    # dispatch change
|
Strangemother/python-state-machine
|
scratch/machine/demo/example.py
|
Python
|
mit
| 3,087
|
import sys, os
project = u'phpDocumentor'
copyright = u'2013, Mike van Riel'
version = '2.1'
release = '2.1.0'
sys.path.append(os.path.abspath('../../.exts'))
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.ifconfig', 'plantuml']
templates_path = ['.']
source_suffix = '.rst'
master_doc = 'index'
exclude_patterns = ['.build']
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
html_title = 'phpDocumentor'
#html_favicon = None
html_static_path = ['../../.static']
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'phpDocumentor'
# -- Options for LaTeX output --------------------------------------------------
latex_paper_size = 'a4'
#latex_font_size = '10pt'
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'phpDocumentor.tex', u'phpDocumentor', u'Mike van Riel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'phpDocumentor', u'phpDocumentor', [u'Mike van Riel'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'phpDocumentor'
epub_author = u'Mike van Riel'
epub_publisher = u'Mike van Riel'
epub_copyright = u'2012, Mike van Riel'
epub_scheme = 'http://www.phpdoc.org'
epub_identifier = 'http://www.phpdoc.org'
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# UML diagramming tool
plantuml = ['java', '-jar', '.exts/plantuml.jar']
plantuml_latex_output_format = 'pdf'
|
Targoniy/-json-schema-for-trip-api
|
vendor/phpdocumentor/phpdocumentor/docs/.templates/bare/conf.py
|
Python
|
mit
| 2,977
|
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2019 Jonathan Dent, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import bpy
from ..utils import util
class ASAOV_PT_panel_base(object):
bl_context = "view_layer"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
@classmethod
def poll(cls, context):
renderer = context.scene.render
return renderer.engine == 'APPLESEED_RENDER'
class ASAOV_PT_aovs(bpy.types.Panel, ASAOV_PT_panel_base):
COMPAT_ENGINES = {'APPLESEED_RENDER'}
bl_label = "Render Passes"
def draw(self, context):
layout = self.layout
asr_scene_props = context.scene.appleseed
col = layout.column(align=True)
col.prop(asr_scene_props, "diffuse_aov", text="Diffuse", toggle=True)
col.prop(asr_scene_props, "direct_diffuse_aov", text="Direct Diffuse", toggle=True)
col.prop(asr_scene_props, "indirect_diffuse_aov", text="Indirect Diffuse", toggle=True)
layout.separator()
col = layout.column(align=True)
col.prop(asr_scene_props, "glossy_aov", text="Glossy", toggle=True)
col.prop(asr_scene_props, "direct_glossy_aov", text="Direct Glossy", toggle=True)
col.prop(asr_scene_props, "indirect_glossy_aov", text="Indirect Glossy", toggle=True)
layout.separator()
col = layout.column(align=True)
col.prop(asr_scene_props, "albedo_aov", text="Albedo", toggle=True)
col.prop(asr_scene_props, "emission_aov", text="Emission", toggle=True)
layout.separator()
col = layout.column(align=True)
col.prop(asr_scene_props, "npr_shading_aov", text="NPR Shading", toggle=True)
col.prop(asr_scene_props, "npr_contour_aov", text="NPR Contour", toggle=True)
layout.separator()
col = layout.column(align=True)
col.prop(asr_scene_props, "normal_aov", text="Normals", toggle=True)
col.prop(asr_scene_props, "position_aov", text="Position", toggle=True)
col.prop(asr_scene_props, "uv_aov", text="UV Coordinates", toggle=True)
col.prop(asr_scene_props, "depth_aov", text="Depth", toggle=True)
col.prop(asr_scene_props, "screen_space_velocity_aov", text="Screen Space Velocity", toggle=True)
col.prop(asr_scene_props, "pixel_time_aov", text="Pixel Time", toggle=True)
col.prop(asr_scene_props, "pixel_variation_aov", text="Pixel Variation", toggle=True)
col.prop(asr_scene_props, "pixel_sample_count_aov", text="Sample Count", toggle=True)
col.prop(asr_scene_props, "invalid_samples_aov", text="Invalid Samples", toggle=True)
layout.separator()
col = layout.column(align=True)
col.prop(asr_scene_props, "cryptomatte_object_aov", text="Cryptomatte Object", toggle=True)
col.prop(asr_scene_props, "cryptomatte_material_aov", text="Cryptomatte Material", toggle=True)
def register():
util.safe_register_class(ASAOV_PT_aovs)
def unregister():
util.safe_unregister_class(ASAOV_PT_aovs)
|
dictoon/blenderseed
|
ui/scene.py
|
Python
|
mit
| 4,201
|
# Copyright (c) 2016-2017 Rocky Bernstein
# Copyright (c) 2000-2002 by hartmut Goebel <hartmut@goebel.noris.de>
# Copyright (c) 1999 John Aycock
from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG
from uncompyle6.parser import PythonParserSingle
from uncompyle6.parsers.parse24 import Python24Parser
class Python23Parser(Python24Parser):
def __init__(self, debug_parser=PARSER_DEFAULT_DEBUG):
super(Python24Parser, self).__init__(debug_parser)
self.customized = {}
def p_misc23(self, args):
'''
# Python 2.4 only adds something like the below for if 1:
# However we will just treat it as a noop (which of course messes up
        # simple verify of bytecode).
# See also below in reduce_is_invalid where we check that the JUMP_FORWARD
# target matches the COME_FROM target
stmt ::= if1_stmt
if1_stmt ::= JUMP_FORWARD JUMP_IF_FALSE THEN POP_TOP COME_FROM
stmts
JUMP_FORWARD COME_FROM POP_TOP COME_FROM
# Used to keep semantic positions the same across later versions
# of Python
_while1test ::= SETUP_LOOP JUMP_FORWARD JUMP_IF_FALSE POP_TOP COME_FROM
while1stmt ::= _while1test l_stmts_opt JUMP_BACK
POP_TOP POP_BLOCK COME_FROM
while1stmt ::= _while1test l_stmts_opt JUMP_BACK
COME_FROM POP_TOP POP_BLOCK COME_FROM
list_compr ::= BUILD_LIST_0 DUP_TOP LOAD_ATTR designator list_iter del_stmt
list_for ::= expr _for designator list_iter JUMP_BACK come_froms POP_TOP JUMP_BACK
lc_body ::= LOAD_NAME expr CALL_FUNCTION_1 POP_TOP
lc_body ::= LOAD_FAST expr CALL_FUNCTION_1 POP_TOP
lc_body ::= LOAD_NAME expr LIST_APPEND
lc_body ::= LOAD_FAST expr LIST_APPEND
'''
def add_custom_rules(self, tokens, customize):
super(Python23Parser, self).add_custom_rules(tokens, customize)
def reduce_is_invalid(self, rule, ast, tokens, first, last):
invalid = super(Python24Parser,
self).reduce_is_invalid(rule, ast,
tokens, first, last)
if invalid:
return invalid
# FIXME: this code never gets called...
lhs = rule[0]
if lhs == 'nop_stmt':
return not int(tokens[first].pattr) == tokens[last].offset
return False
class Python23ParserSingle(Python23Parser, PythonParserSingle):
pass
if __name__ == '__main__':
# Check grammar
p = Python23Parser()
p.checkGrammar()
p.dumpGrammar()
# local variables:
# tab-width: 4
|
moagstar/python-uncompyle6
|
uncompyle6/parsers/parse23.py
|
Python
|
mit
| 2,672
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2017 University of Oxford
#
# This file is part of msprime.
#
# msprime is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# msprime is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with msprime. If not, see <http://www.gnu.org/licenses/>.
#
"""
Module responsible for generating and reading tree files.
"""
from __future__ import division
from __future__ import print_function
import collections
import itertools
import json
import sys
import base64
try:
import numpy as np
_numpy_imported = True
except ImportError:
_numpy_imported = False
import _msprime
import msprime.drawing as drawing
import msprime.provenance as provenance
import msprime.tables as tables
from _msprime import NODE_IS_SAMPLE
NULL_NODE = -1
NULL_POPULATION = -1
NULL_MUTATION = -1
IS_PY2 = sys.version_info[0] < 3
def check_numpy():
if not _numpy_imported:
raise RuntimeError("numpy is required for this operation.")
CoalescenceRecord = collections.namedtuple(
"CoalescenceRecord",
["left", "right", "node", "children", "time", "population"])
# TODO this interface is rubbish. Should have much better printing options.
# TODO we should be use __slots__ here probably.
class SimpleContainer(object):
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return repr(self.__dict__)
class Node(SimpleContainer):
"""
A :ref:`node <sec_node_table_definition>` in a tree sequence.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar id: The integer ID of this node. Varies from 0 to
:attr:`.TreeSequence.num_nodes` - 1.
:vartype id: int
:ivar flags: The bitwise flags for this node.
:vartype flags: int
:ivar time: The birth time of the individual represented by this node.
:vartype time: float
:ivar population: The integer ID of the population that this node was born in.
:vartype population: int
:ivar metadata: The :ref:`metadata <sec_metadata_definition>` for this node.
:vartype metadata: bytes
"""
def __init__(
self, id_=None, flags=0, time=0, population=NULL_POPULATION, metadata=""):
self.id = id_
self.time = time
self.population = population
self.metadata = metadata
self.flags = flags
def is_sample(self):
"""
Returns True if this node is a sample. This value is derived from the
``flag`` variable.
:rtype: bool
"""
return self.flags & NODE_IS_SAMPLE
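# A minimal usage sketch of the flag handling described in the Node docstring
# above: is_sample() is derived from the ``flags`` bit field, so a hand-built
# Node with the NODE_IS_SAMPLE bit set reports as a sample.  The IDs and times
# below are invented purely for illustration.
def _example_node_flags():
    sample_node = Node(id_=0, flags=NODE_IS_SAMPLE, time=0.0)
    internal_node = Node(id_=1, flags=0, time=1.5)
    # is_sample() returns the masked flag value, so truthiness is what matters.
    return bool(sample_node.is_sample()), bool(internal_node.is_sample())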
class Edge(SimpleContainer):
"""
An :ref:`edge <sec_edge_table_definition>` in a tree sequence.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar left: The left coordinate of this edge.
:vartype left: float
:ivar right: The right coordinate of this edge.
:vartype right: float
:ivar parent: The integer ID of the parent node for this edge.
To obtain further information about a node with a given ID, use
:meth:`.TreeSequence.node`.
:vartype parent: int
:ivar child: The integer ID of the child node for this edge.
To obtain further information about a node with a given ID, use
:meth:`.TreeSequence.node`.
:vartype child: int
"""
def __init__(self, left, right, parent, child):
self.left = left
self.right = right
self.parent = parent
self.child = child
def __repr__(self):
return "{{left={:.3f}, right={:.3f}, parent={}, child={}}}".format(
self.left, self.right, self.parent, self.child)
class Site(SimpleContainer):
"""
A :ref:`site <sec_site_table_definition>` in a tree sequence.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar id: The integer ID of this site. Varies from 0 to
:attr:`.TreeSequence.num_sites` - 1.
:vartype id: int
:ivar position: The floating point location of this site in genome coordinates.
Ranges from 0 (inclusive) to :attr:`.TreeSequence.sequence_length`
(exclusive).
:vartype position: float
:ivar ancestral_state: The ancestral state at this site (i.e., the state
inherited by nodes, unless mutations occur).
:vartype ancestral_state: str
:ivar metadata: The :ref:`metadata <sec_metadata_definition>` for this site.
:vartype metadata: bytes
:ivar mutations: The list of mutations at this site. Mutations
within a site are returned in the order they are specified in the
underlying :class:`.MutationTable`.
:vartype mutations: list[:class:`.Mutation`]
"""
def __init__(self, id_, position, ancestral_state, mutations, metadata):
self.id = id_
self.position = position
self.ancestral_state = ancestral_state
self.mutations = mutations
self.metadata = metadata
class Mutation(SimpleContainer):
"""
A :ref:`mutation <sec_mutation_table_definition>` in a tree sequence.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar id: The integer ID of this mutation. Varies from 0 to
:attr:`.TreeSequence.num_mutations` - 1.
:vartype id: int
:ivar site: The integer ID of the site that this mutation occurs at. To obtain
further information about a site with a given ID use
:meth:`.TreeSequence.site`.
:vartype site: int
:ivar node: The integer ID of the first node that inherits this mutation.
To obtain further information about a node with a given ID, use
:meth:`.TreeSequence.node`.
:vartype node: int
:ivar derived_state: The derived state for this mutation. This is the state
inherited by nodes in the subtree rooted at this mutation's node, unless
another mutation occurs.
:vartype derived_state: str
:ivar parent: The integer ID of this mutation's parent mutation. When multiple
mutations occur at a site along a path in the tree, mutations must
record the mutation that is immediately above them. If the mutation does
not have a parent, this is equal to the :const:`NULL_MUTATION` (-1).
To obtain further information about a mutation with a given ID, use
:meth:`.TreeSequence.mutation`.
:vartype parent: int
:ivar metadata: The :ref:`metadata <sec_metadata_definition>` for this site.
:vartype metadata: bytes
"""
def __init__(self, id_, site, node, derived_state, parent, metadata):
self.id = id_
self.site = site
self.node = node
self.derived_state = derived_state
self.parent = parent
self.metadata = metadata
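# A minimal sketch of the parent-mutation chaining described in the Mutation
# docstring above: when two mutations occur at the same site along one path in
# the tree, the younger one records the older one as its parent.  All IDs and
# states below are invented for illustration only.
def _example_mutation_parent_chain():
    older = Mutation(id_=0, site=0, node=4, derived_state="1",
                     parent=NULL_MUTATION, metadata=b"")
    younger = Mutation(id_=1, site=0, node=2, derived_state="0",
                       parent=older.id, metadata=b"")
    return older, younger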
class Migration(SimpleContainer):
"""
A :ref:`migration <sec_migration_table_definition>` in a tree sequence.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar left: The left end of the genomic interval covered by this
migration (inclusive).
:vartype left: float
:ivar right: The right end of the genomic interval covered by this migration
(exclusive).
:vartype right: float
:ivar node: The integer ID of the node involved in this migration event.
To obtain further information about a node with a given ID, use
:meth:`.TreeSequence.node`.
:vartype node: int
:ivar source: The source population ID.
:vartype source: int
:ivar dest: The destination population ID.
:vartype dest: int
:ivar time: The time at which this migration occurred.
:vartype time: float
"""
def __init__(self, left, right, node, source, dest, time):
self.left = left
self.right = right
self.node = node
self.source = source
self.dest = dest
self.time = time
class Variant(SimpleContainer):
"""
A variant represents the observed variation among the samples
for a given site. A variant consists (a) of a reference to the
:class:`.Site` instance in question; (b) the **alleles** that may be
observed at the samples for this site; and (c) the **genotypes**
mapping sample IDs to the observed alleles.
Each element in the ``alleles`` tuple is a string, representing the
actual observed state for a given sample. The first element of this
tuple is guaranteed to be the same as the site's ``ancestral_state`` value.
The list of alleles is also guaranteed not to contain any duplicates.
However, allelic values may be listed that are not referred to by any
samples. For example, if we have a site that is fixed for the derived state
(i.e., we have a mutation over the tree root), all genotypes will be 1, but
the alleles list will be equal to ``('0', '1')``. Other than the
ancestral state being the first allele, the alleles are listed in
no particular order, and the ordering should not be relied upon.
The ``genotypes`` represent the observed allelic states for each sample,
such that ``var.alleles[var.genotypes[j]]`` gives the string allele
for sample ID ``j``. Thus, the elements of the genotypes array are
indexes into the ``alleles`` list. The genotypes are provided in this
way via a numpy array to enable efficient calculations.
Modifying the attributes in this class will have **no effect** on the
underlying tree sequence data.
:ivar site: The site object for this variant.
:vartype site: :class:`.Site`
:ivar alleles: A tuple of the allelic values that may be observed at the
samples at the current site. The first element of this tuple is always
the site's ancestral state.
:vartype alleles: tuple(str)
:ivar genotypes: An array of indexes into the list ``alleles``, giving the
state of each sample at the current site.
:vartype genotypes: numpy.ndarray
"""
def __init__(self, site, alleles, genotypes):
self.site = site
self.alleles = alleles
self.genotypes = genotypes
# Deprecated aliases to avoid breaking existing code.
self.position = site.position
self.index = site.id
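# A minimal sketch of the genotype decoding rule described in the Variant
# docstring above: ``var.alleles[var.genotypes[j]]`` is the allele observed for
# sample ``j``.  The site and genotypes below are invented for illustration; in
# real use Variant objects come from TreeSequence.variants() and ``genotypes``
# is a numpy array rather than a plain list.
def _example_decode_variant():
    site = Site(id_=0, position=0.5, ancestral_state="0", mutations=[], metadata=b"")
    var = Variant(site=site, alleles=("0", "1"), genotypes=[0, 1, 1])
    return [var.alleles[g] for g in var.genotypes]  # -> ["0", "1", "1"]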
class Edgeset(SimpleContainer):
def __init__(self, left, right, parent, children):
self.left = left
self.right = right
self.parent = parent
self.children = children
def __repr__(self):
return "{{left={:.3f}, right={:.3f}, parent={}, children={}}}".format(
self.left, self.right, self.parent, self.children)
class Provenance(SimpleContainer):
def __init__(self, id_=None, timestamp=None, record=None):
self.id = id_
self.timestamp = timestamp
self.record = record
def add_deprecated_mutation_attrs(site, mutation):
"""
Add in attributes for the older deprecated way of defining
mutations. These attributes will be removed in future releases
and are deliberately undocumented in version 0.5.0.
"""
mutation.position = site.position
mutation.index = site.id
return mutation
class SparseTree(object):
"""
A SparseTree is a single tree in a :class:`.TreeSequence`. The SparseTree
implementation differs from most tree implementations by using **integer
node IDs** to refer to nodes rather than objects. Thus, when we wish to
find the parent of the node with ID '0', we use ``tree.parent(0)``, which
returns another integer. If '0' does not have a parent in the current tree
(e.g., if it is a root), then the special value :const:`.NULL_NODE`
(:math:`-1`) is returned. The children of a node are found using the
:meth:`.children` method. To obtain information about a particular node,
one may either use ``tree.tree_sequence.node(u)`` to obtain the
corresponding :class:`Node` instance, or use the :meth:`.time` or
:meth:`.population` shorthands. Tree traversals in various orders
are possible using the :meth:`.SparseTree.nodes` iterator.
Sparse trees are not intended to be instantiated directly, and are
obtained as part of a :class:`.TreeSequence` using the
:meth:`.trees` method.
"""
def __init__(self, ll_sparse_tree, tree_sequence):
self._ll_sparse_tree = ll_sparse_tree
self._tree_sequence = tree_sequence
@property
def tree_sequence(self):
"""
Returns the tree sequence that this tree is from.
:return: The parent tree sequence for this tree.
:rtype: :class:`.TreeSequence`
"""
return self._tree_sequence
def get_branch_length(self, u):
# Deprecated alias for branch_length
return self.branch_length(u)
def branch_length(self, u):
"""
Returns the length of the branch (in generations) joining the
specified node to its parent. This is equivalent to
>>> tree.time(tree.parent(u)) - tree.time(u)
Note that this is not related to the value returned by
:attr:`.length`, which describes the length of the interval
covered by the tree in genomic coordinates.
:param int u: The node of interest.
:return: The branch length from u to its parent.
:rtype: float
"""
return self.time(self.get_parent(u)) - self.time(u)
def get_total_branch_length(self):
# Deprecated alias for total_branch_length
return self.total_branch_length
@property
def total_branch_length(self):
"""
Returns the sum of all the branch lengths in this tree (in
units of generations). This is equivalent to
>>> sum(
>>> tree.branch_length(u) for u in tree.nodes()
>>> if u not in self.roots)
:return: The sum of all the branch lengths in this tree.
:rtype: float
"""
return sum(
self.get_branch_length(u) for u in self.nodes() if u not in self.roots)
def get_mrca(self, u, v):
# Deprecated alias for mrca
return self.mrca(u, v)
def mrca(self, u, v):
"""
Returns the most recent common ancestor of the specified nodes.
:param int u: The first node.
:param int v: The second node.
:return: The most recent common ancestor of u and v.
:rtype: int
"""
return self._ll_sparse_tree.get_mrca(u, v)
def get_tmrca(self, u, v):
# Deprecated alias for tmrca
return self.tmrca(u, v)
def tmrca(self, u, v):
"""
Returns the time of the most recent common ancestor of the specified
nodes. This is equivalent to::
>>> tree.time(tree.mrca(u, v))
:param int u: The first node.
:param int v: The second node.
:return: The time of the most recent common ancestor of u and v.
:rtype: float
"""
return self.get_time(self.get_mrca(u, v))
def get_parent(self, u):
# Deprecated alias for parent
return self.parent(u)
def parent(self, u):
"""
Returns the parent of the specified node. Returns
the :const:`.NULL_NODE` if u is the root or is not a node in
the current tree.
:param int u: The node of interest.
:return: The parent of u.
:rtype: int
"""
return self._ll_sparse_tree.get_parent(u)
# Quintuply linked tree structure.
def left_child(self, u):
return self._ll_sparse_tree.get_left_child(u)
def right_child(self, u):
return self._ll_sparse_tree.get_right_child(u)
def left_sib(self, u):
return self._ll_sparse_tree.get_left_sib(u)
def right_sib(self, u):
return self._ll_sparse_tree.get_right_sib(u)
# TODO do we also have right_root?
@property
def left_root(self):
return self._ll_sparse_tree.get_left_root()
def get_children(self, u):
# Deprecated alias for self.children
return self.children(u)
def children(self, u):
"""
Returns the children of the specified node ``u`` as a tuple of integer node IDs.
If ``u`` is a leaf, return the empty tuple.
:param int u: The node of interest.
:return: The children of ``u`` as a tuple of integers
:rtype: tuple(int)
"""
return self._ll_sparse_tree.get_children(u)
def get_time(self, u):
# Deprecated alias for self.time
return self.time(u)
def time(self, u):
"""
Returns the time of the specified node in generations.
Equivalent to ``tree.tree_sequence.node(u).time``.
:param int u: The node of interest.
:return: The time of u.
:rtype: float
"""
return self._ll_sparse_tree.get_time(u)
def get_population(self, u):
# Deprecated alias for self.population
return self.population(u)
def population(self, u):
"""
Returns the population associated with the specified node.
Equivalent to ``tree.tree_sequence.node(u).population``.
:param int u: The node of interest.
:return: The ID of the population associated with node u.
:rtype: int
"""
return self._ll_sparse_tree.get_population(u)
def is_internal(self, u):
"""
Returns True if the specified node is not a leaf. A node is internal
if it has one or more children in the current tree.
:param int u: The node of interest.
:return: True if u is not a leaf node.
:rtype: bool
"""
return not self.is_leaf(u)
def is_leaf(self, u):
"""
Returns True if the specified node is a leaf. A node :math:`u` is a
leaf if it has zero children.
:param int u: The node of interest.
:return: True if u is a leaf node.
:rtype: bool
"""
return len(self.children(u)) == 0
def is_sample(self, u):
"""
Returns True if the specified node is a sample. A node :math:`u` is a
sample if it has been marked as a sample in the parent tree sequence.
:param int u: The node of interest.
:return: True if u is a sample.
:rtype: bool
"""
return bool(self._ll_sparse_tree.is_sample(u))
@property
def num_nodes(self):
"""
Returns the number of nodes in the sparse tree.
:rtype: int
"""
return self._ll_sparse_tree.get_num_nodes()
@property
def num_roots(self):
"""
The number of roots in this tree, as defined in the :attr:`.roots` attribute.
Requires O(number of roots) time.
:rtype: int
"""
return self._ll_sparse_tree.get_num_roots()
@property
def roots(self):
"""
The list of roots in this tree. A root is defined as a unique endpoint of
the paths starting at samples. We can define the set of roots as follows:
.. code-block:: python
roots = set()
for u in tree_sequence.samples():
while tree.parent(u) != msprime.NULL_NODE:
u = tree.parent(u)
roots.add(u)
# roots is now the set of all roots in this tree.
assert sorted(roots) == sorted(tree.roots)
The roots of the tree are returned in a list, in no particular order.
Requires O(number of roots) time.
:return: The list of roots in this tree.
:rtype: list
"""
roots = []
u = self.left_root
while u != NULL_NODE:
roots.append(u)
u = self.right_sib(u)
return roots
def get_root(self):
# Deprecated alias for self.root
return self.root
@property
def root(self):
"""
The root of this tree. If the tree contains multiple roots, a ValueError is
raised indicating that the :attr:`.roots` attribute should be used instead.
:return: The root node.
:rtype: int
:raises: :class:`ValueError` if this tree contains more than one root.
"""
root = self.left_root
if root != NULL_NODE and self.right_sib(root) != NULL_NODE:
raise ValueError("More than one root exists. Use tree.roots instead")
return root
def get_index(self):
# Deprecated alias for self.index
return self.index
@property
def index(self):
"""
Returns the index this tree occupies in the parent tree sequence.
This index is zero based, so the first tree in the sequence has index 0.
:return: The index of this tree.
:rtype: int
"""
return self._ll_sparse_tree.get_index()
def get_interval(self):
# Deprecated alias for self.interval
return self.interval
@property
def interval(self):
"""
Returns the coordinates of the genomic interval that this tree
represents the history of. The interval is returned as a tuple
:math:`(l, r)` and is a half-open interval such that the left
coordinate is inclusive and the right coordinate is exclusive. This
tree therefore applies to all genomic locations :math:`x` such that
:math:`l \leq x < r`.
:return: A tuple (l, r) representing the left-most (inclusive)
and right-most (exclusive) coordinates of the genomic region
covered by this tree.
:rtype: tuple
"""
return self._ll_sparse_tree.get_left(), self._ll_sparse_tree.get_right()
def get_length(self):
# Deprecated alias for self.length
return self.length
@property
def length(self):
"""
Returns the length of the genomic interval that this tree represents.
This is defined as :math:`r - l`, where :math:`(l, r)` is the genomic
interval returned by :attr:`.interval`.
:return: The length of the genomic interval covered by this tree.
:rtype: int
"""
left, right = self.get_interval()
return right - left
# The sample_size (or num_samples) is really a property of the tree sequence,
# and so we should provide access to this via a tree.tree_sequence.num_samples
# property access. However, we can't just remove the method as a lot of code
# may depend on it. To complicate things a bit more, sample_size has been
# changed to num_samples elsewhere for consistency. We can't do this here
# because there is already a num_samples method which returns the number of
# samples below a particular node. The best thing to do is probably to
# undocument the sample_size property, but keep it around for ever.
def get_sample_size(self):
# Deprecated alias for self.sample_size
return self.sample_size
@property
def sample_size(self):
"""
Returns the sample size for this tree. This is the number of sample
nodes in the tree.
:return: The number of sample nodes in the tree.
:rtype: int
"""
return self._ll_sparse_tree.get_sample_size()
def draw(
self, path=None, width=None, height=None,
node_labels=None, node_colours=None,
mutation_labels=None, mutation_colours=None,
format=None):
"""
Returns a drawing of this tree.
When working in a Jupyter notebook, use the ``IPython.display.SVG``
function to display the SVG output from this function inline in the notebook::
>>> SVG(tree.draw())
The unicode format uses unicode `box drawing characters
<https://en.wikipedia.org/wiki/Box-drawing_character>`_ to render the tree.
This allows rendered trees to be printed out to the terminal::
>>> print(tree.draw(format="unicode"))
6
┏━┻━┓
┃ 5
┃ ┏━┻┓
┃ ┃ 4
┃ ┃ ┏┻┓
3 0 1 2
The ``node_labels`` argument allows the user to specify custom labels
for nodes, or no labels at all::
>>> print(tree.draw(format="unicode", node_labels={}))
┃
┏━┻━┓
┃ ┃
┃ ┏━┻┓
┃ ┃ ┃
┃ ┃ ┏┻┓
┃ ┃ ┃ ┃
:param str path: The path to the file to write the output. If None, do not
write to file.
:param int width: The width of the image in pixels. If not specified, either
defaults to the minimum size required to depict the tree (text formats)
or 200 pixels.
:param int height: The height of the image in pixels. If not specified, either
defaults to the minimum size required to depict the tree (text formats)
or 200 pixels.
:param map node_labels: If specified, show custom labels for the nodes
that are present in the map. Any nodes not specified in the map will
not have a node label.
:param map node_colours: If specified, show custom colours for nodes. (Only
supported in the SVG format.)
:param str format: The format of the returned image. Currently supported
are 'svg', 'ascii' and 'unicode'.
:return: A representation of this tree in the requested format.
:rtype: str
"""
output = drawing.draw_tree(
self, format=format, width=width, height=height,
node_labels=node_labels, node_colours=node_colours,
mutation_labels=mutation_labels, mutation_colours=mutation_colours)
if path is not None:
with open(path, "w") as f:
f.write(output)
return output
def get_num_mutations(self):
return self.num_mutations
@property
def num_mutations(self):
"""
Returns the total number of mutations across all sites on this tree.
:return: The total number of mutations over all sites on this tree.
:rtype: int
"""
return sum(len(site.mutations) for site in self.sites())
@property
def num_sites(self):
"""
Returns the number of sites on this tree.
:return: The number of sites on this tree.
:rtype: int
"""
return self._ll_sparse_tree.get_num_sites()
def sites(self):
"""
Returns an iterator over all the :ref:`sites <sec_site_table_definition>`
in this tree. Sites are returned in order of increasing ID
(and also position). See the :class:`Site` class for details on
the available fields for each site.
:return: An iterator over all sites in this tree.
:rtype: iter(:class:`.Site`)
"""
# TODO change the low-level API to just return the IDs of the sites.
for ll_site in self._ll_sparse_tree.get_sites():
_, _, _, id_, _ = ll_site
yield self.tree_sequence.site(id_)
def mutations(self):
"""
Returns an iterator over all the
:ref:`mutations <sec_mutation_table_definition>` in this tree.
Mutations are returned in order of nondecreasing site ID.
See the :class:`Mutation` class for details on the available fields for
each mutation.
The returned iterator is equivalent to iterating over all sites
and all mutations in each site, i.e.::
>>> for site in tree.sites():
>>> for mutation in site.mutations:
>>> yield mutation
:return: An iterator over all mutations in this tree.
:rtype: iter(:class:`.Mutation`)
"""
for site in self.sites():
for mutation in site.mutations:
yield add_deprecated_mutation_attrs(site, mutation)
def get_leaves(self, u):
# Deprecated alias for samples. See the discussion in the get_num_leaves
# method for why this method is here and why it is semantically incorrect.
# The 'leaves' iterator below correctly returns the leaves below a given
# node.
return self.samples(u)
def leaves(self, u=None):
"""
Returns an iterator over all the leaves in this tree that are
underneath the specified node. If u is not specified, return all leaves
in the tree.
:param int u: The node of interest.
:return: An iterator over all leaves in the subtree rooted at u.
:rtype: iterator
"""
roots = [u]
if u is None:
roots = self.roots
for root in roots:
for v in self.nodes(root):
if self.is_leaf(v):
yield v
def _sample_generator(self, u):
for v in self.nodes(u):
if self.is_sample(v):
yield v
def samples(self, u=None):
"""
Returns an iterator over all the samples in this tree that are
underneath the specified node. If u is a sample, it is included in the
returned iterator. If u is not specified, return all samples in the tree.
If the :meth:`.TreeSequence.trees` method is called with
``sample_lists=True``, this method uses an efficient algorithm to find
the samples. If not, a simple traversal based method is used.
:param int u: The node of interest.
:return: An iterator over all samples in the subtree rooted at u.
:rtype: iterator
"""
roots = [u]
if u is None:
roots = self.roots
for root in roots:
if self._ll_sparse_tree.get_flags() & _msprime.SAMPLE_LISTS:
for v in _msprime.SampleListIterator(self._ll_sparse_tree, root):
yield v
else:
for v in self._sample_generator(root):
yield v
def get_num_leaves(self, u):
# Deprecated alias for num_samples. The method name is inaccurate
# as this will count the number of _samples_, not leaves. This is only provided to
# avoid breaking existing code and should not be used in new code. We could
# change this method to be semantically correct and just count the
# number of leaves we hit in the leaves() iterator. However, this would
# have the undesirable effect of making code that depends on the constant
# time performance of get_num_leaves many times slower. So, the best option
# is to leave this method as is, and to slowly deprecate it out. Once this
# has been removed, we might add in a ``num_leaves`` method that returns the
# length of the leaves() iterator as one would expect.
return self.num_samples(u)
def get_num_samples(self, u=None):
# Deprecated alias for num_samples.
return self.num_samples(u)
def num_samples(self, u=None):
"""
Returns the number of samples in this tree underneath the specified
node (including the node itself). If u is not specified return
the total number of samples in the tree.
If the :meth:`.TreeSequence.trees` method is called with
``sample_counts=True`` this method is a constant time operation. If not,
a slower traversal based algorithm is used to count the samples.
:param int u: The node of interest.
:return: The number of samples in the subtree rooted at u.
:rtype: int
"""
roots = [u]
if u is None:
roots = self.roots
return sum(self._ll_sparse_tree.get_num_samples(u) for u in roots)
def get_num_tracked_leaves(self, u):
# Deprecated alias for num_tracked_samples. The method name is inaccurate
# as this will count the number of tracked _samples_. This is only provided to
# avoid breaking existing code and should not be used in new code.
return self.num_tracked_samples(u)
def get_num_tracked_samples(self, u=None):
# Deprecated alias for num_tracked_samples
return self.num_tracked_samples(u)
def num_tracked_samples(self, u=None):
"""
Returns the number of samples in the set specified in the
``tracked_samples`` parameter of the :meth:`.TreeSequence.trees` method
underneath the specified node. If the input node is not specified,
return the total number of tracked samples in the tree.
This is a constant time operation.
:param int u: The node of interest.
:return: The number of samples within the set of tracked samples in
the subtree rooted at u.
:rtype: int
:raises RuntimeError: if the :meth:`.TreeSequence.trees`
method is not called with ``sample_counts=True``.
"""
roots = [u]
if u is None:
roots = self.roots
if not (self._ll_sparse_tree.get_flags() & _msprime.SAMPLE_COUNTS):
raise RuntimeError(
"The get_num_tracked_samples method is only supported "
"when sample_counts=True.")
return sum(self._ll_sparse_tree.get_num_tracked_samples(root) for root in roots)
def _preorder_traversal(self, u):
stack = [u]
while len(stack) > 0:
v = stack.pop()
if self.is_internal(v):
stack.extend(reversed(self.get_children(v)))
yield v
def _postorder_traversal(self, u):
stack = [u]
k = NULL_NODE
while stack:
v = stack[-1]
if self.is_internal(v) and v != k:
stack.extend(reversed(self.get_children(v)))
else:
k = self.get_parent(v)
yield stack.pop()
def _inorder_traversal(self, u):
# TODO add a nonrecursive version of the inorder traversal.
children = self.get_children(u)
mid = len(children) // 2
for c in children[:mid]:
for v in self._inorder_traversal(c):
yield v
yield u
for c in children[mid:]:
for v in self._inorder_traversal(c):
yield v
def _levelorder_traversal(self, u):
queue = collections.deque([u])
while queue:
v = queue.popleft()
if self.is_internal(v):
queue.extend(self.get_children(v))
yield v
def nodes(self, root=None, order="preorder"):
"""
Returns an iterator over the nodes in this tree. If the root parameter
is provided, iterate over the nodes in the subtree rooted at this
node. If this is None, iterate over all nodes. If the order parameter
is provided, iterate over the nodes in the specified traversal order.
:param int root: The root of the subtree we are traversing.
:param str order: The traversal ordering. Currently 'preorder',
'inorder', 'postorder' and 'levelorder' ('breadthfirst')
are supported.
:return: An iterator over the nodes in the tree in some traversal order.
:rtype: iterator
"""
methods = {
"preorder": self._preorder_traversal,
"inorder": self._inorder_traversal,
"postorder": self._postorder_traversal,
"levelorder": self._levelorder_traversal,
"breadthfirst": self._levelorder_traversal
}
try:
iterator = methods[order]
except KeyError:
raise ValueError("Traversal ordering '{}' not supported".format(order))
roots = [root]
if root is None:
roots = self.roots
for u in roots:
for v in iterator(u):
yield v
def newick(self, precision=14, time_scale=1):
"""
Returns a `newick encoding <https://en.wikipedia.org/wiki/Newick_format>`_
of this tree. Leaf nodes are labelled with their numerical ID + 1,
and internal nodes are not labelled.
This method is currently primarily for ms-compatibility and
is not intended as a consistent means of data interchange.
:param int precision: The numerical precision with which branch lengths are
printed.
:param float time_scale: A value which all branch lengths are multiplied by.
:return: A newick representation of this tree.
:rtype: str
"""
s = self._ll_sparse_tree.get_newick(precision=precision, time_scale=time_scale)
if not IS_PY2:
s = s.decode()
return s
@property
def parent_dict(self):
return self.get_parent_dict()
def get_parent_dict(self):
pi = {
u: self.parent(u) for u in range(self.num_nodes)
if self.parent(u) != NULL_NODE}
return pi
def __str__(self):
return str(self.get_parent_dict())
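# A minimal navigation sketch using only the SparseTree methods documented
# above.  The ``tree`` argument is assumed to come from TreeSequence.trees();
# the two sample IDs are invented for illustration.
def _example_tree_navigation(tree):
    # Parent lookup for every reachable node, skipping roots (NULL_NODE parents).
    parents = {u: tree.parent(u) for u in tree.nodes() if tree.parent(u) != NULL_NODE}
    # Time of the most recent common ancestor of samples 0 and 1, and the sum of
    # all branch lengths in the tree.
    return parents, tree.tmrca(0, 1), tree.total_branch_length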
def load(path):
"""
Loads a tree sequence from the specified file path. This file must be in the
:ref:`HDF5 file format <sec_hdf5_file_format>` produced by the
:meth:`.TreeSequence.dump` method.
:param str path: The file path of the HDF5 file containing the
tree sequence we wish to load.
:return: The tree sequence object containing the information
stored in the specified file path.
:rtype: :class:`msprime.TreeSequence`
"""
return TreeSequence.load(path)
def load_tables(
nodes, edges, migrations=None, sites=None, mutations=None,
provenances=None, sequence_length=0):
"""
Loads the tree sequence data from the specified table objects, and
returns the resulting :class:`.TreeSequence` object. These tables
must fulfil the properties required for an input tree sequence as
described in the :ref:`sec_valid_tree_sequence_requirements` section.
The ``sequence_length`` parameter determines the
:attr:`.TreeSequence.sequence_length` of the returned tree sequence. If it
is 0 or not specified, the value is taken to be the maximum right
coordinate of the input edges. This parameter is useful in degenerate
situations (such as when there are zero edges), but can usually be ignored.
:param NodeTable nodes: The :ref:`node table <sec_node_table_definition>`
(required).
:param EdgeTable edges: The :ref:`edge table <sec_edge_table_definition>`
(required).
:param MigrationTable migrations: The :ref:`migration table
<sec_migration_table_definition>` (optional).
:param SiteTable sites: The :ref:`site table <sec_site_table_definition>`
(optional; but if supplied, ``mutations`` must also be specified).
:param MutationTable mutations: The :ref:`mutation table
<sec_mutation_table_definition>` (optional; but if supplied, ``sites``
must also be specified).
:param ProvenanceTable provenances: The :ref:`provenance table
<sec_provenance_table_definition>` (optional).
:param float sequence_length: The sequence length of the returned tree sequence. If
not supplied or zero this will be inferred from the set of edges.
:return: A :class:`.TreeSequence` consistent with the specified tables.
:rtype: TreeSequence
"""
# TODO update the low-level module to accept None and remove this
kwargs = {"nodes": nodes, "edges": edges, "sequence_length": sequence_length}
if migrations is not None:
kwargs["migrations"] = migrations
if sites is not None:
kwargs["sites"] = sites
if mutations is not None:
kwargs["mutations"] = mutations
if provenances is not None:
kwargs["provenances"] = provenances
return TreeSequence.load_tables(**kwargs)
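# A minimal sketch of load_tables(): two sample nodes joined under a root, with
# the sequence length inferred from the maximum right coordinate of the edges
# (1.0 here).  All coordinates and times are invented for illustration, and the
# add_row keyword arguments are assumed from the table classes in msprime.tables.
def _example_load_tables():
    nodes = tables.NodeTable()
    nodes.add_row(flags=NODE_IS_SAMPLE, time=0)   # sample 0
    nodes.add_row(flags=NODE_IS_SAMPLE, time=0)   # sample 1
    nodes.add_row(flags=0, time=1)                # root, node 2
    edges = tables.EdgeTable()
    edges.add_row(left=0, right=1, parent=2, child=0)
    edges.add_row(left=0, right=1, parent=2, child=1)
    return load_tables(nodes=nodes, edges=edges)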
def parse_nodes(source, strict=True, encoding='utf8', base64_metadata=True):
"""
Parse the specified file-like object containing a whitespace delimited
description of a node table and returns the corresponding :class:`NodeTable`
instance. See the :ref:`node text format <sec_node_text_format>` section
for the details of the required format and the
:ref:`node table definition <sec_node_table_definition>` section for the
required properties of the contents.
See :func:`.load_text` for a detailed explanation of the ``strict``
parameter.
:param stream source: The file-like object containing the text.
:param bool strict: If True, require strict tab delimiting (default). If
False, a relaxed whitespace splitting algorithm is used.
:param string encoding: Encoding used for text representation.
:param bool base64_metadata: If True, metadata is encoded using Base64
encoding; otherwise, as plain text.
"""
sep = None
if strict:
sep = "\t"
# Read the header and find the indexes of the required fields.
table = tables.NodeTable()
header = source.readline().strip("\n").split(sep)
is_sample_index = header.index("is_sample")
time_index = header.index("time")
population_index = None
metadata_index = None
try:
population_index = header.index("population")
except ValueError:
pass
try:
metadata_index = header.index("metadata")
except ValueError:
pass
for line in source:
tokens = line.split(sep)
if len(tokens) >= 2:
is_sample = int(tokens[is_sample_index])
time = float(tokens[time_index])
flags = 0
if is_sample != 0:
flags |= NODE_IS_SAMPLE
population = NULL_POPULATION
if population_index is not None:
population = int(tokens[population_index])
metadata = b''
if metadata_index is not None and metadata_index < len(tokens):
metadata = tokens[metadata_index].encode(encoding)
if base64_metadata:
metadata = base64.b64decode(metadata)
table.add_row(
flags=flags, time=time, population=population, metadata=metadata)
return table
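# A minimal sketch of the node text format accepted above: strict tab
# delimiting with the required ``is_sample`` and ``time`` columns, and the
# optional columns omitted.  Python 3 string semantics are assumed for
# io.StringIO.
def _example_parse_nodes():
    import io
    text = io.StringIO(
        "is_sample\ttime\n"
        "1\t0.0\n"
        "1\t0.0\n"
        "0\t1.0\n")
    return parse_nodes(text)  # NodeTable with two sample rows and one non-sample row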
def parse_edges(source, strict=True):
"""
Parse the specified file-like object containing a whitespace delimited
description of a edge table and returns the corresponding :class:`EdgeTable`
instance. See the :ref:`edge text format <sec_edge_text_format>` section
for the details of the required format and the
:ref:`edge table definition <sec_edge_table_definition>` section for the
required properties of the contents.
See :func:`.load_text` for a detailed explanation of the ``strict`` parameter.
:param stream source: The file-like object containing the text.
:param bool strict: If True, require strict tab delimiting (default). If
False, a relaxed whitespace splitting algorithm is used.
"""
sep = None
if strict:
sep = "\t"
table = tables.EdgeTable()
header = source.readline().strip("\n").split(sep)
left_index = header.index("left")
right_index = header.index("right")
parent_index = header.index("parent")
children_index = header.index("child")
for line in source:
tokens = line.split(sep)
if len(tokens) >= 4:
left = float(tokens[left_index])
right = float(tokens[right_index])
parent = int(tokens[parent_index])
children = tuple(map(int, tokens[children_index].split(",")))
for child in children:
table.add_row(left=left, right=right, parent=parent, child=child)
return table
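# A minimal sketch of the edge text format accepted above.  Note that the
# ``child`` column may hold a comma separated list, which parse_edges expands
# into one edge row per child.
def _example_parse_edges():
    import io
    text = io.StringIO(
        "left\tright\tparent\tchild\n"
        "0.0\t1.0\t2\t0,1\n")
    return parse_edges(text)  # EdgeTable with two rows sharing parent 2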
def parse_sites(source, strict=True, encoding='utf8', base64_metadata=True):
"""
Parse the specified file-like object containing a whitespace delimited
description of a site table and returns the corresponding :class:`SiteTable`
instance. See the :ref:`site text format <sec_site_text_format>` section
for the details of the required format and the
:ref:`site table definition <sec_site_table_definition>` section for the
required properties of the contents.
See :func:`.load_text` for a detailed explanation of the ``strict``
parameter.
:param stream source: The file-like object containing the text.
:param bool strict: If True, require strict tab delimiting (default). If
False, a relaxed whitespace splitting algorithm is used.
:param string encoding: Encoding used for text representation.
:param bool base64_metadata: If True, metadata is encoded using Base64
encoding; otherwise, as plain text.
"""
sep = None
if strict:
sep = "\t"
header = source.readline().strip("\n").split(sep)
position_index = header.index("position")
ancestral_state_index = header.index("ancestral_state")
metadata_index = None
try:
metadata_index = header.index("metadata")
except ValueError:
pass
table = tables.SiteTable()
for line in source:
tokens = line.split(sep)
if len(tokens) >= 2:
position = float(tokens[position_index])
ancestral_state = tokens[ancestral_state_index]
metadata = b''
if metadata_index is not None and metadata_index < len(tokens):
metadata = tokens[metadata_index].encode(encoding)
if base64_metadata:
metadata = base64.b64decode(metadata)
table.add_row(
position=position, ancestral_state=ancestral_state, metadata=metadata)
return table
def parse_mutations(source, strict=True, encoding='utf8', base64_metadata=True):
"""
Parse the specified file-like object containing a whitespace delimited
description of a mutation table and returns the corresponding :class:`MutationTable`
instance. See the :ref:`mutation text format <sec_mutation_text_format>` section
for the details of the required format and the
:ref:`mutation table definition <sec_mutation_table_definition>` section for the
required properties of the contents.
See :func:`.load_text` for a detailed explanation of the ``strict``
parameter.
:param stream source: The file-like object containing the text.
:param bool strict: If True, require strict tab delimiting (default). If
False, a relaxed whitespace splitting algorithm is used.
:param string encoding: Encoding used for text representation.
:param bool base64_metadata: If True, metadata is encoded using Base64
encoding; otherwise, as plain text.
"""
sep = None
if strict:
sep = "\t"
header = source.readline().strip("\n").split(sep)
site_index = header.index("site")
node_index = header.index("node")
derived_state_index = header.index("derived_state")
parent_index = None
parent = NULL_MUTATION
try:
parent_index = header.index("parent")
except ValueError:
pass
metadata_index = None
try:
metadata_index = header.index("metadata")
except ValueError:
pass
table = tables.MutationTable()
for line in source:
tokens = line.split(sep)
if len(tokens) >= 3:
site = int(tokens[site_index])
node = int(tokens[node_index])
derived_state = tokens[derived_state_index]
if parent_index is not None:
parent = int(tokens[parent_index])
metadata = b''
if metadata_index is not None and metadata_index < len(tokens):
metadata = tokens[metadata_index].encode(encoding)
if base64_metadata:
metadata = base64.b64decode(metadata)
table.add_row(
site=site, node=node, derived_state=derived_state, parent=parent,
metadata=metadata)
return table
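# A minimal sketch covering parse_sites() and parse_mutations() together, since
# a mutation row refers to a site by its row index.  The optional ``parent`` and
# ``metadata`` columns are omitted, so parse_mutations falls back to
# NULL_MUTATION for the parent.  The final input line has no trailing newline
# because string-valued columns are not stripped by the parsers above.
def _example_parse_sites_and_mutations():
    import io
    site_table = parse_sites(io.StringIO(
        "position\tancestral_state\n"
        "0.5\t0"))
    mutation_table = parse_mutations(io.StringIO(
        "site\tnode\tderived_state\n"
        "0\t2\t1"))
    return site_table, mutation_table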
def load_text(nodes, edges, sites=None, mutations=None, sequence_length=0, strict=True,
encoding='utf8', base64_metadata=True):
"""
Parses the tree sequence data from the specified file-like objects, and
returns the resulting :class:`.TreeSequence` object. The format
for these files is documented in the :ref:`sec_text_file_format` section,
and is produced by the :meth:`.TreeSequence.dump_text` method. Further
properties required for an input tree sequence are described in the
:ref:`sec_valid_tree_sequence_requirements` section. This method is intended as a
convenient interface for importing external data into msprime; the HDF5
based file format used by :meth:`msprime.load` is many times more
efficient than this text format.
The ``nodes`` and ``edges`` parameters are mandatory and must be file-like
objects containing text with whitespace delimited columns, parsable by
:func:`parse_nodes` and :func:`parse_edges`, respectively. ``sites`` and
``mutations`` are optional, and must be parsable by :func:`parse_sites` and
:func:`parse_mutations`, respectively.
The ``sequence_length`` parameter determines the
:attr:`.TreeSequence.sequence_length` of the returned tree sequence. If it
is 0 or not specified, the value is taken to be the maximum right
coordinate of the input edges. This parameter is useful in degenerate
situations (such as when there are zero edges), but can usually be ignored.
The ``strict`` parameter controls the field delimiting algorithm that
is used. If ``strict`` is True (the default), we require exactly one
tab character separating each field. If ``strict`` is False, a more relaxed
whitespace delimiting algorithm is used, such that any run of whitespace
is regarded as a field separator. In most situations, ``strict=False``
is more convenient, but it can lead to error in certain situations. For
example, if a deletion is encoded in the mutation table this will not
be parseable when ``strict=False``.
After parsing the tables, :func:`sort_tables` is called to ensure that
the loaded tables satisfy the tree sequence :ref:`ordering requirements
<sec_valid_tree_sequence_requirements>`. Note that this may result in the
IDs of various entities changing from their positions in the input file.
:param stream nodes: The file-like object containing text describing a
:class:`.NodeTable`.
:param stream edges: The file-like object containing text
describing an :class:`.EdgeTable`.
:param stream sites: The file-like object containing text describing a
:class:`.SiteTable`.
:param stream mutations: The file-like object containing text
describing a :class:`MutationTable`.
:param float sequence_length: The sequence length of the returned tree sequence. If
not supplied or zero this will be inferred from the set of edges.
:param bool strict: If True, require strict tab delimiting (default). If
False, a relaxed whitespace splitting algorithm is used.
:param string encoding: Encoding used for text representation.
:param bool base64_metadata: If True, metadata is encoded using Base64
encoding; otherwise, as plain text.
:return: The tree sequence object containing the information
stored in the specified file paths.
:rtype: :class:`msprime.TreeSequence`
"""
node_table = parse_nodes(nodes, strict=strict, encoding=encoding,
base64_metadata=base64_metadata)
edge_table = parse_edges(edges, strict=strict)
site_table = tables.SiteTable()
mutation_table = tables.MutationTable()
if sites is not None:
site_table = parse_sites(sites, strict=strict, encoding=encoding,
base64_metadata=base64_metadata)
if mutations is not None:
mutation_table = parse_mutations(mutations, strict=strict, encoding=encoding,
base64_metadata=base64_metadata)
tables.sort_tables(
nodes=node_table, edges=edge_table, sites=site_table, mutations=mutation_table)
return load_tables(
nodes=node_table, edges=edge_table, sites=site_table, mutations=mutation_table,
sequence_length=sequence_length)
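# A minimal sketch tying the text parsers together via load_text(): the same
# two-leaf topology as the node and edge sketches above, supplied as file-like
# objects.  Python 3 string semantics are assumed for io.StringIO.
def _example_load_text():
    import io
    nodes = io.StringIO(
        "is_sample\ttime\n"
        "1\t0.0\n"
        "1\t0.0\n"
        "0\t1.0\n")
    edges = io.StringIO(
        "left\tright\tparent\tchild\n"
        "0.0\t1.0\t2\t0,1\n")
    return load_text(nodes=nodes, edges=edges)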
class TreeSequence(object):
"""
A single tree sequence, as defined by the :ref:`data model <sec_data_model>`.
A TreeSequence instance can be created from a set of
:ref:`tables <sec_table_definitions>` using :func:`.load_tables`; or loaded
from a set of text files using :func:`.load_text`; or, loaded from a
native file using :func:`load`.
TreeSequences are immutable. To change the data held in a particular
tree sequence, first output the information to a set of tables
(using :meth:`.dump_tables`), edit those tables using the
:ref:`tables api <sec_tables_api>`, and create a new tree sequence using
:func:`.load_tables`.
The :meth:`.trees` method iterates over all trees in a tree sequence, and
the :meth:`.variants` method iterates over all sites and their genotypes.
"""
def __init__(self, ll_tree_sequence):
self._ll_tree_sequence = ll_tree_sequence
@property
def ll_tree_sequence(self):
return self.get_ll_tree_sequence()
def get_ll_tree_sequence(self):
return self._ll_tree_sequence
@classmethod
def load(cls, path):
ts = _msprime.TreeSequence()
ts.load(path)
return TreeSequence(ts)
@classmethod
def load_tables(cls, **kwargs):
ts = _msprime.TreeSequence()
ts.load_tables(**kwargs)
return TreeSequence(ts)
def dump(self, path, zlib_compression=False):
"""
Writes the tree sequence to the specified file path.
:param str path: The file path to write the TreeSequence to.
:param bool zlib_compression: If True, use HDF5's native
compression when storing the data leading to smaller
file size. When loading, data will be decompressed
transparently, but load times will be significantly slower.
"""
self._ll_tree_sequence.dump(path, zlib_compression)
@property
def tables(self):
"""
A copy of the tables underlying this tree sequence. See also
:meth:`.dump_tables`.
:return: A :class:`.TableCollection` containing a copy of the
tables underlying this tree sequence.
:rtype: TableCollection
"""
return self.dump_tables()
def dump_tables(
self, nodes=None, edges=None, migrations=None, sites=None,
mutations=None, provenances=None):
"""
Copy the contents of the tables underlying the tree sequence to the
specified objects.
:param NodeTable nodes: The NodeTable to load the nodes into.
:param EdgeTable edges: The EdgeTable to load the edges into.
:param MigrationTable migrations: The MigrationTable to load the migrations into.
:param SiteTable sites: The SiteTable to load the sites into.
:param MutationTable mutations: The MutationTable to load the mutations into.
:param ProvenanceTable provenances: The ProvenanceTable to load the provenances
into.
:return: A :class:`.TableCollection` containing all tables underlying
the tree sequence.
:rtype: TableCollection
"""
# TODO document this and test the semantics to passing in new tables
# as well as returning the updated tables.
if nodes is None:
nodes = tables.NodeTable()
if edges is None:
edges = tables.EdgeTable()
if migrations is None:
migrations = tables.MigrationTable()
if sites is None:
sites = tables.SiteTable()
if mutations is None:
mutations = tables.MutationTable()
if provenances is None:
provenances = tables.ProvenanceTable()
self._ll_tree_sequence.dump_tables(
nodes=nodes, edges=edges, migrations=migrations, sites=sites,
mutations=mutations, provenances=provenances)
return tables.TableCollection(
nodes=nodes, edges=edges, migrations=migrations, sites=sites,
mutations=mutations, provenances=provenances)
def dump_text(
self, nodes=None, edges=None, sites=None, mutations=None, provenances=None,
precision=6, encoding='utf8', base64_metadata=True):
"""
Writes a text representation of the tables underlying the tree sequence
to the specified connections.
If Base64 encoding is not used, then metadata will be saved directly, possibly
resulting in errors reading the tables back in if metadata includes whitespace.
:param stream nodes: The file-like object (having a .write() method) to write
the NodeTable to.
:param stream edges: The file-like object to write the EdgeTable to.
:param stream sites: The file-like object to write the SiteTable to.
:param stream mutations: The file-like object to write the MutationTable to.
:param stream provenances: The file-like object to write the ProvenanceTable to.
:param int precision: The number of digits of precision.
:param string encoding: Encoding used for text representation.
:param bool base64_metadata: If True, metadata is encoded using Base64
encoding; otherwise, as plain text.
"""
if nodes is not None:
print(
"id", "is_sample", "time", "population", "metadata", sep="\t",
file=nodes)
for node in self.nodes():
metadata = node.metadata
if base64_metadata:
metadata = base64.b64encode(metadata).decode(encoding)
row = (
"{id:d}\t"
"{is_sample:d}\t"
"{time:.{precision}f}\t"
"{population:d}\t"
"{metadata}").format(
precision=precision, id=node.id,
is_sample=node.is_sample(), time=node.time,
population=node.population,
metadata=metadata)
print(row, file=nodes)
if edges is not None:
print("left", "right", "parent", "child", sep="\t", file=edges)
for edge in self.edges():
row = (
"{left:.{precision}f}\t"
"{right:.{precision}f}\t"
"{parent:d}\t"
"{child:d}").format(
precision=precision, left=edge.left, right=edge.right,
parent=edge.parent, child=edge.child)
print(row, file=edges)
if sites is not None:
print("position", "ancestral_state", "metadata", sep="\t", file=sites)
for site in self.sites():
metadata = site.metadata
if base64_metadata:
metadata = base64.b64encode(metadata).decode(encoding)
row = (
"{position:.{precision}f}\t"
"{ancestral_state}\t"
"{metadata}").format(
precision=precision, position=site.position,
ancestral_state=site.ancestral_state,
metadata=metadata)
print(row, file=sites)
if mutations is not None:
print(
"site", "node", "derived_state", "parent", "metadata",
sep="\t", file=mutations)
for site in self.sites():
for mutation in site.mutations:
metadata = mutation.metadata
if base64_metadata:
metadata = base64.b64encode(metadata).decode(encoding)
row = (
"{site}\t"
"{node}\t"
"{derived_state}\t"
"{parent}\t"
"{metadata}").format(
site=mutation.site, node=mutation.node,
derived_state=mutation.derived_state,
parent=mutation.parent,
metadata=metadata)
print(row, file=mutations)
if provenances is not None:
print("id", "timestamp", "record", sep="\t", file=provenances)
for provenance in self.provenances():
row = (
"{id}\t"
"{timestamp}\t"
"{record}\t").format(
id=provenance.id,
timestamp=provenance.timestamp,
record=provenance.record)
print(row, file=provenances)
# num_samples was originally called sample_size, and so we must keep sample_size
# around as a deprecated alias.
@property
def num_samples(self):
"""
Returns the number of samples in this tree sequence. This is the number
of sample nodes in each tree.
:return: The number of sample nodes in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_samples()
@property
def sample_size(self):
# Deprecated alias for num_samples
return self.num_samples
def get_sample_size(self):
# Deprecated alias for num_samples
return self.num_samples
@property
def sequence_length(self):
"""
Returns the sequence length in this tree sequence. This defines the
genomic scale over which tree coordinates are defined. Given a
tree sequence with a sequence length :math:`L`, the constituent
trees will be defined over the half-closed interval
:math:`(0, L]`. Each tree then covers some subset of this
interval --- see :meth:`msprime.SparseTree.get_interval` for details.
:return: The length of the sequence in this tree sequence in bases.
:rtype: float
"""
return self.get_sequence_length()
def get_sequence_length(self):
return self._ll_tree_sequence.get_sequence_length()
@property
def num_edges(self):
"""
Returns the number of :ref:`edges <sec_edge_table_definition>` in this
tree sequence.
:return: The number of edges in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_edges()
def get_num_trees(self):
# Deprecated alias for self.num_trees
return self.num_trees
@property
def num_trees(self):
"""
Returns the number of distinct trees in this tree sequence. This
is equal to the number of trees returned by the :meth:`.trees`
method.
:return: The number of trees in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_trees()
def get_num_sites(self):
# Deprecated alias for self.num_sites
return self._ll_tree_sequence.get_num_sites()
@property
def num_sites(self):
"""
Returns the number of sites in this tree sequence.
:return: The number of sites in this tree sequence.
:rtype: int
"""
return self.get_num_sites()
def get_num_mutations(self):
# Deprecated alias for self.num_mutations
return self.num_mutations
@property
def num_mutations(self):
"""
Returns the number of :ref:`mutations <sec_mutation_table_definition>`
in this tree sequence.
:return: The number of mutations in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_mutations()
def get_num_nodes(self):
# Deprecated alias for self.num_nodes
return self.num_nodes
@property
def num_nodes(self):
"""
Returns the number of :ref:`nodes <sec_node_table_definition>` in
this tree sequence.
:return: The number of nodes in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_nodes()
@property
def num_provenances(self):
"""
Returns the number of :ref:`provenances <sec_provenance_table_definition>`
in this tree sequence.
:return: The number of provenances in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_provenances()
@property
def num_migrations(self):
"""
Returns the number of :ref:`migrations <sec_migration_table_definition>`
in this tree sequence.
:return: The number of migrations in this tree sequence.
:rtype: int
"""
return self._ll_tree_sequence.get_num_migrations()
def migrations(self):
"""
Returns an iterator over all the
:ref:`migrations <sec_migration_table_definition>` in this tree sequence.
Migrations are returned in nondecreasing order of the ``time`` value.
:return: An iterator over all migrations.
:rtype: iter(:class:`.Migration`)
"""
for j in range(self._ll_tree_sequence.get_num_migrations()):
yield Migration(*self._ll_tree_sequence.get_migration(j))
def provenances(self):
for j in range(self.num_provenances):
yield self.provenance(j)
def nodes(self):
"""
Returns an iterator over all the :ref:`nodes <sec_node_table_definition>`
in this tree sequence.
:return: An iterator over all nodes.
:rtype: iter(:class:`.Node`)
"""
for j in range(self.num_nodes):
yield self.node(j)
def edges(self):
"""
Returns an iterator over all the :ref:`edges <sec_edge_table_definition>`
in this tree sequence. Edges are returned in the order required
for a :ref:`valid tree sequence <sec_valid_tree_sequence_requirements>`. So,
edges are guaranteed to be ordered such that (a) all parents with a
given ID are contiguous; (b) edges are returned in non-decreasing
order of parent time; (c) within the edges for a given parent, edges
are sorted first by child ID and then by left coordinate.
:return: An iterator over all edges.
:rtype: iter(:class:`.Edge`)
"""
for j in range(self.num_edges):
left, right, parent, child = self._ll_tree_sequence.get_edge(j)
yield Edge(left=left, right=right, parent=parent, child=child)
def edgesets(self):
# TODO the order that these records are returned in is not well specified.
# Hopefully this does not matter, and we can just state that the ordering
# should not be depended on.
children = collections.defaultdict(set)
active_edgesets = {}
for (left, right), edges_out, edges_in in self.edge_diffs():
# Complete and return any edgesets that are affected by this tree
# transition
parents = iter(edge.parent for edge in itertools.chain(edges_out, edges_in))
for parent in parents:
if parent in active_edgesets:
edgeset = active_edgesets.pop(parent)
edgeset.right = left
edgeset.children = sorted(children[parent])
yield edgeset
for edge in edges_out:
children[edge.parent].remove(edge.child)
for edge in edges_in:
children[edge.parent].add(edge.child)
# Update the active edgesets
for edge in itertools.chain(edges_out, edges_in):
if len(children[edge.parent]) > 0 and edge.parent not in active_edgesets:
active_edgesets[edge.parent] = Edgeset(left, right, edge.parent, [])
for parent in active_edgesets.keys():
edgeset = active_edgesets[parent]
edgeset.right = self.sequence_length
edgeset.children = sorted(children[edgeset.parent])
yield edgeset
def edge_diffs(self):
iterator = _msprime.TreeDiffIterator(self._ll_tree_sequence)
for interval, edge_tuples_out, edge_tuples_in in iterator:
edges_out = [Edge(*e) for e in edge_tuples_out]
edges_in = [Edge(*e) for e in edge_tuples_in]
yield interval, edges_out, edges_in
def sites(self):
"""
Returns an iterator over all the :ref:`sites <sec_site_table_definition>`
in this tree sequence. Sites are returned in order of increasing ID
(and also position). See the :class:`Site` class for details on
the available fields for each site.
:return: An iterator over all sites.
:rtype: iter(:class:`.Site`)
"""
for j in range(self.num_sites):
yield self.site(j)
def mutations(self):
"""
Returns an iterator over all the
:ref:`mutations <sec_mutation_table_definition>` in this tree sequence.
Mutations are returned in order of nondecreasing site ID.
See the :class:`Mutation` class for details on the available fields for
each mutation.
The returned iterator is equivalent to iterating over all sites
and all mutations in each site, i.e.::
>>> for site in tree_sequence.sites():
>>> for mutation in site.mutations:
>>> yield mutation
:return: An iterator over all mutations in this tree sequence.
:rtype: iter(:class:`.Mutation`)
"""
for site in self.sites():
for mutation in site.mutations:
yield add_deprecated_mutation_attrs(site, mutation)
def breakpoints(self):
"""
Returns an iterator over the breakpoints along the chromosome,
including the two extreme points 0 and L. This is equivalent to
>>> [0] + [t.get_interval()[1] for t in self.trees()]
although we do not build an explicit list.
:return: An iterator over all the breakpoints along the simulated
sequence.
:rtype: iter
"""
yield 0
for t in self.trees():
yield t.get_interval()[1]
def first(self):
"""
Returns the first tree in this :class:`.TreeSequence`. To iterate over all
trees in the sequence, use the :meth:`.trees` method.
Currently does not support the extra options for the :meth:`.trees` method.
:return: The first tree in this tree sequence.
:rtype: :class:`.SparseTree`.
"""
return next(self.trees())
def trees(
self, tracked_samples=None, sample_counts=True, sample_lists=False,
tracked_leaves=None, leaf_counts=None, leaf_lists=None):
"""
Returns an iterator over the trees in this tree sequence. Each value
returned in this iterator is an instance of :class:`.SparseTree`.
The ``sample_counts`` and ``sample_lists`` parameters control the
features that are enabled for the resulting trees. If ``sample_counts``
is True, then it is possible to count the number of samples underneath
a particular node in constant time using the :meth:`.get_num_samples`
method. If ``sample_lists`` is True a more efficient algorithm is
used in the :meth:`.SparseTree.samples` method.
The ``tracked_samples`` parameter can be used to efficiently count the
number of samples in a given set that exist in a particular subtree
using the :meth:`.SparseTree.get_num_tracked_samples` method. It is an
error to use the ``tracked_samples`` parameter when the ``sample_counts``
flag is False.
:warning: Do not store the results of this iterator in a list!
For performance reasons, the same underlying object is used
for every tree returned which will most likely lead to unexpected
behaviour.
:param list tracked_samples: The list of samples to be tracked and
counted using the :meth:`.SparseTree.get_num_tracked_samples`
method.
:param bool sample_counts: If True, support constant time sample counts
via the :meth:`.SparseTree.get_num_samples` and
:meth:`.SparseTree.get_num_tracked_samples` methods.
:param bool sample_lists: If True, provide more efficient access
            to the samples beneath a given node using the
:meth:`.SparseTree.samples` method.
:return: An iterator over the sparse trees in this tree sequence.
:rtype: iter
"""
# tracked_leaves, leaf_counts and leaf_lists are deprecated aliases
# for tracked_samples, sample_counts and sample_lists respectively.
# These are left over from an older version of the API when leaves
# and samples were synonymous.
if tracked_leaves is not None:
tracked_samples = tracked_leaves
if leaf_counts is not None:
sample_counts = leaf_counts
if leaf_lists is not None:
sample_lists = leaf_lists
flags = 0
if sample_counts:
flags |= _msprime.SAMPLE_COUNTS
elif tracked_samples is not None:
raise ValueError("Cannot set tracked_samples without sample_counts")
if sample_lists:
flags |= _msprime.SAMPLE_LISTS
kwargs = {"flags": flags}
if tracked_samples is not None:
# TODO remove this when we allow numpy arrays in the low-level API.
kwargs["tracked_samples"] = list(tracked_samples)
ll_sparse_tree = _msprime.SparseTree(self._ll_tree_sequence, **kwargs)
iterator = _msprime.SparseTreeIterator(ll_sparse_tree)
sparse_tree = SparseTree(ll_sparse_tree, self)
for _ in iterator:
yield sparse_tree
def haplotypes(self):
"""
Returns an iterator over the haplotypes resulting from the trees
and mutations in this tree sequence as a string.
The iterator returns a total of :math:`n` strings, each of which
contains :math:`s` characters (:math:`n` is the sample size
returned by :attr:`msprime.TreeSequence.num_samples` and
:math:`s` is the number of sites returned by
:attr:`msprime.TreeSequence.num_sites`). The first
string returned is the haplotype for sample `0`, and so on.
For a given haplotype ``h``, the value of ``h[j]`` is the observed
allelic state at site ``j``.
See also the :meth:`variants` iterator for site-centric access
to sample genotypes.
This method is only supported for single-letter alleles.
:return: An iterator over the haplotype strings for the samples in
this tree sequence.
:rtype: iter
:raises: LibraryError if called on a tree sequence containing
            multi-letter alleles.
"""
hapgen = _msprime.HaplotypeGenerator(self._ll_tree_sequence)
j = 0
# Would use range here except for Python 2.
while j < self.num_samples:
yield hapgen.get_haplotype(j)
j += 1
def variants(self, as_bytes=False):
"""
Returns an iterator over the variants in this tree sequence. See the
:class:`Variant` class for details on the fields of each returned
object. By default the ``genotypes`` for the variants are numpy arrays,
corresponding to indexes into the ``alleles`` array. If the
``as_bytes`` parameter is true, these allelic values are recorded
directly into a bytes array.
.. note::
The ``as_bytes`` parameter is kept as a compatibility
option for older code. It is not the recommended way of
accessing variant data, and will be deprecated in a later
release. Another method will be provided to obtain the allelic
states for each site directly.
:param bool as_bytes: If True, the genotype values will be returned
as a Python bytes object. This is useful in certain situations
(i.e., directly printing the genotypes) or when numpy is
not available. Otherwise, genotypes are returned as a numpy
array (the default).
        :return: An iterator over all variants in this tree sequence.
:rtype: iter(:class:`Variant`)
"""
# See comments for the Variant type for discussion on why the
# present form was chosen.
check_numpy()
iterator = _msprime.VariantGenerator(self._ll_tree_sequence)
for site_id, genotypes, alleles in iterator:
site = self.site(site_id)
if as_bytes:
if any(len(allele) > 1 for allele in alleles):
raise ValueError(
"as_bytes only supported for single-letter alleles")
bytes_genotypes = np.empty(self.num_samples, dtype=np.uint8)
lookup = np.array([ord(a[0]) for a in alleles], dtype=np.uint8)
bytes_genotypes[:] = lookup[genotypes]
genotypes = bytes_genotypes.tobytes()
yield Variant(site, alleles, genotypes)
def genotype_matrix(self):
"""
Returns an :math:`m \\times n` numpy array of the genotypes in this
        tree sequence, where :math:`m` is the number of sites and :math:`n`
the number of samples. The genotypes are the indexes into the array
of ``alleles``, as described for the :class:`Variant` class. The value
        0 always corresponds to the ancestral state, and values > 0 represent
distinct derived states.
.. warning::
This method can consume a **very large** amount of memory! If
all genotypes are not needed at once, it is usually better to
access them sequentially using the :meth:`.variants` iterator.
:return: The full matrix of genotypes.
:rtype: numpy.ndarray (dtype=np.uint8)
"""
return self._ll_tree_sequence.get_genotype_matrix()
def get_pairwise_diversity(self, samples=None):
# Deprecated alias for self.pairwise_diversity
return self.pairwise_diversity(samples)
def pairwise_diversity(self, samples=None):
"""
Returns the value of :math:`\pi`, the pairwise nucleotide site diversity,
which is the average number of mutations that differ between a randomly
chosen pair of samples. If `samples` is specified, calculate the
diversity within this set.
.. note:: This method does not currently support sites that have more
than one mutation. Using it on such a tree sequence will raise
a LibraryError with an "Unsupported operation" message.
:param iterable samples: The set of samples within which we calculate
the diversity. If None, calculate diversity within the entire sample.
:return: The pairwise nucleotide site diversity.
:rtype: float
"""
if samples is None:
samples = self.samples()
return self._ll_tree_sequence.get_pairwise_diversity(list(samples))
def node(self, id_):
"""
Returns the :ref:`node <sec_node_table_definition>` in this tree sequence
with the specified ID.
:rtype: :class:`.Node`
"""
flags, time, population, metadata = self._ll_tree_sequence.get_node(id_)
return Node(
id_=id_, flags=flags, time=time, population=population, metadata=metadata)
def mutation(self, id_):
"""
Returns the :ref:`mutation <sec_mutation_table_definition>` in this tree sequence
with the specified ID.
:rtype: :class:`.Mutation`
"""
ll_mut = self._ll_tree_sequence.get_mutation(id_)
return Mutation(
id_=id_, site=ll_mut[0], node=ll_mut[1], derived_state=ll_mut[2],
parent=ll_mut[3], metadata=ll_mut[4])
def site(self, id_):
"""
Returns the :ref:`site <sec_site_table_definition>` in this tree sequence
with the specified ID.
:rtype: :class:`.Site`
"""
ll_site = self._ll_tree_sequence.get_site(id_)
pos, ancestral_state, ll_mutations, _, metadata = ll_site
mutations = [self.mutation(mut_id) for mut_id in ll_mutations]
return Site(
id_=id_, position=pos, ancestral_state=ancestral_state,
mutations=mutations, metadata=metadata)
def provenance(self, id_):
timestamp, record = self._ll_tree_sequence.get_provenance(id_)
return Provenance(id_=id_, timestamp=timestamp, record=record)
def get_samples(self, population_id=None):
# Deprecated alias for samples()
return self.samples(population_id)
def samples(self, population=None, population_id=None):
"""
Returns an array of the sample node IDs in this tree sequence. If the
``population`` parameter is specified, only return sample IDs from this
population.
:param int population: The population of interest. If None,
return all samples.
:param int population_id: Deprecated alias for ``population``.
:return: A numpy array of the node IDs for the samples of interest.
:rtype: numpy.ndarray (dtype=np.int32)
"""
if population is not None and population_id is not None:
raise ValueError(
"population_id and population are aliases. Cannot specify both")
if population_id is not None:
population = population_id
# TODO the low-level tree sequence should perform this operation natively
# and return a numpy array.
samples = self._ll_tree_sequence.get_samples()
if population is not None:
samples = [
u for u in samples if self.get_population(u) == population]
return np.array(samples, dtype=np.int32)
def write_vcf(self, output, ploidy=1, contig_id="1"):
"""
Writes a VCF formatted file to the specified file-like object. If a
ploidy value is supplied, allele values are combined among adjacent
samples to form a phased genotype of the required ploidy. For example,
if we have a ploidy of 2 and a sample of size 6, then we will have
3 diploid samples in the output, consisting of the combined alleles
for samples [0, 1], [2, 3] and [4, 5]. If we had alleles 011110 at
a particular variant, then we would output the genotypes 0|1, 1|1
and 1|0 in VCF. Sample names are generated by appending the index
to the prefix ``msp_`` such that we would have the sample names
``msp_0``, ``msp_1`` and ``msp_2`` in the running example.
Example usage:
>>> with open("output.vcf", "w") as vcf_file:
>>> tree_sequence.write_vcf(vcf_file, 2)
:param File output: The file-like object to write the VCF output.
:param int ploidy: The ploidy of the individual samples in the
            VCF. The sample size must be divisible by the ploidy.
:param str contig_id: The value of the CHROM column in the output VCF.
"""
if ploidy < 1:
raise ValueError("Ploidy must be >= sample size")
if self.get_sample_size() % ploidy != 0:
raise ValueError("Sample size must be divisible by ploidy")
converter = _msprime.VcfConverter(
self._ll_tree_sequence, ploidy=ploidy, contig_id=contig_id)
output.write(converter.get_header())
for record in converter:
output.write(record)
def simplify(self, samples=None, filter_zero_mutation_sites=True, map_nodes=False):
"""
Returns a simplified tree sequence that retains only the history of
the nodes given in the list ``samples``. If ``map_nodes`` is true,
also return a numpy array mapping the node IDs in this tree sequence to
        their node IDs in the simplified tree sequence. If a node ``u`` is not
present in the new tree sequence, the value of this mapping will be
NULL_NODE (-1).
In the returned tree sequence, the node with ID ``0`` corresponds to
``samples[0]``, node ``1`` corresponds to ``samples[1]``, and so on. Node
IDs in the returned tree sequence are then allocated sequentially
in time order. Note that this does **not** necessarily mean that nodes
in the returned tree sequence will be in strict time order (as we
may have internal or ancient samples).
If you wish to convert a set of tables that do not satisfy all
requirements for building a TreeSequence, then use
:func:`.simplify_tables()`.
:param list samples: The list of nodes for which to retain information. This
may be a numpy array (or array-like) object (dtype=np.int32).
:param bool filter_zero_mutation_sites: If True, remove any sites that have
no mutations in the simplified tree sequence. Defaults to True.
:param bool map_nodes: If True, return a tuple containing the resulting
tree sequence and a numpy array mapping node IDs in the current tree
sequence to their corresponding node IDs in the returned tree sequence.
If False (the default), return only the tree sequence object itself.
:return: The simplified tree sequence, or (if ``map_nodes`` is True)
a tuple containing the simplified tree sequence and a numpy array
mapping source node IDs to their corresponding IDs in the new tree
sequence.
:rtype: .TreeSequence or a (.TreeSequence, numpy.array) tuple
"""
check_numpy()
t = self.dump_tables()
if samples is None:
samples = self.get_samples()
node_map = tables.simplify_tables(
samples=samples, sequence_length=self.sequence_length,
nodes=t.nodes, edges=t.edges,
sites=t.sites, mutations=t.mutations,
filter_zero_mutation_sites=filter_zero_mutation_sites)
# TODO add simplify arguments here??
t.provenances.add_row(record=json.dumps(
provenance.get_provenance_dict("simplify", [])))
new_ts = load_tables(
nodes=t.nodes, edges=t.edges, migrations=t.migrations, sites=t.sites,
mutations=t.mutations, provenances=t.provenances,
sequence_length=self.sequence_length)
if map_nodes:
return new_ts, node_map
else:
return new_ts
############################################
#
# Deprecated APIs. These are either already unsupported, or will be unsupported in a
# later release.
#
############################################
def get_time(self, u):
# Deprecated. Use ts.node(u).time
if u < 0 or u >= self.get_num_nodes():
raise ValueError("ID out of bounds")
node = self.node(u)
return node.time
def get_population(self, u):
# Deprecated. Use ts.node(u).population
if u < 0 or u >= self.get_num_nodes():
raise ValueError("ID out of bounds")
node = self.node(u)
return node.population
def records(self):
# Deprecated. Use either ts.edges() or ts.edgesets().
t = [node.time for node in self.nodes()]
pop = [node.population for node in self.nodes()]
for e in self.edgesets():
yield CoalescenceRecord(
e.left, e.right, e.parent, e.children, t[e.parent], pop[e.parent])
# Unsupported old methods.
def get_num_records(self):
raise NotImplementedError(
"This method is no longer supported. Please use the "
"TreeSequence.num_edges if possible to work with edges rather "
"than coalescence records. If not, please use len(list(ts.edgesets())) "
"which should return the number of coalescence records, as previously "
"defined. Please open an issue on GitHub if this is "
"important for your workflow.")
def diffs(self):
raise NotImplementedError(
"This method is no longer supported. Please use the "
"TreeSequence.edge_diffs() method instead")
def newick_trees(self, precision=3, breakpoints=None, Ne=1):
raise NotImplementedError(
"This method is no longer supported. Please use the SparseTree.newick"
" method instead")
|
ashander/msprime
|
msprime/trees.py
|
Python
|
gpl-3.0
| 89,405
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import six
import sys
import collections
import math
import paddle.fluid as fluid
from op_test import OpTest
class TestDetectionMAPOp(OpTest):
def set_data(self):
self.class_num = 4
self.init_test_case()
self.mAP = [self.calc_map(self.tf_pos, self.tf_pos_lod)]
self.label = np.array(self.label).astype('float32')
self.detect = np.array(self.detect).astype('float32')
self.mAP = np.array(self.mAP).astype('float32')
if len(self.class_pos_count) > 0:
self.class_pos_count = np.array(self.class_pos_count).astype(
'int32')
self.true_pos = np.array(self.true_pos).astype('float32')
self.false_pos = np.array(self.false_pos).astype('float32')
self.has_state = np.array([1]).astype('int32')
self.inputs = {
'Label': (self.label, self.label_lod),
'DetectRes': (self.detect, self.detect_lod),
'HasState': self.has_state,
'PosCount': self.class_pos_count,
'TruePos': (self.true_pos, self.true_pos_lod),
'FalsePos': (self.false_pos, self.false_pos_lod)
}
else:
self.inputs = {
'Label': (self.label, self.label_lod),
'DetectRes': (self.detect, self.detect_lod),
}
self.attrs = {
'overlap_threshold': self.overlap_threshold,
'evaluate_difficult': self.evaluate_difficult,
'ap_type': self.ap_type,
'class_num': self.class_num
}
self.out_class_pos_count = np.array(self.out_class_pos_count).astype(
'int')
self.out_true_pos = np.array(self.out_true_pos).astype('float32')
self.out_false_pos = np.array(self.out_false_pos).astype('float32')
self.outputs = {
'MAP': self.mAP,
'AccumPosCount': self.out_class_pos_count,
'AccumTruePos': (self.out_true_pos, self.out_true_pos_lod),
'AccumFalsePos': (self.out_false_pos, self.out_false_pos_lod)
}
def init_test_case(self):
self.overlap_threshold = 0.3
self.evaluate_difficult = True
self.ap_type = "integral"
self.label_lod = [[2, 2]]
# label difficult xmin ymin xmax ymax
self.label = [[1, 0, 0.1, 0.1, 0.3, 0.3], [1, 1, 0.6, 0.6, 0.8, 0.8],
[2, 0, 0.3, 0.3, 0.6, 0.5], [1, 0, 0.7, 0.1, 0.9, 0.3]]
        # label score xmin ymin xmax ymax
self.detect_lod = [[3, 4]]
self.detect = [
[1, 0.3, 0.1, 0.0, 0.4, 0.3], [1, 0.7, 0.0, 0.1, 0.2, 0.3],
[1, 0.9, 0.7, 0.6, 0.8, 0.8], [2, 0.8, 0.2, 0.1, 0.4, 0.4],
[2, 0.1, 0.4, 0.3, 0.7, 0.5], [1, 0.2, 0.8, 0.1, 1.0, 0.3],
[3, 0.2, 0.8, 0.1, 1.0, 0.3]
]
# label score true_pos false_pos
self.tf_pos_lod = [[3, 4]]
self.tf_pos = [[1, 0.9, 1, 0], [1, 0.7, 1, 0], [1, 0.3, 0, 1],
[1, 0.2, 1, 0], [2, 0.8, 0, 1], [2, 0.1, 1, 0],
[3, 0.2, 0, 1]]
self.class_pos_count = []
self.true_pos_lod = [[]]
self.true_pos = [[]]
self.false_pos_lod = [[]]
self.false_pos = [[]]
def calc_map(self, tf_pos, tf_pos_lod):
mAP = 0.0
count = 0
def get_input_pos(class_pos_count, true_pos, true_pos_lod, false_pos,
false_pos_lod):
class_pos_count_dict = collections.Counter()
true_pos_dict = collections.defaultdict(list)
false_pos_dict = collections.defaultdict(list)
for i, count in enumerate(class_pos_count):
class_pos_count_dict[i] = count
cur_pos = 0
for i in range(len(true_pos_lod[0])):
start = cur_pos
cur_pos += true_pos_lod[0][i]
end = cur_pos
for j in range(start, end):
true_pos_dict[i].append(true_pos[j])
cur_pos = 0
for i in range(len(false_pos_lod[0])):
start = cur_pos
cur_pos += false_pos_lod[0][i]
end = cur_pos
for j in range(start, end):
false_pos_dict[i].append(false_pos[j])
return class_pos_count_dict, true_pos_dict, false_pos_dict
def get_output_pos(label_count, true_pos, false_pos):
label_number = self.class_num
out_class_pos_count = []
out_true_pos_lod = []
out_true_pos = []
out_false_pos_lod = []
out_false_pos = []
for i in range(label_number):
out_class_pos_count.append([label_count[i]])
true_pos_list = true_pos[i]
out_true_pos += true_pos_list
out_true_pos_lod.append(len(true_pos_list))
false_pos_list = false_pos[i]
out_false_pos += false_pos_list
out_false_pos_lod.append(len(false_pos_list))
return out_class_pos_count, out_true_pos, [
out_true_pos_lod
], out_false_pos, [out_false_pos_lod]
def get_accumulation(pos_list):
sorted_list = sorted(pos_list, key=lambda pos: pos[0], reverse=True)
sum = 0
accu_list = []
for (score, count) in sorted_list:
sum += count
accu_list.append(sum)
return accu_list
label_count, true_pos, false_pos = get_input_pos(
self.class_pos_count, self.true_pos, self.true_pos_lod,
self.false_pos, self.false_pos_lod)
for v in self.label:
label = v[0]
difficult = False if len(v) == 5 else v[1]
if self.evaluate_difficult:
label_count[label] += 1
elif not difficult:
label_count[label] += 1
for (label, score, tp, fp) in tf_pos:
true_pos[label].append([score, tp])
false_pos[label].append([score, fp])
for (label, label_pos_num) in six.iteritems(label_count):
if label_pos_num == 0: continue
if label not in true_pos:
count += 1
continue
label_true_pos = true_pos[label]
label_false_pos = false_pos[label]
accu_tp_sum = get_accumulation(label_true_pos)
accu_fp_sum = get_accumulation(label_false_pos)
precision = []
recall = []
for i in range(len(accu_tp_sum)):
precision.append(
float(accu_tp_sum[i]) /
float(accu_tp_sum[i] + accu_fp_sum[i]))
recall.append(float(accu_tp_sum[i]) / label_pos_num)
if self.ap_type == "11point":
max_precisions = [0.0] * 11
start_idx = len(accu_tp_sum) - 1
for j in range(10, -1, -1):
for i in range(start_idx, -1, -1):
if recall[i] < float(j) / 10.0:
start_idx = i
if j > 0:
max_precisions[j - 1] = max_precisions[j]
break
else:
if max_precisions[j] < precision[i]:
max_precisions[j] = precision[i]
for j in range(10, -1, -1):
mAP += max_precisions[j] / 11
count += 1
elif self.ap_type == "integral":
average_precisions = 0.0
prev_recall = 0.0
for i in range(len(accu_tp_sum)):
if math.fabs(recall[i] - prev_recall) > 1e-6:
average_precisions += precision[i] * \
math.fabs(recall[i] - prev_recall)
prev_recall = recall[i]
mAP += average_precisions
count += 1
pcnt, tp, tp_lod, fp, fp_lod = get_output_pos(label_count, true_pos,
false_pos)
self.out_class_pos_count = pcnt
self.out_true_pos = tp
self.out_true_pos_lod = tp_lod
self.out_false_pos = fp
self.out_false_pos_lod = fp_lod
if count != 0:
mAP /= count
return mAP
def setUp(self):
self.op_type = "detection_map"
self.set_data()
def test_check_output(self):
self.check_output()
class TestDetectionMAPOpSkipDiff(TestDetectionMAPOp):
def init_test_case(self):
super(TestDetectionMAPOpSkipDiff, self).init_test_case()
self.evaluate_difficult = False
self.tf_pos_lod = [[2, 4]]
# label score true_pos false_pos
self.tf_pos = [[1, 0.7, 1, 0], [1, 0.3, 0, 1], [1, 0.2, 1, 0],
[2, 0.8, 0, 1], [2, 0.1, 1, 0], [3, 0.2, 0, 1]]
class TestDetectionMAPOpWithoutDiff(TestDetectionMAPOp):
def init_test_case(self):
super(TestDetectionMAPOpWithoutDiff, self).init_test_case()
# label xmin ymin xmax ymax
self.label = [[1, 0.1, 0.1, 0.3, 0.3], [1, 0.6, 0.6, 0.8, 0.8],
[2, 0.3, 0.3, 0.6, 0.5], [1, 0.7, 0.1, 0.9, 0.3]]
class TestDetectionMAPOp11Point(TestDetectionMAPOp):
def init_test_case(self):
super(TestDetectionMAPOp11Point, self).init_test_case()
self.ap_type = "11point"
class TestDetectionMAPOpMultiBatch(TestDetectionMAPOp):
def init_test_case(self):
super(TestDetectionMAPOpMultiBatch, self).init_test_case()
self.class_pos_count = [0, 2, 1, 0]
self.true_pos_lod = [[0, 3, 2]]
self.true_pos = [[0.7, 1.], [0.3, 0.], [0.2, 1.], [0.8, 0.], [0.1, 1.]]
self.false_pos_lod = [[0, 3, 2]]
self.false_pos = [[0.7, 0.], [0.3, 1.], [0.2, 0.], [0.8, 1.], [0.1, 0.]]
class TestDetectionMAPOp11PointWithClassNoTP(TestDetectionMAPOp):
def init_test_case(self):
self.overlap_threshold = 0.3
self.evaluate_difficult = True
self.ap_type = "11point"
self.label_lod = [[2]]
# label difficult xmin ymin xmax ymax
self.label = [[2, 0, 0.3, 0.3, 0.6, 0.5], [1, 0, 0.7, 0.1, 0.9, 0.3]]
        # label score xmin ymin xmax ymax
self.detect_lod = [[1]]
self.detect = [[1, 0.2, 0.8, 0.1, 1.0, 0.3]]
# label score true_pos false_pos
self.tf_pos_lod = [[3, 4]]
self.tf_pos = [[1, 0.2, 1, 0]]
self.class_pos_count = []
self.true_pos_lod = [[]]
self.true_pos = [[]]
self.false_pos_lod = [[]]
self.false_pos = [[]]
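# Editor's sketch (not part of the original test): a stand-alone version of the
# "integral" average precision accumulated inside calc_map above, checked on a
# tiny hand-computed precision/recall curve.  The numbers are illustrative.
def _integral_average_precision(precisions, recalls):
    ap = 0.0
    prev_recall = 0.0
    for p, r in zip(precisions, recalls):
        ap += p * math.fabs(r - prev_recall)
        prev_recall = r
    return ap
# Precision 1.0 at recall 0.5 and 0.5 at recall 1.0:
# AP = 1.0 * 0.5 + 0.5 * 0.5 = 0.75
assert abs(_integral_average_precision([1.0, 0.5], [0.5, 1.0]) - 0.75) < 1e-9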
if __name__ == '__main__':
unittest.main()
|
luotao1/Paddle
|
python/paddle/fluid/tests/unittests/test_detection_map_op.py
|
Python
|
apache-2.0
| 11,509
|
"""
Support for the Tuya light.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.tuya/
"""
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_HS_COLOR, ENTITY_ID_FORMAT,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_COLOR, Light)
from homeassistant.components.tuya import DATA_TUYA, TuyaDevice
from homeassistant.util import color as colorutil
DEPENDENCIES = ['tuya']
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Tuya light platform."""
if discovery_info is None:
return
tuya = hass.data[DATA_TUYA]
dev_ids = discovery_info.get('dev_ids')
devices = []
for dev_id in dev_ids:
device = tuya.get_device_by_id(dev_id)
if device is None:
continue
devices.append(TuyaLight(device))
add_entities(devices)
class TuyaLight(TuyaDevice, Light):
"""Tuya light device."""
def __init__(self, tuya):
"""Init Tuya light device."""
super().__init__(tuya)
self.entity_id = ENTITY_ID_FORMAT.format(tuya.object_id())
@property
def brightness(self):
"""Return the brightness of the light."""
return self.tuya.brightness()
@property
def hs_color(self):
"""Return the hs_color of the light."""
return self.tuya.hs_color()
@property
def color_temp(self):
"""Return the color_temp of the light."""
color_temp = self.tuya.color_temp()
if color_temp is None:
return None
return colorutil.color_temperature_kelvin_to_mired(color_temp)
@property
def is_on(self):
"""Return true if light is on."""
return self.tuya.state()
@property
def min_mireds(self):
"""Return color temperature min mireds."""
return colorutil.color_temperature_kelvin_to_mired(
self.tuya.min_color_temp())
@property
def max_mireds(self):
"""Return color temperature max mireds."""
return colorutil.color_temperature_kelvin_to_mired(
self.tuya.max_color_temp())
def turn_on(self, **kwargs):
"""Turn on or control the light."""
if (ATTR_BRIGHTNESS not in kwargs
and ATTR_HS_COLOR not in kwargs
and ATTR_COLOR_TEMP not in kwargs):
self.tuya.turn_on()
if ATTR_BRIGHTNESS in kwargs:
self.tuya.set_brightness(kwargs[ATTR_BRIGHTNESS])
if ATTR_HS_COLOR in kwargs:
self.tuya.set_color(kwargs[ATTR_HS_COLOR])
if ATTR_COLOR_TEMP in kwargs:
color_temp = colorutil.color_temperature_mired_to_kelvin(
kwargs[ATTR_COLOR_TEMP])
self.tuya.set_color_temp(color_temp)
def turn_off(self, **kwargs):
"""Instruct the light to turn off."""
self.tuya.turn_off()
@property
def supported_features(self):
"""Flag supported features."""
supports = SUPPORT_BRIGHTNESS
if self.tuya.support_color():
supports = supports | SUPPORT_COLOR
if self.tuya.support_color_temp():
supports = supports | SUPPORT_COLOR_TEMP
return supports
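# Editor's note (illustrative, not part of the integration): Home Assistant
# expresses colour temperature in mireds, where mired = 1,000,000 / kelvin, so
# larger kelvin values map to smaller mired values.  With hypothetical limits
# of 2700 K and 6500 K:
#
#     colorutil.color_temperature_kelvin_to_mired(2700)  # ~370 mired
#     colorutil.color_temperature_kelvin_to_mired(6500)  # ~154 mired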
|
persandstrom/home-assistant
|
homeassistant/components/light/tuya.py
|
Python
|
apache-2.0
| 3,246
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import unittest
from unittest import skipUnless
from django.conf import settings
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.geoip import HAS_GEOIP
from django.utils import six
if HAS_GEOIP:
from . import GeoIP, GeoIPException
if HAS_GEOS:
from ..geos import GEOSGeometry
# Note: Requires use of both the GeoIP country and city datasets.
# The GEOIP_PATH setting should be the only setting set (the directory
# should contain links or the actual database files 'GeoIP.dat' and
# 'GeoLiteCity.dat').
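# Editor's sketch of the required configuration (the directory below is an
# assumption -- any directory containing both .dat files works):
#
#     # settings.py
#     GEOIP_PATH = '/usr/local/share/geoip'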
@skipUnless(HAS_GEOIP and getattr(settings, "GEOIP_PATH", None),
"GeoIP is required along with the GEOIP_PATH setting.")
class GeoIPTest(unittest.TestCase):
def test01_init(self):
"Testing GeoIP initialization."
g1 = GeoIP() # Everything inferred from GeoIP path
path = settings.GEOIP_PATH
g2 = GeoIP(path, 0) # Passing in data path explicitly.
g3 = GeoIP.open(path, 0) # MaxMind Python API syntax.
for g in (g1, g2, g3):
self.assertEqual(True, bool(g._country))
self.assertEqual(True, bool(g._city))
# Only passing in the location of one database.
city = os.path.join(path, 'GeoLiteCity.dat')
cntry = os.path.join(path, 'GeoIP.dat')
g4 = GeoIP(city, country='')
self.assertEqual(None, g4._country)
g5 = GeoIP(cntry, city='')
self.assertEqual(None, g5._city)
# Improper parameters.
bad_params = (23, 'foo', 15.23)
for bad in bad_params:
self.assertRaises(GeoIPException, GeoIP, cache=bad)
if isinstance(bad, six.string_types):
e = GeoIPException
else:
e = TypeError
self.assertRaises(e, GeoIP, bad, 0)
def test02_bad_query(self):
"Testing GeoIP query parameter checking."
cntry_g = GeoIP(city='<foo>')
# No city database available, these calls should fail.
self.assertRaises(GeoIPException, cntry_g.city, 'google.com')
self.assertRaises(GeoIPException, cntry_g.coords, 'yahoo.com')
# Non-string query should raise TypeError
self.assertRaises(TypeError, cntry_g.country_code, 17)
self.assertRaises(TypeError, cntry_g.country_name, GeoIP)
def test03_country(self):
"Testing GeoIP country querying methods."
g = GeoIP(city='<foo>')
fqdn = 'www.google.com'
addr = '12.215.42.19'
for query in (fqdn, addr):
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
g.country(query))
@skipUnless(HAS_GEOS, "Geos is required")
def test04_city(self):
"Testing GeoIP city querying methods."
g = GeoIP(country='<foo>')
addr = '128.249.1.1'
fqdn = 'tmc.edu'
for query in (fqdn, addr):
# Country queries should still work.
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
g.country(query))
# City information dictionary.
d = g.city(query)
self.assertEqual('USA', d['country_code3'])
self.assertEqual('Houston', d['city'])
self.assertEqual('TX', d['region'])
self.assertEqual(713, d['area_code'])
geom = g.geos(query)
self.assertTrue(isinstance(geom, GEOSGeometry))
lon, lat = (-95.4010, 29.7079)
lat_lon = g.lat_lon(query)
lat_lon = (lat_lon[1], lat_lon[0])
for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
self.assertAlmostEqual(lon, tup[0], 4)
self.assertAlmostEqual(lat, tup[1], 4)
def test05_unicode_response(self):
"Testing that GeoIP strings are properly encoded, see #16553."
g = GeoIP()
d = g.city("www.osnabrueck.de")
self.assertEqual('Osnabrück', d['city'])
d = g.country('200.7.49.81')
self.assertEqual('Curaçao', d['country_name'])
|
912/M-new
|
virtualenvironment/experimental/lib/python2.7/site-packages/django/contrib/gis/geoip/tests.py
|
Python
|
gpl-2.0
| 4,734
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 The HyperSpyUI developers
#
# This file is part of HyperSpyUI.
#
# HyperSpyUI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpyUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpyUI. If not, see <http://www.gnu.org/licenses/>.
"""
Created on Wed Jan 07 23:37:51 2015
@author: Vidar Tonaas Fauske
"""
from hyperspyui.plugins.plugin import Plugin
from python_qt_binding import QtGui, QtCore
from QtCore import *
from QtGui import *
from hyperspyui.widgets.extendedqwidgets import ExToolWindow
def tr(text):
return QCoreApplication.translate("Rebin", text)
class RebinPlugin(Plugin):
name = "Rebin"
def create_actions(self):
self.add_action('rebin', tr("Rebin"), self.rebin_dialog,
icon='rebin.svg',
selection_callback=self.ui.select_signal,
tip=tr("Rebin the signal"))
def create_menu(self):
self.add_menuitem('Signal', self.ui.actions['rebin'])
def rebin(self, factors, signal=None):
if signal is None:
signal = self.ui.get_selected_wrapper()
s = signal.signal
shape = []
mods = [tuple()] * len(s.axes_manager.shape)
for i in range(len(s.axes_manager.shape)):
ax = s.axes_manager[i]
factor = factors[i]
if factor > ax.size:
factor = ax.size
if ax.size % factor == 0:
mods[ax.index_in_array] = slice(None)
else:
mods[ax.index_in_array] = slice(None, - (ax.size % factor))
shape.append(ax.size // factor)
# Crop to multiple of factors
s.data = s.data[tuple(mods)]
# Update shape
s.get_dimensions_from_data()
# Do actual rebin
signal.switch_signal(s.rebin(shape))
self.ui.setUpdatesEnabled(False)
try:
signal.replot()
finally:
self.ui.setUpdatesEnabled(True) # Always resume updates!
self.record_code("<p>.rebin({0})".format(factors))
def rebin_dialog(self, signal=None):
if signal is None:
signal = self.ui.get_selected_wrapper()
if signal is None:
return
d = RebinDialog(signal, self, self.ui, self.ui)
d.show()
class RebinDialog(ExToolWindow):
def __init__(self, signal, plugin, ui, parent):
super(RebinDialog, self).__init__(parent)
self.signal = signal
self.ui = ui
self.setWindowTitle(tr("Rebin ") + signal.name)
self.create_controls()
self.plugin = plugin
def rebin(self):
factors = []
for ax in self.signal.signal.axes_manager._get_axes_in_natural_order():
spin = self.spins[ax.name]
factors.append(spin.value())
self.plugin.rebin(factors, self.signal)
def validate(self):
style = QApplication.style()
tmpIcon = style.standardIcon(style.SP_MessageBoxWarning, None, self)
s = self.signal
for ax in s.signal.axes_manager._get_axes_in_natural_order():
spin = self.spins[ax.name]
hbox = self.hboxes[ax.name]
if ax.size % spin.value() != 0:
# Not a factor, show warning
if hbox.count() <= 1:
# No warning icon yet
iconSize = spin.height()
pm = tmpIcon.pixmap(iconSize, iconSize)
lbl = QLabel()
lbl.setPixmap(pm)
lbl.setToolTip(tr("Not a factor of shape. Input data " +
"will be trimmed before binning."))
sp = lbl.sizePolicy()
sp.setHorizontalPolicy(QSizePolicy.Maximum)
lbl.setSizePolicy(sp)
hbox.insertWidget(0, lbl)
else:
# Everything OK, remove warning icon if there
if hbox.count() > 1:
lbl = hbox.takeAt(0)
lbl.widget().deleteLater()
def create_controls(self):
self.spins = {}
self.hboxes = {}
form = QFormLayout()
for ax in self.signal.signal.axes_manager._get_axes_in_natural_order():
spin = QSpinBox(self)
spin.setValue(1)
spin.setMinimum(1)
spin.valueChanged.connect(self.validate)
self.spins[ax.name] = spin
hbox = QHBoxLayout()
hbox.addWidget(spin)
form.addRow(ax.name, hbox)
self.hboxes[ax.name] = hbox
self.form = form
self.btn_rebin = QPushButton(tr("Rebin"))
self.btn_rebin.clicked.connect(self.rebin)
vbox = QVBoxLayout()
vbox.addLayout(form)
vbox.addWidget(self.btn_rebin)
self.setLayout(vbox)
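# Editor's sketch (not part of the plugin): the trim-then-rebin arithmetic done
# in RebinPlugin.rebin above, on assumed numbers.  An axis of size 10 binned by
# a factor of 3 is first cropped by 10 % 3 == 1 sample and then reduced to
# 10 // 3 == 3 bins.
def _rebinned_size(axis_size, factor):
    factor = min(factor, axis_size)
    return axis_size // factor
assert _rebinned_size(10, 3) == 3
assert _rebinned_size(12, 4) == 3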
|
vidartf/hyperspyUI
|
hyperspyui/plugins/rebin.py
|
Python
|
gpl-3.0
| 5,330
|
# -*- coding: utf-8 -*-
from unittest import TestCase
from autoupgrade import Package, ver_to_tuple
class TestFunctions(TestCase):
def test_ver_to_tuple(self):
self.assertGreater(
ver_to_tuple('0.1.2'),
ver_to_tuple('0.1.1'))
self.assertGreater(
ver_to_tuple('0.1.5A'),
ver_to_tuple('0.1.5'))
self.assertGreater(
ver_to_tuple('0.10.0'),
ver_to_tuple('0.9.5'))
self.assertGreater(
ver_to_tuple('1.2.3'),
ver_to_tuple('1.2'))
self.assertGreater(
ver_to_tuple('1.2A.3'),
ver_to_tuple('1.2.3'))
self.assertEqual(
ver_to_tuple('1.2.3'),
ver_to_tuple('1.2.3'))
def test_upgrade_default(self):
inst = Package("pip", verbose=True)
inst.smartupgrade(restart=False)
def test_upgrade_index(self):
inst = Package("pip",
"https://pypi.python.org/simple",
verbose=True)
inst.smartupgrade(restart=False)
|
vuolter/autoupgrade
|
tests/test_autoupgrade.py
|
Python
|
mit
| 1,078
|
#!python
#
# The Python Imaging Library
# $Id$
#
# split an animation into a number of frame files
#
from __future__ import print_function
from PIL import Image
import os
import sys
class Interval(object):
def __init__(self, interval="0"):
self.setinterval(interval)
def setinterval(self, interval):
self.hilo = []
for s in interval.split(","):
if not s.strip():
continue
try:
v = int(s)
if v < 0:
lo, hi = 0, -v
else:
lo = hi = v
except ValueError:
i = s.find("-")
lo, hi = int(s[:i]), int(s[i+1:])
self.hilo.append((hi, lo))
if not self.hilo:
self.hilo = [(sys.maxsize, 0)]
def __getitem__(self, index):
for hi, lo in self.hilo:
if hi >= index >= lo:
return 1
return 0
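# Editor's sketch of how Interval parses the optional range argument (the
# frame numbers below are illustrative):
#
#     Interval("1-10,15")  selects frames 1..10 and frame 15
#     Interval("-5")       selects frames 0..5
#     Interval("")         selects every frame
assert Interval("1-10,15")[7] and Interval("1-10,15")[15]
assert not Interval("1-10,15")[12]
assert Interval("")[999]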
# --------------------------------------------------------------------
# main program
html = 0
if sys.argv[1:2] == ["-h"]:
html = 1
del sys.argv[1]
if not sys.argv[2:]:
print()
print("Syntax: python explode.py infile template [range]")
print()
print("The template argument is used to construct the names of the")
print("individual frame files. The frames are numbered file001.ext,")
print("file002.ext, etc. You can insert %d to control the placement")
print("and syntax of the frame number.")
print()
print("The optional range argument specifies which frames to extract.")
print("You can give one or more ranges like 1-10, 5, -15 etc. If")
print("omitted, all frames are extracted.")
sys.exit(1)
infile = sys.argv[1]
outfile = sys.argv[2]
frames = Interval(",".join(sys.argv[3:]))
try:
# check if outfile contains a placeholder
outfile % 1
except TypeError:
file, ext = os.path.splitext(outfile)
outfile = file + "%03d" + ext
ix = 1
im = Image.open(infile)
if html:
file, ext = os.path.splitext(outfile)
html = open(file+".html", "w")
html.write("<html>\n<body>\n")
while True:
if frames[ix]:
im.save(outfile % ix)
print(outfile % ix)
if html:
html.write("<img src='%s'><br>\n" % outfile % ix)
try:
im.seek(ix)
except EOFError:
break
ix += 1
if html:
html.write("</body>\n</html>\n")
|
jojoriveraa/titulacion-NFCOW
|
venv/bin/explode.py
|
Python
|
apache-2.0
| 2,428
|
# Copyright (C) 2007-2012 Red Hat, Inc., Bryn M. Reeves <bmr@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class SysVIPC(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
short_desc = 'SysV IPC'
plugin_name = "sysvipc"
profiles = ('system', 'services')
def setup(self):
self.add_copy_spec([
"/proc/sysvipc/msg",
"/proc/sysvipc/sem",
"/proc/sysvipc/shm"
])
self.add_cmd_output([
"ipcs",
"ipcs -u"
])
# vim: set et ts=4 sw=4 :
|
BryanQuigley/sos
|
sos/report/plugins/sysvipc.py
|
Python
|
gpl-2.0
| 932
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Wrapper of Beam runners that's built for running and verifying e2e tests."""
from __future__ import absolute_import
from __future__ import print_function
import logging
import time
from apache_beam.internal import pickler
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TestOptions
from apache_beam.runners.dataflow.dataflow_runner import DataflowRunner
from apache_beam.runners.runner import PipelineState
__all__ = ['TestDataflowRunner']
# Dataflow takes up to 10 mins for the long tail of starting/stopping the
# worker pool.
WAIT_IN_STATE_TIMEOUT = 10 * 60
class TestDataflowRunner(DataflowRunner):
def run_pipeline(self, pipeline, options):
"""Execute test pipeline and verify test matcher"""
test_options = options.view_as(TestOptions)
on_success_matcher = test_options.on_success_matcher
wait_duration = test_options.wait_until_finish_duration
is_streaming = options.view_as(StandardOptions).streaming
    # [BEAM-1889] Do not send this to remote workers; there is no need to
    # send this option to remote executors.
test_options.on_success_matcher = None
self.result = super(TestDataflowRunner, self).run_pipeline(
pipeline, options)
if self.result.has_job:
      # TODO(markflyhigh)(BEAM-1890): Use print since Nose doesn't show logs
# in some cases.
print('Found: %s.' % self.build_console_url(options))
try:
self.wait_until_in_state(PipelineState.RUNNING)
if is_streaming and not wait_duration:
logging.warning('Waiting indefinitely for streaming job.')
self.result.wait_until_finish(duration=wait_duration)
if on_success_matcher:
from hamcrest import assert_that as hc_assert_that
hc_assert_that(self.result, pickler.loads(on_success_matcher))
finally:
if not self.result.is_in_terminal_state():
self.result.cancel()
self.wait_until_in_state(PipelineState.CANCELLED)
return self.result
def build_console_url(self, options):
"""Build a console url of Dataflow job."""
project = options.view_as(GoogleCloudOptions).project
region_id = options.view_as(GoogleCloudOptions).region
job_id = self.result.job_id()
return (
'https://console.cloud.google.com/dataflow/jobsDetail/locations'
'/%s/jobs/%s?project=%s' % (region_id, job_id, project))
def wait_until_in_state(self, expected_state, timeout=WAIT_IN_STATE_TIMEOUT):
"""Wait until Dataflow pipeline enters a certain state."""
if not self.result.has_job:
raise IOError('Failed to get the Dataflow job id.')
start_time = time.time()
while time.time() - start_time <= timeout:
job_state = self.result.state
if self.result.is_in_terminal_state() or job_state == expected_state:
return job_state
time.sleep(5)
    raise RuntimeError('Timeout after %d seconds while waiting for job %s '
                       'to enter expected state %s. Current state is %s.' %
(timeout, self.result.job_id(),
expected_state, self.result.state))
|
mxm/incubator-beam
|
sdks/python/apache_beam/runners/dataflow/test_dataflow_runner.py
|
Python
|
apache-2.0
| 3,995
|
# astronomy.py 12.10.3
from math import *
G = 6.673e-11
c = 2.998e8
H = 80 # km/s/Mpc
v = 0  # module-level speed (m/s) used to compute the Lorentz factor below
relg = 1/(sqrt(1-((v/c)**2)))  # Lorentz factor gamma; recompute if v changes
def gforce(m1, m2, r):
    ''' (number, number, number) -> str
Calculates gravitational force between masses m1 and m2 (kg) at a separation of r (m).
'''
global G
return str((G*m1*m2)/(r**2)) + ' N'
def magabs(mapp, d):
    ''' (number, number) -> str
Return absolute magnitude given apparent magnitude and distance (parsecs), mapp and d.
'''
    return str(5 + mapp - (5*log(d, 10)))
def magapp(mabs, d):
    ''' (number, number) -> str
    Return apparent magnitude given absolute magnitude and distance (parsecs), mabs and d.
'''
    return str((5*log(d, 10) - 5) + mabs)
def luminosity(flux, d):
    ''' (number, number) -> str
    Return luminosity of a star, given its flux (W/m^2) and distance d (m).
    '''
    return str(4*pi*(d**2)*flux) + ' W'
def schwradius(m):
    ''' (number) -> str
    Return the Schwarzschild radius of an object of mass m (kg).
'''
global G
global c
return str((2*G*m)/(c**2)) + ' m'
def hubblevel(d):
    ''' (number) -> str
    Return the Hubble-law recession velocity (km/s) at a distance d (Mpc).
    '''
    global H
    return str(H*d) + ' km/s'
def hubbledis(v):
    ''' (number) -> str
    Return the Hubble-law distance (Mpc) for a recession velocity v (km/s).
    '''
    global H
    return str(v/H) + ' Mpc'
def specrelt(t):
    ''' (number) -> str
Return relativistic time when given stationary time.
'''
global relg
return str(relg*t) + ' s'
def specrelm(m):
''' Return relativistic mass. '''
global relg
return str(relg*m) + ' kg'
def specrelx(x):
''' Return relativistic length.'''
global relg
return str(x/relg) + ' m'
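# Editor's usage sketch (not part of the original module).  The numeric inputs
# are assumptions chosen only to exercise the functions above; printed values
# carry the units appended by each function.
if __name__ == '__main__':
    print(gforce(5.97e24, 7.35e22, 3.84e8))   # ~2e20 N, roughly Earth-Moon
    print(schwradius(1.99e30))                # ~2.95e3 m for a solar mass
    print(magabs(0.03, 7.68))                 # ~0.6, roughly Vega
    print(hubblevel(100))                     # 8000 km/s at 100 Mpc with H = 80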
|
lithiumoxide/scical
|
astronomy.py
|
Python
|
gpl-3.0
| 1,465
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-01 09:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('communities', '0013_auto_20160801_1241'),
('acl', '0004_auto_20150806_1031'),
('users', '0015_auto_20160619_1409'),
]
operations = [
migrations.CreateModel(
name='CommitteeMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('in_position_since', models.DateField(default=django.utils.timezone.now, verbose_name='In position since')),
('committee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='committee_memberships', to='communities.Committee', verbose_name='Committee')),
('invited_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='committee_members_invited', to=settings.AUTH_USER_MODEL, verbose_name='Invited by')),
('role', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='committee_memberships', to='acl.Role', verbose_name='Role')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='committee_memberships', to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'ordering': ['committee'],
'verbose_name': 'Committee Member',
'verbose_name_plural': 'Committee Members',
},
),
migrations.CreateModel(
name='CommunityMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_manager', models.BooleanField(default=False, verbose_name='Is community manager?')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('community', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='community_memberships', to='communities.Community', verbose_name='Community')),
('invited_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='community_members_invited', to=settings.AUTH_USER_MODEL, verbose_name='Invited by')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='community_memberships', to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'ordering': ['community'],
'verbose_name': 'Community Member',
'verbose_name_plural': 'Community Members',
},
),
migrations.AlterUniqueTogether(
name='membership',
unique_together=set([]),
),
migrations.RemoveField(
model_name='membership',
name='community',
),
migrations.RemoveField(
model_name='membership',
name='group_name',
),
migrations.RemoveField(
model_name='membership',
name='invited_by',
),
migrations.RemoveField(
model_name='membership',
name='user',
),
migrations.DeleteModel(
name='Membership',
),
migrations.AlterUniqueTogether(
name='communitymembership',
unique_together=set([('community', 'user')]),
),
migrations.AlterUniqueTogether(
name='committeemembership',
unique_together=set([('committee', 'user')]),
),
]
|
yaniv14/OpenCommunity
|
src/users/migrations/0016_auto_20160801_1241.py
|
Python
|
bsd-3-clause
| 4,026
|
import pytest
from ray.tests.test_autoscaler import MockProvider, MockProcessRunner
from ray.autoscaler._private.command_runner import CommandRunnerInterface, \
SSHCommandRunner, _with_environment_variables, DockerCommandRunner, \
KubernetesCommandRunner
from ray.autoscaler._private.docker import DOCKER_MOUNT_PREFIX
from getpass import getuser
import hashlib
auth_config = {
"ssh_user": "ray",
"ssh_private_key": "8265.pem",
}
def test_environment_variable_encoder_strings():
env_vars = {"var1": "quote between this \" and this", "var2": "123"}
res = _with_environment_variables("echo hello", env_vars)
expected = """export var1='"quote between this \\" and this"';export var2='"123"';echo hello""" # noqa: E501
assert res == expected
def test_environment_variable_encoder_dict():
env_vars = {"value1": "string1", "value2": {"a": "b", "c": 2}}
res = _with_environment_variables("echo hello", env_vars)
expected = """export value1='"string1"';export value2='{"a":"b","c":2}';echo hello""" # noqa: E501
assert res == expected
def test_command_runner_interface_abstraction_violation():
"""Enforces the CommandRunnerInterface functions on the subclasses.
This is important to make sure the subclasses do not violate the
function abstractions. If you need to add a new function to one of
the CommandRunnerInterface subclasses, you have to add it to
CommandRunnerInterface and all of its subclasses.
"""
cmd_runner_interface_public_functions = dir(CommandRunnerInterface)
allowed_public_interface_functions = {
func
for func in cmd_runner_interface_public_functions
if not func.startswith("_")
}
for subcls in [
SSHCommandRunner, DockerCommandRunner, KubernetesCommandRunner
]:
subclass_available_functions = dir(subcls)
subclass_public_functions = {
func
for func in subclass_available_functions
if not func.startswith("_")
}
assert allowed_public_interface_functions == subclass_public_functions
def test_ssh_command_runner():
process_runner = MockProcessRunner()
provider = MockProvider()
provider.create_node({}, {}, 1)
cluster_name = "cluster"
ssh_control_hash = hashlib.md5(cluster_name.encode()).hexdigest()
ssh_user_hash = hashlib.md5(getuser().encode()).hexdigest()
ssh_control_path = "/tmp/ray_ssh_{}/{}".format(ssh_user_hash[:10],
ssh_control_hash[:10])
args = {
"log_prefix": "prefix",
"node_id": 0,
"provider": provider,
"auth_config": auth_config,
"cluster_name": cluster_name,
"process_runner": process_runner,
"use_internal_ip": False,
}
cmd_runner = SSHCommandRunner(**args)
env_vars = {"var1": "quote between this \" and this", "var2": "123"}
cmd_runner.run(
"echo helloo",
port_forward=[(8265, 8265)],
environment_variables=env_vars)
expected = [
"ssh",
"-tt",
"-L",
"8265:localhost:8265",
"-i",
"8265.pem",
"-o",
"StrictHostKeyChecking=no",
"-o",
"UserKnownHostsFile=/dev/null",
"-o",
"IdentitiesOnly=yes",
"-o",
"ExitOnForwardFailure=yes",
"-o",
"ServerAliveInterval=5",
"-o",
"ServerAliveCountMax=3",
"-o",
"ControlMaster=auto",
"-o",
"ControlPath={}/%C".format(ssh_control_path),
"-o",
"ControlPersist=10s",
"-o",
"ConnectTimeout=120s",
"ray@1.2.3.4",
"bash",
"--login",
"-c",
"-i",
"""'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (export var1='"'"'"quote between this \\" and this"'"'"';export var2='"'"'"123"'"'"';echo helloo)'""" # noqa: E501
]
# Much easier to debug this loop than the function call.
for x, y in zip(process_runner.calls[0], expected):
assert x == y
process_runner.assert_has_call("1.2.3.4", exact=expected)
def test_kubernetes_command_runner():
process_runner = MockProcessRunner()
provider = MockProvider()
provider.create_node({}, {}, 1)
args = {
"log_prefix": "prefix",
"namespace": "namespace",
"node_id": 0,
"auth_config": auth_config,
"process_runner": process_runner,
}
cmd_runner = KubernetesCommandRunner(**args)
env_vars = {"var1": "quote between this \" and this", "var2": "123"}
cmd_runner.run("echo helloo", environment_variables=env_vars)
expected = [
"kubectl",
"-n",
"namespace",
"exec",
"-it",
"0",
"--",
"bash",
"--login",
"-c",
"-i",
"""\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (export var1=\'"\'"\'"quote between this \\" and this"\'"\'"\';export var2=\'"\'"\'"123"\'"\'"\';echo helloo)\'""" # noqa: E501
]
assert process_runner.calls[0] == " ".join(expected)
def test_docker_command_runner():
process_runner = MockProcessRunner()
provider = MockProvider()
provider.create_node({}, {}, 1)
cluster_name = "cluster"
ssh_control_hash = hashlib.md5(cluster_name.encode()).hexdigest()
ssh_user_hash = hashlib.md5(getuser().encode()).hexdigest()
ssh_control_path = "/tmp/ray_ssh_{}/{}".format(ssh_user_hash[:10],
ssh_control_hash[:10])
docker_config = {"container_name": "container"}
args = {
"log_prefix": "prefix",
"node_id": 0,
"provider": provider,
"auth_config": auth_config,
"cluster_name": cluster_name,
"process_runner": process_runner,
"use_internal_ip": False,
"docker_config": docker_config,
}
cmd_runner = DockerCommandRunner(**args)
assert len(process_runner.calls) == 0, "No calls should be made in ctor"
env_vars = {"var1": "quote between this \" and this", "var2": "123"}
cmd_runner.run("echo hello", environment_variables=env_vars)
# This string is insane because there are an absurd number of embedded
# quotes. While this is a ridiculous string, the escape behavior is
# important and somewhat difficult to get right for environment variables.
cmd = """'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (docker exec -it container /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (export var1='"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"quote between this \\" and this"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"';export var2='"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"123"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"';echo hello)'"'"'"'"'"'"'"'"''"'"' )'""" # noqa: E501
expected = [
"ssh", "-tt", "-i", "8265.pem", "-o", "StrictHostKeyChecking=no", "-o",
"UserKnownHostsFile=/dev/null", "-o", "IdentitiesOnly=yes", "-o",
"ExitOnForwardFailure=yes", "-o", "ServerAliveInterval=5", "-o",
"ServerAliveCountMax=3", "-o", "ControlMaster=auto", "-o",
"ControlPath={}/%C".format(ssh_control_path), "-o",
"ControlPersist=10s", "-o", "ConnectTimeout=120s", "ray@1.2.3.4",
"bash", "--login", "-c", "-i", cmd
]
# Much easier to debug this loop than the function call.
for x, y in zip(process_runner.calls[0], expected):
print(f"expeted:\t{y}")
print(f"actual: \t{x}")
assert x == y
process_runner.assert_has_call("1.2.3.4", exact=expected)
def test_docker_rsync():
process_runner = MockProcessRunner()
provider = MockProvider()
provider.create_node({}, {}, 1)
cluster_name = "cluster"
docker_config = {"container_name": "container"}
args = {
"log_prefix": "prefix",
"node_id": 0,
"provider": provider,
"auth_config": auth_config,
"cluster_name": cluster_name,
"process_runner": process_runner,
"use_internal_ip": False,
"docker_config": docker_config,
}
cmd_runner = DockerCommandRunner(**args)
local_mount = "/home/ubuntu/base/mount/"
remote_mount = "/root/protected_mount/"
remote_host_mount = f"{DOCKER_MOUNT_PREFIX}{remote_mount}"
local_file = "/home/ubuntu/base-file"
remote_file = "/root/protected-file"
remote_host_file = f"{DOCKER_MOUNT_PREFIX}{remote_file}"
process_runner.respond_to_call("docker inspect -f", ["true"])
cmd_runner.run_rsync_up(
local_mount, remote_mount, options={"file_mount": True})
# Make sure we do not copy directly to raw destination
process_runner.assert_not_has_call(
"1.2.3.4", pattern=f"-avz {local_mount} ray@1.2.3.4:{remote_mount}")
process_runner.assert_not_has_call(
"1.2.3.4", pattern=f"mkdir -p {remote_mount}")
# No docker cp for file_mounts
process_runner.assert_not_has_call("1.2.3.4", pattern=f"docker cp")
process_runner.assert_has_call(
"1.2.3.4",
pattern=f"-avz {local_mount} ray@1.2.3.4:{remote_host_mount}")
process_runner.clear_history()
##############################
process_runner.respond_to_call("docker inspect -f", ["true"])
cmd_runner.run_rsync_up(
local_file, remote_file, options={"file_mount": False})
# Make sure we do not copy directly to raw destination
process_runner.assert_not_has_call(
"1.2.3.4", pattern=f"-avz {local_file} ray@1.2.3.4:{remote_file}")
process_runner.assert_not_has_call(
"1.2.3.4", pattern=f"mkdir -p {remote_file}")
process_runner.assert_has_call("1.2.3.4", pattern=f"docker cp")
process_runner.assert_has_call(
"1.2.3.4", pattern=f"-avz {local_file} ray@1.2.3.4:{remote_host_file}")
process_runner.clear_history()
##############################
cmd_runner.run_rsync_down(
remote_mount, local_mount, options={"file_mount": True})
process_runner.assert_not_has_call("1.2.3.4", pattern=f"docker cp")
process_runner.assert_not_has_call(
"1.2.3.4", pattern=f"-avz ray@1.2.3.4:{remote_mount} {local_mount}")
process_runner.assert_has_call(
"1.2.3.4",
pattern=f"-avz ray@1.2.3.4:{remote_host_mount} {local_mount}")
process_runner.clear_history()
##############################
cmd_runner.run_rsync_down(
remote_file, local_file, options={"file_mount": False})
process_runner.assert_has_call("1.2.3.4", pattern=f"docker cp")
process_runner.assert_not_has_call(
"1.2.3.4", pattern=f"-avz ray@1.2.3.4:{remote_file} {local_file}")
process_runner.assert_has_call(
"1.2.3.4", pattern=f"-avz ray@1.2.3.4:{remote_host_file} {local_file}")
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
robertnishihara/ray
|
python/ray/tests/test_command_runner.py
|
Python
|
apache-2.0
| 11,023
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import configparser
from enum import Enum
import logging
import argparse
import os
import pdb
import random
import shutil
import subprocess
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .mininode import NetworkThread
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
TMPDIR_PREFIX = "bitcoin_func_test_"
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class BitcoinTestMetaClass(type):
"""Metaclass for BitcoinTestFramework.
Ensures that any attempt to register a subclass of `BitcoinTestFramework`
adheres to a standard whereby the subclass overrides `set_test_params` and
`run_test` but DOES NOT override either `__init__` or `main`. If any of
those standards are violated, a ``TypeError`` is raised."""
def __new__(cls, clsname, bases, dct):
if not clsname == 'BitcoinTestFramework':
if not ('run_test' in dct and 'set_test_params' in dct):
raise TypeError("BitcoinTestFramework subclasses must override "
"'run_test' and 'set_test_params'")
if '__init__' in dct or 'main' in dct:
raise TypeError("BitcoinTestFramework subclasses may not override "
"'__init__' or 'main'")
return super().__new__(cls, clsname, bases, dct)
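# Illustrative sketch of a compliant subclass (not part of this module;
# ExampleTest is a hypothetical name):
#
#     class ExampleTest(BitcoinTestFramework):
#         def set_test_params(self):
#             self.num_nodes = 2
#
#         def run_test(self):
#             self.log.info("tip height: %d", self.nodes[0].getblockcount())
#
#     if __name__ == '__main__':
#         ExampleTest().main()
#
# Overriding __init__ or main in the subclass would instead raise TypeError
# at class-creation time because of the check above.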
class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
"""Base class for a bitcoin test script.
Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.chain = 'regtest'
self.setup_clean_chain = False
self.nodes = []
self.network_thread = None
self.rpc_timeout = 60 # Wait for up to 60 seconds for the RPC server to respond
self.supports_cli = True
self.bind_to_localhost_only = True
self.set_test_params()
self.parse_args()
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
try:
self.setup()
self.run_test()
except JSONRPCException:
self.log.exception("JSONRPC error")
self.success = TestStatus.FAILED
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
self.success = TestStatus.SKIPPED
except AssertionError:
self.log.exception("Assertion failed")
self.success = TestStatus.FAILED
except KeyError:
self.log.exception("Key error")
self.success = TestStatus.FAILED
except subprocess.CalledProcessError as e:
self.log.exception("Called Process failed with '{}'".format(e.output))
self.success = TestStatus.FAILED
except Exception:
self.log.exception("Unexpected exception caught during testing")
self.success = TestStatus.FAILED
except KeyboardInterrupt:
self.log.warning("Exiting after keyboard interrupt")
self.success = TestStatus.FAILED
finally:
exit_code = self.shutdown()
sys.exit(exit_code)
def parse_args(self):
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave peercoinds and test.* datadir on exit or error")
parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop peercoinds after the test execution")
parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs (default: %(default)s)")
parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_argument("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_argument("--configfile", dest="configfile",
default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
help="Location of the test framework config file (default: %(default)s)")
parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
help="use peercoin-cli instead of RPC for all commands")
parser.add_argument("--perf", dest="perf", default=False, action="store_true",
help="profile running nodes with perf for the duration of the test")
parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true",
help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown, valgrind 3.14 or later required")
parser.add_argument("--randomseed", type=int,
help="set a random seed for deterministically reproducing a previous test run")
self.add_options(parser)
self.options = parser.parse_args()
def setup(self):
"""Call this method to start up the test framework object with options set."""
PortSeed.n = self.options.port_seed
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
self.config = config
self.options.bitcoincli = os.getenv("BITCOINCLI", default=config["environment"]["BUILDDIR"] + '/src/peercoin-cli' + config["environment"]["EXEEXT"])
os.environ['PATH'] = os.pathsep.join([
os.path.join(config['environment']['BUILDDIR'], 'src'),
os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'),
os.environ['PATH']
])
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
self._start_logging()
# Seed the PRNG. Note that test runs are reproducible if and only if
# a single thread accesses the PRNG. For more information, see
# https://docs.python.org/3/library/random.html#notes-on-reproducibility.
# The network thread shouldn't access random. If we need to change the
# network thread to access randomness, it should instantiate its own
# random.Random object.
seed = self.options.randomseed
if seed is None:
seed = random.randrange(sys.maxsize)
else:
self.log.debug("User supplied random seed {}".format(seed))
random.seed(seed)
self.log.debug("PRNG seed is: {}".format(seed))
self.log.debug('Setting up network thread')
self.network_thread = NetworkThread()
self.network_thread.start()
if self.options.usecli:
if not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.skip_if_no_cli()
self.skip_test_if_missing_module()
self.setup_chain()
self.setup_network()
self.success = TestStatus.PASSED
def shutdown(self):
"""Call this method to shut down the test framework object."""
if self.success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
self.log.debug('Closing down network thread')
self.network_thread.close()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: peercoinds were not stopped and may still be running")
should_clean_up = (
not self.options.nocleanup and
not self.options.noshutdown and
self.success != TestStatus.FAILED and
not self.options.perf
)
if should_clean_up:
self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
cleanup_tree_on_exit = True
elif self.options.perf:
self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
cleanup_tree_on_exit = False
else:
self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
cleanup_tree_on_exit = False
if self.success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif self.success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' | less to consolidate and view all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
# Logging.shutdown will not remove stream- and filehandlers, so we must
# do it explicitly. Handlers are removed so the next test run can apply
# different log handler settings.
# See: https://docs.python.org/3/library/logging.html#logging.shutdown
for h in list(self.log.handlers):
h.flush()
h.close()
self.log.removeHandler(h)
rpc_logger = logging.getLogger("BitcoinRPC")
for h in list(rpc_logger.handlers):
h.flush()
rpc_logger.removeHandler(h)
if cleanup_tree_on_exit:
shutil.rmtree(self.options.tmpdir)
self.nodes.clear()
return exit_code
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def skip_test_if_missing_module(self):
"""Override this method to skip a test if a module is not compiled"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
#
# Topology looks like this:
# node0 <-- node1 <-- node2 <-- node3
#
# If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To
# ensure block propagation, all nodes will establish outgoing connections toward node0.
# See fPreferredDownload in net_processing.
#
# If further outbound connections are needed, they can be added at the beginning of the test with e.g.
# connect_nodes(self.nodes[1], 2)
for i in range(self.num_nodes - 1):
connect_nodes(self.nodes[i + 1], i)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
if not self.setup_clean_chain:
for n in self.nodes:
assert_equal(n.getblockchaininfo()["blocks"], 199)
# To ensure that all nodes are out of IBD, the most recent block
# must have a timestamp not too old (see IsInitialBlockDownload()).
self.log.debug('Generate a block with current time')
block_hash = self.nodes[0].generate(1)[0]
block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
for n in self.nodes:
n.submitblock(block)
chain_info = n.getblockchaininfo()
assert_equal(chain_info["blocks"], 200)
assert_equal(chain_info["initialblockdownload"], False)
def import_deterministic_coinbase_privkeys(self):
for n in self.nodes:
try:
n.getwalletinfo()
except JSONRPCException as e:
assert str(e).startswith('Method not found')
continue
n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase')
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None, binary_cli=None, versions=None):
"""Instantiate TestNode objects.
Should only be called once after the nodes have been specified in
set_test_params()."""
if self.bind_to_localhost_only:
extra_confs = [["bind=127.0.0.1"]] * num_nodes
else:
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
if versions is None:
versions = [None] * num_nodes
if binary is None:
binary = [self.options.bitcoind] * num_nodes
if binary_cli is None:
binary_cli = [self.options.bitcoincli] * num_nodes
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
assert_equal(len(versions), num_nodes)
assert_equal(len(binary), num_nodes)
assert_equal(len(binary_cli), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(
i,
get_datadir_path(self.options.tmpdir, i),
chain=self.chain,
rpchost=rpchost,
timewait=self.rpc_timeout,
bitcoind=binary[i],
bitcoin_cli=binary_cli[i],
version=versions[i],
coverage_dir=self.options.coveragedir,
cwd=self.options.tmpdir,
extra_conf=extra_confs[i],
extra_args=extra_args[i],
use_cli=self.options.usecli,
start_perf=self.options.perf,
use_valgrind=self.options.valgrind,
))
def start_node(self, i, *args, **kwargs):
"""Start a peercoind"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple peercoinds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i, expected_stderr='', wait=0):
"""Stop a peercoind test node"""
self.nodes[i].stop_node(expected_stderr, wait=wait)
self.nodes[i].wait_until_stopped()
def stop_nodes(self, wait=0):
"""Stop multiple peercoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node(wait=wait)
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes(self.nodes[1], 2)
self.sync_all()
def sync_blocks(self, nodes=None, **kwargs):
sync_blocks(nodes or self.nodes, **kwargs)
def sync_mempools(self, nodes=None, **kwargs):
sync_mempools(nodes or self.nodes, **kwargs)
def sync_all(self, nodes=None, **kwargs):
self.sync_blocks(nodes, **kwargs)
self.sync_mempools(nodes, **kwargs)
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as peercoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 199-block-long chain
Afterward, create num_nodes copies from the cache."""
CACHE_NODE_ID = 0 # Use node 0 to create the cache for all other nodes
cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID)
assert self.num_nodes <= MAX_NODES
if not os.path.isdir(cache_node_dir):
self.log.debug("Creating cache directory {}".format(cache_node_dir))
initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain)
self.nodes.append(
TestNode(
CACHE_NODE_ID,
cache_node_dir,
chain=self.chain,
extra_conf=["bind=127.0.0.1"],
extra_args=['-disablewallet'],
rpchost=None,
timewait=self.rpc_timeout,
bitcoind=self.options.bitcoind,
bitcoin_cli=self.options.bitcoincli,
coverage_dir=None,
cwd=self.options.tmpdir,
))
self.start_node(CACHE_NODE_ID)
# Wait for RPC connections to be ready
self.nodes[CACHE_NODE_ID].wait_for_rpc_connection()
# Create a 199-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# The 4th node gets only 24 immature blocks so that the very last
# block in the cache does not age too much (have an old tip age).
# This is needed so that we are out of IBD when the test starts,
# see the tip age check in IsInitialBlockDownload().
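# Block count check: the loop below makes 7 calls of 25 blocks plus one
# call of 24, so 7 * 25 + 24 = 199 blocks in total, spread round-robin
# over the 4 deterministic coinbase keys.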
for i in range(8):
self.nodes[CACHE_NODE_ID].generatetoaddress(
nblocks=25 if i != 7 else 24,
address=TestNode.PRIV_KEYS[i % 4].address,
)
assert_equal(self.nodes[CACHE_NODE_ID].getblockchaininfo()["blocks"], 199)
# Shut it down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
def cache_path(*paths):
return os.path.join(cache_node_dir, self.chain, *paths)
os.rmdir(cache_path('wallets')) # Remove empty wallets dir
for entry in os.listdir(cache_path()):
if entry not in ['chainstate', 'blocks']: # Only keep chainstate and blocks folder
os.remove(cache_path(entry))
for i in range(self.num_nodes):
self.log.debug("Copy cache directory {} to node {}".format(cache_node_dir, i))
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(cache_node_dir, to_dir)
initialize_datadir(self.options.tmpdir, i, self.chain) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i, self.chain)
def skip_if_no_py3_zmq(self):
"""Attempt to import the zmq package and skip the test if the import fails."""
try:
import zmq # noqa
except ImportError:
raise SkipTest("python3-zmq module not available.")
def skip_if_no_bitcoind_zmq(self):
"""Skip the running test if bitcoind has not been compiled with zmq support."""
if not self.is_zmq_compiled():
raise SkipTest("bitcoind has not been built with zmq enabled.")
def skip_if_no_wallet(self):
"""Skip the running test if wallet has not been compiled."""
if not self.is_wallet_compiled():
raise SkipTest("wallet has not been compiled.")
def skip_if_no_wallet_tool(self):
"""Skip the running test if bitcoin-wallet has not been compiled."""
if not self.is_wallet_tool_compiled():
raise SkipTest("bitcoin-wallet has not been compiled")
def skip_if_no_cli(self):
"""Skip the running test if bitcoin-cli has not been compiled."""
if not self.is_cli_compiled():
raise SkipTest("bitcoin-cli has not been compiled.")
def is_cli_compiled(self):
"""Checks whether bitcoin-cli was compiled."""
return self.config["components"].getboolean("ENABLE_CLI")
def is_wallet_compiled(self):
"""Checks whether the wallet module was compiled."""
return self.config["components"].getboolean("ENABLE_WALLET")
def is_wallet_tool_compiled(self):
"""Checks whether bitcoin-wallet was compiled."""
return self.config["components"].getboolean("ENABLE_WALLET_TOOL")
def is_zmq_compiled(self):
"""Checks whether the zmq module was compiled."""
return self.config["components"].getboolean("ENABLE_ZMQ")
|
peercoin/peercoin
|
test/functional/test_framework/test_framework.py
|
Python
|
mit
| 26,817
|
from django.conf.urls import url
from . import views
from Grundgeruest.views import DetailMitMenue
from .models import Veranstaltung
app_name = 'Veranstaltungen'
veranstaltungen_urls = ([
url(r'^$',
views.ListeAlle.as_view(),
name='liste_alle'),
url(r'^(?P<slug>[-\w]+)/$',
views.eine_veranstaltung,
name='veranstaltung_detail'),
url(r'^aus_alt_einlesen',
views.daten_einlesen,
name='aus_alt_einlesen'),
], 'Veranstaltungen')
salons_urls = ([
url(r'^s/',
views.liste_veranstaltungen,
{'art': 'Salon'}, name='liste_salons'),
url(r'^/livestream/',
views.livestream,
name='aktueller_livestream'),
url(r'^/(?P<slug>[-\w]+)/$',
views.VeranstaltungDetail.as_view(),
{'art': 'Salon'}, name='salon_detail'),
], 'Veranstaltungen')
seminare_urls = ([
url(r'^e',
views.liste_veranstaltungen,
{'art': 'Seminar'}, name='liste_seminare'),
url(r'^/(?P<slug>[-\w]+)/$',
views.VeranstaltungDetail.as_view(),
{'art': 'Seminar'}, name='seminar_detail'),
], 'Veranstaltungen')
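# These (patterns, app_namespace) tuples are meant to be wired into a project
# urls.py via include(); a hypothetical sketch (the URL prefixes below are
# assumptions, not taken from this repository):
#
#     from django.conf.urls import include, url
#     urlpatterns = [
#         url(r'^veranstaltungen/', include(veranstaltungen_urls)),
#         url(r'^salon', include(salons_urls)),
#         url(r'^seminar', include(seminare_urls)),
#     ]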
|
wmles/scholarium.at
|
Veranstaltungen/urls.py
|
Python
|
mit
| 1,190
|
"""
More generic ML and DL tools
ML part could contain
- cluster
- decomposition, e.g. SVD, PCA, NMF
- other unmixing (e.g., nonlinear unmixing, tensor factorization, etc.)
- gaussian mixture modelling
DL part could contain
- cleaning / denoising
- autoencoders
Submodules
----------
.. autosummary::
:toctree: _autosummary
"""
from .dl import nnblocks, models, datautils
from .dl.trainer import Trainer
from .ml import tensor_decomposition
__all__ = ['nnblocks', 'models', 'Trainer', 'datautils', 'tensor_decomposition']
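# Public surface in practice (based only on the imports and __all__ above;
# constructor and usage details live in the respective submodules):
#
#     from pycroscopy.learn import Trainer, models, datautils, tensor_decomposition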
|
pycroscopy/pycroscopy
|
pycroscopy/learn/__init__.py
|
Python
|
mit
| 532
|
# -*- coding: utf-8 -*-
"""
Started on wed, jun 4th, 2018
@author: carlos.arana
"""
# Libraries used
import pandas as pd
import sys
module_path = r'D:\PCCS\01_Dmine\Scripts'
if module_path not in sys.path:
sys.path.append(module_path)
from VarInt.VarInt import VarInt
from classes.Meta import Meta
from Compilador.Compilador import compilar
"""
The local libraries used in the lines above are available at the following locations:
SCRIPT: | AVAILABLE AT:
------ | ------------------------------------------------------------------------------------
VarInt | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/VarInt
Meta | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/Classes
Compilador | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/Compilador
"""
# Parameter documentation ---------------------------------------------------------------------------------------
# Parameter descriptions
M = Meta
M.ClaveParametro = 'P0812'
M.NombreParametro = 'Índice de Impunidad'
M.DescParam = 'Índice Global de impunidad en México'
M.UnidadesParam = 'puntos'
M.TituloParametro = 'IGI' # Used to name the parameter column
M.PeriodoParam = '2016'
M.TipoInt = 1 # 1: Binary; 2: Multivariable; 3: Integral
# Handlings
M.ParDtype = 'float'
M.TipoVar = 'C' # (Variable types: [C]ontinuous, [D]iscrete [O]rdinal, [B]inary or [N]ominal)
M.array = []
M.TipoAgr = 'mean'
# Descriptions of the mining process
M.nomarchivodataset = M.ClaveParametro
M.extarchivodataset = 'xlsx'
M.ContenidoHojaDatos = 'Índice de impunidad estatal asignado a las 135 ciudades que componen el Subsistema ' \
'Principal del SUN'
M.ClaveDataset = 'UDLAP'
M.ActDatos = '2018'
M.Agregacion = 'Se asignó el valor estatal del IGI a cada municipio de las 135 ciudades que componen el Subsistema ' \
'Principal del SUN, de acuerdo con el estado en el que se encuentra cada municipio. Se promedió el ' \
'valor del índice en los municipios que componen cada ciudad del SUN'
# Descriptions generated from the parameter key
M.getmetafromds = 1
Meta.fillmeta(M)
# Parameter construction -----------------------------------------------------------------------------------------
# Load initial dataset
dataset = pd.read_excel(M.DirFuente + '\\' + M.ArchivoDataset,
sheetname='DATOS', dtype={'CVE_MUN': 'str'})
dataset.set_index('CVE_MUN', inplace=True)
dataset = dataset.rename_axis('CVE_MUN')
dataset.head(2)
list(dataset)
# Generate dataset for the parameter and the integrity variable
var1 = 'IGI-2018'
par_dataset = dataset[var1]
par_dataset = par_dataset.to_frame(name=M.ClaveParametro)
par_dataset, variables_dataset = VarInt(par_dataset, dataset, tipo=M.TipoInt)
# Compilation
compilar(M, dataset, par_dataset, variables_dataset)
|
Caranarq/01_Dmine
|
08_Habitabilidad/P0812/P0812.py
|
Python
|
gpl-3.0
| 2,972
|
# coding=utf-8
import unittest
"""741. Cherry Pickup
https://leetcode.com/problems/cherry-pickup/description/
In a N x N `grid` representing a field of cherries, each cell is one of three
possible integers.
* 0 means the cell is empty, so you can pass through;
* 1 means the cell contains a cherry, that you can pick up and pass through;
* -1 means the cell contains a thorn that blocks your way.
Your task is to collect maximum number of cherries possible by following the
rules below:
* Starting at the position (0, 0) and reaching (N-1, N-1) by moving right or down through valid path cells (cells with value 0 or 1);
* After reaching (N-1, N-1), returning to (0, 0) by moving left or up through valid path cells;
* When passing through a path cell containing a cherry, you pick it up and the cell becomes an empty cell (0);
* If there is no valid path between (0, 0) and (N-1, N-1), then no cherries can be collected.
**Example 1:**
**Input:** grid =
[[0, 1, -1],
[1, 0, -1],
[1, 1, 1]]
**Output:** 5
**Explanation:**
The player started at (0, 0) and went down, down, right right to reach (2, 2).
4 cherries were picked up during this single trip, and the matrix becomes [[0,1,-1],[0,0,-1],[0,0,0]].
Then, the player went left, up, up, left to return home, picking up one more cherry.
The total number of cherries picked up is 5, and this is the maximum possible.
**Note:**
* `grid` is an `N` by `N` 2D array, with `1 <= N <= 50`.
* Each `grid[i][j]` is an integer in the set `{-1, 0, 1}`.
* It is guaranteed that grid[0][0] and grid[N-1][N-1] are not -1.
*
Similar Questions:
Minimum Path Sum (minimum-path-sum)
Dungeon Game (dungeon-game)
"""
class Solution(object):
def cherryPickup(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
def test(self):
pass
if __name__ == "__main__":
unittest.main()
|
openqt/algorithms
|
leetcode/python/lc741-cherry-pickup.py
|
Python
|
gpl-3.0
| 1,968
|
from sklearn.tree import DecisionTreeClassifier
# weak classifier
# decision tree (max depth = 2) using scikit-learn
class WeakClassifier:
# initialize
def __init__(self):
self.clf = DecisionTreeClassifier(max_depth = 2)
# train on dataset (X, y) with distribution weight w
def fit(self, X, y, w):
self.clf.fit(X, y, sample_weight = w)
# predict
def predict(self, X):
return self.clf.predict(X)
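# Usage sketch (not part of the original module): in an AdaBoost-style loop,
# w is the boosting distribution over the training samples. The guarded demo
# below just fits one weak learner with uniform weights on the iris data.
if __name__ == '__main__':
    import numpy as np
    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    w = np.ones(len(y)) / len(y)  # uniform initial distribution

    clf = WeakClassifier()
    clf.fit(X, y, w)
    print("training accuracy:", (clf.predict(X) == y).mean())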
|
huangshenno1/algo
|
ml/iris/ada_MO/weakclassifier.py
|
Python
|
mit
| 425
|
from flask import current_app, Markup, render_template, request
from werkzeug.exceptions import default_exceptions, HTTPException
def error_handler(error):
msg = "Request resulted in {}".format(error)
current_app.logger.warning(msg, exc_info=error)
if isinstance(error, HTTPException):
description = error.get_description(request.environ)
code = error.code
name = error.name
else:
description = ("We encountered an error "
"while trying to fulfill your request")
code = 500
name = 'Internal Server Error'
# Flask supports looking up multiple templates and rendering the first
# one it finds. This will let us create specific error pages
# for errors where we can provide the user some additional help.
# (Like a 404, for example).
templates_to_try = ['errors/{}.html'.format(code), 'errors/generic.html']
return render_template(templates_to_try,
code=code,
name=Markup(name),
description=Markup(description),
error=error,
title='Hata')
def init_app(app):
for exception in default_exceptions:
app.register_error_handler(exception, error_handler)
app.register_error_handler(Exception, error_handler)
# This can be used in __init__ with a
# import .errors
# errors.init_app(app)
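# A matching template sketch (hypothetical; errors/generic.html is not part of
# this file) that consumes the variables passed to render_template above:
#
#     <h1>{{ code }} {{ name }}</h1>
#     <p>{{ description }}</p>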
|
Rassilion/ProjectC
|
web/app/errors.py
|
Python
|
gpl-3.0
| 1,459
|