hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace5524e3e3acc678b42be9213532fe6c2771df1 | 9,992 | py | Python | setup.py | smok-serwis/cython | e551a3a348888bd89d4aad809916709a634af1fb | [
"Apache-2.0"
] | 2 | 2021-08-20T02:33:58.000Z | 2021-11-17T10:54:00.000Z | setup.py | smok-serwis/cython | e551a3a348888bd89d4aad809916709a634af1fb | [
"Apache-2.0"
] | 1 | 2020-01-20T08:52:52.000Z | 2020-02-26T14:57:39.000Z | setup.py | smok-serwis/cython | e551a3a348888bd89d4aad809916709a634af1fb | [
"Apache-2.0"
] | 1 | 2019-10-06T10:49:39.000Z | 2019-10-06T10:49:39.000Z | #!/usr/bin/env python
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
import os
import stat
import subprocess
import textwrap
import sys
import platform
# used below to decide whether to build Cython's own binary modules,
# which is only attempted by default on CPython
is_cpython = platform.python_implementation() == 'CPython'
# this specifies which versions of python we support, pip >= 9 knows to skip
# versions of packages which are not compatible with the running python
PYTHON_REQUIRES = '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*'
if sys.platform == "darwin":
    # Don't create resource files on OS X tar.
    os.environ['COPY_EXTENDED_ATTRIBUTES_DISABLE'] = 'true'
    os.environ['COPYFILE_DISABLE'] = 'true'
# keyword arguments accumulated below and passed to the final setup() call
setup_args = {}
def add_command_class(name, cls):
    """Register *cls* as the distutils/setuptools command *name*.

    Command classes are accumulated in the module-level ``setup_args``
    dict under the 'cmdclass' key for the final setup() call.
    """
    setup_args.setdefault('cmdclass', {})[name] = cls
from distutils.command.sdist import sdist as sdist_orig
class sdist(sdist_orig):
    """sdist command that records the current git revision in .gitrev."""
    def run(self):
        # always regenerate the manifest rather than reusing a stale one
        self.force_manifest = 1
        if (sys.platform != "win32" and
                os.path.isdir('.git')):
            # snapshot the current git revision into .gitrev before
            # building the source tarball
            assert os.system("git rev-parse --verify HEAD > .gitrev") == 0
        sdist_orig.run(self)
add_command_class('sdist', sdist)
# every directory below Cython/Includes that is a package (contains an
# __init__.pyx or __init__.pxd), plus the Includes root itself
pxd_include_dirs = [
    directory for directory, dirs, files
    in os.walk(os.path.join('Cython', 'Includes'))
    if '__init__.pyx' in files or '__init__.pxd' in files
    or directory == os.path.join('Cython', 'Includes')]
# glob patterns matching all .pxd/.pyx files in those directories
pxd_include_patterns = [
    p+'/*.pxd' for p in pxd_include_dirs ] + [
    p+'/*.pyx' for p in pxd_include_dirs ]
# non-Python data files that must ship with each installed package
setup_args['package_data'] = {
    'Cython.Plex' : ['*.pxd'],
    'Cython.Compiler' : ['*.pxd'],
    'Cython.Runtime' : ['*.pyx', '*.pxd'],
    'Cython.Utility' : ['*.pyx', '*.pxd', '*.c', '*.h', '*.cpp'],
    # p[7:] strips the leading 'Cython/' path prefix (7 characters)
    'Cython' : [ p[7:] for p in pxd_include_patterns ],
    'Cython.Debugger.Tests': ['codefile', 'cfuncs.c'],
}
# This dict is used for passing extra arguments that are setuptools
# specific to setup
setuptools_extra_args = {}
if 'setuptools' in sys.modules:
    setuptools_extra_args['python_requires'] = PYTHON_REQUIRES
    setuptools_extra_args['zip_safe'] = False
    # with setuptools, expose the command line tools as entry points ...
    setuptools_extra_args['entry_points'] = {
        'console_scripts': [
            'cython = Cython.Compiler.Main:setuptools_main',
            'cythonize = Cython.Build.Cythonize:main',
            'cygdb = Cython.Debugger.Cygdb:main',
        ]
    }
    scripts = []
else:
    # ... with plain distutils, fall back to installing wrapper scripts
    if os.name == "posix":
        scripts = ["bin/cython", "bin/cythonize", "bin/cygdb"]
    else:
        scripts = ["cython.py", "cythonize.py", "cygdb.py"]
def compile_cython_modules(profile=False, compile_more=False, cython_with_refnanny=False):
    """Prepare extension modules so that parts of Cython itself get
    compiled to C extensions.

    :param profile: enable the Cython 'profile' directive for the
        compiled modules
    :param compile_more: also compile the larger set of compiler modules
    :param cython_with_refnanny: define CYTHON_REFNANNY for the compiled
        modules (except refnanny itself)

    Populates ``setup_args['ext_modules']`` and registers Cython's own
    build_ext command; the actual build is left to distutils.
    """
    source_root = os.path.abspath(os.path.dirname(__file__))
    # modules that are always compiled
    compiled_modules = [
        "Cython.Plex.Scanners",
        "Cython.Plex.Actions",
        "Cython.Compiler.Scanning",
        "Cython.Compiler.Visitor",
        "Cython.Compiler.FlowControl",
        "Cython.Runtime.refnanny",
        "Cython.Compiler.FusedNode",
        "Cython.Tempita._tempita",
    ]
    if compile_more:
        compiled_modules.extend([
            "Cython.StringIOTree",
            "Cython.Compiler.Code",
            "Cython.Compiler.Lexicon",
            "Cython.Compiler.Parsing",
            "Cython.Compiler.Pythran",
            "Cython.Build.Dependencies",
            "Cython.Compiler.ParseTreeTransforms",
            "Cython.Compiler.Nodes",
            "Cython.Compiler.ExprNodes",
            "Cython.Compiler.ModuleNode",
            "Cython.Compiler.Optimize",
        ])
    from distutils.spawn import find_executable
    from distutils.sysconfig import get_python_inc
    # look for the 'pgen' parser generator on PATH and next to the
    # interpreter headers; without it the formal grammar is skipped
    # NOTE: os.environ['PATH'] raises KeyError if PATH is unset — assumed
    # to always be set in practice
    pgen = find_executable(
        'pgen', os.pathsep.join([os.environ['PATH'], os.path.join(get_python_inc(), '..', 'Parser')]))
    if not pgen:
        sys.stderr.write("Unable to find pgen, not compiling formal grammar.\n")
    else:
        parser_dir = os.path.join(os.path.dirname(__file__), 'Cython', 'Parser')
        grammar = os.path.join(parser_dir, 'Grammar')
        # regenerate graminit.h/graminit.c from the Grammar file
        subprocess.check_call([
            pgen,
            os.path.join(grammar),
            os.path.join(parser_dir, 'graminit.h'),
            os.path.join(parser_dir, 'graminit.c'),
        ])
        cst_pyx = os.path.join(parser_dir, 'ConcreteSyntaxTree.pyx')
        if os.stat(grammar)[stat.ST_MTIME] > os.stat(cst_pyx)[stat.ST_MTIME]:
            # bump the .pyx mtime up to the grammar's mtime — presumably
            # so dependency tracking sees it as changed; TODO confirm
            mtime = os.stat(grammar)[stat.ST_MTIME]
            os.utime(cst_pyx, (mtime, mtime))
        compiled_modules.extend([
            "Cython.Parser.ConcreteSyntaxTree",
        ])
    defines = []
    if cython_with_refnanny:
        defines.append(('CYTHON_REFNANNY', '1'))
    extensions = []
    for module in compiled_modules:
        source_file = os.path.join(source_root, *module.split('.'))
        # prefer a .py source when present, otherwise fall back to .pyx
        if os.path.exists(source_file + ".py"):
            pyx_source_file = source_file + ".py"
        else:
            pyx_source_file = source_file + ".pyx"
        dep_files = []
        if os.path.exists(source_file + '.pxd'):
            dep_files.append(source_file + '.pxd')
        if '.refnanny' in module:
            # refnanny itself is built without the CYTHON_REFNANNY macro
            defines_for_module = []
        else:
            defines_for_module = defines
        extensions.append(Extension(
            module, sources=[pyx_source_file],
            define_macros=defines_for_module,
            depends=dep_files))
        # XXX hack around setuptools quirk for '*.pyx' sources
        extensions[-1].sources[0] = pyx_source_file
    from Cython.Distutils.build_ext import new_build_ext
    from Cython.Compiler.Options import get_directive_defaults
    # compile Cython's own sources with Py2 language semantics
    get_directive_defaults()['language_level'] = 2
    if profile:
        get_directive_defaults()['profile'] = True
        sys.stderr.write("Enabled profiling for the Cython binary modules\n")
    # not using cythonize() directly to let distutils decide whether building extensions was requested
    add_command_class("build_ext", new_build_ext)
    setup_args['ext_modules'] = extensions
# Consume our custom command-line flags before distutils/setuptools
# parses sys.argv.  list.remove() raises ValueError when the flag is
# absent, which is used here to detect presence (EAFP).
cython_profile = '--cython-profile' in sys.argv
if cython_profile:
    sys.argv.remove('--cython-profile')
try:
    sys.argv.remove("--cython-compile-all")
    cython_compile_more = True
except ValueError:
    cython_compile_more = False
try:
    sys.argv.remove("--cython-with-refnanny")
    cython_with_refnanny = True
except ValueError:
    cython_with_refnanny = False
try:
    sys.argv.remove("--no-cython-compile")
    compile_cython_itself = False
except ValueError:
    compile_cython_itself = True
# compiling Cython itself is only attempted on CPython, unless it was
# explicitly requested via --cython-compile-all
if compile_cython_itself and (is_cpython or cython_compile_more):
    compile_cython_modules(cython_profile, cython_compile_more, cython_with_refnanny)
from Cython import __version__ as version
def dev_status(version_string=None):
    """Map a version string to the matching PyPI 'Development Status'
    trove classifier.

    :param version_string: version to classify; defaults to the
        installed Cython version, keeping the original no-argument
        call signature working.
    :returns: a 'Development Status :: ...' classifier string
    """
    if version_string is None:
        # fall back to the module-level Cython version
        version_string = version
    if 'b' in version_string or 'c' in version_string:
        # 1b1, 1beta1, 2rc1, ...
        return 'Development Status :: 4 - Beta'
    elif 'a' in version_string:
        # 1a1, 1alpha1, ...
        return 'Development Status :: 3 - Alpha'
    else:
        return 'Development Status :: 5 - Production/Stable'
# pure Python packages included in the distribution
packages = [
    'Cython',
    'Cython.Build',
    'Cython.Compiler',
    'Cython.Runtime',
    'Cython.Distutils',
    'Cython.Debugger',
    'Cython.Debugger.Tests',
    'Cython.Plex',
    'Cython.Tests',
    'Cython.Build.Tests',
    'Cython.Compiler.Tests',
    'Cython.Utility',
    'Cython.Tempita',
    'pyximport',
]
setup(
name='Cython',
version=version,
url='https://cython.org/',
author='Robert Bradshaw, Stefan Behnel, Dag Seljebotn, Greg Ewing, et al.',
author_email='cython-devel@python.org',
description="The Cython compiler for writing C extensions for the Python language.",
long_description=textwrap.dedent("""\
The Cython language makes writing C extensions for the Python language as
easy as Python itself. Cython is a source code translator based on Pyrex_,
but supports more cutting edge functionality and optimizations.
The Cython language is a superset of the Python language (almost all Python
code is also valid Cython code), but Cython additionally supports optional
static typing to natively call C functions, operate with C++ classes and
declare fast C types on variables and class attributes. This allows the
compiler to generate very efficient C code from Cython code.
This makes Cython the ideal language for writing glue code for external
C/C++ libraries, and for fast C modules that speed up the execution of
Python code.
Note that for one-time builds, e.g. for CI/testing, on platforms that are not
covered by one of the wheel packages provided on PyPI, it is substantially faster
than a full source build to install an uncompiled (slower) version of Cython with::
pip install Cython --install-option="--no-cython-compile"
.. _Pyrex: https://www.cosc.canterbury.ac.nz/greg.ewing/python/Pyrex/
"""),
license='Apache',
classifiers=[
dev_status(),
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Programming Language :: C",
"Programming Language :: Cython",
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: Compilers",
"Topic :: Software Development :: Libraries :: Python Modules"
],
scripts=scripts,
packages=packages,
py_modules=["cython"],
**setup_args
)
| 35.059649 | 102 | 0.648619 |
ace552610765403799d52352fec470830c3e31d6 | 52,391 | py | Python | test/functional/__init__.py | fvennetier/swift | 127bf9707c383ac737fce9f243bcc8f6655910df | [
"Apache-2.0"
] | 1 | 2021-09-30T14:00:22.000Z | 2021-09-30T14:00:22.000Z | test/functional/__init__.py | fvennetier/swift | 127bf9707c383ac737fce9f243bcc8f6655910df | [
"Apache-2.0"
] | 2 | 2020-10-09T13:20:33.000Z | 2020-10-28T16:02:16.000Z | test/functional/__init__.py | fvennetier/swift | 127bf9707c383ac737fce9f243bcc8f6655910df | [
"Apache-2.0"
] | 2 | 2020-09-21T14:24:56.000Z | 2020-10-01T10:08:46.000Z | # Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import mock
import os
import six
from six.moves.urllib.parse import urlparse, urlsplit, urlunsplit
import sys
import pickle
import socket
import locale
import eventlet
import eventlet.debug
import functools
import random
import base64
from time import time, sleep
from contextlib import closing
from gzip import GzipFile
from shutil import rmtree
from tempfile import mkdtemp
from six.moves.configparser import ConfigParser, NoSectionError
from six.moves import http_client
from six.moves.http_client import HTTPException
from swift.common.middleware.memcache import MemcacheMiddleware
from swift.common.storage_policy import parse_storage_policies, PolicyError
from swift.common.utils import set_swift_dir
from test import get_config, listen_zero
from test.unit import debug_logger, FakeMemcache
# importing skip_if_no_xattrs so that functional tests can grab it from the
# test.functional namespace. Importing SkipTest so this works under both
# nose and testr test runners.
from test.unit import skip_if_no_xattrs as real_skip_if_no_xattrs
from test.unit import SkipTest
from swift.common import constraints, utils, ring, storage_policy
from swift.common.ring import Ring
from swift.common.wsgi import loadapp, SwiftHttpProtocol
from swift.common.utils import config_true_value, split_path
from swift.account import server as account_server
from swift.container import server as container_server
from swift.obj import server as object_server, mem_server as mem_object_server
import swift.proxy.controllers.obj
# raise http.client's header-count limit to match swift's constraint
http_client._MAXHEADERS = constraints.MAX_HEADER_COUNT
# when True, _debug() messages below are written to stderr
DEBUG = True
# In order to get the proper blocking behavior of sockets without using
# threads, where we can set an arbitrary timeout for some piece of code under
# test, we use eventlet with the standard socket library patched. We have to
# perform this setup at module import time, since all the socket module
# bindings in the swiftclient code will have been made by the time nose
# invokes the package or class setup methods.
eventlet.hubs.use_hub(utils.get_hub())
eventlet.patcher.monkey_patch(all=False, socket=True)
eventlet.debug.hub_exceptions(False)
# swift_test_client import from swiftclient, so move after the monkey-patching
from test.functional.swift_test_client import Account, Connection, Container, \
    ResponseError
from swiftclient import get_auth, http_connection
has_insecure = False
try:
    from swiftclient import __version__ as client_version
    # Prevent a ValueError in StrictVersion with '2.0.3.68.ga99c2ff'
    client_version = '.'.join(client_version.split('.')[:3])
except ImportError:
    # Pre-PBR we had version, not __version__. Anyhow...
    client_version = '1.2'
from distutils.version import StrictVersion
# the 'insecure' option only exists in swiftclient >= 2.0
if StrictVersion(client_version) >= StrictVersion('2.0'):
    has_insecure = True
# test configuration, populated elsewhere in this module
config = {}
web_front_end = None
normalized_urls = None
# If no config was read, we will fall back to old school env vars
swift_test_auth_version = None
swift_test_auth = os.environ.get('SWIFT_TEST_AUTH')
# one slot per configured test user; slot 0 may come from the environment
swift_test_user = [os.environ.get('SWIFT_TEST_USER'), None, None, '', '', '']
swift_test_key = [os.environ.get('SWIFT_TEST_KEY'), None, None, '', '', '']
swift_test_tenant = ['', '', '', '', '', '']
swift_test_perm = ['', '', '', '', '', '']
swift_test_domain = ['', '', '', '', '', '']
swift_test_user_id = ['', '', '', '', '', '']
swift_test_tenant_id = ['', '', '', '', '', '']
# skip flags toggled by the test setup to disable groups of tests
skip, skip2, skip3, skip_if_not_v3, skip_service_tokens, \
    skip_if_no_reseller_admin = False, False, False, False, False, False
orig_collate = ''
insecure = False
# state for the in-process server mode
in_process = False
_testdir = _test_servers = _test_coros = _test_socks = None
policy_specified = None
skip_if_no_xattrs = None
class FakeMemcacheMiddleware(MemcacheMiddleware):
    """
    Memcache middleware whose client is replaced with an in-memory
    fake, so swift's caching layer works even when memcached does not
    appear to be running.
    """
    def __init__(self, app, conf):
        MemcacheMiddleware.__init__(self, app, conf)
        # discard the real memcache client set up by the parent class
        self.memcache = FakeMemcache()
class InProcessException(BaseException):
    """Raised when in-process test server setup fails.

    NOTE: subclasses BaseException (not Exception), so generic
    ``except Exception`` handlers will not catch it.
    """
    pass
def _info(msg):
    """Write *msg* (plus a trailing newline) to stderr."""
    sys.stderr.write('%s\n' % msg)
def _debug(msg):
    """Write *msg* to stderr with a 'DEBUG: ' prefix, but only when the
    module-level DEBUG flag is set."""
    if not DEBUG:
        return
    _info('DEBUG: ' + msg)
def _in_process_setup_swift_conf(swift_conf_src, testdir):
    """Write a copy of *swift_conf_src*, tailored for in-process
    functional tests, into *testdir* and return its path.

    :param swift_conf_src: path of the swift.conf to start from
    :param testdir: directory in which to write the adjusted swift.conf
    :returns: path of the written swift.conf
    :raises InProcessException: if a required section is missing from
        the source conf file
    """
    # override swift.conf contents for in-process functional test runs
    conf = ConfigParser()
    conf.read(swift_conf_src)
    try:
        section = 'swift-hash'
        conf.set(section, 'swift_hash_path_suffix', 'inprocfunctests')
        conf.set(section, 'swift_hash_path_prefix', 'inprocfunctests')
        section = 'swift-constraints'
        max_file_size = (8 * 1024 * 1024) + 2  # 8 MB + 2
        conf.set(section, 'max_file_size', str(max_file_size))
    except NoSectionError:
        # 'section' holds whichever section was being modified
        msg = 'Conf file %s is missing section %s' % (swift_conf_src, section)
        raise InProcessException(msg)
    test_conf_file = os.path.join(testdir, 'swift.conf')
    with open(test_conf_file, 'w') as fp:
        conf.write(fp)
    return test_conf_file
def _in_process_find_conf_file(conf_src_dir, conf_file_name, use_sample=True):
    """
    Look for a file first in conf_src_dir, if it exists, otherwise optionally
    look in the source tree sample 'etc' dir.
    :param conf_src_dir: Directory in which to search first for conf file. May
                         be None
    :param conf_file_name: Name of conf file
    :param use_sample: If True and the conf_file_name is not found, then return
                       any sample conf file found in the source tree sample
                       'etc' dir by appending '-sample' to conf_file_name
    :returns: Path to conf file
    :raises InProcessException: If no conf file is found
    """
    # the source tree's 'etc' dir, three levels up from this module
    default_src_dir = os.path.normpath(os.path.join(
        os.path.abspath(__file__), os.pardir, os.pardir, os.pardir, 'etc'))
    search_dir = default_src_dir if conf_src_dir is None else conf_src_dir
    lookup_name = conf_file_name
    candidate = os.path.join(search_dir, lookup_name)
    if os.path.exists(candidate):
        return candidate
    if use_sample:
        # fall back to the '-sample' variant shipped in the source tree
        lookup_name = conf_file_name + '-sample'
        candidate = os.path.join(default_src_dir, lookup_name)
        if os.path.exists(candidate):
            return candidate
    # report the last name that was searched for (includes '-sample'
    # when the sample fallback was attempted)
    raise InProcessException('Failed to find config file %s' % lookup_name)
def _in_process_setup_ring(swift_conf, conf_src_dir, testdir):
    """
    If SWIFT_TEST_POLICY is set:
    - look in swift.conf file for specified policy
    - move this to be policy-0 but preserving its options
    - copy its ring file to test dir, changing its devices to suit
      in process testing, and renaming it to suit policy-0
    Otherwise, create a default ring file.

    :param swift_conf: path of the test swift.conf, rewritten in place
    :param conf_src_dir: optional directory to search for a source ring
    :param testdir: directory in which the test ring file is written
    :returns: list of listening sockets, one per object-server device
    :raises InProcessException: on invalid policy config or missing ring
    """
    conf = ConfigParser()
    conf.read(swift_conf)
    sp_prefix = 'storage-policy:'
    try:
        # policy index 0 will be created if no policy exists in conf
        policies = parse_storage_policies(conf)
    except PolicyError as e:
        raise InProcessException(e)
    # clear all policies from test swift.conf before adding test policy back
    for policy in policies:
        conf.remove_section(sp_prefix + str(policy.idx))
    if policy_specified:
        policy_to_test = policies.get_by_name(policy_specified)
        if policy_to_test is None:
            raise InProcessException('Failed to find policy name "%s"'
                                     % policy_specified)
        _info('Using specified policy %s' % policy_to_test.name)
    else:
        policy_to_test = policies.default
        _info('Defaulting to policy %s' % policy_to_test.name)
    # make policy_to_test be policy index 0 and default for the test config
    sp_zero_section = sp_prefix + '0'
    conf.add_section(sp_zero_section)
    for (k, v) in policy_to_test.get_info(config=True).items():
        conf.set(sp_zero_section, k, str(v))
    conf.set(sp_zero_section, 'default', 'True')
    with open(swift_conf, 'w') as fp:
        conf.write(fp)
    # look for a source ring file
    ring_file_src = ring_file_test = 'object.ring.gz'
    if policy_to_test.idx:
        ring_file_src = 'object-%s.ring.gz' % policy_to_test.idx
    try:
        ring_file_src = _in_process_find_conf_file(conf_src_dir, ring_file_src,
                                                   use_sample=False)
    except InProcessException:
        if policy_specified:
            # a named policy without its ring file is a hard error
            raise InProcessException('Failed to find ring file %s'
                                     % ring_file_src)
        ring_file_src = None
    ring_file_test = os.path.join(testdir, ring_file_test)
    if ring_file_src:
        # copy source ring file to a policy-0 test ring file, re-homing servers
        _info('Using source ring file %s' % ring_file_src)
        ring_data = ring.RingData.load(ring_file_src)
        obj_sockets = []
        for dev in ring_data.devs:
            # assign device names sda1, sdb1, ... in socket order
            device = 'sd%c1' % chr(len(obj_sockets) + ord('a'))
            # NOTE(review): directories are created under the hard-coded
            # 'sda1' name rather than the freshly computed `device` —
            # confirm whether these should use `device` instead
            utils.mkdirs(os.path.join(_testdir, 'sda1'))
            utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
            obj_socket = listen_zero()
            obj_sockets.append(obj_socket)
            # re-home the device onto the ephemeral local socket
            dev['port'] = obj_socket.getsockname()[1]
            dev['ip'] = '127.0.0.1'
            dev['device'] = device
            dev['replication_port'] = dev['port']
            dev['replication_ip'] = dev['ip']
        ring_data.save(ring_file_test)
    else:
        # make default test ring, 3 replicas, 4 partitions, 3 devices
        # which will work for a replication policy or a 2+1 EC policy
        _info('No source object ring file, creating 3rep/4part/3dev ring')
        obj_sockets = [listen_zero() for _ in (0, 1, 2)]
        replica2part2dev_id = [[0, 1, 2, 0],
                               [1, 2, 0, 1],
                               [2, 0, 1, 2]]
        devs = [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                 'port': obj_sockets[0].getsockname()[1]},
                {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                 'port': obj_sockets[1].getsockname()[1]},
                {'id': 2, 'zone': 2, 'device': 'sdc1', 'ip': '127.0.0.1',
                 'port': obj_sockets[2].getsockname()[1]}]
        ring_data = ring.RingData(replica2part2dev_id, devs, 30)
        with closing(GzipFile(ring_file_test, 'wb')) as f:
            pickle.dump(ring_data, f)
    for dev in ring_data.devs:
        _debug('Ring file dev: %s' % dev)
    return obj_sockets
def _load_encryption(proxy_conf_file, swift_conf_file, **kwargs):
    """
    Load encryption configuration and override proxy-server.conf contents.
    :param proxy_conf_file: Source proxy conf filename
    :param swift_conf_file: Source swift conf filename
    :returns: Tuple of paths to the proxy conf file and swift conf file to use
    :raises InProcessException: raised if proxy conf contents are invalid
    """
    _debug('Setting configuration for encryption')
    # The global conf dict cannot be used to modify the pipeline.
    # The pipeline loader requires the pipeline to be set in the local_conf.
    # If pipeline is set in the global conf dict (which in turn populates the
    # DEFAULTS options) then it prevents pipeline being loaded into the local
    # conf during wsgi load_app.
    # Therefore we must modify the [pipeline:main] section.
    conf = ConfigParser()
    conf.read(proxy_conf_file)
    try:
        section = 'pipeline:main'
        pipeline = conf.get(section, 'pipeline')
        # insert keymaster + encryption just before the proxy server ...
        pipeline = pipeline.replace(
            "proxy-logging proxy-server",
            "keymaster encryption proxy-logging proxy-server")
        # ... and etag-quoter after the cache middleware
        pipeline = pipeline.replace(
            "cache listing_formats",
            "cache etag-quoter listing_formats")
        conf.set(section, 'pipeline', pipeline)
        # generate a fresh random root secret for this test run
        root_secret = base64.b64encode(os.urandom(32))
        if not six.PY2:
            root_secret = root_secret.decode('ascii')
        conf.set('filter:keymaster', 'encryption_root_secret', root_secret)
        conf.set('filter:versioned_writes', 'allow_object_versioning', 'true')
        conf.set('filter:etag-quoter', 'enable_by_default', 'true')
    except NoSectionError as err:
        msg = 'Error problem with proxy conf file %s: %s' % \
              (proxy_conf_file, err)
        raise InProcessException(msg)
    # write the modified conf into the test dir, leaving the source intact
    test_conf_file = os.path.join(_testdir, 'proxy-server.conf')
    with open(test_conf_file, 'w') as fp:
        conf.write(fp)
    return test_conf_file, swift_conf_file
def _load_ec_as_default_policy(proxy_conf_file, swift_conf_file, **kwargs):
    """
    Override swift.conf [storage-policy:0] section to use a 2+1 EC policy.
    :param proxy_conf_file: Source proxy conf filename
    :param swift_conf_file: Source swift conf filename
    :returns: Tuple of paths to the proxy conf file and swift conf file to use
    """
    _debug('Setting configuration for default EC policy')
    parser = ConfigParser()
    parser.read(swift_conf_file)
    # drop any policy sections inherited from swift.conf-sample
    for section_name in list(parser.sections()):
        if section_name.startswith('storage-policy'):
            parser.remove_section(section_name)
    # install a fresh policy 0 describing a default 2+1 EC scheme
    parser.add_section('storage-policy:0')
    for option, value in (
            ('name', 'ec-test'),
            ('policy_type', 'erasure_coding'),
            ('ec_type', 'liberasurecode_rs_vand'),
            ('ec_num_data_fragments', 2),
            ('ec_num_parity_fragments', 1),
            ('ec_object_segment_size', 1048576),
            ('default', True)):
        parser.set('storage-policy:0', option, str(value))
    with open(swift_conf_file, 'w') as fp:
        parser.write(fp)
    return proxy_conf_file, swift_conf_file
def _load_domain_remap_staticweb(proxy_conf_file, swift_conf_file, **kwargs):
    """
    Load domain_remap and staticweb into proxy server pipeline.
    :param proxy_conf_file: Source proxy conf filename
    :param swift_conf_file: Source swift conf filename
    :returns: Tuple of paths to the proxy conf file and swift conf file to use
    :raises InProcessException: raised if proxy conf contents are invalid
    """
    _debug('Setting configuration for domain_remap')
    # add a domain_remap storage_domain to the test configuration
    storage_domain = 'example.net'
    global config
    # expose the domain to the test framework via the global config dict
    config['storage_domain'] = storage_domain
    # The global conf dict cannot be used to modify the pipeline.
    # The pipeline loader requires the pipeline to be set in the local_conf.
    # If pipeline is set in the global conf dict (which in turn populates the
    # DEFAULTS options) then it prevents pipeline being loaded into the local
    # conf during wsgi load_app.
    # Therefore we must modify the [pipeline:main] section.
    conf = ConfigParser()
    conf.read(proxy_conf_file)
    try:
        section = 'pipeline:main'
        old_pipeline = conf.get(section, 'pipeline')
        pipeline = old_pipeline.replace(
            " tempauth ",
            " tempauth staticweb ")
        pipeline = pipeline.replace(
            " listing_formats ",
            " domain_remap listing_formats ")
        if pipeline == old_pipeline:
            # neither replacement matched, so the expected middlewares
            # are not in the source pipeline
            raise InProcessException(
                "Failed to insert domain_remap and staticweb into pipeline: %s"
                % old_pipeline)
        conf.set(section, 'pipeline', pipeline)
        # set storage_domain in domain_remap middleware to match test config
        section = 'filter:domain_remap'
        conf.set(section, 'storage_domain', storage_domain)
    except NoSectionError as err:
        msg = 'Error problem with proxy conf file %s: %s' % \
              (proxy_conf_file, err)
        raise InProcessException(msg)
    # write the modified conf into the test dir, leaving the source intact
    test_conf_file = os.path.join(_testdir, 'proxy-server.conf')
    with open(test_conf_file, 'w') as fp:
        conf.write(fp)
    return test_conf_file, swift_conf_file
def _load_s3api(proxy_conf_file, swift_conf_file, **kwargs):
    """
    Load s3api configuration and override proxy-server.conf contents.
    :param proxy_conf_file: Source proxy conf filename
    :param swift_conf_file: Source swift conf filename
    :returns: Tuple of paths to the proxy conf file and swift conf file to use
    :raises InProcessException: raised if proxy conf contents are invalid
    """
    _debug('Setting configuration for s3api')
    # The pipeline can only be modified through the local_conf that the
    # pipeline loader reads, so rewrite the [pipeline:main] section of a
    # copy of the proxy config instead of using the global conf dict.
    parser = ConfigParser()
    parser.read(proxy_conf_file)
    try:
        main_section = 'pipeline:main'
        new_pipeline = parser.get(main_section, 'pipeline').replace(
            "tempauth", "s3api tempauth")
        parser.set(main_section, 'pipeline', new_pipeline)
        parser.set('filter:s3api', 's3_acl', 'true')
        parser.set('filter:versioned_writes', 'allow_object_versioning',
                   'true')
    except NoSectionError as err:
        raise InProcessException(
            'Error problem with proxy conf file %s: %s'
            % (proxy_conf_file, err))
    # write the modified conf into the test dir, leaving the source intact
    test_conf_file = os.path.join(_testdir, 'proxy-server.conf')
    with open(test_conf_file, 'w') as fp:
        parser.write(fp)
    return test_conf_file, swift_conf_file
# Mapping from possible values of the variable
# SWIFT_TEST_IN_PROCESS_CONF_LOADER
# to the method to call for loading the associated configuration
# The expected signature for these methods is:
# conf_filename_to_use loader(input_conf_filename, **kwargs)
# Each loader returns the (proxy_conf, swift_conf) paths to use.
conf_loaders = {
    'encryption': _load_encryption,
    'ec': _load_ec_as_default_policy,
}
def in_process_setup(the_object_server=object_server):
_info('IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS')
_info('Using object_server class: %s' % the_object_server.__name__)
conf_src_dir = os.environ.get('SWIFT_TEST_IN_PROCESS_CONF_DIR')
show_debug_logs = os.environ.get('SWIFT_TEST_DEBUG_LOGS')
if conf_src_dir is not None:
if not os.path.isdir(conf_src_dir):
msg = 'Config source %s is not a dir' % conf_src_dir
raise InProcessException(msg)
_info('Using config source dir: %s' % conf_src_dir)
# If SWIFT_TEST_IN_PROCESS_CONF specifies a config source dir then
# prefer config files from there, otherwise read config from source tree
# sample files. A mixture of files from the two sources is allowed.
proxy_conf = _in_process_find_conf_file(conf_src_dir, 'proxy-server.conf')
_info('Using proxy config from %s' % proxy_conf)
swift_conf_src = _in_process_find_conf_file(conf_src_dir, 'swift.conf')
_info('Using swift config from %s' % swift_conf_src)
global _testdir
_testdir = os.path.join(mkdtemp(), 'tmp_functional')
utils.mkdirs(_testdir)
rmtree(_testdir)
utils.mkdirs(os.path.join(_testdir, 'sda1'))
utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
utils.mkdirs(os.path.join(_testdir, 'sdb1'))
utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))
utils.mkdirs(os.path.join(_testdir, 'sdc1'))
utils.mkdirs(os.path.join(_testdir, 'sdc1', 'tmp'))
swift_conf = _in_process_setup_swift_conf(swift_conf_src, _testdir)
_info('prepared swift.conf: %s' % swift_conf)
# load s3api and staticweb configs
proxy_conf, swift_conf = _load_s3api(proxy_conf, swift_conf)
proxy_conf, swift_conf = _load_domain_remap_staticweb(proxy_conf,
swift_conf)
# Call the associated method for the value of
# 'SWIFT_TEST_IN_PROCESS_CONF_LOADER', if one exists
conf_loader_label = os.environ.get(
'SWIFT_TEST_IN_PROCESS_CONF_LOADER')
if conf_loader_label is not None:
try:
conf_loader = conf_loaders[conf_loader_label]
_debug('Calling method %s mapped to conf loader %s' %
(conf_loader.__name__, conf_loader_label))
except KeyError as missing_key:
raise InProcessException('No function mapped for conf loader %s' %
missing_key)
try:
# Pass-in proxy_conf, swift_conf files
proxy_conf, swift_conf = conf_loader(proxy_conf, swift_conf)
_debug('Now using proxy conf %s' % proxy_conf)
_debug('Now using swift conf %s' % swift_conf)
except Exception as err: # noqa
raise InProcessException(err)
obj_sockets = _in_process_setup_ring(swift_conf, conf_src_dir, _testdir)
# load new swift.conf file
if set_swift_dir(os.path.dirname(swift_conf)):
constraints.reload_constraints()
storage_policy.reload_storage_policies()
global config
if constraints.SWIFT_CONSTRAINTS_LOADED:
# Use the swift constraints that are loaded for the test framework
# configuration
_c = dict((k, str(v))
for k, v in constraints.EFFECTIVE_CONSTRAINTS.items())
config.update(_c)
else:
# In-process swift constraints were not loaded, somethings wrong
raise SkipTest
global _test_socks
_test_socks = []
# We create the proxy server listening socket to get its port number so
# that we can add it as the "auth_port" value for the functional test
# clients.
prolis = listen_zero()
_test_socks.append(prolis)
# The following set of configuration values is used both for the
# functional test frame work and for the various proxy, account, container
# and object servers.
config.update({
# Values needed by the various in-process swift servers
'devices': _testdir,
'swift_dir': _testdir,
'mount_check': 'false',
'client_timeout': '4',
'allow_account_management': 'true',
'account_autocreate': 'true',
'allow_versions': 'True',
'allow_versioned_writes': 'True',
# TODO: move this into s3api config loader because they are
# required by only s3api
'allowed_headers':
"Content-Disposition, Content-Encoding, X-Delete-At, "
"X-Object-Manifest, X-Static-Large-Object, Cache-Control, "
"Content-Language, Expires, X-Robots-Tag",
# Below are values used by the functional test framework, as well as
# by the various in-process swift servers
'auth_uri': 'http://127.0.0.1:%d/auth/v1.0/' % prolis.getsockname()[1],
's3_storage_url': 'http://%s:%d/' % prolis.getsockname(),
# Primary functional test account (needs admin access to the
# account)
'account': 'test',
'username': 'tester',
'password': 'testing',
's3_access_key': 'test:tester',
's3_secret_key': 'testing',
# Secondary user of the primary test account (needs admin access
# to the account) for s3api
's3_access_key2': 'test:tester2',
's3_secret_key2': 'testing2',
# User on a second account (needs admin access to the account)
'account2': 'test2',
'username2': 'tester2',
'password2': 'testing2',
# User on same account as first, but without admin access
'username3': 'tester3',
'password3': 'testing3',
's3_access_key3': 'test:tester3',
's3_secret_key3': 'testing3',
# Service user and prefix (emulates glance, cinder, etc. user)
'account5': 'test5',
'username5': 'tester5',
'password5': 'testing5',
'service_prefix': 'SERVICE',
# For tempauth middleware. Update reseller_prefix
'reseller_prefix': 'AUTH, SERVICE',
'SERVICE_require_group': 'service',
# Reseller admin user (needs reseller_admin_role)
'account6': 'test6',
'username6': 'tester6',
'password6': 'testing6'
})
acc1lis = listen_zero()
acc2lis = listen_zero()
con1lis = listen_zero()
con2lis = listen_zero()
_test_socks += [acc1lis, acc2lis, con1lis, con2lis] + obj_sockets
account_ring_path = os.path.join(_testdir, 'account.ring.gz')
with closing(GzipFile(account_ring_path, 'wb')) as f:
pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': acc1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': acc2lis.getsockname()[1]}], 30),
f)
container_ring_path = os.path.join(_testdir, 'container.ring.gz')
with closing(GzipFile(container_ring_path, 'wb')) as f:
pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': con1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': con2lis.getsockname()[1]}], 30),
f)
# Default to only 4 seconds for in-process functional test runs
eventlet.wsgi.WRITE_TIMEOUT = 4
def get_logger_name(name):
if show_debug_logs:
return debug_logger(name)
else:
return None
acc1srv = account_server.AccountController(
config, logger=get_logger_name('acct1'))
acc2srv = account_server.AccountController(
config, logger=get_logger_name('acct2'))
con1srv = container_server.ContainerController(
config, logger=get_logger_name('cont1'))
con2srv = container_server.ContainerController(
config, logger=get_logger_name('cont2'))
objsrvs = [
(obj_sockets[index],
the_object_server.ObjectController(
config, logger=get_logger_name('obj%d' % (index + 1))))
for index in range(len(obj_sockets))
]
if show_debug_logs:
logger = get_logger_name('proxy')
else:
logger = utils.get_logger(config, 'wsgi-server', log_route='wsgi')
def get_logger(name, *args, **kwargs):
return logger
with mock.patch('swift.common.utils.get_logger', get_logger):
with mock.patch('swift.common.middleware.memcache.MemcacheMiddleware',
FakeMemcacheMiddleware):
try:
app = loadapp(proxy_conf, global_conf=config)
except Exception as e:
raise InProcessException(e)
nl = utils.NullLogger()
global proxy_srv
proxy_srv = prolis
prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl,
protocol=SwiftHttpProtocol)
acc1spa = eventlet.spawn(eventlet.wsgi.server, acc1lis, acc1srv, nl,
protocol=SwiftHttpProtocol)
acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl,
protocol=SwiftHttpProtocol)
con1spa = eventlet.spawn(eventlet.wsgi.server, con1lis, con1srv, nl,
protocol=SwiftHttpProtocol)
con2spa = eventlet.spawn(eventlet.wsgi.server, con2lis, con2srv, nl,
protocol=SwiftHttpProtocol)
objspa = [eventlet.spawn(eventlet.wsgi.server, objsrv[0], objsrv[1], nl,
protocol=SwiftHttpProtocol)
for objsrv in objsrvs]
global _test_coros
_test_coros = \
(prospa, acc1spa, acc2spa, con1spa, con2spa) + tuple(objspa)
# Create accounts "test" and "test2"
def create_account(act):
ts = utils.normalize_timestamp(time())
account_ring = Ring(_testdir, ring_name='account')
partition, nodes = account_ring.get_nodes(act)
for node in nodes:
# Note: we are just using the http_connect method in the object
# controller here to talk to the account server nodes.
conn = swift.proxy.controllers.obj.http_connect(
node['ip'], node['port'], node['device'], partition, 'PUT',
'/' + act, {'X-Timestamp': ts, 'x-trans-id': act})
resp = conn.getresponse()
assert resp.status == 201, 'Unable to create account: %s\n%s' % (
resp.status, resp.read())
create_account('AUTH_test')
create_account('AUTH_test2')
# Cached copy of the cluster's /info response; populated (and augmented with
# effective constraints under the 'swift' key) by get_cluster_info().
cluster_info = {}
def get_cluster_info():
    """Populate the module-level ``cluster_info`` cache.

    Constraints are merged in increasing priority: the current effective
    constraints, then whatever the cluster's /info API reports, then any
    overrides in the swift-constraints section of test.conf.  The merged
    result is stored under ``cluster_info['swift']``.
    """
    # The fallback constraints used for testing will come from the current
    # effective constraints.
    eff_constraints = dict(constraints.EFFECTIVE_CONSTRAINTS)
    # We'll update those constraints based on what the /info API provides, if
    # anything.
    global cluster_info
    global config
    try:
        conn = Connection(config)
        conn.authenticate()
        cluster_info.update(conn.cluster_info())
    except (ResponseError, socket.error, SkipTest):
        # Failed to get cluster_information via /info API, so fall back on
        # test.conf data
        pass
    else:
        try:
            eff_constraints.update(cluster_info['swift'])
        except KeyError:
            # Most likely the swift cluster has "expose_info = false" set
            # in its proxy-server.conf file, so we'll just do the best we
            # can.
            print("** Swift Cluster not exposing /info **", file=sys.stderr)
    # Finally, we'll allow any constraint present in the swift-constraints
    # section of test.conf to override everything. Note that only those
    # constraints defined in the constraints module are converted to integers.
    test_constraints = get_config('swift-constraints')
    for k in constraints.DEFAULT_CONSTRAINTS:
        try:
            test_constraints[k] = int(test_constraints[k])
        except KeyError:
            pass
        except ValueError:
            print("Invalid constraint value: %s = %s" % (
                k, test_constraints[k]), file=sys.stderr)
    eff_constraints.update(test_constraints)
    # Just make it look like these constraints were loaded from a /info call,
    # even if the /info call failed, or when they are overridden by values
    # from the swift-constraints section of test.conf
    cluster_info['swift'] = eff_constraints
def setup_package():
    """Package-level test setup.

    Decides between in-process and external-cluster functional testing
    (driven by SWIFT_TEST_IN_PROCESS / test.conf), loads config, fills the
    swift_test_* credential globals for up to six test users, computes all
    of the skip* flags, and finally caches cluster constraints via
    get_cluster_info().
    """
    global policy_specified
    global skip_if_no_xattrs
    policy_specified = os.environ.get('SWIFT_TEST_POLICY')
    in_process_env = os.environ.get('SWIFT_TEST_IN_PROCESS')
    if in_process_env is not None:
        use_in_process = utils.config_true_value(in_process_env)
    else:
        use_in_process = None
    global in_process
    global config
    if use_in_process:
        # Explicitly set to True, so barrel on ahead with in-process
        # functional test setup.
        in_process = True
        # NOTE: No attempt is made to read a local test.conf file.
    else:
        if use_in_process is None:
            # Not explicitly set, default to using in-process functional tests
            # if the test.conf file is not found, or does not provide a usable
            # configuration.
            config.update(get_config('func_test'))
            if not config:
                in_process = True
            # else... leave in_process value unchanged. It may be that
            # setup_package is called twice, in which case in_process_setup may
            # have loaded config before we reach here a second time, so the
            # existence of config is not reliable to determine that in_process
            # should be False. Anyway, its default value is False.
        else:
            # Explicitly set to False, do not attempt to use in-process
            # functional tests, be sure we attempt to read from local
            # test.conf file.
            in_process = False
            config.update(get_config('func_test'))
    if in_process:
        in_mem_obj_env = os.environ.get('SWIFT_TEST_IN_MEMORY_OBJ')
        in_mem_obj = utils.config_true_value(in_mem_obj_env)
        skip_if_no_xattrs = real_skip_if_no_xattrs
        try:
            in_process_setup(the_object_server=(
                mem_object_server if in_mem_obj else object_server))
        except InProcessException as exc:
            print(('Exception during in-process setup: %s'
                   % str(exc)), file=sys.stderr)
            raise
    else:
        # External cluster: xattr support is the cluster's problem, not ours.
        skip_if_no_xattrs = lambda: None
    global web_front_end
    web_front_end = config.get('web_front_end', 'integral')
    global normalized_urls
    normalized_urls = config.get('normalized_urls', False)
    global orig_collate
    # Remember the current collation so teardown_package() can restore it.
    orig_collate = locale.setlocale(locale.LC_COLLATE)
    locale.setlocale(locale.LC_COLLATE, config.get('collate', 'C'))
    global insecure
    insecure = config_true_value(config.get('insecure', False))
    global swift_test_auth_version
    global swift_test_auth
    global swift_test_user
    global swift_test_key
    global swift_test_tenant
    global swift_test_perm
    global swift_test_domain
    global swift_test_service_prefix
    swift_test_service_prefix = None
    if config:
        swift_test_auth_version = str(config.get('auth_version', '1'))
        if 'auth_uri' in config:
            swift_test_auth = config['auth_uri']
            # Back-fill the individual parts -- really, we should just need
            # host and port for s3_test_client, and that's only until we
            # improve it to take a s3_storage_url option
            parsed = urlsplit(config['auth_uri'])
            config.update({
                'auth_ssl': str(parsed.scheme == 'https'),
                'auth_host': parsed.hostname,
                'auth_port': str(
                    parsed.port if parsed.port is not None else
                    443 if parsed.scheme == 'https' else 80),
                'auth_prefix': parsed.path,
            })
            config.setdefault('s3_storage_url',
                              urlunsplit(parsed[:2] + ('', None, None)))
        elif 'auth_host' in config:
            scheme = 'http'
            if config_true_value(config.get('auth_ssl', 'no')):
                scheme = 'https'
            netloc = config['auth_host']
            if 'auth_port' in config:
                netloc += ':' + config['auth_port']
            auth_prefix = config.get('auth_prefix', '/')
            if swift_test_auth_version == "1":
                auth_prefix += 'v1.0'
            config['auth_uri'] = swift_test_auth = urlunsplit(
                (scheme, netloc, auth_prefix, None, None))
            config.setdefault('s3_storage_url', urlunsplit(
                (scheme, netloc, '', None, None)))
        # else, neither auth_uri nor auth_host; swift_test_auth will be unset
        # and we'll skip everything later
        if 'service_prefix' in config:
            swift_test_service_prefix = utils.append_underscore(
                config['service_prefix'])
        if swift_test_auth_version == "1":
            try:
                if 'account' in config:
                    swift_test_user[0] = '%(account)s:%(username)s' % config
                else:
                    swift_test_user[0] = '%(username)s' % config
                swift_test_key[0] = config['password']
            except KeyError:
                # bad config, no account/username configured, tests cannot be
                # run
                pass
            try:
                swift_test_user[1] = '%s%s' % (
                    '%s:' % config['account2'] if 'account2' in config else '',
                    config['username2'])
                swift_test_key[1] = config['password2']
            except KeyError:
                pass  # old config, no second account tests can be run
            try:
                swift_test_user[2] = '%s%s' % (
                    '%s:' % config['account'] if 'account'
                    in config else '', config['username3'])
                swift_test_key[2] = config['password3']
            except KeyError:
                pass  # old config, no third account tests can be run
            try:
                swift_test_user[4] = '%s%s' % (
                    '%s:' % config['account5'], config['username5'])
                swift_test_key[4] = config['password5']
                swift_test_tenant[4] = config['account5']
            except KeyError:
                pass  # no service token tests can be run
            for _ in range(3):
                swift_test_perm[_] = swift_test_user[_]
        else:
            # Keystone-style (auth v2/v3) credentials.
            swift_test_user[0] = config['username']
            swift_test_tenant[0] = config['account']
            swift_test_key[0] = config['password']
            if 'domain' in config:
                swift_test_domain[0] = config['domain']
            swift_test_user[1] = config['username2']
            swift_test_tenant[1] = config['account2']
            swift_test_key[1] = config['password2']
            if 'domain2' in config:
                swift_test_domain[1] = config['domain2']
            swift_test_user[2] = config['username3']
            swift_test_tenant[2] = config['account']
            swift_test_key[2] = config['password3']
            if 'domain3' in config:
                swift_test_domain[2] = config['domain3']
            if 'username4' in config:
                swift_test_user[3] = config['username4']
                swift_test_tenant[3] = config['account4']
                swift_test_key[3] = config['password4']
                swift_test_domain[3] = config['domain4']
            if 'username5' in config:
                swift_test_user[4] = config['username5']
                swift_test_tenant[4] = config['account5']
                swift_test_key[4] = config['password5']
                if 'domain5' in config:
                    swift_test_domain[4] = config['domain5']
            if 'username6' in config:
                swift_test_user[5] = config['username6']
                swift_test_tenant[5] = config['account6']
                swift_test_key[5] = config['password6']
                if 'domain6' in config:
                    swift_test_domain[5] = config['domain6']
            for _ in range(5):
                swift_test_perm[_] = swift_test_tenant[_] + ':' \
                    + swift_test_user[_]
    global skip
    if not skip:
        skip = not all([swift_test_auth, swift_test_user[0],
                        swift_test_key[0]])
        if skip:
            print('SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG',
                  file=sys.stderr)
    global skip2
    if not skip2:
        skip2 = not all([not skip, swift_test_user[1], swift_test_key[1]])
        if not skip and skip2:
            print('SKIPPING SECOND ACCOUNT FUNCTIONAL TESTS '
                  'DUE TO NO CONFIG FOR THEM', file=sys.stderr)
    global skip3
    if not skip3:
        skip3 = not all([not skip, swift_test_user[2], swift_test_key[2]])
        if not skip and skip3:
            print('SKIPPING THIRD ACCOUNT FUNCTIONAL TESTS '
                  'DUE TO NO CONFIG FOR THEM', file=sys.stderr)
    global skip_if_not_v3
    if not skip_if_not_v3:
        skip_if_not_v3 = (swift_test_auth_version != '3'
                          or not all([not skip,
                                      swift_test_user[3],
                                      swift_test_key[3]]))
        if not skip and skip_if_not_v3:
            print('SKIPPING FUNCTIONAL TESTS SPECIFIC TO AUTH VERSION 3',
                  file=sys.stderr)
    global skip_service_tokens
    if not skip_service_tokens:
        skip_service_tokens = not all([not skip, swift_test_user[4],
                                       swift_test_key[4], swift_test_tenant[4],
                                       swift_test_service_prefix])
        if not skip and skip_service_tokens:
            print(
                'SKIPPING FUNCTIONAL TESTS SPECIFIC TO SERVICE TOKENS',
                file=sys.stderr)
    if policy_specified:
        policies = FunctionalStoragePolicyCollection.from_info()
        for p in policies:
            # policy names are case-insensitive
            if policy_specified.lower() == p['name'].lower():
                _info('Using specified policy %s' % policy_specified)
                FunctionalStoragePolicyCollection.policy_specified = p
                Container.policy_specified = policy_specified
                break
        else:
            _info(
                'SKIPPING FUNCTIONAL TESTS: Failed to find specified policy %s'
                % policy_specified)
            raise Exception('Failed to find specified policy %s'
                            % policy_specified)
    global skip_if_no_reseller_admin
    if not skip_if_no_reseller_admin:
        skip_if_no_reseller_admin = not all([not skip, swift_test_user[5],
                                             swift_test_key[5],
                                             swift_test_tenant[5]])
        if not skip and skip_if_no_reseller_admin:
            print('SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG FOR '
                  'RESELLER ADMIN', file=sys.stderr)
    get_cluster_info()
def teardown_package():
    """Package-level test teardown.

    Restores the locale saved by setup_package(), best-effort deletes the
    containers the tests created, tears down any in-process servers and
    their temp dir, and resets the module globals for a possible re-run.
    """
    global orig_collate
    locale.setlocale(locale.LC_COLLATE, orig_collate)
    # clean up containers and objects left behind after running tests
    global config
    if config:
        try:
            conn = Connection(config)
            conn.authenticate()
            account = Account(conn, config.get('account', config['username']))
            account.delete_containers()
        except (SkipTest):
            pass
    global in_process
    global _test_socks
    if in_process:
        try:
            for i, server in enumerate(_test_coros):
                server.kill()
                if not server.dead:
                    # kill it from the socket level
                    _test_socks[i].close()
        except Exception:
            # Best-effort shutdown; failures here must not fail the run.
            pass
        try:
            rmtree(os.path.dirname(_testdir))
        except Exception:
            pass
    reset_globals()
class AuthError(Exception):
    """Raised by check_response() on a 401; retry() reacts by discarding
    the cached storage URL and token and re-authenticating."""
    pass
class InternalServerError(Exception):
    """Raised by check_response() on any 5xx response; retry() treats it
    as transient and retries the request."""
    pass
# Lazily-populated per-test-user caches (slot N holds user N+1's state):
# storage URL, auth token, service token, parsed URL and HTTP connection.
# retry() fills and invalidates them; reset_globals() clears them.
url = [None, None, None, None, None]
token = [None, None, None, None, None]
service_token = [None, None, None, None, None]
parsed = [None, None, None, None, None]
conn = [None, None, None, None, None]
def reset_globals():
    """Drop every cached per-account credential/connection slot and any
    loaded test configuration, returning the module to its pristine state."""
    global url, token, service_token, parsed, conn, config
    # One fresh five-slot list per cache (distinct list objects on purpose).
    url, token, service_token, parsed, conn = (
        [None] * 5 for _ in range(5))
    if config:
        config = {}
def connection(url):
    """Open an HTTP connection to ``url`` and return (parsed_url, conn).

    The connection's ``request`` method is wrapped so that, when a specific
    storage policy was requested for this test run, container PUTs
    automatically carry an X-Storage-Policy header.
    """
    if has_insecure:
        parsed_url, http_conn = http_connection(url, insecure=insecure)
    else:
        parsed_url, http_conn = http_connection(url)
    orig_request = http_conn.request

    # Add the policy header if policy_specified is set
    def request_with_policy(method, url, body=None, headers=None):
        # Bug fix: the previous ``headers={}`` default was a single dict
        # shared across calls, and a caller-supplied dict was mutated in
        # place.  Copy into a fresh dict per call instead.
        headers = dict(headers) if headers else {}
        version, account, container, obj = split_path(url, 1, 4, True)
        if policy_specified and method == 'PUT' and container and not obj \
                and 'X-Storage-Policy' not in headers:
            headers['X-Storage-Policy'] = policy_specified
        return orig_request(method, url, body, headers)

    http_conn.request = request_with_policy
    return parsed_url, http_conn
def get_url_token(user_index, os_options):
    """Authenticate test user ``user_index`` (0-indexed) and return the
    ``(storage_url, token)`` pair.

    On Python 2 both values are coerced to bytes for consistency with the
    rest of the helpers.
    """
    auth_kwargs = {
        'snet': False,
        'tenant_name': swift_test_tenant[user_index],
        'auth_version': swift_test_auth_version,
        'os_options': os_options,
        'insecure': insecure,
    }
    url, token = get_auth(swift_test_auth,
                          swift_test_user[user_index],
                          swift_test_key[user_index],
                          **auth_kwargs)
    if six.PY2:
        if not isinstance(url, bytes):
            url = url.encode('utf-8')
        if not isinstance(token, bytes):
            token = token.encode('utf-8')
    return url, token
def retry(func, *args, **kwargs):
    """
    Call ``func(url, token, parsed_result, conn, *args, **kwargs)`` with
    cached credentials, retrying with exponential backoff on transient
    failures and re-authenticating on auth failures.

    You can use the kwargs to override:
    'retries' (default: 5)
    'use_account' (default: 1) - which user's token to pass
    'url_account' (default: matches 'use_account') - which user's storage URL
    'resource' (default: url[url_account]) - URL to connect to; retry()
                will interpolate the variable :storage_url: if present
    'service_user' - add a service token from this user (1 indexed)
    """
    global url, token, service_token, parsed, conn
    # NOTE(review): 'retries' is read with get(), not pop(), so it stays in
    # kwargs and is forwarded to func -- presumably harmless there; confirm
    # before changing.
    retries = kwargs.get('retries', 5)
    attempts, backoff = 0, 1
    # use account #1 by default; turn user's 1-indexed account into 0-indexed
    use_account = kwargs.pop('use_account', 1) - 1
    service_user = kwargs.pop('service_user', None)
    if service_user:
        service_user -= 1  # 0-index
    # access our own account by default
    url_account = kwargs.pop('url_account', use_account + 1) - 1
    os_options = {'user_domain_name': swift_test_domain[use_account],
                  'project_domain_name': swift_test_domain[use_account]}
    while attempts <= retries:
        auth_failure = False
        attempts += 1
        try:
            if not url[use_account] or not token[use_account]:
                url[use_account], token[use_account] = get_url_token(
                    use_account, os_options)
                parsed[use_account] = conn[use_account] = None
            if not parsed[use_account] or not conn[use_account]:
                parsed[use_account], conn[use_account] = \
                    connection(url[use_account])
            # default resource is the account url[url_account]
            resource = kwargs.pop('resource', '%(storage_url)s')
            template_vars = {'storage_url': url[url_account]}
            parsed_result = urlparse(resource % template_vars)
            if isinstance(service_user, int):
                if not service_token[service_user]:
                    dummy, service_token[service_user] = get_url_token(
                        service_user, os_options)
                kwargs['service_token'] = service_token[service_user]
            # NOTE(review): the token comes from use_account while the
            # connection is indexed by url_account -- looks intentional for
            # cross-account (ACL) tests; confirm.
            return func(url[url_account], token[use_account],
                        parsed_result, conn[url_account],
                        *args, **kwargs)
        except (socket.error, HTTPException):
            if attempts > retries:
                raise
            # Force a fresh connection on the next attempt.
            parsed[use_account] = conn[use_account] = None
            if service_user:
                service_token[service_user] = None
        except AuthError:
            # Token rejected: re-authenticate immediately (no backoff sleep).
            auth_failure = True
            url[use_account] = token[use_account] = None
            if service_user:
                service_token[service_user] = None
        except InternalServerError:
            pass
        if attempts <= retries:
            if not auth_failure:
                sleep(backoff)
            backoff *= 2
    raise Exception('No result after %s retries.' % retries)
def check_response(conn):
    """Fetch the pending response from ``conn`` and screen it for errors.

    Raises AuthError on 401 and InternalServerError on any 5xx; in both
    cases the body is drained first so the connection stays reusable.
    Every other response is returned unread.
    """
    resp = conn.getresponse()
    status = resp.status
    if status == 401:
        resp.read()
        raise AuthError()
    if 500 <= status < 600:
        resp.read()
        raise InternalServerError()
    return resp
def load_constraint(name):
    """Look up constraint ``name`` from the cached cluster info.

    Raises SkipTest when the constraint is missing or not an integer so
    that dependent tests are skipped rather than failed.
    """
    global cluster_info
    swift_info = cluster_info.get('swift', {})
    if name not in swift_info:
        raise SkipTest("Missing constraint: %s" % name)
    value = swift_info[name]
    if not isinstance(value, int):
        raise SkipTest("Bad value, %r, for constraint: %s" % (value, name))
    return value
def get_storage_policy_from_cluster_info(info):
    """Split the policies advertised in ``info`` into default and
    non-default groups.

    :param info: a cluster-info dict whose 'swift' sub-dict may contain a
        'policies' list of policy dicts.
    :returns: 2-tuple ``(default_policies, non_default_policies)``.
    """
    defaults = []
    others = []
    for policy in info['swift'].get('policies', {}):
        bucket = defaults if policy.get('default', {}) else others
        bucket.append(policy)
    return defaults, others
def reset_acl():
    """POST an empty account ACL to account 1, clearing any ACLs that a
    test may have set."""
    def post(url, token, parsed, conn):
        headers = {
            'X-Auth-Token': token,
            'X-Account-Access-Control': '{}',
        }
        conn.request('POST', parsed.path, '', headers)
        return check_response(conn)
    response = retry(post, use_account=1)
    response.read()
def requires_acls(f):
    """Decorator: run *f* only when the cluster supports tempauth account
    ACLs, resetting account 1's ACL both before and after the call."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        global skip, cluster_info
        # All of these preconditions share the same skip reason: tests were
        # globally skipped, /info is unavailable, tempauth does not expose
        # account_acls, or auth is not v1 (remove the version check once
        # keystoneauth supports account acls).
        if (skip or not cluster_info
                or not cluster_info.get('tempauth', {}).get('account_acls')
                or swift_test_auth_version != '1'):
            raise SkipTest('Requires account ACLs')
        reset_acl()
        try:
            rv = f(*args, **kwargs)
        finally:
            reset_acl()
        return rv
    return wrapper
class FunctionalStoragePolicyCollection(object):
    """A read-only, filterable wrapper around the list of storage-policy
    dicts the cluster advertises via /info."""

    # policy_specified is set in __init__.py when tests are being set up.
    policy_specified = None

    def __init__(self, policies):
        self._all = policies
        self.default = None
        for candidate in self:
            if not candidate.get('default', False):
                continue
            assert self.default is None, (
                'Found multiple default policies %r and %r'
                % (self.default, candidate))
            self.default = candidate

    @classmethod
    def from_info(cls, info=None):
        """Build a collection from /info data, fetching it when needed."""
        if not (info or cluster_info):
            get_cluster_info()
        info = info or cluster_info
        try:
            policy_info = info['swift']['policies']
        except KeyError:
            raise AssertionError('Did not find any policy info in %r' % info)
        collection = cls(policy_info)
        assert collection.default, (
            'Did not find default policy in %r' % policy_info)
        return collection

    def __len__(self):
        return len(self._all)

    def __iter__(self):
        return iter(self._all)

    def __getitem__(self, index):
        return self._all[index]

    def filter(self, **kwargs):
        """New collection of the policies matching every given attr=value."""
        keep = [p for p in self
                if all(p.get(k) == v for k, v in kwargs.items())]
        return self.__class__(keep)

    def exclude(self, **kwargs):
        """New collection of the policies matching none of the attr=values."""
        keep = [p for p in self
                if all(p.get(k) != v for k, v in kwargs.items())]
        return self.__class__(keep)

    def select(self):
        # Prefer the operator-specified policy when it is still present in
        # this (possibly filtered) collection; otherwise pick at random.
        if self.policy_specified and self.policy_specified in self:
            return self.policy_specified
        return random.choice(self)
def requires_policies(f):
    """Decorator for test methods that need at least two storage policies;
    stores the discovered policies on ``self.policies`` before calling."""
    @functools.wraps(f)
    def decorated(self, *args, **kwargs):
        if skip:
            raise SkipTest
        try:
            self.policies = FunctionalStoragePolicyCollection.from_info()
        except AssertionError:
            raise SkipTest("Unable to determine available policies")
        if len(self.policies) < 2:
            raise SkipTest("Multiple policies not enabled")
        return f(self, *args, **kwargs)
    return decorated
def requires_bulk(f):
    """Decorator: run *f* only when the cluster advertises the bulk
    middleware in its /info capabilities."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        bulk_available = bool(cluster_info) and bool(
            cluster_info.get('bulk_upload', {}))
        if skip or not bulk_available:
            raise SkipTest('Requires bulk middleware')
        return f(*args, **kwargs)
    return wrapper
| 38.494489 | 79 | 0.628791 |
ace55401ffc470dccf6aa3ad02b4e435a3b90261 | 1,310 | py | Python | Python3-Basics/Chapter04_FlowControl03.py | anliven/Reading-Code-Learning-Python | a814cab207bbaad6b5c69b9feeb8bf2f459baf2b | [
"Apache-2.0"
] | null | null | null | Python3-Basics/Chapter04_FlowControl03.py | anliven/Reading-Code-Learning-Python | a814cab207bbaad6b5c69b9feeb8bf2f459baf2b | [
"Apache-2.0"
] | null | null | null | Python3-Basics/Chapter04_FlowControl03.py | anliven/Reading-Code-Learning-Python | a814cab207bbaad6b5c69b9feeb8bf2f459baf2b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
L1 = ['aaa', 'bbb', 'ccc']
L2 = [111, 222, 333]
print(zip(L1, L2), list(zip(L1, L2))) # zip函数的返回值是一个迭代对象,可用list将其转换为列表
for k, v in zip(L1, L2):
print("{} --- {}.".format(k, v))
S = "abc123ABC123abcABC"
for index, string in enumerate(S): # 使用内置函数enumerate()可在迭代时同时获取索引
if 'B' in string:
print("string:{} - index:{}".format(string, index))
L = ['aaa', 'bbb', 'ccc', 'ddd', 'ccc']
for index, value in enumerate(L):
print("index:{} - value:{}".format(index, value))
if 'ccc' in value:
print("# Found:", value, index)
print(list(reversed("Hello, Python3!"))) # 内置函数reversed()支持反向迭代
print("".join(reversed("Python3")))
# ### 并行迭代
# 内置函数zip支持并行迭代,可将两个序列“缝合”并返回一个有元组组成的序列;
# 内置函数zip可“缝合”任意数量的序列;
# 特别注意:序列的长度不同时,内置函数zip将在最短的序列用完后停止“缝合”;
#
# ### 迭代时获取索引
# 使用内置函数enumerate可在迭代时同时获取索引;
#
# ### zip()
# https://docs.python.org/3/library/functions.html#zip
# 内置函数zip()用于创建一个迭代器,聚合来自每个迭代器的元素;
#
# ### list()
# https://docs.python.org/3/library/functions.html#func-list
# 内置函数list()用于将一个可迭代对象转换为列表;
#
# ### enumerate()
# https://docs.python.org/3/library/functions.html#enumerate
# 内置函数enumerate()用于返回一个枚举对象(enumerate object);
#
# ### reversed()
# https://docs.python.org/3/library/functions.html#reversed
# 内置函数reversed()用于返回一个方向的迭代器,可用于任何序列和可迭代对象,而且不就地修改原对象;
| 28.478261 | 71 | 0.661069 |
ace55438309b1f9c99ab2758d027e5f4a2f6b2b9 | 3,744 | py | Python | huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/download_slowlog_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/download_slowlog_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/download_slowlog_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DownloadSlowlogRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'body': 'DownloadSlowlogRequestBody'
}
attribute_map = {
'instance_id': 'instance_id',
'body': 'body'
}
def __init__(self, instance_id=None, body=None):
"""DownloadSlowlogRequest - a model defined in huaweicloud sdk"""
self._instance_id = None
self._body = None
self.discriminator = None
self.instance_id = instance_id
if body is not None:
self.body = body
@property
def instance_id(self):
"""Gets the instance_id of this DownloadSlowlogRequest.
实例ID,可以调用“查询实例列表”接口获取。如果未申请实例,可以调用“创建实例”接口创建。
:return: The instance_id of this DownloadSlowlogRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this DownloadSlowlogRequest.
实例ID,可以调用“查询实例列表”接口获取。如果未申请实例,可以调用“创建实例”接口创建。
:param instance_id: The instance_id of this DownloadSlowlogRequest.
:type: str
"""
self._instance_id = instance_id
@property
def body(self):
"""Gets the body of this DownloadSlowlogRequest.
:return: The body of this DownloadSlowlogRequest.
:rtype: DownloadSlowlogRequestBody
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this DownloadSlowlogRequest.
:param body: The body of this DownloadSlowlogRequest.
:type: DownloadSlowlogRequestBody
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DownloadSlowlogRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.935252 | 79 | 0.567842 |
ace554d554e7bfdeb5313140510c6cda0506faa6 | 2,664 | py | Python | odps/tests/test_accounts.py | walker83/aliyun-odps-python-sdk | f69c2520d346554131f4129360cb7ae1211699ce | [
"Apache-2.0"
] | null | null | null | odps/tests/test_accounts.py | walker83/aliyun-odps-python-sdk | f69c2520d346554131f4129360cb7ae1211699ce | [
"Apache-2.0"
] | null | null | null | odps/tests/test_accounts.py | walker83/aliyun-odps-python-sdk | f69c2520d346554131f4129360cb7ae1211699ce | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from odps import ODPS
from odps.tests.core import TestBase, tn
from odps.accounts import SignServer, SignServerAccount, SignServerError
class Test(TestBase):
def testSignServerAccount(self):
server = SignServer()
server.accounts[self.odps.account.access_id] = self.odps.account.secret_access_key
try:
server.start(('127.0.0.1', 0))
account = SignServerAccount(self.odps.account.access_id, server.server.server_address)
odps = ODPS(None, None, self.odps.project, self.odps.endpoint, account=account)
odps.delete_table(tn('test_sign_account_table'), if_exists=True)
t = odps.create_table(tn('test_sign_account_table'), 'col string', lifecycle=1)
self.assertTrue(odps.exist_table(tn('test_sign_account_table')))
t.drop(async_=True)
finally:
server.stop()
def testTokenizedSignServerAccount(self):
server = SignServer(token=str(uuid.uuid4()))
server.accounts[self.odps.account.access_id] = self.odps.account.secret_access_key
try:
server.start(('127.0.0.1', 0))
account = SignServerAccount(self.odps.account.access_id, server.server.server_address)
odps = ODPS(None, None, self.odps.project, self.odps.endpoint, account=account)
self.assertRaises(SignServerError,
lambda: odps.delete_table(tn('test_sign_account_table'), if_exists=True))
account = SignServerAccount(self.odps.account.access_id, server.server.server_address, token=server.token)
odps = ODPS(None, None, self.odps.project, self.odps.endpoint, account=account)
odps.delete_table(tn('test_sign_account_table'), if_exists=True)
t = odps.create_table(tn('test_sign_account_table'), 'col string', lifecycle=1)
self.assertTrue(odps.exist_table(tn('test_sign_account_table')))
t.drop(async_=True)
finally:
server.stop()
| 46.736842 | 118 | 0.687688 |
ace555ace40b4f7e61b92ea7a6da804936da77be | 1,962 | py | Python | werobot/utils.py | shenglihu/WeRoBot | 6fd376e3055d0c132720ebe8cc5d442072a2171b | [
"MIT"
] | 2 | 2015-12-08T08:35:42.000Z | 2015-12-08T08:35:44.000Z | werobot/utils.py | shenglihu/WeRoBot | 6fd376e3055d0c132720ebe8cc5d442072a2171b | [
"MIT"
] | null | null | null | werobot/utils.py | shenglihu/WeRoBot | 6fd376e3055d0c132720ebe8cc5d442072a2171b | [
"MIT"
] | 2 | 2016-02-28T16:55:32.000Z | 2016-10-17T08:13:46.000Z | #coding: utf8
import re
import random
import json
import six
import time
from hashlib import sha1
string_types = (six.string_types, six.text_type, six.binary_type)
def check_token(token):
    """Return a truthy match object when *token* is 3-32 alphanumeric
    characters, else None."""
    token_pattern = r'^[A-Za-z0-9]{3,32}$'
    return re.match(token_pattern, token)
def to_text(value, encoding="utf-8"):
if isinstance(value, six.text_type):
return value
if isinstance(value, six.binary_type):
return value.decode(encoding)
return six.text_type(value)
def to_binary(value, encoding="utf-8"):
if isinstance(value, six.binary_type):
return value
if isinstance(value, six.text_type):
return value.encode(encoding)
return six.binary_type(value)
def is_string(value):
return isinstance(value, string_types)
def generate_token(length=''):
    """Return a random alphanumeric token.

    ``length`` may be falsy (a random length in [3, 32] is chosen) or
    anything ``int()`` accepts; the final length must fall within [3, 32].
    """
    if not length:
        length = random.randint(3, 32)
    length = int(length)
    assert 3 <= length <= 32
    alphabet = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789')
    return ''.join(random.choice(alphabet) for _ in range(length))
def json_loads(s):
s = to_text(s)
return json.loads(s)
def json_dumps(d):
return json.dumps(d)
def pay_sign_dict(appid, pay_sign_key, add_noncestr=True, add_timestamp=True, add_appid=True, **kwargs):
    """
    Sign a set of WeChat pay parameters.

    Optionally injects ``appid``, a random ``noncestr`` and the current
    ``timestamp`` into the parameters, then computes a SHA1 digest over
    the sorted ``key=value`` pairs (with the secret ``appkey`` mixed in).

    :returns: ``(params, sign, sign_type)`` -- the dict of signed
        parameters (never containing the secret), the hex SHA1 digest and
        the constant ``'SHA1'``.
    """
    assert pay_sign_key, "PAY SIGN KEY IS EMPTY"
    if add_appid:
        kwargs.update({'appid': appid})
    if add_noncestr:
        kwargs.update({'noncestr': generate_token()})
    if add_timestamp:
        kwargs.update({'timestamp': int(time.time())})
    params = kwargs.items()
    # 'appid' is filtered out and re-added explicitly so it appears exactly
    # once in the signed pairs even if the caller supplied it; the secret
    # 'appkey' participates in the signature but is never returned.
    _params = [(k.lower(), v)
               for k, v in kwargs.items() if k.lower() != "appid"]
    _params += [('appid', appid), ('appkey', pay_sign_key)]
    _params.sort()
    sign_data = '&'.join("%s=%s" % (str(k), str(v)) for k, v in _params)
    if not isinstance(sign_data, bytes):
        # Bug fix: hashlib refuses text input on Python 3 (the old code
        # passed str straight to sha1() and raised TypeError there); on
        # Python 2 str is already bytes, so this branch is skipped.
        sign_data = sign_data.encode('utf-8')
    sign = sha1(sign_data).hexdigest()
    sign_type = 'SHA1'
    return dict(params), sign, sign_type
| 23.082353 | 128 | 0.641692 |
ace556794a1a27443e0f9cb625c1a1d7835ea3eb | 1,553 | py | Python | src/targetdb/models/proposal_category.py | Subaru-PFS/ets_target_database | 781e4507c3a625de1a59e86dc18ec4f16cd1986f | [
"MIT"
] | null | null | null | src/targetdb/models/proposal_category.py | Subaru-PFS/ets_target_database | 781e4507c3a625de1a59e86dc18ec4f16cd1986f | [
"MIT"
] | null | null | null | src/targetdb/models/proposal_category.py | Subaru-PFS/ets_target_database | 781e4507c3a625de1a59e86dc18ec4f16cd1986f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from sqlalchemy import BigInteger
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
# from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import backref
from sqlalchemy.orm import relation
from . import Base
class proposal_category(Base):
    """ORM model for the ``proposal_category`` lookup table.

    Maps proposal categories (e.g. Openuse, Keck, Gemini, UH) to integer
    identifiers.  Identifiers are assigned externally — ``autoincrement``
    is disabled on the primary key.
    """

    __tablename__ = "proposal_category"

    proposal_category_id = Column(
        Integer,
        primary_key=True,
        unique=True,
        autoincrement=False,
        comment="Unique identifier of proposal category",
    )
    proposal_category_name = Column(
        String,
        unique=True,
        comment="Proposal category name (e.g., Openuse, Keck, Gemini, and UH)",
    )
    proposal_category_description = Column(
        String,
        # fixed: the example parenthesis was left unclosed
        comment="Proposal category description (e.g., Openuse, Time exchange, etc.)",
    )
    created_at = Column(DateTime, comment="Creation time")
    updated_at = Column(DateTime, comment="Update time")

    def __init__(
        self,
        proposal_category_id,
        proposal_category_name,
        proposal_category_description,
        created_at,
        updated_at,
    ):
        """Populate all column attributes from the given values."""
        self.proposal_category_id = proposal_category_id
        self.proposal_category_name = proposal_category_name
        self.proposal_category_description = proposal_category_description
        self.created_at = created_at
        self.updated_at = updated_at
ace55727c2bcba78d86aa4724f4aa02b199abe4e | 5,761 | py | Python | google/cloud/compute_v1/services/packet_mirrorings/pagers.py | vam-google/python-compute | 799f2f55e5e205317862a17ca7ed548ce2ca66e5 | [
"Apache-2.0"
] | 19 | 2021-02-10T21:17:20.000Z | 2022-02-20T03:16:36.000Z | google/cloud/compute_v1/services/packet_mirrorings/pagers.py | vam-google/python-compute | 799f2f55e5e205317862a17ca7ed548ce2ca66e5 | [
"Apache-2.0"
] | 121 | 2021-01-08T23:46:58.000Z | 2022-03-26T04:34:36.000Z | google/cloud/compute_v1/services/packet_mirrorings/pagers.py | vam-google/python-compute | 799f2f55e5e205317862a17ca7ed548ce2ca66e5 | [
"Apache-2.0"
] | 20 | 2021-01-08T23:14:16.000Z | 2022-02-25T01:27:20.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.compute_v1.types import compute
class AggregatedListPager:
    """Iterator helper for ``AggregatedList`` requests.

    Wraps an initial
    :class:`google.cloud.compute_v1.types.PacketMirroringAggregatedList`
    response and transparently issues follow-up ``AggregatedList`` requests
    whenever a ``next_page_token`` is present, yielding the entries of the
    ``items`` map across every page.

    Attribute access falls through to the most recently fetched response,
    so all the usual
    :class:`google.cloud.compute_v1.types.PacketMirroringAggregatedList`
    attributes remain available on the pager.
    """

    def __init__(
        self,
        method: Callable[..., compute.PacketMirroringAggregatedList],
        request: compute.AggregatedListPacketMirroringsRequest,
        response: compute.PacketMirroringAggregatedList,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Store the paging callable, initial request and first response.

        Args:
            method (Callable): The method that was originally called, and
                which is re-invoked to fetch further pages.
            request (google.cloud.compute_v1.types.AggregatedListPacketMirroringsRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.PacketMirroringAggregatedList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with each follow-up request as metadata.
        """
        self._method = method
        self._request = compute.AggregatedListPacketMirroringsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response object.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[compute.PacketMirroringAggregatedList]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterable[Tuple[str, compute.PacketMirroringsScopedList]]:
        return (entry for page in self.pages for entry in page.items.items())

    def get(self, key: str) -> Optional[compute.PacketMirroringsScopedList]:
        # Convenience lookup into the scoped-list map of the current page.
        return self._response.items.get(key)

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListPager:
    """Iterator helper for ``List`` requests.

    Wraps an initial
    :class:`google.cloud.compute_v1.types.PacketMirroringList` response and
    transparently issues follow-up ``List`` requests whenever a
    ``next_page_token`` is present, yielding every entry of the ``items``
    field across every page.

    Attribute access falls through to the most recently fetched response,
    so all the usual
    :class:`google.cloud.compute_v1.types.PacketMirroringList` attributes
    remain available on the pager.
    """

    def __init__(
        self,
        method: Callable[..., compute.PacketMirroringList],
        request: compute.ListPacketMirroringsRequest,
        response: compute.PacketMirroringList,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Store the paging callable, initial request and first response.

        Args:
            method (Callable): The method that was originally called, and
                which is re-invoked to fetch further pages.
            request (google.cloud.compute_v1.types.ListPacketMirroringsRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.PacketMirroringList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with each follow-up request as metadata.
        """
        self._method = method
        self._request = compute.ListPacketMirroringsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response object.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[compute.PacketMirroringList]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterable[compute.PacketMirroring]:
        return (item for page in self.pages for item in page.items)

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| 37.167742 | 90 | 0.677139 |
ace55784334f075665b0ae8d255adb94277994ec | 160 | py | Python | bruges/transform/__init__.py | sbachkheti/bruges | 10fa2524bf8f1b02df2e4e195dc44ead1ed97cbd | [
"Apache-2.0"
] | null | null | null | bruges/transform/__init__.py | sbachkheti/bruges | 10fa2524bf8f1b02df2e4e195dc44ead1ed97cbd | [
"Apache-2.0"
] | null | null | null | bruges/transform/__init__.py | sbachkheti/bruges | 10fa2524bf8f1b02df2e4e195dc44ead1ed97cbd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from .timedepthconv import time_to_depth, depth_to_time
from .coordinates import CoordTransform
from .cumavg import v_rms, v_avg, v_bac
| 32 | 55 | 0.78125 |
ace55834c5fc174bacbaa27b3a92528ded28e3c2 | 22,643 | py | Python | test/TemplateUtil_test.py | msneddon/KBaseReport | b8bb470a4c88d65230e525db48d6a3730069185d | [
"MIT"
] | null | null | null | test/TemplateUtil_test.py | msneddon/KBaseReport | b8bb470a4c88d65230e525db48d6a3730069185d | [
"MIT"
] | 15 | 2017-03-10T21:15:44.000Z | 2021-07-08T21:43:56.000Z | test/TemplateUtil_test.py | msneddon/KBaseReport | b8bb470a4c88d65230e525db48d6a3730069185d | [
"MIT"
] | 9 | 2017-01-20T18:27:06.000Z | 2021-01-05T17:52:38.000Z | # -*- coding: utf-8 -*-
import contextlib
import json
import os
import re
import unittest
from configparser import ConfigParser
from template import Template
from template.util import TemplateException
from uuid import uuid4
from KBaseReport.KBaseReportImpl import KBaseReport
from KBaseReport.utils.TemplateUtil import TemplateUtil
from KBaseReport.utils.validation_utils import validate_template_params
def get_test_data():
    """Assemble the static fixture data used throughout the template tests.

    Reads the pre-rendered reference output files from
    ``$APP_DIR/test/data/tmpl_output_<flavour>.txt``, so those files must
    exist on disk before the tests run.

    Fix over the previous version: the loop variable was named ``type``,
    which shadowed the builtin; it is now ``data_key``.
    """
    TEMPLATE_DIR = os.environ.get('TEMPLATE_DIR', '/kb/module/kbase_report_templates')
    APP_DIR = os.environ.get('APP_DIR', '/kb/module')
    TEST_DATA = {
        'template_dir': TEMPLATE_DIR,
        'title': {
            'page_title': 'My First Template'
        },
        'content': {
            'value': ['this', 'that', 'the other']
        },
        'template_file': 'views/test/test_template.tt',
        'scratch': '/kb/module/work/tmp',
        'template_toolkit_config': {
            'ABSOLUTE': 1,
            'RELATIVE': 1,
            'INCLUDE_PATH': TEMPLATE_DIR + ':' + os.path.join(TEMPLATE_DIR, 'views')
        },
    }
    TEST_DATA['title_json'] = json.dumps(TEST_DATA['title'])
    TEST_DATA['content_json'] = json.dumps(TEST_DATA['content'])
    TEST_DATA['template'] = os.path.join(TEMPLATE_DIR, TEST_DATA['template_file'])
    TEST_DATA['output_file'] = os.path.join(TEST_DATA['scratch'], 'outfile.txt')
    TEST_DATA['output_file_with_dirs'] = os.path.join(TEST_DATA['scratch'], 'path', 'to', 'out.txt')
    # Reference (pre-rendered) output, keyed by the data flavour rendered
    # into the template: None (no data), 'title' or 'content'.
    TEST_DATA['render_test'] = {}
    for data_key in [None, 'title', 'content']:
        suffix = 'None' if data_key is None else data_key
        rendered_file_path = os.path.join(APP_DIR, 'test/data/tmpl_output_' + suffix + '.txt')
        with open(rendered_file_path, 'r') as f:
            rendered_text = f.read()
        TEST_DATA['render_test'][data_key] = {
            'abs_path': rendered_text.rstrip(),
            # same text, but with the absolute template path replaced by the
            # relative one (expected when rendering via a relative path)
            'rel_path': rendered_text.rstrip().replace(
                TEST_DATA['template'], TEST_DATA['template_file']
            ),
        }
    return TEST_DATA
TEST_DATA = get_test_data()
@contextlib.contextmanager
def modified_environ(*remove, **update):
    """
    Temporarily patch ``os.environ`` in place.

    The real ``os.environ`` mapping is mutated (rather than swapped out)
    so the change is visible everywhere, and the previous state is fully
    restored on exit — including when the body raises.

    :param remove: names of environment variables to delete.
    :param update: environment variables (and values) to add or overwrite.
    """
    env = os.environ
    update = update or {}
    remove = remove or []
    # Variables we are about to overwrite or delete that currently exist.
    stomped = (set(update) | set(remove)) & set(env)
    # State needed to undo our changes on exit.
    update_after = {name: env[name] for name in stomped}
    remove_after = frozenset(name for name in update if name not in env)
    try:
        env.update(update)
        for name in remove:
            env.pop(name, None)
        yield
    finally:
        env.update(update_after)
        for name in remove_after:
            env.pop(name)
class TestTemplateUtils(unittest.TestCase):
    """Tests for TemplateUtil / KBaseReport template rendering.

    Requires a KBase-like environment: a scratch directory, the bundled
    template files and the pre-rendered reference output in TEST_DATA.
    """

    @classmethod
    def setUpClass(cls):
        # Build the service/TemplateUtil config either from the deploy.cfg
        # pointed to by KB_DEPLOYMENT_CONFIG or from the static TEST_DATA.
        config_file = os.environ.get('KB_DEPLOYMENT_CONFIG', None)
        cls.cfg = {}
        cls.cfg_ttp = {}
        if not config_file:
            cls.cfg = {
                'scratch': TEST_DATA['scratch'],
                'template_toolkit': TEST_DATA['template_toolkit_config'],
            }
            cls.cfg_ttp = TEST_DATA['template_toolkit_config']
        else:
            config = ConfigParser()
            config.read(config_file)
            for nameval in config.items('KBaseReport'):
                cls.cfg[nameval[0]] = nameval[1]
            for nameval in config.items('TemplateToolkitPython'):
                cls.cfg_ttp[nameval[0]] = nameval[1]
        cls.scratch = cls.cfg['scratch']
        cls.serviceImpl = KBaseReport(cls.cfg)
        cls.templateUtil = TemplateUtil(cls.cfg)
    def getImpl(self):
        """Return the shared KBaseReport implementation instance."""
        return self.__class__.serviceImpl
    def getTmpl(self):
        """Return the shared TemplateUtil instance."""
        return self.__class__.templateUtil
    def get_input_set(self):
        """Build a list of invalid config/param combinations.

        Each item carries a 'desc' label, a 'regex' the expected error must
        match, and a 'config'/'params' dict; missing facets are filled in
        with valid defaults, and 'err_field' records which facet keys were
        deliberately broken.
        """
        input_set = [
            # config errors
            {
                'desc': 'no conf - fails on the lack of a required field',
                'regex': 'scratch.*?required field',
                'config': {},
            },
            {
                'desc': 'wrong format for tt params',
                'regex': 'must be of dict type',
                'config': {
                    'template_toolkit': 'is a super cool templating library',
                }
            },
            {
                'desc': 'invalid scratch dir',
                'regex': 'scratch.*?does not exist on filesystem',
                'config': {
                    'scratch': '/does/not/exist',
                }
            },
            # input errors
            {
                'desc': 'missing required param',
                'regex': "required field",
                'params': {},
            },
            {
                'desc': 'template file is wrong type',
                'regex': "must be of string type",
                'params': {
                    'template_file': {
                        'path': '/does/not/exist'
                    },
                }
            },
            {
                'desc': 'invalid JSON',
                'regex': "Invalid JSON",
                'params': {
                    'template_data_json': '"this is not valid JSON',
                }
            },
            {
                'desc': 'invalid JSON',
                'regex': "Invalid JSON",
                'params': {
                    'template_data_json': '["this",{"is":"not"},{"valid":"json"]',
                }
            },
            {
                'desc': 'output file is not in scratch dir',
                'regex': "not in the scratch directory",
                'params': {
                    'output_file': 'path/to/file',
                }
            },
        ]
        valid = {
            'params': {
                'output_file': TEST_DATA['output_file'],
                'template_file': TEST_DATA['template'],
            },
            'config': self.cfg
        }
        valid['config']['template_toolkit'] = self.cfg_ttp
        # fill in any missing facet with valid data; when a facet was
        # supplied (and non-empty), merge it over the valid defaults and
        # record which fields were broken in 'err_field'
        for item in input_set:
            for facet in ['params', 'config']:
                if facet not in item:
                    item[facet] = valid[facet]
                elif len(item[facet]) > 0:
                    item['err_field'] = [facet + '.' + key for key in item[facet].keys()]
                    temp = { **valid[facet], **item[facet] }
                    item[facet] = temp
        return input_set
    def _title_check_rendering(self, string, has_title=False):
        """Assert the rendered <title> tag is populated (or empty)."""
        no_title_compiled = re.compile('<title></title>')
        title_compiled = re.compile('<title>My First Template</title>')
        if (has_title):
            self.assertRegex(string, title_compiled)
            self.assertNotRegex(string, no_title_compiled)
        else:
            self.assertNotRegex(string, title_compiled)
            self.assertRegex(string, no_title_compiled)
    def _content_check_rendering(self, string, has_content=False):
        """Assert the rendered 'value' div is populated (or empty)."""
        test_text = re.compile('This is a test.')
        no_value_text = re.compile('<div>The value is </div>')
        value_text = re.compile(
            r'<div>The value is \[\s*\'this\',\s*\'that\',\s*\'the other\',?\s*\]</div>'
        )
        self.assertRegex(string, test_text)
        if (has_content):
            self.assertRegex(string, value_text)
            self.assertNotRegex(string, no_value_text)
        else:
            self.assertRegex(string, no_value_text)
            self.assertNotRegex(string, value_text)
    def check_rendering(self, string, params={}):
        """Check title/content rendering based on which keys appear in params."""
        title_bool = 'title' in params
        content_bool = 'content' in params
        self._title_check_rendering(string, title_bool)
        self._content_check_rendering(string, content_bool)
    def check_file_contents(self, new_file, ref_text):
        """Assert that the file at new_file matches the reference text."""
        with open(new_file, 'r') as f:
            rendered_text = f.read()
        self.assertMultiLineEqual(rendered_text.rstrip(), ref_text.rstrip())
    def test_class_init(self):
        """ TemplateUtil: class initialisation """
        # remove the local config variable: cannot init TTP
        no_conf_err = 'No config file found. Cannot initialise Template Toolkit'
        with modified_environ('KB_DEPLOYMENT_CONFIG'):
            with self.assertRaisesRegex(ValueError, no_conf_err):
                KBaseReport(self.cfg)
        input_tests = self.get_input_set()
        for test_item in input_tests:
            if 'err_field' in test_item and 'config' in test_item['err_field'][0]:
                # init the class directly to check config validation errors
                with self.assertRaisesRegex(TypeError, test_item['regex']):
                    TemplateUtil(test_item['config'])
    def test_template_engine(self):
        """ template engine initialisation """
        tt_config = self.getImpl().config
        tmpl_util = TemplateUtil(tt_config)
        # the engine is created lazily: None until first requested
        self.assertIsNone(tmpl_util._template)
        # init the template engine
        tmpl_engine = tmpl_util.template_engine()
        self.assertIsInstance(tmpl_engine, Template)
    def test_validate_template_params_errors(self):
        """ test TemplateUtil input validation errors """
        # no scratch dir
        with self.assertRaisesRegex(TypeError, "KBaseReport parameter validation errors:") as cm:
            validate_template_params({'template_file': TEST_DATA['template']}, {
                'this': 'that',
                'the': 'other',
            }, True)
        error_message = str(cm.exception)
        self.assertRegex(error_message, 'scratch.*?required field')
        input_tests = self.get_input_set()
        for test_item in input_tests:
            with self.subTest(test_item['desc']):
                # assume this is input for the saving-to-file method
                with self.assertRaisesRegex(TypeError, test_item['regex']):
                    validate_template_params(
                        test_item['params'], test_item['config'], True)
                with self.assertRaisesRegex(TypeError, test_item['regex']):
                    tmpl_util = TemplateUtil(test_item['config'])
                    tmpl_util.render_template_to_file(test_item['params'])
                # save to direct html -- no output file checks required
                if 'err_field' in test_item and 'params.output_file' in test_item['err_field']:
                    # this should execute without errors
                    validate_template_params(test_item['params'], test_item['config'], False)
                else:
                    with self.assertRaisesRegex(TypeError, test_item['regex']):
                        validate_template_params(
                            test_item['params'], test_item['config'], False)
                    with self.assertRaisesRegex(TypeError, test_item['regex']):
                        tmpl_util = TemplateUtil(test_item['config'])
                        tmpl_util.render_template_to_direct_html({'template': test_item['params']})
    def test_validate_template_params(self):
        """ test TemplateUtil input validation """
        expected = {
            'template_file': TEST_DATA['template'],
            'output_file': TEST_DATA['output_file'],
            'template_data': {},
        }
        # with output file
        self.assertEqual(
            validate_template_params({
                'template_file': TEST_DATA['template'],
                'output_file': TEST_DATA['output_file'],
            }, self.getTmpl().config, True),
            expected
        )
        # JSON input converted to data structure
        expected['template_data'] = TEST_DATA['content']
        self.assertEqual(
            validate_template_params({
                'template_file': TEST_DATA['template'],
                'template_data_json': TEST_DATA['content_json'],
                'output_file': TEST_DATA['output_file'],
            }, self.getTmpl().config, True),
            expected
        )
        # no output file required -- 'output_file' will not be returned in the validated params
        del expected['output_file']
        expected['template_data'] = {}
        self.assertEqual(
            validate_template_params({
                'template_file': TEST_DATA['template'],
                'output_file': TEST_DATA['output_file'],
            }, self.getTmpl().config, False),
            expected
        )
    def test_render_template(self):
        """
        basic rendering test
        function signature:
            _render_template(template_file, template_data={}, template_config={}):
        """
        render_test = TEST_DATA['render_test']
        tmpl_util = self.getTmpl()
        # template not found
        for path in ['/does/not/exist', 'does/not/exist']:
            with self.assertRaisesRegex(TemplateException, 'file error - ' + path + ': not found'):
                tmpl_util._render_template(path)
        for test_item in render_test.keys():
            desc = test_item if test_item is not None else 'None'
            with self.subTest('rendered content: ' + desc):
                if not test_item:
                    # no title or content specified
                    tmpl_str = tmpl_util._render_template(TEST_DATA['template'])
                    self.check_rendering(tmpl_str)
                    self.assertEqual(tmpl_str.rstrip(), render_test[test_item]['abs_path'])
                elif test_item == 'title' or test_item == 'content':
                    tmpl_str = tmpl_util._render_template(
                        TEST_DATA['template'], TEST_DATA[test_item]
                    )
                    self.check_rendering(tmpl_str, {test_item: True})
                    self.assertEqual(tmpl_str.rstrip(), render_test[test_item]['abs_path'])
        # check whether we can use a relative path for a template
        relative_tmpl_str = tmpl_util._render_template(TEST_DATA['template_file'], {})
        self.check_rendering(relative_tmpl_str)
        self.assertEqual(relative_tmpl_str.rstrip(), render_test[None]['rel_path'])
    def test_render_template_to_direct_html(self):
        """ test rendering and saving output to the 'direct_html' param """
        render_test = TEST_DATA['render_test']
        tmpl_util = self.getTmpl()
        # see test_validate_template_params for more validation errors
        with self.assertRaisesRegex(KeyError, 'template.*?required field'):
            tmpl_util.render_template_to_direct_html({
                'template_file': TEST_DATA['template'],
                'template_data_json': TEST_DATA['content_json']
            })
        # template not found
        for path in ['/does/not/exist', 'does/not/exist']:
            with self.assertRaisesRegex(TemplateException, 'file error - ' + path + ': not found'):
                tmpl_util.render_template_to_direct_html({'template': {'template_file': path}})
        for test_item in render_test.keys():
            desc = test_item if test_item is not None else 'None'
            with self.subTest('rendered content: ' + desc):
                test_args = {'template_file': TEST_DATA['template']}
                if test_item:
                    test_args['template_data_json'] = TEST_DATA[test_item + '_json']
                new_params = tmpl_util.render_template_to_direct_html({'template': test_args})
                expected = render_test[test_item]['abs_path']
                direct_html_str = new_params['direct_html']
                self.assertMultiLineEqual(direct_html_str.rstrip(), expected)
                # the 'template' key is consumed and must not survive
                self.assertNotIn('template', new_params)
    def test_render_template_to_file(self):
        """ test rendering and saving to a (scratch) file """
        tmpl_util = self.getTmpl()
        # see test_validate_template_params_errors for more validation errors
        # template not found, absolute and relative paths
        for path in ['/does/not/exist', 'does/not/exist']:
            with self.assertRaisesRegex(TemplateException, 'file error - ' + path + ': not found'):
                tmpl_util.render_template_to_file({
                    'template_file': path,
                    'output_file': TEST_DATA['output_file']
                })
            with self.assertRaisesRegex(TemplateException, 'file error - ' + path + ': not found'):
                tmpl_util.render_template_to_scratch_file({
                    'template_file': path
                })
        # ensure that we can create intervening directories
        output_dir = os.path.join(self.scratch, 'path', 'to', 'new', 'dir')
        self.assertFalse(os.path.isdir(output_dir) and os.path.isfile(output_dir))
        for test_item in TEST_DATA['render_test'].keys():
            desc = test_item if test_item is not None else 'None'
            with self.subTest('rendered content: ' + desc):
                output_file = os.path.join(output_dir, 'temp_file-' + str(uuid4()) + '.txt')
                self.assertFalse(os.path.isfile(output_file))
                test_args = {
                    'template_file': TEST_DATA['template'],
                    'output_file': output_file,
                }
                if test_item:
                    test_args['template_data_json'] = TEST_DATA[test_item + '_json']
                new_file = tmpl_util.render_template_to_file(test_args)
                self.assertEqual(new_file, {'path': output_file})
                self.check_file_contents(
                    new_file['path'], TEST_DATA['render_test'][test_item]['abs_path'])
                # now render these to a temp file instead
                del test_args['output_file']
                tmp_file = tmpl_util.render_template_to_scratch_file(test_args)
                # make sure the file name matches expectations
                self.assertRegex(tmp_file['path'], self.cfg['scratch'] + ".*?.txt$")
                self.check_file_contents(
                    tmp_file['path'], TEST_DATA['render_test'][test_item]['abs_path'])
    def test_impl_render_template_errors(self):
        """ full Impl test errors """
        input_tests = self.get_input_set()
        for test_item in input_tests:
            with self.subTest(test_item['desc']):
                if 'err_field' in test_item:
                    if 'config.template_toolkit' in test_item['err_field'][0]:
                        # this will pass as the TT config gets read (and overwritten) in KBaseReport
                        kbr = KBaseReport(test_item['config'])
                        kbr.render_template({}, test_item['params'])
                    else:
                        with self.assertRaisesRegex(TypeError, test_item['regex']):
                            kbr = KBaseReport(test_item['config'])
                            kbr.render_template({}, test_item['params'])
                        with self.assertRaisesRegex(TypeError, test_item['regex']):
                            kbr = KBaseReport(test_item['config'])
                            kbr.render_templates({}, [test_item['params']])
    def test_impl_render_template(self):
        """ full Impl test: input validation, rendering, saving to file, return """
        for test_item in TEST_DATA['render_test'].keys():
            desc = test_item if test_item is not None else 'none'
            ref_text = TEST_DATA['render_test'][test_item]
            with self.subTest('test content: ' + desc):
                test_args = {
                    'template_file': TEST_DATA['template'],
                    'output_file': os.path.join(self.scratch, 'temp_file-' + str(uuid4()) + '.txt')
                }
                if test_item:
                    test_args['template_data_json'] = TEST_DATA[test_item + '_json']
                impl_output = self.getImpl().render_template({}, test_args)
                self.assertEqual(impl_output, [{'path': test_args['output_file']}])
                self.check_file_contents(impl_output[0]['path'], ref_text['abs_path'])
                # use a relative path instead of the absolute
                test_args['template_file'] = TEST_DATA['template_file']
                test_args['output_file'] = test_args['output_file'] + '-2'
                impl_output_rel_template = self.getImpl().render_template({}, test_args)
                self.assertEqual(impl_output_rel_template, [{'path': test_args['output_file']}])
                self.check_file_contents(impl_output_rel_template[0]['path'], ref_text['rel_path'])
    def test_impl_render_templates_multi_file_errors(self):
        """ full Impl test, multiple templates, errors """
        # name collision in output file names
        args_list = [{
            'template_file': TEST_DATA['template'],
            'output_file': os.path.join(self.scratch, 'temp_file.txt')
        } for _ in range(4)]
        err_str = 'output_file paths must be unique'
        with self.assertRaisesRegex(ValueError, err_str):
            impl_output = self.getImpl().render_templates({}, args_list)
    def test_impl_render_templates(self):
        """
        full Impl test, multiple template rendering:
        input validation, rendering, saving to file, return
        """
        args_list = []
        expected_output = []
        expected_content = {}
        for test_item in TEST_DATA['render_test'].keys():
            ref_text = TEST_DATA['render_test'][test_item]
            test_args = {
                'template_file': TEST_DATA['template'],
                'output_file': os.path.join(self.scratch, 'temp_file-' + str(uuid4()) + '.txt')
            }
            if test_item:
                test_args['template_data_json'] = TEST_DATA[test_item + '_json']
            args_list.append(test_args)
            expected_output.append({
                'path': test_args['output_file'],
            })
            expected_content[test_args['output_file']] = ref_text['abs_path']
        impl_output = self.getImpl().render_templates({}, args_list)
        self.assertEqual(impl_output[0], expected_output)
        for output_file in impl_output[0]:
            self.check_file_contents(output_file['path'], expected_content[output_file['path']])
# Allow invoking the tests directly (outside a pytest/nose runner).
if __name__ == '__main__':
    unittest.main()
| 39.310764 | 100 | 0.574173 |
ace55853b1b72d71d522c83b3c672bdf04282b11 | 47 | py | Python | dictcc/__init__.py | omargfh/dict.cc.py | da25e28e41891ca679a883ea0eb05f85166dfab5 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 134 | 2015-01-28T10:39:39.000Z | 2022-03-26T16:17:37.000Z | dictcc/__init__.py | jiggimclaw/dict.cc.py | 675d683d6e4eb8738b5f806530a2adad51d60dd0 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 26 | 2015-01-24T20:42:27.000Z | 2022-02-06T19:08:13.000Z | dictcc/__init__.py | jiggimclaw/dict.cc.py | 675d683d6e4eb8738b5f806530a2adad51d60dd0 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 37 | 2015-01-24T20:16:52.000Z | 2022-02-06T17:20:06.000Z | # -*- coding: utf-8 -*-
from .dictcc import *
| 11.75 | 23 | 0.553191 |
ace559378e5715565e20cd826e025d1ffc1c003a | 8,489 | py | Python | pipenv/patched/notpip/_vendor/rich/box.py | sthagen/pipenv | 0924f75fd1004c848ea67d4272315eda4210b352 | [
"MIT"
] | 23 | 2017-01-20T01:18:31.000Z | 2017-01-20T17:25:11.000Z | pipenv/patched/notpip/_vendor/rich/box.py | sthagen/pipenv | 0924f75fd1004c848ea67d4272315eda4210b352 | [
"MIT"
] | 1 | 2017-01-20T05:13:58.000Z | 2017-01-20T05:13:58.000Z | pipenv/patched/notpip/_vendor/rich/box.py | sthagen/pipenv | 0924f75fd1004c848ea67d4272315eda4210b352 | [
"MIT"
] | null | null | null | import sys
from typing import TYPE_CHECKING, Iterable, List
if sys.version_info >= (3, 8):
from typing import Literal
else:
from pipenv.patched.notpip._vendor.typing_extensions import Literal # pragma: no cover
from ._loop import loop_last
if TYPE_CHECKING:
from pipenv.patched.notpip._vendor.rich.console import ConsoleOptions
class Box:
    """Describes the characters used to draw a table/panel frame.

    The ``box`` definition string consists of eight newline-terminated rows
    of four characters each::

        ┌─┬┐ top
        │ ││ head
        ├─┼┤ head_row
        │ ││ mid
        ├─┼┤ row
        ├─┼┤ foot_row
        │ ││ foot
        └─┴┘ bottom

    Args:
        box (str): Characters making up box.
        ascii (bool, optional): True if this box uses ascii characters only. Default is False.
    """

    def __init__(self, box: str, *, ascii: bool = False) -> None:
        self._box = box
        self.ascii = ascii
        (top, head, head_row, mid, row, foot_row, foot, bottom) = box.splitlines()
        # Each four-character row unpacks directly into its components.
        self.top_left, self.top, self.top_divider, self.top_right = top
        self.head_left, _, self.head_vertical, self.head_right = head
        (
            self.head_row_left,
            self.head_row_horizontal,
            self.head_row_cross,
            self.head_row_right,
        ) = head_row
        self.mid_left, _, self.mid_vertical, self.mid_right = mid
        self.row_left, self.row_horizontal, self.row_cross, self.row_right = row
        (
            self.foot_row_left,
            self.foot_row_horizontal,
            self.foot_row_cross,
            self.foot_row_right,
        ) = foot_row
        self.foot_left, _, self.foot_vertical, self.foot_right = foot
        self.bottom_left, self.bottom, self.bottom_divider, self.bottom_right = bottom

    def __repr__(self) -> str:
        return "Box(...)"

    def __str__(self) -> str:
        return self._box

    def substitute(self, options: "ConsoleOptions", safe: bool = True) -> "Box":
        """Substitute this box for another if it won't render due to platform issues.

        Args:
            options (ConsoleOptions): Console options used in rendering.
            safe (bool, optional): Substitute this for another Box if there are known problems
                displaying on the platform (currently only relevant on Windows). Default is True.

        Returns:
            Box: A different Box or the same Box.
        """
        result = self
        if safe and options.legacy_windows:
            result = LEGACY_WINDOWS_SUBSTITUTIONS.get(result, result)
        if options.ascii_only and not result.ascii:
            result = ASCII
        return result

    def get_top(self, widths: Iterable[int]) -> str:
        """Render the top edge of the box for the given column widths.

        Args:
            widths (List[int]): Widths of columns.

        Returns:
            str: A string of box characters.
        """
        body = self.top_divider.join(self.top * width for width in widths)
        return self.top_left + body + self.top_right

    def get_row(
        self,
        widths: Iterable[int],
        level: Literal["head", "row", "foot", "mid"] = "row",
        edge: bool = True,
    ) -> str:
        """Render a horizontal separator row at the given level.

        Args:
            widths (List[int]): Widths of columns.
            level (str): One of "head", "row", "mid" or "foot".
            edge (bool): Include the left/right edge characters.

        Returns:
            str: A string of box characters.

        Raises:
            ValueError: If ``level`` is not a recognised name.
        """
        if level == "head":
            left, horizontal, cross, right = (
                self.head_row_left,
                self.head_row_horizontal,
                self.head_row_cross,
                self.head_row_right,
            )
        elif level == "row":
            left, horizontal, cross, right = (
                self.row_left,
                self.row_horizontal,
                self.row_cross,
                self.row_right,
            )
        elif level == "mid":
            # "mid" rows are blank between the vertical rules.
            left, horizontal, cross, right = (
                self.mid_left,
                " ",
                self.mid_vertical,
                self.mid_right,
            )
        elif level == "foot":
            left, horizontal, cross, right = (
                self.foot_row_left,
                self.foot_row_horizontal,
                self.foot_row_cross,
                self.foot_row_right,
            )
        else:
            raise ValueError("level must be 'head', 'row' or 'foot'")
        body = cross.join(horizontal * width for width in widths)
        return left + body + right if edge else body

    def get_bottom(self, widths: Iterable[int]) -> str:
        """Render the bottom edge of the box for the given column widths.

        Args:
            widths (List[int]): Widths of columns.

        Returns:
            str: A string of box characters.
        """
        body = self.bottom_divider.join(self.bottom * width for width in widths)
        return self.bottom_left + body + self.bottom_right
# Pure-ASCII box: safe on any terminal and encoding.
ASCII: Box = Box(
    """\
+--+
| ||
|-+|
| ||
|-+|
|-+|
| ||
+--+
""",
    ascii=True,
)
# ASCII variant with '+' junctions on every divider row.
ASCII2: Box = Box(
    """\
+-++
| ||
+-++
| ||
+-++
+-++
| ||
+-++
""",
    ascii=True,
)
# As ASCII2, but the header separator row uses '=' for emphasis.
ASCII_DOUBLE_HEAD: Box = Box(
    """\
+-++
| ||
+=++
| ||
+-++
+-++
| ||
+-++
""",
    ascii=True,
)
# Unicode light box-drawing characters with square corners.
SQUARE: Box = Box(
    """\
┌─┬┐
│ ││
├─┼┤
│ ││
├─┼┤
├─┼┤
│ ││
└─┴┘
"""
)
# As SQUARE, but with a double-ruled header separator row.
SQUARE_DOUBLE_HEAD: Box = Box(
    """\
┌─┬┐
│ ││
╞═╪╡
│ ││
├─┼┤
├─┼┤
│ ││
└─┴┘
"""
)
MINIMAL: Box = Box(
"""\
╷
│
╶─┼╴
│
╶─┼╴
╶─┼╴
│
╵
"""
)
MINIMAL_HEAVY_HEAD: Box = Box(
"""\
╷
│
╺━┿╸
│
╶─┼╴
╶─┼╴
│
╵
"""
)
MINIMAL_DOUBLE_HEAD: Box = Box(
"""\
╷
│
═╪
│
─┼
─┼
│
╵
"""
)
SIMPLE: Box = Box(
"""\
──
──
"""
)
SIMPLE_HEAD: Box = Box(
"""\
──
"""
)
SIMPLE_HEAVY: Box = Box(
"""\
━━
━━
"""
)
HORIZONTALS: Box = Box(
"""\
──
──
──
──
──
"""
)
# Light lines with rounded corners.
ROUNDED: Box = Box(
    """\
╭─┬╮
│ ││
├─┼┤
│ ││
├─┼┤
├─┼┤
│ ││
╰─┴╯
"""
)
# Heavy (thick) lines throughout.
HEAVY: Box = Box(
    """\
┏━┳┓
┃ ┃┃
┣━╋┫
┃ ┃┃
┣━╋┫
┣━╋┫
┃ ┃┃
┗━┻┛
"""
)
# Heavy outer frame, light interior rules.
HEAVY_EDGE: Box = Box(
    """\
┏━┯┓
┃ │┃
┠─┼┨
┃ │┃
┠─┼┨
┠─┼┨
┃ │┃
┗━┷┛
"""
)
# Heavy header rows, light body rows.
HEAVY_HEAD: Box = Box(
    """\
┏━┳┓
┃ ┃┃
┡━╇┩
│ ││
├─┼┤
├─┼┤
│ ││
└─┴┘
"""
)
# Double-ruled lines throughout.
DOUBLE: Box = Box(
    """\
╔═╦╗
║ ║║
╠═╬╣
║ ║║
╠═╬╣
╠═╬╣
║ ║║
╚═╩╝
"""
)
# Double-ruled outer frame, light interior rules.
DOUBLE_EDGE: Box = Box(
    """\
╔═╤╗
║ │║
╟─┼╢
║ │║
╟─┼╢
╟─┼╢
║ │║
╚═╧╝
"""
)
# Map Boxes that don't render with raster fonts on to equivalent that do
# (keys and values are the module-level Box singletons above; Box defines
# no __eq__/__hash__, so default identity hashing makes them valid keys)
LEGACY_WINDOWS_SUBSTITUTIONS = {
    ROUNDED: SQUARE,
    MINIMAL_HEAVY_HEAD: MINIMAL,
    SIMPLE_HEAVY: SIMPLE,
    HEAVY: SQUARE,
    HEAVY_EDGE: SQUARE,
    HEAVY_HEAD: SQUARE,
}
# Manual/visual check: render a sample table in every box style.
if __name__ == "__main__":  # pragma: no cover
    # NOTE(review): imports are local to the demo, presumably to avoid
    # import cycles with the console/table modules — confirm if refactoring.
    from pipenv.patched.notpip._vendor.rich.columns import Columns
    from pipenv.patched.notpip._vendor.rich.panel import Panel
    from . import box as box
    from .console import Console
    from .table import Table
    from .text import Text
    console = Console(record=True)
    # Names of all Box constants defined in this module.
    BOXES = [
        "ASCII",
        "ASCII2",
        "ASCII_DOUBLE_HEAD",
        "SQUARE",
        "SQUARE_DOUBLE_HEAD",
        "MINIMAL",
        "MINIMAL_HEAVY_HEAD",
        "MINIMAL_DOUBLE_HEAD",
        "SIMPLE",
        "SIMPLE_HEAD",
        "SIMPLE_HEAVY",
        "HORIZONTALS",
        "ROUNDED",
        "HEAVY",
        "HEAVY_EDGE",
        "HEAVY_HEAD",
        "DOUBLE",
        "DOUBLE_EDGE",
    ]
    console.print(Panel("[bold green]Box Constants", style="green"), justify="center")
    console.print()
    columns = Columns(expand=True, padding=2)
    # One demo table per box style, laid out in columns.
    for box_name in sorted(BOXES):
        table = Table(
            show_footer=True, style="dim", border_style="not dim", expand=True
        )
        table.add_column("Header 1", "Footer 1")
        table.add_column("Header 2", "Footer 2")
        table.add_row("Cell", "Cell")
        table.add_row("Cell", "Cell")
        table.box = getattr(box, box_name)
        table.title = Text(f"box.{box_name}", style="magenta")
        columns.add_renderable(table)
    console.print(columns)
    # console.save_html("box.html", inline_styles=True)
| 17.539256 | 97 | 0.509012 |
ace55a5da2030d0b5b189453bf36ea1bf91615f1 | 69,464 | py | Python | django/db/models/query.py | tqrg-bot/django | 67b46ba7016da2d259c1ecc7d666d11f5e1cfaab | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/db/models/query.py | tqrg-bot/django | 67b46ba7016da2d259c1ecc7d666d11f5e1cfaab | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/db/models/query.py | tqrg-bot/django | 67b46ba7016da2d259c1ecc7d666d11f5e1cfaab | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | """
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import sys
import warnings
from collections import OrderedDict, deque
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import F, Date, DateTime
from django.db.models.fields import AutoField
from django.db.models.query_utils import (
Q, InvalidQuery, check_rel_lookup_compatibility, deferred_class_factory,
)
from django.db.models.sql.constants import CURSOR
from django.utils import six, timezone
from django.utils.functional import partition
from django.utils.version import get_version
# Maximum number of items QuerySet.__repr__ shows before truncating.
REPR_OUTPUT_SIZE = 20
# Re-exported here for backwards compatibility with older import paths.
EmptyResultSet = sql.EmptyResultSet
class BaseIterable(object):
    """Base class for iterables that wrap a QuerySet.

    Subclasses implement ``__iter__`` to turn the queryset's raw database
    rows into the objects the caller asked for (models, dicts, or tuples).
    """
    def __init__(self, queryset):
        # The queryset whose results this iterable will materialize.
        self.queryset = queryset
class ModelIterable(BaseIterable):
    """
    Iterable that yields a model instance for each row.
    """
    def __iter__(self):
        queryset = self.queryset
        db = queryset.db
        compiler = queryset.query.get_compiler(using=db)
        # Execute the query. This will also fill compiler.select, klass_info,
        # and annotations.
        results = compiler.execute_sql()
        select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
                                                  compiler.annotation_col_map)
        # No model columns selected: nothing to yield.
        if klass_info is None:
            return
        model_cls = klass_info['model']
        select_fields = klass_info['select_fields']
        # The model's own columns occupy a contiguous slice of each row.
        model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
        init_list = [f[0].target.attname
                     for f in select[model_fields_start:model_fields_end]]
        # Fewer selected columns than concrete fields means some fields were
        # deferred; build a deferred model class that skips them.
        if len(init_list) != len(model_cls._meta.concrete_fields):
            init_set = set(init_list)
            skip = [f.attname for f in model_cls._meta.concrete_fields
                    if f.attname not in init_set]
            model_cls = deferred_class_factory(model_cls, skip)
        related_populators = get_related_populators(klass_info, select, db)
        for row in compiler.results_iter(results):
            obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
            # Attach select_related() objects pulled in by the same query.
            if related_populators:
                for rel_populator in related_populators:
                    rel_populator.populate(row, obj)
            # Attach annotation values by their aliased attribute names.
            if annotation_col_map:
                for attr_name, col_pos in annotation_col_map.items():
                    setattr(obj, attr_name, row[col_pos])
            # Add the known related objects to the model, if there are any
            if queryset._known_related_objects:
                for field, rel_objs in queryset._known_related_objects.items():
                    # Avoid overwriting objects loaded e.g. by select_related
                    if hasattr(obj, field.get_cache_name()):
                        continue
                    pk = getattr(obj, field.get_attname())
                    try:
                        rel_obj = rel_objs[pk]
                    except KeyError:
                        pass  # may happen in qs1 | qs2 scenarios
                    else:
                        setattr(obj, field.name, rel_obj)
            yield obj
class ValuesIterable(BaseIterable):
    """
    Iterable used by QuerySet.values(): yields one dict per result row,
    keyed by the selected column names.
    """
    def __iter__(self):
        qs = self.queryset
        query = qs.query
        compiler = query.get_compiler(qs.db)
        # extra(select=...) columns always come first in each row, followed
        # by the plain field columns and then the annotation columns.
        names = (
            list(query.extra_select) +
            list(query.values_select) +
            list(query.annotation_select)
        )
        for row in compiler.results_iter():
            yield dict(zip(names, row))
class ValuesListIterable(BaseIterable):
    """
    Iterable returned by QuerySet.values_list(flat=False)
    that yields a tuple for each row.
    """
    def __iter__(self):
        queryset = self.queryset
        query = queryset.query
        compiler = query.get_compiler(queryset.db)
        # Fast path: no extra() or annotations, so rows already match the
        # requested field order and can be yielded as-is.
        if not query.extra_select and not query.annotation_select:
            for row in compiler.results_iter():
                yield tuple(row)
        else:
            field_names = list(query.values_select)
            extra_names = list(query.extra_select)
            annotation_names = list(query.annotation_select)
            # extra(select=...) cols are always at the start of the row.
            names = extra_names + field_names + annotation_names
            if queryset._fields:
                # Reorder according to fields.
                fields = list(queryset._fields) + [f for f in annotation_names if f not in queryset._fields]
            else:
                fields = names
            # Map each row by column name, then emit values in the order the
            # caller asked for.
            for row in compiler.results_iter():
                data = dict(zip(names, row))
                yield tuple(data[f] for f in fields)
class FlatValuesListIterable(BaseIterable):
    """
    Iterable used by QuerySet.values_list(flat=True): yields the single
    selected column of each result row as a bare value.
    """
    def __iter__(self):
        qs = self.queryset
        compiler = qs.query.get_compiler(qs.db)
        for row in compiler.results_iter():
            yield row[0]
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
    def __init__(self, model=None, query=None, using=None, hints=None):
        # Model class the queryset yields, plus database alias/routing hints.
        self.model = model
        self._db = using
        self._hints = hints or {}
        # The underlying SQL query object; created lazily for the model.
        self.query = query or sql.Query(self.model)
        # List of results once evaluated; None means "not evaluated yet".
        self._result_cache = None
        self._sticky_filter = False
        # True when this queryset must be routed to the "write" database.
        self._for_write = False
        self._prefetch_related_lookups = []
        self._prefetch_done = False
        self._known_related_objects = {}  # {rel_field, {pk: rel_obj}}
        # Strategy class used to turn raw rows into Python objects.
        self._iterable_class = ModelIterable
        # Field names chosen by values()/values_list(); None for model rows.
        self._fields = None
    def as_manager(cls):
        # Address the circular dependency between `Queryset` and `Manager`.
        from django.db.models.manager import Manager
        manager = Manager.from_queryset(cls)()
        # Flag recording that this manager was built via as_manager().
        manager._built_with_as_manager = True
        return manager
    # Not available on values()/values_list() querysets; exposed as a
    # classmethod using the pre-decorator assignment style.
    as_manager.queryset_only = True
    as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
self._fetch_all()
obj_dict = self.__dict__.copy()
obj_dict[DJANGO_VERSION_PICKLE_KEY] = get_version()
return obj_dict
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = ("Pickled queryset instance's Django version %s does"
" not match the current version %s."
% (pickled_version, current_version))
else:
msg = "Pickled queryset instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return '<QuerySet %r>' % data
def __len__(self):
self._fetch_all()
return len(self._result_cache)
    def __iter__(self):
        """
        The queryset iterator protocol uses three nested iterators in the
        default case:

        1. sql.compiler:execute_sql()
           - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE)
             using cursor.fetchmany(). This part is responsible for
             doing some column masking, and returning the rows in chunks.
        2. sql/compiler.results_iter()
           - Returns one row at time. At this point the rows are still just
             tuples. In some cases the return values are converted to
             Python values at this location.
        3. self.iterator()
           - Responsible for turning the rows into model objects.
        """
        # Fully evaluate (and cache) the results, then iterate the cache.
        self._fetch_all()
        return iter(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
    def __nonzero__(self):  # Python 2's truth hook; delegates to __bool__()
        return type(self).__bool__(self)
    def __getitem__(self, k):
        """
        Retrieves an item or slice from the set of results.

        Indices/slices are translated into SQL LIMIT/OFFSET when the
        queryset has not been evaluated yet; negative values are rejected.
        """
        if not isinstance(k, (slice,) + six.integer_types):
            raise TypeError
        assert ((not isinstance(k, slice) and (k >= 0)) or
                (isinstance(k, slice) and (k.start is None or k.start >= 0) and
                 (k.stop is None or k.stop >= 0))), \
            "Negative indexing is not supported."
        # Already evaluated: index the cached results directly.
        if self._result_cache is not None:
            return self._result_cache[k]
        if isinstance(k, slice):
            qs = self._clone()
            if k.start is not None:
                start = int(k.start)
            else:
                start = None
            if k.stop is not None:
                stop = int(k.stop)
            else:
                stop = None
            qs.query.set_limits(start, stop)
            # A step cannot be expressed in SQL, so force evaluation.
            return list(qs)[::k.step] if k.step else qs
        # Single index: LIMIT 1 at the right offset, then evaluate.
        qs = self._clone()
        qs.query.set_limits(k, k + 1)
        return list(qs)[0]
    def __and__(self, other):
        """Intersection (qs1 & qs2): combine both querysets' filters with AND."""
        self._merge_sanity_check(other)
        # Intersecting with an empty queryset yields an empty queryset.
        if isinstance(other, EmptyQuerySet):
            return other
        if isinstance(self, EmptyQuerySet):
            return self
        combined = self._clone()
        combined._merge_known_related_objects(other)
        combined.query.combine(other.query, sql.AND)
        return combined
    def __or__(self, other):
        """Union (qs1 | qs2): combine both querysets' filters with OR."""
        self._merge_sanity_check(other)
        # An empty side contributes nothing to the union.
        if isinstance(self, EmptyQuerySet):
            return other
        if isinstance(other, EmptyQuerySet):
            return self
        combined = self._clone()
        combined._merge_known_related_objects(other)
        combined.query.combine(other.query, sql.OR)
        return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
return iter(self._iterable_class(self))
    def aggregate(self, *args, **kwargs):
        """
        Returns a dictionary containing the calculations (aggregation)
        over the current queryset

        If args is present the expression is passed as a kwarg using
        the Aggregate object's default alias.
        """
        if self.query.distinct_fields:
            raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
        for arg in args:
            # The default_alias property may raise a TypeError, so we use
            # a try/except construct rather than hasattr in order to remain
            # consistent between PY2 and PY3 (hasattr would swallow
            # the TypeError on PY2).
            try:
                arg.default_alias
            except (AttributeError, TypeError):
                raise TypeError("Complex aggregates require an alias")
            kwargs[arg.default_alias] = arg
        query = self.query.clone()
        for (alias, aggregate_expr) in kwargs.items():
            query.add_annotation(aggregate_expr, alias, is_summary=True)
            # Reject plain (non-aggregate) expressions passed to aggregate().
            if not query.annotations[alias].contains_aggregate:
                raise TypeError("%s is not an aggregate expression" % alias)
        return query.get_aggregation(self.db, kwargs.keys())
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." %
self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!" %
(self.model._meta.object_name, num)
)
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
    def _populate_pk_values(self, objs):
        """Assign a default primary key value to each object that lacks one."""
        for obj in objs:
            if obj.pk is None:
                obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
    def bulk_create(self, objs, batch_size=None):
        """
        Inserts each of the instances into the database. This does *not* call
        save() on each of the instances, does not send any pre/post save
        signals, and does not set the primary key attribute if it is an
        autoincrement field. Multi-table models are not supported.
        """
        # So this case is fun. When you bulk insert you don't get the primary
        # keys back (if it's an autoincrement), so you can't insert into the
        # child tables which references this. There are two workarounds, 1)
        # this could be implemented if you didn't have an autoincrement pk,
        # and 2) you could do it by doing O(n) normal inserts into the parent
        # tables to get the primary keys back, and then doing a single bulk
        # insert into the childmost table. Some databases might allow doing
        # this by using RETURNING clause for the insert query. We're punting
        # on these for now because they are relatively rare cases.
        assert batch_size is None or batch_size > 0
        # Check that the parents share the same concrete model with the our
        # model to detect the inheritance pattern ConcreteGrandParent ->
        # MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
        # would not identify that case as involving multiple tables.
        for parent in self.model._meta.get_parent_list():
            if parent._meta.concrete_model is not self.model._meta.concrete_model:
                raise ValueError("Can't bulk create a multi-table inherited model")
        if not objs:
            return objs
        self._for_write = True
        connection = connections[self.db]
        fields = self.model._meta.concrete_fields
        objs = list(objs)
        self._populate_pk_values(objs)
        with transaction.atomic(using=self.db, savepoint=False):
            if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
                    and self.model._meta.has_auto_field):
                self._batched_insert(objs, fields, batch_size)
            else:
                # Otherwise, rows that already have a PK must be inserted
                # separately from those that need the database to assign one.
                objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
                if objs_with_pk:
                    self._batched_insert(objs_with_pk, fields, batch_size)
                if objs_without_pk:
                    fields = [f for f in fields if not isinstance(f, AutoField)]
                    self._batched_insert(objs_without_pk, fields, batch_size)
        return objs
    def get_or_create(self, defaults=None, **kwargs):
        """
        Looks up an object with the given kwargs, creating one if necessary.
        Returns a tuple of (object, created), where created is a boolean
        specifying whether an object was created.
        """
        lookup, params = self._extract_model_params(defaults, **kwargs)
        # The get() needs to be targeted at the write database in order
        # to avoid potential transaction consistency problems.
        self._for_write = True
        try:
            return self.get(**lookup), False
        except self.model.DoesNotExist:
            # Not found: create it (races are handled inside via the
            # IntegrityError fallback in _create_object_from_params).
            return self._create_object_from_params(lookup, params)
    def update_or_create(self, defaults=None, **kwargs):
        """
        Looks up an object with the given kwargs, updating one with defaults
        if it exists, otherwise creates a new one.
        Returns a tuple (object, created), where created is a boolean
        specifying whether an object was created.
        """
        defaults = defaults or {}
        lookup, params = self._extract_model_params(defaults, **kwargs)
        self._for_write = True
        try:
            obj = self.get(**lookup)
        except self.model.DoesNotExist:
            obj, created = self._create_object_from_params(lookup, params)
            # A freshly created object already carries the defaults.
            if created:
                return obj, created
        # Existing (or concurrently created) row: apply defaults and save.
        for k, v in six.iteritems(defaults):
            setattr(obj, k, v)
        obj.save(using=self.db)
        return obj, False
    def _create_object_from_params(self, lookup, params):
        """
        Tries to create an object using passed params.
        Used by get_or_create and update_or_create
        """
        try:
            with transaction.atomic(using=self.db):
                obj = self.create(**params)
            return obj, True
        except IntegrityError:
            # Another process may have created the row concurrently; try to
            # fetch it, and re-raise the original error if that also fails.
            exc_info = sys.exc_info()
            try:
                return self.get(**lookup), False
            except self.model.DoesNotExist:
                pass
            six.reraise(*exc_info)
    def _extract_model_params(self, defaults, **kwargs):
        """
        Prepares `lookup` (kwargs that are valid model attributes), `params`
        (for creating a model instance) based on given kwargs; for use by
        get_or_create and update_or_create.
        """
        defaults = defaults or {}
        lookup = kwargs.copy()
        # Normalize attnames (e.g. "author_id") to field names for lookups.
        for f in self.model._meta.fields:
            if f.attname in lookup:
                lookup[f.name] = lookup.pop(f.attname)
        # Keys containing "__" are filter lookups, not model kwargs.
        params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
        params.update(defaults)
        return lookup, params
    def _earliest_or_latest(self, field_name=None, direction="-"):
        """
        Returns the latest object, according to the model's
        'get_latest_by' option or optional given field_name.
        """
        order_by = field_name or getattr(self.model._meta, 'get_latest_by')
        assert bool(order_by), "earliest() and latest() require either a "\
            "field_name parameter or 'get_latest_by' in the model"
        assert self.query.can_filter(), \
            "Cannot change a query once a slice has been taken."
        obj = self._clone()
        # Only one row is needed: replace any ordering with the target field
        # in the requested direction and take the first match.
        obj.query.set_limits(high=1)
        obj.query.clear_ordering(force_empty=True)
        obj.query.add_ordering('%s%s' % (direction, order_by))
        return obj.get()
    def earliest(self, field_name=None):
        # Ascending order ("" direction): the first row is the earliest.
        return self._earliest_or_latest(field_name=field_name, direction="")
    def latest(self, field_name=None):
        # Descending order ("-" direction): the first row is the latest.
        return self._earliest_or_latest(field_name=field_name, direction="-")
def first(self):
"""
Returns the first object of a query, returns None if no match is found.
"""
objects = list((self if self.ordered else self.order_by('pk'))[:1])
if objects:
return objects[0]
return None
def last(self):
"""
Returns the last object of a query, returns None if no match is found.
"""
objects = list((self.reverse() if self.ordered else self.order_by('-pk'))[:1])
if objects:
return objects[0]
return None
def in_bulk(self, id_list=None):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, the entire QuerySet is evaluated.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if id_list is not None:
if not id_list:
return {}
qs = self.filter(pk__in=id_list).order_by()
else:
qs = self._clone()
return {obj._get_pk_val(): obj for obj in qs}
    def delete(self):
        """
        Deletes the records in the current QuerySet.

        Returns the two-tuple produced by Collector.delete().
        """
        assert self.query.can_filter(), \
            "Cannot use 'limit' or 'offset' with delete."
        if self._fields is not None:
            raise TypeError("Cannot call delete() after .values() or .values_list()")
        del_query = self._clone()
        # The delete is actually 2 queries - one to find related objects,
        # and one to delete. Make sure that the discovery of related
        # objects is performed on the same database as the deletion.
        del_query._for_write = True
        # Disable non-supported fields.
        del_query.query.select_for_update = False
        del_query.query.select_related = False
        del_query.query.clear_ordering(force_empty=True)
        collector = Collector(using=del_query.db)
        collector.collect(del_query)
        deleted, _rows_count = collector.delete()
        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None
        return deleted, _rows_count
    # delete() modifies data and is not available on values() querysets.
    delete.alters_data = True
    delete.queryset_only = True
    def _raw_delete(self, using):
        """
        Deletes objects found from the given queryset in single direct SQL
        query. No signals are sent, and there is no protection for cascades.
        """
        return sql.DeleteQuery(self.model).delete_qs(self, using)
    # Mark as data-modifying so surrounding machinery can tell.
    _raw_delete.alters_data = True
    def update(self, **kwargs):
        """
        Updates all elements in the current QuerySet, setting all the given
        fields to the appropriate values.
        """
        assert self.query.can_filter(), \
            "Cannot update a query once a slice has been taken."
        self._for_write = True
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_values(kwargs)
        # savepoint=False: rely on any enclosing atomic block for rollback.
        with transaction.atomic(using=self.db, savepoint=False):
            rows = query.get_compiler(self.db).execute_sql(CURSOR)
        # Cached results are stale after an UPDATE; force re-evaluation.
        self._result_cache = None
        return rows
    update.alters_data = True
    def _update(self, values):
        """
        A version of update that accepts field objects instead of field names.
        Used primarily for model saving and not intended for use by general
        code (it requires too much poking around at model internals to be
        useful at that level).
        """
        assert self.query.can_filter(), \
            "Cannot update a query once a slice has been taken."
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_fields(values)
        # Cached results are stale after an UPDATE; force re-evaluation.
        self._result_cache = None
        return query.get_compiler(self.db).execute_sql(CURSOR)
    _update.alters_data = True
    _update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
    def _prefetch_related_objects(self):
        """Run the queued prefetch_related() lookups against the cached results."""
        # This method can only be called once the result cache has been filled.
        prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
        self._prefetch_done = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
if using is None:
using = self.db
return RawQuerySet(raw_query, model=self.model,
params=params, translations=translations,
using=using)
    def _values(self, *fields):
        """
        Shared implementation behind values() and values_list(): reconfigure
        a clone's query to select the named columns instead of model rows.
        """
        clone = self._clone()
        clone._fields = fields
        query = clone.query
        # Column selections are incompatible with related-object selection
        # and deferred loading.
        query.select_related = False
        query.clear_deferred_loading()
        query.clear_select_fields()
        if query.group_by is True:
            query.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
            query.set_group_by()
            query.clear_select_fields()
        if fields:
            field_names = []
            extra_names = []
            annotation_names = []
            if not query._extra and not query._annotations:
                # Shortcut - if there are no extra or annotations, then
                # the values() clause must be just field names.
                field_names = list(fields)
            else:
                # Split requested names into extra / annotation / plain
                # field buckets and mask the query accordingly.
                query.default_cols = False
                for f in fields:
                    if f in query.extra_select:
                        extra_names.append(f)
                    elif f in query.annotation_select:
                        annotation_names.append(f)
                    else:
                        field_names.append(f)
                query.set_extra_mask(extra_names)
                query.set_annotation_mask(annotation_names)
        else:
            # No explicit fields: select every concrete field of the model.
            field_names = [f.attname for f in self.model._meta.concrete_fields]
        query.values_select = field_names
        query.add_fields(field_names, True)
        return clone
def values(self, *fields):
clone = self._values(*fields)
clone._iterable_class = ValuesIterable
return clone
    def values_list(self, *fields, **kwargs):
        """
        Like values(), but yields tuples — or, with flat=True and a single
        field, bare values — instead of dictionaries.
        """
        flat = kwargs.pop('flat', False)
        if kwargs:
            raise TypeError('Unexpected keyword arguments to values_list: %s'
                            % (list(kwargs),))
        if flat and len(fields) > 1:
            raise TypeError("'flat' is not valid when values_list is called with more than one field.")
        clone = self._values(*fields)
        clone._iterable_class = FlatValuesListIterable if flat else ValuesListIterable
        return clone
    def dates(self, field_name, kind, order='ASC'):
        """
        Returns a list of date objects representing all available dates for
        the given field_name, scoped to 'kind'.
        """
        assert kind in ("year", "month", "day"), \
            "'kind' must be one of 'year', 'month' or 'day'."
        assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
        # plain_field exists only so NULL dates can be filtered out.
        return self.annotate(
            datefield=Date(field_name, kind),
            plain_field=F(field_name)
        ).values_list(
            'datefield', flat=True
        ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
    def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
        """
        Returns a list of datetime objects representing all available
        datetimes for the given field_name, scoped to 'kind'.
        """
        assert kind in ("year", "month", "day", "hour", "minute", "second"), \
            "'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
        assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
        # Time zone truncation only applies when USE_TZ is enabled; default
        # to the active time zone in that case.
        if settings.USE_TZ:
            if tzinfo is None:
                tzinfo = timezone.get_current_timezone()
        else:
            tzinfo = None
        # plain_field exists only so NULL datetimes can be filtered out.
        return self.annotate(
            datetimefield=DateTime(field_name, kind, tzinfo),
            plain_field=F(field_name)
        ).values_list(
            'datetimefield', flat=True
        ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
def none(self):
"""
Returns an empty QuerySet.
"""
clone = self._clone()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._clone()
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
clone = self._clone()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def select_for_update(self, nowait=False):
"""
Returns a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
obj = self._clone()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
return obj
def select_related(self, *fields):
"""
Returns a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, the list is cleared.
"""
if self._fields is not None:
raise TypeError("Cannot call select_related() after .values() or .values_list()")
obj = self._clone()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Returns a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, the list of lookups to
prefetch is appended to. If prefetch_related(None) is called, the list
is cleared.
"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = []
else:
clone._prefetch_related_lookups.extend(lookups)
return clone
    def annotate(self, *args, **kwargs):
        """
        Return a query set in which the returned objects have been annotated
        with extra data or aggregations.
        """
        annotations = OrderedDict()  # To preserve ordering of args
        for arg in args:
            # The default_alias property may raise a TypeError, so we use
            # a try/except construct rather than hasattr in order to remain
            # consistent between PY2 and PY3 (hasattr would swallow
            # the TypeError on PY2).
            try:
                if arg.default_alias in kwargs:
                    raise ValueError("The named annotation '%s' conflicts with the "
                                     "default name for another annotation."
                                     % arg.default_alias)
            except (AttributeError, TypeError):
                raise TypeError("Complex annotations require an alias")
            annotations[arg.default_alias] = arg
        annotations.update(kwargs)
        clone = self._clone()
        names = self._fields
        if names is None:
            names = {f.name for f in self.model._meta.get_fields()}
        # Refuse aliases that would shadow a real model field.
        for alias, annotation in annotations.items():
            if alias in names:
                raise ValueError("The annotation '%s' conflicts with a field on "
                                 "the model." % alias)
            clone.query.add_annotation(annotation, alias, is_summary=False)
        # Any aggregate among the new annotations forces grouping.
        for alias, annotation in clone.query.annotations.items():
            if alias in annotations and annotation.contains_aggregate:
                if clone._fields is None:
                    clone.query.group_by = True
                else:
                    clone.query.set_group_by()
                break
        return clone
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering(force_empty=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Returns a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._clone()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
def using(self, alias):
"""
Selects which database this QuerySet should execute its query against.
"""
clone = self._clone()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.get_meta().ordering:
return True
else:
return False
ordered = property(ordered)
@property
def db(self):
"Return the database that will be used if this query is executed now"
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, return_id=False, raw=False, using=None):
"""
Inserts a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(self.model)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
_insert.alters_data = True
_insert.queryset_only = False
    def _batched_insert(self, objs, fields, batch_size):
        """
        Helper for bulk_create(): inserts ``objs`` in consecutive slices of
        at most ``batch_size`` objects per INSERT statement.

        (Note: despite what older comments said, this is iterative, not
        recursive — it simply slices the list front to back.)
        """
        if not objs:
            return
        ops = connections[self.db].ops
        # Fall back to the backend's maximum safe batch size when the caller
        # didn't pass one; never less than one object per batch.
        batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
        for batch in [objs[i:i + batch_size]
                      for i in range(0, len(objs), batch_size)]:
            self.model._base_manager._insert(batch, fields=fields,
                                             using=self.db)
    def _clone(self, **kwargs):
        """
        Return a copy of this QuerySet with a cloned sql.Query.

        ``kwargs`` are applied as attribute overrides on the new instance.
        """
        query = self.query.clone()
        if self._sticky_filter:
            # Propagate the "merge the next two filters" flag set by
            # _next_is_sticky() onto the cloned query.
            query.filter_is_sticky = True
        clone = self.__class__(model=self.model, query=query, using=self._db, hints=self._hints)
        clone._for_write = self._for_write
        # Slice-copy the lookup list so later mutations don't leak between
        # clones; _known_related_objects is intentionally shared.
        clone._prefetch_related_lookups = self._prefetch_related_lookups[:]
        clone._known_related_objects = self._known_related_objects
        clone._iterable_class = self._iterable_class
        clone._fields = self._fields

        clone.__dict__.update(kwargs)
        return clone
    def _fetch_all(self):
        # Evaluate the query once and cache the results; then run any pending
        # prefetch_related() work exactly once on the cached objects.
        if self._result_cache is None:
            self._result_cache = list(self.iterator())
        if self._prefetch_related_lookups and not self._prefetch_done:
            self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""
Checks that we are merging two comparable QuerySet classes.
"""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select) or
set(self.query.extra_select) != set(other.query.extra_select) or
set(self.query.annotation_select) != set(other.query.annotation_select)):
raise TypeError("Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
    def _prepare(self, field):
        """
        Prepare this QuerySet for use as the right-hand side of a lookup
        against ``field`` (i.e. as a subquery). Returns self or a values()
        clone narrowed to the relation's target field.
        """
        if self._fields is not None:
            # values() queryset can only be used as nested queries
            # if they are set up to select only a single field.
            if len(self._fields or self.model._meta.concrete_fields) > 1:
                raise TypeError('Cannot use multi-field values as a filter value.')
        else:
            # If the query is used as a subquery for a ForeignKey with non-pk
            # target field, make sure to select the target field in the subquery.
            foreign_fields = getattr(field, 'foreign_related_fields', ())
            if len(foreign_fields) == 1 and not foreign_fields[0].primary_key:
                return self.values(foreign_fields[0].name)
        return self
    def _as_sql(self, connection):
        """
        Returns the internal query's SQL and parameters (as a tuple), for use
        as a nested subquery on the given connection.
        """
        if self._fields is not None:
            # values() queryset can only be used as nested queries
            # if they are set up to select only a single field.
            if len(self._fields or self.model._meta.concrete_fields) > 1:
                raise TypeError('Cannot use multi-field values as a filter value.')
            clone = self._clone()
        else:
            # Plain querysets are narrowed to select just the primary key.
            clone = self.values('pk')

        if clone._db is None or connection == connections[clone._db]:
            return clone.query.get_compiler(connection=connection).as_nested_sql()
        raise ValueError("Can't do subqueries with queries on different DBs.")

    # When used as part of a nested query, a queryset will never be an "always
    # empty" result.
    value_annotation = True
def _add_hints(self, **hints):
"""
Update hinting information for later use by Routers
"""
# If there is any hinting information, add it to what we already know.
# If we have a new hint for an existing key, overwrite with the new value.
self._hints.update(hints)
def _has_filters(self):
"""
Checks if this QuerySet has any filtering going on. Note that this
isn't equivalent for checking if all objects are present in results,
for example qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
def is_compatible_query_object_type(self, opts, field):
"""
Check that using this queryset as the rhs value for a lookup is
allowed. The opts are the options of the relation's target we are
querying against. For example in .filter(author__in=Author.objects.all())
the opts would be Author's (from the author field) and self.model would
be Author.objects.all() queryset's .model (Author also). The field is
the related field on the lhs side.
"""
# We trust that users of values() know what they are doing.
if self._fields is not None:
return True
return check_rel_lookup_compatibility(self.model, opts, field)
is_compatible_query_object_type.queryset_only = True
class InstanceCheckMeta(type):
    def __instancecheck__(self, instance):
        # An "empty queryset" is any QuerySet whose query is provably empty.
        if not isinstance(instance, QuerySet):
            return False
        return instance.query.is_empty()
class EmptyQuerySet(six.with_metaclass(InstanceCheckMeta)):
    """
    Marker class for isinstance() checks against known-empty querysets:
    isinstance(qs.none(), EmptyQuerySet) is True.

    Never instantiated directly; it exists purely for the metaclass hook.
    """
    def __init__(self, *args, **kwargs):
        raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet(object):
    """
    Provides an iterator which converts the results of raw SQL queries into
    annotated model instances.
    """
    def __init__(self, raw_query, model=None, query=None, params=None,
                 translations=None, using=None, hints=None):
        self.raw_query = raw_query
        self.model = model
        self._db = using
        self._hints = hints or {}
        self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
        self.params = params or ()
        # Maps query column names to model field names (see `columns`).
        self.translations = translations or {}

    def resolve_model_init_order(self):
        """
        Resolve the init field names and value positions.

        Returns (model_init_names, model_init_order, annotation_fields):
        the attnames the model can be constructed from, the result-row
        positions of their values, and the (column, position) pairs for
        columns that don't map to any model field.
        """
        model_init_fields = [f for f in self.model._meta.fields if f.column in self.columns]
        # Columns with no matching model field become plain instance
        # attributes set after construction ("annotations").
        annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
                             if column not in self.model_fields]
        model_init_order = [self.columns.index(f.column) for f in model_init_fields]
        model_init_names = [f.attname for f in model_init_fields]
        return model_init_names, model_init_order, annotation_fields

    def __iter__(self):
        # Cache some things for performance reasons outside the loop.
        db = self.db
        compiler = connections[db].ops.compiler('SQLCompiler')(
            self.query, connections[db], db
        )
        query = iter(self.query)

        try:
            model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()

            # Find out which model's fields are not present in the query.
            skip = set()
            for field in self.model._meta.fields:
                if field.attname not in model_init_names:
                    skip.add(field.attname)
            if skip:
                if self.model._meta.pk.attname in skip:
                    raise InvalidQuery('Raw query must include the primary key')
                # Fields missing from the query become deferred fields on a
                # dynamically generated subclass.
                model_cls = deferred_class_factory(self.model, skip)
            else:
                model_cls = self.model
            fields = [self.model_fields.get(c) for c in self.columns]
            converters = compiler.get_converters([
                f.get_col(f.model._meta.db_table) if f else None for f in fields
            ])
            for values in query:
                if converters:
                    values = compiler.apply_converters(values, converters)
                # Associate fields to values
                model_init_values = [values[pos] for pos in model_init_pos]
                instance = model_cls.from_db(db, model_init_names, model_init_values)
                if annotation_fields:
                    for column, pos in annotation_fields:
                        setattr(instance, column, values[pos])
                yield instance
        finally:
            # Done iterating the Query. If it has its own cursor, close it.
            if hasattr(self.query, 'cursor') and self.query.cursor:
                self.query.cursor.close()

    def __repr__(self):
        return "<RawQuerySet: %s>" % self.query

    def __getitem__(self, k):
        # Raw queries can't be sliced lazily; materialize the whole result
        # set and index into it.
        return list(self)[k]

    @property
    def db(self):
        "Return the database that will be used if this query is executed now"
        return self._db or router.db_for_read(self.model, **self._hints)

    def using(self, alias):
        """
        Selects which database this Raw QuerySet should execute its query against.
        """
        return RawQuerySet(self.raw_query, model=self.model,
                query=self.query.clone(using=alias),
                params=self.params, translations=self.translations,
                using=alias)

    @property
    def columns(self):
        """
        A list of model field names in the order they'll appear in the
        query results.

        Computed lazily on first access and cached on the instance.
        """
        if not hasattr(self, '_columns'):
            self._columns = self.query.get_columns()

            # Adjust any column names which don't match field names
            for (query_name, model_name) in self.translations.items():
                try:
                    index = self._columns.index(query_name)
                    self._columns[index] = model_name
                except ValueError:
                    # Ignore translations for non-existent column names
                    pass

        return self._columns

    @property
    def model_fields(self):
        """
        A dict mapping column names to model field names.

        Computed lazily on first access and cached on the instance.
        """
        if not hasattr(self, '_model_fields'):
            # Normalize column names via the backend's converter so lookups
            # match what the introspection layer reports.
            converter = connections[self.db].introspection.table_name_converter
            self._model_fields = {}
            for field in self.model._meta.fields:
                name, column = field.get_attname_column()
                self._model_fields[converter(column)] = field
        return self._model_fields
class Prefetch(object):
    """
    Describes one prefetch_related() lookup, optionally with a custom
    queryset and/or a custom attribute name (``to_attr``) to store the
    results on.
    """
    def __init__(self, lookup, queryset=None, to_attr=None):
        # `prefetch_through` is the path we traverse to perform the prefetch.
        self.prefetch_through = lookup
        # `prefetch_to` is the path to the attribute that stores the result;
        # with a custom to_attr the last path component is replaced.
        if to_attr:
            parts = lookup.split(LOOKUP_SEP)
            self.prefetch_to = LOOKUP_SEP.join(parts[:-1] + [to_attr])
        else:
            self.prefetch_to = lookup

        self.queryset = queryset
        self.to_attr = to_attr

    def add_prefix(self, prefix):
        """Prepend ``prefix`` to both the traversal and storage paths."""
        self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through])
        self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to])

    def get_current_prefetch_through(self, level):
        """Traversal path truncated to ``level`` + 1 components."""
        parts = self.prefetch_through.split(LOOKUP_SEP)
        return LOOKUP_SEP.join(parts[:level + 1])

    def get_current_prefetch_to(self, level):
        """Storage path truncated to ``level`` + 1 components."""
        parts = self.prefetch_to.split(LOOKUP_SEP)
        return LOOKUP_SEP.join(parts[:level + 1])

    def get_current_to_attr(self, level):
        """
        Return (attribute name at this level, whether it's a custom to_attr
        at the final level rather than the default cache).
        """
        parts = self.prefetch_to.split(LOOKUP_SEP)
        to_attr = parts[level]
        as_attr = self.to_attr and level == len(parts) - 1
        return to_attr, as_attr

    def get_current_queryset(self, level):
        """Return the custom queryset, but only at the final (target) level."""
        if self.get_current_prefetch_to(level) != self.prefetch_to:
            return None
        return self.queryset

    def __eq__(self, other):
        # Two Prefetch objects are equal when they store to the same path.
        if not isinstance(other, Prefetch):
            return False
        return self.prefetch_to == other.prefetch_to

    def __hash__(self):
        return hash(self.__class__) ^ hash(self.prefetch_to)
def normalize_prefetch_lookups(lookups, prefix=None):
    """
    Normalize a mix of strings and Prefetch objects into a list of Prefetch
    objects, optionally prepending ``prefix`` to each lookup path.
    """
    normalized = []
    for lookup in lookups:
        prefetch = lookup if isinstance(lookup, Prefetch) else Prefetch(lookup)
        if prefix:
            prefetch.add_prefix(prefix)
        normalized.append(prefetch)
    return normalized
def prefetch_related_objects(model_instances, *related_lookups):
    """
    Populate prefetched object caches for a list of model instances based on
    the lookups/Prefetch instances given.

    Lookups may be strings or Prefetch objects; nested paths ("foo__bar")
    are walked level by level, and new lookups discovered on related
    managers are appended to the work queue as they are found.
    """
    if len(model_instances) == 0:
        return  # nothing to do

    related_lookups = normalize_prefetch_lookups(related_lookups)

    # We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below). So we need some book keeping to
    # ensure we don't do duplicate work.
    done_queries = {}  # dictionary of things like 'foo__bar': [results]

    auto_lookups = set()  # we add to this as we go through.
    followed_descriptors = set()  # recursion protection

    all_lookups = deque(related_lookups)
    while all_lookups:
        lookup = all_lookups.popleft()
        if lookup.prefetch_to in done_queries:
            # A second lookup for the same target path is only an error when
            # it carries its own custom queryset (it can't be honored).
            if lookup.queryset:
                raise ValueError("'%s' lookup was already seen with a different queryset. "
                                 "You may need to adjust the ordering of your lookups." % lookup.prefetch_to)

            continue

        # Top level, the list of objects to decorate is the result cache
        # from the primary QuerySet. It won't be for deeper levels.
        obj_list = model_instances

        through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
        for level, through_attr in enumerate(through_attrs):
            # Prepare main instances
            if len(obj_list) == 0:
                break

            prefetch_to = lookup.get_current_prefetch_to(level)
            if prefetch_to in done_queries:
                # Skip any prefetching, and any object preparation
                obj_list = done_queries[prefetch_to]
                continue

            # Prepare objects:
            good_objects = True
            for obj in obj_list:
                # Since prefetching can re-use instances, it is possible to have
                # the same instance multiple times in obj_list, so obj might
                # already be prepared.
                if not hasattr(obj, '_prefetched_objects_cache'):
                    try:
                        obj._prefetched_objects_cache = {}
                    except (AttributeError, TypeError):
                        # Must be an immutable object from
                        # values_list(flat=True), for example (TypeError) or
                        # a QuerySet subclass that isn't returning Model
                        # instances (AttributeError), either in Django or a 3rd
                        # party. prefetch_related() doesn't make sense, so quit.
                        good_objects = False
                        break
            if not good_objects:
                break

            # Descend down tree

            # We assume that objects retrieved are homogeneous (which is the premise
            # of prefetch_related), so what applies to first object applies to all.
            first_obj = obj_list[0]
            prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr)

            if not attr_found:
                raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
                                     "parameter to prefetch_related()" %
                                     (through_attr, first_obj.__class__.__name__, lookup.prefetch_through))

            if level == len(through_attrs) - 1 and prefetcher is None:
                # Last one, this *must* resolve to something that supports
                # prefetching, otherwise there is no point adding it and the
                # developer asking for it has made a mistake.
                raise ValueError("'%s' does not resolve to an item that supports "
                                 "prefetching - this is an invalid parameter to "
                                 "prefetch_related()." % lookup.prefetch_through)

            if prefetcher is not None and not is_fetched:
                obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
                # We need to ensure we don't keep adding lookups from the
                # same relationships to stop infinite recursion. So, if we
                # are already on an automatically added lookup, don't add
                # the new lookups from relationships we've seen already.
                if not (lookup in auto_lookups and descriptor in followed_descriptors):
                    done_queries[prefetch_to] = obj_list
                    new_lookups = normalize_prefetch_lookups(additional_lookups, prefetch_to)
                    auto_lookups.update(new_lookups)
                    all_lookups.extendleft(new_lookups)
                followed_descriptors.add(descriptor)
            else:
                # Either a singly related object that has already been fetched
                # (e.g. via select_related), or hopefully some other property
                # that doesn't support prefetching but needs to be traversed.

                # We replace the current list of parent objects with the list
                # of related objects, filtering out empty or missing values so
                # that we can continue with nullable or reverse relations.
                new_obj_list = []
                for obj in obj_list:
                    try:
                        new_obj = getattr(obj, through_attr)
                    except exceptions.ObjectDoesNotExist:
                        continue
                    if new_obj is None:
                        continue
                    # We special-case `list` rather than something more generic
                    # like `Iterable` because we don't want to accidentally match
                    # user models that define __iter__.
                    if isinstance(new_obj, list):
                        new_obj_list.extend(new_obj)
                    else:
                        new_obj_list.append(new_obj)
                obj_list = new_obj_list
def get_prefetcher(instance, attr):
    """
    For the attribute 'attr' on the given instance, finds
    an object that has a get_prefetch_queryset().
    Returns a 4 tuple containing:
    (the object with get_prefetch_queryset (or None),
     the descriptor object representing this relationship (or None),
     a boolean that is False if the attribute was not found at all,
     a boolean that is True if the attribute has already been fetched)
    """
    prefetcher = None
    is_fetched = False

    # For singly related objects, we have to avoid getting the attribute
    # from the object, as this will trigger the query. So we first try
    # on the class, in order to get the descriptor object.
    rel_obj_descriptor = getattr(instance.__class__, attr, None)
    if rel_obj_descriptor is None:
        # No descriptor on the class: fall back to a plain attribute check
        # on the instance.
        attr_found = hasattr(instance, attr)
    else:
        attr_found = True
        if rel_obj_descriptor:
            # singly related object, descriptor object has the
            # get_prefetch_queryset() method.
            if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
                prefetcher = rel_obj_descriptor
                if rel_obj_descriptor.is_cached(instance):
                    is_fetched = True
            else:
                # descriptor doesn't support prefetching, so we go ahead and get
                # the attribute on the instance rather than the class to
                # support many related managers
                rel_obj = getattr(instance, attr)
                if hasattr(rel_obj, 'get_prefetch_queryset'):
                    prefetcher = rel_obj
                # Already fetched if a prior pass cached this attr's results.
                is_fetched = attr in instance._prefetched_objects_cache
    return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
    """
    Helper function for prefetch_related_objects

    Runs prefetches on all instances using the prefetcher object,
    assigning results to relevant caches in instance.

    The prefetched objects are returned, along with any additional
    prefetches that must be done due to prefetch_related lookups
    found from default managers.
    """
    # prefetcher must have a method get_prefetch_queryset() which takes a list
    # of instances, and returns a tuple:

    # (queryset of instances of self.model that are related to passed in instances,
    #  callable that gets value to be matched for returned instances,
    #  callable that gets value to be matched for passed in instances,
    #  boolean that is True for singly related objects,
    #  cache name to assign to).

    # The 'values to be matched' must be hashable as they will be used
    # in a dictionary.

    rel_qs, rel_obj_attr, instance_attr, single, cache_name = (
        prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
    # We have to handle the possibility that the QuerySet we just got back
    # contains some prefetch_related lookups. We don't want to trigger the
    # prefetch_related functionality by evaluating the query. Rather, we need
    # to merge in the prefetch_related lookups.
    # Copy the lookups in case it is a Prefetch object which could be reused
    # later (happens in nested prefetch_related).
    additional_lookups = [
        copy.copy(additional_lookup) for additional_lookup
        in getattr(rel_qs, '_prefetch_related_lookups', [])
    ]
    if additional_lookups:
        # Don't need to clone because the manager should have given us a fresh
        # instance, so we access an internal instead of using public interface
        # for performance reasons.
        rel_qs._prefetch_related_lookups = []

    all_related_objects = list(rel_qs)

    # Group related objects by their join value so each parent instance can
    # pick up its own list with a single dict lookup below.
    rel_obj_cache = {}
    for rel_obj in all_related_objects:
        rel_attr_val = rel_obj_attr(rel_obj)
        rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)

    to_attr, as_attr = lookup.get_current_to_attr(level)
    # Make sure `to_attr` does not conflict with a field.
    if as_attr and instances:
        # We assume that objects retrieved are homogeneous (which is the premise
        # of prefetch_related), so what applies to first object applies to all.
        model = instances[0].__class__
        try:
            model._meta.get_field(to_attr)
        except exceptions.FieldDoesNotExist:
            pass
        else:
            msg = 'to_attr={} conflicts with a field on the {} model.'
            raise ValueError(msg.format(to_attr, model.__name__))

    for obj in instances:
        instance_attr_val = instance_attr(obj)
        vals = rel_obj_cache.get(instance_attr_val, [])

        if single:
            val = vals[0] if vals else None
            # Custom to_attr stores directly on the instance; otherwise use
            # the descriptor's cache attribute.
            to_attr = to_attr if as_attr else cache_name
            setattr(obj, to_attr, val)
        else:
            if as_attr:
                setattr(obj, to_attr, vals)
                obj._prefetched_objects_cache[cache_name] = vals
            else:
                # Cache in the QuerySet.all().
                qs = getattr(obj, to_attr).all()
                qs._result_cache = vals
                # We don't want the individual qs doing prefetch_related now,
                # since we have merged this into the current work.
                qs._prefetch_done = True
                obj._prefetched_objects_cache[cache_name] = qs
    return all_related_objects, additional_lookups
class RelatedPopulator(object):
    """
    RelatedPopulator is used for select_related() object instantiation.

    The idea is that each select_related() model will be populated by a
    different RelatedPopulator instance. The RelatedPopulator instances get
    klass_info and select (computed in SQLCompiler) plus the used db as
    input for initialization. That data is used to compute which columns
    to use, how to instantiate the model, and how to populate the links
    between the objects.

    The actual creation of the objects is done in populate() method. This
    method gets row and from_obj as input and populates the select_related()
    model instance.
    """
    def __init__(self, klass_info, select, db):
        self.db = db
        # Pre-compute needed attributes. The attributes are:
        #  - model_cls: the possibly deferred model class to instantiate
        #  - either:
        #    - cols_start, cols_end: usually the columns in the row are
        #      in the same order model_cls.__init__ expects them, so we
        #      can instantiate by model_cls(*row[cols_start:cols_end])
        #    - reorder_for_init: When select_related descends to a child
        #      class, then we want to reuse the already selected parent
        #      data. However, in this case the parent data isn't necessarily
        #      in the same order that Model.__init__ expects it to be, so
        #      we have to reorder the parent data. The reorder_for_init
        #      attribute contains a function used to reorder the field data
        #      in the order __init__ expects it.
        #  - pk_idx: the index of the primary key field in the reordered
        #    model data. Used to check if a related object exists at all.
        #  - init_list: the field attnames fetched from the database. For
        #    deferred models this isn't the same as all attnames of the
        #    model's fields.
        #  - related_populators: a list of RelatedPopulator instances if
        #    select_related() descends to related models from this model.
        #  - cache_name, reverse_cache_name: the names to use for setattr
        #    when assigning the fetched object to the from_obj. If the
        #    reverse_cache_name is set, then we also set the reverse link.
        select_fields = klass_info['select_fields']
        from_parent = klass_info['from_parent']
        if not from_parent:
            # Simple case: the selected columns are contiguous and already in
            # __init__ order.
            self.cols_start = select_fields[0]
            self.cols_end = select_fields[-1] + 1
            self.init_list = [
                f[0].target.attname for f in select[self.cols_start:self.cols_end]
            ]
            self.reorder_for_init = None
        else:
            # Child of a multi-table-inheritance parent: build a map that
            # reorders the parent's row data into __init__ order.
            model_init_attnames = [
                f.attname for f in klass_info['model']._meta.concrete_fields
            ]
            reorder_map = []
            for idx in select_fields:
                field = select[idx][0].target
                init_pos = model_init_attnames.index(field.attname)
                reorder_map.append((init_pos, field.attname, idx))
            reorder_map.sort()
            self.init_list = [v[1] for v in reorder_map]
            pos_list = [row_pos for _, _, row_pos in reorder_map]

            def reorder_for_init(row):
                return [row[row_pos] for row_pos in pos_list]
            self.reorder_for_init = reorder_for_init

        self.model_cls = self.get_deferred_cls(klass_info, self.init_list)
        self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
        self.related_populators = get_related_populators(klass_info, select, self.db)
        field = klass_info['field']
        reverse = klass_info['reverse']
        self.reverse_cache_name = None
        if reverse:
            self.cache_name = field.remote_field.get_cache_name()
            self.reverse_cache_name = field.get_cache_name()
        else:
            self.cache_name = field.get_cache_name()
            if field.unique:
                self.reverse_cache_name = field.remote_field.get_cache_name()

    def get_deferred_cls(self, klass_info, init_list):
        """
        Return the model class to instantiate: a generated deferred subclass
        when only a subset of the concrete fields was selected, otherwise the
        model itself.
        """
        model_cls = klass_info['model']
        if len(init_list) != len(model_cls._meta.concrete_fields):
            init_set = set(init_list)
            skip = [
                f.attname for f in model_cls._meta.concrete_fields
                if f.attname not in init_set
            ]
            model_cls = deferred_class_factory(model_cls, skip)
        return model_cls

    def populate(self, row, from_obj):
        """
        Instantiate the related object from this row and attach it to
        ``from_obj`` (setting the reverse link too when applicable).
        """
        if self.reorder_for_init:
            obj_data = self.reorder_for_init(row)
        else:
            obj_data = row[self.cols_start:self.cols_end]
        if obj_data[self.pk_idx] is None:
            # A NULL pk means the join matched no related row.
            obj = None
        else:
            obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
        if obj and self.related_populators:
            for rel_iter in self.related_populators:
                rel_iter.populate(row, obj)
        setattr(from_obj, self.cache_name, obj)
        if obj and self.reverse_cache_name:
            setattr(obj, self.reverse_cache_name, from_obj)
def get_related_populators(klass_info, select, db):
    """Build one RelatedPopulator per related klass_info entry."""
    related_klass_infos = klass_info.get('related_klass_infos', [])
    return [
        RelatedPopulator(rel_klass_info, select, db)
        for rel_klass_info in related_klass_infos
    ]
| 40.106236 | 115 | 0.612216 |
ace55a7a4d4e59366d0eee6a4ea8e05f7070e764 | 1,839 | py | Python | EShop/shop/migrations/0001_initial.py | namtrung205/3dVistaWebVRTours | cacab5061cebfa6778f9cc919d80759148aeb6d2 | [
"MIT"
] | null | null | null | EShop/shop/migrations/0001_initial.py | namtrung205/3dVistaWebVRTours | cacab5061cebfa6778f9cc919d80759148aeb6d2 | [
"MIT"
] | null | null | null | EShop/shop/migrations/0001_initial.py | namtrung205/3dVistaWebVRTours | cacab5061cebfa6778f9cc919d80759148aeb6d2 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2021-02-23 15:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=255)),
('slug', models.CharField(max_length=255, unique=True)),
],
options={
'verbose_name': 'category',
'verbose_name_plural': 'categories',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=255)),
('slug', models.SlugField(max_length=255)),
('image', models.ImageField(blank=True, upload_to='product/%Y/%m/%d')),
('description', models.TextField(blank=True)),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('available', models.BooleanField(default=True)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='product', to='shop.category')),
],
options={
'ordering': ('name',),
'index_together': {('id', 'slug')},
},
),
]
| 38.3125 | 137 | 0.54758 |
ace55a8ebbde9a222e77031e23f35d5d063c7cb1 | 3,528 | py | Python | ask-smapi-model/ask_smapi_model/v1/skill/manifest/music_content_type.py | rivamarco/alexa-apis-for-python | 62e3a9057a26003e836fa09aa12a2e1c8b62d6e0 | [
"Apache-2.0"
] | 2 | 2021-10-30T06:52:48.000Z | 2021-11-16T12:34:16.000Z | ask-smapi-model/ask_smapi_model/v1/skill/manifest/music_content_type.py | Shreyas-vgr/alexa-apis-for-python | 74ea73b3b6a03fd9cb735fb8c1fb2bd961faab54 | [
"Apache-2.0"
] | null | null | null | ask-smapi-model/ask_smapi_model/v1/skill/manifest/music_content_type.py | Shreyas-vgr/alexa-apis-for-python | 74ea73b3b6a03fd9cb735fb8c1fb2bd961faab54 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_smapi_model.v1.skill.manifest.music_content_name import MusicContentName
class MusicContentType(object):
"""
Defines the structure for content that can be provided by a music skill.
:param name:
:type name: (optional) ask_smapi_model.v1.skill.manifest.music_content_name.MusicContentName
"""
deserialized_types = {
'name': 'ask_smapi_model.v1.skill.manifest.music_content_name.MusicContentName'
} # type: Dict
attribute_map = {
'name': 'name'
} # type: Dict
supports_multiple_types = False
def __init__(self, name=None):
# type: (Optional[MusicContentName]) -> None
"""Defines the structure for content that can be provided by a music skill.
:param name:
:type name: (optional) ask_smapi_model.v1.skill.manifest.music_content_name.MusicContentName
"""
self.__discriminator_value = None # type: str
self.name = name
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, MusicContentType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
| 32.072727 | 100 | 0.601474 |
ace55d940ef087f0e4836da8a37a09da0861daea | 3,771 | py | Python | test/model/deepvar/test_deepvar.py | Xiaoxiong-Liu/gluon-ts | 097c492769258dd70b7f223f826b17b0051ceee9 | [
"Apache-2.0"
] | 2,648 | 2019-06-03T17:18:27.000Z | 2022-03-31T08:29:22.000Z | test/model/deepvar/test_deepvar.py | Xiaoxiong-Liu/gluon-ts | 097c492769258dd70b7f223f826b17b0051ceee9 | [
"Apache-2.0"
] | 1,220 | 2019-06-04T09:00:14.000Z | 2022-03-31T10:45:43.000Z | test/model/deepvar/test_deepvar.py | Xiaoxiong-Liu/gluon-ts | 097c492769258dd70b7f223f826b17b0051ceee9 | [
"Apache-2.0"
] | 595 | 2019-06-04T01:04:31.000Z | 2022-03-30T10:40:26.000Z | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pytest
from gluonts.dataset.artificial import constant_dataset
from gluonts.dataset.common import TrainDatasets
from gluonts.dataset.multivariate_grouper import MultivariateGrouper
from gluonts.evaluation import backtest_metrics, MultivariateEvaluator
from gluonts.model.deepvar import DeepVAREstimator
from gluonts.mx.distribution import (
LowrankMultivariateGaussianOutput,
MultivariateGaussianOutput,
)
from gluonts.mx.trainer import Trainer
def load_multivariate_constant_dataset():
    """Return a multivariate version of the constant dataset.

    The univariate constant dataset is grouped into a single
    10-dimensional target; the prediction length is copied onto the
    metadata so downstream estimators can read it.
    """
    dataset_info, train_ds, test_ds = constant_dataset()
    train_grouper = MultivariateGrouper(max_target_dim=10)
    test_grouper = MultivariateGrouper(num_test_dates=1, max_target_dim=10)
    meta = dataset_info.metadata
    meta.prediction_length = dataset_info.prediction_length
    return TrainDatasets(
        metadata=meta,
        train=train_grouper(train_ds),
        test=test_grouper(test_ds),
    )
# Module-level fixtures shared by the parametrized tests below.
dataset = load_multivariate_constant_dataset()
# cardinality of the single static categorical feature = number of series
target_dim = int(dataset.metadata.feat_static_cat[0].cardinality)
metadata = dataset.metadata
estimator = DeepVAREstimator
@pytest.mark.parametrize(
    "distr_output, num_batches_per_epoch, Estimator, hybridize, "
    "use_marginal_transformation",
    [
        (
            LowrankMultivariateGaussianOutput(dim=target_dim, rank=2),
            10,
            estimator,
            True,
            True,
        ),
        (
            LowrankMultivariateGaussianOutput(dim=target_dim, rank=2),
            10,
            estimator,
            False,
            False,
        ),
        (
            LowrankMultivariateGaussianOutput(dim=target_dim, rank=2),
            10,
            estimator,
            True,
            False,
        ),
        # ``distr_output=None`` exercises the estimator's default output
        (None, 10, estimator, True, True),
        (
            MultivariateGaussianOutput(dim=target_dim),
            10,
            estimator,
            False,
            True,
        ),
        (
            MultivariateGaussianOutput(dim=target_dim),
            10,
            estimator,
            True,
            True,
        ),
    ],
)
def test_deepvar(
    distr_output,
    num_batches_per_epoch,
    Estimator,
    hybridize,
    use_marginal_transformation,
):
    """Smoke-test ``DeepVAREstimator`` on the constant dataset.

    Trains for one epoch with a tiny learning rate (so weights barely
    move) and checks that backtesting completes with an aggregate ND
    metric below a loose bound.
    """
    estimator = Estimator(
        num_cells=20,
        num_layers=1,
        pick_incomplete=True,
        target_dim=target_dim,
        prediction_length=metadata.prediction_length,
        freq=metadata.freq,
        distr_output=distr_output,
        scaling=False,
        use_marginal_transformation=use_marginal_transformation,
        trainer=Trainer(
            epochs=1,
            batch_size=8,
            learning_rate=1e-10,
            minimum_learning_rate=1e-13,
            num_batches_per_epoch=num_batches_per_epoch,
            hybridize=hybridize,
        ),
    )
    predictor = estimator.train(training_data=dataset.train)
    agg_metrics, _ = backtest_metrics(
        test_dataset=dataset.test,
        predictor=predictor,
        evaluator=MultivariateEvaluator(
            quantiles=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)
        ),
    )
    # very permissive bound: this is a smoke test, not an accuracy test
    assert agg_metrics["ND"] < 1.5
ace55dd1709c2bf0e0aac9344b2f89b89124c0b4 | 144,745 | py | Python | src/sage/combinat/growth.py | hsm207/sage | 020bd59ec28717bfab9af44d2231c53da1ff99f1 | [
"BSL-1.0"
] | 1 | 2021-10-18T01:24:04.000Z | 2021-10-18T01:24:04.000Z | src/sage/combinat/growth.py | hsm207/sage | 020bd59ec28717bfab9af44d2231c53da1ff99f1 | [
"BSL-1.0"
] | null | null | null | src/sage/combinat/growth.py | hsm207/sage | 020bd59ec28717bfab9af44d2231c53da1ff99f1 | [
"BSL-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
r"""
Growth diagrams and dual graded graphs
AUTHORS:
- Martin Rubey (2016-09): Initial version
- Martin Rubey (2017-09): generalize, more rules, improve documentation
- Travis Scrimshaw (2017-09): switch to rule-based framework
.. TODO::
- provide examples for the P and Q-symbol in the skew case
- implement a method providing a visualization of the growth
diagram with all labels, perhaps as LaTeX code
- when shape is given, check that it is compatible with filling
or labels
- optimize rules, mainly for :class:`RuleRSK` and
:class:`RuleBurge`
- implement backward rules for :class:`GrowthDiagram.rules.Domino`
- implement backward rule from [LLMSSZ2013]_, [LS2007]_
- make semistandard extension generic
- accommodate dual filtered graphs
A guided tour
=============
Growth diagrams, invented by Sergey Fomin [Fom1994]_, [Fom1995]_,
provide a vast generalization of the Robinson-Schensted-Knuth (RSK)
correspondence between matrices with non-negative integer entries and
pairs of semistandard Young tableaux of the same shape.
The main fact is that many correspondences similar to RSK can be
defined by providing a pair of so-called local rules: a 'forward'
rule, whose input are three vertices `y`, `t` and `x` of a certain
directed graph (in the case of Robinson-Schensted: the directed graph
corresponding to Young's lattice) and an integer (in the case of
Robinson-Schensted: `0` or `1`), and whose output is a fourth vertex
`z`. This rule should be invertible in the following sense: there is
a so-called 'backward' rule that recovers the integer and `t` given
`y`, `z` and `x`.
As an example, the growth rules for the classical RSK correspondence
are provided by :class:`RuleRSK`. To produce a growth diagram, pass
the desired rule and a permutation to :class:`GrowthDiagram`::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: w = [2,3,6,1,4,5]; G = GrowthDiagram(RuleRSK, w); G
0 0 0 1 0 0
1 0 0 0 0 0
0 1 0 0 0 0
0 0 0 0 1 0
0 0 0 0 0 1
0 0 1 0 0 0
The forward rule just mentioned assigns 49 partitions to the corners
of each of the 36 cells of this matrix (i.e., 49 the vertices of a
`(6+1) \times (6+1)` grid graph), with the exception of the corners
on the left and top boundary, which are initialized with the empty
partition. More precisely, for each cell, the
:meth:`~sage.combinat.growth.RuleRSK.forward_rule` computes the
partition `z` labelling the lower right corner, given the content `c`
of a cell and the other three partitions::
t --- x
| c |
y --- z
.. WARNING::
Note that a growth diagram is printed with matrix coordinates,
the origin being in the top-left corner. Therefore, the growth
is from the top left to the bottom right!
The partitions along the boundary opposite of the origin, reading
from the bottom left to the top right, are obtained by using the
method :meth:`~sage.combinat.growth.GrowthDiagram.out_labels`::
sage: G.out_labels()
[[],
[1],
[2],
[3],
[3, 1],
[3, 2],
[4, 2],
[4, 1],
[3, 1],
[2, 1],
[1, 1],
[1],
[]]
However, in the case of a rectangular filling, it is more practical
to split this sequence of labels in two. Interpreting the sequence
of partitions along the right boundary as a standard Young tableau,
we then obtain the so-called
:meth:`~sage.combinat.growth.RulePartitions.P_symbol`, the partitions
along the bottom boundary yield the so-called
:meth:`~sage.combinat.growth.RulePartitions.Q_symbol`. These
coincide with the output of the classical
:func:`~sage.combinat.rsk.RSK` insertion algorithm::
sage: ascii_art([G.P_symbol(), G.Q_symbol()])
[ 1 3 4 5 1 2 3 6 ]
[ 2 6 , 4 5 ]
sage: ascii_art(RSK(w))
[ 1 3 4 5 1 2 3 6 ]
[ 2 6 , 4 5 ]
The filling can be recovered knowing the partitions labelling the
corners of the bottom and the right boundary alone, by repeatedly
applying the :meth:`~sage.combinat.growth.RuleRSK.backward_rule`.
Therefore, to initialize a :class:`GrowthDiagram`, we can provide
these labels instead of the filling::
sage: GrowthDiagram(RuleRSK, labels=G.out_labels())
0 0 0 1 0 0
1 0 0 0 0 0
0 1 0 0 0 0
0 0 0 0 1 0
0 0 0 0 0 1
0 0 1 0 0 0
Invocation
----------
In general, growth diagrams are defined for `0-1`-fillings of
arbitrary skew shapes. In the case of the Robinson-Schensted-Knuth
correspondence, even arbitrary non-negative integers are allowed. In
other cases, entries may be either zero or an `r`-th root of unity -
for example, :class:`~sage.combinat.growth.RuleDomino` insertion is
defined for signed permutations, that is, `r=2`. Traditionally, words
and permutations are also used to specify a filling in special cases.
To accommodate all this, the filling may be passed in various ways.
The most general possibility is to pass a dictionary of coordinates
to (signed) entries, where zeros can be omitted. In this case, when
the parameter ``shape`` is not explicitly specified, it is assumed
to be the minimal rectangle containing the origin and all coordinates
with non-zero entries.
For example, consider the following generalized permutation::
1 2 2 2 4 4
4 2 3 3 2 3
that we encode as the dictionary::
sage: P = {(1-1,4-1): 1, (2-1,2-1): 1, (2-1,3-1): 2, (4-1,2-1): 1, (4-1,3-1): 1}
Note that we are subtracting `1` from all entries because of
zero-based indexing, we obtain::
sage: GrowthDiagram(RuleRSK, P)
0 0 0 0
0 1 0 1
0 2 0 1
1 0 0 0
Alternatively, we could create the same growth diagram using a
matrix.
Let us also mention that one can pass the arguments specifying
a growth diagram directly to the rule::
sage: RuleRSK(P)
0 0 0 0
0 1 0 1
0 2 0 1
1 0 0 0
In contrast to the classical insertion algorithms, growth diagrams
immediately generalize to fillings whose shape is an arbitrary skew
partition::
sage: GrowthDiagram(RuleRSK, [3,1,2], shape=SkewPartition([[3,3,2],[1,1]]))
. 1 0
. 0 1
1 0
As an important example, consider the Stanley-Sundaram correspondence
between oscillating tableaux and (partial) perfect matchings.
Perfect matchings of `\{1, \ldots, 2r\}` are in bijection with
`0-1`-fillings of a triangular shape with `2r-1` rows, such that for
each `k` there is either exactly one non-zero entry in row `k` or
exactly one non-zero entry in column `2r-k`. Explicitly, if `(i,j)`
is a pair in the perfect matching, the entry in column `i-1` and row
`2r-j` equals `1`. For example::
sage: m = [[1,5],[3,4],[2,7],[6,8]]
sage: G = RuleRSK({(i-1, 8-j): 1 for i,j in m}, shape=[7,6,5,4,3,2,1]); G
0 0 0 0 0 1 0
0 1 0 0 0 0
0 0 0 0 0
1 0 0 0
0 0 1
0 0
0
The partitions labelling the bottom-right corners along the boundary
opposite of the origin then form a so-called oscillating tableau -
the remaining partitions along the bottom-right boundary are
redundant::
sage: G.out_labels()[1::2]
[[1], [1, 1], [2, 1], [1, 1], [1], [1, 1], [1]]
Another great advantage of growth diagrams is that we immediately
have access to a skew version of the correspondence, by providing
different initialization for the labels on the side of the origin.
We reproduce the original example of Bruce Sagan and Richard Stanley,
see also Tom Roby's thesis [Rob1991]_::
sage: w = {(1-1,4-1): 1, (2-1,2-1): 1, (4-1,3-1): 1}
sage: T = SkewTableau([[None, None], [None, 5], [1]])
sage: U = SkewTableau([[None, None], [None, 3], [5]])
sage: labels = T.to_chain()[::-1] + U.to_chain()[1:]
sage: G = GrowthDiagram(RuleRSK, filling=w, shape=[5,5,5,5,5], labels=labels); G
0 0 0 0 0
0 1 0 0 0
0 0 0 1 0
1 0 0 0 0
0 0 0 0 0
sage: ascii_art([G.P_symbol(), G.Q_symbol()])
[ . . 2 3 . . 1 4 ]
[ . . . . ]
[ . 4 . 2 ]
[ 1 3 ]
[ 5 , 5 ]
Similarly, there is a correspondence for skew oscillating tableau.
Let us conclude by reproducing Example 4.2.6 from [Rob1991]_. The
oscillating tableau, as given, is::
sage: o = [[2,1],[2,2],[3,2],[4,2],[4,1],[4,1,1],[3,1,1],[3,1],[3,2],[3,1],[2,1]]
From this, we have to construct the list of labels of the corners
along the bottom-right boundary. The labels with odd indices are
given by the oscillating tableau, the other labels are obtained by
taking the smaller of the two neighbouring partitions::
sage: l = [o[i//2] if is_even(i) else min(o[(i-1)//2],o[(i+1)//2])
....: for i in range(2*len(o)-1)]
sage: la = list(range(len(o)-2, 0, -1))
sage: G = RuleRSK(labels=l[1:-1], shape=la); G
0 0 0 0 0 0 0 1 0
0 1 0 0 0 0 0 0
0 0 0 0 0 0 0
0 0 0 0 0 0
0 0 1 0 0
0 0 0 0
0 0 0
0 0
0
The skew tableaux can now be read off the partitions labelling the
left and the top boundary. These can be accessed using the method
:meth:`~sage.combinat.growth.GrowthDiagram.in_labels`::
sage: ascii_art(SkewTableau(chain=G.in_labels()[len(o)-2:]),
....: SkewTableau(chain=G.in_labels()[len(o)-2::-1]))
. 1 . 7
5 4
Rules currently available
-------------------------
As mentioned at the beginning, the Robinson-Schensted-Knuth
correspondence is just a special case of growth diagrams. In
particular, we have implemented the following local rules:
- RSK (:class:`~sage.combinat.growth.RuleRSK`).
- A variation of RSK originally due to Burge
(:class:`~sage.combinat.growth.RuleBurge`).
- A correspondence producing binary words originally due to Viennot
(:class:`~sage.combinat.growth.RuleBinaryWord`).
- A correspondence producing domino tableaux
(:class:`~sage.combinat.growth.RuleDomino`) originally due
to Barbasch and Vogan.
- A correspondence for shifted shapes
(:class:`~sage.combinat.growth.RuleShiftedShapes`), where the
original insertion algorithm is due to Sagan and Worley, and Haiman.
- The Sylvester correspondence, producing binary trees
(:class:`~sage.combinat.growth.RuleSylvester`).
- The Young-Fibonacci correspondence
(:class:`~sage.combinat.growth.RuleYoungFibonacci`).
- LLMS insertion (:class:`~sage.combinat.growth.RuleLLMS`).
Background
----------
At the heart of Fomin's framework is the notion of dual graded
graphs. This is a pair of digraphs `P, Q` (multiple edges being
allowed) on the same set of vertices `V`, that satisfy the following
conditions:
* the graphs are graded, that is, there is a function `\rho : V \to
\NN`, such that for any edge `(v, w)` of `P` and also of `Q` we
have `\rho(w) = \rho(v) + 1`,
* there is a vertex `0` with rank zero, and
* there is a positive integer `r` such that `DU = UD + rI` on the
free `\ZZ`-module `\ZZ[V]`, where `D` is the down operator of `Q`,
assigning to each vertex the formal sum of its predecessors, `U` is
the up operator of `P`, assigning to each vertex the formal sum of
its successors, and `I` is the identity operator.
Note that the condition `DU = UD + rI` is symmetric with respect to
the interchange of the graphs `P` and `Q`, because the up operator of
a graph is the transpose of its down operator.
For example, taking for both `P` and `Q` to be Young's lattice and
`r=1`, we obtain the dual graded graphs for classical Schensted
insertion.
Given such a pair of graphs, there is a bijection between the
`r`-colored permutations on `k` letters and pairs `(p, q)`, where `p`
is a path in `P` from zero to a vertex of rank `k` and `q` is a path
in `Q` from zero to the same vertex.
It turns out that - in principle - this bijection can always be
described by so-called local forward and backward rules, see
[Fom1995]_ for a detailed description. Knowing at least the forward
rules, or the backward rules, you can implement your own growth
diagram class.
Implementing your own growth diagrams
-------------------------------------
The class :class:`GrowthDiagram` is written so that it is easy to
implement growth diagrams you come across in your research.
Moreover, the class tolerates some deviations from Fomin's
definitions. For example, although the general
Robinson-Schensted-Knuth correspondence between integer matrices and
semistandard tableaux is, strictly speaking, not a growth on dual
graded graphs, it is supported by our framework.
For illustration, let us implement a growth diagram class with the
backward rule only. Suppose that the vertices of the graph are the
non-negative integers, the rank is given by the integer itself, and
the backward rule is `(y, z, x) \mapsto (\min(x,y), 0)` if `y = z`
or `x = z` and `(y, z, x) \mapsto (\min(x,y), 1)` otherwise.
We first need to import the base class for a rule::
sage: from sage.combinat.growth import Rule
Next, we implement the backward rule and the rank function and
provide the bottom element ``zero`` of the graph. For more
information, see :class:`~sage.combinat.growth.Rule`. ::
sage: class RulePascal(Rule):
....: zero = 0
....: def rank(self, v): return v
....: def backward_rule(self, y, z, x):
....: return (min(x,y), 0 if y==z or x==z else 1)
We can now compute the filling corresponding to a sequence of labels
as follows::
sage: GrowthDiagram(RulePascal(), labels=[0,1,2,1,2,1,0])
1 0 0
0 0 1
0 1
Of course, since we have not provided the forward rule, we cannot
compute the labels belonging to a filling::
sage: GrowthDiagram(RulePascal(), [3,1,2])
Traceback (most recent call last):
...
AttributeError: 'RulePascal' object has no attribute 'forward_rule'
We now re-implement the rule where we provide the dual graded graphs::
sage: class RulePascal(Rule):
....: zero = 0
....: def rank(self, v): return v
....: def backward_rule(self, y, z, x):
....: return (min(x,y), 0 if y==z or x==z else 1)
....: def vertices(self, n): return [n]
....: def is_P_edge(self, v, w): return w == v + 1
....: def is_Q_edge(self, v, w): return w == v + 1
Are they really dual? ::
sage: RulePascal()._check_duality(3)
Traceback (most recent call last):
...
ValueError: D U - U D differs from 1 I for vertex 3:
D U = [3]
U D + 1 I = [3, 3]
With our current definition, duality fails - in fact, there are no
dual graded graphs on the integers without multiple edges.
Consequently, also the backward rule cannot work as ``backward_rule``
requires additional information (the edge labels as arguments).
Let us thus continue with the example from Section 4.7 of [Fom1995]_
instead, which defines dual graded graphs with multiple edges on the
integers. The color ``self.zero_edge``, which defaults to ``0`` is
reserved for degenerate edges, but may be abused for the unique edge
if one of the graphs has no multiple edges. For greater clarity in
this example we set it to ``None``::
sage: class RulePascal(Rule):
....: zero = 0
....: has_multiple_edges = True
....: zero_edge = None
....: def rank(self, v): return v
....: def vertices(self, n): return [n]
....: def is_P_edge(self, v, w): return [0] if w == v + 1 else []
....: def is_Q_edge(self, v, w): return list(range(w)) if w == v+1 else []
We verify these are `1` dual at level `5`::
sage: RulePascal()._check_duality(5)
Finally, let us provide the backward rule. The arguments of the rule
are vertices together with the edge labels now, specifying the path
from the lower left to the upper right of the cell. The horizontal
edges come from `Q`, whereas the vertical edges come from `P`.
Thus, the definition in Section 4.7 of [Fom1995]_ translates as
follows::
sage: class RulePascal(Rule):
....: zero = 0
....: has_multiple_edges = True
....: zero_edge = None
....: def rank(self, v): return v
....: def vertices(self, n): return [n]
....: def is_P_edge(self, v, w): return [0] if w == v + 1 else []
....: def is_Q_edge(self, v, w): return list(range(w)) if w == v+1 else []
....: def backward_rule(self, y, g, z, h, x):
....: if g is None:
....: return (0, x, None, 0)
....: if h is None:
....: return (None, y, g, 0)
....: if g == 0:
....: return (None, y, None, 1)
....: else:
....: return (0, x-1, g-1, 0)
The labels are now alternating between vertices and edge-colors::
sage: GrowthDiagram(RulePascal(), labels=[0,0,1,0,2,0,1,0,0])
1 0
0 1
sage: GrowthDiagram(RulePascal(), labels=[0,0,1,1,2,0,1,0,0])
0 1
1 0
"""
# ****************************************************************************
# Copyright (C) 2017 Martin Rubey <martin.rubey at tuwien.ac.at>
# 2017 Travis Scrimshaw <tcscrims at gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
# ***************************************************************************
from copy import copy
from itertools import zip_longest
from sage.structure.sage_object import SageObject
from sage.structure.unique_representation import UniqueRepresentation
from sage.combinat.posets.posets import Poset
from sage.combinat.words.word import Word
from sage.combinat.words.words import Words
from sage.combinat.binary_tree import BinaryTree, BinaryTrees, LabelledBinaryTree
from sage.combinat.composition import Compositions
from sage.combinat.partition import _Partitions, Partitions
from sage.combinat.skew_partition import SkewPartition
from sage.combinat.skew_tableau import SkewTableau
from sage.combinat.core import Core, Cores
from sage.combinat.k_tableau import WeakTableau, StrongTableau
from sage.combinat.shifted_primed_tableau import ShiftedPrimedTableau
from sage.graphs.digraph import DiGraph
def _make_partition(l):
    """
    Return the list ``l`` as a partition.

    This is intended to be fast, so the validity checks performed by
    ``_Partitions(l)`` are bypassed; the caller must ensure that ``l``
    is a weakly decreasing list of non-negative integers.

    TESTS::

        sage: from sage.combinat.growth import _make_partition
        sage: p = _make_partition([3,2,1,0]); p
        [3, 2, 1]

        sage: p.parent()
        Partitions
    """
    # calling ``element_class`` directly skips input validation
    return _Partitions.element_class(_Partitions, l)
class GrowthDiagram(SageObject):
r"""
A generalized Schensted growth diagram in the sense of Fomin.
Growth diagrams were introduced by Sergey Fomin [Fom1994]_,
[Fom1995]_ and provide a vast generalization of the
Robinson-Schensted-Knuth (RSK) correspondence between matrices
with non-negative integer entries and pairs of semistandard Young
tableaux of the same shape.
A growth diagram is based on the notion of *dual graded graphs*,
a pair of digraphs `P, Q` (multiple edges being allowed) on the
same set of vertices `V`, that satisfy the following conditions:
* the graphs are graded, that is, there is a function `\rho:
V \to \NN`, such that for any edge `(v, w)` of `P` and also
of `Q` we have `\rho(w) = \rho(v) + 1`,
* there is a vertex `0` with rank zero, and
* there is a positive integer `r` such that `DU = UD + rI` on the
free `\ZZ`-module `\ZZ[V]`, where `D` is the down operator of
`Q`, assigning to each vertex the formal sum of its
predecessors, `U` is the up operator of `P`, assigning to each
vertex the formal sum of its successors, and `I` is the
identity operator.
Growth diagrams are defined by providing a pair of local rules: a
'forward' rule, whose input are three vertices `y`, `t` and `x`
of the dual graded graphs and an integer, and whose output is a
fourth vertex `z`. This rule should be invertible in the
following sense: there is a so-called 'backward' rule that
recovers the integer and `t` given `y`, `z` and `x`.
All implemented growth diagram rules are available by
``GrowthDiagram.rules.<tab>``. The current list is:
- :class:`~sage.combinat.growth.RuleRSK` -- RSK
- :class:`~sage.combinat.growth.RuleBurge` -- a variation of RSK
originally due to Burge
- :class:`~sage.combinat.growth.RuleBinaryWord` -- a correspondence
producing binary words originally due to Viennot
- :class:`~sage.combinat.growth.RuleDomino` -- a correspondence
producing domino tableaux originally due to Barbasch and Vogan
- :class:`~sage.combinat.growth.RuleShiftedShapes` -- a correspondence
for shifted shapes, where the original insertion algorithm is due
to Sagan and Worley, and Haiman.
- :class:`~sage.combinat.growth.RuleSylvester` -- the Sylvester
correspondence, producing binary trees
- :class:`~sage.combinat.growth.RuleYoungFibonacci` -- the
Young-Fibonacci correspondence
- :class:`~sage.combinat.growth.RuleLLMS` -- LLMS insertion
INPUT:
- ``rule`` -- :class:`~sage.combinat.growth.Rule`;
the growth diagram rule
- ``filling`` -- (optional) a dictionary whose keys are coordinates
and values are integers, a list of lists of integers, or a word
with integer values; if a word, then negative letters but without
repetitions are allowed and interpreted as coloured permutations
- ``shape`` -- (optional) a (possibly skew) partition
- ``labels`` -- (optional) a list that specifies a path whose length
in the half-perimeter of the shape; more details given below
If ``filling`` is not given, then the growth diagram is determined
by applying the backward rule to ``labels`` decorating the
boundary opposite of the origin of the ``shape``. In this case,
``labels`` are interpreted as labelling the boundary opposite of
the origin.
Otherwise, ``shape`` is inferred from ``filling`` or ``labels`` if
possible and ``labels`` is set to ``rule.zero`` if not specified.
Here, ``labels`` are labelling the boundary on the side of the origin.
For ``labels``, if ``rule.has_multiple_edges`` is ``True``, then the
elements should be of the form `(v_1, e_1, \ldots, e_{n-1}, v_n)`,
where `n` is the half-perimeter of ``shape``, and `(v_{i-1}, e_i, v_i)`
is an edge in the dual graded graph for all `i`. Otherwise, it is a
list of `n` vertices.
.. NOTE::
Coordinates are of the form ``(col, row)`` where the origin is
in the upper left, to be consistent with permutation matrices
and skew tableaux (in English convention). This is different
from Fomin's convention, who uses a Cartesian coordinate system.
Conventions are chosen such that for permutations, the same
growth diagram is constructed when passing the permutation
matrix instead.
EXAMPLES:
We create a growth diagram using the forward RSK rule and a permutation::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: pi = Permutation([4, 1, 2, 3])
sage: G = GrowthDiagram(RuleRSK, pi); G
0 1 0 0
0 0 1 0
0 0 0 1
1 0 0 0
sage: G.out_labels()
[[], [1], [1, 1], [2, 1], [3, 1], [3], [2], [1], []]
Passing the permutation matrix instead gives the same result::
sage: G = GrowthDiagram(RuleRSK, pi.to_matrix())
sage: ascii_art([G.P_symbol(), G.Q_symbol()])
[ 1 2 3 1 3 4 ]
[ 4 , 2 ]
We give the same example but using a skew shape::
sage: shape = SkewPartition([[4,4,4,2],[1,1]])
sage: G = GrowthDiagram(RuleRSK, pi, shape=shape); G
. 1 0 0
. 0 1 0
0 0 0 1
1 0
sage: G.out_labels()
[[], [1], [1, 1], [1], [2], [3], [2], [1], []]
We construct a growth diagram using the backwards RSK rule by
specifying the labels::
sage: GrowthDiagram(RuleRSK, labels=G.out_labels())
0 1 0 0
0 0 1 0
0 0 0 1
1 0
"""
    def __init__(self, rule, filling=None, shape=None, labels=None):
        r"""
        Initialize ``self``.

        Exactly one of two modes is used: if ``filling`` is given, the
        forward rule is applied (growing from the origin); otherwise
        ``labels`` must be given and the backward rule reconstructs the
        filling (shrinking towards the origin).

        TESTS::

            sage: RuleRSK = GrowthDiagram.rules.RSK()
            sage: w = [3,3,2,4,1]; G = GrowthDiagram(RuleRSK, w)
            sage: [G.P_symbol(), G.Q_symbol()]
            [[[1, 3, 4], [2], [3]], [[1, 2, 4], [3], [5]]]
            sage: RSK(w)
            [[[1, 3, 4], [2], [3]], [[1, 2, 4], [3], [5]]]

            sage: TestSuite(G).run()

            sage: GrowthDiagram(RuleRSK)
            Traceback (most recent call last):
            ...
            ValueError: please provide a filling or a sequence of labels
        """
        if not isinstance(rule, Rule):
            raise TypeError("the rule must be an instance of Rule")
        self.rule = rule

        if filling is None:
            # backward mode: ``labels`` decorate the boundary opposite
            # the origin; the filling is recovered by shrinking
            if labels is None:
                raise ValueError("please provide a filling or a sequence of labels")
            labels = self._process_labels(labels)

            if shape is None:
                # infer the shape from the number of labels
                shape = self._shape_from_labels(labels)

            self._lambda, self._mu = self._process_shape(shape)
            self._out_labels = labels
            self._check_labels(self._out_labels)
            self._shrink()
        else:
            # forward mode: ``labels`` (default: all ``rule.zero``)
            # decorate the boundary on the side of the origin
            self._filling, (self._lambda, self._mu) = self._process_filling_and_shape(filling, shape)

            if labels is None:
                rule = self.rule
                if rule.has_multiple_edges:
                    # labels alternate between vertices and edge colors
                    self._in_labels = [rule.zero, rule.zero_edge]*(self.half_perimeter()-1) + [rule.zero]
                else:
                    self._in_labels = [rule.zero] * self.half_perimeter()
            else:
                labels = self._process_labels(labels)
                self._in_labels = labels

            self._check_labels(self._in_labels)
            self._grow()
    def filling(self):
        r"""
        Return the filling of the diagram as a dictionary.

        The keys are ``(col, row)`` coordinates (origin in the upper
        left) and the values are the entries; zero entries are omitted.

        EXAMPLES::

            sage: RuleRSK = GrowthDiagram.rules.RSK()
            sage: G = GrowthDiagram(RuleRSK, [[0,1,0], [1,0,2]])
            sage: G.filling()
            {(0, 1): 1, (1, 0): 1, (2, 1): 2}
        """
        return self._filling
def conjugate(self):
r"""
Return the conjugate growth diagram of ``self``.
This is the growth diagram with the filling reflected over the
main diagonal.
The sequence of labels along the boundary on the side of the
origin is the reversal of the corresponding sequence of the
original growth diagram.
When the filling is a permutation, the conjugate filling
corresponds to its inverse.
EXAMPLES::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: G = GrowthDiagram(RuleRSK, [[0,1,0], [1,0,2]])
sage: Gc = G.conjugate()
sage: (Gc.P_symbol(), Gc.Q_symbol()) == (G.Q_symbol(), G.P_symbol())
True
TESTS:
Check that labels and shape are handled correctly::
sage: o = [[2,1],[2,2],[3,2],[4,2],[4,1],[4,1,1],[3,1,1],[3,1],[3,2],[3,1],[2,1]]
sage: l = [o[i//2] if is_even(i) else min(o[(i-1)//2],o[(i+1)//2])
....: for i in range(2*len(o)-1)]
sage: la = list(range(len(o)-2, 0, -1))
sage: G = RuleRSK(labels=l[1:-1], shape=la)
sage: G.out_labels() == G.conjugate().out_labels()[::-1]
True
"""
F = {(j,i): v for (i,j),v in self._filling.items()}
return GrowthDiagram(self.rule,
filling=F,
shape=self.shape().conjugate(),
labels=self.in_labels()[::-1])
def rotate(self):
r"""
Return the growth diagram with the filling rotated by 180 degrees.
The rotated growth diagram is initialized with
``labels=None``, that is, all labels along the boundary on
the side of the origin are set to ``rule.zero``.
For RSK-growth diagrams and rectangular fillings, this
corresponds to evacuation of the `P`- and the `Q`-symbol.
EXAMPLES::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: G = GrowthDiagram(RuleRSK, [[0,1,0], [1,0,2]])
sage: Gc = G.rotate()
sage: ascii_art([Gc.P_symbol(), Gc.Q_symbol()])
[ 1 1 1 1 1 2 ]
[ 2 , 3 ]
sage: ascii_art([Tableau(t).evacuation()
....: for t in [G.P_symbol(), G.Q_symbol()]])
[ 1 1 1 1 1 2 ]
[ 2 , 3 ]
TESTS:
Check that shape is handled correctly::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: G = GrowthDiagram(RuleRSK,
....: filling={(0,2):1, (3,1):2, (2,1):3},
....: shape=SkewPartition([[5,5,5,3],[3,1]]))
sage: G
. . . 0 0
. 0 3 2 0
1 0 0 0 0
0 0 0
sage: G.rotate()
. . 0 0 0
0 0 0 0 1
0 2 3 0
0 0
"""
l = self._lambda[0]
h = len(self._lambda)
shape_lambda = [l-p for p in self._mu] + [l]*(h-len(self._mu))
shape_mu = [l-p for p in self._lambda]
shape = SkewPartition([shape_lambda[::-1], shape_mu[::-1]])
F = {(l-i-1, h-j-1): v for (i,j),v in self._filling.items()}
return GrowthDiagram(self.rule,
filling=F,
shape=shape)
def half_perimeter(self):
r"""
Return half the perimeter of the shape of the growth diagram.
TESTS::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: G = GrowthDiagram(RuleRSK, {(0,1):1, (2,0):1}, SkewPartition([[3,1],[1]])); G
. 0 1
1
sage: G.half_perimeter()
6
"""
# Assume that ``self._lambda`` is already set.
if not self._lambda:
return 1
return self._lambda[0] + len(self._lambda) + 1
    def shape(self):
        r"""
        Return the shape of the growth diagram as a skew partition.

        .. WARNING::

            In the literature the label on the corner opposite of the
            origin of a rectangular filling is often called the shape
            of the filling.  This method returns the shape of the
            region instead.

        EXAMPLES::

            sage: RuleRSK = GrowthDiagram.rules.RSK()
            sage: GrowthDiagram(RuleRSK, [1]).shape()
            [1] / []
        """
        return SkewPartition([self._lambda, self._mu])
    def out_labels(self):
        r"""
        Return the labels along the boundary opposite of the origin.

        The labels are listed from the bottom left to the top right.

        EXAMPLES::

            sage: RuleRSK = GrowthDiagram.rules.RSK()
            sage: G = GrowthDiagram(RuleRSK, [[0,1,0], [1,0,2]])
            sage: G.out_labels()
            [[], [1], [1, 1], [3, 1], [1], []]
        """
        return self._out_labels
    def in_labels(self):
        r"""
        Return the labels along the boundary on the side of the origin.

        EXAMPLES::

            sage: RuleRSK = GrowthDiagram.rules.RSK()
            sage: G = GrowthDiagram(RuleRSK, labels=[[2,2],[3,2],[3,3],[3,2]]); G
            1 0

            sage: G.in_labels()
            [[2, 2], [2, 2], [2, 2], [3, 2]]
        """
        return self._in_labels
    def P_symbol(self):
        r"""
        Return the labels along the vertical boundary of a rectangular
        growth diagram as a generalized standard tableau.

        The conversion of the chain to a tableau is delegated to the rule.

        EXAMPLES::

            sage: RuleRSK = GrowthDiagram.rules.RSK()
            sage: G = GrowthDiagram(RuleRSK, [[0,1,0], [1,0,2]])
            sage: ascii_art([G.P_symbol(), G.Q_symbol()])
            [ 1 2 2 1 3 3 ]
            [ 2 , 2 ]
        """
        return self.rule.P_symbol(self.P_chain())
    def Q_symbol(self):
        r"""
        Return the labels along the horizontal boundary of a rectangular
        growth diagram as a generalized standard tableau.

        The conversion of the chain to a tableau is delegated to the rule.

        EXAMPLES::

            sage: RuleRSK = GrowthDiagram.rules.RSK()
            sage: G = GrowthDiagram(RuleRSK, [[0,1,0], [1,0,2]])
            sage: ascii_art([G.P_symbol(), G.Q_symbol()])
            [ 1 2 2 1 3 3 ]
            [ 2 , 2 ]
        """
        return self.rule.Q_symbol(self.Q_chain())
def P_chain(self):
r"""
Return the labels along the vertical boundary of a rectangular
growth diagram.
EXAMPLES::
sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
sage: G = GrowthDiagram(BinaryWord, [4, 1, 2, 3])
sage: G.P_chain()
[word: , word: 1, word: 11, word: 111, word: 1011]
Check that :trac:`25631` is fixed::
sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
sage: BinaryWord(filling = {}).P_chain()
[word: ]
"""
if not self.is_rectangular():
raise ValueError("the P symbol is only defined for rectangular shapes")
if self._lambda:
if self.rule.has_multiple_edges:
r = 2*self._lambda[0]
else:
r = self._lambda[0]
else:
r = 0
return self._out_labels[r:][::-1]
def Q_chain(self):
r"""
Return the labels along the horizontal boundary of a rectangular
growth diagram.
EXAMPLES::
sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
sage: G = GrowthDiagram(BinaryWord, [[0,1,0,0], [0,0,1,0], [0,0,0,1], [1,0,0,0]])
sage: G.Q_chain()
[word: , word: 1, word: 10, word: 101, word: 1011]
Check that :trac:`25631` is fixed::
sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
sage: BinaryWord(filling = {}).Q_chain()
[word: ]
"""
if not self.is_rectangular():
raise ValueError("the Q symbol is only defined for rectangular shapes")
if self._lambda:
if self.rule.has_multiple_edges:
r = 2*self._lambda[0]+1
else:
r = self._lambda[0]+1
else:
r = 1
return self._out_labels[:r]
def is_rectangular(self):
r"""
Return ``True`` if the shape of the growth diagram is rectangular.
EXAMPLES::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: GrowthDiagram(RuleRSK, [2,3,1]).is_rectangular()
True
sage: GrowthDiagram(RuleRSK, [[1,0,1],[0,1]]).is_rectangular()
False
"""
return (all(x == 0 for x in self._mu)
and all(x == self._lambda[0] for x in self._lambda))
def to_word(self):
r"""
Return the filling as a word, if the shape is rectangular and
there is at most one nonzero entry in each column, which must
be 1.
EXAMPLES::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: w = [3,3,2,4,1]; G = GrowthDiagram(RuleRSK, w)
sage: G
0 0 0 0 1
0 0 1 0 0
1 1 0 0 0
0 0 0 1 0
sage: G.to_word()
[3, 3, 2, 4, 1]
"""
if not self.is_rectangular():
raise ValueError("can only convert fillings of rectangular shapes to words")
w = [0] * self._lambda[0]
for ((i,j), v) in self._filling.items():
if v != 0:
if v == 1:
if w[i] == 0:
w[i] = j+1
else:
raise ValueError("can only convert fillings with at"
" most one entry per column to words")
elif v == -1:
if w[i] == 0:
w[i] = -(j+1)
else:
raise ValueError("can only convert fillings with at"
" most one entry per column to words")
else:
raise ValueError("can only convert 0-1 fillings to words;"
" try 'to_biword'")
return w
def to_biword(self):
r"""
Return the filling as a biword, if the shape is rectangular.
EXAMPLES::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: P = Tableau([[1,2,2],[2]])
sage: Q = Tableau([[1,3,3],[2]])
sage: bw = RSK_inverse(P, Q); bw
[[1, 2, 3, 3], [2, 1, 2, 2]]
sage: G = GrowthDiagram(RuleRSK, labels=Q.to_chain()[:-1]+P.to_chain()[::-1]); G
0 1 0
1 0 2
sage: P = SemistandardTableau([[1, 1, 2], [2]])
sage: Q = SemistandardTableau([[1, 2, 2], [2]])
sage: G = GrowthDiagram(RuleRSK, labels=Q.to_chain()[:-1]+P.to_chain()[::-1]); G
0 2
1 1
sage: G.to_biword()
([1, 2, 2, 2], [2, 1, 1, 2])
sage: RSK([1, 2, 2, 2], [2, 1, 1, 2])
[[[1, 1, 2], [2]], [[1, 2, 2], [2]]]
"""
if not self.is_rectangular():
raise ValueError("can only convert fillings of rectangular shapes to words")
w1 = []
w2 = []
for ((i,j), v) in sorted(self._filling.items()):
if v >= 0:
w1.extend([i+1]*v)
w2.extend([j+1]*v)
else:
raise ValueError("can only convert fillings with"
" non-negative entries to words")
return (w1, w2)
def __iter__(self):
r"""
Return the rows of the filling.
TESTS::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: G = GrowthDiagram(RuleRSK, {(0,1):1, (1,0):1}, SkewPartition([[2,1],[1]]))
sage: list(G)
[[None, 1], [1]]
sage: pi = Permutation([2,3,1,6,4,5])
sage: G = GrowthDiagram(RuleRSK, pi)
sage: list(G)
[[0, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0]]
"""
return ([None]*self._mu[r] + [self._filling.get((self._mu[r]+j,r), 0)
for j in range(self._lambda[r]-self._mu[r])]
for r in range(len(self._lambda)))
def _repr_(self):
r"""
Return a string with the filling of the growth diagram
as a skew tableau.
TESTS::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: GrowthDiagram(RuleRSK, {(0,1):1, (1,0):1}, SkewPartition([[2,1],[1]]))
. 1
1
sage: GrowthDiagram(RuleRSK, {(0,1):1, (2,0):1}, SkewPartition([[3,1],[1]]))
. 0 1
1
"""
return SkewTableau(expr=[self._mu,
[[self._filling.get((self._mu[r]+j,r), 0)
for j in range(self._lambda[r]-self._mu[r])]
for r in range(len(self._lambda))][::-1]])._repr_diagram()
def __eq__(self, other):
r"""
Return ``True`` if the growth diagram ``other`` has the same
shape and the same filling as ``self``.
EXAMPLES:
Equality ignores zeros in fillings::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: G1 = GrowthDiagram(RuleRSK, {(0, 1): 1, (1, 0): 1})
sage: G2 = GrowthDiagram(RuleRSK, {(0, 0): 0, (0, 1): 1, (1, 0): 1})
sage: G1 == G2
True
Growth diagrams with different shapes are different::
sage: G1 = GrowthDiagram(RuleRSK, [[0,1,0],[1,0]])
sage: G2 = GrowthDiagram(RuleRSK, [[0,1,0],[1]])
sage: G1 == G2
False
Growth diagrams with different rules are different::
sage: G1 = GrowthDiagram(RuleRSK, {(0, 1): 1, (1, 0): 1})
sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
sage: G2 = GrowthDiagram(BinaryWord, {(0, 1): 1, (1, 0): 1})
sage: G1 == G2
False
"""
return (type(self) == type(other) and
self.rule == other.rule and
self._lambda == other._lambda and
self._mu == other._mu and
self._filling == other._filling)
def __ne__(self, other):
r"""
Return ``True`` if the growth diagram ``other`` does not have the
same shape and the same filling as ``self``.
TESTS:
Equality ignores zeros in fillings::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: G1 = GrowthDiagram(RuleRSK, {(0, 1): 1, (1, 0): 1})
sage: G2 = GrowthDiagram(RuleRSK, {(0, 0): 0, (0, 1): 1, (1, 0): 1})
sage: G1 != G2
False
Growth diagrams with different shapes are different::
sage: G1 = GrowthDiagram(RuleRSK, [[0,1,0],[1,0]])
sage: G2 = GrowthDiagram(RuleRSK, [[0,1,0],[1]])
sage: G1 != G2
True
Growth diagrams with different rules are different::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
sage: G1 = GrowthDiagram(RuleRSK, {(0, 1): 1, (1, 0): 1})
sage: G2 = GrowthDiagram(BinaryWord, {(0, 1): 1, (1, 0): 1})
sage: G1 != G2
True
"""
return not (self == other)
def _process_labels(self, labels):
r"""
Return the list of labels such that each element has the
correct type from the rule.
.. WARNING::
Assumes that ``self.rule`` is set.
EXAMPLES::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: labels = [[], [1], [1,1], [1], []]
sage: G = GrowthDiagram(RuleRSK, labels=labels) # indirect doctest
sage: G.out_labels()[2].parent()
Partitions
"""
rule = self.rule
if rule.has_multiple_edges:
return [rule.normalize_vertex(val) if i % 2 == 0 else val
for i, val in enumerate(labels)]
else:
return [rule.normalize_vertex(la) for la in labels]
    def _shape_from_labels(self, labels):
        r"""
        Determine the shape of the growth diagram given a list of labels
        during initialization.

        The shape can be determined from the labels if the size of
        each label differs from the size of its successor.
        Otherwise raise an error.

        .. WARNING::

            Assumes that ``self.rule`` and ``self.rank`` is set.

        TESTS::

            sage: RuleRSK = GrowthDiagram.rules.RSK()
            sage: labels = [[],[2],[1],[],[1],[]]
            sage: G = GrowthDiagram(RuleRSK, labels=labels); G
            0  1
            1
            1
            sage: G._shape_from_labels(G.out_labels())
            [2, 1, 1]
            sage: Shifted = GrowthDiagram.rules.ShiftedShapes()
            sage: Shifted({(0, 0): 1}).out_labels()
            [[], 1, [1], 0, []]
            sage: Shifted(labels=[[], 1, [2], 0, []])  # indirect doctest
            Traceback (most recent call last):
            ...
            ValueError: [] has smaller rank than [2] but there is no edge of color 1 in Q
        """
        # we can determine the shape even if is_P_edge is not implemented
        rule = self.rule
        # edge checks are optional; if absent, the labels are not validated
        is_P_edge = getattr(rule, "is_P_edge", None)
        is_Q_edge = getattr(rule, "is_Q_edge", None)
        if rule.has_multiple_edges:
            # labels alternate vertex, edge color, vertex, ...
            def right_left(la, mu, e):
                # return 1 for a horizontal step (rank increases along Q),
                # 0 for a vertical step (rank decreases along P)
                if rule.rank(la) < rule.rank(mu):
                    if is_Q_edge is not None and e not in is_Q_edge(la, mu):
                        raise ValueError("%s has smaller rank than %s but there is no edge of color %s in Q" % (la, mu, e))
                    return 1
                elif rule.rank(la) > rule.rank(mu):
                    if is_P_edge is not None and e not in is_P_edge(mu, la):
                        raise ValueError("%s has smaller rank than %s but there is no edge of color %s in P" % (mu, la, e))
                    return 0
                else:
                    raise ValueError("can only determine the shape of the growth"
                                     " diagram if ranks of successive labels differ")
            # the 0/1 sequence of steps encodes the boundary path of a partition
            return _Partitions.from_zero_one([right_left(labels[i], labels[i+2], labels[i+1])
                                              for i in range(0, len(labels)-2, 2)])
        else:
            def right_left(la, mu):
                # same as above, but edges carry no colors: a boolean
                # covering test replaces the color-membership test
                if rule.rank(la) < rule.rank(mu):
                    if is_Q_edge is not None and not is_Q_edge(la, mu):
                        raise ValueError("%s has smaller rank than %s but is not covered by it in Q" % (la, mu))
                    return 1
                elif rule.rank(la) > rule.rank(mu):
                    if is_P_edge is not None and not is_P_edge(mu, la):
                        raise ValueError("%s has smaller rank than %s but is not covered by it in P" % (mu, la))
                    return 0
                else:
                    raise ValueError("can only determine the shape of the growth"
                                     " diagram if ranks of successive labels differ")
            return _Partitions.from_zero_one([right_left(labels[i], labels[i+1])
                                              for i in range(len(labels)-1)])
def _check_labels(self, labels):
r"""
Check sanity of the parameter ``labels``.
.. WARNING::
Assumes that ``self.rule`` and ``self._lambda`` is set.
TESTS::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: GrowthDiagram(RuleRSK, shape=[1], labels=[[], [1]]) # indirect doctest
Traceback (most recent call last):
...
ValueError: the number of labels is 2, but for this shape we need 3
sage: GrowthDiagram(RuleRSK, labels=[[], [1], [2], [2,1]]) # indirect doctest
Traceback (most recent call last):
...
ValueError: the number of labels is 4, but for this shape we need 1
.. TODO::
Can we do something more sensible when the chain
of labels is strictly increasing?
"""
half_perimeter = self.half_perimeter()
if self.rule.has_multiple_edges:
if not (len(labels) % 2):
raise ValueError("only a list of odd length can specify a path, but %s has even length" % len(labels))
path_length = (len(labels) + 1) / 2
else:
path_length = len(labels)
if path_length != half_perimeter:
raise ValueError("the number of labels is %s, but for this shape we need %s"
% (path_length, half_perimeter))
def _process_shape(self, shape):
r"""
Return a pair of partitions as lists describing the region
of the growth diagram.
TESTS:
``shape`` is a skew partition::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: filling = []
sage: shape = SkewPartition([[4,2,1,1],[2,1,1]])
sage: G = GrowthDiagram(RuleRSK, filling, shape) # indirect doctest
sage: G._lambda, G._mu
([4, 2, 1, 1], [2, 1, 1, 0])
``shape`` is a partition::
sage: filling = []
sage: shape = Partition([3,2,1,1])
sage: G = GrowthDiagram(RuleRSK, filling, shape) # indirect doctest
sage: G._lambda, G._mu
([3, 2, 1, 1], [0, 0, 0, 0])
"""
try:
shape = _Partitions(shape)
except ValueError:
try:
shape = SkewPartition(shape)
except ValueError:
raise ValueError("cannot make sense of shape %s" % shape)
return ( list(shape[0]),
list(shape[1]) + [0]*(len(shape[0])-len(shape[1])) )
return (list(shape), [0]*len(shape))
def _process_filling_and_shape(self, filling, shape):
r"""
Return a dict ``F`` such that ``F[(i,j)]`` is the element in row
``i`` and column ``j`` and a pair of partitions describing the
region of the growth diagram.
TESTS:
``filling`` is a dict of coordinates::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: pi = Permutation([2,3,1,6,4,5])
sage: G = GrowthDiagram(RuleRSK, {(i,pi[i]-1):1 for i in range(len(pi))}) # indirect doctest
sage: G._filling
{(0, 1): 1, (1, 2): 1, (2, 0): 1, (3, 5): 1, (4, 3): 1, (5, 4): 1}
sage: G.shape()
[6, 6, 6, 6, 6, 6] / []
``filling`` is a dict of dicts::
sage: G = GrowthDiagram(RuleRSK, {i:{pi[i]-1:1} for i in range(len(pi))}) # indirect doctest
sage: G._filling
{(0, 1): 1, (1, 2): 1, (2, 0): 1, (3, 5): 1, (4, 3): 1, (5, 4): 1}
sage: G.shape()
[6, 6, 6, 6, 6, 6] / []
``filling`` is a matrix::
sage: G = GrowthDiagram(RuleRSK, pi.to_matrix()) # indirect doctest
sage: G._filling
{(0, 1): 1, (1, 2): 1, (2, 0): 1, (3, 5): 1, (4, 3): 1, (5, 4): 1}
sage: G.shape()
[6, 6, 6, 6, 6, 6] / []
``filling`` is a permutation::
sage: G = GrowthDiagram(RuleRSK, pi) # indirect doctest
sage: G._filling
{(0, 1): 1, (1, 2): 1, (2, 0): 1, (3, 5): 1, (4, 3): 1, (5, 4): 1}
sage: G.shape()
[6, 6, 6, 6, 6, 6] / []
``filling`` is a list::
sage: G = GrowthDiagram(RuleRSK, [3,1,4,1,5]) # indirect doctest
sage: G._filling
{(0, 2): 1, (1, 0): 1, (2, 3): 1, (3, 0): 1, (4, 4): 1}
sage: G.shape()
[5, 5, 5, 5, 5] / []
``filling`` is a list of lists::
sage: G = GrowthDiagram(RuleRSK, [[1,0,1],[0,1]]) # indirect doctest
sage: G._filling
{(0, 0): 1, (1, 1): 1, (2, 0): 1}
sage: G.shape()
[3, 2] / []
``filling`` is a list of lists and shape is given::
sage: G = GrowthDiagram(RuleRSK, [[1,0,1],[0,1]], shape=SkewPartition([[3,2],[1]])) # indirect doctest
sage: G._filling
{(0, 0): 1, (1, 1): 1, (2, 0): 1}
sage: G.shape()
[3, 2] / [1]
``filling`` is empty and shape is ``None``::
sage: G = GrowthDiagram(RuleRSK, {})
sage: (G.filling(), G.shape())
({}, [] / [])
"""
if isinstance(filling, dict):
try:
v = next(iter(filling.values()))
if isinstance(v, dict):
# it is a dict of dicts
F = dict()
for (i, row) in filling.items():
for (j, v) in row.items():
if v != 0:
F[(i,j)] = int(v)
else:
# it is dict of coordinates
F = {(i,j): v for ((i,j), v) in filling.items()
if v != 0}
except StopIteration:
# it is an empty dict of coordinates
F = filling
else:
# it is a sequence
F = dict()
try:
# it is a sequence of sequences
for i, row in enumerate(filling):
for j, v in enumerate(row):
if v != 0:
F[j,i] = int(v)
if shape is None:
shape = [len(row) for row in filling]
except TypeError:
# it is a word - for convenience we allow signed words
for i, l in enumerate(filling):
if l > 0:
F[i, l-1] = 1
else:
F[i, -l-1] = -1
if shape is None:
if F == {}:
shape = []
else:
# find bounding rectangle of ``filling``
max_row = max(i for i, _ in F)+1
max_col = max(j for _, j in F)+1
shape = [max_row] * max_col
return (F, self._process_shape(shape))
def _grow(self):
r"""
Compute the labels on the boundary opposite of the origin, given
the filling.
TESTS::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: pi = Permutation([1])
sage: G = GrowthDiagram(RuleRSK, pi) # indirect doctest
sage: G._out_labels
[[], [1], []]
sage: pi = Permutation([1,2])
sage: G = GrowthDiagram(RuleRSK, pi) # indirect doctest
sage: G._out_labels
[[], [1], [2], [1], []]
sage: pi = Permutation([2,1])
sage: G = GrowthDiagram(RuleRSK, pi) # indirect doctest
sage: G._out_labels
[[], [1], [1, 1], [1], []]
sage: G = GrowthDiagram(RuleRSK, {(0,1):1, (1,0):1}, SkewPartition([[2,1],[1]])) # indirect doctest
sage: G._out_labels
[[], [1], [], [1], []]
sage: G = GrowthDiagram(RuleRSK, {(1,1):1}, SkewPartition([[2,2],[1]]), labels=[[],[],[1],[],[]]) # indirect doctest
sage: G._out_labels
[[], [1], [2], [1], []]
sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
sage: G = GrowthDiagram(BinaryWord, {(1,1):1}, SkewPartition([[2,2],[1]]), labels=[[],[],[1],[],[]]) # indirect doctest
sage: G._out_labels
[word: , word: 1, word: 11, word: 1, word: ]
"""
labels = list(self._in_labels)
l = len(self._lambda)
rule = self.rule
if rule.has_multiple_edges:
for r in range(l):
for c in range(self._mu[r]+l-r, self._lambda[r]+l-r):
j = r
i = c-l+r
(labels[2*c-1],
labels[2*c],
labels[2*c+1]) = rule.forward_rule(labels[2*c-2],
labels[2*c-1],
labels[2*c],
labels[2*c+1],
labels[2*c+2],
self._filling.get((i,j), 0))
else:
for r in range(l):
for c in range(self._mu[r]+l-r, self._lambda[r]+l-r):
j = r
i = c-l+r
labels[c] = rule.forward_rule(labels[c-1],
labels[c],
labels[c+1],
self._filling.get((i,j), 0))
self._out_labels = labels
def _shrink(self):
r"""
Compute the labels on the boundary near the origin, and the filling.
TESTS::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: filling = [[0,0,1,0,0,0,0], [0,1,0,0,0,0,0], [1,0,0,0,0,0,0],
....: [0,0,0,1,0,0,0], [0,0,0,0,0,0,1],
....: [0,0,0,0,0,1,0], [0,0,0,0,1,0,0]]
sage: G = GrowthDiagram(RuleRSK, filling)
sage: list(GrowthDiagram(RuleRSK, labels=G._out_labels)) == filling
True
sage: labels = [[], [1], []]
sage: G = GrowthDiagram(RuleRSK, labels=labels) # indirect doctest
sage: G._filling
{(0, 0): 1}
sage: G._in_labels
[[], [], []]
sage: labels = [[], [1], [2], [2,1], [1,1], [1], []]
sage: G = GrowthDiagram(RuleRSK, labels=labels) # indirect doctest
sage: G._filling
{(0, 1): 1, (1, 2): 1, (2, 0): 1}
sage: G._in_labels
[[], [], [], [], [], [], []]
sage: labels = [[], [1], [2], [3], [3, 1], [3, 2], [4, 2], [4, 1], [3, 1], [2, 1], [1, 1], [1], []]
sage: G = GrowthDiagram(RuleRSK, labels=labels) # indirect doctest
sage: G._filling
{(0, 1): 1, (1, 2): 1, (2, 5): 1, (3, 0): 1, (4, 3): 1, (5, 4): 1}
sage: labels = [[],[1],[1],[2],[2],[2,1],[2]]
sage: G = GrowthDiagram(RuleRSK, labels=labels)
Traceback (most recent call last):
...
ValueError: can only determine the shape of the growth diagram
if ranks of successive labels differ
sage: G = GrowthDiagram(RuleRSK, shape=[3,2,1], labels=labels) # indirect doctest
sage: G._filling
{(1, 0): 1}
sage: G._in_labels
[[], [], [], [], [1], [1], [2]]
sage: labels = [[], [1],[1],[2],[2],[2,1],[2],[2,1],[1,1],[2,1],[1,1]]
sage: G = GrowthDiagram(RuleRSK, shape=[5,4,3,2,1], labels=labels) # indirect doctest
sage: G._filling
{(1, 2): 1, (2, 1): 1, (4, 0): 1}
sage: G._in_labels
[[], [], [], [], [], [], [1], [1], [1], [1, 1], [1, 1]]
sage: labels = [[], [1],[1],[2],[2],[2,1],[2],[2,1],[1,1],[2,1],[1,1]]
sage: G = GrowthDiagram(RuleRSK, shape=SkewPartition([[5,4,3,2,1],[3,2,1]]), labels=labels) # indirect doctest
sage: G._filling
{(1, 2): 1, (2, 1): 1, (4, 0): 1}
sage: G._in_labels
[[], [], [], [1], [1], [1], [1], [1], [1], [1, 1], [1, 1]]
"""
F = dict()
labels = list(self._out_labels)
l = len(self._lambda)
rule = self.rule
if rule.has_multiple_edges:
for r in range(l):
for c in range(self._lambda[l-r-1]+r, self._mu[l-r-1]+r, -1):
j = l-r-1
i = c-r-1
(labels[2*c-1],
labels[2*c],
labels[2*c+1], v) = rule.backward_rule(labels[2*c-2],
labels[2*c-1],
labels[2*c],
labels[2*c+1],
labels[2*c+2])
if v != 0:
F[(i,j)] = v
else:
for r in range(l):
for c in range(self._lambda[l-r-1]+r, self._mu[l-r-1]+r, -1):
j = l-r-1
i = c-r-1
labels[c], v = rule.backward_rule(labels[c-1],
labels[c],
labels[c+1])
if v != 0:
F[(i,j)] = v
self._in_labels = labels
self._filling = F
######################################################################
# ABC for rules of growth diagrams
######################################################################
class Rule(UniqueRepresentation):
    r"""
    Generic base class for a rule for a growth diagram.

    Subclasses may provide the following attributes:

    - ``zero`` -- the zero element of the vertices of the graphs

    - ``r`` -- (default: 1) the parameter in the equation `DU - UD = rI`

    - ``has_multiple_edges`` -- (default: ``False``) if the dual
      graded graph has multiple edges and therefore edges are
      triples consisting of two vertices and a label.

    - ``zero_edge`` -- (default: 0) the zero label of the
      edges of the graphs used for degenerate edges.  It is
      allowed to use this label also for other edges.

    Subclasses may provide the following methods:

    - ``normalize_vertex`` -- a function that converts its input to a
      vertex.

    - ``vertices`` -- a function that takes a non-negative integer
      as input and returns the list of vertices on this rank.

    - ``rank`` -- the rank function of the dual graded graphs.

    - ``forward_rule`` -- a function with input ``(y, t, x,
      content)`` or ``(y, e, t, f, x, content)`` if
      ``has_multiple_edges`` is ``True``.  ``(y, e, t)`` is an
      edge in the graph `P`, ``(t, f, x)`` an edge in the graph
      ``Q``.  It should return the fourth vertex ``z``, or, if
      ``has_multiple_edges`` is ``True``, the path ``(g, z, h)``
      from ``y`` to ``x``.

    - ``backward_rule`` -- a function with input ``(y, z, x)`` or
      ``(y, g, z, h, x)`` if ``has_multiple_edges`` is ``True``.
      ``(y, g, z)`` is an edge in the graph `Q`, ``(z, h, x)`` an
      edge in the graph ``P``.  It should return the fourth
      vertex and the content ``(t, content)``, or, if
      ``has_multiple_edges`` is ``True``, the path from ``y`` to
      ``x`` and the content as ``(e, t, f, content)``.

    - ``is_P_edge``, ``is_Q_edge`` -- functions that take two
      vertices as arguments and return ``True`` or ``False``, or,
      if multiple edges are allowed, the list of edge labels of
      the edges from the first vertex to the second in the
      respective graded graph.  These are only used for checking
      user input and providing the dual graded graph, and are
      therefore not mandatory.

    Note that the class :class:`GrowthDiagram` is able to use
    partially implemented subclasses just fine.  Suppose that
    ``MyRule`` is such a subclass.  Then:

    - ``GrowthDiagram(MyRule, my_filling)`` requires only an
      implementation of ``forward_rule``, ``zero`` and possibly
      ``has_multiple_edges``.

    - ``GrowthDiagram(MyRule, labels=my_labels, shape=my_shape)``
      requires only an implementation of ``backward_rule`` and
      possibly ``has_multiple_edges``, provided that the labels
      ``my_labels`` are given as needed by ``backward_rule``.

    - ``GrowthDiagram(MyRule, labels=my_labels)`` additionally needs
      an implementation of ``rank`` to deduce the shape.

    In particular, this allows to implement rules which do not quite
    fit Fomin's notion of dual graded graphs.  An example would be
    Bloom and Saracino's variant of the RSK correspondence [BS2012]_,
    where a backward rule is not available.

    Similarly:

    - ``MyRule.P_graph`` only requires an implementation of
      ``vertices``, ``is_P_edge`` and possibly ``has_multiple_edges``
      is required, mutatis mutandis for ``MyRule.Q_graph``.

    - ``MyRule._check_duality`` requires ``P_graph`` and ``Q_graph``.

    In particular, this allows to work with dual graded graphs
    without local rules.
    """
    has_multiple_edges = False  # override when necessary
    zero_edge = 0  # override when necessary
    r = 1  # override when necessary
    def normalize_vertex(self, v):  # override when necessary
        r"""
        Return ``v`` as a vertex of the dual graded graph.

        This is a default implementation, returning its argument.

        EXAMPLES::

            sage: from sage.combinat.growth import Rule
            sage: Rule().normalize_vertex("hello") == "hello"
            True
        """
        return v
    def __call__(self, *args, **kwds):
        r"""
        Return the growth diagram corresponding to the parameters.

        This provides a shorthand for calling :class:`GrowthDiagram`
        directly.

        EXAMPLES::

            sage: RuleRSK = GrowthDiagram.rules.RSK()
            sage: RuleRSK([2,3,1], shape=[3,2,2])
            0  0  1
            1  0
            0  1
            sage: RuleRSK(labels=[[], [1], [2], [1], [], [1], []])
            0  0  1
            1  0
            0  1
        """
        return GrowthDiagram(self, *args, **kwds)
    def _check_duality(self, n):
        r"""
        Raise an error if the graphs are not `r`-dual at level ``n``.

        `P` and `Q` are `r`-dual if `DU = UD + rI` on the free
        `\ZZ`-module `\ZZ[V]`, where `D` is the down operator of `Q`,
        assigning to each vertex the formal sum of its predecessors,
        `U` is the up operator of `P`, assigning to each vertex the
        formal sum of its successors, and `I` is the identity
        operator.

        INPUT:

        - ``n`` -- a positive integer specifying which rank of
          the graph to test

        EXAMPLES:

        For binary words, we have indeed provided dual graded graphs::

            sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
            sage: BinaryWord._check_duality(3)

        The following two graphs are not `1`-dual::

            sage: from sage.combinat.growth import Rule
            sage: class RuleWrong(Rule):
            ....:     def vertices(self, n): return Partitions(n)
            ....:     def is_Q_edge(self, v, w):
            ....:         return (v, w) in [([1],[2]), ([2],[3])]
            ....:     def is_P_edge(self, v, w):
            ....:         return (v, w) in [([1],[2]), ([1],[1,1]), ([2],[3])]
            sage: RuleWrong()._check_duality(2)
            Traceback (most recent call last):
            ...
            ValueError: D U - U D differs from 1 I for vertex [2]:
            D U = [[2]]
            U D + 1 I = [[1, 1], [2], [2]]
        """
        if self.has_multiple_edges:
            def check_vertex(w, P, Q):
                # DUw: endpoints of all paths up in P then down in Q;
                # edges are (tail, head, label) triples here
                DUw = [v[0] for uw in P.outgoing_edges(w) for v in Q.incoming_edges(uw[1])]
                # UDw: endpoints of all paths down in Q then up in P
                UDw = [v[1] for lw in Q.incoming_edges(w) for v in P.outgoing_edges(lw[0])]
                # duality requires DU = UD + r*I as multisets
                UDw.extend([w]*self.r)
                if sorted(DUw) != sorted(UDw):
                    raise ValueError("D U - U D differs from %s I for vertex %s:\n"
                                     "D U = %s\n"
                                     "U D + %s I = %s"
                                     % (self.r, w, DUw, self.r, UDw))
        else:
            def check_vertex(w, P, Q):
                # same check on posets, using cover relations instead of edges
                DUw = [v for uw in P.upper_covers(w) for v in Q.lower_covers(uw)]
                UDw = [v for lw in Q.lower_covers(w) for v in P.upper_covers(lw)]
                UDw.extend([w]*self.r)
                if sorted(DUw) != sorted(UDw):
                    raise ValueError("D U - U D differs from %s I for vertex %s:\n"
                                     "D U = %s\n"
                                     "U D + %s I = %s"
                                     % (self.r, w, DUw, self.r, UDw))
        # n + 2 levels are needed so that rank n vertices have
        # both their covers and their covers' covers available
        P = self.P_graph(n + 2)
        Q = self.Q_graph(n + 2)
        for w in self.vertices(n):
            check_vertex(w, P, Q)
    def P_graph(self, n):
        r"""
        Return the first ``n`` levels of the first dual graded graph.

        The non-degenerate edges in the vertical direction come from
        this graph.

        EXAMPLES::

            sage: Domino = GrowthDiagram.rules.Domino()
            sage: Domino.P_graph(3)
            Finite poset containing 8 elements
        """
        if self.has_multiple_edges:
            # with labelled edges we must return a digraph, one edge
            # per color returned by is_P_edge
            D = DiGraph([(x,y,e) for k in range(n-1)
                         for x in self.vertices(k)
                         for y in self.vertices(k+1)
                         for e in self.is_P_edge(x, y)], multiedges=True)
            # unfortunately, layout_acyclic will not show multiple edges
            # D.layout_default = D.layout_acyclic
            return D
        else:
            return Poset(([w for k in range(n) for w in self.vertices(k)],
                          lambda x, y: self.is_P_edge(x, y)),
                         cover_relations=True)
    def Q_graph(self, n):
        r"""
        Return the first ``n`` levels of the second dual graded graph.

        The non-degenerate edges in the horizontal direction come
        from this graph.

        EXAMPLES::

            sage: Domino = GrowthDiagram.rules.Domino()
            sage: Q = Domino.Q_graph(3); Q
            Finite poset containing 8 elements
            sage: Q.upper_covers(Partition([1,1]))
            [[1, 1, 1, 1], [3, 1], [2, 2]]
        """
        if self.has_multiple_edges:
            # with labelled edges we must return a digraph, one edge
            # per color returned by is_Q_edge
            D = DiGraph([(x,y,e) for k in range(n-1)
                         for x in self.vertices(k)
                         for y in self.vertices(k+1)
                         for e in self.is_Q_edge(x, y)], multiedges=True)
            # unfortunately, layout_acyclic will not show multiple edges
            # D.layout_default = D.layout_acyclic
            return D
        else:
            return Poset(([w for k in range(n) for w in self.vertices(k)],
                          lambda x,y: self.is_Q_edge(x, y)),
                         cover_relations=True)
######################################################################
# Specific rules of growth diagrams
######################################################################
class RuleShiftedShapes(Rule):
r"""
A class modelling the Schensted correspondence for shifted
shapes.
This agrees with Sagan [Sag1987]_ and Worley's [Wor1984]_, and
Haiman's [Hai1989]_ insertion algorithms, see Proposition 4.5.2
of [Fom1995]_.
EXAMPLES::
sage: Shifted = GrowthDiagram.rules.ShiftedShapes()
sage: GrowthDiagram(Shifted, [3,1,2])
0 1 0
0 0 1
1 0 0
The vertices of the dual graded graph are shifted shapes::
sage: Shifted.vertices(3)
Partitions of the integer 3 satisfying constraints max_slope=-1
Let us check the example just before Corollary 3.2 in [Sag1987]_.
Note that, instead of passing the rule to :class:`GrowthDiagram`,
we can also call the rule to create growth diagrams::
sage: G = Shifted([2,6,5,1,7,4,3])
sage: G.P_chain()
[[], 0, [1], 0, [2], 0, [3], 0, [3, 1], 0, [3, 2], 0, [4, 2], 0, [5, 2]]
sage: G.Q_chain()
[[], 1, [1], 2, [2], 1, [2, 1], 3, [3, 1], 2, [4, 1], 3, [4, 2], 3, [5, 2]]
TESTS::
sage: Shifted = GrowthDiagram.rules.ShiftedShapes()
sage: Shifted.zero
[]
sage: Shifted._check_duality(4)
Check that the rules are bijective::
sage: all(Shifted(labels=Shifted(pi).out_labels()).to_word() == pi
....: for pi in Permutations(5))
True
sage: pi = Permutations(10).random_element()
sage: G = Shifted(pi)
sage: list(Shifted(labels=G.out_labels())) == list(G)
True
"""
zero = _make_partition([])
has_multiple_edges = True
def normalize_vertex(self, v):
r"""
Return ``v`` as a partition.
EXAMPLES::
sage: Shifted = GrowthDiagram.rules.ShiftedShapes()
sage: Shifted.normalize_vertex([3,1]).parent()
Partitions
"""
return _make_partition(v)
def vertices(self, n):
r"""
Return the vertices of the dual graded graph on level ``n``.
EXAMPLES::
sage: Shifted = GrowthDiagram.rules.ShiftedShapes()
sage: Shifted.vertices(3)
Partitions of the integer 3 satisfying constraints max_slope=-1
"""
if n == 0:
return [self.zero]
else:
return Partitions(n, max_slope=-1)
def rank(self, v):
r"""
Return the rank of ``v``: the size of the shifted partition.
EXAMPLES::
sage: Shifted = GrowthDiagram.rules.ShiftedShapes()
sage: Shifted.rank(Shifted.vertices(3)[0])
3
"""
return v.size()
def is_Q_edge(self, v, w):
r"""
Return whether ``(v, w)`` is a `Q`-edge of ``self``.
``(v, w)`` is an edge if ``w`` is obtained from ``v`` by adding a
cell. It is a black (color 1) edge, if the cell is on the
diagonal, otherwise it can be blue or red (color 2 or 3).
EXAMPLES::
sage: Shifted = GrowthDiagram.rules.ShiftedShapes()
sage: v = Shifted.vertices(2)[0]; v
[2]
sage: [(w, Shifted.is_Q_edge(v, w)) for w in Shifted.vertices(3)]
[([3], [2, 3]), ([2, 1], [1])]
sage: all(Shifted.is_Q_edge(v, w) == [] for w in Shifted.vertices(4))
True
"""
if self.rank(v) + 1 != self.rank(w):
return []
try:
l = SkewPartition([w, v]).cells()
except ValueError:
return []
else:
if l[0][1] == 0:
return [1] # black
else:
return [2,3] # blue, red
def is_P_edge(self, v, w):
r"""
Return whether ``(v, w)`` is a `P`-edge of ``self``.
``(v, w)`` is an edge if ``w`` contains ``v``.
EXAMPLES::
sage: Shifted = GrowthDiagram.rules.ShiftedShapes()
sage: v = Shifted.vertices(2)[0]; v
[2]
sage: [w for w in Shifted.vertices(3) if Shifted.is_P_edge(v, w)]
[[3], [2, 1]]
"""
if self.rank(v) + 1 != self.rank(w):
return []
return [0] if w.contains(v) else []
    def P_symbol(self, P_chain):
        r"""
        Return the labels along the vertical boundary of a rectangular
        growth diagram as a shifted tableau.

        EXAMPLES:

        Check the example just before Corollary 3.2 in [Sag1987]_::

            sage: Shifted = GrowthDiagram.rules.ShiftedShapes()
            sage: G = Shifted([2,6,5,1,7,4,3])
            sage: G.P_symbol().pp()
            1  2  3  6  7
               4  5

        Check the example just before Corollary 8.2 in [SS1990]_::

            sage: T = ShiftedPrimedTableau([[4],[1],[5]], skew=[3,1])
            sage: T.pp()
            .  .  .  4
               .  1
                  5
            sage: U = ShiftedPrimedTableau([[1],[3.5],[5]], skew=[3,1])
            sage: U.pp()
            .  .  .  1
               .  4'
                  5
            sage: Shifted = GrowthDiagram.rules.ShiftedShapes()
            sage: labels = [mu if is_even(i) else 0 for i, mu in enumerate(T.to_chain()[::-1])] + U.to_chain()[1:]
            sage: G = Shifted({(1,2):1, (2,1):1}, shape=[5,5,5,5,5], labels=labels)
            sage: G.P_symbol().pp()
            .  .  .  .  2
               .  .  1  3
                  .  4  5
        """
        # keep only the vertices; the P-edge colors at odd positions
        # are all the degenerate color 0 and carry no information
        chain = P_chain[::2]
        shape = chain[-1]
        # template for the final shape; None marks a still-unfilled cell
        T = [[None for _ in range(r)] for r in shape]
        for i in range(1,len(chain)):
            la = chain[i]
            mu = chain[i-1]
            # pad mu with zeros to the length of la
            # NOTE(review): ``+=`` would mutate ``mu`` in place if the chain
            # entries were plain lists; presumably they are immutable Sage
            # partitions so this rebinds -- confirm before changing
            mu += [0]*(len(la) - len(mu))
            for r in range(len(la)):
                for c in range(mu[r], la[r]):
                    # every cell added in step i receives the letter i
                    T[r][c] = i
        # cells that remained None form the inner (skew) shape
        skew = _make_partition([row.count(None) for row in T])
        T = [[e for e in row if e is not None] for row in T]
        return ShiftedPrimedTableau(T, skew=skew)
    def Q_symbol(self, Q_chain):
        r"""
        Return the labels along the horizontal boundary of a rectangular
        growth diagram as a skew tableau.

        EXAMPLES:

        Check the example just before Corollary 3.2 in [Sag1987]_::

            sage: Shifted = GrowthDiagram.rules.ShiftedShapes()
            sage: G = Shifted([2,6,5,1,7,4,3])
            sage: G.Q_symbol().pp()
            1  2  4' 5  7'
               3  6'

        Check the example just before Corollary 8.2 in [SS1990]_::

            sage: T = ShiftedPrimedTableau([[4],[1],[5]], skew=[3,1])
            sage: T.pp()
            .  .  .  4
               .  1
                  5
            sage: U = ShiftedPrimedTableau([[1],[3.5],[5]], skew=[3,1])
            sage: U.pp()
            .  .  .  1
               .  4'
                  5
            sage: Shifted = GrowthDiagram.rules.ShiftedShapes()
            sage: labels = [mu if is_even(i) else 0 for i, mu in enumerate(T.to_chain()[::-1])] + U.to_chain()[1:]
            sage: G = Shifted({(1,2):1, (2,1):1}, shape=[5,5,5,5,5], labels=labels)
            sage: G.Q_symbol().pp()
            .  .  .  .  2
               .  .  1  4'
                  .  3' 5'
        """
        # the chain alternates vertices (even positions) and Q-edge
        # colors (odd positions)
        chain = Q_chain
        shape = chain[-1]
        # template for the final shape; None marks a still-unfilled cell
        T = [[None for _ in range(r)] for r in shape]
        for i in range(1,(len(chain)+1)//2):
            la = chain[2*i]
            if chain[2*i-1] == 3:
                # a red edge (color 3) produces a primed letter i',
                # encoded as i - 0.5
                prime = 0.5
            else:
                prime = 0
            mu = chain[2*(i-1)]
            # pad mu with zeros to the length of la
            # NOTE(review): ``+=`` would mutate ``mu`` in place if the chain
            # entries were plain lists; presumably they are immutable Sage
            # partitions so this rebinds -- confirm before changing
            mu += [0]*(len(la) - len(mu))
            for r in range(len(la)):
                for c in range(mu[r], la[r]):
                    T[r][c] = i - prime
        # cells that remained None form the inner (skew) shape
        skew = _make_partition([row.count(None) for row in T])
        T = [[e for e in row if e is not None] for row in T]
        return ShiftedPrimedTableau(T, skew=skew)
def forward_rule(self, y, e, t, f, x, content):
r"""
Return the output path given two incident edges and the content.
See [Fom1995]_ Lemma 4.5.1, page 38.
INPUT:
- ``y, e, t, f, x`` -- a path of three partitions and two
colors from a cell in a growth diagram, labelled as::
t f x
e
y
- ``content`` -- `0` or `1`; the content of the cell
OUTPUT:
The two colors and the fourth partition ``g``, ``z``, ``h``
according to Sagan-Worley insertion.
EXAMPLES::
sage: Shifted = GrowthDiagram.rules.ShiftedShapes()
sage: Shifted.forward_rule([], 0, [], 0, [], 1)
(1, [1], 0)
sage: Shifted.forward_rule([1], 0, [1], 0, [1], 1)
(2, [2], 0)
if ``x != y``::
sage: Shifted.forward_rule([3], 0, [2], 1, [2,1], 0)
(1, [3, 1], 0)
sage: Shifted.forward_rule([2,1], 0, [2], 2, [3], 0)
(2, [3, 1], 0)
if ``x == y != t``::
sage: Shifted.forward_rule([3], 0, [2], 2, [3], 0)
(1, [3, 1], 0)
sage: Shifted.forward_rule([3,1], 0, [2,1], 2, [3,1], 0)
(2, [3, 2], 0)
sage: Shifted.forward_rule([2,1], 0, [2], 1, [2,1], 0)
(3, [3, 1], 0)
sage: Shifted.forward_rule([3], 0, [2], 3, [3], 0)
(3, [4], 0)
"""
if e != 0:
raise ValueError("the P-graph should not be colored")
h = 0
if x == t == y:
if f != 0:
raise ValueError("degenerate edge f should have color 0")
if content == 0:
g, z = 0, x
elif content == 1:
if not x:
g, z = 1, _Partitions(x).add_cell(0) # black
else:
g, z = 2, _make_partition(x).add_cell(0) # blue
else:
raise NotImplementedError
elif content != 0:
raise ValueError("for y=%s, t=%s, x=%s, the content should be 0 but is %s"
% (y, t, x, content))
elif x != t == y:
g, z = f, x
elif x == t != y:
if f != 0:
raise ValueError("degenerate edge f should have color 0")
g, z = f, y
else:
if x != y:
row = SkewPartition([x, t]).cells()[0][0]
g, z = f, _make_partition(y).add_cell(row)
elif x == y != t and f == 2: # blue
row = 1+SkewPartition([x, t]).cells()[0][0]
if row == len(y):
g, z = 1, _make_partition(y).add_cell(row) # black
else:
g, z = 2, _make_partition(y).add_cell(row) # blue
elif x == y != t and f in [1, 3]: # black or red
c = SkewPartition([x, t]).cells()[0]
col = c[0] + c[1] + 1
# print y, t, x, c, col
for i in range(len(y)):
if i + y[i] == col:
z = y[:i] + [y[i]+1] + y[i+1:]
break
g = 3
else:
raise NotImplementedError
return g, _make_partition(z), h
    def backward_rule(self, y, g, z, h, x):
        r"""
        Return the input path and the content given two incident edges.

        See [Fom1995]_ Lemma 4.5.1, page 38.

        INPUT:

        - ``y, g, z, h, x`` -- a path of three partitions and two
          colors from a cell in a growth diagram, labelled as::

                x
                h
            y g z

        OUTPUT:

        A tuple ``(e, t, f, content)`` consisting of the shape ``t``
        of the fourth word, the colours of the incident edges and the
        content of the cell according to Sagan - Worley insertion.

        EXAMPLES::

            sage: Shifted = GrowthDiagram.rules.ShiftedShapes()
            sage: Shifted.backward_rule([], 1, [1], 0, [])
            (0, [], 0, 1)
            sage: Shifted.backward_rule([1], 2, [2], 0, [1])
            (0, [1], 0, 1)

        if ``x != y``::

            sage: Shifted.backward_rule([3], 1, [3, 1], 0, [2,1])
            (0, [2], 1, 0)
            sage: Shifted.backward_rule([2,1], 2, [3, 1], 0, [3])
            (0, [2], 2, 0)

        if ``x == y != t``::

            sage: Shifted.backward_rule([3], 1, [3, 1], 0, [3])
            (0, [2], 2, 0)
            sage: Shifted.backward_rule([3,1], 2, [3, 2], 0, [3,1])
            (0, [2, 1], 2, 0)
            sage: Shifted.backward_rule([2,1], 3, [3, 1], 0, [2,1])
            (0, [2], 1, 0)
            sage: Shifted.backward_rule([3], 3, [4], 0, [3])
            (0, [2], 3, 0)
        """
        # Edge colors: 0 = degenerate, 1 = black, 2 = blue, 3 = red.
        # The vertical (P-graph) edge h must be uncolored.
        if h != 0:
            raise ValueError("the P-graph should not be colored")
        if x == y == z:
            if g != 0:
                raise ValueError("degenerate edge g should have color 0")
            return (0, x, 0, 0)
        elif x == z != y:
            # Only the horizontal step changed; pass the color back through.
            return (0, y, g, 0)
        elif x != z == y:
            if g != 0:
                raise ValueError("degenerate edge g should have color 0")
            return (0, x, 0, 0)
        else:
            if x != y:
                # Undo the forward rule's "add x's new cell to y":
                # remove the cell of z/x from y.
                row = SkewPartition([z, x]).cells()[0][0]
                return (0, _make_partition(y).remove_cell(row), g, 0)
            else:
                row, col = SkewPartition([z, x]).cells()[0]
                if row > 0 and g in [1, 2]: # black or blue
                    # Undo a bump: the cell came from one row higher,
                    # where the edge was blue.
                    return (0, _make_partition(y).remove_cell(row-1), 2, 0)
                elif row == 0 and g in [1, 2]: # black or blue
                    # The cell was newly created in row 0: content 1.
                    return (0, y, 0, 1)
                else:
                    # Red edge: undo the diagonal insertion.
                    # find last cell in column col-1
                    for i in range(len(y)-1,-1,-1):
                        if i + y[i] == col + row:
                            if y[i] == 1:
                                # Removing the cell empties the row: the
                                # forward step turned a black edge red.
                                t = y[:i]
                                return (0, t, 1, 0)
                            else:
                                t = y[:i] + [y[i]-1] + y[i+1:]
                                return (0, t, 3, 0)
                    raise ValueError("this should not happen")
class RuleLLMS(Rule):
    r"""
    A rule modelling the Schensted correspondence for affine
    permutations.

    EXAMPLES::

        sage: LLMS3 = GrowthDiagram.rules.LLMS(3)
        sage: GrowthDiagram(LLMS3, [3,1,2])
        0  1  0
        0  0  1
        1  0  0

    The vertices of the dual graded graph are
    :class:`~sage.combinat.core.Cores`::

        sage: LLMS3.vertices(4)
        3-Cores of length 4

    Let us check example of Figure 1 in [LS2007]_.  Note that,
    instead of passing the rule to :class:`GrowthDiagram`, we can
    also call the rule to create growth diagrams::

        sage: G = LLMS3([4,1,2,6,3,5]); G
        0  1  0  0  0  0
        0  0  1  0  0  0
        0  0  0  0  1  0
        1  0  0  0  0  0
        0  0  0  0  0  1
        0  0  0  1  0  0

    The :meth:`P_symbol` is a
    :class:`~sage.combinat.k_tableau.StrongTableau`::

        sage: G.P_symbol().pp()
        -1 -2 -3 -5
         3  5
        -4 -6
         5
         6

    The :meth:`Q_symbol` is a
    :class:`~sage.combinat.k_tableau.WeakTableau`::

        sage: G.Q_symbol().pp()
        1  3  4  5
        2  5
        3  6
        5
        6

    Let us also check Example 6.2 in [LLMSSZ2013]_::

        sage: G = LLMS3([4,1,3,2])
        sage: G.P_symbol().pp()
        -1 -2  3
        -3
        -4
        sage: G.Q_symbol().pp()
        1  3  4
        2
        3

    TESTS::

        sage: LLMS3 = GrowthDiagram.rules.LLMS(3)
        sage: LLMS3.zero
        []
    """
    zero_edge = None  # to prevent confusion with the edge labelled with content 0
    has_multiple_edges = True

    def __init__(self, k):
        r"""
        Initialize ``self``.

        TESTS::

            sage: LLMS3 = GrowthDiagram.rules.LLMS(3)
            sage: TestSuite(LLMS3).run()
        """
        # k determines the lattice of k-cores; the tableaux produced are
        # strong/weak (k-1)-tableaux.
        self.k = k
        self.zero = Core([], k)

    def normalize_vertex(self, v):
        r"""
        Convert ``v`` to a `k`-core.

        EXAMPLES::

            sage: LLMS3 = GrowthDiagram.rules.LLMS(3)
            sage: LLMS3.normalize_vertex([3,1]).parent()
            3-Cores of length 3
        """
        return Core(v, self.k)

    def rank(self, v):
        r"""
        Return the rank of ``v``: the length of the core.

        EXAMPLES::

            sage: LLMS3 = GrowthDiagram.rules.LLMS(3)
            sage: LLMS3.rank(LLMS3.vertices(3)[0])
            3
        """
        return v.length()

    def vertices(self, n):
        r"""
        Return the vertices of the dual graded graph on level ``n``.

        EXAMPLES::

            sage: LLMS3 = GrowthDiagram.rules.LLMS(3)
            sage: LLMS3.vertices(2)
            3-Cores of length 2
        """
        return Cores(self.k, length=n)

    def is_Q_edge(self, v, w):
        r"""
        Return whether ``(v, w)`` is a `Q`-edge of ``self``.

        ``(v, w)`` is an edge if ``w`` is a weak cover of ``v``, see
        :meth:`~sage.combinat.core.Core.weak_covers()`.

        EXAMPLES::

            sage: LLMS4 = GrowthDiagram.rules.LLMS(4)
            sage: v = LLMS4.vertices(3)[1]; v
            [2, 1]
            sage: [w for w in LLMS4.vertices(4) if len(LLMS4.is_Q_edge(v, w)) > 0]
            [[2, 2], [3, 1, 1]]
            sage: all(LLMS4.is_Q_edge(v, w) == [] for w in LLMS4.vertices(5))
            True
        """
        # Multi-edge convention: return the list of edge labels; a weak
        # cover contributes a single unlabelled edge.
        return [None] if w in v.weak_covers() else []

    def is_P_edge(self, v, w):
        r"""
        Return whether ``(v, w)`` is a `P`-edge of ``self``.

        For two k-cores v and w containing v, there are as many edges as
        there are components in the skew partition w/v.  These
        components are ribbons, and therefore contain a unique cell
        with maximal content.  We index the edge with this content.

        EXAMPLES::

            sage: LLMS4 = GrowthDiagram.rules.LLMS(4)
            sage: v = LLMS4.vertices(2)[0]; v
            [2]
            sage: [(w, LLMS4.is_P_edge(v, w)) for w in LLMS4.vertices(3)]
            [([3], [2]), ([2, 1], [-1]), ([1, 1, 1], [])]
            sage: all(LLMS4.is_P_edge(v, w) == [] for w in LLMS4.vertices(4))
            True
        """
        if w in v.strong_covers():
            T = SkewPartition([w.to_partition(), v.to_partition()])
            # One edge per connected component (ribbon) of w/v, labelled
            # by the maximal content j-i over the cells of that ribbon.
            return [max([j-i for i,j in c]) for c in T.cell_poset().connected_components()]
        else:
            return []

    def P_symbol(self, P_chain):
        r"""
        Return the labels along the vertical boundary of a
        rectangular growth diagram as a skew
        :class:`~sage.combinat.k_tableau.StrongTableau`.

        EXAMPLES::

            sage: LLMS4 = GrowthDiagram.rules.LLMS(4)
            sage: G = LLMS4([3,4,1,2])
            sage: G.P_symbol().pp()
            -1 -2
            -3 -4
        """
        # P_chain alternates cores (even positions) and edge labels
        # (odd positions); the edge label is the content of the marked
        # cell of the ribbon added at that step.
        C = P_chain
        T = SkewTableau(chain=C[::2])
        S = T.to_list()
        # StrongTableau marks one cell per entry by negating it: the
        # cell whose content j-i equals the recorded edge label.
        for entry, content in enumerate(C[1::2], 1):
            for i,j in T.cells_containing(entry):
                if j-i == content:
                    S[i][j] = -S[i][j]
                    break
        return StrongTableau(S, self.k-1)

    def Q_symbol(self, Q_chain):
        r"""
        Return the labels along the horizontal boundary of a
        rectangular growth diagram as a skew
        :class:`~sage.combinat.k_tableau.WeakTableau`.

        EXAMPLES::

            sage: LLMS4 = GrowthDiagram.rules.LLMS(4)
            sage: G = LLMS4([3,4,1,2])
            sage: G.Q_symbol().pp()
            1  2
            3  4
        """
        # Only the cores (even positions) matter; Q-edges are unlabelled.
        return WeakTableau(SkewTableau(chain=Q_chain[::2]), self.k-1)

    def forward_rule(self, y, e, t, f, x, content):
        r"""
        Return the output path given two incident edges and the content.

        See [LS2007]_ Section 3.4 and [LLMSSZ2013]_ Section 6.3.

        INPUT:

        - ``y, e, t, f, x`` -- a path of three partitions and two
          colors from a cell in a growth diagram, labelled as::

              t f x
              e
              y

        - ``content`` -- `0` or `1`; the content of the cell

        OUTPUT:

        The two colors and the fourth partition g, z, h according to
        LLMS insertion.

        EXAMPLES::

            sage: LLMS3 = GrowthDiagram.rules.LLMS(3)
            sage: LLMS4 = GrowthDiagram.rules.LLMS(4)
            sage: Z = LLMS3.zero
            sage: LLMS3.forward_rule(Z, None, Z, None, Z, 0)
            (None, [], None)
            sage: LLMS3.forward_rule(Z, None, Z, None, Z, 1)
            (None, [1], 0)
            sage: Y = Core([3,1,1], 3)
            sage: LLMS3.forward_rule(Y, None, Y, None, Y, 1)
            (None, [4, 2, 1, 1], 3)

        if ``x != y``::

            sage: Y = Core([1,1], 3); T = Core([1], 3); X = Core([2], 3)
            sage: LLMS3.forward_rule(Y, -1, T, None, X, 0)
            (None, [2, 1, 1], -1)
            sage: Y = Core([2], 4); T = Core([1], 4); X = Core([1,1], 4)
            sage: LLMS4.forward_rule(Y, 1, T, None, X, 0)
            (None, [2, 1], 1)
            sage: Y = Core([2,1,1], 3); T = Core([2], 3); X = Core([3,1], 3)
            sage: LLMS3.forward_rule(Y, -1, T, None, X, 0)
            (None, [3, 1, 1], -2)

        if ``x == y != t``::

            sage: Y = Core([1], 3); T = Core([], 3); X = Core([1], 3)
            sage: LLMS3.forward_rule(Y, 0, T, None, X, 0)
            (None, [1, 1], -1)
            sage: Y = Core([1], 4); T = Core([], 4); X = Core([1], 4)
            sage: LLMS4.forward_rule(Y, 0, T, None, X, 0)
            (None, [1, 1], -1)
            sage: Y = Core([2,1], 4); T = Core([1,1], 4); X = Core([2,1], 4)
            sage: LLMS4.forward_rule(Y, 1, T, None, X, 0)
            (None, [2, 2], 0)
        """
        # Horizontal (Q-graph) edges carry no label; vertical (P-graph)
        # edge labels e and h are contents of marked cells.
        if f is not None:
            raise ValueError("the Q-graph should not be colored")
        g = None
        if x == t == y:
            if e is not None:
                raise ValueError("degenerate edge e should have color None")
            if content == 0:
                z, h = x, None
            elif content == 1:
                # Add a ribbon in row 0 by applying the simple reflection
                # indexed by the residue of the first row's end.
                if t.size() == 0:
                    z = t.affine_symmetric_group_simple_action(0)
                else:
                    z = t.affine_symmetric_group_simple_action(t[0] % self.k)
                # The marked cell sits at the end of row 0.
                h = z[0] - 1
            else:
                assert False, "BUG in RuleLLMS"
        elif content != 0:
            raise ValueError("for y=%s, t=%s, x=%s, the content should be 0 but is %s"
                             % (y, t, x, content))
        elif x != t == y:
            if e is not None:
                raise ValueError("degenerate edge e should have color None")
            z, h = x, e
        elif x == t != y:
            z, h = y, e
        else: # x != t and y != t
            qx = SkewPartition([x.to_partition(), t.to_partition()])
            qy = SkewPartition([y.to_partition(), t.to_partition()])
            if not all(c in qx.cells() for c in qy.cells()):
                # The horizontal step added cells not contained in the
                # vertical step: apply the same residue action to y.
                res = [(j-i) % self.k for i,j in qx.cells()]
                # All cells added by a simple action share one residue.
                assert len(set(res)) == 1
                r = res[0]
                z = y.affine_symmetric_group_simple_action(r)
                # Slide the marked cell if its residue was just acted on.
                if e % self.k == r:
                    h = e-1
                else:
                    h = e
            elif x == y != t:
                # the addable cell with largest content at most e
                cprime = sorted([c for c in y.to_partition().addable_cells()
                                 if c[1]-c[0] <= e],
                                key = lambda c: -(c[1]-c[0]))[0]
                h = cprime[1] - cprime[0]
                z = y.affine_symmetric_group_simple_action(h % self.k)
        return g, z, h
class RuleBinaryWord(Rule):
    r"""
    A rule modelling a Schensted-like correspondence for binary words.

    EXAMPLES::

        sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
        sage: GrowthDiagram(BinaryWord, [3,1,2])
        0  1  0
        0  0  1
        1  0  0

    The vertices of the dual graded graph are binary words::

        sage: BinaryWord.vertices(3)
        [word: 100, word: 101, word: 110, word: 111]

    Note that, instead of passing the rule to :class:`GrowthDiagram`,
    we can also use call the rule to create growth diagrams.  For
    example::

        sage: BinaryWord([2,4,1,3]).P_chain()
        [word: , word: 1, word: 10, word: 101, word: 1101]
        sage: BinaryWord([2,4,1,3]).Q_chain()
        [word: , word: 1, word: 11, word: 110, word: 1101]

    The Kleitman Greene invariant is the descent word, encoded by the
    positions of the zeros::

        sage: pi = Permutation([4,1,8,3,6,5,2,7,9])
        sage: G = BinaryWord(pi); G
        0  1  0  0  0  0  0  0  0
        0  0  0  0  0  0  1  0  0
        0  0  0  1  0  0  0  0  0
        1  0  0  0  0  0  0  0  0
        0  0  0  0  0  1  0  0  0
        0  0  0  0  1  0  0  0  0
        0  0  0  0  0  0  0  1  0
        0  0  1  0  0  0  0  0  0
        0  0  0  0  0  0  0  0  1
        sage: pi.descents()
        [1, 3, 5, 6]

    TESTS::

        sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
        sage: BinaryWord.zero
        word:
        sage: G = BinaryWord(labels=[[1,1],[1,1,0],[0,1]])
        Traceback (most recent call last):
        ...
        ValueError: 01 has smaller rank than 110 but is not covered by it in P
        sage: G = BinaryWord(labels=[[1,1],[1,0,1],[0,1]])
        Traceback (most recent call last):
        ...
        ValueError: 11 has smaller rank than 101 but is not covered by it in Q

    Check duality::

        sage: BinaryWord._check_duality(4)

    Check that the rules are bijective::

        sage: all(BinaryWord(labels=BinaryWord(pi).out_labels()).to_word()
        ....:     == pi for pi in Permutations(4))
        True
        sage: pi = Permutations(10).random_element()
        sage: G = BinaryWord(pi)
        sage: list(BinaryWord(labels=G.out_labels())) == list(G)
        True

    Test that the Kleitman Greene invariant is indeed the descent word::

        sage: r = 4
        sage: all(Word([0 if i in w.descents() else 1 for i in range(r)])
        ....:     == BinaryWord(w).out_labels()[r]
        ....:     for w in Permutations(r))
        True
    """
    zero = Word([], alphabet=[0,1])

    def normalize_vertex(self, v):
        r"""
        Return ``v`` as a binary word.

        EXAMPLES::

            sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
            sage: BinaryWord.normalize_vertex([0,1]).parent()
            Finite words over {0, 1}
        """
        return Word(v, alphabet=[0,1])

    def vertices(self, n):
        r"""
        Return the vertices of the dual graded graph on level ``n``.

        EXAMPLES::

            sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
            sage: BinaryWord.vertices(3)
            [word: 100, word: 101, word: 110, word: 111]
        """
        # Every nonempty vertex starts with the letter 1.
        if n == 0:
            return [self.zero]
        else:
            w1 = Word([1], [0,1])
            return [w1 + w for w in Words([0,1], n-1)]

    def rank(self, v):
        r"""
        Return the rank of ``v``: number of letters of the word.

        EXAMPLES::

            sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
            sage: BinaryWord.rank(BinaryWord.vertices(3)[0])
            3
        """
        return len(v)

    def is_Q_edge(self, v, w):
        r"""
        Return whether ``(v, w)`` is a `Q`-edge of ``self``.

        ``(v, w)`` is an edge if ``w`` is obtained from ``v`` by
        appending a letter.

        EXAMPLES::

            sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
            sage: v = BinaryWord.vertices(2)[0]; v
            word: 10
            sage: [w for w in BinaryWord.vertices(3) if BinaryWord.is_Q_edge(v, w)]
            [word: 100, word: 101]
            sage: [w for w in BinaryWord.vertices(4) if BinaryWord.is_Q_edge(v, w)]
            []
        """
        return w[:-1] == v

    def is_P_edge(self, v, w):
        r"""
        Return whether ``(v, w)`` is a `P`-edge of ``self``.

        ``(v, w)`` is an edge if ``v`` is obtained from ``w`` by
        deleting a letter.

        EXAMPLES::

            sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
            sage: v = BinaryWord.vertices(2)[1]; v
            word: 11
            sage: [w for w in BinaryWord.vertices(3) if BinaryWord.is_P_edge(v, w)]
            [word: 101, word: 110, word: 111]
            sage: [w for w in BinaryWord.vertices(4) if BinaryWord.is_P_edge(v, w)]
            []
        """
        return len(w) == len(v) + 1 and v.is_subword_of(w)

    def forward_rule(self, y, t, x, content):
        r"""
        Return the output shape given three shapes and the content.

        See [Fom1995]_ Lemma 4.6.1, page 40.

        INPUT:

        - ``y, t, x`` -- three binary words from a cell in a growth
          diagram, labelled as::

              t x
              y

        - ``content`` -- `0` or `1`; the content of the cell

        OUTPUT:

        The fourth binary word ``z`` according to Viennot's
        bijection [Vie1983]_.

        EXAMPLES::

            sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
            sage: BinaryWord.forward_rule([], [], [], 1)
            word: 1
            sage: BinaryWord.forward_rule([1], [1], [1], 1)
            word: 11

        if ``x != y`` append last letter of ``x`` to ``y``::

            sage: BinaryWord.forward_rule([1,0], [1], [1,1], 0)
            word: 101

        if ``x == y != t`` append ``0`` to ``y``::

            sage: BinaryWord.forward_rule([1,1], [1], [1,1], 0)
            word: 110
        """
        if x == t == y:
            if content == 0:
                z = x
            elif content == 1:
                # A cell in this position appends a 1.
                z = Word(list(y) + [1], alphabet=[0,1])
            else:
                raise NotImplementedError
        elif content != 0:
            raise ValueError("for y=%s, t=%s, x=%s, the content should be 0 but is %s"
                             % (y, t, x, content))
        elif x != t == y:
            z = x
        elif x == t != y:
            z = y
        else:
            if x != y:
                # Both words grew differently: copy x's new letter onto y.
                z = Word(list(y) + [x[-1]], alphabet=[0,1])
            elif x == y != t:
                z = Word(list(y) + [0], alphabet=[0,1])
            else:
                raise NotImplementedError
        return z

    def backward_rule(self, y, z, x):
        r"""
        Return the content and the input shape.

        See [Fom1995]_ Lemma 4.6.1, page 40.

        - ``y, z, x`` -- three binary words from a cell in a growth diagram,
          labelled as::

              x
            y z

        OUTPUT:

        A pair ``(t, content)`` consisting of the shape of the fourth
        word and the content of the cell according to Viennot's
        bijection [Vie1983]_.

        TESTS::

            sage: BinaryWord = GrowthDiagram.rules.BinaryWord()
            sage: w = [4,1,8,3,6,5,2,7,9]; G = GrowthDiagram(BinaryWord, w)
            sage: BinaryWord(labels=G.out_labels()).to_word() == w  # indirect doctest
            True
        """
        if x == y == z:
            return (x, 0)
        elif x == z != y:
            return (y, 0)
        elif x != z == y:
            return (x, 0)
        else:
            # A trailing 1 on z with x == y signals the cell contained
            # a 1; otherwise strip the appended letter.
            if x == y and len(z) > 0 and z[-1] == 1:
                return (x, 1)
            else:
                return (x[:-1], 0)
class RuleSylvester(Rule):
    r"""
    A rule modelling a Schensted-like correspondence for binary trees.

    EXAMPLES::

        sage: Sylvester = GrowthDiagram.rules.Sylvester()
        sage: GrowthDiagram(Sylvester, [3,1,2])
        0  1  0
        0  0  1
        1  0  0

    The vertices of the dual graded graph are
    :class:`~sage.combinat.binary_tree.BinaryTrees`::

        sage: Sylvester.vertices(3)
        Binary trees of size 3

    The :meth:`~sage.combinat.growth.Rule.P_graph` is also known as
    the bracket tree, the :meth:`~sage.combinat.growth.Rule.Q_graph`
    is the lattice of finite order ideals of the infinite binary
    tree, see Example 2.4.6 in [Fom1994]_.

    For a permutation, the :meth:`P_symbol` is the binary search
    tree, the :meth:`Q_symbol` is the increasing tree corresponding
    to the inverse permutation.  Note that, instead of passing the
    rule to :class:`GrowthDiagram`, we can also call the rule to
    create growth diagrams.  From [Nze2007]_::

        sage: pi = Permutation([3,5,1,4,2,6]); G = Sylvester(pi); G
        0  0  1  0  0  0
        0  0  0  0  1  0
        1  0  0  0  0  0
        0  0  0  1  0  0
        0  1  0  0  0  0
        0  0  0  0  0  1
        sage: ascii_art(G.P_symbol())
          __3__
         /     \
        1       5
         \     / \
          2   4   6
        sage: ascii_art(G.Q_symbol())
          __1__
         /     \
        3       2
         \     / \
          5   4   6
        sage: all(Sylvester(pi).P_symbol() == pi.binary_search_tree()
        ....:     for pi in Permutations(5))
        True
        sage: all(Sylvester(pi).Q_symbol() == pi.inverse().increasing_tree()
        ....:     for pi in Permutations(5))
        True

    TESTS::

        sage: Sylvester.zero
        .
        sage: B = BinaryTree; R = B([None,[]]); L = B([[],None])
        sage: T = B([[],[]]); S = B([L,None])
        sage: G = Sylvester(labels=[R, T, R])
        Traceback (most recent call last):
        ...
        ValueError: [., [., .]] has smaller rank than [[., .], [., .]]
        but is not covered by it in P
        sage: G = Sylvester(labels=[R, S, R])
        Traceback (most recent call last):
        ...
        ValueError: [., [., .]] has smaller rank than [[[., .], .], .]
        but is not covered by it in Q

    Check duality::

        sage: Sylvester._check_duality(4)

    Check that the rules are bijective::

        sage: all(Sylvester(labels=GrowthDiagram(Sylvester, pi).out_labels()).to_word()
        ....:     == pi for pi in Permutations(4))
        True
        sage: pi = Permutations(10).random_element()
        sage: G = GrowthDiagram(Sylvester, pi)
        sage: list(Sylvester(labels=G.out_labels())) == list(G)
        True
    """
    zero = BinaryTree()

    def normalize_vertex(self, v):
        r"""
        Return ``v`` as a binary tree.

        EXAMPLES::

            sage: Sylvester = GrowthDiagram.rules.Sylvester()
            sage: Sylvester.normalize_vertex([[],[]]).parent()
            Binary trees
        """
        return BinaryTree(v)

    def vertices(self, n):
        r"""
        Return the vertices of the dual graded graph on level ``n``.

        EXAMPLES::

            sage: Sylvester = GrowthDiagram.rules.Sylvester()
            sage: Sylvester.vertices(3)
            Binary trees of size 3
        """
        return BinaryTrees(n)

    def rank(self, v):
        r"""
        Return the rank of ``v``: the number of nodes of the tree.

        EXAMPLES::

            sage: Sylvester = GrowthDiagram.rules.Sylvester()
            sage: Sylvester.rank(Sylvester.vertices(3)[0])
            3
        """
        return v.node_number()

    def is_Q_edge(self, v, w):
        r"""
        Return whether ``(v, w)`` is a `Q`-edge of ``self``.

        ``(v, w)`` is an edge if ``v`` is a sub-tree of ``w`` with one
        node less.

        EXAMPLES::

            sage: Sylvester = GrowthDiagram.rules.Sylvester()
            sage: v = Sylvester.vertices(2)[1]; ascii_art(v)
              o
             /
            o
            sage: ascii_art([w for w in Sylvester.vertices(3) if Sylvester.is_Q_edge(v, w)])
            [   o  ,   o ,   o ]
            [  / \    /     /  ]
            [ o   o  o     o   ]
            [         \   /    ]
            [          o o     ]
            sage: [w for w in Sylvester.vertices(4) if Sylvester.is_Q_edge(v, w)]
            []
        """
        def is_subtree(T1, T2):
            # True when T1 equals T2 with exactly one leaf-node removed:
            # recurse down the branch where the trees differ.
            if T2.is_empty():
                return False
            elif T2[0].is_empty() and T2[1].is_empty():
                # T2 is a single node; T1 must be the empty tree.
                return T1.is_empty()
            elif T1.is_empty():
                return False
            else:
                return ((T1[0] == T2[0] and is_subtree(T1[1], T2[1])) or
                        (T1[1] == T2[1] and is_subtree(T1[0], T2[0])))
        return is_subtree(v, w)

    def is_P_edge(self, v, w):
        r"""
        Return whether ``(v, w)`` is a `P`-edge of ``self``.

        ``(v, w)`` is an edge if ``v`` is obtained from ``w`` by deleting
        its right-most node.

        EXAMPLES::

            sage: Sylvester = GrowthDiagram.rules.Sylvester()
            sage: v = Sylvester.vertices(2)[1]; ascii_art(v)
              o
             /
            o
            sage: ascii_art([w for w in Sylvester.vertices(3) if Sylvester.is_P_edge(v, w)])
            [   o  ,     o ]
            [  / \      /  ]
            [ o   o    o   ]
            [         /    ]
            [        o     ]
            sage: [w for w in Sylvester.vertices(4) if Sylvester.is_P_edge(v, w)]
            []
        """
        if w.is_empty():
            return False
        else:
            return v == RuleSylvester._delete_right_most_node(w)

    def P_symbol(self, P_chain):
        r"""
        Return the labels along the vertical boundary of a rectangular
        growth diagram as a labelled binary tree.

        For permutations, this coincides with the binary search tree.

        EXAMPLES::

            sage: Sylvester = GrowthDiagram.rules.Sylvester()
            sage: pi = Permutation([2,4,3,1])
            sage: ascii_art(Sylvester(pi).P_symbol())
              _2_
             /   \
            1     4
                 /
                3
            sage: Sylvester(pi).P_symbol() == pi.binary_search_tree()
            True

        We can also do the skew version::

            sage: B = BinaryTree; E = B(); N = B([])
            sage: ascii_art(Sylvester([3,2], shape=[3,3,3], labels=[N,N,N,E,E,E,N]).P_symbol())
              __1___
             /      \
            None     3
                    /
                   2
        """
        def add_label(L, S, T, m):
            # Label with m the unique node of T that is not in S,
            # walking down the right spine of the labelled tree L.
            if T[0] == S:
                # The new node became the root: the old tree is its
                # left subtree.
                L = LabelledBinaryTree([L, None], m)
            else:
                assert T[0] == S[0]
                l = L.label()
                L = LabelledBinaryTree([L[0], add_label(L[1], S[1], T[1], m)], l)
            return L
        L = LabelledBinaryTree(P_chain[0])
        for i in range(1, len(P_chain)):
            S, T = P_chain[i-1], P_chain[i]
            L = add_label(L, S, T, i)
        return L

    def Q_symbol(self, Q_chain):
        r"""
        Return the labels along the vertical boundary of a rectangular
        growth diagram as a labelled binary tree.

        For permutations, this coincides with the increasing tree.

        EXAMPLES::

            sage: Sylvester = GrowthDiagram.rules.Sylvester()
            sage: pi = Permutation([2,4,3,1])
            sage: ascii_art(Sylvester(pi).Q_symbol())
              _1_
             /   \
            4     2
                 /
                3
            sage: Sylvester(pi).Q_symbol() == pi.inverse().increasing_tree()
            True

        We can also do the skew version::

            sage: B = BinaryTree; E = B(); N = B([])
            sage: ascii_art(Sylvester([3,2], shape=[3,3,3], labels=[N,N,N,E,E,E,N]).Q_symbol())
              _None_
             /      \
            3        1
                    /
                   2
        """
        def add_label(L, S, T, m):
            # Label with m the unique node of T that is not in S,
            # recursing into whichever subtree changed.
            if L.is_empty():
                assert T.node_number() == 1
                return LabelledBinaryTree([], m)
            l = L.label()
            if T[0] == S[0]:
                return LabelledBinaryTree([L[0], add_label(L[1], S[1], T[1], m)], l)
            else:
                return LabelledBinaryTree([add_label(L[0], S[0], T[0], m), L[1]], l)
        L = LabelledBinaryTree(Q_chain[0])
        for i in range(1, len(Q_chain)):
            S, T = Q_chain[i-1], Q_chain[i]
            L = add_label(L, S, T, i)
        return L

    @staticmethod
    def _delete_right_most_node(b):
        r"""
        Return the tree obtained by deleting the right most node from ``b``.

        TESTS::

            sage: Sylvester = GrowthDiagram.rules.Sylvester()
            sage: b = BinaryTree([]); b
            [., .]
            sage: Sylvester._delete_right_most_node(b)
            .
            sage: b = BinaryTree([[[], []], None]); ascii_art(b)
                o
               /
              o
             / \
            o   o
            sage: ascii_art(Sylvester._delete_right_most_node(b))
              o
             / \
            o   o
        """
        # The right-most node is found by following right children; when
        # the right subtree is empty the current root is right-most and
        # is replaced by its left subtree.
        if b.is_empty():
            raise ValueError("cannot delete right most node from empty tree")
        elif b[1].is_empty():
            return b[0]
        else:
            return BinaryTree([b[0], RuleSylvester._delete_right_most_node(b[1])])

    def forward_rule(self, y, t, x, content):
        r"""
        Return the output shape given three shapes and the content.

        See [Nze2007]_, page 9.

        INPUT:

        - ``y, t, x`` -- three binary trees from a cell in a growth
          diagram, labelled as::

              t x
              y

        - ``content`` -- `0` or `1`; the content of the cell

        OUTPUT:

        The fourth binary tree ``z``.

        EXAMPLES::

            sage: Sylvester = GrowthDiagram.rules.Sylvester()
            sage: B = BinaryTree; E = B(); N = B([]); L = B([[],None])
            sage: R = B([None,[]]); T = B([[],[]])
            sage: ascii_art(Sylvester.forward_rule(E, E, E, 1))
            o
            sage: ascii_art(Sylvester.forward_rule(N, N, N, 1))
            o
             \
              o
            sage: ascii_art(Sylvester.forward_rule(L, L, L, 1))
              o
             / \
            o   o
            sage: ascii_art(Sylvester.forward_rule(R, R, R, 1))
            o
             \
              o
               \
                o

        If ``y != x``, obtain ``z`` from ``y`` adding a node such
        that deleting the right most gives ``x``::

            sage: ascii_art(Sylvester.forward_rule(R, N, L, 0))
              o
             / \
            o   o
            sage: ascii_art(Sylvester.forward_rule(L, N, R, 0))
              o
             /
            o
             \
              o

        If ``y == x != t``, obtain ``z`` from ``x`` by adding a node
        as left child to the right most node::

            sage: ascii_art(Sylvester.forward_rule(N, E, N, 0))
              o
             /
            o
            sage: ascii_art(Sylvester.forward_rule(T, L, T, 0))
              _o_
             /   \
            o     o
                 /
                o
            sage: ascii_art(Sylvester.forward_rule(L, N, L, 0))
                o
               /
              o
             /
            o
            sage: ascii_art(Sylvester.forward_rule(R, N, R, 0))
            o
             \
              o
             /
            o
        """
        def successors(b):
            r"""
            Return all trees obtained from ``b`` by adding a node.
            """
            if b.is_empty():
                yield BinaryTree([])
            else:
                for t in successors(b[0]):
                    yield BinaryTree([t, b[1]])
                for t in successors(b[1]):
                    yield BinaryTree([b[0], t])
        def union(y, x):
            r"""
            Return the unique tree obtained by adding a node to ``y`` such
            that deleting the right most node gives ``x``.
            """
            for t in successors(y):
                if RuleSylvester._delete_right_most_node(t) == x:
                    return t
            raise ValueError("could not find union of %s and %s" % (y,x))
        if y == t == x:
            if content == 0:
                z = y
            elif content == 1:
                # over() hangs the old tree as left subtree of a new
                # right-most node.
                z = t.over(BinaryTree([]))
            else:
                raise NotImplementedError
        elif content != 0:
            raise ValueError("for y=%s, t=%s, x=%s, the content should be 0 but is %s" % (y, t, x, content))
        elif y != t == x:
            z = y
        elif y == t != x:
            z = x
        else:
            z = union(y, x)
        return z

    def backward_rule(self, y, z, x):
        r"""
        Return the output shape given three shapes and the content.

        See [Nze2007]_, page 9.

        INPUT:

        - ``y, z, x`` -- three binary trees from a cell in a growth
          diagram, labelled as::

              x
            y z

        OUTPUT:

        A pair ``(t, content)`` consisting of the shape of the fourth
        binary tree t and the content of the cell.

        EXAMPLES::

            sage: Sylvester = GrowthDiagram.rules.Sylvester()
            sage: B = BinaryTree; E = B(); N = B([]); L = B([[],None])
            sage: R = B([None,[]]); T = B([[],[]])
            sage: ascii_art(Sylvester.backward_rule(E, E, E))
            ( , 0 )
            sage: ascii_art(Sylvester.backward_rule(N, N, N))
            ( o, 0 )
        """
        if x == y == z:
            return (x, 0)
        elif x == z != y:
            return (y, 0)
        elif x != z == y:
            return (x, 0)
        else:
            # A new right-most leaf on z (relative to x == y) signals
            # content 1; otherwise undo the forward rule by deleting
            # y's right-most node.
            if x == y and z == x.over(BinaryTree([])):
                return (x, 1)
            else:
                t = RuleSylvester._delete_right_most_node(y)
                return (t, 0)
class RuleYoungFibonacci(Rule):
    r"""
    A rule modelling a Schensted-like correspondence for
    Young-Fibonacci-tableaux.

    EXAMPLES::

        sage: YF = GrowthDiagram.rules.YoungFibonacci()
        sage: GrowthDiagram(YF, [3,1,2])
        0  1  0
        0  0  1
        1  0  0

    The vertices of the dual graded graph are Fibonacci words -
    compositions into parts of size at most two::

        sage: YF.vertices(4)
        [word: 22, word: 211, word: 121, word: 112, word: 1111]

    Note that, instead of passing the rule to :class:`GrowthDiagram`,
    we can also use call the rule to create growth diagrams.  For
    example::

        sage: G = YF([2, 3, 7, 4, 1, 6, 5]); G
        0  0  0  0  1  0  0
        1  0  0  0  0  0  0
        0  1  0  0  0  0  0
        0  0  0  1  0  0  0
        0  0  0  0  0  0  1
        0  0  0  0  0  1  0
        0  0  1  0  0  0  0

    The Kleitman Greene invariant is: take the last letter and the
    largest letter of the permutation and remove them.  If they
    coincide write 1, otherwise write 2::

        sage: G.P_chain()[-1]
        word: 21211

    TESTS::

        sage: YF = GrowthDiagram.rules.YoungFibonacci()
        sage: YF.zero
        word:

    Check duality::

        sage: YF._check_duality(4)

        sage: G = YF(labels=[[1],[1,0],[1]])
        Traceback (most recent call last):
        ...
        ValueError: 0 not in alphabet!

        sage: G = YF(labels=[[1,1],[1,2]])
        Traceback (most recent call last):
        ...
        ValueError: 11 has smaller rank than 12 but is not covered by it in Q

        sage: G = YF(labels=[[1,2],[1,1]])
        Traceback (most recent call last):
        ...
        ValueError: 11 has smaller rank than 12 but is not covered by it in P

        sage: all(YF(labels=YF(pi).out_labels()).to_word()
        ....:     == pi for pi in Permutations(4))
        True
        sage: pi = Permutations(10).random_element()
        sage: G = YF(pi)
        sage: list(YF(labels=G.out_labels())) == list(G)
        True
    """
    zero = Word([], alphabet=[1,2])

    def normalize_vertex(self, v):
        r"""
        Return ``v`` as a word with letters 1 and 2.

        EXAMPLES::

            sage: YF = GrowthDiagram.rules.YoungFibonacci()
            sage: YF.normalize_vertex([1,2,1]).parent()
            Finite words over {1, 2}
        """
        return Word(v, alphabet=[1,2])

    def vertices(self, n):
        r"""
        Return the vertices of the dual graded graph on level ``n``.

        EXAMPLES::

            sage: YF = GrowthDiagram.rules.YoungFibonacci()
            sage: YF.vertices(3)
            [word: 21, word: 12, word: 111]
        """
        if n == 0:
            return [self.zero]
        else:
            return [Word(list(w), [1,2]) for w in Compositions(n, max_part=2)]

    def rank(self, v):
        r"""
        Return the rank of ``v``: the size of the corresponding composition.

        EXAMPLES::

            sage: YF = GrowthDiagram.rules.YoungFibonacci()
            sage: YF.rank(YF.vertices(3)[0])
            3
        """
        return sum(v)

    def is_P_edge(self, v, w):
        r"""
        Return whether ``(v, w)`` is a `P`-edge of ``self``.

        ``(v, w)`` is an edge if ``v`` is obtained from ``w`` by deleting
        a ``1`` or replacing the left-most ``2`` by a ``1``.

        EXAMPLES::

            sage: YF = GrowthDiagram.rules.YoungFibonacci()
            sage: v = YF.vertices(5)[5]; v
            word: 1121
            sage: [w for w in YF.vertices(6) if YF.is_P_edge(v, w)]
            [word: 2121, word: 11121]
            sage: [w for w in YF.vertices(7) if YF.is_P_edge(v, w)]
            []
        """
        if sum(w) != sum(v) + 1:
            return False
        ell = len(v)
        w = list(w)
        # w covers v iff w arises from v by inserting a 1 before (or at)
        # the position of v's first 1, or by turning that first 1 into
        # a 2; hence the scan stops at the first 1 of v.
        for i in range(ell+1):
            d = list(v)
            d.insert(i, 1)
            if w == d:
                return True
            if i < ell and v[i] == 1:
                d = list(v)
                d[i] = 2
                if w == d:
                    return True
                break
        return False

    is_Q_edge = is_P_edge

    def forward_rule(self, y, t, x, content):
        r"""
        Return the output shape given three shapes and the content.

        See [Fom1995]_ Lemma 4.4.1, page 35.

        INPUT:

        - ``y, t, x`` -- three Fibonacci words from a
          cell in a growth diagram, labelled as::

              t x
              y

        - ``content`` -- `0` or `1`; the content of the cell

        OUTPUT:

        The fourth Fibonacci word.

        EXAMPLES::

            sage: YF = GrowthDiagram.rules.YoungFibonacci()
            sage: YF.forward_rule([], [], [], 1)
            word: 1
            sage: YF.forward_rule([1], [1], [1], 1)
            word: 11
            sage: YF.forward_rule([1,2], [1], [1,1], 0)
            word: 21
            sage: YF.forward_rule([1,1], [1], [1,1], 0)
            word: 21
        """
        if x == t == y:
            if content == 0:
                r = x
            elif content == 1:
                # A cell here prepends a 1.
                r = Word([1] + list(y), alphabet=[1,2])
            else:
                raise NotImplementedError
        elif content != 0:
            raise ValueError("for y=%s, t=%s, x=%s, the content should be 0 but is %s"
                             % (y, t, x, content))
        elif x == t:
            r = y
        elif y == t:
            r = x
        else:
            # Both steps are proper: prepend a 2 to the common shape t.
            if x != t != y:
                r = Word([2] + list(t), alphabet=[1,2])
            else:
                raise NotImplementedError("for y=%s, t=%s, x=%s, content %s we have no rule"
                                          % (y, t, x, content))
        return r

    def backward_rule(self, y, z, x):
        r"""
        Return the content and the input shape.

        See [Fom1995]_ Lemma 4.4.1, page 35.

        - ``y, z, x`` -- three Fibonacci words from a cell in a
          growth diagram, labelled as::

              x
            y z

        OUTPUT:

        A pair ``(t, content)`` consisting of the shape of the fourth
        word and the content of the cell.

        TESTS::

            sage: YF = GrowthDiagram.rules.YoungFibonacci()
            sage: w = [4,1,8,3,6,5,2,7,9]; G = YF(w)
            sage: GrowthDiagram(YF, labels=G.out_labels()).to_word() == w  # indirect doctest
            True
        """
        if x == y == z:
            return (x, 0)
        elif x == z != y:
            return (y, 0)
        elif x != z == y:
            return (x, 0)
        else:
            # z was obtained by prepending a letter: a 1 means the cell
            # contained a 1, a 2 means it did not.  The alphabet of z
            # is {1, 2}, so these two cases are exhaustive.
            if z[0] == 1:
                return (z[1:], 1)
            elif z[0] == 2:
                return (z[1:], 0)
class RulePartitions(Rule):
    r"""
    Base class for growth diagram rules on Young's lattice, i.e., on
    integer partitions ordered by inclusion and graded by size.

    TESTS::

        sage: Burge = GrowthDiagram.rules.Burge()
        sage: Burge.zero
        []
        sage: G = GrowthDiagram(Burge, labels=[[1],[1]])
        Traceback (most recent call last):
        ...
        ValueError: can only determine the shape of the growth diagram
        if ranks of successive labels differ
    """
    # The rank-zero vertex of Young's lattice is the empty partition.
    zero = _make_partition([])

    def normalize_vertex(self, v):
        r"""
        Convert ``v`` into an integer partition.

        EXAMPLES::

            sage: RSK = GrowthDiagram.rules.RSK()
            sage: RSK.normalize_vertex([3,1]).parent()
            Partitions
        """
        return _make_partition(v)

    def vertices(self, n):
        r"""
        Return all partitions of ``n``: the vertices of the dual graded
        graph on level ``n``.

        EXAMPLES::

            sage: RSK = GrowthDiagram.rules.RSK()
            sage: RSK.vertices(3)
            Partitions of the integer 3
        """
        return Partitions(n)

    def rank(self, v):
        r"""
        Return the size of the partition ``v``, its rank in Young's
        lattice.

        EXAMPLES::

            sage: RSK = GrowthDiagram.rules.RSK()
            sage: RSK.rank(RSK.vertices(3)[0])
            3
        """
        return v.size()

    def P_symbol(self, P_chain):
        r"""
        Assemble the chain of partitions along the vertical boundary of
        a rectangular growth diagram into a (skew) tableau.

        EXAMPLES::

            sage: RuleRSK = GrowthDiagram.rules.RSK()
            sage: G = RuleRSK([[0,1,0], [1,0,2]])
            sage: G.P_symbol().pp()
            1  2  2
            2
        """
        # SkewTableau reads the chain directly: cell entries record the
        # step of the chain at which each cell appeared.
        tableau = SkewTableau(chain=P_chain)
        return tableau

    def Q_symbol(self, Q_chain):
        r"""
        Assemble the chain of partitions along the horizontal boundary
        of a rectangular growth diagram into a (skew) tableau.

        EXAMPLES::

            sage: RuleRSK = GrowthDiagram.rules.RSK()
            sage: G = RuleRSK([[0,1,0], [1,0,2]])
            sage: G.Q_symbol().pp()
            1  3  3
            2
        """
        tableau = SkewTableau(chain=Q_chain)
        return tableau
class RuleRSK(RulePartitions):
r"""
A rule modelling Robinson-Schensted-Knuth insertion.
EXAMPLES::
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: GrowthDiagram(RuleRSK, [3,2,1,2,3])
0 0 1 0 0
0 1 0 1 0
1 0 0 0 1
The vertices of the dual graded graph are integer partitions::
sage: RuleRSK.vertices(3)
Partitions of the integer 3
The local rules implemented provide the RSK correspondence
between matrices with non-negative integer entries and pairs of
semistandard tableaux, the
:meth:`~sage.combinat.growth.RulePartitions.P_symbol` and the
:meth:`~sage.combinat.growth.RulePartitions.Q_symbol`. For
permutations, it reduces to classical Schensted insertion.
Instead of passing the rule to :class:`GrowthDiagram`, we can
also call the rule to create growth diagrams. For example::
sage: m = matrix([[0,0,0,0,1],[1,1,0,2,0], [0,3,0,0,0]])
sage: G = RuleRSK(m); G
0 0 0 0 1
1 1 0 2 0
0 3 0 0 0
sage: ascii_art([G.P_symbol(), G.Q_symbol()])
[ 1 2 2 2 3 1 2 2 2 2 ]
[ 2 3 4 4 ]
[ 3 , 5 ]
For rectangular fillings, the Kleitman-Greene invariant is the
shape of the :meth:`P_symbol` (or the :meth:`Q_symbol`). Put
differently, it is the partition labelling the lower right corner
of the filling (recall that we are using matrix coordinates). It
can be computed alternatively as the partition
`(\mu_1,\dots,\mu_n)`, where `\mu_1 + \dots + \mu_i` is the
maximal sum of entries in a collection of `i` pairwise disjoint
sequences of cells with weakly increasing coordinates.
For rectangular fillings, we could also use the (faster)
implementation provided via :func:`~sage.combinat.rsk.RSK`.
Because the of the coordinate conventions in
:func:`~sage.combinat.rsk.RSK`, we have to transpose matrices::
sage: [G.P_symbol(), G.Q_symbol()] == RSK(m.transpose())
True
sage: n=5; l=[(pi, RuleRSK(pi)) for pi in Permutations(n)]
sage: all([G.P_symbol(), G.Q_symbol()] == RSK(pi) for pi, G in l)
True
sage: n=5; l=[(w, RuleRSK(w)) for w in Words([1,2,3], 5)]
sage: all([G.P_symbol(), G.Q_symbol()] == RSK(pi) for pi, G in l)
True
"""
def forward_rule(self, y, t, x, content):
    r"""
    Return the output shape given three shapes and the content.

    See [Kra2006]_ `(F^1 0)-(F^1 2)`.

    INPUT:

    - ``y, t, x`` -- three partitions from a cell in a
      growth diagram, labelled as::

          t x
          y

    - ``content`` -- a non-negative integer; the content of the cell

    OUTPUT:

    The fourth partition according to the Robinson-Schensted-Knuth
    correspondence.

    EXAMPLES::

        sage: RuleRSK = GrowthDiagram.rules.RSK()
        sage: RuleRSK.forward_rule([2,1],[2,1],[2,1],1)
        [3, 1]
        sage: RuleRSK.forward_rule([1],[],[2],2)
        [4, 1]
    """
    carry = content
    parts = []
    while True:
        row1 = x[0] if x else 0
        row3 = y[0] if y else 0
        entry = max(row1, row3) + carry
        if entry == 0:
            # deliberately a plain list: converting to a Partition
            # here costs a lot of time
            return parts
        parts.append(entry)
        carry = min(row1, row3) - (t[0] if t else 0)
        x, t, y = x[1:], t[1:], y[1:]
def backward_rule(self, y, z, x):
    r"""
    Return the content and the input shape.

    See [Kra2006]_ `(B^1 0)-(B^1 2)`.

    INPUT:

    - ``y, z, x`` -- three partitions from a cell in a
      growth diagram, labelled as::

            x
          y z

    OUTPUT:

    A pair ``(t, content)`` consisting of the shape of the fourth
    word according to the Robinson-Schensted-Knuth correspondence
    and the content of the cell.

    TESTS::

        sage: RuleRSK = GrowthDiagram.rules.RSK()
        sage: w = [4,1,8,3,6,5,2,7,9]; G = RuleRSK(w)
        sage: GrowthDiagram(RuleRSK, labels=G._out_labels).to_word() == w  # indirect doctest
        True
    """
    carry = 0
    t = []
    # Work from the last row of z back up to the first.
    for i in range(len(z), 0, -1):
        row1 = x[i - 1] if len(x) >= i else 0
        row3 = y[i - 1] if len(y) >= i else 0
        t.insert(0, min(row1, row3) - carry)
        carry = z[i - 1] - max(row1, row3)
    return (_make_partition(t), carry)
class RuleBurge(RulePartitions):
r"""
A rule modelling Burge insertion.
EXAMPLES::
sage: Burge = GrowthDiagram.rules.Burge()
sage: GrowthDiagram(Burge, labels=[[],[1,1,1],[2,1,1,1],[2,1,1],[2,1],[1,1],[]])
1 1
0 1
1 0
1 0
The vertices of the dual graded graph are integer partitions::
sage: Burge.vertices(3)
Partitions of the integer 3
The local rules implemented provide Burge's correspondence
between matrices with non-negative integer entries and pairs of
semistandard tableaux, the
:meth:`~sage.combinat.growth.RulePartitions.P_symbol` and the
:meth:`~sage.combinat.growth.RulePartitions.Q_symbol`. For
permutations, it reduces to classical Schensted insertion.
Instead of passing the rule to :class:`GrowthDiagram`, we can
also call the rule to create growth diagrams. For example::
sage: m = matrix([[2,0,0,1,0],[1,1,0,0,0], [0,0,0,0,3]])
sage: G = Burge(m); G
2 0 0 1 0
1 1 0 0 0
0 0 0 0 3
sage: ascii_art([G.P_symbol(), G.Q_symbol()])
[ 1 2 3 1 2 5 ]
[ 1 3 1 5 ]
[ 1 3 1 5 ]
[ 2 , 4 ]
For rectangular fillings, the Kleitman-Greene invariant is the
shape of the
:meth:`~sage.combinat.growth.RulePartitions.P_symbol`. Put
differently, it is the partition labelling the lower right corner
of the filling (recall that we are using matrix coordinates). It
can be computed alternatively as the transpose of the partition
`(\mu_1, \ldots, \mu_n)`, where `\mu_1 + \cdots + \mu_i` is the
maximal sum of entries in a collection of `i` pairwise disjoint
sequences of cells with weakly decreasing row indices and weakly
increasing column indices.
"""
def forward_rule(self, y, t, x, content):
    r"""
    Return the output shape given three shapes and the content.

    See [Kra2006]_ `(F^4 0)-(F^4 2)`.

    INPUT:

    - ``y, t, x`` -- three partitions from a cell in a growth diagram,
      labelled as::

          t x
          y

    - ``content`` -- a non-negative integer; the content of the cell

    OUTPUT:

    The fourth partition according to the Burge correspondence.

    EXAMPLES::

        sage: Burge = GrowthDiagram.rules.Burge()
        sage: Burge.forward_rule([2,1],[2,1],[2,1],1)
        [3, 1]
        sage: Burge.forward_rule([1],[],[2],2)
        [2, 1, 1, 1]
    """
    # n is the maximal length of the longest decreasing chain by
    # Kleitman-Greene's theorem.
    n = content + len(x) + len(y)
    # Zero-pad fresh copies to length n.  Building new lists instead of
    # using ``+=`` avoids mutating the caller's arguments in place when
    # plain Python lists are passed in.
    x = list(x) + [0] * (n - len(x))
    y = list(y) + [0] * (n - len(y))
    t = list(t) + [0] * (n - len(t))
    z = [0] * n
    carry = content
    for i, (row1, row2, row3) in enumerate(zip(x, t, y)):
        s = min(int(row1 == row2 == row3), carry)
        new_part = max(row1, row3) + s
        if new_part:
            z[i] = new_part
            carry += -s + min(row1, row3) - row2
        else:
            break
    return _make_partition(z)
def backward_rule(self, y, z, x):
    r"""
    Return the content and the input shape.

    See [Kra2006]_ `(B^4 0)-(B^4 2)`. (In the arXiv version of
    the article there is a typo: in the computation of carry in
    `(B^4 2)` , `\rho` must be replaced by `\lambda`).

    INPUT:

    - ``y, z, x`` -- three partitions from a cell in a
      growth diagram, labelled as::

            x
          y z

    OUTPUT:

    A pair ``(t, content)`` consisting of the shape of the fourth
    partition according to the Burge correspondence and the content of
    the cell.

    EXAMPLES::

        sage: Burge = GrowthDiagram.rules.Burge()
        sage: Burge.backward_rule([1,1,1],[2,1,1,1],[2,1,1])
        ([1, 1], 0)

    TESTS::

        sage: w = [4,1,8,3,6,5,2,7,9]; G = Burge(w)
        sage: GrowthDiagram(Burge, labels=G._out_labels).to_word() == w  # indirect doctest
        True
    """
    t = [0]*len(z)  # z must be the longest partition
    # Align all three shapes part-by-part from the smallest part upward
    # by zero-padding on the left and reversing.
    mu = [0]*(len(z)-len(x)) + x[::-1]
    nu = [0]*(len(z)-len(y)) + y[::-1]
    la = z[::-1]
    carry = 0
    for i, (mu_i, la_i, nu_i) in enumerate(zip(mu, la, nu)):
        s = min(int(mu_i == nu_i == la_i), carry)
        t[i] = min(mu_i, nu_i) - s
        carry += -s + la_i - max(mu_i, nu_i)
    # restore largest-part-first order expected for a partition
    t.reverse()
    return (_make_partition(t), carry)
class RuleDomino(Rule):
r"""
A rule modelling domino insertion.
EXAMPLES::
sage: Domino = GrowthDiagram.rules.Domino()
sage: GrowthDiagram(Domino, [[1,0,0],[0,0,1],[0,-1,0]])
1 0 0
0 0 1
0 -1 0
The vertices of the dual graded graph are integer partitions
whose Ferrers diagram can be tiled with dominoes::
sage: Domino.vertices(2)
[[4], [3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
Instead of passing the rule to :class:`GrowthDiagram`, we can
also call the rule to create growth diagrams. For example, let
us check Figure 3 in [Lam2004]_::
sage: G = Domino([[0,0,0,-1],[0,0,1,0],[-1,0,0,0],[0,1,0,0]]); G
0 0 0 -1
0 0 1 0
-1 0 0 0
0 1 0 0
sage: ascii_art([G.P_symbol(), G.Q_symbol()])
[ 1 2 4 1 2 2 ]
[ 1 2 4 1 3 3 ]
[ 3 3 , 4 4 ]
The spin of a domino tableau is half the number of vertical dominoes::
sage: def spin(T):
....: return sum(2*len(set(row)) - len(row) for row in T)/4
According to [Lam2004]_, the number of negative entries in the
signed permutation equals the sum of the spins of the two
associated tableaux::
sage: pi = [3,-1,2,4,-5]
sage: G = Domino(pi)
sage: list(G.filling().values()).count(-1) == spin(G.P_symbol()) + spin(G.Q_symbol())
True
Negating all signs transposes all the partitions::
sage: G.P_symbol() == Domino([-e for e in pi]).P_symbol().conjugate()
True
TESTS:
Check duality::
sage: Domino = GrowthDiagram.rules.Domino()
sage: Domino._check_duality(3)
sage: G = Domino([[0,1,0],[0,0,-1],[1,0,0]]); G
0 1 0
0 0 -1
1 0 0
sage: ascii_art([G.P_symbol(), G.Q_symbol()])
[ 1 1 1 1 ]
[ 2 3 2 2 ]
[ 2 3, 3 3 ]
sage: l = {pi: Domino(pi) for pi in SignedPermutations(4)}
sage: S = Set([(G.P_symbol(), G.Q_symbol()) for G in l.values()])
sage: S.cardinality()
384
Check the color-to-spin property for all permutations of size 4::
sage: all(list(G.filling().values()).count(-1) == spin(G.P_symbol()) + spin(G.Q_symbol())
....: for G in l.values())
True
Negating all signs transposes all the partitions::
sage: W = SignedPermutations(4)
sage: all(l[pi].P_symbol() == l[W([-e for e in pi])].P_symbol().conjugate()
....: for pi in l)
True
Check part of Theorem 4.2.3 in [Lee1996]_::
sage: def to_permutation(pi):
....: pi1 = list(pi)
....: n = len(pi1)
....: pi2 = [-e for e in pi][::-1] + pi1
....: return Permutation([e+n+1 if e<0 else e+n for e in pi2])
sage: RuleRSK = GrowthDiagram.rules.RSK()
sage: def good(pi):
....: return Domino(pi).P_chain()[-1] == RuleRSK(to_permutation(pi)).P_chain()[-1]
sage: all(good(pi) for pi in SignedPermutations(4))
True
sage: G = Domino(labels=[[1],[2,1]])
Traceback (most recent call last):
...
ValueError: [1] has smaller rank than [2, 1] but is not covered by it in Q
sage: G = Domino(labels=[[2,1],[1]])
Traceback (most recent call last):
...
ValueError: [1] has smaller rank than [2, 1] but is not covered by it in P
"""
# Each edge of this graded graph adds one domino, i.e. two boxes,
# so the grading step is 2.
r = 2
zero = _make_partition([])

def normalize_vertex(self, v):
    """
    Return ``v`` as a partition.

    EXAMPLES::

        sage: Domino = GrowthDiagram.rules.Domino()
        sage: Domino.normalize_vertex([3,1]).parent()
        Partitions
    """
    return _make_partition(v)

def vertices(self, n):
    r"""
    Return the vertices of the dual graded graph on level ``n``.

    EXAMPLES::

        sage: Domino = GrowthDiagram.rules.Domino()
        sage: Domino.vertices(2)
        [[4], [3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
    """
    # partitions of 2n with empty 2-core are exactly those whose
    # Ferrers diagram can be tiled with dominoes
    return [la for la in Partitions(2*n) if len(la.core(2)) == 0]

def rank(self, v):
    r"""
    Return the rank of ``v``.

    The rank of a vertex is half the size of the partition,
    which equals the number of dominoes in any filling.

    EXAMPLES::

        sage: Domino = GrowthDiagram.rules.Domino()
        sage: Domino.rank(Domino.vertices(3)[0])
        3
    """
    return v.size() // 2

def is_P_edge(self, v, w):
    r"""
    Return whether ``(v, w)`` is a `P`-edge of ``self``.

    ``(v, w)`` is an edge if ``v`` is obtained from ``w`` by deleting
    a domino.

    EXAMPLES::

        sage: Domino = GrowthDiagram.rules.Domino()
        sage: v = Domino.vertices(2)[1]; ascii_art(v)
        ***
        *
        sage: ascii_art([w for w in Domino.vertices(3) if Domino.is_P_edge(v, w)])
        [          ***   ]
        [          *     ]
        [ *****    ***  * ]
        [ *    , ***,  * ]
        sage: [w for w in Domino.vertices(4) if Domino.is_P_edge(v, w)]
        []
    """
    try:
        # the skew shape w/v must consist of exactly two cells
        (row_1, col_1), (row_2, col_2) = SkewPartition([w, v]).cells()
    except ValueError:
        return False
    # two cells form a domino iff they share a row or a column
    return row_1 == row_2 or col_1 == col_2

# The graph is self-dual for edges: same rule on the Q side.
is_Q_edge = is_P_edge

def P_symbol(self, P_chain):
    r"""
    Return the labels along the vertical boundary of a rectangular
    growth diagram as a (skew) domino tableau.

    EXAMPLES::

        sage: Domino = GrowthDiagram.rules.Domino()
        sage: G = Domino([[0,1,0],[0,0,-1],[1,0,0]])
        sage: G.P_symbol().pp()
        1 1
        2 3
        2 3
    """
    return SkewTableau(chain=P_chain)

Q_symbol = P_symbol
def forward_rule(self, y, t, x, content):
    r"""
    Return the output shape given three shapes and the content.

    See [Lam2004]_ Section 3.1.

    INPUT:

    - ``y, t, x`` -- three partitions from a cell in a
      growth diagram, labelled as::

          t x
          y

    - ``content`` -- `-1`, `0` or `1`; the content of the cell

    OUTPUT:

    The fourth partition according to domino insertion.

    EXAMPLES::

        sage: Domino = GrowthDiagram.rules.Domino()

    Rule 1::

        sage: Domino.forward_rule([], [], [], 1)
        [2]
        sage: Domino.forward_rule([1,1], [1,1], [1,1], 1)
        [3, 1]

    Rule 2::

        sage: Domino.forward_rule([1,1], [1,1], [1,1], -1)
        [1, 1, 1, 1]

    Rule 3::

        sage: Domino.forward_rule([1,1], [1,1], [2,2], 0)
        [2, 2]

    Rule 4::

        sage: Domino.forward_rule([2,2,2], [2,2], [3,3], 0)
        [3, 3, 2]
        sage: Domino.forward_rule([2], [], [1,1], 0)
        [2, 2]
        sage: Domino.forward_rule([1,1], [], [2], 0)
        [2, 2]
        sage: Domino.forward_rule([2], [], [2], 0)
        [2, 2]
        sage: Domino.forward_rule([4], [2], [4], 0)
        [4, 2]
        sage: Domino.forward_rule([1,1,1,1], [1,1], [1,1,1,1], 0)
        [2, 2, 1, 1]
        sage: Domino.forward_rule([2,1,1], [2], [4], 0)
        [4, 1, 1]
    """
    def union(la, mu):
        r"""
        Return the union of the two partitions.
        """
        return [max(p,q) for (p,q) in zip_longest(la, mu, fillvalue=0)]
    if content not in [0,1,-1]:
        raise ValueError("domino: the content of the filling must be in {-1,0,1}")
    if content == 1:
        if not (x == t == y):
            raise ValueError("all shapes must be equal")
        # add a horizontal domino to the first row
        if t == []:
            z = [2]
        else:
            z = [t[0] + 2] + t[1:]
    elif content == -1:
        if not (x == t == y):
            raise ValueError("all shapes must be equal")
        # add a vertical domino below the existing shape
        z = t + [1,1]
    elif content == 0 and (t == x or t == y):
        z = union(x, y)
    else:
        # content == 0 and t differs from x and y by
        # domino's gamma1 and gamma3
        # the following is certainly very slow
        gamma3 = set(SkewPartition([y, t]).cells())
        gamma1 = set(SkewPartition([x, t]).cells())
        diff = gamma1.intersection(gamma3)
        # y/t is a single domino here, so gamma3 unpacks to two cells
        cell1, cell2 = gamma3
        if len(diff) == 0:
            z = union(x, y)
        elif len(diff) == 1:
            z = copy(x)
            # diff is a single cell
            (k,l) = diff.pop()
            # add (k+1, l+1) to x
            # either (k, l+1) or (k+1, l) must also be added
            if z[k] <= l + 1:
                z[k] += 1
                z[k+1] += 1
            else:
                if len(z) <= k + 1:
                    z += [2]
                else:
                    z[k+1] += 2
        # diff has size 2, that is x == y
        elif cell1[0] == cell2[0]:
            z = copy(x)
            # a horizontal domino - add 2 to row below of gamma
            if len(z) <= cell1[0] + 1:
                z += [2]
            else:
                z[cell1[0]+1] += 2
        else:
            z = copy(x)
            # a vertical domino - add 2 to column right of gamma
            # find first row shorter than cell1[1]+1
            for r, p in enumerate(z):
                if p <= cell1[1] + 1:
                    z[r] += 1
                    z[r+1] += 1
                    break
            else:
                # for/else: no row was short enough to accept the domino
                raise NotImplementedError("domino: cannot call forward rule with shapes %s and content %s"
                                          % ((y, t, x), content))
    return z
#####################################################################
## Set the rules available from GrowthDiagram.rules.<tab>
#####################################################################

class Rules(object):
    """
    Catalog of rules for growth diagrams.
    """
    # Each attribute exposes one concrete Rule subclass under a short name.
    ShiftedShapes = RuleShiftedShapes
    LLMS = RuleLLMS
    BinaryWord = RuleBinaryWord
    Sylvester = RuleSylvester
    YoungFibonacci = RuleYoungFibonacci
    RSK = RuleRSK
    Burge = RuleBurge
    Domino = RuleDomino

# Attach the catalog so users can discover rules via GrowthDiagram.rules.<tab>.
GrowthDiagram.rules = Rules
| 33.274713 | 132 | 0.504024 |
ace55dd54b9691046a297fdfeb869c495807cc8d | 6,207 | py | Python | modules/dbnd-airflow/src/dbnd_airflow/tracking/dbnd_airflow_handler.py | kalebinn/dbnd | 89b6ac3537f861784be73ffe8989bf63fca7401c | [
"Apache-2.0"
] | null | null | null | modules/dbnd-airflow/src/dbnd_airflow/tracking/dbnd_airflow_handler.py | kalebinn/dbnd | 89b6ac3537f861784be73ffe8989bf63fca7401c | [
"Apache-2.0"
] | null | null | null | modules/dbnd-airflow/src/dbnd_airflow/tracking/dbnd_airflow_handler.py | kalebinn/dbnd | 89b6ac3537f861784be73ffe8989bf63fca7401c | [
"Apache-2.0"
] | null | null | null | import logging
import os
import attr
from airflow.operators.subdag_operator import SubDagOperator
from airflow.utils.log.file_task_handler import FileTaskHandler
from more_itertools import first_true
import dbnd
from dbnd import config, get_dbnd_project_config
from dbnd._core.constants import AD_HOC_DAG_PREFIX
from dbnd._core.context.databand_context import new_dbnd_context
from dbnd._core.task_run.log_preview import read_dbnd_log_preview
from dbnd._core.tracking.airflow_dag_inplace_tracking import (
calc_task_run_attempt_key_from_af_ti,
)
from dbnd._core.utils.uid_utils import get_uuid
AIRFLOW_FILE_TASK_HANDLER = FileTaskHandler.__name__
AIRFLOW_TASK_LOGGER = "airflow.task"
class DbndAirflowHandler(logging.Handler):
    """
    This is a logger handler that is used as an Entry Point to airflow run.
    It's injected to the Logger(name="airflow.task"), and used by entering the context on the beginning of the task
    instance run, and getting close when the task instance is done.
    """

    def __init__(self, logger, local_base, log_file_name_factory):
        """Store the airflow logger and the pieces of its file handler we reuse.

        ``local_base`` and ``log_file_name_factory`` come from Airflow's
        FileTaskHandler so we can locate the exact log file Airflow writes.
        """
        logging.Handler.__init__(self)
        self.dbnd_context = None
        self.dbnd_context_manage = None
        self.task_run_attempt_uid = None
        self.task_env_key = None
        self.airflow_logger = logger
        self.airflow_base_log_dir = local_base
        self.log_file_name_factory = log_file_name_factory
        self.log_file = ""

    def set_context(self, ti):
        """
        Airflow's log handler use this method to setup the context when running a TaskInstance(=ti).
        We use this method to setup the dbnd context and communicate information to
        the `<airflow_operator>_execute` task, that we create in `execute_tracking.py`.
        """
        # we setting up only when we are not in our own orchestration dag
        if ti.dag_id.startswith(AD_HOC_DAG_PREFIX):
            return
        if config.getboolean("mlflow_tracking", "databand_tracking"):
            self.airflow_logger.warning(
                "dbnd can't track mlflow and airflow together please disable dbnd config "
                "`databand_tracking` in section `mlflow_tracking`"
            )
            return
        # we are not tracking SubDagOperator
        if ti.operator == SubDagOperator.__name__:
            return
        task_key = calc_task_run_attempt_key_from_af_ti(ti)
        env_attempt_uid = os.environ.get(task_key)
        # This key is already set which means we are in --raw run
        if env_attempt_uid:
            # no need for further actions inside --raw run
            return
        # communicate the task_run_attempt_uid to inner processes
        # will be used for the task_run of `<airflow_operator>_execute` task
        self.task_run_attempt_uid = get_uuid()
        self.task_env_key = task_key
        os.environ[self.task_env_key] = str(self.task_run_attempt_uid)
        # airflow calculation for the relevant log_file
        log_relative_path = self.log_file_name_factory(ti, ti.try_number)
        self.log_file = os.path.join(self.airflow_base_log_dir, log_relative_path)
        # make sure we are not polluting the airflow logs
        get_dbnd_project_config().quiet_mode = True
        # tracking msg
        self.airflow_logger.info(
            "Tracked by Databand {version}".format(version=dbnd.__version__)
        )
        # context with disabled logs
        self.dbnd_context_manage = new_dbnd_context(conf={"log": {"disabled": True}})
        self.dbnd_context = self.dbnd_context_manage.__enter__()

    def close(self):
        """Ship the task log preview to the tracking store and tear everything down.

        Each cleanup step is wrapped separately so a failure in one never
        prevents the others from running.
        """
        if self.dbnd_context:
            try:
                fake_task_run = FakeTaskRun(
                    task_run_attempt_uid=self.task_run_attempt_uid
                )
                log_body = read_dbnd_log_preview(self.log_file)
                self.dbnd_context.tracking_store.save_task_run_log(
                    task_run=fake_task_run,
                    log_body=log_body,
                    local_log_path=self.log_file,
                )
            except Exception:
                self.airflow_logger.exception("Exception occurred when saving task log")
        # NOTE(review): indentation reconstructed from a stripped dump; this
        # reset is unconditional either way the original nested it.
        self.dbnd_context = None
        try:
            if self.dbnd_context_manage:
                self.dbnd_context_manage.__exit__(None, None, None)
        except Exception:
            self.airflow_logger.exception(
                "Exception occurred when trying to exit dbnd_context_manager"
            )
        finally:
            self.dbnd_context_manage = None
        try:
            if self.task_env_key and self.task_env_key in os.environ:
                del os.environ[self.task_env_key]
        except Exception:
            self.airflow_logger.exception(
                "Exception occurred when trying to remove task_env_key from env"
            )
        finally:
            self.task_env_key = None

    def emit(self, record):
        """
        This handler is not really writing records, so ignoring.
        """
        pass
@attr.s
class FakeTaskRun(object):
    """
    This is a workaround for using `tracking_store.save_task_run_log`
    cause it require a TaskRun with task_run_attempt_uid attr.
    Should be solved with refactoring the tracking_store interface.
    """

    # The only attribute the tracking store reads from the "task run".
    task_run_attempt_uid = attr.ib()
def set_dbnd_handler():
    """
    Build and inject the dbnd handler to airflow's logger.
    """
    airflow_logger = logging.getLogger(AIRFLOW_TASK_LOGGER)
    # Locate Airflow's own FileTaskHandler by class name; its base dir and
    # filename renderer are reused so dbnd reads the file Airflow writes.
    base_file_handler = first_true(
        airflow_logger.handlers,
        pred=lambda handler: handler.__class__.__name__ == AIRFLOW_FILE_TASK_HANDLER,
        default=None,
    )
    # If no file handler is configured there is nothing to piggyback on.
    if base_file_handler:
        dbnd_handler = create_dbnd_handler(airflow_logger, base_file_handler)
        airflow_logger.addHandler(dbnd_handler)
def create_dbnd_handler(airflow_logger, airflow_file_handler):
    """
    Factory for creating dbnd handler with airflow's logger and airflow's file handler (<-log_handler)
    """
    handler = DbndAirflowHandler(
        logger=airflow_logger,
        local_base=airflow_file_handler.local_base,
        log_file_name_factory=airflow_file_handler._render_filename,
    )
    return handler
| 34.104396 | 115 | 0.675528 |
ace55de95e113757f576e730c0f629a6e5eff8a6 | 1,427 | py | Python | src/main/resources/classes/assassin/smoke_bomb.py | WynnLab/WynnLab | 9950bc1485fa187394c1b1326fa0b5c6b6a1ac96 | [
"MIT"
] | 2 | 2021-03-17T19:28:36.000Z | 2021-03-26T09:31:22.000Z | src/main/resources/classes/assassin/smoke_bomb.py | FauxKiwi/Wynnlab | 9950bc1485fa187394c1b1326fa0b5c6b6a1ac96 | [
"MIT"
] | 5 | 2021-06-08T12:13:40.000Z | 2021-08-09T15:04:23.000Z | src/main/resources/classes/assassin/smoke_bomb.py | FauxKiwi/Wynnlab | 9950bc1485fa187394c1b1326fa0b5c6b6a1ac96 | [
"MIT"
] | 4 | 2021-08-09T15:17:23.000Z | 2022-03-05T14:08:26.000Z | from org.bukkit import Material, Sound
from org.bukkit.entity import Snowball
from org.bukkit.inventory import ItemStack
from org.bukkit.potion import PotionEffectType
from com.wynnlab.spells import PySpell
class Spell(PySpell):
    # Assassin "Smoke Bomb": throws three tagged snowball projectiles in a
    # fan (straight ahead, rotated +0.4 and -0.8 radians around Y).
    # NOTE(review): indentation reconstructed from a whitespace-stripped dump;
    # confirm which statements belong inside the invisibility branch.
    def tick(self):
        # While the caster has the invisibility effect, trigger stage 5 of
        # the spell instead — TODO confirm this branch should not return.
        if self.player.hasPotionEffect(PotionEffectType.INVISIBILITY):
            self.castSpell('ASSASSIN', 5)
        self.sound(Sound.ENTITY_ENDER_PEARL_THROW if self.clone else Sound.ENTITY_SNOWBALL_THROW, .8, 1.3)
        self.sound(Sound.ENTITY_FIREWORK_ROCKET_BLAST_FAR, .8, 1.3)
        self.sound(Sound.ENTITY_GENERIC_EXTINGUISH_FIRE, 1, 1.6)
        # launch vector: 3x the player's view direction
        v = self.player.getEyeLocation().getDirection().multiply(3)
        snowballs = (
            self.player.launchProjectile(Snowball, v),
            self.player.launchProjectile(Snowball, v.rotateAroundY(.4)),
            self.player.launchProjectile(Snowball, v.rotateAroundY(-.8))
        )
        for snowball in snowballs:
            # clones throw ender pearls instead of snowballs (visual only)
            if self.clone:
                snowball.setItem(ItemStack(Material.ENDER_PEARL))
            snowball.addScoreboardTag('smoke_bomb')
        self.player.addScoreboardTag('smoke_bomb')
def bomb_hit(event):
    # Projectile-hit callback: remove the snowball and cast the follow-up
    # spell (stage 6) at the impact point — the hit entity's location, or
    # the hit block's location when no entity was struck.
    event.getEntity().remove()
    PySpell.castSpell(event.getEntity().getShooter(), 'ASSASSIN', 6, event.getHitEntity().getLocation() if not event.getHitEntity() is None else event.getHitBlock().getLocation())

# Register the callback for all projectiles tagged 'smoke_bomb' above.
PySpell.registerProjectileHit('smoke_bomb', bomb_hit)
ace55edf6907fabba42f096fdf8f307916fd7e33 | 3,210 | py | Python | CODE FILES/1-root-causes.py | magnusmel/Troubleshooting-Python-Application-Development | 3a5a29b457244d46619c4744553bce411706a30d | [
"MIT"
] | 1 | 2021-04-19T05:27:10.000Z | 2021-04-19T05:27:10.000Z | CODE FILES/1-root-causes.py | magnusmel/Troubleshooting-Python-Application-Development | 3a5a29b457244d46619c4744553bce411706a30d | [
"MIT"
] | null | null | null | CODE FILES/1-root-causes.py | magnusmel/Troubleshooting-Python-Application-Development | 3a5a29b457244d46619c4744553bce411706a30d | [
"MIT"
] | 4 | 2019-06-28T11:03:24.000Z | 2021-04-18T12:31:25.000Z | import cProfile
import timeit
import profile
import textwrap
import functools
import time
print('Troubleshooting Python Application Development: Chapter 1')
print('-' * 79)
# --------------------------------------------------------------------------------
# 1.1
print('Measuring time between two lines of code with timeit')
print('-' * 79)
t = timeit.Timer(
"print('this line is timed')",
"print('put setup code here')")
print('TIMEIT:')
print(t.timeit(3))
print('REPEAT:')
print(t.repeat(5, 2))
range_size = 2000
count = 2000
vars_for_testing = ';'.join([
"xs = [(str(x), x) for x in range(2000)]",
"d = {}",
])
code_for_testing = textwrap.dedent(
"""
for str_x, x in xs:
d[str_x] = x
""")
def show_results(result):
    """Print per-pass and per-item timings derived from a timeit total.

    ``result`` is the total seconds reported by timeit for ``count``
    passes over a structure of ``range_size`` items (module globals).
    """
    # Reading module globals requires no ``global`` declaration; the
    # original ``global count, range_size`` statement was a no-op.
    print('{:6.2f} usec/pass'.format(
        1000000 * (result / count)), end=' ')
    print('{:6.2f} usec/item'.format(
        (1000000 * (result / count)) / range_size))
print("list len = {}, trying {} iterations".format(
range_size, count))
print('experiment:', end=' ')
t = timeit.Timer(code_for_testing, vars_for_testing)
show_results(t.timeit(number=count))
# --------------------------------------------------------------------------------
# 1.2
print('Figuring out where time is spent with the profile module')
print('-' * 79)
def fib(n):
    """Naive doubly-recursive Fibonacci, kept slow on purpose for profiling."""
    # Same recursion tree as the textbook definition, so the profiler
    # demos below still show the exponential call count.
    return n if n in (0, 1) else fib(n - 1) + fib(n - 2)
def fib_seq(n):
    """Return ``[fib(1), ..., fib(n)]`` (empty for n <= 0), recursively."""
    if n <= 0:
        return []
    return fib_seq(n - 1) + [fib(n)]
profile.run('print(fib_seq(20)); print()')
@functools.lru_cache(maxsize=None)
def fib_memoized(n):
    """Fibonacci with memoization: each value is computed only once."""
    return n if n in (0, 1) else fib_memoized(n - 1) + fib_memoized(n - 2)
def fib_seq_memoized(n):
    """Return ``[fib(1), ..., fib(n)]`` using the memoized helper."""
    if n <= 0:
        return []
    return fib_seq_memoized(n - 1) + [fib_memoized(n)]
# Running with context
profile.runctx(
'print(fib_seq(n)); print()',
globals(),
{'n': 20},
)
# --------------------------------------------------------------------------------
# 1.3
print('More precise time tracking with cProfile')
print('-' * 79)
print('Profiling 2 + 2 with cProfile:')
cProfile.run("2 + 2")
print('Profiling 3 functions with cProfile:')
def fast_function():
    # Returns almost immediately; baseline for the profile comparison.
    print('fast')

def medium_func():
    print('medium')
    time.sleep(1)  # simulated one-second workload

def slow_func():
    print('slow')
    time.sleep(2)  # simulated two-second workload

def test_func():
    # Driver profiled below; calls all three so cProfile shows the split.
    fast_function()
    medium_func()
    slow_func()

cProfile.run('test_func()')
# --------------------------------------------------------------------------------
# 1.4
print('Looking at memory consumption with memory_profiler')
print('-' * 79)
import memory_profiler
@memory_profiler.profile
def test_func():
    # NOTE(review): rebinds the name ``test_func`` defined earlier for the
    # cProfile demo; deliberate in this chapter-by-chapter tutorial script.
    numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    addition = 0
    for num in numbers:
        addition += num
    return addition

test_func()
@memory_profiler.profile
def memory_clearing_func():
    x = [1] * (10 ** 5)  # smaller list, kept alive and returned
    y = [2] * (10 ** 7)  # large list, freed below
    # Releasing y lets memory_profiler show the allocation drop line-by-line.
    del y
    return x

memory_clearing_func()
| 18.448276 | 82 | 0.541121 |
ace55fdd683500f8118bc662174aa2b663a09d9c | 766 | py | Python | .history/List of Capstone Projects/prime_factorization_20200516165205.py | EvanthiosPapadopoulos/Python3 | ab773fd458e365c1510f98ecac65965234c881e8 | [
"MIT"
] | 1 | 2020-05-18T17:50:00.000Z | 2020-05-18T17:50:00.000Z | .history/List of Capstone Projects/prime_factorization_20200516165205.py | EvanthiosPapadopoulos/Python3 | ab773fd458e365c1510f98ecac65965234c881e8 | [
"MIT"
] | null | null | null | .history/List of Capstone Projects/prime_factorization_20200516165205.py | EvanthiosPapadopoulos/Python3 | ab773fd458e365c1510f98ecac65965234c881e8 | [
"MIT"
] | null | null | null | '''
Prime Factorization - Have the user enter a number and find all Prime Factors (if there are any) and display them.
'''
import HeaderOfFiles
def prime_factor(number):
'''
Finding and display all Prime Factors
'''
my_list = []
i = 2
while i < number + 1:
if number % i == 0:
for i in len.my_list[]:
print("hi")
print(len.my_list)
my_list.append(i)
number = number/i
print(number)
i = 2
else:
i += 1
print(my_list)
# while True:
# try:
# x = int(input("Give me a number to find all Prime Factors: "))
# break
# except ValueError:
# print("Give a number please!")
prime_factor(120) | 22.529412 | 114 | 0.52611 |
ace5613dfdd593bb33160fd6f4a8f9e4fd2699ff | 1,256 | py | Python | sorting/Bubble_Sort/python/bubble.py | carpepraedam/data_structures | 526da0e20306a16ea147f857cd40ef41ea004ad3 | [
"MIT"
] | null | null | null | sorting/Bubble_Sort/python/bubble.py | carpepraedam/data_structures | 526da0e20306a16ea147f857cd40ef41ea004ad3 | [
"MIT"
] | null | null | null | sorting/Bubble_Sort/python/bubble.py | carpepraedam/data_structures | 526da0e20306a16ea147f857cd40ef41ea004ad3 | [
"MIT"
] | null | null | null | def comparator(val_a, val_b):
"""
Default comparator, checks if val_a > val_b
@param {number} val_a
@param {number} val_b
@return {bool} : True if val_a > val_b else False
"""
return val_a > val_b
def bubble(l, comparator=None):
    """
    Bubble sort a given list in place.

    @param {list} l - the list to sort
    @param {function(arg_a, arg_b)} comparator - comparator function
        reference; if it returns True for (l[j], l[j+1]) the two elements
        are swapped. Defaults to "greater than", i.e. ascending order
        (same behavior as the module-level default comparator).
    @return {tuple(list, number)} - (sorted list, number of sweeps)
    """
    # A None default decouples this function from the module-level
    # ``comparator`` name, so it works when imported in isolation.
    if comparator is None:
        def comparator(val_a, val_b):
            return val_a > val_b
    sweeps = 0
    for upper in range(len(l) - 1, 0, -1):
        sweeps += 1
        swapped = False
        for j in range(upper):
            if comparator(l[j], l[j + 1]):
                # inline tuple swap avoids a helper call per exchange
                l[j], l[j + 1] = l[j + 1], l[j]
                swapped = True
        # early exit: a sweep with no swaps means the list is sorted
        if not swapped:
            break
    return (l, sweeps)
def swap(l, index_a, index_b):
    """
    Swaps the values at two indices of a list.

    @param {list} l - the list
    @param {number} index_a - The first index
    @param {number} index_b - The second index
    """
    l[index_a], l[index_b] = l[index_b], l[index_a]
| 26.723404 | 84 | 0.568471 |
ace5615cdf54c4c949ef5c58b8f0482a57343d7d | 19,298 | py | Python | grr/core/grr_response_core/lib/objectfilter_test.py | nkrios/grr | 399e078ed522bf0555a2666fb086aa7809d54971 | [
"Apache-2.0"
] | null | null | null | grr/core/grr_response_core/lib/objectfilter_test.py | nkrios/grr | 399e078ed522bf0555a2666fb086aa7809d54971 | [
"Apache-2.0"
] | null | null | null | grr/core/grr_response_core/lib/objectfilter_test.py | nkrios/grr | 399e078ed522bf0555a2666fb086aa7809d54971 | [
"Apache-2.0"
] | 1 | 2020-07-09T01:08:48.000Z | 2020-07-09T01:08:48.000Z | #!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Tests for grr.lib.objectfilter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from absl.testing import absltest
from future.utils import iteritems
from grr_response_core.lib import objectfilter
attr1 = "Backup"
attr2 = "Archive"
hash1 = "123abc"
hash2 = "456def"
filename = "boot.ini"
class DummyObject(object):
    """Fixture carrying exactly one attribute, named and valued at construction."""

    def __init__(self, key, value):
        self.__dict__[key] = value
class HashObject(object):
    """Fixture wrapping a hash string; compares equal to its raw value."""

    def __init__(self, hash_value=None):
        self.value = hash_value

    @property
    def md5(self):
        # exposed under an algorithm name so filters can address it
        return self.value

    def __eq__(self, y):
        return self.value == y

    def __ne__(self, other):
        return not (self == other)

    def __lt__(self, y):
        return self.value < y
class Dll(object):
    """Fixture imitating a DLL with imported/exported function name lists."""

    def __init__(self, name, imported_functions=None, exported_functions=None):
        self.name = name
        self._imported_functions = imported_functions or []
        self.num_imported_functions = len(self._imported_functions)
        self.exported_functions = exported_functions or []
        self.num_exported_functions = len(self.exported_functions)

    @property
    def imported_functions(self):
        # deliberately a one-shot iterator rather than a list
        return iter(self._imported_functions)
class DummyFile(object):
    # Fixture imitating a file record: scalar, repeated, nested, generator
    # and callable attributes for exercising the value expanders.

    # Plain class-level leaf value, reachable without calling anything.
    non_callable_leaf = "yoda"

    def __init__(self):
        self.non_callable = HashObject(hash1)
        self.non_callable_repeated = [
            DummyObject("desmond", ["brotha", "brotha"]),
            DummyObject("desmond", ["brotha", "sista"])
        ]
        self.imported_dll1 = Dll("a.dll", ["FindWindow", "CreateFileA"])
        self.imported_dll2 = Dll("b.dll", ["RegQueryValueEx"])

    @property
    def name(self):
        return filename

    @property
    def attributes(self):
        return [attr1, attr2]

    @property
    def hash(self):
        return [HashObject(hash1), HashObject(hash2)]

    @property
    def mapping(self):
        # dict-valued attribute with a nested dict and mixed value types
        return {
            "hashes": [HashObject(hash1), HashObject(hash2)],
            "nested": {
                "attrs": [attr1, attr2]
            },
            "string": "mate",
            "float": 42.0
        }

    @property
    def size(self):
        return 10

    @property
    def deferred_values(self):
        # generator property: values are produced lazily
        for v in ["a", "b"]:
            yield v

    @property
    def novalues(self):
        return []

    @property
    def imported_dlls(self):
        return [self.imported_dll1, self.imported_dll2]

    def Callable(self):
        # Raises if any code path actually invokes it.
        raise RuntimeError("This can not be called.")

    @property
    def float(self):
        return 123.9823
class ObjectFilterTest(absltest.TestCase):
def setUp(self):
self.file = DummyFile()
self.filter_imp = objectfilter.LowercaseAttributeFilterImplementation
self.value_expander = self.filter_imp.FILTERS["ValueExpander"]
operator_tests = {
objectfilter.Less: [
(True, ["size", 1000]),
(True, ["size", 11]),
(False, ["size", 10]),
(False, ["size", 0]),
(False, ["float", 1.0]),
(True, ["float", 123.9824]),
],
objectfilter.LessEqual: [
(True, ["size", 1000]),
(True, ["size", 11]),
(True, ["size", 10]),
(False, ["size", 9]),
(False, ["float", 1.0]),
(True, ["float", 123.9823]),
],
objectfilter.Greater: [
(True, ["size", 1]),
(True, ["size", 9.23]),
(False, ["size", 10]),
(False, ["size", 1000]),
(True, ["float", 122]),
(True, ["float", 1.0]),
],
objectfilter.GreaterEqual: [
(False, ["size", 1000]),
(False, ["size", 11]),
(True, ["size", 10]),
(True, ["size", 0]),
# Floats work fine too
(True, ["float", 122]),
(True, ["float", 123.9823]),
# Comparisons works with strings, although it might be a bit silly
(True, ["name", "aoot.ini"]),
],
objectfilter.Contains: [
# Contains works with strings
(True, ["name", "boot.ini"]),
(True, ["name", "boot"]),
(False, ["name", "meh"]),
# Works with generators
(True, ["imported_dlls.imported_functions", "FindWindow"]),
# But not with numbers
(False, ["size", 12]),
],
objectfilter.NotContains: [
(False, ["name", "boot.ini"]),
(False, ["name", "boot"]),
(True, ["name", "meh"]),
],
objectfilter.Equals: [
(True, ["name", "boot.ini"]),
(False, ["name", "foobar"]),
(True, ["float", 123.9823]),
],
objectfilter.NotEquals: [
(False, ["name", "boot.ini"]),
(True, ["name", "foobar"]),
(True, ["float", 25]),
],
objectfilter.InSet: [
(True, ["name", ["boot.ini", "autoexec.bat"]]),
(True, ["name", "boot.ini"]),
(False, ["name", "NOPE"]),
# All values of attributes are within these
(True, ["attributes", ["Archive", "Backup", "Nonexisting"]]),
# Not all values of attributes are within these
(False, ["attributes", ["Executable", "Sparse"]]),
],
objectfilter.NotInSet: [
(False, ["name", ["boot.ini", "autoexec.bat"]]),
(False, ["name", "boot.ini"]),
(True, ["name", "NOPE"]),
],
objectfilter.Regexp: [
(True, ["name", "^boot.ini$"]),
(True, ["name", "boot.ini"]),
(False, ["name", "^$"]),
(True, ["attributes", "Archive"]),
# One can regexp numbers if they're inclined
(True, ["size", 0]),
# But regexp doesn't work with lists or generators for the moment
(False, ["imported_dlls.imported_functions", "FindWindow"])
],
}
def testBinaryOperators(self):
for operator, test_data in iteritems(self.operator_tests):
for test_unit in test_data:
print("Testing %s with %s and %s" % (operator, test_unit[0],
test_unit[1]))
kwargs = {
"arguments": test_unit[1],
"value_expander": self.value_expander
}
self.assertEqual(test_unit[0], operator(**kwargs).Matches(self.file))
def testExpand(self):
# Case insensitivity
values_lowercase = self.value_expander().Expand(self.file, "size")
values_uppercase = self.value_expander().Expand(self.file, "Size")
self.assertListEqual(list(values_lowercase), list(values_uppercase))
# Existing, non-repeated, leaf is a value
values = self.value_expander().Expand(self.file, "size")
self.assertListEqual(list(values), [10])
# Existing, non-repeated, leaf is a string in mapping
values = self.value_expander().Expand(self.file, "mapping.string")
self.assertListEqual(list(values), ["mate"])
# Existing, non-repeated, leaf is a scalar in mapping
values = self.value_expander().Expand(self.file, "mapping.float")
self.assertListEqual(list(values), [42.0])
# Existing, non-repeated, leaf is iterable
values = self.value_expander().Expand(self.file, "attributes")
self.assertListEqual(list(values), [[attr1, attr2]])
# Existing, repeated, leaf is value
values = self.value_expander().Expand(self.file, "hash.md5")
self.assertListEqual(list(values), [hash1, hash2])
# Existing, repeated, leaf is iterable
values = self.value_expander().Expand(self.file,
"non_callable_repeated.desmond")
self.assertListEqual(
list(values), [["brotha", "brotha"], ["brotha", "sista"]])
# Existing, repeated, leaf is mapping.
values = self.value_expander().Expand(self.file, "mapping.hashes")
self.assertListEqual(list(values), [hash1, hash2])
values = self.value_expander().Expand(self.file, "mapping.nested.attrs")
self.assertListEqual(list(values), [[attr1, attr2]])
# Now with an iterator
values = self.value_expander().Expand(self.file, "deferred_values")
self.assertListEqual([list(value) for value in values], [["a", "b"]])
# Iterator > generator
values = self.value_expander().Expand(self.file,
"imported_dlls.imported_functions")
expected = [["FindWindow", "CreateFileA"], ["RegQueryValueEx"]]
self.assertListEqual([list(value) for value in values], expected)
# Non-existing first path
values = self.value_expander().Expand(self.file, "nonexistant")
self.assertListEqual(list(values), [])
# Non-existing in the middle
values = self.value_expander().Expand(self.file, "hash.mink.boo")
self.assertListEqual(list(values), [])
# Non-existing as a leaf
values = self.value_expander().Expand(self.file, "hash.mink")
self.assertListEqual(list(values), [])
# Non-callable leaf
values = self.value_expander().Expand(self.file, "non_callable_leaf")
self.assertListEqual(list(values), [DummyFile.non_callable_leaf])
# callable
values = self.value_expander().Expand(self.file, "Callable")
self.assertListEqual(list(values), [])
# leaf under a callable. Will return nothing
values = self.value_expander().Expand(self.file, "Callable.a")
self.assertListEqual(list(values), [])
def testGenericBinaryOperator(self):
class TestBinaryOperator(objectfilter.GenericBinaryOperator):
values = list()
def Operation(self, x, _):
return self.values.append(x)
# Test a common binary operator
tbo = TestBinaryOperator(
arguments=["whatever", 0], value_expander=self.value_expander)
self.assertEqual(tbo.right_operand, 0)
self.assertEqual(tbo.args[0], "whatever")
tbo.Matches(DummyObject("whatever", "id"))
tbo.Matches(DummyObject("whatever", "id2"))
tbo.Matches(DummyObject("whatever", "bg"))
tbo.Matches(DummyObject("whatever", "bg2"))
self.assertListEqual(tbo.values, ["id", "id2", "bg", "bg2"])
def testContext(self):
self.assertRaises(
objectfilter.InvalidNumberOfOperands,
objectfilter.Context,
arguments=["context"],
value_expander=self.value_expander)
self.assertRaises(
objectfilter.InvalidNumberOfOperands,
objectfilter.Context,
arguments=[
"context",
objectfilter.Equals(
arguments=["path", "value"],
value_expander=self.value_expander),
objectfilter.Equals(
arguments=["another_path", "value"],
value_expander=self.value_expander)
],
value_expander=self.value_expander)
# "One imported_dll imports 2 functions AND one imported_dll imports
# function RegQueryValueEx"
arguments = [
objectfilter.Equals(["imported_dlls.num_imported_functions", 1],
value_expander=self.value_expander),
objectfilter.Contains(
["imported_dlls.imported_functions", "RegQueryValueEx"],
value_expander=self.value_expander)
]
condition = objectfilter.AndFilter(arguments=arguments)
# Without context, it matches because both filters match separately
self.assertEqual(True, condition.Matches(self.file))
arguments = [
objectfilter.Equals(["num_imported_functions", 2],
value_expander=self.value_expander),
objectfilter.Contains(["imported_functions", "RegQueryValueEx"],
value_expander=self.value_expander)
]
condition = objectfilter.AndFilter(arguments=arguments)
# "The same DLL imports 2 functions AND one of these is RegQueryValueEx"
context = objectfilter.Context(
arguments=["imported_dlls", condition],
value_expander=self.value_expander)
# With context, it doesn't match because both don't match in the same dll
self.assertEqual(False, context.Matches(self.file))
# "One imported_dll imports only 1 function AND one imported_dll imports
# function RegQueryValueEx"
condition = objectfilter.AndFilter(arguments=[
objectfilter.Equals(
arguments=["num_imported_functions", 1],
value_expander=self.value_expander),
objectfilter.Contains(["imported_functions", "RegQueryValueEx"],
value_expander=self.value_expander)
])
# "The same DLL imports 1 function AND it"s RegQueryValueEx"
context = objectfilter.Context(["imported_dlls", condition],
value_expander=self.value_expander)
self.assertEqual(True, context.Matches(self.file))
# Now test the context with a straight query
query = """
@imported_dlls
(
imported_functions contains "RegQueryValueEx"
AND num_imported_functions == 1
)
"""
filter_ = objectfilter.Parser(query).Parse()
filter_ = filter_.Compile(self.filter_imp)
self.assertEqual(True, filter_.Matches(self.file))
def testRegexpRaises(self):
self.assertRaises(
ValueError,
objectfilter.Regexp,
arguments=["name", "I [dont compile"],
value_expander=self.value_expander)
def testEscaping(self):
parser = objectfilter.Parser(r"a is '\n'").Parse()
self.assertEqual(parser.args[0], "\n")
# Invalid escape sequence
parser = objectfilter.Parser(r"a is '\z'")
self.assertRaises(objectfilter.ParseError, parser.Parse)
# Can escape the backslash
parser = objectfilter.Parser(r"a is '\\'").Parse()
self.assertEqual(parser.args[0], "\\")
# HEX ESCAPING
# This fails as it's not really a hex escaped string
parser = objectfilter.Parser(r"a is '\xJZ'")
self.assertRaises(objectfilter.ParseError, parser.Parse)
# Instead, this is what one should write
parser = objectfilter.Parser(r"a is '\\xJZ'").Parse()
self.assertEqual(parser.args[0], r"\xJZ")
# Standard hex-escape
parser = objectfilter.Parser(r"a is '\x41\x41\x41'").Parse()
self.assertEqual(parser.args[0], "AAA")
# Hex-escape + a character
parser = objectfilter.Parser(r"a is '\x414'").Parse()
self.assertEqual(parser.args[0], r"A4")
# How to include r'\x41'
parser = objectfilter.Parser(r"a is '\\x41'").Parse()
self.assertEqual(parser.args[0], r"\x41")
def testParse(self):
# Arguments are either int, float or quoted string
objectfilter.Parser("attribute == 1").Parse()
objectfilter.Parser("attribute == 0x10").Parse()
objectfilter.Parser("attribute == 0xa").Parse()
objectfilter.Parser("attribute == 0xFF").Parse()
parser = objectfilter.Parser("attribute == 1a")
self.assertRaises(objectfilter.ParseError, parser.Parse)
objectfilter.Parser("attribute == 1.2").Parse()
objectfilter.Parser("attribute == 'bla'").Parse()
objectfilter.Parser("attribute == \"bla\"").Parse()
parser = objectfilter.Parser("something == red")
self.assertRaises(objectfilter.ParseError, parser.Parse)
# Can't start with AND
parser = objectfilter.Parser("and something is 'Blue'")
self.assertRaises(objectfilter.ParseError, parser.Parse)
# Need to close braces
objectfilter.Parser("(a is 3)").Parse()
parser = objectfilter.Parser("(a is 3")
self.assertRaises(objectfilter.ParseError, parser.Parse)
# Need to open braces to close them
parser = objectfilter.Parser("a is 3)")
self.assertRaises(objectfilter.ParseError, parser.Parse)
# Can parse lists
objectfilter.Parser("attribute inset [1, 2, '3', 4.01, 0xa]").Parse()
# Need to close square braces for lists.
parser = objectfilter.Parser("attribute inset [1, 2, '3', 4.01, 0xA")
self.assertRaises(objectfilter.ParseError, parser.Parse)
# Need to opensquare braces to close lists.
parser = objectfilter.Parser("attribute inset 1, 2, '3', 4.01]")
self.assertRaises(objectfilter.ParseError, parser.Parse)
# Context Operator alone is not accepted
parser = objectfilter.Parser("@attributes")
self.assertRaises(objectfilter.ParseError, parser.Parse)
# Accepted only with braces
objectfilter.Parser("@attributes( name is 'adrien')").Parse()
# Not without them
parser = objectfilter.Parser("@attributes name is 'adrien'")
self.assertRaises(objectfilter.ParseError, parser.Parse)
# Can nest context operators
query = "@imported_dlls( @imported_function( name is 'OpenFileA'))"
objectfilter.Parser(query).Parse()
# Can nest context operators and mix braces without it messing up
query = "@imported_dlls( @imported_function( name is 'OpenFileA'))"
parser = objectfilter.Parser(query).Parse()
query = """
@imported_dlls
(
@imported_function
(
name is 'OpenFileA' and ordinal == 12
)
)
"""
parser = objectfilter.Parser(query).Parse()
# Mix context and binary operators
query = """
@imported_dlls
(
@imported_function
(
name is 'OpenFileA'
) AND num_functions == 2
)
"""
parser = objectfilter.Parser(query).Parse()
# Also on the right
query = """
@imported_dlls
(
num_functions == 2 AND
@imported_function
(
name is 'OpenFileA'
)
)
"""
# Altogether
# There's an imported dll that imports OpenFileA AND
# an imported DLL matching advapi32.dll that imports RegQueryValueExA AND
# and it exports a symbol called 'inject'
query = """
@imported_dlls( @imported_function ( name is 'OpenFileA' ) )
AND
@imported_dlls (
name regexp '(?i)advapi32.dll'
AND @imported_function ( name is 'RegQueryValueEx' )
)
AND @exported_symbols(name is 'inject')
"""
def testInset(self):
obj = DummyObject("clone", 2)
parser = objectfilter.Parser("clone inset [1, 2, 3]").Parse()
filter_ = parser.Compile(self.filter_imp)
self.assertEqual(filter_.Matches(obj), True)
obj = DummyObject("troubleshooter", "red")
parser = objectfilter.Parser("troubleshooter inset ['red', 'blue']").Parse()
filter_ = parser.Compile(self.filter_imp)
self.assertEqual(filter_.Matches(obj), True)
obj = DummyObject("troubleshooter", "infrared")
parser = objectfilter.Parser("troubleshooter inset ['red', 'blue']").Parse()
filter_ = parser.Compile(self.filter_imp)
self.assertEqual(filter_.Matches(obj), False)
def testCompile(self):
obj = DummyObject("something", "Blue")
parser = objectfilter.Parser("something == 'Blue'").Parse()
filter_ = parser.Compile(self.filter_imp)
self.assertEqual(filter_.Matches(obj), True)
parser = objectfilter.Parser("something == 'Red'").Parse()
filter_ = parser.Compile(self.filter_imp)
self.assertEqual(filter_.Matches(obj), False)
parser = objectfilter.Parser("something == \"Red\"").Parse()
filter_ = parser.Compile(self.filter_imp)
self.assertEqual(filter_.Matches(obj), False)
obj = DummyObject("size", 4)
parser = objectfilter.Parser("size < 3").Parse()
filter_ = parser.Compile(self.filter_imp)
self.assertEqual(filter_.Matches(obj), False)
parser = objectfilter.Parser("size == 4").Parse()
filter_ = parser.Compile(self.filter_imp)
self.assertEqual(filter_.Matches(obj), True)
query = "something is 'Blue' and size notcontains 3"
parser = objectfilter.Parser(query).Parse()
filter_ = parser.Compile(self.filter_imp)
self.assertEqual(filter_.Matches(obj), False)
if __name__ == "__main__":
absltest.main()
| 34.460714 | 80 | 0.639704 |
ace56179b62cadb167107ccee6329443f0185261 | 1,218 | py | Python | src/asobann/config_test.py | hidekazu-higashi/asobann_app | 739032c7db05c6862a5fc08a2ea30b23420463a3 | [
"MIT"
] | null | null | null | src/asobann/config_test.py | hidekazu-higashi/asobann_app | 739032c7db05c6862a5fc08a2ea30b23420463a3 | [
"MIT"
] | null | null | null | src/asobann/config_test.py | hidekazu-higashi/asobann_app | 739032c7db05c6862a5fc08a2ea30b23420463a3 | [
"MIT"
] | null | null | null | import os
if 'REDIS_URI' in os.environ:
REDIS_URI = os.environ['REDIS_URI']
else:
REDIS_URI = None
MONGO_URI = 'mongodb://localhost:27017/ex2test'
PORT = 10011
BASE_URL = '*'
if 'GOOGLE_ANALYTICS_ID' in os.environ:
GOOGLE_ANALYTICS_ID = os.environ['GOOGLE_ANALYTICS_ID']
else:
GOOGLE_ANALYTICS_ID = None
if 'ASOBANN_DEBUG_HANDLER_WAIT' in os.environ:
DEBUG_HANDLER_WAIT = os.environ['ASOBANN_DEBUG_HANDLER_WAIT']
if 'UPLOADED_IMAGE_STORE' in os.environ:
UPLOADED_IMAGE_STORE = os.environ['UPLOADED_IMAGE_STORE']
else:
UPLOADED_IMAGE_STORE = 'local'
use_aws = UPLOADED_IMAGE_STORE.lower() == 's3'
if use_aws:
AWS_KEY = os.environ['AWS_KEY']
AWS_SECRET = os.environ['AWS_SECRET']
AWS_REGION = os.environ['AWS_REGION']
AWS_S3_IMAGE_BUCKET_NAME = os.environ['AWS_S3_IMAGE_BUCKET_NAME']
else:
AWS_KEY = None
AWS_SECRET = None
if 'ASOBANN_DEBUG_OPTS' in os.environ:
opts = os.environ['ASOBANN_DEBUG_OPTS'].split(',')
DEBUG_PERFORMANCE_RECORDING = 'PERFORMANCE_RECORDING' in opts
DEBUG_ORDER_OF_UPDATES = 'ORDER_OF_UPDATES' in opts
DEBUG_LOG = 'LOG' in opts
if "ASOBANN_ACCESS_LOG" in os.environ:
ACCESS_LOG = True
else:
ACCESS_LOG = False
| 26.478261 | 69 | 0.73399 |
ace561f4c01bf64f1fb4e8730a97e4c71aa4cf55 | 3,782 | py | Python | huaweicloud-sdk-kafka/huaweicloudsdkkafka/v2/model/create_instance_user_req.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-kafka/huaweicloudsdkkafka/v2/model/create_instance_user_req.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-kafka/huaweicloudsdkkafka/v2/model/create_instance_user_req.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
class CreateInstanceUserReq:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'user_name': 'str',
'user_passwd': 'str'
}
attribute_map = {
'user_name': 'user_name',
'user_passwd': 'user_passwd'
}
def __init__(self, user_name=None, user_passwd=None):
"""CreateInstanceUserReq - a model defined in huaweicloud sdk"""
self._user_name = None
self._user_passwd = None
self.discriminator = None
if user_name is not None:
self.user_name = user_name
if user_passwd is not None:
self.user_passwd = user_passwd
@property
def user_name(self):
"""Gets the user_name of this CreateInstanceUserReq.
用户名称。
:return: The user_name of this CreateInstanceUserReq.
:rtype: str
"""
return self._user_name
@user_name.setter
def user_name(self, user_name):
"""Sets the user_name of this CreateInstanceUserReq.
用户名称。
:param user_name: The user_name of this CreateInstanceUserReq.
:type: str
"""
self._user_name = user_name
@property
def user_passwd(self):
"""Gets the user_passwd of this CreateInstanceUserReq.
用户密码。 密码不能和用户名相同。 复杂度要求: - 输入长度为8到32位的字符串。 - 必须包含如下四种字符中的两种组合: - 小写字母 - 大写字母 - 数字 - 特殊字符包括(`~!@#$%^&*()-_=+\\|[{}]:'\",<.>/?)
:return: The user_passwd of this CreateInstanceUserReq.
:rtype: str
"""
return self._user_passwd
@user_passwd.setter
def user_passwd(self, user_passwd):
"""Sets the user_passwd of this CreateInstanceUserReq.
用户密码。 密码不能和用户名相同。 复杂度要求: - 输入长度为8到32位的字符串。 - 必须包含如下四种字符中的两种组合: - 小写字母 - 大写字母 - 数字 - 特殊字符包括(`~!@#$%^&*()-_=+\\|[{}]:'\",<.>/?)
:param user_passwd: The user_passwd of this CreateInstanceUserReq.
:type: str
"""
self._user_passwd = user_passwd
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateInstanceUserReq):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.405797 | 141 | 0.550238 |
ace56393a1e5cf6c2b4b5fc3e9c528f6a136adc9 | 9,766 | py | Python | dnnlib/tflib/tfutil.py | DLW3D/stylegan2encoder | 42f01f27dd655b3b24ce460b44a6e779ff891f2c | [
"BSD-Source-Code"
] | 5 | 2020-02-29T10:47:18.000Z | 2021-02-10T15:25:11.000Z | dnnlib/tflib/tfutil.py | DLW3D/stylegan2encoder | 42f01f27dd655b3b24ce460b44a6e779ff891f2c | [
"BSD-Source-Code"
] | null | null | null | dnnlib/tflib/tfutil.py | DLW3D/stylegan2encoder | 42f01f27dd655b3b24ce460b44a6e779ff891f2c | [
"BSD-Source-Code"
] | null | null | null | # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Miscellaneous helper utils for Tensorflow."""
import os
import numpy as np
import tensorflow as tf
# Silence deprecation warnings from TensorFlow 1.13 onwards
import logging
import config
logging.getLogger('tensorflow').setLevel(logging.ERROR)
import tensorflow.contrib # requires TensorFlow 1.x!
tf.contrib = tensorflow.contrib
from typing import Any, Iterable, List, Union
TfExpression = Union[tf.Tensor, tf.Variable, tf.Operation]
"""A type that represents a valid Tensorflow expression."""
TfExpressionEx = Union[TfExpression, int, float, np.ndarray]
"""A type that can be converted to a valid Tensorflow expression."""
def run(*args, **kwargs) -> Any:
"""Run the specified ops in the default session."""
assert_tf_initialized()
return tf.get_default_session().run(*args, **kwargs)
def is_tf_expression(x: Any) -> bool:
"""Check whether the input is a valid Tensorflow expression, i.e., Tensorflow Tensor, Variable, or Operation."""
return isinstance(x, (tf.Tensor, tf.Variable, tf.Operation))
def shape_to_list(shape: Iterable[tf.Dimension]) -> List[Union[int, None]]:
"""Convert a Tensorflow shape to a list of ints. Retained for backwards compatibility -- use TensorShape.as_list() in new code."""
return [dim.value for dim in shape]
def flatten(x: TfExpressionEx) -> TfExpression:
"""Shortcut function for flattening a tensor."""
with tf.name_scope("Flatten"):
return tf.reshape(x, [-1])
def log2(x: TfExpressionEx) -> TfExpression:
"""Logarithm in base 2."""
with tf.name_scope("Log2"):
return tf.log(x) * np.float32(1.0 / np.log(2.0))
def exp2(x: TfExpressionEx) -> TfExpression:
"""Exponent in base 2."""
with tf.name_scope("Exp2"):
return tf.exp(x * np.float32(np.log(2.0)))
def lerp(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpressionEx:
"""Linear interpolation."""
with tf.name_scope("Lerp"):
return a + (b - a) * t
def lerp_clip(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpression:
"""Linear interpolation with clip."""
with tf.name_scope("LerpClip"):
return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)
def absolute_name_scope(scope: str) -> tf.name_scope:
"""Forcefully enter the specified name scope, ignoring any surrounding scopes."""
return tf.name_scope(scope + "/")
def absolute_variable_scope(scope: str, **kwargs) -> tf.variable_scope:
"""Forcefully enter the specified variable scope, ignoring any surrounding scopes."""
return tf.variable_scope(tf.VariableScope(name=scope, **kwargs), auxiliary_name_scope=False)
def _sanitize_tf_config(config_dict: dict = None) -> dict:
# Defaults.
cfg = dict()
cfg["rnd.np_random_seed"] = None # Random seed for NumPy. None = keep as is.
cfg["rnd.tf_random_seed"] = "auto" # Random seed for TensorFlow. 'auto' = derive from NumPy random state. None = keep as is.
cfg["env.TF_CPP_MIN_LOG_LEVEL"] = "1" # 0 = Print all available debug info from TensorFlow. 1 = Print warnings and errors, but disable debug info.
cfg["graph_options.place_pruned_graph"] = True # False = Check that all ops are available on the designated device. True = Skip the check for ops that are not used.
cfg["gpu_options.allow_growth"] = config.allow_growth # False = Allocate all GPU memory at the beginning. True = Allocate only as much GPU memory as needed.
# Remove defaults for environment variables that are already set.
for key in list(cfg):
fields = key.split(".")
if fields[0] == "env":
assert len(fields) == 2
if fields[1] in os.environ:
del cfg[key]
# User overrides.
if config_dict is not None:
cfg.update(config_dict)
return cfg
def init_tf(config_dict: dict = None) -> None:
"""Initialize TensorFlow session using good default settings."""
# Skip if already initialized.
if tf.get_default_session() is not None:
return
# Setup config dict and random seeds.
cfg = _sanitize_tf_config(config_dict)
np_random_seed = cfg["rnd.np_random_seed"]
if np_random_seed is not None:
np.random.seed(np_random_seed)
tf_random_seed = cfg["rnd.tf_random_seed"]
if tf_random_seed == "auto":
tf_random_seed = np.random.randint(1 << 31)
if tf_random_seed is not None:
tf.set_random_seed(tf_random_seed)
# Setup environment variables.
for key, value in cfg.items():
fields = key.split(".")
if fields[0] == "env":
assert len(fields) == 2
os.environ[fields[1]] = str(value)
# Create default TensorFlow session.
create_session(cfg, force_as_default=True)
def assert_tf_initialized():
"""Check that TensorFlow session has been initialized."""
if tf.get_default_session() is None:
raise RuntimeError("No default TensorFlow session found. Please call dnnlib.tflib.init_tf().")
def create_session(config_dict: dict = None, force_as_default: bool = False) -> tf.Session:
"""Create tf.Session based on config dict."""
# Setup TensorFlow config proto.
cfg = _sanitize_tf_config(config_dict)
config_proto = tf.ConfigProto()
for key, value in cfg.items():
fields = key.split(".")
if fields[0] not in ["rnd", "env"]:
obj = config_proto
for field in fields[:-1]:
obj = getattr(obj, field)
setattr(obj, fields[-1], value)
# Create session.
session = tf.Session(config=config_proto)
if force_as_default:
# pylint: disable=protected-access
session._default_session = session.as_default()
session._default_session.enforce_nesting = False
session._default_session.__enter__()
return session
def init_uninitialized_vars(target_vars: List[tf.Variable] = None) -> None:
"""Initialize all tf.Variables that have not already been initialized.
Equivalent to the following, but more efficient and does not bloat the tf graph:
tf.variables_initializer(tf.report_uninitialized_variables()).run()
"""
assert_tf_initialized()
if target_vars is None:
target_vars = tf.global_variables()
test_vars = []
test_ops = []
with tf.control_dependencies(None): # ignore surrounding control_dependencies
for var in target_vars:
assert is_tf_expression(var)
try:
tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/IsVariableInitialized:0"))
except KeyError:
# Op does not exist => variable may be uninitialized.
test_vars.append(var)
with absolute_name_scope(var.name.split(":")[0]):
test_ops.append(tf.is_variable_initialized(var))
init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited]
run([var.initializer for var in init_vars])
def set_vars(var_to_value_dict: dict) -> None:
"""Set the values of given tf.Variables.
Equivalent to the following, but more efficient and does not bloat the tf graph:
tflib.run([tf.assign(var, value) for var, value in var_to_value_dict.items()]
"""
assert_tf_initialized()
ops = []
feed_dict = {}
for var, value in var_to_value_dict.items():
assert is_tf_expression(var)
try:
setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/setter:0")) # look for existing op
except KeyError:
with absolute_name_scope(var.name.split(":")[0]):
with tf.control_dependencies(None): # ignore surrounding control_dependencies
setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, "new_value"), name="setter") # create new setter
ops.append(setter)
feed_dict[setter.op.inputs[1]] = value
run(ops, feed_dict)
def create_var_with_large_initial_value(initial_value: np.ndarray, *args, **kwargs):
"""Create tf.Variable with large initial value without bloating the tf graph."""
assert_tf_initialized()
assert isinstance(initial_value, np.ndarray)
zeros = tf.zeros(initial_value.shape, initial_value.dtype)
var = tf.Variable(zeros, *args, **kwargs)
set_vars({var: initial_value})
return var
def convert_images_from_uint8(images, drange=[-1,1], nhwc_to_nchw=False):
"""Convert a minibatch of images from uint8 to float32 with configurable dynamic range.
Can be used as an input transformation for Network.run().
"""
images = tf.cast(images, tf.float32)
if nhwc_to_nchw:
images = tf.transpose(images, [0, 3, 1, 2])
return images * ((drange[1] - drange[0]) / 255) + drange[0]
def convert_images_to_uint8(images, drange=[-1,1], nchw_to_nhwc=False, shrink=1, uint8_cast=True):
"""Convert a minibatch of images from float32 to uint8 with configurable dynamic range.
Can be used as an output transformation for Network.run().
"""
images = tf.cast(images, tf.float32)
if shrink > 1:
ksize = [1, 1, shrink, shrink]
images = tf.nn.avg_pool(images, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW")
if nchw_to_nhwc:
images = tf.transpose(images, [0, 2, 3, 1])
scale = 255 / (drange[1] - drange[0])
images = images * scale + (0.5 - drange[0] * scale)
if uint8_cast:
images = tf.saturate_cast(images, tf.uint8)
return images
| 37.852713 | 173 | 0.670797 |
ace563e0e7da264a8264fe7fc0ea0ae859649c6e | 132 | py | Python | feed/admin.py | LeonMaxwell/mianco | 88c1969bebbdf39314927976497f11f830b4c58e | [
"MIT"
] | null | null | null | feed/admin.py | LeonMaxwell/mianco | 88c1969bebbdf39314927976497f11f830b4c58e | [
"MIT"
] | null | null | null | feed/admin.py | LeonMaxwell/mianco | 88c1969bebbdf39314927976497f11f830b4c58e | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Announcement
# Регистрация модели в админке
admin.site.register(Announcement)
| 22 | 33 | 0.833333 |
ace5641cdbcbe2915f46a4e07c341540f9a7cbb7 | 2,949 | py | Python | appapy/templating/repository.py | AppalachiaInteractive/com.appalachia.python.common_py | 6d04a041dfb883846919866b49d956721e9e785f | [
"MIT"
] | null | null | null | appapy/templating/repository.py | AppalachiaInteractive/com.appalachia.python.common_py | 6d04a041dfb883846919866b49d956721e9e785f | [
"MIT"
] | null | null | null | appapy/templating/repository.py | AppalachiaInteractive/com.appalachia.python.common_py | 6d04a041dfb883846919866b49d956721e9e785f | [
"MIT"
] | 1 | 2021-04-25T15:26:31.000Z | 2021-04-25T15:26:31.000Z | import os
from typing import Dict
from appapy.common.cli import *
from appapy.templating.constants import *
from appapy.templating.tokenizedproperty import TokenizedProperty
from appapy.templating.utils import *
class Repository:
    """Aggregates the tokenized properties that describe a repository template.

    Each attribute wraps one replaceable token (owner, package, version, ...)
    in a TokenizedProperty; `tokenized_properties` collects the subset that is
    iterated during templating, with `token_keys` / `token_lookup` giving
    key-based access to them.
    """

    def __init__(self):
        # Each TokenizedProperty pairs a token key with a display name and a
        # validation callback (no_validation accepts anything).
        self.owner = TokenizedProperty("owner", "Owner", no_validation)
        self.ownerid = TokenizedProperty("ownerid", "Owner ID", no_validation)
        self.directory = TokenizedProperty("directory", "Directory Path", os.path.isdir)
        self.package = TokenizedProperty("package", "Package Name", package_validation)
        self.library = TokenizedProperty("library", "Library Name", no_validation)
        self.project = TokenizedProperty("project", "Project Name", no_validation)
        self.projectid = TokenizedProperty("projectid", "Project ID", no_validation)
        self.display = TokenizedProperty("display", "Display Name", no_validation)
        self.version = TokenizedProperty("version", "Current Version", no_validation)
        self.description = TokenizedProperty(
            "description", "Description", no_validation
        )
        self.license = TokenizedProperty("license", "License", no_validation)
        self.licenseid = TokenizedProperty("licenseid", "License ID", no_validation)
        self.author = TokenizedProperty("author", "Author", no_validation)
        self.authorid = TokenizedProperty("authorid", "Author ID", no_validation)
        self.technology = TokenizedProperty("technology", "Technology", no_validation)
        self.technologyid = TokenizedProperty(
            "technologyid", "Technology ID", no_validation
        )
        self.year = TokenizedProperty("year", "Copyright Year", no_validation)
        self.commit = TokenizedProperty(
            "commit", "License Transition Commit Hash", no_validation
        )
        self.license1 = TokenizedProperty("license1", "License 1", no_validation)
        self.license2 = TokenizedProperty("license2", "License 2", no_validation)
        self.csnamespace = TokenizedProperty(
            "csnamespace", "C# Root Namespace", no_validation
        )
        # Whether this is a third-party repository; defaults to first-party.
        self.thirdparty = False
        # NOTE(review): directory, commit, license1/2, csnamespace and ownerid
        # are not listed below -- confirm they are intentionally excluded from
        # the token-iteration set.
        self.tokenized_properties = [
            self.owner,
            self.package,
            self.library,
            self.project,
            self.projectid,
            self.display,
            self.version,
            self.description,
            self.license,
            self.licenseid,
            self.author,
            self.authorid,
            self.technology,
            self.technologyid,
            self.year,
        ]
        # The token key strings of the templated properties (annotation fixed:
        # elements are `prop.key`, i.e. strings, not TokenizedProperty objects).
        self.token_keys: List[str] = [
            prop.key for prop in self.tokenized_properties
        ]
        # Reverse lookup from token key to its TokenizedProperty.
        self.token_lookup: Dict[str, TokenizedProperty] = {
            prop.key: prop for prop in self.tokenized_properties
        }
        # Components of the repository path; populated elsewhere.
        self.path_parts: List[str] = []
        self.path_parts_lower: List[str] = []
| 42.128571 | 88 | 0.64666 |
ace56531075480933fc0ba35e69103a85bc783b0 | 31,675 | py | Python | ace/novaAdapter.py | hayley-leblanc/crashmonkey | 118d504291ae3e24ec2118efacd3f99c243c63b1 | [
"Apache-2.0"
] | null | null | null | ace/novaAdapter.py | hayley-leblanc/crashmonkey | 118d504291ae3e24ec2118efacd3f99c243c63b1 | [
"Apache-2.0"
] | null | null | null | ace/novaAdapter.py | hayley-leblanc/crashmonkey | 118d504291ae3e24ec2118efacd3f99c243c63b1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#To run : python3 novaAdapter.py -b code/tests/generic_039/base_test.cpp -t code/tests/generic_039/generic_039 -p code/tests/generic_039
import os
import re
import sys
import stat
import subprocess
import argparse
import time
import itertools
from shutil import copyfile
# All functions that have parameter options go here.  The upstream CrashMonkey
# generator supports wider option sets (e.g. FALLOC_FL_ZERO_RANGE variants and
# WriteDataMmap); this NOVA adapter deliberately restricts them.
FallocOptions = ['FALLOC_FL_KEEP_SIZE']
FsyncOptions = ['fsync','fdatasync']
RemoveOptions = ['remove','unlink']
LinkOptions = ['link','symlink']
WriteOptions = ['WriteData', 'pwrite']
# Tracks C++ variable names already declared in the generated file so the
# emitters know whether to prefix a declaration ("int fd_x = ...") or reuse it.
redeclare_map = {}
def build_parser():
    """Return the argparse parser for the NOVA workload generator CLI."""
    # (long flag, short flag, default value, help text)
    option_specs = [
        ('--base_file', '-b', '', 'Base test file to generate workload'),
        ('--test_file', '-t', '', 'J lang test skeleton to generate workload'),
        ('--target_path', '-p', '../code/tests/', 'Directory to save the generated test files'),
        ('--output_name', '-o', 'file', 'Name of the generated file'),
    ]
    parser = argparse.ArgumentParser(description='Workload Generator for NOVA')
    for long_flag, short_flag, default_value, help_text in option_specs:
        parser.add_argument(long_flag, short_flag, default=default_value, help=help_text)
    return parser
def print_setup(parsed_args):
    """Pretty-print the chosen generator configuration so the user can sanity-check it."""
    print('\n{: ^50s}'.format('NOVA Workload generator\n'))
    print('=' * 20, 'Setup', '=' * 20, '\n')
    rows = (
        ('Base test file', parsed_args.base_file),
        ('Test skeleton', parsed_args.test_file),
        ('Target directory', parsed_args.target_path),
        ('Output file', parsed_args.output_name),
    )
    for label, value in rows:
        print('{0:20} {1}'.format(label, value))
    print('\n', '=' * 48, '\n')
def create_dir(dir_path):
    """Create dir_path (and any missing parents), ignoring an existing directory.

    Replaces the pre-3.2 try/except-makedirs idiom with `exist_ok=True`;
    behavior is preserved: an existing *file* at dir_path still raises OSError
    (FileExistsError).
    """
    os.makedirs(dir_path, exist_ok=True)
def create_dict():
    """Return a fresh operation counter map with every tracked op at zero."""
    tracked_ops = ('fsync', 'fallocate', 'open', 'remove')
    return {op: 0 for op in tracked_ops}
#These maps keep track of the line number in each method, to add the next function to in the C++ file
# def updateSetupMap(index_map, num):
# index_map['setup'] += num
# index_map['run'] += num
# index_map['check'] += num
# index_map['define'] += num
# def updateRunMap(index_map, num):
# index_map['run'] += num
# index_map['check'] += num
# index_map['define'] += num
# def updateCheckMap(index_map, num):
# index_map['check'] += num
# index_map['define'] += num
# def updateDefineMap(index_map, num):
# index_map['define'] += num
def updateDefineMap(index_map, num):
    """Shift the define/setup/run/check insertion points down by num lines."""
    for phase in ('define', 'setup', 'run', 'check'):
        index_map[phase] += num
def updateSetupMap(index_map, num):
    """Shift the setup/run/check insertion points down by num lines."""
    for phase in ('setup', 'run', 'check'):
        index_map[phase] += num
def updateRunMap(index_map, num):
    """Shift the run/check insertion points down by num lines."""
    for phase in ('run', 'check'):
        index_map[phase] += num
def updateCheckMap(index_map, num):
    """Shift only the check-phase insertion point down by num lines."""
    index_map['check'] = index_map['check'] + num
def insertDeclare(line, file, index_map):
    """Insert `int <line> = 0;` into the run section of the generated C++ file."""
    with open(file, 'r+') as declare:
        contents = declare.readlines()
        # Reserve one line in the run phase (check shifts along with it).
        updateRunMap(index_map, 1)
        to_insert = '\t\t\t\tint ' + line + ' = 0 ;\n'
        contents.insert(index_map['run'], to_insert)
        # seek(0) then rewrite; safe without truncate() because the rewritten
        # content is strictly longer than the original.
        declare.seek(0)
        declare.writelines(contents)
        declare.close()
# Add the 'line' which declares a file/dir used in the workload into the 'file'
# at position specified in the 'index_map'
def insertDefine(line, file, index_map):
    """Declare a file/dir path variable in all four sections of the C++ test.

    For each of the setup, run and check methods an assignment of the form
    `<name>_path = mnt_dir_ + "/<line>"` is inserted (just the mount root when
    the name is 'test'), and a `string <name>_path;` member is added to the
    private 'define' section.  Path separators in `line` are squashed together
    to build the variable name (e.g. "A/foo" -> "Afoo_path").
    """
    with open(file, 'r+') as define:
        contents = define.readlines()
        #Initialize paths in setup phase
        updateSetupMap(index_map, 1)
        # Build the C++ variable name by concatenating all path components.
        file_str = ''
        if len(line.split('/')) != 1 :
            for i in range(0, len(line.split('/'))):
                file_str += line.split('/')[i]
        else:
            file_str = line.split('/')[-1]
        if file_str == 'test':
            to_insert = '\t\t\t\t' + file_str + '_path = mnt_dir_ ;\n'
        else:
            to_insert = '\t\t\t\t' + file_str + '_path = mnt_dir_' + ' + "/' + line + '";\n'
        contents.insert(index_map['setup'], to_insert)
        #Initialize paths in run phase
        updateRunMap(index_map, 1)
        file_str = ''
        if len(line.split('/')) != 1 :
            for i in range(0, len(line.split('/'))):
                file_str += line.split('/')[i]
        else:
            file_str = line.split('/')[-1]
        if file_str == 'test':
            to_insert = '\t\t\t\t' + file_str + '_path = mnt_dir_ ;\n'
        else:
            to_insert = '\t\t\t\t' + file_str + '_path = mnt_dir_' + ' + "/' + line + '";\n'
        contents.insert(index_map['run'], to_insert)
        #Initialize paths in check phase
        updateCheckMap(index_map, 1)
        file_str = ''
        if len(line.split('/')) != 1 :
            for i in range(0, len(line.split('/'))):
                file_str += line.split('/')[i]
        else:
            file_str = line.split('/')[-1]
        if file_str == 'test':
            to_insert = '\t\t\t\t' + file_str + '_path = mnt_dir_ ;\n'
        else:
            to_insert = '\t\t\t\t' + file_str + '_path = mnt_dir_' + ' + "/' + line + '";\n'
        contents.insert(index_map['check'], to_insert)
        #Update defines portion
        #Get only the file name. We don't want the path here
        updateDefineMap(index_map, 1)
        file_str = ''
        if len(line.split('/')) != 1 :
            for i in range(0, len(line.split('/'))):
                file_str += line.split('/')[i]
        else:
            file_str = line.split('/')[-1]
        to_insert = '\t\t\t string ' + file_str + '_path; \n'
        contents.insert(index_map['define'], to_insert)
        define.seek(0)
        define.writelines(contents)
        define.close()
def insertFalloc(contents, line, index_map, method):
    """Emit a C++ fallocate() call (line args: name, mode, offset, length).

    Adds 5 generated source lines, so the phase maps shift by 5.
    """
    to_insert = '\n\t\t\t\tif ( fallocate( fd_' + line.split(' ')[1] + ' , ' + line.split(' ')[2] + ' , ' + line.split(' ')[3] + ' , ' + line.split(' ')[4] + ') < 0){ \n\t\t\t\t\tclose( fd_' + line.split(' ')[1] +');\n\t\t\t\t\t return errno;\n\t\t\t\t}\n\n'
    if method == 'setup':
        contents.insert(index_map['setup'], to_insert)
        updateSetupMap(index_map, 5)
    else:
        contents.insert(index_map['run'], to_insert)
        updateRunMap(index_map, 5)
def insertMkdir(contents, line, index_map, method):
    """Emit a C++ mkdir() call (line args: name, mode); adds 4 lines."""
    to_insert = '\n\t\t\t\tif ( mkdir(' + line.split(' ')[1] + '_path.c_str() , ' + line.split(' ')[2] + ') < 0){ \n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\n'
    if method == 'setup':
        contents.insert(index_map['setup'], to_insert)
        updateSetupMap(index_map, 4)
    else:
        contents.insert(index_map['run'], to_insert)
        updateRunMap(index_map, 4)
def insertOpenFile(contents, line, index_map, method):
    """Emit a C++ open() call (line args: name, flags, mode); adds 6 lines.

    Declares `int fd_<name>` only the first time the name is seen, using the
    module-level redeclare_map to remember prior declarations.
    """
    name = 'fd_' + line.split(' ')[1]
    decl = ' '
    if name not in redeclare_map:
        decl = 'int '
        redeclare_map[name] = 1
    # TODO: prevent redeclarations here
    to_insert = '\n\t\t\t\t' + decl + 'fd_' + line.split(' ')[1] + ' = open(' + line.split(' ')[1] + '_path.c_str() , ' + line.split(' ')[2] + ' , ' + line.split(' ')[3] + '); \n\t\t\t\tif ( fd_' + line.split(' ')[1] + ' < 0 ) { \n\t\t\t\t\tclose( fd_' + line.split(' ')[1] + '); \n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\n'
    if method == 'setup':
        contents.insert(index_map['setup'], to_insert)
        updateSetupMap(index_map, 6)
    else:
        contents.insert(index_map['run'], to_insert)
        updateRunMap(index_map, 6)
def insertMknodFile(contents, line, index_map, method):
    """Emit a C++ mknod() call (line args: name, mode, dev); adds 6 lines.

    Like insertOpenFile, declares `int fd_<name>` only on first use.
    """
    name = 'fd_' + line.split(' ')[1]
    decl = ' '
    if name not in redeclare_map:
        decl = 'int '
        redeclare_map[name] = 1
    # TODO: prevent redeclarations here
    to_insert = '\n\t\t\t\t' + decl + 'fd_' + line.split(' ')[1] + ' = mknod(' + line.split(' ')[1] + '_path.c_str() , ' + line.split(' ')[2] + ' , ' + line.split(' ')[3] + '); \n\t\t\t\tif ( fd_' + line.split(' ')[1] + ' < 0 ) { \n\t\t\t\t\tclose( fd_' + line.split(' ')[1] + '); \n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\n'
    if method == 'setup':
        contents.insert(index_map['setup'], to_insert)
        updateSetupMap(index_map, 6)
    else:
        contents.insert(index_map['run'], to_insert)
        updateRunMap(index_map, 6)
def insertOpenDir(contents, line, index_map, method):
    """Emit a C++ open(..., O_DIRECTORY, mode) call; adds 6 lines.

    Declares `int fd_<name>` only the first time the name is seen.
    """
    name = 'fd_' + line.split(' ')[1]
    decl = ' '
    if name not in redeclare_map:
        decl = 'int '
        redeclare_map[name] = 1
    # TODO: prevent redeclarations here
    to_insert = '\n\t\t\t\t' + decl + 'fd_' + line.split(' ')[1] + ' = open(' + line.split(' ')[1] + '_path.c_str() , O_DIRECTORY , ' + line.split(' ')[2] + '); \n\t\t\t\tif ( fd_' + line.split(' ')[1] + ' < 0 ) { \n\t\t\t\t\tclose( fd_' + line.split(' ')[1] + '); \n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\n'
    if method == 'setup':
        contents.insert(index_map['setup'], to_insert)
        updateSetupMap(index_map, 6)
    else:
        contents.insert(index_map['run'], to_insert)
        updateRunMap(index_map, 6)
def insertRemoveFile(contents,option, line, index_map, method):
    """Emit a C++ remove() or unlink() call (option selects which); adds 4 lines."""
    to_insert = '\n\t\t\t\tif ( '+ option +'(' + line.split(' ')[1] + '_path.c_str() ) < 0){ \n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\n'
    if method == 'setup':
        contents.insert(index_map['setup'], to_insert)
        updateSetupMap(index_map, 4)
    else:
        contents.insert(index_map['run'], to_insert)
        updateRunMap(index_map, 4)
def insertTruncateFile(contents, line, index_map, method):
    """Emit a C++ truncate() call (line args: name, length); adds 4 lines."""
    to_insert = '\n\t\t\t\tif ( truncate (' + line.split(' ')[1] + '_path.c_str(), ' + line.split(' ')[2] + ') < 0){ \n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\n'
    if method == 'setup':
        contents.insert(index_map['setup'], to_insert)
        updateSetupMap(index_map, 4)
    else:
        contents.insert(index_map['run'], to_insert)
        updateRunMap(index_map, 4)
def insertClose(contents, line, index_map, method):
    """Emit a C++ close(fd_<name>) call; adds 4 lines."""
    to_insert = '\n\t\t\t\tif ( close ( fd_' + line.split(' ')[1] + ') < 0){ \n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\n'
    if method == 'setup':
        contents.insert(index_map['setup'], to_insert)
        updateSetupMap(index_map, 4)
    else:
        contents.insert(index_map['run'], to_insert)
        updateRunMap(index_map, 4)
def insertRmdir(contents, line, index_map, method):
    """Emit a C++ rmdir() call (the op name is taken from the line itself); adds 4 lines."""
    to_insert = '\n\t\t\t\tif ( ' + line.split(' ')[0] + '(' + line.split(' ')[1] + '_path.c_str()) < 0){ \n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\n'
    if method == 'setup':
        contents.insert(index_map['setup'], to_insert)
        updateSetupMap(index_map, 4)
    else:
        contents.insert(index_map['run'], to_insert)
        updateRunMap(index_map, 4)
def insertFsync(contents, option, line, index_map, method):
    """Emit a C++ fsync() or fdatasync() call on fd_<name>; adds 4 lines."""
    if option == 'fsync':
        ins = 'fsync'
    elif option == 'fdatasync':
        ins = 'fdatasync'
    to_insert = '\n\t\t\t\tif ( ' + ins + '( fd_' + line.split(' ')[1] + ') < 0){ \n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\n'
    if method == 'setup':
        contents.insert(index_map['setup'], to_insert)
        updateSetupMap(index_map, 4)
    else:
        contents.insert(index_map['run'], to_insert)
        updateRunMap(index_map, 4)
def insertSync(contents, line, index_map, method):
    """Emit a C++ sync() call; adds 2 lines."""
    to_insert = '\n\t\t\t\tsync(); \n\n'
    if method == 'setup':
        contents.insert(index_map['setup'], to_insert)
        updateSetupMap(index_map, 2)
    else:
        contents.insert(index_map['run'], to_insert)
        updateRunMap(index_map, 2)
def insertLink(contents, option, line, index_map, method):
    """Emit a C++ link() or symlink() call (line args: target, linkname); adds 4 lines."""
    to_insert = '\n\t\t\t\tif ( ' + option + '(' + line.split(' ')[1] + '_path.c_str() , '+ line.split(' ')[2] + '_path.c_str() '+ ') < 0){ \n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\n'
    if method == 'setup':
        contents.insert(index_map['setup'], to_insert)
        updateSetupMap(index_map, 4)
    else:
        contents.insert(index_map['run'], to_insert)
        updateRunMap(index_map, 4)
#def insertCheckpoint(contents, line, index_map, method):
#
# to_insert = '\n\t\t\t\tif ( Checkpoint() < 0){ \n\t\t\t\t\treturn -1;\n\t\t\t\t}\n\t\t\t\tlocal_checkpoint += 1; \n\t\t\t\tif (local_checkpoint == checkpoint) { \n\t\t\t\t\treturn 1;\n\t\t\t\t}\n\n'
#
# if method == 'setup':
# contents.insert(index_map['setup'], to_insert)
# updateSetupMap(index_map, 8)
# else:
# contents.insert(index_map['run'], to_insert)
# updateRunMap(index_map, 8)
def insertCheckpoint(contents, line, index_map, method):
    """Emit a novaCheckpoint() call; adds 8 lines.

    The generated code increments local_checkpoint and, once the requested
    checkpoint is reached, returns the value given as the line's argument.
    """
    to_insert = '\n\t\t\t\tif ( novaCheckpoint() < 0){ \n\t\t\t\t\treturn -1;\n\t\t\t\t}\n\t\t\t\tlocal_checkpoint += 1; \n\t\t\t\tif (local_checkpoint == checkpoint) { \n\t\t\t\t\treturn '+ line.split(' ')[1] + ';\n\t\t\t\t}\n\n'
    if method == 'setup':
        contents.insert(index_map['setup'], to_insert)
        updateSetupMap(index_map, 8)
    else:
        contents.insert(index_map['run'], to_insert)
        updateRunMap(index_map, 8)
def insertRename(contents, line, index_map, method):
    """Emit a C++ rename() call (line args: oldpath, newpath); adds 4 lines."""
    to_insert = '\n\t\t\t\tif ( rename (' + line.split(' ')[1] + '_path.c_str() , '+ line.split(' ')[2] + '_path.c_str() '+ ') < 0){ \n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\n'
    if method == 'setup':
        contents.insert(index_map['setup'], to_insert)
        updateSetupMap(index_map, 4)
    else:
        contents.insert(index_map['run'], to_insert)
        updateRunMap(index_map, 4)
# def insertFsetxattr(contents, line, index_map, method):
# to_insert = '\n\t\t\t\tif ( ' + line.split(' ')[0] + '( fd_' + line.split(' ')[1] + ', \"user.xattr1\", \"val1 \", 4, 0 ) < 0){ \n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\n'
# if method == 'setup':
# contents.insert(index_map['setup'], to_insert)
# updateSetupMap(index_map, 4)
# else:
# contents.insert(index_map['run'], to_insert)
# updateRunMap(index_map, 4)
# def insertRemovexattr(contents, line, index_map, method):
# to_insert = '\n\t\t\t\tif ( ' + line.split(' ')[0] + '(' + line.split(' ')[1] + '_path.c_str() , \"user.xattr1\") < 0){ \n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\n'
# if method == 'setup':
# contents.insert(index_map['setup'], to_insert)
# updateSetupMap(index_map, 4)
# else:
# contents.insert(index_map['run'], to_insert)
# updateRunMap(index_map, 4)
def insertWrite(contents, option, line, index_map, method):
    """Emit a data write (line args: name, offset, length).

    option == 'write' emits a call to the novaWriteData() helper (5 lines).
    Any other option -- 'dwrite', and currently also 'mmapwrite', whose
    dedicated mmap branch has been removed from this adapter -- emits an
    O_DIRECT pwrite sequence (32 lines) that reopens the file, allocates an
    aligned buffer with posix_memalign, fills it with a repeating 32-byte
    pattern, and pwrites `length` bytes at `offset`.
    """
    if option == 'write':
        to_insert = '\n\t\t\t\tif ( novaWriteData ( fd_' + line.split(' ')[1] + ', ' + line.split(' ')[2] + ', ' + line.split(' ')[3] + ') < 0){ \n\t\t\t\t\tclose( fd_' + line.split(' ')[1] + '); \n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\n'
        if method == 'setup':
            contents.insert(index_map['setup'], to_insert)
            updateSetupMap(index_map, 5)
        else:
            contents.insert(index_map['run'], to_insert)
            updateRunMap(index_map, 5)
    else:
        name = 'offset_' + line.split(' ')[1]
        decl = ' '
        data_decl = ' '
        text_decl = ' '
        # Buffer/offset variables are declared only on first use per file name.
        if name not in redeclare_map:
            decl = 'int '
            data_decl = 'void* data_' +line.split(' ')[1] + ';'
            text_decl = 'const char *text_' + line.split(' ')[1] +' = \"ddddddddddklmnopqrstuvwxyz123456\";'
            redeclare_map[name] = 1
        # TODO: prevent redeclarations here
        to_insert ='\n\t\t\t\tclose(fd_' + line.split(' ')[1] + '); \n\t\t\t\tfd_' + line.split(' ')[1] + ' = open(' + line.split(' ')[1] +'_path.c_str() , O_RDWR|O_DIRECT|O_SYNC , 0777); \n\t\t\t\tif ( fd_' + line.split(' ')[1] +' < 0 ) { \n\t\t\t\t\tclose( fd_' + line.split(' ')[1] +'); \n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\n\t\t\t\t' + data_decl+'\n\t\t\t\tif (posix_memalign(&data_' + line.split(' ')[1] +' , 4096, ' + line.split(' ')[3] +' ) < 0) {\n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\n\t\t\t\t \n\t\t\t\t' +decl+ 'offset_'+ line.split(' ')[1] +' = 0;\n\t\t\t\t' + decl +'to_write_'+line.split(' ')[1] +' = ' + line.split(' ')[3] + ' ;\n\t\t\t\t'+ text_decl+ '\n\t\t\t\twhile (offset_'+line.split(' ')[1]+' < '+ line.split(' ')[3] +'){\n\t\t\t\t\tif (to_write_'+ line.split(' ')[1] +' < 32){\n\t\t\t\t\t\tmemcpy((char *)data_'+ line.split(' ')[1]+ '+ offset_'+ line.split(' ')[1] +', text_'+ line.split(' ')[1] +', to_write_' +line.split(' ')[1]+');\n\t\t\t\t\t\toffset_'+ line.split(' ')[1]+' += to_write_'+ line.split(' ')[1] +';\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tmemcpy((char *)data_'+ line.split(' ')[1] +'+ offset_'+line.split(' ')[1] +',text_'+line.split(' ')[1] +', 32);\n\t\t\t\t\t\toffset_'+line.split(' ')[1] +' += 32; \n\t\t\t\t\t} \n\t\t\t\t} \n\n\t\t\t\tif ( pwrite ( fd_' + line.split(' ')[1] + ', data_'+ line.split(' ')[1] + ', ' + line.split(' ')[3] + ', ' + line.split(' ')[2] +') < 0){\n\t\t\t\t\tclose( fd_' + line.split(' ')[1] + '); \n\t\t\t\t\treturn errno;\n\t\t\t\t}\n\t\t\t\tclose(fd_' + line.split(' ')[1] + ');\n\n'
        if method == 'setup':
            contents.insert(index_map['setup'], to_insert)
            updateSetupMap(index_map, 32)
        else:
            contents.insert(index_map['run'], to_insert)
            updateRunMap(index_map, 32)
# Insert a function in 'line' into 'file' at location specified by 'index_map' in the specified 'method'
# If the workload has functions with various possible paramter options, the 'permutation' defines the set of
# paramters to be set in this file.
def insertFunctions(line, file, index_map, method):
    """Dispatch a J-lang workload line to the matching C++ emitter.

    Reads the generated file, shifts the target phase map by one (a leading
    blank line for the emitted snippet -- presumably; confirm against the
    emitters), calls the per-operation insert helper, and rewrites the file.
    """
    with open(file, 'r+') as insert:
        contents = insert.readlines()
        if line.split(' ')[0] == 'falloc':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            insertFalloc(contents, line, index_map, method)
            # 'addToSetup' duplicates the falloc into the setup phase with a
            # substituted file name.
            if line.split(' ')[-2] == 'addToSetup':
                line = line.replace(line.split(' ')[1], line.split(' ')[-1], 1)
                insertFalloc(contents, line, index_map, 'setup')
        elif line.split(' ')[0] == 'mkdir':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            insertMkdir(contents, line, index_map, method)
        elif line.split(' ')[0] == 'mknod':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            insertMknodFile(contents, line, index_map, method)
        elif line.split(' ')[0] == 'open':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            insertOpenFile(contents, line, index_map, method)
        elif line.split(' ')[0] == 'opendir':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            insertOpenDir(contents, line, index_map, method)
        elif line.split(' ')[0] == 'remove' or line.split(' ')[0] == 'unlink':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            option = line.split(' ')[0]
            insertRemoveFile(contents, option, line, index_map, method)
        elif line.split(' ')[0] == 'close':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            insertClose(contents, line, index_map, method)
        elif line.split(' ')[0] == 'rmdir':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            insertRmdir(contents, line, index_map, method)
        elif line.split(' ')[0] == 'truncate':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            insertTruncateFile(contents, line, index_map, method)
        elif line.split(' ')[0] == 'fsync' or line.split(' ')[0] == 'fdatasync':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            option = line.split(' ')[0]
            insertFsync(contents, option, line, index_map, method)
        elif line.split(' ')[0] == 'sync':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            insertSync(contents, line, index_map, method)
        elif line.split(' ')[0] == 'checkpoint':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            insertCheckpoint(contents, line, index_map, method)
        elif line.split(' ')[0] == 'rename':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            insertRename(contents, line, index_map, method)
        elif line.split(' ')[0] == 'fsetxattr':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            # NOTE(review): insertFsetxattr is commented out in this adapter;
            # reaching this branch raises NameError.  Confirm xattr ops are
            # never generated for NOVA workloads.
            insertFsetxattr(contents, line, index_map, method)
        elif line.split(' ')[0] == 'removexattr':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            # NOTE(review): insertRemovexattr is also commented out above --
            # this branch would raise NameError if reached.
            insertRemovexattr(contents, line, index_map, method)
        elif line.split(' ')[0] == 'link' or line.split(' ')[0] == 'symlink':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            option = line.split(' ')[0]
            insertLink(contents, option, line, index_map, method)
        elif line.split(' ')[0] == 'write' or line.split(' ')[0] == 'dwrite' or line.split(' ')[0] == 'mmapwrite':
            if method == 'setup':
                updateSetupMap(index_map, 1)
            else:
                updateRunMap(index_map, 1)
            option = line.split(' ')[0]
            insertWrite(contents, option, line, index_map, method)
        elif line.split(' ')[0] == 'none':
            pass
        # Rewrite the whole file; content only ever grows, so no truncate().
        insert.seek(0)
        insert.writelines(contents)
        insert.close()
def main():
    """Generate a CrashMonkey/NOVA C++ test from a base template and a J-lang skeleton.

    Locates the setup/run/check_test/private markers in the base file, copies
    it to <test_file>.cpp in the target directory, then replays the skeleton
    line by line, inserting generated C++ into the section selected by the
    most recent '# ... <section>' directive.
    """
    #open log file
    #log_file = time.strftime('%Y%m%d_%H%M%S') + '-workloadGen.log'
    #log_file_handle = open(log_file, 'w')
    #Parse input args
    parsed_args = build_parser().parse_args()
    #Print the test setup - just for sanity
    # base_test = '../code/tests/base_test.cpp'
    # print_setup(parsed_args)
    #check if test file exists
    if not os.path.exists(parsed_args.test_file) or not os.path.isfile(parsed_args.test_file):
        print(parsed_args.test_file + ' : No such test file\n')
        exit(1)
    #Create the target directory
    create_dir(parsed_args.target_path)
    #Create a pre-populated dictionary of replacable operations
    operation_map = create_dict()
    #Copy base file to target path
    base_test = parsed_args.base_file
    base_file = os.path.join(parsed_args.target_path,base_test.split('/')[-1])
    # copyfile(base_test, base_file)
    test_file = parsed_args.test_file
    # 1-based line numbers of the four insertion anchors in the base file.
    index_map = {'define' : 0, 'setup' : 0, 'run' : 0, 'check' : 0}
    #iterate through the base file and populate these values
    index = 0
    with open(base_file, 'r') as f:
        contents = f.readlines()
        for index, line in enumerate(contents):
            index += 1
            line = line.strip()
            if line.find('setup') != -1:
                if line.split(' ')[1] == 'setup':
                    index_map['setup'] = index
            elif line.find('run') != -1:
                if line.split(' ')[1] == 'run':
                    index_map['run'] = index
            elif line.find('check_test') != -1:
                if line.split(' ')[1] == 'check_test':
                    index_map['check'] = index
            elif line.find('private') != -1:
                if line.split(' ')[1] == 'private':
                    index_map['define'] = index
    # Redundant: the with-block already closed the file.
    f.close()
    val = 0
    new_file = test_file + ".cpp"
    new_file = os.path.join(parsed_args.target_path, new_file)
    copyfile(base_file, new_file)
    new_index_map = index_map.copy()
    # log = ' ,'.join(permutation);
    # log = `val` + ' : ' + log + '\n'
    # log_file_handle.write(log)
    #Iterate through test file and fill up method by method
    with open(test_file, 'r') as f:
        # NOTE: `iter` shadows the builtin; it is unused here.
        iter = 0
        for line in f:
            #ignore newlines
            if line.split(' ')[0] == '\n':
                continue
            #Remove leading, trailing spaces
            line = line.strip()
            #if the line starts with #, it indicates which region of base file to populate and skip this line
            if line.split(' ')[0] == '#' :
                method = line.strip().split()[-1]
                continue
            if method == 'define':
                insertDefine(line, new_file, new_index_map)
            elif method == 'declare':
                insertDeclare(line, new_file, new_index_map)
            elif (method == 'setup' or method == 'run'):
                op_map={}
                insertFunctions(line, new_file, new_index_map, method)
    # Redundant: the with-block already closed the file.
    f.close()
    val += 1
    # log_file_handle.close()
# Script entry point: only run the generator when executed directly.
if __name__ == '__main__':
    main()
| 43.569464 | 1,774 | 0.540237 |
ace565343c1a26efd49827f9251c5340b6a2f3cc | 5,189 | py | Python | replace_dependencies.py | dartartem/eventuate-tram-upgrade-scripts | ed49c0674897622d35a0ff84c837cf7b768a4b4f | [
"Apache-2.0"
] | null | null | null | replace_dependencies.py | dartartem/eventuate-tram-upgrade-scripts | ed49c0674897622d35a0ff84c837cf7b768a4b4f | [
"Apache-2.0"
] | 4 | 2020-03-03T07:46:03.000Z | 2020-03-13T06:29:25.000Z | replace_dependencies.py | dartartem/eventuate-tram-upgrade-scripts | ed49c0674897622d35a0ff84c837cf7b768a4b4f | [
"Apache-2.0"
] | 1 | 2020-03-03T07:26:31.000Z | 2020-03-03T07:26:31.000Z | import sys
import os
from os import path
from os import walk
import itertools
import re
PROJECT_FOLDER = os.getcwd()
MODULE_REPLACEMENTS_FILE = "module.springboot.replacements"
MANUAL_MODULE_REPLACEMENTS_FILE = "manual.springboot.module.replacements"
CLASS_REPLACEMENTS_FILE = "class.springboot.replacements"
ARTIFACT_GROUP_REPLACEMENTS_FILE = "artifact.group.replacements"
VERSION_REPLACEMENTS_FILE = "version.replacements"
if (len(sys.argv) > 1 and sys.argv[1] == "MICRONAUT"):
MODULE_REPLACEMENTS_FILE = "module.micronaut.replacements"
MANUAL_MODULE_REPLACEMENTS_FILE = "manual.micronaut.module.replacements"
CLASS_REPLACEMENTS_FILE = "class.micronaut.replacements"
MODULE_REPLACEMENTS = os.path.join(sys.path[0], MODULE_REPLACEMENTS_FILE)
MANUAL_MODULE_REPLACEMENTS = os.path.join(sys.path[0], MANUAL_MODULE_REPLACEMENTS_FILE)
CLASS_REPLACEMENTS = os.path.join(sys.path[0], CLASS_REPLACEMENTS_FILE)
ARTIFACT_GROUP_REPLACEMENTS = os.path.join(sys.path[0], ARTIFACT_GROUP_REPLACEMENTS_FILE)
VERSION_REPLACEMENTS = os.path.join(sys.path[0], VERSION_REPLACEMENTS_FILE)
GRADLE_PROPERTIES = os.path.join(os.getcwd(), "gradle.properties")
POM_WITH_VERSIONS = os.path.join(os.getcwd(), "pom.xml")
LIBRARY_VERSIONS = os.path.join(sys.path[0], "library.versions")
def get_files_in_folder_and_subfolders(folder):
    """Return the path of every file under `folder`, recursively.

    Bug fix: the original walked the module-level PROJECT_FOLDER and silently
    ignored its `folder` argument.
    """
    return list(
        itertools.chain.from_iterable(
            [os.path.join(root, file) for file in files]
            for root, dirs, files in walk(folder)
        )
    )
def load_replacements(file):
    """Parse a replacements file into an {original: replacement} dict.

    Each non-empty line has the form "old->new"; newline characters are
    stripped before splitting.
    """
    replacement_map = {}
    for raw_line in read_lines_from_file(file):
        entry = raw_line.replace("\n", "")
        if not entry:
            continue
        parts = entry.split("->")
        replacement_map[parts[0]] = parts[1]
    return replacement_map
def filter_files(files, extension):
    """Return only the entries of `files` whose name ends with `extension`."""
    return list(filter(lambda name: name.endswith(extension), files))
def read_file(file):
    """Return the full text content of *file*.

    Uses a context manager so the handle is closed even when ``read()``
    raises; the original left the file open on error.
    """
    with open(file, "r") as f:
        return f.read()
def write_file(file, content):
    """Overwrite *file* with *content*.

    Uses a context manager so the handle is closed (and flushed) even if
    ``write()`` raises; the original left the file open on error.
    """
    with open(file, "w") as f:
        f.write(content)
def read_lines_from_file(file):
    """Return the lines of *file*, newlines preserved (``readlines()``).

    Context manager added so the handle is closed even on a read error.
    """
    with open(file, "r") as f:
        return f.readlines()
def write_lines_to_file(file, lines):
    """Write *lines* to *file* verbatim (``writelines`` adds no newlines).

    Context manager added so the handle is closed even on a write error.
    """
    with open(file, "w") as f:
        f.writelines(lines)
def replace_dependencies(files, replacements, prefix = None, postfix = None):
    """Apply every ``old -> new`` rule to each file in *files*.

    Both sides of a rule are optionally wrapped with *prefix*/*postfix*
    before matching (e.g. ``:old:`` for Gradle coordinates).  A file is
    rewritten only when at least one rule actually matched.
    """
    for target in files:
        with open(target, "r") as handle:
            text = handle.read()
        untouched = text
        for old_name, new_name in replacements.items():
            needle = old_name
            substitute = new_name
            if prefix:
                needle = prefix + needle
                substitute = prefix + substitute
            if postfix:
                needle = needle + postfix
                substitute = substitute + postfix
            text = text.replace(needle, substitute)
        if text != untouched:
            with open(target, "w") as handle:
                handle.write(text)
def update_libraries_gradle():
    """Pin the versions listed in library.versions inside the project's
    gradle.properties (lines of the form ``name=version``), rewriting
    the file only when a line actually changed."""
    versions = load_replacements(LIBRARY_VERSIONS)
    current = read_lines_from_file(GRADLE_PROPERTIES)
    updated = []
    for line in current:
        for library, version in versions.items():
            if line.startswith(library + "="):
                line = library + "=" + version + "\n"
        updated.append(line)
    if current != updated:
        write_lines_to_file(GRADLE_PROPERTIES, updated)
def update_libraries_maven():
    """Pin the versions listed in library.versions inside pom.xml,
    replacing the text of matching ``<name>version</name>`` elements.

    No-op when the project has no top-level pom.xml; the file is
    rewritten only when a line actually changed.
    """
    if not path.exists(POM_WITH_VERSIONS):
        return
    versions = load_replacements(LIBRARY_VERSIONS)
    current = read_lines_from_file(POM_WITH_VERSIONS)
    updated = []
    for line in current:
        for library, version in versions.items():
            hits = re.findall(".*<" + library + ">(.*)</" + library + ">.*", line)
            if hits:
                line = line.replace(hits[0], version)
        updated.append(line)
    if current != updated:
        write_lines_to_file(POM_WITH_VERSIONS, updated)
def inspect_dependencies_for_manaul_replacement(files, replacements, prefix, postfix):
    """Warn on stdout about dependencies that must be migrated by hand.

    For every file containing ``prefix + old + postfix``, the advice
    string mapped to that dependency is printed alongside the file path.

    NOTE(review): "manaul" is a typo for "manual"; the spelling is kept
    because the call sites at the bottom of this script use it.
    """
    for target in files:
        with open(target, "r") as handle:
            text = handle.read()
        for dependency, advice in replacements.items():
            if (prefix + dependency + postfix) in text:
                print("")
                print("WARNING!")
                print(target + " : " + advice)
                print("")
# --- main script flow -------------------------------------------------
# Collect every file in the project, then pick out the Gradle build
# scripts, Maven POMs and Java sources.
files = get_files_in_folder_and_subfolders(PROJECT_FOLDER)
gradles = filter_files(files, "build.gradle")
poms = filter_files(files, "pom.xml")
classes = filter_files(files, ".java")
# Load the rule files selected above (Spring Boot or Micronaut).
module_replacements = load_replacements(MODULE_REPLACEMENTS)
manual_module_replacements = load_replacements(MANUAL_MODULE_REPLACEMENTS)
class_replacements = load_replacements(CLASS_REPLACEMENTS)
artifact_group_replacements = load_replacements(ARTIFACT_GROUP_REPLACEMENTS)
version_replacements = load_replacements(VERSION_REPLACEMENTS)
# Rewrite module coordinates: ":module:" in Gradle,
# <artifactId>module</artifactId> in Maven.
replace_dependencies(gradles, module_replacements, ":", ":")
replace_dependencies(poms, module_replacements, "<artifactId>", "</artifactId>")
# Rewrite fully-qualified class names inside Java sources.
replace_dependencies(classes, class_replacements)
# Rewrite group ids and version placeholders in both build systems.
replace_dependencies(gradles, artifact_group_replacements, postfix = ":")
replace_dependencies(poms, artifact_group_replacements, "<groupId>", "</groupId>")
replace_dependencies(gradles, version_replacements, prefix = ":$")
replace_dependencies(poms, version_replacements, "<version>${", "}</version>")
# Pin library versions in gradle.properties / pom.xml.
update_libraries_gradle()
update_libraries_maven()
# Finally, report dependencies that need manual attention.
inspect_dependencies_for_manaul_replacement(gradles, manual_module_replacements, ":", ":")
inspect_dependencies_for_manaul_replacement(poms, manual_module_replacements, "<artifactId>", "</artifactId>")
ace565a147e29426c0bc9e515d6f201d16fd04bd | 41,951 | py | Python | dbt_artifacts_loader/dbt/v3/manifest.py | yu-iskw/dbt-artifacts-loader | cbb3580ff5ae7c2a6f13b414d7ed90f1161d255b | [
"Apache-2.0"
] | 10 | 2021-07-07T01:10:02.000Z | 2022-01-13T10:53:11.000Z | dbt_artifacts_loader/dbt/v3/manifest.py | yu-iskw/dbt-artifacts-loader | cbb3580ff5ae7c2a6f13b414d7ed90f1161d255b | [
"Apache-2.0"
] | null | null | null | dbt_artifacts_loader/dbt/v3/manifest.py | yu-iskw/dbt-artifacts-loader | cbb3580ff5ae7c2a6f13b414d7ed90f1161d255b | [
"Apache-2.0"
] | null | null | null | # generated by datamodel-codegen:
# filename: manifest.json
# timestamp: 2021-10-28T12:48:36+00:00
from __future__ import annotations
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from dbt_artifacts_loader.dbt.base_bigquery_model import BaseBigQueryModel
from pydantic import Extra, Field, constr
class ManifestMetadata(BaseBigQueryModel):
    """Metadata block of a dbt manifest v3 artifact.

    Generated by datamodel-codegen from the manifest JSON schema; hand
    edits will be overwritten on regeneration.
    """

    class Config:
        extra = Extra.forbid

    dbt_schema_version: Optional[
        str
    ] = 'https://schemas.getdbt.com/dbt/manifest/v3.json'
    dbt_version: Optional[str] = '0.21.0rc1'
    generated_at: Optional[datetime] = '2021-09-24T13:29:14.317700Z'
    invocation_id: Optional[Optional[str]] = None
    env: Optional[Dict[str, str]] = {}
    project_id: Optional[Optional[str]] = Field(
        None, description='A unique identifier for the project'
    )
    # constrained to a UUID-shaped hex string by the regex below
    user_id: Optional[
        Optional[
            constr(
                regex=r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
            )
        ]
    ] = Field(None, description='A unique identifier for the user')
    send_anonymous_usage_stats: Optional[Optional[bool]] = Field(
        None, description='Whether dbt is configured to send anonymous usage statistics'
    )
    adapter_type: Optional[Optional[str]] = Field(
        None, description='The type name of the adapter'
    )
class ResourceType(Enum):
analysis = 'analysis'
class FileHash(BaseBigQueryModel):
    """Checksum record for a source file (algorithm name + digest).

    Generated model — edits are lost on regeneration.
    """

    class Config:
        extra = Extra.forbid

    name: str
    checksum: str
class Hook(BaseBigQueryModel):
    """A pre-/post-hook SQL statement attached to a node config.

    Generated model — edits are lost on regeneration.
    """

    class Config:
        extra = Extra.forbid

    sql: str
    transaction: Optional[bool] = True
    index: Optional[Optional[int]] = None
class DependsOn(BaseBigQueryModel):
    """Unique-id lists of the macros and nodes a node depends on.

    Generated model — edits are lost on regeneration.
    """

    class Config:
        extra = Extra.forbid

    macros: Optional[List[str]] = []
    nodes: Optional[List[str]] = []
class ColumnInfo(BaseBigQueryModel):
    """Documentation/metadata for one column of a node (``extra`` fields
    are allowed and kept).

    Generated model — edits are lost on regeneration.
    """

    class Config:
        extra = Extra.allow

    name: str
    description: Optional[str] = ''
    meta: Optional[Dict[str, Any]] = {}
    data_type: Optional[Optional[str]] = None
    quote: Optional[Optional[bool]] = None
    tags: Optional[List[str]] = []
class Docs(BaseBigQueryModel):
class Config:
extra = Extra.forbid
show: Optional[bool] = True
class InjectedCTE(BaseBigQueryModel):
class Config:
extra = Extra.forbid
id: str
sql: str
class ResourceType1(Enum):
test = 'test'
class TestConfig(BaseBigQueryModel):
    """Resolved configuration for a dbt test node (severity, storage,
    pass/fail thresholds).

    Generated model — edits are lost on regeneration.
    """

    class Config:
        extra = Extra.allow

    enabled: Optional[bool] = True
    alias: Optional[Optional[str]] = None
    schema_: Optional[Optional[str]] = Field('dbt_test__audit', alias='schema')
    database: Optional[Optional[str]] = None
    tags: Optional[Union[List[str], str]] = []
    meta: Optional[Dict[str, Any]] = {}
    materialized: Optional[str] = 'test'
    # must spell WARN or ERROR, case-insensitive per the regex
    severity: Optional[
        constr(regex=r'^([Ww][Aa][Rr][Nn]|[Ee][Rr][Rr][Oo][Rr])$')
    ] = 'ERROR'
    store_failures: Optional[Optional[bool]] = None
    where: Optional[Optional[str]] = None
    limit: Optional[Optional[int]] = None
    fail_calc: Optional[str] = 'count(*)'
    warn_if: Optional[str] = '!= 0'
    error_if: Optional[str] = '!= 0'
class ResourceType2(Enum):
model = 'model'
class ResourceType3(Enum):
operation = 'operation'
class ResourceType4(Enum):
rpc = 'rpc'
class ResourceType5(Enum):
test = 'test'
class TestMetadata(BaseBigQueryModel):
class Config:
extra = Extra.forbid
name: str
kwargs: Optional[Dict[str, Any]] = {}
namespace: Optional[Optional[str]] = None
class ResourceType6(Enum):
seed = 'seed'
class SeedConfig(BaseBigQueryModel):
    """Resolved configuration for a seed node; note the hyphenated JSON
    keys ``post-hook``/``pre-hook`` mapped via field aliases.

    Generated model — edits are lost on regeneration.
    """

    class Config:
        extra = Extra.allow

    enabled: Optional[bool] = True
    alias: Optional[Optional[str]] = None
    schema_: Optional[Optional[str]] = Field(None, alias='schema')
    database: Optional[Optional[str]] = None
    tags: Optional[Union[List[str], str]] = []
    meta: Optional[Dict[str, Any]] = {}
    materialized: Optional[str] = 'seed'
    persist_docs: Optional[Dict[str, Any]] = {}
    post_hook: Optional[List[Hook]] = Field([], alias='post-hook')
    pre_hook: Optional[List[Hook]] = Field([], alias='pre-hook')
    quoting: Optional[Dict[str, Any]] = {}
    column_types: Optional[Dict[str, Any]] = {}
    full_refresh: Optional[Optional[bool]] = None
    on_schema_change: Optional[Optional[str]] = 'ignore'
    quote_columns: Optional[Optional[bool]] = None
class ResourceType7(Enum):
snapshot = 'snapshot'
class ResourceType8(Enum):
analysis = 'analysis'
class ResourceType9(Enum):
test = 'test'
class ParsedDataTestNode(BaseBigQueryModel):
class Config:
extra = Extra.forbid
raw_sql: str
database: Optional[Optional[str]] = None
schema_: str = Field(..., alias='schema')
fqn: List[str]
unique_id: str
package_name: str
root_path: str
path: str
original_file_path: str
name: str
resource_type: ResourceType9
alias: str
checksum: FileHash
config: Optional[TestConfig] = {
'enabled': True,
'alias': None,
'schema': 'dbt_test__audit',
'database': None,
'tags': [],
'meta': {},
'materialized': 'test',
'severity': 'ERROR',
'store_failures': None,
'where': None,
'limit': None,
'fail_calc': 'count(*)',
'warn_if': '!= 0',
'error_if': '!= 0',
}
tags: Optional[List[str]] = []
refs: Optional[List[List[str]]] = []
sources: Optional[List[List]] = []
depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
description: Optional[str] = ''
columns: Optional[Dict[str, ColumnInfo]] = {}
meta: Optional[Dict[str, Any]] = {}
docs: Optional[Docs] = {'show': True}
patch_path: Optional[Optional[str]] = None
compiled_path: Optional[Optional[str]] = None
build_path: Optional[Optional[str]] = None
deferred: Optional[bool] = False
unrendered_config: Optional[Dict[str, Any]] = {}
created_at: Optional[int] = 1632490154
config_call_dict: Optional[Dict[str, Any]] = {}
class ResourceType10(Enum):
operation = 'operation'
class ResourceType11(Enum):
model = 'model'
class ResourceType12(Enum):
rpc = 'rpc'
class ResourceType13(Enum):
test = 'test'
class ParsedSchemaTestNode(BaseBigQueryModel):
class Config:
extra = Extra.forbid
raw_sql: str
test_metadata: TestMetadata
database: Optional[Optional[str]] = None
schema_: str = Field(..., alias='schema')
fqn: List[str]
unique_id: str
package_name: str
root_path: str
path: str
original_file_path: str
name: str
resource_type: ResourceType13
alias: str
checksum: FileHash
config: Optional[TestConfig] = {
'enabled': True,
'alias': None,
'schema': 'dbt_test__audit',
'database': None,
'tags': [],
'meta': {},
'materialized': 'test',
'severity': 'ERROR',
'store_failures': None,
'where': None,
'limit': None,
'fail_calc': 'count(*)',
'warn_if': '!= 0',
'error_if': '!= 0',
}
tags: Optional[List[str]] = []
refs: Optional[List[List[str]]] = []
sources: Optional[List[List]] = []
depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
description: Optional[str] = ''
columns: Optional[Dict[str, ColumnInfo]] = {}
meta: Optional[Dict[str, Any]] = {}
docs: Optional[Docs] = {'show': True}
patch_path: Optional[Optional[str]] = None
compiled_path: Optional[Optional[str]] = None
build_path: Optional[Optional[str]] = None
deferred: Optional[bool] = False
unrendered_config: Optional[Dict[str, Any]] = {}
created_at: Optional[int] = 1632490154
config_call_dict: Optional[Dict[str, Any]] = {}
column_name: Optional[Optional[str]] = None
class ResourceType14(Enum):
seed = 'seed'
class ParsedSeedNode(BaseBigQueryModel):
class Config:
extra = Extra.forbid
raw_sql: str
database: Optional[Optional[str]] = None
schema_: str = Field(..., alias='schema')
fqn: List[str]
unique_id: str
package_name: str
root_path: str
path: str
original_file_path: str
name: str
resource_type: ResourceType14
alias: str
checksum: FileHash
config: Optional[SeedConfig] = {
'enabled': True,
'alias': None,
'schema': None,
'database': None,
'tags': [],
'meta': {},
'materialized': 'seed',
'persist_docs': {},
'quoting': {},
'column_types': {},
'full_refresh': None,
'on_schema_change': 'ignore',
'quote_columns': None,
'post-hook': [],
'pre-hook': [],
}
tags: Optional[List[str]] = []
refs: Optional[List[List[str]]] = []
sources: Optional[List[List]] = []
depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
description: Optional[str] = ''
columns: Optional[Dict[str, ColumnInfo]] = {}
meta: Optional[Dict[str, Any]] = {}
docs: Optional[Docs] = {'show': True}
patch_path: Optional[Optional[str]] = None
compiled_path: Optional[Optional[str]] = None
build_path: Optional[Optional[str]] = None
deferred: Optional[bool] = False
unrendered_config: Optional[Dict[str, Any]] = {}
created_at: Optional[int] = 1632490154
config_call_dict: Optional[Dict[str, Any]] = {}
class ResourceType15(Enum):
snapshot = 'snapshot'
class SnapshotConfig(BaseBigQueryModel):
    """Resolved configuration for a snapshot node, including the
    snapshot strategy fields (target schema/database, updated_at,
    check_cols).

    Generated model — edits are lost on regeneration.
    """

    class Config:
        extra = Extra.allow

    enabled: Optional[bool] = True
    alias: Optional[Optional[str]] = None
    schema_: Optional[Optional[str]] = Field(None, alias='schema')
    database: Optional[Optional[str]] = None
    tags: Optional[Union[List[str], str]] = []
    meta: Optional[Dict[str, Any]] = {}
    materialized: Optional[str] = 'snapshot'
    persist_docs: Optional[Dict[str, Any]] = {}
    post_hook: Optional[List[Hook]] = Field([], alias='post-hook')
    pre_hook: Optional[List[Hook]] = Field([], alias='pre-hook')
    quoting: Optional[Dict[str, Any]] = {}
    column_types: Optional[Dict[str, Any]] = {}
    full_refresh: Optional[Optional[bool]] = None
    on_schema_change: Optional[Optional[str]] = 'ignore'
    strategy: Optional[Optional[str]] = None
    unique_key: Optional[Optional[str]] = None
    target_schema: Optional[Optional[str]] = None
    target_database: Optional[Optional[str]] = None
    updated_at: Optional[Optional[str]] = None
    # either a single column name or a list of column names
    check_cols: Optional[Optional[Union[str, List[str]]]] = None
class ResourceType16(Enum):
source = 'source'
class Quoting(BaseBigQueryModel):
class Config:
extra = Extra.forbid
database: Optional[Optional[bool]] = None
schema_: Optional[Optional[bool]] = Field(None, alias='schema')
identifier: Optional[Optional[bool]] = None
column: Optional[Optional[bool]] = None
class FreshnessMetadata(BaseBigQueryModel):
class Config:
extra = Extra.forbid
dbt_schema_version: Optional[str] = 'https://schemas.getdbt.com/dbt/sources/v2.json'
dbt_version: Optional[str] = '0.21.0rc1'
generated_at: Optional[datetime] = '2021-09-24T13:29:14.312598Z'
invocation_id: Optional[Optional[str]] = None
env: Optional[Dict[str, str]] = {}
class Status(Enum):
runtime_error = 'runtime error'
class SourceFreshnessRuntimeError(BaseBigQueryModel):
class Config:
extra = Extra.forbid
unique_id: str
error: Optional[Optional[Union[str, int]]] = None
status: Status
class Status1(Enum):
pass_ = 'pass'
warn = 'warn'
error = 'error'
runtime_error = 'runtime error'
class Period(Enum):
minute = 'minute'
hour = 'hour'
day = 'day'
class Time(BaseBigQueryModel):
class Config:
extra = Extra.forbid
count: int
period: Period
class TimingInfo(BaseBigQueryModel):
class Config:
extra = Extra.forbid
name: str
started_at: Optional[Optional[datetime]] = None
completed_at: Optional[Optional[datetime]] = None
class ExternalPartition(BaseBigQueryModel):
class Config:
extra = Extra.allow
name: Optional[str] = ''
description: Optional[str] = ''
data_type: Optional[str] = ''
meta: Optional[Dict[str, Any]] = {}
class SourceConfig(BaseBigQueryModel):
class Config:
extra = Extra.allow
enabled: Optional[bool] = True
class ResourceType17(Enum):
macro = 'macro'
class MacroDependsOn(BaseBigQueryModel):
class Config:
extra = Extra.forbid
macros: Optional[List[str]] = []
class MacroArgument(BaseBigQueryModel):
class Config:
extra = Extra.forbid
name: str
type: Optional[Optional[str]] = None
description: Optional[str] = ''
class ParsedDocumentation(BaseBigQueryModel):
class Config:
extra = Extra.forbid
unique_id: str
package_name: str
root_path: str
path: str
original_file_path: str
name: str
block_contents: str
class Type(Enum):
dashboard = 'dashboard'
notebook = 'notebook'
analysis = 'analysis'
ml = 'ml'
application = 'application'
class ResourceType18(Enum):
model = 'model'
analysis = 'analysis'
test = 'test'
snapshot = 'snapshot'
operation = 'operation'
seed = 'seed'
rpc = 'rpc'
docs = 'docs'
source = 'source'
macro = 'macro'
exposure = 'exposure'
class MaturityEnum(Enum):
low = 'low'
medium = 'medium'
high = 'high'
class ExposureOwner(BaseBigQueryModel):
class Config:
extra = Extra.forbid
email: str
name: Optional[Optional[str]] = None
class NodeConfig(BaseBigQueryModel):
    """Resolved configuration shared by model/analysis/hook/RPC nodes
    (default materialization ``view``).

    Generated model — edits are lost on regeneration.
    """

    class Config:
        extra = Extra.allow

    enabled: Optional[bool] = True
    alias: Optional[Optional[str]] = None
    schema_: Optional[Optional[str]] = Field(None, alias='schema')
    database: Optional[Optional[str]] = None
    tags: Optional[Union[List[str], str]] = []
    meta: Optional[Dict[str, Any]] = {}
    materialized: Optional[str] = 'view'
    persist_docs: Optional[Dict[str, Any]] = {}
    post_hook: Optional[List[Hook]] = Field([], alias='post-hook')
    pre_hook: Optional[List[Hook]] = Field([], alias='pre-hook')
    quoting: Optional[Dict[str, Any]] = {}
    column_types: Optional[Dict[str, Any]] = {}
    full_refresh: Optional[Optional[bool]] = None
    on_schema_change: Optional[Optional[str]] = 'ignore'
class CompiledDataTestNode(BaseBigQueryModel):
class Config:
extra = Extra.forbid
raw_sql: str
compiled: bool
database: Optional[Optional[str]] = None
schema_: str = Field(..., alias='schema')
fqn: List[str]
unique_id: str
package_name: str
root_path: str
path: str
original_file_path: str
name: str
resource_type: ResourceType1
alias: str
checksum: FileHash
config: Optional[TestConfig] = {
'enabled': True,
'alias': None,
'schema': 'dbt_test__audit',
'database': None,
'tags': [],
'meta': {},
'materialized': 'test',
'severity': 'ERROR',
'store_failures': None,
'where': None,
'limit': None,
'fail_calc': 'count(*)',
'warn_if': '!= 0',
'error_if': '!= 0',
}
tags: Optional[List[str]] = []
refs: Optional[List[List[str]]] = []
sources: Optional[List[List]] = []
depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
description: Optional[str] = ''
columns: Optional[Dict[str, ColumnInfo]] = {}
meta: Optional[Dict[str, Any]] = {}
docs: Optional[Docs] = {'show': True}
patch_path: Optional[Optional[str]] = None
compiled_path: Optional[Optional[str]] = None
build_path: Optional[Optional[str]] = None
deferred: Optional[bool] = False
unrendered_config: Optional[Dict[str, Any]] = {}
created_at: Optional[int] = 1632490154
config_call_dict: Optional[Dict[str, Any]] = {}
compiled_sql: Optional[Optional[str]] = None
extra_ctes_injected: Optional[bool] = False
extra_ctes: Optional[List[InjectedCTE]] = []
relation_name: Optional[Optional[str]] = None
class CompiledModelNode(BaseBigQueryModel):
class Config:
extra = Extra.forbid
raw_sql: str
compiled: bool
database: Optional[Optional[str]] = None
schema_: str = Field(..., alias='schema')
fqn: List[str]
unique_id: str
package_name: str
root_path: str
path: str
original_file_path: str
name: str
resource_type: ResourceType2
alias: str
checksum: FileHash
config: Optional[NodeConfig] = {
'enabled': True,
'alias': None,
'schema': None,
'database': None,
'tags': [],
'meta': {},
'materialized': 'view',
'persist_docs': {},
'quoting': {},
'column_types': {},
'full_refresh': None,
'on_schema_change': 'ignore',
'post-hook': [],
'pre-hook': [],
}
tags: Optional[List[str]] = []
refs: Optional[List[List[str]]] = []
sources: Optional[List[List]] = []
depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
description: Optional[str] = ''
columns: Optional[Dict[str, ColumnInfo]] = {}
meta: Optional[Dict[str, Any]] = {}
docs: Optional[Docs] = {'show': True}
patch_path: Optional[Optional[str]] = None
compiled_path: Optional[Optional[str]] = None
build_path: Optional[Optional[str]] = None
deferred: Optional[bool] = False
unrendered_config: Optional[Dict[str, Any]] = {}
created_at: Optional[int] = 1632490154
config_call_dict: Optional[Dict[str, Any]] = {}
compiled_sql: Optional[Optional[str]] = None
extra_ctes_injected: Optional[bool] = False
extra_ctes: Optional[List[InjectedCTE]] = []
relation_name: Optional[Optional[str]] = None
class CompiledHookNode(BaseBigQueryModel):
class Config:
extra = Extra.forbid
raw_sql: str
compiled: bool
database: Optional[Optional[str]] = None
schema_: str = Field(..., alias='schema')
fqn: List[str]
unique_id: str
package_name: str
root_path: str
path: str
original_file_path: str
name: str
resource_type: ResourceType3
alias: str
checksum: FileHash
config: Optional[NodeConfig] = {
'enabled': True,
'alias': None,
'schema': None,
'database': None,
'tags': [],
'meta': {},
'materialized': 'view',
'persist_docs': {},
'quoting': {},
'column_types': {},
'full_refresh': None,
'on_schema_change': 'ignore',
'post-hook': [],
'pre-hook': [],
}
tags: Optional[List[str]] = []
refs: Optional[List[List[str]]] = []
sources: Optional[List[List]] = []
depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
description: Optional[str] = ''
columns: Optional[Dict[str, ColumnInfo]] = {}
meta: Optional[Dict[str, Any]] = {}
docs: Optional[Docs] = {'show': True}
patch_path: Optional[Optional[str]] = None
compiled_path: Optional[Optional[str]] = None
build_path: Optional[Optional[str]] = None
deferred: Optional[bool] = False
unrendered_config: Optional[Dict[str, Any]] = {}
created_at: Optional[int] = 1632490154
config_call_dict: Optional[Dict[str, Any]] = {}
compiled_sql: Optional[Optional[str]] = None
extra_ctes_injected: Optional[bool] = False
extra_ctes: Optional[List[InjectedCTE]] = []
relation_name: Optional[Optional[str]] = None
index: Optional[Optional[int]] = None
class CompiledRPCNode(BaseBigQueryModel):
class Config:
extra = Extra.forbid
raw_sql: str
compiled: bool
database: Optional[Optional[str]] = None
schema_: str = Field(..., alias='schema')
fqn: List[str]
unique_id: str
package_name: str
root_path: str
path: str
original_file_path: str
name: str
resource_type: ResourceType4
alias: str
checksum: FileHash
config: Optional[NodeConfig] = {
'enabled': True,
'alias': None,
'schema': None,
'database': None,
'tags': [],
'meta': {},
'materialized': 'view',
'persist_docs': {},
'quoting': {},
'column_types': {},
'full_refresh': None,
'on_schema_change': 'ignore',
'post-hook': [],
'pre-hook': [],
}
tags: Optional[List[str]] = []
refs: Optional[List[List[str]]] = []
sources: Optional[List[List]] = []
depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
description: Optional[str] = ''
columns: Optional[Dict[str, ColumnInfo]] = {}
meta: Optional[Dict[str, Any]] = {}
docs: Optional[Docs] = {'show': True}
patch_path: Optional[Optional[str]] = None
compiled_path: Optional[Optional[str]] = None
build_path: Optional[Optional[str]] = None
deferred: Optional[bool] = False
unrendered_config: Optional[Dict[str, Any]] = {}
created_at: Optional[int] = 1632490154
config_call_dict: Optional[Dict[str, Any]] = {}
compiled_sql: Optional[Optional[str]] = None
extra_ctes_injected: Optional[bool] = False
extra_ctes: Optional[List[InjectedCTE]] = []
relation_name: Optional[Optional[str]] = None
class CompiledSchemaTestNode(BaseBigQueryModel):
class Config:
extra = Extra.forbid
raw_sql: str
test_metadata: TestMetadata
compiled: bool
database: Optional[Optional[str]] = None
schema_: str = Field(..., alias='schema')
fqn: List[str]
unique_id: str
package_name: str
root_path: str
path: str
original_file_path: str
name: str
resource_type: ResourceType5
alias: str
checksum: FileHash
config: Optional[TestConfig] = {
'enabled': True,
'alias': None,
'schema': 'dbt_test__audit',
'database': None,
'tags': [],
'meta': {},
'materialized': 'test',
'severity': 'ERROR',
'store_failures': None,
'where': None,
'limit': None,
'fail_calc': 'count(*)',
'warn_if': '!= 0',
'error_if': '!= 0',
}
tags: Optional[List[str]] = []
refs: Optional[List[List[str]]] = []
sources: Optional[List[List]] = []
depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
description: Optional[str] = ''
columns: Optional[Dict[str, ColumnInfo]] = {}
meta: Optional[Dict[str, Any]] = {}
docs: Optional[Docs] = {'show': True}
patch_path: Optional[Optional[str]] = None
compiled_path: Optional[Optional[str]] = None
build_path: Optional[Optional[str]] = None
deferred: Optional[bool] = False
unrendered_config: Optional[Dict[str, Any]] = {}
created_at: Optional[int] = 1632490154
config_call_dict: Optional[Dict[str, Any]] = {}
compiled_sql: Optional[Optional[str]] = None
extra_ctes_injected: Optional[bool] = False
extra_ctes: Optional[List[InjectedCTE]] = []
relation_name: Optional[Optional[str]] = None
column_name: Optional[Optional[str]] = None
class CompiledSeedNode(BaseBigQueryModel):
class Config:
extra = Extra.forbid
raw_sql: str
compiled: bool
database: Optional[Optional[str]] = None
schema_: str = Field(..., alias='schema')
fqn: List[str]
unique_id: str
package_name: str
root_path: str
path: str
original_file_path: str
name: str
resource_type: ResourceType6
alias: str
checksum: FileHash
config: Optional[SeedConfig] = {
'enabled': True,
'alias': None,
'schema': None,
'database': None,
'tags': [],
'meta': {},
'materialized': 'seed',
'persist_docs': {},
'quoting': {},
'column_types': {},
'full_refresh': None,
'on_schema_change': 'ignore',
'quote_columns': None,
'post-hook': [],
'pre-hook': [],
}
tags: Optional[List[str]] = []
refs: Optional[List[List[str]]] = []
sources: Optional[List[List]] = []
depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
description: Optional[str] = ''
columns: Optional[Dict[str, ColumnInfo]] = {}
meta: Optional[Dict[str, Any]] = {}
docs: Optional[Docs] = {'show': True}
patch_path: Optional[Optional[str]] = None
compiled_path: Optional[Optional[str]] = None
build_path: Optional[Optional[str]] = None
deferred: Optional[bool] = False
unrendered_config: Optional[Dict[str, Any]] = {}
created_at: Optional[int] = 1632490154
config_call_dict: Optional[Dict[str, Any]] = {}
compiled_sql: Optional[Optional[str]] = None
extra_ctes_injected: Optional[bool] = False
extra_ctes: Optional[List[InjectedCTE]] = []
relation_name: Optional[Optional[str]] = None
class CompiledSnapshotNode(BaseBigQueryModel):
class Config:
extra = Extra.forbid
raw_sql: str
compiled: bool
database: Optional[Optional[str]] = None
schema_: str = Field(..., alias='schema')
fqn: List[str]
unique_id: str
package_name: str
root_path: str
path: str
original_file_path: str
name: str
resource_type: ResourceType7
alias: str
checksum: FileHash
config: Optional[NodeConfig] = {
'enabled': True,
'alias': None,
'schema': None,
'database': None,
'tags': [],
'meta': {},
'materialized': 'view',
'persist_docs': {},
'quoting': {},
'column_types': {},
'full_refresh': None,
'on_schema_change': 'ignore',
'post-hook': [],
'pre-hook': [],
}
tags: Optional[List[str]] = []
refs: Optional[List[List[str]]] = []
sources: Optional[List[List]] = []
depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
description: Optional[str] = ''
columns: Optional[Dict[str, ColumnInfo]] = {}
meta: Optional[Dict[str, Any]] = {}
docs: Optional[Docs] = {'show': True}
patch_path: Optional[Optional[str]] = None
compiled_path: Optional[Optional[str]] = None
build_path: Optional[Optional[str]] = None
deferred: Optional[bool] = False
unrendered_config: Optional[Dict[str, Any]] = {}
created_at: Optional[int] = 1632490154
config_call_dict: Optional[Dict[str, Any]] = {}
compiled_sql: Optional[Optional[str]] = None
extra_ctes_injected: Optional[bool] = False
extra_ctes: Optional[List[InjectedCTE]] = []
relation_name: Optional[Optional[str]] = None
class ParsedAnalysisNode(BaseBigQueryModel):
class Config:
extra = Extra.forbid
raw_sql: str
database: Optional[Optional[str]] = None
schema_: str = Field(..., alias='schema')
fqn: List[str]
unique_id: str
package_name: str
root_path: str
path: str
original_file_path: str
name: str
resource_type: ResourceType8
alias: str
checksum: FileHash
config: Optional[NodeConfig] = {
'enabled': True,
'alias': None,
'schema': None,
'database': None,
'tags': [],
'meta': {},
'materialized': 'view',
'persist_docs': {},
'quoting': {},
'column_types': {},
'full_refresh': None,
'on_schema_change': 'ignore',
'post-hook': [],
'pre-hook': [],
}
tags: Optional[List[str]] = []
refs: Optional[List[List[str]]] = []
sources: Optional[List[List]] = []
depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
description: Optional[str] = ''
columns: Optional[Dict[str, ColumnInfo]] = {}
meta: Optional[Dict[str, Any]] = {}
docs: Optional[Docs] = {'show': True}
patch_path: Optional[Optional[str]] = None
compiled_path: Optional[Optional[str]] = None
build_path: Optional[Optional[str]] = None
deferred: Optional[bool] = False
unrendered_config: Optional[Dict[str, Any]] = {}
created_at: Optional[int] = 1632490154
config_call_dict: Optional[Dict[str, Any]] = {}
class ParsedHookNode(BaseBigQueryModel):
class Config:
extra = Extra.forbid
raw_sql: str
database: Optional[Optional[str]] = None
schema_: str = Field(..., alias='schema')
fqn: List[str]
unique_id: str
package_name: str
root_path: str
path: str
original_file_path: str
name: str
resource_type: ResourceType10
alias: str
checksum: FileHash
config: Optional[NodeConfig] = {
'enabled': True,
'alias': None,
'schema': None,
'database': None,
'tags': [],
'meta': {},
'materialized': 'view',
'persist_docs': {},
'quoting': {},
'column_types': {},
'full_refresh': None,
'on_schema_change': 'ignore',
'post-hook': [],
'pre-hook': [],
}
tags: Optional[List[str]] = []
refs: Optional[List[List[str]]] = []
sources: Optional[List[List]] = []
depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
description: Optional[str] = ''
columns: Optional[Dict[str, ColumnInfo]] = {}
meta: Optional[Dict[str, Any]] = {}
docs: Optional[Docs] = {'show': True}
patch_path: Optional[Optional[str]] = None
compiled_path: Optional[Optional[str]] = None
build_path: Optional[Optional[str]] = None
deferred: Optional[bool] = False
unrendered_config: Optional[Dict[str, Any]] = {}
created_at: Optional[int] = 1632490154
config_call_dict: Optional[Dict[str, Any]] = {}
index: Optional[Optional[int]] = None
class ParsedModelNode(BaseBigQueryModel):
class Config:
extra = Extra.forbid
raw_sql: str
database: Optional[Optional[str]] = None
schema_: str = Field(..., alias='schema')
fqn: List[str]
unique_id: str
package_name: str
root_path: str
path: str
original_file_path: str
name: str
resource_type: ResourceType11
alias: str
checksum: FileHash
config: Optional[NodeConfig] = {
'enabled': True,
'alias': None,
'schema': None,
'database': None,
'tags': [],
'meta': {},
'materialized': 'view',
'persist_docs': {},
'quoting': {},
'column_types': {},
'full_refresh': None,
'on_schema_change': 'ignore',
'post-hook': [],
'pre-hook': [],
}
tags: Optional[List[str]] = []
refs: Optional[List[List[str]]] = []
sources: Optional[List[List]] = []
depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
description: Optional[str] = ''
columns: Optional[Dict[str, ColumnInfo]] = {}
meta: Optional[Dict[str, Any]] = {}
docs: Optional[Docs] = {'show': True}
patch_path: Optional[Optional[str]] = None
compiled_path: Optional[Optional[str]] = None
build_path: Optional[Optional[str]] = None
deferred: Optional[bool] = False
unrendered_config: Optional[Dict[str, Any]] = {}
created_at: Optional[int] = 1632490154
config_call_dict: Optional[Dict[str, Any]] = {}
class ParsedRPCNode(BaseBigQueryModel):
class Config:
extra = Extra.forbid
raw_sql: str
database: Optional[Optional[str]] = None
schema_: str = Field(..., alias='schema')
fqn: List[str]
unique_id: str
package_name: str
root_path: str
path: str
original_file_path: str
name: str
resource_type: ResourceType12
alias: str
checksum: FileHash
config: Optional[NodeConfig] = {
'enabled': True,
'alias': None,
'schema': None,
'database': None,
'tags': [],
'meta': {},
'materialized': 'view',
'persist_docs': {},
'quoting': {},
'column_types': {},
'full_refresh': None,
'on_schema_change': 'ignore',
'post-hook': [],
'pre-hook': [],
}
tags: Optional[List[str]] = []
refs: Optional[List[List[str]]] = []
sources: Optional[List[List]] = []
depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
description: Optional[str] = ''
columns: Optional[Dict[str, ColumnInfo]] = {}
meta: Optional[Dict[str, Any]] = {}
docs: Optional[Docs] = {'show': True}
patch_path: Optional[Optional[str]] = None
compiled_path: Optional[Optional[str]] = None
build_path: Optional[Optional[str]] = None
deferred: Optional[bool] = False
unrendered_config: Optional[Dict[str, Any]] = {}
created_at: Optional[int] = 1632490154
config_call_dict: Optional[Dict[str, Any]] = {}
class ParsedSnapshotNode(BaseBigQueryModel):
    """Parsed snapshot node from a dbt manifest (schema v3).

    Auto-generated pydantic model. Unlike most node models here, ``config``
    is required (``SnapshotConfig`` has no usable default in the schema).
    """

    class Config:
        # Reject unknown keys so schema drift in incoming manifests is surfaced.
        extra = Extra.forbid

    raw_sql: str
    database: Optional[Optional[str]] = None
    # Aliased because a field literally named 'schema' would shadow
    # pydantic's BaseModel.schema() method.
    schema_: str = Field(..., alias='schema')
    fqn: List[str]
    unique_id: str
    package_name: str
    root_path: str
    path: str
    original_file_path: str
    name: str
    resource_type: ResourceType15
    alias: str
    checksum: FileHash
    config: SnapshotConfig
    tags: Optional[List[str]] = []
    refs: Optional[List[List[str]]] = []
    sources: Optional[List[List]] = []
    depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
    description: Optional[str] = ''
    columns: Optional[Dict[str, ColumnInfo]] = {}
    meta: Optional[Dict[str, Any]] = {}
    docs: Optional[Docs] = {'show': True}
    patch_path: Optional[Optional[str]] = None
    compiled_path: Optional[Optional[str]] = None
    build_path: Optional[Optional[str]] = None
    deferred: Optional[bool] = False
    unrendered_config: Optional[Dict[str, Any]] = {}
    # Timestamp baked in by the code generator at generation time.
    created_at: Optional[int] = 1632490154
    config_call_dict: Optional[Dict[str, Any]] = {}
class FreshnessThreshold(BaseBigQueryModel):
    """Source-freshness thresholds (warn/error windows) from a dbt manifest."""

    class Config:
        extra = Extra.forbid

    warn_after: Optional[Optional[Time]] = None
    error_after: Optional[Optional[Time]] = None
    # Optional SQL filter applied when computing freshness.
    filter: Optional[Optional[str]] = None
class SourceFreshnessOutput(BaseBigQueryModel):
    """Result of one dbt source-freshness check for a single source."""

    class Config:
        extra = Extra.forbid

    unique_id: str
    max_loaded_at: datetime
    snapshotted_at: datetime
    # Age of the newest loaded record, in seconds, at snapshot time.
    max_loaded_at_time_ago_in_s: float
    status: Status1
    # The thresholds this result was evaluated against.
    criteria: FreshnessThreshold
    adapter_response: Dict[str, Any]
    timing: List[TimingInfo]
    thread_id: str
    execution_time: float
class ExternalTable(BaseBigQueryModel):
    """External-table properties of a dbt source (dbt-external-tables style)."""

    class Config:
        # Unlike the node models, unknown keys are allowed here — adapters
        # attach arbitrary extra properties to external tables.
        extra = Extra.allow

    location: Optional[Optional[str]] = None
    file_format: Optional[Optional[str]] = None
    row_format: Optional[Optional[str]] = None
    tbl_properties: Optional[Optional[str]] = None
    partitions: Optional[Optional[List[ExternalPartition]]] = None
class ParsedMacro(BaseBigQueryModel):
    """Parsed Jinja macro from a dbt manifest (schema v3)."""

    class Config:
        extra = Extra.forbid

    unique_id: str
    package_name: str
    root_path: str
    path: str
    original_file_path: str
    name: str
    # Raw Jinja source of the macro body.
    macro_sql: str
    resource_type: ResourceType17
    tags: Optional[List[str]] = []
    depends_on: Optional[MacroDependsOn] = {'macros': []}
    description: Optional[str] = ''
    meta: Optional[Dict[str, Any]] = {}
    docs: Optional[Docs] = {'show': True}
    patch_path: Optional[Optional[str]] = None
    arguments: Optional[List[MacroArgument]] = []
    # Timestamp baked in by the code generator at generation time.
    created_at: Optional[int] = 1632490154
class ParsedExposure(BaseBigQueryModel):
    """Parsed exposure (downstream use of dbt models) from a dbt manifest."""

    class Config:
        extra = Extra.forbid

    fqn: List[str]
    unique_id: str
    package_name: str
    root_path: str
    path: str
    original_file_path: str
    name: str
    type: Type
    owner: ExposureOwner
    resource_type: Optional[ResourceType18] = 'exposure'
    description: Optional[str] = ''
    maturity: Optional[Optional[MaturityEnum]] = None
    meta: Optional[Dict[str, Any]] = {}
    tags: Optional[List[str]] = []
    url: Optional[Optional[str]] = None
    depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
    refs: Optional[List[List[str]]] = []
    sources: Optional[List[List[str]]] = []
    # Timestamp baked in by the code generator at generation time.
    created_at: Optional[int] = 1632490154
class CompiledAnalysisNode(BaseBigQueryModel):
    """Compiled analysis node from a dbt manifest (schema v3).

    Same shape as the parsed analysis node plus compilation outputs
    (``compiled``, ``compiled_sql``, injected CTEs, ``relation_name``).
    """

    class Config:
        extra = Extra.forbid

    raw_sql: str
    compiled: bool
    database: Optional[Optional[str]] = None
    # Aliased because a field literally named 'schema' would shadow
    # pydantic's BaseModel.schema() method.
    schema_: str = Field(..., alias='schema')
    fqn: List[str]
    unique_id: str
    package_name: str
    root_path: str
    path: str
    original_file_path: str
    name: str
    resource_type: ResourceType
    alias: str
    checksum: FileHash
    # Default mirrors dbt's NodeConfig defaults verbatim.
    config: Optional[NodeConfig] = {
        'enabled': True,
        'alias': None,
        'schema': None,
        'database': None,
        'tags': [],
        'meta': {},
        'materialized': 'view',
        'persist_docs': {},
        'quoting': {},
        'column_types': {},
        'full_refresh': None,
        'on_schema_change': 'ignore',
        'post-hook': [],
        'pre-hook': [],
    }
    tags: Optional[List[str]] = []
    refs: Optional[List[List[str]]] = []
    sources: Optional[List[List]] = []
    depends_on: Optional[DependsOn] = {'macros': [], 'nodes': []}
    description: Optional[str] = ''
    columns: Optional[Dict[str, ColumnInfo]] = {}
    meta: Optional[Dict[str, Any]] = {}
    docs: Optional[Docs] = {'show': True}
    patch_path: Optional[Optional[str]] = None
    compiled_path: Optional[Optional[str]] = None
    build_path: Optional[Optional[str]] = None
    deferred: Optional[bool] = False
    unrendered_config: Optional[Dict[str, Any]] = {}
    # Timestamp baked in by the code generator at generation time.
    created_at: Optional[int] = 1632490154
    config_call_dict: Optional[Dict[str, Any]] = {}
    compiled_sql: Optional[Optional[str]] = None
    extra_ctes_injected: Optional[bool] = False
    extra_ctes: Optional[List[InjectedCTE]] = []
    relation_name: Optional[Optional[str]] = None
class ParsedSourceDefinition(BaseBigQueryModel):
    """Parsed source definition from a dbt manifest (schema v3)."""

    class Config:
        extra = Extra.forbid

    fqn: List[str]
    database: Optional[Optional[str]] = None
    # Aliased because a field literally named 'schema' would shadow
    # pydantic's BaseModel.schema() method.
    schema_: str = Field(..., alias='schema')
    unique_id: str
    package_name: str
    root_path: str
    path: str
    original_file_path: str
    name: str
    source_name: str
    source_description: str
    loader: str
    identifier: str
    resource_type: ResourceType16
    # Per-part quoting overrides; all unset by default.
    quoting: Optional[Quoting] = {
        'database': None,
        'schema': None,
        'identifier': None,
        'column': None,
    }
    loaded_at_field: Optional[Optional[str]] = None
    freshness: Optional[Optional[FreshnessThreshold]] = None
    external: Optional[Optional[ExternalTable]] = None
    description: Optional[str] = ''
    columns: Optional[Dict[str, ColumnInfo]] = {}
    meta: Optional[Dict[str, Any]] = {}
    source_meta: Optional[Dict[str, Any]] = {}
    tags: Optional[List[str]] = []
    config: Optional[SourceConfig] = {'enabled': True}
    patch_path: Optional[Optional[str]] = None
    unrendered_config: Optional[Dict[str, Any]] = {}
    relation_name: Optional[Optional[str]] = None
    # Timestamp baked in by the code generator at generation time.
    created_at: Optional[int] = 1632490154
class ManifestV3(BaseBigQueryModel):
    """Pydantic model of a complete dbt manifest (schema v3).

    Aggregates every parsed/compiled node, source, macro, doc, exposure and
    selector in a dbt project plus the parent/child dependency maps, with an
    extra ``loaded_at`` timestamp recording when the artifact was loaded.
    """

    class Config:
        extra = Extra.forbid

    # The loaded_at field was manually added.
    # BUG FIX: use default_factory so utcnow() runs when each instance is
    # created. `default=datetime.utcnow()` evaluated once at import time,
    # stamping every manifest with the module-load time instead of the
    # actual load time.
    loaded_at: datetime = Field(default_factory=datetime.utcnow,
                                description="The loaded time by dbt-artifacts-loader")
    metadata: ManifestMetadata = Field(..., description='Metadata about the manifest')
    nodes: Dict[
        str,
        Union[
            CompiledAnalysisNode,
            CompiledDataTestNode,
            CompiledModelNode,
            CompiledHookNode,
            CompiledRPCNode,
            CompiledSchemaTestNode,
            CompiledSeedNode,
            CompiledSnapshotNode,
            ParsedAnalysisNode,
            ParsedDataTestNode,
            ParsedHookNode,
            ParsedModelNode,
            ParsedRPCNode,
            ParsedSchemaTestNode,
            ParsedSeedNode,
            ParsedSnapshotNode,
        ],
    ] = Field(
        ..., description='The nodes defined in the dbt project and its dependencies'
    )
    sources: Dict[str, ParsedSourceDefinition] = Field(
        ..., description='The sources defined in the dbt project and its dependencies'
    )
    macros: Dict[str, ParsedMacro] = Field(
        ..., description='The macros defined in the dbt project and its dependencies'
    )
    docs: Dict[str, ParsedDocumentation] = Field(
        ..., description='The docs defined in the dbt project and its dependencies'
    )
    exposures: Dict[str, ParsedExposure] = Field(
        ..., description='The exposures defined in the dbt project and its dependencies'
    )
    selectors: Dict[str, Any] = Field(
        ..., description='The selectors defined in selectors.yml'
    )
    # Disabled nodes keep their full (parsed or compiled) shape; any node
    # type, or a source definition, may appear here.
    disabled: Optional[
        Optional[
            List[
                Union[
                    CompiledAnalysisNode,
                    CompiledDataTestNode,
                    CompiledModelNode,
                    CompiledHookNode,
                    CompiledRPCNode,
                    CompiledSchemaTestNode,
                    CompiledSeedNode,
                    CompiledSnapshotNode,
                    ParsedAnalysisNode,
                    ParsedDataTestNode,
                    ParsedHookNode,
                    ParsedModelNode,
                    ParsedRPCNode,
                    ParsedSchemaTestNode,
                    ParsedSeedNode,
                    ParsedSnapshotNode,
                    ParsedSourceDefinition,
                ]
            ]
        ]
    ] = Field(None, description='A list of the disabled nodes in the target')
    parent_map: Optional[Optional[Dict[str, List[str]]]] = Field(
        None, description='A mapping from\xa0child nodes to their dependencies'
    )
    child_map: Optional[Optional[Dict[str, List[str]]]] = Field(
        None, description='A mapping from parent nodes to their dependents'
    )
| 29.051939 | 88 | 0.612834 |
ace56683d6402dcbc4cc3e4898aa7f09ff697665 | 124 | py | Python | test_project/shop/apps.py | murray3k/wagtail-admin-list-controls | ad162fe70b9795937777b2dd6a01261deb394fdb | [
"MIT"
] | 30 | 2020-03-11T06:45:31.000Z | 2022-01-24T13:54:17.000Z | test_project/shop/apps.py | murray3k/wagtail-admin-list-controls | ad162fe70b9795937777b2dd6a01261deb394fdb | [
"MIT"
] | 14 | 2020-03-09T20:54:27.000Z | 2021-09-03T00:42:05.000Z | test_project/shop/apps.py | murray3k/wagtail-admin-list-controls | ad162fe70b9795937777b2dd6a01261deb394fdb | [
"MIT"
] | 3 | 2020-03-09T14:17:38.000Z | 2021-05-18T09:16:50.000Z | from django import apps
class AppConfig(apps.AppConfig):
name = '.'.join(__name__.split('.')[:-1])
label = 'shop'
| 17.714286 | 45 | 0.629032 |
ace566f081e4f40b66c55409c2ed9b2197e89858 | 3,793 | py | Python | analytics/dbrks_reference_shapefile/dbrks_reference_shapefile_ccg_boundaries_snapshot.py | nhsx-mirror/au-azure-databricks | 198d4b4127c339aa4f108025aa9218d4e43d3e64 | [
"MIT"
] | 3 | 2021-09-16T17:59:05.000Z | 2022-01-05T18:47:41.000Z | analytics/dbrks_reference_shapefile/dbrks_reference_shapefile_ccg_boundaries_snapshot.py | nhsx-mirror/au-azure-databricks | 198d4b4127c339aa4f108025aa9218d4e43d3e64 | [
"MIT"
] | 8 | 2021-09-17T08:20:21.000Z | 2022-03-23T16:15:31.000Z | analytics/dbrks_reference_shapefile/dbrks_reference_shapefile_ccg_boundaries_snapshot.py | nhsx-mirror/au-azure-databricks | 198d4b4127c339aa4f108025aa9218d4e43d3e64 | [
"MIT"
] | 1 | 2021-10-02T21:17:15.000Z | 2021-10-02T21:17:15.000Z | # Databricks notebook source
#!/usr/bin python3
# -------------------------------------------------------------------------
# Copyright (c) 2021 NHS England and NHS Improvement. All rights reserved.
# Licensed under the MIT License. See license.txt in the project root for
# license information.
# -------------------------------------------------------------------------
"""
FILE: dbrks_reference_shapefile_ccg_boundaries_snapshot_raw.py
DESCRIPTION:
Databricks notebook with code to process a GeoJSON file
from the ONS Geo Portal for CCG boundaries into tablular format
for NHSX Analytics Unit dashboard projects
USAGE:
CONTRIBUTORS: Mattia Ficarelli and Craig Shenton
CONTACT: data@nhsx.nhs.uk
CREATED: 21 Oct. 2021
VERSION: 0.0.1
"""
# COMMAND ----------
# Install libs
# -------------------------------------------------------------------------
%pip install geojson==2.5.* tabulate requests pandas pathlib azure-storage-file-datalake beautifulsoup4 numpy urllib3 lxml regex pyarrow==5.0.* geopandas shapely geopandas shapely
# COMMAND ----------
# Imports
# -------------------------------------------------------------------------
# Python:
import os
import io
import tempfile
from datetime import datetime
import json
import regex as re
# 3rd party:
import pandas as pd
import numpy as np
from pathlib import Path
from azure.storage.filedatalake import DataLakeServiceClient
import requests
from urllib.request import urlopen
from urllib import request as urlreq
from bs4 import BeautifulSoup
import geojson
import geopandas as gpd
from shapely.geometry import Point, Polygon, shape
from shapely import wkb, wkt
import shapely.speedups
shapely.speedups.enable()
# Connect to Azure datalake
# -------------------------------------------------------------------------
# !env from databricks secrets
# NOTE: `dbutils` is a Databricks-notebook builtin; this cell only works
# inside a Databricks runtime.
CONNECTION_STRING = dbutils.secrets.get(scope="datalakefs", key="CONNECTION_STRING")

# COMMAND ----------

# MAGIC %run /Repos/dev/au-azure-databricks/functions/dbrks_helper_functions

# COMMAND ----------

# Load JSON config from Azure datalake
# -------------------------------------------------------------------------
# datalake_download / datalake_latestFolder / datalake_upload come from the
# helper notebook pulled in by the %run magic above.
file_path_config = "/config/pipelines/reference_tables/"
file_name_config = "config_shapefiles.json"
file_system_config = "nhsxdatalakesagen2fsprod"
config_JSON = datalake_download(CONNECTION_STRING, file_system_config, file_path_config, file_name_config)
config_JSON = json.loads(io.BytesIO(config_JSON).read())

# COMMAND ----------

# Read parameters from JSON config
# -------------------------------------------------------------------------
file_system = config_JSON['pipeline']['adl_file_system']
shapefile_source_path = config_JSON['pipeline']['project'][0]['shapefile_source_path']
shapefile_source_file = config_JSON['pipeline']['project'][0]['shapefile_source_file']
shapefile_sink_path = config_JSON['pipeline']['project'][0]['shapefile_sink_path']
shapefile_sink_file = config_JSON['pipeline']['project'][0]['shapefile_sink_file']

# COMMAND ----------

#Processing
# Download the newest GeoJSON snapshot and rename its first three columns to
# the dashboard's expected headers. Assumes column order is
# (id, CCG code, CCG name) — TODO confirm against the ONS Geo Portal export.
latestFolder = datalake_latestFolder(CONNECTION_STRING, file_system, shapefile_source_path)
file = datalake_download(CONNECTION_STRING, file_system, shapefile_source_path+latestFolder, shapefile_source_file)
df = gpd.read_file(io.BytesIO(file))
column_mapping = {df.columns[0]: 'Unique ID', df.columns[1]: 'CCG code', df.columns[2]: 'CCG name'}
df_1 = df.rename(columns=column_mapping)
df_2 = df_1.set_index('Unique ID')
df_processed = df_2.copy()

# COMMAND ----------

#Upload processed data to datalake
# Serialize to CSV in memory and write it next to the source snapshot folder.
file_contents = io.StringIO()
df_processed.to_csv(file_contents)
datalake_upload(file_contents, CONNECTION_STRING, file_system, shapefile_sink_path+latestFolder, shapefile_sink_file)
| 37.186275 | 180 | 0.659109 |
ace567806f7ae4c5f6dd809680fe4eee1f93e48c | 5,952 | py | Python | mmdet/models/backbones/ssd_vgg.py | qilei123/mmdetection_latest | 39dfd45c2e1abdf7f98c0940d72689bab2264a0e | [
"Apache-2.0"
] | null | null | null | mmdet/models/backbones/ssd_vgg.py | qilei123/mmdetection_latest | 39dfd45c2e1abdf7f98c0940d72689bab2264a0e | [
"Apache-2.0"
] | null | null | null | mmdet/models/backbones/ssd_vgg.py | qilei123/mmdetection_latest | 39dfd45c2e1abdf7f98c0940d72689bab2264a0e | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import VGG, constant_init, kaiming_init, normal_init, xavier_init
from mmcv.runner import load_checkpoint
from mmdet.utils import get_root_logger
from ..builder import BACKBONES
@BACKBONES.register_module()
class SSDVGG(VGG):
"""VGG Backbone network for single-shot-detection.
Args:
input_size (int): width and height of input, from {300, 512}.
depth (int): Depth of vgg, from {11, 13, 16, 19}.
out_indices (Sequence[int]): Output from which stages.
Example:
>>> self = SSDVGG(input_size=300, depth=11)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 300, 300)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 1024, 19, 19)
(1, 512, 10, 10)
(1, 256, 5, 5)
(1, 256, 3, 3)
(1, 256, 1, 1)
"""
extra_setting = {
300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
384: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
}
def __init__(self,
input_size,
depth,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
l2_norm_scale=20.):
# TODO: in_channels for mmcv.VGG
super(SSDVGG, self).__init__(
depth,
with_last_pool=with_last_pool,
ceil_mode=ceil_mode,
out_indices=out_indices)
assert input_size in (300, 384, 512)
self.input_size = input_size
self.features.add_module(
str(len(self.features)),
nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
self.features.add_module(
str(len(self.features)),
nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.features.add_module(
str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.out_feature_indices = out_feature_indices
self.inplanes = 1024
self.extra = self._make_extra_layers(self.extra_setting[input_size])
self.l2_norm = L2Norm(
self.features[out_feature_indices[0] - 1].out_channels,
l2_norm_scale)
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.features.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
elif isinstance(m, nn.Linear):
normal_init(m, std=0.01)
else:
raise TypeError('pretrained must be a str or None')
for m in self.extra.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
constant_init(self.l2_norm, self.l2_norm.scale)
def forward(self, x):
"""Forward function."""
outs = []
for i, layer in enumerate(self.features):
x = layer(x)
if i in self.out_feature_indices:
outs.append(x)
for i, layer in enumerate(self.extra):
x = F.relu(layer(x), inplace=True)
if i % 2 == 1:
outs.append(x)
outs[0] = self.l2_norm(outs[0])
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
def _make_extra_layers(self, outplanes):
layers = []
kernel_sizes = (1, 3)
num_layers = 0
outplane = None
for i in range(len(outplanes)):
if self.inplanes == 'S':
self.inplanes = outplane
continue
k = kernel_sizes[num_layers % 2]
if outplanes[i] == 'S':
outplane = outplanes[i + 1]
conv = nn.Conv2d(
self.inplanes, outplane, k, stride=2, padding=1)
else:
outplane = outplanes[i]
conv = nn.Conv2d(
self.inplanes, outplane, k, stride=1, padding=0)
layers.append(conv)
self.inplanes = outplanes[i]
num_layers += 1
if self.input_size == 512:
layers.append(nn.Conv2d(self.inplanes, 256, 4, padding=1))
return nn.Sequential(*layers)
class L2Norm(nn.Module):
    """Channel-wise L2 normalization with a learnable per-channel scale.

    Each spatial position of a (N, C, H, W) tensor is divided by the L2
    norm of its channel vector, then multiplied by a learnable weight of
    shape (C,).
    """

    def __init__(self, n_dims, scale=20., eps=1e-10):
        """L2 normalization layer.

        Args:
            n_dims (int): Number of dimensions to be normalized
            scale (float, optional): Defaults to 20..
            eps (float, optional): Used to avoid division by zero.
                Defaults to 1e-10.
        """
        super(L2Norm, self).__init__()
        self.n_dims = n_dims
        # Left uninitialized on purpose: external init code sets it to
        # ``self.scale`` (e.g. via constant_init in the backbone).
        self.weight = nn.Parameter(torch.Tensor(self.n_dims))
        self.eps = eps
        self.scale = scale

    def forward(self, x):
        """Forward function."""
        # Do the arithmetic in FP32 so normalization stays stable when the
        # surrounding network trains in FP16; cast back at the end.
        x32 = x.float()
        denom = x32.pow(2).sum(1, keepdim=True).sqrt() + self.eps
        per_channel_scale = self.weight[None, :, None, None].float()
        normalized = per_channel_scale.expand_as(x32) * x32 / denom
        return normalized.type_as(x)
ace56bc12720e8bbf9aa3df370fa38e5ff84a9c8 | 993 | py | Python | application/camera/cube_camera_shutter.py | 708yamaguchi/MaixPy_scripts | 5f1774e739fb7eecab344d619c0cd63a71ff3d4f | [
"MIT"
] | 485 | 2019-03-18T10:53:59.000Z | 2022-03-27T09:02:08.000Z | application/camera/cube_camera_shutter.py | 708yamaguchi/MaixPy_scripts | 5f1774e739fb7eecab344d619c0cd63a71ff3d4f | [
"MIT"
] | 110 | 2019-04-04T09:07:39.000Z | 2022-03-03T08:08:19.000Z | application/camera/cube_camera_shutter.py | 708yamaguchi/MaixPy_scripts | 5f1774e739fb7eecab344d619c0cd63a71ff3d4f | [
"MIT"
] | 379 | 2019-03-18T04:48:46.000Z | 2022-03-30T00:29:29.000Z | # simple_camera - By: chris - 周四 8月 6 2020
import sensor, image, time, lcd
from fpioa_manager import fm
from board import board_info
from Maix import GPIO
import time
# Running counter used to build unique /flash file names for each shot.
num = 0
switch_status = 0  # NOTE(review): never read after this — appears unused.
# Map the BOOT key and ENTER key to GPIO pins (MaixPy FPIOA routing).
fm.register(board_info.BOOT_KEY, fm.fpioa.GPIO1, force=True)
fm.register(board_info.ENTER,fm.fpioa.GPIOHS10,force=True)
key_shot = GPIO(GPIO.GPIOHS10,GPIO.IN)      # shutter button (active low)
repl_unlock = GPIO(GPIO.GPIO1, GPIO.IN)     # hold BOOT to exit and free the REPL

lcd.init(freq=15000000)
sensor.reset()
sensor.set_pixformat(sensor.YUV422)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)  # let auto-exposure settle before first frame
clock = time.clock()
# Viewfinder loop: runs until the BOOT key is pressed (pulls the pin low).
while(repl_unlock.value() != 0):
    clock.tick()
    img = sensor.snapshot()
    if key_shot.value() == 0:
        # Shutter pressed: show confirmation, then save frame to flash.
        path = "/flash/camera-" + str(num) + ".jpg"
        lcd.draw_string(80,40,"Saved :)",lcd.RED,lcd.WHITE)
        time.sleep(1)
        img.save(path)
        num += 1
    else:
        # No press: just show the live frame.
        lcd.display(img)
time.sleep(2)
# Re-import/re-init the LCD after the loop so the exit message renders
# on a fresh display state.
import lcd
lcd.init()
lcd.draw_string(60, 100, "REPL is unlocked!", lcd.RED, lcd.BLACK)
time.sleep(4)
ace56e2728a6f7357e46ab171923e8b1d24fcd75 | 8,325 | py | Python | src/skmultiflow/meta/oza_bagging_adwin.py | imran-salim/scikit-multiflow | 05cf9dc095744b2990da326f0172fbab2c7e026f | [
"BSD-3-Clause"
] | 1 | 2020-04-16T10:17:03.000Z | 2020-04-16T10:17:03.000Z | src/skmultiflow/meta/oza_bagging_adwin.py | imran-salim/scikit-multiflow | 05cf9dc095744b2990da326f0172fbab2c7e026f | [
"BSD-3-Clause"
] | null | null | null | src/skmultiflow/meta/oza_bagging_adwin.py | imran-salim/scikit-multiflow | 05cf9dc095744b2990da326f0172fbab2c7e026f | [
"BSD-3-Clause"
] | null | null | null | import copy as cp
from skmultiflow.meta import OzaBaggingClassifier
from skmultiflow.lazy import KNNADWINClassifier
from skmultiflow.drift_detection import ADWIN
from skmultiflow.utils.utils import *
import warnings
def OzaBaggingAdwin(base_estimator=KNNADWINClassifier(), n_estimators=10, random_state=None):  # pragma: no cover
    """Deprecated factory kept for backward compatibility.

    Emits a FutureWarning and constructs an OzaBaggingADWINClassifier with
    the same arguments. Use OzaBaggingADWINClassifier directly instead.
    """
    message = ("'OzaBaggingAdwin' has been renamed to 'OzaBaggingADWINClassifier' in v0.5.0.\n"
               "The old name will be removed in v0.7.0")
    warnings.warn(message, category=FutureWarning)
    return OzaBaggingADWINClassifier(
        base_estimator=base_estimator,
        n_estimators=n_estimators,
        random_state=random_state,
    )
class OzaBaggingADWINClassifier(OzaBaggingClassifier):
    """ Oza Bagging ensemble classifier with ADWIN change detector.

    Parameters
    ----------
    base_estimator: skmultiflow.core.BaseSKMObject or sklearn.BaseEstimator (default=KNNADWINClassifier)
        Each member of the ensemble is an instance of the base estimator.

    n_estimators: int (default=10)
        The size of the ensemble, in other words, how many classifiers to train.

    random_state: int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used by `np.random`.

    Raises
    ------
    ValueError: A ValueError is raised if the 'classes' parameter is
    not passed in the first partial_fit call.

    Notes
    -----
    This online ensemble learner method is an improvement from the Online
    Bagging algorithm [1]_. The improvement comes from the addition of a ADWIN change
    detector.

    ADWIN stands for Adaptive Windowing. It works by keeping updated
    statistics of a variable sized window, so it can detect changes and
    perform cuts in its window to better adapt the learning algorithms.

    References
    ----------
    .. [1] N. C. Oza, “Online Bagging and Boosting,” in 2005 IEEE International Conference on Systems,
       Man and Cybernetics, 2005, vol. 3, no. 3, pp. 2340–2345.

    Examples
    --------
    >>> # Imports
    >>> from skmultiflow.meta import OzaBaggingADWINClassifier
    >>> from skmultiflow.lazy import KNNClassifier
    >>> from skmultiflow.data.sea_generator import SEAGenerator
    >>> # Setting up the stream
    >>> stream = SEAGenerator(1, noise_percentage=6.7)
    >>> # Setting up the OzaBaggingADWINClassifier to work with KNN as base estimator
    >>> clf = OzaBaggingADWINClassifier(base_estimator=KNNClassifier(n_neighbors=8, max_window_size=2000, leaf_size=30),
    ...                                 n_estimators=2)
    >>> # Keeping track of sample count and correct prediction count
    >>> sample_count = 0
    >>> corrects = 0
    >>> # Pre training the classifier with 200 samples
    >>> X, y = stream.next_sample(200)
    >>> clf = clf.partial_fit(X, y, classes=stream.target_values)
    >>> for i in range(2000):
    ...     X, y = stream.next_sample()
    ...     pred = clf.predict(X)
    ...     clf = clf.partial_fit(X, y)
    ...     if pred is not None:
    ...         if y[0] == pred[0]:
    ...             corrects += 1
    ...     sample_count += 1
    >>>
    >>> # Displaying the results
    >>> print(str(sample_count) + ' samples analyzed.')
    2000 samples analyzed.
    >>> print('OzaBaggingADWINClassifier performance: ' + str(corrects / sample_count))
    OzaBaggingADWINClassifier performance: 0.9645

    """

    def __init__(self, base_estimator=KNNADWINClassifier(), n_estimators=10, random_state=None):
        super().__init__(base_estimator, n_estimators, random_state)
        # default values
        # One ADWIN drift detector per ensemble member; created in __configure.
        self.adwin_ensemble = None
        self.__configure()

    def __configure(self):
        # Fresh, independent ADWIN detector for each of the current members.
        self.adwin_ensemble = [cp.deepcopy(ADWIN()) for _ in range(self.actual_n_estimators)]

    def reset(self):
        # Resets only the drift detectors; the base ensemble state is managed
        # by the parent class.
        self.__configure()
        return self

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """ Partially (incrementally) fit the model.

        Parameters
        ----------
        X : numpy.ndarray of shape (n_samples, n_features)
            The features to train the model.

        y: numpy.ndarray of shape (n_samples)
            An array-like with the class labels of all samples in X.

        classes: numpy.ndarray, optional (default=None)
            Array with all possible/known class labels. This is an optional parameter, except
            for the first partial_fit call where it is compulsory.

        sample_weight: numpy.ndarray of shape (n_samples), optional (default=None)
            Samples weight. If not provided, uniform weights are assumed. Usage varies depending on the base estimator.

        Raises
        ------
        ValueError: A ValueError is raised if the 'classes' parameter is not
        passed in the first partial_fit call, or if they are passed in further
        calls but differ from the initial classes list passed.

        Returns
        _______
        OzaBaggingADWINClassifier
            self

        Notes
        -----
        Since it's an ensemble learner, if X and y matrix of more than one
        sample are passed, the algorithm will partial fit the model one sample
        at a time.

        Each sample is trained by each classifier a total of K times, where K
        is drawn by a Poisson(1) distribution.

        Alongside updating the model, the learner will also update ADWIN's
        statistics over the new samples, so that the change detector can
        evaluate if a concept drift was detected. In the case drift is detected,
        the bagging algorithm will find the worst performing classifier and reset
        its statistics and window.

        """
        r, c = get_dimensions(X)
        if self.classes is None:
            if classes is None:
                raise ValueError("The first partial_fit call should pass all the classes.")
            else:
                self.classes = classes

        if self.classes is not None and classes is not None:
            if set(self.classes) == set(classes):
                pass
            else:
                raise ValueError(
                    "The classes passed to the partial_fit function differ from those passed in an earlier moment.")

        self.__adjust_ensemble_size()

        change_detected = False
        for i in range(self.actual_n_estimators):
            # Online bagging: each member sees this sample k ~ Poisson(1) times.
            k = self._random_state.poisson()
            if k > 0:
                for b in range(k):
                    self.ensemble[i].partial_fit(X, y, classes, sample_weight)

            try:
                # Feed this member's per-sample correctness (1/0) into its
                # ADWIN detector and flag a drift if the error worsened.
                pred = self.ensemble[i].predict(X)
                error_estimation = self.adwin_ensemble[i].estimation
                for j in range(r):
                    if pred[j] is not None:
                        if pred[j] == y[j]:
                            self.adwin_ensemble[i].add_element(1)
                        else:
                            self.adwin_ensemble[i].add_element(0)
                if self.adwin_ensemble[i].detected_change():
                    if self.adwin_ensemble[i].estimation > error_estimation:
                        change_detected = True
            except ValueError:
                # Prediction can fail before the member has seen enough data;
                # skip drift tracking for this round.
                change_detected = False
                pass

        if change_detected:
            # Reset the worst-performing member (highest ADWIN error estimate)
            # and give it a fresh detector.
            max_threshold = 0.0
            i_max = -1
            for i in range(self.actual_n_estimators):
                if max_threshold < self.adwin_ensemble[i].estimation:
                    max_threshold = self.adwin_ensemble[i].estimation
                    i_max = i
            if i_max != -1:
                self.ensemble[i_max].reset()
                self.adwin_ensemble[i_max] = ADWIN()

        return self

    def __adjust_ensemble_size(self):
        # Grow the ensemble (and its detectors) if new classes were observed,
        # keeping one member per known class beyond the initial size.
        if len(self.classes) != len(self.ensemble):
            if len(self.classes) > len(self.ensemble):
                for i in range(len(self.ensemble), len(self.classes)):
                    self.ensemble.append(cp.deepcopy(self.base_estimator))
                    self.adwin_ensemble.append(ADWIN())
                    self.actual_n_estimators += 1
ace5700a6e1a8a4f7b4738cf6ee5f0df89d47cf7 | 42 | py | Python | content/usr/src/app/examples/env.py | jerenius/Tahtoprobe | bce3cc439d2d63897ecbffeec820d637dc4cdb46 | [
"MIT"
] | null | null | null | content/usr/src/app/examples/env.py | jerenius/Tahtoprobe | bce3cc439d2d63897ecbffeec820d637dc4cdb46 | [
"MIT"
] | null | null | null | content/usr/src/app/examples/env.py | jerenius/Tahtoprobe | bce3cc439d2d63897ecbffeec820d637dc4cdb46 | [
"MIT"
] | null | null | null | import os
print(os.environ['mqttbroker'])
| 14 | 31 | 0.761905 |
ace5702d874c92be4f22b57cd79bdca87caa35bf | 8,394 | py | Python | src/code/run-hawkeye-for-different-parameter-combinations.py | srijankr/hawkeye | 674c729b1aa2825b84c7703063e1addbb9950c7a | [
"MIT"
] | 3 | 2021-11-10T19:45:40.000Z | 2022-01-23T11:29:07.000Z | src/code/run-hawkeye-for-different-parameter-combinations.py | srijankr/hawkeye | 674c729b1aa2825b84c7703063e1addbb9950c7a | [
"MIT"
] | null | null | null | src/code/run-hawkeye-for-different-parameter-combinations.py | srijankr/hawkeye | 674c729b1aa2825b84c7703063e1addbb9950c7a | [
"MIT"
] | null | null | null | '''This script is used to generate and store the accuracy obtained for each tweet
by running the HawkEye with different combinations of weighing constants and parameters'''
import pandas as pd
import numpy as np
import math
import pickle
import itertools
def rev2(ratings,notes,lambda1,lambda2,lambda3,init_goodness,convergence_threshold,alpha1,beta1,gamma1,delta1,mu_r,mu_w,mu_t,mu_g):
    """Run the HawkEye (REV2-style) fixed-point iteration over notes/ratings.

    Iteratively co-computes four mutually dependent quantities until the
    largest L1 change across all of them drops below ``convergence_threshold``:

    - rating fairness (per rater), smoothed toward prior ``mu_r`` with weight ``alpha1``
    - writing fairness (per note author), smoothed toward ``mu_w`` with ``beta1``
    - tweet accuracy (per tweet), smoothed toward ``mu_t`` with ``delta1``
    - note goodness, a lambda-weighted mix of the three signals, with the
      rating-derived term smoothed toward ``mu_g`` with ``gamma1``

    NOTE: both ``ratings`` and ``notes`` are mutated in place (columns are
    added/overwritten); the same (mutated) frames are also returned.
    The update order of the statements below is load-bearing — each metric
    reads columns written by the previous one in the same pass.

    Parameters
    ----------
    ratings : pandas.DataFrame with at least 'participantId', 'noteId', 'helpful'
    notes : pandas.DataFrame with at least 'noteId', 'tweetId', 'participantId',
        'classification'
    lambda1, lambda2, lambda3 : weights of the three goodness terms
    init_goodness : initial goodness assigned to every note/rating
    convergence_threshold : stop when the max total absolute change is below this
    alpha1, beta1, gamma1, delta1 : Laplace-smoothing strengths
    mu_r, mu_w, mu_t, mu_g : prior means used by the smoothing terms

    Returns
    -------
    (notes, ratings) : the mutated input DataFrames with the converged columns.
    """
    #do initializations
    ratings['goodness'] = [init_goodness]*len(ratings)
    # Map helpfulness / classification to a signed {+1, -1} score.
    ratings['rating'] = ratings.apply(lambda x : 1 if x['helpful']==1 else -1,axis=1)
    notes['goodness'] = [init_goodness]*len(notes)
    notes['verdict'] = notes.apply(lambda x : 1 if x['classification']=='NOT_MISLEADING' else -1,axis=1)
    #DO
    # --- one full update pass before entering the convergence loop ---
    #Fairness of user in rating notes
    ratings['score_goodness_difference_metric'] = 1-((ratings['rating']-ratings['goodness']).abs()/2)
    ratings['rating_fairness'] = (ratings.groupby(['participantId'])['score_goodness_difference_metric'].transform("sum") + alpha1*mu_r)/(ratings.groupby(['participantId'])['participantId'].transform("count") + alpha1)
    #Fairness of user in writing notes
    notes['writing_fairness'] = (notes.groupby(['participantId'])['goodness'].transform("sum") + beta1*mu_w)/(notes.groupby(['participantId'])['participantId'].transform("count") + beta1)
    #Accuracy of Tweet
    notes['weighted_goodness'] = notes['goodness']*notes['verdict']
    notes['tweet_accuracy'] = (notes.groupby(['tweetId'])['weighted_goodness'].transform("sum") + delta1*mu_t)/(notes.groupby(['tweetId'])['tweetId'].transform("count") + delta1)
    #Goodness of notes
    ratings['weighted_rating_fairness'] = ratings['rating_fairness']*ratings['rating']
    ratings['goodness_term1'] = (ratings.groupby(['noteId'])['weighted_rating_fairness'].transform("sum") + gamma1*mu_g)/(ratings.groupby(['noteId'])['noteId'].transform("count") + gamma1)
    # Notes with no ratings fall back to a term of 1 (times lambda1).
    notes['goodness_term1'] = lambda1*notes.apply(lambda x: 1 if len(ratings.loc[ratings['noteId'] == x['noteId']])==0 else ratings.loc[ratings['noteId'] == x['noteId']].iloc[0]['goodness_term1'],axis=1)
    notes['goodness_term3'] = lambda3*(1-(notes['tweet_accuracy']-notes['verdict']).abs())
    notes['goodness'] = 1/3 * (notes['goodness_term1'] + lambda2*notes['writing_fairness'] + notes['goodness_term3'])
    #IMPORTANT : Update goodness ratings df
    ratings['goodness'] = ratings.apply(lambda x: notes.loc[notes['noteId'] == x['noteId']].iloc[0]['goodness'],axis=1)
    #WHILE
    t = 1
    error = math.inf
    while(error>convergence_threshold):
        # Snapshot current values so the total absolute change of this pass
        # can be measured after the update.
        old_rating_fairness_values = np.array(ratings['rating_fairness'])
        old_writing_fairness_values = np.array(notes['writing_fairness'])
        old_tweet_accuracy_values = np.array(notes['tweet_accuracy'])
        old_goodness_values = np.array(notes['goodness'])
        #Fairness of user in rating notes
        ratings['score_goodness_difference_metric'] = 1-((ratings['rating']-ratings['goodness']).abs()/2)
        ratings['rating_fairness'] = (ratings.groupby(['participantId'])['score_goodness_difference_metric'].transform("sum") + alpha1*mu_r)/(ratings.groupby(['participantId'])['participantId'].transform("count") + alpha1)
        #Fairness of user in writing notes
        notes['writing_fairness'] = (notes.groupby(['participantId'])['goodness'].transform("sum") + beta1*mu_w)/(notes.groupby(['participantId'])['participantId'].transform("count") + beta1)
        #Accuracy of Tweet
        notes['weighted_goodness'] = notes['goodness']*notes['verdict']
        notes['tweet_accuracy'] = (notes.groupby(['tweetId'])['weighted_goodness'].transform("sum") + delta1*mu_t)/(notes.groupby(['tweetId'])['tweetId'].transform("count") + delta1)
        #Goodness of notes
        ratings['weighted_rating_fairness'] = ratings['rating_fairness']*ratings['rating']
        ratings['goodness_term1'] = (ratings.groupby(['noteId'])['weighted_rating_fairness'].transform("sum") + gamma1*mu_g)/(ratings.groupby(['noteId'])['noteId'].transform("count") + gamma1)
        notes['goodness_term1'] = lambda1*notes.apply(lambda x: 1 if len(ratings.loc[ratings['noteId'] == x['noteId']])==0 else ratings.loc[ratings['noteId'] == x['noteId']].iloc[0]['goodness_term1'],axis=1)
        notes['goodness_term3'] = lambda3*(1-(notes['tweet_accuracy']-notes['verdict']).abs())
        notes['goodness'] = 1/3 * (notes['goodness_term1'] + lambda2*notes['writing_fairness'] + notes['goodness_term3'])
        #IMPORTANT : Update goodness ratings df
        ratings['goodness'] = ratings.apply(lambda x: notes.loc[notes['noteId'] == x['noteId']].iloc[0]['goodness'],axis=1)
        new_rating_fairness_values = np.array(ratings['rating_fairness'])
        new_writing_fairness_values = np.array(notes['writing_fairness'])
        new_tweet_accuracy_values = np.array(notes['tweet_accuracy'])
        new_goodness_values = np.array(notes['goodness'])
        # Convergence metric: the largest summed absolute change among the
        # four quantity vectors.
        rating_fairness_error = np.sum(np.absolute((np.subtract(old_rating_fairness_values,new_rating_fairness_values))))
        writing_fairness_error = np.sum(np.absolute(np.subtract(old_writing_fairness_values,new_writing_fairness_values)))
        tweet_accuracy_error = np.sum(np.absolute(np.subtract(old_tweet_accuracy_values,new_tweet_accuracy_values)))
        goodness_error = np.sum(np.absolute(np.subtract(old_goodness_values,new_goodness_values)))
        error = max(rating_fairness_error,writing_fairness_error,tweet_accuracy_error,goodness_error)
        t += 1
    return notes,ratings
def runRev2(results,parameterCombination,keywordArgs):
    """Run one rev2 pass for a single hyper-parameter combination.

    Stores the resulting (notes, ratings) pair in `results` keyed by the
    combination tuple and returns the (mutated) results mapping.
    """
    # Unpack the 7-tuple of weighing/smoothing constants in one step.
    alpha1, beta1, gamma1, delta1, lambda1, lambda2, lambda3 = parameterCombination
    notes_new, ratings_new = rev2(
        keywordArgs['ratingsGlobal'],
        keywordArgs['notesGlobal'],
        lambda1, lambda2, lambda3,
        keywordArgs['init_goodness'],
        keywordArgs['convergence_threshold'],
        alpha1, beta1, gamma1, delta1,
        keywordArgs['mu_r'], keywordArgs['mu_w'],
        keywordArgs['mu_t'], keywordArgs['mu_g'])
    results[parameterCombination] = (notes_new, ratings_new)
    return results
if __name__ == '__main__':
    init_goodness = 1
    convergence_threshold = 0.001
    # Load Birdwatch notes/ratings dumps (tab-separated) and keep only the
    # columns the algorithm uses.
    notesGlobal = pd.read_csv("..//data//notes-00000-13-04-21.tsv", sep='\t')
    ratingsGlobal = pd.read_csv("..//data//ratings-00000-13-04-21.tsv", sep='\t')
    notesGlobal = notesGlobal[['noteId', 'participantId','tweetId','classification']]
    ratingsGlobal = ratingsGlobal[['noteId', 'participantId','helpful','notHelpful']]
    no_of_rating_participants = len(set(ratingsGlobal['participantId']))
    no_of_writing_participants = len(set(notesGlobal['participantId']))
    no_of_tweets = len(set(notesGlobal['tweetId']))
    no_of_notes = len(set(notesGlobal['noteId']))
    # Smoothing priors; each simplifies to 1.0 (kept explicit so the counts
    # they derive from are visible if a different prior is wanted later).
    mu_r = 1*no_of_rating_participants/no_of_rating_participants
    mu_w = 1*no_of_writing_participants/no_of_writing_participants
    mu_t = 1*no_of_tweets/no_of_tweets
    mu_g = 1*no_of_notes/no_of_notes
    keywordArgs = {'ratingsGlobal': ratingsGlobal,
    'notesGlobal' : notesGlobal,
    'init_goodness' : init_goodness,
    'convergence_threshold' : convergence_threshold,
    'mu_r' : mu_r,
    'mu_w' : mu_w,
    'mu_t' : mu_t,
    'mu_g' : mu_g}
    a = [0,1,2] #CHANGE THIS TO CHANGE/ADD VALUES OF WEIGHING CONSTANTS
    b = [0,0.5,1] #CHANGE THIS TO CHANGE/ADD VALUES OF SMOOTHING PARAMETERS
    parameterCombinations = list(itertools.product(a,a,a,a,b,b,b))
    results = {}
    for parameterCombination in parameterCombinations:
        # Bug fix: runRev2 was invoked twice per combination, doubling the
        # runtime while producing the identical result (same key overwritten).
        results = runRev2(results,parameterCombination,keywordArgs)
    results_dict = dict(results)
    with open('results/hawkeye_all_combination_parameter_runs_result.pickle', 'wb') as handle:
        pickle.dump(results_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 54.506494 | 222 | 0.690374 |
ace5706840a2f35005096fca80732e97c0f40118 | 336 | py | Python | animeoffline/mal/__init__.py | purplepinapples/anime-offline | f6d8a40e7d857bb5ed392197d01573cf635bd40f | [
"MIT"
] | null | null | null | animeoffline/mal/__init__.py | purplepinapples/anime-offline | f6d8a40e7d857bb5ed392197d01573cf635bd40f | [
"MIT"
] | 5 | 2019-02-24T09:28:13.000Z | 2019-03-05T05:03:18.000Z | animeoffline/mal/__init__.py | purplepinapples/anime-offline | f6d8a40e7d857bb5ed392197d01573cf635bd40f | [
"MIT"
] | null | null | null | import requests
from jikanpy import Jikan
from animeoffline import *
from animeoffline import config
from animeoffline import utils
# if anime_offline/config.py is unedited, this assumes a local instance
# of jikan-rest is running on your system
# https://github.com/jikan-me/jikan-rest
jikan = Jikan(selected_base=config.jikan_url)
| 25.846154 | 71 | 0.806548 |
ace571379c01562c1e139da63173e8306ce96c11 | 4,891 | py | Python | recsim_ng/applications/ecosystem_simulation/corpus.py | ZiminPark/recsim_ng | bed733e386744417498bc36578483cea45aa374c | [
"Apache-2.0"
] | null | null | null | recsim_ng/applications/ecosystem_simulation/corpus.py | ZiminPark/recsim_ng | bed733e386744417498bc36578483cea45aa374c | [
"Apache-2.0"
] | null | null | null | recsim_ng/applications/ecosystem_simulation/corpus.py | ZiminPark/recsim_ng | bed733e386744417498bc36578483cea45aa374c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The RecSim Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Corpus entity for welfare simulation."""
from typing import Any, Mapping, Text
import edward2 as ed # type: ignore
import gin
from gym import spaces
import numpy as np
from recsim_ng.core import value
from recsim_ng.entities.choice_models import selectors
from recsim_ng.entities.recommendation import corpus
from recsim_ng.entities.state_models import static
from recsim_ng.lib.tensorflow import field_spec
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
Value = value.Value
ValueSpec = value.ValueSpec
Space = field_spec.Space
@gin.configurable
class ViableCorpus(corpus.Corpus):
  """Defines a corpus with state transition simulating viable content providers.

  Providers accumulate discounted click counts; documents are sampled from a
  Gaussian mixture over providers whose mixture weights favour providers with
  larger cumulative click counts.

  Attributes:
    num_providers: number of content providers.
    gamma: a parameter controlling how probable it is that documents are
      generated from the provider with the largest cumulative click count.
    discount: the discount factor to compute cumulative click count.
  """

  def __init__(self,
               config,
               gamma = 2.,
               discount = .96,
               provider_stddev = 1.):
    super().__init__(config)
    self._num_providers = config.get("num_providers")
    self._gamma = gamma
    self._discount = discount
    self._provider_means = config.get("provider_means")
    self._provider_stddev = provider_stddev
    # Each mixture component is an isotropic Gaussian in topic space with a
    # shared scalar scale (scaled-identity covariance operator).
    lop_ctor = lambda params: tf.linalg.LinearOperatorScaledIdentity(  # pylint: disable=g-long-lambda
        num_rows=self._num_topics,
        multiplier=params)
    self._doc_feature_model = static.GMMVector(
        batch_ndims=1, linear_operator_ctor=lop_ctor, return_component_id=True)

  def initial_state(self):
    """The initial state which sets all provider click counts to zero."""
    return Value(
        provider_click_count=ed.Deterministic(
            tf.zeros((self._num_providers,), dtype=tf.float32)))

  def next_state(self, previous_state, user_response,
                 slate_docs):
    """Increases click counts of content providers of consumed documents."""
    chosen_docs = user_response.get("choice")
    chosen_doc_features = selectors.get_chosen(slate_docs, chosen_docs)
    provider_id = chosen_doc_features.get("provider_id")
    provider_id_one_hot = tf.one_hot(
        provider_id, self._num_providers, dtype=tf.float32)
    # Exponentially discount past clicks, then add this step's clicks summed
    # over axis 0 (presumably the user/batch axis — confirm against caller).
    provider_click_count = (
        self._discount * previous_state.get("provider_click_count") +
        tf.reduce_sum(provider_id_one_hot, 0))
    return Value(provider_click_count=ed.Deterministic(provider_click_count))

  def available_documents(self, corpus_state):
    """Samples the currently available documents from the provider mixture."""
    # Take softmax over content providers based on their provider_click_count.
    # log1p-style logits scaled by gamma: larger gamma concentrates document
    # generation on the most-clicked providers.
    provider_mixture_logits = tf.broadcast_to(
        tf.expand_dims(
            self._gamma *
            tf.math.log(1 + corpus_state.get("provider_click_count")),
            axis=0), [self._num_docs, self._num_providers])
    batch_provider_means = tf.broadcast_to(
        tf.expand_dims(self._provider_means, axis=0),
        [self._num_docs] + list(self._provider_means.shape))
    parameters = Value(
        mixture_logits=provider_mixture_logits,
        component_means=batch_provider_means,
        component_scales=self._provider_stddev)
    gmm_vector_initial_state = self._doc_feature_model.initial_state(parameters)
    return Value(
        provider_id=gmm_vector_initial_state.get("component_id"),
        doc_features=gmm_vector_initial_state.get("state"),
    )

  def specs(self):
    """Specs for state and document spaces (per the Box bounds below:
    click counts in [0, inf), provider ids in [0, num_providers],
    doc features unbounded in topic space)."""
    state_spec = ValueSpec(
        provider_click_count=Space(
            spaces.Box(
                low=np.zeros(self._num_providers),
                high=np.ones(self._num_providers) * np.Inf)))
    available_docs_spec = ValueSpec(
        provider_id=Space(
            spaces.Box(
                low=np.zeros(self._num_docs),
                high=np.ones(self._num_docs) * self._num_providers)),
        doc_features=Space(
            spaces.Box(
                low=np.ones((self._num_docs, self._num_topics)) * -np.Inf,
                high=np.ones((self._num_docs, self._num_topics)) * np.Inf)))
    return state_spec.prefixed_with("state").union(
        available_docs_spec.prefixed_with("available_docs"))
ace5735a253bbc2977bd8bc9a1e4bfbe6093fceb | 5,730 | py | Python | CopyJumpMachine.py | arkenidar/CopyJumpMachine | 48b2e5e7c2a611e577c0b038baa378ba6ff34c50 | [
"MIT"
] | 3 | 2017-08-15T12:46:49.000Z | 2020-08-12T23:36:10.000Z | CopyJumpMachine.py | arkenidar/CopyJumpMachine | 48b2e5e7c2a611e577c0b038baa378ba6ff34c50 | [
"MIT"
] | null | null | null | CopyJumpMachine.py | arkenidar/CopyJumpMachine | 48b2e5e7c2a611e577c0b038baa378ba6ff34c50 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
'''A bytecode interpreter. Possible instructions are of assignment or jump type.'''
class BitPrinter: # pylint: disable=too-few-public-methods
    '''Accumulates printed bits and emits a byte value after every 8 bits.'''
    def __init__(self):
        # Bits received since the last completed byte, least significant first.
        self.out_sequence = []
    def print_bit(self, output_bit):
        '''Record one output bit; on every 8th bit also print the byte value.'''
        assert output_bit in (0, 1)
        self.out_sequence.append(output_bit)
        print('bit:', output_bit)
        if len(self.out_sequence) == 8:
            # LSB-first: bit at position i contributes bit * 2**i.
            output_byte = sum(bit << position
                              for position, bit in enumerate(self.out_sequence))
            print('byte:', output_byte)
            self.out_sequence = []
def run_code(code, bit_printer, memory):
    '''Parses and runs some source code.

    :param code: program text; one instruction per line ("m dst src" copies a
        bit, "j ..." jumps, "l name" defines a label, "#" starts a comment).
    :param bit_printer: object with a print_bit(bit) method; receives bits
        written to the special destination "out".
    :param memory: list of bit cells; two constant cells (0 and 1) are
        prepended, so program addresses are offset by 2.
    '''
    def init_memory(mem):
        '''Initializes memory.
        #memory[0] = 0 #constant source of zeroes during bit copy (mov)
        #memory[1] = 1 #constant source of ones during bit copy (mov)'''
        return [0, 1] + mem
    mem = init_memory(memory)
    def parse(code):
        '''Parses source code into executable lines and a label->line map.'''
        lines = []
        line_labels = {}
        comment_ignore_mode = False
        for line in code.split('\n'):
            # handles multi-line comments
            # (they begin in a line and end in another line,
            # and the in-between is commented)
            if line.strip() == '#begin:':
                comment_ignore_mode = True
                continue
            if line.strip() == '#end:':
                comment_ignore_mode = False
                continue
            if comment_ignore_mode:
                continue
            # pre-parsing
            line = line.split("#")[0] # skip comments (everything after a '#' character)
            line = line.strip() # strip blank characters
            if line == '':
                continue # skip empty lines
            operands = line.split(' ')
            if operands[0] != 'l': # exclude line_labels (lines with label)
                lines.append(line)
            elif operands[0] == 'l':
                line_labels[operands[1]] = len(lines)
        # implicit "end" label just past the last instruction
        line_labels['end'] = len(lines)
        return lines, line_labels
    lines, line_labels = parse(code)
    def execute(lines, line_labels, bit_printer):
        '''Runs some byte code.'''
        def read(src): # read from address
            '''Reads a bit from an address (int address or the "in" device).
            NOTE(review): a non-int src other than "in" leaves `value`
            unbound and raises NameError — confirm intended.'''
            try:
                src = int(src)
                value = mem[src]
            except ValueError:
                if src == 'in':
                    while True:
                        try:
                            value = int(input('bit? '))
                            if value in (0, 1):
                                break
                        except ValueError:
                            pass
            return value
        def write(dst, src, bit_printer): # write to address
            '''Writes a bit copying from source address to destination address.'''
            try:
                dst = int(dst)
                mem[dst] = src
            except ValueError:
                if dst == 'out':
                    bit_printer.print_bit(src)
        def jump(cline, instruction_pointer, line_labels): # pylint: disable=too-many-arguments
            '''Handles Jump instructions (unconditional, 1- and 2-way
            conditional). NOTE(review): an operand count outside 2..4 leaves
            new_instruction_pointer unbound and raises NameError.'''
            if len(cline) == 2: # unconditional jump
                new_instruction_pointer = cline[1]
            elif len(cline) == 3: # conditional jump
                bit = read(cline[1])
                assert bit == 0 or bit == 1
                if bit == 0:
                    new_instruction_pointer = 'next'
                elif bit == 1:
                    new_instruction_pointer = cline[2]
            elif len(cline) == 4: # 2-conditional jump
                bit = read(cline[1])
                assert bit == 0 or bit == 1
                if bit == 0:
                    new_instruction_pointer = cline[2]
                elif bit == 1:
                    new_instruction_pointer = cline[3]
            if new_instruction_pointer == 'next':
                instruction_pointer += 1
            else:
                try:
                    instruction_pointer = int(new_instruction_pointer)
                except ValueError:
                    instruction_pointer = line_labels[new_instruction_pointer]
            return instruction_pointer
        executable_lines = list(map(lambda s: s.split(' '), lines))
        instruction_pointer = 0
        while instruction_pointer < len(lines):
            cline = executable_lines[instruction_pointer]
            if cline[0] == 'j': # parse jump
                instruction_pointer = jump(cline, instruction_pointer, line_labels)
            elif cline[0] == 'm': # parse move
                dst, src = cline[1], cline[2]
                got = read(src)
                write(dst, got, bit_printer)
                instruction_pointer += 1
            else:
                print('current line(cline) is not a valid instruction:', cline)
                # Bug fix: skip the invalid line. Without advancing the
                # pointer the interpreter looped forever on the same bad
                # instruction.
                instruction_pointer += 1
    execute(lines, line_labels, bit_printer)
def run_code_from_string(code_string):
    '''Runs program-code string.'''
    printer = BitPrinter()
    # 8 KiB of zeroed bit cells for the program to use.
    zeroed_memory = [0] * (1024 * 8)
    run_code(code_string, printer, zeroed_memory)
def run_code_from_argv():
    '''Runs program from a file containing program-code
    whose file-name is provided as CLI argument(argv).'''
    import sys
    if len(sys.argv) != 2:
        print('provide a file-name argument!')
    else:
        file_name = sys.argv[1]
        # Use a context manager so the file handle is closed deterministically
        # instead of leaking until garbage collection.
        with open(file_name) as program_file:
            run_code_from_string(program_file.read())

if __name__ == '__main__':
    run_code_from_argv()
| 32.931034 | 95 | 0.52164 |
ace573f7990a697ee4956542aebb4fc3ddbef3cb | 253 | py | Python | rocketgraph/__init__.py | vd2org/rocketgraph | dfb8b56def9665e7026ea95c17d65e5023521cbe | [
"MIT"
] | null | null | null | rocketgraph/__init__.py | vd2org/rocketgraph | dfb8b56def9665e7026ea95c17d65e5023521cbe | [
"MIT"
] | null | null | null | rocketgraph/__init__.py | vd2org/rocketgraph | dfb8b56def9665e7026ea95c17d65e5023521cbe | [
"MIT"
] | null | null | null | # Copyright (C) 2019 by Vd.
# This file is part of Rocketgraph, the powerful asynchronous library for telegra.ph.
# Rocketgraph is released under the MIT License (see LICENSE).
from .connectors import *
from .types import *
from .client import Client
| 28.111111 | 85 | 0.762846 |
ace5746720f1b2e60d09ad9076e607885c21db1e | 44,704 | py | Python | frappe/__init__.py | Ravn10/frappe | 3e0183bc62d4457228a38d764708b0b7c5549816 | [
"MIT"
] | null | null | null | frappe/__init__.py | Ravn10/frappe | 3e0183bc62d4457228a38d764708b0b7c5549816 | [
"MIT"
] | 4 | 2020-03-24T16:24:58.000Z | 2021-06-01T22:59:40.000Z | frappe/__init__.py | Ravn10/frappe | 3e0183bc62d4457228a38d764708b0b7c5549816 | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
globals attached to frappe module
+ some utility functions that should probably be moved
"""
from __future__ import unicode_literals, print_function
from six import iteritems, binary_type, text_type, string_types
from werkzeug.local import Local, release_local
import os, sys, importlib, inspect, json
# public
from .exceptions import *
from .utils.jinja import get_jenv, get_template, render_template, get_email_from_template
__version__ = '10.1.62'
__title__ = "Frappe Framework"
local = Local()
class _dict(dict):
"""dict like object that exposes keys as attributes"""
def __getattr__(self, key):
ret = self.get(key)
if not ret and key.startswith("__"):
raise AttributeError()
return ret
def __setattr__(self, key, value):
self[key] = value
def __getstate__(self):
return self
def __setstate__(self, d):
self.update(d)
def update(self, d):
"""update and return self -- the missing dict feature in python"""
super(_dict, self).update(d)
return self
def copy(self):
return _dict(dict(self).copy())
def _(msg, lang=None):
	"""Returns translated string in current lang, if exists.

	Falls back to the message itself when no translation is found.
	:param msg: String to translate (coerced to stripped unicode first).
	:param lang: Optional language code; defaults to `frappe.local.lang`."""
	from frappe.translate import get_full_dict
	if not hasattr(local, 'lang'):
		# init() has not run yet for this thread; seed a default language
		local.lang = lang or 'en'
	if not lang:
		lang = local.lang
	# msg should always be unicode
	msg = as_unicode(msg).strip()
	# return lang_full_dict according to lang passed parameter
	return get_full_dict(lang).get(msg) or msg
def as_unicode(text, encoding='utf-8'):
	'''Convert `text` to unicode if required.

	Returns '' for None, the value unchanged when already unicode,
	bytes decoded with `encoding`, and str()-style coercion otherwise.'''
	if isinstance(text, text_type):
		return text
	elif text is None:
		# `is None` (identity check) instead of `== None`: avoids invoking
		# custom __eq__ implementations and is the idiomatic form.
		return ''
	elif isinstance(text, binary_type):
		return text_type(text, encoding)
	else:
		return text_type(text)
def get_lang_dict(fortype, name=None):
	"""Returns the translated language dict for the given type and name.

	Thin wrapper over `frappe.translate.get_dict`.
	:param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
	:param name: name of the document for which assets are to be returned."""
	from frappe.translate import get_dict
	return get_dict(fortype, name)

def set_user_lang(user, user_language=None):
	"""Guess and set user language for the session (`frappe.local.lang`).

	NOTE(review): the `user_language` parameter is accepted but never
	forwarded to `get_user_lang` — confirm whether that is intended."""
	from frappe.translate import get_user_lang
	local.lang = get_user_lang(user)
# local-globals
# Thread-local proxies (werkzeug Local) to the per-request state set up by
# init()/connect(); importing these names gives code elsewhere transparent
# access to the current site's db, request, session, logs etc.
db = local("db")
conf = local("conf")
form = form_dict = local("form_dict")
request = local("request")
response = local("response")
session = local("session")
user = local("user")
flags = local("flags")
error_log = local("error_log")
debug_log = local("debug_log")
message_log = local("message_log")
lang = local("lang")
def init(site, sites_path=None, new_site=False):
	"""Initialize frappe for the current site. Reset thread locals `frappe.local`

	Idempotent per thread: returns immediately if already initialised.
	:param site: Site name (folder under `sites_path`).
	:param sites_path: Path containing all sites; defaults to '.'.
	:param new_site: True while the site is being created (suppresses the
		missing-site-config exit in get_site_config)."""
	if getattr(local, "initialised", None):
		return
	if not sites_path:
		sites_path = '.'
	# per-request log buffers
	local.error_log = []
	local.message_log = []
	local.debug_log = []
	local.realtime_log = []
	# runtime flags consulted throughout the framework
	local.flags = _dict({
		"ran_schedulers": [],
		"currently_saving": [],
		"redirect_location": "",
		"in_install_db": False,
		"in_install_app": False,
		"in_import": False,
		"in_test": False,
		"mute_messages": False,
		"ignore_links": False,
		"mute_emails": False,
		"has_dataurl": False,
		"new_site": new_site
	})
	local.rollback_observers = []
	local.test_objects = {}
	local.site = site
	local.sites_path = sites_path
	local.site_path = os.path.join(sites_path, site)
	local.request_ip = None
	local.response = _dict({"docs":[]})
	local.task_id = None
	local.conf = _dict(get_site_config())
	local.lang = local.conf.lang or "en"
	local.lang_full_dict = None
	local.module_app = None
	local.app_modules = None
	local.system_settings = _dict()
	local.user = None
	local.user_perms = None
	local.session = None
	local.role_permissions = {}
	local.valid_columns = {}
	local.new_doc_templates = {}
	local.link_count = {}
	# jinja environment/loader, built lazily
	local.jenv = None
	local.jloader =None
	local.cache = {}
	local.meta_cache = {}
	local.form_dict = _dict()
	local.session = _dict()
	setup_module_map()
	local.initialised = True

def connect(site=None, db_name=None):
	"""Connect to site database instance and switch to the Administrator user.

	:param site: If site is given, calls `frappe.init`.
	:param db_name: Optional. Will use from `site_config.json`."""
	from frappe.database import Database
	if site:
		init(site)
	local.db = Database(user=db_name or local.conf.db_name)
	set_user("Administrator")
def get_site_config(sites_path=None, site_path=None):
	"""Returns `site_config.json` combined with `sites/common_site_config.json`.
	`site_config` is a set of site wide settings like database name, password, email etc.

	Site-level keys override common keys. Exits the process when the site's
	site_config.json is missing (unless a new site is being created)."""
	config = {}
	sites_path = sites_path or getattr(local, "sites_path", None)
	site_path = site_path or getattr(local, "site_path", None)
	if sites_path:
		common_site_config = os.path.join(sites_path, "common_site_config.json")
		if os.path.exists(common_site_config):
			config.update(get_file_json(common_site_config))
	if site_path:
		site_config = os.path.join(site_path, "site_config.json")
		if os.path.exists(site_config):
			config.update(get_file_json(site_config))
		elif local.site and not local.flags.new_site:
			print("{0} does not exist".format(local.site))
			sys.exit(1)
			# NOTE(review): legacy Python 2 raise kept for reference below
			#raise IncorrectSitePath, "{0} does not exist".format(site_config)
	return _dict(config)

def get_conf(site=None):
	"""Return site config for `site`, or the already-initialised
	`frappe.local.conf` when this thread has one."""
	if hasattr(local, 'conf'):
		return local.conf
	else:
		# if no site, get from common_site_config.json
		with init_site(site):
			return local.conf
class init_site:
	"""Context manager: initialise frappe for `site` on entry and tear the
	connection down on exit.

	If site is None, initialise for the empty site ('') so that only
	common_site_config.json is loaded."""
	def __init__(self, site=None):
		self.site = site or ''

	def __enter__(self):
		init(self.site)
		return local

	def __exit__(self, type, value, traceback):
		destroy()
def destroy():
	"""Closes connection and releases werkzeug local."""
	if db:
		db.close()
	release_local(local)

# memcache
# Module-level singleton; created lazily by cache().
redis_server = None
def cache():
	"""Returns the cache connection (despite the historical name, this is a
	Redis connection via RedisWrapper, not memcache)."""
	global redis_server
	if not redis_server:
		from frappe.utils.redis_wrapper import RedisWrapper
		redis_server = RedisWrapper.from_url(conf.get('redis_cache')
			or "redis://localhost:11311")
	return redis_server

def get_traceback():
	"""Returns error traceback (delegates to frappe.utils.get_traceback)."""
	from frappe.utils import get_traceback
	return get_traceback()

def errprint(msg):
	"""Log error. This is sent back as `exc` in response.

	:param msg: Message.
	NOTE(review): printing msg.encode('utf-8') emits a bytes repr on
	Python 3 (b'...') — confirm intended for dual py2/py3 support."""
	msg = as_unicode(msg)
	if not request or (not "cmd" in local.form_dict) or conf.developer_mode:
		print(msg.encode('utf-8'))
	error_log.append(msg)

def log(msg):
	"""Add to `debug_log`; also echoed to stdout outside request context
	when site config enables "logging".

	:param msg: Message."""
	if not request:
		if conf.get("logging") or False:
			print(repr(msg))
	debug_log.append(as_unicode(msg))
def msgprint(msg, title=None, raise_exception=0, as_table=False, indicator=None, alert=False):
	"""Print a message to the user (via HTTP response).

	Messages are sent in the `__server_messages` property in the
	response JSON and shown in a pop-up / modal.

	:param msg: Message.
	:param title: [optional] Message title.
	:param raise_exception: [optional] Raise given exception and show message.
	:param as_table: [optional] If `msg` is a list of lists, render as HTML table.
	:param indicator: [optional] Indicator color shown with the message.
	:param alert: [optional] Show as a transient alert instead of a modal.
	"""
	from frappe.utils import encode
	out = _dict(message=msg)
	def _raise_exception():
		# Honour rollback_on_exception, then raise either the supplied
		# exception class or the generic ValidationError.
		if raise_exception:
			if flags.rollback_on_exception:
				db.rollback()
			import inspect
			if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception):
				raise raise_exception(encode(msg))
			else:
				raise ValidationError(encode(msg))
	if flags.mute_messages:
		# suppress the message itself but still raise if asked to
		_raise_exception()
		return
	if as_table and type(msg) in (list, tuple):
		out.msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>'
	if flags.print_messages and out.msg:
		# NOTE(review): str + bytes concatenation — raises TypeError on
		# Python 3; confirm this path is py2-only.
		print("Message: " + repr(out.msg).encode("utf-8"))
	if title:
		out.title = title
	if not indicator and raise_exception:
		indicator = 'red'
	if indicator:
		out.indicator = indicator
	if alert:
		out.alert = 1
	message_log.append(json.dumps(out))
	_raise_exception()

def clear_messages():
	"""Discard all queued server messages for this request."""
	local.message_log = []

def clear_last_message():
	"""Drop the most recently queued server message, if any."""
	if len(local.message_log) > 0:
		local.message_log = local.message_log[:-1]

def throw(msg, exc=ValidationError, title=None):
	"""Throw execption and show message (`msgprint`).

	:param msg: Message.
	:param exc: Exception class. Default `frappe.ValidationError`"""
	msgprint(msg, raise_exception=exc, title=title, indicator='red')

def emit_js(js, user=False, **kwargs):
	"""Evaluate `js` in the browser of `user` (default: current session user)
	via the realtime channel.

	NOTE(review): `frappe.async` — 'async' is a reserved keyword from
	Python 3.7, so this import only works on older interpreters."""
	from frappe.async import publish_realtime
	if user == False:
		# False (not None/'') is the sentinel for "current user"
		user = session.user
	publish_realtime('eval_js', js, user=user, **kwargs)
def create_folder(path, with_init=False):
	"""Create a folder in the given path and add an `__init__.py` file (optional).

	:param path: Folder path.
	:param with_init: Create `__init__.py` in the new folder."""
	from frappe.utils import touch_file
	import errno
	try:
		# Create directly and tolerate EEXIST instead of checking
		# os.path.exists() first: the check-then-create pattern races when
		# two workers create the same folder concurrently.
		os.makedirs(path)
	except OSError as e:
		if e.errno != errno.EEXIST:
			raise
	if with_init:
		touch_file(os.path.join(path, "__init__.py"))
def set_user(username):
	"""Set current user and reset all per-user cached state.

	:param username: **User** name to set as current user."""
	local.session.user = username
	local.session.sid = username
	# drop caches that are user-specific
	local.cache = {}
	local.form_dict = _dict()
	local.jenv = None
	local.session.data = _dict()
	local.role_permissions = {}
	local.new_doc_templates = {}
	local.user_perms = None

def get_user():
	"""Return (and lazily build) the UserPermissions wrapper for the
	current session user."""
	from frappe.utils.user import UserPermissions
	if not local.user_perms:
		local.user_perms = UserPermissions(local.session.user)
	return local.user_perms

def get_roles(username=None):
	"""Returns roles of current user (or of `username` when given);
	["Guest"] when there is no session."""
	if not local.session:
		return ["Guest"]
	if username:
		import frappe.permissions
		return frappe.permissions.get_roles(username)
	else:
		return get_user().get_roles()

def get_request_header(key, default=None):
	"""Return HTTP request header.

	:param key: HTTP header key.
	:param default: Default value."""
	return request.headers.get(key, default)
def sendmail(recipients=None, sender="", subject="No Subject", message="No Message",
		as_markdown=False, delayed=True, reference_doctype=None, reference_name=None,
		unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
		attachments=None, content=None, doctype=None, name=None, reply_to=None,
		cc=None, bcc=None, message_id=None, in_reply_to=None, send_after=None, expose_recipients=None,
		send_priority=1, communication=None, retry=1, now=None, read_receipt=None, is_notification=False,
		inline_images=None, template=None, args=None, header=None, print_letterhead=False):
	"""Send email using user's default **Email Account** or global default **Email Account**.

	:param recipients: List of recipients.
	:param sender: Email sender. Default is current user.
	:param subject: Email Subject.
	:param message: (or `content`) Email Content.
	:param as_markdown: Convert content markdown to HTML.
	:param delayed: Send via scheduled email sender **Email Queue**. Don't send immediately. Default is true
	:param send_priority: Priority for Email Queue, default 1.
	:param reference_doctype: (or `doctype`) Append as communication to this DocType.
	:param reference_name: (or `name`) Append as communication to this document name.
	:param unsubscribe_method: Unsubscribe url with options email, doctype, name. e.g. `/api/method/unsubscribe`
	:param unsubscribe_params: Unsubscribe paramaters to be loaded on the unsubscribe_method [optional] (dict).
	:param attachments: List of attachments.
	:param reply_to: Reply-To Email Address.
	:param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email.
	:param in_reply_to: Used to send the Message-Id of a received email back as In-Reply-To.
	:param send_after: Send after the given datetime.
	:param expose_recipients: Display all recipients in the footer message - "This email was sent to"
	:param communication: Communication link to be set in Email Queue record
	:param inline_images: List of inline images as {"filename", "filecontent"}. All src properties will be replaced with random Content-Id
	:param template: Name of html template from templates/emails folder
	:param args: Arguments for rendering the template
	:param header: Append header in email
	"""
	# Fix: the previous defaults recipients=[], cc=[], bcc=[] were mutable
	# default arguments shared across calls; default to None and build a
	# fresh list per call instead. Callers passing nothing see no change.
	if recipients is None:
		recipients = []
	if cc is None:
		cc = []
	if bcc is None:
		bcc = []
	text_content = None
	if template:
		message, text_content = get_email_from_template(template, args)
	message = content or message
	if as_markdown:
		from markdown2 import markdown
		message = markdown(message)
	if not delayed:
		now = True
	from frappe.email import queue
	queue.send(recipients=recipients, sender=sender,
		subject=subject, message=message, text_content=text_content,
		reference_doctype = doctype or reference_doctype, reference_name = name or reference_name,
		unsubscribe_method=unsubscribe_method, unsubscribe_params=unsubscribe_params, unsubscribe_message=unsubscribe_message,
		attachments=attachments, reply_to=reply_to, cc=cc, bcc=bcc, message_id=message_id, in_reply_to=in_reply_to,
		send_after=send_after, expose_recipients=expose_recipients, send_priority=send_priority,
		communication=communication, now=now, read_receipt=read_receipt, is_notification=is_notification,
		inline_images=inline_images, header=header, print_letterhead=print_letterhead)
# Registries of HTTP-accessible methods, populated by @frappe.whitelist().
whitelisted = []
guest_methods = []
xss_safe_methods = []

def whitelist(allow_guest=False, xss_safe=False):
	"""
	Decorator for whitelisting a function and making it accessible via HTTP.
	Standard request will be `/api/method/[path.to.method]`

	:param allow_guest: Allow non logged-in user to access this method.
	:param xss_safe: Mark the method's output as XSS safe.

	Use as:

		@frappe.whitelist()
		def myfunc(param1, param2):
			pass
	"""
	def innerfn(fn):
		# register in the module-level lists; the function itself is
		# returned unchanged
		whitelisted.append(fn)
		if allow_guest:
			guest_methods.append(fn)
		if xss_safe:
			xss_safe_methods.append(fn)
		return fn

	return innerfn
def only_for(roles):
	"""Raise `frappe.PermissionError` if the user does not have any of the given **Roles**.

	No-op while running tests (local.flags.in_test).
	:param roles: List of roles to check (a single role string is accepted too)."""
	if local.flags.in_test:
		return
	if not isinstance(roles, (tuple, list)):
		roles = (roles,)
	roles = set(roles)
	myroles = set(get_roles())
	if not roles.intersection(myroles):
		raise PermissionError

def get_domain_data(module):
	"""Return the `data` attribute of the domain hook registered for
	`module` as a _dict; empty _dict when no domain hook exists.
	ImportError is swallowed only in test mode."""
	try:
		domain_data = get_hooks('domains')
		if module in domain_data:
			return _dict(get_attr(get_hooks('domains')[module][0] + '.data'))
		else:
			return _dict()
	except ImportError:
		if local.flags.in_test:
			return _dict()
		else:
			raise
def clear_cache(user=None, doctype=None):
	"""Clear **User**, **DocType** or global cache.

	:param user: If user is given, only user cache is cleared.
	:param doctype: If doctype is given, only DocType cache is cleared."""
	import frappe.sessions
	if doctype:
		import frappe.model.meta
		frappe.model.meta.clear_cache(doctype)
		# metadata changed; bump the client build id
		reset_metadata_version()
	elif user:
		frappe.sessions.clear_cache(user)
	else: # everything
		from frappe import translate
		frappe.sessions.clear_cache()
		translate.clear_cache()
		reset_metadata_version()
		local.cache = {}
		local.new_doc_templates = {}
		# let installed apps clear their own caches via hook
		for fn in get_hooks("clear_cache"):
			get_attr(fn)()
	local.role_permissions = {}

def has_permission(doctype=None, ptype="read", doc=None, user=None, verbose=False, throw=False):
	"""Returns True if the user has permission; raises `frappe.PermissionError`
	instead of returning False when `throw` is set.

	:param doctype: DocType for which permission is to be check.
	:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
	:param doc: [optional] Checks User permissions for given doc.
	:param user: [optional] Check for given user. Default: current user.
	:param throw: [optional] Raise PermissionError on failure instead of returning False."""
	if not doctype and doc:
		doctype = doc.doctype
	import frappe.permissions
	out = frappe.permissions.has_permission(doctype, ptype, doc=doc, verbose=verbose, user=user)
	if throw and not out:
		if doc:
			frappe.throw(_("No permission for {0}").format(doc.doctype + " " + doc.name))
		else:
			frappe.throw(_("No permission for {0}").format(doctype))
	return out

def has_website_permission(doc=None, ptype='read', user=None, verbose=False, doctype=None):
	"""Returns True if the (website) user is permitted, via the document's
	own `has_website_permission` method or the `has_website_permission` hooks.

	:param doctype: DocType for which permission is to be check.
	:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
	:param doc: Checks User permissions for given doc.
	:param user: [optional] Check for given user. Default: current user."""
	if not user:
		user = session.user
	if doc:
		if isinstance(doc, string_types):
			doc = get_doc(doctype, doc)
		doctype = doc.doctype
		if doc.flags.ignore_permissions:
			return True
		# check permission in controller
		if hasattr(doc, 'has_website_permission'):
			return doc.has_website_permission(ptype, verbose=verbose)
	hooks = (get_hooks("has_website_permission") or {}).get(doctype, [])
	if hooks:
		for method in hooks:
			result = call(method, doc=doc, ptype=ptype, user=user, verbose=verbose)
			# if even a single permission check is Falsy
			if not result:
				return False
		# else it is Truthy
		return True
	else:
		return False
def is_table(doctype):
"""Returns True if `istable` property (indicating child Table) is set for given DocType."""
def get_tables():
return db.sql_list("select name from tabDocType where istable=1")
tables = cache().get_value("is_table", get_tables)
return doctype in tables
def get_precision(doctype, fieldname, currency=None, doc=None):
"""Get precision for a given field"""
from frappe.model.meta import get_field_precision
return get_field_precision(get_meta(doctype).get_field(fieldname), doc, currency)
def generate_hash(txt=None, length=None):
    """Generates random hash for given text + current timestamp + random string."""
    import hashlib, time
    from .utils import random_string

    # mix the optional text with the wall clock and a random string so
    # repeated calls with the same txt still produce distinct digests
    seed = (txt or "") + repr(time.time()) + repr(random_string(8))
    digest = hashlib.sha224(seed.encode()).hexdigest()
    return digest[:length] if length else digest
def reset_metadata_version():
    """Reset `metadata_version` (Client (Javascript) build ID) hash.

    Stores a fresh random hash in the cache and returns it."""
    v = generate_hash()
    cache().set_value("metadata_version", v)
    return v
def new_doc(doctype, parent_doc=None, parentfield=None, as_dict=False):
    """Returns a new document of the given DocType with defaults set.

    :param doctype: DocType of the new document.
    :param parent_doc: [optional] add to parent document.
    :param parentfield: [optional] add against this `parentfield`.
    :param as_dict: [optional] return a plain dict instead of a Document."""
    from frappe.model.create_new import get_new_doc
    return get_new_doc(doctype, parent_doc, parentfield, as_dict=as_dict)
def set_value(doctype, docname, fieldname, value=None):
    """Set document value. Calls `frappe.client.set_value`"""
    import frappe.client
    return frappe.client.set_value(doctype, docname, fieldname, value)
def get_doc(*args, **kwargs):
    """Return a `frappe.model.document.Document` object of the given type and name.

    :param arg1: DocType name as string **or** document JSON.
    :param arg2: [optional] Document name as string.

    Examples:

            # insert a new document
            todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
            todo.insert()

            # open an existing document
            todo = frappe.get_doc("ToDo", "TD0001")

    """
    import frappe.model.document
    return frappe.model.document.get_doc(*args, **kwargs)
def get_last_doc(doctype):
    """Get last created document of this type.

    :raises DoesNotExistError: when no document of this type exists."""
    d = get_all(doctype, ["name"], order_by="creation desc", limit_page_length=1)
    if d:
        return get_doc(doctype, d[0].name)
    else:
        raise DoesNotExistError
def get_single(doctype):
    """Return a `frappe.model.document.Document` object of the given Single doctype."""
    # Single doctypes are stored with name == doctype
    return get_doc(doctype, doctype)
def get_meta(doctype, cached=True):
    """Get `frappe.model.meta.Meta` instance of given doctype name.

    :param cached: use the cached Meta when available (default True)."""
    import frappe.model.meta
    return frappe.model.meta.get_meta(doctype, cached=cached)
def get_meta_module(doctype):
    """Load and return the Python module of the given DocType's controller
    via `frappe.modules.load_doctype_module`."""
    import frappe.modules
    return frappe.modules.load_doctype_module(doctype)
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False,
    ignore_permissions=False, flags=None, ignore_on_trash=False, ignore_missing=True):
    """Delete a document. Calls `frappe.model.delete_doc.delete_doc`.

    :param doctype: DocType of document to be deleted.
    :param name: Name of document to be deleted.
    :param force: Allow even if document is linked. Warning: This may lead to data integrity errors.
    :param ignore_doctypes: Ignore if child table is one of these.
    :param for_reload: Call `before_reload` trigger before deleting.
    :param ignore_permissions: Ignore user permissions.
    :param ignore_on_trash: Skip the `on_trash` controller hook.
    :param ignore_missing: Do not raise if the document does not exist (default True)."""
    import frappe.model.delete_doc
    frappe.model.delete_doc.delete_doc(doctype, name, force, ignore_doctypes, for_reload,
        ignore_permissions, flags, ignore_on_trash, ignore_missing)
def delete_doc_if_exists(doctype, name, force=0):
    """Delete document if exists."""
    if db.exists(doctype, name):
        delete_doc(doctype, name, force=force)
def reload_doctype(doctype, force=False, reset_permissions=False):
    """Reload DocType from model (`[module]/[doctype]/[name]/[name].json`) files."""
    # look up the DocType's module in the DB, then delegate to reload_doc
    reload_doc(scrub(db.get_value("DocType", doctype, "module")), "doctype", scrub(doctype),
        force=force, reset_permissions=reset_permissions)
def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False):
    """Reload Document from model (`[module]/[doctype]/[name]/[name].json`) files.

    :param module: Module name.
    :param dt: DocType name.
    :param dn: Document name.
    :param force: Reload even if `modified` timestamp matches.
    :param reset_permissions: Reset the document's permissions from the model file.
    """
    import frappe.modules
    return frappe.modules.reload_doc(module, dt, dn, force=force, reset_permissions=reset_permissions)
def rename_doc(*args, **kwargs):
    """Rename a document. Calls `frappe.model.rename_doc.rename_doc`"""
    from frappe.model.rename_doc import rename_doc
    return rename_doc(*args, **kwargs)
def get_module(modulename):
    """Returns a module object for given Python module name using `importlib.import_module`."""
    return importlib.import_module(modulename)
def scrub(txt):
    """Returns sluggified string. e.g. `Sales Order` becomes `sales_order`."""
    # lower-case first, then map both spaces and hyphens to underscores
    return txt.lower().replace(' ', '_').replace('-', '_')
def unscrub(txt):
    """Returns titlified string. e.g. `sales_order` becomes `Sales Order`."""
    # map underscores and hyphens to spaces, then title-case each word
    return txt.replace('-', ' ').replace('_', ' ').title()
def get_module_path(module, *joins):
    """Get the path of the given module name.

    :param module: Module name.
    :param *joins: Join additional path elements using `os.path.join`."""
    module = scrub(module)
    # resolve the app that owns this module, then locate the python package
    return get_pymodule_path(local.module_app[module] + "." + module, *joins)
def get_app_path(app_name, *joins):
    """Return path of given app.

    :param app_name: App name.
    :param *joins: Join additional path elements using `os.path.join`."""
    return get_pymodule_path(app_name, *joins)
def get_site_path(*joins):
    """Return path of current site.

    :param *joins: Join additional path elements using `os.path.join`."""
    return os.path.join(local.site_path, *joins)
def get_pymodule_path(modulename, *joins):
    """Return path of given Python module name.

    :param modulename: Python module name.
    :param *joins: Join additional path elements using `os.path.join`."""
    # path elements are scrubbed to module naming convention, except when
    # addressing the app's `public` (assets) folder, which keeps names as-is
    if "public" not in joins:
        joins = [scrub(part) for part in joins]
    return os.path.join(os.path.dirname(get_module(scrub(modulename)).__file__), *joins)
def get_module_list(app_name):
    """Get list of modules for given app via `app/modules.txt`."""
    return get_file_items(os.path.join(os.path.dirname(get_module(app_name).__file__), "modules.txt"))
def get_all_apps(with_internal_apps=True, sites_path=None):
    """Get list of all apps via `sites/apps.txt`.

    :param with_internal_apps: also include apps listed in the site-local `apps.txt`.
    :param sites_path: [optional] override the sites directory (default: `local.sites_path`)."""
    if not sites_path:
        sites_path = local.sites_path

    apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True)

    if with_internal_apps:
        for app in get_file_items(os.path.join(local.site_path, "apps.txt")):
            if app not in apps:
                apps.append(app)

    # frappe is always first in the list
    if "frappe" in apps:
        apps.remove("frappe")
    apps.insert(0, 'frappe')

    return apps
def get_installed_apps(sort=False, frappe_last=False):
    """Get list of installed apps in current site.

    :param sort: order apps as in `get_all_apps`.
    :param frappe_last: move `frappe` to the end of the list."""
    # NOTE(review): the default of True means an empty list is returned when
    # the `in_install_db` flag attribute is missing entirely — confirm intended
    if getattr(flags, "in_install_db", True):
        return []

    if not db:
        connect()

    installed = json.loads(db.get_global("installed_apps") or "[]")

    if sort:
        installed = [app for app in get_all_apps(True) if app in installed]

    if frappe_last:
        if 'frappe' in installed:
            installed.remove('frappe')
        installed.append('frappe')

    return installed
def get_doc_hooks():
    '''Returns hooked methods for given doc. It will expand the dict tuple if required.'''
    if not hasattr(local, 'doc_events_hooks'):
        hooks = get_hooks('doc_events', {})
        out = {}
        for key, value in iteritems(hooks):
            if isinstance(key, tuple):
                # a tuple key applies the same hooks to several doctypes
                for doctype in key:
                    append_hook(out, doctype, value)
            else:
                append_hook(out, key, value)

        # memoize per request
        local.doc_events_hooks = out

    return local.doc_events_hooks
def get_hooks(hook=None, default=None, app_name=None):
    """Get hooks via `app/hooks.py`

    :param hook: Name of the hook. Will gather all hooks for this name and return as a list.
    :param default: Default if no hook found.
    :param app_name: Filter by app."""
    def load_app_hooks(app_name=None):
        # collect all non-underscore attributes from every app's hooks module
        hooks = {}
        for app in [app_name] if app_name else get_installed_apps(sort=True):
            app = "frappe" if app == "webnotes" else app
            try:
                app_hooks = get_module(app + ".hooks")
            except ImportError:
                if local.flags.in_install_app:
                    # if app is not installed while restoring, ignore it
                    # (was a no-op `pass` that fell through to the error path)
                    continue
                # report the app that actually failed to import
                # (was `app_name`, which is None when iterating installed apps)
                print('Could not find app "{0}"'.format(app))
                if not request:
                    sys.exit(1)
                raise
            for key in dir(app_hooks):
                if not key.startswith("_"):
                    append_hook(hooks, key, getattr(app_hooks, key))
        return hooks

    if app_name:
        hooks = _dict(load_app_hooks(app_name))
    else:
        # hooks for all installed apps are cached globally
        hooks = _dict(cache().get_value("app_hooks", load_app_hooks))

    if hook:
        return hooks.get(hook) or (default if default is not None else [])
    else:
        return hooks
def append_hook(target, key, value):
    '''Append a hook value under `key` in the `target` dict.

    If the value is itself a dict (e.g. `doc_events`), it is merged
    recursively, collecting leaf values into lists; otherwise the value
    (or its elements, when it is a list) is appended to a list under `key`.
    '''
    if isinstance(value, dict):
        # merge nested hook dicts key by key
        sub = target.setdefault(key, {})
        for inner_key in value:
            append_hook(sub, inner_key, value[inner_key])
    else:
        # scalar or list: collect into a list under key
        bucket = target.setdefault(key, [])
        if isinstance(value, list):
            bucket.extend(value)
        else:
            bucket.append(value)
def setup_module_map():
    """Rebuild map of all modules (internal).

    Populates `local.app_modules` (app -> [modules]) and `local.module_app`
    (module -> app), using the cache when a database is configured."""
    _cache = cache()

    if conf.db_name:
        local.app_modules = _cache.get_value("app_modules")
        local.module_app = _cache.get_value("module_app")

    if not (local.app_modules and local.module_app):
        # cache miss (or no DB): rebuild from each app's modules.txt
        local.module_app, local.app_modules = {}, {}
        for app in get_all_apps(True):
            if app == "webnotes": app = "frappe"
            local.app_modules.setdefault(app, [])
            for module in get_module_list(app):
                module = scrub(module)
                local.module_app[module] = app
                local.app_modules[app].append(module)

        if conf.db_name:
            _cache.set_value("app_modules", local.app_modules)
            _cache.set_value("module_app", local.module_app)
def get_file_items(path, raise_not_found=False, ignore_empty_lines=True):
    """Returns items from text file as a list. Ignores empty lines.

    NOTE: when `ignore_empty_lines` is True, lines starting with '#' are
    skipped as well; when False, every line is returned unfiltered."""
    import frappe.utils

    content = read_file(path, raise_not_found=raise_not_found)
    if content:
        content = frappe.utils.strip(content)
        return [p.strip() for p in content.splitlines() if (not ignore_empty_lines) or (p.strip() and not p.startswith("#"))]
    else:
        return []
def get_file_json(path):
    """Read the file at `path` and return the parsed JSON object."""
    with open(path, 'r') as handle:
        return json.loads(handle.read())
def read_file(path, raise_not_found=False):
    """Open a file and return its content as Unicode.

    Returns None when the file is missing, unless `raise_not_found` is set."""
    if isinstance(path, text_type):
        # py2-era convention: work with a byte path (open() accepts bytes paths)
        path = path.encode("utf-8")

    if os.path.exists(path):
        with open(path, "r") as f:
            return as_unicode(f.read())
    elif raise_not_found:
        raise IOError("{} Not Found".format(path))
    else:
        return None
def get_attr(method_string):
    """Get python method object from its name.

    :param method_string: dotted path, e.g. `app.module.function`.
    :raises AppNotInstalledError: when the leading app is not installed."""
    app_name = method_string.split(".")[0]
    if not local.flags.in_install and app_name not in get_installed_apps():
        throw(_("App {0} is not installed").format(app_name), AppNotInstalledError)

    # split into containing module and attribute name
    modulename = '.'.join(method_string.split('.')[:-1])
    methodname = method_string.split('.')[-1]
    return getattr(get_module(modulename), methodname)
def call(fn, *args, **kwargs):
    """Call a function and match arguments.

    Keyword arguments not accepted by `fn` are silently dropped (unless `fn`
    declares a `**kwargs` catch-all); the reserved `flags` kwarg is always
    removed before calling.
    """
    if isinstance(fn, string_types):
        fn = get_attr(fn)

    if hasattr(fn, 'fnargs'):
        fnargs = fn.fnargs
        # fn declares its accepted args explicitly; no catch-all assumed
        # (previously `varkw` was left undefined on this path -> NameError)
        varkw = None
    else:
        # inspect.getargspec was removed in Python 3.11; getfullargspec is the
        # drop-in replacement and also exposes keyword-only arguments
        spec = inspect.getfullargspec(fn)
        fnargs = spec.args + spec.kwonlyargs
        varkw = spec.varkw

    newargs = {}
    for a in kwargs:
        if (a in fnargs) or varkw:
            newargs[a] = kwargs.get(a)

    if "flags" in newargs:
        del newargs["flags"]

    return fn(*args, **newargs)
def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True):
    """Create a new **Property Setter** (for overriding DocType and DocField properties).

    If doctype is not specified, it will create a property setter for all fields with the
    given fieldname.

    :param args: dict with keys like `doctype`, `fieldname`, `property`, `value`,
            `property_type`, `doctype_or_field`.
    :param ignore_validate: skip validation when inserting the Property Setter.
    :param validate_fields_for_doctype: validate the field exists on the doctype."""
    args = _dict(args)
    if not args.doctype_or_field:
        args.doctype_or_field = 'DocField'
        if not args.property_type:
            # derive the property's fieldtype from the DocField meta itself
            args.property_type = db.get_value('DocField',
                {'parent': 'DocField', 'fieldname': args.property}, 'fieldtype') or 'Data'

    if not args.doctype:
        # no doctype given: apply to every doctype that has this fieldname
        doctype_list = db.sql_list('select distinct parent from tabDocField where fieldname=%s', args.fieldname)
    else:
        doctype_list = [args.doctype]

    for doctype in doctype_list:
        if not args.property_type:
            args.property_type = db.get_value('DocField',
                {'parent': doctype, 'fieldname': args.fieldname}, 'fieldtype') or 'Data'

        ps = get_doc({
            'doctype': "Property Setter",
            'doctype_or_field': args.doctype_or_field,
            'doc_type': doctype,
            'field_name': args.fieldname,
            'property': args.property,
            'value': args.value,
            'property_type': args.property_type or "Data",
            '__islocal': 1
        })
        ps.flags.ignore_validate = ignore_validate
        ps.flags.validate_fields_for_doctype = validate_fields_for_doctype
        ps.validate_fieldtype_change()
        ps.insert()
def import_doc(path, ignore_links=False, ignore_insert=False, insert=False):
    """Import a file using Data Import."""
    from frappe.core.doctype.data_import import data_import
    data_import.import_doc(path, ignore_links=ignore_links, ignore_insert=ignore_insert, insert=insert)
def copy_doc(doc, ignore_no_copy=True):
    """Return a deep copy of the given document as a new (unsaved) document.

    :param doc: Document instance or dict to copy.
    :param ignore_no_copy: when True (default), `no_copy` fields also get copied;
            when False they are blanked on the copy."""
    import copy

    def remove_no_copy_fields(d):
        # blank every field flagged no_copy in the meta
        for df in d.meta.get("fields", {"no_copy": 1}):
            if hasattr(d, df.fieldname):
                d.set(df.fieldname, None)

    fields_to_clear = ['name', 'owner', 'creation', 'modified', 'modified_by']

    if not local.flags.in_test:
        fields_to_clear.append("docstatus")

    if not isinstance(doc, dict):
        d = doc.as_dict()
    else:
        d = doc

    newdoc = get_doc(copy.deepcopy(d))
    newdoc.set("__islocal", 1)
    for fieldname in (fields_to_clear + ['amended_from', 'amendment_date']):
        newdoc.set(fieldname, None)

    if not ignore_no_copy:
        remove_no_copy_fields(newdoc)

    # child rows are also marked local and cleared
    for i, d in enumerate(newdoc.get_all_children()):
        d.set("__islocal", 1)
        for fieldname in fields_to_clear:
            d.set(fieldname, None)
        if not ignore_no_copy:
            remove_no_copy_fields(d)

    return newdoc
def compare(val1, condition, val2):
    """Compare two values using `frappe.utils.compare`

    `condition` could be:
    - "^"
    - "in"
    - "not in"
    - "="
    - "!="
    - ">"
    - "<"
    - ">="
    - "<="
    - "not None"
    - "None"
    """
    import frappe.utils
    return frappe.utils.compare(val1, condition, val2)
def respond_as_web_page(title, html, success=None, http_status_code=None,
    context=None, indicator_color=None, primary_action='/', primary_label=None, fullpage=False,
    width=None):
    """Send response as a web page with a message rather than JSON. Used to show permission errors etc.

    :param title: Page title and heading.
    :param html: Message body (HTML) to be shown.
    :param success: Alert message.
    :param http_status_code: HTTP status code
    :param context: web template context
    :param indicator_color: color of indicator in title
    :param primary_action: route on primary button (default is `/`)
    :param primary_label: label on primary button (default is "Home")
    :param fullpage: hide header / footer
    :param width: Width of message in pixels
    """
    local.message_title = title
    local.message = html
    local.response['type'] = 'page'
    local.response['route'] = 'message'
    if http_status_code:
        local.response['http_status_code'] = http_status_code

    if not context:
        context = {}

    if not indicator_color:
        # pick indicator colour from outcome: green on success, red on error
        if success:
            indicator_color = 'green'
        elif http_status_code and http_status_code > 300:
            indicator_color = 'red'
        else:
            indicator_color = 'blue'

    context['indicator_color'] = indicator_color
    context['primary_label'] = primary_label
    context['primary_action'] = primary_action
    context['error_code'] = http_status_code
    context['fullpage'] = fullpage
    if width:
        context['card_width'] = width

    local.response['context'] = context
def redirect_to_message(title, html, http_status_code=None, context=None, indicator_color=None):
    """Redirects to /message?id=random

    Similar to respond_as_web_page, but used to 'redirect' and show message pages like success, failure, etc. with a detailed message

    :param title: Page title and heading.
    :param html: Message body (HTML) to be shown.
    :param http_status_code: HTTP status code.

    Example Usage:
            frappe.redirect_to_message(_('Thank you'), "<div><p>You will receive an email at test@example.com</p></div>")
    """
    message_id = generate_hash(length=8)
    message = {
        'context': context or {},
        'http_status_code': http_status_code or 200
    }
    message['context'].update({
        'header': title,
        'title': title,
        'message': html
    })

    if indicator_color:
        message['context'].update({
            "indicator_color": indicator_color
        })

    # the message page reads this back by id; expires after a minute
    cache().set_value("message_id:{0}".format(message_id), message, expires_in_sec=60)
    location = '/message?id={0}'.format(message_id)

    if not getattr(local, 'is_ajax', False):
        local.response["type"] = "redirect"
        local.response["location"] = location
    else:
        return location
def build_match_conditions(doctype, as_condition=True):
    """Return match (User permissions) for given doctype as list or SQL."""
    import frappe.desk.reportview
    return frappe.desk.reportview.build_match_conditions(doctype, as_condition)
def get_list(doctype, *args, **kwargs):
    """List database query via `frappe.model.db_query`. Will also check for permissions.

    :param doctype: DocType on which query is to be made.
    :param fields: List of fields or `*`.
    :param filters: List of filters (see example).
    :param order_by: Order By e.g. `modified desc`.
    :param limit_page_start: Start results at record #. Default 0.
    :param limit_page_length: No of records in the page. Default 20.

    Example usage:

            # simple dict filter
            frappe.get_list("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})

            # filter as a list of lists
            frappe.get_list("ToDo", fields="*", filters = [["modified", ">", "2014-01-01"]])

            # filter as a list of dicts
            frappe.get_list("ToDo", fields="*", filters = {"description": ("like", "test%")})
    """
    import frappe.model.db_query
    return frappe.model.db_query.DatabaseQuery(doctype).execute(None, *args, **kwargs)
def get_all(doctype, *args, **kwargs):
    """List database query via `frappe.model.db_query`. Will **not** check for permissions.

    Parameters are same as `frappe.get_list`

    :param doctype: DocType on which query is to be made.
    :param fields: List of fields or `*`. Default is: `["name"]`.
    :param filters: List of filters (see example).
    :param order_by: Order By e.g. `modified desc`.
    :param limit_page_start: Start results at record #. Default 0.
    :param limit_page_length: No of records in the page. Default 20.

    Example usage:

            # simple dict filter
            frappe.get_all("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})

            # filter as a list of lists
            frappe.get_all("ToDo", fields=["*"], filters = [["modified", ">", "2014-01-01"]])

            # filter as a list of dicts
            frappe.get_all("ToDo", fields=["*"], filters = {"description": ("like", "test%")})
    """
    kwargs["ignore_permissions"] = True
    # unlike get_list, return all matching rows by default (no page limit)
    if "limit_page_length" not in kwargs:
        kwargs["limit_page_length"] = 0
    return get_list(doctype, *args, **kwargs)
def get_value(*args, **kwargs):
    """Returns a document property or list of properties.

    Alias for `frappe.db.get_value`

    :param doctype: DocType name.
    :param filters: Filters like `{"x":"y"}` or name of the document. `None` if Single DocType.
    :param fieldname: Column name.
    :param ignore: Don't raise exception if table, column is missing.
    :param as_dict: Return values as dict.
    :param debug: Print query in error log.
    """
    return db.get_value(*args, **kwargs)
def as_json(obj, indent=1):
    """Serialize `obj` to an indented, key-sorted JSON string, using
    `json_handler` for non-JSON-native types."""
    from frappe.utils.response import json_handler
    return json.dumps(obj, indent=indent, sort_keys=True, default=json_handler)
def are_emails_muted():
    """Return True when emails are muted via the runtime flag or site config."""
    from frappe.utils import cint
    return flags.mute_emails or cint(conf.get("mute_emails") or 0) or False
def get_test_records(doctype):
    """Returns list of objects from `test_records.json` in the given doctype's folder."""
    from frappe.modules import get_doctype_module, get_module_path
    path = os.path.join(get_module_path(get_doctype_module(doctype)), "doctype", scrub(doctype), "test_records.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.loads(f.read())
    else:
        # no fixtures for this doctype
        return []
def format_value(*args, **kwargs):
    """Format value with given field properties.

    :param value: Value to be formatted.
    :param df: (Optional) DocField object with properties `fieldtype`, `options` etc."""
    import frappe.utils.formatters
    return frappe.utils.formatters.format_value(*args, **kwargs)
# NOTE: shadows the builtin `format`; kept for API compatibility (alias of format_value)
def format(*args, **kwargs):
    """Format value with given field properties.

    :param value: Value to be formatted.
    :param df: (Optional) DocField object with properties `fieldtype`, `options` etc."""
    import frappe.utils.formatters
    return frappe.utils.formatters.format_value(*args, **kwargs)
def get_print(doctype=None, name=None, print_format=None, style=None, html=None, as_pdf=False, doc=None, output=None, no_letterhead=0):
    """Get Print Format for given document.

    :param doctype: DocType of document.
    :param name: Name of document.
    :param print_format: Print Format name. Default 'Standard',
    :param style: Print Format style.
    :param as_pdf: Return as PDF. Default False.
    :param no_letterhead: Suppress the letterhead in the rendered output."""
    from frappe.website.render import build_page
    from frappe.utils.pdf import get_pdf

    # the printview page reads its parameters from the request form_dict
    local.form_dict.doctype = doctype
    local.form_dict.name = name
    local.form_dict.format = print_format
    local.form_dict.style = style
    local.form_dict.doc = doc
    local.form_dict.no_letterhead = no_letterhead

    if not html:
        html = build_page("printview")

    if as_pdf:
        return get_pdf(html, output=output)
    else:
        return html
def attach_print(doctype, name, file_name=None, print_format=None, style=None, html=None, doc=None, lang=None, print_letterhead=True):
    """Render a document's print format as an email attachment dict
    (`{"fname": ..., "fcontent": ...}`), as PDF or HTML depending on
    the "Print Settings" `send_print_as_pdf` option."""
    from frappe.utils import scrub_urls

    if not file_name: file_name = name
    # sanitize for use as an attachment filename
    file_name = file_name.replace(' ', '').replace('/', '-')

    print_settings = db.get_singles_dict("Print Settings")

    _lang = local.lang

    #set lang as specified in print format attachment
    if lang: local.lang = lang
    local.flags.ignore_print_permissions = True

    no_letterhead = not print_letterhead

    if int(print_settings.send_print_as_pdf or 0):
        out = {
            "fname": file_name + ".pdf",
            "fcontent": get_print(doctype, name, print_format=print_format, style=style, html=html, as_pdf=True, doc=doc, no_letterhead=no_letterhead)
        }
    else:
        out = {
            "fname": file_name + ".html",
            "fcontent": scrub_urls(get_print(doctype, name, print_format=print_format, style=style, html=html, doc=doc, no_letterhead=no_letterhead)).encode("utf-8")
        }

    local.flags.ignore_print_permissions = False
    #reset lang to original local lang
    local.lang = _lang

    return out
def publish_progress(*args, **kwargs):
    """Show the user progress for a long request

    :param percent: Percent progress
    :param title: Title
    :param doctype: Optional, for DocType
    :param name: Optional, for Document name
    """
    # `async` became a reserved keyword in Python 3.7, so the statement
    # `import frappe.async` is a SyntaxError there; import the module
    # dynamically by name instead (get_module wraps importlib.import_module).
    return get_module("frappe.async").publish_progress(*args, **kwargs)
def publish_realtime(*args, **kwargs):
    """Publish real-time updates

    :param event: Event name, like `task_progress` etc.
    :param message: JSON message object. For async must contain `task_id`
    :param room: Room in which to publish update (default entire site)
    :param user: Transmit to user
    :param doctype: Transmit to doctype, docname
    :param docname: Transmit to doctype, docname
    :param after_commit: (default False) will emit after current transaction is committed
    """
    # `async` became a reserved keyword in Python 3.7, so the statement
    # `import frappe.async` is a SyntaxError there; import the module
    # dynamically by name instead (get_module wraps importlib.import_module).
    return get_module("frappe.async").publish_realtime(*args, **kwargs)
def local_cache(namespace, key, generator, regenerate_if_none=False):
    """A key value store for caching within a request

    :param namespace: frappe.local.cache[namespace]
    :param key: frappe.local.cache[namespace][key] used to retrieve value
    :param generator: method to generate a value if not found in store
    :param regenerate_if_none: re-run `generator` when the cached value is None
    """
    if namespace not in local.cache:
        local.cache[namespace] = {}

    if key not in local.cache[namespace]:
        local.cache[namespace][key] = generator()
    elif local.cache[namespace][key] is None and regenerate_if_none:
        # key exists but the previous result was None: regenerate
        # (use `is None`, not `== None`, per PEP 8)
        local.cache[namespace][key] = generator()

    return local.cache[namespace][key]
def enqueue(*args, **kwargs):
    '''
    Enqueue method to be executed using a background worker

    :param method: method string or method object
    :param queue: (optional) should be either long, default or short
    :param timeout: (optional) should be set according to the functions
    :param event: this is passed to enable clearing of jobs from queues
    :param async: (optional) if async=False, the method is executed immediately, else via a worker
    :param job_name: (optional) can be used to name an enqueue call, which can be used to prevent duplicate calls
    :param kwargs: keyword arguments to be passed to the method
    '''
    import frappe.utils.background_jobs
    return frappe.utils.background_jobs.enqueue(*args, **kwargs)
def enqueue_doc(*args, **kwargs):
    '''
    Enqueue method to be executed using a background worker

    :param doctype: DocType of the document on which you want to run the event
    :param name: Name of the document on which you want to run the event
    :param method: method string or method object
    :param queue: (optional) should be either long, default or short
    :param timeout: (optional) should be set according to the functions
    :param kwargs: keyword arguments to be passed to the method
    '''
    import frappe.utils.background_jobs
    return frappe.utils.background_jobs.enqueue_doc(*args, **kwargs)
def get_doctype_app(doctype):
    """Return the app name that owns the given DocType (request-cached)."""
    def _get_doctype_app():
        doctype_module = local.db.get_value("DocType", doctype, "module")
        return local.module_app[scrub(doctype_module)]

    return local_cache("doctype_app", doctype, generator=_get_doctype_app)
# module-level logger registry and globally configured log level
loggers = {}
log_level = None
def logger(module=None, with_more_info=True):
    '''Returns a python logger that uses StreamHandler'''
    from frappe.utils.logger import get_logger
    return get_logger(module or 'default', with_more_info=with_more_info)
def log_error(message=None, title=None):
    '''Log error to Error Log.

    :param message: error text; defaults to the current traceback.
    :param title: stored in the Error Log's `method` field.'''
    return get_doc(dict(doctype='Error Log', error=as_unicode(message or get_traceback()),
        method=title)).insert(ignore_permissions=True)
def get_desk_link(doctype, name):
    """Return an HTML anchor linking to the desk form of the given document."""
    return '<a href="#Form/{0}/{1}" style="font-weight: bold;">{2} {1}</a>'.format(doctype, name, _(doctype))
def bold(text):
    """Wrap the given text in HTML bold tags."""
    template = '<b>{0}</b>'
    return template.format(text)
def safe_eval(code, eval_globals=None, eval_locals=None):
    '''A safer `eval`: rejects any dunder access and evaluates with a
    minimal whitelist of builtins (int, float, long, round).

    NOTE: this is not a real sandbox; only use on trusted/validated input.
    '''
    if '__' in code:
        throw('Illegal rule {0}. Cannot use "__"'.format(bold(code)))

    whitelisted_globals = {
        "int": int,
        "float": float,
        "long": int,
        "round": round
    }

    eval_globals = eval_globals or {}
    eval_globals['__builtins__'] = {}
    eval_globals.update(whitelisted_globals)
    return eval(code, eval_globals, eval_locals)
def get_system_settings(key):
    """Return a single value from "System Settings", cached per request."""
    if key not in local.system_settings:
        local.system_settings.update({key: db.get_single_value('System Settings', key)})
    return local.system_settings.get(key)
def get_active_domains():
    """Return the list of active domains from Domain Settings."""
    from frappe.core.doctype.domain_settings.domain_settings import get_active_domains
    return get_active_domains()
| 31.615276 | 180 | 0.732328 |
ace574beae019998577f5bf091bf68371ba91c3c | 6,582 | py | Python | src/ti/subarctic/lcdc.py | hstarmans/py-uio | 9cfbbef5eb96709685c9d32a2079e41cd5669b9b | [
"MIT"
] | null | null | null | src/ti/subarctic/lcdc.py | hstarmans/py-uio | 9cfbbef5eb96709685c9d32a2079e41cd5669b9b | [
"MIT"
] | null | null | null | src/ti/subarctic/lcdc.py | hstarmans/py-uio | 9cfbbef5eb96709685c9d32a2079e41cd5669b9b | [
"MIT"
] | null | null | null | from uio import fix_ctypes_struct, struct_field, cached_getter
import ctypes
from ctypes import c_uint8 as ubyte, c_uint16 as ushort, c_uint32 as uint
from ti.irqc4 import IrqCombiner
# determine lcdc functional clock
def lcdc_fck():
try:
with open( "/sys/kernel/debug/clk/lcd_gclk/clk_rate", 'rb' ) as f:
return int( f.read() )
except PermissionError:
pass
# fiiine, I'll do it the hard way
from devicetree import dt
dt_lcdc = dt('&lcdc')
assert dt_lcdc.u32('assigned-clocks') == dt('&lcd_gclk').phandle
lcdc_gclk_parent = dt_lcdc.u32('assigned-clock-parents')
if lcdc_gclk_parent == dt('&dpll_core_m5_ck').phandle:
return 250000000
if lcdc_gclk_parent == dt('&dpll_per_m2_ck').phandle:
return 192000000
if lcdc_gclk_parent != dt('&dpll_disp_m2_ck').phandle:
raise RuntimeError("unknown clock parent for lcd_gclk")
rate = dt_lcdc.u32('assigned-clock-rates')
if rate == 0:
raise RuntimeError("clock rate not configured for lcd_gclk")
return rate
#--------- Remote Framebuffer (RFB) interface ----------------------------------
#
# Pin usage for various protocols:
#
# vsync pclk hsync acb/oe mclk protocol
# ------ ------ ------ ------ ------ ------------------------
# nALE EN RnW nCS0 MCLK 0: motorola 6800 (sync)
# nALE EN RnW nCS0 nCS1 1: motorola 6800 (async)
# nALE nRS nWS nCS0 MCLK 2: intel 8080 (sync)
# nALE nRS nWS nCS0 nCS1 3: intel 8080 (async)
# RS - RnW E0 E1 4: hitachi hd44780
#
# (RS in hitachi mode is equivalent to nALE in other modes.)
#
# RnW nALE
# 0 0 write command/address
# 1 0 read status
# 0 1 write data
# 1 1 read data
#
# where:
# nRS = ~(EN & RnW)
# nWS = ~(EN & ~RnW)
# E0 = EN & ~nCS0
# E1 = EN & ~nCS1
#
# transfer cycle ( data[], nALE, RnW )
# set nCS=0, data[], nALE, RnW
# wait SU (0-31 cycles)
# set EN=1
# wait STROBE (1-63 cycles)
# sample data[] if read
# set EN=0
# wait HOLD (1-15 cycles)
# set nCS=1, nALE=1, RnW=1
@fix_ctypes_struct
class RfbCs( ctypes.Structure ):
    """Per-chip-select register block of the RFB (remote framebuffer) interface.

    Layout is hardware-defined; do not reorder or resize fields."""
    _fields_ = [
            # timing configuration
            ('cs_delay',    uint,  2),  #rw  min idle cycles - ceil(7/clkdiv)
            ('rd_hold',     uint,  4),  #rw  read hold cycles (1-15)
            ('rd_strobe',   uint,  6),  #rw  read strobe cycles (1-63)
            ('rd_setup',    uint,  5),  #rw  read setup cycles (0-31)
            ('wr_hold',     uint,  4),  #rw  write hold cycles (1-15)
            ('wr_strobe',   uint,  6),  #rw  write strobe cycles (1-63)
            ('wr_setup',    uint,  5),  #rw  write setup cycles (0-31)

            # direct transfer (not allowed when dma is enabled)
            ('address',     uint),      #->  address transfer
            ('data',        uint),      #<>  data transfer
        ]

    # for hd44780 protocol (aliases of the data register at offset 4)
    cmd    = struct_field( 4, uint )    #->  cmd transfer
    status = struct_field( 4, uint )    #<-  status transfer

assert ctypes.sizeof(RfbCs) == 12
@fix_ctypes_struct
class Rfb( ctypes.Structure ):
    """RFB (remote framebuffer) interface registers: protocol/pin-polarity
    configuration plus two chip-select blocks."""
    _fields_ = [
            # interface configuration
            ('protocol',        uint, 3),  #rw  protocol (see above)
            ('invert_ale_rs',   uint, 1),  #rw  invert vsync pin
            ('invert_rs_en',    uint, 1),  #rw  invert pclk pin
            ('invert_ws_rw',    uint, 1),  #rw  invert hsync pin
            ('invert_cs0',      uint, 1),  #rw  invert acb/oe pin
            ('invert_cs1_mclk', uint, 1),  #rw  invert mclk pin

            # control
            ('dma_enable',      uint, 1),  #rw
            ('dma_cs',          uint, 1),  #rw

            ('cs0',  RfbCs),
            ('cs1',  RfbCs),
        ]

    @cached_getter
    def cs( self ):
        # convenience: chip-select blocks indexable as self.cs[0] / self.cs[1]
        return [ self.cs0, self.cs1 ]

assert ctypes.sizeof(Rfb) == 0x28 - 0x0c
#--------- Raster controller ---------------------------------------------------
@fix_ctypes_struct
class Raster( ctypes.Structure ):
    """Raster controller registers (placeholder: fields not yet mapped)."""
    _fields_ = [ ('', uint * 6) ]  # TODO

assert ctypes.sizeof(Raster) == 0x40 - 0x28
#--------- Dma controller ------------------------------------------------------
@fix_ctypes_struct
class Dma( ctypes.Structure ):
    """DMA controller registers (placeholder: fields not yet mapped)."""
    _fields_ = [ ('', uint * 5) ]  # TODO

assert ctypes.sizeof(Dma) == 0x54 - 0x40
#--------- LCDC subsystem ------------------------------------------------------
@fix_ctypes_struct
class Lcdc( ctypes.Structure ):
    """Top-level LCDC subsystem register map: global config, the RFB
    interface, raster and DMA controllers, and the subsystem wrapper
    (sysconfig, irq combiner, local clock/reset controls)."""
    _fields_ = [
            #--------- global config -------------------------------------------
            ('ident',       uint),      #r-
            ('pinmux',      ubyte, 1),  #rw  0=rfb, 1=raster
            ('clock_div',   ubyte),     #rw  fck/mck divider
            ('',            uint),

            # clock_div == 0 is treated like clock_div == 1
            #
            # minimum clock_div values:
            #   1  rfb
            #   2  raster active
            #   3  raster passive color (8 pixels per 3 cycles)
            #   4  raster passive monochrome (4 pixels per cycle)
            #   8  raster passive monochrome (8 pixels per cycle)

            #--------- remote framebuffer interface ----------------------------
            ('rfb',     Rfb),

            #--------- raster controller ---------------------------------------
            ('raster',  Raster),

            #--------- dma controller ------------------------------------------
            ('dma',     Dma),

            #--------- subsystem wrapper ---------------------------------------
            # sysconfig
            ('',            uint, 2),
            ('idlemode',    uint, 2),   #rw  0=force, 1=block, 2=auto
            ('standbymode', uint, 2),   #rw  0=force, 1=block, 2=auto
            ('',            uint * 0),

            # irq combiner
            ('irq',     IrqCombiner),
            ('eoi',     uint),          #->

            # local clock control
            ('en_raster',   uint, 1),   #rw
            ('en_rfb',      uint, 1),   #rw
            ('en_dma',      uint, 1),   #rw
            ('',            uint * 0),

            # local reset control (does not affect config registers)
            ('rst_raster',  uint, 1),   #rw
            ('rst_rfb',     uint, 1),   #rw
            ('rst_dma',     uint, 1),   #rw
            ('rst_global',  uint, 1),   #rw
            ('',            uint * 0),
        ]

assert ctypes.sizeof(Lcdc) == 0x74
ace5754862e9419eb6cf48cbcce906c960ad8e0e | 18,231 | py | Python | Workflow/AMReX/UtilitiesModule.py | dunhamsj/thornado | 52e52e8129039238d64f0d70f48e703325f7e635 | [
"BSD-3-Clause"
] | null | null | null | Workflow/AMReX/UtilitiesModule.py | dunhamsj/thornado | 52e52e8129039238d64f0d70f48e703325f7e635 | [
"BSD-3-Clause"
] | null | null | null | Workflow/AMReX/UtilitiesModule.py | dunhamsj/thornado | 52e52e8129039238d64f0d70f48e703325f7e635 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import numpy as np
def OverwriteFile( FileName ):
from os.path import isfile
Overwrite = True
if isfile( FileName ):
YN = input( 'File: "{:s}" exists. overwrite? (Y/N): '.format \
( FileName ) )
if not YN == 'Y':
print( 'Not overwriting file' )
Overwrite = False
return Overwrite
def ChoosePlotFile( DataDirectory, PlotFileBaseName, argv = [ 'a' ], \
ReturnFileArray = False, Verbose = True ):
from os import listdir
if Verbose:
print( '\n Calling ChoosePlotFile...' )
print( ' -------------------------\n' )
print( ' DataDirectory: {:s}'.format( DataDirectory ) )
print( ' PlotFileBaseName: {:s}'.format( PlotFileBaseName ) )
print( ' argv: ', argv )
print( ' ReturnFileArray: {:}\n'.format( ReturnFileArray ) )
if len( argv ) == 1:
# Get last plotfile in directory
FileArray \
= np.sort( np.array( [ file for file in listdir( DataDirectory ) ] ) )
FileList = []
for iFile in range( FileArray.shape[0] ):
sFile = FileArray[iFile]
if( sFile[0:len(PlotFileBaseName)+1] == PlotFileBaseName + '_' \
and sFile[len(PlotFileBaseName)+1].isdigit() ):
FileList.append( sFile )
FileArray = np.array( FileList )
if not FileArray.shape[0] > 0:
msg = 'No files found in DataDirectory:'
msg += ' {:s}\nDouble check the path\n'.format( DataDirectory )
assert ( FileArray.shape[0] > 0 ), msg
File = FileArray[-1]
elif( len( argv ) == 2 ):
if argv[1][0].isalpha():
File = argv[1]
else:
File = PlotFileBaseName + '_{:}'.format( argv[1].zfill(8) )
FileArray = np.array( File )
else:
n = len( argv )
msg = 'len( argv ) must be > 0 and < 3: len( argv ) = {:d}'.format( n )
arg = ( n > 0 ) & ( n < 3 )
print( arg )
assert arg, msg
# Remove "/" at end of filename, if present
if ( File[-1] == '/' ): File = File[:-1]
if ReturnFileArray:
return File, FileArray
else:
return File
def GetData( DataDirectory, PlotFileBaseName, Field, \
             CoordinateSystem, UsePhysicalUnits, argv = ( 'a', ), \
             ReturnTime = False, ReturnMesh = False, Verbose = True ):
    """Read one field (native or derived) from an AMReX plotfile via yt.

    Returns the field data (reduced to the actual dimensionality of the
    domain), its unit string, and optionally the cell-center mesh and/or
    the simulation time.

    BUGFIX: the 'RelativisticBernoulliConstant' branch previously read the
    metric fields as 'GF_Gm11'/'GF_Gm22'; every other branch (and the list
    of valid fields printed below) uses 'GF_Gm_11'/'GF_Gm_22'.  The mutable
    default ``argv=['a']`` was also replaced by an immutable tuple (it is
    only read, so this is backward compatible).
    """
    import yt
    import numpy as np

    if Verbose:
        print( '\n Calling GetData...' )
        print( ' ------------------' )
        print( ' DataDirectory: {:s}'.format( DataDirectory ) )
        print( ' PlotFileBaseName: {:s}'.format( PlotFileBaseName ) )
        print( ' Field: {:s}'.format( Field ) )
        print( ' CoordinateSystem: {:s}'.format( CoordinateSystem ) )
        print( ' ReturnTime: {:}\n'.format( ReturnTime ) )

    msg = 'Invalid choice of CoordinateSystem: {:s}'.format( CoordinateSystem )
    msg += '\n\nValid Choices:\n'
    msg += '--------------\n'
    msg += 'cartesian\n'
    msg += 'cylindrical\n'
    msg += 'spherical'
    assert ( CoordinateSystem == 'cartesian' \
             or CoordinateSystem == 'cylindrical' \
             or CoordinateSystem == 'spherical' ), msg

    # https://yt-project.org/doc/faq/index.html#how-can-i-change-yt-s-log-level
    yt.funcs.mylog.setLevel(40) # Suppress yt warnings

    File = ChoosePlotFile( DataDirectory, PlotFileBaseName, \
                           argv = argv, Verbose = False )

    if Verbose:
        print( ' File: {:}\n'.format( File ) )

    ds = yt.load( '{:}'.format( DataDirectory + File ) )

    MaxLevel = ds.index.max_level
    Time = ds.current_time.to_ndarray()
    nX = ds.domain_dimensions
    xL = ds.domain_left_edge
    xU = ds.domain_right_edge

    """
    https://yt-project.org/doc/reference/api/
    yt.data_objects.construction_data_containers.html#yt.data_objects.
    construction_data_containers.YTCoveringGrid
    """
    CoveringGrid \
      = ds.covering_grid \
          ( level = MaxLevel, \
            left_edge = xL, \
            dims = nX * 2**MaxLevel, \
            num_ghost_zones = nX[0] )

    nDimsX = 1
    if nX[1] > 1: nDimsX += 1
    if nX[2] > 1: nDimsX += 1

    # --- Get Mesh (cell centers) ---
    xL = xL.to_ndarray()
    xU = xU.to_ndarray()
    dX = ( xU - xL ) / np.float64( nX )
    X1 = np.linspace( xL[0] + dX[0] / 2.0, xU[0] - dX[0] / 2.0, nX[0] )
    X2 = np.linspace( xL[1] + dX[1] / 2.0, xU[1] - dX[1] / 2.0, nX[1] )
    X3 = np.linspace( xL[2] + dX[2] / 2.0, xU[2] - dX[2] / 2.0, nX[2] )

    # --- Native fields ---
    if ( Field == 'PF_D' ):
        Data = CoveringGrid[Field].to_ndarray()
        DataUnit = 'g/cm**3'
    elif( Field == 'PF_V1' ):
        Data = CoveringGrid[Field].to_ndarray()
        DataUnit = 'km/s'
    elif( Field == 'PF_V2' ):
        Data = CoveringGrid[Field].to_ndarray()
        if CoordinateSystem == 'cartesian' : DataUnit = 'km/s'
        elif CoordinateSystem == 'cylindrical': DataUnit = 'km/s'
        elif CoordinateSystem == 'spherical' : DataUnit = '1/s'
    elif( Field == 'PF_V3' ):
        Data = CoveringGrid[Field].to_ndarray()
        if CoordinateSystem == 'cartesian' : DataUnit = 'km/s'
        elif CoordinateSystem == 'cylindrical': DataUnit = '1/s'
        elif CoordinateSystem == 'spherical' : DataUnit = '1/s'
    elif( Field == 'PF_E' ):
        Data = CoveringGrid[Field].to_ndarray()
        DataUnit = 'erg/cm**3'
    elif( Field == 'CF_D' ):
        Data = CoveringGrid[Field].to_ndarray()
        DataUnit = 'g/cm**3'
    elif( Field == 'CF_S1' ):
        Data = CoveringGrid[Field].to_ndarray()
        DataUnit = 'g/cm**2/s'
    elif( Field == 'CF_S2' ):
        Data = CoveringGrid[Field].to_ndarray()
        if CoordinateSystem == 'cartesian' : DataUnit = 'g/cm**2/s'
        elif CoordinateSystem == 'cylindrical': DataUnit = 'g/cm**2/s'
        elif CoordinateSystem == 'spherical' : DataUnit = 'g/cm/s'
    elif( Field == 'CF_S3' ):
        Data = CoveringGrid[Field].to_ndarray()
        if CoordinateSystem == 'cartesian' : DataUnit = 'g/cm**2/s'
        elif CoordinateSystem == 'cylindrical': DataUnit = 'g/cm/s'
        elif CoordinateSystem == 'spherical' : DataUnit = 'g/cm/s'
    elif( Field == 'CF_E' ):
        Data = CoveringGrid[Field].to_ndarray()
        DataUnit = 'erg/cm**3'
    elif( Field == 'AF_P' ):
        Data = CoveringGrid[Field].to_ndarray()
        DataUnit = 'erg/cm**3'
    elif( Field == 'AF_Cs' ):
        Data = CoveringGrid[Field].to_ndarray()
        DataUnit = 'km/s'
    elif( Field == 'GF_Gm_11' ):
        Data = CoveringGrid[Field].to_ndarray()
        DataUnit = ''
    elif( Field == 'GF_Gm_22' ):
        Data = CoveringGrid[Field].to_ndarray()
        if CoordinateSystem == 'cartesian' : DataUnit = ''
        elif CoordinateSystem == 'cylindrical': DataUnit = ''
        elif CoordinateSystem == 'spherical' : DataUnit = 'km**2'
    elif( Field == 'GF_Gm_33' ):
        Data = CoveringGrid[Field].to_ndarray()
        if CoordinateSystem == 'cartesian' : DataUnit = ''
        elif CoordinateSystem == 'cylindrical': DataUnit = 'km**2'
        elif CoordinateSystem == 'spherical' : DataUnit = 'km**2'
    elif( Field == 'GF_Psi' ):
        Data = CoveringGrid[Field].to_ndarray()
        DataUnit = ''
    elif( Field == 'GF_Alpha' ):
        Data = CoveringGrid[Field].to_ndarray()
        DataUnit = ''
    elif( Field == 'DF_TCI' ):
        Data = CoveringGrid[Field].to_ndarray()
        DataUnit = ''

    # --- Derived Fields ---
    elif( Field == 'pr4' ):
        p = CoveringGrid['AF_P'].to_ndarray()
        Data = np.empty( (nX[0],nX[1],nX[2]), np.float64 )
        for iX1 in range( nX[0] ):
            for iX2 in range( nX[1] ):
                for iX3 in range( nX[2] ):
                    # X1 is in km; 1.0e5 converts to cm.
                    Data[iX1,iX2,iX3] = p[iX1,iX2,iX3] * ( X1[iX1] * 1.0e5 )**4
        DataUnit = 'erg*cm'
    elif( Field == 'RelativisticBernoulliConstant' ):
        c = 2.99792458e10
        rho = CoveringGrid['PF_D' ].to_ndarray()
        e = CoveringGrid['PF_E' ].to_ndarray()
        v1 = CoveringGrid['PF_V1' ].to_ndarray() * 1.0e5
        v2 = CoveringGrid['PF_V2' ].to_ndarray()
        p = CoveringGrid['AF_P' ].to_ndarray()
        alpha = CoveringGrid['GF_Alpha'].to_ndarray()
        # BUGFIX: was 'GF_Gm11'/'GF_Gm22' (missing underscore).
        Gm11 = CoveringGrid['GF_Gm_11'].to_ndarray()
        Gm22 = CoveringGrid['GF_Gm_22'].to_ndarray() * ( 1.0e5 )**2
        VSq = Gm11 * v1**2 + Gm22 * v2**2
        h = c**2 + ( e + p ) / rho
        W = 1.0 / np.sqrt( 1.0 - VSq / c**2 )
        B = alpha * h * W
        Data = B
        DataUnit = 'cm**2/s**2'
    elif( Field == 'PolytropicConstant' ):
        PF_D = CoveringGrid['PF_D' ].to_ndarray()
        AF_P = CoveringGrid['AF_P' ].to_ndarray()
        # NOTE(review): 'AF_Gm' does not appear in the valid-field list below;
        # confirm the field name against the plotfile contents.
        AF_Gm = CoveringGrid['AF_Gm'].to_ndarray()
        Data = AF_P / PF_D**AF_Gm
        DataUnit = 'erg/cm**3/(g/cm**3)**(Gamma_IDEAL)'
    elif( Field == 'NonRelativisticSpecificEnthalpy' ):
        e = CoveringGrid['PF_E'].to_ndarray()
        p = CoveringGrid['AF_P'].to_ndarray()
        rho = CoveringGrid['PF_D'].to_ndarray()
        Data = ( e + p ) / rho
        DataUnit = 'cm**2/s**2'
    elif( Field == 'RelativisticSpecificEnthalpy' ):
        c = 2.99792458e10
        e = CoveringGrid['PF_E'].to_ndarray()
        p = CoveringGrid['AF_P'].to_ndarray()
        rho = CoveringGrid['PF_D'].to_ndarray()
        Data = ( c**2 + ( e + p ) / rho ) / c**2
        DataUnit = ''
    elif( Field == 'LorentzFactor' ):
        c = 2.99792458e5
        Gm11 = CoveringGrid['GF_Gm_11'].to_ndarray()
        Gm22 = CoveringGrid['GF_Gm_22'].to_ndarray()
        Gm33 = CoveringGrid['GF_Gm_33'].to_ndarray()
        V1 = CoveringGrid['PF_V1'].to_ndarray()
        V2 = CoveringGrid['PF_V2'].to_ndarray()
        V3 = CoveringGrid['PF_V3'].to_ndarray()
        VSq = Gm11 * V1**2 + Gm22 * V2**2 + Gm33 * V3**2
        Data = 1.0 / np.sqrt( 1.0 - VSq / c**2 )
        DataUnit = ''
    elif( Field == 'TurbulentVelocity' ):
        Psi = CoveringGrid['GF_Psi' ].to_ndarray()
        Gm11 = CoveringGrid['GF_Gm_11'].to_ndarray()
        Gm22 = CoveringGrid['GF_Gm_22'].to_ndarray()
        Gm33 = CoveringGrid['GF_Gm_33'].to_ndarray()
        rho = CoveringGrid['PF_D' ].to_ndarray()
        V1 = CoveringGrid['PF_V1'].to_ndarray()
        V2 = CoveringGrid['PF_V2'].to_ndarray()
        V3 = CoveringGrid['PF_V3'].to_ndarray()

        # --- Compute angle-averaged and
        # mass density weighted radial velocity ---
        AngleAveragedMass = np.zeros( (nX[0]), np.float64 )
        AngleAveragedRadialVelocity = np.zeros( (nX[0]), np.float64 )
        Data = np.empty( nX, np.float64 )
        for iX1 in range( nX[0] ):
            for iX2 in range( nX[1] ):
                for iX3 in range( nX[2] ):
                    AngleAveragedMass[iX1] \
                      += rho[iX1,iX2,iX3] \
                           * Psi[iX1,iX2,iX3]**4 \
                           * np.sin( X2[iX2] ) * dX[1] * dX[2]
                    AngleAveragedRadialVelocity[iX1] \
                      += V1[iX1,iX2,iX3] * rho[iX1,iX2,iX3] \
                           * Psi[iX1,iX2,iX3]**4 \
                           * np.sin( X2[iX2] ) * dX[1] * dX[2]
            AngleAveragedRadialVelocity[iX1] /= AngleAveragedMass[iX1]
            # Turbulent velocity = magnitude of velocity relative to the
            # angle-averaged radial flow.
            for iX2 in range( nX[1] ):
                for iX3 in range( nX[2] ):
                    Data[iX1,iX2,iX3] \
                      = np.sqrt( \
                          Gm11[iX1,iX2,iX3] \
                            * ( V1[iX1,iX2,iX3] \
                                  - AngleAveragedRadialVelocity[iX1] )**2 \
                          + Gm22[iX1,iX2,iX3] * V2[iX1,iX2,iX3]**2 \
                          + Gm33[iX1,iX2,iX3] * V3[iX1,iX2,iX3]**2 )
        DataUnit = 'km/s'
    elif( Field == 'TurbulentEnergyDensity' ):
        Psi = CoveringGrid['GF_Psi' ].to_ndarray()
        Gm11 = CoveringGrid['GF_Gm_11'].to_ndarray()
        Gm22 = CoveringGrid['GF_Gm_22'].to_ndarray()
        Gm33 = CoveringGrid['GF_Gm_33'].to_ndarray()
        rho = CoveringGrid['PF_D' ].to_ndarray()
        V1 = CoveringGrid['PF_V1'].to_ndarray()
        V2 = CoveringGrid['PF_V2'].to_ndarray()
        V3 = CoveringGrid['PF_V3'].to_ndarray()

        AngleAveragedMass = np.zeros( (nX[0]), np.float64 )
        AngleAveragedRadialVelocity = np.zeros( (nX[0]), np.float64 )
        c = 2.99792458e5
        Data = np.empty( nX, np.float64 )
        for iX1 in range( nX[0] ):
            # --- Compute angle-averaged and
            # mass density weighted radial velocity ---
            for iX2 in range( nX[1] ):
                for iX3 in range( nX[2] ):
                    AngleAveragedMass[iX1] \
                      += rho[iX1,iX2,iX3] \
                           * Psi[iX1,iX2,iX3]**4 \
                           * np.sin( X2[iX2] ) * dX[1] * dX[2]
                    AngleAveragedRadialVelocity[iX1] \
                      += V1[iX1,iX2,iX3] * rho[iX1,iX2,iX3] \
                           * Psi[iX1,iX2,iX3]**4 \
                           * np.sin( X2[iX2] ) * dX[1] * dX[2]
            AngleAveragedRadialVelocity[iX1] /= AngleAveragedMass[iX1]
            # --- Compute turbulent energy density ---
            for iX2 in range( nX[1] ):
                for iX3 in range( nX[2] ):
                    # --- BetaSq = v_i * v^i / c^2 ---
                    BetaSq = ( Gm11[iX1,iX2,iX3] \
                                 * ( V1[iX1,iX2,iX3] \
                                       - AngleAveragedRadialVelocity[iX1] )**2 \
                               + Gm22[iX1,iX2,iX3] * V2[iX1,iX2,iX3]**2 \
                               + Gm33[iX1,iX2,iX3] * V3[iX1,iX2,iX3]**2 ) \
                             / c**2
                    W = 1.0 / np.sqrt( 1.0 - BetaSq )
                    Data[iX1,iX2,iX3] \
                      = rho[iX1,iX2,iX3] * ( c * 1.0e5 )**2 \
                          * W**2 * BetaSq / ( W + 1.0 )
        DataUnit = 'erg/cm**3'
    elif( Field == 'Vorticity' ):
        h1 = CoveringGrid['GF_h_1'].to_ndarray()
        h2 = CoveringGrid['GF_h_2'].to_ndarray()
        V1 = CoveringGrid['PF_V1' ].to_ndarray()
        V2 = CoveringGrid['PF_V2' ].to_ndarray()

        # Pad the theta direction with one ghost cell on each side.
        h1A = np.empty( (nX[0],nX[1]+2,nX[2]), np.float64 )
        h2A = np.empty( (nX[0],nX[1]+2,nX[2]), np.float64 )
        V1A = np.empty( (nX[0],nX[1]+2,nX[2]), np.float64 )
        V2A = np.empty( (nX[0],nX[1]+2,nX[2]), np.float64 )
        h1A[:,1:-1,:] = h1
        h2A[:,1:-1,:] = h2
        V1A[:,1:-1,:] = V1
        V2A[:,1:-1,:] = V2

        k = 0
        # --- Apply reflecting boundary conditions in theta ---
        for i in range( nX[0] ):
            h1A[i,0,k] = +h1A[i,1,k]
            h2A[i,0,k] = +h2A[i,1,k]
            V1A[i,0,k] = +V1A[i,1,k]
            V2A[i,0,k] = -V2A[i,1,k]
            h1A[i,nX[1]+1,k] = +h1A[i,nX[1],k]
            h2A[i,nX[1]+1,k] = +h2A[i,nX[1],k]
            V1A[i,nX[1]+1,k] = +V1A[i,nX[1],k]
            V2A[i,nX[1]+1,k] = -V2A[i,nX[1],k]

        # --- Compute vorticity in domain using
        # central differences for derivatives ---
        Data = np.zeros( (nX[0],nX[1],nX[2]), np.float64 )
        k = 0
        for i in range( 1, nX[0] - 1 ):
            for j in range( 1, nX[1] + 1 ):
                Data[i,j-1,k] \
                  = 1.0 / ( h1A[i,j,k] * h2A[i,j,k] ) \
                      * ( ( h2A[i+1,j,k]**2 * V2A[i+1,j,k] \
                              - h2A[i-1,j,k]**2 * V2A[i-1,j,k] ) \
                            / ( 2.0 * X1[i] ) \
                          - ( h1A[i,j+1,k]**2 * V1A[i,j+1,k] \
                                - h1A[i,j-1,k]**2 * V1A[i,j-1,k] ) \
                            / ( 2.0 * X2[j-1] ) )
        DataUnit = '1/s'
    else:
        print( '\nInvalid field: {:}'.format( Field ) )
        print( '\nValid choices:' )
        print( '--------------' )
        print( ' PF_D' )
        print( ' PF_V1' )
        print( ' PF_V2' )
        print( ' PF_V3' )
        print( ' PF_E' )
        print( ' CF_D' )
        print( ' CF_S1' )
        print( ' CF_S2' )
        print( ' CF_S3' )
        print( ' CF_E' )
        print( ' AF_P' )
        print( ' AF_Cs' )
        print( ' GF_Gm_11' )
        print( ' GF_Gm_22' )
        print( ' GF_Gm_33' )
        print( ' GF_Psi' )
        print( ' GF_Alpha' )
        print( ' DF_TCI' )
        print( ' RelativisticBernoulliConstant' )
        print( ' PolytropicConstant' )
        print( ' NonRelativisticSpecificEnthalpy' )
        print( ' RelativisticSpecificEnthalpy' )
        print( ' LorentzFactor' )
        print( ' TurbulentVelocity' )
        print( ' TurbulentEnergyDensity' )
        print( ' Vorticity' )
        assert 0, 'Invalid choice of field'

    if not UsePhysicalUnits: DataUnit = ''

    # Reduce to the actual dimensionality of the data.
    if nDimsX == 1:
        Data = Data[:,0,0]
    elif nDimsX == 2:
        X2v, X1v = np.meshgrid( X2, X1 )
        X1 = X1v
        X2 = X2v
        Data = Data[:,:,0]
    else:
        print( 'Not ready for 3D yet. Good luck...' )

    if ReturnTime and ReturnMesh:
        return Data, DataUnit, X1, X2, X3, xL, xU, nX, Time
    elif ReturnTime:
        return Data, DataUnit, Time
    elif ReturnMesh:
        return Data, DataUnit, X1, X2, X3, xL, xU, nX
    else:
        return Data, DataUnit
def GetNorm( UseLogScale, Data, vmin = +1.0e100, vmax = -1.0e100 ):
    """Build a matplotlib color normalization for Data.

    The sentinel defaults (+/-1e100) mean "use the data min/max".  When a
    log scale is requested but Data contains non-positive values, a
    symmetric log norm is used instead of a plain log norm.
    """
    import matplotlib.pyplot as plt
    from matplotlib.colors import LogNorm, SymLogNorm

    # Replace the sentinel limits by the actual data range.
    if vmin > +1.0e99:
        vmin = Data.min()
    if vmax < -1.0e99:
        vmax = Data.max()

    if not UseLogScale:
        return plt.Normalize( vmin = vmin, vmax = vmax )

    if np.any( Data <= 0.0 ):
        # A log scale cannot represent values <= 0; fall back to symlog.
        return SymLogNorm( vmin = vmin, vmax = vmax, \
                           linthresh = 1.0e-2, base = 10 )

    return LogNorm( vmin = vmin, vmax = vmax )
| 29.076555 | 80 | 0.491964 |
ace576999bbc895a670bf11734329406fe229763 | 751 | py | Python | alipay/aop/api/response/AlipayOpenIotbpaasDevicecodeCreateResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/response/AlipayOpenIotbpaasDevicecodeCreateResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/response/AlipayOpenIotbpaasDevicecodeCreateResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenIotbpaasDevicecodeCreateResponse(AlipayResponse):
    """Response wrapper for the alipay.open.iotbpaas.devicecode.create API.

    Exposes the single ``short_code`` field returned by the gateway.
    """

    def __init__(self):
        super(AlipayOpenIotbpaasDevicecodeCreateResponse, self).__init__()
        # Populated by parse_response_content(); None until parsed.
        self._short_code = None

    @property
    def short_code(self):
        return self._short_code

    @short_code.setter
    def short_code(self, value):
        self._short_code = value

    def parse_response_content(self, response_content):
        # Let the base class parse the envelope, then pick out our field.
        response = super(AlipayOpenIotbpaasDevicecodeCreateResponse, self).parse_response_content(response_content)
        if 'short_code' in response:
            self.short_code = response['short_code']
ace576b3351f887f28178ac0960eafc691a17ab8 | 981 | py | Python | src/icolos/core/steps_utils.py | jharrymoore/Icolos | c60cc00c34208ab7011d41d52a74651763673e7a | [
"Apache-2.0"
] | 11 | 2022-01-30T14:36:13.000Z | 2022-03-22T09:40:57.000Z | src/icolos/core/steps_utils.py | jharrymoore/Icolos | c60cc00c34208ab7011d41d52a74651763673e7a | [
"Apache-2.0"
] | 2 | 2022-03-23T07:56:49.000Z | 2022-03-24T12:01:42.000Z | src/icolos/core/steps_utils.py | jharrymoore/Icolos | c60cc00c34208ab7011d41d52a74651763673e7a | [
"Apache-2.0"
] | 8 | 2022-01-28T10:32:31.000Z | 2022-03-22T09:40:59.000Z | from icolos.core.workflow_steps.step import StepBase
from icolos.utils.enums.step_enums import StepBaseEnum
from icolos.utils.general.convenience_functions import nested_get
from icolos.utils.enums.step_initialization_enum import StepInitializationEnum
from icolos.utils.enums.flow_control_enums import FlowControlInitializationEnum
_IE = StepInitializationEnum()
_FCE = FlowControlInitializationEnum()
def initialize_step_from_dict(step_conf: dict) -> StepBase:
_STE = StepBaseEnum
step_type = nested_get(step_conf, _STE.STEP_TYPE, default=None)
step_type = None if step_type is None else step_type.upper()
if step_type in _IE.STEP_INIT_DICT.keys():
return _IE.STEP_INIT_DICT[step_type](**step_conf)
elif step_type in _FCE.FLOW_CONTROL_INIT_DICT.keys():
return _FCE.FLOW_CONTROL_INIT_DICT[step_type](**step_conf)
else:
raise ValueError(
f"Backend for step {nested_get(step_conf, _STE.STEPID, '')} unknown."
)
| 42.652174 | 81 | 0.775739 |
ace5779277eed50b329b85089cccbea69c4cecf4 | 1,149 | py | Python | flavio/math/test_optimize.py | Felicia56/flavio | ea735bd8febbb961d249eddf338a4960c1fbee69 | [
"MIT"
] | 61 | 2016-03-09T16:19:39.000Z | 2022-03-30T00:55:51.000Z | flavio/math/test_optimize.py | Felicia56/flavio | ea735bd8febbb961d249eddf338a4960c1fbee69 | [
"MIT"
] | 167 | 2016-03-15T15:25:57.000Z | 2022-02-27T22:19:22.000Z | flavio/math/test_optimize.py | Felicia56/flavio | ea735bd8febbb961d249eddf338a4960c1fbee69 | [
"MIT"
] | 57 | 2016-03-15T14:24:23.000Z | 2022-01-14T01:00:03.000Z | import unittest
import numpy as np
import numpy.testing as npt
import flavio
def f(x):
    """Paraboloid with its minimum value 0 at (2, 1)."""
    dx, dy = x[0] - 2, x[1] - 1
    return dx ** 2 + dy ** 2
def g(x):
    """Negated f: maximum value 0 at (2, 1)."""
    value = f(x)
    return -value
def h(x, a):
    """Paraboloid with its minimum value 0 at (a, 1)."""
    da, dy = x[0] - a, x[1] - 1
    return da ** 2 + dy ** 2
class TestOptimize(unittest.TestCase):
    """Check that flavio's robust optimizers find the known optima of f/g/h."""

    def test_slsqp(self):
        # Minimum of f and maximum of g are both at (2, 1).
        res = flavio.math.optimize.minimize_robust(f, [0, 0], disp=False, methods=('SLSQP',))
        npt.assert_array_almost_equal(res.x, [2, 1])
        res = flavio.math.optimize.maximize_robust(g, [5, 5], disp=False, methods=('SLSQP',))
        npt.assert_array_almost_equal(res.x, [2, 1])
        # Extra args are forwarded: minimum of h(., a=3) is at (3, 1).
        res = flavio.math.optimize.minimize_robust(h, [0, 0], args=(3,), methods=('SLSQP',))
        npt.assert_array_almost_equal(res.x, [3, 1])

    def test_minuit(self):
        # Same optima found by the MIGRAD backend.
        res = flavio.math.optimize.minimize_migrad(f, [0, 0], print_level=0)
        npt.assert_array_almost_equal(res.x, [2, 1])
        res = flavio.math.optimize.minimize_robust(f, [0, 0], methods=('MIGRAD',))
        npt.assert_array_almost_equal(res.x, [2, 1])
        res = flavio.math.optimize.minimize_robust(h, [0, 0], args=(3,), methods=('MIGRAD',))
        npt.assert_array_almost_equal(res.x, [3, 1])
ace5784a27457a93850ed476a25fd0450d6a29c8 | 2,485 | py | Python | full_model.py | dawei1/cs598_project | 08592f81f78e90fc2371341fd67b9077a2803d9a | [
"MIT"
] | null | null | null | full_model.py | dawei1/cs598_project | 08592f81f78e90fc2371341fd67b9077a2803d9a | [
"MIT"
] | null | null | null | full_model.py | dawei1/cs598_project | 08592f81f78e90fc2371341fd67b9077a2803d9a | [
"MIT"
] | null | null | null | '''
This module defines the entire model architecture and combines the different
pieces that are defined in other modules. It also defines a method for model
training and defines some default configuration for the model such as the
patching size P.
'''
import datetime
import numpy as np
import torch
import Patching as p
import Recognition as r
import ResNet as rn
import Constants
class PatchingModel(torch.nn.Module):
    """Full model: ResNet backbone -> patching layer -> recognition head.

    Combines the pieces defined in the ResNet, Patching and Recognition
    modules, as described at the top of this file.
    """

    def __init__(self, height_width, P, c_prime):
        # height_width: spatial size of the ResNet feature map,
        # P: patching size, c_prime: channel count for the recognition head.
        super(PatchingModel, self).__init__()
        self.resnet = rn.get_resnet_model()
        self.patching = p.Patching(height_width, P)
        self.recognition = r.Recognition(c_prime)

    def forward(self, x):
        # Feed the input through the three stages in sequence.
        output = self.resnet(x)
        output = self.patching(output)
        output = self.recognition(output)
        return output
# Define default model configuration
# ResNet downsamples the input by a factor of 32, so the feature map side
# length is the crop size divided by 32.
resnet_out_height_width = int(Constants.image_crop_size/32)
c_prime = 2048   # channel count fed to the recognition head
P = 6            # patching size
patching_model = PatchingModel(resnet_out_height_width, P, c_prime)
optimizer = torch.optim.Adam(patching_model.parameters(), lr=0.001, weight_decay=0.1)
n_epochs = 10
# Train on GPU when one is available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def custom_loss(output, target):
    """Summed binary cross-entropy between predicted probabilities and targets.

    Equivalent to -sum(t*log(p) + (1-t)*log(1-p)); callers are expected to
    keep `output` strictly inside (0, 1) so the logs stay finite.
    """
    log_likelihood = target * torch.log(output) \
        + (1 - target) * torch.log(1 - output)
    return -torch.sum(log_likelihood)
def train_model(train_dataloader, model = patching_model, n_epoch=n_epochs, optimizer=optimizer):
    """Train `model` on `train_dataloader` for `n_epoch` epochs; return it.

    The model output is treated as per-patch positive probabilities and
    combined into an image-level prediction via a product over the spatial
    dims (2 and 3) — a noisy-OR style aggregation.  The probabilities are
    squashed into [0.98, 1.0] before the product, presumably for numerical
    stability — TODO confirm against the model's design/paper.
    """
    model.train()
    for epoch in range(n_epoch):
        print(f"Starting Epoch {epoch}")
        print(datetime.datetime.now())
        curr_epoch_loss = []
        for data, target in train_dataloader:
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            # P(image negative) = prod over patches of P(patch negative),
            # with each factor rescaled from [0, 1] into [0.98, 1.0].
            output = 1 - output
            output = (output * 0.02) + 0.98
            output = torch.prod(output, 3)
            output = torch.prod(output, 2)
            prediction = 1 - output
            loss = custom_loss(prediction, target)
            loss.backward()
            curr_epoch_loss.append(loss.cpu().data.numpy())
            optimizer.step()
            # Drop per-batch tensors eagerly and flush the CUDA cache to
            # keep GPU memory usage down between batches.
            del loss
            del output
            del prediction
            del data
            del target
            torch.cuda.empty_cache()
        print(f"Epoch {epoch}: curr_epoch_loss={np.mean(curr_epoch_loss)}")
    return model
| 32.272727 | 97 | 0.664789 |
ace578719e64ab2c3df17d275ac417cc0d8067e6 | 3,896 | py | Python | grocery/views.py | gajrajgchouhan/Grocery-App | dd56f504280523ea7b08ea87622765c6ad461fb9 | [
"MIT"
] | null | null | null | grocery/views.py | gajrajgchouhan/Grocery-App | dd56f504280523ea7b08ea87622765c6ad461fb9 | [
"MIT"
] | null | null | null | grocery/views.py | gajrajgchouhan/Grocery-App | dd56f504280523ea7b08ea87622765c6ad461fb9 | [
"MIT"
] | null | null | null | from django.shortcuts import get_object_or_404, redirect, render
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse
from django.views.decorators.http import require_http_methods
from .forms import AddGrocery, Profile, FilterGrocery
from .models import Grocery
@login_required(login_url="/login")
def add(request):
    """Create a grocery item owned by the current user.

    GET renders an empty form; POST validates it, stamps the item with
    request.user and re-renders the form with a status message.
    """
    if request.method == "POST":
        f = AddGrocery(request.POST)
        if f.is_valid():
            # The owner comes from the session, not from the submitted form.
            added = f.save(commit=False)
            added.user = request.user
            added.save()
            return render(request, "add.html", {"form": f, "message": "Done"})
        else:
            return render(request, "add.html", {"form": f, "message": "Error"})
    else:
        f = AddGrocery()
        return render(request, "add.html", {"form": f})
@login_required(login_url="/login")
def index(request):
    """List the current user's groceries, optionally filtered by date (POST)."""
    if request.method == "POST":
        f = FilterGrocery(request.POST)
        if f.is_valid():
            groceries = Grocery.objects.all().filter(user=request.user, date=f.cleaned_data["date_filter"])
            context = {"groceries": groceries, "form": f}
        else:
            # Invalid filter: fall back to the full list and surface the errors.
            error = f.errors
            groceries = Grocery.objects.all().filter(user=request.user)
            context = {"groceries": groceries, "form": f, "error": error}
    else:
        groceries = Grocery.objects.all().filter(user=request.user)
        f = FilterGrocery()
        context = {"groceries": groceries, "form": f}
    return render(request, "index.html", context)
@login_required(login_url="/login")
def update(request, id):
    """Edit an existing grocery item.

    GET pre-fills the form from the item; POST applies the changes, but only
    when the item belongs to the requesting user.
    NOTE(review): the GET branch does not check ownership, so any logged-in
    user can view another user's item form — confirm whether that is intended.
    """
    if request.method == "POST":
        instance = get_object_or_404(Grocery, id=id)
        f = AddGrocery(request.POST, instance=instance)
        # Silently redirects without saving when the form is invalid or the
        # item belongs to someone else.
        if f.is_valid() and instance.user == request.user:
            Grocery.objects.filter(id=id).update(**f.cleaned_data)
        return redirect("/")
    else:
        instance = get_object_or_404(Grocery, id=id)
        f = AddGrocery(
            initial={
                "name": instance.name,
                "quantity": instance.quantity,
                "status": instance.status,
                "date": instance.date,
            }
        )
        return render(request, "update.html", {"form": f, "id": id})
@require_http_methods(["GET"])
@login_required(login_url="/login")
def delete(request, id):
    """Delete a grocery item (only when owned by the requester), then go home.

    NOTE(review): state-changing deletion over GET is exposed to CSRF and
    link prefetching; a POST with a CSRF token would be safer — confirm.
    """
    grocery = Grocery.objects.get(id=id)
    if grocery.user == request.user:
        grocery.delete()
    return HttpResponseRedirect("/")
def register_page(request):
    """Register a new account and log the user in immediately on success."""
    if request.method == "POST":
        form = Profile(request.POST)
        if form.is_valid():
            form.save()
            # Authenticate with the just-created credentials so the session
            # starts out logged in.
            username = request.POST["username"]
            password = request.POST["password1"]
            user = authenticate(request, username=username, password=password)
            login(request, user)
            return HttpResponseRedirect("/")
        else:
            context = {"form": form}
            return render(request, "register.html", context)
    else:
        form = Profile()
        context = {"form": form}
        return render(request, "register.html", context)
def login_page(request):
    """Log a user in; already-authenticated users are sent straight home."""
    if request.method == "POST":
        # The registration form names its password field "password1",
        # so the login form reuses that field name.
        username = request.POST["username"]
        password = request.POST["password1"]
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return HttpResponseRedirect("/")
        else:
            return HttpResponse("Invalid credentials")
    if request.user.is_authenticated:
        return HttpResponseRedirect("/")
    form = Profile()
    return render(request, "login.html", {"form": form})
def log_out(request):
    """End the current session and redirect to the home page."""
    logout(request)
    return HttpResponseRedirect("/")
| 33.878261 | 107 | 0.615503 |
ace578cd4c2c79dc6705e49758897385b6732a40 | 14,722 | py | Python | my_pygame/shape.py | francis-clairicia/Py-Game-Case | af2da857f2ef758051ad3c174d77f5a2deab935d | [
"MIT"
] | 6 | 2022-02-10T09:07:56.000Z | 2022-02-10T10:36:18.000Z | my_pygame/shape.py | francis-clairicia/Py-Game-Case | af2da857f2ef758051ad3c174d77f5a2deab935d | [
"MIT"
] | null | null | null | my_pygame/shape.py | francis-clairicia/Py-Game-Case | af2da857f2ef758051ad3c174d77f5a2deab935d | [
"MIT"
] | null | null | null | # -*- coding: Utf-8 -*
from typing import Union, Callable
import pygame
from pygame.math import Vector2
from .drawable import Drawable
from .colors import TRANSPARENT, BLACK
from .surface import create_surface
from .gradients import horizontal, vertical, radial, squared
class Shape(Drawable, use_parent_theme=False):
    """Base class for drawable geometric shapes.

    Holds a fill color plus an outline (width and color); subclasses redraw
    their surface in shape_update(), which is invoked whenever the color or
    the size changes.
    """

    def __init__(self, color: pygame.Color, outline: int, outline_color: pygame.Color, theme=None):
        # pylint: disable=unused-argument
        Drawable.__init__(self)
        self.color = color
        self.outline = outline
        self.outline_color = outline_color

    @property
    def color(self) -> pygame.Color:
        return self.__color

    @color.setter
    def color(self, value: pygame.Color) -> None:
        # None means fully transparent; any change triggers a redraw.
        self.__color = pygame.Color(value) if value is not None else TRANSPARENT
        self.shape_update()

    @property
    def outline(self) -> int:
        return self.__outline

    @outline.setter
    def outline(self, value: int) -> None:
        # Negative widths are clamped to 0 (no outline).
        self.__outline = int(value)
        if self.__outline < 0:
            self.__outline = 0

    @property
    def outline_color(self) -> pygame.Color:
        return self.__outline_color

    @outline_color.setter
    def outline_color(self, value: pygame.Color) -> None:
        self.__outline_color = pygame.Color(value) if value is not None else TRANSPARENT

    def set_size(self, *size: Union[int, tuple[int, int]], smooth=True) -> None:
        # pylint: disable=unused-argument
        # Smoothing is ignored: shapes are redrawn from scratch, not scaled.
        Drawable.set_size(self, *(size), smooth=False)
        self.shape_update()

    def set_width(self, width: float, smooth=True)-> None:
        Drawable.set_width(self, width, smooth=False)
        self.shape_update()

    def set_height(self, height: float, smooth=True) -> None:
        Drawable.set_height(self, height, smooth=False)
        self.shape_update()

    def shape_update(self) -> None:
        # Hook for subclasses: redraw the internal surface.
        pass
class PolygonShape(Shape):
    """Arbitrary polygon defined by a list of points in screen coordinates.

    Points are stored both in absolute coordinates and as fractions of the
    bounding box, so the polygon can be rescaled when the shape is resized.
    """

    def __init__(self, color: pygame.Color, *, outline=0, outline_color=BLACK, points=list(), theme=None):
        # NOTE(review): ``points=list()`` is a mutable default argument; it is
        # only read here, but sharing one list between calls is fragile.
        self.__points = list()
        self.__image_points = list()
        self.__image_points_percent = list()
        Shape.__init__(self, color=color, outline=outline, outline_color=outline_color)
        self.points = points

    @property
    def points(self) -> list[Vector2]:
        # Shallow copy: the list is fresh but the Vector2 objects are shared.
        return self.__points.copy()

    @points.setter
    def points(self, points: list[Union[tuple[int, int], Vector2]]) -> None:
        self.__points = points = [Vector2(point) for point in points]
        # Bounding box of the given points (defaults keep empty lists valid).
        left = min((point.x for point in points), default=0)
        right = max((point.x for point in points), default=0)
        top = min((point.y for point in points), default=0)
        bottom = max((point.y for point in points), default=0)
        width = right - left
        height = bottom - top
        # Points relative to the bounding box, then as fractions of its size,
        # so they can be rescaled in shape_update().
        image_points = [Vector2(point.x - left, point.y - top) for point in points]
        self.__image_points_percent = [
            ((point.x / width if width != 0 else 0), (point.y / height if height != 0 else 0))
            for point in image_points
        ]
        Shape.set_size(self, width, height)
        Shape.move(self, left=left, top=top)

    def shape_update(self) -> None:
        # Rebuild pixel-space points from the stored fractions at the current size.
        self.__image_points = [Vector2(self.width * x, self.height * y) for x, y in self.__image_points_percent]
        if len(self.points) > 2:
            pygame.draw.polygon(self.image, self.color, self.__image_points)

    def _after_drawing(self, surface: pygame.Surface) -> None:
        if self.outline > 0:
            if len(self.points) > 2:
                pygame.draw.polygon(surface, self.outline_color, self.points, width=self.outline)
            elif len(self.points) == 2:
                # Degenerate polygon: draw a plain line segment instead.
                pygame.draw.line(surface, self.color, *self.points, width=self.outline)

    def focus_drawing_function(self, surface: pygame.Surface, highlight_color: pygame.Color, highlight_thickness: int) -> None:
        if len(self.points) > 2:
            pygame.draw.polygon(surface, highlight_color, self.points, width=highlight_thickness)
        elif len(self.points) == 2:
            pygame.draw.line(surface, highlight_color, *self.points, width=highlight_thickness)

    def move(self, **kwargs) -> None:
        Shape.move(self, **kwargs)
        # Keep the absolute points in sync with the new top-left position.
        self.__points = [Vector2(point.x + self.x, point.y + self.y) for point in self.__image_points]

    def move_ip(self, x: float, y: float) -> None:
        Shape.move_ip(self, x, y)
        # The copy returned by `points` shares its Vector2 objects, so this
        # shifts the stored points in place.
        for point in self.points:
            point.x += x
            point.y += y
class RectangleShape(Shape):
    """Rectangle with optional rounded corners.

    Per-corner radii default to -1, which means "use border_radius"
    (the pygame.draw.rect convention).
    """

    def __init__(self, width: int, height: int, color: pygame.Color, *, outline=0, outline_color=BLACK,
                 border_radius=0, border_top_left_radius=-1, border_top_right_radius=-1,
                 border_bottom_left_radius=-1, border_bottom_right_radius=-1, theme=None):
        self.__draw_params = {
            "border_radius": border_radius,
            "border_top_left_radius": border_top_left_radius,
            "border_top_right_radius": border_top_right_radius,
            "border_bottom_left_radius": border_bottom_left_radius,
            "border_bottom_right_radius": border_bottom_right_radius
        }
        Shape.__init__(self, color=color, outline=outline, outline_color=outline_color)
        self.set_size(width, height)

    def shape_update(self) -> None:
        # Redraw the filled rectangle on a fresh transparent surface.
        self.image = create_surface(self.size)
        pygame.draw.rect(self.image, self.color, self.image.get_rect(), **self.__draw_params)

    def _after_drawing(self, surface: pygame.Surface) -> None:
        if self.outline > 0:
            pygame.draw.rect(surface, self.outline_color, self.rect, width=self.outline, **self.__draw_params)

    def focus_drawing_function(self, surface: pygame.Surface, highlight_color: pygame.Color, highlight_thickness: int) -> None:
        pygame.draw.rect(surface, highlight_color, self.rect, width=highlight_thickness, **self.__draw_params)

    def config(self, **kwargs) -> None:
        """Update any of the border-radius draw parameters and redraw.

        BUGFIX: the previous implementation iterated
        ``filter(lambda key, value: ..., kwargs.items())``, which raised
        TypeError because filter() calls its predicate with a single
        (key, value) tuple — so config() (and hence all the border-radius
        property setters below) never worked.  Unknown keys are ignored,
        as intended.
        """
        updated = False
        for key, value in kwargs.items():
            if key in self.__draw_params:
                self.__draw_params[key] = int(value)
                updated = True
        if updated:
            # Redraw so the new radii become visible immediately.
            self.shape_update()

    border_radius = property(
        lambda self: self.__draw_params["border_radius"],
        lambda self, value: self.config(border_radius=value)
    )
    border_top_left_radius = property(
        lambda self: self.__draw_params["border_top_left_radius"],
        lambda self, value: self.config(border_top_left_radius=value)
    )
    border_top_right_radius = property(
        lambda self: self.__draw_params["border_top_right_radius"],
        lambda self, value: self.config(border_top_right_radius=value)
    )
    border_bottom_left_radius = property(
        lambda self: self.__draw_params["border_bottom_left_radius"],
        lambda self, value: self.config(border_bottom_left_radius=value)
    )
    border_bottom_right_radius = property(
        lambda self: self.__draw_params["border_bottom_right_radius"],
        lambda self, value: self.config(border_bottom_right_radius=value)
    )
class CircleShape(Shape):
    """Circle sized by its radius; individual quadrants can be disabled."""

    def __init__(self, radius: int, color: pygame.Color, *, outline=0, outline_color=BLACK,
                 draw_top_left=True, draw_top_right=True,
                 draw_bottom_left=True, draw_bottom_right=True, theme=None):
        self.__radius = 0
        self.__draw_params = {
            "draw_top_left": draw_top_left,
            "draw_top_right": draw_top_right,
            "draw_bottom_left": draw_bottom_left,
            "draw_bottom_right": draw_bottom_right
        }
        Shape.__init__(self, color=color, outline=outline, outline_color=outline_color)
        self.radius = radius

    @property
    def radius(self) -> int:
        return self.__radius

    @radius.setter
    def radius(self, value: int) -> None:
        # The bounding square has side 2 * radius; negatives clamp to 0.
        self.set_size(max(int(value), 0) * 2)

    def shape_update(self) -> None:
        self.__radius = min(self.width // 2, self.height // 2)
        self.image = create_surface(self.size)
        pygame.draw.circle(self.image, self.color, (self.radius, self.radius), self.radius, **self.__draw_params)

    def _after_drawing(self, surface: pygame.Surface) -> None:
        if self.outline > 0:
            pygame.draw.circle(surface, self.outline_color, self.center, self.radius, width=self.outline, **self.__draw_params)

    def focus_drawing_function(self, surface: pygame.Surface, highlight_color: pygame.Color, highlight_thickness: int) -> None:
        pygame.draw.circle(surface, highlight_color, self.center, self.radius, width=highlight_thickness, **self.__draw_params)

    def config(self, **kwargs) -> None:
        """Toggle quadrant draw flags and redraw.

        BUGFIX: the previous implementation iterated
        ``filter(lambda key, value: ..., kwargs.items())``, which raised
        TypeError because filter() calls its predicate with a single
        (key, value) tuple — so config() (and the quadrant property setters
        below) never worked.  Unknown keys are ignored, as intended.
        """
        updated = False
        for key, value in kwargs.items():
            if key in self.__draw_params:
                self.__draw_params[key] = bool(value)
                updated = True
        if updated:
            # Redraw so the new quadrant flags become visible immediately.
            self.shape_update()

    def set_size(self, *size: Union[int, tuple[int, int]], smooth=True) -> None:
        # A circle is always square: collapse any (w, h) to min(w, h).
        size = size if len(size) == 2 else size[0]
        if isinstance(size, (int, float)):
            size = int(size), int(size)
        Shape.set_size(self, min(size), smooth=smooth)

    def set_width(self, width: float, smooth=True)-> None:
        Shape.set_size(self, width, smooth=smooth)

    def set_height(self, height: float, smooth=True) -> None:
        Shape.set_size(self, height, smooth=smooth)

    draw_top_left = property(
        lambda self: self.__draw_params["draw_top_left"],
        lambda self, value: self.config(draw_top_left=value)
    )
    draw_top_right = property(
        lambda self: self.__draw_params["draw_top_right"],
        lambda self, value: self.config(draw_top_right=value)
    )
    draw_bottom_left = property(
        lambda self: self.__draw_params["draw_bottom_left"],
        lambda self, value: self.config(draw_bottom_left=value)
    )
    draw_bottom_right = property(
        lambda self: self.__draw_params["draw_bottom_right"],
        lambda self, value: self.config(draw_bottom_right=value)
    )
class CrossShape(Shape):
    """A filled rectangle with an X (two diagonals) drawn in the outline color."""

    def __init__(self, width: int, height: int, color: pygame.Color, *, outline=2, outline_color=BLACK, theme=None):
        super().__init__(color, outline, outline_color)
        self.set_size(width, height)

    def shape_update(self) -> None:
        """Rebuild the image: fill with the shape color, then draw both diagonals."""
        try:
            self.image = create_surface(self.size)
            self.image.fill(self.color)
            image_rect = self.image.get_rect()
            pygame.draw.line(self.image, self.outline_color, image_rect.topleft, image_rect.bottomright, width=self.outline)
            pygame.draw.line(self.image, self.outline_color, image_rect.topright, image_rect.bottomleft, width=self.outline)
        except pygame.error:
            # BUG FIX: was a bare `except:` which also swallowed programming
            # errors and KeyboardInterrupt. Keep the best-effort behavior
            # (drawing can fail while the size is still 0 during construction)
            # but only for pygame's own drawing errors.
            pass
class GradientShape(Drawable, use_parent_theme=False):
    # Base class for color-gradient drawables. A gradient "type" is a module
    # level drawing function (horizontal / vertical / radial / squared) that
    # builds a surface from a size and two RGBA tuples.
    TYPE_HORIZONTAL = horizontal
    TYPE_VERTICAL = vertical
    TYPE_RADIAL = radial
    TYPE_SQUARED = squared

    def __init__(self, left_color: pygame.Color, right_color: pygame.Color, gradient_type: Callable[..., None]):
        Drawable.__init__(self)
        # Start fully transparent; the property setters below validate and
        # trigger the first image build.
        self.__left_color = TRANSPARENT
        self.__right_color = TRANSPARENT
        self.__gradient_type = gradient_type
        self.left_color = left_color
        self.right_color = right_color

    @property
    def left_color(self) -> pygame.Color:
        return self.__left_color

    @left_color.setter
    def left_color(self, color: pygame.Color) -> None:
        self.__left_color = pygame.Color(color)
        self.__update_image()

    @property
    def right_color(self) -> pygame.Color:
        return self.__right_color

    @right_color.setter
    def right_color(self, color: pygame.Color) -> None:
        self.__right_color = pygame.Color(color)
        self.__update_image()

    def __update_image(self) -> None:
        # Nothing to render while the drawable has no area yet.
        if self.w <= 0 or self.h <= 0:
            return
        first, second = self.left_color, self.right_color
        start_color = (first.r, first.g, first.b, first.a)
        end_color = (second.r, second.g, second.b, second.a)
        # Radial gradients take a radius, the others take the full (w, h) size.
        if self.__gradient_type is self.TYPE_RADIAL:
            size = min(self.width // 2, self.height // 2)
        else:
            size = self.size
        self.image = self.__gradient_type(size, start_color, end_color)

    def set_size(self, *size: Union[int, tuple[int, int]], smooth=True) -> None:
        # pylint: disable=unused-argument
        # Smoothing is forced off: the gradient is regenerated at the new
        # size instead of being rescaled.
        Drawable.set_size(self, *size, smooth=False)
        self.__update_image()

    def set_width(self, width: float, smooth=True) -> None:
        Drawable.set_width(self, width, smooth=False)
        self.__update_image()

    def set_height(self, height: float, smooth=True) -> None:
        Drawable.set_height(self, height, smooth=False)
        self.__update_image()
class HorizontalGradientShape(GradientShape, use_parent_theme=False):
    # Left-to-right gradient between two colors.
    def __init__(self, width: int, height: int, left_color: pygame.Color, right_color: pygame.Color):
        super().__init__(left_color, right_color, GradientShape.TYPE_HORIZONTAL)
        self.set_size(width, height)
class VerticalGradientShape(GradientShape, use_parent_theme=False):
    # Top-to-bottom gradient between two colors.
    def __init__(self, width: int, height: int, left_color: pygame.Color, right_color: pygame.Color):
        super().__init__(left_color, right_color, GradientShape.TYPE_VERTICAL)
        self.set_size(width, height)
class SquaredGradientShape(GradientShape, use_parent_theme=False):
    # Gradient radiating in a square pattern between two colors.
    def __init__(self, width: int, height: int, left_color: pygame.Color, right_color: pygame.Color):
        super().__init__(left_color, right_color, GradientShape.TYPE_SQUARED)
        self.set_size(width, height)
class RadialGradientShape(GradientShape, use_parent_theme=False):
    # Circular gradient; the surface is a square whose side is the diameter.

    def __init__(self, radius: int, left_color: pygame.Color, right_color: pygame.Color):
        super().__init__(left_color, right_color, GradientShape.TYPE_RADIAL)
        self.__radius = 0
        self.radius = radius

    @property
    def radius(self) -> int:
        return self.__radius

    @radius.setter
    def radius(self, value: int) -> None:
        # Clamp to >= 0, then resize the square surface to the diameter.
        # GradientShape.set_size is called directly (not self.set_size), so
        # __update_on_resize is deliberately skipped here: the radius was
        # just set explicitly and must not be recomputed from the size.
        self.__radius = int(value)
        if self.__radius < 0:
            self.__radius = 0
        GradientShape.set_size(self, self.__radius * 2)

    def set_size(self, *size: Union[int, tuple[int, int]], smooth=True) -> None:
        # Resizing through the public API re-derives the radius from the size.
        GradientShape.set_size(self, *size, smooth=smooth)
        self.__update_on_resize()

    def set_width(self, width: float, smooth=True) -> None:
        GradientShape.set_width(self, width, smooth=smooth)
        self.__update_on_resize()

    def set_height(self, height: float, smooth=True) -> None:
        GradientShape.set_height(self, height, smooth=smooth)
        self.__update_on_resize()

    def __update_on_resize(self) -> None:
        # Largest circle that fits into the (possibly non-square) new size.
        self.__radius = min(self.width // 2, self.height // 2)
| 40.668508 | 127 | 0.667097 |
ace579696a5dcc04b195a7a79ca874b8615a27d7 | 10,009 | py | Python | examples_python/a1.py | mayataka/invariant-ekf | 775d9ab5ac7599fe2fd983b8a907c241c7d3a8e0 | [
"BSD-3-Clause"
] | 1 | 2022-03-28T12:38:09.000Z | 2022-03-28T12:38:09.000Z | examples_python/a1.py | mayataka/inekf | 775d9ab5ac7599fe2fd983b8a907c241c7d3a8e0 | [
"BSD-3-Clause"
] | null | null | null | examples_python/a1.py | mayataka/inekf | 775d9ab5ac7599fe2fd983b8a907c241c7d3a8e0 | [
"BSD-3-Clause"
] | null | null | null | import a1_simulator
import numpy as np
import inekf
from scipy.spatial.transform import Rotation
import matplotlib.pyplot as plt
# --- Simulation and InEKF state-estimator setup ------------------------------
PATH_TO_URDF = "a1_description/urdf/a1_friction.urdf"
TIME_STEP = 0.0025
# Simulated Unitree A1; Gaussian noise is injected on the IMU and joint sensors.
sim = a1_simulator.A1Simulator(PATH_TO_URDF, TIME_STEP,
                               imu_gyro_noise=0.01, imu_lin_accel_noise=0.1,
                               imu_gyro_bias_noise=0.00001,
                               imu_lin_accel_bias_noise=0.0001,
                               qJ_noise=0.001, dqJ_noise=0.1,
                               ddqJ_noise=1.0, tauJ_noise=0.1)
# Estimator settings preconfigured for the A1, then tuned: contact-probability
# logistic parameters (beta0/beta1), contact covariances, and sensor filters.
estimator_settings = inekf.StateEstimatorSettings.UnitreeA1(PATH_TO_URDF, TIME_STEP)
estimator_settings.contact_estimator_settings.beta0 = [-20.0, -20.0, -20.0, -20.0]
estimator_settings.contact_estimator_settings.beta1 = [0.7, 0.7, 0.7, 0.7]
estimator_settings.contact_estimator_settings.contact_force_cov_alpha = 10.0
estimator_settings.noise_params.contact_cov = 0.01 * np.eye(3, 3)
estimator_settings.contact_position_noise = 0.1
estimator_settings.contact_rotation_noise = 0.1
# Cutoff values of the sensor low-pass filters (presumably Hz — confirm
# against the inekf documentation).
estimator_settings.lpf_gyro_accel_cutoff = 250
estimator_settings.lpf_lin_accel_cutoff = 250
estimator_settings.lpf_dqJ_cutoff = 10
estimator_settings.lpf_ddqJ_cutoff = 5
estimator_settings.lpf_tauJ_cutoff = 10
estimator = inekf.StateEstimator(estimator_settings)
sim.init()
# Let the robot settle for 200 steps before initializing the estimator.
for i in range(200):
    sim.step_simulation()
base_pos, base_quat, base_lin_vel_world, base_ang_vel_world = sim.get_base_state(coordinate='world')
# Initialize the estimator from the ground-truth base state with zero IMU biases.
estimator.init(base_pos=base_pos, base_quat=base_quat, base_lin_vel_world=base_lin_vel_world,
               imu_gyro_bias=np.zeros(3), imu_lin_accel_bias=np.zeros(3))
# --- Logging buffers and live plot setup -------------------------------------
# Ground-truth and estimated base states, kept as scrolling windows.
base_pos_true = []
base_quat_true = []
base_lin_vel_true = []
base_ang_vel_true = []
base_pos_est = []
base_quat_est = []
base_lin_vel_est = []
base_ang_vel_est = []
plt.ion()
fig, axes = plt.subplots(2, 2, figsize=(14, 10))
ax_pos, ax_quat, ax_lin_vel, ax_ang_vel = axes[0][0], axes[0][1], axes[1][0], axes[1][1],
# Number of most recent samples shown in the scrolling plot window.
PLT_WINDOW_SIZE = 200
# One line per state component: solid = estimate, dashed = ground truth.
line_pos_x_est, = ax_pos.plot([0], [0], linestyle='solid', color='blue', label=r'$x$'+' (est)')
line_pos_x_true, = ax_pos.plot([0], [0], linestyle='dashed', color='blue', label=r'$x$'+' (true)')
line_pos_y_est, = ax_pos.plot([0], [0], linestyle='solid', color='red', label=r'$y$'+' (est)')
line_pos_y_true, = ax_pos.plot([0], [0], linestyle='dashed', color='red', label=r'$y$'+' (true)')
line_pos_z_est, = ax_pos.plot([0], [0], linestyle='solid', color='green', label=r'$z$'+' (est)')
line_pos_z_true, = ax_pos.plot([0], [0], linestyle='dashed', color='green', label=r'$z$'+' (true)')
line_quat_x_est, = ax_quat.plot([0], [0], linestyle='solid', color='blue', label=r'$q_x$'+' (est)')
line_quat_x_true, = ax_quat.plot([0], [0], linestyle='dashed', color='blue', label=r'$q_x$'+' (true)')
line_quat_y_est, = ax_quat.plot([0], [0], linestyle='solid', color='red', label=r'$q_y$'+' (est)')
line_quat_y_true, = ax_quat.plot([0], [0], linestyle='dashed', color='red', label=r'$q_y$'+' (true)')
line_quat_z_est, = ax_quat.plot([0], [0], linestyle='solid', color='green', label=r'$q_z$'+' (est)')
line_quat_z_true, = ax_quat.plot([0], [0], linestyle='dashed', color='green', label=r'$q_z$'+' (true)')
line_quat_w_est, = ax_quat.plot([0], [0], linestyle='solid', color='yellow', label=r'$q_w$'+' (est)')
line_quat_w_true, = ax_quat.plot([0], [0], linestyle='dashed', color='yellow', label=r'$q_w$'+' (true)')
line_lin_vel_x_est, = ax_lin_vel.plot([0], [0], linestyle='solid', color='blue', label=r'$v_x$'+' (est)')
line_lin_vel_x_true, = ax_lin_vel.plot([0], [0], linestyle='dashed', color='blue', label=r'$v_x$'+' (true)')
line_lin_vel_y_est, = ax_lin_vel.plot([0], [0], linestyle='solid', color='red', label=r'$v_y$'+' (est)')
line_lin_vel_y_true, = ax_lin_vel.plot([0], [0], linestyle='dashed', color='red', label=r'$v_y$'+' (true)')
line_lin_vel_z_est, = ax_lin_vel.plot([0], [0], linestyle='solid', color='green', label=r'$v_z$'+' (est)')
line_lin_vel_z_true, = ax_lin_vel.plot([0], [0], linestyle='dashed', color='green', label=r'$v_z$'+' (true)')
line_ang_vel_x_est, = ax_ang_vel.plot([0], [0], linestyle='solid', color='blue', label=r'$w_x$'+' (est)')
line_ang_vel_x_true, = ax_ang_vel.plot([0], [0], linestyle='dashed', color='blue', label=r'$w_x$'+' (true)')
line_ang_vel_y_est, = ax_ang_vel.plot([0], [0], linestyle='solid', color='red', label=r'$w_y$'+' (est)')
# FIX: label previously said $w_x$ (copy-paste error); this is the y component.
line_ang_vel_y_true, = ax_ang_vel.plot([0], [0], linestyle='dashed', color='red', label=r'$w_y$'+' (true)')
line_ang_vel_z_est, = ax_ang_vel.plot([0], [0], linestyle='solid', color='green', label=r'$w_z$'+' (est)')
# FIX: label previously said $w_x$ (copy-paste error); this is the z component.
line_ang_vel_z_true, = ax_ang_vel.plot([0], [0], linestyle='dashed', color='green', label=r'$w_z$'+' (true)')
ax_pos.set_title('Base position [m]')
ax_quat.set_title('Base orientation (quaternion)')
ax_lin_vel.set_title('Base linear velocity [m/s]')
ax_ang_vel.set_title('Base angular velocity [rad/s]')
ax_pos.set_ylim([-0.5, 0.5])
ax_quat.set_ylim([-0.5, 1.2])
ax_lin_vel.set_ylim([-2, 2])
ax_ang_vel.set_ylim([-5, 5])
ax_pos.legend(ncol=3)
ax_quat.legend(ncol=4)
ax_lin_vel.legend(ncol=3)
ax_ang_vel.legend(ncol=3)
# --- Main loop: step the sim, run the estimator, and refresh the live plot ---
# NOTE(review): leading indentation was lost in this dump; the loop structure
# below is reconstructed from the statement flow — confirm against the
# original file (in particular whether apply_position_command sits inside
# the `if i%100 == 0` block).
for i in range(10000):
    sim.step_simulation()
    if i%100 == 0:
        # qJ_cmd = 0.01 * np.random.normal(12) + sim.qJ_ref
        qJ_cmd = sim.qJ_ref
        sim.apply_position_command(qJ_cmd)
    # estimate state
    imu_gyro_raw, imu_lin_acc_raw = sim.get_imu_state()
    qJ, dqJ, tauJ = sim.get_joint_state()
    estimator.update(imu_gyro_raw=imu_gyro_raw, imu_lin_accel_raw=imu_lin_acc_raw,
                     qJ=qJ, dqJ=dqJ, tauJ=tauJ)
    # .copy() because the estimator may reuse the underlying arrays.
    base_pos_est.append(estimator.base_position_estimate.copy())
    base_quat_est.append(estimator.base_quaternion_estimate.copy())
    base_lin_vel_est.append(estimator.base_linear_velocity_estimate_local.copy())
    base_ang_vel_est.append(estimator.base_angular_velocity_estimate_local.copy())
    # true state
    base_pos, base_quat, base_lin_vel, base_ang_vel = sim.get_base_state(coordinate='local')
    base_pos_true.append(base_pos.copy())
    base_quat_true.append(base_quat.copy())
    base_lin_vel_true.append(base_lin_vel.copy())
    base_ang_vel_true.append(base_ang_vel.copy())
    # estimation error (rotation error expressed as the vector part of the
    # relative quaternion R_true^T * R_est)
    R_true = Rotation.from_quat(base_quat).as_matrix()
    diff = Rotation.from_matrix(R_true.T@estimator.base_rotation_estimate).as_quat()[0:3]
    print('base_pos error:', base_pos-estimator.base_position_estimate)
    print('base_rot error:', diff)
    print('base_lin_vel error:', base_lin_vel-estimator.base_linear_velocity_estimate_local)
    print('base_ang_vel error:', base_ang_vel-estimator.base_angular_velocity_estimate_local)
    print('contact_probability:', estimator.contact_probability)
    # Keep only the most recent PLT_WINDOW_SIZE samples for plotting.
    if len(base_pos_est) > PLT_WINDOW_SIZE:
        base_pos_true.pop(0)
        base_quat_true.pop(0)
        base_lin_vel_true.pop(0)
        base_ang_vel_true.pop(0)
        base_pos_est.pop(0)
        base_quat_est.pop(0)
        base_lin_vel_est.pop(0)
        base_ang_vel_est.pop(0)
    # Refresh the scrolling time axis and all line data.
    ndata = len(base_pos_est)
    tstart = (i-ndata) * TIME_STEP
    tend = i * TIME_STEP
    times = np.linspace(tstart, tend, ndata)
    line_pos_x_true.set_xdata(times)
    line_pos_y_true.set_xdata(times)
    line_pos_z_true.set_xdata(times)
    line_pos_x_est.set_xdata(times)
    line_pos_y_est.set_xdata(times)
    line_pos_z_est.set_xdata(times)
    line_quat_x_true.set_xdata(times)
    line_quat_y_true.set_xdata(times)
    line_quat_z_true.set_xdata(times)
    line_quat_w_true.set_xdata(times)
    line_quat_x_est.set_xdata(times)
    line_quat_y_est.set_xdata(times)
    line_quat_z_est.set_xdata(times)
    line_quat_w_est.set_xdata(times)
    line_lin_vel_x_true.set_xdata(times)
    line_lin_vel_y_true.set_xdata(times)
    line_lin_vel_z_true.set_xdata(times)
    line_lin_vel_x_est.set_xdata(times)
    line_lin_vel_y_est.set_xdata(times)
    line_lin_vel_z_est.set_xdata(times)
    line_ang_vel_x_true.set_xdata(times)
    line_ang_vel_y_true.set_xdata(times)
    line_ang_vel_z_true.set_xdata(times)
    line_ang_vel_x_est.set_xdata(times)
    line_ang_vel_y_est.set_xdata(times)
    line_ang_vel_z_est.set_xdata(times)
    line_pos_x_true.set_ydata(np.array(base_pos_true).T[0])
    line_pos_y_true.set_ydata(np.array(base_pos_true).T[1])
    line_pos_z_true.set_ydata(np.array(base_pos_true).T[2])
    line_pos_x_est.set_ydata(np.array(base_pos_est).T[0])
    line_pos_y_est.set_ydata(np.array(base_pos_est).T[1])
    line_pos_z_est.set_ydata(np.array(base_pos_est).T[2])
    line_quat_x_true.set_ydata(np.array(base_quat_true).T[0])
    line_quat_y_true.set_ydata(np.array(base_quat_true).T[1])
    line_quat_z_true.set_ydata(np.array(base_quat_true).T[2])
    line_quat_w_true.set_ydata(np.array(base_quat_true).T[3])
    line_quat_x_est.set_ydata(np.array(base_quat_est).T[0])
    line_quat_y_est.set_ydata(np.array(base_quat_est).T[1])
    line_quat_z_est.set_ydata(np.array(base_quat_est).T[2])
    line_quat_w_est.set_ydata(np.array(base_quat_est).T[3])
    line_lin_vel_x_true.set_ydata(np.array(base_lin_vel_true).T[0])
    line_lin_vel_y_true.set_ydata(np.array(base_lin_vel_true).T[1])
    line_lin_vel_z_true.set_ydata(np.array(base_lin_vel_true).T[2])
    line_lin_vel_x_est.set_ydata(np.array(base_lin_vel_est).T[0])
    line_lin_vel_y_est.set_ydata(np.array(base_lin_vel_est).T[1])
    line_lin_vel_z_est.set_ydata(np.array(base_lin_vel_est).T[2])
    line_ang_vel_x_true.set_ydata(np.array(base_ang_vel_true).T[0])
    line_ang_vel_y_true.set_ydata(np.array(base_ang_vel_true).T[1])
    line_ang_vel_z_true.set_ydata(np.array(base_ang_vel_true).T[2])
    line_ang_vel_x_est.set_ydata(np.array(base_ang_vel_est).T[0])
    line_ang_vel_y_est.set_ydata(np.array(base_ang_vel_est).T[1])
    line_ang_vel_z_est.set_ydata(np.array(base_ang_vel_est).T[2])
    ax_pos.set_xlim([tstart, tend])
    ax_quat.set_xlim([tstart, tend])
    ax_lin_vel.set_xlim([tstart, tend])
    ax_ang_vel.set_xlim([tstart, tend])
    fig.canvas.draw()
    fig.canvas.flush_events()
sim.disconnect() | 49.305419 | 109 | 0.717454 |
ace579cd6241f3416e0ede09f9ffec9f84c83f02 | 1,230 | py | Python | libs/libnova/libnova.py | rlancaste/craft-blueprints-kde | 8f2834422c96c7e5465fbe00911915066a72cc0f | [
"BSD-2-Clause"
] | null | null | null | libs/libnova/libnova.py | rlancaste/craft-blueprints-kde | 8f2834422c96c7e5465fbe00911915066a72cc0f | [
"BSD-2-Clause"
] | 1 | 2020-01-10T01:06:16.000Z | 2020-01-10T01:06:16.000Z | libs/libnova/libnova.py | rlancaste/craft-blueprints-kde | 8f2834422c96c7e5465fbe00911915066a72cc0f | [
"BSD-2-Clause"
] | 2 | 2020-01-02T18:22:12.000Z | 2020-08-05T13:39:21.000Z | import info
#
# this library is used by kdeedu/kstars
# the library is c-only
#
class subinfo(info.infoclass):
    # Craft blueprint metadata for libnova (a C-only celestial-mechanics
    # library used by kdeedu/kstars).

    def setTargets(self):
        self.targets['0.15.0'] = 'download.sourceforge.net/libnova/libnova-0.15.0.tar.gz'
        self.targetInstSrc['0.15.0'] = 'libnova-0.15.0'
        self.targetDigests['0.15.0'] = (['7c5aa33e45a3e7118d77df05af7341e61784284f1e8d0d965307f1663f415bb1'], CraftHash.HashAlgorithm.SHA256)
        #self.patchToApply['0.15.0'] = [('libnova-20101215.diff', 1),
        #                               ('libnova-20130629.diff', 1)]
        self.patchToApply['0.15.0'] = [('libnova-remove-conflicting-definition.patch', 1)]  # https://github.com/msys2/MINGW-packages/tree/92798d888cfaf779a83bae0a293a197d8825aac2/mingw-w64-libnova
        self.description = "a Celestial Mechanics, Astrometry and Astrodynamics library"
        self.defaultTarget = '0.15.0'

    def setDependencies(self):
        self.runtimeDependencies["virtual/base"] = None
from Package.AutoToolsPackageBase import *
class Package(AutoToolsPackageBase):
def __init__(self, **args):
AutoToolsPackageBase.__init__(self)
self.subinfo.options.configure.args += " --disable-static --enable-shared"
| 39.677419 | 196 | 0.682927 |
ace57a68cdd5ea53189eb80d26a59633b47a4574 | 961 | py | Python | 2016/slides/GraphTheory/graph.py | MercerBinaryBears/ProgrammingTeam | 5a4819753ad14d16022778388c772da100e22517 | [
"MIT"
] | 1 | 2016-07-18T04:14:50.000Z | 2016-07-18T04:14:50.000Z | 2016/slides/GraphTheory/graph.py | MercerBinaryBears/ProgrammingTeam | 5a4819753ad14d16022778388c772da100e22517 | [
"MIT"
] | 7 | 2016-08-10T22:56:45.000Z | 2016-08-11T13:12:19.000Z | 2016/slides/GraphTheory/graph.py | tylerburnham42/ProgrammingTeam | 5a4819753ad14d16022778388c772da100e22517 | [
"MIT"
] | null | null | null | class Graph:
def __init__(self):
    # Directed adjacency map: vertex -> {successor: edge weight}.
    self.adjacency = {}
def connect(self, vertex1, vertex2, weight=None):
    """Add a directed edge vertex1 -> vertex2 with an optional weight.

    Both endpoints are registered in the adjacency map, so a vertex that
    only ever appears as an edge target is still known to is_vertex()
    and neighbors(). (Previously only vertex1 was registered, so e.g.
    neighbors() raised KeyError for pure sink vertices.)
    """
    if vertex1 not in self.adjacency:
        self.adjacency[vertex1] = {}
    if vertex2 not in self.adjacency:
        self.adjacency[vertex2] = {}
    self.adjacency[vertex1][vertex2] = weight
def is_vertex(self, vertex):
    """Return True if *vertex* has an entry in the adjacency map."""
    known_vertices = self.adjacency
    return vertex in known_vertices
def are_connected(self, vertex1, vertex2):
    """Return True when a directed edge vertex1 -> vertex2 exists."""
    if not self.is_vertex(vertex1):
        return False
    return vertex2 in self.adjacency[vertex1]
def weight(self, vertex1, vertex2):
    """Return the weight on edge vertex1 -> vertex2 (KeyError if absent)."""
    outgoing = self.adjacency[vertex1]
    return outgoing[vertex2]
def neighbors(self, vertex1):
    """Return the direct successors of *vertex1* as a list.

    Previously this returned the live ``dict_keys`` view, which prints
    as ``dict_keys([...])`` (the usage example below expects a plain
    list) and mutates under the caller's feet if edges are added.
    """
    return list(self.adjacency[vertex1])
# Test Code
G = Graph()
G.connect('A', 'B')
G.connect('A', 'C')
G.connect('B', 'C')
G.connect('C', 'D')
print(G.are_connected('A', 'B'))  # True
print(G.are_connected('B', 'A'))  # False, because the graph is directed!
print(G.are_connected('A', 'C'))  # True: the edge A -> C was added above (old comment wrongly said False)
print(G.neighbors('A'))  # the successors of A: 'B' and 'C'
| 27.457143 | 79 | 0.640999 |
ace57b58e00929864dda442ef02fa47ce4e2ab03 | 3,192 | py | Python | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/cloudsign/models/ComStamp.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | null | null | null | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/cloudsign/models/ComStamp.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | null | null | null | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/cloudsign/models/ComStamp.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | null | null | null | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class ComStamp(object):
    # Request payload describing a company seal (stamp) placement for the
    # JD Cloud signing API. All fields are optional; see the parameter docs.

    def __init__(self, signPositionType=None, keyword=None, positionX=None, positionY=None, offsetX=None, offsetY=None, page=None, sealName=None, imageB64=None, stampId=None, desc=None, isDefault=None, imageType=None, imageSize=None, imageHeight=None, imageWidth=None, orgName=None, legalPersonName=None, transactorName=None, transactorIdCardNum=None, transactorMobile=None, identifyType=None, identifyValue=None):
        """
        :param signPositionType: (Optional) Stamp positioning mode (0 = by coordinates, 1 = by keyword)
        :param keyword: (Optional) Keyword to stamp at (use either this or the coordinates)
        :param positionX: (Optional) Stamp X coordinate (use either the coordinates or the keyword)
        :param positionY: (Optional) Stamp Y coordinate (use either the coordinates or the keyword)
        :param offsetX: (Optional) X offset of the stamp (used together with positionX)
        :param offsetY: (Optional) Y offset of the stamp (used together with positionY)
        :param page: (Optional) Page number to stamp on (required for coordinate-based stamping)
        :param sealName: (Optional) Seal name; must be non-empty together with imageB64
        :param imageB64: (Optional) Base64-encoded seal image (use either this or stampId)
        :param stampId: (Optional) Seal ID (use either this or imageB64)
        :param desc: (Optional) Seal description
        :param isDefault: (Optional) Whether to use this seal as the default for future signings
        :param imageType: (Optional) Image type; only PNG is supported
        :param imageSize: (Optional) Image size, height * width
        :param imageHeight: (Optional) Image height
        :param imageWidth: (Optional) Image width
        :param orgName: (Optional) Company name
        :param legalPersonName: (Optional) Name of the legal representative
        :param transactorName: (Optional) Name of the agent handling the request
        :param transactorIdCardNum: (Optional) ID card number of the agent
        :param transactorMobile: (Optional) Mobile phone number of the agent
        :param identifyType: (Optional) Identifier type - usci (unified social credit code), orgCode (organization code), businessNum (business license number)
        :param identifyValue: (Optional) Identifier value
        """
        self.signPositionType = signPositionType
        self.keyword = keyword
        self.positionX = positionX
        self.positionY = positionY
        self.offsetX = offsetX
        self.offsetY = offsetY
        self.page = page
        self.sealName = sealName
        self.imageB64 = imageB64
        self.stampId = stampId
        self.desc = desc
        self.isDefault = isDefault
        self.imageType = imageType
        self.imageSize = imageSize
        self.imageHeight = imageHeight
        self.imageWidth = imageWidth
        self.orgName = orgName
        self.legalPersonName = legalPersonName
        self.transactorName = transactorName
        self.transactorIdCardNum = transactorIdCardNum
        self.transactorMobile = transactorMobile
        self.identifyType = identifyType
        self.identifyValue = identifyValue
ace57b6cb54bbf2333977a8b410ca8c1fa4c39dc | 20,812 | py | Python | saleor/graphql/translations/types.py | eanknd/saleor | 08aa724176be00d7aaf654f14e9ae99dd4327f97 | [
"CC-BY-4.0"
] | 1,392 | 2021-10-06T15:54:28.000Z | 2022-03-31T20:50:55.000Z | saleor/graphql/translations/types.py | eanknd/saleor | 08aa724176be00d7aaf654f14e9ae99dd4327f97 | [
"CC-BY-4.0"
] | 888 | 2021-10-06T10:48:54.000Z | 2022-03-31T11:00:30.000Z | saleor/graphql/translations/types.py | eanknd/saleor | 08aa724176be00d7aaf654f14e9ae99dd4327f97 | [
"CC-BY-4.0"
] | 538 | 2021-10-07T16:21:27.000Z | 2022-03-31T22:58:57.000Z | from typing import List
import graphene
from django.conf import settings
from ...attribute import AttributeInputType
from ...attribute import models as attribute_models
from ...attribute.models import AttributeValue
from ...core.permissions import DiscountPermissions, ShippingPermissions
from ...core.tracing import traced_resolver
from ...discount import models as discount_models
from ...menu import models as menu_models
from ...page import models as page_models
from ...product import models as product_models
from ...shipping import models as shipping_models
from ...site import models as site_models
from ..channel import ChannelContext
from ..core.descriptions import DEPRECATED_IN_3X_FIELD, RICH_CONTENT
from ..core.enums import LanguageCodeEnum
from ..core.fields import JSONString, PermissionsField
from ..core.types import LanguageDisplay, ModelObjectType, NonNullList
from ..core.utils import str_to_enum
from ..page.dataloaders import SelectedAttributesByPageIdLoader
from ..product.dataloaders import (
SelectedAttributesByProductIdLoader,
SelectedAttributesByProductVariantIdLoader,
)
from .fields import TranslationField
def get_translatable_attribute_values(attributes: list) -> List[AttributeValue]:
    """Collect the values of translatable attributes.

    Each item of *attributes* is an assignment dict with an "attribute"
    and its "values"; only values of attribute input types that support
    translation are returned, in their original order.
    """
    return [
        value
        for assignment in attributes
        if assignment["attribute"].input_type
        in AttributeInputType.TRANSLATABLE_ATTRIBUTES
        for value in assignment["values"]
    ]
class BaseTranslationType(ModelObjectType):
    # Abstract base for all *Translation types; exposes the language field.
    language = graphene.Field(
        LanguageDisplay, description="Translation language.", required=True
    )

    class Meta:
        abstract = True

    @staticmethod
    @traced_resolver
    def resolve_language(root, _info):
        # Find the human-readable name for the stored language code among
        # the (code, name) pairs configured in settings.LANGUAGES.
        for entry in settings.LANGUAGES:
            if entry[0] == root.language_code:
                return LanguageDisplay(
                    code=LanguageCodeEnum[str_to_enum(root.language_code)],
                    language=entry[1],
                )
        # Unknown code: no language to display.
        return None
class AttributeValueTranslation(BaseTranslationType):
    # Translated fields of a single attribute value.
    # NOTE: comments are used instead of a class docstring on purpose —
    # graphene would expose a docstring as the GraphQL type description.
    id = graphene.GlobalID(required=True)
    name = graphene.String(required=True)
    rich_text = JSONString(description="Attribute value." + RICH_CONTENT)

    class Meta:
        model = attribute_models.AttributeValueTranslation
        interfaces = [graphene.relay.Node]
class AttributeValueTranslatableContent(ModelObjectType):
    # Source (translatable) fields of an attribute value plus its translation.
    id = graphene.GlobalID(required=True)
    name = graphene.String(required=True)
    rich_text = JSONString(description="Attribute value." + RICH_CONTENT)
    translation = TranslationField(
        AttributeValueTranslation, type_name="attribute value"
    )
    attribute_value = graphene.Field(
        "saleor.graphql.attribute.types.AttributeValue",
        description="Represents a value of an attribute.",
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Get model fields from the root level queries."
        ),
    )

    class Meta:
        model = attribute_models.AttributeValue
        interfaces = [graphene.relay.Node]

    @staticmethod
    def resolve_attribute_value(root: attribute_models.AttributeValue, _info):
        # The root already is the AttributeValue instance.
        return root
class AttributeTranslation(BaseTranslationType):
    # Translated name of an attribute.
    id = graphene.GlobalID(required=True)
    name = graphene.String(required=True)

    class Meta:
        model = attribute_models.AttributeTranslation
        interfaces = [graphene.relay.Node]
class AttributeTranslatableContent(ModelObjectType):
    # Source (translatable) fields of an attribute plus its translation.
    id = graphene.GlobalID(required=True)
    name = graphene.String(required=True)
    translation = TranslationField(AttributeTranslation, type_name="attribute")
    attribute = graphene.Field(
        "saleor.graphql.attribute.types.Attribute",
        description="Custom attribute of a product.",
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Get model fields from the root level queries."
        ),
    )

    class Meta:
        model = attribute_models.Attribute
        interfaces = [graphene.relay.Node]

    @staticmethod
    def resolve_attribute(root: attribute_models.Attribute, _info):
        # The root already is the Attribute instance.
        return root
class ProductVariantTranslation(BaseTranslationType):
    # Translated name of a product variant.
    id = graphene.GlobalID(required=True)
    name = graphene.String(required=True)

    class Meta:
        model = product_models.ProductVariantTranslation
        interfaces = [graphene.relay.Node]
class ProductVariantTranslatableContent(ModelObjectType):
    # Translatable fields of a product variant and its attribute values.
    id = graphene.GlobalID(required=True)
    name = graphene.String(required=True)
    translation = TranslationField(
        ProductVariantTranslation, type_name="product variant"
    )
    product_variant = graphene.Field(
        "saleor.graphql.product.types.products.ProductVariant",
        description=(
            "Represents a version of a product such as different size or color."
        ),
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Get model fields from the root level queries."
        ),
    )
    attribute_values = NonNullList(
        AttributeValueTranslatableContent,
        required=True,
        description="List of product variant attribute values that can be translated.",
    )

    class Meta:
        model = product_models.ProductVariant
        interfaces = [graphene.relay.Node]

    @staticmethod
    def resolve_product_variant(root: product_models.ProductVariant, info):
        # Wrap in a channel-agnostic context; channel data is resolved later.
        return ChannelContext(node=root, channel_slug=None)

    @staticmethod
    def resolve_attribute_values(root: product_models.ProductVariant, info):
        # Batch-load assigned attributes, then keep only translatable values.
        return (
            SelectedAttributesByProductVariantIdLoader(info.context)
            .load(root.id)
            .then(get_translatable_attribute_values)
        )
class ProductTranslation(BaseTranslationType):
    # Translated SEO fields, name, and rich-text description of a product.
    id = graphene.GlobalID(required=True)
    seo_title = graphene.String()
    seo_description = graphene.String()
    name = graphene.String()
    description = JSONString(
        description="Translated description of the product." + RICH_CONTENT
    )
    description_json = JSONString(
        description="Translated description of the product." + RICH_CONTENT,
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Use the `description` field instead."
        ),
    )

    class Meta:
        model = product_models.ProductTranslation
        interfaces = [graphene.relay.Node]

    @staticmethod
    def resolve_description_json(root: product_models.ProductTranslation, _info):
        # Legacy field: never returns null — an empty dict stands in for
        # a missing description.
        description = root.description
        return description if description is not None else {}
class ProductTranslatableContent(ModelObjectType):
    # Translatable fields of a product and its attribute values.
    id = graphene.GlobalID(required=True)
    seo_title = graphene.String()
    seo_description = graphene.String()
    name = graphene.String(required=True)
    description = JSONString(description="Description of the product." + RICH_CONTENT)
    description_json = JSONString(
        description="Description of the product." + RICH_CONTENT,
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Use the `description` field instead."
        ),
    )
    translation = TranslationField(ProductTranslation, type_name="product")
    product = graphene.Field(
        "saleor.graphql.product.types.products.Product",
        description="Represents an individual item for sale in the storefront.",
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Get model fields from the root level queries."
        ),
    )
    attribute_values = NonNullList(
        AttributeValueTranslatableContent,
        required=True,
        description="List of product attribute values that can be translated.",
    )

    class Meta:
        model = product_models.Product
        interfaces = [graphene.relay.Node]

    @staticmethod
    def resolve_product(root: product_models.Product, info):
        # Wrap in a channel-agnostic context; channel data is resolved later.
        return ChannelContext(node=root, channel_slug=None)

    @staticmethod
    def resolve_description_json(root: product_models.Product, _info):
        # Legacy field: empty dict instead of null for a missing description.
        description = root.description
        return description if description is not None else {}

    @staticmethod
    def resolve_attribute_values(root: product_models.Product, info):
        # Batch-load assigned attributes, then keep only translatable values.
        return (
            SelectedAttributesByProductIdLoader(info.context)
            .load(root.id)
            .then(get_translatable_attribute_values)
        )
class CollectionTranslation(BaseTranslationType):
    # Translated SEO fields, name, and rich-text description of a collection.
    id = graphene.GlobalID(required=True)
    seo_title = graphene.String()
    seo_description = graphene.String()
    name = graphene.String()
    description = JSONString(
        description="Translated description of the collection." + RICH_CONTENT
    )
    description_json = JSONString(
        description="Translated description of the collection." + RICH_CONTENT,
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Use the `description` field instead."
        ),
    )

    class Meta:
        model = product_models.CollectionTranslation
        interfaces = [graphene.relay.Node]

    @staticmethod
    def resolve_description_json(root: product_models.CollectionTranslation, _info):
        # Legacy field: empty dict instead of null for a missing description.
        description = root.description
        return description if description is not None else {}
class CollectionTranslatableContent(ModelObjectType):
    # Translatable fields of a collection plus its translation.
    id = graphene.GlobalID(required=True)
    seo_title = graphene.String()
    seo_description = graphene.String()
    name = graphene.String(required=True)
    description = JSONString(
        description="Description of the collection." + RICH_CONTENT
    )
    description_json = JSONString(
        description="Description of the collection." + RICH_CONTENT,
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Use the `description` field instead."
        ),
    )
    translation = TranslationField(CollectionTranslation, type_name="collection")
    collection = graphene.Field(
        "saleor.graphql.product.types.products.Collection",
        description="Represents a collection of products.",
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Get model fields from the root level queries."
        ),
    )

    class Meta:
        model = product_models.Collection
        interfaces = [graphene.relay.Node]

    @staticmethod
    def resolve_collection(root: product_models.Collection, info):
        # Unlike the sibling resolvers, this re-fetches the collection and
        # returns None when it no longer exists — presumably to guard
        # against stale roots; NOTE(review): confirm the extra query is
        # intentional before "simplifying" it to `return root`.
        collection = product_models.Collection.objects.all().filter(pk=root.id).first()
        return (
            ChannelContext(node=collection, channel_slug=None) if collection else None
        )

    @staticmethod
    def resolve_description_json(root: product_models.Collection, _info):
        # Legacy field: empty dict instead of null for a missing description.
        description = root.description
        return description if description is not None else {}
class CategoryTranslation(BaseTranslationType):
    """Translated fields of a category for a given language."""

    id = graphene.GlobalID(required=True)
    seo_title = graphene.String()
    seo_description = graphene.String()
    name = graphene.String()
    description = JSONString(
        description="Translated description of the category." + RICH_CONTENT
    )
    description_json = JSONString(
        description="Translated description of the category." + RICH_CONTENT,
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Use the `description` field instead."
        ),
    )

    class Meta:
        model = product_models.CategoryTranslation
        interfaces = [graphene.relay.Node]

    @staticmethod
    def resolve_description_json(root: product_models.CategoryTranslation, _info):
        # Deprecated alias of `description`; normalize a missing value to {}.
        description = root.description
        return description if description is not None else {}
class CategoryTranslatableContent(ModelObjectType):
    """Category fields that can be translated, plus access to the
    existing translations and the underlying category."""

    id = graphene.GlobalID(required=True)
    seo_title = graphene.String()
    seo_description = graphene.String()
    name = graphene.String(required=True)
    description = JSONString(description="Description of the category." + RICH_CONTENT)
    description_json = JSONString(
        description="Description of the category." + RICH_CONTENT,
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Use the `description` field instead."
        ),
    )
    translation = TranslationField(CategoryTranslation, type_name="category")
    category = graphene.Field(
        "saleor.graphql.product.types.products.Category",
        description="Represents a single category of products.",
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Get model fields from the root level queries."
        ),
    )

    class Meta:
        model = product_models.Category
        interfaces = [graphene.relay.Node]

    @staticmethod
    def resolve_category(root: product_models.Category, _info):
        # Unlike the channel-aware types in this file, no ChannelContext wrapping.
        return root

    @staticmethod
    def resolve_description_json(root: product_models.Category, _info):
        # Deprecated alias of `description`; normalize a missing value to {}.
        description = root.description
        return description if description is not None else {}
class PageTranslation(BaseTranslationType):
    """Translated fields of a page for a given language."""

    id = graphene.GlobalID(required=True)
    seo_title = graphene.String()
    seo_description = graphene.String()
    title = graphene.String()
    content = JSONString(description="Translated content of the page." + RICH_CONTENT)
    content_json = JSONString(
        # Fixed copy-paste: this field mirrors `content`, not a description
        # (matches the wording used by PageTranslatableContent.content_json).
        description="Translated content of the page." + RICH_CONTENT,
        deprecation_reason=f"{DEPRECATED_IN_3X_FIELD} Use the `content` field instead.",
    )

    class Meta:
        model = page_models.PageTranslation
        interfaces = [graphene.relay.Node]

    @staticmethod
    def resolve_content_json(root: page_models.PageTranslation, _info):
        # Deprecated alias of `content`; normalize a missing value to {}.
        content = root.content
        return content if content is not None else {}
class PageTranslatableContent(ModelObjectType):
    """Page fields that can be translated, plus access to the existing
    translations, the underlying page, and its attribute values."""

    id = graphene.GlobalID(required=True)
    seo_title = graphene.String()
    seo_description = graphene.String()
    title = graphene.String(required=True)
    content = JSONString(description="Content of the page." + RICH_CONTENT)
    content_json = JSONString(
        description="Content of the page." + RICH_CONTENT,
        deprecation_reason=f"{DEPRECATED_IN_3X_FIELD} Use the `content` field instead.",
    )
    translation = TranslationField(PageTranslation, type_name="page")
    page = graphene.Field(
        "saleor.graphql.page.types.Page",
        description=(
            "A static page that can be manually added by a shop operator "
            "through the dashboard."
        ),
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Get model fields from the root level queries."
        ),
    )
    attribute_values = NonNullList(
        AttributeValueTranslatableContent,
        required=True,
        description="List of page content attribute values that can be translated.",
    )

    class Meta:
        model = page_models.Page
        interfaces = [graphene.relay.Node]

    @staticmethod
    def resolve_page(root: page_models.Page, info):
        # Respect page visibility rules for the requesting user.
        return (
            page_models.Page.objects.visible_to_user(info.context.user)
            .filter(pk=root.id)
            .first()
        )

    @staticmethod
    def resolve_content_json(root: page_models.Page, _info):
        # Deprecated alias of `content`; normalize a missing value to {}.
        content = root.content
        return content if content is not None else {}

    @staticmethod
    def resolve_attribute_values(root: page_models.Page, info):
        # Batch-load selected attributes, then keep only translatable values.
        return (
            SelectedAttributesByPageIdLoader(info.context)
            .load(root.id)
            .then(get_translatable_attribute_values)
        )
class VoucherTranslation(BaseTranslationType):
    """Translated fields of a voucher for a given language."""

    id = graphene.GlobalID(required=True)
    name = graphene.String()

    class Meta:
        model = discount_models.VoucherTranslation
        interfaces = [graphene.relay.Node]
class VoucherTranslatableContent(ModelObjectType):
    """Voucher fields that can be translated, plus permission-guarded
    access to the underlying voucher."""

    id = graphene.GlobalID(required=True)
    name = graphene.String()
    translation = TranslationField(VoucherTranslation, type_name="voucher")
    voucher = PermissionsField(
        "saleor.graphql.discount.types.Voucher",
        description=(
            "Vouchers allow giving discounts to particular customers on categories, "
            "collections or specific products. They can be used during checkout by "
            "providing valid voucher codes."
        ),
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Get model fields from the root level queries."
        ),
        permissions=[DiscountPermissions.MANAGE_DISCOUNTS],
    )

    class Meta:
        model = discount_models.Voucher
        interfaces = [graphene.relay.Node]

    @staticmethod
    def resolve_voucher(root: discount_models.Voucher, _info):
        # No channel restriction when accessed through translatable content.
        return ChannelContext(node=root, channel_slug=None)
class SaleTranslation(BaseTranslationType):
    """Translated fields of a sale for a given language."""

    id = graphene.GlobalID(required=True)
    name = graphene.String()

    class Meta:
        model = discount_models.SaleTranslation
        interfaces = [graphene.relay.Node]
class SaleTranslatableContent(ModelObjectType):
    """Sale fields that can be translated, plus permission-guarded
    access to the underlying sale."""

    id = graphene.GlobalID(required=True)
    name = graphene.String(required=True)
    translation = TranslationField(SaleTranslation, type_name="sale")
    sale = PermissionsField(
        "saleor.graphql.discount.types.Sale",
        description=(
            "Sales allow creating discounts for categories, collections "
            "or products and are visible to all the customers."
        ),
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Get model fields from the root level queries."
        ),
        permissions=[DiscountPermissions.MANAGE_DISCOUNTS],
    )

    class Meta:
        model = discount_models.Sale
        interfaces = [graphene.relay.Node]

    @staticmethod
    def resolve_sale(root: discount_models.Sale, _info):
        # No channel restriction when accessed through translatable content.
        return ChannelContext(node=root, channel_slug=None)
class ShopTranslation(BaseTranslationType):
    """Translated shop (site settings) fields for a given language."""

    id = graphene.GlobalID(required=True)
    header_text = graphene.String(required=True)
    description = graphene.String(required=True)

    class Meta:
        model = site_models.SiteSettingsTranslation
        interfaces = [graphene.relay.Node]
class MenuItemTranslation(BaseTranslationType):
    """Translated fields of a menu item for a given language."""

    id = graphene.GlobalID(required=True)
    name = graphene.String(required=True)

    class Meta:
        model = menu_models.MenuItemTranslation
        interfaces = [graphene.relay.Node]
class MenuItemTranslatableContent(ModelObjectType):
    """Menu item fields that can be translated, plus access to the
    underlying menu item."""

    id = graphene.GlobalID(required=True)
    name = graphene.String(required=True)
    translation = TranslationField(MenuItemTranslation, type_name="menu item")
    menu_item = graphene.Field(
        "saleor.graphql.menu.types.MenuItem",
        description=(
            "Represents a single item of the related menu. Can store categories, "
            "collection or pages."
        ),
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Get model fields from the root level queries."
        ),
    )

    class Meta:
        model = menu_models.MenuItem
        interfaces = [graphene.relay.Node]

    @staticmethod
    def resolve_menu_item(root: menu_models.MenuItem, _info):
        # No channel restriction when accessed through translatable content.
        return ChannelContext(node=root, channel_slug=None)
class ShippingMethodTranslation(BaseTranslationType):
    """Translated fields of a shipping method for a given language."""

    id = graphene.GlobalID(required=True)
    name = graphene.String()
    description = JSONString(
        description="Translated description of the shipping method." + RICH_CONTENT
    )

    class Meta:
        model = shipping_models.ShippingMethodTranslation
        interfaces = [graphene.relay.Node]
class ShippingMethodTranslatableContent(ModelObjectType):
    """Shipping method fields that can be translated, plus
    permission-guarded access to the underlying shipping method."""

    id = graphene.GlobalID(required=True)
    name = graphene.String(required=True)
    description = JSONString(
        description="Description of the shipping method." + RICH_CONTENT
    )
    translation = TranslationField(
        ShippingMethodTranslation, type_name="shipping method"
    )
    shipping_method = PermissionsField(
        "saleor.graphql.shipping.types.ShippingMethodType",
        # Fixed grammar ("method are" -> "methods are") and a stray double
        # space in this user-facing schema description.
        description=(
            "Shipping methods are the methods you'll use to get customer's orders "
            "to them. They are directly exposed to the customers."
        ),
        deprecation_reason=(
            f"{DEPRECATED_IN_3X_FIELD} Get model fields from the root level queries."
        ),
        permissions=[
            ShippingPermissions.MANAGE_SHIPPING,
        ],
    )

    class Meta:
        model = shipping_models.ShippingMethod
        interfaces = [graphene.relay.Node]

    @staticmethod
    def resolve_shipping_method(root: shipping_models.ShippingMethod, _info):
        # No channel restriction when accessed through translatable content.
        return ChannelContext(node=root, channel_slug=None)
| 34.4 | 88 | 0.696665 |
ace57b8a425fcbc5658d1828c42978f5f4502275 | 41,239 | py | Python | custos-client-sdks/custos-python-sdk/build/lib/custos/server/integration/UserManagementService_pb2_grpc.py | apache/airavata-custos | 075dd26c364b5b5abe8a4f2b226b2de30474f8e4 | [
"Apache-2.0"
] | 10 | 2019-05-21T22:42:35.000Z | 2022-03-25T15:58:09.000Z | custos-client-sdks/custos-python-sdk/build/lib/custos/server/integration/UserManagementService_pb2_grpc.py | apache/airavata-custos | 075dd26c364b5b5abe8a4f2b226b2de30474f8e4 | [
"Apache-2.0"
] | 83 | 2019-02-22T12:22:14.000Z | 2022-03-30T13:42:47.000Z | custos-client-sdks/custos-python-sdk/build/lib/custos/server/integration/UserManagementService_pb2_grpc.py | apache/airavata-custos | 075dd26c364b5b5abe8a4f2b226b2de30474f8e4 | [
"Apache-2.0"
] | 20 | 2019-02-22T08:10:05.000Z | 2021-11-07T19:37:04.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import custos.server.core.IamAdminService_pb2 as IamAdminService__pb2
import custos.server.integration.UserManagementService_pb2 as UserManagementService__pb2
import custos.server.core.UserProfileService_pb2 as UserProfileService__pb2
class UserManagementServiceStub(object):
    """Client-side stub for the
    org.apache.custos.user.management.service.UserManagementService gRPC
    service. Generated code (do not edit): exposes one unary-unary
    callable per RPC, created from the given channel.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.registerUser = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/registerUser',
                request_serializer=IamAdminService__pb2.RegisterUserRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.RegisterUserResponse.FromString,
                )
        self.registerAndEnableUsers = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/registerAndEnableUsers',
                request_serializer=IamAdminService__pb2.RegisterUsersRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.RegisterUsersResponse.FromString,
                )
        self.addUserAttributes = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/addUserAttributes',
                request_serializer=IamAdminService__pb2.AddUserAttributesRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.OperationStatus.FromString,
                )
        self.deleteUserAttributes = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/deleteUserAttributes',
                request_serializer=IamAdminService__pb2.DeleteUserAttributeRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.OperationStatus.FromString,
                )
        self.enableUser = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/enableUser',
                request_serializer=IamAdminService__pb2.UserSearchRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.UserRepresentation.FromString,
                )
        self.disableUser = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/disableUser',
                request_serializer=IamAdminService__pb2.UserSearchRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.UserRepresentation.FromString,
                )
        self.grantAdminPrivileges = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/grantAdminPrivileges',
                request_serializer=IamAdminService__pb2.UserSearchRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.OperationStatus.FromString,
                )
        self.removeAdminPrivileges = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/removeAdminPrivileges',
                request_serializer=IamAdminService__pb2.UserSearchRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.OperationStatus.FromString,
                )
        self.addRolesToUsers = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/addRolesToUsers',
                request_serializer=IamAdminService__pb2.AddUserRolesRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.OperationStatus.FromString,
                )
        self.isUserEnabled = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/isUserEnabled',
                request_serializer=IamAdminService__pb2.UserSearchRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.OperationStatus.FromString,
                )
        self.isUsernameAvailable = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/isUsernameAvailable',
                request_serializer=IamAdminService__pb2.UserSearchRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.OperationStatus.FromString,
                )
        self.getUser = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/getUser',
                request_serializer=IamAdminService__pb2.UserSearchRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.UserRepresentation.FromString,
                )
        self.findUsers = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/findUsers',
                request_serializer=IamAdminService__pb2.FindUsersRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.FindUsersResponse.FromString,
                )
        self.resetPassword = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/resetPassword',
                request_serializer=IamAdminService__pb2.ResetUserPassword.SerializeToString,
                response_deserializer=IamAdminService__pb2.OperationStatus.FromString,
                )
        self.deleteUser = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/deleteUser',
                request_serializer=IamAdminService__pb2.UserSearchRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.OperationStatus.FromString,
                )
        self.deleteUserRoles = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/deleteUserRoles',
                request_serializer=IamAdminService__pb2.DeleteUserRolesRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.OperationStatus.FromString,
                )
        self.updateUserProfile = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/updateUserProfile',
                request_serializer=UserManagementService__pb2.UserProfileRequest.SerializeToString,
                response_deserializer=UserProfileService__pb2.UserProfile.FromString,
                )
        self.getUserProfile = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/getUserProfile',
                request_serializer=UserManagementService__pb2.UserProfileRequest.SerializeToString,
                response_deserializer=UserProfileService__pb2.UserProfile.FromString,
                )
        self.deleteUserProfile = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/deleteUserProfile',
                request_serializer=UserManagementService__pb2.UserProfileRequest.SerializeToString,
                response_deserializer=UserProfileService__pb2.UserProfile.FromString,
                )
        self.getAllUserProfilesInTenant = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/getAllUserProfilesInTenant',
                request_serializer=UserManagementService__pb2.UserProfileRequest.SerializeToString,
                response_deserializer=UserProfileService__pb2.GetAllUserProfilesResponse.FromString,
                )
        self.linkUserProfile = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/linkUserProfile',
                request_serializer=UserManagementService__pb2.LinkUserProfileRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.OperationStatus.FromString,
                )
        self.getUserProfileAuditTrails = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/getUserProfileAuditTrails',
                request_serializer=UserProfileService__pb2.GetUpdateAuditTrailRequest.SerializeToString,
                response_deserializer=UserProfileService__pb2.GetUpdateAuditTrailResponse.FromString,
                )
        self.synchronizeUserDBs = channel.unary_unary(
                '/org.apache.custos.user.management.service.UserManagementService/synchronizeUserDBs',
                request_serializer=UserManagementService__pb2.SynchronizeUserDBRequest.SerializeToString,
                response_deserializer=IamAdminService__pb2.OperationStatus.FromString,
                )
class UserManagementServiceServicer(object):
    """Server-side interface for the UserManagementService gRPC service.
    Generated code (do not edit): subclass and override the RPC methods;
    each default implementation responds UNIMPLEMENTED.
    """

    def registerUser(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def registerAndEnableUsers(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def addUserAttributes(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def deleteUserAttributes(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def enableUser(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def disableUser(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def grantAdminPrivileges(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def removeAdminPrivileges(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def addRolesToUsers(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def isUserEnabled(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def isUsernameAvailable(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getUser(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def findUsers(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def resetPassword(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def deleteUser(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def deleteUserRoles(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def updateUserProfile(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getUserProfile(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def deleteUserProfile(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getAllUserProfilesInTenant(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def linkUserProfile(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getUserProfileAuditTrails(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def synchronizeUserDBs(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_UserManagementServiceServicer_to_server(servicer, server):
    """Register the RPC handlers of *servicer* with *server* under the
    org.apache.custos.user.management.service.UserManagementService name.
    Generated code (do not edit)."""
    # One unary-unary handler per RPC, bound to the servicer's methods.
    rpc_method_handlers = {
            'registerUser': grpc.unary_unary_rpc_method_handler(
                    servicer.registerUser,
                    request_deserializer=IamAdminService__pb2.RegisterUserRequest.FromString,
                    response_serializer=IamAdminService__pb2.RegisterUserResponse.SerializeToString,
            ),
            'registerAndEnableUsers': grpc.unary_unary_rpc_method_handler(
                    servicer.registerAndEnableUsers,
                    request_deserializer=IamAdminService__pb2.RegisterUsersRequest.FromString,
                    response_serializer=IamAdminService__pb2.RegisterUsersResponse.SerializeToString,
            ),
            'addUserAttributes': grpc.unary_unary_rpc_method_handler(
                    servicer.addUserAttributes,
                    request_deserializer=IamAdminService__pb2.AddUserAttributesRequest.FromString,
                    response_serializer=IamAdminService__pb2.OperationStatus.SerializeToString,
            ),
            'deleteUserAttributes': grpc.unary_unary_rpc_method_handler(
                    servicer.deleteUserAttributes,
                    request_deserializer=IamAdminService__pb2.DeleteUserAttributeRequest.FromString,
                    response_serializer=IamAdminService__pb2.OperationStatus.SerializeToString,
            ),
            'enableUser': grpc.unary_unary_rpc_method_handler(
                    servicer.enableUser,
                    request_deserializer=IamAdminService__pb2.UserSearchRequest.FromString,
                    response_serializer=IamAdminService__pb2.UserRepresentation.SerializeToString,
            ),
            'disableUser': grpc.unary_unary_rpc_method_handler(
                    servicer.disableUser,
                    request_deserializer=IamAdminService__pb2.UserSearchRequest.FromString,
                    response_serializer=IamAdminService__pb2.UserRepresentation.SerializeToString,
            ),
            'grantAdminPrivileges': grpc.unary_unary_rpc_method_handler(
                    servicer.grantAdminPrivileges,
                    request_deserializer=IamAdminService__pb2.UserSearchRequest.FromString,
                    response_serializer=IamAdminService__pb2.OperationStatus.SerializeToString,
            ),
            'removeAdminPrivileges': grpc.unary_unary_rpc_method_handler(
                    servicer.removeAdminPrivileges,
                    request_deserializer=IamAdminService__pb2.UserSearchRequest.FromString,
                    response_serializer=IamAdminService__pb2.OperationStatus.SerializeToString,
            ),
            'addRolesToUsers': grpc.unary_unary_rpc_method_handler(
                    servicer.addRolesToUsers,
                    request_deserializer=IamAdminService__pb2.AddUserRolesRequest.FromString,
                    response_serializer=IamAdminService__pb2.OperationStatus.SerializeToString,
            ),
            'isUserEnabled': grpc.unary_unary_rpc_method_handler(
                    servicer.isUserEnabled,
                    request_deserializer=IamAdminService__pb2.UserSearchRequest.FromString,
                    response_serializer=IamAdminService__pb2.OperationStatus.SerializeToString,
            ),
            'isUsernameAvailable': grpc.unary_unary_rpc_method_handler(
                    servicer.isUsernameAvailable,
                    request_deserializer=IamAdminService__pb2.UserSearchRequest.FromString,
                    response_serializer=IamAdminService__pb2.OperationStatus.SerializeToString,
            ),
            'getUser': grpc.unary_unary_rpc_method_handler(
                    servicer.getUser,
                    request_deserializer=IamAdminService__pb2.UserSearchRequest.FromString,
                    response_serializer=IamAdminService__pb2.UserRepresentation.SerializeToString,
            ),
            'findUsers': grpc.unary_unary_rpc_method_handler(
                    servicer.findUsers,
                    request_deserializer=IamAdminService__pb2.FindUsersRequest.FromString,
                    response_serializer=IamAdminService__pb2.FindUsersResponse.SerializeToString,
            ),
            'resetPassword': grpc.unary_unary_rpc_method_handler(
                    servicer.resetPassword,
                    request_deserializer=IamAdminService__pb2.ResetUserPassword.FromString,
                    response_serializer=IamAdminService__pb2.OperationStatus.SerializeToString,
            ),
            'deleteUser': grpc.unary_unary_rpc_method_handler(
                    servicer.deleteUser,
                    request_deserializer=IamAdminService__pb2.UserSearchRequest.FromString,
                    response_serializer=IamAdminService__pb2.OperationStatus.SerializeToString,
            ),
            'deleteUserRoles': grpc.unary_unary_rpc_method_handler(
                    servicer.deleteUserRoles,
                    request_deserializer=IamAdminService__pb2.DeleteUserRolesRequest.FromString,
                    response_serializer=IamAdminService__pb2.OperationStatus.SerializeToString,
            ),
            'updateUserProfile': grpc.unary_unary_rpc_method_handler(
                    servicer.updateUserProfile,
                    request_deserializer=UserManagementService__pb2.UserProfileRequest.FromString,
                    response_serializer=UserProfileService__pb2.UserProfile.SerializeToString,
            ),
            'getUserProfile': grpc.unary_unary_rpc_method_handler(
                    servicer.getUserProfile,
                    request_deserializer=UserManagementService__pb2.UserProfileRequest.FromString,
                    response_serializer=UserProfileService__pb2.UserProfile.SerializeToString,
            ),
            'deleteUserProfile': grpc.unary_unary_rpc_method_handler(
                    servicer.deleteUserProfile,
                    request_deserializer=UserManagementService__pb2.UserProfileRequest.FromString,
                    response_serializer=UserProfileService__pb2.UserProfile.SerializeToString,
            ),
            'getAllUserProfilesInTenant': grpc.unary_unary_rpc_method_handler(
                    servicer.getAllUserProfilesInTenant,
                    request_deserializer=UserManagementService__pb2.UserProfileRequest.FromString,
                    response_serializer=UserProfileService__pb2.GetAllUserProfilesResponse.SerializeToString,
            ),
            'linkUserProfile': grpc.unary_unary_rpc_method_handler(
                    servicer.linkUserProfile,
                    request_deserializer=UserManagementService__pb2.LinkUserProfileRequest.FromString,
                    response_serializer=IamAdminService__pb2.OperationStatus.SerializeToString,
            ),
            'getUserProfileAuditTrails': grpc.unary_unary_rpc_method_handler(
                    servicer.getUserProfileAuditTrails,
                    request_deserializer=UserProfileService__pb2.GetUpdateAuditTrailRequest.FromString,
                    response_serializer=UserProfileService__pb2.GetUpdateAuditTrailResponse.SerializeToString,
            ),
            'synchronizeUserDBs': grpc.unary_unary_rpc_method_handler(
                    servicer.synchronizeUserDBs,
                    request_deserializer=UserManagementService__pb2.SynchronizeUserDBRequest.FromString,
                    response_serializer=IamAdminService__pb2.OperationStatus.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'org.apache.custos.user.management.service.UserManagementService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class UserManagementService(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def registerUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/registerUser',
IamAdminService__pb2.RegisterUserRequest.SerializeToString,
IamAdminService__pb2.RegisterUserResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def registerAndEnableUsers(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/registerAndEnableUsers',
IamAdminService__pb2.RegisterUsersRequest.SerializeToString,
IamAdminService__pb2.RegisterUsersResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def addUserAttributes(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/addUserAttributes',
IamAdminService__pb2.AddUserAttributesRequest.SerializeToString,
IamAdminService__pb2.OperationStatus.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def deleteUserAttributes(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/deleteUserAttributes',
IamAdminService__pb2.DeleteUserAttributeRequest.SerializeToString,
IamAdminService__pb2.OperationStatus.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def enableUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/enableUser',
IamAdminService__pb2.UserSearchRequest.SerializeToString,
IamAdminService__pb2.UserRepresentation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def disableUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/disableUser',
IamAdminService__pb2.UserSearchRequest.SerializeToString,
IamAdminService__pb2.UserRepresentation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def grantAdminPrivileges(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/grantAdminPrivileges',
IamAdminService__pb2.UserSearchRequest.SerializeToString,
IamAdminService__pb2.OperationStatus.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def removeAdminPrivileges(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/removeAdminPrivileges',
IamAdminService__pb2.UserSearchRequest.SerializeToString,
IamAdminService__pb2.OperationStatus.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def addRolesToUsers(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/addRolesToUsers',
IamAdminService__pb2.AddUserRolesRequest.SerializeToString,
IamAdminService__pb2.OperationStatus.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def isUserEnabled(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/isUserEnabled',
IamAdminService__pb2.UserSearchRequest.SerializeToString,
IamAdminService__pb2.OperationStatus.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def isUsernameAvailable(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/isUsernameAvailable',
IamAdminService__pb2.UserSearchRequest.SerializeToString,
IamAdminService__pb2.OperationStatus.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def getUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/getUser',
IamAdminService__pb2.UserSearchRequest.SerializeToString,
IamAdminService__pb2.UserRepresentation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def findUsers(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/findUsers',
IamAdminService__pb2.FindUsersRequest.SerializeToString,
IamAdminService__pb2.FindUsersResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def resetPassword(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/resetPassword',
IamAdminService__pb2.ResetUserPassword.SerializeToString,
IamAdminService__pb2.OperationStatus.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def deleteUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/deleteUser',
IamAdminService__pb2.UserSearchRequest.SerializeToString,
IamAdminService__pb2.OperationStatus.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def deleteUserRoles(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/deleteUserRoles',
IamAdminService__pb2.DeleteUserRolesRequest.SerializeToString,
IamAdminService__pb2.OperationStatus.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def updateUserProfile(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/updateUserProfile',
UserManagementService__pb2.UserProfileRequest.SerializeToString,
UserProfileService__pb2.UserProfile.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def getUserProfile(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/getUserProfile',
UserManagementService__pb2.UserProfileRequest.SerializeToString,
UserProfileService__pb2.UserProfile.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def deleteUserProfile(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/deleteUserProfile',
UserManagementService__pb2.UserProfileRequest.SerializeToString,
UserProfileService__pb2.UserProfile.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def getAllUserProfilesInTenant(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/getAllUserProfilesInTenant',
UserManagementService__pb2.UserProfileRequest.SerializeToString,
UserProfileService__pb2.GetAllUserProfilesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def linkUserProfile(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/linkUserProfile',
UserManagementService__pb2.LinkUserProfileRequest.SerializeToString,
IamAdminService__pb2.OperationStatus.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def getUserProfileAuditTrails(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/getUserProfileAuditTrails',
UserProfileService__pb2.GetUpdateAuditTrailRequest.SerializeToString,
UserProfileService__pb2.GetUpdateAuditTrailResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def synchronizeUserDBs(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/org.apache.custos.user.management.service.UserManagementService/synchronizeUserDBs',
UserManagementService__pb2.SynchronizeUserDBRequest.SerializeToString,
IamAdminService__pb2.OperationStatus.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 50.786946 | 156 | 0.683431 |
ace57c53e4823cfa4ee7d61bc7ddee9e39a16e53 | 425 | py | Python | todo/migrations/0008_auto_20200104_1605.py | shithee/doo | f9584915695464b70832a9414dc552ef35c82de2 | [
"MIT"
] | null | null | null | todo/migrations/0008_auto_20200104_1605.py | shithee/doo | f9584915695464b70832a9414dc552ef35c82de2 | [
"MIT"
] | null | null | null | todo/migrations/0008_auto_20200104_1605.py | shithee/doo | f9584915695464b70832a9414dc552ef35c82de2 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.1 on 2020-01-04 10:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by `manage.py makemigrations` (see header comment);
    # do not edit by hand -- create a new migration instead.
    dependencies = [
        ('todo', '0007_auto_20200104_1533'),
    ]
    operations = [
        # Repoint Profile.image: new default image and new upload directory.
        migrations.AlterField(
            model_name='profile',
            name='image',
            field=models.ImageField(default='media/profile.jpg', upload_to='static/media/'),
        ),
    ]
| 22.368421 | 92 | 0.609412 |
ace57cf6494e8d34b4c048263eddbfb763434eaa | 8,768 | py | Python | phy/apps/__init__.py | PaulMAnderson/phy | 134264e6c1ec586f797459633fa4e71352fafb4e | [
"BSD-3-Clause"
] | null | null | null | phy/apps/__init__.py | PaulMAnderson/phy | 134264e6c1ec586f797459633fa4e71352fafb4e | [
"BSD-3-Clause"
] | null | null | null | phy/apps/__init__.py | PaulMAnderson/phy | 134264e6c1ec586f797459633fa4e71352fafb4e | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""CLI tool."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
from contextlib import contextmanager
import logging
from pathlib import Path
import sys
from traceback import format_exception
import click
from phylib import add_default_handler, _Formatter # noqa
from phylib import _logger_date_fmt, _logger_fmt # noqa
from phy import __version_git__
from phy.gui.qt import QtDialogLogger
from phy.utils.profiling import _enable_profiler, _enable_pdb
from .base import ( # noqa
BaseController, WaveformMixin, FeatureMixin, TemplateMixin, TraceMixin)
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
# CLI utils
#------------------------------------------------------------------------------
# Strip the phy-specific debugging/profiling flags from sys.argv *before*
# click parses the command line; each flag toggles a process-wide side effect.
# Order matters: every branch removes its own flag so later parsing never
# sees it.
DEBUG = False
if '--debug' in sys.argv: # pragma: no cover
    DEBUG = True
    sys.argv.remove('--debug')
# --pdb: drop into the post-mortem debugger on uncaught exceptions.
if '--pdb' in sys.argv: # pragma: no cover
    sys.argv.remove('--pdb')
    _enable_pdb()
# Add `profile` in the builtins.
if '--lprof' in sys.argv or '--prof' in sys.argv: # pragma: no cover
    _enable_profiler('--lprof' in sys.argv)
    if '--prof' in sys.argv:
        sys.argv.remove('--prof')
    if '--lprof' in sys.argv:
        sys.argv.remove('--lprof')
#------------------------------------------------------------------------------
# Set up logging with the CLI tool
#------------------------------------------------------------------------------
def exceptionHandler(exception_type, exception, traceback): # pragma: no cover
    """``sys.excepthook`` replacement: log uncaught exceptions instead of
    letting them crash the process. Signature matches the excepthook API."""
    formatted_tb = ''.join(
        format_exception(exception_type, exception, traceback))
    logger.error("An error has occurred (%s): %s\n%s",
                 exception_type.__name__, exception, formatted_tb)
@contextmanager
def capture_exceptions(): # pragma: no cover
    """Within this context, log uncaught exceptions (instead of crashing the
    GUI) and surface ERROR-level records in a Qt dialog. Both hooks are
    removed again on exit."""
    logger.debug("Start capturing exceptions.")
    # Swap in the logging excepthook, remembering the previous one.
    previous_hook = sys.excepthook
    sys.excepthook = exceptionHandler
    # Show a dialog for every ERROR record emitted under the 'phy' logger.
    dialog_handler = QtDialogLogger()
    dialog_handler.setLevel(logging.ERROR)
    logging.getLogger('phy').addHandler(dialog_handler)
    yield
    # Tear everything down in the reverse spirit: dialog handler, then hook.
    logging.getLogger('phy').removeHandler(dialog_handler)
    sys.excepthook = previous_hook
    logger.debug("Stop capturing exceptions.")
#------------------------------------------------------------------------------
# Root CLI tool
#------------------------------------------------------------------------------
@click.group()
@click.version_option(version=__version_git__)
@click.help_option('-h', '--help')
@click.pass_context
def phycli(ctx):
    """Interactive visualization and manual spike sorting of large-scale ephys data."""
    # Route phy and its support libraries through the same console handler,
    # at DEBUG level when the CLI was started with --debug.
    level = 'DEBUG' if DEBUG else 'INFO'
    for logger_name in ('phy', 'phylib', 'mtscomp'):
        add_default_handler(level=level, logger=logging.getLogger(logger_name))
#------------------------------------------------------------------------------
# GUI command wrapper
#------------------------------------------------------------------------------
def _gui_command(f):
    """Command options for GUI commands."""
    # Apply the shared cache/state-clearing options in the same order the
    # original hand-written decorators did.
    shared_options = (
        ('--clear-cache/--no-clear-cache',
         "Clear the .phy cache in the data directory."),
        ('--clear-state/--no-clear-state',
         "Clear the GUI state in `~/.phy/` and in `.phy`."),
    )
    for declaration, help_text in shared_options:
        f = click.option(declaration, default=False, help=help_text)(f)
    return f
#------------------------------------------------------------------------------
# Raw data GUI
#------------------------------------------------------------------------------
@phycli.command('trace-gui') # pragma: no cover
@click.argument('dat-path', type=click.Path(exists=True))
@click.option('-s', '--sample-rate', type=float)
@click.option('-d', '--dtype', type=str)
@click.option('-n', '--n-channels', type=int)
@click.option('-h', '--offset', type=int)
@click.option('-f', '--fortran', type=bool, is_flag=True)
@_gui_command
@click.pass_context
def cli_trace_gui(ctx, dat_path, **kwargs):
    """Launch the trace GUI on a raw data file."""
    # Imported lazily so `phy --help` stays fast and Qt-free.
    from .trace.gui import trace_gui
    with capture_exceptions():
        # Rename the CLI options to the keyword names trace_gui() expects.
        kwargs['n_channels_dat'] = kwargs.pop('n_channels')
        # Fortran (column-major) order if -f was given, C order otherwise.
        kwargs['order'] = 'F' if kwargs.pop('fortran', None) else None
        trace_gui(dat_path, **kwargs)
#------------------------------------------------------------------------------
# Template GUI
#------------------------------------------------------------------------------
@phycli.command('template-gui') # pragma: no cover
@click.argument('params-path', type=click.Path(exists=True))
@_gui_command
@click.pass_context
def cli_template_gui(ctx, params_path, **kwargs):
    """Launch the template GUI on a params.py file."""
    # Imported lazily to keep CLI startup fast.
    from .template.gui import template_gui
    # `profile` is injected into builtins by _enable_profiler() when the CLI
    # was started with --prof/--lprof; None means no profiling requested.
    prof = __builtins__.get('profile', None)
    with capture_exceptions():
        if prof:
            from phy.utils.profiling import _profile
            # Run the GUI under the profiler instead of calling it directly.
            return _profile(prof, 'template_gui(params_path)', globals(), locals())
        template_gui(params_path, **kwargs)
@phycli.command('template-describe')
@click.argument('params-path', type=click.Path(exists=True))
@click.pass_context
def cli_template_describe(ctx, params_path):
    """Describe a template file."""
    # Lazy import: avoids pulling GUI dependencies for a text-only command.
    from .template.gui import template_describe
    template_describe(params_path)
#------------------------------------------------------------------------------
# Kwik GUI
#------------------------------------------------------------------------------
# Create the `phy cluster-manual file.kwik` command.
@phycli.command('kwik-gui') # pragma: no cover
@click.argument('path', type=click.Path(exists=True))
@click.option('--channel-group', type=int)
@click.option('--clustering', type=str)
@_gui_command
@click.pass_context
def cli_kwik_gui(ctx, path, channel_group=None, clustering=None, **kwargs):
    """Launch the Kwik GUI on a Kwik file."""
    # Lazy import so the GUI stack is only loaded for this command.
    from .kwik.gui import kwik_gui
    with capture_exceptions():
        assert path
        kwik_gui(path, channel_group=channel_group, clustering=clustering, **kwargs)
@phycli.command('kwik-describe')
@click.argument('path', type=click.Path(exists=True))
@click.option('--channel-group', type=int, help='channel group')
@click.option('--clustering', type=str, help='clustering')
@click.pass_context
def cli_kwik_describe(ctx, path, channel_group=0, clustering='main'):
    """Describe a Kwik file."""
    # Lazy import keeps the CLI lightweight for non-GUI commands.
    from .kwik.gui import kwik_describe
    assert path
    kwik_describe(path, channel_group=channel_group, clustering=clustering)
#------------------------------------------------------------------------------
# Conversion
#------------------------------------------------------------------------------
@phycli.command('alf-convert')
@click.argument('subdirs', nargs=-1, type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.argument('out_dir', type=click.Path())
@click.pass_context
def cli_alf_convert(ctx, subdirs, out_dir):
    """Convert an ephys dataset into ALF. If several directories are specified, it is assumed
    that each directory contains the data for one probe of the same recording."""
    # Imported lazily to keep CLI startup fast.
    from phylib.io.alf import EphysAlfCreator
    from phylib.io.merge import Merger
    from phylib.io.model import load_model
    output_dir = Path(out_dir)
    if len(subdirs) >= 2:
        # Several probes of one recording: merge them into a scratch
        # subdirectory of the output directory first.
        model = Merger(subdirs, output_dir / '_tmp_merged').merge()
    else:
        model = load_model(Path(subdirs[0]) / 'params.py')
    EphysAlfCreator(model).convert(output_dir)
#------------------------------------------------------------------------------
# Waveform extraction
#------------------------------------------------------------------------------
@phycli.command('extract-waveforms')
@click.argument('params-path', type=click.Path(exists=True))
@click.argument('n_spikes_per_cluster', type=int, default=500)
# BUG FIX: this was declared with @click.argument('--max-n-channels', ...).
# A dash-prefixed name denotes an *option*; click.argument() would have
# registered a positional literally named "--max-n-channels" instead of
# accepting `--max-n-channels 8` on the command line.
@click.option('--max-n-channels', type=int, default=16)
@click.pass_context
def template_extract_waveforms(
        ctx, params_path, n_spikes_per_cluster, max_n_channels=None): # pragma: no cover
    """Extract spike waveforms."""
    # Lazy import keeps CLI startup fast.
    from phylib.io.model import load_model
    model = load_model(params_path)
    # Persist a waveform subset: at most n_spikes_per_cluster spikes per
    # template, restricted to the max_n_channels best channels.
    model.save_spikes_subset_waveforms(
        max_n_spikes_per_template=n_spikes_per_cluster, max_n_channels=max_n_channels)
    model.close()
| 35.354839 | 98 | 0.577669 |
ace57ef93fcc920a5dd3bde0e4c19c1098dc797e | 3,437 | py | Python | celery_singleton/singleton.py | aryanbaghi/celery-singleton | 9869515c04a751254b92622b7d9ede68c538b766 | [
"MIT"
] | null | null | null | celery_singleton/singleton.py | aryanbaghi/celery-singleton | 9869515c04a751254b92622b7d9ede68c538b766 | [
"MIT"
] | null | null | null | celery_singleton/singleton.py | aryanbaghi/celery-singleton | 9869515c04a751254b92622b7d9ede68c538b766 | [
"MIT"
] | null | null | null | import json
from hashlib import md5
from celery import Task as BaseTask
from kombu.utils.uuid import uuid
def clear_locks(app):
    """Delete every leftover singleton lock key from *app*'s result-backend
    client (presumably a Redis client -- it must expose ``keys``/``delete``)."""
    redis_client = app.backend.client
    stale_locks = redis_client.keys('SINGLETONLOCK_*')
    if stale_locks:
        redis_client.delete(*stale_locks)
class Singleton(BaseTask):
    """Celery task base class that allows only one pending/running instance
    of a task per unique (task name, args, kwargs) combination.

    A backend key ``SINGLETONLOCK_<md5>`` is claimed atomically with SETNX
    when the task is queued and deleted when it succeeds, fails or retries.
    NOTE(review): assumes the result backend exposes a Redis-style ``client``
    with ``setnx``/``get`` and a ``delete`` method -- confirm backend type.
    """
    abstract = True

    def aquire_lock(self, lock, task_id):
        """Atomically claim *lock* for *task_id*; truthy iff we won the race.

        (Method name keeps the historical 'aquire' typo for compatibility.)
        """
        app = self._get_app()
        return app.backend.client.setnx(lock, task_id)

    def get_existing_task_id(self, lock):
        """Return the id of the task currently holding *lock*, or None."""
        app = self._get_app()
        task_id = app.backend.client.get(lock)
        return task_id.decode() if task_id else None

    def generate_lock(self, task_name, *args, **kwargs):
        """Derive a deterministic lock key from the task name and arguments."""
        # sort_keys makes the kwargs serialization order-independent.
        task_args = json.dumps(args, sort_keys=True)
        task_kwargs = json.dumps(kwargs, sort_keys=True)
        return 'SINGLETONLOCK_' + md5(
            (task_name + task_args + task_kwargs).encode()
        ).hexdigest()

    def lock_and_run(self, lock, args=None, kwargs=None, task_id=None,
                     producer=None, link=None, link_error=None, shadow=None,
                     **options):
        """Try to claim *lock*; on success queue the task, else return None."""
        lock_aquired = self.aquire_lock(lock, task_id)
        if lock_aquired:
            try:
                return super(Singleton, self).apply_async(
                    args=args, kwargs=kwargs,
                    task_id=task_id, producer=producer,
                    link=link, link_error=link_error,
                    shadow=shadow, **options
                )
            # Was a bare `except:`; BaseException keeps the identical
            # "catch everything, re-raise" semantics but is explicit.
            except BaseException:
                # Clear the lock if apply_async fails, otherwise the task
                # could never be queued again.
                # BUG FIX: args/kwargs default to None here; unpacking None
                # raised a TypeError that masked the original error.
                self.release_lock(*(args or ()), **(kwargs or {}))
                raise

    def apply_async(self, args=None, kwargs=None, task_id=None, producer=None,
                    link=None, link_error=None, shadow=None, **options):
        """Queue the task unless an identical one already holds the lock.

        Returns the new AsyncResult, or the AsyncResult of the existing
        lock-holding task.
        """
        args = args or []
        kwargs = kwargs or {}
        task_id = task_id or uuid()
        lock = self.generate_lock(self.name, *args, **kwargs)

        task = self.lock_and_run(
            lock, args=args, kwargs=kwargs,
            task_id=task_id, producer=producer,
            link=link, link_error=link_error,
            shadow=shadow, **options
        )
        if task:
            return task

        # We lost the race: find the holder. Loop because the holder may
        # release the lock between our failed claim and our read of it.
        existing_task_id = self.get_existing_task_id(lock)
        while not existing_task_id:
            task = self.lock_and_run(
                lock, args=args, kwargs=kwargs,
                task_id=task_id, producer=producer,
                link=link, link_error=link_error,
                shadow=shadow, **options
            )
            if task:
                return task
            existing_task_id = self.get_existing_task_id(lock)
        return self.AsyncResult(existing_task_id)

    def retry(self, args=None, kwargs=None, exc=None, throw=True,
              eta=None, countdown=None, max_retries=None, **options):
        """Release the lock before re-queueing so the retry can claim it."""
        args = args or self.request.args
        kwargs = kwargs or self.request.kwargs
        self.release_lock(*args, **kwargs)
        return super(Singleton, self).retry(args, kwargs, exc, throw,
                                            eta, countdown, max_retries, **options)

    def release_lock(self, *args, **kwargs):
        """Delete the lock derived from this task's name and *args*/*kwargs*."""
        app = self._get_app()
        lock = self.generate_lock(self.name, *args, **kwargs)
        app.backend.delete(lock)

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        self.release_lock(*args, **kwargs)

    def on_success(self, retval, task_id, args, kwargs):
        self.release_lock(*args, **kwargs)
| 34.029703 | 78 | 0.588013 |
ace57f00b2936b137933e92645e58bfe122b7181 | 812 | py | Python | polymuse/pattern.py | rushike/polymuse-future | 25af861e11fc3f4f95327405fec15d48bcc84a62 | [
"MIT"
] | 1 | 2019-11-30T11:12:08.000Z | 2019-11-30T11:12:08.000Z | polymuse/pattern.py | rushike/polymuse-future | 25af861e11fc3f4f95327405fec15d48bcc84a62 | [
"MIT"
] | null | null | null | polymuse/pattern.py | rushike/polymuse-future | 25af861e11fc3f4f95327405fec15d48bcc84a62 | [
"MIT"
] | null | null | null | """
This file is intended to content the basic input pattern need to feed to neural rnn models
"""
ip_patterns = [
[
60, 62, 64, 65, 67, 69, 71, 72,
72, 71, 69, 67, 65, 64, 62, 60,
60, 62, 64, 65, 67, 69, 71, 72,
72, 71, 69, 67, 65, 64, 62, 60,
],
[
72, 71, 69, 67, 65, 64, 62, 60,
60, 62, 64, 65, 67, 69, 71, 72,
60, 62, 64, 65, 67, 69, 71, 72,
72, 71, 69, 67, 65, 64, 62, 60,
],
[
72, 71, 69, 67, 65, 64, 62, 60,
72, 71, 69, 67, 65, 64, 62, 60,
72, 71, 69, 67, 65, 64, 62, 60,
72, 71, 69, 67, 65, 64, 62, 60,
],
[
36, 38, 40, 41, 43, 45, 47, 48,
48, 50, 52, 53, 55, 57, 59, 60,
60, 62, 64, 65, 67, 69, 71, 72,
72, 71, 69, 67, 65, 64, 62, 60,
]
] | 25.375 | 90 | 0.419951 |
ace57f656ba3109fbda71b7d0e8498bf2c0a96b1 | 497 | py | Python | Lesson_3/Task_4.py | AlexHarf/Selenium_training | 4baf3ea395627a4ae1333d946e8da4ed79607a01 | [
"Apache-2.0"
] | null | null | null | Lesson_3/Task_4.py | AlexHarf/Selenium_training | 4baf3ea395627a4ae1333d946e8da4ed79607a01 | [
"Apache-2.0"
] | null | null | null | Lesson_3/Task_4.py | AlexHarf/Selenium_training | 4baf3ea395627a4ae1333d946e8da4ed79607a01 | [
"Apache-2.0"
] | null | null | null | import pytest
from selenium import webdriver
@pytest.fixture
def driver(request):
    """Start a Firefox Nightly session and guarantee it is quit after the test."""
    browser = webdriver.Firefox(firefox_binary="c:\\Program Files\\Nightly\\firefox.exe")
    print(browser.capabilities)
    # Register teardown explicitly rather than via yield, matching the
    # original fixture's addfinalizer style.
    request.addfinalizer(browser.quit)
    return browser
def test_example(driver):
driver.get("http://localhost/litecart/admin/")
driver.find_element_by_name("username").send_keys("admin")
driver.find_element_by_name("password").send_keys("admin")
driver.find_element_by_name("login").click() | 29.235294 | 84 | 0.744467 |
ace57f96683271fcc33521e9ce3389aa977b910c | 8,510 | py | Python | nlptoolkit/models/seq2vec/rnn.py | jianzhnie/d2nlp | 94da74ec9be3aeee699b358f6bba9fde43bd80c0 | [
"Apache-2.0"
] | 3 | 2021-12-01T12:25:00.000Z | 2022-03-07T02:22:00.000Z | nlptoolkit/models/seq2vec/rnn.py | jianzhnie/nlp-toolkit | 94da74ec9be3aeee699b358f6bba9fde43bd80c0 | [
"Apache-2.0"
] | null | null | null | nlptoolkit/models/seq2vec/rnn.py | jianzhnie/nlp-toolkit | 94da74ec9be3aeee699b358f6bba9fde43bd80c0 | [
"Apache-2.0"
] | null | null | null | '''
Author: jianzhnie
Date: 2022-03-24 12:30:41
LastEditTime: 2022-03-24 17:31:01
LastEditors: jianzhnie
Description:
'''
import torch
import torch.nn as nn
class RNNEncoder(nn.Module):
    r"""
    Encode a batch of token-vector sequences into one vector per sequence
    with a (possibly multi-layer, possibly bidirectional) vanilla RNN.

    Input shape: ``(batch_size, num_tokens, input_size)``.
    Output shape: ``(batch_size, hidden_size * 2)`` when ``bidirectional``,
    otherwise ``(batch_size, hidden_size)``.

    When ``pooling_type`` is None the encoder returns the hidden state of
    the last time step at the last layer (for a bidirectional RNN, the
    final forward and backward hidden states are concatenated). Otherwise
    the per-time-step hidden states of the last layer are pooled with
    "sum", "max" or "mean".

    Args:
        input_size (int): Number of expected features in the input.
        hidden_size (int): Number of features in the hidden state.
        num_layers (int, optional): Number of stacked RNN layers.
            Defaults to 1.
        bidirectional (bool, optional): Run the RNN in both directions and
            concatenate the outputs. Defaults to False.
        dropout (float, optional): Dropout probability between RNN layers
            (has no effect when ``num_layers == 1``). Defaults to 0.0.
        pooling_type (str, optional): One of None, "sum", "max", "mean".
            Defaults to None.
    """

    def __init__(self,
                 input_size,
                 hidden_size,
                 num_layers=1,
                 bidirectional=False,
                 dropout=0.0,
                 pooling_type=None,
                 **kwargs):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bidirectional = bidirectional
        self.pooling_type = pooling_type
        self.rnn_layer = nn.RNN(input_size=input_size,
                                hidden_size=hidden_size,
                                num_layers=num_layers,
                                dropout=dropout,
                                batch_first=True,
                                bidirectional=bidirectional,
                                **kwargs)

    def get_input_dim(self):
        """Return the per-token feature dimension expected by this encoder."""
        return self.input_size

    def get_output_dim(self):
        """Return the dimension of the single vector produced per sequence."""
        return self.hidden_size * 2 if self.bidirectional else self.hidden_size

    def forward(self, inputs):
        """Encode *inputs* of shape ``(batch, num_tokens, input_size)`` into
        a tensor of shape ``(batch, get_output_dim())``.

        Raises:
            RuntimeError: if ``pooling_type`` is not one of the supported
                values.
        """
        # encoded_text: hidden state of every time step at the last layer,
        #   shape (batch, num_tokens, hidden_size * num_directions).
        # last_hidden: hidden state of the last time step for every layer,
        #   shape (num_layers * num_directions, batch, hidden_size).
        encoded_text, last_hidden = self.rnn_layer(inputs)
        if not self.pooling_type:
            if not self.bidirectional:
                # Last layer, last time step.
                output = last_hidden[-1, :, :]
            else:
                # Concatenate the final forward (-2) and backward (-1)
                # hidden states of the last layer.
                output = torch.cat(
                    (last_hidden[-2, :, :], last_hidden[-1, :, :]), dim=1)
        elif self.pooling_type == 'sum':
            output = torch.sum(encoded_text, dim=1)
        elif self.pooling_type == 'max':
            # BUG FIX: torch.max(..., dim=...) returns a (values, indices)
            # namedtuple; the original code returned that tuple instead of
            # the pooled tensor.
            output = torch.max(encoded_text, dim=1).values
        elif self.pooling_type == 'mean':
            output = torch.mean(encoded_text, dim=1)
        else:
            raise RuntimeError(
                'Unexpected pooling type %s .'
                'Pooling type must be one of sum, max and mean.' %
                self.pooling_type)
        return output
class RNNModel(nn.Module):
    """Text classifier: embedding lookup -> RNNEncoder -> tanh FC -> logits.

    NOTE(review): depends on the sibling ``RNNEncoder`` class defined above.
    Attribute names are kept verbatim because they are state_dict keys.
    """

    def __init__(self,
                 vocab_size,
                 num_classes,
                 emb_dim=128,
                 padding_idx=0,
                 rnn_hidden_size=198,
                 bidirectional=False,
                 rnn_layers=1,
                 dropout_rate=0.0,
                 pooling_type=None,
                 fc_hidden_size=96):
        super().__init__()
        # Token-id -> dense-vector lookup table.
        self.embedder = nn.Embedding(num_embeddings=vocab_size,
                                     embedding_dim=emb_dim,
                                     padding_idx=padding_idx)
        # Sequence encoder producing one vector per example; its output
        # width (1x or 2x hidden size) is queried below.
        self.rnn_encoder = RNNEncoder(emb_dim,
                                      rnn_hidden_size,
                                      num_layers=rnn_layers,
                                      bidirectional=bidirectional,
                                      dropout=dropout_rate,
                                      pooling_type=pooling_type)
        self.fc = nn.Linear(self.rnn_encoder.get_output_dim(), fc_hidden_size)
        self.output_layer = nn.Linear(fc_hidden_size, num_classes)

    def forward(self, text):
        """Map token ids of shape (batch, num_tokens) to class logits."""
        embedded = self.embedder(text)                 # (batch, tokens, emb_dim)
        sequence_vector = self.rnn_encoder(embedded)   # (batch, dirs * hidden)
        hidden = torch.tanh(self.fc(sequence_vector))  # (batch, fc_hidden_size)
        return self.output_layer(hidden)               # (batch, num_classes)
| 45.752688 | 106 | 0.593655 |
ace58028df63e3d22f74c4758c5f96cfe50d918b | 227 | py | Python | pypy/interpreter/pyparser/test/samples/snippet_with_2.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 12 | 2016-01-06T07:10:28.000Z | 2021-05-13T23:02:02.000Z | pypy/interpreter/pyparser/test/samples/snippet_with_2.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | null | null | null | pypy/interpreter/pyparser/test/samples/snippet_with_2.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | 2 | 2016-07-29T07:09:50.000Z | 2016-10-16T08:50:26.000Z | # EXPECT: Module(None, Stmt([From('__future__', [('with_statement', None)]), With(Name('acontext'), Stmt([Pass()]), AssName('avariable', OP_ASSIGN))]))
from __future__ import with_statement
with acontext as avariable:
pass
| 37.833333 | 151 | 0.713656 |
ace580b59bc6ea347d0c240bf46803d22ed9bd73 | 297 | py | Python | conf/__init__.py | wooasink/densenet | 846bf43edc0c794410069d0ede83aab044ddab03 | [
"MIT"
] | 923 | 2020-01-11T06:36:53.000Z | 2022-03-31T00:26:57.000Z | conf/__init__.py | wooasink/densenet | 846bf43edc0c794410069d0ede83aab044ddab03 | [
"MIT"
] | 25 | 2020-02-27T08:35:46.000Z | 2022-01-25T08:54:19.000Z | conf/__init__.py | wooasink/densenet | 846bf43edc0c794410069d0ede83aab044ddab03 | [
"MIT"
] | 262 | 2020-01-02T02:19:40.000Z | 2022-03-23T04:56:16.000Z | """ dynamically load settings
author baiyu
"""
import conf.global_settings as settings
class Settings:
    """Snapshot of the UPPERCASE attributes of a settings module.

    Only names written entirely in upper case are treated as configuration
    values and copied onto this object; everything else is ignored.
    """

    def __init__(self, settings):
        config_names = (name for name in dir(settings) if name.isupper())
        for name in config_names:
            setattr(self, name, getattr(settings, name))
settings = Settings(settings) | 21.214286 | 60 | 0.659933 |
ace580ecb05bd0d1766554e5032129d6985e4875 | 2,202 | py | Python | build/plugins/ytest2.py | khanova/catboost | 5949565db3651f50b6fa247287d50ab6be718a5e | [
"Apache-2.0"
] | 1 | 2020-04-18T22:38:32.000Z | 2020-04-18T22:38:32.000Z | build/plugins/ytest2.py | khanova/catboost | 5949565db3651f50b6fa247287d50ab6be718a5e | [
"Apache-2.0"
] | null | null | null | build/plugins/ytest2.py | khanova/catboost | 5949565db3651f50b6fa247287d50ab6be718a5e | [
"Apache-2.0"
] | null | null | null | import os
import _common
def dir_stmts(unit, dir):
    """Register *dir* as a peer dependency and its 'tests' subdir as sources."""
    tests_subdir = os.sep.join([dir, 'tests'])
    unit.onpeerdir(dir)
    unit.onsrcdir(tests_subdir)
def pytest_base(unit, args):
    """Common setup for pytest-style test macros.

    args[0] is the related project directory, args[1] its name; the remaining
    arguments are forwarded to ytest_base unchanged.
    """
    prj_dir, prj_name = args[0], args[1]
    dir_stmts(unit, prj_dir)
    ytest_base(unit, prj_dir, prj_name, args[2:])
    unit.set(['ADDITIONAL_PATH', '--test-related-path ${ARCADIA_ROOT}/test'])
def ytest_base(unit, related_prj_dir, related_prj_name, args):
    """Populate the unit's test-related variables shared by all test macros.

    args is split into positional values (binary path, script-rel-path) and
    the keyword sections DEPENDS/DATA.
    """
    keywords = {"DEPENDS": -1, "DATA": -1}
    flat_args, spec_args = _common.sort_by_keywords(keywords, args)
    unit.set(['TEST-NAME', os.path.basename(flat_args[0])])
    unit.set(['SCRIPT-REL-PATH', flat_args[1]])
    unit.set(['SOURCE-FOLDER-PATH', related_prj_dir])
    unit.set(['BUILD-FOLDER-PATH', os.path.join('$B', related_prj_dir)])
    unit.set(['TESTED-BINARY-PATH', flat_args[0]])
    custom_deps = ' '.join(spec_args["DEPENDS"]) if "DEPENDS" in spec_args else ''
    unit.set(['CUSTOM-DEPENDENCIES', custom_deps])
    # Merge DATA given in the macro with any data accumulated on the unit.
    data_lst = spec_args.get('DATA', []) + (unit.get(['__test_data']) or '').split(' ')
    data_lst.sort()
    # NOTE(review): ''.split(' ') yields [''], so data_lst is never empty and
    # the else-branch below is unreachable -- confirm whether that is intended.
    data = '\"' + ';'.join(data_lst) + '\"' if data_lst else ''
    unit.set(['TEST-DATA', data])
    ya_root = unit.get('YA_ROOT')
    unit.set(['TEST_RUN_SCRIPT', 'devtools/{}/test/node/run_test.py'.format(ya_root)])
    # Directories whose changes should re-trigger the test node.
    related_dirs_list = ['{ARCADIA_ROOT}/devtools/${YA_ROOT}', '${ARCADIA_ROOT}/devtools/${YA_ROOT}', '$RELATED_TARGET_SRCDIR']
    related_dirs_value = []
    for rel in related_dirs_list:
        related_dirs_value.extend(['--test-related-path', rel])
    unit.set(['RELATED_DIRS', ' '.join(related_dirs_value)])
    unit.set(['TEST_KV', '${{kv;hide:"test_related_dirs {}"}}'.format(' '.join(related_dirs_list))])
def on_unittest(unit, *args):
    """Handler for the UNITTEST macro: configure the unittest runner script."""
    prj_name = args[0]
    # args[1] carries a 3-character prefix (presumably '$S/' -- confirm
    # against the macro expansion) before the arcadia-relative directory.
    prj_dir = args[1][3:]
    ya_root = unit.get('YA_ROOT')
    unit.set(['SPECIFIC_RUN_SCRIPT',
              'devtools/{}/test/scripts/run_ut.py'.format(ya_root)])
    unit.set(['TEST_TYPE', '${kv;hide:"test-type unittest"}'])
    ytest_base(unit, prj_dir, prj_name, args)
def on_ytest(unit, *args):
    """Handler for the YTEST macro; delegates to the shared pytest setup."""
    pytest_base(unit, args)
def on_py_test(unit, *args):
    """Handler for the PY_TEST macro; delegates to the shared pytest setup."""
    pytest_base(unit, args)
| 37.322034 | 127 | 0.669391 |
ace5810c7b6bdefdb28c9d17d920fc5b61e0db90 | 26,636 | py | Python | meshio/vtk/_vtk.py | ChristosT/meshio | de0ced0cc19fa40389764514328874dce8afdbec | [
"MIT"
] | null | null | null | meshio/vtk/_vtk.py | ChristosT/meshio | de0ced0cc19fa40389764514328874dce8afdbec | [
"MIT"
] | null | null | null | meshio/vtk/_vtk.py | ChristosT/meshio | de0ced0cc19fa40389764514328874dce8afdbec | [
"MIT"
] | null | null | null | """
I/O for VTK <https://www.vtk.org/wp-content/uploads/2015/04/file-formats.pdf>.
"""
import logging
from functools import reduce
import numpy as np
from ..__about__ import __version__
from .._common import (
_meshio_to_vtk_order,
_vtk_to_meshio_order,
meshio_to_vtk_type,
vtk_to_meshio_type,
)
from .._exceptions import ReadError, WriteError
from .._files import open_file
from .._helpers import register
from .._mesh import CellBlock, Mesh
# Number of points per VTK cell type, indexed by the VTK cell-type id.
# -1 marks variable-size or unsupported cell types.
vtk_type_to_numnodes = np.array(
    [
        0,  # empty
        1,  # vertex
        -1,  # poly_vertex
        2,  # line
        -1,  # poly_line
        3,  # triangle
        -1,  # triangle_strip
        -1,  # polygon
        -1,  # pixel
        4,  # quad
        4,  # tetra
        -1,  # voxel
        8,  # hexahedron
        6,  # wedge
        5,  # pyramid
        10,  # penta_prism
        12,  # hexa_prism
        -1,
        -1,
        -1,
        -1,
        3,  # line3
        6,  # triangle6
        8,  # quad8
        10,  # tetra10
        20,  # hexahedron20
        15,  # wedge15
        13,  # pyramid13
        9,  # quad9
        27,  # hexahedron27
        6,  # quad6
        12,  # wedge12
        18,  # wedge18
        24,  # hexahedron24
        7,  # triangle7
        4,  # line4
    ]
)
# These are all VTK data types.
# Map from a VTK scalar type name to the corresponding numpy dtype name.
vtk_to_numpy_dtype_name = {
    "bit": "bool",
    "unsigned_char": "uint8",
    "char": "int8",
    "unsigned_short": "uint16",
    "short": "int16",
    "unsigned_int": "uint32",
    "int": "int32",
    "unsigned_long": "uint64",
    "long": "int64",
    "float": "float32",
    "double": "float64",
    "vtktypeint32": "int32",  # vtk DataFile Version 5.1
    "vtktypeint64": "int64",  # vtk DataFile Version 5.1
    "vtkidtype": "int32",  # may be either 32-bit or 64-bit (VTK_USE_64BIT_IDS)
}
# Inverse mapping for writing; the "vtk*" aliases are excluded so each numpy
# dtype maps to exactly one canonical VTK type name.
numpy_to_vtk_dtype = {
    v: k for k, v in vtk_to_numpy_dtype_name.items() if "vtk" not in k
}
# supported vtk dataset types
vtk_dataset_types = [
    "UNSTRUCTURED_GRID",
    "STRUCTURED_POINTS",
    "STRUCTURED_GRID",
    "RECTILINEAR_GRID",
]
# additional infos per dataset type
vtk_dataset_infos = {
    "UNSTRUCTURED_GRID": [],
    "STRUCTURED_POINTS": [
        "DIMENSIONS",
        "ORIGIN",
        "SPACING",
        "ASPECT_RATIO",  # alternative for SPACING in version 1.0 and 2.0
    ],
    "STRUCTURED_GRID": ["DIMENSIONS"],
    "RECTILINEAR_GRID": [
        "DIMENSIONS",
        "X_COORDINATES",
        "Y_COORDINATES",
        "Z_COORDINATES",
    ],
}
# all main sections in vtk
vtk_sections = [
    "METADATA",
    "DATASET",
    "POINTS",
    "CELLS",
    "CELL_TYPES",
    "POINT_DATA",
    "CELL_DATA",
    "LOOKUP_TABLE",
    "COLOR_SCALARS",
]
class Info:
    """Mutable parse state accumulated while reading a legacy VTK file."""

    def __init__(self):
        # geometry / topology
        self.points = None
        self.c = None
        self.ct = None
        # data arrays
        self.field_data = {}
        self.cell_data_raw = {}
        self.point_data = {}
        self.dataset = {}
        # parser bookkeeping
        self.active = None
        self.is_ascii = False
        self.split = []
        self.num_items = 0
        # POINT_DATA and CELL_DATA blocks do not announce how many
        # SCALARS+LOOKUP_TABLE sub-tables follow, so SCALARS has to be parsed
        # like a first-class section.  `section` remembers which
        # POINT/CELL_DATA block such an entry belongs to.
        self.section = None
def read(filename):
    """Read a legacy VTK file and return its content as a meshio Mesh."""
    with open_file(filename, "rb") as f:
        out = read_buffer(f)
    return out
def read_buffer(f):
    """Parse an open legacy-VTK file object into a Mesh."""
    # initialize output data
    info = Info()
    # skip header and title
    f.readline()
    f.readline()
    data_type = f.readline().decode("utf-8").strip().upper()
    if data_type not in ["ASCII", "BINARY"]:
        raise ReadError(f"Unknown VTK data type '{data_type}'.")
    info.is_ascii = data_type == "ASCII"
    # Dispatch on section keywords until EOF; unknown keywords are treated as
    # sub-keywords of the currently active section.
    while True:
        line = f.readline().decode("utf-8")
        if not line:
            # EOF
            break
        line = line.strip()
        if len(line) == 0:
            continue
        info.split = line.split()
        info.section = info.split[0].upper()
        if info.section in vtk_sections:
            _read_section(f, info)
        else:
            _read_subsection(f, info)
    # For structured datasets this also synthesizes points/cells.
    _check_mesh(info)
    cells, cell_data = translate_cells(info.c, info.ct, info.cell_data_raw)
    return Mesh(
        info.points,
        cells,
        point_data=info.point_data,
        cell_data=cell_data,
        field_data=info.field_data,
    )
def _read_section(f, info):
    """Consume the payload of a top-level section announced in info.split."""
    if info.section == "METADATA":
        _skip_meta(f)
    elif info.section == "DATASET":
        info.active = "DATASET"
        info.dataset["type"] = info.split[1].upper()
        if info.dataset["type"] not in vtk_dataset_types:
            raise ReadError(
                "Only VTK '{}' supported (not {}).".format(
                    "', '".join(vtk_dataset_types), info.dataset["type"]
                )
            )
    elif info.section == "POINTS":
        info.active = "POINTS"
        info.num_points = int(info.split[1])
        data_type = info.split[2].lower()
        info.points = _read_points(f, data_type, info.is_ascii, info.num_points)
    elif info.section == "CELLS":
        info.active = "CELLS"
        # Peek one line to detect the 5.1-style OFFSETS/CONNECTIVITY layout.
        last_pos = f.tell()
        try:
            line = f.readline().decode("utf-8")
        except UnicodeDecodeError:
            line = ""
        if "OFFSETS" in line:
            # vtk DataFile Version 5.1 - appearing in Paraview 5.8.1 outputs
            # No specification found for this file format.
            # See the question on ParaView Discourse Forum:
            # <https://discourse.paraview.org/t/specification-of-vtk-datafile-version-5-1/5127>.
            info.num_offsets = int(info.split[1])
            info.num_items = int(info.split[2])
            dtype = np.dtype(vtk_to_numpy_dtype_name[line.split()[1]])
            offsets = _read_cells(f, info.is_ascii, info.num_offsets, dtype)
            line = f.readline().decode("utf-8")
            assert "CONNECTIVITY" in line
            dtype = np.dtype(vtk_to_numpy_dtype_name[line.split()[1]])
            connectivity = _read_cells(f, info.is_ascii, info.num_items, dtype)
            info.c = (offsets, connectivity)
        else:
            # pre-5.1 layout: rewind, the peeked line is already cell data
            f.seek(last_pos)
            info.num_items = int(info.split[2])
            info.c = _read_cells(f, info.is_ascii, info.num_items)
    elif info.section == "CELL_TYPES":
        info.active = "CELL_TYPES"
        info.num_items = int(info.split[1])
        info.ct = _read_cell_types(f, info.is_ascii, info.num_items)
    elif info.section == "POINT_DATA":
        info.active = "POINT_DATA"
        info.num_items = int(info.split[1])
    elif info.section == "CELL_DATA":
        info.active = "CELL_DATA"
        info.num_items = int(info.split[1])
    elif info.section == "LOOKUP_TABLE":
        # Table entries are RGBA quadruples; they are read and discarded.
        info.num_items = int(info.split[2])
        np.fromfile(f, count=info.num_items * 4, sep=" ", dtype=float)
        # rgba = data.reshape((info.num_items, 4))
    elif info.section == "COLOR_SCALARS":
        # Color values are read and discarded.
        nValues = int(info.split[2])
        # re-use num_items from active POINT/CELL_DATA
        num_items = info.num_items
        dtype = np.ubyte
        if info.is_ascii:
            dtype = float
        # NOTE(review): in the ASCII case this fromfile call has no sep=" ",
        # so it reads in binary mode -- confirm whether ASCII COLOR_SCALARS
        # files are parsed correctly.
        np.fromfile(f, count=num_items * nValues, dtype=dtype)
def _read_subsection(f, info):
    """Read a keyword that appears inside the currently active section."""
    # Route the parsed values to the dict matching the active section.
    if info.active == "POINT_DATA":
        d = info.point_data
    elif info.active == "CELL_DATA":
        d = info.cell_data_raw
    elif info.active == "DATASET":
        d = info.dataset
    else:
        d = info.field_data
    if info.section in vtk_dataset_infos[info.dataset["type"]]:
        if info.section[1:] == "_COORDINATES":
            # X_/Y_/Z_COORDINATES of a RECTILINEAR_GRID
            info.num_points = int(info.split[1])
            data_type = info.split[2].lower()
            d[info.section] = _read_coords(f, data_type, info.is_ascii, info.num_points)
        else:
            # DIMENSIONS / ORIGIN / SPACING / ASPECT_RATIO: three inline values
            if info.section == "DIMENSIONS":
                d[info.section] = list(map(int, info.split[1:]))
            else:
                d[info.section] = list(map(float, info.split[1:]))
            if len(d[info.section]) != 3:
                raise ReadError(
                    "Wrong number of info in section '{}'. Need 3, got {}.".format(
                        info.section, len(d[info.section])
                    )
                )
    elif info.section == "SCALARS":
        d.update(_read_scalar_field(f, info.num_items, info.split, info.is_ascii))
    elif info.section == "VECTORS":
        d.update(_read_field(f, info.num_items, info.split, [3], info.is_ascii))
    elif info.section == "TENSORS":
        d.update(_read_field(f, info.num_items, info.split, [3, 3], info.is_ascii))
    elif info.section == "FIELD":
        d.update(_read_fields(f, int(info.split[2]), info.is_ascii))
    else:
        raise ReadError(f"Unknown section '{info.section}'.")
def _check_mesh(info):
    """Validate the parsed data; synthesize points/cells for structured grids."""
    if info.dataset["type"] == "UNSTRUCTURED_GRID":
        if info.c is None:
            raise ReadError("Required section CELLS not found.")
        if info.ct is None:
            raise ReadError("Required section CELL_TYPES not found.")
    elif info.dataset["type"] == "STRUCTURED_POINTS":
        dim = info.dataset["DIMENSIONS"]
        ori = info.dataset["ORIGIN"]
        # ASPECT_RATIO is the pre-3.0 name for SPACING.
        spa = (
            info.dataset["SPACING"]
            if "SPACING" in info.dataset
            else info.dataset["ASPECT_RATIO"]
        )
        # Uniform axes derived from origin + spacing.
        axis = [
            np.linspace(ori[i], ori[i] + (dim[i] - 1.0) * spa[i], dim[i])
            for i in range(3)
        ]
        info.points = _generate_points(axis)
        info.c, info.ct = _generate_cells(dim=info.dataset["DIMENSIONS"])
    elif info.dataset["type"] == "RECTILINEAR_GRID":
        axis = [
            info.dataset["X_COORDINATES"],
            info.dataset["Y_COORDINATES"],
            info.dataset["Z_COORDINATES"],
        ]
        info.points = _generate_points(axis)
        info.c, info.ct = _generate_cells(dim=info.dataset["DIMENSIONS"])
    elif info.dataset["type"] == "STRUCTURED_GRID":
        # Points were read explicitly; only the implicit cells are generated.
        info.c, info.ct = _generate_cells(dim=info.dataset["DIMENSIONS"])
def _generate_cells(dim):
ele_dim = [d - 1 for d in dim if d > 1]
# TODO use math.prod when requiring Python 3.8+? this would save the int conversion
# <https://github.com/microsoft/pyright/issues/1226>
ele_no = int(np.prod(ele_dim))
spatial_dim = len(ele_dim)
if spatial_dim == 1:
# cells are lines in 1D
cells = np.empty((ele_no, 3), dtype=int)
cells[:, 0] = 2
cells[:, 1] = np.arange(ele_no, dtype=int)
cells[:, 2] = cells[:, 1] + 1
cell_types = np.full(ele_no, 3, dtype=int)
elif spatial_dim == 2:
# cells are quad in 2D
cells = np.empty((ele_no, 5), dtype=int)
cells[:, 0] = 4
cells[:, 1] = np.arange(0, ele_no, dtype=int)
cells[:, 1] += np.arange(0, ele_no, dtype=int) // ele_dim[0]
cells[:, 2] = cells[:, 1] + 1
cells[:, 3] = cells[:, 1] + 2 + ele_dim[0]
cells[:, 4] = cells[:, 3] - 1
cell_types = np.full(ele_no, 9, dtype=int)
else:
# cells are hex in 3D
cells = np.empty((ele_no, 9), dtype=int)
cells[:, 0] = 8
cells[:, 1] = np.arange(ele_no)
cells[:, 1] += (ele_dim[0] + ele_dim[1] + 1) * (
np.arange(ele_no) // (ele_dim[0] * ele_dim[1])
)
cells[:, 1] += (np.arange(ele_no) % (ele_dim[0] * ele_dim[1])) // ele_dim[0]
cells[:, 2] = cells[:, 1] + 1
cells[:, 3] = cells[:, 1] + 2 + ele_dim[0]
cells[:, 4] = cells[:, 3] - 1
cells[:, 5] = cells[:, 1] + (1 + ele_dim[0]) * (1 + ele_dim[1])
cells[:, 6] = cells[:, 5] + 1
cells[:, 7] = cells[:, 5] + 2 + ele_dim[0]
cells[:, 8] = cells[:, 7] - 1
cell_types = np.full(ele_no, 12, dtype=int)
return cells.reshape(-1), cell_types
def _generate_points(axis):
x_dim = len(axis[0])
y_dim = len(axis[1])
z_dim = len(axis[2])
pnt_no = x_dim * y_dim * z_dim
x_id, y_id, z_id = np.mgrid[0:x_dim, 0:y_dim, 0:z_dim]
points = np.empty((pnt_no, 3), dtype=axis[0].dtype)
# VTK sorts points and cells in Fortran order
points[:, 0] = axis[0][x_id.reshape(-1, order="F")]
points[:, 1] = axis[1][y_id.reshape(-1, order="F")]
points[:, 2] = axis[2][z_id.reshape(-1, order="F")]
return points
def _read_coords(f, data_type, is_ascii, num_points):
    """Read one coordinate array (X_/Y_/Z_COORDINATES of a rectilinear grid)."""
    dtype = np.dtype(vtk_to_numpy_dtype_name[data_type])
    if is_ascii:
        coords = np.fromfile(f, count=num_points, sep=" ", dtype=dtype)
    else:
        # Binary data is big endian, see
        # <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
        dtype = dtype.newbyteorder(">")
        coords = np.fromfile(f, count=num_points, dtype=dtype)
        # A binary block must be terminated by exactly one newline.
        line = f.readline().decode("utf-8")
        if line != "\n":
            raise ReadError()
    return coords
def _read_points(f, data_type, is_ascii, num_points):
    """Read the POINTS section: num_points x 3 coordinates."""
    dtype = np.dtype(vtk_to_numpy_dtype_name[data_type])
    if is_ascii:
        points = np.fromfile(f, count=num_points * 3, sep=" ", dtype=dtype)
    else:
        # Binary data is big endian, see
        # <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
        dtype = dtype.newbyteorder(">")
        points = np.fromfile(f, count=num_points * 3, dtype=dtype)
        # A binary block must be terminated by exactly one newline.
        line = f.readline().decode("utf-8")
        if line != "\n":
            raise ReadError()
    return points.reshape((num_points, 3))
def _read_cells(f, is_ascii, num_items, dtype=np.dtype("int32")):
if is_ascii:
c = np.fromfile(f, count=num_items, sep=" ", dtype=dtype)
else:
dtype = dtype.newbyteorder(">")
c = np.fromfile(f, count=num_items, dtype=dtype)
line = f.readline().decode("utf-8")
if line != "\n":
raise ReadError()
return c
def _read_cell_types(f, is_ascii, num_items):
if is_ascii:
ct = np.fromfile(f, count=int(num_items), sep=" ", dtype=int)
else:
# binary
ct = np.fromfile(f, count=int(num_items), dtype=">i4")
line = f.readline().decode("utf-8")
# Sometimes, there's no newline at the end
if line.strip() != "":
raise ReadError()
return ct
def _read_scalar_field(f, num_data, split, is_ascii):
    """Read a SCALARS sub-table (with its LOOKUP_TABLE line) into a dict."""
    data_name = split[1]
    data_type = split[2].lower()
    try:
        num_comp = int(split[3])
    except IndexError:
        # The component count is optional and defaults to 1.
        num_comp = 1
    # The standard says:
    # > The parameter numComp must range between (1,4) inclusive; [...]
    if not (0 < num_comp < 5):
        raise ReadError("The parameter numComp must range between (1,4) inclusive")
    dtype = np.dtype(vtk_to_numpy_dtype_name[data_type])
    # A LOOKUP_TABLE line must directly follow the SCALARS line.
    lt, _ = f.readline().decode("utf-8").split()
    if lt.upper() != "LOOKUP_TABLE":
        raise ReadError()
    if is_ascii:
        data = np.fromfile(f, count=num_data * num_comp, sep=" ", dtype=dtype)
    else:
        # Binary data is big endian, see
        # <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
        dtype = dtype.newbyteorder(">")
        data = np.fromfile(f, count=num_data * num_comp, dtype=dtype)
        line = f.readline().decode("utf-8")
        if line != "\n":
            raise ReadError()
    data = data.reshape(-1, num_comp)
    return {data_name: data}
def _read_field(f, num_data, split, shape, is_ascii):
    """Read a VECTORS/TENSORS table; `shape` is the per-item shape."""
    data_name = split[1]
    data_type = split[2].lower()
    dtype = np.dtype(vtk_to_numpy_dtype_name[data_type])
    # prod()
    # <https://stackoverflow.com/q/2104782/353337>
    k = reduce((lambda x, y: x * y), shape)
    if is_ascii:
        data = np.fromfile(f, count=k * num_data, sep=" ", dtype=dtype)
    else:
        # Binary data is big endian, see
        # <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
        dtype = dtype.newbyteorder(">")
        data = np.fromfile(f, count=k * num_data, dtype=dtype)
        line = f.readline().decode("utf-8")
        if line != "\n":
            raise ReadError()
    data = data.reshape(-1, *shape)
    return {data_name: data}
def _read_fields(f, num_fields, is_ascii):
    """Read a FIELD section containing `num_fields` named arrays."""
    data = {}
    for _ in range(num_fields):
        line = f.readline().decode("utf-8").split()
        if line[0] == "METADATA":
            # Skip an interleaved METADATA block and re-read the header line.
            _skip_meta(f)
            name, shape0, shape1, data_type = f.readline().decode("utf-8").split()
        else:
            name, shape0, shape1, data_type = line
        shape0 = int(shape0)
        shape1 = int(shape1)
        dtype = np.dtype(vtk_to_numpy_dtype_name[data_type.lower()])
        if is_ascii:
            dat = np.fromfile(f, count=shape0 * shape1, sep=" ", dtype=dtype)
        else:
            # Binary data is big endian, see
            # <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
            dtype = dtype.newbyteorder(">")
            dat = np.fromfile(f, count=shape0 * shape1, dtype=dtype)
            line = f.readline().decode("utf-8")
            if line != "\n":
                raise ReadError()
        # Multi-component fields are reshaped to (num_tuples, num_components).
        if shape0 != 1:
            dat = dat.reshape((shape1, shape0))
        data[name] = dat
    return data
def _skip_meta(f):
# skip possible metadata
# https://vtk.org/doc/nightly/html/IOLegacyInformationFormat.html
while True:
line = f.readline().decode("utf-8").strip()
if not line:
# end of metadata is a blank line
break
def translate_cells(data, types, cell_data_raw):
    """Convert raw VTK connectivity into meshio CellBlocks plus cell data.

    https://www.vtk.org/doc/nightly/html/vtkCellType_8h_source.html
    Translate it into the cells array.
    `data` is a one-dimensional vector with
    (num_points0, p0, p1, ... ,pk, numpoints1, p10, p11, ..., p1k, ...
    or a tuple with (offsets, connectivity)
    """
    has_polygon = np.any(types == meshio_to_vtk_type["polygon"])
    cells = []
    cell_data = {}
    if has_polygon:
        numnodes = np.empty(len(types), dtype=int)
        # If some polygons are in the VTK file, loop over the cells
        numcells = len(types)
        offsets = np.empty(len(types), dtype=int)
        offsets[0] = 0
        # Offsets must be accumulated cell by cell since node counts vary.
        for idx in range(numcells - 1):
            numnodes[idx] = data[offsets[idx]]
            offsets[idx + 1] = offsets[idx] + numnodes[idx] + 1
        idx = numcells - 1
        numnodes[idx] = data[offsets[idx]]
        if not np.all(numnodes == data[offsets]):
            raise ReadError()
        # TODO: cell_data
        for idx, vtk_cell_type in enumerate(types):
            start = offsets[idx] + 1
            cell_idx = start + _vtk_to_meshio_order(
                vtk_cell_type, numnodes[idx], offsets.dtype
            )
            cell = data[cell_idx]
            cell_type = vtk_to_meshio_type[vtk_cell_type]
            # Polygons are typed by their node count, e.g. "polygon5".
            if cell_type == "polygon":
                cell_type += str(data[offsets[idx]])
            # Consecutive cells of the same type are merged into one block.
            if len(cells) > 0 and cells[-1].type == cell_type:
                cells[-1].data.append(cell)
            else:
                cells.append(CellBlock(cell_type, [cell]))
        # convert data to numpy arrays
        for k, c in enumerate(cells):
            cells[k] = CellBlock(c.type, np.array(c.data))
    else:
        # Deduct offsets from the cell types. This is much faster than manually going
        # through the data array. Slight disadvantage: This doesn't work for cells with
        # a custom number of points.
        numnodes = vtk_type_to_numnodes[types]
        if not np.all(numnodes > 0):
            raise ReadError("File contains cells that meshio cannot handle.")
        if isinstance(data, tuple):
            # 5.1-style (offsets, connectivity): no node-count prefixes.
            offsets, conn = data
            if not np.all(numnodes == np.diff(offsets)):
                raise ReadError()
            idx0 = 0
        else:
            # Flat layout: each cell is prefixed by its node count.
            offsets = np.cumsum(numnodes + 1) - (numnodes + 1)
            if not np.all(numnodes == data[offsets]):
                raise ReadError()
            idx0 = 1
            conn = data
        # Boundaries of runs of identical cell types.
        b = np.concatenate(
            [[0], np.where(types[:-1] != types[1:])[0] + 1, [len(types)]]
        )
        for start, end in zip(b[:-1], b[1:]):
            if start == end:
                continue
            meshio_type = vtk_to_meshio_type[types[start]]
            n = numnodes[start]
            cell_idx = idx0 + _vtk_to_meshio_order(types[start], n, dtype=offsets.dtype)
            indices = np.add.outer(offsets[start:end], cell_idx)
            cells.append(CellBlock(meshio_type, conn[indices]))
            for name, d in cell_data_raw.items():
                if name not in cell_data:
                    cell_data[name] = []
                cell_data[name].append(d[start:end])
    return cells, cell_data
def write(filename, mesh, binary=True):
    """Write `mesh` to `filename` as a legacy VTK UNSTRUCTURED_GRID file."""
    def pad(array):
        # Append a zero third component to 2D vectors/points.
        return np.pad(array, ((0, 0), (0, 1)), "constant")
    if mesh.points.shape[1] == 2:
        logging.warning(
            "VTK requires 3D points, but 2D points given. "
            "Appending 0 third component."
        )
        points = pad(mesh.points)
    else:
        points = mesh.points
    if mesh.point_data:
        for name, values in mesh.point_data.items():
            if len(values.shape) == 2 and values.shape[1] == 2:
                logging.warning(
                    "VTK requires 3D vectors, but 2D vectors given. "
                    "Appending 0 third component to {}.".format(name)
                )
                mesh.point_data[name] = pad(values)
    for name, data in mesh.cell_data.items():
        for k, values in enumerate(data):
            if len(values.shape) == 2 and values.shape[1] == 2:
                logging.warning(
                    "VTK requires 3D vectors, but 2D vectors given. "
                    "Appending 0 third component to {}.".format(name)
                )
                data[k] = pad(data[k])
    if not binary:
        logging.warning("VTK ASCII files are only meant for debugging.")
    with open_file(filename, "wb") as f:
        f.write(b"# vtk DataFile Version 4.2\n")
        f.write(f"written by meshio v{__version__}\n".encode("utf-8"))
        f.write(("BINARY\n" if binary else "ASCII\n").encode("utf-8"))
        f.write(b"DATASET UNSTRUCTURED_GRID\n")
        # write points and cells
        _write_points(f, points, binary)
        _write_cells(f, mesh.cells, binary)
        # write point data
        if mesh.point_data:
            num_points = mesh.points.shape[0]
            f.write(f"POINT_DATA {num_points}\n".encode("utf-8"))
            _write_field_data(f, mesh.point_data, binary)
        # write cell data
        if mesh.cell_data:
            total_num_cells = sum(len(c.data) for c in mesh.cells)
            f.write(f"CELL_DATA {total_num_cells}\n".encode("utf-8"))
            _write_field_data(f, mesh.cell_data, binary)
def _write_points(f, points, binary):
    """Write the POINTS section: header line plus coordinate data."""
    vtk_dtype = numpy_to_vtk_dtype[points.dtype.name]
    header = "POINTS {} {}\n".format(len(points), vtk_dtype)
    f.write(header.encode("utf-8"))
    if binary:
        # Binary data must be big endian, see
        # <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
        # if points.dtype.byteorder == "<" or (
        #     points.dtype.byteorder == "=" and sys.byteorder == "little"
        # ):
        #     logging.warn("Converting to new byte order")
        big_endian = points.astype(points.dtype.newbyteorder(">"))
        big_endian.tofile(f, sep="")
    else:
        # ascii
        points.tofile(f, sep=" ")
    f.write(b"\n")
def _write_cells(f, cells, binary):
    """Write the CELLS and CELL_TYPES sections for all cell blocks."""
    total_num_cells = sum([len(c.data) for c in cells])
    total_num_idx = sum([c.data.size for c in cells])
    # For each cell, the number of nodes is stored
    total_num_idx += total_num_cells
    f.write(f"CELLS {total_num_cells} {total_num_idx}\n".encode("utf-8"))
    if binary:
        for c in cells:
            n = c.data.shape[1]
            cell_idx = _meshio_to_vtk_order(c.type, n)
            dtype = np.dtype(">i4")
            # One must force endianness here:
            # <https://github.com/numpy/numpy/issues/15088>
            np.column_stack(
                [
                    np.full(c.data.shape[0], n, dtype=dtype),
                    c.data[:, cell_idx].astype(dtype),
                ],
            ).astype(dtype).tofile(f, sep="")
        f.write(b"\n")
    else:
        # ascii
        for c in cells:
            n = c.data.shape[1]
            cell_idx = _meshio_to_vtk_order(c.type, n)
            # prepend a column with the value n
            np.column_stack(
                [
                    np.full(c.data.shape[0], n, dtype=c.data.dtype),
                    c.data[:, cell_idx],
                ]
            ).tofile(f, sep="\n")
            f.write(b"\n")
    # write cell types
    f.write(f"CELL_TYPES {total_num_cells}\n".encode("utf-8"))
    if binary:
        for c in cells:
            # Per-size polygon types ("polygon5", ...) all map to "polygon".
            key_ = c.type[:7] if c.type[:7] == "polygon" else c.type
            vtk_type = meshio_to_vtk_type[key_]
            np.full(len(c.data), vtk_type, dtype=np.dtype(">i4")).tofile(f, sep="")
        f.write(b"\n")
    else:
        # ascii
        for c in cells:
            key_ = c.type[:7] if c.type[:7] == "polygon" else c.type
            np.full(len(c.data), meshio_to_vtk_type[key_]).tofile(f, sep="\n")
            f.write(b"\n")
def _write_field_data(f, data, binary):
    """Write point- or cell-data arrays as one FIELD section."""
    f.write((f"FIELD FieldData {len(data)}\n").encode("utf-8"))
    for name, values in data.items():
        # Cell data arrives as a list of per-block arrays; flatten it.
        if isinstance(values, list):
            values = np.concatenate(values)
        if len(values.shape) == 1:
            num_tuples = values.shape[0]
            num_components = 1
        else:
            num_tuples = values.shape[0]
            num_components = values.shape[1]
        # Field names are whitespace-separated tokens in the header line.
        if " " in name:
            raise WriteError(f"VTK doesn't support spaces in field names ('{name}').")
        f.write(
            (
                "{} {} {} {}\n".format(
                    name,
                    num_components,
                    num_tuples,
                    numpy_to_vtk_dtype[values.dtype.name],
                )
            ).encode("utf-8")
        )
        if binary:
            # Binary data must be big endian.
            values.astype(values.dtype.newbyteorder(">")).tofile(f, sep="")
        else:
            # ascii
            values.tofile(f, sep=" ")
            # np.savetxt(f, points)
        f.write(b"\n")
register("vtk", [".vtk"], read, {"vtk": write})
| 33.21197 | 96 | 0.559994 |
ace58134aba802364f9734f88003294489463a3b | 3,035 | py | Python | social_core/backends/microsoft.py | shnaqawi/social-core | 402e15bd2854ade0b8f0d19663966473f0e1c6ff | [
"BSD-3-Clause"
] | 3 | 2020-10-03T14:37:40.000Z | 2021-03-28T17:21:44.000Z | social_core/backends/microsoft.py | shnaqawi/social-core | 402e15bd2854ade0b8f0d19663966473f0e1c6ff | [
"BSD-3-Clause"
] | 13 | 2020-03-24T17:53:51.000Z | 2022-02-10T20:01:14.000Z | virtual/lib/python3.6/site-packages/social_core/backends/microsoft.py | dennismwaniki67/awards | 80ed10541f5f751aee5f8285ab1ad54cfecba95f | [
"MIT"
] | 3 | 2020-07-27T16:44:41.000Z | 2020-09-03T15:26:22.000Z | import time
from jwt import DecodeError, ExpiredSignature
from ..exceptions import AuthTokenError
from .oauth import BaseOAuth2
"""
OAuth2 Backend to work with microsoft graph.
"""
class MicrosoftOAuth2(BaseOAuth2):
    """Microsoft Graph OAuth2 authentication backend."""
    name = 'microsoft-graph'
    SCOPE_SEPARATOR = ' '
    AUTHORIZATION_URL = \
        'https://login.microsoftonline.com/common/oauth2/v2.0/authorize'
    ACCESS_TOKEN_URL = \
        'https://login.microsoftonline.com/common/oauth2/v2.0/token'
    ACCESS_TOKEN_METHOD = 'POST'
    REDIRECT_STATE = False
    DEFAULT_SCOPE = ['User.Read']
    def auth_complete(self, *args, **kwargs):
        """Completes login process, must return user instance"""
        self.process_error(self.data)
        state = self.validate_state()
        # Exchange the authorization code for an access token.
        response = self.request_access_token(
            self.access_token_url(),
            data=self.auth_complete_params(state),
            headers=self.auth_headers(),
            auth=self.auth_complete_credentials(),
            method=self.ACCESS_TOKEN_METHOD
        )
        self.process_error(response)
        return self.do_auth(response['access_token'], response=response,
                            *args, **kwargs)
    def get_user_id(self, details, response):
        """Use user account id as unique id"""
        return response.get('id')
    def get_user_details(self, response):
        """Return user details from Microsoft online account"""
        email = response.get('mail')
        username = response.get('userPrincipalName')
        # userPrincipalName is address-shaped; use its local part as the
        # username and fall back to it when no 'mail' field is present.
        if '@' in username:
            if not email:
                email = username
            username = username.split('@', 1)[0]
        return {'username': username,
                'email': email,
                'fullname': response.get('displayName', ''),
                'first_name': response.get('givenName', ''),
                'last_name': response.get('surname', '')}
    def user_data(self, access_token, *args, **kwargs):
        """Return user data by querying Microsoft service"""
        try:
            return self.get_json(
                'https://graph.microsoft.com/v1.0/me',
                headers={
                    'Content-Type': 'application/x-www-form-urlencoded',
                    'Accept': 'application/json',
                    'Authorization': 'Bearer ' + access_token
                },
                method='GET'
            )
        except (DecodeError, ExpiredSignature) as error:
            raise AuthTokenError(self, error)
    def get_auth_token(self, user_id):
        """Return the access token for the given user, after ensuring that it
        has not expired, or refreshing it if so."""
        user = self.get_user(user_id=user_id)
        access_token = user.social_user.access_token
        expires_on = user.social_user.extra_data['expires_on']
        if expires_on <= int(time.time()):
            # NOTE(review): the refreshed token is returned but never persisted
            # back to user.social_user -- confirm whether callers rely on that.
            new_token_response = self.refresh_token(token=access_token)
            access_token = new_token_response['access_token']
        return access_token
| 34.885057 | 77 | 0.603954 |
ace581981466d776c734be3a5cea631c132cdd0b | 420 | py | Python | main/celery.py | Joost-dm/jooster | 04f3fa979b689b61a4aeae8a5309a09277404fd6 | [
"MIT"
] | 1 | 2020-06-17T02:51:35.000Z | 2020-06-17T02:51:35.000Z | main/celery.py | Joost-dm/jooster | 04f3fa979b689b61a4aeae8a5309a09277404fd6 | [
"MIT"
] | 7 | 2020-06-06T18:32:37.000Z | 2022-03-12T00:33:42.000Z | main/celery.py | Joost-dm/jooster | 04f3fa979b689b61a4aeae8a5309a09277404fd6 | [
"MIT"
] | null | null | null | """ Celery. """
from celery import Celery
import os
from celery.schedules import crontab
# Make sure Django settings are importable before Celery initializes.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'main.settings')
# Celery application; configuration is taken from Django settings entries
# prefixed with "CELERY_".
app = Celery('main')
app.config_from_object('django.conf:settings', namespace='CELERY')
# Auto-discover tasks.py modules in the installed Django apps.
app.autodiscover_tasks()
# Beat schedule: run main.tasks.check_activity once every minute.
app.conf.beat_schedule = {
    'check_activity': {
        'task': 'main.tasks.check_activity',
        'schedule': crontab(minute='*/1')
    }
}
| 19.090909 | 66 | 0.695238 |
ace584220035595e9bfd1d3e874f1a388f61f730 | 7,285 | py | Python | tools/oopif/iframe_server.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | tools/oopif/iframe_server.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | tools/oopif/iframe_server.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test server for generating nested iframes with different sites.
Very simple python server for creating a bunch of iframes. The page generation
is randomized based on query parameters. See the __init__ function of the
Params class for a description of the parameters.
This server relies on gevent. On Ubuntu, install it via:
sudo apt-get install python-gevent
Run the server using
python iframe_server.py
To use the server, run chrome as follows:
google-chrome --host-resolver-rules='map *.invalid 127.0.0.1'
Change 127.0.0.1 to be the IP of the machine this server is running on. Then
in this chrome instance, navigate to any domain in .invalid
(eg., http://1.invalid:8090) to run this test.
"""
import colorsys
import copy
import random
import urllib
import urlparse
from gevent import pywsgi # pylint: disable=F0401
MAIN_PAGE = """
<html>
<head>
<style>
body {
background-color: %(color)s;
}
</style>
</head>
<body>
<center>
<h1><a href="%(url)s">%(site)s</a></h1>
<p><small>%(url)s</small>
</center>
<br />
%(iframe_html)s
</body>
</html>
"""
IFRAME_FRAGMENT = """
<iframe src="%(src)s" width="%(width)s" height="%(height)s">
</iframe>
"""
class Params(object):
  """Simple object for holding parameters"""

  def __init__(self, query_dict):
    def _first(key, default):
      # Query values arrive as lists; take the first, or the default.
      return query_dict.get(key, [default])[0]

    # Basic params:
    #   nframes is how many frames per page.
    #   nsites is how many sites to randomly choose out of.
    #   depth is how deep to make the frame tree.
    #   pattern specifies how the sites are laid out per depth. An empty string
    #       uses a random N = [0, nsites] each time to generate a N.invalid
    #       URL. Otherwise specify with single letters like 'ABCA' and frame
    #       A.invalid will embed B.invalid will embed C.invalid will embed A.
    #   jitter is the amount of randomness applied to nframes and nsites.
    #       Should be from [0,1]. 0.0 means no jitter.
    #   size_jitter is like jitter, but for width and height.
    self.nframes = int(_first('nframes', 4))
    self.nsites = int(_first('nsites', 10))
    self.depth = int(_first('depth', 1))
    self.jitter = float(_first('jitter', 0))
    self.size_jitter = float(_first('size_jitter', 0.5))
    self.pattern = _first('pattern', '')
    self.pattern_pos = int(_first('pattern_pos', 0))
    # Size parameters. Values are percentages.
    self.width = int(_first('width', 60))
    self.height = int(_first('height', 50))
    # Pass the random seed so our pages are reproduceable.
    self.seed = int(_first('seed', random.randint(0, 2147483647)))
def get_site(urlpath):
  """Approximate the "site" (scheme + registered domain) of a parsed URL.

  Site is defined as registered domain name + scheme. The registered
  domain is approximated by keeping the last two DNS labels, which
  breaks for suffixes like co.uk.
  """
  hostname = urlpath.netloc.partition(':')[0]
  registered = '.'.join(hostname.split('.')[-2:])
  return '%s://%s' % (urlpath.scheme, registered)
def generate_host(rand, params):
  """Pick the host for an iframe's src.

  Uses the .invalid TLD so DNS can never resolve to a real address.
  With a pattern configured, consumes the next letter cyclically and
  advances params.pattern_pos; otherwise draws a random site number
  bounded by the (jittered) number of sites.
  """
  if not params.pattern:
    upper = apply_jitter(rand, params.jitter, params.nsites)
    return '%s.invalid' % rand.randint(1, upper)
  label = params.pattern[params.pattern_pos]
  params.pattern_pos = (params.pattern_pos + 1) % len(params.pattern)
  return '%s.invalid' % label
def apply_jitter(rand, jitter, n):
  """Randomly shrink n by up to a jitter fraction; result is always >= 1.

  A jitter at or below 0.001 is treated as "no jitter" and returns n
  unchanged (without touching rand).
  """
  if jitter <= 0.001:
    return n
  reduced = n - int(n * rand.uniform(0, jitter))
  return reduced if reduced else 1
def get_color_for_site(site):
  """Map a site string to a stable (and pretty-ish) CSS rgb() color.

  The constants below were chosen empirically to look "pretty." HSV is
  used because it is easier to steer than RGB: capping hue at 0.6 gives
  a good range of colors, and keeping saturation and value above 0.5
  means the colors won't be too washed out.
  """
  val = hash(site)
  hue = (val % 100)/100.0 * 0.6
  sat = 1.0 - (int(val/100) % 100)/200.
  value = 1.0 - (int(val/10000) % 100)/200.0
  red, green, blue = colorsys.hsv_to_rgb(hue, sat, value)
  return 'rgb(%d, %d, %d)' % (int(red * 255), int(green * 255), int(blue * 255))
def make_src(scheme, netloc, path, params):
  """Constructs the src url that will recreate the given params.

  Args:
    scheme: URL scheme string (e.g. 'http').
    netloc: host[:port] part of the URL.
    path: URL path; a lone '/' is dropped to keep URLs short.
    params: Params instance; its attribute dict is serialized into the
      query string so the target page reproduces this configuration.

  Returns:
    The assembled URL string.
  """
  if path == '/':
    path = ''
  return '%(scheme)s://%(netloc)s%(path)s?%(params)s' % {
      'scheme': scheme,
      'netloc': netloc,
      'path': path,
      'params': urllib.urlencode(params.__dict__),  # Python 2 urllib API
  }
def make_iframe_html(urlpath, params):
  """Produces the HTML fragment for the iframe.

  Returns '' once the recursion depth is exhausted. The iframe's host is
  regenerated (same port as the parent), and its width/height are the
  configured percentages reduced by size_jitter.
  """
  if (params.depth <= 0):
    return ''
  # Ensure a stable random number per iframe.
  rand = random.Random()
  rand.seed(params.seed)
  netloc_paths = urlpath.netloc.split(':')
  netloc_paths[0] = generate_host(rand, params)
  width = apply_jitter(rand, params.size_jitter, params.width)
  height = apply_jitter(rand, params.size_jitter, params.height)
  iframe_params = {
      'src': make_src(urlpath.scheme, ':'.join(netloc_paths),
                      urlpath.path, params),
      'width': '%d%%' % width,
      'height': '%d%%' % height,
  }
  return IFRAME_FRAGMENT % iframe_params
def create_html(environ):
  """Creates the current HTML page. Also parses out query parameters.

  Rebuilds the request URL from the WSGI environ, derives the "site"
  (which drives the background color) and the generation parameters,
  then renders MAIN_PAGE with a jittered number of recursively
  generated child iframes.
  """
  urlpath = urlparse.urlparse('%s://%s%s?%s' % (
      environ['wsgi.url_scheme'],
      environ['HTTP_HOST'],
      environ['PATH_INFO'],
      environ['QUERY_STRING']))
  site = get_site(urlpath)
  params = Params(urlparse.parse_qs(urlpath.query))
  # Per-page RNG seeded from params keeps the generated tree reproducible.
  rand = random.Random()
  rand.seed(params.seed)
  iframe_htmls = []
  for frame in xrange(0, apply_jitter(rand, params.jitter, params.nframes)):
    # Copy current parameters into iframe and make modifications
    # for the recursive generation.
    iframe_params = copy.copy(params)
    iframe_params.depth = params.depth - 1
    # Base the new seed off the current seed, but have it skip enough that
    # different frame trees are unlikely to collide. Numbers and skips
    # not chosen in any scientific manner at all.
    iframe_params.seed = params.seed + (frame + 1) * (
        1000000 + params.depth + 333)
    iframe_htmls.append(make_iframe_html(urlpath, iframe_params))
  template_params = dict(params.__dict__)
  template_params.update({
      'color': get_color_for_site(site),
      'iframe_html': '\n'.join(iframe_htmls),
      'site': site,
      'url': make_src(urlpath.scheme, urlpath.netloc, urlpath.path, params),
  })
  return MAIN_PAGE % template_params
def application(environ, start_response):
  """WSGI entry point: serve one generated frame-tree page.

  Requests for /favicon.ico get an empty body so browser icon fetches
  do not disturb the test pages.
  """
  start_response('200 OK', [('Content-Type', 'text/html')])
  if environ['PATH_INFO'] == '/favicon.ico':
    yield ''
  else:
    yield create_html(environ)
# Listen on every interface on port 8090 and serve until interrupted.
server = pywsgi.WSGIServer(('', 8090), application)
server.serve_forever()
| 32.377778 | 79 | 0.664242 |
ace5842d7cf7ef6fa229d44a0fffa27acb5cf0c3 | 5,759 | py | Python | kinto/core/testing.py | Nomanpathan/kinto | c2e735f821fe17993d48830a4f36076bccb6168e | [
"Apache-2.0"
] | 1 | 2020-12-06T20:49:41.000Z | 2020-12-06T20:49:41.000Z | kinto/core/testing.py | Nomanpathan/kinto | c2e735f821fe17993d48830a4f36076bccb6168e | [
"Apache-2.0"
] | null | null | null | kinto/core/testing.py | Nomanpathan/kinto | c2e735f821fe17993d48830a4f36076bccb6168e | [
"Apache-2.0"
] | null | null | null | import os
import threading
import unittest
from collections import defaultdict
from unittest import mock
import webtest
from cornice import errors as cornice_errors
from pyramid.url import parse_url_overrides
from kinto.core import DEFAULT_SETTINGS, statsd
from kinto.core.storage import generators
from kinto.core.utils import encode64, follow_subrequest, memcache, sqlalchemy
# Decorators to skip tests depending on the CI environment and on which
# optional backends could be imported at module load time.
skip_if_travis = unittest.skipIf("TRAVIS" in os.environ, "travis")
skip_if_no_postgresql = unittest.skipIf(sqlalchemy is None, "postgresql is not installed.")
skip_if_no_memcached = unittest.skipIf(memcache is None, "memcached is not installed.")
skip_if_no_statsd = unittest.skipIf(not statsd.statsd_module, "statsd is not installed.")
class DummyRequest(mock.MagicMock):
    """Fully mocked request.

    A MagicMock preconfigured with the attributes kinto.core code reads
    from a Pyramid request, so views and resources can be unit-tested
    without a real WSGI stack.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.upath_info = "/v0/"
        self.registry = mock.MagicMock(settings={**DEFAULT_SETTINGS})
        self.registry.id_generators = defaultdict(generators.UUID4)
        self.GET = {}
        self.headers = {}
        self.errors = cornice_errors.Errors()
        # Pretend the request was authenticated via Basic Auth as "bob".
        self.authenticated_userid = "bob"
        self.authn_type = "basicauth"
        self.prefixed_userid = "basicauth:bob"
        self.effective_principals = ["system.Everyone", "system.Authenticated", "bob"]
        self.prefixed_principals = self.effective_principals + [self.prefixed_userid]
        self.json = {}
        self.validated = {}
        self.log_context = lambda **kw: kw
        self.matchdict = {}
        self.response = mock.MagicMock(headers={})
        self.application_url = ""  # used by parse_url_overrides

        def route_url(*a, **kw):
            # XXX: refactor DummyRequest to take advantage of `pyramid.testing`
            parts = parse_url_overrides(self, kw)
            return "".join([p for p in parts if p])

        self.route_url = route_url

    follow_subrequest = follow_subrequest
def get_request_class(prefix):
    """Return a webtest request class that prepends *prefix* to every path."""
    prefixed = f"/{prefix}" if prefix else ""

    class PrefixedRequestClass(webtest.app.TestRequest):
        @classmethod
        def blank(cls, path, *args, **kwargs):
            # Delegate to the stock TestRequest with the versioned path.
            return webtest.app.TestRequest.blank(prefixed + path, *args, **kwargs)

    return PrefixedRequestClass
class FormattedErrorMixin:
    """Test mixin in order to perform advanced error responses assertions."""

    def assertFormattedError(self, response, code, errno, error, message=None, info=None):
        # Assert the JSON error body shape produced by kinto.core error views:
        # code / errno / error are always checked; message and info are
        # checked for presence only when an expected value is supplied,
        # and for absence otherwise.
        self.assertIn("application/json", response.headers["Content-Type"])
        self.assertEqual(response.json["code"], code)
        self.assertEqual(response.json["errno"], errno.value)
        self.assertEqual(response.json["error"], error)
        if message is not None:
            self.assertIn(message, response.json["message"])
        else:  # pragma: no cover
            self.assertNotIn("message", response.json)
        if info is not None:
            self.assertIn(info, response.json["info"])
        else:  # pragma: no cover
            self.assertNotIn("info", response.json)
def get_user_headers(user, password="secret"):
    """Helper to obtain a Basic Auth authorization headers from the specified
    `user` (e.g. ``"user:pass"``)

    :rtype: dict
    """
    token = encode64(f"{user}:{password}")
    return {"Authorization": f"Basic {token}"}
class BaseWebTest:
    """Base Web Test to test your kinto.core service.

    It setups the database before each test and delete it after.
    """

    api_prefix = "v0"
    """URL version prefix"""

    entry_point = None
    """Main application entry"""

    headers = {"Content-Type": "application/json"}

    @classmethod
    def setUpClass(cls):
        # Build the app once per test class and expose its backends so
        # tests (and tearDown) can manipulate them directly.
        cls.app = cls.make_app()
        cls.storage = cls.app.app.registry.storage
        cls.cache = cls.app.app.registry.cache
        cls.permission = cls.app.app.registry.permission
        cls.storage.initialize_schema()
        cls.permission.initialize_schema()
        cls.cache.initialize_schema()

    @classmethod
    def make_app(cls, settings=None, config=None):
        """Instantiate the application and setup requests to use the api
        prefix.

        :param dict settings: extra settings values
        :param pyramid.config.Configurator config: already initialized config
        :returns: webtest application instance
        """
        settings = cls.get_app_settings(extras=settings)
        main = cls.entry_point
        wsgi_app = main({}, config=config, **settings)
        app = webtest.TestApp(wsgi_app)
        app.RequestClass = get_request_class(cls.api_prefix)
        return app

    @classmethod
    def get_app_settings(cls, extras=None):
        """Application settings to be used. Override to tweak default settings
        for the tests.

        :param dict extras: extra settings values
        :rtype: dict
        """
        settings = {**DEFAULT_SETTINGS}
        settings["storage_backend"] = "kinto.core.storage.memory"
        settings["cache_backend"] = "kinto.core.cache.memory"
        settings["permission_backend"] = "kinto.core.permission.memory"
        # Bug fix: the previous `extras or None` made dict.update() raise
        # TypeError whenever no extras were given (the default path through
        # make_app()); fall back to an empty dict instead.
        settings.update(extras or {})
        return settings

    def tearDown(self):
        # Wipe all backends between tests so state does not leak.
        super().tearDown()
        self.storage.flush()
        self.cache.flush()
        self.permission.flush()
class ThreadMixin:
    """Test mixin that tracks the threads it creates and joins them all
    during tearDown, so no test leaves a thread running."""

    def setUp(self):
        super().setUp()
        self._threads = []

    def tearDown(self):
        super().tearDown()
        for pending in self._threads:
            pending.join()

    def _create_thread(self, *args, **kwargs):
        # Remember each created thread so tearDown can join it.
        new_thread = threading.Thread(*args, **kwargs)
        self._threads.append(new_thread)
        return new_thread
| 32.353933 | 91 | 0.657406 |
ace5845a3b6aebedb36f30cf32b8881aedba7734 | 23,316 | py | Python | Assets/StreamingAssets/.q/Lib/site-packages/docplex/mp/functional.py | hennlo/Q-shall-not-pass | 8013ce891462683eb9cfedc4ac12a1e602fc1ba8 | [
"Apache-2.0"
] | null | null | null | Assets/StreamingAssets/.q/Lib/site-packages/docplex/mp/functional.py | hennlo/Q-shall-not-pass | 8013ce891462683eb9cfedc4ac12a1e602fc1ba8 | [
"Apache-2.0"
] | null | null | null | Assets/StreamingAssets/.q/Lib/site-packages/docplex/mp/functional.py | hennlo/Q-shall-not-pass | 8013ce891462683eb9cfedc4ac12a1e602fc1ba8 | [
"Apache-2.0"
] | null | null | null | # --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
# gendoc: ignore
from docplex.mp.basic import Expr
from docplex.mp.constants import SOSType
from docplex.mp.operand import LinearOperand
from docplex.mp.utils import is_iterable, is_iterator, DocplexLinearRelaxationError
# do NOT import Model -> circular

# change this flag to generate named objects
# by default all generated objects will have no name
use_debug_names = False


def get_name_if_debug(name):
    """Return ``name`` when debug naming is enabled, else None.

    A None name keeps generated model artefacts anonymous (the default).
    """
    return name if use_debug_names else None
# noinspection PyAbstractClass
class _FunctionalExpr(Expr, LinearOperand):
    # INTERNAL class
    # parent class for all nonlinear expressions (abs, min/max, logical
    # and/or/not, piecewise-linear). Each instance lazily allocates one
    # generated "functional" variable (_f_var) standing for the expression's
    # value, plus whatever variables/constraints are needed to link it to
    # its arguments ("resolution"). Generated artefacts carry an ``origin``
    # back-pointer (expression, slot index) so they can be traced.
    __slots__ = ('_f_var', '_resolved')

    def __init__(self, model, name=None):
        Expr.__init__(self, model, name)
        self._f_var = None        # generated variable, created on demand
        self._resolved = False    # True once linking constraints are posted

    def to_linear_expr(self):
        # As a linear operand, the expression behaves as its functional var.
        return self._get_resolved_f_var()

    def iter_terms(self):
        # Single term: 1 * functional_var.
        yield self._get_resolved_f_var(), 1

    iter_sorted_terms = iter_terms

    def iter_variables(self):
        # do we need to create it here?
        yield self._get_resolved_f_var()

    def unchecked_get_coef(self, dvar):
        return 1 if dvar is self._f_var else 0

    # --- factories for generated model artefacts -------------------------

    def _new_generated_free_continuous_var(self, artefact_pos, name=None):
        # INTERNAL
        inf = self._model.infinity
        return self._new_generated_continuous_var(artefact_pos, lb=-inf, ub=inf, name=name)

    def _new_generated_continuous_var(self, artefact_pos, lb=None, ub=None, name=None):
        return self._new_generated_var(artefact_pos, vartype=self._model.continuous_vartype, lb=lb, ub=ub, name=name)

    def _new_generated_binary_var(self, artefact_pos, name=None):
        return self._new_generated_var(artefact_pos, self._model.binary_vartype, name=name)

    def _new_generated_var(self, artefact_pos, vartype, lb=None, ub=None, name=None):
        # INTERNAL
        assert artefact_pos >= 0
        m = self._model
        gvar = m._lfactory.new_var(vartype, lb=lb, ub=ub, varname=name, safe=True)
        # Record which expression (and artefact slot) generated this var.
        gvar.origin = (self, artefact_pos)
        return gvar

    def _new_generated_binary_varlist(self, keys, offset=0, name=None):
        # Generate a list of binaries whose origin slots start at ``offset``.
        bvars = self.model.binary_var_list(keys, name)
        for b, bv in enumerate(bvars, start=offset):
            bv.origin = (self, b)
        return bvars

    def new_generated_sos1(self, dvars):
        # SOS1 over the given vars, marked as generated by this expression.
        sos1 = self.model._add_sos(dvars, SOSType.SOS1)
        sos1.origin = self
        return sos1

    def _new_generated_indicator(self, binary_var, linear_ct, active_value=1, name=None):
        # Indicator constraint, marked as generated and added to the model.
        ind = self._model._lfactory.new_indicator_constraint(binary_var, linear_ct, active_value, name)
        ind.origin = self
        self._model.add(ind)
        return ind

    def _new_generated_binary_ct(self, lhs, rhs, sense='EQ'):
        # posts a constraint and marks it as generated.
        m = self._model
        ct = m._lfactory.new_binary_constraint(lhs=lhs, sense=sense, rhs=rhs)
        m._post_constraint(ct)
        ct.origin = self
        return ct

    def _post_generated_cts(self, cts):
        # takes a sequence of constraints
        # posts a constraint and marks it as generated.
        self._model._lfactory._post_constraint_block(cts)
        for c in cts:
            c.origin = self
        return cts

    # --- lazy resolution --------------------------------------------------

    def _get_resolved_f_var(self):
        self._ensure_resolved()
        return self._f_var

    def _get_allocated_f_var(self):
        # Allocate the functional var without posting linking constraints.
        if self._f_var is None:
            self._f_var = self._create_functional_var()
        return self._f_var

    def resolve(self):
        self._ensure_resolved()

    def _ensure_resolved(self):
        if self._f_var is None:
            # 1. create the var (once!)
            self._f_var = self._create_functional_var()
        # 2. post the link between the fvar and the argument expr
        if not self._resolved:
            self._resolve()
            self._resolved = True

    def _is_resolved(self):
        return self._resolved and self._f_var is not None

    def _name_functional_var_name(self, fvar, fvar_meta_format="_%s%d"):
        # e.g. "_abs42": leading underscore marks a generated variable.
        fname = fvar_meta_format % (self.function_symbol, fvar._index)
        fvar.set_name(fname)

    def _create_functional_var(self, named=True):
        # Default: a free continuous var in artefact slot 0; subclasses
        # override for binary results (logical expressions).
        fvar = self._new_generated_free_continuous_var(artefact_pos=0, name=None)
        if named:
            self._name_functional_var_name(fvar)
        return fvar

    @property
    def functional_var(self):
        return self._get_resolved_f_var()

    as_var = functional_var

    def get_artefact(self, pos):
        # Base class exposes only slot 0 (the functional var).
        assert pos == 0
        return self.as_var

    def square(self):
        return self.functional_var.square()

    def _resolve(self):
        raise NotImplementedError  # pragma: no cover

    def _get_function_symbol(self):
        # redefine this to get the function symbol
        raise NotImplementedError  # pragma: no cover

    @property
    def function_symbol(self):
        return self._get_function_symbol()

    def relaxed_copy(self, relaxed_model, var_map):
        # Nonlinear expressions cannot be linearly relaxed.
        raise DocplexLinearRelaxationError(self, cause=self.function_symbol)

    def __str__(self):
        return self.to_string()

    def to_string(self, **kwargs):
        raise NotImplementedError  # pragma: no cover

    # -- arithmetic operators: all delegate to the functional variable.
    def __mul__(self, e):
        return self.functional_var.__mul__(e)

    def __rmul__(self, e):
        return self.functional_var.__mul__(e)

    def __div__(self, e):
        return self.divide(e)

    def __truediv__(self, e):
        # for py3
        # INTERNAL
        return self.divide(e)  # pragma: no cover

    def divide(self, e):
        return self.functional_var.divide(e)

    def __add__(self, e):
        return self.functional_var.__add__(e)

    def __radd__(self, e):
        return self.functional_var.__add__(e)

    def __sub__(self, e):
        return self.functional_var.__sub__(e)

    def __rsub__(self, e):
        return self.functional_var.__rsub__(e)

    def __neg__(self):
        # the "-e" unary minus returns a linear expression
        return self.functional_var.__neg__()

    def _allocate_arg_var_if_necessary(self, arg_expr, pos):
        # INTERNAL
        # allocates a new variables if only the argument expr is not a variable
        # and returns it
        try:
            arg_var = arg_expr.as_variable()
        except AttributeError:
            arg_var = None
        if arg_var is None:
            # Materialize the argument as a free var tied by arg_var == expr.
            arg_var = self._new_generated_free_continuous_var(artefact_pos=pos)
            self._new_generated_binary_ct(arg_var, arg_expr)
        return arg_var
# noinspection PyAbstractClass
class UnaryFunctionalExpr(_FunctionalExpr):
    # Functional expression over a single argument; the argument is
    # materialized as a variable (_x_var, artefact slot 1) when it is not
    # one already.

    def __init__(self, model, argument_expr, name=None):
        _FunctionalExpr.__init__(self, model, name)
        self._argument_expr = model._lfactory._to_linear_operand(argument_expr)
        self._x_var = self._allocate_arg_var_if_necessary(argument_expr, pos=1)

    def get_artefact(self, pos):
        # Slot 0 is the functional var, slot 1 the argument var.
        if pos == 0:
            return self.as_var
        elif pos == 1:
            return self._x_var

    @property
    def argument_expr(self):
        return self._argument_expr

    def is_discrete(self):
        # Discreteness follows the argument expression.
        return self._argument_expr.is_discrete()

    def to_string(self):
        return "{0:s}({1!s})".format(self.function_symbol, self._argument_expr)

    def copy(self, target_model, memo):
        # Memoized deep copy keyed on id(self): reuse an existing clone if
        # this expression was already copied into target_model.
        copy_key = id(self)
        cloned_expr = memo.get(copy_key)
        if cloned_expr is None:
            copied_arg_expr = self._argument_expr.copy(target_model, memo)
            cloned_expr = self.__class__(model=target_model, argument_expr=copied_arg_expr)
            memo[copy_key] = cloned_expr
        return cloned_expr
class AbsExpr(UnaryFunctionalExpr):
    # abs(x), resolved by the classic positive/negative split:
    #   x = p - n,  |x| = p + n,  with an SOS1 on (p, n) so that at most
    # one of them is nonzero.

    def relaxed_copy(self, relaxed_model, var_map):
        raise DocplexLinearRelaxationError(self, cause='abs')

    def __init__(self, model, argument_expr):
        UnaryFunctionalExpr.__init__(self, model, argument_expr)

    def _get_function_symbol(self):
        return "abs"

    def clone(self):
        return AbsExpr(self.model, self._argument_expr)

    # noinspection PyArgumentEqualDefault,PyArgumentEqualDefault
    def _resolve(self):
        self_f_var = self._f_var
        assert self_f_var
        abs_index = self_f_var.index
        abs_names = ["_abs_pp_%d" % abs_index, "_abs_np_%d" % abs_index] if use_debug_names else [None, None]
        # 1. allocate two variables in one pass.
        positive_var = self._new_generated_continuous_var(artefact_pos=2, lb=0, name=abs_names[0])
        negative_var = self._new_generated_continuous_var(artefact_pos=3, lb=0, name=abs_names[1])
        # F(x) = p + n
        ct1 = (self_f_var == positive_var + negative_var)
        # sos
        self.sos = self.new_generated_sos1(dvars=[positive_var, negative_var])
        # # x = p-n
        ct2 = (self._argument_expr == positive_var - negative_var)
        self._post_generated_cts([ct1, ct2])
        # store
        self._artefact_vars = (positive_var, negative_var)

    def get_artefact(self, pos):
        if pos <= 1:
            return super(AbsExpr, self).get_artefact(pos)
        else:
            # offset is 2: slots 2 and 3 hold the positive/negative parts.
            assert 2 <= pos <= 3
            return self._artefact_vars[pos - 2]

    def _get_solution_value(self, s=None):
        # Evaluate |argument| directly from the solution, then round if the
        # argument is discrete.
        raw = abs(self._argument_expr._get_solution_value(s))
        return self._round_if_discrete(raw)

    def __repr__(self):
        return "docplex.mp.AbsExpr({0:s})".format(self._argument_expr.truncated_str())
# noinspection PyAbstractClass
class _SequenceExpr(_FunctionalExpr):
    # INTERNAL: base class for functional exprs with a sequence argument (e.g. min/max)

    def __init__(self, model, exprs, name=None):
        _FunctionalExpr.__init__(self, model, name)
        if is_iterable(exprs) or is_iterator(exprs):
            self._exprs = exprs
        else:
            # Single expression: wrap it into a one-element list.
            self._exprs = [model._lfactory._to_linear_operand(exprs)]
        # allocate xvars iff necessary (artefact slots 1..N)
        self._xvars = [self._allocate_arg_var_if_necessary(expr, pos=e) for e, expr in enumerate(self._exprs, start=1)]

    @property
    def nb_args(self):
        return len(self._exprs)

    def is_discrete(self):
        # Discrete only if every argument is discrete.
        return all(map(lambda ex: ex.is_discrete(), self._exprs))

    def _get_args_string(self, sep=","):
        return sep.join(e.truncated_str() for e in self._exprs)

    def to_string(self):
        # generic: format expression arguments with holophraste
        str_args = self._get_args_string()
        return "{0}({1!s})".format(self.function_symbol, str_args)

    def iter_exprs(self):
        return iter(self._exprs)

    def _generate_variables(self):
        # INTERNAL: variable generator scanning all expressions
        # may return the same variable twice (or more)
        # use varset() if you need the set.
        for e in self._exprs:
            for v in e.iter_variables():
                yield v
        yield self._get_resolved_f_var()

    def iter_variables(self):
        return self._generate_variables()

    def contains_var(self, dvar):
        return dvar is self._f_var

    def _get_solution_value(self, s=None):
        # Prefer the solved functional var's value; fall back to computing
        # min/max from the argument values when unresolved or absent from s.
        fvar = self._f_var
        if self._is_resolved() and (not s or fvar in s):
            raw = fvar._get_solution_value(s)
        else:
            raw = self.compute_solution_value(s)
        return self._round_if_discrete(raw_value=raw)

    def compute_solution_value(self, s):
        raise NotImplementedError  # pragma: no cover

    def copy(self, target_model, memo):
        # Memoized deep copy keyed on id(self).
        copy_key = id(self)
        cloned_expr = memo.get(copy_key)
        if cloned_expr is None:
            copied_exprs = [expr.copy(target_model, memo) for expr in self._exprs]
            cloned_expr = self.__class__(target_model, copied_exprs, self.name)
            # add in mapping
            memo[copy_key] = cloned_expr
        return cloned_expr

    def clone(self):
        # generic clone
        return self.__class__(self.model, self._exprs, self.name)

    def get_logical_seq_artefact(self, zvars, pos):
        # Artefact slot layout:
        # 0 -> fvar
        # 1 .. N -> xargs
        # N+1 .. 2N -> zvars
        if pos == 0:
            return self.as_var
        else:
            nb_args = self.nb_args
            if 1 <= pos <= nb_args:
                return self._xvars[pos - 1]
            else:
                assert nb_args + 1 <= pos <= 2 * nb_args
                zvar_pos = pos - (nb_args + 1)
                return zvars[zvar_pos]
class MinimumExpr(_SequenceExpr):
    """ An expression that represents the minimum of a sequence of expressions.

    This expression can be used in all arithmetic operations.
    After a solve, the value of this expression is equal to the minimum of the values
    of its argument expressions.
    """

    def __init__(self, model, exprs, name=None):
        _SequenceExpr.__init__(self, model, exprs, name)

    def _get_function_symbol(self):
        return "min"

    def __repr__(self):
        str_args = self._get_args_string()
        return "docplex.mp.MinExpr({0!s})".format(str_args)

    def _resolve(self):
        # Linearization: m <= x_i for all i, plus one binary z_i per argument
        # with sum(z) == 1 and indicators z_i == 1 -> m >= x_i, so m equals
        # the selected (minimal) argument.
        self_min_var = self._f_var
        assert self_min_var
        self_x_vars = self._xvars
        nb_args = len(self_x_vars)
        if 0 == nb_args:
            # Degenerate: min of nothing is pinned to 0.
            self._f_var.set_bounds(0, 0)
        elif 1 == nb_args:
            self._new_generated_binary_ct(self_min_var, self._xvars[0])
        else:
            cts = []
            for xv in self_x_vars:
                cts.append(self_min_var <= xv)
            # allocate N _generated_ binaries
            # reserve 1 + nb_args slots for artefacts
            z_vars = self._new_generated_binary_varlist(offset=nb_args + 1, keys=nb_args)
            self.z_vars = z_vars
            # sos?
            cts.append(self.model.sum(z_vars) == 1)
            self._post_generated_cts(cts)
            # indicators
            for i in range(nb_args):
                z = z_vars[i]
                x = self_x_vars[i]
                # need a block generation of indicators
                self._new_generated_indicator(binary_var=z, linear_ct=(self_min_var >= x))

    def compute_solution_value(self, s):
        return min(expr._get_solution_value(s) for expr in self._exprs)

    def get_artefact(self, pos):
        return self.get_logical_seq_artefact(self.z_vars, pos)
class MaximumExpr(_SequenceExpr):
    """ An expression that represents the maximum of a sequence of expressions.

    This expression can be used in all arithmetic operations.
    After a solve, the value of this expression is equal to the maximum of the values
    of its argument expressions.
    """

    def __init__(self, model, exprs, name=None):
        _SequenceExpr.__init__(self, model, exprs, name)

    def _get_function_symbol(self):
        return "max"

    def __repr__(self):
        str_args = self._get_args_string()
        return "docplex.mp.MaxExpr({0!s})".format(str_args)

    def _resolve(self):
        # Mirror of MinimumExpr: M >= x_i for all i, binaries z_i with
        # sum(z) == 1 and indicators z_i == 1 -> M <= x_i.
        self_max_var = self._f_var
        self_x_vars = self._xvars
        nb_args = len(self_x_vars)
        if 0 == nb_args:
            self._f_var.set_bounds(0, 0)  # what else ??
        elif 1 == nb_args:
            self._new_generated_binary_ct(self_max_var, self._xvars[0])
        else:
            for xv in self_x_vars:
                self._new_generated_binary_ct(self_max_var, xv, 'GE')
            # allocate N binaries
            z_vars = self._new_generated_binary_varlist(keys=nb_args, offset=nb_args + 1)
            self.z_vars = z_vars
            # sos?
            self._new_generated_binary_ct(self.model.sum(z_vars), 1)
            # indicators
            for i in range(nb_args):
                z = z_vars[i]
                x = self_x_vars[i]
                self._new_generated_indicator(binary_var=z, linear_ct=(self_max_var <= x))

    def compute_solution_value(self, s):
        return max(expr._get_solution_value(s) for expr in self._exprs)

    def get_artefact(self, pos):
        return self.get_logical_seq_artefact(self.z_vars, pos)
class LogicalNotExpr(UnaryFunctionalExpr):
    # not(x): the functional variable is binary and linked to the argument
    # by the single equality not_x + x == 1.

    def _create_functional_var(self, named=True):
        # the resulting variable is a binary variable...
        bvar = self._new_generated_binary_var(artefact_pos=0, name=None)
        self._name_functional_var_name(bvar)
        return bvar

    def is_discrete(self):
        return True

    def _get_function_symbol(self):
        return "not"

    def as_logical_operand(self):
        return self._get_resolved_f_var()

    def __init__(self, model, argument_expr):
        UnaryFunctionalExpr.__init__(self, model, argument_expr)
        # The argument must itself be usable as a logical operand.
        self._logical_op_arg = argument_expr.as_logical_operand()
        assert self._logical_op_arg is not None
        # Keep the argument's string form for printing, captured at build time.
        self._actual_arg_s = str(argument_expr)

    def to_string(self):
        return "{0:s}({1!s})".format(self.function_symbol, self._actual_arg_s)

    def clone(self):
        return LogicalNotExpr(self.model, self._argument_expr)

    # noinspection PyArgumentEqualDefault,PyArgumentEqualDefault
    def _resolve(self):
        not_var = self._f_var
        assert not_var
        # not_x + x == 1
        ct1 = (not_var + self._logical_op_arg == 1)
        self._post_generated_cts([ct1])
        # store
        self.not_ct = ct1

    def _get_solution_value(self, s=None):
        # Logical complement of the argument's solution value, as 0/1.
        arg_val = self._argument_expr._get_solution_value(s)
        return 0 if arg_val else 1

    def __repr__(self):
        return "docplex.mp.NotExpr({0:s})".format(self._argument_expr.truncated_str())
class _LogicalSequenceExpr(_SequenceExpr):
    # Base for and/or over a sequence of binary variables; the functional
    # variable is binary and the arguments are used as-is (no materialization).

    def as_logical_operand(self):
        return self._get_resolved_f_var()

    def _create_functional_var(self, named=True):
        # the resulting variable is a binary variable...
        bvar = self._new_generated_binary_var(artefact_pos=0, name=None)
        self._name_functional_var_name(bvar)
        return bvar

    def __init__(self, model, exprs, name=None):
        # Note: bypasses _SequenceExpr.__init__ on purpose, to avoid
        # allocating argument variables.
        _FunctionalExpr.__init__(self, model, name)
        assert is_iterable(exprs) or is_iterator(exprs)
        self._exprs = exprs
        # never allocate vars: arguments --are-- binary variables.
        self._xvars = exprs

    def _get_args_string(self, sep=","):
        def first_or_id(x):
            # origin may be a (expr, pos) tuple or a plain expression.
            try:
                r = x[0]
            except TypeError:
                r = x
            return r
        # Print generated operands via their origin expression for readability.
        s = sep.join(str(first_or_id(b.origin)) if b.is_generated() else str(b) for b in self._xvars)
        return s

    def is_discrete(self):
        return True

    # Tolerance when reading binary values back from a (float) solution.
    precision = 1e-5
class LogicalAndExpr(_LogicalSequenceExpr):
    # and(x1..xn): and_var <= x_i for all i, and
    # and_var >= sum(x_i) - (n - 1), so it is 1 iff every x_i is 1.

    def _get_function_symbol(self):
        return "and"

    def __repr__(self):
        str_args = self._get_args_string()
        return "docplex.mp.LogicalAndExpr({0!s})".format(str_args)

    def compute_solution_value(self, s):
        # return 1/0 not True/False
        threshold = 1 - self.precision
        return 1 if all(ex._get_solution_value(s) >= threshold for ex in self._exprs) else 0

    def _resolve(self):
        self_and_var = self._f_var
        self_x_vars = self._xvars
        if self_x_vars:
            cts = [(self_and_var <= xv) for xv in self_x_vars]
            m = self._model
            nb_vars = len(self_x_vars)
            # rtc-39600: subtract n-1 from the sum.
            # the -and- var is propagated to 1 if all sum vars are 1.
            cts.append(self_and_var >= m._aggregator._sum_with_seq(self._xvars) - (nb_vars - 1))
            self._post_generated_cts(cts)
class LogicalOrExpr(_LogicalSequenceExpr):
    # or(x1..xn): x_i <= or_var for all i, and or_var <= sum(x_i),
    # so it is 1 iff at least one x_i is 1.

    def _get_function_symbol(self):
        return "or"

    def __repr__(self):
        str_args = self._get_args_string()
        return "docplex.mp.LogicalOrExpr({0!s})".format(str_args)

    def compute_solution_value(self, s):
        # return 1/0 not True/False
        threshold = 1 - self.precision
        return 1 if any(ex._get_solution_value(s) >= threshold for ex in self._exprs) else 0

    def _resolve(self):
        self_or_var = self._f_var
        self_x_vars = self._xvars
        if self_x_vars:
            cts = [(xv <= self_or_var) for xv in self_x_vars]
            m = self._model
            cts.append(self_or_var <= m._aggregator._sum_with_seq(self._xvars))
            self._post_generated_cts(cts)
        self._resolved = True
class PwlExpr(UnaryFunctionalExpr):
    # Piecewise-linear expression y = pwl_func(x), enforced by a dedicated
    # PWL constraint added to the model at resolve time.

    def __init__(self, model,
                 pwl_func, argument_expr,
                 usage_counter,
                 y_var=None,
                 add_counter_suffix=True,
                 resolve=True):
        UnaryFunctionalExpr.__init__(self, model, argument_expr)
        self._pwl_func = pwl_func
        self._usage_counter = usage_counter
        # A pre-existing y variable may be supplied instead of generating one.
        self._f_var = y_var
        if pwl_func.name:
            # ?
            if add_counter_suffix:
                # Suffix with the usage counter to keep names unique across
                # multiple uses of the same function.
                self.name = '{0}_{1!s}'.format(self._pwl_func.name, self._usage_counter)
            else:
                self.name = self._pwl_func.name
        if resolve:
            self._ensure_resolved()

    def _get_function_symbol(self):
        # this method determines the name of the generated variable
        # as usual it starts with "_" to mark this is a generated variable.
        pwl_name = self._pwl_func.get_name()
        # TODO: what if pwl_name is not LP-compliant??
        return "pwl" if not pwl_name else "pwl_%s#" % pwl_name

    def _get_solution_value(self, s=None):
        raw = self._f_var._get_solution_value(s)
        return self._round_if_discrete(raw)

    def iter_variables(self):
        # Argument variables first, then the functional (y) variable.
        for v in self._argument_expr.iter_variables():
            yield v
        yield self._get_resolved_f_var()

    def _resolve(self):
        # Resolution posts a PWL constraint instead of linear constraints.
        mdl = self._model
        pwl_constraint = mdl._lfactory.new_pwl_constraint(self, self.get_name())
        mdl._add_pwl_constraint_internal(pwl_constraint)

    @property
    def pwl_func(self):
        return self._pwl_func

    @property
    def usage_counter(self):
        return self._usage_counter

    def __repr__(self):
        return "docplex.mp.PwlExpr({0:s}, {1:s})".format(self._get_function_symbol(),
                                                         self._argument_expr.truncated_str())

    def copy(self, target_model, memo):
        # Memoized copy; expects the pwl function and x var to have been
        # copied into memo already by the model copy machinery.
        copy_key = id(self)
        cloned_expr = memo.get(copy_key)
        if cloned_expr is None:
            copied_pwl_func = memo[self.pwl_func]
            copied_x_var = memo[self._x_var]
            cloned_expr = PwlExpr(target_model, copied_pwl_func, copied_x_var, self.usage_counter)
            copied_pwl_expr_f_var = memo.get(self._f_var)
            if copied_pwl_expr_f_var:
                cloned_expr._f_var = copied_pwl_expr_f_var
                # Need to set the _origin attribute of the copied var
                copied_pwl_expr_f_var._origin = cloned_expr
            memo[copy_key] = cloned_expr
        return cloned_expr

    def relaxed_copy(self, relaxed_model, var_map):
        raise DocplexLinearRelaxationError(self, cause='pwl')
| 33.308571 | 119 | 0.638231 |
ace58709231eb8441c0184414b0a8c0a765afa82 | 168 | py | Python | udemy/python-video-workbook/my_progress/013.py | djrgit/coursework | 2a91da9b76cb1acbd12f3d8049f15d2e71f475a1 | [
"MIT"
] | null | null | null | udemy/python-video-workbook/my_progress/013.py | djrgit/coursework | 2a91da9b76cb1acbd12f3d8049f15d2e71f475a1 | [
"MIT"
] | null | null | null | udemy/python-video-workbook/my_progress/013.py | djrgit/coursework | 2a91da9b76cb1acbd12f3d8049f15d2e71f475a1 | [
"MIT"
] | 3 | 2018-08-13T23:14:22.000Z | 2019-01-11T22:50:07.000Z | # Exercise 13 - Ranges of Strings
my_range = range(1, 21)
def convert_ints_to_strs(parameter1):
return list(map(str, my_range))
print(convert_ints_to_strs(my_range)) | 24 | 37 | 0.779762 |
ace5876f4866c6acf40dc04b396358e6117b7317 | 676 | py | Python | var/spack/repos/builtin/packages/r-rcppcnpy/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2018-11-27T03:39:44.000Z | 2021-09-06T15:50:35.000Z | var/spack/repos/builtin/packages/r-rcppcnpy/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-01-11T20:11:52.000Z | 2019-01-11T20:11:52.000Z | var/spack/repos/builtin/packages/r-rcppcnpy/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-10-14T14:20:17.000Z | 2020-10-14T14:20:17.000Z | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRcppcnpy(RPackage):
    """Rcpp bindings for NumPy files."""

    homepage = "https://github.com/eddelbuettel/rcppcnpy"
    url = "https://cran.r-project.org/src/contrib/RcppCNPy_0.2.9.tar.gz"
    list_url = "https://cran.rstudio.com/src/contrib/Archive/RcppCNPy"

    # MD5 checksum of the 0.2.9 source tarball.
    version('0.2.9', '7f63354d15928b6716830c2975b3baf0')

    depends_on('r@3.1.0:', type=('build', 'run'))
    depends_on('cnpy')
    depends_on('r-rcpp', type=('build', 'run'))
| 32.190476 | 77 | 0.693787 |
ace58854d71d2f4ffbdaf5ae7281920d54ffd766 | 64,842 | py | Python | python/paddle/fluid/executor.py | LutaoChu/Paddle | 0581d74d563e1c6ebb088ee265cbcc1a8ef2c3ca | [
"Apache-2.0"
] | 1 | 2019-06-13T11:32:16.000Z | 2019-06-13T11:32:16.000Z | python/paddle/fluid/executor.py | LutaoChu/Paddle | 0581d74d563e1c6ebb088ee265cbcc1a8ef2c3ca | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/executor.py | LutaoChu/Paddle | 0581d74d563e1c6ebb088ee265cbcc1a8ef2c3ca | [
"Apache-2.0"
] | 2 | 2019-08-16T12:03:28.000Z | 2019-09-03T13:02:57.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import os
import multiprocessing
import sys
import warnings
import numpy as np
from .wrapped_decorator import signature_safe_contextmanager
import six
from .data_feeder import convert_dtype
from .framework import Program, default_main_program, Variable, Operator, convert_np_dtype_to_dtype_
from . import core
from . import compiler
from .. import compat as cpt
from .trainer_factory import TrainerFactory
from .trainer_factory import FetchHandlerMonitor
import copy
__all__ = ['Executor', 'global_scope', 'scope_guard']
g_scope = core.Scope()
InferNativeConfig = core.NativeConfig
InferAnalysisConfig = core.AnalysisConfig
def global_scope():
    """
    Get the global/default scope instance. Many APIs use
    :code:`global_scope` as their default value, e.g., :code:`Executor.run`.

    Returns:
        Scope: The global/default scope instance.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy

          fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
          numpy.array(fluid.global_scope().find_var("data").get_tensor())
    """
    # `g_scope` is the module-level singleton created at import time; it may
    # be temporarily swapped via `scope_guard` / `_switch_scope`.
    return g_scope
def _switch_scope(scope):
    """Replace the module-level default scope with `scope`.

    Returns the previously-installed scope so callers (e.g. `scope_guard`)
    can restore it afterwards.
    """
    global g_scope
    ex = g_scope
    g_scope = scope
    return ex
@signature_safe_contextmanager
def scope_guard(scope):
    """
    This function switches scope through python `with` statement.
    Scope records the mapping between variable names and variables ( :ref:`api_guide_Variable` ),
    similar to brackets in programming languages.
    If this function is not invoked, all variables and variable names are recorded in the default global scope.
    When users need to create variables with the same name,
    they need to switch scopes through this function
    if they do not want the mapping of variables with the same name to be overwritten.
    After switching through the `with` statement,
    all variables created in the `with` block will be assigned to a new scope.

    Parameters:
        scope: The new scope.

    Returns:
        None

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy

            new_scope = fluid.Scope()
            with fluid.scope_guard(new_scope):
                 fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
            numpy.array(new_scope.find_var("data").get_tensor())
    """
    ex = _switch_scope(scope)
    try:
        yield
    finally:
        # BUG FIX: restore the previous scope even when the `with` body
        # raises; otherwise the global scope silently stays switched and
        # subsequent code runs against the wrong scope.
        _switch_scope(ex)
def as_numpy(tensor):
    """
    Convert a Tensor to a numpy.ndarray. Only Tensors without LoD
    information are supported; for higher-dimensional sequence data,
    please use LoDTensor directly.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy

          new_scope = fluid.Scope()
          with fluid.scope_guard(new_scope):
              fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
          tensor = new_scope.find_var("data").get_tensor()
          fluid.executor.as_numpy(tensor) # or numpy.array(new_scope.find_var("data").get_tensor())

    Args:
       tensor(Variable): an instance of Tensor (or a LoDTensorArray /
           list thereof, which is converted element-wise).

    Returns:
        numpy.ndarray, a (possibly nested) list of ndarrays, or None when
        the tensor holds no data.
    """
    # LoDTensorArray and plain lists are converted element by element.
    if isinstance(tensor, core.LoDTensorArray):
        return [as_numpy(t) for t in tensor]
    if isinstance(tensor, list):
        return [as_numpy(t) for t in tensor]
    assert isinstance(tensor, core.LoDTensor)
    lod = tensor.lod()
    # A tensor carrying LoD (variable-length sequence) information cannot
    # be represented faithfully as a dense ndarray.
    if len(lod) > 0:
        raise RuntimeError("Some of your fetched tensors hold LoD information. \
            They can not be completely cast to Python ndarray. \
            Please set the parameter 'return_numpy' as 'False' to \
            return LoDTensor itself directly.")
    # An uninitialized tensor has no backing data to convert.
    if tensor._is_initialized():
        return np.array(tensor)
    else:
        return None
def dtype_is_compatible_with(first, second):
    """
    Check whether the first dtype is compatible with the second one.
    Currently, compatibility simply means the two dtypes are identical
    after normalizing both to ``core.VarDesc.VarType``.

    Args:
        first (np.dtype|VarType|str): a data type such as float32, int64.
        second (np.dtype|VarType|str): a data type such as float32, int64.

    Returns:
        bool: True if the two normalized dtypes are the same.
    """
    # Normalize numpy dtypes / strings to the framework's VarType enum so
    # that e.g. numpy.float32 compares equal to VarType.FP32.
    normalized = []
    for dtype in (first, second):
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        normalized.append(dtype)
    return normalized[0] == normalized[1]
def dimension_is_compatible_with(first, second):
    """
    Check whether two shapes are compatible with each other.

    Two shapes are compatible when they have the same rank and every pair
    of corresponding entries either matches exactly or contains an unknown
    value. ``None`` or any negative number denotes an unknown dimension,
    which is compatible with any size.

    Args:
        first (list/tuple): integers representing a shape; ``None`` or a
            negative number means unknown.
        second (list/tuple): same convention as ``first``.

    Returns:
        bool: True if the two shapes are compatible.
    """
    if len(first) != len(second):
        return False

    def _known(value):
        # An entry is "known" only when it is a non-negative number.
        return value is not None and value >= 0

    # Entries only conflict when both sides are known and they differ.
    return all(not _known(a) or not _known(b) or a == b
               for a, b in zip(first, second))
def check_feed_shape_type(var, feed, num_places=1):
    """
    Returns True if the variable doesn't require feed check or it is compatible
    with the shape and have same dtype as the fed value.

    A dimension is compatible with the other if:
    1. The length of the dimensions are same.
    2. Each non-negative number of the two dimensions are same.
    3. For negative number or 'None' in a dimension, it means unknown so it
       is compatible with any number.

    Args:
        var (Variable): the Variable object
        feed (LoDTensor): the fed value, which must be a LoDTensor
        num_places: an integer value indicating the number of places.
            ParallelExecutor will divide data into devices (CPU/GPU) evenly.

    Returns:
        True if the shape and dtype of variable is compatible with the feed value

    Raises:
        ValueError: if the shape or dtype of the variable is not compatible with
            the feed value
    """
    # Only variables flagged by the framework (`need_check_feed`) are
    # validated; everything else is accepted as-is.
    if var.desc.need_check_feed():
        # `diff_tensor_shape` returns the offending per-device shape, or
        # None when the fed shape is acceptable.
        diff_shape = core.diff_tensor_shape(feed, var.desc, num_places)
        if diff_shape is not None:
            raise ValueError(
                'The fed Variable %r should have dimensions = %d, shape = '
                '%r, but received fed shape %r on each device' %
                (var.name, len(var.shape), var.shape, diff_shape))
        if not dtype_is_compatible_with(feed._dtype(), var.dtype):
            # Render both dtypes in human-readable form for the error message.
            var_dtype_format = convert_dtype(var.dtype) if isinstance(
                var.dtype, core.VarDesc.VarType) else var.dtype
            feed_dtype_format = convert_dtype(feed._dtype()) if isinstance(
                feed._dtype(), core.VarDesc.VarType) else feed._dtype()
            raise ValueError(
                'The data type of fed Variable %r must be %r, but received %r' %
                (var.name, var_dtype_format, feed_dtype_format))
    return True
def has_feed_operators(block, feed_targets, feed_holder_name):
    """ Check whether the block already has feed operators.

    Return false if the block does not have any feed operators.
    If some feed operators have been prepended to the block, check that
    the info contained in these feed operators matches the feed_targets
    and feed_holder_name. Raise exception when any mismatch is found.
    Return true when the block has feed operators with matching info.

    Args:
        block: a block instance (typically global block of a program)
        feed_targets: a dictionary of {feed_target_name: feed_target_data}
        feed_holder_name: the name of the variable that holds the data of
            all feed targets. The type of this feed_holder variable is
            FEED_MINIBATCH, which is essentially vector<LoDTensor>.

    Returns:
        A boolean value that indicates whether a block has feed operators
        that match the info contained in feed_targets and feed_holder_name.
    """
    feed_count = 0
    for op in block.ops:
        if op.desc.type() == 'feed':
            feed_count += 1
            # All feed ops must read from the single shared feed holder.
            assert op.desc.input('X')[0] == feed_holder_name
            feed_target_name = op.desc.output('Out')[0]
            if feed_target_name not in feed_targets:
                raise Exception("'feed_targets' does not have {} variable".
                                format(feed_target_name))
        else:
            # Feed ops are prepended to the block, so the first non-feed op
            # ends the scan.
            break
    # Either no feed ops at all, or exactly one per feed target.
    if feed_count > 0 and feed_count != len(feed_targets):
        raise Exception(
            "Feed operators in program desc do not match 'feed_targets'")
    return feed_count > 0
def has_fetch_operators(block, fetch_targets, fetch_holder_name):
    """ Check whether the block already has fetch operators.

    Return false if the block does not have any fetch operators.
    If some fetch operators have been appended to the block, check that
    the info contained in these fetch operators matches the fetch_targets
    and fetch_holder_name. Raise exception when any mismatch is found.
    Return true when the block has fetch operators with matching info.

    Args:
        block: a block instance (typically global block of a program)
        fetch_targets: a dictionary of {fetch_target_name: fetch_target_data}
        fetch_holder_name: the name of the variable that holds the data of
            all fetch targets. The type of this fetch_holder variable is
            FETCH_LIST, which is essentially vector<LoDTensor>.

    Return:
        A boolean value that indicates whether a block has fetch operators
        that match the info contained in fetch_targets and fetch_holder_name.
    """
    fetch_count = 0
    for op in block.ops:
        if op.desc.type() == 'fetch':
            fetch_count += 1
            # All fetch ops must write into the single shared fetch holder.
            assert op.desc.output('Out')[0] == fetch_holder_name
            fetch_target_name = op.desc.input('X')[0]
            if fetch_target_name not in [
                    var.desc.name() for var in fetch_targets
            ]:
                raise Exception("'fetch_targets' does not have {} variable".
                                format(fetch_target_name))
            # The 'col' attribute is the index into fetch_targets; it must
            # line up with the op's input variable.
            idx = op.desc.attr('col')
            assert fetch_target_name == fetch_targets[idx].desc.name()
    # Either no fetch ops at all, or exactly one per fetch target.
    if fetch_count > 0 and fetch_count != len(fetch_targets):
        raise Exception(
            "Fetch operators in program desc do not match 'fetch_targets'")
    return fetch_count > 0
def _fetch_var(name, scope=None, return_numpy=True):
    """
    Fetch the value of the variable with the given name from the
    given scope.

    Args:
        name(str): name of the variable. Typically, only persistable variables
            can be found in the scope used for running the program.
        scope(core.Scope|None): scope object. It should be the scope where
            you pass to Executor.run() when running your program.
            If None, global_scope() will be used. Default None.
        return_numpy(bool): whether convert the tensor to numpy.ndarray.
            Default True.

    Returns:
        LodTensor|numpy.ndarray
    """
    assert isinstance(name, six.string_types)
    if scope is None:
        scope = global_scope()
    assert isinstance(scope, core._Scope)

    # `_to_name_str` also accepts a Variable, so callers may pass either.
    var = scope.find_var(_to_name_str(name))
    assert var is not None, (
        "Cannot find " + name + " in scope. Perhaps you need to make the"
        " variable persistable by using var.persistable = True in your"
        " program.")
    tensor = var.get_tensor()
    if return_numpy:
        tensor = as_numpy(tensor)
    return tensor
def _to_name_str(var):
    """Return the name string identifying a fetch target.

    A target may be a Variable, a (unicode) string, an Operator (its
    ``id()`` serves as the name), a tuple produced by
    ``Optimizer.minimize()`` (only the first element matters), or a list
    of any of the above, in which case the names are comma-joined.
    """

    def _single_name(target):
        # NOTE(review): the check order (str before six.string_types)
        # presumably matters on Python 2, where unicode is handled by the
        # wider six.string_types branch — keep it as-is.
        if isinstance(target, Variable):
            return target.desc.name()
        elif isinstance(target, str):
            return target
        elif isinstance(target, six.string_types):
            return str(target)
        elif isinstance(target, Operator):
            return str(id(target))
        else:
            raise TypeError(
                str(target) + " should be Variable, Operator or str")

    # NOTEz(zhiqiu): The item in fetch_list may be tuple returned by
    # Optimizer.minimize(); see comments in
    # _split_optimize_ops_in_fetch_list for more details.
    if isinstance(var, tuple):
        var = var[0]
    if isinstance(var, list):
        return ','.join(_single_name(entry) for entry in var)
    return _single_name(var)
def _get_strong_program_cache_key(program, feed, fetch_list):
    # The "strong" key additionally embeds the id() of the Program object,
    # so two different programs with identical feed/fetch names never share
    # a cache entry.
    return str(id(program)) + _get_program_cache_key(feed, fetch_list)
def _get_program_cache_key(feed, fetch_list):
feed_var_names = []
if isinstance(feed, dict):
feed_var_names = list(feed.keys())
elif isinstance(feed, list) or isinstance(feed, tuple):
for i, each in enumerate(feed):
feed_var_names += list(each.keys())
fetch_var_names = list(map(_to_name_str, fetch_list))
return str(feed_var_names + fetch_var_names)
def _as_lodtensor(data, place, dtype=None):
    """
    Convert numpy.ndarray to Tensor. Only Tensors without LoD information
    are supported; for higher-dimensional sequence data, please use
    LoDTensor directly.

    Examples:
        >>> import paddle.fluid as fluid
        >>> place = fluid.CPUPlace()
        >>> exe = fluid.executor(place)
        >>> data = np.array(size=(100, 200, 300))
        >>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data)
        >>> ...

    Args:
        data(numpy.ndarray|scalar): an instance of array or a Python scalar
        place(core.Place): the place of created tensor
        dtype(core.VarDesc.VarType|str): the expected data type of created
            tensor; required when `data` is a scalar

    Returns:
        LoDTensor
    """
    # Lists would need LoD bookkeeping, which this helper cannot produce.
    if isinstance(data, list):
        raise RuntimeError("Some of your feed data hold LoD information. \
                They can not be completely cast from a list of Python \
                ndarray to LoDTensor. Please convert data to LoDTensor \
                directly before feeding the data.\
                ")
    #NOTE(zhiqiu): convert python builtin, like float and int, to numpy array
    if not isinstance(data, np.ndarray):
        if np.isscalar(data):
            assert dtype is not None, 'dtype should be given when casting python scalar to tensor'
            dtype = convert_dtype(dtype) if isinstance(
                dtype, core.VarDesc.VarType) else dtype
            data = np.array([data]).astype(dtype)

    # single tensor case: wrap the ndarray in a LoDTensor on `place`.
    tensor = core.LoDTensor()
    tensor.set(data, place)
    return tensor
class FetchHandler(object):
    """Base class for user callbacks that periodically fetch variables
    while a trainer runs.

    Args:
        var_dict(dict): maps a display name to the Variable to fetch.
            Must not be None.
        period_secs(int): interval in seconds between two fetches.
            Default 60.
    """

    def __init__(self, var_dict=None, period_secs=60):
        # BUG FIX: use identity comparison for None (`is not None`) rather
        # than `!= None`, which relies on __ne__ and is non-idiomatic.
        assert var_dict is not None
        self.var_dict = var_dict
        self.period_secs = period_secs

    def handler(self, res_dict):
        """Default handler: print the first element of every fetched
        ndarray to stdout. Subclasses usually override this."""
        for key in res_dict:
            # isinstance is the idiomatic type check (was `type(...) is`).
            if isinstance(res_dict[key], np.ndarray):
                sys.stdout.write("{}[0]: {} ".format(key, res_dict[key][0]))
        sys.stdout.write("\n")

    @staticmethod
    def help():
        """Print a usage example showing how to subclass FetchHandler."""
        print("""
class FetchHandlerExample(FetchHandler):
    def handler(self, res_dict):
        print(res_dict["auc"])
        print("auc: {}, {}".format(res_dict["auc"], time.ctime()))
auc = Variable()
var_dict = {"auc": auc}
handler = FetchHandlerExample(var_dict=var_dict)
""")
class Executor(object):
"""
An Executor in Python, supports single/multiple-GPU running,
and single/multiple-CPU running.
Args:
place(fluid.CPUPlace()|fluid.CUDAPlace(n)|None): This parameter represents
which device the executor runs on. When this parameter is None, PaddlePaddle
will set the default device according to its installation version. If Paddle
is CPU version, the default device would be set to `CPUPlace()` . If Paddle is
GPU version, the default device would be set to `CUDAPlace(0)` . Default is None.
Returns:
Executor
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import numpy
import os
# Set place explicitly.
# use_cuda = True
# place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
# exe = fluid.Executor(place)
# If you don't set place, PaddlePaddle sets the default device.
exe = fluid.Executor()
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
data = fluid.data(name='X', shape=[None, 1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
# Run the startup program once and only once.
# Not need to optimize/compile the startup program.
startup_program.random_seed=1
exe.run(startup_program)
# Run the main program directly without compile.
x = numpy.random.random(size=(10, 1)).astype('float32')
loss_data, = exe.run(train_program,
feed={"X": x},
fetch_list=[loss.name])
# Or, compiled the program and run. See `CompiledProgram`
# for more detail.
# NOTE: If you use CPU to run the program or Paddle is
# CPU version, you need to specify the CPU_NUM, otherwise,
# fluid will use all the number of the logic core as
# the CPU_NUM, in that case, the batch size of the input
# should be greater than CPU_NUM, if not, the process will be
# failed by an exception.
# Set place explicitly.
# if not use_cuda:
# os.environ['CPU_NUM'] = str(2)
# If you don't set place and PaddlePaddle is CPU version
# os.environ['CPU_NUM'] = str(2)
compiled_prog = compiler.CompiledProgram(
train_program).with_data_parallel(
loss_name=loss.name)
loss_data, = exe.run(compiled_prog,
feed={"X": x},
fetch_list=[loss.name])
"""
    def __init__(self, place=None):
        """Create an Executor bound to `place`.

        When `place` is None, the device is chosen from the installed
        Paddle build: CUDAPlace(0) for a CUDA build, CPUPlace() otherwise.
        """
        if place is None:
            if core.is_compiled_with_cuda():
                self.place = core.CUDAPlace(0)
            else:
                self.place = core.CPUPlace()
        else:
            self.place = place
        # Per-executor caches keyed by program cache key; see the
        # _get_*/_add_* accessor methods.
        self.program_caches = dict()
        self.ctx_caches = dict()
        self.scope_caches = dict()
        self.var_caches = dict()
        self.pruned_program_caches = dict()
        p = core.Place()
        p.set_place(self.place)
        # The underlying C++ executor that actually runs programs.
        self._default_executor = core.Executor(p)
        self._closed = False
        self.pruned_program_scope_caches = dict()
    # -- Lightweight cache accessors, all keyed by a program cache key. --

    def _get_scope_cache(self, program_cache_key):
        # Scope reused across runs of the same cached program.
        return self.scope_caches.get(program_cache_key, None)

    def _get_ctx_cache(self, program_cache_key):
        # Cached execution context for the program.
        return self.ctx_caches.get(program_cache_key, None)

    def _get_program_cache(self, program_cache_key):
        # Program with feed/fetch ops already inserted.
        return self.program_caches.get(program_cache_key, None)

    def _add_program_cache(self, program_cache_key, program):
        self.program_caches[program_cache_key] = program

    def _get_pruned_program_cache(self, program_cache_key):
        # Result of _prune_program, cached to avoid re-pruning.
        return self.pruned_program_caches.get(program_cache_key, None)

    def _add_pruned_program_cache(self, program_cache_key, program):
        self.pruned_program_caches[program_cache_key] = program

    def _get_pruned_program_scope_cache(self, program_cache_key):
        return self.pruned_program_scope_caches.get(program_cache_key, None)

    def _add_pruned_program_scope_cache(self, program_cache_key, program):
        self.pruned_program_scope_caches[program_cache_key] = program

    def _add_ctx_cache(self, ctx_cache_key, ctx):
        self.ctx_caches[ctx_cache_key] = ctx

    def _add_scope_cache(self, scope_cache_key, scope):
        self.scope_caches[scope_cache_key] = scope
    def _add_feed_fetch_ops(self, program, feed, fetch_list, feed_var_name,
                            fetch_var_name):
        """Return a clone of `program` with feed/fetch ops inserted.

        Feed ops are prepended (one per feed name, reading from the shared
        `feed_var_name` holder); fetch ops are appended (one per fetch
        target, writing into the shared `fetch_var_name` holder). The
        original program is left untouched.
        """
        tmp_program = program.clone()
        global_block = tmp_program.global_block()

        # Reuse the holder variables if the program already has them.
        if feed_var_name in global_block.vars:
            feed_var = global_block.var(feed_var_name)
        else:
            feed_var = global_block.create_var(
                name=feed_var_name,
                type=core.VarDesc.VarType.FEED_MINIBATCH,
                persistable=True)

        if fetch_var_name in global_block.vars:
            fetch_var = global_block.var(fetch_var_name)
        else:
            fetch_var = global_block.create_var(
                name=fetch_var_name,
                type=core.VarDesc.VarType.FETCH_LIST,
                persistable=True)

        # prepend feed operators
        if not has_feed_operators(global_block, feed, feed_var_name):
            for i, name in enumerate(feed):
                if global_block.has_var(name):
                    out = global_block.var(name)
                    global_block._prepend_op(
                        type='feed',
                        inputs={'X': [feed_var]},
                        outputs={'Out': [out]},
                        attrs={'col': i})
                else:
                    # The feed target may have been pruned from the program;
                    # warn instead of failing so the run can proceed.
                    warnings.warn(
                        "The variable %s is not found in program. It is not declared or is pruned."
                        % name)

        # append fetch_operators
        if not has_fetch_operators(global_block, fetch_list, fetch_var_name):
            for i, var in enumerate(fetch_list):
                assert isinstance(var, Variable) or isinstance(
                    var, six.string_types), (
                        "Wrong type for fetch_list[%s]: %s" % (i, type(var)))
                global_block.append_op(
                    type='fetch',
                    inputs={'X': [var]},
                    outputs={'Out': [fetch_var]},
                    attrs={'col': i})

        return tmp_program
    def _feed_data(self, program, feed, feed_var_name, scope):
        """Copy each entry of `feed` into `scope` for the program's feed ops.

        Walks the prepended feed ops of the global block, converts each
        fed value to a LoDTensor on `self.place` if necessary, validates
        shape/dtype, and stores it at the op's 'col' slot of the feed
        holder variable.
        """
        # feed var to framework
        global_block = program.global_block()
        for op in global_block.ops:
            if op.desc.type() == 'feed':
                feed_target_name = op.desc.output('Out')[0]
                cur_feed = feed[feed_target_name]
                var = global_block.var(feed_target_name)
                if not isinstance(cur_feed, core.LoDTensor):
                    cur_feed = _as_lodtensor(cur_feed, self.place, var.dtype)
                check_feed_shape_type(var, cur_feed)
                idx = op.desc.attr('col')
                core.set_feed_variable(scope, cur_feed, feed_var_name, idx)
            else:
                # Feed ops sit at the front of the block, so the first
                # non-feed op ends the scan.
                break
    def _fetch_data(self, fetch_list, fetch_var_name, scope):
        """Collect the fetched tensors from `scope`, in fetch-list order."""
        outs = [
            core.get_fetch_variable(scope, fetch_var_name, i)
            for i in six.moves.range(len(fetch_list))
        ]
        return outs
    def _split_optimize_ops_in_fetch_list(self, fetch_list):
        """
        Split optimize_ops from fetch_list, which provided to specify program prunning.

        Args:
            fetch_list(list): The original fetch_list.
                Possible types of fetch_list are:
                    fetch_list = ['loss']
                    fetch_list = [[sgd, sgd], 'loss']
                    fetch_list = [([sgd, sgd], [(param, grad)]), 'loss']

        Returns:
            optimize_ops(list): The optimize operators splited from fetch_list.
            fetch_list(list): The updated fetch_list which does not contain optimize operators.
        """
        _optimize_ops = []
        _fetch_list = []

        def _get_targets(_optimize_ops, _fetch_list, item):
            # Classify a single item as either an optimize op or a fetch
            # target; anything else is rejected.
            if isinstance(item, Operator):
                if item._is_optimize_op():
                    _optimize_ops.append(item)
                else:
                    raise TypeError(
                        "The operator in fetch_list is not an optimize_op")
            elif isinstance(item, Variable) or isinstance(
                    item, str) or isinstance(item, six.string_types):
                _fetch_list.append(item)
            else:
                # NOTE(review): "recieved" typo in this message is kept as-is;
                # the message is runtime behavior.
                raise TypeError(
                    "The item in fetch_list should be str, variable or optimize_op, but recieved %s.",
                    type(item))

        for item in fetch_list:
            # NOTE(zhiqiu): to support (optimizer_ops, param_and_grads) and optimizer_ops in fetch_list
            # we should handle tuple and list in fetch_list.
            # TODO(zhiqiu): find a better way to handle that.
            if isinstance(item, list):
                for i in item:
                    _get_targets(_optimize_ops, _fetch_list, i)
            elif isinstance(item, tuple):
                # A tuple is assumed to be (optimize_ops, param_and_grads)
                # from Optimizer.minimize(); only the first element is used.
                for i in item[0]:
                    _get_targets(_optimize_ops, _fetch_list, i)
            else:
                _get_targets(_optimize_ops, _fetch_list, item)

        return _fetch_list, _optimize_ops
def _prune_program(self,
program,
feed=None,
fetch_list=None,
optimize_ops=None):
"""
Prune operators and variables which are not needed to generate
:code:`fetch_list` and optimize operators.
Prune operators and variables which are needed
to generate variables to be feeded.
Notes: This is a very low level API. Users should not use this API
directly.
Args:
program(Program): the origin program
feed(list|dict): feed dict or list.
fetch_list(list|Variable): A list of variables need to be fetched
optimize_ops(list[Operator]): A list of optimizer operators
Returns:
Program: A new, pruned program.
"""
compiled = isinstance(program, compiler.CompiledProgram)
if compiled:
if program._program:
origin_program = program._program
else:
warnings.warn(
"The program holds no _program, maybe it is constructed by graph, which can't be pruned yet."
)
return
else:
origin_program = program
feed_names = []
if isinstance(feed, dict):
feed_names = list(feed.keys())
elif isinstance(feed, list) or isinstance(feed, tuple):
for i, each in enumerate(feed):
feed_names += list(each.keys())
# if optimize_ops is [], all optimize ops in the program is used.
if not optimize_ops:
for block in origin_program.blocks:
for op in block.ops:
if op._is_optimize_op():
optimize_ops.append(op)
targets = fetch_list + optimize_ops
pruned_program = origin_program._prune_with_input(feed_names, targets)
if compiled:
# for compiled program, update the underlying program, re-generate graph,
# and reset the flag so it can be compiled again.
program._program = pruned_program
program._graph = core.Graph(pruned_program.desc)
program._compiled = False
else:
program = pruned_program
return program
def _update_feed(self, program, feed):
"""
Update the feed dict, remove the feed item which is pruned in program.
Notes: This is a very low level API. Users should not use this API
directly.
Args:
program(Program): the pruned program.
feed(list|dict): feed dict or list.
Returns:
feed:(list|dict) updated feed.
"""
compiled = isinstance(program, compiler.CompiledProgram)
if compiled:
if program._program:
global_block = program._program.global_block()
else:
warnings.warn(
"The program holds no _program, maybe it is constructed by graph."
)
else:
global_block = program.global_block()
if isinstance(feed, dict):
for feed_name in list(feed.keys()):
if not global_block.has_var(feed_name):
feed.pop(feed_name)
warnings.warn(
"The variable %s is not found in program. It is not declared or is pruned."
% feed_name)
elif isinstance(feed, list) or isinstance(feed, tuple):
for i, each in enumerate(feed):
for feed_name in list(each.keys()):
if not global_block.has_var(feed_name):
each.pop(feed_name)
warnings.warn(
"The variable %s is not found in program. It is not declared or is pruned."
% feed_name)
return feed
'''
TODO(typhoonzero): Define "no longer use" meaning? Can user create
a new Executor for the same program and run?
TODO(panyx0718): Why ParallelExecutor doesn't have close?
'''
    def close(self):
        """
        Close the executor. This interface is used for distributed training (PServers mode).
        This executor can not be used after calling the interface, because
        this interface releases resources associated with the current Trainer.

        Returns:
            None

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid

              cpu = fluid.CPUPlace()
              exe = fluid.Executor(cpu)
              # execute training or testing
              exe.close()
        """
        # Idempotent: closing twice is a no-op.
        if not self._closed:
            self._default_executor.close()
            self._closed = True
    def _run_parallel(self, program, scope, feed, fetch_list, fetch_var_name,
                      return_numpy, return_merged):
        """Run a CompiledProgram across multiple devices.

        A dict `feed` is split evenly across device-local scopes; a
        list/tuple `feed` supplies one dict per device. Returns the
        fetched tensors, converted to numpy when `return_numpy` is True.
        """
        exe = program._executor
        # TODO(zhenghuihuang): quantization uses Graph in CompiledProgram
        # instead of program. We will add support for checking Vars in Graph
        need_check_feed = program._program is not None
        if need_check_feed:
            global_block = program._program.global_block()
        if isinstance(feed, dict):
            feed_tensor_dict = dict()
            for feed_name in feed:
                feed_tensor = feed[feed_name]
                var = global_block.var(feed_name) if need_check_feed else None
                if not isinstance(feed_tensor, core.LoDTensor):
                    # always set to CPU place, since the tensor need to be split
                    # it is fast in CPU
                    feed_tensor = _as_lodtensor(feed[feed_name],
                                                core.CPUPlace(), var.dtype
                                                if var else None)
                if need_check_feed:
                    check_feed_shape_type(var, feed_tensor, exe.device_count())
                feed_tensor_dict[feed_name] = feed_tensor

            exe.feed_and_split_tensor_into_local_scopes(feed_tensor_dict)
        elif isinstance(feed, list) or isinstance(feed, tuple):
            res = list()
            for i, each in enumerate(feed):
                if not isinstance(each, dict):
                    raise TypeError(
                        "Each element of feed list should be a dict")
                res_dict = dict()
                for feed_name in each:
                    tensor = each[feed_name]
                    var = global_block.var(
                        feed_name) if need_check_feed else None
                    if not isinstance(tensor, core.LoDTensor):
                        # Per-device feed lands directly on that device's place.
                        tensor = _as_lodtensor(each[feed_name],
                                               program._places[i], var.dtype
                                               if var else None)
                    if need_check_feed:
                        check_feed_shape_type(var, tensor)
                    res_dict[feed_name] = tensor
                res.append(res_dict)

            exe.feed_tensors_into_local_scopes(res)

        fetch_var_names = list(map(_to_name_str, fetch_list))
        tensors = exe.run(fetch_var_names, return_merged)._move_to_list()
        return as_numpy(tensors) if return_numpy else tensors
def run(self,
program=None,
feed=None,
fetch_list=None,
feed_var_name='feed',
fetch_var_name='fetch',
scope=None,
return_numpy=True,
use_program_cache=False,
return_merged=True,
use_prune=False):
"""
Run the specified :code:`Program` or :code:`CompiledProgram`. It should be noted that the executor
will execute all the operators in :code:`Program` or :code:`CompiledProgram` without pruning some
operators of the :code:`Program` or :code:`CompiledProgram` according to fetch_list. And you could
specify the scope to store the :code:`Variables` during the executor running if the scope
is not set, the executor will use the global scope, i.e. :code:`fluid.global_scope()`.
Args:
program(Program|CompiledProgram): This parameter represents the :code:`Program` or
:code:`CompiledProgram` to be executed. If this parameter is not provided, that
parameter is None, the program will be set to :code:`fluid.default_main_program()`.
The default is None.
feed(list|dict): This parameter represents the input variables of the model.
If it is single card training, the feed is dict type, and if it is multi-card
training, the parameter feed can be dict or list type variable. If the
parameter type is dict, the data in the feed will be split and sent to
multiple devices (CPU/GPU), that is to say, the input data will be evenly
sent to different devices, so you should make sure the number of samples of
the current mini-batch must be greater than the number of places;
if the parameter type is list, those data are copied directly to each device,
so the length of this list should be equal to the number of places.
The default is None.
fetch_list(list): This parameter represents the variables that need to be returned
after the model runs. The default is None.
feed_var_name(str): This parameter represents the name of the input variable of
the feed operator. The default is "feed".
fetch_var_name(str): This parameter represents the name of the output variable of
the fetch operator. The default is "fetch".
scope(Scope): the scope used to run this program, you can switch
it to different scope. default is :code:`fluid.global_scope()`
return_numpy(bool): This parameter indicates whether convert the fetched variables
(the variable specified in the fetch list) to numpy.ndarray. if it is False,
the type of the return value is a list of :code:`LoDTensor`. The default is True.
use_program_cache(bool): This parameter indicates whether the input :code:`Program` is cached.
If the parameter is True, the model may run faster in the following cases:
the input program is :code:`fluid.Program`, and the parameters(program, feed variable name
and fetch_list variable) of this interface remains unchanged during running.
The default is False.
return_merged(bool): This parameter indicates whether fetched variables (the variables
specified in the fetch list) should be merged according to the execution device dimension.
If :code:`return_merged` is False, the type of the return value is a two-dimensional list
of :code:`Tensor` ( :code:`return_numpy` is False) or a two-dimensional list of
:code:`numpy.ndarray` ( :code:`return_numpy` is True). If :code:`return_merged` is True,
the type of the return value is an one-dimensional list of :code:`Tensor` ( :code:`return_numpy`
is False) or an one-dimensional list of :code:`numpy.ndarray` ( :code:`return_numpy` is True).
Please see Examples 2 for more details. If the lengths of fetched results are variant, please
set :code:`return_merged` as False, which denotes that the fetched results will not be merged.
The default is True, but it is just for the compatibility, and may use False as default value
in the future version.
use_prune(bool): This parameter indicates whether the input :code:`Program` will be pruned.
If the parameter is True, the program will be pruned accroding to the given feed and fetch_list,
which means the operators and variables in program that generate :code:`feed` and are not
needed to generate :code:`fetch_list` will be pruned. The default is False, which means the
program will not pruned and all the operators and variables will be executed during running.
Note that if the tuple returned from :code:`Optimizer.minimize()` is passed to :code:`fetch_list`,
:code:`use_prune` will be overrided to True, and the program will be pruned.
Returns:
List: The fetched result list.
NOTES:
1. If it is multi-card running and the feed parameter is dict type, the input data
will be evenly sent to different cards. For example, using two GPUs to run the model,
the input sample number is 3, that is, [0, 1, 2], the sample number on GPU0 is 1,
that is, [0], and the sample number on GPU1 is 2, that is, [1, 2].
If the number of samples is less than the number of devices, the program will
throw an exception, so when running the model, you should make sure that the
number of samples of the last batch of the data set should be greater than the
number of CPU cores or GPU cards, if it is less than, it is recommended that
the batch be discarded.
2. If the number of CPU cores or GPU cards available is greater than 1, the fetch
results are spliced together in dimension 0 for the same variable values
(variables in fetch_list) on different devices.
Examples 1:
.. code-block:: python
import paddle.fluid as fluid
import numpy
# First create the Executor.
place = fluid.CPUPlace() # fluid.CUDAPlace(0)
exe = fluid.Executor(place)
data = fluid.data(name='X', shape=[None, 1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
adam = fluid.optimizer.Adam()
adam.minimize(loss)
# Run the startup program once and only once.
exe.run(fluid.default_startup_program())
x = numpy.random.random(size=(10, 1)).astype('float32')
outs = exe.run(feed={'X': x},
fetch_list=[loss.name])
Examples 2:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# First create the Executor.
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
data = fluid.data(name='X', shape=[None, 1], dtype='float32')
class_dim = 2
prediction = fluid.layers.fc(input=data, size=class_dim)
loss = fluid.layers.mean(prediction)
adam = fluid.optimizer.Adam()
adam.minimize(loss)
# Run the startup program once and only once.
exe.run(fluid.default_startup_program())
build_strategy = fluid.BuildStrategy()
binary = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel(
loss_name=loss.name, build_strategy=build_strategy)
batch_size = 6
x = np.random.random(size=(batch_size, 1)).astype('float32')
# Set return_merged as False to fetch unmerged results:
unmerged_prediction, = exe.run(binary, feed={'X': x},
fetch_list=[prediction.name],
return_merged=False)
# If the user uses two GPU cards to run this python code, the printed result will be
# (2, 3, class_dim). The first dimension value of the printed result is the number of used
# GPU cards, and the second dimension value is the quotient of batch_size and the
# number of used GPU cards.
print("The unmerged prediction shape: {}".format(np.array(unmerged_prediction).shape))
print(unmerged_prediction)
# Set return_merged as True to fetch merged results:
merged_prediction, = exe.run(binary, feed={'X': x},
fetch_list=[prediction.name],
return_merged=True)
# If the user uses two GPU cards to run this python code, the printed result will be
# (6, class_dim). The first dimension value of the printed result is the batch_size.
print("The merged prediction shape: {}".format(np.array(merged_prediction).shape))
print(merged_prediction)
# Out:
# The unmerged prediction shape: (2, 3, 2)
# [array([[-0.37620035, -0.19752218],
# [-0.3561043 , -0.18697084],
# [-0.24129935, -0.12669306]], dtype=float32), array([[-0.24489994, -0.12858354],
# [-0.49041364, -0.25748932],
# [-0.44331917, -0.23276259]], dtype=float32)]
# The merged prediction shape: (6, 2)
# [[-0.37789783 -0.19921964]
# [-0.3577645 -0.18863106]
# [-0.24274671 -0.12814042]
# [-0.24635398 -0.13003758]
# [-0.49232286 -0.25939852]
# [-0.44514108 -0.2345845 ]]
"""
try:
return self._run_impl(
program=program,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name,
scope=scope,
return_numpy=return_numpy,
use_program_cache=use_program_cache,
use_prune=use_prune,
return_merged=return_merged)
except Exception as e:
if not isinstance(e, core.EOFException):
warnings.warn(
"The following exception is not an EOF exception.")
six.reraise(*sys.exc_info())
def _run_impl(self, program, feed, fetch_list, feed_var_name,
              fetch_var_name, scope, return_numpy, use_program_cache,
              return_merged, use_prune):
    """Dispatch a single execution of ``program``.

    Normalizes the arguments (default program/scope, fetch_list coercion),
    optionally prunes the program down to the ops needed by
    ``feed``/``fetch_list`` (caching the pruned result), then delegates to
    one of three runners: ``_run_program`` for a plain ``Program``,
    ``_run_inference`` for an inference-compiled program, or
    ``_run_parallel`` for any other ``CompiledProgram``.
    """
    if self._closed:
        raise RuntimeError("Attempted to use a closed Executor")

    use_default_main_program = program is None
    if program is None:
        program = default_main_program()
    # Warn (but do not fail) when the program contains no operators at all:
    # this usually means the model was never built or the wrong Program is
    # currently active.
    if isinstance(program, Program) and \
            len(program.global_block().ops) == 0:
        if use_default_main_program:
            error_info = "Now you are using default_main_program, "\
                "but there are no operators in the program to be executed. "\
                "Please ensure you create model correctly or you can pass "\
                "the Program or the CompiledProgram manually."
        else:
            error_info = "There are no operators in the program to be executed. "\
                "If you pass Program manually, please use fluid.program_guard "\
                "to ensure the current Program is being used."
        warnings.warn(error_info)

    if scope is None:
        scope = global_scope()

    # Accept a single Variable or variable name as a one-element fetch list.
    if fetch_list is not None:
        if isinstance(fetch_list, Variable) or isinstance(
                fetch_list, str) or isinstance(fetch_list,
                                               six.string_types):
            fetch_list = [fetch_list]
        assert isinstance(fetch_list, tuple) or isinstance(fetch_list, list), \
            "Currently , The fetch_list type only should be list or tuple, \n"\
            "but the input type is {}. For more information please refer to \n"\
            "the executor.run(...).".format(type(fetch_list))
    else:
        fetch_list = []

    # use_prune can be overridden by putting optimize_ops in fetch_list
    # (the tuple returned by Optimizer.minimize()); keep the originals so
    # the prune cache key reflects what the caller actually asked for.
    _origin_fetch_list = fetch_list
    _origin_program = program
    fetch_list, optimize_ops = self._split_optimize_ops_in_fetch_list(
        fetch_list)
    if optimize_ops:
        use_prune = True
    if use_prune:
        cache_key = _get_strong_program_cache_key(program, feed,
                                                  _origin_fetch_list)
        cached_pruned_program = self._get_pruned_program_cache(cache_key)
        if cached_pruned_program is None:
            if isinstance(program, compiler.CompiledProgram):
                program_scope_cache = self._get_pruned_program_scope_cache(
                    str(id(_origin_program)))
                # copy the original program, so it can be cached.
                program = copy.copy(program)
                # share the local scopes for same original CompiledProgram.
                program._share_vars_from = program_scope_cache
                if self._get_pruned_program_scope_cache(
                        str(id(_origin_program))) is None:
                    self._add_pruned_program_scope_cache(
                        str(id(_origin_program)), program)
            pruned_program = self._prune_program(program, feed, fetch_list,
                                                 optimize_ops)
            self._add_pruned_program_cache(cache_key, pruned_program)
        else:
            pruned_program = cached_pruned_program

        # Drop feed entries that no longer exist in the pruned program.
        feed = self._update_feed(pruned_program, feed)
        program = pruned_program

    compiled = isinstance(program, compiler.CompiledProgram)
    # For backward compatibility, run directly.
    if not compiled:
        return self._run_program(
            program,
            feed=feed,
            fetch_list=fetch_list,
            feed_var_name=feed_var_name,
            fetch_var_name=fetch_var_name,
            scope=scope,
            return_numpy=return_numpy,
            use_program_cache=use_program_cache)

    program._compile(scope, self.place)
    if program._is_inference:
        return self._run_inference(program._executor, feed)
    else:
        return self._run_parallel(
            program,
            scope=scope,
            feed=feed,
            fetch_list=fetch_list,
            fetch_var_name=fetch_var_name,
            return_numpy=return_numpy,
            return_merged=return_merged)
def _run_program(self, program, feed, fetch_list, feed_var_name,
                 fetch_var_name, scope, return_numpy, use_program_cache):
    """Run a plain (non-compiled) ``Program`` on the C++ executor.

    Adds feed/fetch ops around the user program, feeds the input data into
    ``scope``, executes, and returns the fetched tensors (as numpy arrays
    when ``return_numpy`` is True). With ``use_program_cache`` the
    instrumented program, its prepared execution context, and a dedicated
    sub-scope are cached per (program, feed, fetch_list) key and reused on
    subsequent calls.
    """
    if feed is None:
        feed = {}
    elif isinstance(feed, (list, tuple)):
        # A list of per-device feeds is only meaningful for data-parallel
        # compiled programs; here only a single dict is accepted.
        assert len(feed) == 1, "Not compiled with data parallel"
        feed = feed[0]
    if not isinstance(feed, dict):
        raise TypeError(
            "feed requires dict as its Parameter. But you passed in %s" %
            (type(feed)))

    assert program is not None, "The program should not be Empty"
    if not isinstance(program, Program):
        raise TypeError(
            "Executor requires Program as its Parameter. But you passed in %s"
            % (type(program)))

    if use_program_cache:
        cache_key = _get_strong_program_cache_key(program, feed, fetch_list)
        cached_program = self._get_program_cache(cache_key)
        cached_ctx = self._get_ctx_cache(cache_key)
        cached_scope = self._get_scope_cache(cache_key)
        if cached_program is None:
            # First time this (program, feed, fetch_list) is seen:
            # instrument, prepare, and remember everything for reuse.
            cached_program = self._add_feed_fetch_ops(
                program=program,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name)
            self._add_program_cache(cache_key, cached_program)
            fetch_list_str = list(map(_to_name_str, fetch_list))
            cached_ctx = self._default_executor.prepare(
                cached_program.desc, 0, fetch_list_str, False)
            # currently, we cache program, vars, sub_scope here
            # we suppose that in a life cycle of training, a user
            # will not create many programs. So, here the basic
            # rule of caching is to cache all unseen (program, var, scope)
            # when a user use use_program_cache.
            cached_scope = scope.new_scope()
            self._default_executor.create_variables(cached_program.desc,
                                                    cached_scope, 0)
            self._add_ctx_cache(cache_key, cached_ctx)
            self._add_scope_cache(cache_key, cached_scope)
        program = cached_program
        ctx = cached_ctx
        scope = cached_scope
    else:
        program = self._add_feed_fetch_ops(
            program=program,
            feed=feed,
            fetch_list=fetch_list,
            feed_var_name=feed_var_name,
            fetch_var_name=fetch_var_name)

    self._feed_data(program, feed, feed_var_name, scope)
    if not use_program_cache:
        self._default_executor.run(program.desc, scope, 0, True, True,
                                   fetch_var_name)
    else:
        self._default_executor.run_prepared_ctx(ctx, scope, False, False,
                                                False)
    # Fetch results accumulate in a LoD tensor array under fetch_var_name.
    arr = scope.find_var(fetch_var_name).get_lod_tensor_array()
    tensors = arr._move_to_list()
    if return_numpy:
        return as_numpy(tensors)
    else:
        return tensors
def _run_inference(self, exe, feed):
return exe.run(feed)
def _dump_debug_info(self, program=None, trainer=None):
with open(str(id(program)) + "_train_desc.prototxt", "w") as fout:
fout.write(str(trainer))
if program._fleet_opt and "fleet_desc" in program._fleet_opt:
with open("fleet_desc.prototxt", "w") as fout:
fout.write(str(program._fleet_opt["fleet_desc"]))
def _adjust_pipeline_resource(self, pipeline_opt, dataset, pipeline_num):
filelist_length = len(dataset.dataset.get_filelist())
if filelist_length < pipeline_num:
pipeline_num = filelist_length
print(
"Pipeline training: setting the pipeline num to %d is enough because there are only %d files"
% (filelist_length, filelist_length))
if filelist_length < pipeline_num * pipeline_opt["concurrency_list"][0]:
print(
"Pipeline training: setting the 1st element in concurrency_list to %d is enough because there are only %d files"
% (filelist_length // pipeline_num, filelist_length))
pipeline_opt["concurrency_list"][
0] = filelist_length // pipeline_num
dataset.set_thread(pipeline_opt["concurrency_list"][0] * pipeline_num)
return pipeline_num
def _prepare_trainer(self,
                     program=None,
                     dataset=None,
                     scope=None,
                     thread=0,
                     debug=False,
                     fetch_list=None,
                     fetch_info=None,
                     print_period=100):
    """Build and configure a trainer for dataset-based execution.

    Args:
        program: ``Program`` or ``CompiledProgram`` to run.
        dataset: dataset whose ``thread_num`` is used when ``thread`` <= 0.
        scope: execution scope; defaults to ``global_scope()``.
        thread (int): requested thread count; <= 0 means use the dataset's.
        debug (bool): enable per-op run-time/throughput display.
        fetch_list: variables to print during training.
        fetch_info: print labels, one per entry of ``fetch_list``.
        print_period (int): mini-batches between prints.

    Returns:
        tuple: ``(scope, trainer)`` ready for the C++ executor.

    Raises:
        RuntimeError: if neither ``thread`` nor the dataset specifies a
            positive thread count.
    """
    if scope is None:
        scope = global_scope()
    if fetch_list is None:
        fetch_list = []
    if fetch_info is None:
        fetch_info = []
    assert len(fetch_list) == len(fetch_info)
    compiled = isinstance(program, compiler.CompiledProgram)
    if not compiled:
        # TODO: Need a better way to distinguish and specify different execution mode
        if program._pipeline_opt:
            trainer = TrainerFactory()._create_trainer(
                program._pipeline_opt)
        else:
            trainer = TrainerFactory()._create_trainer(program._fleet_opt)
            trainer._set_thread_barrier(program._is_distributed)
        trainer._set_program(program)
    else:
        if program._pipeline_opt:
            trainer = TrainerFactory()._create_trainer(
                program.program._pipeline_opt)
        else:
            trainer = TrainerFactory()._create_trainer(
                program.program._fleet_opt)
        trainer._set_program(program.program)
    if thread <= 0:
        if dataset.thread_num <= 0:
            # Fix: the two adjacent string literals previously concatenated
            # to "...either in Datasetor in Executor..." — a separator space
            # was missing between them.
            raise RuntimeError(
                "You should set thread num first, either in Dataset "
                "or in Executor.train_from_dataset")
        else:
            trainer._set_thread(dataset.thread_num)
    else:
        trainer._set_thread(thread)
    trainer._set_debug(debug)
    trainer._set_fetch_var_and_info(fetch_list, fetch_info, print_period)
    return scope, trainer
def _run_from_dataset(self,
                      program=None,
                      dataset=None,
                      scope=None,
                      thread=0,
                      is_infer=False,
                      debug=False,
                      fetch_list=None,
                      fetch_info=None,
                      print_period=100,
                      fetch_handler=None):
    """Shared driver behind train_from_dataset / infer_from_dataset.

    Prepares the dataset and a trainer, hands them to the C++ executor for
    a full pass over the dataset, and releases the trainer afterwards.
    ``is_infer`` selects inference mode (gradient pushing disabled in
    distributed training). When ``fetch_handler`` is given, a monitor
    thread reports fetched variables from worker scope 0 while the run is
    in progress.
    """
    if dataset is None:
        raise RuntimeError("dataset is need and should be initialized")

    if program._pipeline_opt is not None and program._pipeline_opt[
            "sync_steps"] != -1:
        # hack for paddlebox: sync_steps(-1) denotes paddlebox
        thread = self._adjust_pipeline_resource(program._pipeline_opt,
                                                dataset, thread)

    dataset._prepare_to_run()
    scope, trainer = self._prepare_trainer(
        program=program,
        dataset=dataset,
        scope=scope,
        thread=thread,
        debug=debug,
        fetch_list=fetch_list,
        fetch_info=fetch_info,
        print_period=print_period)
    trainer._set_infer(is_infer)
    trainer._gen_trainer_desc()
    self._dump_debug_info(program=program, trainer=trainer)
    # Let the dataset resize its queues to the trainer's final thread count
    # before the run starts.
    dataset._dynamic_adjust_before_train(trainer.proto_desc.thread_num)

    trainer_instance = self._default_executor.init_for_dataset(
        program.desc, trainer._desc(), scope, dataset.dataset)

    if fetch_handler is not None:
        # Start the fetch monitor strictly around the run so it observes
        # live worker state, then stop it before releasing the trainer.
        scope0 = trainer_instance.get_worker_scope(0)
        fetch_monitor = FetchHandlerMonitor(scope0, fetch_handler)
        fetch_monitor.start()
        self._default_executor.run_from_dataset(trainer_instance)
        fetch_monitor.stop()
        self._default_executor.release_trainer(trainer_instance)
    else:
        self._default_executor.run_from_dataset(trainer_instance)
        self._default_executor.release_trainer(trainer_instance)

    dataset._dynamic_adjust_after_train()
    dataset._finish_to_run()
    return None
def infer_from_dataset(self,
                       program=None,
                       dataset=None,
                       scope=None,
                       thread=0,
                       debug=False,
                       fetch_list=None,
                       fetch_info=None,
                       print_period=100,
                       fetch_handler=None):
    """
    Infer from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.
    Given a program, either a program or compiled program, infer_from_dataset will
    consume all data samples in dataset. Input scope can be given by users. By default,
    scope is global_scope(). The total number of thread run in training is `thread`.
    Thread number used in training will be the minimum of the thread num in Dataset and
    the value of thread in this interface. Debug can be set so that executor will display
    Run-Time for all operators and the throughputs of current infer task.

    The document of infer_from_dataset is almost the same as train_from_dataset,
    except that in distributed training, push gradients will be disabled in infer_from_dataset.
    infer_from_dataset() can be used for evaluation in multi-thread very easily.

    Args:
        program(Program|CompiledProgram): the program that needs to be run,
            if not provided, then default_main_program (not compiled) will be used.
        dataset(paddle.fluid.Dataset): dataset created outside this function,
            a user should provide a well-defined dataset before calling this function.
            Please check the document of Dataset if needed. default is None
        scope(Scope): the scope used to run this program, you can switch it to different scope
            for each run. default is global_scope
        thread(int): number of thread a user wants to run in this function. Default is 0, which
            means using thread num of dataset
        debug(bool): whether a user wants to run infer_from_dataset, default is False
        fetch_list(Variable List): fetch variable list, each variable will be printed during
            training, default is None
        fetch_info(String List): print information for each variable, default is None
        print_period(int): the number of mini-batches for each print, default is 100
        fetch_handler(FetchHandler): a user define class for fetch output.

    Returns:
        None

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            place = fluid.CPUPlace()  # you can set place = fluid.CUDAPlace(0) to use gpu
            exe = fluid.Executor(place)
            x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
            y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
            dataset = fluid.DatasetFactory().create_dataset()
            dataset.set_use_var([x, y])
            dataset.set_thread(1)
            filelist = []  # you should set your own filelist, e.g. filelist = ["dataA.txt"]
            dataset.set_filelist(filelist)
            exe.run(fluid.default_startup_program())
            exe.infer_from_dataset(program=fluid.default_main_program(),
                                   dataset=dataset)
    """
    # Same code path as train_from_dataset, with is_infer=True so that
    # push of gradients is disabled in distributed training.
    return self._run_from_dataset(program, dataset, scope, thread, True,
                                  debug, fetch_list, fetch_info,
                                  print_period, fetch_handler)
def train_from_dataset(self,
                       program=None,
                       dataset=None,
                       scope=None,
                       thread=0,
                       debug=False,
                       fetch_list=None,
                       fetch_info=None,
                       print_period=100,
                       fetch_handler=None):
    """
    Train from a pre-defined Dataset (see paddle.fluid.dataset).

    The executor consumes every data sample in ``dataset`` exactly once,
    running ``program`` (a Program or CompiledProgram, defaulting to the
    uncompiled default_main_program). The effective thread count is the
    minimum of ``thread`` and the dataset's thread num. All resources the
    executor creates for the run are destroyed when it finishes.

    Args:
        program(Program|CompiledProgram): the program to run; defaults to
            default_main_program (not compiled) when not provided.
        dataset(paddle.fluid.Dataset): a well-defined dataset created
            outside this function; see the Dataset documentation.
        scope(Scope): the scope to run the program in; defaults to
            global_scope and may differ between runs.
        thread(int): desired thread count. Default 0 means "use the
            dataset's thread num".
        debug(bool): display per-operator run time and throughput.
        fetch_list(Variable List): variables printed during training.
        fetch_info(String List): print labels; must match fetch_list in
            length.
        print_period(int): mini-batches between prints, default 100.
        fetch_handler(FetchHandler): user-defined class for fetch output.

    Returns:
        None

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid

            place = fluid.CPUPlace()  # you can set place = fluid.CUDAPlace(0) to use gpu
            exe = fluid.Executor(place)
            x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
            y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
            dataset = fluid.DatasetFactory().create_dataset()
            dataset.set_use_var([x, y])
            dataset.set_thread(1)
            filelist = []  # you should set your own filelist, e.g. filelist = ["dataA.txt"]
            dataset.set_filelist(filelist)
            exe.run(fluid.default_startup_program())
            exe.train_from_dataset(program=fluid.default_main_program(),
                                   dataset=dataset)
    """
    # Delegate to the shared runner; the fifth positional argument is
    # is_infer=False, which keeps gradient pushing enabled.
    run_args = (program, dataset, scope, thread, False, debug,
                fetch_list, fetch_info, print_period, fetch_handler)
    return self._run_from_dataset(*run_args)
| 43.199201 | 128 | 0.597159 |
ace58905be9ad40dc88ac240f60e3dd978b1f8e2 | 767 | py | Python | backend/users_setting/models.py | Yashbontala/django-whatsapp | 9d4dc33e11e68c8614173cf5d21ef33db1d7baf7 | [
"MIT"
] | 7 | 2019-03-10T17:37:07.000Z | 2021-05-14T13:28:13.000Z | backend/users_setting/models.py | Yashbontala/django-whatsapp | 9d4dc33e11e68c8614173cf5d21ef33db1d7baf7 | [
"MIT"
] | 2 | 2019-05-22T14:54:36.000Z | 2019-05-30T23:59:45.000Z | backend/users_setting/models.py | Yashbontala/django-whatsapp | 9d4dc33e11e68c8614173cf5d21ef33db1d7baf7 | [
"MIT"
] | 1 | 2021-04-05T12:01:23.000Z | 2021-04-05T12:01:23.000Z | from enum import Enum
from django.db import models
from django.contrib.auth.models import User
class Setting(models.Model):
    """Per-user webhook configuration (exactly one row per auth user)."""

    class Meta:
        db_table = 'settings'

    # One settings row per user; removed together with the user.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Whether webhook delivery is active for this user.
    webhook_enable = models.BooleanField(default=False)
    # Target URL for webhook calls; optional until the user configures it.
    webhook_url = models.URLField(blank=True, null=True)

    def __str__(self):
        """Display the owning user's username."""
        return self.user.username
class Webhook(models.Model):
    """A recorded webhook call: event name, payload, and resulting status."""

    class Meta:
        db_table = 'webhooks'

    # Owning settings row; webhook records are removed with their settings.
    setting = models.ForeignKey(Setting, on_delete=models.CASCADE)
    # Name of the event that triggered the webhook.
    event = models.CharField(max_length=50)
    # Payload sent with the event (may be empty).
    data = models.TextField(max_length=50000, blank=True, null=True)
    # HTTP-style status code of the call; defaults to 400 — presumably
    # "not (yet) delivered successfully"; confirm against the sender code.
    status = models.IntegerField(default=400)

    def __str__(self):
        """Display the event name."""
        return self.event
ace589cb3a6692521d41eb6469f0b3caea8b65e8 | 6,019 | py | Python | tests/test_private_api.py | 5hirish/insta_scrapper | fccfe97a419c3bf82e2b8a2962e2c159c2fc02bc | [
"MIT"
] | null | null | null | tests/test_private_api.py | 5hirish/insta_scrapper | fccfe97a419c3bf82e2b8a2962e2c159c2fc02bc | [
"MIT"
] | null | null | null | tests/test_private_api.py | 5hirish/insta_scrapper | fccfe97a419c3bf82e2b8a2962e2c159c2fc02bc | [
"MIT"
] | null | null | null | import unittest
import argparse
import os
import json
import sys
import logging
import re
import warnings
from .private import (
AccountTests, CollectionsTests, DiscoverTests,
FeedTests, FriendshipTests, LiveTests,
LocationTests, MediaTests, MiscTests,
TagsTests, UploadTests, UsersTests,
UsertagsTests, HighlightsTests,
ClientTests, ApiUtilsTests, CompatPatchTests,
IGTVTests,
)
from .common import (
Client, ClientError, ClientLoginError, ClientCookieExpiredError,
__version__, to_json, from_json
)
if __name__ == '__main__':
    # CLI driver: logs in to the private API (fresh login or cached auth),
    # builds the full test suite, optionally filters it by regex, and exits
    # non-zero if any test fails.
    warnings.simplefilter('ignore', UserWarning)
    logging.basicConfig(format='%(name)s %(message)s', stream=sys.stdout)
    logger = logging.getLogger('instascrape')
    logger.setLevel(logging.WARNING)

    # Example command:
    # python test_private_api.py -u "xxx" -p "xxx" -settings "saved_auth.json" -save
    parser = argparse.ArgumentParser(description='Test instascrape.py')
    parser.add_argument('-settings', '--settings', dest='settings_file_path', type=str, required=True)
    parser.add_argument('-u', '--username', dest='username', type=str, required=True)
    parser.add_argument('-p', '--password', dest='password', type=str, required=True)
    parser.add_argument('-d', '--device_id', dest='device_id', type=str)
    parser.add_argument('-uu', '--uuid', dest='uuid', type=str)
    parser.add_argument('-save', '--save', action='store_true')
    parser.add_argument('-tests', '--tests', nargs='+')
    parser.add_argument('-debug', '--debug', action='store_true')

    args = parser.parse_args()
    if args.debug:
        logger.setLevel(logging.DEBUG)

    print('Client version: {0!s}'.format(__version__))

    # Load previously saved auth (if the settings file exists) so we can
    # skip a fresh login.
    cached_auth = None
    if args.settings_file_path and os.path.isfile(args.settings_file_path):
        with open(args.settings_file_path) as file_data:
            cached_auth = json.load(file_data, object_hook=from_json)

    # Optional. You can custom the device settings instead of using the default one
    my_custom_device = {
        'phone_manufacturer': 'LGE/lge',
        'phone_model': 'RS988',
        'phone_device': 'h1',
        'android_release': '6.0.1',
        'android_version': 23,
        'phone_dpi': '640dpi',
        'phone_resolution': '1440x2392',
        'phone_chipset': 'h1'
    }

    api = None
    if not cached_auth:
        # No cached auth: perform a fresh login. The file's mtime seeds the
        # generated uuid/device id so repeated runs reuse the same identity.
        ts_seed = str(int(os.path.getmtime(__file__)))
        if not args.uuid:
            # Example of how to generate a uuid.
            # You can generate a fixed uuid if you use a fixed value seed
            uuid = Client.generate_uuid(
                seed='{pw!s}.{usr!s}.{ts!s}'.format(**{'pw': args.username, 'usr': args.password, 'ts': ts_seed}))
        else:
            uuid = args.uuid
        if not args.device_id:
            # Example of how to generate a device id.
            # You can generate a fixed device id if you use a fixed value seed
            device_id = Client.generate_deviceid(
                seed='{usr!s}.{ts!s}.{pw!s}'.format(**{'pw': args.password, 'usr': args.username, 'ts': ts_seed}))
        else:
            device_id = args.device_id
        # start afresh without existing auth
        try:
            api = Client(
                args.username, args.password,
                auto_patch=True, drop_incompat_keys=False,
                guid=uuid, device_id=device_id,
                # custom device settings
                **my_custom_device)
        except ClientLoginError:
            print('Login Error. Please check your username and password.')
            sys.exit(99)

        # stuff that you should cache
        cached_auth = api.settings
        if args.save:
            # this auth cache can be re-used for up to 90 days
            with open(args.settings_file_path, 'w') as outfile:
                json.dump(cached_auth, outfile, default=to_json)
    else:
        try:
            # remove previous app version specific info so that we
            # can test the new sig key whenever there's an update
            for k in ['app_version', 'signature_key', 'key_version', 'ig_capabilities']:
                cached_auth.pop(k, None)
            api = Client(
                args.username, args.password,
                auto_patch=True, drop_incompat_keys=False,
                settings=cached_auth,
                **my_custom_device)
        except ClientCookieExpiredError:
            print('Cookie Expired. Please discard cached auth and login again.')
            sys.exit(99)

    # Collect every test group; each init_all returns [{'name': ..., 'test': ...}, ...]
    # (inferred from usage below — confirm against tests/private definitions).
    tests = []
    tests.extend(AccountTests.init_all(api))
    tests.extend(CollectionsTests.init_all(api))
    tests.extend(DiscoverTests.init_all(api))
    tests.extend(FeedTests.init_all(api))
    tests.extend(FriendshipTests.init_all(api))
    tests.extend(LiveTests.init_all(api))
    tests.extend(LocationTests.init_all(api))
    tests.extend(MediaTests.init_all(api))
    tests.extend(MiscTests.init_all(api))
    tests.extend(TagsTests.init_all(api))
    tests.extend(UploadTests.init_all(api))
    tests.extend(UsersTests.init_all(api))
    tests.extend(UsertagsTests.init_all(api))
    tests.extend(HighlightsTests.init_all(api))
    tests.extend(IGTVTests.init_all(api))
    tests.extend(ClientTests.init_all(api))
    tests.extend(CompatPatchTests.init_all(api))
    tests.extend(ApiUtilsTests.init_all())

    def match_regex(test_name):
        # True when any -tests argument, treated as a regex, matches the
        # start of test_name.
        for test_re in args.tests:
            test_re = r'{0!s}'.format(test_re)
            if re.match(test_re, test_name):
                return True
        return False

    if args.tests:
        tests = filter(lambda x: match_regex(x['name']), tests)

    try:
        suite = unittest.TestSuite()
        for test in tests:
            suite.addTest(test['test'])
        result = unittest.TextTestRunner(verbosity=2).run(suite)
        # Exit code 0 only when the whole suite passed.
        sys.exit(not result.wasSuccessful())
    except ClientError as e:
        print('Unexpected ClientError {0!s} (Code: {1:d}, Response: {2!s})'.format(
            e.msg, e.code, e.error_response))
ace589d3044d693aefaa3140e9d8b9ae93760591 | 12,342 | py | Python | gooddata-metadata-client/gooddata_metadata_client/model/json_api_dataset_out_list.py | gooddata/gooddata-python-sdk | df4d4a4d730ab376960ae2ed01e7d86498e85c6a | [
"MIT"
] | 7 | 2022-01-24T16:27:06.000Z | 2022-02-25T10:18:49.000Z | gooddata-metadata-client/gooddata_metadata_client/model/json_api_dataset_out_list.py | gooddata/gooddata-python-sdk | df4d4a4d730ab376960ae2ed01e7d86498e85c6a | [
"MIT"
] | 29 | 2022-01-20T15:45:38.000Z | 2022-03-31T09:39:25.000Z | gooddata-metadata-client/gooddata_metadata_client/model/json_api_dataset_out_list.py | gooddata/gooddata-python-sdk | df4d4a4d730ab376960ae2ed01e7d86498e85c6a | [
"MIT"
] | 7 | 2022-01-20T07:11:15.000Z | 2022-03-09T14:50:17.000Z | """
OpenAPI definition
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v0
Contact: support@gooddata.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from gooddata_metadata_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from gooddata_metadata_client.exceptions import ApiAttributeError
def lazy_import():
    """Import sibling model modules on first use and publish them as module
    globals (generated-code pattern that avoids circular imports)."""
    from gooddata_metadata_client.model.json_api_dataset_out_includes import JsonApiDatasetOutIncludes
    from gooddata_metadata_client.model.json_api_dataset_out_with_links import JsonApiDatasetOutWithLinks
    from gooddata_metadata_client.model.list_links import ListLinks
    globals().update(
        JsonApiDatasetOutIncludes=JsonApiDatasetOutIncludes,
        JsonApiDatasetOutWithLinks=JsonApiDatasetOutWithLinks,
        ListLinks=ListLinks,
    )
class JsonApiDatasetOutList(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('data',): {
},
('included',): {
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': ([JsonApiDatasetOutWithLinks],), # noqa: E501
'links': (ListLinks,), # noqa: E501
'included': ([JsonApiDatasetOutIncludes],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
'links': 'links', # noqa: E501
'included': 'included', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, data, *args, **kwargs): # noqa: E501
"""JsonApiDatasetOutList - a model defined in OpenAPI
Args:
data ([JsonApiDatasetOutWithLinks]):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
links (ListLinks): [optional] # noqa: E501
included ([JsonApiDatasetOutIncludes]): Included resources. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.data = data
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, data, *args, **kwargs):  # noqa: E501
    """JsonApiDatasetOutList - a model defined in OpenAPI

    Args:
        data ([JsonApiDatasetOutWithLinks]):

    Keyword Args:
        _check_type (bool): if True, values for parameters in openapi_types
            will be type checked and a TypeError will be
            raised if the wrong type is input.
            Defaults to True
        _path_to_item (tuple/list): This is a list of keys or values to
            drill down to the model in received_data
            when deserializing a response
        _spec_property_naming (bool): True if the variable names in the input data
            are serialized names, as specified in the OpenAPI document.
            False if the variable names in the input data
            are pythonic names, e.g. snake case (default)
        _configuration (Configuration): the instance to use when
            deserializing a file_type parameter.
            If passed, type conversion is attempted
            If omitted no type conversion is done.
        _visited_composed_classes (tuple): This stores a tuple of
            classes that we have traveled through so that
            if we see that class again we will not use its
            discriminator again.
            When traveling through a discriminator, the
            composed schema that is
            is traveled through is added to this set.
            For example if Animal has a discriminator
            petType and we pass in "Dog", and the class Dog
            allOf includes Animal, we move through Animal
            once using the discriminator, and pick Dog.
            Then in Dog, we will make an instance of the
            Animal class but this time we won't travel
            through its discriminator because we passed in
            _visited_composed_classes = (Animal,)
        links (ListLinks): [optional]  # noqa: E501
        included ([JsonApiDatasetOutIncludes]): Included resources. [optional]  # noqa: E501
    """
    # Pop the framework-internal keyword arguments before treating the
    # remaining kwargs as model properties.
    _check_type = kwargs.pop('_check_type', True)
    _spec_property_naming = kwargs.pop('_spec_property_naming', False)
    _path_to_item = kwargs.pop('_path_to_item', ())
    _configuration = kwargs.pop('_configuration', None)
    _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

    # Positional arguments (other than `data`) are never valid for
    # generated models; reject them with a descriptive API error.
    if args:
        raise ApiTypeError(
            "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                args,
                self.__class__.__name__,
            ),
            path_to_item=_path_to_item,
            valid_classes=(self.__class__,),
        )

    self._data_store = {}
    self._check_type = _check_type
    self._spec_property_naming = _spec_property_naming
    self._path_to_item = _path_to_item
    self._configuration = _configuration
    # Record this class so discriminator traversal does not revisit it.
    self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

    self.data = data
    for var_name, var_value in kwargs.items():
        # Optionally drop unknown keys instead of raising, when the
        # configuration asks for it and no additionalProperties are allowed.
        if var_name not in self.attribute_map and \
                self._configuration is not None and \
                self._configuration.discard_unknown_keys and \
                self.additional_properties_type is None:
            # discard variable.
            continue
        setattr(self, var_name, var_value)
        # Read-only attributes may only be set via from_openapi_data.
        if var_name in self.read_only_vars:
            raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                    f"class with read only attributes.")
| 43.305263 | 124 | 0.580943 |
ace58a25b63db9c23d835837028d76a5fa8f648e | 2,910 | py | Python | windows_packages_gpu/onnxruntime/capi/onnxruntime_validation.py | codeproject/DeepStack | d96368a3db1bc0266cb500ba3701d130834da0e6 | [
"Apache-2.0"
] | 353 | 2020-12-10T10:47:17.000Z | 2022-03-31T23:08:29.000Z | windows_packages_gpu/onnxruntime/capi/onnxruntime_validation.py | codeproject/DeepStack | d96368a3db1bc0266cb500ba3701d130834da0e6 | [
"Apache-2.0"
] | 80 | 2020-12-10T09:54:22.000Z | 2022-03-30T22:08:45.000Z | windows_packages_gpu/onnxruntime/capi/onnxruntime_validation.py | codeproject/DeepStack | d96368a3db1bc0266cb500ba3701d130834da0e6 | [
"Apache-2.0"
] | 63 | 2020-12-10T17:10:34.000Z | 2022-03-28T16:27:07.000Z | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
"""
Check OS requirements for ONNX Runtime Python Bindings.
"""
import platform
import linecache
import warnings
def check_distro_info():
    """Warn (without raising) if the current OS/architecture combination is
    not one that ONNX Runtime supports.

    Checks 64-bit architecture, then applies per-platform version rules for
    Windows, Linux and macOS.
    """
    __my_distro__ = ''
    __my_distro_ver__ = ''
    __my_system__ = platform.system().lower()
    __my_arch__ = platform.architecture()[0].lower()

    __OS_RELEASE_FILE__ = '/etc/os-release'
    __LSB_RELEASE_FILE__ = '/etc/lsb-release'

    if __my_arch__ != '64bit':
        warnings.warn('Unsupported architecture (%s). ONNX Runtime supports 64bit architecture, only.' % __my_arch__)

    if __my_system__ == 'windows':
        __my_distro__ = __my_system__
        __my_distro_ver__ = platform.release().lower()

        if __my_distro_ver__ != '10':
            warnings.warn('Unsupported Windows version (%s). ONNX Runtime supports Windows 10 and above, only.' % __my_distro_ver__)
    elif __my_system__ == 'linux':
        ''' Although the 'platform' python module for getting Distro information works well on standard OS images running on real
        hardware, it is not acurate when running on Azure VMs, Git Bash, Cygwin, etc. The returned values for release and version
        are unpredictable for virtualized or emulated environments.
        /etc/os-release and /etc/lsb_release files, on the other hand, are guaranteed to exist and have standard values in all
        OSes supported by onnxruntime. The former is the current standard file to check OS info and the latter is its antecessor.
        '''
        # Newer systems have /etc/os-release with relevant distro info
        # (fixed-offset slices skip the NAME="..." / VERSION_ID="..." prefixes).
        __my_distro__ = linecache.getline(__OS_RELEASE_FILE__, 3)[3:-1]
        __my_distro_ver__ = linecache.getline(__OS_RELEASE_FILE__, 6)[12:-2]

        # Older systems may have /etc/lsb-release instead
        if not __my_distro__:
            __my_distro__ = linecache.getline(__LSB_RELEASE_FILE__, 1)[11:-1]
            __my_distro_ver__ = linecache.getline(__LSB_RELEASE_FILE__, 2)[16:-1]

        # Instead of trying to parse distro specific files,
        # warn the user ONNX Runtime may not work out of the box
        __my_distro__ = __my_distro__.lower()
        __my_distro_ver__ = __my_distro_ver__.lower()
    elif __my_system__ == 'darwin':
        __my_distro__ = __my_system__
        # NOTE(review): platform.release() on macOS reports the Darwin kernel
        # version, not the marketing macOS version — the "11" threshold here
        # compares against the kernel major; confirm intent.
        __my_distro_ver__ = platform.release().lower()

        if int(__my_distro_ver__.split('.')[0]) < 11:
            warnings.warn('Unsupported macOS version (%s). ONNX Runtime supports macOS 11.0 or later.' % (__my_distro_ver__))
    else:
        warnings.warn('Unsupported platform (%s). ONNX Runtime supports Linux, macOS and Windows platforms, only.' % __my_system__)
| 50.172414 | 133 | 0.650859 |
ace58ae27c7e8f6d1166064da34b7671dab3b2a6 | 5,103 | py | Python | convert_caffe_lmdb.py | tolga-b/nnt | b9473121a8078a6598cee3f64a7fee4dcfd5b292 | [
"MIT"
] | null | null | null | convert_caffe_lmdb.py | tolga-b/nnt | b9473121a8078a6598cee3f64a7fee4dcfd5b292 | [
"MIT"
] | null | null | null | convert_caffe_lmdb.py | tolga-b/nnt | b9473121a8078a6598cee3f64a7fee4dcfd5b292 | [
"MIT"
] | null | null | null | from __future__ import division
import multiprocessing
from multiprocessing import JoinableQueue, Manager
from timeit import default_timer
import external.caffe_pb2 as caffe_pb2
import lmdb
import numpy as np
import os
import sys
import argparse
from utils import save_h5
"""
We are going to implement a parallel reader to read large lmdb database and load it into memory
Depends on the protobuf definition from caffe (caffe_pb2.py)
#TODO add on the fly functionality for generator like usage
"""
class Consumer(multiprocessing.Process):
    """Worker process that decodes caffe Datum protobufs in parallel.

    Pops ``(index, raw_bytes)`` tuples off ``task_queue``, parses each blob
    into a Datum, and stores its float vector in the shared ``result_dict``
    under the original index.  A ``None`` task is the shutdown signal.
    """

    def __init__(self, task_queue, result_dict):
        multiprocessing.Process.__init__(self)
        self.task_queue = task_queue
        self.result_dict = result_dict
        # Reused protobuf message object to avoid re-allocating per task.
        self.datum = caffe_pb2.Datum()

    def run(self):
        # proc_name = self.name
        while True:
            next_task = self.task_queue.get()
            if next_task is None:
                # Poison pill means shutdown
                # print '%s: Exiting' % proc_name
                self.task_queue.task_done()
                break
            # print '%s: %s' % (proc_name, next_task)
            ind, feat = next_task  # task is tuple
            self.datum.ParseFromString(feat)
            self.result_dict[ind] = np.array(self.datum.float_data, dtype=np.float32)
            self.task_queue.task_done()
        return
def test_lmdb_read_speed(path_to_features, read_count=50000):
    """ Test how fast we can read from lmdb (no decoding) single thread

    Iterates up to ``read_count`` records with a cursor, without parsing the
    values, and returns the measured read rate in records per second.
    """
    env = lmdb.open(os.path.join(path_to_features), readonly=True)
    with env.begin() as txn:
        cursor = txn.cursor()
        cnt = 0
        start = default_timer()
        for key, val in cursor:
            if cnt == read_count:
                break
            cnt += 1
        elapsed = default_timer() - start
        print 'Read {} in {} seconds, {} files/s'.format(cnt, elapsed, cnt / elapsed)
        return cnt / elapsed
def test_lmdb_decode_speed(path_to_features, read_count=5000):
    """ Test how fast we can decode single thread

    First slurps up to ``read_count`` raw values into memory, then times the
    protobuf-parse + numpy-convert step alone, so I/O cost is excluded.
    Returns the decode rate in records per second.
    """
    env = lmdb.open(os.path.join(path_to_features), readonly=True)
    with env.begin() as txn:
        cursor = txn.cursor()
        cnt = 0
        feats = []
        for key, val in cursor:
            feats.append(val)
            if cnt == read_count:
                break
            cnt += 1
    datum = caffe_pb2.Datum()
    start = default_timer()
    for feat in feats:
        datum.ParseFromString(feat)
        out = np.array(datum.float_data, dtype=np.float32)
    elapsed = default_timer() - start
    print 'Decoded {} in {}, {} decodes/s'.format(cnt, elapsed, cnt / elapsed)
    return cnt / elapsed
def main(raw_args):
    """Convert a caffe lmdb feature database into an .h5 or .npy file.

    A single sequential reader feeds raw records into a joinable queue;
    a pool of Consumer processes decodes them into a shared dict keyed by
    record index, which is finally assembled into one float32 matrix and
    written to ``path_to_output`` (format chosen by file extension).
    """
    # set up parser
    parser = argparse.ArgumentParser(description='Convert caffe lmdb to h5 or npy file')
    parser.add_argument('path_to_lmdb', type=str, help='Full path to lmdb database')
    parser.add_argument('path_to_output', type=str, help='Full path to output file')
    parser.add_argument('--num_consumers', type=int, default=0,
                        help='Number of cores to use, default is the available number of cores')
    parser.add_argument('--verbosity', type=int, default=1000,
                        help='Print every verbosity conversions')

    args = parser.parse_args(raw_args)
    # print(args)

    # tasks is fed by single sequential reader
    tasks = JoinableQueue(maxsize=500)
    # results is filled by multiple writers with features
    manager = Manager()
    results = manager.dict()

    # start consumers
    if args.num_consumers == 0:
        num_consumers = multiprocessing.cpu_count()
    else:
        num_consumers = args.num_consumers
    print('Creating {} consumers'.format(num_consumers))
    consumers = [Consumer(tasks, results)
                 for i in xrange(num_consumers)]
    for w in consumers:
        w.start()

    # enqueue jobs
    env = lmdb.open(os.path.join(args.path_to_lmdb), readonly=True)
    with env.begin() as txn:
        cursor = txn.cursor()
        cnt = 0
        start = default_timer()
        for key, val in cursor:
            tasks.put((cnt, val))
            if not cnt % args.verbosity:
                print '{},'.format(cnt),
                sys.stdout.flush()
            cnt += 1

    # add a poison pill for each consumer
    for i in xrange(num_consumers):
        tasks.put(None)

    # wait for all of the tasks to finish
    tasks.join()
    elapsed = default_timer() - start

    # convert and sort results in the end w.r.t processing order
    if cnt > 0:
        print('\nConverted {} in {}, {} conversions/s'.format(cnt, elapsed, cnt / elapsed))
        print('with shape {}'.format(results[0].shape[0]))
        # All feature vectors are assumed to share the length of the first one.
        results_sorted = np.zeros((cnt, results[0].shape[0]), dtype=np.float32)
        for i in xrange(cnt):
            results_sorted[i] = results[i]
        if args.path_to_output[-4:] == '.npy':
            np.save(args.path_to_output, results_sorted)
        else:
            save_h5(args.path_to_output, results_sorted)
    print('Done.')
if __name__ == '__main__':
    # Forward the command-line arguments (without the program name).
    main(sys.argv[1:])
| 33.572368 | 96 | 0.623947 |
ace58b10e01e8921139443605d649c1c61da8472 | 1,203 | py | Python | data/p3BR/R1/benchmark/startPyquil63.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R1/benchmark/startPyquil63.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R1/benchmark/startPyquil63.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=2
# total number=12
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    """Build the fixed 2-qubit pyquil Program for this benchmark.

    Gate sequence is auto-generated; the trailing `# number=N` comments are
    the generator's gate indices, kept for traceability.
    """
    prog = Program()  # circuit begin

    prog += H(0)  # number=1
    prog += RX(-0.09738937226128368,2)  # number=2
    prog += H(1)  # number=3
    prog += H(0)  # number=9
    prog += CZ(1,0)  # number=10
    prog += H(0)  # number=11
    prog += CNOT(1,0)  # number=5
    prog += X(1)  # number=6
    prog += Z(1)  # number=8
    prog += X(1)  # number=7
    # circuit end

    return prog
def summrise_results(bitstrings) -> dict:
    """Tally measurement outcomes.

    Args:
        bitstrings: iterable of hashable outcomes (e.g. bitstring strings).

    Returns:
        dict mapping each distinct outcome to its occurrence count.
    """
    d = {}
    for l in bitstrings:
        # dict.get with a default replaces the explicit None-check branch.
        d[l] = d.get(l, 0) + 1
    return d
if __name__ == '__main__':
    # Run 1024 shots of the circuit on a single-qubit QVM, join each shot's
    # per-qubit results into bitstrings, and dump the tallied counts to CSV.
    prog = make_circuit()
    qvm = get_qc('1q-qvm')

    results = qvm.run_and_measure(prog,1024)
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    writefile = open("../data/startPyquil63.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
| 22.698113 | 64 | 0.604323 |
ace58b3ee233445432a7cfdce39179ddcba01a1d | 7,793 | py | Python | powerline/commands/main.py | PH111P/powerline | f8dfe7e7e3d021cd66bc0e19b19ea4a51949cb9a | [
"MIT"
] | 23 | 2016-12-16T09:03:18.000Z | 2022-02-25T19:19:23.000Z | powerline/commands/main.py | PH111P/powerline | f8dfe7e7e3d021cd66bc0e19b19ea4a51949cb9a | [
"MIT"
] | 30 | 2016-12-20T11:11:42.000Z | 2019-11-19T15:23:59.000Z | powerline/commands/main.py | PH111P/powerline | f8dfe7e7e3d021cd66bc0e19b19ea4a51949cb9a | [
"MIT"
] | 4 | 2016-12-11T18:29:11.000Z | 2018-04-22T07:51:28.000Z | import argparse
import sys
from itertools import chain
from powerline.lib.overrides import parsedotval, parse_override_var
from powerline.lib.dict import mergeargs
from powerline.lib.encoding import get_preferred_arguments_encoding
from powerline.lib.unicode import u, unicode
from powerline.bindings.wm import wm_threads
if sys.version_info < (3,):
    encoding = get_preferred_arguments_encoding()

    def arg_to_unicode(s):
        # Python 2: decode byte strings with the preferred locale encoding,
        # replacing undecodable bytes; pass already-unicode values through.
        return unicode(s, encoding, 'replace') if not isinstance(s, unicode) else s  # NOQA
else:
    def arg_to_unicode(s):
        # Python 3: argv entries are already str (unicode); no-op.
        return s
def finish_args(parser, environ, args, is_daemon=False):
    '''Do some final transformations

    Transforms ``*_override`` arguments into dictionaries, adding overrides from
    environment variables. Transforms ``renderer_arg`` argument into dictionary
    as well, but only if it is true.

    :param parser:
        Argument parser; used only to report errors via ``parser.error``.
    :param dict environ:
        Environment from which additional overrides should be taken from.
    :param args:
        Arguments object returned by
        :py:meth:`argparse.ArgumentParser.parse_args`. Will be modified
        in-place.
    :param bool is_daemon:
        True when called from the daemon; ``wm.*`` extensions are only
        allowed in that case.

    :return: Object received as second (``args``) argument.
    '''
    # Environment overrides come first so explicit CLI overrides win on merge.
    args.config_override = mergeargs(chain(
        parse_override_var(environ.get('POWERLINE_CONFIG_OVERRIDES', '')),
        (parsedotval(v) for v in args.config_override or ()),
    ))
    args.theme_override = mergeargs(chain(
        parse_override_var(environ.get('POWERLINE_THEME_OVERRIDES', '')),
        (parsedotval(v) for v in args.theme_override or ()),
    ))
    if args.renderer_arg:
        args.renderer_arg = mergeargs((parsedotval(v) for v in args.renderer_arg), remove=True)
        if 'pane_id' in args.renderer_arg:
            if isinstance(args.renderer_arg['pane_id'], (bytes, unicode)):
                try:
                    # tmux pane ids may be passed as e.g. "%12"; normalize to int.
                    args.renderer_arg['pane_id'] = int(args.renderer_arg['pane_id'].lstrip(' %'))
                except ValueError:
                    pass
            if 'client_id' not in args.renderer_arg:
                args.renderer_arg['client_id'] = args.renderer_arg['pane_id']
    args.config_path = (
        [path for path in environ.get('POWERLINE_CONFIG_PATHS', '').split(':') if path]
        + (args.config_path or [])
    )
    if args.ext[0].startswith('wm.'):
        if not is_daemon:
            parser.error('WM bindings must be used with daemon only')
        elif args.ext[0][3:] not in wm_threads:
            parser.error('WM binding not found')
    elif not args.side:
        # Non-WM extensions require the positional `side` argument.
        parser.error('expected one argument')
    return args
def int_or_sig(s):
    """Parse an exit-status string: keep signal names (``sig...``) as
    unicode, convert anything else to an integer."""
    if not s.startswith('sig'):
        return int(s)
    return u(s)
def get_argparser(ArgumentParser=argparse.ArgumentParser):
    '''Build the argument parser for the powerline command.

    :param ArgumentParser:
        Parser class to instantiate; injectable for testing.

    :return: Configured parser instance.
    '''
    parser = ArgumentParser(description='Powerline prompt and status line script.')
    parser.add_argument(
        'ext', nargs=1,
        help='Extension: application for which powerline command is launched '
             '(usually `shell\' or `tmux\'). Also supports `wm.\' extensions: '
             + ', '.join(('`wm.' + key + '\'' for key in wm_threads.keys())) + '.'
    )
    parser.add_argument(
        'side', nargs='?', choices=('left', 'right', 'above', 'aboveleft'),
        help='Side: `left\' and `right\' represent left and right side '
             'respectively, `above\' emits lines that are supposed to be printed '
             'just above the prompt and `aboveleft\' is like concatenating '
             '`above\' with `left\' with the exception that only one Python '
             'instance is used in this case. May be omitted for `wm.*\' extensions.'
    )
    parser.add_argument(
        '-r', '--renderer-module', metavar='MODULE', type=str,
        help='Renderer module. Usually something like `.bash\' or `.zsh\' '
             '(with leading dot) which is `powerline.renderers.{ext}{MODULE}\', '
             'may also be full module name (must contain at least one dot or '
             'end with a dot in case it is top-level module) or '
             '`powerline.renderers\' submodule (in case there are no dots).'
    )
    parser.add_argument(
        '-w', '--width', type=int,
        help='Maximum prompt with. Triggers truncation of some segments.'
    )
    parser.add_argument(
        '--last-exit-code', metavar='INT', type=int_or_sig,
        help='Last exit code.'
    )
    parser.add_argument(
        '--last-pipe-status', metavar='LIST', default='',
        type=lambda s: [int_or_sig(status) for status in s.split()],
        help='Like above, but is supposed to contain space-separated array '
             'of statuses, representing exit statuses of commands in one pipe.'
    )
    parser.add_argument(
        '--jobnum', metavar='INT', type=int,
        help='Number of jobs.'
    )
    parser.add_argument(
        '-c', '--config-override', metavar='KEY.KEY=VALUE', type=arg_to_unicode,
        action='append',
        help='Configuration overrides for `config.json\'. Is translated to a '
             'dictionary and merged with the dictionary obtained from actual '
             'JSON configuration: KEY.KEY=VALUE is translated to '
             '`{"KEY": {"KEY": VALUE}}\' and then merged recursively. '
             'VALUE may be any JSON value, values that are not '
             '`null\', `true\', `false\', start with digit, `{\', `[\' '
             'are treated like strings. If VALUE is omitted '
             'then corresponding key is removed.'
    )
    parser.add_argument(
        '-t', '--theme-override', metavar='THEME.KEY.KEY=VALUE', type=arg_to_unicode,
        action='append',
        help='Like above, but theme-specific. THEME should point to '
             'an existing and used theme to have any effect, but it is fine '
             'to use any theme here.'
    )
    parser.add_argument(
        '-R', '--renderer-arg',
        metavar='KEY=VAL', type=arg_to_unicode, action='append',
        help='Like above, but provides argument for renderer. Is supposed '
             'to be used only by shell bindings to provide various data like '
             'last-exit-code or last-pipe-status (they are not using '
             '`--renderer-arg\' for historical resons: `--renderer-arg\' '
             'was added later).'
    )
    parser.add_argument(
        '-p', '--config-path', action='append', metavar='PATH',
        help='Path to configuration directory. If it is present then '
             'configuration files will only be seeked in the provided path. '
             'May be provided multiple times to search in a list of directories.'
    )
    parser.add_argument(
        '--socket', metavar='ADDRESS', type=str,
        help='Socket address to use in daemon clients. Is always UNIX domain '
             'socket on linux and file socket on Mac OS X. Not used here, '
             'present only for compatibility with other powerline clients. '
             'This argument must always be the first one and be in a form '
             '`--socket ADDRESS\': no `=\' or short form allowed '
             '(in other powerline clients, not here).'
    )
    return parser
def write_output(args, powerline, segment_info, write):
    '''Render the requested powerline side(s) and emit via ``write``.

    :param args:
        Parsed arguments (see :py:func:`get_argparser`); ``args.side`` may be
        ``above``/``aboveleft`` in which case the above-prompt lines are
        written first and the remaining suffix (if any) is rendered after.
    :param powerline:
        Powerline renderer object providing ``render``/``render_above_lines``.
    :param dict segment_info:
        Mutable segment info; extended in-place with ``args.renderer_arg``.
    :param write:
        Callable accepting the rendered string.
    '''
    if args.renderer_arg:
        segment_info.update(args.renderer_arg)
    if args.side.startswith('above'):
        for line in powerline.render_above_lines(
            width=args.width,
            segment_info=segment_info,
            mode=segment_info.get('mode', None),
        ):
            if line:
                write(line + '\n')
        # Strip the `above` prefix; an empty remainder means nothing more to do.
        args.side = args.side[len('above'):]

    if args.side:
        rendered = powerline.render(
            width=args.width,
            side=args.side,
            segment_info=segment_info,
            mode=segment_info.get('mode', None),
        )
        write(rendered)
| 41.673797 | 97 | 0.618504 |
ace58d3bd38ee4834d39eb3d3570bed20434ed23 | 9,113 | py | Python | orchid_app/views.py | ktheiss22/Orchids | bdd129d0c899db4a8f3cdb5630c690b48d534cae | [
"MIT"
] | 3 | 2018-01-24T19:08:30.000Z | 2018-12-29T08:08:26.000Z | orchid_app/views.py | ktheiss22/Orchids | bdd129d0c899db4a8f3cdb5630c690b48d534cae | [
"MIT"
] | 20 | 2017-02-22T21:21:47.000Z | 2022-03-11T23:15:02.000Z | orchid_app/views.py | ktheiss22/Orchids | bdd129d0c899db4a8f3cdb5630c690b48d534cae | [
"MIT"
] | 1 | 2022-02-17T18:10:00.000Z | 2022-02-17T18:10:00.000Z | import re
import os
import django_tables2 as tables
from datetime import datetime
from django.core import exceptions
from django.contrib import messages
from django.shortcuts import render, redirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from . import models
from forms import ActionsForm, SystemForm
import orchid_app.controller as controller
import orchid_app.utils.sysinfo as sysinfo
import warnings
warnings.filterwarnings('ignore')
class SensorTable(tables.Table):
    """django-tables2 table rendering Sensors readings."""
    date = tables.DateTimeColumn(short=True)  # still doesn't work.

    class Meta:
        model = models.Sensors
        fields = ('date', 't_amb', 't_obj', 'rh', 'lux', 'hpa', 'wind', 'water')
class ActionTable(tables.Table):
    """django-tables2 table rendering actuator Actions history."""
    date = tables.DateTimeColumn(short=True)  # still doesn't work.

    class Meta:
        model = models.Actions
        fields = ('date', 'water', 'mist', 'fan', 'light', 'heat', 'reason')
# @register.filter(name='myDate')
# def myDate(value, arg):
# #arg is optional and not needed but you could supply your own formatting if you want.
# dateformatted = value.strftime("%b %d, %Y at %I:%M %p")
# return dateformatted
# @login_required
def list(request):
    """Main sensor-list view: show paginated sensor readings, current
    actuator states and system status; handle manual actuator POSTs."""
    # Use auto_id for further form changes
    form = ActionsForm(request.POST or None, auto_id=True)

    # Get actions template
    a = controller.get_last_action()

    if request.method == "POST":
        if form.is_valid():
            a = parse_user_input(a, request)
            # POST-Redirect-GET to avoid browser form resubmission on F5.
            return redirect('/')
    else:
        form = ActionsForm()

    # Verbalize actuator values (True/False -> 'on'/'off') for display.
    for k, v in a.iteritems():
        a[k] = _verb(v)
    form.water = a.water

    qs = models.Sensors.objects.all().order_by('-date')  # filter(date=request.date
    paginator = Paginator(qs, 30)
    page = request.GET.get('page')
    try:
        table = paginator.page(page)
    except PageNotAnInteger:
        table = paginator.page(1)
    except EmptyPage:
        table = paginator.page(paginator.num_pages)

    # Keep reference to page. Dirty trick. TODO: improve.
    pp = table
    # Convert current page into table.
    table = SensorTable(table)
    total = qs.count()

    # Mark the current controller state, if known, for the status display.
    statuses = [False for i in range(len(controller.state_list))]
    i = controller.get_current_state()[1]
    if i != controller.NO_DATA:
        statuses[i] = True

    al = _get_next_actions_parsed()
    tl = _get_timer_actions_parsed()

    context = {'form': form, 'paginator': pp, 'total': total, 'table': table, 'actuators': a, 'statuses': statuses,
               'actionList': al, 'timerList': tl,
               }
    return render(request, 'orchid_app/sensor_list.html', context)
def action_list(request):
    """Action-history view: paginated actuator actions plus the same
    state/forms context as the sensor list."""
    form = ActionsForm(request.POST or None, auto_id=True)

    a = controller.get_last_action()

    if request.method == "POST":
        if form.is_valid():
            a = parse_user_input(a, request)
            # Use POST-Redirect-GET concept (PRG). This avoids "form resubmission" from browser on page refresh (F5).
            # Huge notice: The redirection path is RELATIVE. It relates to the page the form is loaded.
            # Therefore, an argument for every redirect must start with slash /, which means 'absolute path from root of the app'.
            return redirect('/actions/')
    else:
        form = ActionsForm()

    # Standartize/verbose actuator form values.
    for k, v in a.iteritems():
        a[k] = _verb(v)
    form.water = a.water

    qs = models.Actions.objects.all().order_by('-date')  # filter(date=request.date
    paginator = Paginator(qs, 30)
    page = request.GET.get('page')
    try:
        table = paginator.page(page)
    except PageNotAnInteger:
        table = paginator.page(1)
    except EmptyPage:
        table = paginator.page(paginator.num_pages)

    # Keep reference to page. Dirty trick. TODO: improve.
    pp = table
    # Convert current page into table.
    table = ActionTable(table)
    total = qs.count()

    statuses = [False for i in range(len(controller.state_list))]
    i = controller.get_current_state()[1]
    if i != controller.NO_DATA:
        statuses[i] = True

    al = _get_next_actions_parsed()
    tl = _get_timer_actions_parsed()

    context = {'form': form, 'paginator': pp, 'total': total, 'table': table, 'actuators': a, 'statuses': statuses,
               'actionList': al, 'timerList': tl,
               }
    return render(request, 'orchid_app/action_list.html', context)
def sysinfo_list(request):
    """System-info view: show host stats charts; handle firmware-update and
    runner-restart requests posted from the system form."""
    form = SystemForm(request.POST or None)

    if request.method == "POST":
        if form.is_valid():
            # NOTE(review): if the POST contains neither 'update' nor
            # 'restart', `res`/`msg` are unbound below — confirm the form
            # always submits one of the two buttons.
            if 'update' in request.POST:
                print 'User requested the firmware update.'
                msg = 'User requested the firmware update.'
                res = controller.update_firmware()
            elif 'restart' in request.POST:
                print 'user requested runner restart.'
                msg = 'user requested runner restart.'
                res = os.system('sudo service orchid_runner restart') == 0
            if res:
                messages.success(request, "Actions taken: " + msg)
            else:
                messages.error(request, "Actions failed: " + msg)
            return redirect('/sysinfo/')
    else:
        form = SystemForm()

    si = sysinfo.get_sysinfo_html()
    chart_data = sysinfo.get_sysinfo_d()
    charts = {
        'CPU': chart_data['cpu']['load']['current'],
        'RAM': chart_data['memory']['RAM_MB']['percent'],
        'Flash': chart_data['memory']['flash_GB']['percent'],
        'Temp': chart_data['cpu']['temp']['current'],
    }
    return render(request, 'orchid_app/sysinfo_list.html', {'form': form, 'sysinfo': si, 'charts': charts})
def parse_user_input(a, request):
    """Apply manual actuator settings from the POSTed form.

    Reads the requested on/off states and optional timer duration, invokes
    controller.activate() with reason 'Manual' (plus a timer suffix when a
    duration is given), reports success/failure via Django messages, and
    returns the updated action mapping.
    """
    # Keep a copy for compare
    la = controller.get_last_action()

    a.mist = request.POST.get("mist", False)
    a.water = request.POST.get("water", False)
    a.fan = request.POST.get("fan", False)
    a.light = request.POST.get("light", False)
    a.heat = request.POST.get("heat", False)
    time = request.POST.get("time", 0)
    reason = 'Manual'
    suffix = ''  # Add it to indicate in the DB that timer is engaged. Dirty trick for stateless machine :(

    for k, v in a.iteritems():
        # Don't waste time on non-changes actions
        if v == la[k]:
            continue
        if v and int(time) > 0:
            # For ON action:
            # Set 'Manual' reason and Send long-time actions to background timer if time is given.
            # Else Do 0-time actions immediately.
            suffix = ' with timer for %s minutes' % time
        # # For OFF action.
        # # Set 'Automate' reason and Turn off actuator if was enabled automatically.
        # # Else Set 'Manual' reason and Turn off actuator.
        # if controller.is_enabled(automate=True, actuator=k):
        #     reason = 'Automate overridden by user'
        #     # Stop other actions compare. One overriding action is important and enough
        #     break

    msg = controller.activate(reason=reason + suffix, mist=a.mist, drip=a.water, fan=a.fan, light=a.light, heat=a.heat)
    # Treat the result as success only when it mentions neither 'wrong' nor 'skip'.
    if [i for i in ['wrong', 'skip'] if i not in msg.lower()]:
        messages.success(request, "Actions taken: " + msg)
    else:
        messages.error(request, "Actions tried: " + msg)

    return a
def _get_next_actions_parsed():
    '''Return list of actions and times in format per item:
    ['actuator', 'action', 'remaining_time']
    '''
    al = controller.get_next_action()
    if al:
        for i in range(len(al)):
            # (actuator, minutes_remaining, new_state) -> display triple.
            al[i] = (al[i][0].capitalize(), _verb(al[i][2]).capitalize(), 'Now' if al[i][1] < 1 else _humanize(al[i][1]))
    return al
def _get_timer_actions_parsed():
    '''Return list of actions and times in format per item:
    ['actuator', 'action', 'remaining_time']
    '''
    # Process timer
    res = []
    tr = controller.get_timer_order(seconds=True)
    if tr:
        ad, t_rem = tr  # Unpack timer results
        # filt = {'reason__icontains': 'timer off'}
        # qs = models.Actions.objects.filter(**filt).last()
        # dt = (datetime.now() - qs.date.replace(tzinfo=None)).total_seconds() / 60
        # Do not follow automated rules if timer is active.
        # if dt < dt + t_rem:
        if 0 < t_rem:
            for i in ad:
                # t_rem = max(0, t_rem)
                if ad[i]:
                    # The displayed action is the opposite state the timer will set.
                    res.append((i.capitalize(), _verb(not ad[i]).capitalize(), 'Now' if t_rem < 1 else _humanize(t_rem, with_secs=True)))
    return res
def _verb(b):
'''Convert boolean into verbal off/on.'''
return ['off', 'on'][b] if type(b) == bool else b
def _humanize(fl, with_secs=False):
'''Convert minutes or seconds into human readable string.'''
fl = int(fl)
if with_secs:
s = fl % 60
fl /= 60
else:
s = ''
m = fl % 1440 % 60
h = fl % 1440 / 60
d = fl / 1440
res = (str(d) + 'd') if d else ''
res += (str(h) + 'h') if h else ''
res += (str(m) + 'm') if m else ''
res += (str(s) + 's') if s else ''
return res if res else 0
| 33.380952 | 137 | 0.609788 |
ace58e4ed49a6f030be8fd5c9b70045ce7ff975c | 54 | py | Python | python/testData/formatter/continuationIndentBeforeFunctionArguments.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2018-12-29T09:53:39.000Z | 2018-12-29T09:53:42.000Z | python/testData/formatter/continuationIndentBeforeFunctionArguments.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/formatter/continuationIndentBeforeFunctionArguments.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | sum(
1,
2, 3,
5,
)
sum(
1,
2, 3,
5)
| 4.909091 | 14 | 0.259259 |
ace58e5cc8ebb3452b4c134953d5d91e42dcc3e5 | 20,674 | py | Python | layerindex/tools/import_layer.py | anelliot/layerindex-web | a3b1696cfca6c3b8fa63f7aaeb7d52d3a68348b6 | [
"MIT"
] | null | null | null | layerindex/tools/import_layer.py | anelliot/layerindex-web | a3b1696cfca6c3b8fa63f7aaeb7d52d3a68348b6 | [
"MIT"
] | 4 | 2021-06-08T21:14:26.000Z | 2022-03-12T00:22:24.000Z | layerindex/tools/import_layer.py | lab-github/layerindex-web | 60b03648b7e1acd89127ce4148ea596409b40e06 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Import a layer into the database
#
# Copyright (C) 2016 Intel Corporation
# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
#
# Licensed under the MIT license, see COPYING.MIT for details
import sys
import os.path
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..')))
import optparse
import re
import glob
import utils
import logging
import subprocess
from layerconfparse import LayerConfParse
class DryRunRollbackException(Exception):
    """Raised deliberately to roll back the database transaction when the
    import is run in dry-run mode."""
# Module-wide logger for the layer import tool.
logger = utils.logger_create('LayerIndexImport')

# Matches wiki-style "[<url> link]" markup, capturing the URL.
link_re = re.compile(r'\[(http.*) +link\]')
def set_vcs_fields(layer, repoval):
    """Populate a layer's VCS web-URL fields from its fetch URL.

    Recognises git.openembedded.org, git.yoctoproject.org, GitHub, GitLab
    and Bitbucket and fills in the matching web/tree/file/commit URL
    templates (with %branch%/%path%/%hash% placeholders). For unknown hosts
    only vcs_url is set.

    Fix: the ``.git`` suffix strip now uses an escaped dot (r'\.git$');
    the old unescaped pattern also mangled names like 'legit' -> 'l'.
    """
    layer.vcs_url = repoval
    if repoval.startswith('git://git.openembedded.org/'):
        reponame = re.sub(r'^.*/', '', repoval)
        layer.vcs_web_url = 'http://cgit.openembedded.org/' + reponame
        layer.vcs_web_tree_base_url = 'http://cgit.openembedded.org/' + reponame + '/tree/%path%?h=%branch%'
        layer.vcs_web_file_base_url = 'http://cgit.openembedded.org/' + reponame + '/tree/%path%?h=%branch%'
        layer.vcs_web_commit_url = 'http://cgit.openembedded.org/' + reponame + '/commit/?id=%hash%'
    elif repoval.startswith('git://git.yoctoproject.org/'):
        reponame = re.sub(r'^.*/', '', repoval)
        layer.vcs_web_url = 'http://git.yoctoproject.org/cgit/cgit.cgi/' + reponame
        layer.vcs_web_tree_base_url = 'http://git.yoctoproject.org/cgit/cgit.cgi/' + reponame + '/tree/%path%?h=%branch%'
        layer.vcs_web_file_base_url = 'http://git.yoctoproject.org/cgit/cgit.cgi/' + reponame + '/tree/%path%?h=%branch%'
        layer.vcs_web_commit_url = 'http://git.yoctoproject.org/cgit/cgit.cgi/' + reponame + '/commit/?id=%hash%'
    elif repoval.startswith('git://github.com/') or repoval.startswith('http://github.com/') or repoval.startswith('https://github.com/'):
        reponame = re.sub(r'^.*github\.com/', '', repoval)
        reponame = re.sub(r'\.git$', '', reponame)
        layer.vcs_web_url = 'http://github.com/' + reponame
        layer.vcs_web_tree_base_url = 'http://github.com/' + reponame + '/tree/%branch%/'
        layer.vcs_web_file_base_url = 'http://github.com/' + reponame + '/blob/%branch%/'
        layer.vcs_web_commit_url = 'http://github.com/' + reponame + '/commit/%hash%'
    elif repoval.startswith('git://gitlab.com/') or repoval.startswith('http://gitlab.com/') or repoval.startswith('https://gitlab.com/'):
        reponame = re.sub(r'^.*gitlab\.com/', '', repoval)
        reponame = re.sub(r'\.git$', '', reponame)
        layer.vcs_web_url = 'http://gitlab.com/' + reponame
        layer.vcs_web_tree_base_url = 'http://gitlab.com/' + reponame + '/tree/%branch%/'
        layer.vcs_web_file_base_url = 'http://gitlab.com/' + reponame + '/blob/%branch%/'
        layer.vcs_web_commit_url = 'http://gitlab.com/' + reponame + '/commit/%hash%'
    elif repoval.startswith('git://bitbucket.org/') or repoval.startswith('http://bitbucket.org/') or repoval.startswith('https://bitbucket.org/'):
        reponame = re.sub(r'^.*bitbucket\.org/', '', repoval)
        reponame = re.sub(r'\.git$', '', reponame)
        layer.vcs_web_url = 'http://bitbucket.org/' + reponame
        layer.vcs_web_tree_base_url = 'http://bitbucket.org/' + reponame + '/src/%branch%/%path%?at=%branch%'
        layer.vcs_web_file_base_url = 'http://bitbucket.org/' + reponame + '/src/%branch%/%path%?at=%branch%'
        layer.vcs_web_commit_url = 'http://bitbucket.org/' + reponame + '/commits/%hash%'
def readme_extract(readmefn):
    """Extract layer metadata from a README file.

    Scans the README for three pieces of information:
      * a description: the leading prose up to the first double blank
        line, discarding any title underlined with ==== or ----
      * maintainers: entries following a "Maintained by:"-style header
        that contain an email address or an " at " obfuscation
      * dependencies: URI:/layers: pairs following a "Depends:"-style
        header

    Returns a (description, maintainers, dependencies) tuple.
    """
    # Matches headers such as "Maintainer:", "Maintainers:", "Maintained by:"
    maintainer_re = re.compile(r'maintaine[r(s)ed by]*[:\n\r]', re.IGNORECASE)
    # Matches headers such as "Depends:", "Dependencies:", "Depends upon:"
    deps_re = re.compile(r'depend[sencies upon]*[:\n\r]', re.IGNORECASE)
    maintlines = []
    deps = []
    desc = ''
    maint_mode = False
    blank_seen = False
    deps_mode = False
    desc_mode = True
    with open(readmefn, 'r') as f:
        for line in f.readlines():
            if deps_mode:
                # Collect dependency entries until a maintainer header or a
                # second blank line terminates the section.
                if maintainer_re.search(line):
                    deps_mode = False
                else:
                    if ':' in line:
                        blank_seen = False
                        if line.startswith('URI:'):
                            deps.append(line.split(':', 1)[-1].strip())
                        if line.startswith('layers:'):
                            # Pair the layers value with the last URI seen
                            deps[len(deps)-1] = (deps[len(deps)-1], line.split(':', 1)[-1].strip())
                    elif not (line.startswith('====') or line.startswith('----')):
                        if blank_seen:
                            deps_mode = False
                        else:
                            blank_seen = True
                    continue
            if maint_mode:
                line = line.strip()
                if line and ('@' in line or ' at ' in line):
                    maintlines.append(line)
                elif not (line.startswith('====') or line.startswith('----')):
                    if maintlines or blank_seen:
                        maint_mode = False
                    else:
                        blank_seen = True
            elif maintainer_re.search(line):
                desc_mode = False
                maint_mode = True
                blank_seen = False
                if ':' in line:
                    # A maintainer may appear on the same line as the header
                    line = line.rsplit(":", 1)[-1].strip()
                    if line:
                        maintlines.append(line)
            elif deps_re.search(line):
                desc_mode = False
                deps_mode = True
                blank_seen = False
            elif desc_mode:
                if not line.strip():
                    if blank_seen:
                        desc_mode = False
                    blank_seen = True
                elif line.startswith('====') or line.startswith('----'):
                    # Assume we just got the title, we don't need that
                    desc = ''
                else:
                    desc += line
    maintainers = []
    for line in maintlines:
        for maint in line.split(','):
            # Parenthesised so the placeholder-address filter applies to both
            # forms: the original "A or B and C" parsed as "A or (B and C)",
            # letting dummy yyyyyy@zzzzz.com entries through whenever they
            # contained an '@'.
            if ('@' in maint or ' at ' in maint) and 'yyyyyy@zzzzz.com' not in maint:
                maintainers.append(maint.strip())
    return desc, maintainers, deps
def maintainers_extract(maintfn):
    """Parse a MAINTAINERS file and return its unique "M:" entries.

    Only lines beginning with "M:" are considered, and the value is kept
    only when it looks like a contact (contains "@" or " at ").  Order of
    the returned list is unspecified (set-based deduplication).
    """
    found = []
    with open(maintfn, 'r') as f:
        for entry in f:
            if not entry.startswith('M:'):
                continue
            value = entry.split(':', 1)[-1].strip()
            if value and ('@' in value or ' at ' in value):
                found.append(value)
    return list(set(found))
def get_github_layerinfo(layer_url, username=None, password=None):
    """Query the GitHub API for repository and owner information.

    Given a github.com repository URL, fetches the repository's JSON
    metadata and then the owner's JSON metadata.  Optional username and
    password enable HTTP basic authentication (which raises the API
    rate limit).

    Returns a (repo_json, owner_json) tuple; either element is None if
    the corresponding API call failed (the failure is logged).
    """
    import http.client
    import json

    def github_api_call(path):
        conn = http.client.HTTPSConnection('api.github.com')
        headers = {"User-Agent": "test_github.py"}
        if username:
            import base64
            # base64.encodestring() was removed in Python 3.9 and only
            # accepted bytes anyway; build the Basic auth header with
            # b64encode over the encoded credentials instead.
            creds = ('%s:%s' % (username, password)).encode('utf-8')
            auth = base64.b64encode(creds).decode('ascii')
            headers['Authorization'] = "Basic %s" % auth
        conn.request("GET", path, headers=headers)
        resp = conn.getresponse()
        return resp

    json_data = None
    owner_json_data = None
    if layer_url.endswith('.git'):
        layer_url = layer_url[:-4]
    resp = github_api_call('/repos/%s' % layer_url.split('github.com/')[-1].rstrip('/'))
    if resp.status in [200, 302]:
        data = resp.read().decode('utf-8')
        json_data = json.loads(data)
        owner_resp = github_api_call(json_data['owner']['url'].split('api.github.com')[-1])
        # Check the owner response, not the (already consumed) repo response
        if owner_resp.status in [200, 302]:
            owner_data = owner_resp.read().decode('utf-8')
            owner_json_data = json.loads(owner_data)
        else:
            logger.error('HTTP status %s reading owner info from github API: %s' % (owner_resp.status, owner_resp.read().decode('utf-8')))
    else:
        logger.error('HTTP status %s reading repo info from github API: %s' % (resp.status, resp.read().decode('utf-8')))
    return (json_data, owner_json_data)
def get_layer_type_choices():
    """
    Return help string and choices for --type.

    Builds the option help text from LayerItem.LAYER_TYPE_CHOICES and
    returns (help_str, choices), where choices additionally permits the
    empty string (meaning "unspecified").
    """
    from layerindex.models import LayerItem
    help_str = "Specify layer type." + ''.join(
        ' %s: %s,' % (key, description)
        for key, description in LayerItem.LAYER_TYPE_CHOICES)
    # Drop the trailing comma left by the join above
    help_str = help_str.rstrip(',')
    choices = [key for key, description in LayerItem.LAYER_TYPE_CHOICES]
    choices.append('')
    return (help_str, choices)
def main():
    """Command-line entry point: import a layer repository into the index.

    Clones (or fetches) the given git repository, locates conf/layer.conf
    at the top level or in first-level subdirectories, creates the
    corresponding LayerItem/LayerBranch records, wires up the core-layer
    dependency and layer.conf dependencies/recommends, and extracts
    description/maintainer information from README and MAINTAINERS files
    (falling back to the GitHub API for github-hosted layers).

    Exits non-zero on any error; with --dry-run the whole database
    transaction is rolled back via DryRunRollbackException.
    """
    valid_layer_name = re.compile(r'[-\w]+$')

    parser = optparse.OptionParser(
        usage = """
%prog [options] <url> [name]""")

    utils.setup_django()

    layer_type_help, layer_type_choices = get_layer_type_choices()
    parser.add_option("-s", "--subdir",
            help = "Specify subdirectory",
            action="store", dest="subdir")
    parser.add_option("-t", "--type",
            help = layer_type_help,
            choices = layer_type_choices,
            action="store", dest="layer_type", default='')
    parser.add_option("-n", "--dry-run",
            help = "Don't write any data back to the database",
            action="store_true", dest="dryrun")
    parser.add_option("-d", "--debug",
            help = "Enable debug output",
            action="store_const", const=logging.DEBUG, dest="loglevel", default=logging.INFO)
    parser.add_option("", "--github-auth",
            help = "Specify github username:password",
            action="store", dest="github_auth")
    parser.add_option("-q", "--quiet",
            help = "Hide all output except error messages",
            action="store_const", const=logging.ERROR, dest="loglevel")
    parser.add_option("-a", "--actual-branch",
            help = "Set actual branch",
            action="store", dest="actual_branch")

    options, args = parser.parse_args(sys.argv)
    if len(args) < 2:
        print("Please specify URL of repository for layer")
        sys.exit(1)

    layer_url = args[1]

    if len(args) > 2:
        layer_name = args[2]
    else:
        # Derive the layer name from the subdirectory or the last URL component
        if options.subdir:
            layer_name = options.subdir
        else:
            layer_name = [x for x in layer_url.split('/') if x][-1]
        if layer_name.endswith('.git'):
            layer_name = layer_name[:-4]

    if not valid_layer_name.match(layer_name):
        logger.error('Invalid layer name "%s" -  Layer name can only include letters, numbers and dashes.', layer_name)
        sys.exit(1)

    if options.github_auth:
        if not ':' in options.github_auth:
            logger.error('--github-auth value must be specified as username:password')
            sys.exit(1)
        splitval = options.github_auth.split(':')
        github_login = splitval[0]
        github_password = splitval[1]
    else:
        github_login = None
        github_password = None

    # Imported here so Django is configured (utils.setup_django above) first
    import settings
    from layerindex.models import LayerItem, LayerBranch, LayerDependency, LayerMaintainer
    from django.db import transaction

    logger.setLevel(options.loglevel)

    fetchdir = settings.LAYER_FETCH_DIR
    if not fetchdir:
        logger.error("Please set LAYER_FETCH_DIR in settings.py")
        sys.exit(1)

    if not os.path.exists(fetchdir):
        os.makedirs(fetchdir)

    master_branch = utils.get_branch('master')
    core_layer = None
    try:
        # All DB writes happen inside one transaction so --dry-run can roll
        # everything back by raising DryRunRollbackException at the end.
        with transaction.atomic():
            # Fetch layer
            logger.info('Fetching repository %s' % layer_url)

            layer = LayerItem()
            layer.name = layer_name
            layer.status = 'P'
            layer.summary = 'tempvalue'
            layer.description = layer.summary

            set_vcs_fields(layer, layer_url)

            urldir = layer.get_fetch_dir()
            repodir = os.path.join(fetchdir, urldir)
            out = None
            try:
                if not os.path.exists(repodir):
                    out = utils.runcmd(['git', 'clone', layer.vcs_url, urldir], fetchdir, logger=logger)
                else:
                    out = utils.runcmd(['git', 'fetch'], repodir, logger=logger)
            except Exception as e:
                logger.error("Fetch failed: %s" % str(e))
                sys.exit(1)

            actual_branch = 'master'
            if (options.actual_branch):
                actual_branch = options.actual_branch
            try:
                out = utils.runcmd(['git', 'checkout', 'origin/%s' % actual_branch], repodir, logger=logger)
            except subprocess.CalledProcessError:
                # Branch does not exist; fall back to whatever origin/HEAD
                # points at.
                actual_branch = None
                branches = utils.runcmd(['git', 'branch', '-r'], repodir, logger=logger)
                for line in branches.splitlines():
                    if 'origin/HEAD ->' in line:
                        actual_branch = line.split('-> origin/')[-1]
                        break
                if not actual_branch:
                    logger.error("Repository has no master branch nor origin/HEAD")
                    sys.exit(1)
                out = utils.runcmd(['git', 'checkout', 'origin/%s' % actual_branch], repodir, logger=logger)

            # Find every directory containing a conf/layer.conf: either the
            # requested subdirectory, or the repo root plus first-level subdirs.
            layer_paths = []
            if options.subdir:
                layerdir = os.path.join(repodir, options.subdir)
                if not os.path.exists(layerdir):
                    logger.error("Subdirectory %s does not exist in repository for master branch" % options.subdir)
                    sys.exit(1)
                if not os.path.exists(os.path.join(layerdir, 'conf/layer.conf')):
                    logger.error("conf/layer.conf not found in subdirectory %s" % options.subdir)
                    sys.exit(1)
                layer_paths.append(layerdir)
            else:
                if os.path.exists(os.path.join(repodir, 'conf/layer.conf')):
                    layer_paths.append(repodir)
                # Find subdirs with a conf/layer.conf
                for subdir in os.listdir(repodir):
                    subdir_path = os.path.join(repodir, subdir)
                    if os.path.isdir(subdir_path):
                        if os.path.exists(os.path.join(subdir_path, 'conf/layer.conf')):
                            layer_paths.append(subdir_path)
                if not layer_paths:
                    logger.error("conf/layer.conf not found in repository or first level subdirectories - is subdirectory set correctly?")
                    sys.exit(1)

            if 'github.com' in layer.vcs_url:
                json_data, owner_json_data = get_github_layerinfo(layer.vcs_url, github_login, github_password)

            for layerdir in layer_paths:
                # Reset the primary key so each iteration saves a new record
                layer.pk = None
                if layerdir != repodir:
                    subdir = os.path.relpath(layerdir, repodir)
                    if len(layer_paths) > 1:
                        layer.name = subdir
                else:
                    subdir = ''
                if LayerItem.objects.filter(name=layer.name).exists():
                    if LayerItem.objects.filter(name=layer.name).exclude(vcs_url=layer.vcs_url).exists():
                        conflict_list = LayerItem.objects.filter(name=layer.name).exclude(vcs_url=layer.vcs_url)
                        conflict_list_urls = []
                        for conflict in conflict_list:
                            conflict_list_urls.append(conflict.vcs_url)
                        cln = ', '.join(conflict_list_urls)
                        logger.error('A layer named "%s" already exists in the database.  Possible name collision with %s.vcs_url = %s' % (layer.name, layer.name, cln))
                        sys.exit(1)
                    else:
                        logger.info('The layer named "%s" already exists in the database. Skipping this layer with same vcs_url' % layer.name)
                        layer_paths = [x for x in layer_paths if x != layerdir]
                        continue

                logger.info('Creating layer %s' % layer.name)
                # Guess layer type if not specified
                if options.layer_type:
                    layer.layer_type = options.layer_type
                elif layer.name in ['openembedded-core', 'meta-oe']:
                    layer.layer_type = 'A'
                elif glob.glob(os.path.join(layerdir, 'conf/distro/*.conf')):
                    layer.layer_type = 'D'
                elif glob.glob(os.path.join(layerdir, 'conf/machine/*.conf')):
                    layer.layer_type = 'B'
                else:
                    layer.layer_type = 'M'

                layer.save()
                layerbranch = LayerBranch()
                layerbranch.layer = layer
                layerbranch.branch = master_branch
                if layerdir != repodir:
                    layerbranch.vcs_subdir = subdir
                if actual_branch:
                    layerbranch.actual_branch = actual_branch
                layerbranch.save()
                if layer.name != settings.CORE_LAYER_NAME:
                    # Every non-core layer implicitly depends on the core layer
                    if not core_layer:
                        core_layer = utils.get_layer(settings.CORE_LAYER_NAME)
                    if core_layer:
                        logger.debug('Adding dep %s to %s' % (core_layer.name, layer.name))
                        layerdep = LayerDependency()
                        layerdep.layerbranch = layerbranch
                        layerdep.dependency = core_layer
                        layerdep.save()
                layerconfparser = LayerConfParse(logger=logger)
                try:
                    config_data = layerconfparser.parse_layer(layerdir)
                    if config_data:
                        utils.add_dependencies(layerbranch, config_data, logger=logger)
                        utils.add_recommends(layerbranch, config_data, logger=logger)
                finally:
                    layerconfparser.shutdown()

                # Get some extra meta-information
                readme_files = glob.glob(os.path.join(layerdir, 'README*'))
                if (not readme_files) and subdir:
                    readme_files = glob.glob(os.path.join(repodir, 'README*'))
                maintainer_files = glob.glob(os.path.join(layerdir, 'MAINTAINERS'))
                if (not maintainer_files) and subdir:
                    maintainer_files = glob.glob(os.path.join(repodir, 'MAINTAINERS'))

                maintainers = []
                if readme_files:
                    (desc, maintainers, deps) = readme_extract(readme_files[0])
                    if desc:
                        layer.summary = layer.name
                        layer.description = desc
                if maintainer_files:
                    # Bug fix: previously this read readme_files[0], which
                    # parsed the README instead of the MAINTAINERS file (and
                    # raised IndexError when only MAINTAINERS existed).
                    maintainers.extend(maintainers_extract(maintainer_files[0]))

                if (not maintainers) and 'github.com' in layer.vcs_url:
                    # Fall back to the GitHub repo/owner metadata
                    if json_data:
                        layer.summary = json_data['description']
                        layer.description = layer.summary
                    if owner_json_data:
                        owner_name = owner_json_data.get('name', None)
                        owner_email = owner_json_data.get('email', None)
                        if owner_name and owner_email:
                            maintainers.append('%s <%s>' % (owner_name, owner_email))

                if layer.name == 'openembedded-core':
                    layer.summary = 'Core metadata'
                elif layer.name == 'meta-oe':
                    layer.summary = 'Additional shared OE metadata'
                    layer.description = layer.summary

                if maintainers:
                    maint_re = re.compile(r'^"?([^"@$<>]+)"? *<([^<> ]+)>[ -]*(.+)?$')
                    for maintentry in maintainers:
                        res = maint_re.match(maintentry)
                        if res:
                            maintainer = LayerMaintainer()
                            maintainer.layerbranch = layerbranch
                            maintainer.name = res.group(1).strip()
                            maintainer.email = res.group(2)
                            if res.group(3):
                                maintainer.responsibility = res.group(3).strip()
                            maintainer.save()

                layer.save()

            if not layer_paths:
                logger.error('No layers added.')
                sys.exit(1)

            if options.dryrun:
                raise DryRunRollbackException()
    except DryRunRollbackException:
        pass

    sys.exit(0)
# Script entry point: run the layer import when executed directly
if __name__ == "__main__":
    main()
| 42.892116 | 168 | 0.545226 |
ace58ecb4c20b6214553c3f2cfc828ae9655a2c1 | 26,934 | py | Python | cohesity_management_sdk/models/__init__.py | chandrashekar-cohesity/management-sdk-python | 9e6ec99e8a288005804b808c4e9b19fd204e3a8b | [
"Apache-2.0"
] | 1 | 2019-11-07T23:19:32.000Z | 2019-11-07T23:19:32.000Z | cohesity_management_sdk/models/__init__.py | chandrashekar-cohesity/management-sdk-python | 9e6ec99e8a288005804b808c4e9b19fd204e3a8b | [
"Apache-2.0"
] | null | null | null | cohesity_management_sdk/models/__init__.py | chandrashekar-cohesity/management-sdk-python | 9e6ec99e8a288005804b808c4e9b19fd204e3a8b | [
"Apache-2.0"
] | null | null | null | __all__ = [
'ad_object',
'aws_snapshot_manager_params',
'aag_and_databases',
'access_token',
'access_token_credential',
'acropolis_protection_source',
'acropolis_restore_parameters',
'activate_view_aliases_result',
'active_directory_entry',
'active_directory_principal',
'active_directory_principals_add_parameters',
'ad_attribute',
'ad_domain',
'ad_domain_controller',
'ad_domain_identity',
'ad_guid_pair',
'ad_object_attribute_parameters',
'ad_object_restore_parameters',
'ad_protection_source',
'ad_restore_options',
'ad_restore_parameters',
'ad_root_topology_object',
'added_active_directory_principal',
'added_idp_principal',
'additional_oracle_db_params',
'agent_deployment_status_response',
'agent_information',
'aggregated_subtree_info',
'alert',
'alert_category_name',
'alert_document',
'alert_metadata',
'alert_property',
'alert_resolution',
'alert_resolution_details',
'alert_resolution_info',
'alert_resolution_request',
'alerting_config',
'alerting_policy_proto',
'amazon_cloud_credentials',
'antivirus_scan_config',
'antivirus_service_config',
'antivirus_service_config_params',
'antivirus_service_group',
'antivirus_service_group_params',
'antivirus_service_group_state_params',
'app_metadata',
'append_hosts_parameters',
'application_info',
'application_parameters',
'application_restore_object',
'application_special_parameters',
'applications_restore_task_request',
'apps_config',
'archival_external_target',
'archival_target',
'attribute_value',
'audit_logs_tile',
'aws_credentials',
'aws_params',
'aws_protection_source',
'aws_snapshot_manager_parameters',
'azure_cloud_credentials',
'azure_credentials',
'azure_managed_disk_params',
'azure_params',
'azure_protection_source',
'backup_job_pre_or_post_script',
'backup_job_proto',
'backup_job_proto_backup_source',
'backup_job_proto_dr_to_cloud_params',
'backup_job_proto_exclude_source',
'backup_job_proto_exclusion_time_range',
'backup_policy_proto',
'backup_policy_proto_continuous_schedule',
'backup_policy_proto_daily_schedule',
'backup_policy_proto_exclusion_time_range',
'backup_policy_proto_monthly_schedule',
'backup_policy_proto_one_off_schedule',
'backup_policy_proto_schedule_end',
'backup_run',
'backup_script',
'backup_source_params',
'backup_source_stats',
'backup_task_info',
'bandwidth_limit',
'bandwidth_limit_override',
'basic_cluster_info',
'basic_task_info',
'blackout_period',
'c_2_s_access_portal',
'cancel_protection_job_run_param',
'capacity_by_tier',
'centrify_zone',
'change_protection_job_state_param',
'change_service_state_parameters',
'change_service_state_result',
'chassis_info',
'cifs_share_info',
'clear_nlm_locks_parameters',
'clone_directory_params',
'clone_task_info',
'clone_task_request',
'clone_view_request',
'close_smb_file_open_parameters',
'cloud_deploy_target',
'cloud_deploy_target_details',
'cloud_network_configuration',
'cloud_parameters',
'cluster',
'cluster_audit_log',
'cluster_audit_log_configuration',
'cluster_audit_logs_search_result',
'cluster_config_proto_sid',
'cluster_creation_progress_result',
'cluster_hardware_info',
'cluster_identifier',
'cluster_networking_endpoint',
'cluster_networking_resource_information',
'cluster_partition',
'cluster_public_keys',
'cluster_stats',
'compare_ad_objects_request',
'compared_ad_object',
'connector_parameters',
'continuous_schedule',
'copy_run',
'copy_run_stats',
'copy_snapshot_task_status',
'count_by_tier',
'create_active_directory_entry_params',
'create_bond_parameters',
'create_bond_result',
'create_cloud_cluster_parameters',
'create_cluster_result',
'create_idp_configuration_request',
'create_physical_cluster_parameters',
'create_remote_vault_restore_task_parameters',
'create_remote_vault_search_job_parameters',
'create_view_box_params',
'create_view_request',
'create_virtual_cluster_parameters',
'created_remote_vault_search_job_uid',
'credentials',
'custom_unix_id_attributes',
'daily_schedule',
'data_migration_job_parameters',
'data_migration_policy',
'data_transfer_from_vault_per_task',
'data_transfer_from_vault_summary',
'data_usage_stats',
'datastore_info',
'db_file_info',
'delete_infected_file_params',
'delete_infected_file_response',
'delete_protection_job_param',
'delete_route_param',
'delete_view_users_quota_parameters',
'delivery_rule_proto_delivery_target',
'deploy_task_request',
'deploy_v_ms_to_aws_params',
'deploy_v_ms_to_azure_params',
'deploy_v_ms_to_cloud_params',
'deploy_v_ms_to_gcp_params',
'device_node',
'device_tree_details',
'disk',
'disk_block',
'disk_partition',
'disk_unit',
'domain_controllers',
'download_files_and_folders_params',
'download_package_parameters',
'download_package_result',
'edit_hosts_parameters',
'email_delivery_target',
'email_meta_data',
'encryption_configuration',
'entity_identifier',
'entity_permission_information',
'entity_proto',
'entity_schema_proto',
'entity_schema_proto_attributes_descriptor',
'entity_schema_proto_key_value_descriptor',
'entity_schema_proto_time_series_descriptor',
'entity_schema_proto_time_series_descriptor_metric_unit',
'env_backup_params',
'environment_type_job_parameters',
'erasure_coding_info',
'error_proto',
'eula_config',
'expand_cloud_cluster_parameters',
'expand_physical_cluster_parameters',
'extended_retention_policy',
'external_client_subnets',
'file_extension_filter',
'file_id',
'file_level_data_lock_config',
'file_lock_status',
'file_lock_status_params',
'file_nlm_locks',
'file_partition_block',
'file_path_filter',
'file_path_parameters',
'file_restore_info',
'file_search_result',
'file_search_results',
'file_snapshot_information',
'file_stubbing_params',
'file_version',
'filename_pattern_to_directory',
'filer_audit_log_configuration',
'files_and_folders_info',
'filesystem_volume',
'filtering_policy_proto',
'fixed_unix_id_mapping',
'flash_blade_file_system',
'flash_blade_network_interface',
'flash_blade_nfs_info',
'flash_blade_protection_source',
'flash_blade_smb_info',
'flash_blade_storage_array',
'full_snapshot_info',
'gcp_credentials',
'gcp_protection_source',
'gdpr_copy_task',
'get_alert_types_params',
'get_registration_info_response',
'get_views_by_share_name_result',
'get_views_result',
'google_account_info',
'google_cloud_credentials',
'granularity_bucket',
'group',
'group_delete_parameters',
'group_info',
'group_parameters',
'guid_pair',
'hardware_info',
'health_tile',
'host_entry',
'host_result',
'hyper_flex_protection_source',
'hyperv_backup_env_params',
'hyperv_clone_parameters',
'hyperv_datastore',
'hyperv_env_job_parameters',
'hyperv_protection_source',
'hyperv_restore_parameters',
'hyperv_virtual_machine',
'icap_connection_status_response',
'id_mapping_info',
'idp_principals_add_parameters',
'idp_reachability_test_result',
'idp_service_configuration',
'idp_user_info',
'index_and_snapshots',
'indexing_policy',
'indexing_policy_proto',
'infected_file',
'infected_file_id',
'infected_file_param',
'infected_files',
'interface_group',
'iops_tile',
'ipmi_configuration',
'iscsi_san_port',
'isilon_access_zone',
'isilon_cluster',
'isilon_mount_point',
'isilon_nfs_mount_point',
'isilon_protection_source',
'isilon_smb_mount_point',
'job_policy_proto',
'job_runs_tile',
'key_value_pair',
'kms_configuration',
'kms_configuration_response',
'kvm_protection_source',
'last_protection_run_summary',
'latency_thresholds',
'latest_protection_job_run_info',
'latest_protection_run',
'ldap_provider',
'ldap_provider_response',
'ldap_provider_status',
'legal_holdings',
'list_centrify_zone',
'list_nlm_locks_response',
'lock_file_params',
'lock_range',
'logical_stats',
'logical_volume',
'ms_exchange_params',
'map_reduce_aux_data',
'metric_data_block',
'metric_data_point',
'metric_value',
'monthly_schedule',
'mount_volume_result_details',
'mount_volumes_parameters',
'mount_volumes_state',
'nas_backup_params',
'nas_credentials',
'nas_env_job_parameters',
'nas_mount_credential_params',
'nas_protection_source',
'netapp_cluster_info',
'netapp_protection_source',
'netapp_volume_info',
'netapp_vserver_info',
'network_configuration',
'network_interface',
'network_mapping',
'networking_information',
'new_s_3_secret_access_key',
'nlm_lock',
'node',
'node_hardware_info',
'node_network_interfaces',
'node_stats',
'node_status',
'node_system_disk_info',
'notification_rule',
'notifications',
'ntp_settings_config',
'o_365_backup_env_params',
'object_search_results',
'object_snapshot_info',
'objects_by_env',
'objects_protected_by_policy',
'office_365_credentials',
'office_365_protection_source',
'oracle_cloud_credentials',
'oracle_db_channel_info',
'oracle_db_channel_info_host_info',
'oracle_host',
'oracle_protection_source',
'oracle_session',
'oracle_source_params',
'outlook_backup_env_params',
'outlook_env_job_parameters',
'outlook_folder',
'outlook_mailbox',
'outlook_restore_parameters',
'output_spec',
'overwrite_view_param',
'package_details',
'pagination_parameters',
'pattern',
'physical_backup_env_params',
'physical_backup_source_params',
'physical_env_job_parameters',
'physical_file_backup_params',
'physical_file_backup_params_backup_path_info',
'physical_node_configuration',
'physical_protection_source',
'physical_snapshot_params',
'physical_special_parameters',
'physical_volume',
'postgres_node_info',
'preferences',
'preferred_domain_controller',
'principal',
'privilege_info',
'product_model_interface_tuple',
'protect_object_parameters',
'protected_object',
'protected_objects_by_env',
'protected_objects_tile',
'protected_source_summary',
'protected_vm_info',
'protection_info',
'protection_job',
'protection_job_info',
'protection_job_request_body',
'protection_job_run_stats',
'protection_job_summary',
'protection_job_summary_for_policies',
'protection_job_summary_stats',
'protection_object_summary',
'protection_policy',
'protection_policy_request',
'protection_policy_summary',
'protection_run_instance',
'protection_run_response',
'protection_runs_summary',
'protection_source',
'protection_source_node',
'protection_source_response',
'protection_source_tree_info',
'protection_source_uid',
'protection_stats',
'protection_summary',
'protection_summary_by_env',
'protection_tile',
'pure_env_job_parameters',
'pure_protection_source',
'pure_storage_array',
'pure_volume',
'q_star_server_credentials',
'qo_s',
'quota_and_usage_in_view',
'quota_policy',
'recover_task_request',
'recoveries_tile',
'recovery_task_info',
'register_application_servers_parameters',
'register_protection_source_parameters',
'register_remote_cluster',
'registered_application_server',
'registered_source_info',
'remote_cluster',
'remote_host',
'remote_host_connector_params',
'remote_job_script',
'remote_protection_job_information',
'remote_protection_job_run_information',
'remote_protection_job_run_instance',
'remote_restore_indexing_status',
'remote_restore_snapshot_status',
'remote_script_path_and_params',
'remote_vault_restore_task_status',
'remote_vault_search_job_information',
'remote_vault_search_job_results',
'rename_view_param',
'replicate_snapshots_to_aws_params',
'replication_encryption_key_reponse',
'replication_target',
'replication_target_settings',
'request_error',
'reset_s_3_secret_key_parameters',
'restore_count_by_object_type',
'restore_files_task_request',
'restore_info',
'restore_object_details',
'restore_object_state',
'restore_points_for_time_range',
'restore_points_for_time_range_param',
'restore_task',
'retention_policy_proto',
'role',
'role_create_parameters',
'role_delete_parameters',
'role_update_parameters',
'route',
'rpo_policy_settings',
'rpo_schedule',
'run_job_snapshot_target',
'run_now_parameters',
'run_protection_job_param',
'run_uid',
'sql_server_instance_version',
'salesforce_account_info',
'sample',
'scheduler_proto_scheduler_job_schedule',
'scheduling_policy',
'scheduling_policy_proto',
'scheduling_policy_proto_continuous_schedule',
'scheduling_policy_proto_daily_schedule',
'scheduling_policy_proto_monthly_schedule',
'scheduling_policy_proto_rpo_schedule',
'schema_info',
'script_path_and_params',
'share',
'smb_active_file_opens_response',
'smb_active_file_path',
'smb_active_open',
'smb_active_session',
'smb_permission',
'smb_permissions_info',
'smb_principal',
'snapshot_archival_copy_policy',
'snapshot_attempt',
'snapshot_cloud_copy_policy',
'snapshot_copy_task',
'snapshot_info',
'snapshot_manager_params',
'snapshot_replication_copy_policy',
'snapshot_target',
'snapshot_target_policy_proto',
'snapshot_target_settings',
'snapshot_version',
'source_app_params',
'source_backup_status',
'source_for_principal_param',
'source_special_parameter',
'sources_for_sid',
'sql_aag_host_and_databases',
'sql_backup_job_params',
'sql_env_job_parameters',
'sql_protection_source',
'sql_restore_parameters',
'sql_source_id',
'ssl_certificate_config',
'ssl_verification',
'static_route',
'stop_remote_vault_search_job_parameters',
'storage_efficiency_tile',
'storage_policy',
'storage_policy_override',
'stubbing_policy_proto',
'subnet',
'supported_config',
'syslog_server',
'tag_attribute',
'tape_media_information',
'task',
'task_attribute',
'task_event',
'task_notification',
'tenant',
'tenant_active_directory_update',
'tenant_active_directory_update_parameters',
'tenant_config',
'tenant_create_parameters',
'tenant_deletion_info',
'tenant_entity_update',
'tenant_entity_update_parameters',
'tenant_info',
'tenant_ldap_provider_update',
'tenant_ldap_provider_update_parameters',
'tenant_protection_job_update',
'tenant_protection_job_update_parameters',
'tenant_protection_policy_update',
'tenant_protection_policy_update_parameters',
'tenant_proxy',
'tenant_update',
'tenant_user_update_parameters',
'tenant_view_box_update',
'tenant_view_box_update_parameters',
'tenant_view_update',
'tenant_view_update_parameters',
'tenant_vlan_update',
'tenant_vlan_update_parameters',
'test_idp_reachability',
'throttling_policy_override',
'throttling_policy_parameters',
'throughput_tile',
'time',
'time_of_a_week',
'time_of_day',
'time_range_settings',
'time_series_schema_response',
'trending_data',
'un_register_application_servers_parameters',
'universal_id',
'universal_id_proto',
'unprotect_object_params',
'update_antivirus_service_group_params',
'update_application_server_parameters',
'update_bond_parameters',
'update_bond_result',
'update_cluster_params',
'update_eula_config',
'update_idp_configuration_request',
'update_ignored_trusted_domains_params',
'update_infected_file_params',
'update_infected_file_response',
'update_ldap_provider_param',
'update_ldap_provider_params',
'update_machine_accounts_params',
'update_protection_job_run',
'update_protection_job_runs_param',
'update_protection_jobs_state',
'update_protection_jobs_state_params',
'update_protection_object_parameters',
'update_resolution_params',
'update_restore_task_params',
'update_sources_for_principals_params',
'update_user_quota_settings_for_view',
'update_view_param',
'upgrade_cluster_parameters',
'upgrade_cluster_result',
'upgrade_physical_agents_message',
'upgrade_physical_server_agents',
'upload_package_result',
'usage_and_performance_stats',
'user',
'user_delete_parameters',
'user_id',
'user_id_mapping',
'user_info',
'user_information',
'user_parameters',
'user_quota',
'user_quota_and_usage',
'user_quota_settings',
'user_quota_summary_for_user',
'user_quota_summary_for_view',
'vcloud_director_info',
'vmware_backup_env_params',
'vmware_backup_source_params',
'vmware_disk_exclusion_proto',
'vmware_object_id',
'vmware_protection_source',
'value',
'value_data',
'vault',
'vault_bandwidth_limits',
'vault_config',
'vault_encryption_key',
'view',
'view_alias',
'view_alias_info',
'view_box',
'view_box_pair_info',
'view_box_stats',
'view_protection',
'view_protection_source',
'view_stats',
'view_user_quota_parameters',
'view_user_quotas',
'virtual_disk_id_information',
'virtual_disk_info',
'virtual_disk_information',
'virtual_disk_mapping',
'virtual_disk_mapping_response',
'virtual_disk_recover_task_state',
'virtual_disk_restore_parameters',
'virtual_disk_restore_response',
'virtual_node_configuration',
'vlan',
'vlan_parameters',
'vm_volumes_information',
'vmware_clone_parameters',
'vmware_env_job_parameters',
'vmware_restore_parameters',
'vmware_special_parameters',
'volume_security_info',
'vserver_network_interface',
'web_hook_delivery_target',
'windows_host_snapshot_parameters',
'worm_retention_proto',
'aag_preference_enum',
'access_enum',
'access_info_list_enum',
'acl_mode_enum',
'action_enum',
'ad_attribute_flag_enum',
'ad_object_flag_enum',
'alert_category_enum',
'alert_category_list_enum',
'alerting_policy_enum',
'alert_severity_list_enum',
'alert_state_enum',
'alert_state_list_enum',
'algorithm_enum',
'application_enum',
'application_environment_enum',
'apps_mode_enum',
'authentication_status_enum',
'authentication_type_enum',
'auth_type_enum',
'aws_type_enum',
'azure_type_enum',
'backup_run_type_enum',
'backup_type_enum',
'bonding_mode_enum',
'category_enum',
'centrify_schema_enum',
'cluster_type_enum',
'combine_method_enum',
'compression_policy_enum',
'connection_state_enum',
'current_operation_enum',
'data_disk_type_enum',
'data_protocol_enum',
'day_enum',
'day_count_enum',
'desired_wal_location_enum',
'disk_format_enum',
'encryption_policy_enum',
'entity_type_enum',
'environment_enum',
'exclude_office_365_type_enum',
'exclude_type_enum',
'external_target_type_enum',
'file_selection_policy_enum',
'file_size_policy_enum',
'file_type_enum',
'flag_enum',
'folder_type_enum',
'gcp_type_enum',
'glacier_retrieval_type_enum',
'health_status_enum',
'host_os_type_enum',
'host_type_enum',
'hyperv_type_enum',
'indexing_task_status_enum',
'interval_unit_enum',
'last_upgrade_status_enum',
'locking_protocol_enum',
'logical_volume_type_enum',
'mode_enum',
'nas_protocol_enum',
'nas_type_enum',
'netapp_type_enum',
'nfs_access_enum',
'object_class_enum',
'object_status_enum',
'os_disk_type_enum',
'partition_table_format_enum',
'periodicity_enum',
'physical_server_host_type_enum',
'physical_type_enum',
'pkg_type_enum',
'priority_enum',
'property_enum',
'protection_source_environment_enum',
'protocol_enum',
'protocol_access_enum',
'pure_type_enum',
'qos_type_enum',
'recovery_model_enum',
'remediation_state_enum',
'removal_reason_enum',
'removal_state_enum',
'run_type_enum',
'search_job_status_enum',
'search_result_flag_enum',
'security_mode_enum',
'server_type_enum',
'service_enum',
'severity_enum',
'share_type_enum',
'smb_access_enum',
'snapshot_task_status_enum',
'sql_options_enum',
'sql_server_db_state_enum',
'state_enum',
'status_enum',
'storage_tier_enum',
'style_enum',
'target_host_type_enum',
'task_state_enum',
'task_type_enum',
'tier_type_enum',
'tools_running_status_enum',
'type_enum',
'upgradability_enum',
'upgrade_status_enum',
'usage_type_enum',
'user_database_preference_enum',
'value_type_enum',
'vault_type_enum',
'vm_backup_status_enum',
'vm_backup_type_enum',
'vmware_type_enum',
'worm_retention_type_enum',
'action_update_protection_jobs_state_params_enum',
'alert_category_list_get_alerts_enum',
'authentication_type_user_enum',
'backup_type_sql_env_job_parameters_enum',
'bonding_mode_create_bond_parameters_enum',
'bonding_mode_network_interface_enum',
'bonding_mode_update_bond_parameters_enum',
'category_alert_metadata_enum',
'category_notification_rule_enum',
'cluster_type_cluster_enum',
'compression_policy_vault_enum',
'day_blackout_period_enum',
'day_monthly_schedule_enum',
'encryption_policy_vault_enum',
'environment_connector_parameters_enum',
'environment_list_application_servers_enum',
'environment_list_protected_objects_enum',
'environment_get_restore_tasks_enum',
'environment_search_protection_sources_enum',
'environment_registered_source_info_enum',
'environment_aggregated_subtree_info_enum',
'environment_application_info_enum',
'environment_restore_object_details_enum',
'environment_backup_run_enum',
'environment_snapshot_info_enum',
'environment_protection_summary_by_env_enum',
'environment_protection_job_enum',
'environment_protection_job_request_body_enum',
'environment_register_protection_source_parameters_enum',
'environment_remote_protection_job_information_enum',
'environment_remote_protection_job_run_information_enum',
'environment_restore_points_for_time_range_param_enum',
'environment_get_protection_jobs_enum',
'environment_get_protection_policies_enum',
'environment_search_restored_files_enum',
'environment_search_objects_enum',
'environment_list_protection_sources_enum',
'environment_list_protection_sources_registration_info_enum',
'environment_list_protection_sources_root_nodes_enum',
'host_type_agent_information_enum',
'host_type_hyperv_protection_source_enum',
'host_type_physical_protection_source_enum',
'host_type_vmware_protection_source_enum',
'host_type_register_protection_source_parameters_enum',
'host_type_download_physical_agent_enum',
'mode_smb_permission_enum',
'nas_protocol_nas_env_job_parameters_enum',
'object_class_active_directory_principals_add_parameters_enum',
'object_class_added_active_directory_principal_enum',
'object_class_added_idp_principal_enum',
'object_class_idp_principals_add_parameters_enum',
'object_class_search_active_directory_principals_enum',
'object_class_search_principals_enum',
'periodicity_extended_retention_policy_enum',
'periodicity_snapshot_archival_copy_policy_enum',
'periodicity_snapshot_cloud_copy_policy_enum',
'periodicity_snapshot_replication_copy_policy_enum',
'protocol_isilon_mount_point_enum',
'protocol_nas_protection_source_enum',
'protocol_syslog_server_enum',
'qos_type_rpo_policy_settings_enum',
'remediation_state_update_infected_file_params_enum',
'removal_state_view_box_enum',
'role_network_interface_enum',
'run_type_run_protection_job_param_enum',
'search_job_status_remote_vault_search_job_results_enum',
'service_network_interface_enum',
'severity_notification_rule_enum',
'status_copy_snapshot_task_status_enum',
'status_source_backup_status_enum',
'status_backup_run_enum',
'status_copy_run_enum',
'status_task_notification_enum',
'status_restore_task_enum',
'status_task_enum',
'status_get_tenants_enum',
'tier_type_azure_cloud_credentials_enum',
'tier_type_google_cloud_credentials_enum',
'tier_type_oracle_cloud_credentials_enum',
'type_aws_protection_source_enum',
'type_azure_protection_source_enum',
'type_flash_blade_protection_source_enum',
'type_gcp_protection_source_enum',
'type_hyper_flex_protection_source_enum',
'type_hyperv_datastore_enum',
'type_hyperv_protection_source_enum',
'type_isilon_protection_source_enum',
'type_kvm_protection_source_enum',
'type_nas_protection_source_enum',
'type_netapp_protection_source_enum',
'type_netapp_volume_info_enum',
'type_netapp_vserver_info_enum',
'type_oracle_protection_source_enum',
'type_physical_protection_source_enum',
'type_pure_protection_source_enum',
'type_sql_protection_source_enum',
'type_view_protection_source_enum',
'type_vmware_protection_source_enum',
'type_user_id_mapping_enum',
'type_ad_restore_options_enum',
'type_cloud_deploy_target_details_enum',
'type_smb_permission_enum',
'type_protection_job_info_enum',
'type_remote_host_enum',
'type_clone_task_request_enum',
'type_run_job_snapshot_target_enum',
'type_snapshot_target_settings_enum',
'type_file_search_result_enum',
'type_network_interface_enum',
'type_recovery_task_info_enum',
'type_protection_policy_enum',
'type_protection_policy_request_enum',
'type_recover_task_request_enum',
'type_restore_task_enum',
'type_vault_enum',
'upgradability_agent_deployment_status_response_enum',
'worm_retention_type_data_migration_policy_enum',
'worm_retention_type_protection_policy_enum',
'worm_retention_type_protection_policy_request_enum',
] | 30.923077 | 67 | 0.741665 |
ace58f0c568374c1c89106695093c0345ac4eaa1 | 2,418 | py | Python | Chapter06/rubner_tavan_network.py | arifmudi/Mastering-Machine-Learning-Algorithms | 8655e8e3f1e94f4d65bb92465033ebf54c193409 | [
"MIT"
] | 48 | 2018-05-28T12:16:18.000Z | 2022-02-24T12:49:10.000Z | Chapter06/rubner_tavan_network.py | arifmudi/Mastering-Machine-Learning-Algorithms | 8655e8e3f1e94f4d65bb92465033ebf54c193409 | [
"MIT"
] | 1 | 2018-08-19T05:48:22.000Z | 2018-08-19T05:48:22.000Z | Chapter06/rubner_tavan_network.py | arifmudi/Mastering-Machine-Learning-Algorithms | 8655e8e3f1e94f4d65bb92465033ebf54c193409 | [
"MIT"
] | 41 | 2018-05-28T12:16:19.000Z | 2022-01-14T18:48:12.000Z | import numpy as np
from sklearn.datasets import make_blobs
# Set random seed for reproducibility
np.random.seed(1000)
# Number of principal components to extract.
n_components = 2
# Gradient step size for the W/V weight updates.
learning_rate = 0.0001
# Hard cap on training epochs.
max_iterations = 1000
# Inner iterations used to let the recurrent output y settle for each sample.
stabilization_cycles = 5
# Convergence tolerance on the Frobenius norm of the change in W.
threshold = 0.00001
def zero_center(Xd):
    """Return a copy of *Xd* with the per-column (feature) mean removed."""
    column_means = np.mean(Xd, axis=0)
    return Xd - column_means
if __name__ == '__main__':
    # Create the dataset
    X, _ = make_blobs(n_samples=500, centers=2, cluster_std=5.0, random_state=1000)
    Xs = zero_center(X)
    Q = np.cov(Xs.T)
    eigu, eigv = np.linalg.eig(Q)
    print('Eigenvalues: {}'.format(eigu))
    print('Eigenvectors: {}'.format(eigv))
    # Initialize the variables: feed-forward weights W and the strictly
    # lower-triangular (zero-diagonal) lateral weights V.
    W = np.random.normal(0.0, 0.5, size=(Xs.shape[1], n_components))
    V = np.tril(np.random.normal(0.0, 0.01, size=(n_components, n_components)))
    np.fill_diagonal(V, 0.0)
    prev_W = np.zeros((Xs.shape[1], n_components))
    t = 0
    # Perform the training cycle
    while (np.linalg.norm(W - prev_W, ord='fro') > threshold and t < max_iterations):
        prev_W = W.copy()
        t += 1
        for i in range(Xs.shape[0]):
            y_p = np.zeros((n_components, 1))
            xi = np.expand_dims(Xs[i], 1)
            y = None
            # Let the recurrent output settle before computing the updates.
            for _ in range(stabilization_cycles):
                y = np.dot(W.T, xi) + np.dot(V, y_p)
                y_p = y.copy()
            dW = np.zeros((Xs.shape[1], n_components))
            dV = np.zeros((n_components, n_components))
            # Bugfix: this loop previously used `t` as its index, clobbering
            # the epoch counter of the outer while-loop and defeating the
            # max_iterations termination check.
            for k in range(n_components):
                y2 = np.power(y[k], 2)
                dW[:, k] = np.squeeze((y[k] * xi) + (y2 * np.expand_dims(W[:, k], 1)))
                dV[k, :] = -np.squeeze((y[k] * y) + (y2 * np.expand_dims(V[k, :], 1)))
            W += (learning_rate * dW)
            V += (learning_rate * dV)
            # Keep V strictly lower-triangular and W column-normalized.
            V = np.tril(V)
            np.fill_diagonal(V, 0.0)
            W /= np.linalg.norm(W, axis=0).reshape((1, n_components))
    print('Final w: {}'.format(W))
    # Compute the extracted components for every sample.
    Y_comp = np.zeros((Xs.shape[0], n_components))
    for i in range(Xs.shape[0]):
        y_p = np.zeros((n_components, 1))
        xi = np.expand_dims(Xs[i], 1)
        for _ in range(stabilization_cycles):
            Y_comp[i] = np.squeeze(np.dot(W.T, xi) + np.dot(V.T, y_p))
            # Bugfix: previously fed back the stale `y` left over from the
            # training loop; recirculate the value just computed instead.
            y_p = np.expand_dims(Y_comp[i], 1)
    print('Final covariance matrix: {}'.format(np.cov(Y_comp.T)))
ace58f21d02ec36cee247c3c3923a3ea0ccee32a | 588 | py | Python | Code/haar.py | Elmirrah/image_classification | 7b42537840e71e70fffa41e8abe323b1fe92e5cf | [
"MIT"
] | 1 | 2018-01-26T14:23:48.000Z | 2018-01-26T14:23:48.000Z | Code/haar.py | pranshu28/image_classification | 7b42537840e71e70fffa41e8abe323b1fe92e5cf | [
"MIT"
] | null | null | null | Code/haar.py | pranshu28/image_classification | 7b42537840e71e70fffa41e8abe323b1fe92e5cf | [
"MIT"
] | null | null | null | import cv2
import numpy as np

# Build the cascade detector and open the default camera.
# NOTE(review): 'haarcascade_pen.xml' must exist in the working directory;
# CascadeClassifier does not raise when the file is missing — confirm.
faceDetect = cv2.CascadeClassifier('haarcascade_pen.xml')
cam = cv2.VideoCapture(0)
# Renamed from `id`, which shadowed the builtin.
user_id = input('enter user id')
sampleNum = 0
while True:
    ret, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceDetect.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        sampleNum = sampleNum + 1
        # Save the grayscale crop of each detection for later training.
        cv2.imwrite("dataset/User." + str(user_id) + "." + str(sampleNum) + ".jpg", gray[y:y + h, x:x + w])
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        cv2.waitKey(100)
    cv2.imshow("Face", img)
    if sampleNum > 20:
        break
cam.release()
# Bugfix: was cv2.destoryAllwindows(), which raises AttributeError.
cv2.destroyAllWindows()
Add Comment Collapse | 25.565217 | 82 | 0.714286 |
ace58f75db7a39ff459dee8f1fc0891d0a315f38 | 784 | py | Python | Order.py | ElectronAlchemist/Bristol-Stock-Gym | d5a1f19c3f7aaed1f426ad9eff84c947797c31a6 | [
"MIT"
] | 4 | 2019-05-09T11:27:25.000Z | 2020-06-04T15:22:21.000Z | Order.py | ElectronAlchemist/Bristol-Stock-Gym | d5a1f19c3f7aaed1f426ad9eff84c947797c31a6 | [
"MIT"
] | null | null | null | Order.py | ElectronAlchemist/Bristol-Stock-Gym | d5a1f19c3f7aaed1f426ad9eff84c947797c31a6 | [
"MIT"
] | null | null | null | from enum import Enum
# An order can either be a BID or an ASK
class OType(Enum):
    """Side of an order: BID (buy) or ASK (sell)."""
    BID = 'BID'
    ASK = 'ASK'
class Order:
    """A single quote in the order book.

    Carries the issuing trader's id, the side (BID/ASK), price, quantity,
    a timestamp, and a quote id that is unique per quote (0 until assigned).
    """

    def __init__(self, tid, otype, price, qty, time, qid=0):
        self.tid = tid        # trader i.d.
        self.otype = otype    # order type
        self.price = price    # price
        self.qty = qty        # quantity
        self.time = time      # timestamp
        self.qid = qid        # quote i.d. (unique to each quote)

    def __str__(self):
        template = '[%s %s P=%03d Q=%s T=%5.2f QID:%d]'
        fields = (self.tid, self.otype, self.price, self.qty, self.time, self.qid)
        return template % fields
| 35.636364 | 95 | 0.519133 |
ace58f99250ee9ec30fa376d125ebff22d8ca969 | 4,081 | py | Python | sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2017_10_01/operations/_ea_subscription_rollback_to_legacy_pricing_model_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2017_10_01/operations/_ea_subscription_rollback_to_legacy_pricing_model_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2017_10_01/operations/_ea_subscription_rollback_to_legacy_pricing_model_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class EASubscriptionRollbackToLegacyPricingModelOperations(object):
    """EASubscriptionRollbackToLegacyPricingModelOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.applicationinsights.v2017_10_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Wired up by the generated client; never constructed by end users.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def post(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Enterprise Agreement Customer roll back to use legacy pricing model.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Default HTTP status -> exception mapping; callers may extend or
        # override it via the 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        accept = "application/json"

        # Construct URL from the template stored on post.metadata below.
        url = self.post.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 204 is the only success status this operation accepts.
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    post.metadata = {'url': '/subscriptions/{subscriptionId}/providers/microsoft.insights/rollbackToLegacyPricingModel'}  # type: ignore
| 43.414894 | 136 | 0.68047 |
ace5906bdb41acb5fb4f5e4f33c2643158217b3e | 1,905 | py | Python | queenbee/operator/metadata.py | MingboPeng/queenbee | a7968b0f88833cdfab928ca681057bf245f36ed2 | [
"MIT"
] | null | null | null | queenbee/operator/metadata.py | MingboPeng/queenbee | a7968b0f88833cdfab928ca681057bf245f36ed2 | [
"MIT"
] | null | null | null | queenbee/operator/metadata.py | MingboPeng/queenbee | a7968b0f88833cdfab928ca681057bf245f36ed2 | [
"MIT"
] | null | null | null | """Queenbee Operator MetaData class.
This object provides metadata information for an Operator.
The specification is designed based on openapi info object:
https://swagger.io/specification/#infoObject
"""
from typing import List
from pydantic import Field
from ..base.basemodel import BaseModel
class Maintainer(BaseModel):
    """Maintainer information"""

    # Required: person or organization name.
    name: str = Field(
        ...,
        description='The name of the author/maintainer person or organization.'
    )

    # Optional contact email (None when not provided).
    email: str = Field(
        None,
        description='The email address of the author/maintainer person or organization.'
    )
class MetaData(BaseModel):
    """Operator metadata information"""

    # Required fields.
    name: str = Field(
        ...,
        description='Operator name. This name should be unique among all the operators'
        ' in your workflow.'
    )
    tag: str = Field(
        ...,
        description='The tag of the operator'
    )

    # Optional descriptive fields; all default to None.
    app_version: str = Field(
        None,
        description='The version of the app binary backing the operator (CLI tool or'
        ' container)'
    )
    keywords: List[str] = Field(
        None,
        description='A list of keywords to search the operator by'
    )
    maintainers: List[Maintainer] = Field(
        None,
        description='A list of maintainers for the operator'
    )
    home: str = Field(
        None,
        description='The URL of this operator home page'
    )
    sources: List[str] = Field(
        None,
        description='A list of URLs to source code for this operator'
    )
    icon: str = Field(
        None,
        description='A URL to an SVG or PNG image to be used as an icon'
    )
    deprecated: bool = Field(
        None,
        description='Whether this operator is deprecated'
    )
    description: str = Field(
        None,
        description='A description of what this operator does'
    )
| 23.231707 | 88 | 0.623622 |
ace5909f96f8ede3ffb9f54e04b8de2bedee96e0 | 6,838 | py | Python | miniproject/views.py | jai-dewani/Lab-Works | adb27ecc52b7b771b8975da9570a938064bfa024 | [
"MIT"
] | null | null | null | miniproject/views.py | jai-dewani/Lab-Works | adb27ecc52b7b771b8975da9570a938064bfa024 | [
"MIT"
] | null | null | null | miniproject/views.py | jai-dewani/Lab-Works | adb27ecc52b7b771b8975da9570a938064bfa024 | [
"MIT"
] | 1 | 2019-03-05T09:06:56.000Z | 2019-03-05T09:06:56.000Z | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth import get_user_model, logout
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.http import HttpResponseForbidden, JsonResponse
import os
import requests, json
from django.conf import settings
from .models import Answers, Question, AccountUser, Subject, Testcase, Professor
from .forms import UploadFileForm
# JDoodle remote code-execution endpoint used to run submitted solutions.
RUN_URL = 'https://api.jdoodle.com/v1/execute'
# NOTE(review): credential committed to source control — move to settings /
# environment and rotate. Also appears unused here: question() inlines a
# different clientSecret — confirm which one is live.
CLIENT_SECRET = '5db3f1c12c59caa1002d1cb5757e72c96d969a1a'
def index(request):
    """Landing page: list all subjects for a logged-in user.

    Users with an AccountUser row are students ('S'); everyone else is
    treated as a professor ('P') — the template switches on userType.
    """
    if not request.user.is_authenticated:
        return redirect('/loginuser')
    subjects = Subject.objects.all()
    try:
        # Only the existence check matters; the row itself is unused.
        AccountUser.objects.get(user=request.user)
        userType = 'S'
    except AccountUser.DoesNotExist:
        # Was a bare `except:`, which also swallowed unrelated errors.
        userType = 'P'
    context = {
        'subjects': subjects,
        'user': True,
        'userType': userType,
        'username': request.user.username
    }
    return render(request, 'index.html', context)
def subject(request, subject_id):
    """List the questions of one subject; POST is not supported (404)."""
    if request.method == 'POST':
        response = render(request, '404.html')
        response.status_code = 404
        return response
    subject = Subject.objects.get(id=subject_id)
    questions = Question.objects.filter(Qsubject=subject)
    try:
        # Presence of an AccountUser row marks a student.
        AccountUser.objects.get(user=request.user)
        userType = 'S'
    except AccountUser.DoesNotExist:
        # Was a bare `except:`; narrowed to the expected miss.
        userType = 'P'
    context = {
        'questions': questions,
        'subject_id': subject_id,
        'userType': userType,
        'username': request.user.username
    }
    return render(request, 'subject.html', context)
def question(request, subject_id, question_id):
    """GET: show one question (students also see the first sample testcase).

    POST: run the uploaded solution against every stored testcase through
    the JDoodle API and render the pass percentage.
    """
    if request.method == 'POST':
        form = UploadFileForm(request.POST, request.FILES)
        source_code = request.FILES['code'].read().decode('unicode_escape')
        question = Question.objects.get(id=question_id)
        testcases = Testcase.objects.filter(Question=question)
        flag = True  # Bugfix: a trailing comma previously made this a 1-tuple.
        error = ''
        totalTestcases = len(testcases)
        passed = 0
        # NOTE(review): hard-coded third-party credentials committed to
        # source — move to settings/environment and rotate them.
        data = {
            "language": "python3",
            "versionIndex": "0",
            "clientId": "d0b2ab4f943ca044aa8e9ee39290afd5",
            "clientSecret": "8ddec190c616ac0aafdef83aa83e4a7a493c1415c44b81e29d49405ad5031dd"
        }
        for testcase in testcases:
            # Renamed from `input`, which shadowed the builtin.
            stdin_data = testcase.input.read().decode('unicode_escape')
            expectedOutput = testcase.output.read().decode('unicode_escape')
            data['script'] = source_code
            data['stdin'] = stdin_data
            output = requests.post(RUN_URL, json=data).json()["output"]
            if output == expectedOutput:
                passed += 1
        # Bugfix: guard against division by zero when a question has no
        # testcases yet.
        if totalTestcases:
            accuracy = round(float(passed) / float(totalTestcases) * 100, 2)
        else:
            accuracy = 0.0
        context = {
            'answer': flag,
            'error': error,
            'subject_id': subject_id,
            'accuracy': accuracy
        }
        return render(request, 'answer.html', context)
    question = Question.objects.get(id=question_id)
    testcase = Testcase.objects.filter(Question=question)
    subject = Subject.objects.get(id=subject_id)
    try:
        AccountUser.objects.get(user=request.user)
        # Students only get to see the first sample testcase.
        testcase = testcase[:1]
        userType = 'S'
    except AccountUser.DoesNotExist:
        # Was a bare `except:`; narrowed to the expected miss.
        userType = 'P'
    context = {
        'subject_id': subject_id,
        'subject_name': subject.Sname,
        'userType': userType,
        'question': question,
        'testcase': testcase,
        'username': request.user.username
    }
    return render(request, 'viewQuestion.html', context)
def newQuestion(request, subject_id):
    """POST: create a Question under the given subject; GET: show the form."""
    if request.method != 'POST':
        return render(request, 'newQuestion.html', {
            'username': request.user.username,
            'subject_id': subject_id
        })
    posted = request.POST
    question_name = posted['questionName']
    question_code = posted['questionCode']
    question_desc = posted['questionDesc']
    print(request.user)
    parent_subject = Subject.objects.get(id=subject_id)
    owner = Professor.objects.get(user=request.user)
    Question(
        QName=question_name,
        QCode=question_code,
        QDesc=question_desc,
        createdBy=owner,
        Qsubject=parent_subject
    ).save()
    return redirect('/')
def viewfile(request, filename):
    """Return the raw contents of media/<filename> as text/plain.

    NOTE(review): ``filename`` comes straight from the URL, so this can read
    arbitrary files via path traversal (e.g. ``../settings.py``) — validate
    or normalize it before use.
    """
    with open('media/' + str(filename), 'r') as source:
        # Read the whole file at once instead of the original char-by-char
        # string concatenation (which was quadratic in file size).
        return HttpResponse(source.read(), content_type='text/plain')
def testCase(request, subject_id, question_id):
    """POST: attach an input/output testcase pair to a question; GET: form."""
    if request.method != 'POST':
        return render(request, 'testcase.html', {
            'subject_id': subject_id,
            'question_id': question_id,
            'username': request.user.username
        })
    form = UploadFileForm(request.POST, request.FILES)
    parent_question = Question.objects.get(id=question_id)
    parent_question.testcases += 1
    parent_question.save()
    Testcase(
        Question=parent_question,
        input=request.FILES['input'],
        output=request.FILES['output']
    ).save()
    return redirect('/')
def handle_uploaded_file(f, filename):
    """Stream an uploaded file's chunks to media/<filename> on disk."""
    print(filename)
    target_path = 'media/' + str(filename)
    with open(target_path, 'wb+') as destination:
        for piece in f.chunks():
            destination.write(piece)
def upload(request):
    """Handle a document upload form and list all stored documents.

    NOTE(review): ``DocumentForm`` and ``Document`` do not appear in this
    module's imports, so this view would raise NameError at runtime —
    confirm whether the imports were lost or this view is dead code.
    """
    if request.method=='POST':
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
        # return redirect('/')
    else:
        form = DocumentForm()
    documents = Document.objects.all()
    return render(request, 'upload.html',{
        'form':form,
        'documents':documents,
        'username': request.user.username
    })
def signup_view(request):
    """Register a student account and log the new user straight in."""
    if request.method != 'POST':
        return render(request, 'signup.html')
    posted = request.POST
    username = posted['username']
    password = posted['password']
    # Create the auth user, then the student profile attached to it.
    new_user = User.objects.create_user(
        username=username,
        email=posted['email'],
        password=password
    )
    AccountUser(
        user=new_user,
        phoneNumber=posted['phoneNumber'],
        semester=posted['sem']
    ).save()
    authenticated = authenticate(
        request,
        username=username,
        password=password
    )
    if authenticated is None:
        print("PROBLEM")
        return render(request, 'login.html', {
            'message': 'Check login credentials'
        })
    login(request, authenticated)
    return redirect('/')
def login_view(request):
    """Authenticate a user from the login form and open a session."""
    if request.method != 'POST':
        return render(request, 'login.html')
    user = authenticate(
        request,
        username=request.POST['username'],
        password=request.POST['password']
    )
    if user is None:
        print("PROBLEM")
        return render(request, 'login.html', {
            'message': 'Check login credentials'
        })
    login(request, user)
    print(user)
    return redirect('/')
def logout_view(request):
    """Terminate the current session and return to the landing page."""
    logout(request)
    return redirect('/')
| 24.597122 | 100 | 0.702983 |
ace59255b88e9517854b9ef777abac4a0e7cb4c6 | 6,986 | py | Python | log_caspase/model_509.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_caspase/model_509.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_caspase/model_509.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()

# --- Monomers: species of the extrinsic apoptosis cascade; bracketed names
# are binding sites used by the rules below. ---
Monomer('C6A', ['C8pro'])
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C3ub')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('Xiap', ['C3A'])
Monomer('C8A', ['C3pro'])
Monomer('C3pro', ['C8A'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('ParpC')

# --- Kinetic rate parameters (2kf = forward, 1kr = reverse, 1kc = catalytic);
# all start at 1.0, presumably placeholders to be fitted — confirm. ---
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)

# --- Initial species amounts (consumed by the Initial() declarations). ---
Parameter('C6A_0', 0.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C3ub_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('Xiap_0', 127250.0)
Parameter('C8A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Parameter('Fadd_0', 130000.0)
Parameter('C8pro_0', 130000.0)
Parameter('ParpC_0', 0.0)

# --- Observables: one tracker per species, any binding state. ---
Observable('C6A_obs', C6A())
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C3ub_obs', C3ub())
Observable('C3A_obs', C3A())
Observable('Xiap_obs', Xiap())
Observable('C8A_obs', C8A())
Observable('C3pro_obs', C3pro())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Observable('Fadd_obs', Fadd())
Observable('C8pro_obs', C8pro())
Observable('ParpC_obs', ParpC())

# --- Reaction rules: ligand/receptor/FADD assembly activates C8, which
# activates C3; XIAP ubiquitinates C3A; C3A cleaves PARP and activates C6,
# which feeds back to activate more C8. ---
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(C3pro=None) + C3pro(C8A=None) | C8A(C3pro=1) % C3pro(C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(C3pro=1) % C3pro(C8A=1) >> C8A(C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)

# --- Initial conditions, all unbound. ---
Initial(C6A(C8pro=None), C6A_0)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C3ub(), C3ub_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(Xiap(C3A=None), Xiap_0)
Initial(C8A(C3pro=None), C8A_0)
Initial(C3pro(C8A=None), C3pro_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(ParpC(), ParpC_0)
| 69.86 | 296 | 0.818065 |
ace5929e2d4e08d6340577d63141f96b783640f7 | 76 | py | Python | plugins/cortex_v2/komand_cortex_v2/actions/run_file_analyzer/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/cortex_v2/komand_cortex_v2/actions/run_file_analyzer/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/cortex_v2/komand_cortex_v2/actions/run_file_analyzer/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import RunFileAnalyzer
| 25.333333 | 39 | 0.789474 |
ace59327d565eab247510a0eb8827bd2497a2862 | 371 | py | Python | 1501-1600/1508-Score After Flipping Matrix/1508-Score After Flipping Matrix.py | jiadaizhao/LintCode | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 77 | 2017-12-30T13:33:37.000Z | 2022-01-16T23:47:08.000Z | 1501-1600/1508-Score After Flipping Matrix/1508-Score After Flipping Matrix.py | jxhangithub/LintCode-1 | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 1 | 2018-05-14T14:15:40.000Z | 2018-05-14T14:15:40.000Z | 1501-1600/1508-Score After Flipping Matrix/1508-Score After Flipping Matrix.py | jxhangithub/LintCode-1 | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 39 | 2017-12-07T14:36:25.000Z | 2022-03-10T23:05:37.000Z | class Solution:
"""
@param A: a matrix
@return: the score
"""
def matrixScore(self, A):
# Write your code here.
m, n = len(A), len(A[0])
result = 0
for j in range(n):
val = sum(A[i][j] == A[i][0] for i in range(m))
result += max(val, m - val) * (1 << (n - 1 - j))
return result
| 24.733333 | 60 | 0.436658 |
ace5934adbd3e9564535d833086d73c49138c4fe | 7,099 | py | Python | homeassistant/components/netatmo/data_handler.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 4 | 2021-07-11T09:11:00.000Z | 2022-02-27T14:43:50.000Z | homeassistant/components/netatmo/data_handler.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 277 | 2021-10-04T06:39:33.000Z | 2021-12-28T22:04:17.000Z | homeassistant/components/netatmo/data_handler.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 1 | 2022-02-09T00:30:51.000Z | 2022-02-09T00:30:51.000Z | """The Netatmo data handler."""
from __future__ import annotations
import asyncio
from collections import deque
from dataclasses import dataclass
from datetime import datetime, timedelta
from itertools import islice
import logging
from time import time
from typing import Any
import pyatmo
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.event import async_track_time_interval
from .const import (
AUTH,
DOMAIN,
MANUFACTURER,
WEBHOOK_ACTIVATION,
WEBHOOK_DEACTIVATION,
WEBHOOK_NACAMERA_CONNECTION,
WEBHOOK_PUSH_TYPE,
)
_LOGGER = logging.getLogger(__name__)
# Registry keys for the pyatmo data classes handled below.
CAMERA_DATA_CLASS_NAME = "AsyncCameraData"
WEATHERSTATION_DATA_CLASS_NAME = "AsyncWeatherStationData"
HOMECOACH_DATA_CLASS_NAME = "AsyncHomeCoachData"
CLIMATE_TOPOLOGY_CLASS_NAME = "AsyncClimateTopology"
CLIMATE_STATE_CLASS_NAME = "AsyncClimate"
PUBLICDATA_DATA_CLASS_NAME = "AsyncPublicData"
# Maps a data class name to the pyatmo class that is instantiated for it
# in NetatmoDataHandler.register_data_class.
DATA_CLASSES = {
    WEATHERSTATION_DATA_CLASS_NAME: pyatmo.AsyncWeatherStationData,
    HOMECOACH_DATA_CLASS_NAME: pyatmo.AsyncHomeCoachData,
    CAMERA_DATA_CLASS_NAME: pyatmo.AsyncCameraData,
    CLIMATE_TOPOLOGY_CLASS_NAME: pyatmo.AsyncClimateTopology,
    CLIMATE_STATE_CLASS_NAME: pyatmo.AsyncClimate,
    PUBLICDATA_DATA_CLASS_NAME: pyatmo.AsyncPublicData,
}
# Max number of API fetches performed per update cycle (see async_update).
BATCH_SIZE = 3
# Per-data-class refresh intervals in seconds (used as time() + interval).
DEFAULT_INTERVALS = {
    CLIMATE_TOPOLOGY_CLASS_NAME: 3600,
    CLIMATE_STATE_CLASS_NAME: 300,
    CAMERA_DATA_CLASS_NAME: 900,
    WEATHERSTATION_DATA_CLASS_NAME: 600,
    HOMECOACH_DATA_CLASS_NAME: 300,
    PUBLICDATA_DATA_CLASS_NAME: 600,
}
# Seconds between update-cycle ticks scheduled in async_setup.
SCAN_INTERVAL = 60
@dataclass
class NetatmoDevice:
    """Netatmo device class.

    Bundles a pyatmo module together with the data handler that owns it.
    """
    # Shared handler responsible for refreshing this device's data.
    data_handler: NetatmoDataHandler
    # The pyatmo module instance backing this device.
    device: pyatmo.climate.NetatmoModule
    # NOTE(review): presumably the registry entry id of the parent data
    # class this device belongs to — confirm against consumers.
    parent_id: str
    # NOTE(review): presumably the name of the state data class used for
    # updates — confirm against consumers.
    state_class_name: str
@dataclass
class NetatmoDataClass:
    """Class for keeping track of Netatmo data class metadata."""
    # Registry key of the data class (the entry name used in data_classes).
    name: str
    # Seconds between successive refreshes of this data class.
    interval: int
    # Unix timestamp (time.time()) after which the next refresh may run.
    next_scan: float
    # Entity callbacks invoked after each successful fetch.
    subscriptions: list[CALLBACK_TYPE]
class NetatmoDataHandler:
    """Manages the Netatmo data handling.

    Keeps a registry of pyatmo "data classes", refreshes them round-robin
    in batches of BATCH_SIZE every SCAN_INTERVAL seconds, and notifies
    subscribed entity callbacks after each successful fetch.
    """
    def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry) -> None:
        """Initialize self."""
        self.hass = hass
        self.config_entry = config_entry
        # Authenticated pyatmo client stored by the integration setup.
        self._auth = hass.data[DOMAIN][config_entry.entry_id][AUTH]
        # entry name -> NetatmoDataClass metadata (interval, next_scan, subs).
        self.data_classes: dict = {}
        # entry name -> pyatmo data-class instance (None after NoDevice).
        self.data: dict = {}
        # Rotating queue of NetatmoDataClass objects driving round-robin updates.
        self._queue: deque = deque()
        # True while the Netatmo webhook is registered (see handle_event).
        self._webhook: bool = False
    async def async_setup(self) -> None:
        """Set up the Netatmo data handler."""
        # Tick the round-robin updater every SCAN_INTERVAL seconds.
        async_track_time_interval(
            self.hass, self.async_update, timedelta(seconds=SCAN_INTERVAL)
        )
        # Listen for webhook events; unsubscribed automatically on unload.
        self.config_entry.async_on_unload(
            async_dispatcher_connect(
                self.hass,
                f"signal-{DOMAIN}-webhook-None",
                self.handle_event,
            )
        )
    async def async_update(self, event_time: datetime) -> None:
        """
        Update device.
        We do up to BATCH_SIZE calls in one update in order
        to minimize the calls on the api service.
        """
        # Only look at the first BATCH_SIZE queue entries this tick.
        for data_class in islice(self._queue, 0, BATCH_SIZE):
            if data_class.next_scan > time():
                continue  # not due yet
            # Walrus guards against a falsy/empty name.
            if data_class_name := data_class.name:
                self.data_classes[data_class_name].next_scan = (
                    time() + data_class.interval
                )
                await self.async_fetch_data(data_class_name)
        # Rotate the deque so subsequent ticks cycle through all entries.
        self._queue.rotate(BATCH_SIZE)
    @callback
    def async_force_update(self, data_class_entry: str) -> None:
        """Prioritize data retrieval for given data class entry."""
        # Make it due immediately and rotate it to the front of the queue
        # so the next async_update tick picks it up.
        self.data_classes[data_class_entry].next_scan = time()
        self._queue.rotate(-(self._queue.index(self.data_classes[data_class_entry])))
    async def handle_event(self, event: dict) -> None:
        """Handle webhook events."""
        if event["data"][WEBHOOK_PUSH_TYPE] == WEBHOOK_ACTIVATION:
            _LOGGER.info("%s webhook successfully registered", MANUFACTURER)
            self._webhook = True
        elif event["data"][WEBHOOK_PUSH_TYPE] == WEBHOOK_DEACTIVATION:
            _LOGGER.info("%s webhook unregistered", MANUFACTURER)
            self._webhook = False
        elif event["data"][WEBHOOK_PUSH_TYPE] == WEBHOOK_NACAMERA_CONNECTION:
            _LOGGER.debug("%s camera reconnected", MANUFACTURER)
            # Camera came back: refresh its data out of band.
            self.async_force_update(CAMERA_DATA_CLASS_NAME)
    async def async_fetch_data(self, data_class_entry: str) -> None:
        """Fetch data and notify."""
        # Entry was disabled after a previous NoDevice error.
        if self.data[data_class_entry] is None:
            return
        try:
            await self.data[data_class_entry].async_update()
        except pyatmo.NoDevice as err:
            _LOGGER.debug(err)
            # No device behind this entry anymore: disable future fetches.
            self.data[data_class_entry] = None
        except pyatmo.ApiError as err:
            # Transient API error: keep last data, still notify subscribers.
            _LOGGER.debug(err)
        except asyncio.TimeoutError as err:
            _LOGGER.debug(err)
            # Timed out: skip notifying subscribers this round.
            return
        for update_callback in self.data_classes[data_class_entry].subscriptions:
            if update_callback:
                update_callback()
    async def register_data_class(
        self,
        data_class_name: str,
        data_class_entry: str,
        update_callback: CALLBACK_TYPE,
        **kwargs: Any,
    ) -> None:
        """Register data class.

        Creates the pyatmo instance on first registration and performs an
        initial fetch; subsequent calls only add the callback subscription.
        """
        if data_class_entry in self.data_classes:
            if update_callback not in self.data_classes[data_class_entry].subscriptions:
                self.data_classes[data_class_entry].subscriptions.append(
                    update_callback
                )
            return
        self.data_classes[data_class_entry] = NetatmoDataClass(
            name=data_class_entry,
            interval=DEFAULT_INTERVALS[data_class_name],
            next_scan=time() + DEFAULT_INTERVALS[data_class_name],
            subscriptions=[update_callback],
        )
        self.data[data_class_entry] = DATA_CLASSES[data_class_name](
            self._auth, **kwargs
        )
        try:
            await self.async_fetch_data(data_class_entry)
        except KeyError:
            # Initial fetch failed with a bad key: roll back the registration.
            self.data_classes.pop(data_class_entry)
            raise
        self._queue.append(self.data_classes[data_class_entry])
        _LOGGER.debug("Data class %s added", data_class_entry)
    async def unregister_data_class(
        self, data_class_entry: str, update_callback: CALLBACK_TYPE | None
    ) -> None:
        """Unregister data class.

        Drops the whole entry from queue/registry once its last
        subscription is removed.
        """
        self.data_classes[data_class_entry].subscriptions.remove(update_callback)
        if not self.data_classes[data_class_entry].subscriptions:
            self._queue.remove(self.data_classes[data_class_entry])
            self.data_classes.pop(data_class_entry)
            self.data.pop(data_class_entry)
            _LOGGER.debug("Data class %s removed", data_class_entry)
    @property
    def webhook(self) -> bool:
        """Return the webhook state."""
        return self._webhook
| 31.977477 | 88 | 0.674039 |
ace5935e41942b2fc3a283bfc50a5a48b63b6202 | 3,361 | py | Python | profiles_project/settings.py | OmUniyal/rest-api-django | 735ed3040966adf0ff9588d377cac0b21bf8ba26 | [
"MIT"
] | null | null | null | profiles_project/settings.py | OmUniyal/rest-api-django | 735ed3040966adf0ff9588d377cac0b21bf8ba26 | [
"MIT"
] | 5 | 2020-02-12T03:12:55.000Z | 2021-06-09T18:49:43.000Z | profiles_project/settings.py | OmUniyal/rest-api-django | 735ed3040966adf0ff9588d377cac0b21bf8ba26 | [
"MIT"
] | null | null | null | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hardcoded and committed to source control; for
# any real deployment it should be loaded from the environment instead.
SECRET_KEY = '2&zzyy@p4+^$d3c0-1!m*0j@4xl-7gcg5n56&2j+dclb%^q906'
# SECURITY WARNING: don't run with debug turned on in production!
# Controlled by the DEBUG env var ("0" disables); defaults to enabled.
DEBUG = bool(int(os.environ.get('DEBUG',1)))
ALLOWED_HOSTS = ['127.0.0.1']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'profiles_api',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# Swap Django's default user model for the project's custom one.
AUTH_USER_MODEL = 'profiles_api.UserProfile' #<app name>.<model_name>
#declared while deploying to AWS
STATIC_ROOT = 'static/'
ace593a032af64b5816063057102c2db19710672 | 110,233 | py | Python | tests/jobs.py | RyanMagnusson/incubator-airflow | ad81412fba2e8510442db73d9c905cac5eed8ebd | [
"Apache-2.0"
] | 2 | 2018-03-07T12:25:05.000Z | 2018-03-19T01:00:10.000Z | tests/jobs.py | RyanMagnusson/incubator-airflow | ad81412fba2e8510442db73d9c905cac5eed8ebd | [
"Apache-2.0"
] | null | null | null | tests/jobs.py | RyanMagnusson/incubator-airflow | ad81412fba2e8510442db73d9c905cac5eed8ebd | [
"Apache-2.0"
] | 1 | 2020-07-14T09:45:54.000Z | 2020-07-14T09:45:54.000Z | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import multiprocessing
import os
import shutil
import six
import socket
import threading
import time
import unittest
from tempfile import mkdtemp
from airflow import AirflowException, settings, models
from airflow.bin import cli
from airflow.executors import BaseExecutor, SequentialExecutor
from airflow.jobs import BackfillJob, SchedulerJob, LocalTaskJob
from airflow.models import DAG, DagModel, DagBag, DagRun, Pool, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils import timezone
from airflow.utils.dates import days_ago
from airflow.utils.db import provide_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.dag_processing import SimpleDag, SimpleDagBag, list_py_file_paths
from airflow.utils.net import get_hostname
from mock import Mock, patch
from sqlalchemy.orm.session import make_transient
from tests.executors.test_executor import TestExecutor
from tests.core import TEST_DAG_FOLDER
from airflow import configuration
configuration.load_test_config()
import sqlalchemy
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
DEV_NULL = '/dev/null'
# Fixed, timezone-aware execution date shared by the tests below.
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
# Include the words "airflow" and "dag" in the file contents, tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
# Directory of test DAG files living next to this test module.
TEST_DAGS_FOLDER = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), 'dags')
class BackfillJobTest(unittest.TestCase):
    """Integration tests for BackfillJob against a live metadata DB.

    These tests build/clear real DAGs and DagRuns, so they depend on the
    configured sql_alchemy_conn; several are skipped on sqlite.
    """
    def setUp(self):
        """Create a CLI parser and a DagBag with the example DAGs loaded."""
        self.parser = cli.CLIFactory.get_parser()
        self.dagbag = DagBag(include_examples=True)
    @unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
                     "concurrent access not supported in sqlite")
    def test_trigger_controller_dag(self):
        """Backfilling the controller DAG should queue tasks in the target DAG."""
        dag = self.dagbag.get_dag('example_trigger_controller_dag')
        target_dag = self.dagbag.get_dag('example_trigger_target_dag')
        dag.clear()
        target_dag.clear()
        scheduler = SchedulerJob()
        queue = mock.Mock()
        scheduler._process_task_instances(target_dag, queue=queue)
        self.assertFalse(queue.append.called)
        job = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_first_depends_on_past=True
        )
        job.run()
        scheduler = SchedulerJob()
        queue = mock.Mock()
        scheduler._process_task_instances(target_dag, queue=queue)
        self.assertTrue(queue.append.called)
        target_dag.clear()
        dag.clear()
    @unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
                     "concurrent access not supported in sqlite")
    def test_backfill_multi_dates(self):
        """A two-day backfill should create one successful DagRun per day."""
        dag = self.dagbag.get_dag('example_bash_operator')
        dag.clear()
        job = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE + datetime.timedelta(days=1),
            ignore_first_depends_on_past=True
        )
        job.run()
        session = settings.Session()
        drs = session.query(DagRun).filter(
            DagRun.dag_id=='example_bash_operator'
        ).order_by(DagRun.execution_date).all()
        self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
        self.assertTrue(drs[0].state == State.SUCCESS)
        self.assertTrue(drs[1].execution_date ==
                        DEFAULT_DATE + datetime.timedelta(days=1))
        self.assertTrue(drs[1].state == State.SUCCESS)
        dag.clear()
        session.close()
    @unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
                     "concurrent access not supported in sqlite")
    def test_backfill_examples(self):
        """
        Test backfilling example dags
        """
        # some DAGs really are just examples... but try to make them work!
        skip_dags = [
            'example_http_operator',
            'example_twitter_dag',
            'example_trigger_target_dag',
            'example_trigger_controller_dag',  # tested above
            'test_utils',  # sleeps forever
            'example_kubernetes_operator',  # only works with k8s cluster
        ]
        logger = logging.getLogger('BackfillJobTest.test_backfill_examples')
        dags = [
            dag for dag in self.dagbag.dags.values()
            if 'example_dags' in dag.full_filepath and dag.dag_id not in skip_dags
        ]
        for dag in dags:
            dag.clear(
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE)
        for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
            logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
            job = BackfillJob(
                dag=dag,
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE,
                ignore_first_depends_on_past=True)
            job.run()
    def test_backfill_ordered_concurrent_execute(self):
        """Tasks must be queued in topological order, batched per level."""
        dag = DAG(
            dag_id='test_backfill_ordered_concurrent_execute',
            start_date=DEFAULT_DATE,
            schedule_interval="@daily")
        with dag:
            op1 = DummyOperator(task_id='leave1')
            op2 = DummyOperator(task_id='leave2')
            op3 = DummyOperator(task_id='upstream_level_1')
            op4 = DummyOperator(task_id='upstream_level_2')
            op5 = DummyOperator(task_id='upstream_level_3')
            # order randomly
            op2.set_downstream(op3)
            op1.set_downstream(op3)
            op4.set_downstream(op5)
            op3.set_downstream(op4)
        dag.clear()
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          executor=executor,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                          )
        job.run()
        # test executor history keeps a list
        history = executor.history
        # check if right order. Every loop has a 'pause' (0) to change state
        # from RUNNING to SUCCESS.
        # 6,0,3,0,3,0,3,0 = 8 loops
        self.assertEqual(8, len(history))
        loop_count = 0
        while len(history) > 0:
            queued_tasks = history.pop(0)
            if loop_count == 0:
                # first loop should contain 6 tasks (3 days x 2 tasks)
                self.assertEqual(6, len(queued_tasks))
            if loop_count == 2 or loop_count == 4 or loop_count == 6:
                # 3 days x 1 task
                self.assertEqual(3, len(queued_tasks))
            loop_count += 1
    def test_backfill_pooled_tasks(self):
        """
        Test that queued tasks are executed by BackfillJob
        Test for https://github.com/airbnb/airflow/pull/1225
        """
        session = settings.Session()
        pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
        session.add(pool)
        session.commit()
        dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
        dag.clear()
        job = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE)
        # run with timeout because this creates an infinite loop if not
        # caught
        with timeout(seconds=30):
            job.run()
        ti = TI(
            task=dag.get_task('test_backfill_pooled_task'),
            execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.SUCCESS)
    def test_backfill_depends_on_past(self):
        """
        Test that backfill respects ignore_depends_on_past
        """
        dag = self.dagbag.get_dag('test_depends_on_past')
        dag.clear()
        run_date = DEFAULT_DATE + datetime.timedelta(days=5)
        # backfill should deadlock
        self.assertRaisesRegexp(
            AirflowException,
            'BackfillJob is deadlocked',
            BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
        BackfillJob(
            dag=dag,
            start_date=run_date,
            end_date=run_date,
            ignore_first_depends_on_past=True).run()
        # ti should have succeeded
        ti = TI(dag.tasks[0], run_date)
        ti.refresh_from_db()
        self.assertEquals(ti.state, State.SUCCESS)
    def test_run_ignores_all_dependencies(self):
        """
        Test that run respects ignore_all_dependencies
        """
        dag_id = 'test_run_ignores_all_dependencies'
        dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
        dag.clear()
        task0_id = 'test_run_dependent_task'
        args0 = ['run',
                 '-A',
                 dag_id,
                 task0_id,
                 DEFAULT_DATE.isoformat()]
        cli.run(self.parser.parse_args(args0))
        ti_dependent0 = TI(
            task=dag.get_task(task0_id),
            execution_date=DEFAULT_DATE)
        ti_dependent0.refresh_from_db()
        self.assertEquals(ti_dependent0.state, State.FAILED)
        task1_id = 'test_run_dependency_task'
        args1 = ['run',
                 '-A',
                 dag_id,
                 task1_id,
                 (DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
        cli.run(self.parser.parse_args(args1))
        ti_dependency = TI(
            task=dag.get_task(task1_id),
            execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
        ti_dependency.refresh_from_db()
        self.assertEquals(ti_dependency.state, State.FAILED)
        task2_id = 'test_run_dependent_task'
        args2 = ['run',
                 '-A',
                 dag_id,
                 task2_id,
                 (DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
        cli.run(self.parser.parse_args(args2))
        ti_dependent = TI(
            task=dag.get_task(task2_id),
            execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
        ti_dependent.refresh_from_db()
        self.assertEquals(ti_dependent.state, State.SUCCESS)
    def test_run_naive_taskinstance(self):
        """
        Test that we can run naive (non-localized) task instances
        """
        NAIVE_DATE = datetime.datetime(2016, 1, 1)
        dag_id = 'test_run_ignores_all_dependencies'
        dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
        dag.clear()
        task0_id = 'test_run_dependent_task'
        args0 = ['run',
                 '-A',
                 dag_id,
                 task0_id,
                 NAIVE_DATE.isoformat()]
        cli.run(self.parser.parse_args(args0))
        ti_dependent0 = TI(
            task=dag.get_task(task0_id),
            execution_date=NAIVE_DATE)
        ti_dependent0.refresh_from_db()
        self.assertEquals(ti_dependent0.state, State.FAILED)
    def test_cli_backfill_depends_on_past(self):
        """
        Test that CLI respects -I argument
        """
        dag_id = 'test_dagrun_states_deadlock'
        run_date = DEFAULT_DATE + datetime.timedelta(days=1)
        args = [
            'backfill',
            dag_id,
            '-l',
            '-s',
            run_date.isoformat(),
        ]
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()
        self.assertRaisesRegexp(
            AirflowException,
            'BackfillJob is deadlocked',
            cli.backfill,
            self.parser.parse_args(args))
        cli.backfill(self.parser.parse_args(args + ['-I']))
        ti = TI(dag.get_task('test_depends_on_past'), run_date)
        ti.refresh_from_db()
        # task ran
        self.assertEqual(ti.state, State.SUCCESS)
        dag.clear()
    def test_cli_receives_delay_arg(self):
        """
        Tests that the --delay argument is passed correctly to the BackfillJob
        """
        dag_id = 'example_bash_operator'
        run_date = DEFAULT_DATE
        args = [
            'backfill',
            dag_id,
            '-s',
            run_date.isoformat(),
            '--delay_on_limit',
            '0.5',
        ]
        parsed_args = self.parser.parse_args(args)
        self.assertEqual(0.5, parsed_args.delay_on_limit)
    def _get_dag_test_max_active_limits(self, dag_id, max_active_runs=1):
        """Build a small hourly DAG with the given max_active_runs limit."""
        dag = DAG(
            dag_id=dag_id,
            start_date=DEFAULT_DATE,
            schedule_interval="@hourly",
            max_active_runs=max_active_runs
        )
        with dag:
            op1 = DummyOperator(task_id='leave1')
            op2 = DummyOperator(task_id='leave2')
            op3 = DummyOperator(task_id='upstream_level_1')
            op4 = DummyOperator(task_id='upstream_level_2')
            op1 >> op2 >> op3
            op4 >> op3
        dag.clear()
        return dag
    def test_backfill_max_limit_check_within_limit(self):
        """With a generous max_active_runs, all backfill runs succeed."""
        dag = self._get_dag_test_max_active_limits(
            'test_backfill_max_limit_check_within_limit',
            max_active_runs=16)
        start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
        end_date = DEFAULT_DATE
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          start_date=start_date,
                          end_date=end_date,
                          executor=executor,
                          donot_pickle=True)
        job.run()
        dagruns = DagRun.find(dag_id=dag.dag_id)
        self.assertEqual(2, len(dagruns))
        self.assertTrue(all([run.state == State.SUCCESS for run in dagruns]))
    def test_backfill_max_limit_check(self):
        """Backfill must wait while an outside DagRun holds the only slot."""
        dag_id = 'test_backfill_max_limit_check'
        run_id = 'test_dagrun'
        start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
        end_date = DEFAULT_DATE
        dag_run_created_cond = threading.Condition()
        def run_backfill(cond):
            # Runs in a worker thread: create a blocking DagRun, signal the
            # main thread, then start the backfill (which should stall).
            cond.acquire()
            try:
                dag = self._get_dag_test_max_active_limits(dag_id)
                # this session object is different than the one in the main thread
                thread_session = settings.Session()
                # Existing dagrun that is not within the backfill range
                dag.create_dagrun(
                    run_id=run_id,
                    state=State.RUNNING,
                    execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
                    start_date=DEFAULT_DATE,
                )
                thread_session.commit()
                cond.notify()
            finally:
                cond.release()
            executor = TestExecutor(do_update=True)
            job = BackfillJob(dag=dag,
                              start_date=start_date,
                              end_date=end_date,
                              executor=executor,
                              donot_pickle=True)
            job.run()
            thread_session.close()
        backfill_job_thread = threading.Thread(target=run_backfill,
                                               name="run_backfill",
                                               args=(dag_run_created_cond,))
        dag_run_created_cond.acquire()
        session = settings.Session()
        backfill_job_thread.start()
        try:
            # at this point backfill can't run since the max_active_runs has been
            # reached, so it is waiting
            dag_run_created_cond.wait(timeout=1.5)
            dagruns = DagRun.find(dag_id=dag_id)
            dr = dagruns[0]
            self.assertEqual(1, len(dagruns))
            self.assertEqual(dr.run_id, run_id)
            # allow the backfill to execute by setting the existing dag run to SUCCESS,
            # backfill will execute dag runs 1 by 1
            dr.set_state(State.SUCCESS)
            session.merge(dr)
            session.commit()
            session.close()
            backfill_job_thread.join()
            dagruns = DagRun.find(dag_id=dag_id)
            self.assertEqual(3, len(dagruns))  # 2 from backfill + 1 existing
            self.assertEqual(dagruns[-1].run_id, dr.run_id)
        finally:
            dag_run_created_cond.release()
    def test_backfill_max_limit_check_no_count_existing(self):
        """A pre-existing run inside the backfill range is adopted, not counted."""
        dag = self._get_dag_test_max_active_limits(
            'test_backfill_max_limit_check_no_count_existing')
        start_date = DEFAULT_DATE
        end_date = DEFAULT_DATE
        # Existing dagrun that is within the backfill range
        dag.create_dagrun(run_id="test_existing_backfill",
                          state=State.RUNNING,
                          execution_date=DEFAULT_DATE,
                          start_date=DEFAULT_DATE)
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          start_date=start_date,
                          end_date=end_date,
                          executor=executor,
                          donot_pickle=True)
        job.run()
        # BackfillJob will run since the existing DagRun does not count for the max
        # active limit since it's within the backfill date range.
        dagruns = DagRun.find(dag_id=dag.dag_id)
        # will only be able to run 1 (the existing one) since there's just
        # one dag run slot left given the max_active_runs limit
        self.assertEqual(1, len(dagruns))
        self.assertEqual(State.SUCCESS, dagruns[0].state)
    def test_backfill_max_limit_check_complete_loop(self):
        """With max_active_runs=1, the backfill completes runs one at a time."""
        dag = self._get_dag_test_max_active_limits(
            'test_backfill_max_limit_check_complete_loop')
        start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
        end_date = DEFAULT_DATE
        # Given the max limit to be 1 in active dag runs, we need to run the
        # backfill job 3 times
        success_expected = 2
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          start_date=start_date,
                          end_date=end_date,
                          executor=executor,
                          donot_pickle=True)
        job.run()
        success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
        running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
        self.assertEqual(success_expected, success_dagruns)
        self.assertEqual(0, running_dagruns)  # no dag_runs in running state are left
    def test_sub_set_subdag(self):
        """Backfilling a sub-DAG only runs the matching tasks of the run."""
        dag = DAG(
            'test_sub_set_subdag',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='leave1')
            op2 = DummyOperator(task_id='leave2')
            op3 = DummyOperator(task_id='upstream_level_1')
            op4 = DummyOperator(task_id='upstream_level_2')
            op5 = DummyOperator(task_id='upstream_level_3')
            # order randomly
            op2.set_downstream(op3)
            op1.set_downstream(op3)
            op4.set_downstream(op5)
            op3.set_downstream(op4)
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE)
        executor = TestExecutor(do_update=True)
        sub_dag = dag.sub_dag(task_regex="leave*",
                              include_downstream=False,
                              include_upstream=False)
        job = BackfillJob(dag=sub_dag,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE,
                          executor=executor)
        job.run()
        self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
        # the run_id should have changed, so a refresh won't work
        drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
        dr = drs[0]
        self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
                         dr.run_id)
        for ti in dr.get_task_instances():
            if ti.task_id == 'leave1' or ti.task_id == 'leave2':
                self.assertEqual(State.SUCCESS, ti.state)
            else:
                self.assertEqual(State.NONE, ti.state)
    def test_backfill_fill_blanks(self):
        """Backfill runs TIs with no state, leaves terminal states untouched."""
        dag = DAG(
            'test_backfill_fill_blanks',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'},
        )
        with dag:
            op1 = DummyOperator(task_id='op1')
            op2 = DummyOperator(task_id='op2')
            op3 = DummyOperator(task_id='op3')
            op4 = DummyOperator(task_id='op4')
            op5 = DummyOperator(task_id='op5')
            op6 = DummyOperator(task_id='op6')
        dag.clear()
        dr = dag.create_dagrun(run_id='test',
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE)
        executor = TestExecutor(do_update=True)
        session = settings.Session()
        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id == op1.task_id:
                ti.state = State.UP_FOR_RETRY
                ti.end_date = DEFAULT_DATE
            elif ti.task_id == op2.task_id:
                ti.state = State.FAILED
            elif ti.task_id == op3.task_id:
                ti.state = State.SKIPPED
            elif ti.task_id == op4.task_id:
                ti.state = State.SCHEDULED
            elif ti.task_id == op5.task_id:
                ti.state = State.UPSTREAM_FAILED
            # op6 = None
            session.merge(ti)
        session.commit()
        session.close()
        job = BackfillJob(dag=dag,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE,
                          executor=executor)
        self.assertRaisesRegexp(
            AirflowException,
            'Some task instances failed',
            job.run)
        self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
        # the run_id should have changed, so a refresh won't work
        drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
        dr = drs[0]
        self.assertEqual(dr.state, State.FAILED)
        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == op2.task_id:
                self.assertEqual(ti.state, State.FAILED)
            elif ti.task_id == op3.task_id:
                self.assertEqual(ti.state, State.SKIPPED)
            elif ti.task_id == op5.task_id:
                self.assertEqual(ti.state, State.UPSTREAM_FAILED)
    def test_backfill_execute_subdag(self):
        """Backfilling a SubDagOperator's inner DAG runs all of its tasks."""
        dag = self.dagbag.get_dag('example_subdag_operator')
        subdag_op_task = dag.get_task('section-1')
        subdag = subdag_op_task.subdag
        subdag.schedule_interval = '@daily'
        start_date = timezone.utcnow()
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=subdag,
                          start_date=start_date,
                          end_date=start_date,
                          executor=executor,
                          donot_pickle=True)
        job.run()
        history = executor.history
        subdag_history = history[0]
        # check that all 5 task instances of the subdag 'section-1' were executed
        self.assertEqual(5, len(subdag_history))
        for sdh in subdag_history:
            ti = sdh[3]
            self.assertIn('section-1-task-', ti.task_id)
        subdag.clear()
        dag.clear()
    def test_update_counters(self):
        """_update_counters must route a TI into the right status bucket."""
        dag = DAG(
            dag_id='test_manage_executor_state',
            start_date=DEFAULT_DATE)
        task1 = DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        job = BackfillJob(dag=dag)
        session = settings.Session()
        dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = TI(task1, dr.execution_date)
        ti.refresh_from_db()
        ti_status = BackfillJob._DagRunTaskStatus()
        # test for success
        ti.set_state(State.SUCCESS, session)
        ti_status.started[ti.key] = ti
        job._update_counters(ti_status=ti_status)
        self.assertTrue(len(ti_status.started) == 0)
        self.assertTrue(len(ti_status.succeeded) == 1)
        self.assertTrue(len(ti_status.skipped) == 0)
        self.assertTrue(len(ti_status.failed) == 0)
        self.assertTrue(len(ti_status.to_run) == 0)
        ti_status.succeeded.clear()
        # test for skipped
        ti.set_state(State.SKIPPED, session)
        ti_status.started[ti.key] = ti
        job._update_counters(ti_status=ti_status)
        self.assertTrue(len(ti_status.started) == 0)
        self.assertTrue(len(ti_status.succeeded) == 0)
        self.assertTrue(len(ti_status.skipped) == 1)
        self.assertTrue(len(ti_status.failed) == 0)
        self.assertTrue(len(ti_status.to_run) == 0)
        ti_status.skipped.clear()
        # test for failed
        ti.set_state(State.FAILED, session)
        ti_status.started[ti.key] = ti
        job._update_counters(ti_status=ti_status)
        self.assertTrue(len(ti_status.started) == 0)
        self.assertTrue(len(ti_status.succeeded) == 0)
        self.assertTrue(len(ti_status.skipped) == 0)
        self.assertTrue(len(ti_status.failed) == 1)
        self.assertTrue(len(ti_status.to_run) == 0)
        ti_status.failed.clear()
        # test for reschedule: a TI reset to State.NONE goes back to to_run
        ti.set_state(State.NONE, session)
        ti_status.started[ti.key] = ti
        job._update_counters(ti_status=ti_status)
        self.assertTrue(len(ti_status.started) == 0)
        self.assertTrue(len(ti_status.succeeded) == 0)
        self.assertTrue(len(ti_status.skipped) == 0)
        self.assertTrue(len(ti_status.failed) == 0)
        self.assertTrue(len(ti_status.to_run) == 1)
        session.close()
    def test_dag_get_run_dates(self):
        """DAG.get_run_dates should honor the schedule_interval (or lack of one)."""
        def get_test_dag_for_backfill(schedule_interval=None):
            # Minimal single-task DAG with a configurable schedule.
            dag = DAG(
                dag_id='test_get_dates',
                start_date=DEFAULT_DATE,
                schedule_interval=schedule_interval)
            DummyOperator(
                task_id='dummy',
                dag=dag,
                owner='airflow')
            return dag
        test_dag = get_test_dag_for_backfill()
        self.assertEqual([DEFAULT_DATE], test_dag.get_run_dates(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE))
        test_dag = get_test_dag_for_backfill(schedule_interval="@hourly")
        self.assertEqual([DEFAULT_DATE - datetime.timedelta(hours=3),
                          DEFAULT_DATE - datetime.timedelta(hours=2),
                          DEFAULT_DATE - datetime.timedelta(hours=1),
                          DEFAULT_DATE],
                         test_dag.get_run_dates(
                             start_date=DEFAULT_DATE - datetime.timedelta(hours=3),
                             end_date=DEFAULT_DATE,))
class LocalTaskJobTest(unittest.TestCase):
    """Integration tests for LocalTaskJob heartbeats and run semantics."""
    def setUp(self):
        """No shared fixtures; each test builds its own DAG/session."""
        pass
    @patch('os.getpid')
    def test_localtaskjob_heartbeat(self, mock_pid):
        """heartbeat_callback must raise when hostname or pid do not match."""
        session = settings.Session()
        dag = DAG(
            'test_localtaskjob_heartbeat',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='op1')
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        ti.state = State.RUNNING
        ti.hostname = "blablabla"
        session.commit()
        job1 = LocalTaskJob(task_instance=ti,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        # Hostname mismatch -> heartbeat must raise.
        self.assertRaises(AirflowException, job1.heartbeat_callback)
        mock_pid.return_value = 1
        ti.state = State.RUNNING
        ti.hostname = get_hostname()
        ti.pid = 1
        session.merge(ti)
        session.commit()
        # Matching hostname and pid -> heartbeat passes.
        ret = job1.heartbeat_callback()
        self.assertEqual(ret, None)
        # Pid mismatch -> heartbeat must raise again.
        mock_pid.return_value = 2
        self.assertRaises(AirflowException, job1.heartbeat_callback)
    def test_mark_success_no_kill(self):
        """
        Test that ensures that mark_success in the UI doesn't cause
        the task to fail, and that the task exits
        """
        dagbag = models.DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_mark_success')
        task = dag.get_task('task1')
        session = settings.Session()
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = TI(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
        process = multiprocessing.Process(target=job1.run)
        process.start()
        ti.refresh_from_db()
        # Poll up to ~5s for the task to reach RUNNING.
        for i in range(0, 50):
            if ti.state == State.RUNNING:
                break
            time.sleep(0.1)
            ti.refresh_from_db()
        self.assertEqual(State.RUNNING, ti.state)
        # Simulate the UI marking the running task as successful.
        ti.state = State.SUCCESS
        session.merge(ti)
        session.commit()
        process.join(timeout=10)
        self.assertFalse(process.is_alive())
        ti.refresh_from_db()
        self.assertEqual(State.SUCCESS, ti.state)
    def test_localtaskjob_double_trigger(self):
        """A second LocalTaskJob for an already-running TI must not start it."""
        dagbag = models.DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_localtaskjob_double_trigger')
        task = dag.get_task('test_localtaskjob_double_trigger_task')
        session = settings.Session()
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        ti.state = State.RUNNING
        ti.hostname = get_hostname()
        ti.pid = 1
        session.commit()
        ti_run = TI(task=task, execution_date=DEFAULT_DATE)
        job1 = LocalTaskJob(task_instance=ti_run,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        with patch.object(BaseTaskRunner, 'start', return_value=None) as mock_method:
            job1.run()
        # The task runner must never have been started for the duplicate job.
        mock_method.assert_not_called()
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        self.assertEqual(ti.pid, 1)
        self.assertEqual(ti.state, State.RUNNING)
        session.close()
class SchedulerJobTest(unittest.TestCase):
# These defaults make the test faster to run
default_scheduler_args = {"file_process_interval": 0,
"processor_poll_interval": 0.5}
def setUp(self):
    """Load the DagBag and clear DagRun/ImportError tables before each test."""
    self.dagbag = DagBag()
    session = settings.Session()
    # Remove state possibly left behind by earlier tests.
    for model in (models.DagRun, models.ImportError):
        session.query(model).delete()
    session.commit()
@staticmethod
def run_single_scheduler_loop_with_no_dags(dags_folder):
    """
    Utility function that runs a single scheduler loop without actually
    changing/scheduling any dags. This is useful to simulate the other side effects of
    running a scheduler loop, e.g. to see what parse errors there are in the
    dags_folder.

    :param dags_folder: the directory to traverse
    :type dags_folder: str
    """
    scheduler = SchedulerJob(
        dag_id='this_dag_doesnt_exist',  # We don't want to actually run anything
        num_runs=1,
        subdir=os.path.join(dags_folder))
    # No delay between heartbeats so the single loop finishes quickly.
    scheduler.heartrate = 0
    scheduler.run()
def _make_simple_dag_bag(self, dags):
    """Wrap each DAG in a SimpleDag and collect them into a SimpleDagBag."""
    simple_dags = []
    for dag in dags:
        simple_dags.append(SimpleDag(dag))
    return SimpleDagBag(simple_dags)
def test_process_executor_events(self):
    """_process_executor_events only applies executor events for TIs whose
    dag_id is present in the given SimpleDagBag, and never downgrades a
    SUCCESS TI to FAILED."""
    dag_id = "test_process_executor_events"
    dag_id2 = "test_process_executor_events_2"
    task_id_1 = 'dummy_task'

    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    task2 = DummyOperator(dag=dag2, task_id=task_id_1)

    dagbag1 = self._make_simple_dag_bag([dag])
    dagbag2 = self._make_simple_dag_bag([dag2])

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    ti1 = TI(task1, DEFAULT_DATE)
    ti1.state = State.QUEUED
    session.merge(ti1)
    session.commit()

    executor = TestExecutor()
    executor.event_buffer[ti1.key] = State.FAILED

    scheduler.executor = executor

    # dag bag does not contain dag_id
    scheduler._process_executor_events(simple_dag_bag=dagbag2)
    ti1.refresh_from_db()
    # Event must be ignored: TI stays QUEUED.
    self.assertEqual(ti1.state, State.QUEUED)

    # dag bag does contain dag_id
    scheduler._process_executor_events(simple_dag_bag=dagbag1)
    ti1.refresh_from_db()
    # FAILED event applied since the dag is in the bag.
    self.assertEqual(ti1.state, State.FAILED)

    # A SUCCESS TI must remain SUCCESS even when a (stale) event arrives.
    ti1.state = State.SUCCESS
    session.merge(ti1)
    session.commit()
    executor.event_buffer[ti1.key] = State.SUCCESS

    scheduler._process_executor_events(simple_dag_bag=dagbag1)
    ti1.refresh_from_db()
    self.assertEqual(ti1.state, State.SUCCESS)
def test_execute_task_instances_is_paused_wont_execute(self):
    """A SCHEDULED task instance of a paused DAG must not be queued."""
    dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
    task_id_1 = 'dummy_task'

    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, DEFAULT_DATE)
    ti1.state = State.SCHEDULED
    dr1.state = State.RUNNING
    # Mark the DAG paused in the metadata DB.
    dagmodel = models.DagModel()
    dagmodel.dag_id = dag_id
    dagmodel.is_paused = True
    session.merge(ti1)
    session.merge(dr1)
    session.add(dagmodel)
    session.commit()

    scheduler._execute_task_instances(dagbag, [State.SCHEDULED])

    ti1.refresh_from_db()
    # assertEqual, not the deprecated assertEquals alias (removed in py3.12).
    self.assertEqual(State.SCHEDULED, ti1.state)
def test_execute_task_instances_no_dagrun_task_will_execute(self):
    """
    Tests that tasks without dagrun still get executed.
    """
    dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
    task_id_1 = 'dummy_task'

    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, DEFAULT_DATE)
    ti1.state = State.SCHEDULED
    # Shift the TI one day forward so it has no matching dag run.
    ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
    session.merge(ti1)
    session.commit()

    scheduler._execute_task_instances(dagbag, [State.SCHEDULED])

    ti1.refresh_from_db()
    # assertEqual, not the deprecated assertEquals alias (removed in py3.12).
    self.assertEqual(State.QUEUED, ti1.state)
def test_execute_task_instances_backfill_tasks_wont_execute(self):
    """
    Tests that backfill tasks won't get executed.
    """
    dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
    task_id_1 = 'dummy_task'

    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    # A run_id with the backfill prefix marks the run as a backfill.
    dr1.run_id = BackfillJob.ID_PREFIX + '_blah'
    ti1 = TI(task1, dr1.execution_date)
    ti1.refresh_from_db()
    ti1.state = State.SCHEDULED
    session.merge(ti1)
    session.merge(dr1)
    session.commit()

    self.assertTrue(dr1.is_backfill)

    scheduler._execute_task_instances(dagbag, [State.SCHEDULED])

    ti1.refresh_from_db()
    # assertEqual, not the deprecated assertEquals alias (removed in py3.12).
    self.assertEqual(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
    """Backfill TIs are excluded; TIs with no dag run or a regular dag run
    are both executable."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
    task_id_1 = 'dummy'

    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr2.run_id = BackfillJob.ID_PREFIX + 'asdf'  # mark dr2 as a backfill run

    ti_no_dagrun = TI(task1, DEFAULT_DATE - datetime.timedelta(days=1))
    ti_backfill = TI(task1, dr2.execution_date)
    ti_with_dagrun = TI(task1, dr1.execution_date)
    # ti_with_paused
    ti_no_dagrun.state = State.SCHEDULED
    ti_backfill.state = State.SCHEDULED
    ti_with_dagrun.state = State.SCHEDULED

    session.merge(dr2)
    session.merge(ti_no_dagrun)
    session.merge(ti_backfill)
    session.merge(ti_with_dagrun)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)

    self.assertEqual(2, len(res))
    # Materialize into a list: map() returns a one-shot iterator on py3,
    # so a second assertIn would only see what the first didn't consume.
    res_keys = [x.key for x in res]
    self.assertIn(ti_no_dagrun.key, res_keys)
    self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
    """Pool slots cap how many TIs are executable: pool 'a' has one slot,
    pool 'b' has plenty, so 3 of the 4 SCHEDULED TIs are returned."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
    task_id_1 = 'dummy'
    task_id_2 = 'dummydummy'

    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
    task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)

    tis = ([
        TI(task1, dr1.execution_date),
        TI(task2, dr1.execution_date),
        TI(task1, dr2.execution_date),
        TI(task2, dr2.execution_date)
    ])
    for ti in tis:
        ti.state = State.SCHEDULED
        session.merge(ti)
    pool = models.Pool(pool='a', slots=1, description='haha')
    pool2 = models.Pool(pool='b', slots=100, description='haha')
    session.add(pool)
    session.add(pool2)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    session.commit()
    self.assertEqual(3, len(res))
    # Comprehension instead of a manual append loop.
    res_keys = [ti.key for ti in res]
    self.assertIn(tis[0].key, res_keys)
    self.assertIn(tis[1].key, res_keys)
    self.assertIn(tis[3].key, res_keys)
def test_nonexistent_pool(self):
    """A TI assigned to a pool that does not exist is never executable."""
    dag_id = 'SchedulerJobTest.test_nonexistent_pool'
    task_id = 'dummy_wrong_pool'

    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task = DummyOperator(dag=dag, task_id=task_id, pool="this_pool_doesnt_exist")
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr = scheduler.create_dag_run(dag)

    ti = TI(task, dr.execution_date)
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    session.commit()
    # No pool slots exist for the TI's pool, so nothing is returned.
    self.assertEqual(0, len(res))
def test_find_executable_task_instances_none(self):
    """With a dag run but no SCHEDULED task instances, nothing is executable."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
    task_id_1 = 'dummy'

    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    # Create the dag run for its side effect only; the binding was unused.
    scheduler.create_dag_run(dag)
    session.commit()

    self.assertEqual(0, len(scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)))
def test_find_executable_task_instances_concurrency(self):
    """DAG-level concurrency (2) limits executable TIs: one RUNNING TI leaves
    room for one more; two RUNNING TIs leave room for none."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
    task_id_1 = 'dummy'

    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr3 = scheduler.create_dag_run(dag)

    ti1 = TI(task1, dr1.execution_date)
    ti2 = TI(task1, dr2.execution_date)
    ti3 = TI(task1, dr3.execution_date)
    ti1.state = State.RUNNING
    ti2.state = State.SCHEDULED
    ti3.state = State.SCHEDULED
    session.merge(ti1)
    session.merge(ti2)
    session.merge(ti3)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)

    self.assertEqual(1, len(res))
    # Materialize a list: map() yields a one-shot iterator on py3 and
    # membership tests would consume it.
    res_keys = [x.key for x in res]
    self.assertIn(ti2.key, res_keys)

    ti2.state = State.RUNNING
    session.merge(ti2)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)

    self.assertEqual(0, len(res))
def test_find_executable_task_instances_task_concurrency(self):
    """Per-task concurrency (task1 capped at 2 concurrent TIs) limits which
    SCHEDULED TIs are executable, independently of DAG concurrency."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
    task_id_1 = 'dummy'
    task_id_2 = 'dummy2'

    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
    task2 = DummyOperator(dag=dag, task_id=task_id_2)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr3 = scheduler.create_dag_run(dag)

    # Both SCHEDULED, nothing running: both are executable.
    ti1_1 = TI(task1, dr1.execution_date)
    ti2 = TI(task2, dr1.execution_date)

    ti1_1.state = State.SCHEDULED
    ti2.state = State.SCHEDULED
    session.merge(ti1_1)
    session.merge(ti2)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)

    self.assertEqual(2, len(res))

    # One task1 TI running (1 of 2 slots used): one more task1 TI fits.
    ti1_1.state = State.RUNNING
    ti2.state = State.RUNNING
    ti1_2 = TI(task1, dr2.execution_date)
    ti1_2.state = State.SCHEDULED
    session.merge(ti1_1)
    session.merge(ti2)
    session.merge(ti1_2)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)

    self.assertEqual(1, len(res))

    # Two task1 TIs running (cap reached): a third is not executable.
    ti1_2.state = State.RUNNING
    ti1_3 = TI(task1, dr3.execution_date)
    ti1_3.state = State.SCHEDULED
    session.merge(ti1_2)
    session.merge(ti1_3)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)

    self.assertEqual(0, len(res))

    # All three back to SCHEDULED: only 2 fit under the cap.
    ti1_1.state = State.SCHEDULED
    ti1_2.state = State.SCHEDULED
    ti1_3.state = State.SCHEDULED
    session.merge(ti1_1)
    session.merge(ti1_2)
    session.merge(ti1_3)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)

    self.assertEqual(2, len(res))

    # One running, two scheduled: only 1 more fits under the cap.
    ti1_1.state = State.RUNNING
    ti1_2.state = State.SCHEDULED
    ti1_3.state = State.SCHEDULED
    session.merge(ti1_1)
    session.merge(ti1_2)
    session.merge(ti1_3)
    session.commit()

    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)

    self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
    """An empty TI list yields an empty result from the state change."""
    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()
    changed = scheduler._change_state_for_executable_task_instances(
        [], [State.NONE], session)
    self.assertEqual(len(changed), 0)
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
    """TIs whose current state is not in the acceptable-states list are
    filtered out, so nothing is changed."""
    dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr3 = scheduler.create_dag_run(dag)

    ti1 = TI(task1, dr1.execution_date)
    ti2 = TI(task1, dr2.execution_date)
    ti3 = TI(task1, dr3.execution_date)
    ti1.state = State.SCHEDULED
    ti2.state = State.SCHEDULED
    ti3.state = State.SCHEDULED
    session.merge(ti1)
    session.merge(ti2)
    session.merge(ti3)
    session.commit()

    # All TIs are SCHEDULED but only RUNNING is accepted -> no matches.
    res = scheduler._change_state_for_executable_task_instances(
        [ti1, ti2, ti3],
        [State.RUNNING],
        session)
    self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_none_state(self):
    """TIs in an accepted state (NONE or SCHEDULED) are moved to QUEUED;
    the QUEUED one is skipped."""
    dag_id = 'SchedulerJobTest.test_change_state_for__none_state'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr3 = scheduler.create_dag_run(dag)

    ti1 = TI(task1, dr1.execution_date)
    ti2 = TI(task1, dr2.execution_date)
    ti3 = TI(task1, dr3.execution_date)
    ti1.state = State.SCHEDULED
    ti2.state = State.QUEUED
    ti3.state = State.NONE
    session.merge(ti1)
    session.merge(ti2)
    session.merge(ti3)
    session.commit()

    res = scheduler._change_state_for_executable_task_instances(
        [ti1, ti2, ti3],
        [State.NONE, State.SCHEDULED],
        session)
    # ti2 (QUEUED) is not in the accepted states -> only ti1 and ti3 change.
    self.assertEqual(2, len(res))
    ti1.refresh_from_db()
    ti3.refresh_from_db()
    self.assertEqual(State.QUEUED, ti1.state)
    self.assertEqual(State.QUEUED, ti3.state)
def test_enqueue_task_instances_with_queued_state(self):
    """Enqueuing QUEUED task instances sends a command to the executor."""
    dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dr1.execution_date)
    session.merge(ti1)
    session.commit()

    # Patch the executor's queue_command to verify it receives the TI.
    with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
        scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])

    mock_queue_command.assert_called()
def test_execute_task_instances_nothing(self):
    """With an empty SimpleDagBag, _execute_task_instances queues nothing."""
    dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    # Deliberately empty: the TI's dag is not in the bag.
    dagbag = SimpleDagBag([])

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dr1.execution_date)
    ti1.state = State.SCHEDULED
    session.merge(ti1)
    session.commit()

    self.assertEqual(0, scheduler._execute_task_instances(dagbag, states=[State.SCHEDULED]))
def test_execute_task_instances(self):
    """DAG concurrency (3) is respected across runs: with 2 TIs RUNNING,
    only 1 of the 2 SCHEDULED TIs in the second run can be queued."""
    dag_id = 'SchedulerJobTest.test_execute_task_instances'
    task_id_1 = 'dummy_task'
    task_id_2 = 'dummy_task_nonexistent_queue'
    # important that len(tasks) is less than concurrency
    # because before scheduler._execute_task_instances would only
    # check the num tasks once so if concurrency was 3,
    # we could execute arbitrarily many tasks in the second run
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    task2 = DummyOperator(dag=dag, task_id=task_id_2)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    # create first dag run with 1 running and 1 queued
    dr1 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dr1.execution_date)
    ti2 = TI(task2, dr1.execution_date)
    ti1.refresh_from_db()
    ti2.refresh_from_db()
    ti1.state = State.RUNNING
    ti2.state = State.RUNNING
    session.merge(ti1)
    session.merge(ti2)
    session.commit()

    self.assertEqual(State.RUNNING, dr1.state)
    self.assertEqual(2, DAG.get_num_task_instances(dag_id, dag.task_ids,
        states=[State.RUNNING], session=session))

    # create second dag run
    dr2 = scheduler.create_dag_run(dag)
    ti3 = TI(task1, dr2.execution_date)
    ti4 = TI(task2, dr2.execution_date)
    ti3.refresh_from_db()
    ti4.refresh_from_db()
    # manually set to scheduled so we can pick them up
    ti3.state = State.SCHEDULED
    ti4.state = State.SCHEDULED
    session.merge(ti3)
    session.merge(ti4)
    session.commit()

    self.assertEqual(State.RUNNING, dr2.state)

    res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])

    # check that concurrency is respected
    ti1.refresh_from_db()
    ti2.refresh_from_db()
    ti3.refresh_from_db()
    ti4.refresh_from_db()
    # 2 RUNNING + 1 newly QUEUED = 3, i.e. exactly the concurrency cap.
    self.assertEqual(3, DAG.get_num_task_instances(dag_id, dag.task_ids,
        states=[State.RUNNING, State.QUEUED], session=session))
    self.assertEqual(State.RUNNING, ti1.state)
    self.assertEqual(State.RUNNING, ti2.state)
    # Exactly one of ti3/ti4 got queued; the other stayed scheduled.
    six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
    self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
    """With max_tis_per_query=3 and 8 SCHEDULED TIs, repeated batching still
    queues all 8 in one _execute_task_instances call."""
    dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
    task_id_1 = 'dummy_task'
    task_id_2 = 'dummy_task_2'
    # important that len(tasks) is less than concurrency
    # because before scheduler._execute_task_instances would only
    # check the num tasks once so if concurrency was 3,
    # we could execute arbitrarily many tasks in the second run
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    task2 = DummyOperator(dag=dag, task_id=task_id_2)
    dagbag = self._make_simple_dag_bag([dag])

    scheduler = SchedulerJob(**self.default_scheduler_args)
    scheduler.max_tis_per_query = 3
    session = settings.Session()

    tis = []
    for i in range(0, 4):
        dr = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr.execution_date)
        ti2 = TI(task2, dr.execution_date)
        tis.append(ti1)
        tis.append(ti2)
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti1.state = State.SCHEDULED
        ti2.state = State.SCHEDULED
        session.merge(ti1)
        session.merge(ti2)
        session.commit()
    res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])

    self.assertEqual(8, res)
    for ti in tis:
        ti.refresh_from_db()
        self.assertEqual(State.QUEUED, ti.state)
def test_change_state_for_tis_without_dagrun(self):
    """TIs whose dag run is missing or not RUNNING are reset to the new
    state; TIs backed by a RUNNING dag run are left untouched."""
    dag = DAG(
        dag_id='test_change_state_for_tis_without_dagrun',
        start_date=DEFAULT_DATE)

    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    dag2 = DAG(
        dag_id='test_change_state_for_tis_without_dagrun_dont_change',
        start_date=DEFAULT_DATE)

    DummyOperator(
        task_id='dummy',
        dag=dag2,
        owner='airflow')

    dag3 = DAG(
        dag_id='test_change_state_for_tis_without_dagrun_no_dagrun',
        start_date=DEFAULT_DATE)

    DummyOperator(
        task_id='dummy',
        dag=dag3,
        owner='airflow')

    session = settings.Session()
    dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                           state=State.RUNNING,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE,
                           session=session)

    dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
                             state=State.RUNNING,
                             execution_date=DEFAULT_DATE,
                             start_date=DEFAULT_DATE,
                             session=session)

    ti = dr.get_task_instance(task_id='dummy', session=session)
    ti.state = State.SCHEDULED
    session.commit()

    ti2 = dr2.get_task_instance(task_id='dummy', session=session)
    ti2.state = State.SCHEDULED
    session.commit()

    # ti3 has no dag run at all.
    ti3 = TI(dag3.get_task('dummy'), DEFAULT_DATE)
    ti3.state = State.SCHEDULED
    session.merge(ti3)
    session.commit()

    dagbag = self._make_simple_dag_bag([dag, dag2, dag3])
    scheduler = SchedulerJob(num_runs=0, run_duration=0)
    scheduler._change_state_for_tis_without_dagrun(
        simple_dag_bag=dagbag,
        old_states=[State.SCHEDULED, State.QUEUED],
        new_state=State.NONE,
        session=session)

    # Both TIs with RUNNING dag runs keep their state.
    ti = dr.get_task_instance(task_id='dummy', session=session)
    ti.refresh_from_db(session=session)
    self.assertEqual(ti.state, State.SCHEDULED)

    ti2 = dr2.get_task_instance(task_id='dummy', session=session)
    ti2.refresh_from_db(session=session)
    self.assertEqual(ti2.state, State.SCHEDULED)

    # The dagrun-less TI is reset to NONE.
    # assertEqual, not the deprecated assertEquals alias (removed in py3.12).
    ti3.refresh_from_db(session=session)
    self.assertEqual(ti3.state, State.NONE)

    dr.refresh_from_db(session=session)
    dr.state = State.FAILED

    # why o why
    session.merge(dr)
    session.commit()

    scheduler._change_state_for_tis_without_dagrun(
        simple_dag_bag=dagbag,
        old_states=[State.SCHEDULED, State.QUEUED],
        new_state=State.NONE,
        session=session)
    # Now that dr is FAILED, its TI is reset too.
    ti.refresh_from_db(session=session)
    self.assertEqual(ti.state, State.NONE)

    # don't touch ti2
    ti2.refresh_from_db(session=session)
    self.assertEqual(ti2.state, State.SCHEDULED)
def test_execute_helper_reset_orphaned_tasks(self):
    """A scheduler pass resets orphaned SCHEDULED TIs of regular runs to
    NONE, but leaves backfill-run TIs alone."""
    session = settings.Session()
    dag = DAG(
        'test_execute_helper_reset_orphaned_tasks',
        start_date=DEFAULT_DATE,
        default_args={'owner': 'owner1'})

    with dag:
        op1 = DummyOperator(task_id='op1')

    dag.clear()
    dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                           state=State.RUNNING,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE,
                           session=session)
    # Backfill run: its TIs must not be reset.
    dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
                            state=State.RUNNING,
                            execution_date=DEFAULT_DATE + datetime.timedelta(1),
                            start_date=DEFAULT_DATE,
                            session=session)
    ti = dr.get_task_instance(task_id=op1.task_id, session=session)
    ti.state = State.SCHEDULED
    ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
    ti2.state = State.SCHEDULED
    session.commit()

    processor = mock.MagicMock()
    processor.get_last_finish_time.return_value = None

    scheduler = SchedulerJob(num_runs=0, run_duration=0)
    executor = TestExecutor()
    scheduler.executor = executor

    scheduler._execute_helper(processor_manager=processor)

    ti = dr.get_task_instance(task_id=op1.task_id, session=session)
    self.assertEqual(ti.state, State.NONE)

    ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
    self.assertEqual(ti2.state, State.SCHEDULED)
@provide_session
def evaluate_dagrun(
        self,
        dag_id,
        expected_task_states,  # dict of task_id: state
        dagrun_state,
        run_kwargs=None,
        advance_execution_date=False,
        session=None):
    """
    Helper for testing DagRun states with simple two-task DAGS.
    This is hackish: a dag run is created but its tasks are
    run by a backfill.

    :param dag_id: id of the DAG (from self.dagbag) to evaluate
    :param expected_task_states: dict of task_id -> expected final state
    :param dagrun_state: expected final state of the dag run
    :param run_kwargs: extra keyword args forwarded to dag.run()
    :param advance_execution_date: create a second run so the evaluated
        execution_date is after the DAG's start_date
    :param session: DB session injected by @provide_session
    """
    if run_kwargs is None:
        run_kwargs = {}

    scheduler = SchedulerJob(**self.default_scheduler_args)
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    dr = scheduler.create_dag_run(dag)

    if advance_execution_date:
        # run a second time to schedule a dagrun after the start_date
        dr = scheduler.create_dag_run(dag)
    ex_date = dr.execution_date

    try:
        # Failing tasks make dag.run() raise; the states are checked below.
        dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
    except AirflowException:
        pass

    # test tasks
    for task_id, expected_state in expected_task_states.items():
        task = dag.get_task(task_id)
        ti = TI(task, ex_date)
        ti.refresh_from_db()
        self.assertEqual(ti.state, expected_state)

    # load dagrun
    dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
    dr = dr[0]
    dr.dag = dag

    self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
    """
    DagRuns with one failed and one incomplete root task -> FAILED
    """
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_fail',
        expected_task_states={
            'test_dagrun_fail': State.FAILED,
            'test_dagrun_succeed': State.UPSTREAM_FAILED,
        },
        dagrun_state=State.FAILED)
def test_dagrun_success(self):
    """
    DagRuns with one failed and one successful root task -> SUCCESS
    """
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_success',
        expected_task_states={
            'test_dagrun_fail': State.FAILED,
            'test_dagrun_succeed': State.SUCCESS,
        },
        dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
    """
    DagRuns with one successful and one failed root task -> FAILED
    """
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_root_fail',
        expected_task_states={
            'test_dagrun_succeed': State.SUCCESS,
            'test_dagrun_fail': State.FAILED,
        },
        dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
    """
    DagRuns with one unfinished and one failed root task -> RUNNING
    """
    # Run both the failed and successful tasks
    scheduler = SchedulerJob(**self.default_scheduler_args)
    dag_id = 'test_dagrun_states_root_fail_unfinished'
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    try:
        dag.run(start_date=dr.execution_date, end_date=dr.execution_date)
    except AirflowException:  # Expect an exception since there is a failed task
        pass

    # Mark the successful task as never having run since we want to see if the
    # dagrun will be in a running state despite having an unfinished task.
    session = settings.Session()
    ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
    ti.state = State.NONE
    session.commit()
    dr_state = dr.update_state()
    self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
    """
    DagRun is marked a success if ignore_first_depends_on_past=True

    Test that an otherwise-deadlocked dagrun is marked as a success
    if ignore_first_depends_on_past=True and the dagrun execution_date
    is after the start_date.
    """
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_deadlock',
        expected_task_states={
            'test_depends_on_past': State.SUCCESS,
            'test_depends_on_past_2': State.SUCCESS,
        },
        dagrun_state=State.SUCCESS,
        advance_execution_date=True,
        run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
    """
    Test that ignore_first_depends_on_past doesn't affect results
    (this is the same test as
    test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
    that start_date == execution_date so depends_on_past is irrelevant).
    """
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_deadlock',
        expected_task_states={
            'test_depends_on_past': State.SUCCESS,
            'test_depends_on_past_2': State.SUCCESS,
        },
        dagrun_state=State.SUCCESS,
        run_kwargs=dict(ignore_first_depends_on_past=True))
def test_scheduler_start_date(self):
    """
    Test that the scheduler respects start_dates, even when DAGS have run
    """
    dag_id = 'test_start_date_scheduling'
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    # The test DAG starts in the future relative to DEFAULT_DATE.
    self.assertTrue(dag.start_date > DEFAULT_DATE)

    scheduler = SchedulerJob(dag_id,
                             num_runs=2,
                             **self.default_scheduler_args)
    scheduler.run()

    # zero tasks ran
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)

    # previously, running this backfill would kick off the Scheduler
    # because it would take the most recent run and start from there
    # That behavior still exists, but now it will only do so if after the
    # start date
    backfill = BackfillJob(
        dag=dag,
        start_date=DEFAULT_DATE,
        end_date=DEFAULT_DATE)
    backfill.run()

    # one task ran
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)

    scheduler = SchedulerJob(dag_id,
                             num_runs=2,
                             **self.default_scheduler_args)
    scheduler.run()

    # still one task
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_scheduler_multiprocessing(self):
    """
    Test that the scheduler can successfully queue multiple dags in parallel
    """
    dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
    for dag_id in dag_ids:
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()

    scheduler = SchedulerJob(dag_ids=dag_ids,
                             file_process_interval=0,
                             processor_poll_interval=0.5,
                             num_runs=2)
    scheduler.run()

    # zero tasks ran: test_start_date_scheduling starts in the future.
    dag_id = 'test_start_date_scheduling'
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
    """
    Test if the scheduler does not create multiple dagruns
    if a dag is scheduled with @once and a start_date
    """
    dag = DAG(
        'test_scheduler_dagrun_once',
        start_date=timezone.datetime(2015, 1, 1),
        schedule_interval="@once")

    scheduler = SchedulerJob()
    dag.clear()
    # The first attempt produces a run; a second attempt must not.
    first_run = scheduler.create_dag_run(dag)
    self.assertIsNotNone(first_run)
    second_run = scheduler.create_dag_run(dag)
    self.assertIsNone(second_run)
def test_scheduler_process_task_instances(self):
    """
    Test if _process_task_instances puts the right task instances into the
    queue.
    """
    dag = DAG(
        dag_id='test_scheduler_process_execute_task',
        start_date=DEFAULT_DATE)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)

    queue = mock.Mock()
    scheduler._process_task_instances(dag, queue=queue)

    # The scheduler appends (dag_id, task_id, execution_date) keys.
    queue.append.assert_called_with(
        (dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
    )
def test_scheduler_do_not_schedule_removed_task(self):
    """A task removed from the DAG must not be scheduled again."""
    dag = DAG(
        dag_id='test_scheduler_do_not_schedule_removed_task',
        start_date=DEFAULT_DATE)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)

    # Re-create the DAG without the task to simulate its removal.
    dag = DAG(
        dag_id='test_scheduler_do_not_schedule_removed_task',
        start_date=DEFAULT_DATE)

    queue = mock.Mock()
    scheduler._process_task_instances(dag, queue=queue)

    # The scheduler schedules via queue.append (see
    # test_scheduler_process_task_instances); asserting on queue.put was
    # vacuous because the mock's put is never used at all.
    queue.append.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
    """A DAG whose start_date lies in the future must not be scheduled."""
    dag = DAG(
        dag_id='test_scheduler_do_not_schedule_too_early',
        start_date=timezone.datetime(2200, 1, 1))
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    # No run can be created before the start date.
    self.assertIsNone(dr)

    queue = mock.Mock()
    scheduler._process_task_instances(dag, queue=queue)

    # The scheduler schedules via queue.append; asserting on queue.put
    # was vacuous because the mock's put is never used at all.
    queue.append.assert_not_called()
def test_scheduler_do_not_run_finished(self):
    """TIs already in SUCCESS must not be scheduled again."""
    dag = DAG(
        dag_id='test_scheduler_do_not_run_finished',
        start_date=DEFAULT_DATE)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()

    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)

    # Mark every TI of the run as already finished.
    tis = dr.get_task_instances(session=session)
    for ti in tis:
        ti.state = State.SUCCESS

    session.commit()
    session.close()

    queue = mock.Mock()
    scheduler._process_task_instances(dag, queue=queue)

    # The scheduler schedules via queue.append; asserting on queue.put
    # was vacuous because the mock's put is never used at all.
    queue.append.assert_not_called()
def test_scheduler_add_new_task(self):
    """
    Test if a task instance will be added if the dag is updated
    """
    dag = DAG(
        dag_id='test_scheduler_add_new_task',
        start_date=DEFAULT_DATE)

    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag.clear()

    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)

    # The single-task DAG starts with exactly one task instance.
    tis = dr.get_task_instances()
    self.assertEquals(len(tis), 1)

    # Add a second task to the already-created DAG; processing the DAG again
    # should create a task instance for it on the existing DagRun.
    dag_task2 = DummyOperator(
        task_id='dummy2',
        dag=dag,
        owner='airflow')

    queue = mock.Mock()
    scheduler._process_task_instances(dag, queue=queue)

    tis = dr.get_task_instances()
    self.assertEquals(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
    """
    Test that a dagrun will not be scheduled if max_active_runs has been reached
    """
    dag = DAG(
        dag_id='test_scheduler_verify_max_active_runs',
        start_date=DEFAULT_DATE)
    dag.max_active_runs = 1

    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag.clear()

    # First run is created normally ...
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)

    # ... the second is refused because max_active_runs=1 is already reached.
    dr = scheduler.create_dag_run(dag)
    self.assertIsNone(dr)
def test_scheduler_fail_dagrun_timeout(self):
    """
    Test that a dagrun will be set to FAILED once its dagrun_timeout elapses
    """
    dag = DAG(
        dag_id='test_scheduler_fail_dagrun_timeout',
        start_date=DEFAULT_DATE)
    dag.dagrun_timeout = datetime.timedelta(seconds=60)

    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()

    scheduler = SchedulerJob()
    dag.clear()

    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)

    # Backdate the run's start so it is well past the 60s dagrun_timeout.
    dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
    session.merge(dr)
    session.commit()

    # Creating the next run is the trigger that should time out the old one.
    dr2 = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr2)

    dr.refresh_from_db(session=session)
    self.assertEquals(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
    """
    Test that a dagrun will not be scheduled if max_active_runs has been
    reached and dagrun_timeout has not elapsed, but that a dagrun will be
    scheduled if max_active_runs has been reached and dagrun_timeout has
    also elapsed.
    """
    dag = DAG(
        dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
        start_date=DEFAULT_DATE)
    dag.max_active_runs = 1
    dag.dagrun_timeout = datetime.timedelta(seconds=60)

    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag.clear()

    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)

    # Should not be scheduled as DagRun has not timedout and max_active_runs is reached
    new_dr = scheduler.create_dag_run(dag)
    self.assertIsNone(new_dr)

    # Should be scheduled as dagrun_timeout has passed
    dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
    session.merge(dr)
    session.commit()
    new_dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(new_dr)
def test_scheduler_max_active_runs_respected_after_clear(self):
    """
    Test if _process_task_instances only schedules ti's up to max_active_runs
    (related to issue AIRFLOW-137)
    """
    dag = DAG(
        dag_id='test_scheduler_max_active_runs_respected_after_clear',
        start_date=DEFAULT_DATE)
    dag.max_active_runs = 3

    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag.clear()

    # First create up to 3 dagruns in RUNNING state.
    # BUGFIX: the comment promised three runs but only one create_dag_run()
    # call existed, which made the max_active_runs=1 reduction below vacuous.
    for _ in range(3):
        scheduler.create_dag_run(dag)

    # Reduce max_active_runs to 1
    dag.max_active_runs = 1

    queue = mock.Mock()
    # and schedule them in, so we can check how many
    # tasks are put on the queue (should be one, not 3)
    scheduler._process_task_instances(dag, queue=queue)

    # Only the earliest run (execution_date == DEFAULT_DATE) may be scheduled.
    queue.append.assert_called_with(
        (dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
    )
@patch.object(TI, 'pool_full')
def test_scheduler_verify_pool_full(self, mock_pool_full):
    """
    Test task instances not queued when pool is full
    """
    # TI.pool_full() is forced to report "not full" — presumably so the TIs
    # pass the per-task check and the executor-stage slot accounting is the
    # thing actually exercised below.  TODO(review): confirm.
    mock_pool_full.return_value = False

    dag = DAG(
        dag_id='test_scheduler_verify_pool_full',
        start_date=DEFAULT_DATE)

    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow',
        pool='test_scheduler_verify_pool_full')

    session = settings.Session()
    # A pool with a single slot, matching the operator's pool name above.
    pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
    session.add(pool)
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()

    scheduler = SchedulerJob()
    dag.clear()

    # Create 2 dagruns, which will create 2 task instances.
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    self.assertEquals(dr.execution_date, DEFAULT_DATE)
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    queue = []
    scheduler._process_task_instances(dag, queue=queue)
    self.assertEquals(len(queue), 2)
    dagbag = self._make_simple_dag_bag([dag])

    # Recreated part of the scheduler here, to kick off tasks -> executor
    for ti_key in queue:
        task = dag.get_task(ti_key[1])
        ti = TI(task, ti_key[2])
        # Task starts out in the scheduled state. All tasks in the
        # scheduled state will be sent to the executor
        ti.state = State.SCHEDULED

        # Also save this task instance to the DB.
        session.merge(ti)
        session.commit()

    scheduler._execute_task_instances(dagbag,
                                      (State.SCHEDULED,
                                       State.UP_FOR_RETRY))

    # With one pool slot, only one of the two TIs may reach the executor.
    self.assertEquals(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
    """
    Test if the schedule_interval will be auto aligned with the start_date
    such that if the start_date coincides with the schedule the first
    execution_date will be start_date, otherwise it will be start_date +
    interval.
    """
    # Case 1: start_date (10:10) does NOT coincide with the cron "4 5 * * *",
    # so the first run is the next matching slot after the start_date.
    dag = DAG(
        dag_id='test_scheduler_auto_align_1',
        start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
        schedule_interval="4 5 * * *"
    )
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()

    scheduler = SchedulerJob()
    dag.clear()

    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    self.assertEquals(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))

    # Case 2: start_date (10:10) coincides exactly with "10 10 * * *",
    # so the first execution_date equals the start_date itself.
    dag = DAG(
        dag_id='test_scheduler_auto_align_2',
        start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
        schedule_interval="10 10 * * *"
    )
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()

    scheduler = SchedulerJob()
    dag.clear()

    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    self.assertEquals(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
    """
    Checks if tasks that are not taken up by the executor
    get rescheduled
    """
    executor = TestExecutor()

    dagbag = DagBag(executor=executor)
    dagbag.dags.clear()
    dagbag.executor = executor

    dag = DAG(
        dag_id='test_scheduler_reschedule',
        start_date=DEFAULT_DATE)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    dag.clear()
    dag.is_subdag = False

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()

    dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)

    # DagBag is patched so the scheduler sees exactly the dagbag built above,
    # regardless of what is on disk.
    @mock.patch('airflow.models.DagBag', return_value=dagbag)
    @mock.patch('airflow.models.DagBag.collect_dags')
    def do_schedule(function, function2):
        # Use a empty file since the above mock will return the
        # expected DAGs. Also specify only a single file so that it doesn't
        # try to schedule the above DAG repeatedly.
        scheduler = SchedulerJob(num_runs=1,
                                 executor=executor,
                                 subdir=os.path.join(settings.DAGS_FOLDER,
                                                     "no_dags.py"))
        scheduler.heartrate = 0
        scheduler.run()

    do_schedule()
    self.assertEquals(1, len(executor.queued_tasks))
    executor.queued_tasks.clear()

    # The executor never ran the queued task; a second scheduler pass should
    # queue work again rather than leave the task in limbo.
    do_schedule()
    self.assertEquals(2, len(executor.queued_tasks))
def test_scheduler_sla_miss_callback(self):
    """
    Test that the scheduler does not call the sla_miss_callback when a
    notification has already been sent
    """
    session = settings.Session()

    # Mock the callback function so we can verify that it was not called
    sla_callback = mock.MagicMock()

    # Create dag with a start of 2 days ago, but an sla of 1 day
    # ago so we'll already have an sla_miss on the books
    test_start_date = days_ago(2)
    dag = DAG(dag_id='test_sla_miss',
              sla_miss_callback=sla_callback,
              default_args={'start_date': test_start_date,
                            'sla': datetime.timedelta(days=1)})

    task = DummyOperator(task_id='dummy',
                         dag=dag,
                         owner='airflow')

    # Create a TaskInstance for two days ago
    session.merge(models.TaskInstance(task=task,
                                      execution_date=test_start_date,
                                      state='success'))

    # Create an SlaMiss where notification was sent, but email was not
    session.merge(models.SlaMiss(task_id='dummy',
                                 dag_id='test_sla_miss',
                                 execution_date=test_start_date,
                                 email_sent=False,
                                 notification_sent=True))

    # Now call manage_slas and see if the sla_miss callback gets called
    scheduler = SchedulerJob(dag_id='test_sla_miss',
                             num_runs=1,
                             **self.default_scheduler_args)
    scheduler.manage_slas(dag=dag, session=session)

    # notification_sent=True above means the callback must be skipped.
    sla_callback.assert_not_called()
def test_retry_still_in_executor(self):
    """
    Checks if the scheduler does not put a task in limbo, when a task is retried
    but is still present in the executor.
    """
    executor = TestExecutor()
    dagbag = DagBag(executor=executor)
    dagbag.dags.clear()
    dagbag.executor = executor

    dag = DAG(
        dag_id='test_retry_still_in_executor',
        start_date=DEFAULT_DATE,
        schedule_interval="@once")
    dag_task1 = BashOperator(
        task_id='test_retry_handling_op',
        bash_command='exit 1',
        retries=1,
        dag=dag,
        owner='airflow')

    dag.clear()
    dag.is_subdag = False

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()

    dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)

    # DagBag is patched so the scheduler sees exactly the dagbag built above.
    @mock.patch('airflow.models.DagBag', return_value=dagbag)
    @mock.patch('airflow.models.DagBag.collect_dags')
    def do_schedule(function, function2):
        # Use a empty file since the above mock will return the
        # expected DAGs. Also specify only a single file so that it doesn't
        # try to schedule the above DAG repeatedly.
        scheduler = SchedulerJob(num_runs=1,
                                 executor=executor,
                                 subdir=os.path.join(settings.DAGS_FOLDER,
                                                     "no_dags.py"))
        scheduler.heartrate = 0
        scheduler.run()

    do_schedule()
    self.assertEquals(1, len(executor.queued_tasks))

    def run_with_error(task):
        # Run the TI and swallow the expected failure of `exit 1`.
        try:
            task.run()
        except AirflowException:
            pass

    ti_tuple = six.next(six.itervalues(executor.queued_tasks))
    (command, priority, queue, ti) = ti_tuple
    ti.task = dag_task1

    self.assertEqual(ti.try_number, 1)
    # fail execution
    run_with_error(ti)
    self.assertEqual(ti.state, State.UP_FOR_RETRY)
    self.assertEqual(ti.try_number, 2)

    ti.refresh_from_db(lock_for_update=True, session=session)
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()

    # do not schedule: the task is still sitting in the executor's queue.
    do_schedule()
    self.assertTrue(executor.has_task(ti))
    ti.refresh_from_db()
    self.assertEqual(ti.state, State.SCHEDULED)

    # now the executor has cleared and it should be allowed the re-queue
    executor.queued_tasks.clear()
    do_schedule()
    ti.refresh_from_db()
    self.assertEqual(ti.state, State.QUEUED)
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
    """
    Integration test of the scheduler not accidentally resetting
    the try_numbers for a task
    """
    dag = self.dagbag.get_dag('test_retry_handling_job')
    dag_task1 = dag.get_task("test_retry_handling_op")
    dag.clear()

    scheduler = SchedulerJob(dag_id=dag.dag_id,
                             num_runs=1)
    scheduler.heartrate = 0
    scheduler.run()

    session = settings.Session()
    ti = session.query(TI).filter(TI.dag_id == dag.dag_id,
                                  TI.task_id == dag_task1.task_id).first()

    # make sure the counter has increased
    self.assertEqual(ti.try_number, 2)
    self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_scheduler_run_duration(self):
    """
    Verifies that the scheduler run duration limit is followed.
    """
    dag_id = 'test_start_date_scheduling'
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    # The DAG's start_date is in the future, so the scheduler has nothing to
    # do and should exit purely because run_duration elapses.
    self.assertTrue(dag.start_date > DEFAULT_DATE)

    expected_run_duration = 5
    start_time = timezone.utcnow()
    scheduler = SchedulerJob(dag_id,
                             run_duration=expected_run_duration,
                             **self.default_scheduler_args)
    scheduler.run()
    end_time = timezone.utcnow()

    run_duration = (end_time - start_time).total_seconds()
    logging.info("Test ran in %.2fs, expected %.2fs",
                 run_duration,
                 expected_run_duration)
    # Allow up to 5s of overrun for process startup/shutdown overhead.
    self.assertLess(run_duration - expected_run_duration, 5.0)
def test_dag_with_system_exit(self):
    """
    Test to check that a DAG with a system.exit() doesn't break the scheduler.
    """
    dag_id = 'exit_test_dag'
    dag_ids = [dag_id]
    dag_directory = os.path.join(settings.DAGS_FOLDER,
                                 "..",
                                 "dags_with_system_exit")
    dag_file = os.path.join(dag_directory,
                            'b_test_scheduler_dags.py')

    dagbag = DagBag(dag_folder=dag_file)
    for dag_id in dag_ids:
        dag = dagbag.get_dag(dag_id)
        dag.clear()

    scheduler = SchedulerJob(dag_ids=dag_ids,
                             subdir=dag_directory,
                             num_runs=1,
                             **self.default_scheduler_args)
    scheduler.run()

    # The scheduler must have survived the exiting DAG file and still have
    # scheduled one task instance for the test DAG.
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
    """
    Test to check that a DAG returns its active runs
    """
    now = timezone.utcnow()
    six_hours_ago_to_the_hour = (
        now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)

    START_DATE = six_hours_ago_to_the_hour
    DAG_NAME1 = 'get_active_runs_test'

    default_args = {
        'owner': 'airflow',
        'depends_on_past': False,
        'start_date': START_DATE
    }
    dag1 = DAG(DAG_NAME1,
               schedule_interval='* * * * *',
               max_active_runs=1,
               default_args=default_args
               )

    # A simple three-task chain: run_this_1 >> run_this_2 >> run_this_3.
    run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
    run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
    run_this_2.set_upstream(run_this_1)
    run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
    run_this_3.set_upstream(run_this_2)

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag1.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag1.clear()

    dr = scheduler.create_dag_run(dag1)

    # We had better get a dag run
    self.assertIsNotNone(dr)

    execution_date = dr.execution_date

    running_dates = dag1.get_active_runs()

    try:
        running_date = running_dates[0]
    except IndexError:
        # BUGFIX: this was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; only an empty result list is a
        # legitimate failure mode here.
        running_date = 'Except'

    self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_dag_catchup_option(self):
    """
    Test to check that a DAG with catchup = False only schedules beginning
    now, not back to the start date
    """
    now = timezone.utcnow()
    six_hours_ago_to_the_hour = (
        now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
    three_minutes_ago = now - datetime.timedelta(minutes=3)
    two_hours_and_three_minutes_ago = three_minutes_ago - datetime.timedelta(hours=2)

    START_DATE = six_hours_ago_to_the_hour
    DAG_NAME1 = 'no_catchup_test1'
    DAG_NAME2 = 'no_catchup_test2'
    DAG_NAME3 = 'no_catchup_test3'
    DAG_NAME4 = 'no_catchup_test4'

    default_args = {
        'owner': 'airflow',
        'depends_on_past': False,
        'start_date': START_DATE
    }
    dag1 = DAG(DAG_NAME1,
               schedule_interval='* * * * *',
               max_active_runs=1,
               default_args=default_args
               )

    default_catchup = configuration.getboolean('scheduler', 'catchup_by_default')
    # Test configs have catchup by default ON
    self.assertEqual(default_catchup, True)

    # Correct default? dag1 did not pass catchup, so it inherits the config.
    self.assertEqual(dag1.catchup, True)

    # dag2: per-minute schedule with catchup disabled.
    dag2 = DAG(DAG_NAME2,
               schedule_interval='* * * * *',
               max_active_runs=1,
               catchup=False,
               default_args=default_args
               )

    run_this_1 = DummyOperator(task_id='run_this_1', dag=dag2)
    run_this_2 = DummyOperator(task_id='run_this_2', dag=dag2)
    run_this_2.set_upstream(run_this_1)
    run_this_3 = DummyOperator(task_id='run_this_3', dag=dag2)
    run_this_3.set_upstream(run_this_2)

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag2.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag2.clear()

    dr = scheduler.create_dag_run(dag2)

    # We had better get a dag run
    self.assertIsNotNone(dr)

    # The DR should be scheduled in the last 3 minutes, not 6 hours ago
    self.assertGreater(dr.execution_date, three_minutes_ago)

    # The DR should be scheduled BEFORE now
    self.assertLess(dr.execution_date, timezone.utcnow())

    # dag3: hourly schedule with catchup disabled.
    dag3 = DAG(DAG_NAME3,
               schedule_interval='@hourly',
               max_active_runs=1,
               catchup=False,
               default_args=default_args
               )

    run_this_1 = DummyOperator(task_id='run_this_1', dag=dag3)
    run_this_2 = DummyOperator(task_id='run_this_2', dag=dag3)
    run_this_2.set_upstream(run_this_1)
    run_this_3 = DummyOperator(task_id='run_this_3', dag=dag3)
    run_this_3.set_upstream(run_this_2)

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag3.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag3.clear()

    dr = None
    dr = scheduler.create_dag_run(dag3)

    # We had better get a dag run
    self.assertIsNotNone(dr)

    # The DR should be scheduled in the last two hours, not 6 hours ago
    self.assertGreater(dr.execution_date, two_hours_and_three_minutes_ago)

    # The DR should be scheduled BEFORE now
    self.assertLess(dr.execution_date, timezone.utcnow())

    # check @once schedule
    dag4 = DAG(DAG_NAME4,
               schedule_interval='@once',
               max_active_runs=1,
               catchup=False,
               default_args=default_args
               )

    run_this_1 = DummyOperator(task_id='run_this_1', dag=dag4)
    run_this_2 = DummyOperator(task_id='run_this_2', dag=dag4)
    run_this_2.set_upstream(run_this_1)
    run_this_3 = DummyOperator(task_id='run_this_3', dag=dag4)
    run_this_3.set_upstream(run_this_2)

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag4.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag4.clear()

    dr = None
    dr = scheduler.create_dag_run(dag4)

    # We had better get a dag run
    self.assertIsNotNone(dr)
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
    """A file that is unparseable before the scheduler first runs yields one ImportError row."""
    try:
        dags_folder = mkdtemp()
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    self.assertEqual(len(import_errors), 1)
    import_error = import_errors[0]
    self.assertEqual(import_error.filename,
                     unparseable_filename)
    self.assertEqual(import_error.stacktrace,
                     "invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
    """A file that becomes unparseable after a scheduler loop yields one ImportError row."""
    try:
        dags_folder = mkdtemp()
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        # First loop runs with an empty folder; the broken file is added after.
        self.run_single_scheduler_loop_with_no_dags(dags_folder)

        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    self.assertEqual(len(import_errors), 1)
    import_error = import_errors[0]
    self.assertEqual(import_error.filename,
                     unparseable_filename)
    self.assertEqual(import_error.stacktrace,
                     "invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
    """A syntactically valid DAG file must not create any ImportError rows."""
    try:
        dags_folder = mkdtemp()
        parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)

        with open(parseable_filename, 'w') as parseable_file:
            parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
    """A fresh parse error for the same file replaces the stored ImportError row."""
    try:
        dags_folder = mkdtemp()
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)

        # Generate original import error
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)

        # Generate replacement import error (the error will be on the second line now)
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(
                PARSEABLE_DAG_FILE_CONTENTS +
                os.linesep +
                UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    # Still exactly one row, now pointing at line 2 instead of line 1.
    self.assertEqual(len(import_errors), 1)
    import_error = import_errors[0]
    self.assertEqual(import_error.filename,
                     unparseable_filename)
    self.assertEqual(import_error.stacktrace,
                     "invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
    """Fixing the syntax error in a file clears its stored ImportError row."""
    try:
        dags_folder = mkdtemp()
        filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)

        # Generate original import error
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)

        # Remove the import error from the file
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(
                PARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
    """Deleting a broken DAG file clears its stored ImportError row."""
    try:
        dags_folder = mkdtemp()
        filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)

        # Generate original import error
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    # Rerun the scheduler once the dag file has been removed
    self.run_single_scheduler_loop_with_no_dags(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
    """
    [JIRA-1357] Test the 'list_py_file_paths' function used by the
    scheduler to list and load DAGs.
    """
    # Every .py/.zip file in the test DAGs folder is expected, except the
    # deliberately empty no_dags.py fixture.
    expected_files = [
        '{}/{}'.format(TEST_DAGS_FOLDER, entry)
        for entry in os.listdir(TEST_DAGS_FOLDER)
        if entry.endswith(('.py', '.zip')) and entry != 'no_dags.py'
    ]

    detected_files = list(list_py_file_paths(TEST_DAGS_FOLDER))

    self.assertEqual(sorted(detected_files), sorted(expected_files))
def test_reset_orphaned_tasks_nothing(self):
    """Try with nothing. """
    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    # With no dagruns or task instances, nothing can be reset.
    reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
    self.assertEqual(0, len(reset_tis))
def test_reset_orphaned_tasks_external_triggered_dag(self):
    """Orphaned TIs of an externally-triggered running dagrun are reset."""
    dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag, session=session)
    ti = dr1.get_task_instances(session=session)[0]
    # A SCHEDULED TI on a RUNNING, externally-triggered run counts as orphaned.
    dr1.state = State.RUNNING
    ti.state = State.SCHEDULED
    dr1.external_trigger = True
    session.merge(ti)
    session.merge(dr1)
    session.commit()

    reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
    self.assertEquals(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
    """TIs belonging to a backfill dagrun must not be reset."""
    dag_id = 'test_reset_orphaned_tasks_backfill_dag'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag, session=session)
    ti = dr1.get_task_instances(session=session)[0]
    ti.state = State.SCHEDULED
    dr1.state = State.RUNNING
    # A run_id with the BackfillJob prefix marks the run as a backfill.
    dr1.run_id = BackfillJob.ID_PREFIX + '_sdfsfdfsd'
    session.merge(ti)
    session.merge(dr1)
    session.commit()

    self.assertTrue(dr1.is_backfill)
    self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
    """Try to reset when we specify a dagrun and ensure nothing else is."""
    dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    # make two dagruns, only reset for one
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr1.state = State.SUCCESS
    dr2.state = State.RUNNING
    ti1 = dr1.get_task_instances(session=session)[0]
    ti2 = dr2.get_task_instances(session=session)[0]
    ti1.state = State.SCHEDULED
    ti2.state = State.SCHEDULED

    session.merge(ti1)
    session.merge(ti2)
    session.merge(dr1)
    session.merge(dr2)
    session.commit()

    reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
    self.assertEquals(1, len(reset_tis))

    ti1.refresh_from_db(session=session)
    ti2.refresh_from_db(session=session)
    # Only dr2's TI was reset (to NONE); dr1's TI keeps its state.
    self.assertEquals(State.SCHEDULED, ti1.state)
    self.assertEquals(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
    """Make sure a task in an orphaned state is not reset if it has no dagrun. """
    dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    # The TI is created directly, without any dagrun backing it.
    ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
    session.add(ti)
    session.commit()

    ti.refresh_from_db()
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()

    self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
    """A RUNNING task instance on a RUNNING dagrun is not an orphan."""
    dag_id = 'test_reset_orphaned_tasks_no_orphans'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    dr1.state = State.RUNNING
    tis = dr1.get_task_instances(session=session)
    tis[0].state = State.RUNNING
    session.merge(dr1)
    session.merge(tis[0])
    session.commit()

    self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
    tis[0].refresh_from_db()
    self.assertEquals(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
    """Ensure orphaned tasks with non-running dagruns are not reset."""
    dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    # The run has finished (SUCCESS), so its SCHEDULED TI is left alone.
    dr1.state = State.SUCCESS
    tis = dr1.get_task_instances(session=session)
    self.assertEquals(1, len(tis))
    tis[0].state = State.SCHEDULED
    session.merge(dr1)
    session.merge(tis[0])
    session.commit()

    self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
    """Create dagruns and ensure only ones with correct states are reset."""
    prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
    states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
    states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]

    dag = DAG(dag_id=prefix,
              start_date=DEFAULT_DATE,
              schedule_interval="@daily")
    # One task per state under test.
    tasks = []
    for i in range(len(states)):
        task_id = "{}_task_{}".format(prefix, i)
        task = DummyOperator(task_id=task_id, dag=dag)
        tasks.append(task)

    scheduler = SchedulerJob(**self.default_scheduler_args)

    session = settings.Session()

    # create dagruns
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr1.state = State.RUNNING
    dr2.state = State.SUCCESS
    session.merge(dr1)
    session.merge(dr2)
    session.commit()

    # create taskinstances and set states
    dr1_tis = []
    dr2_tis = []
    for i, (task, state) in enumerate(zip(tasks, states)):
        ti1 = TI(task, dr1.execution_date)
        ti2 = TI(task, dr2.execution_date)
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti1.state = state
        ti2.state = state
        dr1_tis.append(ti1)
        dr2_tis.append(ti2)
        session.merge(ti1)
        session.merge(ti2)
    session.commit()

    # 2 resets expected for the RUNNING dagrun — presumably the QUEUED and
    # SCHEDULED TIs, as a NONE TI has no state to clear.  TODO(review): confirm.
    self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))

    for ti in dr1_tis + dr2_tis:
        ti.refresh_from_db()

    # running dagrun should be reset
    for state, ti in zip(states, dr1_tis):
        if state in states_to_reset:
            self.assertIsNone(ti.state)
        else:
            self.assertEqual(state, ti.state)

    # otherwise not
    for state, ti in zip(states, dr2_tis):
        self.assertEqual(state, ti.state)

    for state, ti in zip(states, dr1_tis):
        ti.state = state
    session.commit()

    scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)

    # check same for dag_run version
    for state, ti in zip(states, dr2_tis):
        self.assertEqual(state, ti.state)

    session.close()
| 35.308456 | 119 | 0.61067 |
ace5942206ac91754fe910ce9ac6bba63c0cc4ab | 620 | py | Python | services/dy-3dvis/simcoreparaviewweb/docker/getport.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | null | null | null | services/dy-3dvis/simcoreparaviewweb/docker/getport.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | 2 | 2018-05-13T09:10:57.000Z | 2019-03-06T08:10:40.000Z | services/dy-3dvis/simcoreparaviewweb/docker/getport.py | oetiker/osparc-simcore | 00918bf8f000840cc70cc49458780a55858d52ea | [
"MIT"
] | null | null | null | import logging
from pathlib import Path
import time

log = logging.getLogger(__name__)

# File written by the server process once it has chosen a port.
_INPUT_FILE = Path(r"/home/root/trigger/server_port")
log.debug("looking for file %s", _INPUT_FILE)

# wait till the file exists
while not _INPUT_FILE.exists():
    time.sleep(2)
log.debug("File %s appeared", _INPUT_FILE)

with _INPUT_FILE.open() as fp:
    # BUGFIX: readline() keeps the trailing newline, which previously leaked
    # into `port`; strip it before splitting.
    hostname_port = fp.readline().strip()

# Split once instead of twice; limit to 1 so a stray extra colon ends up in
# the port field, matching the original [0]/[1] indexing.
hostname, port = hostname_port.split(":", 1)
log.debug("host and port are: %s:%s", hostname, port)

# output for shell
print("{hostname},{port}".format(hostname=hostname, port=port))
ace5946a0be7ec7917fedfbe4c10d584de03148e | 3,128 | py | Python | app/settings.py | fullsail-daveloper/activity24-2001 | de499e990fb9bfb6feba4172493f050115cff0b5 | [
"MIT"
] | null | null | null | app/settings.py | fullsail-daveloper/activity24-2001 | de499e990fb9bfb6feba4172493f050115cff0b5 | [
"MIT"
] | null | null | null | app/settings.py | fullsail-daveloper/activity24-2001 | de499e990fb9bfb6feba4172493f050115cff0b5 | [
"MIT"
] | null | null | null | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a^o$eh88(r)85q*7%4=prt8#_=+n99fsoirn2l8-j6uv*+@3q@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| 25.225806 | 91 | 0.686381 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.