Column schema (29 columns; "nullable" columns may contain null values):

| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

The rows below follow one record at a time: metadata, file content, then line statistics.
hexsha: 43e5588b3ab0e41f5558ef90e5cdfc3ea00f94c9 | size: 2,535 | ext: py | lang: Python
max_stars: precommit-pylint.py | zinok/precommit-pylint | head 217fc9d96aca4cbe9e799a4810a48eee142d958f | licenses ["MIT"] | count 1 | 2021-08-13T18:01:20.000Z to 2021-08-13T18:01:20.000Z
max_issues: precommit-pylint.py | zinok/precommit-pylint | head 217fc9d96aca4cbe9e799a4810a48eee142d958f | licenses ["MIT"] | count null | null to null
max_forks: precommit-pylint.py | zinok/precommit-pylint | head 217fc9d96aca4cbe9e799a4810a48eee142d958f | licenses ["MIT"] | count 3 | 2019-02-27T10:08:35.000Z to 2022-01-19T11:05:53.000Z
content:
#!/usr/bin/env python3
import argparse
import contextlib
import io
import os
import re
from pylint.lint import Run as pylint_run
_IGNORE_REGEXP = re.compile(
r'Ignoring entire file \(file-ignored\)'
)
def _check_ignore(pylint_output):
    """Check whether the Python file is ignored.
    Returns True if the file is ignored, False otherwise.
    """
for line in pylint_output.splitlines():
match = re.search(_IGNORE_REGEXP, line)
if match:
return True
return False
def check_file(limit, filename, output=False):
"""Check single file
:type limit: float
:param limit: Minimum score to pass the commit
:type filename: str
:param filename: Path to the file to check
:type output: bool
:param output: Show pylint output
"""
# Check if file to skip
if os.path.basename(filename) == '__init__.py' and os.stat(filename).st_size == 0:
        print(f'Skipping pylint on {filename} (empty __init__.py)..\tSKIPPED')
return True
# Start pylint
buffer = io.StringIO()
with contextlib.redirect_stdout(buffer):
with contextlib.redirect_stderr(buffer):
linter = pylint_run([filename], do_exit=False).linter
out = buffer.getvalue()
    # pylint doesn't score files without statements
score_missing = 0.0 if getattr(linter.stats, 'statement', False) else 10.0
# Verify the score
score = getattr(linter.stats, 'global_note', score_missing)
ignored = _check_ignore(out)
file_passed = ignored or score >= float(limit)
# Add some output
print('Running pylint on {}.. {:.2f}/10.00\t{}{}'.format(
filename, score,
'PASSED' if file_passed else 'FAILED',
'\tIGNORED' if ignored else ''
))
if output and score < 10:
print("{0}\n{1}{0}\n".format("=" * 80, out))
return file_passed
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*')
parser.add_argument(
'--limit', type=float, default=10,
help=(
'Score limit for pylint, defaults to `%(default)s`'
),
)
parser.add_argument(
'--output', action='store_true',
help=(
'Show pylint output, defaults to `%(default)s`'
),
)
args = parser.parse_args(argv)
# check files
for filename in args.filenames:
if not check_file(args.limit, filename, args.output):
return 1
return 0
if __name__ == '__main__':
exit(main())
avg_line_length: 25.09901 | max_line_length: 86 | alphanum_fraction: 0.631558
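As a usage sketch for the hook above: it is designed to be run over staged files from the command line, and exits non-zero when any file scores below the limit. The file names below are hypothetical; only the `--limit`/`--output` flags and the exit-code convention come from the script itself.

```python
import subprocess
import sys

# Run the hook against two hypothetical staged files; the script returns 1
# as soon as any file scores below the --limit threshold (here 8.0/10).
result = subprocess.run(
    [sys.executable, "precommit-pylint.py", "--limit", "8.0", "--output",
     "app.py", "utils.py"],
    check=False,
)
print("commit allowed" if result.returncode == 0 else "commit blocked")
```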
hexsha: 0f612e3d46d4f0ec55a9db47bdd8c53c3ee1389b | size: 909 | ext: py | lang: Python
max_stars: solutions/301_remove_invalid_parentheses.py | YiqunPeng/leetcode_pro | head 7e6376984f9baec49a5e827d98330fe3d1b656f0 | licenses ["MIT"] | count null | null to null
max_issues: solutions/301_remove_invalid_parentheses.py | YiqunPeng/leetcode_pro | head 7e6376984f9baec49a5e827d98330fe3d1b656f0 | licenses ["MIT"] | count null | null to null
max_forks: solutions/301_remove_invalid_parentheses.py | YiqunPeng/leetcode_pro | head 7e6376984f9baec49a5e827d98330fe3d1b656f0 | licenses ["MIT"] | count null | null to null
content:
from typing import List
class Solution:
    def removeInvalidParentheses(self, s: str) -> List[str]:
"""BFS
Running Time: O(n * 2^n) where n is the length of s.
"""
res = []
lvl = set([s])
while lvl:
nlvl = set()
for sstr in lvl:
if self._is_valid(sstr):
res.append(sstr)
if res:
return res
for sstr in lvl:
for i in range(len(sstr)):
if sstr[i] not in [')', '(']:
continue
nlvl.add(sstr[:i] + sstr[i+1:])
lvl = nlvl
return res
def _is_valid(self, a):
l = 0
for c in a:
if c == '(':
l += 1
elif c == ')':
l -= 1
if l < 0:
return False
return l == 0
avg_line_length: 26.735294 | max_line_length: 60 | alphanum_fraction: 0.353135
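A quick check of the BFS solution above, assuming the `Solution` class just shown is in scope; the example strings are illustrative and the ordering of the returned list is not guaranteed.

```python
sol = Solution()

# One removal suffices here; both minimal repairs are returned.
print(sol.removeInvalidParentheses("()())()"))  # e.g. ['(())()', '()()()']

# A string with no valid repair other than deleting everything collapses to ''.
print(sol.removeInvalidParentheses(")("))       # ['']
```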
hexsha: 601e2fbb37406087387e8ec36a539ef5816a0a16 | size: 10,211 | ext: py | lang: Python
max_stars: src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/__init__.py | viewdy/phantomjs | head eddb0db1d253fd0c546060a4555554c8ee08c13c | licenses ["BSD-3-Clause"] | count 1 | 2021-02-09T10:24:31.000Z to 2021-02-09T10:24:31.000Z
max_issues: src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/__init__.py | mrampersad/phantomjs | head dca6f77a36699eb4e1c46f7600cca618f01b0ac3 | licenses ["BSD-3-Clause"] | count null | null to null
max_forks: src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/__init__.py | mrampersad/phantomjs | head dca6f77a36699eb4e1c46f7600cca618f01b0ac3 | licenses ["BSD-3-Clause"] | count 1 | 2017-03-19T13:03:23.000Z to 2017-03-19T13:03:23.000Z
content:
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This module is required for Python to treat this directory as a package.
"""Autoinstalls third-party code required by WebKit."""
import codecs
import os
import sys
from webkitpy.common.system.autoinstall import AutoInstaller
from webkitpy.common.system.filesystem import FileSystem
_THIRDPARTY_DIR = os.path.dirname(__file__)
_AUTOINSTALLED_DIR = os.path.join(_THIRDPARTY_DIR, "autoinstalled")
# Putting the autoinstall code into webkitpy/thirdparty/__init__.py
# ensures that no autoinstalling occurs until a caller imports from
# webkitpy.thirdparty. This is useful if the caller wants to configure
# logging prior to executing autoinstall code.
# FIXME: If any of these servers is offline, webkit-patch breaks (and maybe
# other scripts do, too). See <http://webkit.org/b/42080>.
# We put auto-installed third-party modules in this directory--
#
# webkitpy/thirdparty/autoinstalled
fs = FileSystem()
fs.maybe_make_directory(_AUTOINSTALLED_DIR)
init_path = fs.join(_AUTOINSTALLED_DIR, "__init__.py")
if not fs.exists(init_path):
fs.write_text_file(init_path, "")
readme_path = fs.join(_AUTOINSTALLED_DIR, "README")
if not fs.exists(readme_path):
fs.write_text_file(readme_path,
"This directory is auto-generated by WebKit and is "
"safe to delete.\nIt contains needed third-party Python "
"packages automatically downloaded from the web.")
class AutoinstallImportHook(object):
def __init__(self, filesystem=None):
self._fs = filesystem or FileSystem()
def _ensure_autoinstalled_dir_is_in_sys_path(self):
        # Some packages require that they be put somewhere under a directory in sys.path.
if not _AUTOINSTALLED_DIR in sys.path:
sys.path.append(_AUTOINSTALLED_DIR)
def find_module(self, fullname, _):
# This method will run before each import. See http://www.python.org/dev/peps/pep-0302/
if '.autoinstalled' not in fullname:
return
# Note: all of the methods must follow the "_install_XXX" convention in
# order for autoinstall_everything(), below, to work properly.
if '.mechanize' in fullname:
self._install_mechanize()
elif '.pep8' in fullname:
self._install_pep8()
elif '.pylint' in fullname:
self._install_pylint()
elif '.coverage' in fullname:
self._install_coverage()
elif '.eliza' in fullname:
self._install_eliza()
elif '.irc' in fullname:
self._install_irc()
elif '.buildbot' in fullname:
self._install_buildbot()
elif '.webpagereplay' in fullname:
self._install_webpagereplay()
def _install_mechanize(self):
return self._install("http://pypi.python.org/packages/source/m/mechanize/mechanize-0.2.5.tar.gz",
"mechanize-0.2.5/mechanize")
def _install_pep8(self):
return self._install("http://pypi.python.org/packages/source/p/pep8/pep8-0.5.0.tar.gz#md5=512a818af9979290cd619cce8e9c2e2b",
"pep8-0.5.0/pep8.py")
def _install_pylint(self):
self._ensure_autoinstalled_dir_is_in_sys_path()
did_install_something = False
if (not self._fs.exists(self._fs.join(_AUTOINSTALLED_DIR, "pylint")) or
not self._fs.exists(self._fs.join(_AUTOINSTALLED_DIR, "logilab/astng")) or
not self._fs.exists(self._fs.join(_AUTOINSTALLED_DIR, "logilab/common"))):
installer = AutoInstaller(target_dir=_AUTOINSTALLED_DIR)
files_to_remove = []
if sys.platform == 'win32':
files_to_remove = ['test/data/write_protected_file.txt']
did_install_something = installer.install("http://pypi.python.org/packages/source/l/logilab-common/logilab-common-0.58.1.tar.gz#md5=77298ab2d8bb8b4af9219791e7cee8ce", url_subpath="logilab-common-0.58.1", target_name="logilab/common", files_to_remove=files_to_remove)
did_install_something |= installer.install("http://pypi.python.org/packages/source/l/logilab-astng/logilab-astng-0.24.1.tar.gz#md5=ddaf66e4d85714d9c47a46d4bed406de", url_subpath="logilab-astng-0.24.1", target_name="logilab/astng")
did_install_something |= installer.install('http://pypi.python.org/packages/source/p/pylint/pylint-0.25.1.tar.gz#md5=728bbc2b339bc3749af013709a7f87a5', url_subpath="pylint-0.25.1", target_name="pylint")
return did_install_something
# autoinstalled.buildbot is used by BuildSlaveSupport/build.webkit.org-config/mastercfg_unittest.py
# and should ideally match the version of BuildBot used at build.webkit.org.
def _install_buildbot(self):
# The buildbot package uses jinja2, for example, in buildbot/status/web/base.py.
# buildbot imports jinja2 directly (as though it were installed on the system),
# so the search path needs to include jinja2. We put jinja2 in
# its own directory so that we can include it in the search path
# without including other modules as a side effect.
jinja_dir = self._fs.join(_AUTOINSTALLED_DIR, "jinja2")
installer = AutoInstaller(append_to_search_path=True, target_dir=jinja_dir)
did_install_something = installer.install(url="http://pypi.python.org/packages/source/J/Jinja2/Jinja2-2.6.tar.gz#md5=1c49a8825c993bfdcf55bb36897d28a2",
url_subpath="Jinja2-2.6/jinja2")
SQLAlchemy_dir = self._fs.join(_AUTOINSTALLED_DIR, "sqlalchemy")
installer = AutoInstaller(append_to_search_path=True, target_dir=SQLAlchemy_dir)
did_install_something |= installer.install(url="http://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-0.7.7.tar.gz#md5=ddf6df7e014cea318fa981364f3f93b9",
url_subpath="SQLAlchemy-0.7.7/lib/sqlalchemy")
did_install_something |= self._install("http://pypi.python.org/packages/source/b/buildbot/buildbot-0.8.6p1.tar.gz#md5=b6727d2810c692062c657492bcbeac6a", "buildbot-0.8.6p1/buildbot")
return did_install_something
def _install_coverage(self):
self._ensure_autoinstalled_dir_is_in_sys_path()
return self._install(url="http://pypi.python.org/packages/source/c/coverage/coverage-3.5.1.tar.gz#md5=410d4c8155a4dab222f2bc51212d4a24", url_subpath="coverage-3.5.1/coverage")
def _install_eliza(self):
return self._install(url="http://www.adambarth.com/webkit/eliza", target_name="eliza.py")
def _install_irc(self):
# Since irclib and ircbot are two top-level packages, we need to import
# them separately. We group them into an irc package for better
# organization purposes.
irc_dir = self._fs.join(_AUTOINSTALLED_DIR, "irc")
installer = AutoInstaller(target_dir=irc_dir)
did_install_something = installer.install(url="http://downloads.sourceforge.net/project/python-irclib/python-irclib/0.4.8/python-irclib-0.4.8.zip",
url_subpath="irclib.py")
did_install_something |= installer.install(url="http://downloads.sourceforge.net/project/python-irclib/python-irclib/0.4.8/python-irclib-0.4.8.zip",
url_subpath="ircbot.py")
return did_install_something
def _install_unittest2(self):
self._ensure_autoinstalled_dir_is_in_sys_path()
return self._install(url="http://pypi.python.org/packages/source/u/unittest2/unittest2-0.5.1.tar.gz#md5=a0af5cac92bbbfa0c3b0e99571390e0f", url_subpath="unittest2-0.5.1/unittest2")
def _install_webpagereplay(self):
did_install_something = False
if not self._fs.exists(self._fs.join(_AUTOINSTALLED_DIR, "webpagereplay")):
did_install_something = self._install("http://web-page-replay.googlecode.com/files/webpagereplay-1.1.2.tar.gz", "webpagereplay-1.1.2")
self._fs.move(self._fs.join(_AUTOINSTALLED_DIR, "webpagereplay-1.1.2"), self._fs.join(_AUTOINSTALLED_DIR, "webpagereplay"))
module_init_path = self._fs.join(_AUTOINSTALLED_DIR, "webpagereplay", "__init__.py")
if not self._fs.exists(module_init_path):
self._fs.write_text_file(module_init_path, "")
return did_install_something
def _install(self, url, url_subpath=None, target_name=None):
installer = AutoInstaller(target_dir=_AUTOINSTALLED_DIR)
return installer.install(url=url, url_subpath=url_subpath, target_name=target_name)
_hook = AutoinstallImportHook()
sys.meta_path.append(_hook)
def autoinstall_everything():
install_methods = [method for method in dir(_hook.__class__) if method.startswith('_install_')]
did_install_something = False
for method in install_methods:
did_install_something |= getattr(_hook, method)()
return did_install_something
avg_line_length: 53.742105 | max_line_length: 278 | alphanum_fraction: 0.717168
hexsha: 520cddfeb122010d0d133bf66a62f4a6f71a957e | size: 394 | ext: py | lang: Python
max_stars: python_teste/python_aulas/aula_100.py | BrunoDantasMoreira/projectsPython | head bd73ab0b3c067456407f227ed2ece42e7f21ddfc | licenses ["MIT"] | count 1 | 2020-07-27T14:18:08.000Z to 2020-07-27T14:18:08.000Z
max_issues: python_teste/python_aulas/aula_100.py | BrunoDantasMoreira/projectsPython | head bd73ab0b3c067456407f227ed2ece42e7f21ddfc | licenses ["MIT"] | count null | null to null
max_forks: python_teste/python_aulas/aula_100.py | BrunoDantasMoreira/projectsPython | head bd73ab0b3c067456407f227ed2ece42e7f21ddfc | licenses ["MIT"] | count null | null to null
content:
from random import randint
lista = []
def sorteia():
    print('Drawing 5 values for the list: ', end='')
for c in range(0, 5):
s = randint(1, 10)
lista.append(s)
print(s, end=' ')
def somapar():
soma = 0
for a in lista:
if a % 2 == 0:
soma += a
    print(f'\nSumming the even values of {lista}, we get {soma}')
sorteia()
somapar()
avg_line_length: 16.416667 | max_line_length: 65 | alphanum_fraction: 0.530457
hexsha: 7346b62d9cb76bee842bc9e96024b35a72959148 | size: 2,038 | ext: py | lang: Python
max_stars: setup.py | motmot/pycamiface | head 1e9777ab77ebb5aa0495e35113eb83ee54c785c1 | licenses ["BSD-3-Clause"] | count 4 | 2015-01-17T13:39:48.000Z to 2018-08-20T17:02:34.000Z
max_issues: setup.py | motmot/pycamiface | head 1e9777ab77ebb5aa0495e35113eb83ee54c785c1 | licenses ["BSD-3-Clause"] | count null | null to null
max_forks: setup.py | motmot/pycamiface | head 1e9777ab77ebb5aa0495e35113eb83ee54c785c1 | licenses ["BSD-3-Clause"] | count null | null to null
content:
import setuptools # required for namespace_packages option, below
from distutils.core import setup
import os, sys
package_data={}
ext_modules = []
build_ctypes_based_wrappers = True
include_shlibs_for_ctypes = False
if sys.platform.startswith('linux'):
include_shlibs_for_ctypes = False
ctypes_backends = ['mega']
if build_ctypes_based_wrappers:
if include_shlibs_for_ctypes:
if sys.platform == 'win32':
prefix = 'cam_iface_'
extension = '.dll'
elif sys.platform.startswith('linux'):
prefix = 'libcam_iface_'
extension = '.so'
elif sys.platform.startswith('darwin'):
prefix = 'libcam_iface_'
extension = '.dylib'
else:
raise ValueError('unknown platform')
for backend in ctypes_backends:
fname = prefix+backend+extension
if not os.path.exists(os.path.join('cam_iface',fname)):
print '***** WARNING: Could not find file %s'%fname
package_data.setdefault('cam_iface',[]).append(fname)
if 0:
opj = os.path.join
CAMIFACE_PREFIX='../cam_iface'
include_dirs = [opj(CAMIFACE_PREFIX,'inc'),
opj(CAMIFACE_PREFIX,'shmwrap')]
libpath = os.path.abspath(opj(CAMIFACE_PREFIX,'lib'))
print 'WARNING: compiling without system install of camiface. You probably need to do this:'
print 'export LD_LIBRARY_PATH=%s'%libpath
else:
include_dirs = None
setup(name='motmot.cam_iface',
description='cross-platform, cross-backend camera driver',
      long_description="""cam_iface is the core package of several that
are involved with digital camera acquisition and analysis""",
url='http://code.astraw.com/projects/motmot/cam_iface.html',
version='0.9.0',
author='Andrew Straw',
author_email='strawman@astraw.com',
license="BSD",
namespace_packages = ['motmot'],
packages = ['motmot','motmot.cam_iface'],
ext_modules=ext_modules,
package_data=package_data,
)
avg_line_length: 34.542373 | max_line_length: 96 | alphanum_fraction: 0.657998
hexsha: 97a8e142ece0f864ae35cb8622b44c9c37706fe8 | size: 664 | ext: py | lang: Python
max_stars: training_test.py | jepetersohn/haikuna-matata | head 417933c302f2b871650920805565f7a4bb154045 | licenses ["MIT"] | count 6 | 2017-01-22T03:15:01.000Z to 2019-12-01T16:19:36.000Z
max_issues: training_test.py | hollabaq86/haikuna-matata | head 247589b72dbc6d9063e1d98fe86bc264ad5e01b5 | licenses ["MIT"] | count 3 | 2017-01-15T01:32:04.000Z to 2017-01-16T00:25:46.000Z
max_forks: training_test.py | hollabaq86/haikuna-matata | head 247589b72dbc6d9063e1d98fe86bc264ad5e01b5 | licenses ["MIT"] | count 6 | 2017-01-19T21:49:55.000Z to 2021-04-14T09:57:17.000Z
content:
import unittest
import training
class TestTrainingMethods(unittest.TestCase):
def test_favor_unigram(self):
from models import Unigram
training.favorUnigram("the", "quick")
unigram = Unigram.query.filter(Unigram.word1 == "the", Unigram.word2 == "quick").first()
print int(unigram.count)
self.assertEqual(unigram.count, 2)
def test_unfavor_unigram(self):
from models import Unigram
training.unfavorUnigram("the", "quick")
unigram = Unigram.query.filter(Unigram.word1 == "the", Unigram.word2 == "quick").first()
print int(unigram.count)
self.assertEqual(unigram.count, 1)
if __name__ == '__main__':
unittest.main()
avg_line_length: 28.869565 | max_line_length: 92 | alphanum_fraction: 0.713855
hexsha: 41d94e2d1bfe6b0aa8ba50a5c42020dbb3a79239 | size: 21,124 | ext: py | lang: Python
max_stars: constrained_decoding/translation_model/nematus_tm.py | chrishokamp/constrained_decoding | head 187846cea4d2aeee6867781b8ceb04cd02d79a4e | licenses ["MIT"] | count 73 | 2017-04-25T16:38:23.000Z to 2022-02-21T21:39:50.000Z
max_issues: constrained_decoding/translation_model/nematus_tm.py | Brucewuzhang/constrained_decoding | head 187846cea4d2aeee6867781b8ceb04cd02d79a4e | licenses ["MIT"] | count 6 | 2017-04-24T13:07:38.000Z to 2020-03-12T08:58:01.000Z
max_forks: constrained_decoding/translation_model/nematus_tm.py | Brucewuzhang/constrained_decoding | head 187846cea4d2aeee6867781b8ceb04cd02d79a4e | licenses ["MIT"] | count 20 | 2017-06-16T08:11:50.000Z to 2021-12-06T01:36:41.000Z
content:
"""
Implements AbstractConstrainedTM for Nematus NMT models
"""
import copy
import json
import codecs
import logging
import numpy
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano import shared
from nematus.theano_util import (load_params, init_theano_params)
from nematus.nmt import (build_sampler, gen_sample, init_params)
from nematus.compat import fill_options
from nematus.util import load_dict, load_config
from . import AbstractConstrainedTM
from .. import ConstraintHypothesis
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class NematusTranslationModel(AbstractConstrainedTM):
    def __init__(self, model_files, configs, model_weights=None):
        """
Create a ConstrainedTM using Nematus translation models
Args:
config: a dict containing key-->value for each argument supported by `nematus/translate.py`
"""
if configs is not None:
            assert len(model_files) == len(configs), 'Number of models differs from number of config files'
trng = RandomStreams(1234)
# don't use noise
use_noise = shared(numpy.float32(0.))
self.eos_token = '<eos>'
self.fs_init = []
self.fs_next = []
# each entry in self.word_dicts is:
# `{'input_dicts': [...], 'input_idicts': [...], 'output_dict': <dict>, 'output_idict': <dict>}
self.word_dicts = []
if configs is None:
# Nematus models with new format (no separate config)
configs = []
for model in model_files:
configs.append(load_config(model))
# backward compatibility
fill_options(configs[-1])
for model, config in zip(model_files, configs):
# fill in any unspecified options in-place
fill_options(config)
param_list = numpy.load(model).files
param_list = dict.fromkeys([key for key in param_list if not key.startswith('adam_')], 0)
params = load_params(model, param_list)
tparams = init_theano_params(params)
# load model-specific input and output vocabularies
# Note: some models have multiple input factors -- if so, we need to split that model's input into factors
# using the same logic that was used at training time
# Note: every model's output vocabulary must be exactly the same in order to do ensemble decoding
self.word_dicts.append(self.load_dictionaries(config['dictionaries'],
n_words_src=config.get('n_words_src', None),
n_words_trg=config.get('n_words', None)))
# WORKING: add passing attention model alignment through GBS
# f_init, f_next = build_sampler(tparams, config, use_noise, trng,
# return_alignment=config['return_alignment'])
f_init, f_next = build_sampler(tparams, config, use_noise, trng,
return_alignment=True)
self.fs_init.append(f_init)
self.fs_next.append(f_next)
# Make sure all output dicts have the same number of items
assert len(set(len(d['output_dict']) for d in self.word_dicts)) == 1, 'Output vocabularies must be identical'
self.num_models = len(self.fs_init)
if model_weights is None:
self.model_weights = numpy.ones(self.num_models) / float(self.num_models)
else:
assert len(model_weights) == self.num_models, 'if you specify weights, there must be one per model'
self.model_weights = numpy.array(model_weights)
@staticmethod
def is_utf8(filename):
"""
Checks whether the encoding of `filename`'s content is utf-8.
"""
with open(filename, 'rb') as f:
try:
f.read().decode('utf-8')
return True
except UnicodeDecodeError:
return False
def load_dictionaries(self, dictionary_files, n_words_src=None, n_words_trg=None):
"""
Load the input dictionaries and output dictionary for a model. Note the `n_words_src` kwarg is here to
maintain compatibility with the dictionary loading logic in Nematus.
Args:
dictionary_files: list of strings which are paths to *.json Nematus dictionary files
Returns:
input_dicts, input_idicts, output_dict, output_idict
"""
def load_utf8_dict(filename):
with codecs.open(filename, 'rb', encoding='utf8') as f:
return {k.encode('utf8'): v for k,v in json.load(f).items()}
input_dict_files = dictionary_files[:-1]
output_dict_file = dictionary_files[-1]
# load source dictionary and invert
input_dicts = []
input_idicts = []
for dictionary in input_dict_files:
input_dict = load_utf8_dict(dictionary) if self.is_utf8(dictionary) else load_dict(dictionary)
if n_words_src is not None:
for key, idx in input_dict.items():
if idx >= n_words_src:
del input_dict[key]
input_idict = dict()
for kk, vv in input_dict.iteritems():
input_idict[vv] = kk
input_idict[0] = '<eos>'
input_idict[1] = 'UNK'
input_dicts.append(input_dict)
input_idicts.append(input_idict)
# load target dictionary and invert
output_dict = load_utf8_dict(output_dict_file) if self.is_utf8(output_dict_file) else load_dict(output_dict_file)
if n_words_trg is not None:
for key, idx in output_dict.items():
if idx >= n_words_trg:
del output_dict[key]
output_idict = dict()
for kk, vv in output_dict.iteritems():
output_idict[vv] = kk
output_idict[0] = '<eos>'
output_idict[1] = 'UNK'
return {
'input_dicts': input_dicts,
'input_idicts': input_idicts,
'output_dict': output_dict,
'output_idict': output_idict,
'src_size': n_words_src,
'trg_size': n_words_trg
}
def map_inputs(self, inputs, factor_separator='|'):
"""
Map inputs to sequences of ints, which are token indices for the embedding layer(s) of each model
Args:
inputs: a list of unicode strings, whitespace tokenized. Each list item i corresponds to the input for
model_i. If a model uses >1 factor, tokens will still be joined by `factor_separator`
factor_separator: a string used to separate a model's input factors
Returns:
mapped_inputs: list of np.arrays, each with dimensionality (factors, time, 1)
"""
assert len(inputs) == len(self.fs_init), 'We need an input for each model'
mapped_inputs = []
for i, model_input in enumerate(inputs):
# Nematus needs encoded utf-8 as input
if type(model_input) is unicode:
model_input = model_input.encode('utf8')
tokens = model_input.strip().split()
mapped_input = []
for token in tokens:
# if there's only one factor, allow the separator to occur in tokens
if len(self.word_dicts[i]['input_dicts']) == 1:
token = [self.word_dicts[i]['input_dicts'][0].get(token, 1)]
else:
token = [self.word_dicts[i]['input_dicts'][j].get(f, 1)
for j, f in enumerate(token.split(factor_separator))]
mapped_input.append(token)
# append the eos index
mapped_input += [[0] * len(self.word_dicts[i]['input_dicts'])]
mapped_inputs.append(numpy.array(mapped_input).T.reshape(len(mapped_input[0]), len(mapped_input), 1))
return mapped_inputs
# Note: this method could actually be fully implemented in the base class
def map_constraints(self, constraint_token_seqs):
"""Map constraint sequences into the model's output vocabulary
Args:
constraint_token_seqs: a list of sequences of unicode strings corresponding to lexical constraints
Returns:
a list of sequences of ints corresponding to the constraint token indices in the output vocabulary
"""
constraint_seqs = []
for token_seq in constraint_token_seqs:
if type(token_seq) is str:
token_seq = token_seq.split()
elif type(token_seq) is unicode:
# Nematus needs encoded utf-8 as input
token_seq = token_seq.encode('utf8').split()
token_seq = [token.encode('utf8') if type(token) is unicode else token for token in token_seq]
assert type(token_seq) is list or type(token_seq) is tuple, 'Constraint token seqs must be lists or tuples'
# Note: all models share the same output dictionary, so we just use the first one
token_idxs = [self.word_dicts[0]['output_dict'].get(token, 1) for token in token_seq]
constraint_seqs.append(token_idxs)
return constraint_seqs
def start_hypothesis(self, inputs, constraints):
"""Compute the initial representation for each model, build the start hypothesis"""
assert len(inputs) == self.num_models, 'Number of inputs must match the number of models'
# Note: explicit initialization of coverage
coverage = [numpy.zeros(l, dtype='int16')
for l in [len(s) for s in constraints]]
next_states = [None] * self.num_models
# contexts will be static throughout decoding
contexts = [None] * self.num_models
# BOS index
next_w = -1 * numpy.ones((1,)).astype('int64')
for i, model_input in enumerate(inputs):
ret = self.fs_init[i](model_input)
next_states[i] = numpy.tile(ret[0], (1,1))
contexts[i] = ret[1]
# the payload contains everything that the next timestep will need to generate another output
payload = {
'next_states': next_states,
'contexts': contexts,
'next_w': next_w,
'model_scores': numpy.zeros(self.num_models),
'alignments': None
}
start_hyp = ConstraintHypothesis(
token=None,
score=None,
coverage=coverage,
constraints=constraints,
payload=payload,
backpointer=None,
constraint_index=None,
unfinished_constraint=False
)
return start_hyp
def generate(self, hyp, n_best):
"""
Generate the `n_best` hypotheses starting with `hyp`
"""
# if we already generated EOS and there are no constraints (vanilla beam search),
# there's only one option -- just continue it and copy the current cost
if hyp.token == self.eos_token and len(hyp.constraints) == 0:
new_hyp = ConstraintHypothesis(
token=self.eos_token,
score=hyp.score,
coverage=copy.deepcopy(hyp.coverage),
constraints=hyp.constraints,
payload=hyp.payload,
backpointer=hyp,
constraint_index=None,
unfinished_constraint=False
)
return [new_hyp]
# if there are constraints, and we generated eos, this hyp is dead
elif hyp.token == self.eos_token and len(hyp.constraints) > 0:
return []
next_states = [None] * self.num_models
next_p = [None] * self.num_models
alignments = []
for i in xrange(self.num_models):
# Note: batch size is implicitly = 1
inps = [hyp.payload['next_w'], hyp.payload['contexts'][i], hyp.payload['next_states'][i]]
ret = self.fs_next[i](*inps)
next_p[i], next_w_tmp, next_states[i], alignment_weights = ret[0], ret[1], ret[2], ret[3]
alignments.append(alignment_weights)
#if suppress_unk:
# next_p[i][:,1] = -numpy.inf
# Note we cannot naively take the mean because different models may have different inputs
#mean_alignment = sum(alignments)/self.num_models
mean_alignment = alignments[0]
# now compute the combined scores
weighted_scores, all_weighted_scores, probs = self.combine_model_scores(next_p)
flat_scores = weighted_scores.flatten()
n_best_idxs = numpy.argsort(flat_scores)[:n_best]
n_best_scores = flat_scores[n_best_idxs]
next_hyps = []
# create a new hypothesis for each of the n-best
for token_idx, score in zip(n_best_idxs, n_best_scores):
if hyp.score is not None:
next_score = hyp.score + score
else:
# hyp.score is None for the start hyp
next_score = score
payload = {
'next_states': next_states,
'contexts': hyp.payload['contexts'],
'next_w': numpy.array([token_idx]).astype('int64'),
'model_scores': hyp.payload['model_scores'] + all_weighted_scores[:, token_idx],
'alignments': mean_alignment
}
new_hyp = ConstraintHypothesis(
token=self.word_dicts[0]['output_idict'][token_idx],
score=next_score,
coverage=copy.deepcopy(hyp.coverage),
constraints=hyp.constraints,
payload=payload,
backpointer=hyp,
constraint_index=None,
unfinished_constraint=False
)
next_hyps.append(new_hyp)
return next_hyps
def generate_constrained(self, hyp):
"""
Use hyp.constraints and hyp.coverage to return new hypothesis which start constraints
that are not yet covered by this hypothesis.
"""
assert hyp.unfinished_constraint is not True, 'hyp must not be part of an unfinished constraint'
next_states = [None] * self.num_models
next_p = [None] * self.num_models
alignments = []
for i in xrange(self.num_models):
# Note: batch size is implicitly = 1
inps = [hyp.payload['next_w'], hyp.payload['contexts'][i], hyp.payload['next_states'][i]]
ret = self.fs_next[i](*inps)
next_p[i], next_w_tmp, next_states[i], alignment_weights = ret[0], ret[1], ret[2], ret[3]
alignments.append(alignment_weights)
#if suppress_unk:
# next_p[i][:,1] = -numpy.inf
# Note we cannot naively take the mean because different models may have different inputs
#mean_alignment = sum(alignments)/self.num_models
mean_alignment = alignments[0]
# now compute the combined scores
weighted_scores, all_weighted_scores, probs = self.combine_model_scores(next_p)
flat_scores = weighted_scores.flatten()
new_constraint_hyps = []
available_constraints = hyp.constraint_candidates()
for idx in available_constraints:
constraint_idx = hyp.constraints[idx][0]
constraint_score = flat_scores[constraint_idx]
if hyp.score is not None:
next_score = hyp.score + constraint_score
else:
# hyp.score is None for the start hyp
next_score = constraint_score
coverage = copy.deepcopy(hyp.coverage)
coverage[idx][0] = 1
if len(coverage[idx]) > 1:
unfinished_constraint = True
else:
unfinished_constraint = False
payload = {
'next_states': next_states,
'contexts': hyp.payload['contexts'],
'next_w': numpy.array([constraint_idx]).astype('int64'),
'model_scores': hyp.payload['model_scores'] + all_weighted_scores[:, constraint_idx],
'alignments': mean_alignment
}
new_hyp = ConstraintHypothesis(token=self.word_dicts[0]['output_idict'][constraint_idx],
score=next_score,
coverage=coverage,
constraints=hyp.constraints,
payload=payload,
backpointer=hyp,
constraint_index=(idx, 0),
unfinished_constraint=unfinished_constraint)
new_constraint_hyps.append(new_hyp)
return new_constraint_hyps
def continue_constrained(self, hyp):
assert hyp.unfinished_constraint is True, 'hyp must be part of an unfinished constraint'
next_states = [None] * self.num_models
next_p = [None] * self.num_models
alignments = []
for i in xrange(self.num_models):
# Note: batch size is implicitly = 1
inps = [hyp.payload['next_w'], hyp.payload['contexts'][i], hyp.payload['next_states'][i]]
ret = self.fs_next[i](*inps)
next_p[i], next_w_tmp, next_states[i], alignment_weights = ret[0], ret[1], ret[2], ret[3]
alignments.append(alignment_weights)
#if suppress_unk:
# next_p[i][:,1] = -numpy.inf
# Note we cannot naively take the mean because different models may have different inputs
#mean_alignment = sum(alignments)/self.num_models
mean_alignment = alignments[0]
# now compute the combined scores
weighted_scores, all_weighted_scores, probs = self.combine_model_scores(next_p)
flat_scores = weighted_scores.flatten()
constraint_row_index = hyp.constraint_index[0]
# the index of the next token in the constraint
constraint_tok_index = hyp.constraint_index[1] + 1
constraint_index = (constraint_row_index, constraint_tok_index)
continued_constraint_token = hyp.constraints[constraint_index[0]][constraint_index[1]]
# get the score for this token from the logprobs
next_score = hyp.score + flat_scores[continued_constraint_token]
coverage = copy.deepcopy(hyp.coverage)
coverage[constraint_row_index][constraint_tok_index] = 1
if len(hyp.constraints[constraint_row_index]) > constraint_tok_index + 1:
unfinished_constraint = True
else:
unfinished_constraint = False
payload = {
'next_states': next_states,
'contexts': hyp.payload['contexts'],
'next_w': numpy.array([continued_constraint_token]).astype('int64'),
'model_scores': hyp.payload['model_scores'] + all_weighted_scores[:, continued_constraint_token],
'alignments': mean_alignment
}
new_hyp = ConstraintHypothesis(token=self.word_dicts[0]['output_idict'][continued_constraint_token],
score=next_score,
coverage=coverage,
constraints=hyp.constraints,
payload=payload,
backpointer=hyp,
constraint_index=constraint_index,
unfinished_constraint=unfinished_constraint)
return new_hyp
def combine_model_scores(self, scores):
"""Use the weights to combine the scores from each model"""
assert len(scores) == self.num_models, 'we need a vector of scores for each model in the ensemble'
# this hack lets us do ad-hoc truncation of the vocabulary if we need to
scores = [a[:, :self.word_dicts[i]['trg_size']-1] if self.word_dicts[i]['trg_size'] is not None else a
for i, a in enumerate(scores)]
scores = numpy.array(scores)
# Note: this is another implicit batch size = 1 assumption
scores = numpy.squeeze(scores, axis=1)
# multiply weights along each row (rows correspond to the softmax output for a particular model)
# Note the negative sign here, letting us treat the score as a cost to minimize
all_weighted_scores = -numpy.log(scores) * self.model_weights[:, numpy.newaxis]
# we pass these through so they can be used for optimization
unweighted_scores = -(numpy.log(scores))
combined_weighted_scores = numpy.sum(all_weighted_scores, axis=0)
# We don't use the model weights with probs because we want them to sum to 1
probs = numpy.sum(scores, axis=0) / float(self.num_models)
return combined_weighted_scores, unweighted_scores, probs
avg_line_length: 41.097276 | max_line_length: 121 | alphanum_fraction: 0.600549
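To make the ensemble arithmetic in `combine_model_scores` above concrete, here is a small self-contained sketch with made-up probabilities: each model's softmax output becomes a negative log cost, is scaled by its model weight, and the per-model costs are summed. The vocabulary truncation and batch-dimension squeeze from the class are omitted.

```python
import numpy as np

# Two toy models, each producing a softmax over a 4-token output vocabulary
# (batch size 1, as assumed throughout the class above).
probs = np.array([[0.70, 0.10, 0.10, 0.10],
                  [0.40, 0.30, 0.20, 0.10]])
model_weights = np.array([0.5, 0.5])  # uniform weights, matching the default case

all_weighted_scores = -np.log(probs) * model_weights[:, np.newaxis]  # per-model costs
combined = all_weighted_scores.sum(axis=0)   # cost used to rank candidate tokens
avg_probs = probs.sum(axis=0) / len(probs)   # unweighted average probabilities

print(combined.argmin())  # index of the cheapest (most likely) next token -> 0
print(avg_probs)
```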
hexsha: 5f1c0f36bf46fac38f028c60153e4cb16c090648 | size: 980 | ext: py | lang: Python
max_stars: tests/acceptance/steps/content.py | Valdis880/acceptanceTesting | head 1a8de0adb45ebb46e4791aba7458c3fc412cfdb1 | licenses ["MIT"] | count null | null to null
max_issues: tests/acceptance/steps/content.py | Valdis880/acceptanceTesting | head 1a8de0adb45ebb46e4791aba7458c3fc412cfdb1 | licenses ["MIT"] | count null | null to null
max_forks: tests/acceptance/steps/content.py | Valdis880/acceptanceTesting | head 1a8de0adb45ebb46e4791aba7458c3fc412cfdb1 | licenses ["MIT"] | count null | null to null
content:
from behave import *
from acceptanceTesting.tests.acceptance.page_model.base_page import BasePage
from acceptanceTesting.tests.acceptance.page_model.blog_page import BlogPage
use_step_matcher('re')
@then('There is a title shown on the page')
def step_impl(context):
page = BasePage(context.driver)
assert page.title.is_displayed()
@step('The title tag has content "(.*)"')
def step_impl(context, content):
page = BasePage(context.driver)
assert page.title.text == content
@then('I can see there is a posts section on the page')
def step_impl(context):
page = BlogPage(context.driver)
assert page.posts_section.is_displayed()
@then('I can see there is a post with title "(.*)" in the posts section')
def step_impl(context, title):
page = BlogPage(context.driver)
posts_with_title = [post for post in page.posts if post.text == title]
assert len(posts_with_title) > 0
assert all([post.is_displayed() for post in posts_with_title])
avg_line_length: 28.823529 | max_line_length: 76 | alphanum_fraction: 0.736735
hexsha: c0dba0a6ad3167ebad1b3fb08afe8c633f7d03fe | size: 2,686 | ext: py | lang: Python
max_stars: yt_dlp/extractor/uplynk.py | kevinoconnor7/yt-dlp | head 73d829c144601c105f7ee1a3d8f2aed6d8e1b76d | licenses ["Unlicense"] | count 5 | 2021-08-24T17:08:12.000Z to 2022-03-03T13:06:09.000Z
max_issues: yt_dlp/extractor/uplynk.py | kevinoconnor7/yt-dlp | head 73d829c144601c105f7ee1a3d8f2aed6d8e1b76d | licenses ["Unlicense"] | count 1 | 2021-07-01T13:07:07.000Z to 2021-07-01T13:07:07.000Z
max_forks: yt_dlp/extractor/uplynk.py | kevinoconnor7/yt-dlp | head 73d829c144601c105f7ee1a3d8f2aed6d8e1b76d | licenses ["Unlicense"] | count 1 | 2022-02-05T11:57:47.000Z to 2022-02-05T11:57:47.000Z
content:
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
ExtractorError,
)
class UplynkIE(InfoExtractor):
IE_NAME = 'uplynk'
_VALID_URL = r'https?://.*?\.uplynk\.com/(?P<path>ext/[0-9a-f]{32}/(?P<external_id>[^/?&]+)|(?P<id>[0-9a-f]{32}))\.(?:m3u8|json)(?:.*?\bpbs=(?P<session_id>[^&]+))?'
_TEST = {
'url': 'http://content.uplynk.com/e89eaf2ce9054aa89d92ddb2d817a52e.m3u8',
'info_dict': {
'id': 'e89eaf2ce9054aa89d92ddb2d817a52e',
'ext': 'mp4',
'title': '030816-kgo-530pm-solar-eclipse-vid_web.mp4',
'uploader_id': '4413701bf5a1488db55b767f8ae9d4fa',
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _extract_uplynk_info(self, uplynk_content_url):
path, external_id, video_id, session_id = re.match(UplynkIE._VALID_URL, uplynk_content_url).groups()
display_id = video_id or external_id
formats, subtitles = self._extract_m3u8_formats_and_subtitles(
'http://content.uplynk.com/%s.m3u8' % path,
display_id, 'mp4', 'm3u8_native')
if session_id:
for f in formats:
f['extra_param_to_segment_url'] = 'pbs=' + session_id
self._sort_formats(formats)
asset = self._download_json('http://content.uplynk.com/player/assetinfo/%s.json' % path, display_id)
if asset.get('error') == 1:
            raise ExtractorError('%s said: %s' % (self.IE_NAME, asset['msg']), expected=True)
return {
'id': asset['asset'],
'title': asset['desc'],
'thumbnail': asset.get('default_poster_url'),
'duration': float_or_none(asset.get('duration')),
'uploader_id': asset.get('owner'),
'formats': formats,
'subtitles': subtitles,
}
def _real_extract(self, url):
return self._extract_uplynk_info(url)
class UplynkPreplayIE(UplynkIE):
IE_NAME = 'uplynk:preplay'
_VALID_URL = r'https?://.*?\.uplynk\.com/preplay2?/(?P<path>ext/[0-9a-f]{32}/(?P<external_id>[^/?&]+)|(?P<id>[0-9a-f]{32}))\.json'
_TEST = None
def _real_extract(self, url):
path, external_id, video_id = re.match(self._VALID_URL, url).groups()
display_id = video_id or external_id
preplay = self._download_json(url, display_id)
content_url = 'http://content.uplynk.com/%s.m3u8' % path
session_id = preplay.get('sid')
if session_id:
content_url += '?pbs=' + session_id
return self._extract_uplynk_info(content_url)
avg_line_length: 37.305556 | max_line_length: 168 | alphanum_fraction: 0.600149
hexsha: 5cde65cde1def61721f0a8042ff0ae735bc0b641 | size: 1,144 | ext: py | lang: Python
max_stars: src/shear.py | WeixuanZ/materials-modelling | head 3b08acca078b511f25241307a97a44c305369d8c | licenses ["MIT"] | count null | null to null
max_issues: src/shear.py | WeixuanZ/materials-modelling | head 3b08acca078b511f25241307a97a44c305369d8c | licenses ["MIT"] | count null | null to null
max_forks: src/shear.py | WeixuanZ/materials-modelling | head 3b08acca078b511f25241307a97a44c305369d8c | licenses ["MIT"] | count null | null to null
content:
"""Calculations related to shear
Attributes:
cu_cell (CuCell): instance of CuCell class with default lattice vector size
"""
import numpy as np
try:
from UnitCell import CuCell
from util import map_func
except ModuleNotFoundError:
from .UnitCell import CuCell
from .util import map_func
cu_cell = CuCell()
def get_shear_stress(shear: float) -> np.ndarray:
r"""Calculate the stress after applying a shear in :math:`y` direction
The shear tensor is
.. math::
:nowrap:
\[\left[
\begin{array}{lll}\sigma_{x} & \tau_{x y} & \tau_{x z} \\
\tau_{y x} & \sigma_{y} & \tau_{y z} \\
\tau_{z x} & \tau_{z y} & \sigma_{z}\end{array}
\right]\]
if the stress is only applied in the :math:`y` direction, :math:`\tau_{x z}=\tau_{y z}=0`
Args:
shear (float): shear
Returns:
float: shear stress :math:`\tau_{x y}` (eV/Å^3)
"""
return cu_cell.shear_deform(shear).get_stress(voigt=False)[0, 1]
if __name__ == "__main__":
print(np.array(cu_cell.shear_deform(0.1).get_cell()))
print(get_shear_stress(0.1))
cu_cell.visualize()
avg_line_length: 24.340426 | max_line_length: 93 | alphanum_fraction: 0.626748
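A possible follow-up to `get_shear_stress` above, assuming the repository's `CuCell` dependencies are available: estimate an effective shear modulus as the slope of shear stress with respect to shear strain near zero, via a central finite difference. The step size and the modulus interpretation are illustrative, not part of the file.

```python
# Central finite difference around zero strain; get_shear_stress is defined above
# and returns the tau_xy component in eV/Å^3, so the slope has the same units.
eps = 1e-3
g_estimate = (get_shear_stress(eps) - get_shear_stress(-eps)) / (2 * eps)
print(f"estimated shear modulus: {g_estimate:.4f} eV/Å^3")
```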
hexsha: 77751d41eaee771736db329fff4b556c9db9cdf9 | size: 1,458 | ext: py | lang: Python
max_stars: pcat2py/class/20cd8a84-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | head 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | licenses ["MIT"] | count null | null to null
max_issues: pcat2py/class/20cd8a84-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | head 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | licenses ["MIT"] | count null | null to null
max_forks: pcat2py/class/20cd8a84-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | head 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | licenses ["MIT"] | count null | null to null
content:
#!/usr/bin/python
################################################################################
# 20cd8a84-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "20cd8a84-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
# Get Registry DWORD
dword = cli.get_reg_dword(r'HKLM:\System\CurrentControlSet\Services\Tcpip6\Parameters', 'DisableIpSourceRouting')
# Output Lines
self.output = [r'HKLM:\System\CurrentControlSet\Services\Tcpip6\Parameters', ('DisableIpSourceRouting=' + str(dword))]
if dword == 2:
self.is_compliant = True
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKLM:\System\CurrentControlSet\Services'")
cli.powershell(r"New-Item -path 'HKLM:\System\CurrentControlSet\Services\Tcpip6'")
cli.powershell(r"New-Item -path 'HKLM:\System\CurrentControlSet\Services\Tcpip6\Parameters'")
cli.powershell(r"Set-ItemProperty -path 'HKLM:\System\CurrentControlSet\Services\Tcpip6\Parameters' -name 'DisableIpSourceRouting' -value 2 -Type DWord")
avg_line_length: 38.368421 | max_line_length: 161 | alphanum_fraction: 0.613855
hexsha: 45fb1af1135c965b014d9df88288fb79edbfe1f1 | size: 1,999 | ext: py | lang: Python
max_stars: supriya/commands/ControlBusGetRequest.py | deeuu/supriya | head 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | licenses ["MIT"] | count null | null to null
max_issues: supriya/commands/ControlBusGetRequest.py | deeuu/supriya | head 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | licenses ["MIT"] | count null | null to null
max_forks: supriya/commands/ControlBusGetRequest.py | deeuu/supriya | head 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | licenses ["MIT"] | count null | null to null
content:
import supriya.osc
from supriya.commands.Request import Request
from supriya.enums import RequestId
class ControlBusGetRequest(Request):
"""
A /c_get request.
::
>>> import supriya
>>> server = supriya.Server.default().boot()
>>> request = supriya.commands.ControlBusGetRequest(
... indices=(0, 4, 8, 12),
... )
>>> request
ControlBusGetRequest(
indices=(0, 4, 8, 12),
)
::
>>> request.to_osc()
OscMessage('/c_get', 0, 4, 8, 12)
::
>>> with server.osc_protocol.capture() as transcript:
... request.communicate(server=server)
...
ControlBusSetResponse(
items=(
Item(bus_id=0, bus_value=0.0),
Item(bus_id=4, bus_value=0.0),
Item(bus_id=8, bus_value=0.0),
Item(bus_id=12, bus_value=0.0),
),
)
::
>>> for entry in transcript:
... (entry.label, entry.message)
...
('S', OscMessage('/c_get', 0, 4, 8, 12))
('R', OscMessage('/c_set', 0, 0.0, 4, 0.0, 8, 0.0, 12, 0.0))
"""
### CLASS VARIABLES ###
request_id = RequestId.CONTROL_BUS_GET
### INITIALIZER ###
def __init__(self, indices=None):
Request.__init__(self)
if indices:
indices = tuple(int(index) for index in indices)
assert all(0 <= index for index in indices)
self._indices = indices
### PUBLIC METHODS ###
def to_osc(self, *, with_placeholders=False):
request_id = self.request_name
contents = [request_id]
if self.indices:
contents.extend(self.indices)
message = supriya.osc.OscMessage(*contents)
return message
### PUBLIC PROPERTIES ###
@property
def indices(self):
return self._indices
@property
def response_patterns(self):
return ["/c_set"], None
avg_line_length: 24.084337 | max_line_length: 68 | alphanum_fraction: 0.530265
hexsha: 4c7477631dfb91cb0919121e4d062d0699582a0f | size: 87 | ext: py | lang: Python
max_stars: HackerRank Solutions/Python/Math/Mod Divmod.py | DevashishPathrabe/Competetive-Coding | head 91049459359854b7834cbfb31415682600dc9c57 | licenses ["MIT"] | count null | null to null
max_issues: HackerRank Solutions/Python/Math/Mod Divmod.py | DevashishPathrabe/Competetive-Coding | head 91049459359854b7834cbfb31415682600dc9c57 | licenses ["MIT"] | count null | null to null
max_forks: HackerRank Solutions/Python/Math/Mod Divmod.py | DevashishPathrabe/Competetive-Coding | head 91049459359854b7834cbfb31415682600dc9c57 | licenses ["MIT"] | count null | null to null
content:
a, b = (int(input()) for _ in range(2))
print(a // b)
print(a % b)
print(divmod(a, b))
avg_line_length: 17.4 | max_line_length: 39 | alphanum_fraction: 0.563218
hexsha: 2d0044f7c17f5af59e67ca3922e46a7911ccddf9 | size: 597 | ext: py | lang: Python
max_stars: backend/apps/projects/efficiency/urls.py | wuchaofan1654/tester | head ff38d42e06cbdfa04882e8e95ada2dd93e6609f2 | licenses ["MIT"] | count null | null to null
max_issues: backend/apps/projects/efficiency/urls.py | wuchaofan1654/tester | head ff38d42e06cbdfa04882e8e95ada2dd93e6609f2 | licenses ["MIT"] | count null | null to null
max_forks: backend/apps/projects/efficiency/urls.py | wuchaofan1654/tester | head ff38d42e06cbdfa04882e8e95ada2dd93e6609f2 | licenses ["MIT"] | count null | null to null
content:
# -*- coding: utf-8 -*-
"""
Create by sandy at 16:34 09/12/2021
Description: ToDo
"""
from django.urls import re_path
from rest_framework.routers import DefaultRouter
from apps.projects.efficiency.views import EfficiencyModelViewSet, ModuleModelViewSet
router = DefaultRouter()
router.register(r'efficiency', EfficiencyModelViewSet)
router.register(r'module', ModuleModelViewSet)
urlpatterns = [
re_path('module/tree/', ModuleModelViewSet.as_view({'get': 'tree_select_list'})),
re_path('module/children/', ModuleModelViewSet.as_view({'get': 'get_all'})),
]
urlpatterns += router.urls
avg_line_length: 27.136364 | max_line_length: 85 | alphanum_fraction: 0.760469
hexsha: 2b5da4bec5aa2234b103ad88870d379417c85086 | size: 3,412 | ext: py | lang: Python
max_stars: app/modules/core/context.py | JohJohan/silverback | head e27bc5d238d2b34955a470a8e8327ae44022b78b | licenses ["Apache-2.0"] | count null | null to null
max_issues: app/modules/core/context.py | JohJohan/silverback | head e27bc5d238d2b34955a470a8e8327ae44022b78b | licenses ["Apache-2.0"] | count null | null to null
max_forks: app/modules/core/context.py | JohJohan/silverback | head e27bc5d238d2b34955a470a8e8327ae44022b78b | licenses ["Apache-2.0"] | count null | null to null
content:
# Copyright 2019 Silverbackhq
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Local Library
from app.settings.info import AUTHOR
from app.settings.info import COPYRIGHT
from app.settings.info import LICENSE
from app.settings.info import VERSION
from app.settings.info import MAINTAINER
from app.settings.info import EMAIL
from app.settings.info import STATUS
from app.settings.info import REPO_URL
from app.settings.info import AUTHOR_URL
from app.settings.info import RELEASES
from app.settings.info import SUPPORT_URL
from app.modules.util.gravatar import Gravatar
from app.modules.entity.option_entity import OptionEntity
from app.modules.entity.user_entity import UserEntity
class Context():
def __init__(self):
self.__option_entity = OptionEntity()
self.__user_entity = UserEntity()
self.__data = {}
self.__data["AUTHOR"] = AUTHOR
self.__data["COPYRIGHT"] = COPYRIGHT
self.__data["LICENSE"] = LICENSE
self.__data["VERSION"] = VERSION
self.__data["MAINTAINER"] = MAINTAINER
self.__data["EMAIL"] = EMAIL
self.__data["STATUS"] = STATUS
self.__data["REPO_URL"] = REPO_URL
self.__data["AUTHOR_URL"] = AUTHOR_URL
self.__data["RELEASES"] = RELEASES
self.__data["SUPPORT_URL"] = SUPPORT_URL
def push(self, new_data):
self.__data.update(new_data)
def load_options(self, options):
options_to_load = {}
for key in options.keys():
options_to_load[key] = options[key]
if key not in self.__data.keys():
self.__data[key] = options[key]
if len(options_to_load.keys()) > 0:
new_options = self.__option_entity.get_many_by_keys(options_to_load.keys())
for option in new_options:
self.__data[option.key] = option.value
def autoload_options(self):
options = self.__option_entity.get_many_by_autoload(True)
for option in options:
self.__data[option.key] = option.value
def autoload_user(self, user_id):
user_data = {
"user_first_name": "",
"user_last_name": "",
"user_username": "",
"user_email": "",
"user_avatar": ""
}
if user_id is not None:
user = self.__user_entity.get_one_by_id(user_id)
if user is not False:
user_data["user_first_name"] = user.first_name
user_data["user_last_name"] = user.last_name
user_data["user_username"] = user.username
user_data["user_email"] = user.email
user_data["user_avatar"] = Gravatar(user.email).get_image()
self.__data.update(user_data)
def get(self, key=None, default=None):
if key is not None:
return self.__data[key] if key in self.__data else default
return self.__data
avg_line_length: 35.915789 | max_line_length: 87 | alphanum_fraction: 0.662368
hexsha: eaa3565c2aa71e4fe5ee35963e260560a219a138 | size: 7,942 | ext: py | lang: Python
max_stars: fuzzinator/ui/tui/reporter_dialogs.py | renatahodovan/fuzzinator | head 49e6cf1b5dad59e82f7bed5f14b23dbd7c520ad0 | licenses ["BSD-3-Clause"] | count 202 | 2016-10-21T00:19:59.000Z to 2022-03-07T07:05:57.000Z
max_issues: fuzzinator/ui/tui/reporter_dialogs.py | renatahodovan/fuzzinator | head 49e6cf1b5dad59e82f7bed5f14b23dbd7c520ad0 | licenses ["BSD-3-Clause"] | count 16 | 2016-11-27T05:36:25.000Z to 2021-08-10T14:42:48.000Z
max_forks: fuzzinator/ui/tui/reporter_dialogs.py | renatahodovan/fuzzinator | head 49e6cf1b5dad59e82f7bed5f14b23dbd7c520ad0 | licenses ["BSD-3-Clause"] | count 43 | 2016-10-21T00:19:31.000Z to 2022-03-07T07:06:54.000Z
content:
# Copyright (c) 2016-2021 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import logging
from urwid import *
from ...config import config_get_object
from ...formatter import JsonFormatter
from ...tracker import BaseTracker, BugzillaTracker, TrackerError
from .button import FormattedButton
from .decor_widgets import PatternBox
from .dialogs import BugEditor
from .graphics import fz_box_pattern
logger = logging.getLogger(__name__)
class ReportDialog(PopUpTarget):
signals = ['close']
def __init__(self, issue, config, db, side_bar=None):
self.issue = issue
self.tracker = config_get_object(config, 'sut.' + issue['sut'], 'tracker') or BaseTracker()
self.db = db
self.duplicate = None
self.edit_dups = BugEditor()
self.result = Text('')
formatter = config_get_object(config, 'sut.' + issue['sut'], 'formatter') or JsonFormatter()
self.issue_title = BugEditor(edit_text=formatter(issue=issue, format='short'))
self.issue_desc = BugEditor(edit_text=formatter(issue=issue), multiline=True, wrap='clip')
self.body = SimpleListWalker([Columns([('fixed', 13, Text(('dialog_secondary', 'Summary: '))),
('weight', 10, self.issue_title)], dividechars=1),
Columns([('fixed', 13, Text(('dialog_secondary', 'Description: '))),
('weight', 10, self.issue_desc)], dividechars=1)])
frame = Frame(body=AttrMap(Columns([('weight', 10, ListBox(self.body)),
('weight', 3, AttrMap(ListBox(SimpleListWalker(side_bar or [])), attr_map='dialog_secondary'))]),
'dialog'),
footer=Columns([('pack', FormattedButton('Close', lambda button: self._emit('close_reporter'))),
('pack', FormattedButton('Report', lambda button: self.send_report())),
('pack', FormattedButton('Save as reported', lambda button: self.save_reported()))], dividechars=2),
focus_part='body')
super().__init__(AttrMap(PatternBox(frame, title=('dialog_title', issue['id']), **fz_box_pattern()), attr_map='dialog_border'))
self.find_duplicates()
def set_duplicate(self, btn, state):
if state:
self.duplicate = btn.label
def find_duplicates(self):
dups_walker = SimpleListWalker([self.edit_dups])
options = []
try:
for issue in self.tracker.find_issue(self.issue_title.get_text()[0]):
radio_btn = RadioButton(options, issue['url'], on_state_change=self.set_duplicate)
# Select the first suggested bug if there is not set any.
if self.duplicate is None:
self.duplicate = radio_btn.label
dups_walker.append(radio_btn)
except TrackerError as e:
logger.error(str(e), exc_info=e)
self.body.insert(0, Columns([('fixed', 13, Text(('dialog_secondary', 'Duplicates: '))),
('weight', 10, BoxAdapter(ListBox(dups_walker), height=len(dups_walker)))]))
def get_report_data(self):
assert False, 'Should never be reached.'
return dict()
def send_report(self):
try:
issue_url = self.tracker.report_issue(**self.get_report_data())['url']
self.result.set_text(('dialog_secondary', 'Reported at: {url}'.format(url=issue_url)))
self.db.update_issue_by_oid(self.issue['_id'], {'reported': issue_url})
except TrackerError as e:
logger.error(str(e), exc_info=e)
def save_reported(self):
if self.edit_dups.text:
url = self.edit_dups.text
elif self.duplicate:
url = self.duplicate
else:
url = ''
self.db.update_issue_by_oid(self.issue['_id'], {'reported': url})
self._emit('close')
def keypress(self, size, key):
if key in ['esc', 'f7']:
self._emit('close')
return None
return super().keypress(size, key)
class BugzillaReportDialog(ReportDialog):
def __init__(self, issue, config, db):
self.edit_blocks = BugEditor()
self.edit_cc = BugEditor(multiline=True)
self.edit_extension = BugEditor(edit_text='html')
tracker = config_get_object(config, 'sut.' + issue['sut'], 'tracker')
assert isinstance(tracker, BugzillaTracker), 'Tracker is not a Bugzilla instance.'
self.product_info = tracker.settings()
self.product = None
products_walker = SimpleListWalker([])
products = []
for product in self.product_info:
products_walker.append(RadioButton(products, product, on_state_change=self.set_product))
self.component = None
self.component_box = SimpleFocusListWalker([])
self.version = None
self.versions_box = SimpleFocusListWalker([])
side_bar = [LineBox(BoxAdapter(ListBox(products_walker), height=len(products_walker)), title='Products'),
LineBox(BoxAdapter(ListBox(self.component_box), height=10), title='Components'),
LineBox(BoxAdapter(ListBox(self.versions_box), height=10), title='Versions'),
Columns([('weight', 1, Text('CC: ')), ('weight', 4, self.edit_cc)]),
Columns([('weight', 1, Text('Blocks: ')), ('weight', 4, self.edit_blocks)]),
Columns([('weight', 1, Text('Ext: ')), ('weight', 4, self.edit_extension)])]
super().__init__(issue=issue, config=config, db=db, side_bar=side_bar)
self.set_product(products_walker.contents[0], True)
def set_product(self, btn, state):
if state:
self.product = btn.label
self.update_components(self.product_info[self.product]['components'])
self.update_versions(self.product_info[self.product]['versions'])
def set_component(self, btn, state):
if state:
self.component = btn.label
def set_version(self, btn, state):
if state:
self.version = btn.label
def update_components(self, components):
components_group = []
self.component_box.clear()
for component in components:
self.component_box.append(RadioButton(group=components_group, label=component, on_state_change=self.set_component))
def update_versions(self, versions):
versions_group = []
self.versions_box.clear()
for version in versions:
self.versions_box.append(RadioButton(group=versions_group, label=version, on_state_change=self.set_version))
def get_report_data(self):
return dict(title=self.issue_title.edit_text,
body=self.issue_desc.edit_text,
product=self.product,
product_version=self.version,
component=self.component,
blocks=self.edit_blocks.edit_text,
test=self.issue['test'],
extension=self.edit_extension.edit_text)
class GithubReportDialog(ReportDialog):
def get_report_data(self):
return dict(title=self.issue_title.edit_text,
body=self.issue_desc.edit_text)
class MonorailReportDialog(ReportDialog):
def get_report_data(self):
return dict(title=self.issue_title.edit_text,
body=self.issue_desc.edit_text)
class GitlabReportDialog(ReportDialog):
def get_report_data(self):
return dict(title=self.issue_title.edit_text,
body=self.issue_desc.edit_text)
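# A hedged sketch (not part of the original module) of how a further
# tracker-specific dialog would plug in: concrete dialogs only override
# get_report_data() with the keyword arguments their tracker's report_issue()
# expects, exactly as the Github/Monorail/Gitlab dialogs above do.
#
#     class ExampleTrackerReportDialog(ReportDialog):
#         def get_report_data(self):
#             return dict(title=self.issue_title.edit_text,
#                         body=self.issue_desc.edit_text)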
avg_line_length: 42.244681 | max_line_length: 141 | alphanum_fraction: 0.61521

hexsha: db0175eccf28b616908cd1a01b1babaf48a947c7 | size: 2,606 | ext: py | lang: Python
max_stars: observations/r/bomsoi2001.py @ hajime9652/observations (2c8b1ac31025938cb17762e540f2f592e302d5de) | licenses: ["Apache-2.0"] | count: 199 | events: 2017-07-24T01:34:27.000Z .. 2022-01-29T00:50:55.000Z
max_issues: observations/r/bomsoi2001.py @ hajime9652/observations (2c8b1ac31025938cb17762e540f2f592e302d5de) | licenses: ["Apache-2.0"] | count: 46 | events: 2017-09-05T19:27:20.000Z .. 2019-01-07T09:47:26.000Z
max_forks: observations/r/bomsoi2001.py @ hajime9652/observations (2c8b1ac31025938cb17762e540f2f592e302d5de) | licenses: ["Apache-2.0"] | count: 45 | events: 2017-07-26T00:10:44.000Z .. 2022-03-16T20:44:59.000Z
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def bomsoi2001(path):
"""Southern Oscillation Index Data
The Southern Oscillation Index (SOI) is the difference in barometric
pressure at sea level between Tahiti and Darwin. Annual SOI and
Australian rainfall data, for the years 1900-2001, are given.
Australia's annual mean rainfall is an area-weighted average of the
total annual precipitation at approximately 370 rainfall stations around
the country.
This data frame contains the following columns:
Year
a numeric vector
Jan
average January SOI values for each year
Feb
average February SOI values for each year
Mar
average March SOI values for each year
Apr
average April SOI values for each year
May
average May SOI values for each year
Jun
average June SOI values for each year
Jul
average July SOI values for each year
Aug
average August SOI values for each year
Sep
average September SOI values for each year
Oct
average October SOI values for each year
Nov
average November SOI values for each year
Dec
average December SOI values for each year
SOI
a numeric vector consisting of average annual SOI values
avrain
a numeric vector consisting of a weighted average annual rainfall at
a large number of Australian sites
Australian Bureau of Meteorology web pages:
http://www.bom.gov.au/climate/change/rain02.txt and
http://www.bom.gov.au/climate/current/soihtm1.shtml
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `bomsoi2001.csv`.
Returns:
Tuple of np.ndarray `x_train` with 102 rows and 15 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'bomsoi2001.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/bomsoi2001.csv'
maybe_download_and_extract(path, url,
save_file_name='bomsoi2001.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
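# A minimal usage sketch, not part of the original module: it assumes the
# `observations` package (and pandas) is installed and that the download
# mirror hard-coded above is reachable; the target directory is an example.
if __name__ == "__main__":
  x_train, metadata = bomsoi2001("~/data")
  print(x_train.shape)        # (102, 15) according to the docstring above
  print(metadata['columns'])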
avg_line_length: 25.300971 | max_line_length: 74 | alphanum_fraction: 0.707598

hexsha: bbc4a6ec48245a11a9c76cc8b5858656a6548443 | size: 6,294 | ext: py | lang: Python
max_stars: unittest/scripts/auto/py_adminapi/scripts/dba_upgrade_metadata_norecord.py @ mueller/mysql-shell (29bafc5692bd536a12c4e41c54cb587375fe52cf) | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: unittest/scripts/auto/py_adminapi/scripts/dba_upgrade_metadata_norecord.py @ mueller/mysql-shell (29bafc5692bd536a12c4e41c54cb587375fe52cf) | licenses: ["Apache-2.0"] | count: 1 | events: 2021-09-12T22:07:06.000Z .. 2021-09-12T22:07:06.000Z
max_forks: unittest/scripts/auto/py_adminapi/scripts/dba_upgrade_metadata_norecord.py @ mueller/mysql-shell (29bafc5692bd536a12c4e41c54cb587375fe52cf) | licenses: ["Apache-2.0"] | count: null | events: null
#@ {not real_host_is_loopback}
#@<> Snapshot File Name {VER(<8.0.0)}
snapshot_file = 'metadata-1.0.1-5.7.27-snapshot.sql'
#@<> Snapshot File Name {VER(>=8.0.0)}
snapshot_file = 'metadata-1.0.1-8.0.17-snapshot.sql'
#@ Creates the sample cluster
testutil.deploy_sandbox(__mysql_sandbox_port1, "root", {'report_host': hostname})
dba.configure_instance(__sandbox_uri1, {'clusterAdmin': 'tst_admin', 'clusterAdminPassword': 'tst_pwd'})
testutil.snapshot_sandbox_conf(__mysql_sandbox_port1)
cluster_admin_uri= "mysql://tst_admin:tst_pwd@" + __host + ":" + str(__mysql_sandbox_port1)
# Session to be used through all the AAPI calls
shell.connect(cluster_admin_uri)
dba.create_cluster('sample', {'ipAllowlist': '127.0.0.1,' + hostname_ip})
def set_metadata_1_0_1():
dba.drop_metadata_schema({"force": True})
testutil.import_data(__sandbox_uri1, __test_data_path + '/sql/' + snapshot_file)
session.run_sql("UPDATE mysql_innodb_cluster_metadata.instances SET mysql_server_uuid = @@server_uuid")
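# set_metadata_1_0_1() above drops the current metadata schema, restores the
# bundled 1.0.1 snapshot and re-points the imported instance record at this
# sandbox's server UUID, so each scenario below starts from outdated metadata.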
#@ Upgrades the metadata, no registered routers
set_metadata_1_0_1()
session.run_sql("delete from mysql_innodb_cluster_metadata.routers")
dba.upgrade_metadata({'interactive':True})
#@ Upgrades the metadata, up to date
dba.upgrade_metadata({'interactive':True})
#@ Upgrades the metadata, interactive off, error
set_metadata_1_0_1()
EXPECT_THROWS(lambda: dba.upgrade_metadata({'interactive':False}),
"Dba.upgrade_metadata: Outdated Routers found. Please upgrade the Routers before upgrading the Metadata schema")
#@ Upgrades the metadata, upgrade done by unregistering 10 routers and no router accounts
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (2, 'second', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (3, 'third', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (4, 'fourth', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (5, 'fifth', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (6, 'sixth', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (7, 'seventh', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (8, 'eighth', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (9, 'nineth', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (10, 'tenth', 2, NULL)")
# Chooses to unregister the existing routers
testutil.expect_prompt("Please select an option: ", "2")
testutil.expect_prompt("Unregistering a Router implies it will not be used in the Cluster, do you want to continue? [y/N]:", "y")
dba.upgrade_metadata({'interactive':True})
#@ Upgrades the metadata, upgrade done by unregistering more than 10 routers with router accounts
# Fake router account to get the account upgrade tested
set_metadata_1_0_1()
session.run_sql("CREATE USER mysql_router_test@`%` IDENTIFIED BY 'whatever'")
session.run_sql("CREATE USER mysql_router1_bc0e9n9dnfzk@`%` IDENTIFIED BY 'whatever'")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (2, 'second', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (3, 'third', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (4, 'fourth', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (5, 'fifth', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (6, 'sixth', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (7, 'seventh', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (8, 'eighth', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (9, 'nineth', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (10, 'tenth', 2, NULL)")
session.run_sql("INSERT INTO mysql_innodb_cluster_metadata.routers VALUES (11, 'eleventh', 2, NULL)")
# Chooses to unregister the existing routers
testutil.expect_prompt("Please select an option: ", "2")
testutil.expect_prompt("Unregistering a Router implies it will not be used in the Cluster, do you want to continue? [y/N]:", "y")
dba.upgrade_metadata({'interactive':True})
#@<> Verifying grants for mysql_router_test
session.run_sql("SHOW GRANTS FOR mysql_router_test@`%`")
EXPECT_STDOUT_CONTAINS("GRANT USAGE ON *.*")
EXPECT_STDOUT_CONTAINS("GRANT SELECT, EXECUTE ON `mysql_innodb_cluster_metadata`.*")
EXPECT_STDOUT_CONTAINS("GRANT INSERT, UPDATE, DELETE ON `mysql_innodb_cluster_metadata`.`routers`")
EXPECT_STDOUT_CONTAINS("GRANT INSERT, UPDATE, DELETE ON `mysql_innodb_cluster_metadata`.`v2_routers`")
EXPECT_STDOUT_CONTAINS("GRANT SELECT ON `performance_schema`.`global_variables`")
EXPECT_STDOUT_CONTAINS("GRANT SELECT ON `performance_schema`.`replication_group_member_stats`")
EXPECT_STDOUT_CONTAINS("GRANT SELECT ON `performance_schema`.`replication_group_members`")
#@<> Verifying grants for mysql_router1_bc0e9n9dnfzk
session.run_sql("SHOW GRANTS FOR mysql_router1_bc0e9n9dnfzk@`%`")
EXPECT_STDOUT_CONTAINS("GRANT USAGE ON *.*")
EXPECT_STDOUT_CONTAINS("GRANT SELECT, EXECUTE ON `mysql_innodb_cluster_metadata`.*")
EXPECT_STDOUT_CONTAINS("GRANT INSERT, UPDATE, DELETE ON `mysql_innodb_cluster_metadata`.`routers`")
EXPECT_STDOUT_CONTAINS("GRANT INSERT, UPDATE, DELETE ON `mysql_innodb_cluster_metadata`.`v2_routers`")
EXPECT_STDOUT_CONTAINS("GRANT SELECT ON `performance_schema`.`global_variables`")
EXPECT_STDOUT_CONTAINS("GRANT SELECT ON `performance_schema`.`replication_group_member_stats`")
EXPECT_STDOUT_CONTAINS("GRANT SELECT ON `performance_schema`.`replication_group_members`")
#@ Test Migration from 1.0.1 to 2.0.0
set_metadata_1_0_1()
test_session = mysql.get_session(__sandbox_uri1)
# Chooses to unregister the existing router
testutil.expect_prompt("Please select an option: ", "2")
testutil.expect_prompt("Unregistering a Router implies it will not be used in the Cluster, do you want to continue? [y/N]:", "y")
dba.upgrade_metadata({'interactive':True})
#@<> Cleanup
session.close()
test_session.close()
testutil.destroy_sandbox(__mysql_sandbox_port1)
avg_line_length: 58.277778 | max_line_length: 129 | alphanum_fraction: 0.791389

hexsha: bf2313fb4ccbb8cf5aa7a20fab775af41a3ea549 | size: 2,363 | ext: py | lang: Python
max_stars: config/presets/Modes/Python/S - Big City Scroll/main.py @ The-XOR/EYESY_OS (6a5e3d0bc5574ba2311e0c7e81c600c3af7a3e34) | licenses: ["BSD-3-Clause"] | count: 18 | events: 2021-03-06T05:39:30.000Z .. 2022-03-25T17:59:23.000Z
max_issues: presets/Modes/Python/S - Big City Scroll/main.py @ jqrsound/EYESY_OS_for_RasPiSound (ac117b91cd84ad4c0566bd1a7d4c7b1ccc01cf62) | licenses: ["BSD-3-Clause"] | count: null | events: null
max_forks: presets/Modes/Python/S - Big City Scroll/main.py @ jqrsound/EYESY_OS_for_RasPiSound (ac117b91cd84ad4c0566bd1a7d4c7b1ccc01cf62) | licenses: ["BSD-3-Clause"] | count: 4 | events: 2021-03-14T18:38:42.000Z .. 2021-07-11T14:31:18.000Z
import os
import pygame
import math
import time
last_point = [240, 160]
y1 = 640
x = 640
width = 25
XR = 320
YR = 240
def setup(screen, etc):
global XR, YR
XR = etc.xres
YR = etc.yres
pass
def draw(screen, etc):
global last_point, x, y1, width, XR, YR
etc.color_picker_bg(etc.knob5)
for i in range(0, 10) :
seg(screen, etc, i)
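# seg() below draws one scrolling audio segment: knob1 sets the scroll speed,
# knob2 the baseline height, knob3 the colour-phase modulation, knob4 selects
# the palette and knob5 (read in draw()) the background colour.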
def seg(screen, etc, i):
global last_point, x, y1, width, XR, YR
audioraty = ((100*720)/YR)
audioratx = ((150*1280)/XR)
speedrat = ((20*XR)/1280)
y1 = int(etc.knob2 * YR) + (etc.audio_in[i] / audioraty)
width = (etc.audio_in[i] / audioratx)+3
sel = etc.knob4*9
Cmod = etc.knob3
if 1 > sel :
color = (int(127 + 127 * math.sin(i * 1*Cmod + time.time())),
int(127 + 127 * math.sin(i * 1*Cmod + time.time())),
int(127 + 127 * math.sin(i * 1*Cmod + time.time())))
if 1 <= sel < 2 :
color = (int(127+127 * math.sin(i * 1*Cmod + time.time())),0,45)
if 2 <= sel < 3 :
color = (255,int(155 + 100 * math.sin(i * 1*Cmod + time.time())),30)
if 3 <= sel < 4 :
color = (0,75,int(127 + 127 * math.sin(i * 1*Cmod + time.time())))
if 5 > sel >= 4 :
color = (int(127 + 127 * math.sin(i * (Cmod+.1) + time.time())),
int(127 + 127 * math.sin(i * (Cmod+.05) + time.time())),
int(127 + 127 * math.sin(i * (Cmod+.01) + time.time())))
if 6 > sel >= 5 :
color = (127*Cmod,
127*Cmod,
int(127 + 127 * math.sin(i * (Cmod+.1) + time.time())))
if 7 > sel >= 6 :
color = (127*Cmod,
int(127 + 127 * math.sin(i * (Cmod+.1) + time.time())),
127*Cmod)
if 8 > sel >= 7 :
color = (int(127 + 127 * math.sin(i * (Cmod+.1) + time.time())),
127*Cmod,
127*Cmod)
if sel >= 8 :
color = (int(127 + 127 * math.sin((i+30) * (1*Cmod+.01) + time.time())),
int(127 + 127 * math.sin((i+30) * (.5*Cmod+.005) + time.time())),
int(127 + 127 * math.sin((i+15) * (.1*Cmod+.001) + time.time())))
pygame.draw.line(screen, color, last_point, [x, y1], width)
speed = int(etc.knob1 * (2*speedrat)) - speedrat
x = x + speed
if x >= XR: x = 0
if 0 > x : x = XR
last_point = [x, y1]
avg_line_length: 31.932432 | max_line_length: 81 | alphanum_fraction: 0.484554

hexsha: 04a1a4fa4fa0f6205ef00d604fb3b281a753929e | size: 1,968 | ext: py | lang: Python
max_stars: lib/galaxy/schema/fields.py @ rhpvorderman/galaxy (178015f8eff0b0c7a59c0d6756658f6428222837) | licenses: ["CC-BY-3.0"] | count: 47 | events: 2015-10-21T23:30:30.000Z .. 2022-03-09T06:51:32.000Z
max_issues: lib/galaxy/schema/fields.py @ rhpvorderman/galaxy (178015f8eff0b0c7a59c0d6756658f6428222837) | licenses: ["CC-BY-3.0"] | count: 2 | events: 2022-02-28T02:36:23.000Z .. 2022-03-02T13:17:41.000Z
max_forks: lib/galaxy/schema/fields.py @ rhpvorderman/galaxy (178015f8eff0b0c7a59c0d6756658f6428222837) | licenses: ["CC-BY-3.0"] | count: 35 | events: 2015-10-30T13:09:40.000Z .. 2021-05-03T23:17:46.000Z
import re
from pydantic import (
Field,
)
ENCODED_DATABASE_ID_PATTERN = re.compile('f?[0-9a-f]+')
ENCODED_ID_LENGTH_MULTIPLE = 16
class EncodedDatabaseIdField(str):
"""
Encoded Database ID validation.
"""
@classmethod
def __get_validators__(cls):
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield cls.validate
@classmethod
def __modify_schema__(cls, field_schema):
# __modify_schema__ should mutate the dict it receives in place,
# the returned value will be ignored
field_schema.update(
min_length=16,
pattern='[0-9a-fA-F]+',
examples=['0123456789ABCDEF'],
)
@classmethod
def validate(cls, v):
if not isinstance(v, str):
raise TypeError('String required')
if v.startswith("F"):
# Library Folder ids start with an additional "F"
len_v = len(v) - 1
else:
len_v = len(v)
if len_v % ENCODED_ID_LENGTH_MULTIPLE:
raise ValueError('Invalid id length, must be multiple of 16')
m = ENCODED_DATABASE_ID_PATTERN.fullmatch(v.lower())
if not m:
raise ValueError('Invalid characters in encoded ID')
return cls(v)
def __repr__(self):
return f'EncodedDatabaseID ({super().__repr__()})'
def ModelClassField(class_name: str) -> str:
"""Represents a database model class name annotated as a constant
pydantic Field.
:param class_name: The name of the database class.
:return: A constant pydantic Field with default annotations for model classes.
"""
return Field(
class_name,
title="Model class",
description="The name of the database model class.",
const=True, # Make this field constant
)
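# A minimal usage sketch, not part of the original module; it assumes pydantic
# v1 semantics (matching the __get_validators__ hook above), and the model name
# and id values below are made up for illustration.
if __name__ == "__main__":
    from pydantic import BaseModel, ValidationError

    class _DemoModel(BaseModel):
        id: EncodedDatabaseIdField
        model_class: str = ModelClassField("DemoModel")

    print(_DemoModel(id="0123456789abcdef"))  # 16 hex characters: accepted
    try:
        _DemoModel(id="not-a-valid-id")
    except ValidationError as err:
        print(err)  # wrong length/characters: rejected by validate()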
avg_line_length: 30.276923 | max_line_length: 82 | alphanum_fraction: 0.633638

hexsha: aa10f062cb33dd34630d212adfeceadb55ca95aa | size: 4,832 | ext: py | lang: Python
max_stars: xl_tensorflow/utils/hyperparams_flags.py @ Lannister-Xiaolin/xl_tensorflow (99e0f458769ee1e45ebf55c789961e40f7d2eeac) | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: xl_tensorflow/utils/hyperparams_flags.py @ Lannister-Xiaolin/xl_tensorflow (99e0f458769ee1e45ebf55c789961e40f7d2eeac) | licenses: ["Apache-2.0"] | count: 1 | events: 2020-11-13T18:52:23.000Z .. 2020-11-13T18:52:23.000Z
max_forks: xl_tensorflow/utils/hyperparams_flags.py @ Lannister-Xiaolin/xl_tensorflow (99e0f458769ee1e45ebf55c789961e40f7d2eeac) | licenses: ["Apache-2.0"] | count: null | events: null
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common flags for importing hyperparameters."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
from absl import flags
from .flags import core as flags_core
FLAGS = flags.FLAGS
def define_gin_flags():
"""Define common gin configurable flags."""
flags.DEFINE_multi_string('gin_file', None,
'List of paths to the config files.')
flags.DEFINE_multi_string(
'gin_param', None, 'Newline separated list of Gin parameter bindings.')
def define_common_hparams_flags():
"""Define the common flags across models."""
flags.DEFINE_string(
'model_dir',
default=None,
    help=('The directory where the model and training/evaluation summaries '
          'are stored.'))
flags.DEFINE_integer(
'train_batch_size', default=None, help='Batch size for training.')
flags.DEFINE_integer(
'eval_batch_size', default=None, help='Batch size for evaluation.')
flags.DEFINE_string(
'precision',
default=None,
help=('Precision to use; one of: {bfloat16, float32}'))
flags.DEFINE_string(
'config_file',
default=None,
help=('A YAML file which specifies overrides. Note that this file can be '
'used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, the one in '
'`--params_override` will be used finally.'))
flags.DEFINE_string(
'params_override',
default=None,
help=('a YAML/JSON string or a YAML file which specifies additional '
'overrides over the default parameters and those specified in '
'`--config_file`. Note that this is supposed to be used only to '
'override the model parameters, but not the parameters like TPU '
'specific flags. One canonical use case of `--config_file` and '
'`--params_override` is users first define a template config file '
'using `--config_file`, then use `--params_override` to adjust the '
'minimal set of tuning parameters, for example setting up different'
' `train_batch_size`. '
'The final override order of parameters: default_model_params --> '
          'params from config_file --> params in params_override. '
'See also the help message of `--config_file`.'))
flags.DEFINE_integer('save_checkpoint_freq', None,
'Number of steps to save checkpoint.')
def initialize_common_flags():
"""Define the common flags across models."""
define_common_hparams_flags()
flags_core.define_device(tpu=True)
flags_core.define_base(
num_gpu=True, model_dir=False, data_dir=False, batch_size=False)
flags_core.define_distribution(worker_hosts=True, task_index=True)
flags_core.define_performance(all_reduce_alg=True, num_packs=True)
# Reset the default value of num_gpus to zero.
FLAGS.num_gpus = 0
flags.DEFINE_string(
'strategy_type', 'mirrored', 'Type of distribute strategy.'
'One of mirrored, tpu and multiworker.')
def strategy_flags_dict():
"""Returns TPU and/or GPU related flags in a dictionary."""
return {
'distribution_strategy': FLAGS.strategy_type,
# TPUStrategy related flags.
'tpu': FLAGS.tpu,
# MultiWorkerMirroredStrategy related flags.
'all_reduce_alg': FLAGS.all_reduce_alg,
'worker_hosts': FLAGS.worker_hosts,
'task_index': FLAGS.task_index,
# MirroredStrategy and OneDeviceStrategy
'num_gpus': FLAGS.num_gpus,
'num_packs': FLAGS.num_packs,
}
def hparam_flags_dict():
"""Returns model params related flags in a dictionary."""
return {
'data_dir': FLAGS.data_dir,
'model_dir': FLAGS.model_dir,
'train_batch_size': FLAGS.train_batch_size,
'eval_batch_size': FLAGS.eval_batch_size,
'precision': FLAGS.precision,
'config_file': FLAGS.config_file,
'params_override': FLAGS.params_override,
}
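# A minimal usage sketch, not part of the original module: run it with
# `python -m xl_tensorflow.utils.hyperparams_flags` so the relative `.flags`
# import above resolves; it just defines the common flags and echoes them.
if __name__ == '__main__':
  from absl import app

  def _show_flags(_):
    print(strategy_flags_dict())
    print(hparam_flags_dict())

  initialize_common_flags()
  app.run(_show_flags)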
avg_line_length: 37.457364 | max_line_length: 80 | alphanum_fraction: 0.683361

hexsha: 8d945b6e07dbbfa7352c98142f2cb2f6eb8e1203 | size: 24 | ext: py | lang: Python
max_stars: python/Exercicios/ex001.py @ Robert-Marchinhaki/primeiros-passos-Python (515c2c418bfb941bd9af14cf598eca7fe2985592) | licenses: ["MIT"] | count: null | events: null
max_issues: python/Exercicios/ex001.py @ Robert-Marchinhaki/primeiros-passos-Python (515c2c418bfb941bd9af14cf598eca7fe2985592) | licenses: ["MIT"] | count: null | events: null
max_forks: python/Exercicios/ex001.py @ Robert-Marchinhaki/primeiros-passos-Python (515c2c418bfb941bd9af14cf598eca7fe2985592) | licenses: ["MIT"] | count: null | events: null
# print('Olá, mundo!')
avg_line_length: 8 | max_line_length: 22 | alphanum_fraction: 0.541667

hexsha: 24c540fd567ba153174116d8c8fb197d0d698863 | size: 2,688 | ext: py | lang: Python
max_stars: custom/ilsgateway/tests/test_webusers_sync.py @ dslowikowski/commcare-hq (ad8885cf8dab69dc85cb64f37aeaf06106124797) | licenses: ["BSD-3-Clause"] | count: 1 | events: 2017-02-10T03:14:51.000Z .. 2017-02-10T03:14:51.000Z
max_issues: custom/ilsgateway/tests/test_webusers_sync.py @ dslowikowski/commcare-hq (ad8885cf8dab69dc85cb64f37aeaf06106124797) | licenses: ["BSD-3-Clause"] | count: null | events: null
max_forks: custom/ilsgateway/tests/test_webusers_sync.py @ dslowikowski/commcare-hq (ad8885cf8dab69dc85cb64f37aeaf06106124797) | licenses: ["BSD-3-Clause"] | count: null | events: null
from datetime import datetime
import json
import os
from django.test import TestCase
from corehq.apps.commtrack.tests.util import bootstrap_domain as initial_bootstrap
from corehq.apps.users.models import WebUser, UserRole
from custom.ilsgateway.api import ILSUser
from custom.logistics.commtrack import sync_ilsgateway_webuser, webusers_sync
from custom.logistics.models import MigrationCheckpoint
TEST_DOMAIN = 'ilsgateway-commtrack-webusers-test'
class WebUsersSyncTest(TestCase):
def setUp(self):
self.datapath = os.path.join(os.path.dirname(__file__), 'data')
initial_bootstrap(TEST_DOMAIN)
for user in WebUser.by_domain(TEST_DOMAIN):
user.delete()
def test_create_webuser(self):
with open(os.path.join(self.datapath, 'sample_webusers.json')) as f:
webuser = ILSUser(json.loads(f.read())[0])
self.assertEqual(0, len(WebUser.by_domain(TEST_DOMAIN)))
ilsgateway_webuser = sync_ilsgateway_webuser(TEST_DOMAIN, webuser)
self.assertEqual(webuser.email, ilsgateway_webuser.username)
self.assertEqual(webuser.password, ilsgateway_webuser.password)
self.assertEqual(webuser.first_name, ilsgateway_webuser.first_name)
self.assertEqual(webuser.last_name, ilsgateway_webuser.last_name)
self.assertEqual(webuser.is_active, ilsgateway_webuser.is_active)
self.assertEqual(False, ilsgateway_webuser.is_superuser)
self.assertEqual(False, ilsgateway_webuser.is_staff)
#self.assertEqual(webuser.location, ilsgateway_webuser.location)
#self.assertEqual(webuser.supply_point, ilsgateway_webuser.supply_point)
domain_name = ilsgateway_webuser.get_domains()[0]
self.assertEqual(TEST_DOMAIN, domain_name)
self.assertEqual(UserRole.get_read_only_role_by_domain(TEST_DOMAIN)._id,
ilsgateway_webuser.get_domain_membership(TEST_DOMAIN).role_id)
def test_webusers_migration(self):
from custom.ilsgateway.tests import MockEndpoint
checkpoint = MigrationCheckpoint(
domain=TEST_DOMAIN,
start_date=datetime.now(),
date=datetime.now(),
api='product',
limit=1000,
offset=0
)
webusers_sync(TEST_DOMAIN,
MockEndpoint('http://test-api.com/', 'dummy', 'dummy'),
checkpoint,
limit=1000,
offset=0)
self.assertEqual('webuser', checkpoint.api)
self.assertEqual(1000, checkpoint.limit)
self.assertEqual(0, checkpoint.offset)
self.assertEqual(5, len(list(WebUser.by_domain(TEST_DOMAIN))))
avg_line_length: 43.354839 | max_line_length: 87 | alphanum_fraction: 0.700149

hexsha: 525a40b0fcaa9e64167f94c0582ab62814fe2039 | size: 2,328 | ext: py | lang: Python
max_stars: src/som/primitives/known.py @ smarr/RTruffleSOM (1efc698577830ff3fcd1607e7155d9c6423e8804) | licenses: ["MIT"] | count: 9 | events: 2015-02-03T23:24:23.000Z .. 2020-06-28T23:49:59.000Z
max_issues: src/som/primitives/known.py @ SOM-st/RTruffleSOM (1efc698577830ff3fcd1607e7155d9c6423e8804) | licenses: ["MIT"] | count: null | events: null
max_forks: src/som/primitives/known.py @ SOM-st/RTruffleSOM (1efc698577830ff3fcd1607e7155d9c6423e8804) | licenses: ["MIT"] | count: 2 | events: 2016-08-28T23:25:20.000Z .. 2016-08-30T16:49:50.000Z
from rpython.rlib.unroll import unrolling_iterable
from ..interp_type import is_ast_interpreter, is_bytecode_interpreter
"""Captures the known primitives at load time of this module, i.e., at compile
time with RPython.
"""
EXPECTED_NUMBER_OF_PRIMITIVE_FILES = 13 if is_ast_interpreter() else 11
class PrimitivesNotFound(Exception): pass
def _is_primitives_class(e):
"NOT_RPYTHON"
from som.primitives.primitives import Primitives
import inspect
_, entry = e
return (inspect.isclass(entry) and
issubclass(entry, Primitives)
and entry is not Primitives)
def _setup_primitives():
"NOT_RPYTHON"
from importlib import import_module
import inspect
import py
base_package = "som.primitives."
if is_ast_interpreter():
base_package += 'ast.'
interp_dir = 'ast'
elif is_bytecode_interpreter():
base_package += 'bc.'
interp_dir = 'bc'
else:
interp_dir = ''
directory = py.path.local(__file__).dirpath(interp_dir)
files = filter(lambda ent: ent.basename.endswith("_primitives.py"),
directory.listdir())
mods = map(lambda mod: import_module(base_package + mod.purebasename),
files)
all_members = map(lambda module: inspect.getmembers(module),
mods)
all_members = reduce(lambda all, each: all + each, all_members)
all_prims = filter(_is_primitives_class, all_members)
prim_pairs = map(lambda (name, cls):
(name[:name.find("Primitives")], cls), all_prims)
if EXPECTED_NUMBER_OF_PRIMITIVE_FILES != len(prim_pairs):
print ""
print "SOM PRIMITIVE DISCOVERY: following primitives found:"
for name, clazz in prim_pairs:
print " - %s" % name
print "Expected number of primitive files: %d, found %d" % (
EXPECTED_NUMBER_OF_PRIMITIVE_FILES, len(prim_pairs))
print "ERROR: did not find the expected number of primitive files!"
import sys
sys.exit(1)
return prim_pairs
_primitives = unrolling_iterable(_setup_primitives())
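# unrolling_iterable lets the RPython translator unroll the lookup loop in
# primitives_for_class() below into a chain of constant comparisons.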
def primitives_for_class(cls):
name = cls.get_name().get_embedded_string()
for key, primitives in _primitives:
if key == name:
return primitives
raise PrimitivesNotFound
avg_line_length: 31.459459 | max_line_length: 78 | alphanum_fraction: 0.667526

hexsha: 05b0c7cd8a987849068c8d409a494959c1b79627 | size: 26,896 | ext: py | lang: Python
max_stars: toontown/building/DistributedDoor.py @ TheFamiliarScoot/open-toontown (678313033174ea7d08e5c2823bd7b473701ff547) | licenses: ["BSD-3-Clause"] | count: 99 | events: 2019-11-02T22:25:00.000Z .. 2022-02-03T03:48:00.000Z
max_issues: toontown/building/DistributedDoor.py @ TheFamiliarScoot/open-toontown (678313033174ea7d08e5c2823bd7b473701ff547) | licenses: ["BSD-3-Clause"] | count: 42 | events: 2019-11-03T05:31:08.000Z .. 2022-03-16T22:50:32.000Z
max_forks: toontown/building/DistributedDoor.py @ TheFamiliarScoot/open-toontown (678313033174ea7d08e5c2823bd7b473701ff547) | licenses: ["BSD-3-Clause"] | count: 57 | events: 2019-11-03T07:47:37.000Z .. 2022-03-22T00:41:49.000Z
from toontown.toonbase.ToonBaseGlobal import *
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from toontown.toonbase import ToontownGlobals
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.distributed import DistributedObject
from toontown.hood import ZoneUtil
from toontown.suit import Suit
from toontown.distributed import DelayDelete
from . import FADoorCodes
from direct.task.Task import Task
from . import DoorTypes
from toontown.toontowngui import TTDialog
from toontown.toonbase import TTLocalizer
from toontown.toontowngui import TeaserPanel
from toontown.distributed.DelayDeletable import DelayDeletable
if (__debug__):
import pdb
class DistributedDoor(DistributedObject.DistributedObject, DelayDeletable):
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.openSfx = base.loader.loadSfx('phase_3.5/audio/sfx/Door_Open_1.ogg')
self.closeSfx = base.loader.loadSfx('phase_3.5/audio/sfx/Door_Close_1.ogg')
self.nametag = None
self.fsm = ClassicFSM.ClassicFSM('DistributedDoor_right', [State.State('off', self.enterOff, self.exitOff, ['closing',
'closed',
'opening',
'open']),
State.State('closing', self.enterClosing, self.exitClosing, ['closed', 'opening']),
State.State('closed', self.enterClosed, self.exitClosed, ['opening']),
State.State('opening', self.enterOpening, self.exitOpening, ['open']),
State.State('open', self.enterOpen, self.exitOpen, ['closing', 'open'])], 'off', 'off')
self.fsm.enterInitialState()
self.exitDoorFSM = ClassicFSM.ClassicFSM('DistributedDoor_left', [State.State('off', self.exitDoorEnterOff, self.exitDoorExitOff, ['closing',
'closed',
'opening',
'open']),
State.State('closing', self.exitDoorEnterClosing, self.exitDoorExitClosing, ['closed', 'opening']),
State.State('closed', self.exitDoorEnterClosed, self.exitDoorExitClosed, ['opening']),
State.State('opening', self.exitDoorEnterOpening, self.exitDoorExitOpening, ['open']),
State.State('open', self.exitDoorEnterOpen, self.exitDoorExitOpen, ['closing', 'open'])], 'off', 'off')
self.exitDoorFSM.enterInitialState()
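        # Two mirrored state machines: self.fsm animates the right door leaf
        # (avatars entering the building), self.exitDoorFSM the left leaf
        # (avatars coming back out).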
self.specialDoorTypes = {DoorTypes.EXT_HQ: 0,
DoorTypes.EXT_COGHQ: 0,
DoorTypes.INT_COGHQ: 0,
DoorTypes.EXT_KS: 0,
DoorTypes.INT_KS: 0}
self.doorX = 1.5
return
def generate(self):
DistributedObject.DistributedObject.generate(self)
self.avatarTracks = []
self.avatarExitTracks = []
self.avatarIDList = []
self.avatarExitIDList = []
self.doorTrack = None
self.doorExitTrack = None
return
def disable(self):
self.clearNametag()
taskMgr.remove(self.checkIsDoorHitTaskName())
self.ignore(self.getEnterTriggerEvent())
self.ignore(self.getExitTriggerEvent())
self.ignore('clearOutToonInterior')
self.fsm.request('off')
self.exitDoorFSM.request('off')
if 'building' in self.__dict__:
del self.building
self.finishAllTracks()
self.avatarIDList = []
self.avatarExitIDList = []
if hasattr(self, 'tempDoorNodePath'):
self.tempDoorNodePath.removeNode()
del self.tempDoorNodePath
DistributedObject.DistributedObject.disable(self)
def delete(self):
del self.fsm
del self.exitDoorFSM
del self.openSfx
del self.closeSfx
DistributedObject.DistributedObject.delete(self)
def wantsNametag(self):
return not ZoneUtil.isInterior(self.zoneId)
def setupNametag(self):
if not self.wantsNametag():
return
if self.nametag == None:
self.nametag = NametagGroup()
self.nametag.setFont(ToontownGlobals.getBuildingNametagFont())
if TTLocalizer.BuildingNametagShadow:
self.nametag.setShadow(*TTLocalizer.BuildingNametagShadow)
self.nametag.setContents(Nametag.CName)
self.nametag.setColorCode(NametagGroup.CCToonBuilding)
self.nametag.setActive(0)
self.nametag.setAvatar(self.getDoorNodePath())
self.nametag.setObjectCode(self.block)
name = self.cr.playGame.dnaStore.getTitleFromBlockNumber(self.block)
self.nametag.setName(name)
self.nametag.manage(base.marginManager)
return
def clearNametag(self):
if self.nametag != None:
self.nametag.unmanage(base.marginManager)
self.nametag.setAvatar(NodePath())
self.nametag = None
return
def getTriggerName(self):
if self.doorType == DoorTypes.INT_HQ or self.doorType in self.specialDoorTypes:
return 'door_trigger_' + str(self.block) + '_' + str(self.doorIndex)
else:
return 'door_trigger_' + str(self.block)
def getTriggerName_wip(self):
name = 'door_trigger_%d' % (self.doId,)
return name
def getEnterTriggerEvent(self):
return 'enter' + self.getTriggerName()
def getExitTriggerEvent(self):
return 'exit' + self.getTriggerName()
def hideDoorParts(self):
if self.doorType in self.specialDoorTypes:
self.hideIfHasFlat(self.findDoorNode('rightDoor'))
self.hideIfHasFlat(self.findDoorNode('leftDoor'))
self.findDoorNode('doorFrameHoleRight').hide()
self.findDoorNode('doorFrameHoleLeft').hide()
else:
return
def setTriggerName(self):
if self.doorType in self.specialDoorTypes:
building = self.getBuilding()
doorTrigger = building.find('**/door_' + str(self.doorIndex) + '/**/door_trigger*')
doorTrigger.node().setName(self.getTriggerName())
else:
return
def setTriggerName_wip(self):
building = self.getBuilding()
doorTrigger = building.find('**/door_%d/**/door_trigger_%d' % (self.doorIndex, self.block))
if doorTrigger.isEmpty():
doorTrigger = building.find('**/door_trigger_%d' % (self.block,))
if doorTrigger.isEmpty():
doorTrigger = building.find('**/door_%d/**/door_trigger_*' % (self.doorIndex,))
if doorTrigger.isEmpty():
doorTrigger = building.find('**/door_trigger_*')
doorTrigger.node().setName(self.getTriggerName())
def setZoneIdAndBlock(self, zoneId, block):
self.zoneId = zoneId
self.block = block
def setDoorType(self, doorType):
self.notify.debug('Door type = ' + str(doorType) + ' on door #' + str(self.doId))
self.doorType = doorType
def setDoorIndex(self, doorIndex):
self.doorIndex = doorIndex
def setSwing(self, flags):
self.leftSwing = flags & 1 != 0
self.rightSwing = flags & 2 != 0
def setOtherZoneIdAndDoId(self, zoneId, distributedObjectID):
self.otherZoneId = zoneId
self.otherDoId = distributedObjectID
def setState(self, state, timestamp):
self.fsm.request(state, [globalClockDelta.localElapsedTime(timestamp)])
def setExitDoorState(self, state, timestamp):
self.exitDoorFSM.request(state, [globalClockDelta.localElapsedTime(timestamp)])
def announceGenerate(self):
DistributedObject.DistributedObject.announceGenerate(self)
self.doPostAnnounceGenerate()
def doPostAnnounceGenerate(self):
if self.doorType == DoorTypes.INT_STANDARD:
self.bHasFlat = True
else:
self.bHasFlat = not self.findDoorNode('door*flat', True).isEmpty()
self.hideDoorParts()
self.setTriggerName()
self.accept(self.getEnterTriggerEvent(), self.doorTrigger)
self.acceptOnce('clearOutToonInterior', self.doorTrigger)
self.setupNametag()
def getBuilding(self):
if 'building' not in self.__dict__:
if self.doorType == DoorTypes.INT_STANDARD:
door = render.find('**/leftDoor;+s')
self.building = door.getParent()
elif self.doorType == DoorTypes.INT_HQ:
door = render.find('**/door_0')
self.building = door.getParent()
elif self.doorType == DoorTypes.INT_KS:
self.building = render.find('**/KartShop_Interior*')
elif self.doorType == DoorTypes.EXT_STANDARD or self.doorType == DoorTypes.EXT_HQ or self.doorType == DoorTypes.EXT_KS:
self.building = self.cr.playGame.hood.loader.geom.find('**/??' + str(self.block) + ':*_landmark_*_DNARoot;+s')
if self.building.isEmpty():
self.building = self.cr.playGame.hood.loader.geom.find('**/??' + str(self.block) + ':animated_building_*_DNARoot;+s')
elif self.doorType == DoorTypes.EXT_COGHQ or self.doorType == DoorTypes.INT_COGHQ:
self.building = self.cr.playGame.hood.loader.geom
else:
self.notify.error('No such door type as ' + str(self.doorType))
return self.building
def getBuilding_wip(self):
if 'building' not in self.__dict__:
if 'block' in self.__dict__:
self.building = self.cr.playGame.hood.loader.geom.find('**/??' + str(self.block) + ':*_landmark_*_DNARoot;+s')
else:
self.building = self.cr.playGame.hood.loader.geom
print('---------------- door is interior -------')
return self.building
def readyToExit(self):
base.transitions.fadeScreen(1.0)
self.sendUpdate('requestExit')
def avatarEnterDoorTrack(self, avatar, duration):
trackName = 'avatarEnterDoor-%d-%d' % (self.doId, avatar.doId)
track = Parallel(name=trackName)
otherNP = self.getDoorNodePath()
if hasattr(avatar, 'stopSmooth'):
avatar.stopSmooth()
if avatar.doId == base.localAvatar.doId:
track.append(LerpPosHprInterval(nodePath=camera, other=avatar, duration=duration, pos=Point3(0, -8, avatar.getHeight()), hpr=VBase3(0, 0, 0), blendType='easeInOut'))
finalPos = avatar.getParent().getRelativePoint(otherNP, Point3(self.doorX, 2, ToontownGlobals.FloorOffset))
moveHere = Sequence(self.getAnimStateInterval(avatar, 'walk'), LerpPosInterval(nodePath=avatar, duration=duration, pos=finalPos, blendType='easeIn'))
track.append(moveHere)
if avatar.doId == base.localAvatar.doId:
track.append(Sequence(Wait(duration * 0.5), Func(base.transitions.irisOut, duration * 0.5), Wait(duration * 0.5), Func(avatar.b_setParent, ToontownGlobals.SPHidden)))
track.delayDelete = DelayDelete.DelayDelete(avatar, 'avatarEnterDoorTrack')
return track
def avatarEnqueueTrack(self, avatar, duration):
if hasattr(avatar, 'stopSmooth'):
avatar.stopSmooth()
back = -5.0 - 2.0 * len(self.avatarIDList)
if back < -9.0:
back = -9.0
offset = Point3(self.doorX, back, ToontownGlobals.FloorOffset)
otherNP = self.getDoorNodePath()
walkLike = ActorInterval(avatar, 'walk', startTime=1, duration=duration, endTime=0.0001)
standHere = Sequence(LerpPosHprInterval(nodePath=avatar, other=otherNP, duration=duration, pos=offset, hpr=VBase3(0, 0, 0), blendType='easeInOut'), self.getAnimStateInterval(avatar, 'neutral'))
trackName = 'avatarEnqueueDoor-%d-%d' % (self.doId, avatar.doId)
track = Parallel(walkLike, standHere, name=trackName)
track.delayDelete = DelayDelete.DelayDelete(avatar, 'avatarEnqueueTrack')
return track
def getAnimStateInterval(self, avatar, animName):
isSuit = isinstance(avatar, Suit.Suit)
if isSuit:
return Func(avatar.loop, animName, 0)
else:
return Func(avatar.setAnimState, animName)
def isDoorHit(self):
vec = base.localAvatar.getRelativeVector(self.currentDoorNp, self.currentDoorVec)
netScale = self.currentDoorNp.getNetTransform().getScale()
yToTest = vec.getY() / netScale[1]
return yToTest < -0.5
def enterDoor(self):
if self.allowedToEnter():
messenger.send('DistributedDoor_doorTrigger')
self.sendUpdate('requestEnter')
else:
place = base.cr.playGame.getPlace()
if place:
place.fsm.request('stopped')
self.dialog = TeaserPanel.TeaserPanel(pageName='otherHoods', doneFunc=self.handleOkTeaser)
def handleOkTeaser(self):
self.accept(self.getEnterTriggerEvent(), self.doorTrigger)
self.dialog.destroy()
del self.dialog
place = base.cr.playGame.getPlace()
if place:
place.fsm.request('walk')
def allowedToEnter(self, zoneId = None):
allowed = False
if hasattr(base, 'ttAccess') and base.ttAccess:
if zoneId:
allowed = base.ttAccess.canAccess(zoneId)
else:
allowed = base.ttAccess.canAccess()
return allowed
def checkIsDoorHitTaskName(self):
return 'checkIsDoorHit' + self.getTriggerName()
def checkIsDoorHitTask(self, task):
if self.isDoorHit():
self.ignore(self.checkIsDoorHitTaskName())
self.ignore(self.getExitTriggerEvent())
self.enterDoor()
return Task.done
return Task.cont
def cancelCheckIsDoorHitTask(self, args):
taskMgr.remove(self.checkIsDoorHitTaskName())
del self.currentDoorNp
del self.currentDoorVec
self.ignore(self.getExitTriggerEvent())
self.accept(self.getEnterTriggerEvent(), self.doorTrigger)
def doorTrigger(self, args = None):
self.ignore(self.getEnterTriggerEvent())
if args == None:
self.enterDoor()
else:
self.currentDoorNp = NodePath(args.getIntoNodePath())
self.currentDoorVec = Vec3(args.getSurfaceNormal(self.currentDoorNp))
if self.isDoorHit():
self.enterDoor()
else:
self.accept(self.getExitTriggerEvent(), self.cancelCheckIsDoorHitTask)
taskMgr.add(self.checkIsDoorHitTask, self.checkIsDoorHitTaskName())
return
def avatarEnter(self, avatarID):
avatar = self.cr.doId2do.get(avatarID, None)
if avatar:
avatar.setAnimState('neutral')
track = self.avatarEnqueueTrack(avatar, 0.5)
track.start()
self.avatarTracks.append(track)
self.avatarIDList.append(avatarID)
return
def rejectEnter(self, reason):
message = FADoorCodes.reasonDict[reason]
if message:
self.__faRejectEnter(message)
else:
self.__basicRejectEnter()
def __basicRejectEnter(self):
self.accept(self.getEnterTriggerEvent(), self.doorTrigger)
if self.cr.playGame.getPlace():
self.cr.playGame.getPlace().setState('walk')
def __faRejectEnter(self, message):
self.rejectDialog = TTDialog.TTGlobalDialog(message=message, doneEvent='doorRejectAck', style=TTDialog.Acknowledge)
self.rejectDialog.show()
self.rejectDialog.delayDelete = DelayDelete.DelayDelete(self, '__faRejectEnter')
event = 'clientCleanup'
self.acceptOnce(event, self.__handleClientCleanup)
base.cr.playGame.getPlace().setState('stopped')
self.acceptOnce('doorRejectAck', self.__handleRejectAck)
self.acceptOnce('stoppedAsleep', self.__handleFallAsleepDoor)
def __handleClientCleanup(self):
if hasattr(self, 'rejectDialog') and self.rejectDialog:
self.rejectDialog.doneStatus = 'ok'
self.__handleRejectAck()
def __handleFallAsleepDoor(self):
self.rejectDialog.doneStatus = 'ok'
self.__handleRejectAck()
def __handleRejectAck(self):
self.ignore('doorRejectAck')
self.ignore('stoppedAsleep')
self.ignore('clientCleanup')
doneStatus = self.rejectDialog.doneStatus
if doneStatus != 'ok':
self.notify.error('Unrecognized doneStatus: ' + str(doneStatus))
self.__basicRejectEnter()
self.rejectDialog.delayDelete.destroy()
self.rejectDialog.cleanup()
del self.rejectDialog
def getDoorNodePath(self):
if self.doorType == DoorTypes.INT_STANDARD:
otherNP = render.find('**/door_origin')
elif self.doorType == DoorTypes.EXT_STANDARD:
if hasattr(self, 'tempDoorNodePath'):
return self.tempDoorNodePath
else:
posHpr = self.cr.playGame.dnaStore.getDoorPosHprFromBlockNumber(self.block)
otherNP = NodePath('doorOrigin')
otherNP.setPos(posHpr.getPos())
otherNP.setHpr(posHpr.getHpr())
self.tempDoorNodePath = otherNP
elif self.doorType in self.specialDoorTypes:
building = self.getBuilding()
otherNP = building.find('**/door_origin_' + str(self.doorIndex))
elif self.doorType == DoorTypes.INT_HQ:
otherNP = render.find('**/door_origin_' + str(self.doorIndex))
else:
self.notify.error('No such door type as ' + str(self.doorType))
return otherNP
def avatarExitTrack(self, avatar, duration):
if hasattr(avatar, 'stopSmooth'):
avatar.stopSmooth()
otherNP = self.getDoorNodePath()
trackName = 'avatarExitDoor-%d-%d' % (self.doId, avatar.doId)
track = Sequence(name=trackName)
track.append(self.getAnimStateInterval(avatar, 'walk'))
track.append(PosHprInterval(avatar, Point3(-self.doorX, 0, ToontownGlobals.FloorOffset), VBase3(179, 0, 0), other=otherNP))
track.append(Func(avatar.setParent, ToontownGlobals.SPRender))
if avatar.doId == base.localAvatar.doId:
track.append(PosHprInterval(camera, VBase3(-self.doorX, 5, avatar.getHeight()), VBase3(180, 0, 0), other=otherNP))
if avatar.doId == base.localAvatar.doId:
finalPos = render.getRelativePoint(otherNP, Point3(-self.doorX, -6, ToontownGlobals.FloorOffset))
else:
finalPos = render.getRelativePoint(otherNP, Point3(-self.doorX, -3, ToontownGlobals.FloorOffset))
track.append(LerpPosInterval(nodePath=avatar, duration=duration, pos=finalPos, blendType='easeInOut'))
if avatar.doId == base.localAvatar.doId:
track.append(Func(self.exitCompleted))
track.append(Func(base.transitions.irisIn))
if hasattr(avatar, 'startSmooth'):
track.append(Func(avatar.startSmooth))
track.delayDelete = DelayDelete.DelayDelete(avatar, 'DistributedDoor.avatarExitTrack')
return track
def exitCompleted(self):
base.localAvatar.setAnimState('neutral')
place = self.cr.playGame.getPlace()
if place:
place.setState('walk')
base.localAvatar.d_setParent(ToontownGlobals.SPRender)
def avatarExit(self, avatarID):
if avatarID in self.avatarIDList:
self.avatarIDList.remove(avatarID)
if avatarID == base.localAvatar.doId:
self.exitCompleted()
else:
self.avatarExitIDList.append(avatarID)
def finishDoorTrack(self):
if self.doorTrack:
self.doorTrack.finish()
self.doorTrack = None
return
def finishDoorExitTrack(self):
if self.doorExitTrack:
self.doorExitTrack.finish()
self.doorExitTrack = None
return
def finishAllTracks(self):
self.finishDoorTrack()
self.finishDoorExitTrack()
for t in self.avatarTracks:
t.finish()
DelayDelete.cleanupDelayDeletes(t)
self.avatarTracks = []
for t in self.avatarExitTracks:
t.finish()
DelayDelete.cleanupDelayDeletes(t)
self.avatarExitTracks = []
def enterOff(self):
pass
def exitOff(self):
pass
def getRequestStatus(self):
zoneId = self.otherZoneId
request = {'loader': ZoneUtil.getBranchLoaderName(zoneId),
'where': ZoneUtil.getToonWhereName(zoneId),
'how': 'doorIn',
'hoodId': ZoneUtil.getHoodId(zoneId),
'zoneId': zoneId,
'shardId': None,
'avId': -1,
'allowRedirect': 0,
'doorDoId': self.otherDoId}
return request
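        # This status dict is broadcast with 'doorDoneEvent' once the door has
        # closed behind the local avatar (see enterClosing), letting the place
        # FSM load the zone on the other side of the door.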
def enterClosing(self, ts):
doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight')
if doorFrameHoleRight.isEmpty():
self.notify.warning('enterClosing(): did not find doorFrameHoleRight')
return
rightDoor = self.findDoorNode('rightDoor')
if rightDoor.isEmpty():
self.notify.warning('enterClosing(): did not find rightDoor')
return
otherNP = self.getDoorNodePath()
trackName = 'doorClose-%d' % self.doId
if self.rightSwing:
h = 100
else:
h = -100
self.finishDoorTrack()
self.doorTrack = Sequence(LerpHprInterval(nodePath=rightDoor, duration=1.0, hpr=VBase3(0, 0, 0), startHpr=VBase3(h, 0, 0), other=otherNP, blendType='easeInOut'), Func(doorFrameHoleRight.hide), Func(self.hideIfHasFlat, rightDoor), SoundInterval(self.closeSfx, node=rightDoor), name=trackName)
self.doorTrack.start(ts)
if hasattr(self, 'done'):
request = self.getRequestStatus()
messenger.send('doorDoneEvent', [request])
def exitClosing(self):
pass
def enterClosed(self, ts):
pass
def exitClosed(self):
pass
def enterOpening(self, ts):
doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight')
if doorFrameHoleRight.isEmpty():
self.notify.warning('enterOpening(): did not find doorFrameHoleRight')
return
rightDoor = self.findDoorNode('rightDoor')
if rightDoor.isEmpty():
self.notify.warning('enterOpening(): did not find rightDoor')
return
otherNP = self.getDoorNodePath()
trackName = 'doorOpen-%d' % self.doId
if self.rightSwing:
h = 100
else:
h = -100
self.finishDoorTrack()
self.doorTrack = Parallel(SoundInterval(self.openSfx, node=rightDoor), Sequence(HprInterval(rightDoor, VBase3(0, 0, 0), other=otherNP), Wait(0.4), Func(rightDoor.show), Func(doorFrameHoleRight.show), LerpHprInterval(nodePath=rightDoor, duration=0.6, hpr=VBase3(h, 0, 0), startHpr=VBase3(0, 0, 0), other=otherNP, blendType='easeInOut')), name=trackName)
self.doorTrack.start(ts)
def exitOpening(self):
pass
def enterOpen(self, ts):
for avatarID in self.avatarIDList:
avatar = self.cr.doId2do.get(avatarID)
if avatar:
track = self.avatarEnterDoorTrack(avatar, 1.0)
track.start(ts)
self.avatarTracks.append(track)
if avatarID == base.localAvatar.doId:
self.done = 1
self.avatarIDList = []
def exitOpen(self):
for track in self.avatarTracks:
track.finish()
DelayDelete.cleanupDelayDeletes(track)
self.avatarTracks = []
def exitDoorEnterOff(self):
pass
def exitDoorExitOff(self):
pass
def exitDoorEnterClosing(self, ts):
doorFrameHoleLeft = self.findDoorNode('doorFrameHoleLeft')
if doorFrameHoleLeft.isEmpty():
            self.notify.warning('exitDoorEnterClosing(): did not find doorFrameHoleLeft')
return
if self.leftSwing:
h = -100
else:
h = 100
leftDoor = self.findDoorNode('leftDoor')
if not leftDoor.isEmpty():
otherNP = self.getDoorNodePath()
trackName = 'doorExitTrack-%d' % self.doId
self.finishDoorExitTrack()
self.doorExitTrack = Sequence(LerpHprInterval(nodePath=leftDoor, duration=1.0, hpr=VBase3(0, 0, 0), startHpr=VBase3(h, 0, 0), other=otherNP, blendType='easeInOut'), Func(doorFrameHoleLeft.hide), Func(self.hideIfHasFlat, leftDoor), SoundInterval(self.closeSfx, node=leftDoor), name=trackName)
self.doorExitTrack.start(ts)
def exitDoorExitClosing(self):
pass
def exitDoorEnterClosed(self, ts):
pass
def exitDoorExitClosed(self):
pass
def exitDoorEnterOpening(self, ts):
doorFrameHoleLeft = self.findDoorNode('doorFrameHoleLeft')
if doorFrameHoleLeft.isEmpty():
            self.notify.warning('exitDoorEnterOpening(): did not find doorFrameHoleLeft')
return
leftDoor = self.findDoorNode('leftDoor')
if self.leftSwing:
h = -100
else:
h = 100
if not leftDoor.isEmpty():
otherNP = self.getDoorNodePath()
trackName = 'doorDoorExitTrack-%d' % self.doId
self.finishDoorExitTrack()
self.doorExitTrack = Parallel(SoundInterval(self.openSfx, node=leftDoor), Sequence(Func(leftDoor.show), Func(doorFrameHoleLeft.show), LerpHprInterval(nodePath=leftDoor, duration=0.6, hpr=VBase3(h, 0, 0), startHpr=VBase3(0, 0, 0), other=otherNP, blendType='easeInOut')), name=trackName)
self.doorExitTrack.start(ts)
else:
self.notify.warning('exitDoorEnterOpening(): did not find leftDoor')
def exitDoorExitOpening(self):
pass
def exitDoorEnterOpen(self, ts):
for avatarID in self.avatarExitIDList:
avatar = self.cr.doId2do.get(avatarID)
if avatar:
track = self.avatarExitTrack(avatar, 0.2)
track.start()
self.avatarExitTracks.append(track)
self.avatarExitIDList = []
def exitDoorExitOpen(self):
for track in self.avatarExitTracks:
track.finish()
DelayDelete.cleanupDelayDeletes(track)
self.avatarExitTracks = []
def findDoorNode(self, string, allowEmpty = False):
building = self.getBuilding()
if not building:
self.notify.warning('getBuilding() returned None, avoiding crash, remark 896029')
foundNode = None
else:
foundNode = building.find('**/door_' + str(self.doorIndex) + '/**/' + string + '*;+s+i')
if foundNode.isEmpty():
foundNode = building.find('**/' + string + '*;+s+i')
if allowEmpty:
return foundNode
return foundNode
def hideIfHasFlat(self, node):
if self.bHasFlat:
node.hide()
avg_line_length: 41.062595 | max_line_length: 360 | alphanum_fraction: 0.635931

hexsha: 18e990c5cc1958bb33df2e7ba46bff9934c0309c | size: 20,135 | ext: py | lang: Python
max_stars: qtp_target_gene/validate.py @ charles-cowart/qtp-target-gene (b8a12eb8cd375a885e7a6f342e6a03796aadcfcc) | licenses: ["BSD-3-Clause"] | count: null | events: null
max_issues: qtp_target_gene/validate.py @ charles-cowart/qtp-target-gene (b8a12eb8cd375a885e7a6f342e6a03796aadcfcc) | licenses: ["BSD-3-Clause"] | count: null | events: null
max_forks: qtp_target_gene/validate.py @ charles-cowart/qtp-target-gene (b8a12eb8cd375a885e7a6f342e6a03796aadcfcc) | licenses: ["BSD-3-Clause"] | count: 1 | events: 2019-08-30T18:22:58.000Z .. 2019-08-30T18:22:58.000Z
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import basename, join, splitext
from json import loads
from shutil import copy
from h5py import File
from qiita_client import ArtifactInfo
from qiita_files.util import open_file
from qiita_files.demux import to_hdf5, to_ascii_file
FILEPATH_TYPE_DICT = {
'SFF': ({'raw_sff'}, set()),
'FASTQ': ({'raw_forward_seqs', 'raw_barcodes'}, {'raw_reverse_seqs'}),
'FASTA': ({'raw_fasta', 'raw_qual'}, set()),
'FASTA_Sanger': ({'raw_fasta'}, set()),
}
def _validate_multiple(qclient, job_id, prep_info, files, atype):
"""Validate and fix a new 'SFF', 'FASTQ', 'FASTA' or 'FASTA_Sanger' artifact
Parameters
----------
qclient : qiita_client.QiitaClient
The Qiita server client
job_id : str
The job id
prep_info : dict of {str: dict of {str: str}}
The prep information keyed by sample id
files : dict of {str: list of str}
The files to add to the new artifact, keyed by filepath type
atype: str
The type of the artifact
Returns
-------
dict
The results of the job
"""
qclient.update_job_step(job_id, "Step 2: Validating '%s' files" % atype)
req_fp_types, opt_fp_types = FILEPATH_TYPE_DICT[atype]
all_fp_types = req_fp_types | opt_fp_types
# Check if there is any filepath type that is not supported
unsupported_fp_types = set(files) - all_fp_types
if unsupported_fp_types:
error_msg = ("Filepath type(s) %s not supported by artifact "
"type %s. Supported filepath types: %s"
% (', '.join(unsupported_fp_types), atype,
', '.join(sorted(all_fp_types))))
return False, None, error_msg
# Check if the run_prefix column is present in the prep info
offending = {}
types_seen = set()
if 'run_prefix' in prep_info[next(iter(prep_info))]:
# We can potentially have more than one lane in the prep information
# so check that the provided files are prefixed with the values in
# the run_prefix column
run_prefixes = set(v['run_prefix'] for k, v in prep_info.items())
num_prefixes = len(run_prefixes)
# Check those filepath types that are required
for ftype, t_files in files.items():
# SFF is an special case cause we can have multiple files with
# the same prefix
if num_prefixes != len(t_files) and atype != 'SFF':
offending[ftype] = (
"The number of provided files (%d) doesn't match the "
"number of run prefix values in the prep info (%d): %s"
% (len(t_files), num_prefixes,
', '.join(basename(f) for f in t_files)))
else:
rps = []
fps = []
for fp in t_files:
bn = basename(fp)
found = [rp for rp in run_prefixes if bn.startswith(rp)]
if found:
rps.extend(found)
else:
fps.append(bn)
if fps:
offending[ftype] = (
"The provided files do not match the run prefix "
"values in the prep information: %s" % ', '.join(fps))
else:
rps = run_prefixes - set(rps)
if rps:
offending[ftype] = (
"The following run prefixes in the prep "
"information file do not match any file: %s"
% ', '.join(rps))
types_seen.add(ftype)
else:
# If the run prefix column is not provided, we only allow a single
# lane, so check that we have a single file for each provided
# filepath type
for ftype, t_files in files.items():
if len(t_files) != 1:
offending[ftype] = (
"Only one file per type is allowed. Please provide the "
"column 'run_prefix' if you need more than one file per "
"type: %s" % ', '.join(basename(fp) for fp in t_files))
types_seen.add(ftype)
    # Check that all required filepath types were present
missing = req_fp_types - types_seen
if missing:
error_msg = ("Missing required filepath type(s): %s"
% ', '.join(missing))
return False, None, error_msg
# Check if there was any offending file
if offending:
error_list = ["%s: %s" % (k, v) for k, v in offending.items()]
error_msg = ("Error creating artifact. Offending files:\n%s"
% '\n'.join(error_list))
return False, None, error_msg
# Everything is ok
filepaths = []
for fps_type, fps in files.items():
filepaths.extend([(fp, fps_type) for fp in fps])
return True, [ArtifactInfo(None, atype, filepaths)], ""
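# Illustrative input shape only (file names below are made up): for atype
# 'FASTQ' the helper above expects something like
#   files = {'raw_forward_seqs': ['run1_R1.fastq.gz'],
#            'raw_barcodes': ['run1_I1.fastq.gz']}
# and returns (True, [ArtifactInfo(None, 'FASTQ', filepaths)], "") when every
# required filepath type is present and matches the run prefixes (if any) in
# the prep information.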
def _validate_per_sample_FASTQ(qclient, job_id, prep_info, files):
"""Validate and fix a new 'per_sample_FASTQ' artifact
Parameters
----------
qclient : qiita_client.QiitaClient
The Qiita server client
job_id : str
The job id
prep_info : dict of {str: dict of {str: str}}
The prep information keyed by sample id
files : dict of {str: list of str}
The files to add to the new artifact, keyed by filepath type
Returns
-------
dict
The results of the job
"""
qclient.update_job_step(
job_id, "Step 2: Validating 'per_sample_FASTQ' files")
    samples = list(prep_info.keys())  # list() so samples can be indexed below
samples_count = len(samples)
# Check if there is any filepath type that is not supported
unsupported_fp_types = set(files) - {'raw_forward_seqs',
'raw_reverse_seqs',
'preprocessed_fastq'}
if unsupported_fp_types:
error_msg = ("Filepath type(s) %s not supported by artifact "
"type per_sample_FASTQ. Supported filepath types: "
"raw_forward_seqs, raw_reverse_seqs, preprocessed_fastq"
% ', '.join(unsupported_fp_types))
return False, None, error_msg
if 'raw_forward_seqs' in files:
if 'preprocessed_fastq' in files:
error_msg = ("If raw_forward_seqs is provided, preprocessed_fastq "
"should not be provided")
return False, None, error_msg
read_files = files['raw_forward_seqs']
read_files_count = len(read_files)
counts_match = read_files_count == samples_count
elif 'preprocessed_fastq' in files:
if 'raw_reverse_seqs' in files:
error_msg = ("If preprocessed_fastq is provided, raw_reverse_seqs "
"should not be provided")
return False, None, error_msg
read_files = files['preprocessed_fastq']
read_files_count = len(read_files)
# In the preprocessed_fastq case, we either have 1 file per sample
# or 4 files per sample
counts_match = ((read_files_count == samples_count) or
(read_files_count == 4 * samples_count))
else:
error_msg = ("Missing required filepath type: raw_forward_seqs or "
"preprocessed_fastq")
return False, None, error_msg
    # Make sure that we have the same number of files as samples
if 'raw_reverse_seqs' in files:
rev_count = len(files['raw_reverse_seqs'])
counts_match = counts_match and (rev_count == samples_count)
else:
rev_count = 0
if not counts_match:
error_msg = ("The number of provided files doesn't match the "
"number of samples (%d): %d raw_forward_seqs, "
"%d raw_reverse_seqs (optional, 0 is ok)"
% (samples_count, read_files_count, rev_count))
return False, None, error_msg
def _check_files(run_prefixes, read_files, rev_count, files):
# Check that the provided files match the run prefixes
fwd_fail = [basename(fp) for fp in read_files
if not basename(fp).startswith(tuple(run_prefixes))]
if rev_count > 0:
rev_fail = [basename(fp) for fp in files['raw_reverse_seqs']
if not basename(fp).startswith(tuple(run_prefixes))]
else:
rev_fail = []
return fwd_fail, rev_fail
    # first let's check via the sample names
run_prefixes = [sid.split('.', 1)[1] for sid in samples]
fwd_fail, rev_fail = _check_files(run_prefixes, read_files,
rev_count, files)
# if that doesn't work, let's test via run_prefix
run_prefix_present = 'run_prefix' in prep_info[samples[0]]
if (fwd_fail or rev_fail) and run_prefix_present:
run_prefixes = [v['run_prefix'] for k, v in prep_info.items()]
if samples_count != len(set(run_prefixes)):
repeated = ["%s (%d)" % (p, run_prefixes.count(p))
for p in set(run_prefixes)
if run_prefixes.count(p) > 1]
error_msg = ("The values for the column 'run_prefix' are not "
"unique for each sample. Repeated values: %s"
% ', '.join(repeated))
return False, None, error_msg
fwd_fail, rev_fail = _check_files(run_prefixes, read_files,
rev_count, files)
if fwd_fail or rev_fail:
error_msg = "The provided files are not prefixed by sample id"
if run_prefix_present:
error_msg += (" or do not match the run prefix values in the "
"prep information.")
else:
error_msg += "."
error_msg += (" Offending files:\n raw_forward_seqs: %s\n"
"raw_reverse_seqs: %s" % (', '.join(fwd_fail),
', '.join(rev_fail)))
return False, None, error_msg
filepaths = []
for fps_type, fps in files.items():
filepaths.extend([(fp, fps_type) for fp in fps])
return True, [ArtifactInfo(None, 'per_sample_FASTQ', filepaths)], ""
def _validate_demux_file(qclient, job_id, prep_info, out_dir, demux_fp,
fastq_fp=None, fasta_fp=None, log_fp=None):
"""Validate and fix a 'demux' file and regenerate fastq and fasta files
Parameters
----------
qclient : qiita_client.QiitaClient
The Qiita server client
job_id : str
The job id
prep_info : dict of {str: dict of {str: str}}
The prep information keyed by sample id
out_dir : str
The output directory
demux_fp : str
The demux file path
fastq_fp : str, optional
The original fastq filepath. If demux is correct, it will not be
regenerated
fasta_fp : str, optional
        The original fasta filepath. If demux is correct, it will not be
regenerated
log_fp : str, optional
The original log filepath
Returns
-------
dict
        The results of the job
"""
pt_sample_ids = set(prep_info)
with open_file(demux_fp) as f:
demux_sample_ids = set(f.keys())
if not pt_sample_ids.issuperset(demux_sample_ids):
# The demux sample ids are different from the ones in the prep template
qclient.update_job_step(job_id, "Step 3: Fixing sample ids")
        # Attempt 1: the user provided the run prefix column - in this case the
# run prefix column holds the sample ids present in the demux file
if 'run_prefix' in prep_info[next(iter(pt_sample_ids))]:
id_map = {v['run_prefix']: k for k, v in prep_info.items()}
if not set(id_map).issuperset(demux_sample_ids):
error_msg = ('The sample ids in the "run_prefix" columns '
'from the prep information do not match the '
'ones in the demux file. Please, correct the '
'column "run_prefix" in the prep information to '
'map the existing sample ids to the prep '
'information sample ids.')
return False, None, error_msg
else:
# Attempt 2: the sample ids in the demux table are the same that
# in the prep template but without the prefix
prefix = next(iter(pt_sample_ids)).split('.', 1)[0]
prefixed = set("%s.%s" % (prefix, s) for s in demux_sample_ids)
if pt_sample_ids.issuperset(prefixed):
id_map = {s: "%s.%s" % (prefix, s) for s in demux_sample_ids}
else:
# There is nothing we can do. The samples in the demux file do
# not match the ones in the prep template and we can't fix it
error_msg = ('The sample ids in the demultiplexed files do '
'not match the ones in the prep information. '
'Please, provide the column "run_prefix" in '
'the prep information to map the existing sample'
' ids to the prep information sample ids.')
return False, None, error_msg
# Fix the sample ids
# Do not modify the original demux file, copy it to a new location
new_demux_fp = join(out_dir, basename(demux_fp))
# this if is important so we don't regenerate the demux file if the
# user uploads fastq or fna
if demux_fp != new_demux_fp:
copy(demux_fp, new_demux_fp)
demux_fp = new_demux_fp
with open_file(demux_fp, 'r+') as f:
for old in f:
f.move(old, id_map[old])
# When we fix, we always generate the FASTQ and FASTA file
# By setting them to None, below will be generated
fastq_fp = None
fasta_fp = None
    # If we didn't fix anything, we only generate the files if they don't
    # already exist
name = splitext(basename(demux_fp))[0]
if not fastq_fp:
fastq_fp = join(out_dir, "%s.fastq" % name)
to_ascii_file(demux_fp, fastq_fp, out_format='fastq')
if not fasta_fp:
fasta_fp = join(out_dir, "%s.fasta" % name)
to_ascii_file(demux_fp, fasta_fp, out_format='fasta')
filepaths = [(fastq_fp, 'preprocessed_fastq'),
(fasta_fp, 'preprocessed_fasta'),
(demux_fp, 'preprocessed_demux')]
if log_fp:
filepaths.append((log_fp, 'log'))
return True, [ArtifactInfo(None, 'Demultiplexed', filepaths)], ""
def _validate_demultiplexed(qclient, job_id, prep_info, files, out_dir):
"""Validate and fix a new 'Demultiplexed' artifact
Parameters
----------
qclient : qiita_client.QiitaClient
The Qiita server client
job_id : str
The job id
prep_info : dict of {str: dict of {str: str}}
The prep information keyed by sample id
files : dict of {str: list of str}
The files to add to the new artifact, keyed by filepath type
out_dir : str
The output directory
Returns
-------
dict
The results of the job
"""
qclient.update_job_step(job_id, "Step 2: Validating 'Demultiplexed' files")
supported_fp_types = {'preprocessed_fasta', 'preprocessed_fastq',
'preprocessed_demux', 'log'}
unsupported_fp_types = set(files) - supported_fp_types
if unsupported_fp_types:
error_msg = ("Filepath type(s) %s not supported by artifact type "
"Demultiplexed. Supported filepath types: %s"
% (', '.join(unsupported_fp_types),
', '.join(sorted(supported_fp_types))))
return False, None, error_msg
# At most one file of each type can be provided
offending = set(fp_t for fp_t, fps in files.items() if len(fps) > 1)
if offending:
errors = ["%s (%d): %s"
% (fp_t, len(files[fp_t]), ', '.join(files[fp_t]))
for fp_t in sorted(offending)]
error_msg = ("Only one filepath of each file type is supported, "
"offending types:\n%s" % "; ".join(errors))
return False, None, error_msg
# Check which files we have available:
fasta = (files['preprocessed_fasta'][0]
if 'preprocessed_fasta' in files else None)
fastq = (files['preprocessed_fastq'][0]
if 'preprocessed_fastq' in files else None)
demux = (files['preprocessed_demux'][0]
if 'preprocessed_demux' in files else None)
log = (files['log'][0] if 'log' in files else None)
if demux:
# If demux is available, use that one to perform the validation and
# generate the fasta and fastq from it
success, a_info, error_msg = _validate_demux_file(
qclient, job_id, prep_info, out_dir, demux, log_fp=log)
elif fastq:
# Generate the demux file from the fastq
demux = join(out_dir, "%s.demux" % splitext(basename(fastq))[0])
with File(demux, 'w') as f:
# to_hdf5 expects a list
to_hdf5([fastq], f)
# Validate the demux, providing the original fastq
success, a_info, error_msg = _validate_demux_file(
qclient, job_id, prep_info, out_dir, demux, fastq_fp=fastq,
log_fp=log)
elif fasta:
# Generate the demux file from the fasta
demux = join(out_dir, "%s.demux" % splitext(basename(fasta))[0])
with File(demux, 'w') as f:
# to_hdf5 expects a list
to_hdf5([fasta], f)
# Validate the demux, providing the original fasta
success, a_info, error_msg = _validate_demux_file(
qclient, job_id, prep_info, out_dir, demux, fasta_fp=fasta,
log_fp=log)
else:
error_msg = ("Either a 'preprocessed_demux', 'preprocessed_fastq' or "
"'preprocessed_fasta' file should be provided.")
return False, None, error_msg
return success, a_info, error_msg
def validate(qclient, job_id, parameters, out_dir):
"""Validae and fix a new artifact
Parameters
----------
qclient : qiita_client.QiitaClient
The Qiita server client
job_id : str
The job id
parameters : dict
The parameter values to validate and create the artifact
out_dir : str
The path to the job's output directory
Returns
-------
dict
The results of the job
Raises
------
ValueError
If there is any error gathering the information from the server
"""
prep_id = parameters['template']
files = loads(parameters['files'])
a_type = parameters['artifact_type']
qclient.update_job_step(job_id, "Step 1: Collecting prep information")
prep_info = qclient.get("/qiita_db/prep_template/%s/data/" % prep_id)
prep_info = prep_info['data']
if a_type in ['SFF', 'FASTQ', 'FASTA', 'FASTA_Sanger']:
return _validate_multiple(qclient, job_id, prep_info, files, a_type)
elif a_type == 'per_sample_FASTQ':
return _validate_per_sample_FASTQ(qclient, job_id, prep_info, files)
elif a_type == 'Demultiplexed':
return _validate_demultiplexed(qclient, job_id, prep_info, files,
out_dir)
else:
error_msg = ("Unknown artifact_type %s. Supported types: 'SFF', "
"'FASTQ', 'FASTA', 'FASTA_Sanger', 'per_sample_FASTQ', "
"'Demultiplexed'" % a_type)
return False, None, error_msg
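# A minimal usage sketch (hypothetical ids and paths; qclient would be a
# connected qiita_client.QiitaClient). 'files' arrives as a JSON string keyed
# by filepath type, which validate() loads before dispatching on artifact_type:
#
#     parameters = {'template': 1,
#                   'artifact_type': 'per_sample_FASTQ',
#                   'files': json.dumps(
#                       {'raw_forward_seqs': ['/data/1.s1_R1.fastq.gz',
#                                             '/data/1.s2_R1.fastq.gz']})}
#     success, artifacts, error_msg = validate(qclient, 'job-id', parameters,
#                                              '/tmp/validate-out')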
| 40.924797
| 80
| 0.584107
|
ddcc5e3d0967ca24b7e571b3d0fb391063970a74
| 2,188
|
py
|
Python
|
test/scons-time/run/option/help.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | 1
|
2019-09-18T06:37:02.000Z
|
2019-09-18T06:37:02.000Z
|
test/scons-time/run/option/help.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | null | null | null |
test/scons-time/run/option/help.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/scons-time/run/option/help.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that the run -h option (and variants) prints help.
"""
import TestSCons_time
test = TestSCons_time.TestSCons_time()
expect = [
"Usage: scons-time run [OPTIONS] [FILE ...]\n",
" -h, --help Print this help and exit\n",
" -n, --no-exec No execute, just print command lines\n",
" -q, --quiet Don't print command lines\n",
" -v, --verbose Display output of commands\n",
]
test.run(arguments = 'run -h')
test.must_contain_all_lines(test.stdout(), expect)
test.run(arguments = 'run -?')
test.must_contain_all_lines(test.stdout(), expect)
test.run(arguments = 'run --help')
test.must_contain_all_lines(test.stdout(), expect)
test.run(arguments = 'help run')
test.must_contain_all_lines(test.stdout(), expect)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 33.151515
| 108
| 0.71298
|
9a9ec3ba5eae623be7e7b2e2f2caa66506e99e56
| 15,278
|
py
|
Python
|
venv/Lib/site-packages/traits/observation/tests/test_observe.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | 1
|
2022-01-18T17:56:51.000Z
|
2022-01-18T17:56:51.000Z
|
venv/Lib/site-packages/traits/observation/tests/test_observe.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/traits/observation/tests/test_observe.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | null | null | null |
# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import unittest
from unittest import mock
from traits.has_traits import HasTraits
from traits.trait_types import Instance, Int
from traits.observation.api import (
pop_exception_handler,
push_exception_handler,
)
from traits.observation._exceptions import NotifierNotFound
from traits.observation.expression import trait
from traits.observation.observe import (
observe,
)
from traits.observation._observer_graph import ObserverGraph
from traits.observation._testing import (
call_add_or_remove_notifiers,
create_graph,
DummyNotifier,
DummyObservable,
DummyObserver,
)
class TestObserveAddNotifier(unittest.TestCase):
""" Test the add_notifiers action."""
def test_add_trait_notifiers(self):
observable = DummyObservable()
notifier = DummyNotifier()
observer = DummyObserver(
notify=True,
observables=[observable],
notifier=notifier,
)
graph = ObserverGraph(node=observer)
# when
call_add_or_remove_notifiers(
graph=graph,
remove=False,
)
# then
self.assertEqual(observable.notifiers, [notifier])
def test_add_trait_notifiers_notify_flag_is_false(self):
# Test when the notify flag is false, the notifier is not
# added.
observable = DummyObservable()
notifier = DummyNotifier()
observer = DummyObserver(
notify=False,
observables=[observable],
notifier=notifier,
)
graph = ObserverGraph(node=observer)
# when
call_add_or_remove_notifiers(
graph=graph,
remove=False,
)
# then
self.assertEqual(observable.notifiers, [])
def test_add_maintainers(self):
# Test adding maintainers for children graphs
observable = DummyObservable()
maintainer = DummyNotifier()
root_observer = DummyObserver(
notify=False,
observables=[observable],
maintainer=maintainer,
)
# two children, each will have a maintainer
graph = ObserverGraph(
node=root_observer,
children=[
ObserverGraph(node=DummyObserver()),
ObserverGraph(node=DummyObserver()),
],
)
# when
call_add_or_remove_notifiers(
graph=graph,
remove=False,
)
# then
        # the dummy observer always returns the same maintainer object.
self.assertEqual(
observable.notifiers, [maintainer, maintainer])
def test_add_notifiers_for_children_graphs(self):
# Test adding notifiers using children graphs
observable1 = DummyObservable()
child_observer1 = DummyObserver(
observables=[observable1],
)
observable2 = DummyObservable()
child_observer2 = DummyObserver(
observables=[observable2],
)
parent_observer = DummyObserver(
next_objects=[mock.Mock()],
)
graph = ObserverGraph(
node=parent_observer,
children=[
ObserverGraph(
node=child_observer1,
),
ObserverGraph(
node=child_observer2,
)
],
)
# when
call_add_or_remove_notifiers(
graph=graph,
remove=False,
)
# then
self.assertCountEqual(
observable1.notifiers,
[
# For child1 observer
child_observer1.notifier,
]
)
self.assertCountEqual(
observable2.notifiers,
[
# For child2 observer
child_observer2.notifier,
]
)
def test_add_notifiers_for_extra_graph(self):
observable = DummyObservable()
extra_notifier = DummyNotifier()
extra_observer = DummyObserver(
observables=[observable],
notifier=extra_notifier,
)
extra_graph = ObserverGraph(
node=extra_observer,
)
observer = DummyObserver(
extra_graphs=[extra_graph],
)
graph = ObserverGraph(node=observer)
# when
call_add_or_remove_notifiers(
graph=graph,
remove=False,
)
# then
self.assertEqual(
observable.notifiers, [extra_notifier]
)
def test_add_notifier_atomic(self):
class BadNotifier(DummyNotifier):
def add_to(self, observable):
raise ZeroDivisionError()
observable = DummyObservable()
good_observer = DummyObserver(
notify=True,
observables=[observable],
next_objects=[mock.Mock()],
notifier=DummyNotifier(),
maintainer=DummyNotifier(),
)
bad_observer = DummyObserver(
notify=True,
observables=[observable],
notifier=BadNotifier(),
maintainer=DummyNotifier(),
)
graph = create_graph(
good_observer,
bad_observer,
)
# when
with self.assertRaises(ZeroDivisionError):
call_add_or_remove_notifiers(
object=mock.Mock(),
graph=graph,
)
# then
self.assertEqual(observable.notifiers, [])
class TestObserveRemoveNotifier(unittest.TestCase):
""" Test the remove action."""
def test_remove_trait_notifiers(self):
observable = DummyObservable()
notifier = DummyNotifier()
observable.notifiers = [notifier]
observer = DummyObserver(
observables=[observable],
notifier=notifier,
)
graph = ObserverGraph(
node=observer,
)
# when
call_add_or_remove_notifiers(
graph=graph,
remove=True,
)
# then
self.assertEqual(observable.notifiers, [])
def test_remove_notifiers_skip_if_notify_flag_is_false(self):
observable = DummyObservable()
notifier = DummyNotifier()
observable.notifiers = [notifier]
observer = DummyObserver(
notify=False,
observables=[observable],
notifier=notifier,
)
graph = ObserverGraph(
node=observer,
)
# when
call_add_or_remove_notifiers(
graph=graph,
remove=True,
)
# then
# notify is false, remove does nothing.
self.assertEqual(
observable.notifiers, [notifier]
)
def test_remove_maintainers(self):
observable = DummyObservable()
maintainer = DummyNotifier()
observable.notifiers = [maintainer, maintainer]
root_observer = DummyObserver(
notify=False,
observables=[observable],
maintainer=maintainer,
)
        # since there are two children graphs,
        # two maintainers will be removed.
graph = ObserverGraph(
node=root_observer,
children=[
ObserverGraph(node=DummyObserver()),
ObserverGraph(node=DummyObserver()),
],
)
# when
call_add_or_remove_notifiers(
graph=graph,
remove=True,
)
# then
self.assertEqual(observable.notifiers, [])
def test_remove_notifiers_for_children_graphs(self):
observable1 = DummyObservable()
notifier1 = DummyNotifier()
child_observer1 = DummyObserver(
observables=[observable1],
notifier=notifier1,
)
observable2 = DummyObservable()
notifier2 = DummyNotifier()
child_observer2 = DummyObserver(
observables=[observable2],
notifier=notifier2,
)
parent_observer = DummyObserver(
next_objects=[mock.Mock()],
)
graph = ObserverGraph(
node=parent_observer,
children=[
ObserverGraph(
node=child_observer1,
),
ObserverGraph(
node=child_observer2,
)
],
)
# suppose notifiers were added
observable1.notifiers = [notifier1]
observable2.notifiers = [notifier2]
# when
call_add_or_remove_notifiers(
graph=graph,
remove=True,
)
# then
self.assertEqual(observable1.notifiers, [])
self.assertEqual(observable2.notifiers, [])
def test_remove_notifiers_for_extra_graph(self):
observable = DummyObservable()
extra_notifier = DummyNotifier()
extra_observer = DummyObserver(
observables=[observable],
notifier=extra_notifier,
)
extra_graph = ObserverGraph(
node=extra_observer,
)
observer = DummyObserver(
extra_graphs=[extra_graph],
)
graph = ObserverGraph(node=observer)
# suppose the notifier was added before
observable.notifiers = [extra_notifier]
# when
call_add_or_remove_notifiers(
graph=graph,
remove=True,
)
# then
self.assertEqual(observable.notifiers, [])
def test_remove_notifier_raises_let_error_propagate(self):
# Test if the notifier remove_from raises, the error will
# be propagated.
# DummyNotifier.remove_from raises if the notifier is not found.
observer = DummyObserver(
observables=[DummyObservable()],
notifier=DummyNotifier(),
)
with self.assertRaises(NotifierNotFound):
call_add_or_remove_notifiers(
graph=ObserverGraph(node=observer),
remove=True,
)
def test_remove_atomic(self):
# Test atomicity
notifier = DummyNotifier()
maintainer = DummyNotifier()
observable1 = DummyObservable()
observable1.notifiers = [
notifier,
maintainer,
]
old_observable1_notifiers = observable1.notifiers.copy()
observable2 = DummyObservable()
observable2.notifiers = [maintainer]
old_observable2_notifiers = observable2.notifiers.copy()
observable3 = DummyObservable()
observable3.notifiers = [
notifier,
maintainer,
]
old_observable3_notifiers = observable3.notifiers.copy()
observer = DummyObserver(
notify=True,
observables=[
observable1,
observable2,
observable3,
],
notifier=notifier,
maintainer=maintainer,
)
graph = create_graph(
observer,
DummyObserver(), # Need a child graph to get maintainer in
)
# when
with self.assertRaises(NotifierNotFound):
call_add_or_remove_notifiers(
object=mock.Mock(),
graph=graph,
remove=True,
)
# then
# as if nothing has happened, the order might not be maintained though!
self.assertCountEqual(
observable1.notifiers,
old_observable1_notifiers,
)
self.assertCountEqual(
observable2.notifiers,
old_observable2_notifiers,
)
self.assertCountEqual(
observable3.notifiers,
old_observable3_notifiers,
)
# ---- Tests for public facing `observe` --------------------------------------
class ClassWithNumber(HasTraits):
number = Int()
class ClassWithInstance(HasTraits):
instance = Instance(ClassWithNumber)
class TestObserverIntegration(unittest.TestCase):
""" Test the public facing observe function."""
def setUp(self):
push_exception_handler(reraise_exceptions=True)
self.addCleanup(pop_exception_handler)
def test_observe_with_expression(self):
foo = ClassWithNumber()
handler = mock.Mock()
observe(
object=foo,
expression=trait("number"),
handler=handler,
)
# when
foo.number += 1
# then
self.assertEqual(handler.call_count, 1)
handler.reset_mock()
# when
observe(
object=foo,
expression=trait("number"),
handler=handler,
remove=True,
)
foo.number += 1
# then
self.assertEqual(handler.call_count, 0)
def test_observe_different_dispatcher(self):
self.dispatch_records = []
def dispatcher(handler, event):
self.dispatch_records.append((handler, event))
foo = ClassWithNumber()
handler = mock.Mock()
# when
observe(
object=foo,
expression=trait("number"),
handler=handler,
dispatcher=dispatcher,
)
foo.number += 1
# then
# the dispatcher is called.
self.assertEqual(len(self.dispatch_records), 1)
def test_observe_different_target(self):
# Test the result of setting target to be the same as object
parent1 = ClassWithInstance()
parent2 = ClassWithInstance()
# the instance is shared
instance = ClassWithNumber()
parent1.instance = instance
parent2.instance = instance
handler = mock.Mock()
# when
observe(
object=parent1,
expression=trait("instance").trait("number"),
handler=handler,
)
observe(
object=parent2,
expression=trait("instance").trait("number"),
handler=handler,
)
instance.number += 1
# then
# the handler should be called twice as the targets are different.
self.assertEqual(handler.call_count, 2)
def test_observe_with_any_callables_accepting_one_argument(self):
# If it is a callable that works with one positional argument, it
# can be used.
def handler_with_one_pos_arg(arg, *, optional=None):
pass
callables = [
repr,
lambda e: False,
handler_with_one_pos_arg,
]
for callable_ in callables:
with self.subTest(callable=callable_):
instance = ClassWithNumber()
instance.observe(callable_, "number")
instance.number += 1
| 27.330948
| 79
| 0.571344
|
6e5d2066c9c9271da1e0e42125b6f76abbe44e29
| 1,349
|
py
|
Python
|
web-server/constants.py
|
valgarn/fraud-detection-framework
|
52ce63a41af42de541354f32a3fb4bae773f2f86
|
[
"Apache-2.0"
] | null | null | null |
web-server/constants.py
|
valgarn/fraud-detection-framework
|
52ce63a41af42de541354f32a3fb4bae773f2f86
|
[
"Apache-2.0"
] | null | null | null |
web-server/constants.py
|
valgarn/fraud-detection-framework
|
52ce63a41af42de541354f32a3fb4bae773f2f86
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The Fraud Detection Framework Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
REQUEST_TIMEOUT = (60, 180)
MESSAGE_TYPE_INFO = 5
MESSAGE_TYPE_WARNING = 4
MESSAGE_TYPE_ERROR = 3
DB_CONNECTION_STRING = 'postgresql://postgres:password@localhost:5432/fdf'
EXCEPTION_WAIT_SEC = 5
SETTING_STATUS_NAME = 'status'
SETTING_STATUS_PROCESSING = 'processing'
SETTING_STATUS_STOPPED = 'stopped'
SETTING_STATUS_RELOAD = 'reload'
SETTING_STATUS_CLEAN = 'clean'
SETTING_STATUS_PREPARING = 'preparing'
SETTING_STATUS_PREPARED = 'prepared'
SETTING_STATUS_PAUSED = 'paused'
SETTING_REFRESH_DATA_NAME = 'refreshData'
SETTING_REFRESH_DATA_TRUE = '1'
SETTING_REFRESH_DATA_FALSE = '0'
DATA_FOLDER = 'Data'
TF_LOG_LEVEL = "2"
TYPE_PHOTO_FRAUD_DETECTION = 1
FDF_PYD_PATH = "./fdf"
STATUS_NONE = 0
STATUS_COMPLETED = 1
| 28.104167
| 79
| 0.784285
|
3969797ceddd88bff171157462e576fffe239745
| 6,165
|
py
|
Python
|
src/ebay_rest/api/commerce_notification/models/subscription_payload_detail.py
|
gbm001/ebay_rest
|
077d3478423ccd80ff35e0361821d6a11180bc54
|
[
"MIT"
] | null | null | null |
src/ebay_rest/api/commerce_notification/models/subscription_payload_detail.py
|
gbm001/ebay_rest
|
077d3478423ccd80ff35e0361821d6a11180bc54
|
[
"MIT"
] | null | null | null |
src/ebay_rest/api/commerce_notification/models/subscription_payload_detail.py
|
gbm001/ebay_rest
|
077d3478423ccd80ff35e0361821d6a11180bc54
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Notification API
    The eBay Notification API enables management of the entire end-to-end eBay notification experience by allowing users to:<ul><li>Browse for supported notification topics and retrieve topic details</li><li>Create, configure, and manage notification destination endpoints</li><li>Configure, manage, and test notification subscriptions</li><li>Process eBay notifications and verify the integrity of the message payload</li></ul>  # noqa: E501
OpenAPI spec version: v1.2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SubscriptionPayloadDetail(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'delivery_protocol': 'str',
'format': 'str',
'schema_version': 'str'
}
attribute_map = {
'delivery_protocol': 'deliveryProtocol',
'format': 'format',
'schema_version': 'schemaVersion'
}
def __init__(self, delivery_protocol=None, format=None, schema_version=None): # noqa: E501
"""SubscriptionPayloadDetail - a model defined in Swagger""" # noqa: E501
self._delivery_protocol = None
self._format = None
self._schema_version = None
self.discriminator = None
if delivery_protocol is not None:
self.delivery_protocol = delivery_protocol
if format is not None:
self.format = format
if schema_version is not None:
self.schema_version = schema_version
@property
def delivery_protocol(self):
"""Gets the delivery_protocol of this SubscriptionPayloadDetail. # noqa: E501
        The supported protocol. For example: <code>HTTPS</code>. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/commerce/notification/types/api:ProtocolEnum'>eBay API documentation</a>  # noqa: E501
:return: The delivery_protocol of this SubscriptionPayloadDetail. # noqa: E501
:rtype: str
"""
return self._delivery_protocol
@delivery_protocol.setter
def delivery_protocol(self, delivery_protocol):
"""Sets the delivery_protocol of this SubscriptionPayloadDetail.
        The supported protocol. For example: <code>HTTPS</code>. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/commerce/notification/types/api:ProtocolEnum'>eBay API documentation</a>  # noqa: E501
:param delivery_protocol: The delivery_protocol of this SubscriptionPayloadDetail. # noqa: E501
:type: str
"""
self._delivery_protocol = delivery_protocol
@property
def format(self):
"""Gets the format of this SubscriptionPayloadDetail. # noqa: E501
The supported format. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/commerce/notification/types/api:FormatTypeEnum'>eBay API documentation</a> # noqa: E501
:return: The format of this SubscriptionPayloadDetail. # noqa: E501
:rtype: str
"""
return self._format
@format.setter
def format(self, format):
"""Sets the format of this SubscriptionPayloadDetail.
The supported format. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/commerce/notification/types/api:FormatTypeEnum'>eBay API documentation</a> # noqa: E501
:param format: The format of this SubscriptionPayloadDetail. # noqa: E501
:type: str
"""
self._format = format
@property
def schema_version(self):
"""Gets the schema_version of this SubscriptionPayloadDetail. # noqa: E501
The supported schema version. # noqa: E501
:return: The schema_version of this SubscriptionPayloadDetail. # noqa: E501
:rtype: str
"""
return self._schema_version
@schema_version.setter
def schema_version(self, schema_version):
"""Sets the schema_version of this SubscriptionPayloadDetail.
The supported schema version. # noqa: E501
:param schema_version: The schema_version of this SubscriptionPayloadDetail. # noqa: E501
:type: str
"""
self._schema_version = schema_version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SubscriptionPayloadDetail, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SubscriptionPayloadDetail):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
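# Illustrative round trip (hypothetical values, not tied to a real
# subscription):
#
#     detail = SubscriptionPayloadDetail(delivery_protocol='HTTPS',
#                                        format='JSON',
#                                        schema_version='1.0')
#     detail.to_dict()
#     # {'delivery_protocol': 'HTTPS', 'format': 'JSON', 'schema_version': '1.0'}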
| 36.47929
| 442
| 0.636983
|
68efa54fc3fd50df8ac1a3fec57e7e04ef8ef9ab
| 347
|
py
|
Python
|
palletier/__init__.py
|
valiot/palletier
|
9950bd0637f0c73a0add6a79e945dbcaffc948b7
|
[
"MIT"
] | 8
|
2018-11-17T09:11:13.000Z
|
2021-06-18T12:13:14.000Z
|
palletier/__init__.py
|
DisruptiveAngels/palletier
|
9950bd0637f0c73a0add6a79e945dbcaffc948b7
|
[
"MIT"
] | 4
|
2018-03-20T03:18:02.000Z
|
2018-08-27T11:26:59.000Z
|
palletier/__init__.py
|
DisruptiveAngels/palletier
|
9950bd0637f0c73a0add6a79e945dbcaffc948b7
|
[
"MIT"
] | 3
|
2017-12-29T10:27:25.000Z
|
2018-06-14T23:17:29.000Z
|
# -*- coding: utf-8 -*-
"""Top-level package for palletier."""
__author__ = """Alan Velasco"""
__email__ = 'alanvelasco.a@gmail.com'
__version__ = '0.1.0'
from palletier.box import Box
from palletier.pallet import Pallet
from palletier.packedpallet import PackedPallet
from palletier.palletier import Solver
from palletier.packer import Packer
| 24.785714
| 47
| 0.763689
|
f8f363c83ee6956756b8e2a7058c4745b134ff00
| 385
|
py
|
Python
|
ccms/wsgi.py
|
esbozos/centinela-cms
|
cf47e9a42d851c2f56895472de736ebd9fccda6b
|
[
"MIT"
] | 2
|
2015-09-22T04:13:22.000Z
|
2015-10-08T05:21:52.000Z
|
ccms/wsgi.py
|
esbozos/centinela_cms
|
cf47e9a42d851c2f56895472de736ebd9fccda6b
|
[
"MIT"
] | null | null | null |
ccms/wsgi.py
|
esbozos/centinela_cms
|
cf47e9a42d851c2f56895472de736ebd9fccda6b
|
[
"MIT"
] | null | null | null |
"""
WSGI config for ccms project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ccms.settings")
application = get_wsgi_application()
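# Typically served by a WSGI server, e.g. (illustrative):
#     gunicorn ccms.wsgi:application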
| 22.647059
| 78
| 0.781818
|
aa0882db4cac11a4026562e12fe747695f49349f
| 6,541
|
py
|
Python
|
ote_sdk/ote_sdk/entities/label.py
|
bes-dev/training_extensions
|
7b016e3bd02ae7c74d60fd5a0ae0912a42ef87cb
|
[
"Apache-2.0"
] | 775
|
2019-03-01T02:13:33.000Z
|
2020-09-07T22:49:15.000Z
|
ote_sdk/ote_sdk/entities/label.py
|
bes-dev/training_extensions
|
7b016e3bd02ae7c74d60fd5a0ae0912a42ef87cb
|
[
"Apache-2.0"
] | 229
|
2019-02-28T21:37:08.000Z
|
2020-09-07T15:11:49.000Z
|
ote_sdk/ote_sdk/entities/label.py
|
bes-dev/training_extensions
|
7b016e3bd02ae7c74d60fd5a0ae0912a42ef87cb
|
[
"Apache-2.0"
] | 290
|
2019-02-28T20:32:11.000Z
|
2020-09-07T05:51:41.000Z
|
# Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
"""This module define the label entity."""
import datetime
from enum import Enum, auto
from typing import Optional
from ote_sdk.entities.color import Color
from ote_sdk.entities.id import ID
from ote_sdk.utils.time_utils import now
class Domain(Enum):
"""
Describes an algorithm domain like classification, detection, ...
"""
NULL = auto()
CLASSIFICATION = auto()
DETECTION = auto()
SEGMENTATION = auto()
ANOMALY_CLASSIFICATION = auto()
ANOMALY_DETECTION = auto()
ANOMALY_SEGMENTATION = auto()
INSTANCE_SEGMENTATION = auto()
ROTATED_DETECTION = auto()
def __str__(self):
return str(self.name)
class LabelEntity:
"""
This represents a label. The Label is the object that the user annotates
and the tasks predict.
For example, a label with name "car" can be constructed as follows.
>>> car = LabelEntity(name="car", domain=Domain.DETECTION)
.. rubric:: About Empty Label
In addition to representing the presence of a certain object, the label can also
be used to represent the absence of objects in the image (or other media types).
Such a label is referred to as empty label.
The empty label is constructed as follows:
>>> empty = LabelEntity(name="empty", domain=Domain.DETECTION, is_empty=True)
Empty label is used to declare that there is nothing of interest inside this image.
    For example, let's assume a car detection project. During the annotation
    process, for positive images (images with cars), the users are asked to
    annotate the images with bounding boxes and the car label. However, when
    the user sees a negative image (no car), the user needs to annotate this
    image with an empty label.
The empty label is particularly useful to distinguish images with no objects
of interest from images that have not been annotated, especially in task-chain
    scenario. Let's assume a car detection task that is followed by another
    detection task which detects the driver inside the car. There are two issues here:
    1. The user can (intentionally or unintentionally) fail to annotate
    the driver inside a car.
2. There is no driver inside the car.
Without empty label, these two cases cannot be distinguished.
This is why an empty label is introduced. The empty label makes an explicit
distinction between missing annotations and "negative" images.
:param name: the name of the label
:param domain: the algorithm domain this label is associated to
:param color: the color of the label (See :class:`Color`)
:param hotkey: key or combination of keys to select this label in the UI
:param creation_date: the date time of the label creation
:param is_empty: set to True if the label is an empty label.
:param id: the ID of the label. Set to ID() so that a new unique ID
will be assigned upon saving. If the argument is None, it will be set to ID()
:param is_anomalous: boolean that indicates whether the label is the Anomalous label. Always set to False for non-
anomaly projects.
"""
# pylint: disable=redefined-builtin, too-many-instance-attributes, too-many-arguments; Requires refactor
def __init__(
self,
name: str,
domain: Domain,
color: Optional[Color] = None,
hotkey: str = "",
creation_date: Optional[datetime.datetime] = None,
is_empty: bool = False,
id: Optional[ID] = None,
is_anomalous: bool = False,
):
id = ID() if id is None else id
color = Color.random() if color is None else color
creation_date = now() if creation_date is None else creation_date
self._name = name
self._color = color
self._hotkey = hotkey
self._domain = domain
self._is_empty = is_empty
self._creation_date = creation_date
self.__id_ = id
self.is_anomalous = is_anomalous
@property
def name(self):
"""
Returns the label name.
"""
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def color(self) -> Color:
"""
Returns the Color object for the label.
"""
return self._color
@color.setter
def color(self, value):
self._color = value
@property
def hotkey(self) -> str:
"""
Returns the hotkey for the label
"""
return self._hotkey
@hotkey.setter
def hotkey(self, value):
self._hotkey = value
@property
def domain(self):
"""
Returns the algorithm domain associated to this label
"""
return self._domain
@domain.setter
def domain(self, value):
self._domain = value
@property
def is_empty(self) -> bool:
"""
Returns a boolean indicating if the label is an empty label
"""
return self._is_empty
@property
def creation_date(self) -> datetime.datetime:
"""
Returns the creation date of the label
"""
return self._creation_date
@property
def id_(self) -> ID:
"""
Returns the label id.
"""
return self.__id_
@id_.setter
def id_(self, value: ID):
self.__id_ = value
@property
def id(self) -> ID:
"""DEPRECATED"""
return self.__id_
@id.setter
def id(self, value: ID):
"""DEPRECATED"""
self.__id_ = value
def __repr__(self):
return (
f"LabelEntity({self.id_}, name={self.name}, hotkey={self.hotkey}, "
f"domain={self.domain}, color={self.color})"
)
def __eq__(self, other):
if isinstance(other, LabelEntity):
return (
self.id_ == other.id_
and self.name == other.name
and self.color == other.color
and self.hotkey == other.hotkey
and self.domain == other.domain
and self.is_anomalous == other.is_anomalous
)
return False
def __lt__(self, other):
if isinstance(other, LabelEntity):
return self.id_ < other.id_
return False
def __gt__(self, other):
if isinstance(other, LabelEntity):
return self.id_ > other.id_
return False
def __hash__(self):
return hash(str(self))
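# Illustrative example (ids are hypothetical, assuming string-like IDs):
# equality compares id, name, color, hotkey, domain and is_anomalous, while
# ordering compares only the ids.
#
#     car = LabelEntity(name="car", domain=Domain.DETECTION, id=ID("1"))
#     dog = LabelEntity(name="dog", domain=Domain.DETECTION, id=ID("2"))
#     car == dog   # False
#     car < dog    # True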
| 30.004587
| 118
| 0.633542
|
f56823dfa82d3763b3e7ee7661c6d0225d283a2e
| 287
|
py
|
Python
|
setup.py
|
badbayesian/pymarket
|
6517c296e2c025ccd04ae0447e4f466db5f36831
|
[
"MIT"
] | null | null | null |
setup.py
|
badbayesian/pymarket
|
6517c296e2c025ccd04ae0447e4f466db5f36831
|
[
"MIT"
] | null | null | null |
setup.py
|
badbayesian/pymarket
|
6517c296e2c025ccd04ae0447e4f466db5f36831
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(
name="pymarket",
version="0.1",
description="Market simulation",
url="http://github.com/badbayesian/pymarket",
author="Daniel Silva-Inclan",
author_email="badbayesian@gmail.com",
license="MIT",
package=["pymarket"],
)
| 22.076923
| 49
| 0.66899
|
2e0845bddf7e2e0ce767428f0303dee4c6e037fa
| 5,632
|
py
|
Python
|
pandas/computation/align.py
|
certik/pandas
|
758ca05e2eb04532b5d78331ba87c291038e2c61
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 29
|
2015-01-08T19:20:37.000Z
|
2021-04-20T08:25:56.000Z
|
pandas/computation/align.py
|
certik/pandas
|
758ca05e2eb04532b5d78331ba87c291038e2c61
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 5
|
2021-03-19T08:36:48.000Z
|
2022-01-13T01:52:34.000Z
|
pandas/computation/align.py
|
certik/pandas
|
758ca05e2eb04532b5d78331ba87c291038e2c61
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 22
|
2015-01-02T12:14:20.000Z
|
2021-10-13T09:22:30.000Z
|
"""Core eval alignment algorithms
"""
import warnings
from functools import partial, wraps
from pandas.compat import zip, range
import numpy as np
import pandas as pd
from pandas import compat
import pandas.core.common as com
from pandas.computation.common import _result_type_many
def _align_core_single_unary_op(term):
if isinstance(term.value, np.ndarray):
typ = partial(np.asanyarray, dtype=term.value.dtype)
else:
typ = type(term.value)
ret = typ,
if not hasattr(term.value, 'axes'):
ret += None,
else:
ret += _zip_axes_from_type(typ, term.value.axes),
return ret
def _zip_axes_from_type(typ, new_axes):
axes = {}
for ax_ind, ax_name in compat.iteritems(typ._AXIS_NAMES):
axes[ax_name] = new_axes[ax_ind]
return axes
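# For example (illustrative): for a DataFrame, _AXIS_NAMES is
# {0: 'index', 1: 'columns'}, so
#     _zip_axes_from_type(pd.DataFrame, [new_index, new_columns])
# returns {'index': new_index, 'columns': new_columns}.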
def _any_pandas_objects(terms):
"""Check a sequence of terms for instances of PandasObject."""
return any(isinstance(term.value, pd.core.generic.PandasObject)
for term in terms)
def _filter_special_cases(f):
@wraps(f)
def wrapper(terms):
# single unary operand
if len(terms) == 1:
return _align_core_single_unary_op(terms[0])
term_values = (term.value for term in terms)
# we don't have any pandas objects
if not _any_pandas_objects(terms):
return _result_type_many(*term_values), None
return f(terms)
return wrapper
@_filter_special_cases
def _align_core(terms):
term_index = [i for i, term in enumerate(terms)
if hasattr(term.value, 'axes')]
term_dims = [terms[i].value.ndim for i in term_index]
ndims = pd.Series(dict(zip(term_index, term_dims)))
# initial axes are the axes of the largest-axis'd term
biggest = terms[ndims.idxmax()].value
typ = biggest._constructor
axes = biggest.axes
naxes = len(axes)
gt_than_one_axis = naxes > 1
for value in (terms[i].value for i in term_index):
is_series = isinstance(value, pd.Series)
is_series_and_gt_one_axis = is_series and gt_than_one_axis
for axis, items in enumerate(value.axes):
if is_series_and_gt_one_axis:
ax, itm = naxes - 1, value.index
else:
ax, itm = axis, items
if not axes[ax].is_(itm):
axes[ax] = axes[ax].join(itm, how='outer')
for i, ndim in compat.iteritems(ndims):
for axis, items in zip(range(ndim), axes):
ti = terms[i].value
if hasattr(ti, 'reindex_axis'):
transpose = isinstance(ti, pd.Series) and naxes > 1
reindexer = axes[naxes - 1] if transpose else items
term_axis_size = len(ti.axes[axis])
reindexer_size = len(reindexer)
ordm = np.log10(abs(reindexer_size - term_axis_size))
if ordm >= 1 and reindexer_size >= 10000:
warnings.warn('Alignment difference on axis {0} is larger '
'than an order of magnitude on term {1!r}, '
'by more than {2:.4g}; performance may '
'suffer'.format(axis, terms[i].name, ordm),
category=pd.io.common.PerformanceWarning)
if transpose:
f = partial(ti.reindex, index=reindexer, copy=False)
else:
f = partial(ti.reindex_axis, reindexer, axis=axis,
copy=False)
terms[i].update(f())
terms[i].update(terms[i].value.values)
return typ, _zip_axes_from_type(typ, axes)
def _align(terms):
"""Align a set of terms"""
try:
# flatten the parse tree (a nested list, really)
terms = list(com.flatten(terms))
except TypeError:
# can't iterate so it must just be a constant or single variable
if isinstance(terms.value, pd.core.generic.NDFrame):
typ = type(terms.value)
return typ, _zip_axes_from_type(typ, terms.value.axes)
return np.result_type(terms.type), None
# if all resolved variables are numeric scalars
if all(term.isscalar for term in terms):
return _result_type_many(*(term.value for term in terms)).type, None
# perform the main alignment
typ, axes = _align_core(terms)
return typ, axes
def _reconstruct_object(typ, obj, axes, dtype):
"""Reconstruct an object given its type, raw value, and possibly empty
(None) axes.
Parameters
----------
typ : object
A type
obj : object
The value to use in the type constructor
axes : dict
The axes to use to construct the resulting pandas object
Returns
-------
ret : typ
An object of type ``typ`` with the value `obj` and possible axes
`axes`.
"""
try:
typ = typ.type
except AttributeError:
pass
res_t = np.result_type(obj.dtype, dtype)
if (not isinstance(typ, partial) and
issubclass(typ, pd.core.generic.PandasObject)):
return typ(obj, dtype=res_t, **axes)
# special case for pathological things like ~True/~False
if hasattr(res_t, 'type') and typ == np.bool_ and res_t != np.bool_:
ret_value = res_t.type(obj)
else:
ret_value = typ(obj).astype(res_t)
try:
ret = ret_value.item()
except (ValueError, IndexError):
# XXX: we catch IndexError to absorb a
# regression in numpy 1.7.0
# fixed by numpy/numpy@04b89c63
ret = ret_value
return ret
| 30.608696
| 79
| 0.604226
|
761fe21c4f875e583228fd6094d85b7bc527b4af
| 841
|
py
|
Python
|
userbot/plugins/selfdestruct.py
|
techyminati/DeOXy
|
014efbf6ba4ba31525f996e935279e8918c8ba96
|
[
"Apache-2.0"
] | 2
|
2020-08-02T17:20:12.000Z
|
2020-11-02T23:28:05.000Z
|
userbot/plugins/selfdestruct.py
|
techyminati/DeOXy
|
014efbf6ba4ba31525f996e935279e8918c8ba96
|
[
"Apache-2.0"
] | null | null | null |
userbot/plugins/selfdestruct.py
|
techyminati/DeOXy
|
014efbf6ba4ba31525f996e935279e8918c8ba96
|
[
"Apache-2.0"
] | 6
|
2020-08-17T16:11:18.000Z
|
2020-11-03T16:06:46.000Z
|
# For @UniBorg
# courtesy Yasir siddiqui
"""Self Destruct Plugin
.sd <time in seconds> <text>
"""
import asyncio
from telethon.errors import rpcbaseerrors
from userbot.utils import admin_cmd
import importlib.util
@borg.on(admin_cmd("sd", outgoing=True ))
async def selfdestruct(destroy):
""" For .sd command, make seflf-destructable messages. """
if not destroy.text[0].isalpha() and destroy.text[0] not in ("/", "#", "@", "!"):
message = destroy.text
counter = int(message[4:6])
text = str(destroy.text[6:])
text = (
text
+ "\n\nDestruction Level: "
+ str(counter)
+ " seconds"
)
await destroy.delete()
smsg = await destroy.client.send_message(destroy.chat_id, text)
        await asyncio.sleep(counter)
await smsg.delete()
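# Example (illustrative): sending the following as the userbot account
#
#     .sd 10 this message will vanish
#
# re-sends the text with a "Destruction Level: 10 seconds" note and deletes it
# after roughly ten seconds.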
| 25.484848
| 85
| 0.601665
|
89f1f44eeb38ac5029e2fd61d790bf6f7585b6c7
| 615
|
py
|
Python
|
contrib/qt_translations.py
|
NarubyRiverlione/spice
|
5fafc3410372b964bf5dc9535d02cb2377e73ee2
|
[
"MIT"
] | null | null | null |
contrib/qt_translations.py
|
NarubyRiverlione/spice
|
5fafc3410372b964bf5dc9535d02cb2377e73ee2
|
[
"MIT"
] | null | null | null |
contrib/qt_translations.py
|
NarubyRiverlione/spice
|
5fafc3410372b964bf5dc9535d02cb2377e73ee2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Helpful little script that spits out a comma-separated list of
# language codes for Qt icons that should be included
# in binary Dune Spice distributions
import glob
import os
import re
import sys
if len(sys.argv) != 3:
sys.exit("Usage: %s $QTDIR/translations $BITCOINDIR/src/qt/locale"%sys.argv[0])
d1 = sys.argv[1]
d2 = sys.argv[2]
l1 = set([ re.search(r'qt_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d1, 'qt_*.qm')) ])
l2 = set([ re.search(r'spice_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d2, 'spice_*.qm')) ])
print ",".join(sorted(l1.intersection(l2)))
| 26.73913
| 104
| 0.679675
|
ee26c54b9e788dd089a57e890b56b57f84bd326c
| 432
|
py
|
Python
|
album.py
|
ChiuAlfredo/Spotify_playlist
|
d1911b3417863cababa1cad47ceb685b40afcb86
|
[
"MIT"
] | 1
|
2021-09-24T13:26:43.000Z
|
2021-09-24T13:26:43.000Z
|
album.py
|
ChiuAlfredo/Spotify_playlist
|
d1911b3417863cababa1cad47ceb685b40afcb86
|
[
"MIT"
] | null | null | null |
album.py
|
ChiuAlfredo/Spotify_playlist
|
d1911b3417863cababa1cad47ceb685b40afcb86
|
[
"MIT"
] | null | null | null |
class Album:
"""Track represents a piece of music."""
def __init__(self, album_name, id, artist):
"""
        :param album_name (str): Album name
:param id (int): Spotify Album id
:param artist (str): Artist who created the track
"""
self.album_name = album_name
self.id = id
self.artist = artist
def album_id(self):
return f"{self.id}"
| 24
| 58
| 0.532407
|
30777d9b300e92a111030e8f287eca55b7d360f2
| 4,796
|
py
|
Python
|
PuppeteerLibrary/playwright/playwright_context.py
|
sdvicorp/robotframework-puppeteer
|
af6fa68b04c3cdac3a7662cffda6da2a5ace38d1
|
[
"Apache-2.0"
] | null | null | null |
PuppeteerLibrary/playwright/playwright_context.py
|
sdvicorp/robotframework-puppeteer
|
af6fa68b04c3cdac3a7662cffda6da2a5ace38d1
|
[
"Apache-2.0"
] | null | null | null |
PuppeteerLibrary/playwright/playwright_context.py
|
sdvicorp/robotframework-puppeteer
|
af6fa68b04c3cdac3a7662cffda6da2a5ace38d1
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
from PuppeteerLibrary.custom_elements.base_page import BasePage
from PuppeteerLibrary.playwright.custom_elements.playwright_page import PlaywrightPage
from PuppeteerLibrary.playwright.async_keywords.playwright_checkbox import PlaywrightCheckbox
from PuppeteerLibrary.playwright.async_keywords.playwright_mockresponse import PlaywrightMockResponse
from PuppeteerLibrary.playwright.async_keywords.playwright_formelement import PlaywrightFormElement
from PuppeteerLibrary.playwright.async_keywords.playwright_dropdown import PlaywrightDropdown
from PuppeteerLibrary.playwright.async_keywords.playwright_alert import PlaywrightAlert
from PuppeteerLibrary.playwright.async_keywords.playwright_screenshot import PlaywrightScreenshot
from PuppeteerLibrary.playwright.async_keywords.playwright_waiting import PlaywrightWaiting
from PuppeteerLibrary.playwright.async_keywords.playwright_element import PlaywrightElement
from PuppeteerLibrary.playwright.async_keywords.playwright_dropdown import PlaywrightDropdown
from PuppeteerLibrary.playwright.async_keywords.playwright_mouseevent import PlaywrightMouseEvent
from PuppeteerLibrary.playwright.async_keywords.playwright_browsermanagement import PlaywrightBrowserManagement
from PuppeteerLibrary.playwright.async_keywords.playwright_pdf import PlaywrightPDF
from PuppeteerLibrary.playwright.async_keywords.playwright_javascript import PlaywrightJavascript
from PuppeteerLibrary.library_context.ilibrary_context import iLibraryContext
try:
from playwright import async_playwright
from playwright.playwright import Playwright as AsyncPlaywright
from playwright.browser import Browser
except ImportError:
print('import playwright error')
class PlaywrightContext(iLibraryContext):
playwright: any = None
browser: any = None
current_page: any = None
current_iframe = None
def __init__(self, browser_type: str):
super().__init__(browser_type)
async def start_server(self, options: dict=None):
self.playwright = await async_playwright().start()
if self.browser_type == "webkit":
self.browser = await self.playwright.webkit.launch(headless=False)
elif self.browser_type == "firefox":
self.browser = await self.playwright.firefox.launch(headless=False)
async def stop_server(self):
await self.playwright.stop()
self._reset_server_context()
def is_server_started(self) -> bool:
if self.browser is not None:
return True
return False
async def create_new_page(self, options: dict=None) -> BasePage:
device_options = {}
        if options and 'emulate' in options:
device_options = self.playwright.devices[options['emulate']]
new_page = await self.browser.newPage(**device_options)
self.current_page = PlaywrightPage(new_page)
return self.current_page
def get_current_page(self) -> BasePage:
return self.current_page
def set_current_page(self, page: any) -> BasePage:
self.current_page = PlaywrightPage(page)
return self.current_page
async def get_all_pages(self):
return self.browser.contexts[0].pages
def get_browser_context(self):
return self.browser
async def close_browser_context(self):
if self.browser is not None:
try:
await asyncio.wait_for(self.browser.close(), timeout=3)
except asyncio.TimeoutError:
                pass
self._reset_context()
async def close_window(self):
await self.get_current_page().get_page().close()
pages = await self.get_all_pages()
self.set_current_page(pages[-1])
def get_async_keyword_group(self, keyword_group_name: str):
switcher = {
"AlertKeywords": PlaywrightAlert(self),
"BrowserManagementKeywords": PlaywrightBrowserManagement(self),
"CheckboxKeywords": PlaywrightCheckbox(self),
"DropdownKeywords": PlaywrightDropdown(self),
"ElementKeywords": PlaywrightElement(self),
"FormElementKeywords": PlaywrightFormElement(self),
"JavascriptKeywords": PlaywrightJavascript(self),
"MockResponseKeywords": PlaywrightMockResponse(self),
"MouseEventKeywords": PlaywrightMouseEvent(self),
"PDFKeywords": PlaywrightPDF(self),
"ScreenshotKeywords": PlaywrightScreenshot(self),
"WaitingKeywords": PlaywrightWaiting(self)
}
return switcher.get(keyword_group_name)
def _reset_context(self):
self.browser = None
self.current_page = None
self.current_iframe = None
def _reset_server_context(self):
self._reset_context()
self.playwright = None
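# Minimal usage sketch (assumes an asyncio event loop and the legacy async
# 'playwright' API imported above; the keyword groups are the normal entry
# point when running under Robot Framework):
#
#     ctx = PlaywrightContext('webkit')
#     await ctx.start_server()
#     page = await ctx.create_new_page({})
#     await ctx.close_browser_context()
#     await ctx.stop_server()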
| 43.207207
| 111
| 0.741451
|
05fa51960b9020988d06646b51f25006809abcad
| 546
|
py
|
Python
|
09/9.4.py
|
abe-101/ThinkPython-2
|
bcebb1e9b3cc63c403f59c3cc0f33017bb017363
|
[
"MIT"
] | 1
|
2021-12-16T16:46:47.000Z
|
2021-12-16T16:46:47.000Z
|
09/9.4.py
|
abe-101/ThinkPython-2
|
bcebb1e9b3cc63c403f59c3cc0f33017bb017363
|
[
"MIT"
] | null | null | null |
09/9.4.py
|
abe-101/ThinkPython-2
|
bcebb1e9b3cc63c403f59c3cc0f33017bb017363
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""
Exercise 9.4. Write a function named uses_only that takes a word and a string of letters, and
that returns True if the word contains only letters in the list. Can you make a sentence using only
the letters acefhlo? Other than “Hoe alfalfa”?
"""
def uses_only(word, available):
for letter in word:
if letter not in available:
return False
return True
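# For example: uses_only('alfalfa', 'acefhlo') -> True, while
# uses_only('world', 'acefhlo') -> False ('w', 'r' and 'd' are not available).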
available = input('Enter available letters: ')
fin = open('words.txt')
for line in fin:
    word = line.strip()
    if uses_only(word, available):
        print(word)
| 28.736842
| 99
| 0.697802
|
bf8ef307ff7bfe20bb677f7af7babce8e160eef0
| 67
|
py
|
Python
|
ml3d/torch/modules/metrics/__init__.py
|
hummat/Open3D-ML
|
165e5df875cdd1ddf5d404e5d65d9c4cac66acd9
|
[
"MIT"
] | null | null | null |
ml3d/torch/modules/metrics/__init__.py
|
hummat/Open3D-ML
|
165e5df875cdd1ddf5d404e5d65d9c4cac66acd9
|
[
"MIT"
] | null | null | null |
ml3d/torch/modules/metrics/__init__.py
|
hummat/Open3D-ML
|
165e5df875cdd1ddf5d404e5d65d9c4cac66acd9
|
[
"MIT"
] | null | null | null |
from .semseg_metric import SemSegMetric
__all__ = ['SemSegMetric']
| 22.333333
| 39
| 0.80597
|
c9ec64684caf4360adba9bd69345809ffa9c68a0
| 102
|
py
|
Python
|
library/test/test_compiler/testcorpus/06_funcall_varargs.py
|
creativemindplus/skybison
|
d1740e08d8de85a0a56b650675717da67de171a0
|
[
"CNRI-Python-GPL-Compatible"
] | 278
|
2021-08-31T00:46:51.000Z
|
2022-02-13T19:43:28.000Z
|
library/test/test_compiler/testcorpus/06_funcall_varargs.py
|
creativemindplus/skybison
|
d1740e08d8de85a0a56b650675717da67de171a0
|
[
"CNRI-Python-GPL-Compatible"
] | 9
|
2021-11-05T22:28:43.000Z
|
2021-11-23T08:39:04.000Z
|
library/test/test_compiler/testcorpus/06_funcall_varargs.py
|
tekknolagi/skybison
|
bea8fc2af0a70e7203b4c19f36c14a745512a335
|
[
"CNRI-Python-GPL-Compatible"
] | 12
|
2021-08-31T07:49:54.000Z
|
2021-10-08T01:09:01.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
c = (a, b)
fun(a, b, *c)
| 25.5
| 76
| 0.627451
|
ec914eb691ee32903b8a79e74c36137bf567011a
| 10,264
|
py
|
Python
|
eve/methods/patch.py
|
ehiggs/eve
|
b174c7dcb1e93151daadc08948a387e2dd4b0328
|
[
"BSD-3-Clause"
] | null | null | null |
eve/methods/patch.py
|
ehiggs/eve
|
b174c7dcb1e93151daadc08948a387e2dd4b0328
|
[
"BSD-3-Clause"
] | null | null | null |
eve/methods/patch.py
|
ehiggs/eve
|
b174c7dcb1e93151daadc08948a387e2dd4b0328
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
eve.methods.patch
~~~~~~~~~~~~~~~~~
This module implements the PATCH method.
:copyright: (c) 2017 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
from copy import deepcopy
from flask import current_app as app, abort
from werkzeug import exceptions
from datetime import datetime
from eve.utils import config, debug_error_message, parse_request
from eve.auth import requires_auth
from cerberus.validator import DocumentError
from eve.methods.common import (
get_document,
parse,
payload as payload_,
ratelimit,
pre_event,
store_media_files,
resolve_embedded_fields,
build_response_document,
marshal_write_response,
resolve_document_etag,
oplog_push,
)
from eve.versioning import (
resolve_document_version,
insert_versioning_documents,
late_versioning_catch,
)
@ratelimit()
@requires_auth("item")
@pre_event
def patch(resource, payload=None, **lookup):
"""
Default function for handling PATCH requests, it has decorators for
rate limiting, authentication and for raising pre-request events.
After the decorators are applied forwards to call to :func:`patch_internal`
.. versionchanged:: 0.5
Split into patch() and patch_internal().
"""
return patch_internal(
resource, payload, concurrency_check=True, skip_validation=False, **lookup
)
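# Illustrative internal call (hypothetical resource and id; HTTP requests are
# normally routed here by Eve itself):
#
#     patch_internal('contacts', {'name': 'John'}, concurrency_check=False,
#                    _id='507f191e810c19729de860ea')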
def patch_internal(
resource, payload=None, concurrency_check=False, skip_validation=False, **lookup
):
""" Intended for internal patch calls, this method is not rate limited,
authentication is not checked, pre-request events are not raised, and
concurrency checking is optional. Performs a document patch/update.
Updates are first validated against the resource schema. If validation
passes, the document is updated and an OK status update is returned.
If validation fails, a set of validation issues is returned.
:param resource: the name of the resource to which the document belongs.
:param payload: alternative payload. When calling patch() from your own
code you can provide an alternative payload. This can be
useful, for example, when you have a callback function
hooked to a certain endpoint, and want to perform
                    additional patch() calls from there.
Please be advised that in order to successfully use this
option, a request context must be available.
:param concurrency_check: concurrency check switch (bool)
:param skip_validation: skip payload validation before write (bool)
:param **lookup: document lookup query.
.. versionchanged:: 0.6.2
Fix: validator is not set when skip_validation is true.
.. versionchanged:: 0.6
on_updated returns the updated document (#682).
Allow restoring soft deleted documents via PATCH
.. versionchanged:: 0.5
Updating nested document fields does not overwrite the nested document
itself (#519).
Push updates to the OpLog.
Original patch() has been split into patch() and patch_internal().
        You can now pass a pre-defined custom payload to the function.
ETAG is now stored with the document (#369).
Catching all HTTPExceptions and returning them to the caller, allowing
for eventual flask.abort() invocations in callback functions to go
through. Fixes #395.
.. versionchanged:: 0.4
Allow abort() to be invoked by callback functions.
'on_update' raised before performing the update on the database.
Support for document versioning.
'on_updated' raised after performing the update on the database.
.. versionchanged:: 0.3
Support for media fields.
When IF_MATCH is disabled, no etag is included in the payload.
Support for new validation format introduced with Cerberus v0.5.
.. versionchanged:: 0.2
Use the new STATUS setting.
Use the new ISSUES setting.
Raise 'on_pre_<method>' event.
.. versionchanged:: 0.1.1
Item-identifier wrapper stripped from both request and response payload.
.. versionchanged:: 0.1.0
Support for optional HATEOAS.
Re-raises `exceptions.Unauthorized`, this could occur if the
`auth_field` condition fails
.. versionchanged:: 0.0.9
More informative error messages.
Support for Python 3.3.
.. versionchanged:: 0.0.8
Let ``werkzeug.exceptions.InternalServerError`` go through as they have
probably been explicitly raised by the data driver.
.. versionchanged:: 0.0.7
Support for Rate-Limiting.
.. versionchanged:: 0.0.6
ETag is now computed without the need of an additional db lookup
.. versionchanged:: 0.0.5
Support for 'application/json' Content-Type.
.. versionchanged:: 0.0.4
Added the ``requires_auth`` decorator.
.. versionchanged:: 0.0.3
        JSON links. Superfluous ``response`` container removed.
"""
if payload is None:
payload = payload_()
original = get_document(resource, concurrency_check, **lookup)
if not original:
# not found
abort(404)
resource_def = app.config["DOMAIN"][resource]
schema = resource_def["schema"]
normalize_document = resource_def.get("normalize_on_patch")
validator = app.validator(
schema, resource=resource, allow_unknown=resource_def["allow_unknown"]
)
object_id = original[resource_def["id_field"]]
last_modified = None
etag = None
issues = {}
response = {}
if config.BANDWIDTH_SAVER is True:
embedded_fields = []
else:
req = parse_request(resource)
embedded_fields = resolve_embedded_fields(resource, req)
try:
updates = parse(payload, resource)
if skip_validation:
validation = True
else:
validation = validator.validate_update(
updates, object_id, original, normalize_document
)
updates = validator.document
if validation:
# Apply coerced values
# sneak in a shadow copy if it wasn't already there
late_versioning_catch(original, resource)
store_media_files(updates, resource, original)
resolve_document_version(updates, resource, "PATCH", original)
# some datetime precision magic
updates[config.LAST_UPDATED] = datetime.utcnow().replace(microsecond=0)
if resource_def["soft_delete"] is True:
# PATCH with soft delete enabled should always set the DELETED
# field to False. We are either carrying through un-deleted
# status, or restoring a soft deleted document
updates[config.DELETED] = False
# the mongo driver has a different precision than the python
# datetime. since we don't want to reload the document once it
# has been updated, and we still have to provide an updated
# etag, we're going to update the local version of the
# 'original' document, and we will use it for the etag
# computation.
updated = deepcopy(original)
# notify callbacks
getattr(app, "on_update")(resource, updates, original)
getattr(app, "on_update_%s" % resource)(updates, original)
if resource_def["merge_nested_documents"]:
updates = resolve_nested_documents(updates, updated)
updated.update(updates)
if config.IF_MATCH:
resolve_document_etag(updated, resource)
# now storing the (updated) ETAG with every document (#453)
updates[config.ETAG] = updated[config.ETAG]
try:
app.data.update(resource, object_id, updates, original)
except app.data.OriginalChangedError:
if concurrency_check:
abort(412, description="Client and server etags don't match")
# update oplog if needed
oplog_push(resource, updates, "PATCH", object_id)
insert_versioning_documents(resource, updated)
            # notify callbacks
getattr(app, "on_updated")(resource, updates, original)
getattr(app, "on_updated_%s" % resource)(updates, original)
updated.update(updates)
# build the full response document
build_response_document(updated, resource, embedded_fields, updated)
response = updated
if config.IF_MATCH:
etag = response[config.ETAG]
else:
issues = validator.errors
except DocumentError as e:
        # TODO should probably log the error and abort 400 instead (once we
        # have logging)
issues["validator exception"] = str(e)
except exceptions.HTTPException as e:
raise e
except Exception as e:
# consider all other exceptions as Bad Requests
app.logger.exception(e)
abort(400, description=debug_error_message("An exception occurred: %s" % e))
if len(issues):
response[config.ISSUES] = issues
response[config.STATUS] = config.STATUS_ERR
status = config.VALIDATION_ERROR_STATUS
else:
response[config.STATUS] = config.STATUS_OK
status = 200
# limit what actually gets sent to minimize bandwidth usage
response = marshal_write_response(response, resource)
return response, last_modified, etag, status
def resolve_nested_documents(updates, original):
""" Nested document updates are merged with the original contents
we don't overwrite the whole thing. See #519 for details.
.. versionadded:: 0.5
"""
r = {}
for field, value in updates.items():
if isinstance(value, dict):
orig_value = original.setdefault(field, {})
if orig_value is None:
r[field] = value
else:
orig_value.update(resolve_nested_documents(value, orig_value))
r[field] = orig_value
else:
r[field] = value
return r
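# Editorial sketch (not part of the original module): how resolve_nested_documents
# behaves on plain dicts; the field names and values below are made up.
#
#   updates  = {'location': {'city': 'Rome'}}
#   original = {'location': {'city': 'Milan', 'zip': '20121'}}
#   resolve_nested_documents(updates, original)
#   => {'location': {'city': 'Rome', 'zip': '20121'}}
#
# Only the nested 'city' key is overwritten; sibling keys of the original
# nested document are preserved (#519).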
| 35.515571
| 84
| 0.654034
|
be45f09e08b1b776cb0db2e6990d490ea30795f5
| 6,986
|
py
|
Python
|
python/src/nnabla/utils/nvml.py
|
isabella232/nnabla
|
82a3c6fed382f889d1a4a429c696bb8cedf6ce79
|
[
"Apache-2.0"
] | null | null | null |
python/src/nnabla/utils/nvml.py
|
isabella232/nnabla
|
82a3c6fed382f889d1a4a429c696bb8cedf6ce79
|
[
"Apache-2.0"
] | null | null | null |
python/src/nnabla/utils/nvml.py
|
isabella232/nnabla
|
82a3c6fed382f889d1a4a429c696bb8cedf6ce79
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import ctypes
import threading
# NVML return codes
NVML_SUCCESS = 0
NVML_ERROR_UNINITIALIZED = 1
NVML_ERROR_INVALID_ARGUMENT = 2
NVML_ERROR_NOT_SUPPORTED = 3
NVML_ERROR_NO_PERMISSION = 4
NVML_ERROR_ALREADY_INITIALIZED = 5
NVML_ERROR_NOT_FOUND = 6
NVML_ERROR_INSUFFICIENT_SIZE = 7
NVML_ERROR_INSUFFICIENT_POWER = 8
NVML_ERROR_DRIVER_NOT_LOADED = 9
NVML_ERROR_TIMEOUT = 10
NVML_ERROR_IRQ_ISSUE = 11
NVML_ERROR_LIBRARY_NOT_FOUND = 12
NVML_ERROR_FUNCTION_NOT_FOUND = 13
NVML_ERROR_CORRUPTED_INFOROM = 14
NVML_ERROR_GPU_IS_LOST = 15
NVML_ERROR_RESET_REQUIRED = 16
NVML_ERROR_OPERATING_SYSTEM = 17
NVML_ERROR_LIB_RM_VERSION_MISMATCH = 18
NVML_ERROR_IN_USE = 19
NVML_ERROR_MEMORY = 20
NVML_ERROR_NO_DATA = 21
NVML_ERROR_VGPU_ECC_NOT_SUPPORTED = 22
NVML_ERROR_INSUFFICIENT_RESOURCES = 23
NVML_ERROR_UNKNOWN = 999
# CONSTANT: buffer size
NVML_DEVICE_NAME_BUFFER_SIZE = 64
nvml_lib = None
lib_load_lock = threading.Lock()
_nvml_function_cache = dict()
# Device structures
c_nvmlDevice_t = ctypes.POINTER(
type('struct_c_nvmlDevice_t', (ctypes.Structure, ), {}))
class c_nvmlUtilization_t(ctypes.Structure):
_fields_ = [
('gpu', ctypes.c_uint),
('memory', ctypes.c_uint),
]
class c_nvmlMemory_t(ctypes.Structure):
_fields_ = [
('total', ctypes.c_ulonglong),
('free', ctypes.c_ulonglong),
('used', ctypes.c_ulonglong),
]
class NVMLError(Exception):
# Error codes list in use
_errcode_to_string = {
NVML_ERROR_UNINITIALIZED: "Uninitialized",
NVML_ERROR_INVALID_ARGUMENT: "Invalid Argument",
NVML_ERROR_NOT_SUPPORTED: "Not Supported",
NVML_ERROR_NO_PERMISSION: "Insufficient Permissions",
NVML_ERROR_ALREADY_INITIALIZED: "Already Initialized",
NVML_ERROR_NOT_FOUND: "Not Found",
NVML_ERROR_INSUFFICIENT_SIZE: "Insufficient Size",
NVML_ERROR_INSUFFICIENT_POWER: "Insufficient External Power",
NVML_ERROR_DRIVER_NOT_LOADED: "Driver Not Loaded",
NVML_ERROR_TIMEOUT: "Timeout",
NVML_ERROR_IRQ_ISSUE: "Interrupt Request Issue",
NVML_ERROR_LIBRARY_NOT_FOUND: "NVML Shared Library Not Found",
NVML_ERROR_FUNCTION_NOT_FOUND: "Function Not Found",
NVML_ERROR_CORRUPTED_INFOROM: "Corrupted infoROM",
NVML_ERROR_GPU_IS_LOST: "GPU is lost",
NVML_ERROR_RESET_REQUIRED: "GPU requires restart",
NVML_ERROR_OPERATING_SYSTEM: "The operating system has blocked the request.",
NVML_ERROR_LIB_RM_VERSION_MISMATCH: "RM has detected an NVML/RM version mismatch.",
NVML_ERROR_MEMORY: "Insufficient Memory",
NVML_ERROR_UNKNOWN: "Unknown Error",
}
def __init__(self, value):
self.value = value
def __str__(self):
try:
if self.value not in NVMLError._errcode_to_string:
NVMLError._errcode_to_string[self.value] = str(
nvmlErrorString(self.value))
return NVMLError._errcode_to_string[self.value]
except NVMLError:
return f"NVML Error code {self.value}"
def _get_nvml_function(name):
"""Get NVML function from the NVML library
"""
global nvml_lib
if name in _nvml_function_cache:
return _nvml_function_cache[name]
lib_load_lock.acquire()
try:
if nvml_lib is None:
raise NVMLError(NVML_ERROR_UNINITIALIZED)
_nvml_function_cache[name] = getattr(nvml_lib, name)
return _nvml_function_cache[name]
except AttributeError:
raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND)
finally:
lib_load_lock.release()
def _check_return(ret):
if ret != NVML_SUCCESS:
raise NVMLError(ret)
return ret
def _load_nvml_library():
"""Load the NVML Shared Library
"""
global nvml_lib
lib_load_lock.acquire()
try:
# check if the library is loaded
if nvml_lib is not None:
return
if sys.platform[:3] == "win":
try:
                # The file nvml.dll may be in the `C:/Windows/System32` or
                # the `C:/Program Files/NVIDIA Corporation/NVSMI` directory.
nvml_lib = ctypes.CDLL(os.path.join(
os.getenv("WINDIR", "C:/Windows"), "System32/nvml.dll"))
except OSError:
nvml_lib = ctypes.CDLL(
os.path.join(os.getenv("ProgramFiles", "C:/Program Files"), "NVIDIA Corporation/NVSMI/nvml.dll"))
else:
nvml_lib = ctypes.CDLL("libnvidia-ml.so.1")
if nvml_lib is None:
_check_return(NVML_ERROR_LIBRARY_NOT_FOUND)
except OSError:
_check_return(NVML_ERROR_LIBRARY_NOT_FOUND)
finally:
lib_load_lock.release()
def nvmlErrorString(result):
"""Convert NVML error codes into readable strings.
"""
fn = _get_nvml_function("nvmlErrorString")
fn.restype = ctypes.c_char_p
return fn(result)
def nvmlInit(flags=0):
_load_nvml_library()
fn = _get_nvml_function("nvmlInitWithFlags")
ret = fn(flags)
_check_return(ret)
return True
def nvmlShutdown():
fn = _get_nvml_function("nvmlShutdown")
ret = fn()
_check_return(ret)
return None
def nvmlDeviceGetHandleByIndex(index):
c_index = ctypes.c_uint(index)
device = c_nvmlDevice_t()
fn = _get_nvml_function("nvmlDeviceGetHandleByIndex_v2")
ret = fn(c_index, ctypes.byref(device))
_check_return(ret)
return device
def nvmlDeviceGetUtilizationRates(handle):
c_util = c_nvmlUtilization_t()
fn = _get_nvml_function("nvmlDeviceGetUtilizationRates")
ret = fn(handle, ctypes.byref(c_util))
_check_return(ret)
return c_util
def nvmlDeviceGetName(handle):
c_name = ctypes.create_string_buffer(NVML_DEVICE_NAME_BUFFER_SIZE)
fn = _get_nvml_function("nvmlDeviceGetName")
ret = fn(handle, c_name, ctypes.c_uint(NVML_DEVICE_NAME_BUFFER_SIZE))
_check_return(ret)
return c_name.value
def nvmlDeviceGetCount():
c_count = ctypes.c_uint()
fn = _get_nvml_function("nvmlDeviceGetCount_v2")
ret = fn(ctypes.byref(c_count))
_check_return(ret)
return c_count.value
def nvmlDeviceGetMemoryInfo(handle):
c_memory = c_nvmlMemory_t()
fn = _get_nvml_function("nvmlDeviceGetMemoryInfo")
ret = fn(handle, ctypes.byref(c_memory))
_check_return(ret)
return c_memory
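# Minimal usage sketch (editorial addition, not part of the original module).
# It assumes an NVIDIA driver and at least one GPU are available; otherwise the
# calls below raise NVMLError.
if __name__ == "__main__":
    nvmlInit()
    try:
        for i in range(nvmlDeviceGetCount()):
            handle = nvmlDeviceGetHandleByIndex(i)
            name = nvmlDeviceGetName(handle)              # returned as bytes
            mem = nvmlDeviceGetMemoryInfo(handle)         # c_nvmlMemory_t
            util = nvmlDeviceGetUtilizationRates(handle)  # c_nvmlUtilization_t
            print("%s: %d/%d bytes used, %d%% gpu" %
                  (name.decode(), mem.used, mem.total, util.gpu))
    finally:
        nvmlShutdown()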
| 29.854701
| 117
| 0.704552
|
326fb4f27aeecba4743a87fee3dc14dac3fd609a
| 20,621
|
py
|
Python
|
pyro/infer/mcmc/hmc.py
|
gavincangan/pyro
|
d9115a6da7edd7e3fecd6b89a850cc137d7e7e9a
|
[
"MIT"
] | null | null | null |
pyro/infer/mcmc/hmc.py
|
gavincangan/pyro
|
d9115a6da7edd7e3fecd6b89a850cc137d7e7e9a
|
[
"MIT"
] | null | null | null |
pyro/infer/mcmc/hmc.py
|
gavincangan/pyro
|
d9115a6da7edd7e3fecd6b89a850cc137d7e7e9a
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import math
from collections import OrderedDict
import torch
from torch.distributions import biject_to, constraints
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.distributions.util import eye_like
from pyro.infer import config_enumerate
from pyro.infer.mcmc.adaptation import WarmupAdapter
from pyro.infer.mcmc.trace_kernel import TraceKernel
from pyro.infer.mcmc.util import TraceEinsumEvaluator
from pyro.ops.integrator import velocity_verlet
from pyro.poutine.subsample_messenger import _Subsample
from pyro.util import optional, torch_isinf, torch_isnan, ignore_jit_warnings
class HMC(TraceKernel):
r"""
Simple Hamiltonian Monte Carlo kernel, where ``step_size`` and ``num_steps``
need to be explicitly specified by the user.
**References**
[1] `MCMC Using Hamiltonian Dynamics`,
Radford M. Neal
:param model: Python callable containing Pyro primitives.
:param float step_size: Determines the size of a single step taken by the
verlet integrator while computing the trajectory using Hamiltonian
dynamics. If not specified, it will be set to 1.
    :param float trajectory_length: Length of an MCMC trajectory. If not
specified, it will be set to ``step_size x num_steps``. In case
``num_steps`` is not specified, it will be set to :math:`2\pi`.
:param int num_steps: The number of discrete steps over which to simulate
Hamiltonian dynamics. The state at the end of the trajectory is
returned as the proposal. This value is always equal to
``int(trajectory_length / step_size)``.
:param bool adapt_step_size: A flag to decide if we want to adapt step_size
during warm-up phase using Dual Averaging scheme.
:param bool adapt_mass_matrix: A flag to decide if we want to adapt mass
matrix during warm-up phase using Welford scheme.
:param bool full_mass: A flag to decide if mass matrix is dense or diagonal.
:param dict transforms: Optional dictionary that specifies a transform
for a sample site with constrained support to unconstrained space. The
transform should be invertible, and implement `log_abs_det_jacobian`.
If not specified and the model has sites with constrained support,
automatic transformations will be applied, as specified in
:mod:`torch.distributions.constraint_registry`.
:param int max_plate_nesting: Optional bound on max number of nested
:func:`pyro.plate` contexts. This is required if model contains
discrete sample sites that can be enumerated over in parallel.
:param bool jit_compile: Optional parameter denoting whether to use
the PyTorch JIT to trace the log density computation, and use this
optimized executable trace in the integrator.
    :param dict jit_options: A dictionary containing optional arguments for
:func:`torch.jit.trace` function.
:param bool ignore_jit_warnings: Flag to ignore warnings from the JIT
tracer when ``jit_compile=True``. Default is False.
:param float target_accept_prob: Increasing this value will lead to a smaller
        step size, hence the sampling will be slower and more robust. Defaults to 0.8.
.. note:: Internally, the mass matrix will be ordered according to the order
of the names of latent variables, not the order of their appearance in
the model.
Example:
>>> true_coefs = torch.tensor([1., 2., 3.])
>>> data = torch.randn(2000, 3)
>>> dim = 3
>>> labels = dist.Bernoulli(logits=(true_coefs * data).sum(-1)).sample()
>>>
>>> def model(data):
... coefs_mean = torch.zeros(dim)
... coefs = pyro.sample('beta', dist.Normal(coefs_mean, torch.ones(3)))
... y = pyro.sample('y', dist.Bernoulli(logits=(coefs * data).sum(-1)), obs=labels)
... return y
>>>
>>> hmc_kernel = HMC(model, step_size=0.0855, num_steps=4)
>>> mcmc_run = MCMC(hmc_kernel, num_samples=500, warmup_steps=100).run(data)
>>> posterior = mcmc_run.marginal('beta').empirical['beta']
>>> posterior.mean # doctest: +SKIP
tensor([ 0.9819, 1.9258, 2.9737])
"""
def __init__(self,
model,
step_size=1,
trajectory_length=None,
num_steps=None,
adapt_step_size=True,
adapt_mass_matrix=True,
full_mass=False,
transforms=None,
max_plate_nesting=None,
jit_compile=False,
jit_options=None,
ignore_jit_warnings=False,
target_accept_prob=0.8):
self.model = model
self.max_plate_nesting = max_plate_nesting
if trajectory_length is not None:
self.trajectory_length = trajectory_length
elif num_steps is not None:
self.trajectory_length = step_size * num_steps
else:
self.trajectory_length = 2 * math.pi # from Stan
self._jit_compile = jit_compile
self._jit_options = {"check_trace": False} if jit_options is None else jit_options
self._ignore_jit_warnings = ignore_jit_warnings
# The following parameter is used in find_reasonable_step_size method.
# In NUTS paper, this threshold is set to a fixed log(0.5).
# After https://github.com/stan-dev/stan/pull/356, it is set to a fixed log(0.8).
self._direction_threshold = math.log(0.8) # from Stan
# number of tries to get a valid initial trace
self._max_tries_initial_trace = 100
self.transforms = {} if transforms is None else transforms
self._automatic_transform_enabled = True if transforms is None else False
self._reset()
self._adapter = WarmupAdapter(step_size,
adapt_step_size=adapt_step_size,
adapt_mass_matrix=adapt_mass_matrix,
target_accept_prob=target_accept_prob,
is_diag_mass=not full_mass)
super(HMC, self).__init__()
def _get_trace(self, z):
z_trace = self._prototype_trace
for name, value in z.items():
z_trace.nodes[name]["value"] = value
trace_poutine = poutine.trace(poutine.replay(self.model, trace=z_trace))
trace_poutine(*self._args, **self._kwargs)
return trace_poutine.trace
@staticmethod
def _iter_latent_nodes(trace):
for name, node in sorted(trace.iter_stochastic_nodes(), key=lambda x: x[0]):
if not (node["fn"].has_enumerate_support or isinstance(node["fn"], _Subsample)):
yield (name, node)
def _compute_trace_log_prob(self, model_trace):
return self._trace_prob_evaluator.log_prob(model_trace)
def _kinetic_energy(self, r):
r_flat = torch.cat([r[site_name].reshape(-1) for site_name in sorted(r)])
if self.inverse_mass_matrix.dim() == 2:
return 0.5 * self.inverse_mass_matrix.matmul(r_flat).dot(r_flat)
else:
return 0.5 * self.inverse_mass_matrix.dot(r_flat ** 2)
def _potential_energy(self, z):
if not z:
return 0.
if self._jit_compile:
return self._potential_energy_jit(z)
# Since the model is specified in the constrained space, transform the
# unconstrained R.V.s `z` to the constrained space.
z_constrained = z.copy()
for name, transform in self.transforms.items():
z_constrained[name] = transform.inv(z_constrained[name])
trace = self._get_trace(z_constrained)
potential_energy = -self._compute_trace_log_prob(trace)
# adjust by the jacobian for this transformation.
for name, transform in self.transforms.items():
potential_energy += transform.log_abs_det_jacobian(z_constrained[name], z[name]).sum()
return potential_energy
def _potential_energy_jit(self, z):
names, vals = zip(*sorted(z.items()))
if self._compiled_potential_fn:
return self._compiled_potential_fn(*vals)
def compiled(*zi):
z_constrained = list(zi)
# transform to constrained space.
for i, name in enumerate(names):
if name in self.transforms:
transform = self.transforms[name]
z_constrained[i] = transform.inv(z_constrained[i])
z_constrained = dict(zip(names, z_constrained))
trace = self._get_trace(z_constrained)
potential_energy = -self._compute_trace_log_prob(trace)
# adjust by the jacobian for this transformation.
for i, name in enumerate(names):
if name in self.transforms:
transform = self.transforms[name]
potential_energy += transform.log_abs_det_jacobian(z_constrained[name], zi[i]).sum()
return potential_energy
with pyro.validation_enabled(False), optional(ignore_jit_warnings(), self._ignore_jit_warnings):
self._compiled_potential_fn = torch.jit.trace(compiled, vals, **self._jit_options)
return self._compiled_potential_fn(*vals)
def _energy(self, z, r):
return self._kinetic_energy(r) + self._potential_energy(z)
def _reset(self):
self._t = 0
self._accept_cnt = 0
self._r_shapes = {}
self._r_numels = {}
self._args = None
self._compiled_potential_fn = None
self._kwargs = None
self._prototype_trace = None
self._initial_trace = None
self._has_enumerable_sites = False
self._trace_prob_evaluator = None
self._z_last = None
self._potential_energy_last = None
self._z_grads_last = None
self._warmup_steps = None
def _find_reasonable_step_size(self):
step_size = self.step_size
        # We are going to find a step_size which makes accept_prob (Metropolis correction)
# near the target_accept_prob. If accept_prob:=exp(-delta_energy) is small,
# then we have to decrease step_size; otherwise, increase step_size.
z, potential_energy, z_grads = self._fetch_from_cache()
if not z:
return self.step_size
r, _ = self._sample_r(name="r_presample_0")
energy_current = self._kinetic_energy(r) + potential_energy
z_new, r_new, z_grads_new, potential_energy_new = velocity_verlet(
z, r, self._potential_energy, self.inverse_mass_matrix, step_size, z_grads=z_grads)
energy_new = self._kinetic_energy(r_new) + potential_energy_new
delta_energy = energy_new - energy_current
# direction=1 means keep increasing step_size, otherwise decreasing step_size.
# Note that the direction is -1 if delta_energy is `NaN` which may be the
# case for a diverging trajectory (e.g. in the case of evaluating log prob
# of a value simulated using a large step size for a constrained sample site).
direction = 1 if self._direction_threshold < -delta_energy else -1
# define scale for step_size: 2 for increasing, 1/2 for decreasing
step_size_scale = 2 ** direction
direction_new = direction
        # keep scaling step_size until accept_prob crosses its target
# TODO: make thresholds for too small step_size or too large step_size
t = 0
while direction_new == direction:
t += 1
step_size = step_size_scale * step_size
r, _ = self._sample_r(name="r_presample_{}".format(t))
energy_current = self._kinetic_energy(r) + potential_energy
z_new, r_new, z_grads_new, potential_energy_new = velocity_verlet(
z, r, self._potential_energy, self.inverse_mass_matrix, step_size, z_grads=z_grads)
energy_new = self._kinetic_energy(r_new) + potential_energy_new
delta_energy = energy_new - energy_current
direction_new = 1 if self._direction_threshold < -delta_energy else -1
return step_size
def _guess_max_plate_nesting(self):
"""
Guesses max_plate_nesting by running the model once
without enumeration. This optimistically assumes static model
structure.
"""
with poutine.block():
model_trace = poutine.trace(self.model).get_trace(*self._args, **self._kwargs)
sites = [site
for site in model_trace.nodes.values()
if site["type"] == "sample"]
dims = [frame.dim
for site in sites
for frame in site["cond_indep_stack"]
if frame.vectorized]
self.max_plate_nesting = -min(dims) if dims else 0
def _sample_r(self, name):
r_dist = self._adapter.r_dist
r_flat = pyro.sample(name, r_dist)
r = {}
pos = 0
for name in sorted(self._r_shapes):
next_pos = pos + self._r_numels[name]
r[name] = r_flat[pos:next_pos].reshape(self._r_shapes[name])
pos = next_pos
assert pos == r_flat.size(0)
return r, r_flat
@property
def inverse_mass_matrix(self):
return self._adapter.inverse_mass_matrix
@property
def step_size(self):
return self._adapter.step_size
@property
def num_steps(self):
return max(1, int(self.trajectory_length / self.step_size))
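    # Worked example (editorial note), using the values from the class
    # docstring: HMC(model, step_size=0.0855, num_steps=4) stores
    # trajectory_length = 0.0855 * 4 = 0.342, so this property recovers
    # int(trajectory_length / step_size) = 4 -- at least until step size
    # adaptation changes step_size during warm-up.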
@property
def initial_trace(self):
"""
Find a valid trace to initiate the MCMC sampler. This is also used as a
prototype trace to inter-convert between Pyro's trace object and dict
object used by the integrator.
"""
if self._initial_trace:
return self._initial_trace
trace = self._prototype_trace
for i in range(self._max_tries_initial_trace):
z = {name: node["value"].detach()
for name, node in self._iter_latent_nodes(trace)}
# automatically transform `z` to unconstrained space, if needed.
for name, transform in self.transforms.items():
z[name] = transform(z[name])
potential_energy = self._potential_energy(z)
if not torch_isnan(potential_energy) and not torch_isinf(potential_energy):
self._initial_trace = trace
return trace
trace = poutine.trace(self.model).get_trace(*self._args, **self._kwargs)
raise ValueError("Model specification seems incorrect - cannot find a valid trace.")
@initial_trace.setter
def initial_trace(self, trace):
self._initial_trace = trace
if self._warmup_steps is not None: # if setup is already called
self._initialize_step_size()
def _initialize_model_properties(self):
if self.max_plate_nesting is None:
self._guess_max_plate_nesting()
# Wrap model in `poutine.enum` to enumerate over discrete latent sites.
# No-op if model does not have any discrete latents.
self.model = poutine.enum(config_enumerate(self.model),
first_available_dim=-1 - self.max_plate_nesting)
if self._automatic_transform_enabled:
self.transforms = {}
trace = poutine.trace(self.model).get_trace(*self._args, **self._kwargs)
self._prototype_trace = trace
site_value = None
for name, node in trace.iter_stochastic_nodes():
if isinstance(node["fn"], _Subsample):
continue
if node["fn"].has_enumerate_support:
self._has_enumerable_sites = True
continue
site_value = node["value"]
if node["fn"].support is not constraints.real and self._automatic_transform_enabled:
self.transforms[name] = biject_to(node["fn"].support).inv
site_value = self.transforms[name](node["value"])
self._r_shapes[name] = site_value.shape
self._r_numels[name] = site_value.numel()
self._trace_prob_evaluator = TraceEinsumEvaluator(trace,
self._has_enumerable_sites,
self.max_plate_nesting)
if site_value is not None:
mass_matrix_size = sum(self._r_numels.values())
if self._adapter.is_diag_mass:
initial_mass_matrix = site_value.new_ones(mass_matrix_size)
else:
initial_mass_matrix = eye_like(site_value, mass_matrix_size)
self._adapter.configure(self._warmup_steps,
inv_mass_matrix=initial_mass_matrix,
find_reasonable_step_size_fn=self._find_reasonable_step_size)
self._initialize_step_size() # this method also caches z and its potential energy
def _initialize_step_size(self):
z = {name: node["value"].detach()
for name, node in self._iter_latent_nodes(self.initial_trace)}
# automatically transform `z` to unconstrained space, if needed.
for name, transform in self.transforms.items():
z[name] = transform(z[name])
potential_energy = self._potential_energy(z)
self._cache(z, potential_energy, None)
if z and self._adapter.adapt_step_size:
self._adapter.reset_step_size_adaptation()
def setup(self, warmup_steps, *args, **kwargs):
self._warmup_steps = warmup_steps
self._args = args
self._kwargs = kwargs
self._initialize_model_properties()
def cleanup(self):
self._reset()
def _cache(self, z, potential_energy, z_grads):
self._z_last = z
self._potential_energy_last = potential_energy
self._z_grads_last = z_grads
def _fetch_from_cache(self):
return self._z_last, self._potential_energy_last, self._z_grads_last
def sample(self, trace):
z, potential_energy, z_grads = self._fetch_from_cache()
# return early if no sample sites
if not z:
self._accept_cnt += 1
self._t += 1
return self._get_trace(z)
r, _ = self._sample_r(name="r_t={}".format(self._t))
energy_current = self._kinetic_energy(r) + potential_energy
# Temporarily disable distributions args checking as
# NaNs are expected during step size adaptation
with optional(pyro.validation_enabled(False), self._t < self._warmup_steps):
z_new, r_new, z_grads_new, potential_energy_new = velocity_verlet(z, r, self._potential_energy,
self.inverse_mass_matrix,
self.step_size,
self.num_steps,
z_grads=z_grads)
# apply Metropolis correction.
energy_proposal = self._kinetic_energy(r_new) + potential_energy_new
delta_energy = energy_proposal - energy_current
# Set accept prob to 0.0 if delta_energy is `NaN` which may be
# the case for a diverging trajectory when using a large step size.
if torch_isnan(delta_energy):
accept_prob = delta_energy.new_tensor(0.0)
else:
accept_prob = (-delta_energy).exp().clamp(max=1.)
rand = pyro.sample("rand_t={}".format(self._t), dist.Uniform(accept_prob.new_tensor(0.),
accept_prob.new_tensor(1.)))
if rand < accept_prob:
self._accept_cnt += 1
z = z_new
self._cache(z, potential_energy_new, z_grads_new)
if self._t < self._warmup_steps:
self._adapter.step(self._t, z, accept_prob)
self._t += 1
# get trace with the constrained values for `z`.
z = z.copy()
for name, transform in self.transforms.items():
z[name] = transform.inv(z[name])
return self._get_trace(z)
def diagnostics(self):
return OrderedDict([
("step size", "{:.2e}".format(self.step_size)),
("acc. rate", "{:.3f}".format(self._accept_cnt / self._t))
])
| 46.339326
| 107
| 0.631153
|
9bc453a1e2394ac82257be073fb5e41cc951302a
| 3,374
|
py
|
Python
|
project/170 solver copy/collect.py
|
AnthonyNg404/Algo
|
7b1e9f3a70d3bd64ef31a3e19f7cfbb2d6997cd1
|
[
"Unlicense"
] | null | null | null |
project/170 solver copy/collect.py
|
AnthonyNg404/Algo
|
7b1e9f3a70d3bd64ef31a3e19f7cfbb2d6997cd1
|
[
"Unlicense"
] | null | null | null |
project/170 solver copy/collect.py
|
AnthonyNg404/Algo
|
7b1e9f3a70d3bd64ef31a3e19f7cfbb2d6997cd1
|
[
"Unlicense"
] | null | null | null |
import networkx as nx
from parse import read_input_file, write_output_file
from utils import is_valid_solution, calculate_score
import sys
from os.path import basename, normpath
import glob
import heapq
import os
import copy
import random
import utils
import parse
def read_output_file(G, path):
"""
Parses and validates an output file
Args:
G: input graph corresponding to input file
path: str, path to output file
Returns:
score: the difference between the new and original shortest path
"""
H = G.copy()
if len(H) >= 20 and len(H) <= 30:
max_cities = 1
max_roads = 15
elif len(H) > 30 and len(H) <= 50:
max_cities = 3
max_roads = 30
elif len(H) > 50 and len(H) <= 100:
max_cities = 5
max_roads = 100
else:
print('Input Graph is not of a valid size')
assert H.has_node(0), 'Source vertex is missing in input graph'
assert H.has_node(len(G) - 1), 'Target vertex is missing in input graph'
cities = []
removed_edges = []
with open(path, "r") as fo:
number_of_cities = fo.readline().strip()
assert number_of_cities.isdigit(), 'Number of cities is not a digit'
number_of_cities = int(number_of_cities)
assert number_of_cities <= max_cities, 'Too many cities being removed from input graph'
for _ in range(number_of_cities):
city = fo.readline().strip()
assert city.isdigit(), 'Specified vertex is not a digit'
city = int(city)
assert H.has_node(city), 'Specified vertex is not in input graph'
cities.append(city)
number_of_roads = fo.readline().strip()
assert number_of_roads.isdigit(), 'Number of roads is not a digit'
number_of_roads = int(number_of_roads)
for _ in range(number_of_roads):
road = fo.readline().split()
assert len(road) == 2, 'An edge must be specified with a start and end vertex'
assert road[0].isdigit() and road[1].isdigit()
u = int(road[0])
v = int(road[1])
assert H.has_edge(u, v), 'Specified edge is not in input graph'
removed_edges.append((u,v))
return utils.calculate_score(G, cities, removed_edges), cities, removed_edges
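# Editorial sketch of the output-file layout that read_output_file expects;
# the vertex ids below are invented for illustration only:
#
#   1        <- number of removed cities
#   7        <- one city id per line
#   2        <- number of removed roads
#   0 3      <- one "u v" edge per line
#   3 9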
input_dir = r"C:\Users\antho\Desktop\170\project\inputs\large"
graph_file = os.listdir(input_dir)
output1_dir = r"C:\Users\antho\Desktop\170\project\prepare\large2"
output1_file = os.listdir(output1_dir)
output2_dir = r"C:\Users\antho\Desktop\170\project\prepare\large"
output2_file = os.listdir(output2_dir)
if __name__ == '__main__':
num_overwrite = 0
for g, out1, out2 in zip(graph_file, output1_file, output2_file):
c1, k1, c2, k2 = None, None, None, None
input_path = input_dir + '/' + g
out1_path = output1_dir + '/' + out1
out2_path = output2_dir + '/' + out2
G = read_input_file(input_path)
score1, c1, k1 = read_output_file(G, out1_path)
score2, c2, k2 = read_output_file(G, out2_path)
print(score1, " ", score2)
assert is_valid_solution(G, c1, k1)
assert is_valid_solution(G, c2, k2)
if score1 > score2:
num_overwrite += 1
write_output_file(G, c1, k1, output2_dir + "//" + g[:-3] + '.out')
print(num_overwrite)
| 34.080808
| 95
| 0.634558
|
6e87cffb7a0c2bfad22f662dd66b989140dd80db
| 2,863
|
py
|
Python
|
setup.py
|
miroag/mfs
|
acb5b619864c2199c28b75e1800d3b2d3bf43dcc
|
[
"MIT"
] | null | null | null |
setup.py
|
miroag/mfs
|
acb5b619864c2199c28b75e1800d3b2d3bf43dcc
|
[
"MIT"
] | 11
|
2017-12-06T13:07:04.000Z
|
2017-12-15T10:38:17.000Z
|
setup.py
|
miroag/mfs
|
acb5b619864c2199c28b75e1800d3b2d3bf43dcc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
return io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
setup(
name='mfs',
version='0.1.0',
license='MIT license',
description='mfs is a set of utilities to ease image download from some Russian modelling forums',
long_description='%s\n%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
),
author='Alexandre Ovtchinnikov',
author_email='abc@miroag.com',
url='https://github.com/miroag/mfs',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
# 'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
# uncomment if you test on these interpreters:
# 'Programming Language :: Python :: Implementation :: IronPython',
# 'Programming Language :: Python :: Implementation :: Jython',
# 'Programming Language :: Python :: Implementation :: Stackless',
'Topic :: Utilities',
],
keywords=[
# eg: 'keyword1', 'keyword2', 'keyword3',
],
install_requires=[
'beautifulsoup4', 'requests', 'aiohttp', 'tqdm', 'docopt'
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest',
],
extras_require={
# eg:
# 'rst': ['docutils>=0.11'],
# ':python_version=="2.6"': ['argparse'],
},
entry_points={
'console_scripts': [
'mfs = mfs.cli:main',
]
},
)
| 32.534091
| 102
| 0.598323
|
8cabc63f960da28af0d1133f1374b1258c5dd14e
| 573
|
py
|
Python
|
bokeh/themes/__init__.py
|
isaacmg/bokeh
|
1025d1177b8e636c36f6160da4bd2fbf8ca51962
|
[
"BSD-3-Clause"
] | 1
|
2018-09-19T02:08:13.000Z
|
2018-09-19T02:08:13.000Z
|
bokeh/themes/__init__.py
|
isaacmg/bokeh
|
1025d1177b8e636c36f6160da4bd2fbf8ca51962
|
[
"BSD-3-Clause"
] | null | null | null |
bokeh/themes/__init__.py
|
isaacmg/bokeh
|
1025d1177b8e636c36f6160da4bd2fbf8ca51962
|
[
"BSD-3-Clause"
] | 1
|
2020-06-17T05:47:16.000Z
|
2020-06-17T05:47:16.000Z
|
''' Provides API for loading themes
'''
from __future__ import absolute_import
from os.path import dirname, realpath, join
from .theme import Theme
_THIS_DIR = dirname(realpath(__file__))
_FP_FMT = join(_THIS_DIR, '{0}.json')
LIGHT_MINIMAL = 'light_minimal'
DARK_MINIMAL = 'dark_minimal'
CALIBER = 'caliber'
default = Theme(json={})
built_in_themes = {
LIGHT_MINIMAL: Theme(filename=_FP_FMT.format(LIGHT_MINIMAL)),
DARK_MINIMAL: Theme(filename=_FP_FMT.format(DARK_MINIMAL)),
CALIBER: Theme(filename=_FP_FMT.format(CALIBER))
}
del dirname, realpath, join
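# Editorial usage sketch (kept as a comment to avoid importing bokeh.io from
# within this package): one plausible way to apply a built-in theme to the
# current document.
#
#   from bokeh.io import curdoc
#   from bokeh.themes import built_in_themes, DARK_MINIMAL
#   curdoc().theme = built_in_themes[DARK_MINIMAL]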
| 22.92
| 65
| 0.755672
|
258f41025076dfc2bab8ae417cca8be86413aefc
| 4,183
|
py
|
Python
|
examples/inheritance/concrete.py
|
gujun4990/sqlalchemy
|
057bae2295feb86529a04f09cd2f3d4c2c6d88a8
|
[
"MIT"
] | 1
|
2018-04-02T18:41:52.000Z
|
2018-04-02T18:41:52.000Z
|
examples/inheritance/concrete.py
|
gujun4990/sqlalchemy
|
057bae2295feb86529a04f09cd2f3d4c2c6d88a8
|
[
"MIT"
] | null | null | null |
examples/inheritance/concrete.py
|
gujun4990/sqlalchemy
|
057bae2295feb86529a04f09cd2f3d4c2c6d88a8
|
[
"MIT"
] | 3
|
2017-09-26T13:59:24.000Z
|
2020-12-04T17:51:54.000Z
|
"""Concrete-table (table-per-class) inheritance example."""
from sqlalchemy import Column, Integer, String, \
ForeignKey, create_engine, inspect, or_
from sqlalchemy.orm import relationship, Session, with_polymorphic
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.declarative import ConcreteBase
Base = declarative_base()
class Company(Base):
__tablename__ = 'company'
id = Column(Integer, primary_key=True)
name = Column(String(50))
employees = relationship(
"Person",
back_populates='company',
cascade='all, delete-orphan')
def __repr__(self):
return "Company %s" % self.name
class Person(ConcreteBase, Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
company_id = Column(ForeignKey('company.id'))
name = Column(String(50))
company = relationship("Company", back_populates="employees")
__mapper_args__ = {
'polymorphic_identity': 'person',
}
def __repr__(self):
return "Ordinary person %s" % self.name
class Engineer(Person):
__tablename__ = 'engineer'
id = Column(Integer, primary_key=True)
name = Column(String(50))
company_id = Column(ForeignKey('company.id'))
status = Column(String(30))
engineer_name = Column(String(30))
primary_language = Column(String(30))
company = relationship("Company", back_populates="employees")
__mapper_args__ = {
'polymorphic_identity': 'engineer',
'concrete': True
}
def __repr__(self):
return (
"Engineer %s, status %s, engineer_name %s, "
"primary_language %s" %
(
self.name, self.status,
self.engineer_name, self.primary_language)
)
class Manager(Person):
__tablename__ = 'manager'
id = Column(Integer, primary_key=True)
name = Column(String(50))
company_id = Column(ForeignKey('company.id'))
status = Column(String(30))
manager_name = Column(String(30))
company = relationship("Company", back_populates="employees")
__mapper_args__ = {
'polymorphic_identity': 'manager',
'concrete': True
}
def __repr__(self):
return "Manager %s, status %s, manager_name %s" % (
self.name, self.status, self.manager_name)
engine = create_engine('sqlite://', echo=True)
Base.metadata.create_all(engine)
session = Session(engine)
c = Company(name='company1', employees=[
Manager(
name='pointy haired boss',
status='AAB',
manager_name='manager1'),
Engineer(
name='dilbert',
status='BBA',
engineer_name='engineer1',
primary_language='java'),
Person(name='joesmith'),
Engineer(
name='wally',
status='CGG',
engineer_name='engineer2',
primary_language='python'),
Manager(
name='jsmith',
status='ABA',
manager_name='manager2')
])
session.add(c)
session.commit()
c = session.query(Company).get(1)
for e in c.employees:
print(e, inspect(e).key, e.company)
assert set([e.name for e in c.employees]) == set(
['pointy haired boss', 'dilbert', 'joesmith', 'wally', 'jsmith'])
print("\n")
dilbert = session.query(Person).filter_by(name='dilbert').one()
dilbert2 = session.query(Engineer).filter_by(name='dilbert').one()
assert dilbert is dilbert2
dilbert.engineer_name = 'hes dilbert!'
session.commit()
c = session.query(Company).get(1)
for e in c.employees:
print(e)
# query using with_polymorphic.
eng_manager = with_polymorphic(Person, [Engineer, Manager])
print(
session.query(eng_manager).
filter(
or_(
eng_manager.Engineer.engineer_name == 'engineer1',
eng_manager.Manager.manager_name == 'manager2'
)
).all()
)
# illustrate join from Company
eng_manager = with_polymorphic(Person, [Engineer, Manager])
print(
session.query(Company).
join(
Company.employees.of_type(eng_manager)
).filter(
or_(eng_manager.Engineer.engineer_name == 'engineer1',
eng_manager.Manager.manager_name == 'manager2')
).all())
session.commit()
| 25.820988
| 69
| 0.647143
|
9bf084de68e97f7d6215676e99ff04e96cea10c0
| 5,906
|
py
|
Python
|
noaa.py
|
WillDignazio/NOAATides
|
ed928c1c2507f546f0645f6a46a719a86ee3e657
|
[
"MIT"
] | null | null | null |
noaa.py
|
WillDignazio/NOAATides
|
ed928c1c2507f546f0645f6a46a719a86ee3e657
|
[
"MIT"
] | null | null | null |
noaa.py
|
WillDignazio/NOAATides
|
ed928c1c2507f546f0645f6a46a719a86ee3e657
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
Copyright (C) 2016 William Ziener-Dignazio
NOAA Tidal Data Retriever
Usage: python noaa.py
This program uses the exposed public API via noaa.gov to build a CSV file
consisting of tidal data since January 1, 2000 to the current date.
The online NOAA API has a max retrieval period of approximately 1 day, thus
we must iteratively retrieve the tidal data through consecutive API calls. The
API calls themselves are through HTTP requests to a specified endpoint
retrieved from the noaa.gov website. The endpoint URL is hardcoded below
(as 'url') with substitution points for the desired date ranges.
As a courtesy to the NOAA servers, we limit requests to 1 per second. This
both prevents heavy load to the government run servers, and avoids blacklisting
of the executing IP address, which with enough requests might look like a
DOS attack.
Output files are of the format: "tidal_data-${DATETIME}.csv"
Every time the tool is run, a new file will be generated, as to not destroy
previous runs that generated data csv files.
"""
from datetime import timedelta, date
from string import Template
import pycurl
from StringIO import StringIO
import time
import sys
"""
As mentioned above, this "magic" URL corresponds with the API for retrieving water
level tidal data from the NOAA API endpoint.
This value will be handed to the pycurl library, which will issue a GET request
against the endpoint, retrieving a portion of CSV data for us.
This URL is broken down into the following components:
1. NOAA subdomain for tidal and current data
2. API call to water_level via the specified application
3. Enter in begin and end dates by substitution values
4. Provide station number (station in hawaii)
5. Request CSV format
"""
url = 'http://tidesandcurrents.noaa.gov/api/datagetter?' + \
'product=water_level&application=NOS.COOPS.TAC.WL&' + \
'begin_date=$begin_date&end_date=$end_date&' + \
'datum=MLLW&station=1617760&time_zone=GMT&units=english&' + \
'format=csv'
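"""
Editorial illustration: on the first iteration of the loop below the template
resolves (roughly) to

  http://tidesandcurrents.noaa.gov/api/datagetter?product=water_level&
  application=NOS.COOPS.TAC.WL&begin_date=19991231&end_date=20000101&
  datum=MLLW&station=1617760&time_zone=GMT&units=english&format=csv

(shown wrapped here; the real URL is a single line).
"""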
"""
Generator function that produces the next day within the date range,
iterating once on every method call.
"""
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date).days)):
yield start_date + timedelta(n)
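# Editorial note: daterange(date(2000, 1, 1), date(2000, 1, 4)) yields
# 2000-01-01, 2000-01-02 and 2000-01-03; the end date itself is excluded,
# mirroring range() semantics.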
"""
This is a close approximation of the start date from which NOAA started giving
their tidal data to the public. We might be able to tweak it to a slightly
earlier date, but for data mining purposes this should be sufficient.
"""
start_date = date(2000, 1, 1)
end_date = date(date.today().year, date.today().month, date.today().day)
delta = end_date - start_date # Amount of time passed from today to the start_date
print "Starting download of NOAA tidal data..."
"""
Open up a file descriptor for the tidal_data-${DATETIME}.csv file,
with a buffer of 1024 bytes. This allows the data to be slightly
buffered before hitting disk. The Requests themselves are probably
smaller than the file buffer; as long as we don't kill -9 the process,
we'll probably not lose any data.
"""
file = open('tidal_data-' + str(time.time()) + '.csv', 'w+', 1024)
write_header = True
"""
Main retrieval loop
We iterate over the date range, from start date (January 1st, 2000) to
the end date (Today). Along the way, we substitute the day values for
the current iteration into the template "url".
We use pycurl to orchestrate the GET request against the NOAA endpoint,
and create a new instance of a library request every iteration.
The response to the request is stripped of metadata provided from the request,
and then written to the output CSV file on disk.
We maintain a percentage counter that is updated every iteration, to
track how far we've gone.
"""
days_traversed = 0
for single_date in daterange(start_date, end_date):
start_date_past = single_date - timedelta(days=1)
end_date_past = single_date
"""
We take our hardcoded template url, and substitute in the values
    corresponding to the iteration's day. Remember that we are limited to
    retrieving approx. 1 day's worth of data at a time.
"""
s = Template(url)
curl_url = s.substitute(begin_date=start_date_past.strftime("%Y%m%d"),
end_date=end_date_past.strftime("%Y%m%d"))
"""
Build up a python StringIO buffer, allowing us to write python string
    data to an underlying byte buffer.
"""
buffer = StringIO()
"""
Create an instance of a pycurl Curl object, this will be our interface
to the libcurl library. We must set some configuration options so that
we can request against the correct location.
"""
c = pycurl.Curl()
c.setopt(c.URL, curl_url) # Set the completed url template as the target
c.setopt(c.WRITEDATA, buffer) # The response shall go to our string buffer
c.perform() # Execute the request
c.close() # Finish up the Curl object
"""
Now that we have processed the request, get the value of the body, and save
it to the output csv file.
"""
body = buffer.getvalue()
if write_header:
"""
On the first run, we want to include the header line that describes the
column values. We don't want to include these on subsequent runs.
"""
file.write(str(body))
write_header = False
else:
file.write("\n".join(str(body).splitlines()[1:])) # Doesn't include header line
file.write("\n")
"""
XXX: Caution changing this value
We sleep for a second so that we do not bombard the NOAA API endpoints with requests
for data. Doing so puts strain on their resources, as well as increases the chance that
your IP address will be blacklisted by their servers.
"""
time.sleep(1)
days_traversed += 1
percentComplete = float(days_traversed) / float(delta.days)
print '\r>> %f%% Complete' % percentComplete,
sys.stdout.flush()
| 36.012195
| 91
| 0.724009
|
0a973e7f3dd327991b4804d1907e2ce45e7a9d47
| 2,808
|
py
|
Python
|
deploy/test/sp_test6_7.py
|
zhexiao/mnet
|
51e64e6c5181702f90d4f68efb5e4ba8f20dff7b
|
[
"Apache-2.0"
] | 19
|
2019-04-05T03:39:51.000Z
|
2021-11-09T10:55:58.000Z
|
deploy/test/sp_test6_7.py
|
zhexiao/mnet
|
51e64e6c5181702f90d4f68efb5e4ba8f20dff7b
|
[
"Apache-2.0"
] | 3
|
2017-08-04T07:37:31.000Z
|
2021-06-10T19:42:58.000Z
|
deploy/test/sp_test6_7.py
|
zhexiao/mnet
|
51e64e6c5181702f90d4f68efb5e4ba8f20dff7b
|
[
"Apache-2.0"
] | 11
|
2017-08-02T09:14:45.000Z
|
2021-05-07T15:33:07.000Z
|
"""
./bin/spark-submit --packages org.apache.spark:spark-sql-kafka-0-10_2.11:2.4.4 /vagrant/mnet/deploy/test/sp_test6.py
spark-sql-kafka-0-10_2.11:2.4.4 ===> 2.11 is the Scala version, 2.4.4 is the Spark version
kafka:kafka_2.11-2.3.0.tgz
spark:spark-2.4.4-bin-hadoop2.7.tgz
"""
from pyspark.sql import SparkSession
from pyspark.sql.functions import from_json, window
from pyspark.sql.types import StructType, IntegerType, StringType, TimestampType
import pyspark.sql.functions as funcs
"""
Constants
"""
spark_master = "spark://192.168.33.50:7077"
kafka_master = "192.168.33.50:9092"
mnet_topic = "test"
mnet_agg_topic = "testres"
window_time = "30 seconds"
spark = SparkSession.builder.master(
spark_master
).getOrCreate()
stream_data = spark \
.readStream \
.format("kafka") \
.option("kafka.bootstrap.servers", kafka_master) \
.option("subscribe", mnet_topic) \
.load()
stream_data.printSchema()
# Parse the Kafka JSON payload
data_schema = StructType().add(
"host", StringType()
).add(
"create_time", TimestampType()
).add(
"netflow", StructType().add(
"ipv4_src_addr", StringType()
).add(
"ipv4_dst_addr", StringType()
).add(
"in_bytes", IntegerType()
).add(
"in_pkts", IntegerType()
).add(
"protocol", IntegerType()
)
)
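# Editorial note: a message value matching data_schema above would look roughly
# like the following (all field values are invented):
#
#   {"host": "h1", "create_time": "2019-09-21 14:42:00",
#    "netflow": {"ipv4_src_addr": "10.0.0.1", "ipv4_dst_addr": "10.0.0.2",
#                "in_bytes": 1500, "in_pkts": 3, "protocol": 6}}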
new_stream_data = stream_data.select(
stream_data.key.cast("string"),
from_json(stream_data.value.cast("string"), data_schema).alias('json_data')
)
new_stream_data.printSchema()
new_df = new_stream_data.filter(
(new_stream_data.json_data.netflow.protocol == 6) | (new_stream_data.json_data.netflow.protocol == 8)
).select(
(new_stream_data.json_data.netflow.ipv4_src_addr).alias('src_ip'),
(new_stream_data.json_data.netflow.ipv4_dst_addr).alias('dest_ip'),
(new_stream_data.json_data.netflow.in_bytes).alias('in_bytes'),
(new_stream_data.json_data.netflow.in_pkts).alias('in_pkts'),
(new_stream_data.json_data.create_time).alias('create_time'),
)
new_df.printSchema()
# Aggregation
res_df = new_df.withWatermark(
'create_time', window_time
).groupBy(
new_df.src_ip,
new_df.dest_ip,
window(new_df.create_time, window_time, window_time),
).agg(
funcs.count("*").alias("flows"),
funcs.sum("in_bytes").alias("bytes"),
funcs.sum("in_pkts").alias("packets"),
)
res_df.printSchema()
# Start running the query that prints the running counts to the console
query = res_df \
.selectExpr("CAST(window AS STRING) AS key", "to_json(struct(*)) AS value") \
.writeStream \
.trigger(processingTime=window_time) \
.outputMode("update") \
.format("kafka") \
.option("kafka.bootstrap.servers", "192.168.33.50:9092") \
.option("topic", mnet_agg_topic) \
.option("checkpointLocation", "/tmp/{}".format(mnet_agg_topic)) \
.start() \
.awaitTermination()
| 29.557895
| 116
| 0.694801
|
b6081eeaefd34c9d424d8d6c19a5ef818dca0e99
| 2,458
|
py
|
Python
|
examples/split_csv_data.py
|
PaccMann/paccmann_datasets
|
0cb0cee349ffab8e227f09f7df0a8bca6a71f22e
|
[
"MIT"
] | 14
|
2019-11-01T12:45:56.000Z
|
2022-03-11T15:38:31.000Z
|
examples/split_csv_data.py
|
PaccMann/paccmann_datasets
|
0cb0cee349ffab8e227f09f7df0a8bca6a71f22e
|
[
"MIT"
] | 74
|
2019-11-12T19:36:27.000Z
|
2022-02-28T08:19:37.000Z
|
examples/split_csv_data.py
|
PaccMann/paccmann_datasets
|
0cb0cee349ffab8e227f09f7df0a8bca6a71f22e
|
[
"MIT"
] | 2
|
2021-08-14T11:15:07.000Z
|
2021-08-25T06:42:01.000Z
|
#!/usr/bin/env python3
"""Split in train and test a .csv."""
import argparse
import logging
import sys
from pytoda.data_splitter import csv_data_splitter
# setting up logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger('split_csv_data')
# define the parser arguments
parser = argparse.ArgumentParser()
parser.add_argument(
'-f',
'--filepaths',
nargs='+',
help='list of files to use to generate the splits',
required=True,
)
parser.add_argument(
'-o',
'--output_path',
type=str,
help='output path where to store the splits',
required=True,
)
parser.add_argument(
'-d',
'--data_type',
type=str,
help='data type, used to generate the output file names',
required=True,
)
parser.add_argument(
'-m',
'--mode',
type=str,
help='strategy used to split the data',
choices=['random', 'file'],
required=True,
)
parser.add_argument(
'-s',
'--seed',
type=int,
help=('seed used by the random generator. ' 'Defaults to 42'),
default=42,
)
parser.add_argument(
'-t',
'--test_fraction',
type=float,
help=('portion of samples in testing data. ' 'Defaults to 0.1'),
default=0.1,
)
parser.add_argument(
'-n',
'--number_of_columns',
type=int,
help=('number of columns used to generate a hash. ' 'Defaults to 12'),
default=12,
)
parser.add_argument(
'-i',
'--index_col',
type=int,
help=('index column in the .csv flies. ' 'Defaults to 0'),
default=0,
)
parser.add_argument(
'--separator',
type=str,
help=('separators in the .csv files. ' 'Defaults to ","'),
default=',',
)
parser.add_argument(
'--header',
type=int,
help=('header row in the .csv files. ' 'Defaults to 0'),
default=0,
)
if __name__ == '__main__':
# parse arguments
args = parser.parse_args()
# run the split
train_filepath, test_filepath = csv_data_splitter(
data_filepaths=args.filepaths,
save_path=args.output_path,
data_type=args.data_type,
mode=args.mode,
seed=args.seed,
test_fraction=args.test_fraction,
number_of_columns=args.number_of_columns,
index_col=args.index_col,
sep=args.separator,
header=args.header,
)
logger.info(
        'Data split into train_filepath={} and test_filepath={}.'.format(
train_filepath, test_filepath
)
)
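# Example invocation (editorial sketch; the file names and output directory are
# hypothetical):
#
#   python split_csv_data.py -f a.csv b.csv -o ./splits -d drug_sensitivity \
#       -m random -t 0.2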
| 23.188679
| 76
| 0.631814
|
78212c4051f404373ae6a09ea3ed70f92c7fc21f
| 2,457
|
py
|
Python
|
students/views.py
|
BendalPrathmesh/E-learning-platform
|
0ff3dd90be5d0df0e820836ed321e61ba176d20e
|
[
"MIT"
] | null | null | null |
students/views.py
|
BendalPrathmesh/E-learning-platform
|
0ff3dd90be5d0df0e820836ed321e61ba176d20e
|
[
"MIT"
] | null | null | null |
students/views.py
|
BendalPrathmesh/E-learning-platform
|
0ff3dd90be5d0df0e820836ed321e61ba176d20e
|
[
"MIT"
] | null | null | null |
from django.core.urlresolvers import reverse_lazy
from django.views.generic.edit import CreateView
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate, login
from django.views.generic.edit import FormView
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from braces.views import LoginRequiredMixin
from courses.models import Course
from .forms import CourseEnrollForm
class StudentRegistrationView(CreateView):
template_name = 'students/student/registration.html'
form_class = UserCreationForm
success_url = reverse_lazy('student_course_list')
def form_valid(self, form):
result = super(StudentRegistrationView, self).form_valid(form)
cd = form.cleaned_data
user = authenticate(username=cd['username'],
password=cd['password1'])
login(self.request, user)
return result
class StudentEnrollCourseView(LoginRequiredMixin, FormView):
course = None
form_class = CourseEnrollForm
def form_valid(self, form):
self.course = form.cleaned_data['course']
self.course.students.add(self.request.user)
return super(StudentEnrollCourseView, self).form_valid(form)
def get_success_url(self):
return reverse_lazy('student_course_detail', args=[self.course.id])
class StudentCourseListView(LoginRequiredMixin, ListView):
model = Course
template_name = 'students/course/list.html'
def get_queryset(self):
qs = super(StudentCourseListView, self).get_queryset()
return qs.filter(students__in=[self.request.user])
class StudentCourseDetailView(DetailView):
model = Course
template_name = 'students/course/detail.html'
def get_queryset(self):
qs = super(StudentCourseDetailView, self).get_queryset()
return qs.filter(students__in=[self.request.user])
def get_context_data(self, **kwargs):
context = super(StudentCourseDetailView, self).get_context_data(**kwargs)
# get course object
course = self.get_object()
if 'module_id' in self.kwargs:
# get current module
context['module'] = course.modules.get(id=self.kwargs['module_id'])
else:
# get first module
context['module'] = course.modules.all()[0]
return context
| 36.132353
| 82
| 0.693529
|
b2ab51153b86961aed80d3f0a5284667ac33e8bf
| 216
|
py
|
Python
|
tests/conftest.py
|
rafalcode/HapDab
|
4a0a300aefea923e07c08ff4c8ebc5e05fa6095c
|
[
"MIT"
] | 1
|
2021-02-07T07:23:36.000Z
|
2021-02-07T07:23:36.000Z
|
tests/conftest.py
|
rafalcode/HapDab
|
4a0a300aefea923e07c08ff4c8ebc5e05fa6095c
|
[
"MIT"
] | 27
|
2018-01-29T18:45:10.000Z
|
2018-03-05T13:31:20.000Z
|
tests/conftest.py
|
rafalcode/HapDab
|
4a0a300aefea923e07c08ff4c8ebc5e05fa6095c
|
[
"MIT"
] | 1
|
2018-10-16T16:52:27.000Z
|
2018-10-16T16:52:27.000Z
|
import pytest
import os
import hapdab as dab
import locuspocus as lp
import minus80.Tools as m80Tools
@pytest.fixture(scope='module')
def ACGTFasta():
x = lp.Fasta.from_file('data/ACGT.fasta')
return x
| 14.4
| 46
| 0.731481
|
1d3bfaad2650118652268fff4875250be564387a
| 784
|
py
|
Python
|
release/pipeline/dags/istio_daily_master.py
|
oulinbao/istio
|
91a85fa9917b45705d8a24ede6c1f167b61e4019
|
[
"Apache-2.0"
] | 15
|
2018-01-26T22:25:37.000Z
|
2022-03-29T14:32:59.000Z
|
release/pipeline/dags/istio_daily_master.py
|
oulinbao/istio
|
91a85fa9917b45705d8a24ede6c1f167b61e4019
|
[
"Apache-2.0"
] | 5
|
2020-09-04T10:54:34.000Z
|
2021-06-02T02:24:49.000Z
|
release/pipeline/dags/istio_daily_master.py
|
oulinbao/istio
|
91a85fa9917b45705d8a24ede6c1f167b61e4019
|
[
"Apache-2.0"
] | 5
|
2020-07-06T20:11:51.000Z
|
2022-01-25T13:26:21.000Z
|
"""Airfow DAG used is the daily release pipeline.
Copyright 2017 Istio Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from airflow import DAG
import istio_common_dag
branch_this_dag = 'master'
dailyDag = istio_common_dag.DailyPipeline(branch=branch_this_dag)
dailyDag
| 30.153846
| 72
| 0.797194
|
19ba011caa3ecb79b26b68b77f040bdb38da6c13
| 1,179
|
py
|
Python
|
src/numpy/linalg.py
|
mumupy/pythonlearn
|
5be03d156f11af2467a6052a476de4b706f7d53a
|
[
"Apache-2.0"
] | 9
|
2019-10-25T03:50:02.000Z
|
2022-03-22T13:22:11.000Z
|
src/numpy/linalg.py
|
mumupy/pythonlearn
|
5be03d156f11af2467a6052a476de4b706f7d53a
|
[
"Apache-2.0"
] | 11
|
2019-12-11T14:47:54.000Z
|
2022-02-10T00:23:57.000Z
|
src/numpy/linalg.py
|
mumupy/pythonlearn
|
5be03d156f11af2467a6052a476de4b706f7d53a
|
[
"Apache-2.0"
] | 1
|
2021-04-20T07:30:42.000Z
|
2021-04-20T07:30:42.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/21 14:42
# @Author : ganliang
# @File : linalg.py
# @Desc    : Linear algebra
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.array([[11, 12], [13, 14]])
print ("dot函数在是二维矩阵的时候就是矩阵相乘")
print (np.matrix(a) * np.matrix(b))
print ("dot下标元素的乘积和")
print(np.dot(a, b))
print ("\nvdot两个向量的点积")
# vdot 将数组展开计算内积 两个向量的点积。 如果第一个参数是复数,那么它的共轭复数会用于计算。 如果参数是多维数组,它会被展开。
print (np.vdot(a, b))
print ("\ninner向量内积")
print (np.inner(np.array([1, 2, 3]), np.array([0, 1, 0])))
# equivalent to 1*0 + 2*1 + 3*0
print (np.inner(np.array([[1, 2], [3, 4]]), np.array([[11, 12], [13, 14]])))
print ("\nmatmul矩阵乘积")
a = [[1, 0], [0, 1]]
b = [[4, 1], [2, 2]]
print (np.matmul(a, b))
print ("\nlinalg.det 计算输入矩阵的行列式")
a = np.array([[1, 2], [3, 4]])
print (np.linalg.det(a))
print ("\nlinalg.solve 矩阵形式的线性方程的解")
a = np.array([[3, 1], [1, 2]])
b = np.array([9, 8])
x = np.linalg.solve(a, b)
print (x)
print ("\nlinalg.solve 乘法逆矩阵")
x = np.array([[1, 2], [3, 4]])
y = np.linalg.inv(x)
print (x)
print (y)
print (np.dot(x, y))
a = np.array([[1, 1, 1], [0, 2, 5], [2, 5, -1]])
b = np.array([[6], [-4], [27]])
x = np.linalg.solve(a, b)
print (x)
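# Added illustration (an assumption, not in the original script): np.allclose
# can confirm that the computed x actually satisfies a.dot(x) = b.
print(np.allclose(np.dot(a, x), b))  # expected: True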
| 21.833333
| 76
| 0.562341
|
33e263f67551c0707048d12548b457e2fc1b4c73
| 2,571
|
py
|
Python
|
tests/023.py
|
abawchen/leetcode
|
41d3b172a7694a46a860fbcb0565a3acccd000f2
|
[
"MIT"
] | null | null | null |
tests/023.py
|
abawchen/leetcode
|
41d3b172a7694a46a860fbcb0565a3acccd000f2
|
[
"MIT"
] | null | null | null |
tests/023.py
|
abawchen/leetcode
|
41d3b172a7694a46a860fbcb0565a3acccd000f2
|
[
"MIT"
] | null | null | null |
import unittest
import sys
sys.path.append('./')
solutions = __import__('solutions.023_merge_k_sorted_lists', fromlist='*')
helper = __import__('utils.helper', fromlist='*')
class Test021(unittest.TestCase):
def test_sortListsHead(self):
s = solutions.Solution()
l1 = helper.constructListNode([1, 2, 3, 4])
l2 = helper.constructListNode([60, 400])
l3 = helper.constructListNode([40, 400])
nodes = s.sortListsHead([l1, l2, l3])
self.assertEqual(
[1, 40, 60],
[ node.val for node in nodes ])
nodes = s.sortListsHead([l2, l1, l3])
self.assertEqual(
[1, 40, 60],
[ node.val for node in nodes ])
nodes = s.sortListsHead([l3, l2, l1])
self.assertEqual(
[1, 40, 60],
[ node.val for node in nodes ])
def test_mergeKLists(self):
s = solutions.Solution()
l1 = helper.constructListNode([1, 2, 3, 4])
l2 = helper.constructListNode([60, 400])
head = s.mergeKLists([l1, l2])
self.assertEqual(helper.listNodeToArray(head), [1, 2, 3, 4, 60, 400])
l1 = helper.constructListNode([1, 2, 3, 4])
l2 = helper.constructListNode([60, 400])
l3 = helper.constructListNode([40, 400])
head = s.mergeKLists([l1, l2, l3])
self.assertEqual(helper.listNodeToArray(head), [1, 2, 3, 4, 40, 60, 400, 400])
l1 = helper.constructListNode([1, 2, 3, 4])
l2 = helper.constructListNode([60, 400])
l3 = helper.constructListNode([40, 400])
head = s.mergeKLists([l3, l2, l1])
self.assertEqual(helper.listNodeToArray(head), [1, 2, 3, 4, 40, 60, 400, 400])
l1 = helper.constructListNode([3])
l2 = helper.constructListNode([60])
l3 = helper.constructListNode([4])
head = s.mergeKLists([l3, l2, l1])
self.assertEqual(helper.listNodeToArray(head), [3, 4, 60])
l1 = helper.constructListNode([3, 4, 5, 6 ,7])
head = s.mergeKLists([l1])
self.assertEqual(helper.listNodeToArray(head), [3, 4, 5, 6 ,7])
l1 = helper.constructListNode([3, 4, 5, 6 ,7])
l2 = helper.constructListNode([1, 9, 19])
head = s.mergeKLists([l1, l2])
self.assertEqual(helper.listNodeToArray(head), [1, 3, 4, 5, 6 ,7, 9, 19])
l1 = helper.constructListNode([3, 4, 5, 6 ,7])
l2 = None
head = s.mergeKLists([l1, l2])
self.assertEqual(helper.listNodeToArray(head), [3, 4, 5, 6 ,7])
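# --- Illustrative sketch (not part of the original test file) ---
# The module under test (solutions.023_merge_k_sorted_lists) is not included
# in this record. A common heap-based mergeKLists looks roughly like the
# sketch below; _ListNode is an assumed stand-in for the helper's node type.
import heapq

class _ListNode(object):
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def _merge_k_lists(lists):
    # Seed the heap with the head of each non-empty list; the index i keeps
    # tuple comparison away from the nodes themselves.
    heap = [(node.val, i, node) for i, node in enumerate(lists) if node]
    heapq.heapify(heap)
    dummy = tail = _ListNode()
    while heap:
        _, i, node = heapq.heappop(heap)
        tail.next = node
        tail = node
        if node.next:
            heapq.heappush(heap, (node.next.val, i, node.next))
    return dummy.next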
if __name__ == '__main__':
unittest.main()
| 32.544304
| 86
| 0.577596
|
cc18ef2f86f79587e77483bcf37a881fd2f2155c
| 1,993
|
py
|
Python
|
utils.py
|
intellivoid/Friendly-Telegram-Lydia
|
c252d723f8913a14c4c382171e82d727988a4cbe
|
[
"WTFPL"
] | null | null | null |
utils.py
|
intellivoid/Friendly-Telegram-Lydia
|
c252d723f8913a14c4c382171e82d727988a4cbe
|
[
"WTFPL"
] | null | null | null |
utils.py
|
intellivoid/Friendly-Telegram-Lydia
|
c252d723f8913a14c4c382171e82d727988a4cbe
|
[
"WTFPL"
] | null | null | null |
import logging
import os
from . import __main__
from telethon.tl.types import PeerUser, PeerChat, PeerChannel
def get_args(message):
try:
message = message.message
except AttributeError:
pass
if not message:
return False
return list(filter(lambda x: len(x) > 0, message.split(' ')))[1:]
def get_args_raw(message):
try:
message = message.message
except AttributeError:
pass
if not message:
return False
args = message.split(' ', 1)
if len(args) > 1:
return args[1]
def get_args_split_by(message, s):
m = get_args_raw(message)
mess = m.split(s)
return [st.strip() for st in mess]
def get_chat_id(message):
chat = message.to_id
attrs = chat.__dict__
if len(attrs) != 1:
return None
return next(iter(attrs.values()))
def escape_html(text):
return str(text).replace("<", "<").replace(">", ">").replace("&", "&")
def escape_quotes(text):
return str(text).replace("<", "<").replace(">", ">").replace("&", "&").replace('"', """)
def get_base_dir():
return os.path.relpath(os.path.dirname(__main__.__file__))
async def get_user(message):
try:
return await message.client.get_entity(message.from_id)
    except ValueError: # Not in database. Let's go looking for them.
logging.debug("user not in session cache. searching...")
if isinstance(message.to_id, PeerUser):
await message.client.get_dialogs()
return await message.client.get_entity(message.from_id)
elif isinstance(message.to_id, PeerChat) or isinstance(message.to_id, PeerChannel):
async for user in message.client.iter_participants(message.to_id, aggressive=True):
if user.id == message.from_id:
return user
logging.error("WTF! user isn't in the group where they sent the message")
return None
else:
logging.error("WTF! to_id is not a user, chat or channel")
return None
| 32.145161
| 107
| 0.644255
|
d2e70bdd79effa9f4ad7f14df86e05a7d0a805af
| 1,095
|
py
|
Python
|
scripts/Combine_Data/combine_data_nonimputed.py
|
UoMResearchIT/UoM_AQ_Data_Tools
|
7c4dd37e130157885634d14ae764cc1cbcf42313
|
[
"MIT"
] | 1
|
2020-04-10T17:33:27.000Z
|
2020-04-10T17:33:27.000Z
|
scripts/Combine_Data/combine_data_nonimputed.py
|
UoMResearchIT/UoM_AQ_Data_Tools
|
7c4dd37e130157885634d14ae764cc1cbcf42313
|
[
"MIT"
] | 8
|
2020-10-13T16:06:49.000Z
|
2021-02-19T14:56:32.000Z
|
scripts/Combine_Data/combine_data_nonimputed.py
|
UoMResearchIT/UoM_AQ_Data_Tools
|
7c4dd37e130157885634d14ae764cc1cbcf42313
|
[
"MIT"
] | null | null | null |
import pandas as pd
aurn_file='../AURN_Data_Download/AURN_data/aurn_processed_daily_2016-2019.csv'
emep_file='../EMEP_Data_Extraction/EMEP_data/emep_daily_data_2016-2019.csv'
poll_file='../MEDMI_Data_Download/full_data/pollen_2016-2019.csv'
met_file='../Data_Processing/MEDMI_Met_data/Met_ppd_daily_mean_max_temp_RH_pres_2016-2019_no_imputation.csv'
outfile='Combined_dataset/turing_aq_daily_met_pollen_pollution_data.csv'
aurn_data = pd.read_csv(aurn_file,index_col=['timestamp','site_id'])
emep_data = pd.read_csv(emep_file,index_col=['timestamp','site_id'])
poll_data = pd.read_csv(poll_file,index_col=['timestamp','site_id'])
met_data = pd.read_csv(met_file,index_col=['timestamp','site_id'])
combined_data = aurn_data.copy()
combined_data = combined_data.merge(emep_data, how='outer', left_index=True, right_index=True)
combined_data = combined_data.merge(poll_data, how='outer', left_index=True, right_index=True)
combined_data = combined_data.merge(met_data, how='outer', left_index=True, right_index=True)
combined_data.to_csv(outfile,index=True,header=True,float_format='%.2f')
| 45.625
| 108
| 0.816438
|
9c0ac1e00d733a66861d028bd5c36525e5bd35b1
| 991
|
py
|
Python
|
Snake3Equipe1/config.py
|
LUDUSLab/stem-games
|
347afa8b1511d76f8070fa69f27a49b57e551376
|
[
"MIT"
] | 2
|
2021-01-24T01:04:34.000Z
|
2021-05-06T16:25:53.000Z
|
Snake3Equipe1/config.py
|
LUDUSLab/stem-games
|
347afa8b1511d76f8070fa69f27a49b57e551376
|
[
"MIT"
] | null | null | null |
Snake3Equipe1/config.py
|
LUDUSLab/stem-games
|
347afa8b1511d76f8070fa69f27a49b57e551376
|
[
"MIT"
] | 3
|
2021-01-26T21:35:43.000Z
|
2021-05-06T16:06:47.000Z
|
import pygame
from pygame.mixer import Sound
from itertools import cycle
pygame.init()
BLINK_EVENT = pygame.USEREVENT + 0
BACKGROUND_COLOR = (0, 0, 0)
block_size = 32
fps = 10
screen_dimensions = (1280, 720)
game_clock = pygame.time.Clock()
# SOUNDS AND FONTS
game_over = pygame.mixer.Sound('./assets/team_I.game-over.wav')
eat_fruit = pygame.mixer.Sound('./assets/team-I.eat.wav')
music_menu = pygame.mixer.Sound('./assets/super_mario.wav')
music_menu.set_volume(0.01)
game_over.set_volume(0.02)
eat_fruit.set_volume(0.06)
running_game = False
screen = pygame.display.set_mode((1280, 720))
FOOD_APPLE = 100 # 0 aux = 0
FOOD_BANANA = 200 # 1 aux = 1
FOOD_ORANGE = 1000 # 2 aux = 2
# Colors ------------------------------------------------------------------------------------------------------- #
color_0D6895 = (13, 104, 149)
color_0B3C53 = (11, 60, 83)
color_C01C1C = (192, 28, 28)
color_C0771C = (185, 110, 18)
COLOR_BLACK = (0, 0, 0)
COLOR_WHITE = (255, 255, 255)
| 26.078947
| 114
| 0.631685
|
43d47f9ca4d894ca02acf57a75b3e783bc86d197
| 5,500
|
py
|
Python
|
pythonforandroid/util.py
|
strubbi77/python-for-android
|
230fb66449f18217efa440b942ab6659f3f62edc
|
[
"MIT"
] | null | null | null |
pythonforandroid/util.py
|
strubbi77/python-for-android
|
230fb66449f18217efa440b942ab6659f3f62edc
|
[
"MIT"
] | null | null | null |
pythonforandroid/util.py
|
strubbi77/python-for-android
|
230fb66449f18217efa440b942ab6659f3f62edc
|
[
"MIT"
] | null | null | null |
import contextlib
from os.path import exists, join
from os import getcwd, chdir, makedirs, walk, uname
import io
import json
import shutil
import sys
from fnmatch import fnmatch
from tempfile import mkdtemp
try:
from urllib.request import FancyURLopener
except ImportError:
from urllib import FancyURLopener
from pythonforandroid.logger import (logger, Err_Fore, error, info)
IS_PY3 = sys.version_info[0] >= 3
class WgetDownloader(FancyURLopener):
version = ('Wget/1.17.1')
urlretrieve = WgetDownloader().retrieve
build_platform = '{system}-{machine}'.format(
system=uname()[0], machine=uname()[-1]).lower()
"""the build platform in the format `system-machine`. We use
this string to define the right build system when compiling some recipes or
to get the right path for clang compiler"""
@contextlib.contextmanager
def current_directory(new_dir):
cur_dir = getcwd()
logger.info(''.join((Err_Fore.CYAN, '-> directory context ', new_dir,
Err_Fore.RESET)))
chdir(new_dir)
yield
logger.info(''.join((Err_Fore.CYAN, '<- directory context ', cur_dir,
Err_Fore.RESET)))
chdir(cur_dir)
@contextlib.contextmanager
def temp_directory():
temp_dir = mkdtemp()
try:
logger.debug(''.join((Err_Fore.CYAN, ' + temp directory used ',
temp_dir, Err_Fore.RESET)))
yield temp_dir
finally:
shutil.rmtree(temp_dir)
logger.debug(''.join((Err_Fore.CYAN, ' - temp directory deleted ',
temp_dir, Err_Fore.RESET)))
def ensure_dir(filename):
if not exists(filename):
makedirs(filename)
class JsonStore(object):
"""Replacement of shelve using json, needed for support python 2 and 3.
"""
def __init__(self, filename):
super(JsonStore, self).__init__()
self.filename = filename
self.data = {}
if exists(filename):
try:
with io.open(filename, encoding='utf-8') as fd:
self.data = json.load(fd)
except ValueError:
print("Unable to read the state.db, content will be replaced.")
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
self.sync()
def __delitem__(self, key):
del self.data[key]
self.sync()
def __contains__(self, item):
return item in self.data
def get(self, item, default=None):
return self.data.get(item, default)
def keys(self):
return self.data.keys()
def remove_all(self, prefix):
        for key in list(self.data.keys()):
if not key.startswith(prefix):
continue
del self.data[key]
self.sync()
def sync(self):
# http://stackoverflow.com/questions/12309269/write-json-data-to-file-in-python/14870531#14870531
if IS_PY3:
with open(self.filename, 'w') as fd:
json.dump(self.data, fd, ensure_ascii=False)
else:
with io.open(self.filename, 'w', encoding='utf-8') as fd:
fd.write(unicode(json.dumps(self.data, ensure_ascii=False))) # noqa F821
def which(program, path_env):
'''Locate an executable in the system.'''
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in path_env.split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def walk_valid_filens(base_dir, invalid_dir_names, invalid_file_patterns):
"""Recursively walks all the files and directories in ``dirn``,
ignoring directories that match any pattern in ``invalid_dirns``
and files that patch any pattern in ``invalid_filens``.
``invalid_dirns`` and ``invalid_filens`` should both be lists of
strings to match. ``invalid_dir_patterns`` expects a list of
invalid directory names, while ``invalid_file_patterns`` expects a
list of glob patterns compared against the full filepath.
File and directory paths are evaluated as full paths relative to ``dirn``.
"""
for dirn, subdirs, filens in walk(base_dir):
# Remove invalid subdirs so that they will not be walked
for i in reversed(range(len(subdirs))):
subdir = subdirs[i]
if subdir in invalid_dir_names:
subdirs.pop(i)
for filen in filens:
for pattern in invalid_file_patterns:
if fnmatch(filen, pattern):
break
else:
yield join(dirn, filen)
class BuildInterruptingException(Exception):
def __init__(self, message, instructions=None):
super(BuildInterruptingException, self).__init__(message, instructions)
self.message = message
self.instructions = instructions
def handle_build_exception(exception):
"""
Handles a raised BuildInterruptingException by printing its error
message and associated instructions, if any, then exiting.
"""
error('Build failed: {}'.format(exception.message))
if exception.instructions is not None:
info('Instructions: {}'.format(exception.instructions))
exit(1)
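# --- Illustrative usage sketch (added; not part of the original module) ---
# Shows typical use of JsonStore and walk_valid_filens defined above.
# 'example_state.db' and the ignore patterns below are made-up values.
if __name__ == '__main__':
    store = JsonStore('example_state.db')
    store['last_arch'] = 'arm64-v8a'  # persisted to disk by sync()
    print(store.get('last_arch'))
    for filen in walk_valid_filens('.', ['.git', '__pycache__'], ['*.pyc']):
        print(filen)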
| 30.054645
| 105
| 0.634545
|
204a8d14e453b2ac3b2e1a6b9179d1e3dcadb2b7
| 4,573
|
py
|
Python
|
functional/tests/identity/v3/test_project.py
|
ankur-gupta91/osc-ip-cap
|
9a64bbc31fcc0872f52ad2d92c550945eea5cc97
|
[
"Apache-2.0"
] | null | null | null |
functional/tests/identity/v3/test_project.py
|
ankur-gupta91/osc-ip-cap
|
9a64bbc31fcc0872f52ad2d92c550945eea5cc97
|
[
"Apache-2.0"
] | null | null | null |
functional/tests/identity/v3/test_project.py
|
ankur-gupta91/osc-ip-cap
|
9a64bbc31fcc0872f52ad2d92c550945eea5cc97
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from functional.tests.identity.v3 import test_identity
class ProjectTests(test_identity.IdentityTests):
def test_project_create(self):
project_name = data_utils.rand_name('TestProject')
description = data_utils.rand_name('description')
raw_output = self.openstack(
'project create '
'--domain %(domain)s '
'--description %(description)s '
'--enable '
'--property k1=v1 '
'--property k2=v2 '
'%(name)s' % {'domain': self.domain_name,
'description': description,
'name': project_name})
self.addCleanup(
self.openstack,
'project delete '
'--domain %(domain)s '
'%(name)s' % {'domain': self.domain_name,
'name': project_name}
)
items = self.parse_show(raw_output)
show_fields = list(self.PROJECT_FIELDS)
show_fields.extend(['k1', 'k2'])
self.assert_show_fields(items, show_fields)
project = self.parse_show_as_object(raw_output)
self.assertEqual('v1', project['k1'])
self.assertEqual('v2', project['k2'])
def test_project_delete(self):
project_name = self._create_dummy_project(add_clean_up=False)
raw_output = self.openstack(
'project delete '
'--domain %(domain)s '
'%(name)s' % {'domain': self.domain_name,
'name': project_name})
self.assertEqual(0, len(raw_output))
def test_project_list(self):
raw_output = self.openstack('project list')
items = self.parse_listing(raw_output)
self.assert_table_structure(items, test_identity.BASIC_LIST_HEADERS)
def test_project_list_with_domain(self):
project_name = self._create_dummy_project()
raw_output = self.openstack(
'project list --domain %s' % self.domain_name)
items = self.parse_listing(raw_output)
self.assert_table_structure(items, test_identity.BASIC_LIST_HEADERS)
self.assertInOutput(project_name, raw_output)
self.assertTrue(len(items) > 0)
def test_project_set(self):
project_name = self._create_dummy_project()
new_project_name = data_utils.rand_name('NewTestProject')
raw_output = self.openstack(
'project set '
'--name %(new_name)s '
'--disable '
'--property k0=v0 '
'%(name)s' % {'new_name': new_project_name,
'domain': self.domain_name,
'name': project_name})
self.assertEqual(0, len(raw_output))
# check project details
raw_output = self.openstack(
'project show '
'--domain %(domain)s '
'%(name)s' % {'domain': self.domain_name,
'name': new_project_name}
)
items = self.parse_show(raw_output)
fields = list(self.PROJECT_FIELDS)
fields.extend(['k0'])
self.assert_show_fields(items, fields)
project = self.parse_show_as_object(raw_output)
self.assertEqual(new_project_name, project['name'])
self.assertEqual('False', project['enabled'])
self.assertEqual('v0', project['k0'])
# reset project to make sure it will be cleaned up
self.openstack(
'project set '
'--name %(new_name)s '
'--enable '
'%(name)s' % {'new_name': project_name,
'name': new_project_name})
def test_project_show(self):
raw_output = self.openstack(
'project show '
'--domain %(domain)s '
'%(name)s' % {'domain': self.domain_name,
'name': self.project_name})
items = self.parse_show(raw_output)
self.assert_show_fields(items, self.PROJECT_FIELDS)
| 40.114035
| 78
| 0.593921
|
54ec822f17dfe32585c57e0524c30366d87bc0ee
| 2,156
|
py
|
Python
|
contents/5.2_Prioritized_Replay_DQN/run_MountainCar.py
|
woluo/Reinforcement-learning-with-tensorflow
|
ec1a0d27635207c89d7638a4fd2b490ccaec644b
|
[
"MIT"
] | null | null | null |
contents/5.2_Prioritized_Replay_DQN/run_MountainCar.py
|
woluo/Reinforcement-learning-with-tensorflow
|
ec1a0d27635207c89d7638a4fd2b490ccaec644b
|
[
"MIT"
] | null | null | null |
contents/5.2_Prioritized_Replay_DQN/run_MountainCar.py
|
woluo/Reinforcement-learning-with-tensorflow
|
ec1a0d27635207c89d7638a4fd2b490ccaec644b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
The DQN improvement: Prioritized Experience Replay (based on https://arxiv.org/abs/1511.05952)
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
Using:
Tensorflow: 1.0
gym: 0.8.0
"""
import gym
from RL_brain import DQNPrioritizedReplay
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
env = gym.make('MountainCar-v0')
env = env.unwrapped
env.seed(21)
MEMORY_SIZE = 10000
sess = tf.Session()
with tf.variable_scope('natural_DQN'):
RL_natural = DQNPrioritizedReplay(
n_actions=3, n_features=2, memory_size=MEMORY_SIZE,
e_greedy_increment=0.00005, sess=sess, prioritized=False,
)
with tf.variable_scope('DQN_with_prioritized_replay'):
RL_prio = DQNPrioritizedReplay(
n_actions=3, n_features=2, memory_size=MEMORY_SIZE,
e_greedy_increment=0.00005, sess=sess, prioritized=True, output_graph=True,
)
sess.run(tf.global_variables_initializer())
def train(RL):
total_steps = 0
steps = []
episodes = []
for i_episode in range(20):
observation = env.reset()
while True:
# env.render()
action = RL.choose_action(observation)
observation_, reward, done, info = env.step(action)
if done: reward = 10
RL.store_transition(observation, action, reward, observation_)
if total_steps > MEMORY_SIZE:
RL.learn()
if done:
print('episode ', i_episode, ' finished')
steps.append(total_steps)
episodes.append(i_episode)
break
observation = observation_
total_steps += 1
return np.vstack((episodes, steps))
his_natural = train(RL_natural)
his_prio = train(RL_prio)
# compare based on first success
plt.plot(his_natural[0, :], his_natural[1, :] - his_natural[1, 0], c='b', label='natural DQN')
plt.plot(his_prio[0, :], his_prio[1, :] - his_prio[1, 0], c='r', label='DQN with prioritized replay')
plt.legend(loc='best')
plt.ylabel('total training time')
plt.xlabel('episode')
plt.grid()
plt.show()
| 25.666667
| 101
| 0.654453
|
adadb441634d82e1e2f5b2af7e50e62dc2922411
| 7,846
|
py
|
Python
|
DataGenerator/DataGeneratorVideo.py
|
shamanDevel/IsosurfaceSuperresolution
|
0658e67b7ca9f633547c65e3e16f93d2e0c5a4a2
|
[
"MIT"
] | 7
|
2019-10-14T09:36:57.000Z
|
2022-02-27T05:13:28.000Z
|
DataGenerator/DataGeneratorVideo.py
|
shamanDevel/IsosurfaceSuperresolution
|
0658e67b7ca9f633547c65e3e16f93d2e0c5a4a2
|
[
"MIT"
] | null | null | null |
DataGenerator/DataGeneratorVideo.py
|
shamanDevel/IsosurfaceSuperresolution
|
0658e67b7ca9f633547c65e3e16f93d2e0c5a4a2
|
[
"MIT"
] | 3
|
2020-01-07T16:49:17.000Z
|
2021-06-23T14:21:00.000Z
|
import os
import os.path
import random
import numpy as np
import numpy.linalg
import subprocess
import imageio
import cv2 as cv
########################################
# CONFIGURATION
########################################
renderer = '../bin/GPURenderer.exe'
datasetPath = '../../data/volumes/vbx/'
descriptorFile = '../../data/volumes/inputs.dat'
#datasetExtensions = tuple(['.vbx'])
outputPath = '../../data/clouds/rendering_video5/'
outputExtension = '.exr'
numImages = 50
numFrames = 10
downscaling = 4
highResSize = 512
samplesHigh = 8
maxDist = 0.3
noShading = True
aoSamples = 256
aoRadius = 1.0
########################################
# MAIN
########################################
def randomPointOnSphere():
vec = np.random.randn(3)
vec /= np.linalg.norm(vec)
vec[2] = - abs(vec[2])
return vec;
def randomFloat(min, max):
return min + np.random.random() * (max-min)
def convertToNumpy(inputPath, idx):
# copied from datasetVideo.py
inputExtension = ".exr"
def get_image_name(i,j,mode):
if mode=='high':
return os.path.join(inputPath, "high_tmp_%05d%s" % (j, inputExtension))
if mode=='highdn':
return os.path.join(inputPath, "high_tmp_%05d_depth%s" % (j, inputExtension))
if mode=='highfx':
return os.path.join(inputPath, "high_tmp_%05d_fx%s" % (j, inputExtension))
elif mode=='low':
return os.path.join(inputPath, "low_tmp_%05d%s" % (j, inputExtension))
elif mode=='dn':
return os.path.join(inputPath, "low_tmp_%05d_depth%s" % (j, inputExtension))
elif mode=='flow':
return os.path.join(inputPath, "low_tmp_%05d_flow%s" % (j, inputExtension))
high = [None]*numFrames
low = [None]*numFrames
flow = [None]*numFrames
for j in range(numFrames):
high_rgb = np.clip(np.asarray(imageio.imread(get_image_name(idx, j, 'high'))).transpose((2, 0, 1)), 0, 1)
high_dn = np.asarray(imageio.imread(get_image_name(idx, j, 'highdn'))).transpose((2, 0, 1))
high_fx = np.asarray(imageio.imread(get_image_name(idx, j, 'highfx'))).transpose((2, 0, 1))
high[j] = np.concatenate((high_rgb[3:4,:,:], high_dn, high_fx[0:1,:,:]), axis=0)
high[j][0,:,:] = high[j][0,:,:] * 2 - 1
assert high[j].shape[0]==6
low_rgb = np.clip(np.asarray(imageio.imread(get_image_name(idx, j, 'low'))).transpose((2, 0, 1)), 0, 1)
low_dn = np.asarray(imageio.imread(get_image_name(idx, j, 'dn'))).transpose((2, 0, 1))
low[j] = np.concatenate((low_rgb[3:4], low_dn), axis=0)
low[j][0,:,:] = low[j][0,:,:] * 2 - 1 # transform mask to [-1,1]
assert low[j].shape[0]==5
flow_xy = imageio.imread(get_image_name(idx, j, 'flow'))[:,:,0:2]
flow_inpaint = np.stack((
cv.inpaint(flow_xy[:,:,0], np.uint8(low_rgb[3,:,:]==0), 3, cv.INPAINT_NS),
cv.inpaint(flow_xy[:,:,1], np.uint8(low_rgb[3,:,:]==0), 3, cv.INPAINT_NS)), axis=0)
flow[j] = flow_inpaint
images_high = np.stack(high, axis=0)
images_low = np.stack(low, axis=0)
flow_low = np.stack(flow, axis=0)
# save as numpy array
np.save(os.path.join(inputPath, "high_%05d.npy" % idx), images_high)
np.save(os.path.join(inputPath, "low_%05d.npy" % idx), images_low)
np.save(os.path.join(inputPath, "flow_%05d.npy" % idx), flow_low)
def main():
#create output
if not os.path.exists(outputPath):
os.mkdir(outputPath)
#list all datasets
dataset_info = np.genfromtxt(descriptorFile, skip_header=1, dtype=None)
num_files = dataset_info.shape[0]
datasets = [None]*num_files
print('Datasets:')
for i in range(num_files):
name = str(dataset_info[i][0].decode('ascii'))
min_iso = float(dataset_info[i][1])
max_iso = float(dataset_info[i][2])
datasets[i] = (name, min_iso, max_iso)
print(name," iso=[%f,%f]"%(min_iso, max_iso))
##list all datasets
#datasets = [file for file in os.listdir(datasetPath) if file.endswith(datasetExtensions)]
#print('Datasets found:', datasets)
#render images
for i in range(numImages):
print('Generate file',(i+1),'of',numImages)
set = random.randrange(num_files)
inputFile = os.path.join(datasetPath, datasets[set][0])
outputFileHigh = os.path.join(outputPath, "high_tmp_&05d%s" % (outputExtension))
outputFileDepthHigh = os.path.join(outputPath, "high_tmp_&05d_depth%s" % (outputExtension))
outputFileEffectsHigh = os.path.join(outputPath, "high_tmp_&05d_fx%s" % (outputExtension))
outputFileLow = os.path.join(outputPath, "low_tmp_&05d%s" % (outputExtension))
outputFileDepthLow = os.path.join(outputPath, "low_tmp_&05d_depth%s" % (outputExtension))
outputFileFlowLow = os.path.join(outputPath, "low_tmp_&05d_flow%s" % (outputExtension))
originStart = randomPointOnSphere() * randomFloat(0.6, 1.0) + np.array([0,0,-0.07])
lookAtStart = randomPointOnSphere() * 0.1 + np.array([0,0,-0.07])
while True:
originEnd = randomPointOnSphere() * randomFloat(0.6, 1.0) + np.array([0,0,-0.07])
if numpy.linalg.norm(originEnd - originStart) < maxDist:
break
lookAtEnd = randomPointOnSphere() * 0.1 + np.array([0,0,-0.07])
up = np.array([0,0,-1])#randomPointOnSphere()
isovalue = random.uniform(datasets[set][1], datasets[set][2])
diffuseColor = np.random.uniform(0.2,1.0,3)
specularColor = [pow(random.uniform(0,1), 3)*0.3] * 3
specularExponent = random.randint(4, 64)
if random.uniform(0,1)<0.7:
light = 'camera'
else:
lightDir = randomPointOnSphere()
light = '%5.3f,%5.3f,%5.3f'%(lightDir[0],lightDir[1],lightDir[2])
args = [
renderer,
'-m','iso',
'--res', '%d,%d'%(highResSize,highResSize),
'--animation', '%d'%numFrames,
'--origin', '%5.3f,%5.3f,%5.3f,%5.3f,%5.3f,%5.3f'%(originStart[0],originStart[1],originStart[2],originEnd[0],originEnd[1],originEnd[2]),
'--lookat', '%5.3f,%5.3f,%5.3f,%5.3f,%5.3f,%5.3f'%(lookAtStart[0],lookAtStart[1],lookAtStart[2],lookAtEnd[0],lookAtEnd[1],lookAtEnd[2]),
'--up', '%5.3f,%5.3f,%5.3f'%(up[0],up[1],up[2]),
'--isovalue', str(isovalue),
'--noshading', '1' if noShading else '0',
'--diffuse', '%5.3f,%5.3f,%5.3f'%(diffuseColor[0],diffuseColor[1],diffuseColor[2]),
'--specular', '%5.3f,%5.3f,%5.3f'%(specularColor[0],specularColor[1],specularColor[2]),
'--exponent', str(specularExponent),
'--light', light,
'--samples', str(samplesHigh),
'--downscale_path', outputFileLow,
'--downscale_factor', str(downscaling),
'--depth', outputFileDepthLow,
'--flow', outputFileFlowLow,
'--highdepth', outputFileDepthHigh,
'--ao', 'world',
'--aosamples', str(aoSamples),
'--aoradius', str(aoRadius),
'--higheffects', outputFileEffectsHigh,
inputFile,
outputFileHigh
]
print(' '.join(args))
subprocess.run(args, stdout=None, stderr=None, check=True)
print('Convert to Numpy')
convertToNumpy(outputPath, i)
# clean up
for i in range(numFrames):
os.remove((outputFileHigh.replace('&','%'))%i)
os.remove((outputFileDepthHigh.replace('&','%'))%i)
os.remove((outputFileEffectsHigh.replace('&','%'))%i)
os.remove((outputFileLow.replace('&','%'))%i)
os.remove((outputFileDepthLow.replace('&','%'))%i)
os.remove((outputFileFlowLow.replace('&','%'))%i)
if __name__ == "__main__":
main()
| 43.348066
| 148
| 0.585266
|
67fbedc311fadc67d54925dfc5f2b298813a35c0
| 930
|
py
|
Python
|
math/1363_largest_multiple_of_three/1363_largest_multiple_of_three.py
|
zdyxry/LeetCode
|
33371285d0f3302158230f46e8b1b63b9f4639c4
|
[
"Xnet",
"X11"
] | 6
|
2019-09-16T01:50:44.000Z
|
2020-09-17T08:52:25.000Z
|
math/1363_largest_multiple_of_three/1363_largest_multiple_of_three.py
|
zdyxry/LeetCode
|
33371285d0f3302158230f46e8b1b63b9f4639c4
|
[
"Xnet",
"X11"
] | null | null | null |
math/1363_largest_multiple_of_three/1363_largest_multiple_of_three.py
|
zdyxry/LeetCode
|
33371285d0f3302158230f46e8b1b63b9f4639c4
|
[
"Xnet",
"X11"
] | 4
|
2020-02-07T12:43:16.000Z
|
2021-04-11T06:38:55.000Z
|
import collections
class Solution(object):
def largestMultipleOfThree(self, A):
total = sum(A)
count = collections.Counter(A)
A.sort(reverse=1)
def f(i):
if count[i]:
A.remove(i)
count[i] -= 1
if not A:
return ''
if not any(A):
return '0'
if sum(A) % 3 == 0:
return ''.join(map(str, A))
if total % 3 == 0:
return f(-1)
if total % 3 == 1 and count[1] + count[4] + count[7]:
return f(1) or f(4) or f(7)
if total % 3 == 2 and count[2] + count[5] + count[8]:
return f(2) or f(5) or f(8)
if total % 3 == 2:
return f(1) or f(1) or f(4) or f(4) or f(7) or f(7)
return f(2) or f(2) or f(5) or f(5) or f(8) or f(8)
A = [1, 8, 9]
res = Solution().largestMultipleOfThree(A)
print(res)
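# Expected output for A = [1, 8, 9]: '981' (the digit sum 18 is already
# divisible by 3, so every digit is kept and printed in descending order).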
| 28.181818
| 63
| 0.444086
|
9bb9e00b2e28f703fca426405052649857745e92
| 3,858
|
py
|
Python
|
src/main.py
|
jsun1590/chess.com-bot
|
2c13aaec86dba875a938d4ee799757e9ccba8a9f
|
[
"MIT"
] | 7
|
2021-12-11T01:46:14.000Z
|
2022-03-19T21:58:25.000Z
|
src/main.py
|
jsun1590/chess.com-bot
|
2c13aaec86dba875a938d4ee799757e9ccba8a9f
|
[
"MIT"
] | 4
|
2021-11-08T10:44:08.000Z
|
2022-03-18T13:16:51.000Z
|
src/main.py
|
jsun1590/chess.com-bot
|
2c13aaec86dba875a938d4ee799757e9ccba8a9f
|
[
"MIT"
] | 3
|
2021-09-13T04:42:07.000Z
|
2022-03-13T01:44:01.000Z
|
import chess
import chess.engine
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from pywinauto import application
import time
import os
import sys
import glob
from get_fen import get_fen
running_script_directory = os.path.dirname(os.path.realpath(__file__))
os.chdir(running_script_directory)
for file in glob.glob("stockfish*"):
print("Found Stockfish binary version", file.strip("stockfish_").strip(".exe"))
stockfish = file
try:
engine = chess.engine.SimpleEngine.popen_uci(stockfish)
except:
print("No Stockfish binary found")
input("Press any key to exit.")
sys.exit()
board = chess.Board()
limit = chess.engine.Limit(time=0.2)
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument("--log-level=3")
driver = webdriver.Chrome("chromedriver.exe", options=chrome_options)
with open("board.txt") as f:
array = [i.split() for i in f]
# url = input("Enter a url\n> ")
# for pawn promotion testing
# url = "https://www.chess.com/play/computer?fen=qkb3nr/ppppppPp/8/8/8/8/PPPPPPP1/RNBQKBNR%20w%20KQ%20-%200%201"
url = "https://www.chess.com/play/computer"
driver.get(url)
def open_chrome():
'''
    Function makes sure that Chrome is open so that check_fen can work properly.
'''
app = application.Application().connect(title_re ="Play Chess.*")
app_dialog = app.top_window()
if not app_dialog.has_focus():
app_dialog.set_focus()
def check_fen(extension):
open_chrome()
base = get_fen(driver)
return f"{base} {extension}"
def find_loc(piece):
for i, row in enumerate(array):
for j, col in enumerate(row):
if col == piece:
return [j+1, 8-i]
color = input("Whose turn is it right now? Choices are 'w' for white; 'b' for black\n> ")
print("\nCan the white king castle?\nk for king's side; q for queen's side; - for neither")
castle_w = input("Choices are 'kq', 'k', 'q', or '-'\n> ").upper()
print("\nCan the black king castle?\nk for king's side; q for queen's side; - for neither")
castle_b = input("Choices are 'kq', 'k', 'q', or '-'\n> ").lower()
print("\nWhat is the en passant target square in algebraic notation?")
en_passant = input("If a pawn has just made a two-square move, this is origin square.\nIf there is no en passant or you are not sure, put '-'.\n> ").lower()
half_move = input("\nWhat is the number of half moves? Put '0' if you are not sure.\n> ")
full_move = input("\nWhat is the number of full moves? Put 1' if you are not sure.\n> ")
initial_fen = check_fen(f"{color} {castle_w}{castle_b} {en_passant} {half_move} {full_move}")
print(initial_fen, "\n")
while not board.is_game_over():
piece_size = driver.find_element_by_css_selector("#board-layout-chessboard").size["height"]/8
print(piece_size)
while True:
fen = check_fen(board.fen().split(" ", 1)[1])
print(fen, "\n")
if board.fen() != fen or board.fen() == initial_fen:
board = chess.Board(fen)
break
result = engine.play(board, limit)
origin = find_loc(str(result.move)[:2])
target = find_loc(str(result.move)[2:4])
offset = [a - b for a, b in zip(target, origin)]
offset[0] *= piece_size
offset[1] *= -piece_size
origin_push = driver.find_element_by_xpath(f"//div[contains(@class, 'piece') and contains(@class, 'square-{origin[0]}{origin[1]}')]")
action_chains = ActionChains(driver)
action_chains.drag_and_drop_by_offset(origin_push, offset[0], offset[1]).perform()
if len(str(result.move)) == 5:
promotion = driver.find_element_by_css_selector("div.promotion-piece." + fen.split()[1] + str(result.move)[-1].lower())
promotion.click()
board.push(result.move)
print(board, "\n")
time.sleep(3)
| 35.722222
| 156
| 0.675998
|
06afa90738db4214d2b605cee91b4c1bf5293039
| 4,472
|
py
|
Python
|
charlesbot_broadcast_message/broadcast_message.py
|
Thezone1975/charlesbot-broadcast-message
|
dc42228776db54c4710b146f1c77c971c47a8da8
|
[
"MIT"
] | 1
|
2018-09-15T05:00:12.000Z
|
2018-09-15T05:00:12.000Z
|
charlesbot_broadcast_message/broadcast_message.py
|
Thezone1975/charlesbot-broadcast-message
|
dc42228776db54c4710b146f1c77c971c47a8da8
|
[
"MIT"
] | null | null | null |
charlesbot_broadcast_message/broadcast_message.py
|
Thezone1975/charlesbot-broadcast-message
|
dc42228776db54c4710b146f1c77c971c47a8da8
|
[
"MIT"
] | 1
|
2019-06-18T09:50:35.000Z
|
2019-06-18T09:50:35.000Z
|
from charlesbot.util.slack import get_robot_channel_membership
from charlesbot.util.slack import get_robot_group_membership
from charlesbot.util.parse import parse_msg_with_prefix
from charlesbot.util.parse import does_msg_contain_prefix
from charlesbot.base_plugin import BasePlugin
from charlesbot.slack.slack_attachment import SlackAttachment
from charlesbot.slack.slack_user import SlackUser
from charlesbot.slack.slack_channel_joined import SlackChannelJoined
from charlesbot.slack.slack_channel_left import SlackChannelLeft
from charlesbot.slack.slack_group_joined import SlackGroupJoined
from charlesbot.slack.slack_group_left import SlackGroupLeft
from charlesbot.slack.slack_message import SlackMessage
import asyncio
class BroadcastMessage(BasePlugin):
def __init__(self):
super().__init__("Broadcast Message")
self.room_membership = {}
self.seed_initial_data()
def seed_initial_data(self): # pragma: no cover
loop = asyncio.get_event_loop()
loop.create_task(self.seed_channel_membership())
loop.create_task(self.seed_group_membership())
def get_help_message(self): # pragma: no cover
return "!wall <msg> - Broadcast a message to all channels I'm a part of" # NOQA
def log_room_membership(self):
self.log.info("Currently in: %s"
% ", ".join(sorted(self.room_membership.values())))
@asyncio.coroutine
def seed_channel_membership(self):
result = yield from self.slack.api_call('channels.list',
exclude_archived=1)
channels = get_robot_channel_membership(result)
self.room_membership.update(channels)
self.log_room_membership()
@asyncio.coroutine
def seed_group_membership(self):
result = yield from self.slack.api_call('groups.list',
exclude_archived=1)
groups = get_robot_group_membership(result)
self.room_membership.update(groups)
self.log_room_membership()
@asyncio.coroutine
def process_message(self, message):
tasks = [
self.add_to_room(message),
self.remove_from_room(message),
self.parse_wall_message(message)
]
yield from asyncio.gather(*tasks)
@asyncio.coroutine
def add_to_room(self, message):
if not type(message) is SlackChannelJoined and not type(message) is SlackGroupJoined: # NOQA
return
self.room_membership.update({message.id: message.name})
self.log.info("I was invited to join %s" % message.name)
self.log_room_membership()
@asyncio.coroutine
def remove_from_room(self, message):
if not type(message) is SlackChannelLeft and not type(message) is SlackGroupLeft: # NOQA
return
room_name = self.room_membership.get(message.channel, "")
self.room_membership.pop(message.channel, None)
if room_name:
self.log.info("I have been removed from %s" % room_name)
self.log_room_membership()
@asyncio.coroutine
def parse_wall_message(self, message):
if not type(message) is SlackMessage:
return
if not does_msg_contain_prefix("!wall", message.text):
return
parsed_message = parse_msg_with_prefix("!wall", message.text)
if not parsed_message: # pragma: no cover
return
slack_user = SlackUser()
yield from slack_user.retrieve_slack_user_info(self.slack,
message.user)
yield from self.send_broadcast_message(parsed_message, slack_user)
@asyncio.coroutine
def send_broadcast_message(self, msg, user):
wall = "Broadcast message from %s - %s" % (user.real_name, msg)
attachment = SlackAttachment(fallback=wall,
author_name=user.real_name,
author_icon=user.image_24,
text=msg)
for key in self.room_membership.keys():
yield from self.slack.api_call(
'chat.postMessage',
channel=key,
attachments=attachment,
as_user=False,
username="Broadcast Message",
icon_url="https://avatars.slack-edge.com/2015-07-31/8502215814_6662f69db3bed43d32e6_48.jpg" # NOQA
)
| 40.654545
| 115
| 0.653846
|
22e142b3ac73c50dedd0d373ac1685a0f6066d31
| 795
|
py
|
Python
|
binlin/utils/input_output.py
|
UKPLab/inlg2019-revisiting-binlin
|
250196403ee4050cac78c547add90087ea04243f
|
[
"Apache-2.0"
] | 1
|
2021-12-15T08:44:35.000Z
|
2021-12-15T08:44:35.000Z
|
binlin/utils/input_output.py
|
UKPLab/inlg2019-revisiting-binlin
|
250196403ee4050cac78c547add90087ea04243f
|
[
"Apache-2.0"
] | 3
|
2021-03-19T04:07:44.000Z
|
2022-01-13T01:40:50.000Z
|
binlin/utils/input_output.py
|
UKPLab/inlg2019-revisiting-binlin
|
250196403ee4050cac78c547add90087ea04243f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import logging
import string
logger = logging.getLogger('main')
def check_file_exists(fname):
if not os.path.exists(os.path.abspath(fname)):
logger.warning("%s does not exist!" % (fname))
return False
return True
def readlines(fn, no_punct=False):
with open(fn) as fh:
if no_punct:
lines = [' '.join([ch for ch in line.strip().split() if ch not in string.punctuation]) for line in fh]
else:
lines = [line.strip() for line in fh]
return lines
def get_dirname(fn):
"""
    Return the name of the folder containing the file.
:param fn: filename, for which we want to know the directory it resides in
:return:
"""
return os.path.split(fn)[0]
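# --- Illustrative usage sketch (added; not part of the original module) ---
# 'example.txt' is a made-up path; with no_punct=True, readlines() drops
# tokens that are bare punctuation, as implemented above.
if __name__ == '__main__':
    if check_file_exists('example.txt'):
        print(readlines('example.txt', no_punct=True))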
| 23.382353
| 114
| 0.626415
|
3d345f5279f94a452879b9de9eaea36b42737e7e
| 4,205
|
py
|
Python
|
model-optimizer/extensions/front/tf/mvn_unrolled.py
|
anton-potapov/openvino
|
84119afe9a8c965e0a0cd920fff53aee67b05108
|
[
"Apache-2.0"
] | 1
|
2021-07-30T17:03:50.000Z
|
2021-07-30T17:03:50.000Z
|
model-optimizer/extensions/front/tf/mvn_unrolled.py
|
anton-potapov/openvino
|
84119afe9a8c965e0a0cd920fff53aee67b05108
|
[
"Apache-2.0"
] | 4
|
2021-04-01T08:29:48.000Z
|
2021-08-30T16:12:52.000Z
|
model-optimizer/extensions/front/tf/mvn_unrolled.py
|
anton-potapov/openvino
|
84119afe9a8c965e0a0cd920fff53aee67b05108
|
[
"Apache-2.0"
] | 3
|
2021-03-09T08:27:29.000Z
|
2021-04-07T04:58:54.000Z
|
"""
Copyright (C) 2017-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
from extensions.front.PowerToEltwises import PowerToEltwises
from extensions.front.div import Div
from extensions.front.squared_difference import SquaredDifference
from extensions.front.sub import Sub
from extensions.ops.mvn import MVN
from mo.front.common.replacement import FrontReplacementSubgraph
from mo.graph.graph import Node, Graph
class MVNUnrolled(FrontReplacementSubgraph):
enabled = True
def run_before(self):
return [SquaredDifference, Div, Sub]
def run_after(self):
return [PowerToEltwises]
def pattern(self):
log.debug('Enabled MVN replacement')
return dict(
nodes=[
('mean', dict(kind='op', op='ReduceMean')),
('stop_grad', dict(kind='op', op='StopGradient')),
('sqdiff', dict(kind='op', op='SquaredDifference')),
('variance', dict(kind='op', op='ReduceMean')),
('add', dict(kind='op', op='Add')),
('pow', dict(kind='op', op='Pow')),
('sub', dict(kind='op', op='Sub')),
('truediv', dict(kind='op', op='Div')),
],
edges=[
('mean', 'stop_grad', {'in': 0}),
('stop_grad', 'sqdiff', {'in': 1}),
('sqdiff', 'variance', {'in': 0}),
('mean', 'sub', {'in': 1}),
('variance', 'add'),
('add', 'pow', {'in': 0}),
('pow', 'truediv', {'in': 1}),
('sub', 'truediv', {'in': 0}),
])
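    # The subgraph matched above computes (x - mean(x)) / (var(x) + eps) ** 0.5,
    # i.e. mean-variance normalization; replace_sub_graph below collapses it
    # into a single MVN node.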
@staticmethod
def replace_sub_graph(graph: Graph, match: dict):
mvn = MVN(graph, dict(
name=match['truediv'].name + '/MVN_',
required_reduction_indices=[1, 2] if graph.graph['layout'] == 'NHWC' else [2, 3]
))
mvn.attrs['old_infer'] = mvn.attrs['infer']
mvn.attrs['infer'] = __class__.infer
mean_reduction = match['mean'].in_node(1)
variance_reduction = match['variance'].in_node(1)
pow2 = match['pow'].in_node(1)
eps = match['add'].in_node(0 if match['add'].in_node(0).id != match['variance'].id else 1)
new_subgraph = mvn.create_node([match['mean'].in_node(0), mean_reduction, variance_reduction, pow2, eps])
match['truediv'].replace_node(new_subgraph)
@staticmethod
def infer(node: Node):
if not (node.in_node(1).has_valid('value') and node.in_node(2).has_valid('value')):
log.warning('Reduction indices for mean and variance for MVN node {} are not constants'.format(node.name))
return
if not (all(node.in_node(1).value == node.required_reduction_indices) and
all(node.in_node(2).value == node.required_reduction_indices)):
log.warning('Reduction indices for mean {} and variance {} do not match required ones {}'.format(
node.in_node(1).value,
node.in_node(2).value,
node.required_reduction_indices
))
return
if not (node.in_node(3).has_valid('value') and node.in_node(4).has_valid('value')):
log.warning('Power or/and epsilon values for MVN node {} are not constants'.format(node.name))
return
if node.in_node(3).value != 0.5:
log.warning('Power for MVN node {} ({}) is not equal to 0.5'.format(node.name, node.in_node(3).value))
return
node['eps'] = node.in_node(4).value
for i in range(1, 5):
node.graph.remove_edge(node.in_node(i).id, node.id)
node.old_infer(node)
| 39.299065
| 118
| 0.592866
|
8ff64bfed0c283f17d32d3c38c8eb83fcc26136e
| 3,139
|
py
|
Python
|
code/process/processor.py
|
canerdogan/deep-trading-agent
|
37b56f2202f2c5030169ddf303d6d5dcb5139991
|
[
"MIT"
] | 1
|
2018-02-28T03:33:07.000Z
|
2018-02-28T03:33:07.000Z
|
code/process/processor.py
|
canerdogan/deep-trading-agent
|
37b56f2202f2c5030169ddf303d6d5dcb5139991
|
[
"MIT"
] | null | null | null |
code/process/processor.py
|
canerdogan/deep-trading-agent
|
37b56f2202f2c5030169ddf303d6d5dcb5139991
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from talib.abstract import *
from utils.constants import *
from utils.strings import *
from utils.util import print_and_log_message, print_and_log_message_list
class Processor:
    '''Preprocessor for the Bitcoin prices dataset, as obtained by following the procedure
described in https://github.com/philipperemy/deep-learning-bitcoin'''
def __init__(self, config, logger):
self.dataset_path = config[DATASET_PATH]
self.logger = logger
self.history_length = config[HISTORY_LENGTH]
self.horizon = config[HORIZON]
self.preprocess()
self.generate_attributes()
@property
def price_blocks(self):
return self._price_blocks
@property
def timestamp_blocks(self):
return self._timestamp_blocks
def preprocess(self):
data = pd.read_csv(self.dataset_path)
message = 'Columns found in the dataset {}'.format(data.columns)
print_and_log_message(message, self.logger)
data = data.dropna()
start_time_stamp = data['Timestamp'][0]
timestamps = data['Timestamp'].apply(lambda x: (x - start_time_stamp) / 60)
timestamps = timestamps - range(timestamps.shape[0])
data.insert(0, 'blocks', timestamps)
blocks = data.groupby('blocks')
message = 'Number of blocks of continuous prices found are {}'.format(len(blocks))
print_and_log_message(message, self.logger)
self._data_blocks = []
distinct_episodes = 0
for name, indices in blocks.indices.items():
if len(indices) > (self.history_length + self.horizon):
self._data_blocks.append(blocks.get_group(name))
distinct_episodes = distinct_episodes + (len(indices) - (self.history_length + self.horizon) + 1)
data = None
message_list = ['Number of usable blocks obtained from the dataset are {}'.format(len(self._data_blocks))]
message_list.append('Number of distinct episodes for the current configuration are {}'.format(distinct_episodes))
print_and_log_message_list(message_list, self.logger)
def generate_attributes(self):
self._price_blocks = []
self._timestamp_blocks = []
for data_block in self._data_blocks:
weighted_prices = data_block['price_close'].values
diff = np.diff(weighted_prices)
diff = np.insert(diff, 0, 0)
sma15 = SMA(data_block, timeperiod=15, price='price_close')
sma30 = SMA(data_block, timeperiod=30, price='price_close')
price_block = np.column_stack((weighted_prices, diff, sma15,
weighted_prices - sma15, sma15 - sma30))
price_block = pd.DataFrame(data=price_block)
price_block.fillna(method='bfill', inplace=True)
self._price_blocks.append(price_block.as_matrix())
self._timestamp_blocks.append(data_block['DateTime_UTC'].values)
self._data_blocks = None #free memory
| 42.418919
| 121
| 0.643836
|
d06661395eb18cbe55bca17c03bbda01d8e37298
| 12,702
|
py
|
Python
|
theano/gof/optdb.py
|
royxue/Theano
|
626104a8c2b16898d270dc99e16a3ddb4a74678e
|
[
"BSD-3-Clause"
] | 2
|
2015-01-20T04:53:37.000Z
|
2015-01-20T04:53:40.000Z
|
theano/gof/optdb.py
|
RoyXue/Theano
|
626104a8c2b16898d270dc99e16a3ddb4a74678e
|
[
"BSD-3-Clause"
] | null | null | null |
theano/gof/optdb.py
|
RoyXue/Theano
|
626104a8c2b16898d270dc99e16a3ddb4a74678e
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import numpy
from theano.compat.python2x import DefaultOrderedDict
from theano.misc.ordered_set import OrderedSet
from theano.compat.six import StringIO
from theano.gof import opt
from theano.configparser import AddConfigVar, FloatParam
from theano import config
AddConfigVar('optdb.position_cutoff',
             'Where to stop earlier during optimization. It represents the'
' position of the optimizer where to stop.',
FloatParam(numpy.inf),
in_c_key=False)
AddConfigVar('optdb.max_use_ratio',
             'A ratio that prevents infinite loops in EquilibriumOptimizer.',
FloatParam(5),
in_c_key=False)
class DB(object):
def __hash__(self):
if not hasattr(self, '_optimizer_idx'):
self._optimizer_idx = opt._optimizer_idx[0]
opt._optimizer_idx[0] += 1
return self._optimizer_idx
def __init__(self):
self.__db__ = DefaultOrderedDict(OrderedSet)
self._names = set()
self.name = None # will be reset by register
#(via obj.name by the thing doing the registering)
def register(self, name, obj, *tags, **kwargs):
"""
:param name: name of the optimizer.
:param obj: the optimizer to register.
:param tags: tag name that allow to select the optimizer.
:param kwargs: If non empty, should contain
only use_db_name_as_tag=False.
By default, all optimizations registered in EquilibriumDB
are selected when the EquilibriumDB name is used as a
            tag. We do not want this behavior for some optimizers like
            local_remove_all_assert. use_db_name_as_tag=False removes
            that behavior. This means only the optimizer name and the
tags specified will enable that optimization.
"""
# N.B. obj is not an instance of class Optimizer.
        # It is an instance of a DB. In the tests, for example,
# this is not always the case.
if not isinstance(obj, (DB, opt.Optimizer, opt.LocalOptimizer)):
raise TypeError('Object cannot be registered in OptDB', obj)
if name in self.__db__:
raise ValueError('The name of the object cannot be an existing'
' tag or the name of another existing object.',
obj, name)
if kwargs:
assert "use_db_name_as_tag" in kwargs
assert kwargs["use_db_name_as_tag"] is False
else:
if self.name is not None:
tags = tags + (self.name,)
obj.name = name
# This restriction is there because in many place we suppose that
# something in the DB is there only once.
if obj.name in self.__db__:
raise ValueError('''You can\'t register the same optimization
multiple times in a DB. Tried to register "%s" again under the new name "%s".
Use theano.gof.ProxyDB to work around that''' % (obj.name, name))
self.__db__[name] = OrderedSet([obj])
self._names.add(name)
self.__db__[obj.__class__.__name__].add(obj)
self.add_tags(name, *tags)
def add_tags(self, name, *tags):
obj = self.__db__[name]
assert len(obj) == 1
obj = obj.copy().pop()
for tag in tags:
if tag in self._names:
raise ValueError('The tag of the object collides with a name.',
obj, tag)
self.__db__[tag].add(obj)
def remove_tags(self, name, *tags):
obj = self.__db__[name]
assert len(obj) == 1
obj = obj.copy().pop()
for tag in tags:
if tag in self._names:
raise ValueError('The tag of the object collides with a name.',
obj, tag)
self.__db__[tag].remove(obj)
def __query__(self, q):
if not isinstance(q, Query):
raise TypeError('Expected a Query.', q)
# The ordered set is needed for deterministic optimization.
variables = OrderedSet()
for tag in q.include:
variables.update(self.__db__[tag])
for tag in q.require:
variables.intersection_update(self.__db__[tag])
for tag in q.exclude:
variables.difference_update(self.__db__[tag])
remove = OrderedSet()
add = OrderedSet()
for obj in variables:
if isinstance(obj, DB):
sq = q.subquery.get(obj.name, q)
if sq:
replacement = obj.query(sq)
replacement.name = obj.name
remove.add(obj)
add.add(replacement)
variables.difference_update(remove)
variables.update(add)
return variables
def query(self, *tags, **kwtags):
if len(tags) >= 1 and isinstance(tags[0], Query):
if len(tags) > 1 or kwtags:
raise TypeError('If the first argument to query is a Query,'
' there should be no other arguments.',
tags, kwtags)
return self.__query__(tags[0])
include = [tag[1:] for tag in tags if tag.startswith('+')]
require = [tag[1:] for tag in tags if tag.startswith('&')]
exclude = [tag[1:] for tag in tags if tag.startswith('-')]
if len(include) + len(require) + len(exclude) < len(tags):
raise ValueError("All tags must start with one of the following"
" characters: '+', '&' or '-'", tags)
return self.__query__(Query(include=include,
require=require,
exclude=exclude,
subquery=kwtags))
def __getitem__(self, name):
variables = self.__db__[name]
if not variables:
raise KeyError("Nothing registered for '%s'" % name)
elif len(variables) > 1:
raise ValueError('More than one match for %s (please use query)' %
name)
for variable in variables:
return variable
def print_summary(self, stream=sys.stdout):
print >> stream, "%s (id %i)" % (self.__class__.__name__, id(self))
print >> stream, " names", self._names
print >> stream, " db", self.__db__
class Query(object):
def __init__(self, include, require=None, exclude=None,
subquery=None, position_cutoff=None):
"""
:type position_cutoff: float
        :param position_cutoff: Used by SequenceDB to keep only optimizers that
                                are positioned before the cutoff point.
"""
self.include = OrderedSet(include)
self.require = require or OrderedSet()
self.exclude = exclude or OrderedSet()
self.subquery = subquery or {}
self.position_cutoff = position_cutoff
if isinstance(self.require, (list, tuple)):
self.require = OrderedSet(self.require)
if isinstance(self.exclude, (list, tuple)):
self.exclude = OrderedSet(self.exclude)
def __str__(self):
return "Query{inc=%s,ex=%s,require=%s,subquery=%s,position_cutoff=%d}" % (
self.include, self.exclude, self.require, self.subquery, self.position_cutoff)
# add all opt with this tag
def including(self, *tags):
return Query(self.include.union(tags),
self.require,
self.exclude,
self.subquery,
self.position_cutoff)
# remove all opt with this tag
def excluding(self, *tags):
return Query(self.include,
self.require,
self.exclude.union(tags),
self.subquery,
self.position_cutoff)
# keep only opt with this tag.
def requiring(self, *tags):
return Query(self.include,
self.require.union(tags),
self.exclude,
self.subquery,
self.position_cutoff)
class EquilibriumDB(DB):
"""A set of potential optimizations which should be applied in an
arbitrary order until equilibrium is reached.
Canonicalize, Stabilize, and Specialize are all equilibrium optimizations.
    :param ignore_newtrees: If False, we will apply local optimizations to new
        nodes introduced while applying local optimizations. This could
        result in fewer fgraph iterations, but that doesn't mean it
        will be faster globally.
.. note::
        We can put both LocalOptimizer and Optimizer instances in this DB,
        as EquilibriumOptimizer supports both.
"""
def __init__(self, ignore_newtrees=True):
super(EquilibriumDB, self).__init__()
self.ignore_newtrees = ignore_newtrees
def query(self, *tags, **kwtags):
opts = super(EquilibriumDB, self).query(*tags, **kwtags)
return opt.EquilibriumOptimizer(
opts,
max_use_ratio=config.optdb.max_use_ratio,
ignore_newtrees=self.ignore_newtrees,
failure_callback=opt.NavigatorOptimizer.warn_inplace)
class SequenceDB(DB):
"""A sequence of potential optimizations.
Retrieve a sequence of optimizations (a SeqOptimizer) by calling query().
Each potential optimization is registered with a floating-point position.
No matter which optimizations are selected by a query, they are carried
out in order of increasing position.
The optdb itself (`theano.compile.mode.optdb`), from which (among many
other tags) fast_run and fast_compile optimizers are drawn is a SequenceDB.
"""
seq_opt = opt.SeqOptimizer
def __init__(self, failure_callback=opt.SeqOptimizer.warn):
super(SequenceDB, self).__init__()
self.__position__ = {}
self.failure_callback = failure_callback
def register(self, name, obj, position, *tags):
super(SequenceDB, self).register(name, obj, *tags)
self.__position__[name] = position
def query(self, *tags, **kwtags):
"""
:type position_cutoff: float or int
:param position_cutoff: only optimizations with position less than
the cutoff are returned.
"""
opts = super(SequenceDB, self).query(*tags, **kwtags)
position_cutoff = kwtags.pop('position_cutoff',
config.optdb.position_cutoff)
if len(tags) >= 1 and isinstance(tags[0], Query):
# the call to super should have raised an error with a good message
assert len(tags) == 1
if getattr(tags[0], 'position_cutoff', None):
position_cutoff = tags[0].position_cutoff
opts = [o for o in opts if self.__position__[o.name] < position_cutoff]
# We want to sort by position and, on a position collision, by name,
# for deterministic optimization. Since Python 2.2, sort is
# stable, so sort by name first, then by position. This gives
# the order we want.
opts.sort(key=lambda obj: obj.name)
opts.sort(key=lambda obj: self.__position__[obj.name])
kwargs = {}
if self.failure_callback:
kwargs["failure_callback"] = self.failure_callback
ret = self.seq_opt(opts, **kwargs)
if hasattr(tags[0], 'name'):
ret.name = tags[0].name
return ret
def print_summary(self, stream=sys.stdout):
print >> stream, self.__class__.__name__ + " (id %i)" % id(self)
positions = self.__position__.items()
def c(a, b):
return cmp(a[1], b[1])
positions.sort(c)
print >> stream, " position", positions
print >> stream, " names", self._names
print >> stream, " db", self.__db__
def __str__(self):
sio = StringIO()
self.print_summary(sio)
return sio.getvalue()
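# --- Illustrative sketch (not part of the original module) ---
# The deterministic ordering trick used in SequenceDB.query above: Python's
# sort is stable, so sorting by name first and then by position keeps a
# deterministic name order among optimizations that share the same position.
def _stable_ordering_example():
    opts = [('b_opt', 1.0), ('a_opt', 1.0), ('c_opt', 0.5)]
    opts.sort(key=lambda o: o[0])   # sort by name
    opts.sort(key=lambda o: o[1])   # sort by position; ties keep name order
    return opts  # [('c_opt', 0.5), ('a_opt', 1.0), ('b_opt', 1.0)]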
class LocalGroupDB(SequenceDB):
"""This generate a local optimizer of type LocalOptGroup instead
of a global optimizer.
It support the tracks, to only get applied to some Op.
"""
seq_opt = opt.LocalOptGroup
def __init__(self, failure_callback=opt.SeqOptimizer.warn):
super(LocalGroupDB, self).__init__()
self.failure_callback = None
class ProxyDB(DB):
"""
Wrap an existing DB.
This is needed because we cannot register the same DB multiple times
at different positions in a SequenceDB.
"""
def __init__(self, db):
assert isinstance(db, DB), ""
self.db = db
def query(self, *tags, **kwtags):
return self.db.query(*tags, **kwtags)
| 38.144144
| 90
| 0.598646
|
cdf5d3e7dedf487f33134899cfbf017519208b5c
| 15,339
|
py
|
Python
|
b2sdk/sync/action.py
|
ehossack/b2-sdk-python
|
034bec38671c0862b6956915993061359dbd51f6
|
[
"MIT"
] | null | null | null |
b2sdk/sync/action.py
|
ehossack/b2-sdk-python
|
034bec38671c0862b6956915993061359dbd51f6
|
[
"MIT"
] | null | null | null |
b2sdk/sync/action.py
|
ehossack/b2-sdk-python
|
034bec38671c0862b6956915993061359dbd51f6
|
[
"MIT"
] | null | null | null |
######################################################################
#
# File: b2sdk/sync/action.py
#
# Copyright 2019 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from abc import ABCMeta, abstractmethod
import logging
import os
from ..download_dest import DownloadDestLocalFile
from .encryption_provider import AbstractSyncEncryptionSettingsProvider
from ..bucket import Bucket
from ..raw_api import SRC_LAST_MODIFIED_MILLIS
from ..transfer.outbound.upload_source import UploadSourceLocalFile
from .file import B2File
from .report import SyncFileReporter
logger = logging.getLogger(__name__)
class AbstractAction(metaclass=ABCMeta):
"""
An action to take, such as uploading, downloading, or deleting
a file. Multi-threaded tasks create a sequence of Actions which
are then run by a pool of threads.
An action can depend on other actions completing. An example of
this is making sure a CreateBucketAction happens before an
UploadFileAction.
"""
def run(self, bucket, reporter, dry_run=False):
"""
Main action routine.
:param bucket: a Bucket object
:type bucket: b2sdk.bucket.Bucket
:param reporter: a place to report errors
:param dry_run: if True, perform a dry run
:type dry_run: bool
"""
try:
if not dry_run:
self.do_action(bucket, reporter)
self.do_report(bucket, reporter)
except Exception as e:
logger.exception('an exception occurred in a sync action')
reporter.error(str(self) + ": " + repr(e) + ' ' + str(e))
raise # Re-throw so we can identify failed actions
@abstractmethod
def get_bytes(self):
"""
Return the number of bytes to transfer for this action.
:rtype: int
"""
@abstractmethod
def do_action(self, bucket, reporter):
"""
Perform the action, returning only after the action is completed.
:param bucket: a Bucket object
:type bucket: b2sdk.bucket.Bucket
:param reporter: a place to report errors
"""
@abstractmethod
def do_report(self, bucket, reporter):
"""
Report the action performed.
:param bucket: a Bucket object
:type bucket: b2sdk.bucket.Bucket
:param reporter: a place to report errors
"""
class B2UploadAction(AbstractAction):
"""
File uploading action.
"""
def __init__(
self,
local_full_path,
relative_name,
b2_file_name,
mod_time_millis,
size,
encryption_settings_provider: AbstractSyncEncryptionSettingsProvider,
):
"""
:param str local_full_path: a local file path
:param str relative_name: a relative file name
:param str b2_file_name: a name of a new remote file
:param int mod_time_millis: file modification time in milliseconds
:param int size: a file size
:param b2sdk.v1.AbstractSyncEncryptionSettingsProvider encryption_settings_provider: encryption setting provider
"""
self.local_full_path = local_full_path
self.relative_name = relative_name
self.b2_file_name = b2_file_name
self.mod_time_millis = mod_time_millis
self.size = size
self.encryption_settings_provider = encryption_settings_provider
def get_bytes(self):
"""
Return file size.
:rtype: int
"""
return self.size
def do_action(self, bucket, reporter):
"""
Perform the uploading action, returning only after the action is completed.
:param b2sdk.v1.Bucket bucket: a Bucket object
:param reporter: a place to report errors
"""
if reporter:
progress_listener = SyncFileReporter(reporter)
else:
progress_listener = None
file_info = {SRC_LAST_MODIFIED_MILLIS: str(self.mod_time_millis)}
encryption = self.encryption_settings_provider.get_setting_for_upload(
bucket=bucket,
b2_file_name=self.b2_file_name,
file_info=file_info,
length=self.size,
)
bucket.upload(
UploadSourceLocalFile(self.local_full_path),
self.b2_file_name,
file_info=file_info,
progress_listener=progress_listener,
encryption=encryption,
)
def do_report(self, bucket, reporter):
"""
Report the uploading action performed.
:param bucket: a Bucket object
:type bucket: b2sdk.bucket.Bucket
:param reporter: a place to report errors
"""
reporter.print_completion('upload ' + self.relative_name)
def __str__(self):
return 'b2_upload(%s, %s, %s)' % (
self.local_full_path, self.b2_file_name, self.mod_time_millis
)
class B2HideAction(AbstractAction):
def __init__(self, relative_name, b2_file_name):
"""
:param relative_name: a relative file name
:type relative_name: str
:param b2_file_name: a name of a remote file
:type b2_file_name: str
"""
self.relative_name = relative_name
self.b2_file_name = b2_file_name
def get_bytes(self):
"""
Return file size.
:return: always zero
:rtype: int
"""
return 0
def do_action(self, bucket, reporter):
"""
Perform the hiding action, returning only after the action is completed.
:param bucket: a Bucket object
:type bucket: b2sdk.bucket.Bucket
:param reporter: a place to report errors
"""
bucket.hide_file(self.b2_file_name)
def do_report(self, bucket, reporter):
"""
Report the hiding action performed.
:param bucket: a Bucket object
:type bucket: b2sdk.bucket.Bucket
:param reporter: a place to report errors
"""
reporter.update_transfer(1, 0)
reporter.print_completion('hide ' + self.relative_name)
def __str__(self):
return 'b2_hide(%s)' % (self.b2_file_name,)
class B2DownloadAction(AbstractAction):
def __init__(
self,
source_file: B2File,
b2_file_name: str,
local_full_path: str,
encryption_settings_provider: AbstractSyncEncryptionSettingsProvider,
):
"""
:param b2sdk.v1.B2File source_file: the file to be downloaded
:param str b2_file_name: b2_file_name
:param str local_full_path: a local file path
:param b2sdk.v1.AbstractSyncEncryptionSettingsProvider encryption_settings_provider: encryption setting provider
"""
self.source_file = source_file
self.b2_file_name = b2_file_name
self.local_full_path = local_full_path
self.encryption_settings_provider = encryption_settings_provider
def get_bytes(self):
"""
Return file size.
:rtype: int
"""
return self.source_file.latest_version().size
def _ensure_directory_existence(self):
parent_dir = os.path.dirname(self.local_full_path)
if not os.path.isdir(parent_dir):
try:
os.makedirs(parent_dir)
except OSError:
pass
if not os.path.isdir(parent_dir):
raise Exception('could not create directory %s' % (parent_dir,))
def do_action(self, bucket, reporter):
"""
Perform the downloading action, returning only after the action is completed.
:param b2sdk.v1.Bucket bucket: a Bucket object
:param reporter: a place to report errors
"""
self._ensure_directory_existence()
if reporter:
progress_listener = SyncFileReporter(reporter)
else:
progress_listener = None
# Download the file to a .tmp file
download_path = self.local_full_path + '.b2.sync.tmp'
download_dest = DownloadDestLocalFile(download_path)
encryption = self.encryption_settings_provider.get_setting_for_download(
bucket=bucket,
file_version_info=self.source_file.latest_version().file_version_info,
)
bucket.download_file_by_id(
self.source_file.latest_version().id_,
download_dest,
progress_listener,
encryption=encryption,
)
# Move the file into place
try:
os.unlink(self.local_full_path)
except OSError:
pass
os.rename(download_path, self.local_full_path)
def do_report(self, bucket, reporter):
"""
Report the downloading action performed.
:param bucket: a Bucket object
:type bucket: b2sdk.bucket.Bucket
:param reporter: a place to report errors
"""
reporter.print_completion('dnload ' + self.source_file.name)
def __str__(self):
return (
'b2_download(%s, %s, %s, %d)' % (
self.b2_file_name, self.source_file.latest_version().id_, self.local_full_path,
self.source_file.latest_version().mod_time
)
)
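# --- Illustrative sketch (not part of the b2sdk library) ---
# The pattern used by B2DownloadAction.do_action above: write to a temporary
# sibling file first, then move it into place, so an interrupted download
# never leaves a truncated file at the final path. 'write_contents' is a
# hypothetical callable that fills the open file object.
def _replace_file_safely(final_path, write_contents):
    tmp_path = final_path + '.b2.sync.tmp'
    with open(tmp_path, 'wb') as f:
        write_contents(f)
    try:
        os.unlink(final_path)  # remove any previous copy; ignore if missing
    except OSError:
        pass
    os.rename(tmp_path, final_path)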
class B2CopyAction(AbstractAction):
"""
File copying action.
"""
def __init__(
self,
b2_file_name: str,
source_file: B2File,
dest_b2_file_name,
source_bucket: Bucket,
destination_bucket: Bucket,
encryption_settings_provider: AbstractSyncEncryptionSettingsProvider,
):
"""
:param str b2_file_name: a b2_file_name
:param b2sdk.v1.B2File source_file: the file to be copied
:param str dest_b2_file_name: a name of a destination remote file
:param Bucket source_bucket: bucket to copy from
:param Bucket destination_bucket: bucket to copy to
:param b2sdk.v1.AbstractSyncEncryptionSettingsProvider encryption_settings_provider: encryption setting provider
"""
self.b2_file_name = b2_file_name
self.source_file = source_file
self.dest_b2_file_name = dest_b2_file_name
self.encryption_settings_provider = encryption_settings_provider
self.source_bucket = source_bucket
self.destination_bucket = destination_bucket
def get_bytes(self):
"""
Return file size.
:rtype: int
"""
return self.source_file.latest_version().size
def do_action(self, bucket, reporter):
"""
Perform the copying action, returning only after the action is completed.
:param bucket: a Bucket object
:type bucket: b2sdk.bucket.Bucket
:param reporter: a place to report errors
"""
if reporter:
progress_listener = SyncFileReporter(reporter)
else:
progress_listener = None
source_encryption = self.encryption_settings_provider.get_source_setting_for_copy(
bucket=self.source_bucket,
source_file_version_info=self.source_file.latest_version().file_version_info,
)
destination_encryption = self.encryption_settings_provider.get_destination_setting_for_copy(
bucket=self.destination_bucket,
source_file_version_info=self.source_file.latest_version().file_version_info,
dest_b2_file_name=self.dest_b2_file_name,
)
bucket.copy(
self.source_file.latest_version().id_,
self.dest_b2_file_name,
length=self.source_file.latest_version().size,
progress_listener=progress_listener,
destination_encryption=destination_encryption,
source_encryption=source_encryption,
source_file_info=self.source_file.latest_version().file_version_info.file_info,
source_content_type=self.source_file.latest_version().file_version_info.content_type,
)
def do_report(self, bucket, reporter):
"""
Report the copying action performed.
:param bucket: a Bucket object
:type bucket: b2sdk.bucket.Bucket
:param reporter: a place to report errors
"""
reporter.print_completion('copy ' + self.source_file.name)
def __str__(self):
return (
'b2_copy(%s, %s, %s, %d)' % (
self.b2_file_name, self.source_file.latest_version().id_, self.dest_b2_file_name,
self.source_file.latest_version().mod_time
)
)
class B2DeleteAction(AbstractAction):
def __init__(self, relative_name, b2_file_name, file_id, note):
"""
:param str relative_name: a relative file name
:param str b2_file_name: a name of a remote file
:param str file_id: a file ID
:param str note: a deletion note
"""
self.relative_name = relative_name
self.b2_file_name = b2_file_name
self.file_id = file_id
self.note = note
def get_bytes(self):
"""
Return file size.
:return: always zero
:rtype: int
"""
return 0
def do_action(self, bucket, reporter):
"""
Perform the deleting action, returning only after the action is completed.
:param bucket: a Bucket object
:type bucket: b2sdk.bucket.Bucket
:param reporter: a place to report errors
"""
bucket.api.delete_file_version(self.file_id, self.b2_file_name)
def do_report(self, bucket, reporter):
"""
Report the deleting action performed.
:param bucket: a Bucket object
:type bucket: b2sdk.bucket.Bucket
:param reporter: a place to report errors
"""
reporter.update_transfer(1, 0)
reporter.print_completion('delete ' + self.relative_name + ' ' + self.note)
def __str__(self):
return 'b2_delete(%s, %s, %s)' % (self.b2_file_name, self.file_id, self.note)
class LocalDeleteAction(AbstractAction):
def __init__(self, relative_name, full_path):
"""
:param relative_name: a relative file name
:type relative_name: str
:param full_path: a full local path
:type full_path: str
"""
self.relative_name = relative_name
self.full_path = full_path
def get_bytes(self):
"""
Return file size.
:return: always zero
:rtype: int
"""
return 0
def do_action(self, bucket, reporter):
"""
Perform the deleting of a local file action,
returning only after the action is completed.
:param bucket: a Bucket object
:type bucket: b2sdk.bucket.Bucket
:param reporter: a place to report errors
"""
os.unlink(self.full_path)
def do_report(self, bucket, reporter):
"""
Report the deleting of a local file action performed.
:param bucket: a Bucket object
:type bucket: b2sdk.bucket.Bucket
:param reporter: a place to report errors
"""
reporter.update_transfer(1, 0)
reporter.print_completion('delete ' + self.relative_name)
def __str__(self):
return 'local_delete(%s)' % (self.full_path)
| 31.626804
| 120
| 0.628007
|
f2f85e010517f5c4c92e925263433b9ce77ba626
| 3,197
|
py
|
Python
|
pympc/create_pycoeman_config_run_massive_potree_converter.py
|
iyush1993/Massive-PotreeConverter
|
f43eb8bb1eccc9b2409a2f5e71f4cc95df7adfc0
|
[
"Apache-2.0"
] | null | null | null |
pympc/create_pycoeman_config_run_massive_potree_converter.py
|
iyush1993/Massive-PotreeConverter
|
f43eb8bb1eccc9b2409a2f5e71f4cc95df7adfc0
|
[
"Apache-2.0"
] | null | null | null |
pympc/create_pycoeman_config_run_massive_potree_converter.py
|
iyush1993/Massive-PotreeConverter
|
f43eb8bb1eccc9b2409a2f5e71f4cc95df7adfc0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import argparse, os
from lxml import etree
def run(inputFolder, outputFile, outputFormat, levels, spacing, extent):
# Check user parameters
if not os.path.isdir(inputFolder):
raise Exception(inputFolder + ' does not exist')
if os.path.isfile(outputFile):
raise Exception(outputFile + ' already exists!')
outputFileAbsPath = os.path.abspath(outputFile)
# Create output file
oFile = open(outputFileAbsPath, 'w')
xmlRootElement = etree.Element('ParCommands')
for tile in os.listdir(inputFolder):
if tile != 'tiles.js':
tileRelPath = inputFolder + '/' + tile
xmlComponentElement = etree.SubElement(xmlRootElement, 'Component')
xmlIdElement = etree.SubElement(xmlComponentElement, 'id')
xmlIdElement.text = tile + '_potree_converter'
xmlRequireElement = etree.SubElement(xmlComponentElement, 'require')
xmlRequireElement.text = tileRelPath
localOutputFolder = tile + '_potree'
xmlCommandElement = etree.SubElement(xmlComponentElement, 'command')
# xmlCommandElement.text = 'PotreeConverter --outdir ' + localOutputFolder + ' --levels ' + str(levels) + ' --output-format ' + str(outputFormat).upper() + ' --source ' + tile + ' --spacing ' + str(spacing) + ' --aabb "' + extent + '"'
xmlCommandElement.text = 'PotreeConverter ' + tile + ' ' + localOutputFolder + ' --levels ' + str(levels) + ' --output-format ' + str(outputFormat).upper() + ' --spacing ' + str(spacing) + ' --aabb "' + extent + '"'
xmlOutputElement = etree.SubElement(xmlComponentElement, 'output')
xmlOutputElement.text = localOutputFolder
oFile.write(etree.tostring(xmlRootElement, pretty_print=True, encoding='utf-8').decode('utf-8'))
oFile.close()
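# --- Illustrative sketch (not part of the original script) ---
# A hedged, minimal example of the lxml pattern used in run() above: build a
# <ParCommands> root, add one <Component> with its child elements, and
# serialize it. The tile name and command text are placeholders.
def _build_minimal_parcommands_xml():
    root = etree.Element('ParCommands')
    component = etree.SubElement(root, 'Component')
    etree.SubElement(component, 'id').text = 'tile_0_potree_converter'
    etree.SubElement(component, 'require').text = 'tiles/tile_0'
    etree.SubElement(component, 'command').text = 'PotreeConverter tile_0 tile_0_potree'
    etree.SubElement(component, 'output').text = 'tile_0_potree'
    return etree.tostring(root, pretty_print=True, encoding='utf-8').decode('utf-8')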
def argument_parser():
# define argument menu
parser = argparse.ArgumentParser(
description="Creates a parallel commands XML configuration file. This XML file can be used with pycoeman to run the tasks in a SGE cluster, in a bunch of ssh-reachable hosts or in the local machine")
parser.add_argument('-i','--input',default='',help='Input folder with the tiles. This folder must contain subfolders, one for each tile. Each tile subfolder must contain the LAS/LAZ files in the tile',type=str, required=True)
parser.add_argument('-o','--output',default='',help='Output parallel commands XML configuration file',type=str, required=True)
parser.add_argument('-f','--format',default='',help='Format (LAS or LAZ)',type=str, required=True)
parser.add_argument('-l','--levels',default='',help='Number of levels for OctTree',type=int, required=True)
parser.add_argument('-s','--spacing',default='',help='Spacing at root level',type=int, required=True)
parser.add_argument('-e','--extent',default='',help='Extent to be used for all the OctTree, specify as "minX minY minZ maxX maxY maxZ"',type=str, required=True)
return parser
def main():
args = argument_parser().parse_args()
run(args.input, args.output, args.format, args.levels, args.spacing, args.extent)
if __name__ == "__main__":
main()
| 51.564516
| 247
| 0.6797
|
94e16b99ff3c5cdbaacb4e8b66aeebcfb8fe44eb
| 2,187
|
py
|
Python
|
cmd/updateMultipleEndpoints.py
|
robertchoi80/tks-client
|
ba7cb9b3e937e2e2784f8c06c2f49fb819837bf3
|
[
"MIT"
] | null | null | null |
cmd/updateMultipleEndpoints.py
|
robertchoi80/tks-client
|
ba7cb9b3e937e2e2784f8c06c2f49fb819837bf3
|
[
"MIT"
] | null | null | null |
cmd/updateMultipleEndpoints.py
|
robertchoi80/tks-client
|
ba7cb9b3e937e2e2784f8c06c2f49fb819837bf3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import argparse
import git
import ruamel.yaml
import os
import sys
print("Entering updateMultipleEndpoint script..")
parser = argparse.ArgumentParser()
parser.add_argument('current_cluster', type=str,
help="cluster name to which the endpoints are added")
parser.add_argument('endpoint_list', type=str,
help="endpoint list to add")
args = parser.parse_args()
clusterName = args.current_cluster
endpointListStr = args.endpoint_list
repo = None
config = {}
sitePath = './decapod-site'
siteFileName = "{}/lma/site-values.yaml".format(clusterName)
siteFileNameFull = "{}/{}".format(sitePath, siteFileName)
# Tested with 'robertchoi80' repo
repoOrgName = ''
if not os.path.isdir(sitePath):
print("Cloning repository...")
repo = git.Repo.clone_from("https://github.com/{}/decapod-site".format(repoOrgName), 'decapod-site')
with repo.config_writer() as git_config:
git_config.set_value('user', 'email', 'tks-argo@tks.com')
git_config.set_value('user', 'name', 'TKS Argo')
else:
repo = git.Repo(sitePath)
with open(siteFileNameFull, 'r') as f:
config = ruamel.yaml.round_trip_load(f, preserve_quotes=True)
endpointList = endpointListStr.split()
print("endpointList: {}".format(endpointList))
appendCount = 0
charts = config["charts"]
thanosChart = [chart for chart in charts if chart['name'] == "thanos"][0]
for ep in endpointList:
if (ep in thanosChart['override']['querier.stores']):
print("The endpoint {} already exists.".format(ep))
else:
thanosChart['override']['querier.stores'].append(ep)
appendCount += 1
if appendCount == 0:
print("All endpoints already exists. Exiting script..")
sys.exit(0)
print("After insertion: {}".format(thanosChart))
with open(siteFileNameFull, 'w') as f:
ruamel.yaml.round_trip_dump(config, f)
diff = repo.git.diff(repo.head.commit.tree)
print(diff)
# Provide a list of the files to stage
repo.index.add([siteFileName])
# Provide a commit message
repo.index.commit("add previous thanos-sidecar endpoints to '{}' cluster".format(clusterName))
repo.remotes.origin.push()
print("Exiting updateMultipleEndpoint script..")
| 28.402597
| 104
| 0.712391
|
b13a1a7078a9f86859d9fb8dcb8208379ac34995
| 149
|
py
|
Python
|
crafting/Recipe.py
|
uuk0/mcpython-3
|
6dc7fc9f2610c999c9b690536b867a78eff25699
|
[
"MIT"
] | null | null | null |
crafting/Recipe.py
|
uuk0/mcpython-3
|
6dc7fc9f2610c999c9b690536b867a78eff25699
|
[
"MIT"
] | null | null | null |
crafting/Recipe.py
|
uuk0/mcpython-3
|
6dc7fc9f2610c999c9b690536b867a78eff25699
|
[
"MIT"
] | null | null | null |
import globals as G
class IRecipe:
pass
class Crafting(IRecipe):
def __init__(self, inputs, outputs, stay=[], convert=[]):
pass
| 12.416667
| 61
| 0.637584
|
2c1d5431ab7a84805e6133f3fac61e89e1dcf72a
| 3,858
|
py
|
Python
|
src/util/vec.py
|
nrbabcock/HeartOfGold
|
279f473da091de937614f8824fbb1f8e65b2d1a3
|
[
"MIT"
] | null | null | null |
src/util/vec.py
|
nrbabcock/HeartOfGold
|
279f473da091de937614f8824fbb1f8e65b2d1a3
|
[
"MIT"
] | null | null | null |
src/util/vec.py
|
nrbabcock/HeartOfGold
|
279f473da091de937614f8824fbb1f8e65b2d1a3
|
[
"MIT"
] | null | null | null |
import math
from typing import Union
from rlbot.utils.structures.game_data_struct import Vector3
class Vec3:
"""
This class should provide you with all the basic vector operations that you need, but feel free to extend its
functionality when needed.
The vectors found in the GameTickPacket will be flatbuffer vectors. Cast them to Vec3 like this:
`car_location = Vec3(car.physics.location)`.
Remember that the in-game axes are left-handed.
When in doubt visit the wiki: https://github.com/RLBot/RLBot/wiki/Useful-Game-Values
"""
# https://docs.python.org/3/reference/datamodel.html#slots
__slots__ = [
'x',
'y',
'z'
]
def __init__(self, x: Union[float, 'Vec3', 'Vector3']=0, y: float=0, z: float=0):
"""
Create a new Vec3. The x component can alternatively be another vector with an x, y, and z component, in which
case the created vector is a copy of the given vector and the y and z parameter is ignored. Examples:
a = Vec3(1, 2, 3)
b = Vec3(a)
"""
if hasattr(x, 'x'):
# We have been given a vector. Copy it
self.x = float(x.x)
self.y = float(x.y) if hasattr(x, 'y') else 0
self.z = float(x.z) if hasattr(x, 'z') else 0
else:
self.x = float(x)
self.y = float(y)
self.z = float(z)
def __getitem__(self, item: int):
return (self.x, self.y, self.z)[item]
def __add__(self, other: 'Vec3') -> 'Vec3':
return Vec3(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other: 'Vec3') -> 'Vec3':
return Vec3(self.x - other.x, self.y - other.y, self.z - other.z)
def __neg__(self):
return Vec3(-self.x, -self.y, -self.z)
def __mul__(self, scale: float) -> 'Vec3':
return Vec3(self.x * scale, self.y * scale, self.z * scale)
def __rmul__(self, scale):
return self * scale
def __truediv__(self, scale: float) -> 'Vec3':
scale = 1 / float(scale)
return self * scale
def __str__(self):
return f"Vec3({self.x:.2f}, {self.y:.2f}, {self.z:.2f})"
def __repr__(self):
return self.__str__()
def __eq__(self, other):
if other is None: return False
return self.x == other.x and self.y == other.y and self.z == other.z
def flat(self):
"""Returns a new Vec3 that equals this Vec3 but projected onto the ground plane. I.e. where z=0."""
return Vec3(self.x, self.y, 0)
def length(self):
"""Returns the length of the vector. Also called magnitude and norm."""
return math.sqrt(self.x**2 + self.y**2 + self.z**2)
def dist(self, other: 'Vec3') -> float:
"""Returns the distance between this vector and another vector using pythagoras."""
return (self - other).length()
def normalized(self):
"""Returns a vector with the same direction but a length of one."""
return self / self.length()
def rescale(self, new_len: float) -> 'Vec3':
"""Returns a vector with the same direction but a different length."""
return new_len * self.normalized()
def dot(self, other: 'Vec3') -> float:
"""Returns the dot product."""
return self.x*other.x + self.y*other.y + self.z*other.z
def cross(self, other: 'Vec3') -> 'Vec3':
"""Returns the cross product."""
return Vec3(
self.y * other.z - self.z * other.y,
self.z * other.x - self.x * other.z,
self.x * other.y - self.y * other.x
)
def ang_to(self, ideal: 'Vec3') -> float:
"""Returns the angle to the ideal vector. Angle will be between 0 and pi."""
cos_ang = self.dot(ideal) / (self.length() * ideal.length())
return math.acos(cos_ang)
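# --- Illustrative sketch (not part of the original module) ---
# A hedged usage example of the Vec3 helpers defined above.
def _vec3_usage_example():
    a = Vec3(1, 0, 0)
    b = Vec3(0, 1, 0)
    assert (a + b).length() == math.sqrt(2)
    assert a.dot(b) == 0
    assert a.cross(b) == Vec3(0, 0, 1)
    assert abs(a.ang_to(b) - math.pi / 2) < 1e-9
    return a.rescale(5)  # same direction as a, but with length 5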
| 33.842105
| 118
| 0.589425
|
8eddce3d12ab38f6157a064775149f07cdd7e4e4
| 560
|
py
|
Python
|
compiler/uniquifyGDS.py
|
im-world/OpenRAM
|
f66aac3264598eeae31225c62b6a4af52412d407
|
[
"BSD-3-Clause"
] | 335
|
2018-03-13T21:05:22.000Z
|
2022-03-30T07:53:25.000Z
|
compiler/uniquifyGDS.py
|
im-world/OpenRAM
|
f66aac3264598eeae31225c62b6a4af52412d407
|
[
"BSD-3-Clause"
] | 87
|
2018-03-06T00:55:51.000Z
|
2022-03-30T19:38:29.000Z
|
compiler/uniquifyGDS.py
|
im-world/OpenRAM
|
f66aac3264598eeae31225c62b6a4af52412d407
|
[
"BSD-3-Clause"
] | 95
|
2018-03-14T16:22:55.000Z
|
2022-03-24T00:34:37.000Z
|
#!/usr/bin/env python3
import sys
from gdsMill import gdsMill
if len(sys.argv) < 4:
print("Script to prefix every instance and structure with the root cell name to provide unique namespace, but skip cells that begin with the library prefix.")
print("Usage: {0} <library prefix> in.gds out.gds".format(sys.argv[0]))
sys.exit(1)
gds_file = sys.argv[2]
gds = gdsMill.VlsiLayout()
reader = gdsMill.Gds2reader(gds)
reader.loadFromFile(gds_file)
gds.uniquify(prefix_name=sys.argv[1])
writer = gdsMill.Gds2writer(gds)
writer.writeToFile(sys.argv[3])
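# Illustrative usage note (added comment, not part of the original script):
# invoked from the command line as, for example,
#     ./uniquifyGDS.py sky130_ in.gds out.gds
# where "sky130_" is a hypothetical library prefix. Per the usage text printed
# above, cells whose names already begin with that prefix are skipped while
# everything else is prefixed to give a unique namespace.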
| 28
| 162
| 0.742857
|
b5b8a0b0c6f78210e02fd6fb20104a1566e81934
| 1,521
|
py
|
Python
|
test/functional/feature_reindex.py
|
MiracleCity/MiracleCity
|
7520173d387085b0b3e4d24ac4791d7179ee2c58
|
[
"MIT"
] | null | null | null |
test/functional/feature_reindex.py
|
MiracleCity/MiracleCity
|
7520173d387085b0b3e4d24ac4791d7179ee2c58
|
[
"MIT"
] | null | null | null |
test/functional/feature_reindex.py
|
MiracleCity/MiracleCity
|
7520173d387085b0b3e4d24ac4791d7179ee2c58
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Miracle Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running miracled with -reindex and -reindex-chainstate options.
- Start a single node and generate 3 blocks.
- Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3.
- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3.
"""
from test_framework.test_framework import MiracleTestFramework
from test_framework.util import assert_equal
import time
class ReindexTest(MiracleTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def reindex(self, justchainstate=False):
self.nodes[0].generate(3)
blockcount = self.nodes[0].getblockcount()
self.stop_nodes()
extra_args = [["-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]]
self.start_nodes(extra_args)
while self.nodes[0].getblockcount() < blockcount:
time.sleep(0.1)
assert_equal(self.nodes[0].getblockcount(), blockcount)
self.log.info("Success")
def run_test(self):
self.reindex(False)
self.reindex(True)
self.reindex(False)
self.reindex(True)
if __name__ == '__main__':
ReindexTest().main()
| 36.214286
| 106
| 0.708087
|
86423780f2645e5eca1b551b0559b73fc7dcbd48
| 5,973
|
py
|
Python
|
src/lib/common/mtmodule.py
|
williamwmarx/mtx
|
31548b60a4e88124b0384350cbec8df1d88975cb
|
[
"CC0-1.0"
] | null | null | null |
src/lib/common/mtmodule.py
|
williamwmarx/mtx
|
31548b60a4e88124b0384350cbec8df1d88975cb
|
[
"CC0-1.0"
] | null | null | null |
src/lib/common/mtmodule.py
|
williamwmarx/mtx
|
31548b60a4e88124b0384350cbec8df1d88975cb
|
[
"CC0-1.0"
] | null | null | null |
from abc import ABC, abstractmethod
import os
import struct
import yaml
import multiprocessing
from functools import partial, wraps
from types import GeneratorType
from typing import Generator
from itertools import islice, chain
from pathlib import Path
from lib.common.util import hashdict, get_batch_size, batch
from lib.common.exceptions import ImproperLoggedPhaseError
from lib.common.util import MAX_CPUS
TWO_INTS = "II"
RET_VAL_TESTS_ONLY = "no error"
CONFIG_PATH = "/run_args.yaml"
def db_run(dbfile, q, batches_running):
with open(dbfile, "ab") as f:
while batches_running.value != 0:
try:
done_info = q.get_nowait()
f.write(struct.pack("II", *done_info))
f.flush()
except:
pass
while q.qsize() > 0:
done_info = q.get()
f.write(struct.pack("II", *done_info))
f.flush()
f.close()
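# --- Illustrative sketch (not part of the original module) ---
# A hedged example of the on-disk checkpoint format used by db_run above: each
# completed (batch_num, element_idx) pair is appended as two unsigned 32-bit
# ints ("II"), and can be read back to rebuild the done_dict that
# process_in_batches uses to skip work that already finished.
def _checkpoint_roundtrip_example(dbfile):
    with open(dbfile, "wb") as f:
        for pair in [(0, 0), (0, 1), (2, 5)]:
            f.write(struct.pack(TWO_INTS, *pair))
    done = {}
    with open(dbfile, "rb") as f:
        record = f.read(8)  # 8 bytes = two unsigned ints
        while record:
            batch_num, idx = struct.unpack(TWO_INTS, record)
            done.setdefault(batch_num, {})[idx] = 1
            record = f.read(8)
    return done  # {0: {0: 1, 1: 1}, 2: {5: 1}}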
class MTModule(ABC):
"""Handles parallelisation and component-specific logging. Invoked primarily through the @MTModule.phase decorator
on a method, which parallelises based on the function signature."""
def __init__(self, config, name, storage):
self.config = config
self.name = name
self.disk = storage
self.base_path = Path("/mtriage")
self.UNIQUE_ID = hashdict(config)
self.PHASE_KEY = None
self.__LOGS = []
def get_full_config(self):
with open(CONFIG_PATH, "r") as c:
cfg = yaml.safe_load(c)
return cfg
@property
def in_parallel(self):
inp = self.config.get("in_parallel")
return not (self.config.get("dev") or (inp is not None and not inp) or MAX_CPUS <= 1)
@in_parallel.setter
def in_parallel(self, boolean):
return boolean
def process_batch(self, innards, done_dict, done_queue, batch_num, c, other_args):
for idx, i in enumerate(c):
if idx not in done_dict:
innards(self, [i], *other_args)
done_queue.put((batch_num, idx))
else:
print("Batch %d item %d already done, skipping job." % (batch_num, idx))
def process_in_batches(self, args, process_element, remove_db=True):
"""
Process elements in parallel using multiprocessing. Automatically applied to a phase that takes a single
Generator argument, `all_elements`.
`all_elements` is split into a number of batches, depending on the available CPU power of the machine on which
mtriage is running. `process_element` is then run on each element in each batch, in parallel by number of
batches. The results are collected together and returned as a single list of results.
"""
all_elements = list(args[0])
batch_size = get_batch_size(len(all_elements))
other_args = args[1:]
# each chunk is a generator
cs = batch(all_elements, n=batch_size)
manager = multiprocessing.Manager()
# switch logs to multiprocess access list
self.__LOGS = manager.list()
# NOTE: abstraction leak to getter/setter in analyser.py...
self.dest_q = manager.Value("i", None)
done_queue = manager.Queue()
batches_running = manager.Value("i", 1)
dbfile = f"{self.disk.base_dir}/{self.UNIQUE_ID}.db"
done_dict = {}
try:
with open(dbfile, "rb") as f:
_bytes = f.read(8)  # 8 bytes = two unsigned ints
while _bytes:
fst, snd = struct.unpack(TWO_INTS, _bytes)
if fst not in done_dict:
done_dict[fst] = {}
done_dict[fst][snd] = 1
_bytes = f.read(8)
f.close()
except:
pass
db_process = multiprocessing.Process(target=db_run, args=(dbfile, done_queue, batches_running))
db_process.start()
processes = []
for idx, c in enumerate(cs):
_done_dict = {}
if idx in done_dict:
_done_dict = done_dict[idx]
p = multiprocessing.Process(
target=self.process_batch,
args=(process_element, _done_dict, done_queue, idx, c, other_args),
)
p.start()
processes.append(p)
for p in processes:
p.join()
batches_running.value = 0
db_process.join()
if remove_db:
os.remove(dbfile)
self.flush_logs()
return RET_VAL_TESTS_ONLY
@staticmethod
def phase(phase_key: str, **kwargs):
"""
Provides phased logging for class methods on classes that inherit from MTModule.
Before the function is called, the PHASE_KEY is set, and afterwards logs are saved to disk and the buffer
cleared.
If the first argument to the decorator function is a generator, then the application of the function is
deferred to `process_in_batches`. This can be disabled by explicitly setting 'in_parallel' to False in the
component config.
"""
def decorator(function):
@wraps(function)
def wrapper(self, *args):
self.PHASE_KEY = phase_key
ret_val = None
if not isinstance(self, MTModule):
raise ImproperLoggedPhaseError(function.__name__)
if (self.in_parallel and (len(args) >= 1) and isinstance(args[0], GeneratorType)):
_remove_db = kwargs.get("remove_db", True)
ret_val = self.process_in_batches(args, function, remove_db=_remove_db)
else:
ret_val = function(self, *args)
self.flush_logs()
return ret_val
return wrapper
return decorator
def flush_logs(self):
self.disk.write_logs(self.__LOGS)
self.__LOGS = []
def logger(self, msg, element=None):
context = self.__get_context(element)
msg = f"{context}{msg}"
self.__LOGS.append(msg)
print(msg)
def error_logger(self, msg, element=None):
context = self.__get_context(element)
err_msg = f"ERROR: {context}{msg}"
self.__LOGS.append("")
self.__LOGS.append(
"-----------------------------------------------------------------------------")
self.__LOGS.append(err_msg)
self.__LOGS.append(
"-----------------------------------------------------------------------------")
self.__LOGS.append("")
err_msg = f"\033[91m{err_msg}\033[0m"
print(err_msg)
def __get_context(self, element):
context = f"{self.name}: {self.PHASE_KEY}: "
if element is not None:
context = context + f"{element.id}: "
return context
def is_dev(self):
return "dev" in self.config and self.config["dev"]
| 28.442857
| 118
| 0.678888
|
f0ff5e91674e0e72e6e6e666bd599b8da3a5aa82
| 115,684
|
py
|
Python
|
sympy/core/function.py
|
bertiewooster/sympy
|
7f72eb9bc20b4b24ade8436f98c51ae6be883e08
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/core/function.py
|
bertiewooster/sympy
|
7f72eb9bc20b4b24ade8436f98c51ae6be883e08
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/core/function.py
|
bertiewooster/sympy
|
7f72eb9bc20b4b24ade8436f98c51ae6be883e08
|
[
"BSD-3-Clause"
] | null | null | null |
"""
There are three types of functions implemented in SymPy:
1) defined functions (in the sense that they can be evaluated) like
exp or sin; they have a name and a body:
f = exp
2) undefined function which have a name but no body. Undefined
functions can be defined using a Function class as follows:
f = Function('f')
(the result will be a Function instance)
3) anonymous function (or lambda function) which have a body (defined
with dummy variables) but have no name:
f = Lambda(x, exp(x)*x)
f = Lambda((x, y), exp(x)*y)
The fourth type of functions are composites, like (sin + cos)(x); these work in
SymPy core, but are not yet part of SymPy.
Examples
========
>>> import sympy
>>> f = sympy.Function("f")
>>> from sympy.abc import x
>>> f(x)
f(x)
>>> print(sympy.srepr(f(x).func))
Function('f')
>>> f(x).args
(x,)
"""
from typing import Any, Dict as tDict, Optional, Set as tSet, Tuple as tTuple, Union as tUnion
from collections.abc import Iterable
from .add import Add
from .assumptions import ManagedProperties
from .basic import Basic, _atomic
from .cache import cacheit
from .containers import Tuple, Dict
from .decorators import _sympifyit
from .evalf import pure_complex
from .expr import Expr, AtomicExpr
from .logic import fuzzy_and, fuzzy_or, fuzzy_not, FuzzyBool
from .mul import Mul
from .numbers import Rational, Float, Integer
from .operations import LatticeOp
from .parameters import global_parameters
from .rules import Transform
from .singleton import S
from .sympify import sympify, _sympify
from .sorting import default_sort_key, ordered
from sympy.utilities.exceptions import (sympy_deprecation_warning,
SymPyDeprecationWarning, ignore_warnings)
from sympy.utilities.iterables import (has_dups, sift, iterable,
is_sequence, uniq, topological_sort)
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
from sympy.utilities.misc import as_int, filldedent, func_name
import mpmath
from mpmath.libmp.libmpf import prec_to_dps
import inspect
from collections import Counter
def _coeff_isneg(a):
"""Return True if the leading Number is negative.
Examples
========
>>> from sympy.core.function import _coeff_isneg
>>> from sympy import S, Symbol, oo, pi
>>> _coeff_isneg(-3*pi)
True
>>> _coeff_isneg(S(3))
False
>>> _coeff_isneg(-oo)
True
>>> _coeff_isneg(Symbol('n', negative=True)) # coeff is 1
False
For matrix expressions:
>>> from sympy import MatrixSymbol, sqrt
>>> A = MatrixSymbol("A", 3, 3)
>>> _coeff_isneg(-sqrt(2)*A)
True
>>> _coeff_isneg(sqrt(2)*A)
False
"""
if a.is_MatMul:
a = a.args[0]
if a.is_Mul:
a = a.args[0]
return a.is_Number and a.is_extended_negative
class PoleError(Exception):
pass
class ArgumentIndexError(ValueError):
def __str__(self):
return ("Invalid operation with argument number %s for Function %s" %
(self.args[1], self.args[0]))
class BadSignatureError(TypeError):
'''Raised when a Lambda is created with an invalid signature'''
pass
class BadArgumentsError(TypeError):
'''Raised when a Lambda is called with an incorrect number of arguments'''
pass
# Python 3 version that does not raise a Deprecation warning
def arity(cls):
"""Return the arity of the function if it is known, else None.
Explanation
===========
When default values are specified for some arguments, they are
optional and the arity is reported as a tuple of possible values.
Examples
========
>>> from sympy import arity, log
>>> arity(lambda x: x)
1
>>> arity(log)
(1, 2)
>>> arity(lambda *x: sum(x)) is None
True
"""
eval_ = getattr(cls, 'eval', cls)
parameters = inspect.signature(eval_).parameters.items()
if [p for _, p in parameters if p.kind == p.VAR_POSITIONAL]:
return
p_or_k = [p for _, p in parameters if p.kind == p.POSITIONAL_OR_KEYWORD]
# how many have no default and how many have a default value
no, yes = map(len, sift(p_or_k,
lambda p:p.default == p.empty, binary=True))
return no if not yes else tuple(range(no, no + yes + 1))
class FunctionClass(ManagedProperties):
"""
Base class for function classes. FunctionClass is a subclass of type.
Use Function('<function name>' [ , signature ]) to create
undefined function classes.
"""
_new = type.__new__
def __init__(cls, *args, **kwargs):
# honor kwarg value or class-defined value before using
# the number of arguments in the eval function (if present)
nargs = kwargs.pop('nargs', cls.__dict__.get('nargs', arity(cls)))
if nargs is None and 'nargs' not in cls.__dict__:
for supcls in cls.__mro__:
if hasattr(supcls, '_nargs'):
nargs = supcls._nargs
break
else:
continue
# Canonicalize nargs here; change to set in nargs.
if is_sequence(nargs):
if not nargs:
raise ValueError(filldedent('''
Incorrectly specified nargs as %s:
if there are no arguments, it should be
`nargs = 0`;
if there are any number of arguments,
it should be
`nargs = None`''' % str(nargs)))
nargs = tuple(ordered(set(nargs)))
elif nargs is not None:
nargs = (as_int(nargs),)
cls._nargs = nargs
# When __init__ is called from UndefinedFunction it is called with
# just one arg but when it is called from subclassing Function it is
# called with the usual (name, bases, namespace) type() signature.
if len(args) == 3:
namespace = args[2]
if 'eval' in namespace and not isinstance(namespace['eval'], classmethod):
raise TypeError("eval on Function subclasses should be a class method (defined with @classmethod)")
super().__init__(*args, **kwargs)
@property
def __signature__(self):
"""
Allow Python 3's inspect.signature to give a useful signature for
Function subclasses.
"""
# Python 3 only, but backports (like the one in IPython) still might
# call this.
try:
from inspect import signature
except ImportError:
return None
# TODO: Look at nargs
return signature(self.eval)
@property
def free_symbols(self):
return set()
@property
def xreplace(self):
# Function needs args so we define a property that returns
# a function that takes args...and then use that function
# to return the right value
return lambda rule, **_: rule.get(self, self)
@property
def nargs(self):
"""Return a set of the allowed number of arguments for the function.
Examples
========
>>> from sympy import Function
>>> f = Function('f')
If the function can take any number of arguments, the set of whole
numbers is returned:
>>> Function('f').nargs
Naturals0
If the function was initialized to accept one or more arguments, a
corresponding set will be returned:
>>> Function('f', nargs=1).nargs
{1}
>>> Function('f', nargs=(2, 1)).nargs
{1, 2}
The undefined function, after application, also has the nargs
attribute; the actual number of arguments is always available by
checking the ``args`` attribute:
>>> f = Function('f')
>>> f(1).nargs
Naturals0
>>> len(f(1).args)
1
"""
from sympy.sets.sets import FiniteSet
# XXX it would be nice to handle this in __init__ but there are import
# problems with trying to import FiniteSet there
return FiniteSet(*self._nargs) if self._nargs else S.Naturals0
def __repr__(cls):
return cls.__name__
class Application(Basic, metaclass=FunctionClass):
"""
Base class for applied functions.
Explanation
===========
Instances of Application represent the result of applying an application of
any type to any object.
"""
is_Function = True
@cacheit
def __new__(cls, *args, **options):
from sympy.sets.fancysets import Naturals0
from sympy.sets.sets import FiniteSet
args = list(map(sympify, args))
evaluate = options.pop('evaluate', global_parameters.evaluate)
# WildFunction (and anything else like it) may have nargs defined
# and we throw that value away here
options.pop('nargs', None)
if options:
raise ValueError("Unknown options: %s" % options)
if evaluate:
evaluated = cls.eval(*args)
if evaluated is not None:
return evaluated
obj = super().__new__(cls, *args, **options)
# make nargs uniform here
sentinel = object()
objnargs = getattr(obj, "nargs", sentinel)
if objnargs is not sentinel:
# things passing through here:
# - functions subclassed from Function (e.g. myfunc(1).nargs)
# - functions like cos(1).nargs
# - AppliedUndef with given nargs like Function('f', nargs=1)(1).nargs
# Canonicalize nargs here
if is_sequence(objnargs):
nargs = tuple(ordered(set(objnargs)))
elif objnargs is not None:
nargs = (as_int(objnargs),)
else:
nargs = None
else:
# things passing through here:
# - WildFunction('f').nargs
# - AppliedUndef with no nargs like Function('f')(1).nargs
nargs = obj._nargs # note the underscore here
# convert to FiniteSet
obj.nargs = FiniteSet(*nargs) if nargs else Naturals0()
return obj
@classmethod
def eval(cls, *args):
"""
Returns a canonical form of cls applied to arguments args.
Explanation
===========
The eval() method is called when the class cls is about to be
instantiated and it should return either some simplified instance
(possible of some other class), or if the class cls should be
unmodified, return None.
Examples of eval() for the function "sign"
---------------------------------------------
.. code-block:: python
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
if arg.is_zero: return S.Zero
if arg.is_positive: return S.One
if arg.is_negative: return S.NegativeOne
if isinstance(arg, Mul):
coeff, terms = arg.as_coeff_Mul(rational=True)
if coeff is not S.One:
return cls(coeff) * cls(terms)
"""
return
@property
def func(self):
return self.__class__
def _eval_subs(self, old, new):
if (old.is_Function and new.is_Function and
callable(old) and callable(new) and
old == self.func and len(self.args) in new.nargs):
return new(*[i._subs(old, new) for i in self.args])
class Function(Application, Expr):
"""
Base class for applied mathematical functions.
It also serves as a constructor for undefined function classes.
Examples
========
First example shows how to use Function as a constructor for undefined
function classes:
>>> from sympy import Function, Symbol
>>> x = Symbol('x')
>>> f = Function('f')
>>> g = Function('g')(x)
>>> f
f
>>> f(x)
f(x)
>>> g
g(x)
>>> f(x).diff(x)
Derivative(f(x), x)
>>> g.diff(x)
Derivative(g(x), x)
Assumptions can be passed to Function, and if function is initialized with a
Symbol, the function inherits the name and assumptions associated with the Symbol:
>>> f_real = Function('f', real=True)
>>> f_real(x).is_real
True
>>> f_real_inherit = Function(Symbol('f', real=True))
>>> f_real_inherit(x).is_real
True
Note that assumptions on a function are unrelated to the assumptions on
the variable it is called on. If you want to add a relationship, subclass
Function and define the appropriate ``_eval_is_assumption`` methods.
In the following example Function is used as a base class for
``my_func`` that represents a mathematical function *my_func*. Suppose
that it is well known, that *my_func(0)* is *1* and *my_func* at infinity
goes to *0*, so we want those two simplifications to occur automatically.
Suppose also that *my_func(x)* is real exactly when *x* is real. Here is
an implementation that honours those requirements:
>>> from sympy import Function, S, oo, I, sin
>>> class my_func(Function):
...
... @classmethod
... def eval(cls, x):
... if x.is_Number:
... if x.is_zero:
... return S.One
... elif x is S.Infinity:
... return S.Zero
...
... def _eval_is_real(self):
... return self.args[0].is_real
...
>>> x = S('x')
>>> my_func(0) + sin(0)
1
>>> my_func(oo)
0
>>> my_func(3.54).n() # Not yet implemented for my_func.
my_func(3.54)
>>> my_func(I).is_real
False
In order for ``my_func`` to become useful, several other methods would
need to be implemented. See source code of some of the already
implemented functions for more complete examples.
Also, if the function can take more than one argument, then ``nargs``
must be defined, e.g. if ``my_func`` can take one or two arguments
then,
>>> class my_func(Function):
... nargs = (1, 2)
...
>>>
"""
@property
def _diff_wrt(self):
return False
@cacheit
def __new__(cls, *args, **options):
# Handle calls like Function('f')
if cls is Function:
return UndefinedFunction(*args, **options)
n = len(args)
if n not in cls.nargs:
# XXX: exception message must be in exactly this format to
# make it work with NumPy's functions like vectorize(). See,
# for example, https://github.com/numpy/numpy/issues/1697.
# The ideal solution would be just to attach metadata to
# the exception and change NumPy to take advantage of this.
temp = ('%(name)s takes %(qual)s %(args)s '
'argument%(plural)s (%(given)s given)')
raise TypeError(temp % {
'name': cls,
'qual': 'exactly' if len(cls.nargs) == 1 else 'at least',
'args': min(cls.nargs),
'plural': 's'*(min(cls.nargs) != 1),
'given': n})
evaluate = options.get('evaluate', global_parameters.evaluate)
result = super().__new__(cls, *args, **options)
if evaluate and isinstance(result, cls) and result.args:
_should_evalf = [cls._should_evalf(a) for a in result.args]
pr2 = min(_should_evalf)
if pr2 > 0:
pr = max(_should_evalf)
result = result.evalf(prec_to_dps(pr))
return _sympify(result)
@classmethod
def _should_evalf(cls, arg):
"""
Decide if the function should automatically evalf().
Explanation
===========
By default (in this implementation), this happens if (and only if) the
ARG is a floating point number (including complex numbers).
This function is used by __new__.
Returns the precision to evalf to, or -1 if it should not evalf.
"""
if arg.is_Float:
return arg._prec
if not arg.is_Add:
return -1
m = pure_complex(arg)
if m is None:
return -1
# the elements of m are of type Number, so have a _prec
return max(m[0]._prec, m[1]._prec)
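# Illustrative note (added comment, not part of the upstream method): under
# this rule an expression such as cos(1.5) is evaluated automatically to a
# Float, because its argument carries a precision, while cos(1) stays
# symbolic, since an exact Integer argument returns -1 here.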
@classmethod
def class_key(cls):
from sympy.sets.fancysets import Naturals0
funcs = {
'exp': 10,
'log': 11,
'sin': 20,
'cos': 21,
'tan': 22,
'cot': 23,
'sinh': 30,
'cosh': 31,
'tanh': 32,
'coth': 33,
'conjugate': 40,
're': 41,
'im': 42,
'arg': 43,
}
name = cls.__name__
try:
i = funcs[name]
except KeyError:
i = 0 if isinstance(cls.nargs, Naturals0) else 10000
return 4, i, name
def _eval_evalf(self, prec):
def _get_mpmath_func(fname):
"""Lookup mpmath function based on name"""
if isinstance(self, AppliedUndef):
# Shouldn't lookup in mpmath but might have ._imp_
return None
if not hasattr(mpmath, fname):
fname = MPMATH_TRANSLATIONS.get(fname, None)
if fname is None:
return None
return getattr(mpmath, fname)
_eval_mpmath = getattr(self, '_eval_mpmath', None)
if _eval_mpmath is None:
func = _get_mpmath_func(self.func.__name__)
args = self.args
else:
func, args = _eval_mpmath()
# Fall-back evaluation
if func is None:
imp = getattr(self, '_imp_', None)
if imp is None:
return None
try:
return Float(imp(*[i.evalf(prec) for i in self.args]), prec)
except (TypeError, ValueError):
return None
# Convert all args to mpf or mpc
# Convert the arguments to *higher* precision than requested for the
# final result.
# XXX + 5 is a guess, it is similar to what is used in evalf.py. Should
# we be more intelligent about it?
try:
args = [arg._to_mpmath(prec + 5) for arg in args]
def bad(m):
from mpmath import mpf, mpc
# the precision of an mpf value is the last element
# if that is 1 (and m[1] is not 1 which would indicate a
# power of 2), then the eval failed; so check that none of
# the arguments failed to compute to a finite precision.
# Note: An mpc value has two parts, the re and imag tuple;
# check each of those parts, too. Anything else is allowed to
# pass
if isinstance(m, mpf):
m = m._mpf_
return m[1] !=1 and m[-1] == 1
elif isinstance(m, mpc):
m, n = m._mpc_
return m[1] !=1 and m[-1] == 1 and \
n[1] !=1 and n[-1] == 1
else:
return False
if any(bad(a) for a in args):
raise ValueError # one or more args failed to compute with significance
except ValueError:
return
with mpmath.workprec(prec):
v = func(*args)
return Expr._from_mpmath(v, prec)
def _eval_derivative(self, s):
# f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)
i = 0
l = []
for a in self.args:
i += 1
da = a.diff(s)
if da.is_zero:
continue
try:
df = self.fdiff(i)
except ArgumentIndexError:
df = Function.fdiff(self, i)
l.append(df * da)
return Add(*l)
def _eval_is_commutative(self):
return fuzzy_and(a.is_commutative for a in self.args)
def _eval_is_meromorphic(self, x, a):
if not self.args:
return True
if any(arg.has(x) for arg in self.args[1:]):
return False
arg = self.args[0]
if not arg._eval_is_meromorphic(x, a):
return None
return fuzzy_not(type(self).is_singular(arg.subs(x, a)))
_singularities = None # type: tUnion[FuzzyBool, tTuple[Expr, ...]]
@classmethod
def is_singular(cls, a):
"""
Tests whether the argument is an essential singularity
or a branch point, or the functions is non-holomorphic.
"""
ss = cls._singularities
if ss in (True, None, False):
return ss
return fuzzy_or(a.is_infinite if s is S.ComplexInfinity
else (a - s).is_zero for s in ss)
def as_base_exp(self):
"""
Returns the method as the 2-tuple (base, exponent).
"""
return self, S.One
def _eval_aseries(self, n, args0, x, logx):
"""
Compute an asymptotic expansion around args0, in terms of self.args.
This function is only used internally by _eval_nseries and should not
be called directly; derived classes can overwrite this to implement
asymptotic expansions.
"""
raise PoleError(filldedent('''
Asymptotic expansion of %s around %s is
not implemented.''' % (type(self), args0)))
def _eval_nseries(self, x, n, logx, cdir=0):
"""
This function does compute series for multivariate functions,
but the expansion is always in terms of *one* variable.
Examples
========
>>> from sympy import atan2
>>> from sympy.abc import x, y
>>> atan2(x, y).series(x, n=2)
atan2(0, y) + x/y + O(x**2)
>>> atan2(x, y).series(y, n=2)
-y/x + atan2(x, 0) + O(y**2)
This function also computes asymptotic expansions, if necessary
and possible:
>>> from sympy import loggamma
>>> loggamma(1/x)._eval_nseries(x,0,None)
-1/x - log(x)/x + log(x)/2 + O(1)
"""
from .symbol import uniquely_named_symbol
from sympy.series.order import Order
from sympy.sets.sets import FiniteSet
args = self.args
args0 = [t.limit(x, 0) for t in args]
if any(t.is_finite is False for t in args0):
from .numbers import oo, zoo, nan
# XXX could use t.as_leading_term(x) here but it's a little
# slower
a = [t.compute_leading_term(x, logx=logx) for t in args]
a0 = [t.limit(x, 0) for t in a]
if any(t.has(oo, -oo, zoo, nan) for t in a0):
return self._eval_aseries(n, args0, x, logx)
# Careful: the argument goes to oo, but only logarithmically so. We
# are supposed to do a power series expansion "around the
# logarithmic term". e.g.
# f(1+x+log(x))
# -> f(1+logx) + x*f'(1+logx) + O(x**2)
# where 'logx' is given in the argument
a = [t._eval_nseries(x, n, logx) for t in args]
z = [r - r0 for (r, r0) in zip(a, a0)]
p = [Dummy() for _ in z]
q = []
v = None
for ai, zi, pi in zip(a0, z, p):
if zi.has(x):
if v is not None:
raise NotImplementedError
q.append(ai + pi)
v = pi
else:
q.append(ai)
e1 = self.func(*q)
if v is None:
return e1
s = e1._eval_nseries(v, n, logx)
o = s.getO()
s = s.removeO()
s = s.subs(v, zi).expand() + Order(o.expr.subs(v, zi), x)
return s
if (self.func.nargs is S.Naturals0
or (self.func.nargs == FiniteSet(1) and args0[0])
or any(c > 1 for c in self.func.nargs)):
e = self
e1 = e.expand()
if e == e1:
#for example when e = sin(x+1) or e = sin(cos(x))
#let's try the general algorithm
if len(e.args) == 1:
# issue 14411
e = e.func(e.args[0].cancel())
term = e.subs(x, S.Zero)
if term.is_finite is False or term is S.NaN:
raise PoleError("Cannot expand %s around 0" % (self))
series = term
fact = S.One
_x = uniquely_named_symbol('xi', self)
e = e.subs(x, _x)
for i in range(n - 1):
i += 1
fact *= Rational(i)
e = e.diff(_x)
subs = e.subs(_x, S.Zero)
if subs is S.NaN:
# try to evaluate a limit if we have to
subs = e.limit(_x, S.Zero)
if subs.is_finite is False:
raise PoleError("Cannot expand %s around 0" % (self))
term = subs*(x**i)/fact
term = term.expand()
series += term
return series + Order(x**n, x)
return e1.nseries(x, n=n, logx=logx)
arg = self.args[0]
l = []
g = None
# try to predict a number of terms needed
nterms = n + 2
cf = Order(arg.as_leading_term(x), x).getn()
if cf != 0:
nterms = (n/cf).ceiling()
for i in range(nterms):
g = self.taylor_term(i, arg, g)
g = g.nseries(x, n=n, logx=logx)
l.append(g)
return Add(*l) + Order(x**n, x)
def fdiff(self, argindex=1):
"""
Returns the first derivative of the function.
"""
if not (1 <= argindex <= len(self.args)):
raise ArgumentIndexError(self, argindex)
ix = argindex - 1
A = self.args[ix]
if A._diff_wrt:
if len(self.args) == 1 or not A.is_Symbol:
return _derivative_dispatch(self, A)
for i, v in enumerate(self.args):
if i != ix and A in v.free_symbols:
# it can't be in any other argument's free symbols
# issue 8510
break
else:
return _derivative_dispatch(self, A)
# See issue 4624 and issue 4719, 5600 and 8510
D = Dummy('xi_%i' % argindex, dummy_index=hash(A))
args = self.args[:ix] + (D,) + self.args[ix + 1:]
return Subs(Derivative(self.func(*args), D), D, A)
def _eval_as_leading_term(self, x, logx=None, cdir=0):
"""Stub that should be overridden by new Functions to return
the first non-zero term in a series if ever an x-dependent
argument whose leading term vanishes as x -> 0 might be encountered.
See, for example, cos._eval_as_leading_term.
"""
from sympy.series.order import Order
args = [a.as_leading_term(x, logx=logx) for a in self.args]
o = Order(1, x)
if any(x in a.free_symbols and o.contains(a) for a in args):
# Whereas x and any finite number are contained in O(1, x),
# expressions like 1/x are not. If any arg simplified to a
# vanishing expression as x -> 0 (like x or x**2, but not
# 3, 1/x, etc...) then the _eval_as_leading_term is needed
# to supply the first non-zero term of the series,
#
# e.g. expression leading term
# ---------- ------------
# cos(1/x) cos(1/x)
# cos(cos(x)) cos(1)
# cos(x) 1 <- _eval_as_leading_term needed
# sin(x) x <- _eval_as_leading_term needed
#
raise NotImplementedError(
'%s has no _eval_as_leading_term routine' % self.func)
else:
return self.func(*args)
class AppliedUndef(Function):
"""
Base class for expressions resulting from the application of an undefined
function.
"""
is_number = False
def __new__(cls, *args, **options):
args = list(map(sympify, args))
u = [a.name for a in args if isinstance(a, UndefinedFunction)]
if u:
raise TypeError('Invalid argument: expecting an expression, not UndefinedFunction%s: %s' % (
's'*(len(u) > 1), ', '.join(u)))
obj = super().__new__(cls, *args, **options)
return obj
def _eval_as_leading_term(self, x, logx=None, cdir=0):
return self
@property
def _diff_wrt(self):
"""
Allow derivatives wrt to undefined functions.
Examples
========
>>> from sympy import Function, Symbol
>>> f = Function('f')
>>> x = Symbol('x')
>>> f(x)._diff_wrt
True
>>> f(x).diff(x)
Derivative(f(x), x)
"""
return True
class UndefSageHelper:
"""
Helper to facilitate Sage conversion.
"""
def __get__(self, ins, typ):
import sage.all as sage
if ins is None:
return lambda: sage.function(typ.__name__)
else:
args = [arg._sage_() for arg in ins.args]
return lambda : sage.function(ins.__class__.__name__)(*args)
_undef_sage_helper = UndefSageHelper()
class UndefinedFunction(FunctionClass):
"""
The (meta)class of undefined functions.
"""
def __new__(mcl, name, bases=(AppliedUndef,), __dict__=None, **kwargs):
from .symbol import _filter_assumptions
# Allow Function('f', real=True)
# and/or Function(Symbol('f', real=True))
assumptions, kwargs = _filter_assumptions(kwargs)
if isinstance(name, Symbol):
assumptions = name._merge(assumptions)
name = name.name
elif not isinstance(name, str):
raise TypeError('expecting string or Symbol for name')
else:
commutative = assumptions.get('commutative', None)
assumptions = Symbol(name, **assumptions).assumptions0
if commutative is None:
assumptions.pop('commutative')
__dict__ = __dict__ or {}
        # put the `is_*` assumptions into __dict__
__dict__.update({'is_%s' % k: v for k, v in assumptions.items()})
# You can add other attributes, although they do have to be hashable
# (but seriously, if you want to add anything other than assumptions,
# just subclass Function)
__dict__.update(kwargs)
# add back the sanitized assumptions without the is_ prefix
kwargs.update(assumptions)
# Save these for __eq__
__dict__.update({'_kwargs': kwargs})
# do this for pickling
__dict__['__module__'] = None
obj = super().__new__(mcl, name, bases, __dict__)
obj.name = name
obj._sage_ = _undef_sage_helper
return obj
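    # Illustrative sketch: assumptions passed when creating an undefined
    # function become ``is_*`` attributes on the generated class (see the
    # __dict__ handling above), e.g.
    #
    #     >>> from sympy import Function, Symbol
    #     >>> x = Symbol('x')
    #     >>> f = Function('f', real=True)
    #     >>> f(x).is_real
    #     True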
def __instancecheck__(cls, instance):
return cls in type(instance).__mro__
_kwargs = {} # type: tDict[str, Optional[bool]]
def __hash__(self):
return hash((self.class_key(), frozenset(self._kwargs.items())))
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.class_key() == other.class_key() and
self._kwargs == other._kwargs)
def __ne__(self, other):
return not self == other
@property
def _diff_wrt(self):
return False
# XXX: The type: ignore on WildFunction is because mypy complains:
#
# sympy/core/function.py:939: error: Cannot determine type of 'sort_key' in
# base class 'Expr'
#
# Somehow this is because of the @cacheit decorator but it is not clear how to
# fix it.
class WildFunction(Function, AtomicExpr): # type: ignore
"""
A WildFunction function matches any function (with its arguments).
Examples
========
>>> from sympy import WildFunction, Function, cos
>>> from sympy.abc import x, y
>>> F = WildFunction('F')
>>> f = Function('f')
>>> F.nargs
Naturals0
>>> x.match(F)
>>> F.match(F)
{F_: F_}
>>> f(x).match(F)
{F_: f(x)}
>>> cos(x).match(F)
{F_: cos(x)}
>>> f(x, y).match(F)
{F_: f(x, y)}
To match functions with a given number of arguments, set ``nargs`` to the
desired value at instantiation:
>>> F = WildFunction('F', nargs=2)
>>> F.nargs
{2}
>>> f(x).match(F)
>>> f(x, y).match(F)
{F_: f(x, y)}
To match functions with a range of arguments, set ``nargs`` to a tuple
containing the desired number of arguments, e.g. if ``nargs = (1, 2)``
then functions with 1 or 2 arguments will be matched.
>>> F = WildFunction('F', nargs=(1, 2))
>>> F.nargs
{1, 2}
>>> f(x).match(F)
{F_: f(x)}
>>> f(x, y).match(F)
{F_: f(x, y)}
>>> f(x, y, 1).match(F)
"""
# XXX: What is this class attribute used for?
include = set() # type: tSet[Any]
def __init__(cls, name, **assumptions):
from sympy.sets.sets import Set, FiniteSet
cls.name = name
nargs = assumptions.pop('nargs', S.Naturals0)
if not isinstance(nargs, Set):
# Canonicalize nargs here. See also FunctionClass.
if is_sequence(nargs):
nargs = tuple(ordered(set(nargs)))
elif nargs is not None:
nargs = (as_int(nargs),)
nargs = FiniteSet(*nargs)
cls.nargs = nargs
def matches(self, expr, repl_dict=None, old=False):
if not isinstance(expr, (AppliedUndef, Function)):
return None
if len(expr.args) not in self.nargs:
return None
if repl_dict is None:
repl_dict = dict()
else:
repl_dict = repl_dict.copy()
repl_dict[self] = expr
return repl_dict
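    # Illustrative sketch of the nargs check in matches() above: a function
    # application with the wrong number of arguments does not match
    # (examples adapted from the class docstring):
    #
    #     >>> from sympy import WildFunction, Function, symbols
    #     >>> x, y = symbols('x y'); f = Function('f')
    #     >>> F = WildFunction('F', nargs=2)
    #     >>> f(x).match(F) is None
    #     True
    #     >>> f(x, y).match(F)
    #     {F_: f(x, y)}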
class Derivative(Expr):
"""
Carries out differentiation of the given expression with respect to symbols.
Examples
========
>>> from sympy import Derivative, Function, symbols, Subs
>>> from sympy.abc import x, y
>>> f, g = symbols('f g', cls=Function)
>>> Derivative(x**2, x, evaluate=True)
2*x
Denesting of derivatives retains the ordering of variables:
>>> Derivative(Derivative(f(x, y), y), x)
Derivative(f(x, y), y, x)
Contiguously identical symbols are merged into a tuple giving
the symbol and the count:
>>> Derivative(f(x), x, x, y, x)
Derivative(f(x), (x, 2), y, x)
If the derivative cannot be performed, and evaluate is True, the
order of the variables of differentiation will be made canonical:
>>> Derivative(f(x, y), y, x, evaluate=True)
Derivative(f(x, y), x, y)
Derivatives with respect to undefined functions can be calculated:
>>> Derivative(f(x)**2, f(x), evaluate=True)
2*f(x)
Such derivatives will show up when the chain rule is used to
    evaluate a derivative:
>>> f(g(x)).diff(x)
Derivative(f(g(x)), g(x))*Derivative(g(x), x)
Substitution is used to represent derivatives of functions with
arguments that are not symbols or functions:
>>> f(2*x + 3).diff(x) == 2*Subs(f(y).diff(y), y, 2*x + 3)
True
Notes
=====
Simplification of high-order derivatives:
Because there can be a significant amount of simplification that can be
done when multiple differentiations are performed, results will be
automatically simplified in a fairly conservative fashion unless the
keyword ``simplify`` is set to False.
>>> from sympy import sqrt, diff, Function, symbols
>>> from sympy.abc import x, y, z
>>> f, g = symbols('f,g', cls=Function)
>>> e = sqrt((x + 1)**2 + x)
>>> diff(e, (x, 5), simplify=False).count_ops()
136
>>> diff(e, (x, 5)).count_ops()
30
Ordering of variables:
If evaluate is set to True and the expression cannot be evaluated, the
list of differentiation symbols will be sorted, that is, the expression is
assumed to have continuous derivatives up to the order asked.
Derivative wrt non-Symbols:
For the most part, one may not differentiate wrt non-symbols.
For example, we do not allow differentiation wrt `x*y` because
there are multiple ways of structurally defining where x*y appears
in an expression: a very strict definition would make
(x*y*z).diff(x*y) == 0. Derivatives wrt defined functions (like
cos(x)) are not allowed, either:
>>> (x*y*z).diff(x*y)
Traceback (most recent call last):
...
ValueError: Can't calculate derivative wrt x*y.
To make it easier to work with variational calculus, however,
derivatives wrt AppliedUndef and Derivatives are allowed.
For example, in the Euler-Lagrange method one may write
F(t, u, v) where u = f(t) and v = f'(t). These variables can be
written explicitly as functions of time::
>>> from sympy.abc import t
>>> F = Function('F')
>>> U = f(t)
>>> V = U.diff(t)
The derivative wrt f(t) can be obtained directly:
>>> direct = F(t, U, V).diff(U)
When differentiation wrt a non-Symbol is attempted, the non-Symbol
is temporarily converted to a Symbol while the differentiation
is performed and the same answer is obtained:
>>> indirect = F(t, U, V).subs(U, x).diff(x).subs(x, U)
>>> assert direct == indirect
The implication of this non-symbol replacement is that all
functions are treated as independent of other functions and the
symbols are independent of the functions that contain them::
>>> x.diff(f(x))
0
>>> g(x).diff(f(x))
0
It also means that derivatives are assumed to depend only
on the variables of differentiation, not on anything contained
within the expression being differentiated::
>>> F = f(x)
>>> Fx = F.diff(x)
>>> Fx.diff(F) # derivative depends on x, not F
0
>>> Fxx = Fx.diff(x)
>>> Fxx.diff(Fx) # derivative depends on x, not Fx
0
The last example can be made explicit by showing the replacement
of Fx in Fxx with y:
>>> Fxx.subs(Fx, y)
Derivative(y, x)
Since that in itself will evaluate to zero, differentiating
wrt Fx will also be zero:
>>> _.doit()
0
Replacing undefined functions with concrete expressions
One must be careful to replace undefined functions with expressions
that contain variables consistent with the function definition and
    the variables of differentiation or else inconsistent results will
be obtained. Consider the following example:
>>> eq = f(x)*g(y)
>>> eq.subs(f(x), x*y).diff(x, y).doit()
y*Derivative(g(y), y) + g(y)
>>> eq.diff(x, y).subs(f(x), x*y).doit()
y*Derivative(g(y), y)
The results differ because `f(x)` was replaced with an expression
that involved both variables of differentiation. In the abstract
case, differentiation of `f(x)` by `y` is 0; in the concrete case,
the presence of `y` made that derivative nonvanishing and produced
the extra `g(y)` term.
Defining differentiation for an object
    An object must define a ._eval_derivative(symbol) method that returns
the differentiation result. This function only needs to consider the
non-trivial case where expr contains symbol and it should call the diff()
method internally (not _eval_derivative); Derivative should be the only
one to call _eval_derivative.
Any class can allow derivatives to be taken with respect to
itself (while indicating its scalar nature). See the
docstring of Expr._diff_wrt.
See Also
========
_sort_variable_count
"""
is_Derivative = True
@property
def _diff_wrt(self):
"""An expression may be differentiated wrt a Derivative if
it is in elementary form.
Examples
========
>>> from sympy import Function, Derivative, cos
>>> from sympy.abc import x
>>> f = Function('f')
>>> Derivative(f(x), x)._diff_wrt
True
>>> Derivative(cos(x), x)._diff_wrt
False
>>> Derivative(x + 1, x)._diff_wrt
False
A Derivative might be an unevaluated form of what will not be
a valid variable of differentiation if evaluated. For example,
>>> Derivative(f(f(x)), x).doit()
Derivative(f(x), x)*Derivative(f(f(x)), f(x))
Such an expression will present the same ambiguities as arise
when dealing with any other product, like ``2*x``, so ``_diff_wrt``
is False:
>>> Derivative(f(f(x)), x)._diff_wrt
False
"""
return self.expr._diff_wrt and isinstance(self.doit(), Derivative)
def __new__(cls, expr, *variables, **kwargs):
expr = sympify(expr)
symbols_or_none = getattr(expr, "free_symbols", None)
has_symbol_set = isinstance(symbols_or_none, set)
if not has_symbol_set:
raise ValueError(filldedent('''
Since there are no variables in the expression %s,
it cannot be differentiated.''' % expr))
# determine value for variables if it wasn't given
if not variables:
variables = expr.free_symbols
if len(variables) != 1:
if expr.is_number:
return S.Zero
if len(variables) == 0:
raise ValueError(filldedent('''
Since there are no variables in the expression,
the variable(s) of differentiation must be supplied
to differentiate %s''' % expr))
else:
raise ValueError(filldedent('''
Since there is more than one variable in the
expression, the variable(s) of differentiation
must be supplied to differentiate %s''' % expr))
# Split the list of variables into a list of the variables we are diff
# wrt, where each element of the list has the form (s, count) where
# s is the entity to diff wrt and count is the order of the
# derivative.
variable_count = []
array_likes = (tuple, list, Tuple)
from sympy.tensor.array import Array, NDimArray
for i, v in enumerate(variables):
if isinstance(v, UndefinedFunction):
raise TypeError(
"cannot differentiate wrt "
"UndefinedFunction: %s" % v)
if isinstance(v, array_likes):
if len(v) == 0:
# Ignore empty tuples: Derivative(expr, ... , (), ... )
continue
if isinstance(v[0], array_likes):
# Derive by array: Derivative(expr, ... , [[x, y, z]], ... )
if len(v) == 1:
v = Array(v[0])
count = 1
else:
v, count = v
v = Array(v)
else:
v, count = v
if count == 0:
continue
variable_count.append(Tuple(v, count))
continue
v = sympify(v)
if isinstance(v, Integer):
if i == 0:
raise ValueError("First variable cannot be a number: %i" % v)
count = v
prev, prevcount = variable_count[-1]
if prevcount != 1:
raise TypeError("tuple {} followed by number {}".format((prev, prevcount), v))
if count == 0:
variable_count.pop()
else:
variable_count[-1] = Tuple(prev, count)
else:
count = 1
variable_count.append(Tuple(v, count))
# light evaluation of contiguous, identical
# items: (x, 1), (x, 1) -> (x, 2)
merged = []
for t in variable_count:
v, c = t
if c.is_negative:
raise ValueError(
'order of differentiation must be nonnegative')
if merged and merged[-1][0] == v:
c += merged[-1][1]
if not c:
merged.pop()
else:
merged[-1] = Tuple(v, c)
else:
merged.append(t)
variable_count = merged
        # sanity check of variables of differentiation; we waited
# until the counts were computed since some variables may
# have been removed because the count was 0
for v, c in variable_count:
# v must have _diff_wrt True
if not v._diff_wrt:
__ = '' # filler to make error message neater
raise ValueError(filldedent('''
Can't calculate derivative wrt %s.%s''' % (v,
__)))
# We make a special case for 0th derivative, because there is no
# good way to unambiguously print this.
if len(variable_count) == 0:
return expr
evaluate = kwargs.get('evaluate', False)
if evaluate:
if isinstance(expr, Derivative):
expr = expr.canonical
variable_count = [
(v.canonical if isinstance(v, Derivative) else v, c)
for v, c in variable_count]
# Look for a quick exit if there are symbols that don't appear in
# expression at all. Note, this cannot check non-symbols like
# Derivatives as those can be created by intermediate
# derivatives.
zero = False
free = expr.free_symbols
from sympy.matrices.expressions.matexpr import MatrixExpr
for v, c in variable_count:
vfree = v.free_symbols
if c.is_positive and vfree:
if isinstance(v, AppliedUndef):
# these match exactly since
# x.diff(f(x)) == g(x).diff(f(x)) == 0
# and are not created by differentiation
D = Dummy()
if not expr.xreplace({v: D}).has(D):
zero = True
break
elif isinstance(v, MatrixExpr):
zero = False
break
elif isinstance(v, Symbol) and v not in free:
zero = True
break
else:
if not free & vfree:
# e.g. v is IndexedBase or Matrix
zero = True
break
if zero:
return cls._get_zero_with_shape_like(expr)
# make the order of symbols canonical
        # TODO: check if assumptions of discontinuous derivatives exist
variable_count = cls._sort_variable_count(variable_count)
# denest
if isinstance(expr, Derivative):
variable_count = list(expr.variable_count) + variable_count
expr = expr.expr
return _derivative_dispatch(expr, *variable_count, **kwargs)
# we return here if evaluate is False or if there is no
# _eval_derivative method
if not evaluate or not hasattr(expr, '_eval_derivative'):
# return an unevaluated Derivative
if evaluate and variable_count == [(expr, 1)] and expr.is_scalar:
# special hack providing evaluation for classes
# that have defined is_scalar=True but have no
# _eval_derivative defined
return S.One
return Expr.__new__(cls, expr, *variable_count)
# evaluate the derivative by calling _eval_derivative method
# of expr for each variable
# -------------------------------------------------------------
nderivs = 0 # how many derivatives were performed
unhandled = []
from sympy.matrices.common import MatrixCommon
for i, (v, count) in enumerate(variable_count):
old_expr = expr
old_v = None
is_symbol = v.is_symbol or isinstance(v,
(Iterable, Tuple, MatrixCommon, NDimArray))
if not is_symbol:
old_v = v
v = Dummy('xi')
expr = expr.xreplace({old_v: v})
# Derivatives and UndefinedFunctions are independent
# of all others
clashing = not (isinstance(old_v, Derivative) or \
isinstance(old_v, AppliedUndef))
if v not in expr.free_symbols and not clashing:
return expr.diff(v) # expr's version of 0
if not old_v.is_scalar and not hasattr(
old_v, '_eval_derivative'):
# special hack providing evaluation for classes
# that have defined is_scalar=True but have no
# _eval_derivative defined
expr *= old_v.diff(old_v)
obj = cls._dispatch_eval_derivative_n_times(expr, v, count)
if obj is not None and obj.is_zero:
return obj
nderivs += count
if old_v is not None:
if obj is not None:
# remove the dummy that was used
obj = obj.subs(v, old_v)
# restore expr
expr = old_expr
if obj is None:
# we've already checked for quick-exit conditions
# that give 0 so the remaining variables
# are contained in the expression but the expression
# did not compute a derivative so we stop taking
# derivatives
unhandled = variable_count[i:]
break
expr = obj
# what we have so far can be made canonical
expr = expr.replace(
lambda x: isinstance(x, Derivative),
lambda x: x.canonical)
if unhandled:
if isinstance(expr, Derivative):
unhandled = list(expr.variable_count) + unhandled
expr = expr.expr
expr = Expr.__new__(cls, expr, *unhandled)
if (nderivs > 1) == True and kwargs.get('simplify', True):
from .exprtools import factor_terms
from sympy.simplify.simplify import signsimp
expr = factor_terms(signsimp(expr))
return expr
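    # Illustrative sketch of the preprocessing in __new__ above: contiguous
    # identical variables are merged into (variable, count) pairs and, when
    # the derivative cannot be computed, the variables are put into canonical
    # order (examples repeated from the class docstring):
    #
    #     >>> from sympy import Derivative, Function, symbols
    #     >>> x, y = symbols('x y'); f = Function('f')
    #     >>> Derivative(f(x), x, x, y, x)
    #     Derivative(f(x), (x, 2), y, x)
    #     >>> Derivative(f(x, y), y, x, evaluate=True)
    #     Derivative(f(x, y), x, y)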
@property
def canonical(cls):
return cls.func(cls.expr,
*Derivative._sort_variable_count(cls.variable_count))
@classmethod
def _sort_variable_count(cls, vc):
"""
Sort (variable, count) pairs into canonical order while
retaining order of variables that do not commute during
differentiation:
* symbols and functions commute with each other
* derivatives commute with each other
* a derivative does not commute with anything it contains
* any other object is not allowed to commute if it has
free symbols in common with another object
Examples
========
>>> from sympy import Derivative, Function, symbols
>>> vsort = Derivative._sort_variable_count
>>> x, y, z = symbols('x y z')
>>> f, g, h = symbols('f g h', cls=Function)
Contiguous items are collapsed into one pair:
>>> vsort([(x, 1), (x, 1)])
[(x, 2)]
>>> vsort([(y, 1), (f(x), 1), (y, 1), (f(x), 1)])
[(y, 2), (f(x), 2)]
Ordering is canonical.
>>> def vsort0(*v):
... # docstring helper to
... # change vi -> (vi, 0), sort, and return vi vals
... return [i[0] for i in vsort([(i, 0) for i in v])]
>>> vsort0(y, x)
[x, y]
>>> vsort0(g(y), g(x), f(y))
[f(y), g(x), g(y)]
Symbols are sorted as far to the left as possible but never
move to the left of a derivative having the same symbol in
its variables; the same applies to AppliedUndef which are
always sorted after Symbols:
>>> dfx = f(x).diff(x)
>>> assert vsort0(dfx, y) == [y, dfx]
>>> assert vsort0(dfx, x) == [dfx, x]
"""
if not vc:
return []
vc = list(vc)
if len(vc) == 1:
return [Tuple(*vc[0])]
V = list(range(len(vc)))
E = []
v = lambda i: vc[i][0]
D = Dummy()
def _block(d, v, wrt=False):
# return True if v should not come before d else False
if d == v:
return wrt
if d.is_Symbol:
return False
if isinstance(d, Derivative):
                # a derivative blocks if any of its variables contain
# v; the wrt flag will return True for an exact match
# and will cause an AppliedUndef to block if v is in
# the arguments
if any(_block(k, v, wrt=True)
for k in d._wrt_variables):
return True
return False
if not wrt and isinstance(d, AppliedUndef):
return False
if v.is_Symbol:
return v in d.free_symbols
if isinstance(v, AppliedUndef):
return _block(d.xreplace({v: D}), D)
return d.free_symbols & v.free_symbols
for i in range(len(vc)):
for j in range(i):
if _block(v(j), v(i)):
E.append((j,i))
# this is the default ordering to use in case of ties
O = dict(zip(ordered(uniq([i for i, c in vc])), range(len(vc))))
ix = topological_sort((V, E), key=lambda i: O[v(i)])
# merge counts of contiguously identical items
merged = []
for v, c in [vc[i] for i in ix]:
if merged and merged[-1][0] == v:
merged[-1][1] += c
else:
merged.append([v, c])
return [Tuple(*i) for i in merged]
def _eval_is_commutative(self):
return self.expr.is_commutative
def _eval_derivative(self, v):
# If v (the variable of differentiation) is not in
# self.variables, we might be able to take the derivative.
if v not in self._wrt_variables:
dedv = self.expr.diff(v)
if isinstance(dedv, Derivative):
return dedv.func(dedv.expr, *(self.variable_count + dedv.variable_count))
# dedv (d(self.expr)/dv) could have simplified things such that the
# derivative wrt things in self.variables can now be done. Thus,
# we set evaluate=True to see if there are any other derivatives
# that can be done. The most common case is when dedv is a simple
# number so that the derivative wrt anything else will vanish.
return self.func(dedv, *self.variables, evaluate=True)
# In this case v was in self.variables so the derivative wrt v has
# already been attempted and was not computed, either because it
# couldn't be or evaluate=False originally.
variable_count = list(self.variable_count)
variable_count.append((v, 1))
return self.func(self.expr, *variable_count, evaluate=False)
def doit(self, **hints):
expr = self.expr
if hints.get('deep', True):
expr = expr.doit(**hints)
hints['evaluate'] = True
rv = self.func(expr, *self.variable_count, **hints)
        if rv != self and rv.has(Derivative):
rv = rv.doit(**hints)
return rv
@_sympifyit('z0', NotImplementedError)
def doit_numerically(self, z0):
"""
Evaluate the derivative at z numerically.
When we can represent derivatives at a point, this should be folded
into the normal evalf. For now, we need a special method.
"""
if len(self.free_symbols) != 1 or len(self.variables) != 1:
raise NotImplementedError('partials and higher order derivatives')
z = list(self.free_symbols)[0]
def eval(x):
f0 = self.expr.subs(z, Expr._from_mpmath(x, prec=mpmath.mp.prec))
f0 = f0.evalf(prec_to_dps(mpmath.mp.prec))
return f0._to_mpmath(mpmath.mp.prec)
return Expr._from_mpmath(mpmath.diff(eval,
z0._to_mpmath(mpmath.mp.prec)),
mpmath.mp.prec)
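    # Illustrative sketch (result is approximate, at mpmath's working
    # precision): doit_numerically differentiates at a concrete point, e.g.
    #
    #     >>> from sympy import Derivative, sin, Symbol
    #     >>> x = Symbol('x')
    #     >>> Derivative(sin(x), x).doit_numerically(0)   # approximately 1 (== cos(0))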
@property
def expr(self):
return self._args[0]
@property
def _wrt_variables(self):
# return the variables of differentiation without
# respect to the type of count (int or symbolic)
return [i[0] for i in self.variable_count]
@property
def variables(self):
# TODO: deprecate? YES, make this 'enumerated_variables' and
# name _wrt_variables as variables
# TODO: support for `d^n`?
rv = []
for v, count in self.variable_count:
if not count.is_Integer:
raise TypeError(filldedent('''
Cannot give expansion for symbolic count. If you just
want a list of all variables of differentiation, use
_wrt_variables.'''))
rv.extend([v]*count)
return tuple(rv)
@property
def variable_count(self):
return self._args[1:]
@property
def derivative_count(self):
return sum([count for _, count in self.variable_count], 0)
@property
def free_symbols(self):
ret = self.expr.free_symbols
# Add symbolic counts to free_symbols
for _, count in self.variable_count:
ret.update(count.free_symbols)
return ret
@property
def kind(self):
return self.args[0].kind
def _eval_subs(self, old, new):
# The substitution (old, new) cannot be done inside
# Derivative(expr, vars) for a variety of reasons
# as handled below.
if old in self._wrt_variables:
# first handle the counts
expr = self.func(self.expr, *[(v, c.subs(old, new))
for v, c in self.variable_count])
if expr != self:
return expr._eval_subs(old, new)
# quick exit case
if not getattr(new, '_diff_wrt', False):
# case (0): new is not a valid variable of
# differentiation
if isinstance(old, Symbol):
# don't introduce a new symbol if the old will do
return Subs(self, old, new)
else:
xi = Dummy('xi')
return Subs(self.xreplace({old: xi}), xi, new)
# If both are Derivatives with the same expr, check if old is
# equivalent to self or if old is a subderivative of self.
if old.is_Derivative and old.expr == self.expr:
if self.canonical == old.canonical:
return new
# collections.Counter doesn't have __le__
def _subset(a, b):
return all((a[i] <= b[i]) == True for i in a)
old_vars = Counter(dict(reversed(old.variable_count)))
self_vars = Counter(dict(reversed(self.variable_count)))
if _subset(old_vars, self_vars):
return _derivative_dispatch(new, *(self_vars - old_vars).items()).canonical
args = list(self.args)
newargs = list(x._subs(old, new) for x in args)
if args[0] == old:
# complete replacement of self.expr
# we already checked that the new is valid so we know
# it won't be a problem should it appear in variables
return _derivative_dispatch(*newargs)
if newargs[0] != args[0]:
# case (1) can't change expr by introducing something that is in
# the _wrt_variables if it was already in the expr
# e.g.
# for Derivative(f(x, g(y)), y), x cannot be replaced with
# anything that has y in it; for f(g(x), g(y)).diff(g(y))
# g(x) cannot be replaced with anything that has g(y)
syms = {vi: Dummy() for vi in self._wrt_variables
if not vi.is_Symbol}
wrt = {syms.get(vi, vi) for vi in self._wrt_variables}
forbidden = args[0].xreplace(syms).free_symbols & wrt
nfree = new.xreplace(syms).free_symbols
ofree = old.xreplace(syms).free_symbols
if (nfree - ofree) & forbidden:
return Subs(self, old, new)
viter = ((i, j) for ((i, _), (j, _)) in zip(newargs[1:], args[1:]))
if any(i != j for i, j in viter): # a wrt-variable change
# case (2) can't change vars by introducing a variable
# that is contained in expr, e.g.
# for Derivative(f(z, g(h(x), y)), y), y cannot be changed to
# x, h(x), or g(h(x), y)
for a in _atomic(self.expr, recursive=True):
for i in range(1, len(newargs)):
vi, _ = newargs[i]
if a == vi and vi != args[i][0]:
return Subs(self, old, new)
# more arg-wise checks
vc = newargs[1:]
oldv = self._wrt_variables
newe = self.expr
subs = []
for i, (vi, ci) in enumerate(vc):
if not vi._diff_wrt:
# case (3) invalid differentiation expression so
# create a replacement dummy
xi = Dummy('xi_%i' % i)
# replace the old valid variable with the dummy
# in the expression
newe = newe.xreplace({oldv[i]: xi})
# and replace the bad variable with the dummy
vc[i] = (xi, ci)
# and record the dummy with the new (invalid)
# differentiation expression
subs.append((xi, vi))
if subs:
# handle any residual substitution in the expression
newe = newe._subs(old, new)
# return the Subs-wrapped derivative
return Subs(Derivative(newe, *vc), *zip(*subs))
# everything was ok
return _derivative_dispatch(*newargs)
def _eval_lseries(self, x, logx, cdir=0):
dx = self.variables
for term in self.expr.lseries(x, logx=logx, cdir=cdir):
yield self.func(term, *dx)
def _eval_nseries(self, x, n, logx, cdir=0):
arg = self.expr.nseries(x, n=n, logx=logx)
o = arg.getO()
dx = self.variables
rv = [self.func(a, *dx) for a in Add.make_args(arg.removeO())]
if o:
rv.append(o/x)
return Add(*rv)
def _eval_as_leading_term(self, x, logx=None, cdir=0):
series_gen = self.expr.lseries(x)
d = S.Zero
for leading_term in series_gen:
d = diff(leading_term, *self.variables)
if d != 0:
break
return d
def as_finite_difference(self, points=1, x0=None, wrt=None):
""" Expresses a Derivative instance as a finite difference.
Parameters
==========
points : sequence or coefficient, optional
If sequence: discrete values (length >= order+1) of the
independent variable used for generating the finite
difference weights.
If it is a coefficient, it will be used as the step-size
for generating an equidistant sequence of length order+1
centered around ``x0``. Default: 1 (step-size 1)
x0 : number or Symbol, optional
the value of the independent variable (``wrt``) at which the
derivative is to be approximated. Default: same as ``wrt``.
wrt : Symbol, optional
"with respect to" the variable for which the (partial)
derivative is to be approximated for. If not provided it
is required that the derivative is ordinary. Default: ``None``.
Examples
========
>>> from sympy import symbols, Function, exp, sqrt, Symbol
>>> x, h = symbols('x h')
>>> f = Function('f')
>>> f(x).diff(x).as_finite_difference()
-f(x - 1/2) + f(x + 1/2)
The default step size and number of points are 1 and
``order + 1`` respectively. We can change the step size by
passing a symbol as a parameter:
>>> f(x).diff(x).as_finite_difference(h)
-f(-h/2 + x)/h + f(h/2 + x)/h
We can also specify the discretized values to be used in a
sequence:
>>> f(x).diff(x).as_finite_difference([x, x+h, x+2*h])
-3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h)
The algorithm is not restricted to use equidistant spacing, nor
do we need to make the approximation around ``x0``, but we can get
an expression estimating the derivative at an offset:
>>> e, sq2 = exp(1), sqrt(2)
>>> xl = [x-h, x+h, x+e*h]
>>> f(x).diff(x, 1).as_finite_difference(xl, x+h*sq2) # doctest: +ELLIPSIS
2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/...
To approximate ``Derivative`` around ``x0`` using a non-equidistant
spacing step, the algorithm supports assignment of undefined
functions to ``points``:
>>> dx = Function('dx')
>>> f(x).diff(x).as_finite_difference(points=dx(x), x0=x-h)
-f(-h + x - dx(-h + x)/2)/dx(-h + x) + f(-h + x + dx(-h + x)/2)/dx(-h + x)
Partial derivatives are also supported:
>>> y = Symbol('y')
>>> d2fdxdy=f(x,y).diff(x,y)
>>> d2fdxdy.as_finite_difference(wrt=x)
-Derivative(f(x - 1/2, y), y) + Derivative(f(x + 1/2, y), y)
We can apply ``as_finite_difference`` to ``Derivative`` instances in
compound expressions using ``replace``:
>>> (1 + 42**f(x).diff(x)).replace(lambda arg: arg.is_Derivative,
... lambda arg: arg.as_finite_difference())
42**(-f(x - 1/2) + f(x + 1/2)) + 1
See also
========
sympy.calculus.finite_diff.apply_finite_diff
sympy.calculus.finite_diff.differentiate_finite
sympy.calculus.finite_diff.finite_diff_weights
"""
from sympy.calculus.finite_diff import _as_finite_diff
return _as_finite_diff(self, points, x0, wrt)
@classmethod
def _get_zero_with_shape_like(cls, expr):
return S.Zero
@classmethod
def _dispatch_eval_derivative_n_times(cls, expr, v, count):
# Evaluate the derivative `n` times. If
# `_eval_derivative_n_times` is not overridden by the current
# object, the default in `Basic` will call a loop over
# `_eval_derivative`:
return expr._eval_derivative_n_times(v, count)
def _derivative_dispatch(expr, *variables, **kwargs):
from sympy.matrices.common import MatrixCommon
from sympy.matrices.expressions.matexpr import MatrixExpr
from sympy.tensor.array import NDimArray
array_types = (MatrixCommon, MatrixExpr, NDimArray, list, tuple, Tuple)
if isinstance(expr, array_types) or any(isinstance(i[0], array_types) if isinstance(i, (tuple, list, Tuple)) else isinstance(i, array_types) for i in variables):
from sympy.tensor.array.array_derivatives import ArrayDerivative
return ArrayDerivative(expr, *variables, **kwargs)
return Derivative(expr, *variables, **kwargs)
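# Illustrative sketch of the dispatch above: array-like expressions or
# variables are routed to ArrayDerivative, while scalar arguments fall
# through to Derivative (Matrix is assumed to be importable from sympy as
# usual):
#
#     >>> from sympy import Matrix, symbols
#     >>> x, y = symbols('x y')
#     >>> d_arr = _derivative_dispatch(Matrix([x**2, x*y]), x)   # ArrayDerivative
#     >>> d_scal = _derivative_dispatch(x**2, x)                 # Derivative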
class Lambda(Expr):
"""
Lambda(x, expr) represents a lambda function similar to Python's
'lambda x: expr'. A function of several variables is written as
Lambda((x, y, ...), expr).
Examples
========
A simple example:
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> f = Lambda(x, x**2)
>>> f(4)
16
For multivariate functions, use:
>>> from sympy.abc import y, z, t
>>> f2 = Lambda((x, y, z, t), x + y**z + t**z)
>>> f2(1, 2, 3, 4)
73
It is also possible to unpack tuple arguments:
>>> f = Lambda(((x, y), z), x + y + z)
>>> f((1, 2), 3)
6
A handy shortcut for lots of arguments:
>>> p = x, y, z
>>> f = Lambda(p, x + y*z)
>>> f(*p)
x + y*z
"""
is_Function = True
def __new__(cls, signature, expr):
if iterable(signature) and not isinstance(signature, (tuple, Tuple)):
sympy_deprecation_warning(
"""
Using a non-tuple iterable as the first argument to Lambda
is deprecated. Use Lambda(tuple(args), expr) instead.
""",
deprecated_since_version="1.5",
active_deprecations_target="deprecated-non-tuple-lambda",
)
signature = tuple(signature)
sig = signature if iterable(signature) else (signature,)
sig = sympify(sig)
cls._check_signature(sig)
if len(sig) == 1 and sig[0] == expr:
return S.IdentityFunction
return Expr.__new__(cls, sig, sympify(expr))
@classmethod
def _check_signature(cls, sig):
syms = set()
def rcheck(args):
for a in args:
if a.is_symbol:
if a in syms:
raise BadSignatureError("Duplicate symbol %s" % a)
syms.add(a)
elif isinstance(a, Tuple):
rcheck(a)
else:
raise BadSignatureError("Lambda signature should be only tuples"
" and symbols, not %s" % a)
if not isinstance(sig, Tuple):
raise BadSignatureError("Lambda signature should be a tuple not %s" % sig)
# Recurse through the signature:
rcheck(sig)
@property
def signature(self):
"""The expected form of the arguments to be unpacked into variables"""
return self._args[0]
@property
def expr(self):
"""The return value of the function"""
return self._args[1]
@property
def variables(self):
"""The variables used in the internal representation of the function"""
def _variables(args):
if isinstance(args, Tuple):
for arg in args:
yield from _variables(arg)
else:
yield args
return tuple(_variables(self.signature))
@property
def nargs(self):
from sympy.sets.sets import FiniteSet
return FiniteSet(len(self.signature))
bound_symbols = variables
@property
def free_symbols(self):
return self.expr.free_symbols - set(self.variables)
def __call__(self, *args):
n = len(args)
if n not in self.nargs: # Lambda only ever has 1 value in nargs
# XXX: exception message must be in exactly this format to
# make it work with NumPy's functions like vectorize(). See,
# for example, https://github.com/numpy/numpy/issues/1697.
# The ideal solution would be just to attach metadata to
# the exception and change NumPy to take advantage of this.
## XXX does this apply to Lambda? If not, remove this comment.
temp = ('%(name)s takes exactly %(args)s '
'argument%(plural)s (%(given)s given)')
raise BadArgumentsError(temp % {
'name': self,
'args': list(self.nargs)[0],
'plural': 's'*(list(self.nargs)[0] != 1),
'given': n})
d = self._match_signature(self.signature, args)
return self.expr.xreplace(d)
def _match_signature(self, sig, args):
symargmap = {}
def rmatch(pars, args):
for par, arg in zip(pars, args):
if par.is_symbol:
symargmap[par] = arg
elif isinstance(par, Tuple):
if not isinstance(arg, (tuple, Tuple)) or len(args) != len(pars):
raise BadArgumentsError("Can't match %s and %s" % (args, pars))
rmatch(par, arg)
rmatch(sig, args)
return symargmap
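    # Illustrative sketch of the recursive matching above: nested tuples in
    # the signature are unpacked element-wise into the symbol -> argument map
    # (example repeated from the class docstring):
    #
    #     >>> from sympy import Lambda, symbols
    #     >>> x, y, z = symbols('x y z')
    #     >>> f = Lambda(((x, y), z), x + y + z)
    #     >>> f((1, 2), 3)
    #     6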
@property
def is_identity(self):
"""Return ``True`` if this ``Lambda`` is an identity function. """
return self.signature == self.expr
def _eval_evalf(self, prec):
return self.func(self.args[0], self.args[1].evalf(n=prec_to_dps(prec)))
class Subs(Expr):
"""
Represents unevaluated substitutions of an expression.
``Subs(expr, x, x0)`` represents the expression resulting
from substituting x with x0 in expr.
Parameters
==========
expr : Expr
An expression.
x : tuple, variable
A variable or list of distinct variables.
x0 : tuple or list of tuples
A point or list of evaluation points
corresponding to those variables.
Notes
=====
``Subs`` objects are generally useful to represent unevaluated derivatives
calculated at a point.
The variables may be expressions, but they are subjected to the limitations
of subs(), so it is usually a good practice to use only symbols for
variables, since in that case there can be no ambiguity.
There's no automatic expansion - use the method .doit() to effect all
possible substitutions of the object and also of objects inside the
expression.
When evaluating derivatives at a point that is not a symbol, a Subs object
is returned. One is also able to calculate derivatives of Subs objects - in
this case the expression is always expanded (for the unevaluated form, use
Derivative()).
Examples
========
>>> from sympy import Subs, Function, sin, cos
>>> from sympy.abc import x, y, z
>>> f = Function('f')
Subs are created when a particular substitution cannot be made. The
x in the derivative cannot be replaced with 0 because 0 is not a
    valid variable of differentiation:
>>> f(x).diff(x).subs(x, 0)
Subs(Derivative(f(x), x), x, 0)
Once f is known, the derivative and evaluation at 0 can be done:
>>> _.subs(f, sin).doit() == sin(x).diff(x).subs(x, 0) == cos(0)
True
Subs can also be created directly with one or more variables:
>>> Subs(f(x)*sin(y) + z, (x, y), (0, 1))
Subs(z + f(x)*sin(y), (x, y), (0, 1))
>>> _.doit()
z + f(0)*sin(1)
Notes
=====
In order to allow expressions to combine before doit is done, a
representation of the Subs expression is used internally to make
expressions that are superficially different compare the same:
>>> a, b = Subs(x, x, 0), Subs(y, y, 0)
>>> a + b
2*Subs(x, x, 0)
This can lead to unexpected consequences when using methods
like `has` that are cached:
>>> s = Subs(x, x, 0)
>>> s.has(x), s.has(y)
(True, False)
>>> ss = s.subs(x, y)
>>> ss.has(x), ss.has(y)
(True, False)
>>> s, ss
(Subs(x, x, 0), Subs(y, y, 0))
"""
def __new__(cls, expr, variables, point, **assumptions):
if not is_sequence(variables, Tuple):
variables = [variables]
variables = Tuple(*variables)
if has_dups(variables):
repeated = [str(v) for v, i in Counter(variables).items() if i > 1]
__ = ', '.join(repeated)
raise ValueError(filldedent('''
The following expressions appear more than once: %s
''' % __))
point = Tuple(*(point if is_sequence(point, Tuple) else [point]))
if len(point) != len(variables):
raise ValueError('Number of point values must be the same as '
'the number of variables.')
if not point:
return sympify(expr)
# denest
if isinstance(expr, Subs):
variables = expr.variables + variables
point = expr.point + point
expr = expr.expr
else:
expr = sympify(expr)
# use symbols with names equal to the point value (with prepended _)
# to give a variable-independent expression
pre = "_"
pts = sorted(set(point), key=default_sort_key)
from sympy.printing.str import StrPrinter
class CustomStrPrinter(StrPrinter):
def _print_Dummy(self, expr):
return str(expr) + str(expr.dummy_index)
def mystr(expr, **settings):
p = CustomStrPrinter(settings)
return p.doprint(expr)
while 1:
s_pts = {p: Symbol(pre + mystr(p)) for p in pts}
reps = [(v, s_pts[p])
for v, p in zip(variables, point)]
# if any underscore-prepended symbol is already a free symbol
# and is a variable with a different point value, then there
# is a clash, e.g. _0 clashes in Subs(_0 + _1, (_0, _1), (1, 0))
# because the new symbol that would be created is _1 but _1
# is already mapped to 0 so __0 and __1 are used for the new
# symbols
if any(r in expr.free_symbols and
r in variables and
Symbol(pre + mystr(point[variables.index(r)])) != r
for _, r in reps):
pre += "_"
continue
break
obj = Expr.__new__(cls, expr, Tuple(*variables), point)
obj._expr = expr.xreplace(dict(reps))
return obj
def _eval_is_commutative(self):
return self.expr.is_commutative
def doit(self, **hints):
e, v, p = self.args
# remove self mappings
for i, (vi, pi) in enumerate(zip(v, p)):
if vi == pi:
v = v[:i] + v[i + 1:]
p = p[:i] + p[i + 1:]
if not v:
return self.expr
if isinstance(e, Derivative):
# apply functions first, e.g. f -> cos
undone = []
for i, vi in enumerate(v):
if isinstance(vi, FunctionClass):
e = e.subs(vi, p[i])
else:
undone.append((vi, p[i]))
if not isinstance(e, Derivative):
e = e.doit()
if isinstance(e, Derivative):
# do Subs that aren't related to differentiation
undone2 = []
D = Dummy()
arg = e.args[0]
for vi, pi in undone:
if D not in e.xreplace({vi: D}).free_symbols:
if arg.has(vi):
e = e.subs(vi, pi)
else:
undone2.append((vi, pi))
undone = undone2
# differentiate wrt variables that are present
wrt = []
D = Dummy()
expr = e.expr
free = expr.free_symbols
for vi, ci in e.variable_count:
if isinstance(vi, Symbol) and vi in free:
expr = expr.diff((vi, ci))
elif D in expr.subs(vi, D).free_symbols:
expr = expr.diff((vi, ci))
else:
wrt.append((vi, ci))
# inject remaining subs
rv = expr.subs(undone)
# do remaining differentiation *in order given*
for vc in wrt:
rv = rv.diff(vc)
else:
# inject remaining subs
rv = e.subs(undone)
else:
rv = e.doit(**hints).subs(list(zip(v, p)))
if hints.get('deep', True) and rv != self:
rv = rv.doit(**hints)
return rv
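    # Illustrative sketch of doit above: once the undefined function is
    # replaced by a concrete one, both the differentiation and the
    # substitution can be carried out (example adapted from the class
    # docstring):
    #
    #     >>> from sympy import Function, sin, Symbol
    #     >>> x = Symbol('x'); f = Function('f')
    #     >>> s = f(x).diff(x).subs(x, 0)      # Subs(Derivative(f(x), x), x, 0)
    #     >>> s.subs(f, sin).doit()            # cos(0)
    #     1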
def evalf(self, prec=None, **options):
return self.doit().evalf(prec, **options)
n = evalf # type:ignore
@property
def variables(self):
"""The variables to be evaluated"""
return self._args[1]
bound_symbols = variables
@property
def expr(self):
"""The expression on which the substitution operates"""
return self._args[0]
@property
def point(self):
"""The values for which the variables are to be substituted"""
return self._args[2]
@property
def free_symbols(self):
return (self.expr.free_symbols - set(self.variables) |
set(self.point.free_symbols))
@property
def expr_free_symbols(self):
sympy_deprecation_warning("""
The expr_free_symbols property is deprecated. Use free_symbols to get
the free symbols of an expression.
""",
deprecated_since_version="1.9",
active_deprecations_target="deprecated-expr-free-symbols")
# Don't show the warning twice from the recursive call
with ignore_warnings(SymPyDeprecationWarning):
return (self.expr.expr_free_symbols - set(self.variables) |
set(self.point.expr_free_symbols))
def __eq__(self, other):
if not isinstance(other, Subs):
return False
return self._hashable_content() == other._hashable_content()
def __ne__(self, other):
return not(self == other)
def __hash__(self):
return super().__hash__()
def _hashable_content(self):
return (self._expr.xreplace(self.canonical_variables),
) + tuple(ordered([(v, p) for v, p in
zip(self.variables, self.point) if not self.expr.has(v)]))
def _eval_subs(self, old, new):
        # Subs doit will do the variables in order; the semantics
        # of subs for a Subs object foo maintain the following
        # invariant:
        # foo.doit().subs(reps) == foo.subs(reps).doit()
pt = list(self.point)
if old in self.variables:
if _atomic(new) == {new} and not any(
i.has(new) for i in self.args):
# the substitution is neutral
return self.xreplace({old: new})
# any occurrence of old before this point will get
# handled by replacements from here on
i = self.variables.index(old)
for j in range(i, len(self.variables)):
pt[j] = pt[j]._subs(old, new)
return self.func(self.expr, self.variables, pt)
v = [i._subs(old, new) for i in self.variables]
if v != list(self.variables):
return self.func(self.expr, self.variables + (old,), pt + [new])
expr = self.expr._subs(old, new)
pt = [i._subs(old, new) for i in self.point]
return self.func(expr, v, pt)
def _eval_derivative(self, s):
# Apply the chain rule of the derivative on the substitution variables:
f = self.expr
vp = V, P = self.variables, self.point
val = Add.fromiter(p.diff(s)*Subs(f.diff(v), *vp).doit()
for v, p in zip(V, P))
# these are all the free symbols in the expr
efree = f.free_symbols
# some symbols like IndexedBase include themselves and args
# as free symbols
compound = {i for i in efree if len(i.free_symbols) > 1}
# hide them and see what independent free symbols remain
dums = {Dummy() for i in compound}
masked = f.xreplace(dict(zip(compound, dums)))
ifree = masked.free_symbols - dums
# include the compound symbols
free = ifree | compound
# remove the variables already handled
free -= set(V)
# add back any free symbols of remaining compound symbols
free |= {i for j in free & compound for i in j.free_symbols}
# if symbols of s are in free then there is more to do
if free & s.free_symbols:
val += Subs(f.diff(s), self.variables, self.point).doit()
return val
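    # Illustrative sketch of the chain rule applied above: differentiating a
    # Subs with respect to a symbol that appears in the point produces the
    # derivative of the point times the substituted derivative of the
    # expression, roughly
    #
    #     >>> from sympy import Subs, Function, symbols
    #     >>> x, y = symbols('x y'); f = Function('f')
    #     >>> Subs(f(x), x, y**2).diff(y)   # 2*y*Subs(Derivative(f(x), x), x, y**2)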
def _eval_nseries(self, x, n, logx, cdir=0):
if x in self.point:
# x is the variable being substituted into
apos = self.point.index(x)
other = self.variables[apos]
else:
other = x
arg = self.expr.nseries(other, n=n, logx=logx)
o = arg.getO()
terms = Add.make_args(arg.removeO())
rv = Add(*[self.func(a, *self.args[1:]) for a in terms])
if o:
rv += o.subs(other, x)
return rv
def _eval_as_leading_term(self, x, logx=None, cdir=0):
if x in self.point:
ipos = self.point.index(x)
xvar = self.variables[ipos]
return self.expr.as_leading_term(xvar)
if x in self.variables:
# if `x` is a dummy variable, it means it won't exist after the
# substitution has been performed:
return self
# The variable is independent of the substitution:
return self.expr.as_leading_term(x)
def diff(f, *symbols, **kwargs):
"""
Differentiate f with respect to symbols.
Explanation
===========
This is just a wrapper to unify .diff() and the Derivative class; its
interface is similar to that of integrate(). You can use the same
shortcuts for multiple variables as with Derivative. For example,
diff(f(x), x, x, x) and diff(f(x), x, 3) both return the third derivative
of f(x).
You can pass evaluate=False to get an unevaluated Derivative class. Note
    that if there are 0 symbols (such as diff(f(x), x, 0)), then the result will
be the function (the zeroth derivative), even if evaluate=False.
Examples
========
>>> from sympy import sin, cos, Function, diff
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> diff(sin(x), x)
cos(x)
>>> diff(f(x), x, x, x)
Derivative(f(x), (x, 3))
>>> diff(f(x), x, 3)
Derivative(f(x), (x, 3))
>>> diff(sin(x)*cos(y), x, 2, y, 2)
sin(x)*cos(y)
>>> type(diff(sin(x), x))
cos
>>> type(diff(sin(x), x, evaluate=False))
<class 'sympy.core.function.Derivative'>
>>> type(diff(sin(x), x, 0))
sin
>>> type(diff(sin(x), x, 0, evaluate=False))
sin
>>> diff(sin(x))
cos(x)
>>> diff(sin(x*y))
Traceback (most recent call last):
...
ValueError: specify differentiation variables to differentiate sin(x*y)
Note that ``diff(sin(x))`` syntax is meant only for convenience
in interactive sessions and should be avoided in library code.
References
==========
.. [1] http://reference.wolfram.com/legacy/v5_2/Built-inFunctions/AlgebraicComputation/Calculus/D.html
See Also
========
Derivative
idiff: computes the derivative implicitly
"""
if hasattr(f, 'diff'):
return f.diff(*symbols, **kwargs)
kwargs.setdefault('evaluate', True)
return _derivative_dispatch(f, *symbols, **kwargs)
def expand(e, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
r"""
Expand an expression using methods given as hints.
Explanation
===========
Hints evaluated unless explicitly set to False are: ``basic``, ``log``,
    ``multinomial``, ``mul``, ``power_base``, and ``power_exp``. The following
hints are supported but not applied unless set to True: ``complex``,
``func``, and ``trig``. In addition, the following meta-hints are
supported by some or all of the other hints: ``frac``, ``numer``,
``denom``, ``modulus``, and ``force``. ``deep`` is supported by all
hints. Additionally, subclasses of Expr may define their own hints or
meta-hints.
The ``basic`` hint is used for any special rewriting of an object that
should be done automatically (along with the other hints like ``mul``)
when expand is called. This is a catch-all hint to handle any sort of
expansion that may not be described by the existing hint names. To use
this hint an object should override the ``_eval_expand_basic`` method.
Objects may also define their own expand methods, which are not run by
default. See the API section below.
If ``deep`` is set to ``True`` (the default), things like arguments of
functions are recursively expanded. Use ``deep=False`` to only expand on
the top level.
If the ``force`` hint is used, assumptions about variables will be ignored
in making the expansion.
Hints
=====
These hints are run by default
mul
---
Distributes multiplication over addition:
>>> from sympy import cos, exp, sin
>>> from sympy.abc import x, y, z
>>> (y*(x + z)).expand(mul=True)
x*y + y*z
multinomial
-----------
Expand (x + y + ...)**n where n is a positive integer.
>>> ((x + y + z)**2).expand(multinomial=True)
x**2 + 2*x*y + 2*x*z + y**2 + 2*y*z + z**2
power_exp
---------
Expand addition in exponents into multiplied bases.
>>> exp(x + y).expand(power_exp=True)
exp(x)*exp(y)
>>> (2**(x + y)).expand(power_exp=True)
2**x*2**y
power_base
----------
Split powers of multiplied bases.
This only happens by default if assumptions allow, or if the
``force`` meta-hint is used:
>>> ((x*y)**z).expand(power_base=True)
(x*y)**z
>>> ((x*y)**z).expand(power_base=True, force=True)
x**z*y**z
>>> ((2*y)**z).expand(power_base=True)
2**z*y**z
Note that in some cases where this expansion always holds, SymPy performs
it automatically:
>>> (x*y)**2
x**2*y**2
log
---
    Pull out power of an argument as a coefficient and split log products
into sums of logs.
Note that these only work if the arguments of the log function have the
proper assumptions--the arguments must be positive and the exponents must
be real--or else the ``force`` hint must be True:
>>> from sympy import log, symbols
>>> log(x**2*y).expand(log=True)
log(x**2*y)
>>> log(x**2*y).expand(log=True, force=True)
2*log(x) + log(y)
>>> x, y = symbols('x,y', positive=True)
>>> log(x**2*y).expand(log=True)
2*log(x) + log(y)
basic
-----
This hint is intended primarily as a way for custom subclasses to enable
expansion by default.
These hints are not run by default:
complex
-------
Split an expression into real and imaginary parts.
>>> x, y = symbols('x,y')
>>> (x + y).expand(complex=True)
re(x) + re(y) + I*im(x) + I*im(y)
>>> cos(x).expand(complex=True)
-I*sin(re(x))*sinh(im(x)) + cos(re(x))*cosh(im(x))
Note that this is just a wrapper around ``as_real_imag()``. Most objects
that wish to redefine ``_eval_expand_complex()`` should consider
redefining ``as_real_imag()`` instead.
func
----
Expand other functions.
>>> from sympy import gamma
>>> gamma(x + 1).expand(func=True)
x*gamma(x)
trig
----
Do trigonometric expansions.
>>> cos(x + y).expand(trig=True)
-sin(x)*sin(y) + cos(x)*cos(y)
>>> sin(2*x).expand(trig=True)
2*sin(x)*cos(x)
Note that the forms of ``sin(n*x)`` and ``cos(n*x)`` in terms of ``sin(x)``
and ``cos(x)`` are not unique, due to the identity `\sin^2(x) + \cos^2(x)
= 1`. The current implementation uses the form obtained from Chebyshev
polynomials, but this may change. See `this MathWorld article
<http://mathworld.wolfram.com/Multiple-AngleFormulas.html>`_ for more
information.
Notes
=====
- You can shut off unwanted methods::
>>> (exp(x + y)*(x + y)).expand()
x*exp(x)*exp(y) + y*exp(x)*exp(y)
>>> (exp(x + y)*(x + y)).expand(power_exp=False)
x*exp(x + y) + y*exp(x + y)
>>> (exp(x + y)*(x + y)).expand(mul=False)
(x + y)*exp(x)*exp(y)
- Use deep=False to only expand on the top level::
>>> exp(x + exp(x + y)).expand()
exp(x)*exp(exp(x)*exp(y))
>>> exp(x + exp(x + y)).expand(deep=False)
exp(x)*exp(exp(x + y))
- Hints are applied in an arbitrary, but consistent order (in the current
implementation, they are applied in alphabetical order, except
multinomial comes before mul, but this may change). Because of this,
some hints may prevent expansion by other hints if they are applied
first. For example, ``mul`` may distribute multiplications and prevent
``log`` and ``power_base`` from expanding them. Also, if ``mul`` is
      applied before ``multinomial``, the expression might not be fully
distributed. The solution is to use the various ``expand_hint`` helper
functions or to use ``hint=False`` to this function to finely control
which hints are applied. Here are some examples::
>>> from sympy import expand, expand_mul, expand_power_base
>>> x, y, z = symbols('x,y,z', positive=True)
>>> expand(log(x*(y + z)))
log(x) + log(y + z)
Here, we see that ``log`` was applied before ``mul``. To get the mul
expanded form, either of the following will work::
>>> expand_mul(log(x*(y + z)))
log(x*y + x*z)
>>> expand(log(x*(y + z)), log=False)
log(x*y + x*z)
A similar thing can happen with the ``power_base`` hint::
>>> expand((x*(y + z))**x)
(x*y + x*z)**x
To get the ``power_base`` expanded form, either of the following will
work::
>>> expand((x*(y + z))**x, mul=False)
x**x*(y + z)**x
>>> expand_power_base((x*(y + z))**x)
x**x*(y + z)**x
>>> expand((x + y)*y/x)
y + y**2/x
The parts of a rational expression can be targeted::
>>> expand((x + y)*y/x/(x + 1), frac=True)
(x*y + y**2)/(x**2 + x)
>>> expand((x + y)*y/x/(x + 1), numer=True)
(x*y + y**2)/(x*(x + 1))
>>> expand((x + y)*y/x/(x + 1), denom=True)
y*(x + y)/(x**2 + x)
- The ``modulus`` meta-hint can be used to reduce the coefficients of an
expression post-expansion::
>>> expand((3*x + 1)**2)
9*x**2 + 6*x + 1
>>> expand((3*x + 1)**2, modulus=5)
4*x**2 + x + 1
- Either ``expand()`` the function or ``.expand()`` the method can be
used. Both are equivalent::
>>> expand((x + 1)**2)
x**2 + 2*x + 1
>>> ((x + 1)**2).expand()
x**2 + 2*x + 1
API
===
Objects can define their own expand hints by defining
``_eval_expand_hint()``. The function should take the form::
def _eval_expand_hint(self, **hints):
# Only apply the method to the top-level expression
...
See also the example below. Objects should define ``_eval_expand_hint()``
methods only if ``hint`` applies to that specific object. The generic
``_eval_expand_hint()`` method defined in Expr will handle the no-op case.
Each hint should be responsible for expanding that hint only.
Furthermore, the expansion should be applied to the top-level expression
only. ``expand()`` takes care of the recursion that happens when
``deep=True``.
You should only call ``_eval_expand_hint()`` methods directly if you are
100% sure that the object has the method, as otherwise you are liable to
get unexpected ``AttributeError``s. Note, again, that you do not need to
recursively apply the hint to args of your object: this is handled
automatically by ``expand()``. ``_eval_expand_hint()`` should
generally not be used at all outside of an ``_eval_expand_hint()`` method.
If you want to apply a specific expansion from within another method, use
the public ``expand()`` function, method, or ``expand_hint()`` functions.
In order for expand to work, objects must be rebuildable by their args,
i.e., ``obj.func(*obj.args) == obj`` must hold.
Expand methods are passed ``**hints`` so that expand hints may use
'metahints'--hints that control how different expand methods are applied.
For example, the ``force=True`` hint described above that causes
``expand(log=True)`` to ignore assumptions is such a metahint. The
``deep`` meta-hint is handled exclusively by ``expand()`` and is not
passed to ``_eval_expand_hint()`` methods.
Note that expansion hints should generally be methods that perform some
kind of 'expansion'. For hints that simply rewrite an expression, use the
.rewrite() API.
Examples
========
>>> from sympy import Expr, sympify
>>> class MyClass(Expr):
... def __new__(cls, *args):
... args = sympify(args)
... return Expr.__new__(cls, *args)
...
... def _eval_expand_double(self, *, force=False, **hints):
... '''
... Doubles the args of MyClass.
...
    ...         If there are more than four args, doubling is not performed,
... unless force=True is also used (False by default).
... '''
... if not force and len(self.args) > 4:
... return self
... return self.func(*(self.args + self.args))
...
>>> a = MyClass(1, 2, MyClass(3, 4))
>>> a
MyClass(1, 2, MyClass(3, 4))
>>> a.expand(double=True)
MyClass(1, 2, MyClass(3, 4, 3, 4), 1, 2, MyClass(3, 4, 3, 4))
>>> a.expand(double=True, deep=False)
MyClass(1, 2, MyClass(3, 4), 1, 2, MyClass(3, 4))
>>> b = MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True)
MyClass(1, 2, 3, 4, 5)
>>> b.expand(double=True, force=True)
MyClass(1, 2, 3, 4, 5, 1, 2, 3, 4, 5)
See Also
========
expand_log, expand_mul, expand_multinomial, expand_complex, expand_trig,
expand_power_base, expand_power_exp, expand_func, sympy.simplify.hyperexpand.hyperexpand
"""
# don't modify this; modify the Expr.expand method
hints['power_base'] = power_base
hints['power_exp'] = power_exp
hints['mul'] = mul
hints['log'] = log
hints['multinomial'] = multinomial
hints['basic'] = basic
return sympify(e).expand(deep=deep, modulus=modulus, **hints)
# This is a special application of two hints
def _mexpand(expr, recursive=False):
# expand multinomials and then expand products; this may not always
# be sufficient to give a fully expanded expression (see
# test_issue_8247_8354 in test_arit)
if expr is None:
return
was = None
while was != expr:
was, expr = expr, expand_mul(expand_multinomial(expr))
if not recursive:
break
return expr
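# Illustrative sketch: _mexpand interleaves multinomial and mul expansion
# (repeatedly, when recursive=True) until the expression stops changing, e.g.
#
#     >>> from sympy.abc import x, y
#     >>> _mexpand((x + y)**2)
#     x**2 + 2*x*y + y**2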
# These are simple wrappers around single hints.
def expand_mul(expr, deep=True):
"""
Wrapper around expand that only uses the mul hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import symbols, expand_mul, exp, log
>>> x, y = symbols('x,y', positive=True)
>>> expand_mul(exp(x+y)*(x+y)*log(x*y**2))
x*exp(x + y)*log(x*y**2) + y*exp(x + y)*log(x*y**2)
"""
return sympify(expr).expand(deep=deep, mul=True, power_exp=False,
power_base=False, basic=False, multinomial=False, log=False)
def expand_multinomial(expr, deep=True):
"""
Wrapper around expand that only uses the multinomial hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import symbols, expand_multinomial, exp
>>> x, y = symbols('x y', positive=True)
>>> expand_multinomial((x + exp(x + 1))**2)
x**2 + 2*x*exp(x + 1) + exp(2*x + 2)
"""
return sympify(expr).expand(deep=deep, mul=False, power_exp=False,
power_base=False, basic=False, multinomial=True, log=False)
def expand_log(expr, deep=True, force=False, factor=False):
"""
Wrapper around expand that only uses the log hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import symbols, expand_log, exp, log
>>> x, y = symbols('x,y', positive=True)
>>> expand_log(exp(x+y)*(x+y)*log(x*y**2))
(x + y)*(log(x) + 2*log(y))*exp(x + y)
"""
from sympy.functions.elementary.exponential import log
if factor is False:
def _handle(x):
x1 = expand_mul(expand_log(x, deep=deep, force=force, factor=True))
if x1.count(log) <= x.count(log):
return x1
return x
expr = expr.replace(
lambda x: x.is_Mul and all(any(isinstance(i, log) and i.args[0].is_Rational
for i in Mul.make_args(j)) for j in x.as_numer_denom()),
_handle)
return sympify(expr).expand(deep=deep, log=True, mul=False,
power_exp=False, power_base=False, multinomial=False,
basic=False, force=force, factor=factor)
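# Illustrative sketch (assumes a and b are plain Symbols without positivity
# assumptions): the expansion is blocked unless force=True overrides the
# assumption checks.
#     >>> from sympy import symbols, log, expand_log
#     >>> a, b = symbols('a b')
#     >>> expand_log(log(a*b))
#     log(a*b)
#     >>> expand_log(log(a*b), force=True)
#     log(a) + log(b)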
def expand_func(expr, deep=True):
"""
Wrapper around expand that only uses the func hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import expand_func, gamma
>>> from sympy.abc import x
>>> expand_func(gamma(x + 2))
x*(x + 1)*gamma(x)
"""
return sympify(expr).expand(deep=deep, func=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_trig(expr, deep=True):
"""
Wrapper around expand that only uses the trig hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import expand_trig, sin
>>> from sympy.abc import x, y
>>> expand_trig(sin(x+y)*(x+y))
(x + y)*(sin(x)*cos(y) + sin(y)*cos(x))
"""
return sympify(expr).expand(deep=deep, trig=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_complex(expr, deep=True):
"""
Wrapper around expand that only uses the complex hint. See the expand
docstring for more information.
Examples
========
>>> from sympy import expand_complex, exp, sqrt, I
>>> from sympy.abc import z
>>> expand_complex(exp(z))
I*exp(re(z))*sin(im(z)) + exp(re(z))*cos(im(z))
>>> expand_complex(sqrt(I))
sqrt(2)/2 + sqrt(2)*I/2
See Also
========
sympy.core.expr.Expr.as_real_imag
"""
return sympify(expr).expand(deep=deep, complex=True, basic=False,
log=False, mul=False, power_exp=False, power_base=False, multinomial=False)
def expand_power_base(expr, deep=True, force=False):
"""
Wrapper around expand that only uses the power_base hint.
A wrapper to expand(power_base=True) which separates a power with a base
that is a Mul into a product of powers, without performing any other
expansions, provided that assumptions about the power's base and exponent
allow.
deep=False (default is True) will only apply to the top-level expression.
force=True (default is False) will cause the expansion to ignore
assumptions about the base and exponent. When False, the expansion will
only happen if the base is non-negative or the exponent is an integer.
>>> from sympy.abc import x, y, z
>>> from sympy import expand_power_base, sin, cos, exp
>>> (x*y)**2
x**2*y**2
>>> (2*x)**y
(2*x)**y
>>> expand_power_base(_)
2**y*x**y
>>> expand_power_base((x*y)**z)
(x*y)**z
>>> expand_power_base((x*y)**z, force=True)
x**z*y**z
>>> expand_power_base(sin((x*y)**z), deep=False)
sin((x*y)**z)
>>> expand_power_base(sin((x*y)**z), force=True)
sin(x**z*y**z)
>>> expand_power_base((2*sin(x))**y + (2*cos(x))**y)
2**y*sin(x)**y + 2**y*cos(x)**y
>>> expand_power_base((2*exp(y))**x)
2**x*exp(y)**x
>>> expand_power_base((2*cos(x))**y)
2**y*cos(x)**y
Notice that sums are left untouched. If this is not the desired behavior,
apply full ``expand()`` to the expression:
>>> expand_power_base(((x+y)*z)**2)
z**2*(x + y)**2
>>> (((x+y)*z)**2).expand()
x**2*z**2 + 2*x*y*z**2 + y**2*z**2
>>> expand_power_base((2*y)**(1+z))
2**(z + 1)*y**(z + 1)
>>> ((2*y)**(1+z)).expand()
2*2**z*y*y**z
See Also
========
expand
"""
return sympify(expr).expand(deep=deep, log=False, mul=False,
power_exp=False, power_base=True, multinomial=False,
basic=False, force=force)
def expand_power_exp(expr, deep=True):
"""
Wrapper around expand that only uses the power_exp hint.
See the expand docstring for more information.
Examples
========
>>> from sympy import expand_power_exp
>>> from sympy.abc import x, y
>>> expand_power_exp(x**(y + 2))
x**2*x**y
"""
return sympify(expr).expand(deep=deep, complex=False, basic=False,
log=False, mul=False, power_exp=True, power_base=False, multinomial=False)
def count_ops(expr, visual=False):
"""
Return a representation (integer or expression) of the operations in expr.
Parameters
==========
expr : Expr
If expr is an iterable, the sum of the op counts of the
items will be returned.
visual : bool, optional
If ``False`` (default) then the sum of the coefficients of the
visual expression will be returned.
If ``True`` then the number of each type of operation is shown
with the core class types (or their virtual equivalent) multiplied by the
number of times they occur.
Examples
========
>>> from sympy.abc import a, b, x, y
>>> from sympy import sin, count_ops
Although there is not a SUB object, minus signs are interpreted as
either negations or subtractions:
>>> (x - y).count_ops(visual=True)
SUB
>>> (-x).count_ops(visual=True)
NEG
Here, there are two Adds and a Pow:
>>> (1 + a + b**2).count_ops(visual=True)
2*ADD + POW
In the following, an Add, Mul, Pow and two functions:
>>> (sin(x)*x + sin(x)**2).count_ops(visual=True)
ADD + MUL + POW + 2*SIN
for a total of 5:
>>> (sin(x)*x + sin(x)**2).count_ops(visual=False)
5
Note that "what you type" is not always what you get. The expression
1/x/y is translated by sympy into 1/(x*y) so it gives a DIV and MUL rather
than two DIVs:
>>> (1/x/y).count_ops(visual=True)
DIV + MUL
The visual option can be used to demonstrate the difference in
operations for expressions in different forms. Here, the Horner
representation is compared with the expanded form of a polynomial:
>>> eq=x*(1 + x*(2 + x*(3 + x)))
>>> count_ops(eq.expand(), visual=True) - count_ops(eq, visual=True)
-MUL + 3*POW
The count_ops function also handles iterables:
>>> count_ops([x, sin(x), None, True, x + 2], visual=False)
2
>>> count_ops([x, sin(x), None, True, x + 2], visual=True)
ADD + SIN
>>> count_ops({x: sin(x), x + 2: y + 1}, visual=True)
2*ADD + SIN
"""
from .relational import Relational
from sympy.concrete.summations import Sum
from sympy.integrals.integrals import Integral
from sympy.logic.boolalg import BooleanFunction
from sympy.simplify.radsimp import fraction
expr = sympify(expr)
if isinstance(expr, Expr) and not expr.is_Relational:
ops = []
args = [expr]
NEG = Symbol('NEG')
DIV = Symbol('DIV')
SUB = Symbol('SUB')
ADD = Symbol('ADD')
EXP = Symbol('EXP')
while args:
a = args.pop()
# if the following fails because the object is
# not Basic type, then the object should be fixed
# since it is the intention that all args of Basic
# should themselves be Basic
if a.is_Rational:
#-1/3 = NEG + DIV
if a is not S.One:
if a.p < 0:
ops.append(NEG)
if a.q != 1:
ops.append(DIV)
continue
elif a.is_Mul or a.is_MatMul:
if _coeff_isneg(a):
ops.append(NEG)
if a.args[0] is S.NegativeOne:
a = a.as_two_terms()[1]
else:
a = -a
n, d = fraction(a)
if n.is_Integer:
ops.append(DIV)
if n < 0:
ops.append(NEG)
args.append(d)
continue # won't be -Mul but could be Add
elif d is not S.One:
if not d.is_Integer:
args.append(d)
ops.append(DIV)
args.append(n)
continue # could be -Mul
elif a.is_Add or a.is_MatAdd:
aargs = list(a.args)
negs = 0
for i, ai in enumerate(aargs):
if _coeff_isneg(ai):
negs += 1
args.append(-ai)
if i > 0:
ops.append(SUB)
else:
args.append(ai)
if i > 0:
ops.append(ADD)
if negs == len(aargs): # -x - y = NEG + SUB
ops.append(NEG)
elif _coeff_isneg(aargs[0]): # -x + y = SUB, but already recorded ADD
ops.append(SUB - ADD)
continue
if a.is_Pow and a.exp is S.NegativeOne:
ops.append(DIV)
args.append(a.base) # won't be -Mul but could be Add
continue
if a == S.Exp1:
ops.append(EXP)
continue
if a.is_Pow and a.base == S.Exp1:
ops.append(EXP)
args.append(a.exp)
continue
if a.is_Mul or isinstance(a, LatticeOp):
o = Symbol(a.func.__name__.upper())
# count the args
ops.append(o*(len(a.args) - 1))
elif a.args and (
a.is_Pow or
a.is_Function or
isinstance(a, Derivative) or
isinstance(a, Integral) or
isinstance(a, Sum)):
# if it's not in the list above we don't
# consider a.func something to count, e.g.
# Tuple, MatrixSymbol, etc...
if isinstance(a.func, UndefinedFunction):
o = Symbol("FUNC_" + a.func.__name__.upper())
else:
o = Symbol(a.func.__name__.upper())
ops.append(o)
if not a.is_Symbol:
args.extend(a.args)
elif isinstance(expr, Dict):
ops = [count_ops(k, visual=visual) +
count_ops(v, visual=visual) for k, v in expr.items()]
elif iterable(expr):
ops = [count_ops(i, visual=visual) for i in expr]
elif isinstance(expr, (Relational, BooleanFunction)):
ops = []
for arg in expr.args:
ops.append(count_ops(arg, visual=True))
o = Symbol(func_name(expr, short=True).upper())
ops.append(o)
elif not isinstance(expr, Basic):
ops = []
    else:  # it's a Basic object but not an Expr
if not isinstance(expr, Basic):
raise TypeError("Invalid type of expr")
else:
ops = []
args = [expr]
while args:
a = args.pop()
if a.args:
o = Symbol(type(a).__name__.upper())
if a.is_Boolean:
ops.append(o*(len(a.args)-1))
else:
ops.append(o)
args.extend(a.args)
if not ops:
if visual:
return S.Zero
return 0
ops = Add(*ops)
if visual:
return ops
if ops.is_Number:
return int(ops)
return sum(int((a.args or [1])[0]) for a in Add.make_args(ops))
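# Illustrative sketch: the visual result is an ordinary SymPy expression, so
# custom operation weights can be applied by substitution.
#     >>> from sympy import count_ops, Symbol, sin
#     >>> from sympy.abc import x
#     >>> count_ops(sin(x) + x, visual=True).subs({Symbol('ADD'): 1, Symbol('SIN'): 5})
#     6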
def nfloat(expr, n=15, exponent=False, dkeys=False):
"""Make all Rationals in expr Floats except those in exponents
    (unless the exponent flag is set to True) and those in undefined
functions. When processing dictionaries, do not modify the keys
unless ``dkeys=True``.
Examples
========
>>> from sympy import nfloat, cos, pi, sqrt
>>> from sympy.abc import x, y
>>> nfloat(x**4 + x/2 + cos(pi/3) + 1 + sqrt(y))
x**4 + 0.5*x + sqrt(y) + 1.5
>>> nfloat(x**4 + sqrt(y), exponent=True)
x**4.0 + y**0.5
Container types are not modified:
>>> type(nfloat((1, 2))) is tuple
True
"""
from sympy.matrices.matrices import MatrixBase
kw = dict(n=n, exponent=exponent, dkeys=dkeys)
if isinstance(expr, MatrixBase):
return expr.applyfunc(lambda e: nfloat(e, **kw))
# handling of iterable containers
if iterable(expr, exclude=str):
if isinstance(expr, (dict, Dict)):
if dkeys:
args = [tuple(map(lambda i: nfloat(i, **kw), a))
for a in expr.items()]
else:
args = [(k, nfloat(v, **kw)) for k, v in expr.items()]
if isinstance(expr, dict):
return type(expr)(args)
else:
return expr.func(*args)
elif isinstance(expr, Basic):
return expr.func(*[nfloat(a, **kw) for a in expr.args])
return type(expr)([nfloat(a, **kw) for a in expr])
rv = sympify(expr)
if rv.is_Number:
return Float(rv, n)
elif rv.is_number:
# evalf doesn't always set the precision
rv = rv.n(n)
if rv.is_Number:
rv = Float(rv.n(n), n)
else:
pass # pure_complex(rv) is likely True
return rv
elif rv.is_Atom:
return rv
elif rv.is_Relational:
args_nfloat = (nfloat(arg, **kw) for arg in rv.args)
return rv.func(*args_nfloat)
# watch out for RootOf instances that don't like to have
# their exponents replaced with Dummies and also sometimes have
# problems with evaluating at low precision (issue 6393)
from sympy.polys.rootoftools import RootOf
rv = rv.xreplace({ro: ro.n(n) for ro in rv.atoms(RootOf)})
from .power import Pow
if not exponent:
reps = [(p, Pow(p.base, Dummy())) for p in rv.atoms(Pow)]
rv = rv.xreplace(dict(reps))
rv = rv.n(n)
if not exponent:
rv = rv.xreplace({d.exp: p.exp for p, d in reps})
else:
        # Pow._eval_evalf special-cases Integer exponents, so if the
        # exponent is supposed to be handled we have to do it here
rv = rv.xreplace(Transform(
lambda x: Pow(x.base, Float(x.exp, n)),
lambda x: x.is_Pow and x.exp.is_Integer))
return rv.xreplace(Transform(
lambda x: x.func(*nfloat(x.args, n, exponent)),
lambda x: isinstance(x, Function) and not isinstance(x, AppliedUndef)))
from .symbol import Dummy, Symbol
| 34.17548
| 165
| 0.560743
|
8cbf699e0d9a284d81f1263a05a2964f1f06d7e4
| 14,438
|
py
|
Python
|
python/ray/serve/controller.py
|
jamesanto/ray
|
f640758c852b9eece756d3d2a562af78273a9f36
|
[
"Apache-2.0"
] | 1
|
2020-05-30T21:01:17.000Z
|
2020-05-30T21:01:17.000Z
|
python/ray/serve/controller.py
|
jamesanto/ray
|
f640758c852b9eece756d3d2a562af78273a9f36
|
[
"Apache-2.0"
] | 66
|
2019-12-10T22:30:09.000Z
|
2022-03-05T08:04:38.000Z
|
python/ray/serve/controller.py
|
majacQ/ray
|
bc08c6cdcc7ddf4da751ca2a972defd3db509061
|
[
"Apache-2.0"
] | 1
|
2020-01-16T20:52:25.000Z
|
2020-01-16T20:52:25.000Z
|
import asyncio
from collections import defaultdict
import inspect
from typing import Dict, Any, Optional, Set, Tuple
import ray
from ray.actor import ActorHandle
from ray.serve.async_goal_manager import AsyncGoalManager
from ray.serve.backend_state import BackendState
from ray.serve.backend_worker import create_backend_replica
from ray.serve.common import (
BackendInfo,
BackendTag,
EndpointInfo,
EndpointTag,
GoalId,
NodeId,
ReplicaTag,
TrafficPolicy,
)
from ray.serve.config import BackendConfig, HTTPOptions, ReplicaConfig
from ray.serve.constants import (
ALL_HTTP_METHODS,
RESERVED_VERSION_TAG,
)
from ray.serve.endpoint_state import EndpointState
from ray.serve.http_state import HTTPState
from ray.serve.kv_store import RayInternalKVStore
from ray.serve.long_poll import LongPollHost
from ray.serve.utils import logger
# Used for testing purposes only. If this is set, the controller will crash
# after writing each checkpoint with the specified probability.
_CRASH_AFTER_CHECKPOINT_PROBABILITY = 0
CHECKPOINT_KEY = "serve-controller-checkpoint"
# How often to call the control loop on the controller.
CONTROL_LOOP_PERIOD_S = 0.1
@ray.remote(num_cpus=0)
class ServeController:
"""Responsible for managing the state of the serving system.
The controller implements fault tolerance by persisting its state in
a new checkpoint each time a state change is made. If the actor crashes,
the latest checkpoint is loaded and the state is recovered. Checkpoints
are written/read using a provided KV-store interface.
All hard state in the system is maintained by this actor and persisted via
these checkpoints. Soft state required by other components is fetched by
those actors from this actor on startup and updates are pushed out from
this actor.
All other actors started by the controller are named, detached actors
so they will not fate share with the controller if it crashes.
The following guarantees are provided for state-changing calls to the
controller:
- If the call succeeds, the change was made and will be reflected in
the system even if the controller or other actors die unexpectedly.
- If the call fails, the change may have been made but isn't guaranteed
to have been. The client should retry in this case. Note that this
requires all implementations here to be idempotent.
"""
async def __init__(self,
controller_name: str,
http_config: HTTPOptions,
detached: bool = False):
# Used to read/write checkpoints.
self.kv_store = RayInternalKVStore(namespace=controller_name)
# Dictionary of backend_tag -> proxy_name -> most recent queue length.
self.backend_stats = defaultdict(lambda: defaultdict(dict))
# Used to ensure that only a single state-changing operation happens
# at any given time.
self.write_lock = asyncio.Lock()
self.long_poll_host = LongPollHost()
self.goal_manager = AsyncGoalManager()
self.http_state = HTTPState(controller_name, detached, http_config)
self.endpoint_state = EndpointState(self.kv_store, self.long_poll_host)
self.backend_state = BackendState(controller_name, detached,
self.kv_store, self.long_poll_host,
self.goal_manager)
asyncio.get_event_loop().create_task(self.run_control_loop())
async def wait_for_goal(self, goal_id: GoalId) -> None:
await self.goal_manager.wait_for_goal(goal_id)
async def _num_pending_goals(self) -> int:
return self.goal_manager.num_pending_goals()
    async def listen_for_change(self, keys_to_snapshot_ids: Dict[str, int]):
        """Proxy long poll client's listen request.
Args:
keys_to_snapshot_ids (Dict[str, int]): Snapshot IDs are used to
determine whether or not the host should immediately return the
data or wait for the value to be changed.
"""
return await (
self.long_poll_host.listen_for_change(keys_to_snapshot_ids))
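    # Illustrative sketch (the handle name and key string are hypothetical): a
    # long-poll client typically loops over this call through the actor handle,
    # echoing the snapshot ids it has already seen so the call only returns when
    # something changes:
    #     updates = ray.get(
    #         controller.listen_for_change.remote({"some_key": last_snapshot_id}))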
def get_http_proxies(self) -> Dict[NodeId, ActorHandle]:
"""Returns a dictionary of node ID to http_proxy actor handles."""
return self.http_state.get_http_proxy_handles()
async def run_control_loop(self) -> None:
while True:
async with self.write_lock:
try:
self.http_state.update()
except Exception as e:
logger.error(f"Exception updating HTTP state: {e}")
try:
self.backend_state.update()
except Exception as e:
logger.error(f"Exception updating backend state: {e}")
await asyncio.sleep(CONTROL_LOOP_PERIOD_S)
def _all_replica_handles(
self) -> Dict[BackendTag, Dict[ReplicaTag, ActorHandle]]:
"""Used for testing."""
return self.backend_state.get_running_replica_handles()
def get_all_backends(self) -> Dict[BackendTag, BackendConfig]:
"""Returns a dictionary of backend tag to backend config."""
return self.backend_state.get_backend_configs()
    def get_all_endpoints(self) -> Dict[EndpointTag, Dict[BackendTag, Any]]:
        """Returns a dictionary of endpoint tag to endpoint info."""
return self.endpoint_state.get_endpoints()
def _validate_traffic_dict(self, traffic_dict: Dict[str, float]):
for backend in traffic_dict:
if self.backend_state.get_backend(backend) is None:
raise ValueError(
"Attempted to assign traffic to a backend '{}' that "
"is not registered.".format(backend))
async def set_traffic(self, endpoint: str,
traffic_dict: Dict[str, float]) -> None:
"""Sets the traffic policy for the specified endpoint."""
async with self.write_lock:
self._validate_traffic_dict(traffic_dict)
logger.info("Setting traffic for endpoint "
f"'{endpoint}' to '{traffic_dict}'.")
self.endpoint_state.set_traffic_policy(endpoint,
TrafficPolicy(traffic_dict))
async def shadow_traffic(self, endpoint_name: str, backend_tag: BackendTag,
proportion: float) -> None:
"""Shadow traffic from the endpoint to the backend."""
async with self.write_lock:
if self.backend_state.get_backend(backend_tag) is None:
raise ValueError(
"Attempted to shadow traffic to a backend '{}' that "
"is not registered.".format(backend_tag))
logger.info(
"Shadowing '{}' of traffic to endpoint '{}' to backend '{}'.".
format(proportion, endpoint_name, backend_tag))
self.endpoint_state.shadow_traffic(endpoint_name, backend_tag,
proportion)
async def create_endpoint(
self,
endpoint: str,
traffic_dict: Dict[str, float],
route: Optional[str],
methods: Set[str],
) -> None:
"""Create a new endpoint with the specified route and methods.
If the route is None, this is a "headless" endpoint that will not
be exposed over HTTP and can only be accessed via a handle.
"""
async with self.write_lock:
self._validate_traffic_dict(traffic_dict)
logger.info(
"Registering route '{}' to endpoint '{}' with methods '{}'.".
format(route, endpoint, methods))
self.endpoint_state.create_endpoint(
endpoint, EndpointInfo(methods, route=route),
TrafficPolicy(traffic_dict))
# TODO(simon): Use GoalID mechanism for this so client can check for
# goal id and http_state complete the goal id.
await self.http_state.ensure_http_route_exists(endpoint, timeout_s=30)
async def delete_endpoint(self, endpoint: str) -> None:
"""Delete the specified endpoint.
Does not modify any corresponding backends.
"""
logger.info("Deleting endpoint '{}'".format(endpoint))
async with self.write_lock:
self.endpoint_state.delete_endpoint(endpoint)
async def create_backend(
self, backend_tag: BackendTag, backend_config: BackendConfig,
replica_config: ReplicaConfig) -> Optional[GoalId]:
"""Register a new backend under the specified tag."""
async with self.write_lock:
backend_info = BackendInfo(
worker_class=create_backend_replica(
replica_config.backend_def),
version=RESERVED_VERSION_TAG,
backend_config=backend_config,
replica_config=replica_config)
return self.backend_state.deploy_backend(backend_tag, backend_info)
async def delete_backend(self,
backend_tag: BackendTag,
force_kill: bool = False) -> Optional[GoalId]:
async with self.write_lock:
# Check that the specified backend isn't used by any endpoints.
for endpoint, info in self.endpoint_state.get_endpoints().items():
if (backend_tag in info["traffic"]
or backend_tag in info["shadows"]):
raise ValueError("Backend '{}' is used by endpoint '{}' "
"and cannot be deleted. Please remove "
"the backend from all endpoints and try "
"again.".format(backend_tag, endpoint))
return self.backend_state.delete_backend(backend_tag, force_kill)
async def update_backend_config(self, backend_tag: BackendTag,
config_options: BackendConfig) -> GoalId:
"""Set the config for the specified backend."""
async with self.write_lock:
existing_info = self.backend_state.get_backend(backend_tag)
if existing_info is None:
raise ValueError(f"Backend {backend_tag} is not registered.")
backend_info = BackendInfo(
worker_class=existing_info.worker_class,
version=existing_info.version,
backend_config=existing_info.backend_config.copy(
update=config_options.dict(exclude_unset=True)),
replica_config=existing_info.replica_config)
return self.backend_state.deploy_backend(backend_tag, backend_info)
def get_backend_config(self, backend_tag: BackendTag) -> BackendConfig:
"""Get the current config for the specified backend."""
if self.backend_state.get_backend(backend_tag) is None:
raise ValueError(f"Backend {backend_tag} is not registered.")
return self.backend_state.get_backend(backend_tag).backend_config
def get_http_config(self):
"""Return the HTTP proxy configuration."""
return self.http_state.get_config()
async def shutdown(self) -> None:
"""Shuts down the serve instance completely."""
async with self.write_lock:
for proxy in self.http_state.get_http_proxy_handles().values():
ray.kill(proxy, no_restart=True)
for replica_dict in self.backend_state.get_running_replica_handles(
).values():
for replica in replica_dict.values():
ray.kill(replica, no_restart=True)
self.kv_store.delete(CHECKPOINT_KEY)
async def deploy(self, name: str, backend_config: BackendConfig,
replica_config: ReplicaConfig, version: Optional[str],
route_prefix: Optional[str]) -> Optional[GoalId]:
if route_prefix is not None:
assert route_prefix.startswith("/")
python_methods = []
if inspect.isclass(replica_config.backend_def):
for method_name, _ in inspect.getmembers(
replica_config.backend_def, inspect.isfunction):
python_methods.append(method_name)
async with self.write_lock:
backend_info = BackendInfo(
worker_class=create_backend_replica(
replica_config.backend_def),
version=version,
backend_config=backend_config,
replica_config=replica_config)
goal_id = self.backend_state.deploy_backend(name, backend_info)
endpoint_info = EndpointInfo(
ALL_HTTP_METHODS,
route=route_prefix,
python_methods=python_methods,
legacy=False)
self.endpoint_state.update_endpoint(name, endpoint_info,
TrafficPolicy({
name: 1.0
}))
return goal_id
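    # Illustrative sketch (the handle and argument names are hypothetical): a
    # client would typically wait on the returned goal to know the deploy finished:
    #     goal_id = ray.get(controller.deploy.remote(
    #         name, backend_config, replica_config, version, route_prefix))
    #     ray.get(controller.wait_for_goal.remote(goal_id))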
def delete_deployment(self, name: str) -> Optional[GoalId]:
self.endpoint_state.delete_endpoint(name)
return self.backend_state.delete_backend(name, force_kill=False)
def get_deployment_info(self, name: str) -> Tuple[BackendInfo, str]:
"""Get the current information about a deployment.
Args:
name(str): the name of the deployment.
Returns:
(BackendInfo, route)
Raises:
KeyError if the deployment doesn't exist.
"""
backend_info: BackendInfo = self.backend_state.get_backend(name)
if backend_info is None:
raise KeyError(f"Deployment {name} does not exist.")
route = self.endpoint_state.get_endpoint_route(name)
return backend_info, route
def list_deployments(self) -> Dict[str, Tuple[BackendInfo, str]]:
"""Gets the current information about all active deployments."""
return {
name: (self.backend_state.get_backend(name),
self.endpoint_state.get_endpoint_route(name))
for name in self.backend_state.get_backend_configs()
}
| 42.715976
| 79
| 0.632013
|
ef06c8d7fd0de8d6dbc5e1842e423c70b5f84850
| 1,836
|
py
|
Python
|
extensions/interactions/NumericExpressionInput/NumericExpressionInput.py
|
Tim810306/oppia
|
6f90044d12dbe0979c999265cbe46f267c4c592d
|
[
"Apache-2.0"
] | 4
|
2021-09-16T16:46:53.000Z
|
2022-02-06T13:00:14.000Z
|
extensions/interactions/NumericExpressionInput/NumericExpressionInput.py
|
Tim810306/oppia
|
6f90044d12dbe0979c999265cbe46f267c4c592d
|
[
"Apache-2.0"
] | 80
|
2020-10-31T09:14:46.000Z
|
2021-01-12T23:38:15.000Z
|
extensions/interactions/NumericExpressionInput/NumericExpressionInput.py
|
Tim810306/oppia
|
6f90044d12dbe0979c999265cbe46f267c4c592d
|
[
"Apache-2.0"
] | 1
|
2017-12-06T19:41:49.000Z
|
2017-12-06T19:41:49.000Z
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python configuration for NumericExpressionInput interaction."""
from __future__ import absolute_import
from __future__ import unicode_literals
from extensions.interactions import base
class NumericExpressionInput(base.BaseInteraction):
"""Interaction for numeric expression input."""
name = 'Numeric Expression Input'
description = 'Allows learners to enter numeric expressions.'
display_mode = base.DISPLAY_MODE_INLINE
is_trainable = False
_dependency_ids = ['guppy', 'nerdamer']
answer_type = 'NumericExpression'
can_have_solution = True
show_generic_submit_button = True
_customization_arg_specs = [{
'name': 'placeholder',
'description': 'Placeholder text',
'schema': {
'type': 'custom',
'obj_type': 'SubtitledUnicode'
},
'default_value': {
'content_id': None,
'unicode_str': 'Type an expression here, using only numbers.'
}
}, {
'name': 'useFractionForDivision',
'description': (
'Represent division using fractions (rather than ÷).'),
'schema': {
'type': 'bool'
},
'default_value': False
}]
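# Illustrative sketch (hypothetical option, not part of this interaction): each
# entry of _customization_arg_specs follows the same shape, so an additional
# boolean option would look like:
#     {
#         'name': 'allowScientificNotation',
#         'description': 'Allow answers in scientific notation.',
#         'schema': {'type': 'bool'},
#         'default_value': False,
#     }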
| 32.210526
| 74
| 0.674292
|
cf96d0a4bf8e3e165a3e7d2480225748326b19b8
| 52
|
py
|
Python
|
dirchromatic/logger/__init__.py
|
karlding/dirchromatic
|
4b375d65cc7b233b105e574d9f4604398af10b0d
|
[
"MIT"
] | 6
|
2017-09-28T16:29:48.000Z
|
2020-05-19T17:59:36.000Z
|
dirchromatic/logger/__init__.py
|
karlding/dirchromatic
|
4b375d65cc7b233b105e574d9f4604398af10b0d
|
[
"MIT"
] | null | null | null |
dirchromatic/logger/__init__.py
|
karlding/dirchromatic
|
4b375d65cc7b233b105e574d9f4604398af10b0d
|
[
"MIT"
] | 1
|
2020-05-01T20:45:57.000Z
|
2020-05-01T20:45:57.000Z
|
from .logger import Logger
from .level import Level
| 17.333333
| 26
| 0.807692
|
75c74a2b343f5f57adda38c1a13131e625cc987a
| 2,521
|
py
|
Python
|
tests/test_calling_convention_analysis.py
|
MarSoft/angr
|
c46e848d4921dafca8d4c304bfadcad4d043bda9
|
[
"BSD-2-Clause"
] | 2
|
2020-04-29T02:39:42.000Z
|
2020-04-29T08:07:44.000Z
|
tests/test_calling_convention_analysis.py
|
MarSoft/angr
|
c46e848d4921dafca8d4c304bfadcad4d043bda9
|
[
"BSD-2-Clause"
] | 2
|
2018-11-13T16:19:16.000Z
|
2018-12-10T15:45:53.000Z
|
tests/test_calling_convention_analysis.py
|
MarSoft/angr
|
c46e848d4921dafca8d4c304bfadcad4d043bda9
|
[
"BSD-2-Clause"
] | 1
|
2020-11-18T16:39:13.000Z
|
2020-11-18T16:39:13.000Z
|
import logging
import os
import nose.tools
import archinfo
import angr
from angr.calling_conventions import SimStackArg, SimRegArg, SimCCCdecl, SimCCSystemVAMD64
test_location = os.path.join(os.path.dirname(os.path.realpath(str(__file__))), '..', '..',
'binaries',
)
def run_fauxware(arch, function_and_cc_list):
binary_path = os.path.join(test_location, 'tests', arch, 'fauxware')
fauxware = angr.Project(binary_path, auto_load_libs=False)
cfg = fauxware.analyses.CFG()
for func_name, expected_cc in function_and_cc_list:
authenticate = cfg.functions[func_name]
_ = fauxware.analyses.VariableRecoveryFast(authenticate)
cc_analysis = fauxware.analyses.CallingConvention(authenticate)
cc = cc_analysis.cc
nose.tools.assert_equal(cc, expected_cc)
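# Illustrative sketch (the binary path and function name are hypothetical): the
# same analysis chain can be applied to any function in a binary:
#     proj = angr.Project("/path/to/binary", auto_load_libs=False)
#     cfg = proj.analyses.CFG()
#     func = cfg.functions["main"]
#     proj.analyses.VariableRecoveryFast(func)
#     print(proj.analyses.CallingConvention(func).cc)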
def run_cgc(binary_name):
binary_path = os.path.join(test_location, '..', 'binaries-private', 'cgc_qualifier_event', 'cgc', binary_name)
project = angr.Project(binary_path)
categorization = project.analyses.FunctionCategorizationAnalysis()
tag_manager = categorization.function_tag_manager
#print "INPUT:", map(hex, tag_manager.input_functions())
#print "OUTPUT:", map(hex, tag_manager.output_functions())
def test_fauxware():
amd64 = archinfo.arch_from_id('amd64')
args = {
'i386': [
('authenticate', SimCCCdecl(
archinfo.arch_from_id('i386'),
args=[SimStackArg(4, 4), SimStackArg(8, 4)], sp_delta=4
)
),
],
'x86_64': [
('authenticate', SimCCSystemVAMD64(
amd64,
args=[SimRegArg('rdi', 8), SimRegArg('rsi', 8)],
sp_delta=8
)
),
],
}
for arch, lst in args.items():
yield run_fauxware, arch, lst
# def test_cgc():
def disabled_cgc():
# Skip this test since we do not have the binaries-private repo cloned on Travis CI.
binaries = [
'002ba801_01',
'01cf6c01_01',
]
for binary in binaries:
yield run_cgc, binary
def run_all():
logging.getLogger("angr.analyses.variable_recovery.variable_recovery_fast").setLevel(logging.DEBUG)
for args in test_fauxware():
func, args = args[0], args[1:]
func(*args)
#for args in test_cgc():
# func, args = args[0], args[1:]
# func(*args)
if __name__ == "__main__":
run_all()
| 26.260417
| 114
| 0.618405
|
eb2e42d534ab7f9cf90d0a1e1af8b788186d81b0
| 240
|
py
|
Python
|
apps/employee/api/serializers.py
|
LHerdy/People_Manager
|
e35ba2333a26e1cf35b7234af10f3c849eaa0270
|
[
"MIT"
] | null | null | null |
apps/employee/api/serializers.py
|
LHerdy/People_Manager
|
e35ba2333a26e1cf35b7234af10f3c849eaa0270
|
[
"MIT"
] | 1
|
2021-08-15T15:02:10.000Z
|
2021-08-15T15:02:25.000Z
|
apps/employee/api/serializers.py
|
LHerdy/People_Manager
|
e35ba2333a26e1cf35b7234af10f3c849eaa0270
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from apps.employee.models import Employee
class EmployeeSerializer(serializers.ModelSerializer):
class Meta:
model = Employee
fields = ['name', 'departament', 'company', 'user']
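# Illustrative sketch (variable names are hypothetical): typical DRF usage of
# this serializer in a view:
#     serializer = EmployeeSerializer(employee_instance)
#     serializer.data  # {'name': ..., 'departament': ..., 'company': ..., 'user': ...}
#     incoming = EmployeeSerializer(data=request.data)
#     if incoming.is_valid():
#         incoming.save()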
| 24
| 59
| 0.725
|
d9094b140b01769d671ab7396958e4e7d79c5511
| 1,061
|
py
|
Python
|
djangofiles/BlogProject/blog/migrations/0005_auto_20190523_2013.py
|
manvith263/tricalidee
|
69cf66a416be7917eb8cbb3562cff7d5a66df088
|
[
"BSD-3-Clause"
] | 1
|
2021-05-11T01:52:35.000Z
|
2021-05-11T01:52:35.000Z
|
djangofiles/BlogProject/blog/migrations/0005_auto_20190523_2013.py
|
manvith263/tricalidee
|
69cf66a416be7917eb8cbb3562cff7d5a66df088
|
[
"BSD-3-Clause"
] | null | null | null |
djangofiles/BlogProject/blog/migrations/0005_auto_20190523_2013.py
|
manvith263/tricalidee
|
69cf66a416be7917eb8cbb3562cff7d5a66df088
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.2 on 2019-05-23 18:13
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_comment'),
]
operations = [
migrations.RenameField(
model_name='comment',
old_name='text',
new_name='body',
),
migrations.RemoveField(
model_name='comment',
name='approved_comment',
),
migrations.RemoveField(
model_name='comment',
name='created_date',
),
migrations.AddField(
model_name='comment',
name='created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='comment',
name='email',
field=models.EmailField(default=django.utils.timezone.now, max_length=254),
preserve_default=False,
),
]
| 26.525
| 93
| 0.566447
|
4844af7a0597625f3e714ff282ab5ee50ed76639
| 3,812
|
py
|
Python
|
sdk/python/pulumi_azure_native/portal/get_tenant_configuration.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/portal/get_tenant_configuration.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/portal/get_tenant_configuration.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetTenantConfigurationResult',
'AwaitableGetTenantConfigurationResult',
'get_tenant_configuration',
]
@pulumi.output_type
class GetTenantConfigurationResult:
"""
Tenant configuration.
"""
def __init__(__self__, enforce_private_markdown_storage=None, id=None, name=None, type=None):
if enforce_private_markdown_storage and not isinstance(enforce_private_markdown_storage, bool):
raise TypeError("Expected argument 'enforce_private_markdown_storage' to be a bool")
pulumi.set(__self__, "enforce_private_markdown_storage", enforce_private_markdown_storage)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="enforcePrivateMarkdownStorage")
def enforce_private_markdown_storage(self) -> Optional[bool]:
"""
When flag is set to true Markdown tile will require external storage configuration (URI). The inline content configuration will be prohibited.
"""
return pulumi.get(self, "enforce_private_markdown_storage")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetTenantConfigurationResult(GetTenantConfigurationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetTenantConfigurationResult(
enforce_private_markdown_storage=self.enforce_private_markdown_storage,
id=self.id,
name=self.name,
type=self.type)
def get_tenant_configuration(configuration_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTenantConfigurationResult:
"""
Tenant configuration.
API Version: 2020-09-01-preview.
:param str configuration_name: The configuration name. Value must be 'default'
"""
__args__ = dict()
__args__['configurationName'] = configuration_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:portal:getTenantConfiguration', __args__, opts=opts, typ=GetTenantConfigurationResult).value
return AwaitableGetTenantConfigurationResult(
enforce_private_markdown_storage=__ret__.enforce_private_markdown_storage,
id=__ret__.id,
name=__ret__.name,
type=__ret__.type)
| 37.009709
| 193
| 0.685729
|
442f0d5a383034f8e73f42a0e06f21b1df6306e0
| 34,415
|
py
|
Python
|
vnpy/gateway/huobis/huobis_gateway.py
|
hardywu/vnpy
|
81ab73dc57d12a3ff7c74c73665513b46fc0f668
|
[
"MIT"
] | 1
|
2021-05-14T12:57:08.000Z
|
2021-05-14T12:57:08.000Z
|
vnpy/gateway/huobis/huobis_gateway.py
|
hardywu/vnpy
|
81ab73dc57d12a3ff7c74c73665513b46fc0f668
|
[
"MIT"
] | null | null | null |
vnpy/gateway/huobis/huobis_gateway.py
|
hardywu/vnpy
|
81ab73dc57d12a3ff7c74c73665513b46fc0f668
|
[
"MIT"
] | 1
|
2021-06-14T13:26:41.000Z
|
2021-06-14T13:26:41.000Z
|
"""
Huobi futures/swap contract gateway interface.
"""
import re
import urllib
import base64
import json
import zlib
import hashlib
import hmac
import sys
from copy import copy
from datetime import datetime, timedelta
from threading import Lock
from typing import Sequence
import pytz
from typing import Dict, List, Any
from time import sleep
from vnpy.event import Event
from vnpy.api.rest import RestClient, Request
from vnpy.api.websocket import WebsocketClient
from vnpy.trader.constant import (
Direction,
Offset,
Exchange,
Product,
Status,
OrderType,
Interval
)
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
BarData,
AccountData,
PositionData,
ContractData,
OrderRequest,
CancelRequest,
SubscribeRequest,
HistoryRequest
)
from vnpy.trader.event import EVENT_TIMER
REST_HOST = "https://api.hbdm.com"
INVERSE_WEBSOCKET_DATA_HOST = "wss://api.hbdm.com/swap-ws" # Market Data
INVERSE_WEBSOCKET_TRADE_HOST = "wss://api.hbdm.com/swap-notification" # Account and Order
WEBSOCKET_DATA_HOST = "wss://api.hbdm.com/linear-swap-ws" # Market Data
WEBSOCKET_TRADE_HOST = "wss://api.hbdm.com/linear-swap-notification" # Account and Order
STATUS_HUOBIS2VT: Dict[int, Status] = {
3: Status.NOTTRADED,
4: Status.PARTTRADED,
5: Status.CANCELLED,
6: Status.ALLTRADED,
7: Status.CANCELLED,
}
ORDERTYPE_VT2HUOBIS: Dict[OrderType, Any] = {
OrderType.MARKET: "opponent",
OrderType.LIMIT: "limit",
OrderType.FOK: "fok",
OrderType.FAK: "ioc"
}
ORDERTYPE_HUOBIS2VT: Dict[Any, OrderType] = {v: k for k, v in ORDERTYPE_VT2HUOBIS.items()}
ORDERTYPE_HUOBIS2VT[1] = OrderType.LIMIT
ORDERTYPE_HUOBIS2VT[3] = OrderType.MARKET
ORDERTYPE_HUOBIS2VT[4] = OrderType.MARKET
ORDERTYPE_HUOBIS2VT[5] = OrderType.STOP
ORDERTYPE_HUOBIS2VT[6] = OrderType.LIMIT
ORDERTYPE_HUOBIS2VT["lightning"] = OrderType.MARKET
ORDERTYPE_HUOBIS2VT["optimal_5"] = OrderType.MARKET
ORDERTYPE_HUOBIS2VT["optimal_10"] = OrderType.MARKET
ORDERTYPE_HUOBIS2VT["optimal_20"] = OrderType.MARKET
DIRECTION_VT2HUOBIS: Dict[Direction, str] = {
Direction.LONG: "buy",
Direction.SHORT: "sell",
}
DIRECTION_HUOBIS2VT: Dict[str, Direction] = {v: k for k, v in DIRECTION_VT2HUOBIS.items()}
OFFSET_VT2HUOBIS: Dict[Offset, str] = {
Offset.OPEN: "open",
Offset.CLOSE: "close",
}
OFFSET_HUOBIS2VT: Dict[str, Offset] = {v: k for k, v in OFFSET_VT2HUOBIS.items()}
INTERVAL_VT2HUOBIS: Dict[Interval, str] = {
Interval.MINUTE: "1min",
Interval.HOUR: "60min",
Interval.DAILY: "1day"
}
TIMEDELTA_MAP: Dict[Interval, timedelta] = {
Interval.MINUTE: timedelta(minutes=1),
Interval.HOUR: timedelta(hours=1),
Interval.DAILY: timedelta(days=1),
}
CHINA_TZ = pytz.timezone("Asia/Shanghai")
class HuobisGateway(BaseGateway):
"""
VN Trader Gateway for Huobis connection.
"""
default_setting: Dict[str, Any] = {
"API Key": "",
"Secret Key": "",
"会话数": 3,
"合约模式": ["反向", "正向"],
"代理地址": "",
"代理端口": "",
}
exchanges = [Exchange.HUOBI]
def __init__(self, event_engine):
"""Constructor"""
super().__init__(event_engine, "HUOBIS")
self.rest_api = HuobisRestApi(self)
self.trade_ws_api = HuobisTradeWebsocketApi(self)
self.market_ws_api = HuobisDataWebsocketApi(self)
def connect(self, setting: dict) -> None:
""""""
key = setting["API Key"]
secret = setting["Secret Key"]
session_number = setting["会话数"]
proxy_host = setting["代理地址"]
proxy_port = setting["代理端口"]
if setting["合约模式"] == "正向":
usdt_base = True
else:
usdt_base = False
if proxy_port.isdigit():
proxy_port = int(proxy_port)
else:
proxy_port = 0
self.rest_api.connect(usdt_base, key, secret, session_number,
proxy_host, proxy_port)
self.trade_ws_api.connect(usdt_base, key, secret, proxy_host, proxy_port)
self.market_ws_api.connect(usdt_base, key, secret, proxy_host, proxy_port)
self.init_query()
def subscribe(self, req: SubscribeRequest) -> None:
""""""
self.market_ws_api.subscribe(req)
def send_order(self, req: OrderRequest) -> str:
""""""
return self.rest_api.send_order(req)
def cancel_order(self, req: CancelRequest) -> None:
""""""
self.rest_api.cancel_order(req)
def send_orders(self, reqs: Sequence[OrderRequest]) -> str:
""""""
return self.rest_api.send_orders(reqs)
def query_account(self) -> None:
""""""
self.rest_api.query_account()
def query_position(self) -> None:
""""""
self.rest_api.query_position()
def query_history(self, req: HistoryRequest) -> List[BarData]:
""""""
return self.rest_api.query_history(req)
def close(self) -> None:
""""""
self.rest_api.stop()
self.trade_ws_api.stop()
self.market_ws_api.stop()
def process_timer_event(self, event: Event) -> None:
""""""
self.count += 1
if self.count < 3:
return
self.query_account()
self.query_position()
def init_query(self) -> None:
""""""
self.count = 0
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
class HuobisRestApi(RestClient):
"""
HUOBIS REST API
"""
def __init__(self, gateway: BaseGateway):
""""""
super().__init__()
self.gateway: HuobisGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.usdt_base: bool = False
self.host: str = ""
self.key: str = ""
self.secret: str = ""
self.account_id: str = ""
self.order_count: int = 10000
self.order_count_lock: Lock = Lock()
self.connect_time: int = 0
self.positions: Dict[str, PositionData] = {}
self.contract_codes: set = set()
def sign(self, request) -> Request:
"""
Generate HUOBIS signature.
"""
request.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36",
"Connection": "close"
}
params_with_signature = create_signature(
self.key,
request.method,
self.host,
request.path,
self.secret,
request.params
)
request.params = params_with_signature
if request.method == "POST":
request.headers["Content-Type"] = "application/json"
if request.data:
request.data = json.dumps(request.data)
return request
def connect(
self,
usdt_base: bool,
key: str,
secret: str,
session_number: int,
proxy_host: str,
proxy_port: int
) -> None:
"""
Initialize connection to REST server.
"""
self.usdt_base = usdt_base
self.key = key
self.secret = secret
self.host, _ = _split_url(REST_HOST)
self.connect_time = int(datetime.now(CHINA_TZ).strftime("%y%m%d%H%M%S"))
self.init(REST_HOST, proxy_host, proxy_port)
self.start(session_number)
        self.gateway.write_log("REST API started successfully")
self.query_contract()
def query_account(self) -> None:
""""""
if self.usdt_base:
path = "/linear-swap-api/v1/swap_cross_account_info"
else:
path = "/swap-api/v1/swap_account_info"
self.add_request(
method="POST",
path=path,
callback=self.on_query_account
)
def query_position(self) -> None:
""""""
if self.usdt_base:
path = "/linear-swap-api/v1/swap_cross_position_info"
else:
path = "/swap-api/v1/swap_position_info"
self.add_request(
method="POST",
path=path,
callback=self.on_query_position
)
def query_order(self, contract_code: str) -> Request:
""""""
if self.usdt_base:
path = "/linear-swap-api/v1/swap_cross_openorders"
else:
path = "/swap-api/v1/swap_openorders"
data = {"contract_code": contract_code}
self.add_request(
method="POST",
path=path,
callback=self.on_query_order,
data=data,
extra=contract_code
)
def query_contract(self) -> None:
""""""
if self.usdt_base:
path = "/linear-swap-api/v1/swap_contract_info"
data = {"support_margin_mode": "cross"}
else:
path = "/swap-api/v1/swap_contract_info"
data = {}
self.add_request(
method="GET",
path=path,
data=data,
callback=self.on_query_contract
)
def query_history(self, req: HistoryRequest) -> List[BarData]:
""""""
if self.usdt_base:
path = "/linear-swap-ex/market/history/kline"
else:
path = "/swap-ex/market/history/kline"
history = []
count = 1999
start = req.start
time_delta = TIMEDELTA_MAP[req.interval]
# Convert symbol
buf = [i for i in req.symbol if not i.isdigit()]
symbol = "".join(buf)
ws_symbol = f"{symbol}"
while True:
# Calculate end time
end = start + time_delta * count
# Create query params
params = {
"contract_code": ws_symbol,
"period": INTERVAL_VT2HUOBIS[req.interval],
"from": int(start.timestamp()),
"to": int(end.timestamp())
}
# Get response from server
resp = self.request(
"GET",
path=path,
params=params
)
# Break if request failed with other status code
if resp.status_code // 100 != 2:
                msg = f"Failed to query history data, status code: {resp.status_code}, message: {resp.text}"
self.gateway.write_log(msg)
break
else:
data = resp.json()
if not data:
                    msg = "History data is empty"
self.gateway.write_log(msg)
break
if not data["data"]:
                    msg = "History data is empty"
self.gateway.write_log(msg)
break
buf = []
for d in data["data"]:
dt = generate_datetime(d["id"])
bar = BarData(
symbol=req.symbol,
exchange=req.exchange,
datetime=dt,
interval=req.interval,
volume=d["vol"],
open_price=d["open"],
high_price=d["high"],
low_price=d["low"],
close_price=d["close"],
gateway_name=self.gateway_name
)
buf.append(bar)
history.extend(buf)
begin = buf[0].datetime
end = buf[-1].datetime
                msg = f"Queried history data successfully, {req.symbol} - {req.interval.value}, {begin} - {end}"
self.gateway.write_log(msg)
# Update start time
start = bar.datetime
# Break if data end reached
if len(buf) < count:
break
return history
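    # Illustrative sketch (the contract code and dates are hypothetical): a
    # typical history query from client code once the gateway is connected:
    #     req = HistoryRequest(symbol="BTC-USDT", exchange=Exchange.HUOBI,
    #                          start=datetime(2021, 1, 1), interval=Interval.MINUTE)
    #     bars = gateway.query_history(req)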
def new_local_orderid(self) -> str:
""""""
with self.order_count_lock:
self.order_count += 1
local_orderid = f"{self.connect_time}{self.order_count}"
return local_orderid
def send_order(self, req: OrderRequest) -> str:
""""""
if self.usdt_base:
path = "/linear-swap-api/v1/swap_cross_order"
else:
path = "/swap-api/v1/swap_order"
local_orderid = self.new_local_orderid()
order = req.create_order_data(
local_orderid,
self.gateway_name
)
order.datetime = datetime.now(CHINA_TZ)
data = {
"contract_code": req.symbol,
"client_order_id": int(local_orderid),
"price": req.price,
"volume": int(req.volume),
"direction": DIRECTION_VT2HUOBIS.get(req.direction, ""),
"offset": OFFSET_VT2HUOBIS.get(req.offset, ""),
"order_price_type": ORDERTYPE_VT2HUOBIS.get(req.type, ""),
"lever_rate": 20
}
self.add_request(
method="POST",
path=path,
callback=self.on_send_order,
data=data,
extra=order,
on_error=self.on_send_order_error,
on_failed=self.on_send_order_failed
)
self.gateway.on_order(order)
return order.vt_orderid
def send_orders(self, reqs: Sequence[OrderRequest]) -> str:
""""""
if self.usdt_base:
path = "/linear-swap-api/v1/swap_cross_batchorder"
else:
path = "/swap-api/v1/swap_batchorder"
orders_data = []
orders = []
vt_orderids = []
for req in reqs:
local_orderid = self.new_local_orderid()
order = req.create_order_data(
local_orderid,
self.gateway_name
)
order.datetime = datetime.now(CHINA_TZ)
self.gateway.on_order(order)
d = {
"contract_code": req.symbol,
"client_order_id": int(local_orderid),
"price": req.price,
"volume": int(req.volume),
"direction": DIRECTION_VT2HUOBIS.get(req.direction, ""),
"offset": OFFSET_VT2HUOBIS.get(req.offset, ""),
"order_price_type": ORDERTYPE_VT2HUOBIS.get(req.type, ""),
"lever_rate": 20
}
orders_data.append(d)
orders.append(order)
vt_orderids.append(order.vt_orderid)
data = {
"orders_data": orders_data
}
self.add_request(
method="POST",
path=path,
callback=self.on_send_orders,
data=data,
extra=orders,
on_error=self.on_send_orders_error,
on_failed=self.on_send_orders_failed
)
return vt_orderids
def cancel_order(self, req: CancelRequest) -> None:
""""""
if self.usdt_base:
path = "/linear-swap-api/v1/swap_cross_cancel"
else:
path = "/swap-api/v1/swap_cancel"
data = {
"contract_code": req.symbol,
}
orderid = int(req.orderid)
if orderid > 1000000:
data["client_order_id"] = orderid
else:
data["order_id"] = orderid
self.add_request(
method="POST",
path=path,
callback=self.on_cancel_order,
on_failed=self.on_cancel_order_failed,
data=data,
extra=req
)
def on_query_account(self, data: dict, request: Request) -> None:
""""""
        if self.check_error(data, "query account"):
return
if self.usdt_base:
for d in data["data"]:
if d["margin_mode"] == "cross":
account = AccountData(
accountid=d["margin_account"],
balance=d["margin_balance"],
frozen=d["margin_frozen"],
gateway_name=self.gateway_name,
)
self.gateway.on_account(account)
else:
for d in data["data"]:
account = AccountData(
accountid=d["symbol"],
balance=d["margin_balance"],
frozen=d["margin_frozen"],
gateway_name=self.gateway_name,
)
self.gateway.on_account(account)
def on_query_position(self, data: dict, request: Request) -> None:
""""""
        if self.check_error(data, "query position"):
return
# Clear all buf data
for position in self.positions.values():
position.volume = 0
position.frozen = 0
position.price = 0
position.pnl = 0
for d in data["data"]:
key = f"{d['contract_code']}_{d['direction']}"
position = self.positions.get(key, None)
if not position:
position = PositionData(
symbol=d["contract_code"],
exchange=Exchange.HUOBI,
direction=DIRECTION_HUOBIS2VT[d["direction"]],
gateway_name=self.gateway_name
)
self.positions[key] = position
position.volume = d["volume"]
position.frozen = d["frozen"]
position.price = d["cost_hold"]
position.pnl = d["profit"]
for position in self.positions.values():
self.gateway.on_position(position)
def on_query_order(self, data: dict, request: Request) -> None:
""""""
        if self.check_error(data, "query open orders"):
return
for d in data["data"]["orders"]:
timestamp = d["created_at"]
dt = generate_datetime(timestamp / 1000)
if d["client_order_id"]:
orderid = d["client_order_id"]
else:
orderid = d["order_id"]
order = OrderData(
orderid=orderid,
symbol=d["contract_code"],
exchange=Exchange.HUOBI,
price=d["price"],
volume=d["volume"],
type=ORDERTYPE_HUOBIS2VT[d["order_price_type"]],
direction=DIRECTION_HUOBIS2VT[d["direction"]],
offset=OFFSET_HUOBIS2VT[d["offset"]],
traded=d["trade_volume"],
status=STATUS_HUOBIS2VT[d["status"]],
datetime=dt,
gateway_name=self.gateway_name,
)
self.gateway.on_order(order)
        self.gateway.write_log(f"{request.extra} open orders queried successfully")
if self.order_codes:
sleep(0.1)
contract_code = self.order_codes.pop()
self.query_order(contract_code)
def on_query_contract(self, data: dict, request: Request) -> None:
""""""
        if self.check_error(data, "query contracts"):
return
for d in data["data"]:
# Only allow cross margin contract
if not self.usdt_base or d["support_margin_mode"] != "isolated":
self.contract_codes.add(d["contract_code"])
contract = ContractData(
symbol=d["contract_code"],
exchange=Exchange.HUOBI,
name=d["contract_code"],
pricetick=d["price_tick"],
size=d["contract_size"],
min_volume=1,
product=Product.FUTURES,
history_data=True,
gateway_name=self.gateway_name,
)
self.gateway.on_contract(contract)
        self.gateway.write_log("Contract information queried successfully")
# Start querying open order info
self.order_codes = copy(self.contract_codes)
contract_code = self.order_codes.pop()
self.query_order(contract_code)
def on_send_order(self, data: dict, request: Request) -> None:
""""""
order = request.extra
        if self.check_error(data, "send order"):
order.status = Status.REJECTED
self.gateway.on_order(order)
def on_send_order_failed(self, status_code: str, request: Request) -> None:
"""
Callback when sending order failed on server.
"""
order = request.extra
order.status = Status.REJECTED
self.gateway.on_order(order)
        msg = f"Order failed, status code: {status_code}, message: {request.response.text}"
self.gateway.write_log(msg)
def on_send_order_error(
self,
exception_type: type,
exception_value: Exception,
tb,
request: Request
):
"""
Callback when sending order caused exception.
"""
order = request.extra
order.status = Status.REJECTED
self.gateway.on_order(order)
# Record exception if not ConnectionError
if not issubclass(exception_type, ConnectionError):
self.on_error(exception_type, exception_value, tb, request)
def on_cancel_order(self, data: dict, request: Request) -> None:
""""""
        self.check_error(data, "cancel order")
def on_cancel_order_failed(
self,
status_code: str,
request: Request
) -> None:
"""
Callback when canceling order failed on server.
"""
        msg = f"Cancel order failed, status code: {status_code}, message: {request.response.text}"
self.gateway.write_log(msg)
def on_send_orders(self, data: dict, request: Request) -> None:
""""""
orders = request.extra
errors = data.get("errors", None)
if errors:
for d in errors:
ix = d["index"]
code = d["err_code"]
msg = d["err_msg"]
order = orders[ix]
order.status = Status.REJECTED
self.gateway.on_order(order)
                msg = f"Batch order failed, status code: {code}, message: {msg}"
self.gateway.write_log(msg)
def on_send_orders_failed(
self,
status_code: str,
request: Request
) -> None:
"""
Callback when sending order failed on server.
"""
orders = request.extra
for order in orders:
order.status = Status.REJECTED
self.gateway.on_order(order)
        msg = f"Batch orders failed, status code: {status_code}, message: {request.response.text}"
self.gateway.write_log(msg)
def on_send_orders_error(
self,
exception_type: type,
exception_value: Exception,
tb,
request: Request
):
"""
Callback when sending order caused exception.
"""
orders = request.extra
for order in orders:
order.status = Status.REJECTED
self.gateway.on_order(order)
# Record exception if not ConnectionError
if not issubclass(exception_type, ConnectionError):
self.on_error(exception_type, exception_value, tb, request)
def on_error(
self,
exception_type: type,
exception_value: Exception,
tb,
request: Request
) -> None:
"""
Callback to handler request exception.
"""
        msg = f"Exception triggered, type: {exception_type}, message: {exception_value}"
self.gateway.write_log(msg)
sys.stderr.write(
self.exception_detail(exception_type, exception_value, tb, request)
)
def check_error(self, data: dict, func: str = "") -> bool:
""""""
if data["status"] != "error":
return False
error_code = data["err_code"]
error_msg = data["err_msg"]
        self.gateway.write_log(f"{func} request failed, code: {error_code}, message: {error_msg}")
return True
class HuobisWebsocketApiBase(WebsocketClient):
""""""
def __init__(self, gateway):
""""""
super(HuobisWebsocketApiBase, self).__init__()
self.gateway: HuobisGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.usdt_base: bool = False
self.key: str = ""
self.secret: str = ""
self.sign_host: str = ""
self.path: str = ""
self.req_id: int = 0
def connect(
self,
usdt_base: bool,
key: str,
secret: str,
url: str,
proxy_host: str,
proxy_port: int
) -> None:
""""""
self.key = key
self.secret = secret
self.usdt_base = usdt_base
host, path = _split_url(url)
self.sign_host = host
self.path = path
self.init(url, proxy_host, proxy_port)
self.start()
def login(self) -> int:
""""""
self.req_id += 1
params = {
"op": "auth",
"type": "api",
"cid": str(self.req_id),
}
params.update(
create_signature(
self.key,
"GET",
self.sign_host,
self.path,
self.secret
)
)
return self.send_packet(params)
def on_login(self, packet) -> None:
""""""
pass
@staticmethod
def unpack_data(data) -> json.JSONDecoder:
""""""
return json.loads(zlib.decompress(data, 31))
def on_packet(self, packet) -> None:
""""""
if "ping" in packet:
req = {"pong": packet["ping"]}
self.send_packet(req)
elif "op" in packet and packet["op"] == "ping":
req = {
"op": "pong",
"ts": packet["ts"]
}
self.send_packet(req)
elif "err-msg" in packet:
return self.on_error_msg(packet)
elif "op" in packet and packet["op"] == "auth":
return self.on_login()
else:
self.on_data(packet)
def on_data(self, packet) -> None:
""""""
print("data : {}".format(packet))
def on_error_msg(self, packet) -> None:
""""""
msg = packet["err-msg"]
if msg == "invalid pong":
return
self.gateway.write_log(packet["err-msg"])
class HuobisTradeWebsocketApi(HuobisWebsocketApiBase):
""""""
def __init__(self, gateway):
""""""
super().__init__(gateway)
def connect(
self,
usdt_base: bool,
key: str,
secret: str,
proxy_host: str,
proxy_port: int
) -> None:
""""""
if usdt_base:
url = WEBSOCKET_TRADE_HOST
else:
url = INVERSE_WEBSOCKET_TRADE_HOST
super().connect(
usdt_base,
key,
secret,
url,
proxy_host,
proxy_port
)
def subscribe(self) -> int:
""""""
self.req_id += 1
if self.usdt_base:
req = {
"op": "sub",
"cid": str(self.req_id),
"topic": f"orders_cross.*"
}
else:
req = {
"op": "sub",
"cid": str(self.req_id),
"topic": f"orders.*"
}
self.send_packet(req)
def on_connected(self) -> None:
""""""
        self.gateway.write_log("Trade Websocket API connected successfully")
self.login()
def on_login(self) -> None:
""""""
        self.gateway.write_log("Trade Websocket API logged in successfully")
self.subscribe()
def on_data(self, packet) -> None:
""""""
op = packet.get("op", None)
if op != "notify":
return
topic = packet["topic"]
if "orders" in topic:
self.on_order(packet)
def on_order(self, data: dict) -> None:
""""""
dt = generate_datetime(data["created_at"] / 1000)
if data["client_order_id"]:
orderid = data["client_order_id"]
else:
orderid = data["order_id"]
order = OrderData(
symbol=data["contract_code"],
exchange=Exchange.HUOBI,
orderid=orderid,
type=ORDERTYPE_HUOBIS2VT[data["order_price_type"]],
direction=DIRECTION_HUOBIS2VT[data["direction"]],
offset=OFFSET_HUOBIS2VT[data["offset"]],
price=data["price"],
volume=data["volume"],
traded=data["trade_volume"],
status=STATUS_HUOBIS2VT[data["status"]],
datetime=dt,
gateway_name=self.gateway_name
)
self.gateway.on_order(order)
# Push trade event
trades = data["trade"]
if not trades:
return
for d in trades:
dt = generate_datetime(d["created_at"] / 1000)
trade = TradeData(
symbol=order.symbol,
exchange=Exchange.HUOBI,
orderid=order.orderid,
tradeid=str(d["id"]),
direction=order.direction,
offset=order.offset,
price=d["trade_price"],
volume=d["trade_volume"],
datetime=dt,
gateway_name=self.gateway_name,
)
self.gateway.on_trade(trade)
class HuobisDataWebsocketApi(HuobisWebsocketApiBase):
""""""
def __init__(self, gateway):
""""""
super().__init__(gateway)
self.ticks = {}
def connect(
self,
usdt_base: bool,
key: str,
secret: str,
proxy_host: str,
proxy_port: int
) -> None:
""""""
if usdt_base:
url = WEBSOCKET_DATA_HOST
else:
url = INVERSE_WEBSOCKET_DATA_HOST
super().connect(
usdt_base,
key,
secret,
url,
proxy_host,
proxy_port
)
def on_connected(self) -> None:
""""""
self.gateway.write_log("行情Websocket API连接成功")
for ws_symbol in self.ticks.keys():
self.subscribe_data(ws_symbol)
def subscribe(self, req: SubscribeRequest) -> None:
""""""
buf = [i for i in req.symbol if not i.isdigit()]
symbol = "".join(buf)
ws_symbol = f"{symbol}"
# Create tick data buffer
tick = TickData(
symbol=req.symbol,
name=req.symbol,
exchange=Exchange.HUOBI,
datetime=datetime.now(CHINA_TZ),
gateway_name=self.gateway_name,
)
self.ticks[ws_symbol] = tick
self.subscribe_data(ws_symbol)
def subscribe_data(self, ws_symbol: str) -> None:
""""""
# Subscribe to market depth update
self.req_id += 1
req = {
"sub": f"market.{ws_symbol}.depth.step0",
"id": str(self.req_id)
}
self.send_packet(req)
# Subscribe to market detail update
self.req_id += 1
req = {
"sub": f"market.{ws_symbol}.detail",
"id": str(self.req_id)
}
self.send_packet(req)
def on_data(self, packet) -> None:
""""""
channel = packet.get("ch", None)
if channel:
if "depth.step" in channel:
self.on_market_depth(packet)
elif "detail" in channel:
self.on_market_detail(packet)
elif "err_code" in packet:
code = packet["err_code"]
msg = packet["err_msg"]
self.gateway.write_log(f"错误代码:{code}, 错误信息:{msg}")
def on_market_depth(self, data: dict) -> None:
"""行情深度推送 """
ws_symbol = data["ch"].split(".")[1]
tick = self.ticks[ws_symbol]
tick.datetime = generate_datetime(data["ts"] / 1000)
tick_data = data["tick"]
if "bids" not in tick_data or "asks" not in tick_data:
return
bids = tick_data["bids"]
bids_n = len(bids)
bids_n = min(bids_n, 5)
for n in range(bids_n):
price, volume = bids[n]
tick.__setattr__("bid_price_" + str(n + 1), float(price))
tick.__setattr__("bid_volume_" + str(n + 1), float(volume))
asks = tick_data["asks"]
asks_n = len(asks)
asks_n = min(asks_n, 5)
for n in range(asks_n):
price, volume = asks[n]
tick.__setattr__("ask_price_" + str(n + 1), float(price))
tick.__setattr__("ask_volume_" + str(n + 1), float(volume))
if tick.last_price:
self.gateway.on_tick(copy(tick))
def on_market_detail(self, data: dict) -> None:
"""市场细节推送"""
ws_symbol = data["ch"].split(".")[1]
tick = self.ticks[ws_symbol]
tick.datetime = generate_datetime(data["ts"] / 1000)
tick_data = data["tick"]
tick.open_price = tick_data["open"]
tick.high_price = tick_data["high"]
tick.low_price = tick_data["low"]
tick.last_price = tick_data["close"]
tick.volume = tick_data["vol"]
if tick.bid_price_1:
self.gateway.on_tick(copy(tick))
def _split_url(url):
    """
    Split a url into its host and path components.
    :return: host, path
    """
    result = re.match(r"\w+://([^/]*)(.*)", url)
    if result:
        return result.group(1), result.group(2)
def create_signature(
api_key: str,
method: str,
host: str,
path: str,
secret_key: str,
get_params=None
) -> Dict:
"""
创建签名
:param get_params: dict 使用GET方法时附带的额外参数(urlparams)
:return:
"""
sorted_params = [
("AccessKeyId", api_key),
("SignatureMethod", "HmacSHA256"),
("SignatureVersion", "2"),
("Timestamp", datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S"))
]
if get_params:
sorted_params.extend(list(get_params.items()))
sorted_params = list(sorted(sorted_params))
encode_params = urllib.parse.urlencode(sorted_params)
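    # Huobi signing scheme: HMAC-SHA256 over "METHOD\nhost\npath\nurl-encoded params",
    # then base64-encode the digest and attach it as the "Signature" parameter.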
payload = [method, host, path, encode_params]
payload = "\n".join(payload)
payload = payload.encode(encoding="UTF8")
secret_key = secret_key.encode(encoding="UTF8")
digest = hmac.new(secret_key, payload, digestmod=hashlib.sha256).digest()
signature = base64.b64encode(digest)
params = dict(sorted_params)
params["Signature"] = signature.decode("UTF8")
return params
def generate_datetime(timestamp: float) -> datetime:
""""""
dt = datetime.fromtimestamp(timestamp)
dt = CHINA_TZ.localize(dt)
return dt
| 28.232158
| 137
| 0.53529
|
21c3a9bcd87ddcf5a8255a035df555cb564f80fc
| 777
|
py
|
Python
|
checkov/cloudformation/checks/resource/aws/CloudWatchLogGroupRetention.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 4,013
|
2019-12-09T13:16:54.000Z
|
2022-03-31T14:31:01.000Z
|
checkov/cloudformation/checks/resource/aws/CloudWatchLogGroupRetention.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 1,258
|
2019-12-17T09:55:51.000Z
|
2022-03-31T19:17:17.000Z
|
checkov/cloudformation/checks/resource/aws/CloudWatchLogGroupRetention.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | 638
|
2019-12-19T08:57:38.000Z
|
2022-03-30T21:38:37.000Z
|
from checkov.common.models.enums import CheckCategories
from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck
from checkov.common.models.consts import ANY_VALUE
class CloudWatchLogGroupRetention(BaseResourceValueCheck):
def __init__(self):
name = "Ensure that CloudWatch Log Group specifies retention days"
id = "CKV_AWS_66"
supported_resource = ['AWS::Logs::LogGroup']
categories = [CheckCategories.LOGGING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)
def get_inspected_key(self):
return 'Properties/RetentionInDays'
def get_expected_value(self):
return ANY_VALUE
check = CloudWatchLogGroupRetention()
| 35.318182
| 105
| 0.764479
|
e94841ae21a0452a009383767c8d35499fc0b4db
| 357
|
py
|
Python
|
pos_backend/order/migrations/0002_auto_20201027_0751.py
|
ZeroExistence/pos-backend
|
38c7795de08f6a5fd7359ab54a5f802939ca438f
|
[
"MIT"
] | null | null | null |
pos_backend/order/migrations/0002_auto_20201027_0751.py
|
ZeroExistence/pos-backend
|
38c7795de08f6a5fd7359ab54a5f802939ca438f
|
[
"MIT"
] | null | null | null |
pos_backend/order/migrations/0002_auto_20201027_0751.py
|
ZeroExistence/pos-backend
|
38c7795de08f6a5fd7359ab54a5f802939ca438f
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-10-27 07:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('order', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='orderitem',
old_name='transaction',
new_name='order',
),
]
| 18.789474
| 47
| 0.577031
|
013797e47b3bddaa2be791106762236f0a86892a
| 17,259
|
py
|
Python
|
sdk/python/pulumi_azure/datafactory/integration_runtime_managed.py
|
adnang/pulumi-azure
|
32360d2f1e41e27d7fdd6522cb26d65e531f279f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/datafactory/integration_runtime_managed.py
|
adnang/pulumi-azure
|
32360d2f1e41e27d7fdd6522cb26d65e531f279f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/datafactory/integration_runtime_managed.py
|
adnang/pulumi-azure
|
32360d2f1e41e27d7fdd6522cb26d65e531f279f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class IntegrationRuntimeManaged(pulumi.CustomResource):
catalog_info: pulumi.Output[dict]
"""
A `catalog_info` block as defined below.
* `administrator_login` (`str`) - Administrator login name for the SQL Server.
* `administratorPassword` (`str`) - Administrator login password for the SQL Server.
* `pricing_tier` (`str`) - Pricing tier for the database that will be created for the SSIS catalog. Valid values are: `Basic`, `Standard`, `Premium` and `PremiumRS`.
* `serverEndpoint` (`str`) - The endpoint of an Azure SQL Server that will be used to host the SSIS catalog.
"""
custom_setup_script: pulumi.Output[dict]
"""
A `custom_setup_script` block as defined below.
* `blobContainerUri` (`str`) - The blob endpoint for the container which contains a custom setup script that will be run on every node on startup. See [https://docs.microsoft.com/en-us/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup](https://docs.microsoft.com/en-us/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup) for more information.
* `sasToken` (`str`) - A container SAS token that gives access to the files. See [https://docs.microsoft.com/en-us/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup](https://docs.microsoft.com/en-us/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup) for more information.
"""
data_factory_name: pulumi.Output[str]
"""
Specifies the name of the Data Factory the Managed Integration Runtime belongs to. Changing this forces a new resource to be created.
"""
description: pulumi.Output[str]
edition: pulumi.Output[str]
"""
The Managed Integration Runtime edition. Valid values are `Standard` and `Enterprise`. Defaults to `Standard`.
"""
license_type: pulumi.Output[str]
"""
    The type of the license that is used. Valid values are `LicenseIncluded` and `BasePrice`. Defaults to `LicenseIncluded`.
"""
location: pulumi.Output[str]
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
max_parallel_executions_per_node: pulumi.Output[float]
"""
Defines the maximum parallel executions per node. Defaults to `1`. Max is `16`.
"""
name: pulumi.Output[str]
"""
Specifies the name of the Managed Integration Runtime. Changing this forces a new resource to be created. Must be globally unique. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
"""
node_size: pulumi.Output[str]
"""
The size of the nodes on which the Managed Integration Runtime runs. Valid values are: `Standard_D2_v3`, `Standard_D4_v3`, `Standard_D8_v3`, `Standard_D16_v3`, `Standard_D32_v3`, `Standard_D64_v3`, `Standard_E2_v3`, `Standard_E4_v3`, `Standard_E8_v3`, `Standard_E16_v3`, `Standard_E32_v3`, `Standard_E64_v3`, `Standard_D1_v2`, `Standard_D2_v2`, `Standard_D3_v2`, `Standard_D4_v2`, `Standard_A4_v2` and `Standard_A8_v2`
"""
number_of_nodes: pulumi.Output[float]
"""
Number of nodes for the Managed Integration Runtime. Max is `10`. Defaults to `1`.
"""
resource_group_name: pulumi.Output[str]
"""
The name of the resource group in which to create the Managed Integration Runtime. Changing this forces a new resource to be created.
"""
vnet_integration: pulumi.Output[dict]
"""
A `vnet_integration` block as defined below.
* `subnetName` (`str`) - Name of the subnet to which the nodes of the Managed Integration Runtime will be added.
* `vnetId` (`str`) - ID of the virtual network to which the nodes of the Managed Integration Runtime will be added.
"""
def __init__(__self__, resource_name, opts=None, catalog_info=None, custom_setup_script=None, data_factory_name=None, description=None, edition=None, license_type=None, location=None, max_parallel_executions_per_node=None, name=None, node_size=None, number_of_nodes=None, resource_group_name=None, vnet_integration=None, __props__=None, __name__=None, __opts__=None):
"""
Manages an Azure Data Factory Managed Integration Runtime.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="northeurope")
example_factory = azure.datafactory.Factory("exampleFactory",
location=example_resource_group.location,
resource_group_name=example_resource_group.name)
example_integration_runtime_managed = azure.datafactory.IntegrationRuntimeManaged("exampleIntegrationRuntimeManaged",
data_factory_name=example_factory.name,
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
node_size="Standard_D8_v3")
```
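
        A `catalog_info` block can additionally be supplied so the runtime hosts an
        SSIS catalog. The sketch below is illustrative only; the server endpoint and
        credentials are placeholders rather than values from the provider documentation.

        ```python
        example_with_catalog = azure.datafactory.IntegrationRuntimeManaged("exampleWithCatalog",
            data_factory_name=example_factory.name,
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            node_size="Standard_D8_v3",
            catalog_info={
                "serverEndpoint": "example-sqlserver.database.windows.net",
                "administrator_login": "ssis_admin",
                "administratorPassword": "example-password",
                "pricing_tier": "Basic",
            })
        ```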
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] catalog_info: A `catalog_info` block as defined below.
:param pulumi.Input[dict] custom_setup_script: A `custom_setup_script` block as defined below.
:param pulumi.Input[str] data_factory_name: Specifies the name of the Data Factory the Managed Integration Runtime belongs to. Changing this forces a new resource to be created.
:param pulumi.Input[str] edition: The Managed Integration Runtime edition. Valid values are `Standard` and `Enterprise`. Defaults to `Standard`.
        :param pulumi.Input[str] license_type: The type of the license that is used. Valid values are `LicenseIncluded` and `BasePrice`. Defaults to `LicenseIncluded`.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[float] max_parallel_executions_per_node: Defines the maximum parallel executions per node. Defaults to `1`. Max is `16`.
:param pulumi.Input[str] name: Specifies the name of the Managed Integration Runtime. Changing this forces a new resource to be created. Must be globally unique. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
:param pulumi.Input[str] node_size: The size of the nodes on which the Managed Integration Runtime runs. Valid values are: `Standard_D2_v3`, `Standard_D4_v3`, `Standard_D8_v3`, `Standard_D16_v3`, `Standard_D32_v3`, `Standard_D64_v3`, `Standard_E2_v3`, `Standard_E4_v3`, `Standard_E8_v3`, `Standard_E16_v3`, `Standard_E32_v3`, `Standard_E64_v3`, `Standard_D1_v2`, `Standard_D2_v2`, `Standard_D3_v2`, `Standard_D4_v2`, `Standard_A4_v2` and `Standard_A8_v2`
:param pulumi.Input[float] number_of_nodes: Number of nodes for the Managed Integration Runtime. Max is `10`. Defaults to `1`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Managed Integration Runtime. Changing this forces a new resource to be created.
:param pulumi.Input[dict] vnet_integration: A `vnet_integration` block as defined below.
The **catalog_info** object supports the following:
* `administrator_login` (`pulumi.Input[str]`) - Administrator login name for the SQL Server.
* `administratorPassword` (`pulumi.Input[str]`) - Administrator login password for the SQL Server.
* `pricing_tier` (`pulumi.Input[str]`) - Pricing tier for the database that will be created for the SSIS catalog. Valid values are: `Basic`, `Standard`, `Premium` and `PremiumRS`.
* `serverEndpoint` (`pulumi.Input[str]`) - The endpoint of an Azure SQL Server that will be used to host the SSIS catalog.
The **custom_setup_script** object supports the following:
* `blobContainerUri` (`pulumi.Input[str]`) - The blob endpoint for the container which contains a custom setup script that will be run on every node on startup. See [https://docs.microsoft.com/en-us/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup](https://docs.microsoft.com/en-us/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup) for more information.
* `sasToken` (`pulumi.Input[str]`) - A container SAS token that gives access to the files. See [https://docs.microsoft.com/en-us/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup](https://docs.microsoft.com/en-us/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup) for more information.
The **vnet_integration** object supports the following:
* `subnetName` (`pulumi.Input[str]`) - Name of the subnet to which the nodes of the Managed Integration Runtime will be added.
* `vnetId` (`pulumi.Input[str]`) - ID of the virtual network to which the nodes of the Managed Integration Runtime will be added.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['catalog_info'] = catalog_info
__props__['custom_setup_script'] = custom_setup_script
if data_factory_name is None:
raise TypeError("Missing required property 'data_factory_name'")
__props__['data_factory_name'] = data_factory_name
__props__['description'] = description
__props__['edition'] = edition
__props__['license_type'] = license_type
__props__['location'] = location
__props__['max_parallel_executions_per_node'] = max_parallel_executions_per_node
__props__['name'] = name
if node_size is None:
raise TypeError("Missing required property 'node_size'")
__props__['node_size'] = node_size
__props__['number_of_nodes'] = number_of_nodes
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['vnet_integration'] = vnet_integration
super(IntegrationRuntimeManaged, __self__).__init__(
'azure:datafactory/integrationRuntimeManaged:IntegrationRuntimeManaged',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, catalog_info=None, custom_setup_script=None, data_factory_name=None, description=None, edition=None, license_type=None, location=None, max_parallel_executions_per_node=None, name=None, node_size=None, number_of_nodes=None, resource_group_name=None, vnet_integration=None):
"""
Get an existing IntegrationRuntimeManaged resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] catalog_info: A `catalog_info` block as defined below.
:param pulumi.Input[dict] custom_setup_script: A `custom_setup_script` block as defined below.
:param pulumi.Input[str] data_factory_name: Specifies the name of the Data Factory the Managed Integration Runtime belongs to. Changing this forces a new resource to be created.
:param pulumi.Input[str] edition: The Managed Integration Runtime edition. Valid values are `Standard` and `Enterprise`. Defaults to `Standard`.
        :param pulumi.Input[str] license_type: The type of the license that is used. Valid values are `LicenseIncluded` and `BasePrice`. Defaults to `LicenseIncluded`.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[float] max_parallel_executions_per_node: Defines the maximum parallel executions per node. Defaults to `1`. Max is `16`.
:param pulumi.Input[str] name: Specifies the name of the Managed Integration Runtime. Changing this forces a new resource to be created. Must be globally unique. See the [Microsoft documentation](https://docs.microsoft.com/en-us/azure/data-factory/naming-rules) for all restrictions.
:param pulumi.Input[str] node_size: The size of the nodes on which the Managed Integration Runtime runs. Valid values are: `Standard_D2_v3`, `Standard_D4_v3`, `Standard_D8_v3`, `Standard_D16_v3`, `Standard_D32_v3`, `Standard_D64_v3`, `Standard_E2_v3`, `Standard_E4_v3`, `Standard_E8_v3`, `Standard_E16_v3`, `Standard_E32_v3`, `Standard_E64_v3`, `Standard_D1_v2`, `Standard_D2_v2`, `Standard_D3_v2`, `Standard_D4_v2`, `Standard_A4_v2` and `Standard_A8_v2`
:param pulumi.Input[float] number_of_nodes: Number of nodes for the Managed Integration Runtime. Max is `10`. Defaults to `1`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Managed Integration Runtime. Changing this forces a new resource to be created.
:param pulumi.Input[dict] vnet_integration: A `vnet_integration` block as defined below.
The **catalog_info** object supports the following:
* `administrator_login` (`pulumi.Input[str]`) - Administrator login name for the SQL Server.
* `administratorPassword` (`pulumi.Input[str]`) - Administrator login password for the SQL Server.
* `pricing_tier` (`pulumi.Input[str]`) - Pricing tier for the database that will be created for the SSIS catalog. Valid values are: `Basic`, `Standard`, `Premium` and `PremiumRS`.
* `serverEndpoint` (`pulumi.Input[str]`) - The endpoint of an Azure SQL Server that will be used to host the SSIS catalog.
The **custom_setup_script** object supports the following:
* `blobContainerUri` (`pulumi.Input[str]`) - The blob endpoint for the container which contains a custom setup script that will be run on every node on startup. See [https://docs.microsoft.com/en-us/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup](https://docs.microsoft.com/en-us/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup) for more information.
* `sasToken` (`pulumi.Input[str]`) - A container SAS token that gives access to the files. See [https://docs.microsoft.com/en-us/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup](https://docs.microsoft.com/en-us/azure/data-factory/how-to-configure-azure-ssis-ir-custom-setup) for more information.
The **vnet_integration** object supports the following:
* `subnetName` (`pulumi.Input[str]`) - Name of the subnet to which the nodes of the Managed Integration Runtime will be added.
* `vnetId` (`pulumi.Input[str]`) - ID of the virtual network to which the nodes of the Managed Integration Runtime will be added.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["catalog_info"] = catalog_info
__props__["custom_setup_script"] = custom_setup_script
__props__["data_factory_name"] = data_factory_name
__props__["description"] = description
__props__["edition"] = edition
__props__["license_type"] = license_type
__props__["location"] = location
__props__["max_parallel_executions_per_node"] = max_parallel_executions_per_node
__props__["name"] = name
__props__["node_size"] = node_size
__props__["number_of_nodes"] = number_of_nodes
__props__["resource_group_name"] = resource_group_name
__props__["vnet_integration"] = vnet_integration
return IntegrationRuntimeManaged(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 73.75641
| 462
| 0.721131
|
a14e3a36be8d1e2c0e020a4e6f7b1efd2f5f739b
| 3,692
|
py
|
Python
|
chrome/common/extensions/docs/server2/render_servlet.py
|
pozdnyakov/chromium-crosswalk
|
0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
chrome/common/extensions/docs/server2/render_servlet.py
|
pozdnyakov/chromium-crosswalk
|
0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
chrome/common/extensions/docs/server2/render_servlet.py
|
pozdnyakov/chromium-crosswalk
|
0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from fnmatch import fnmatch
import logging
import mimetypes
import traceback
from urlparse import urlsplit
from branch_utility import BranchUtility
from file_system import FileNotFoundError
from servlet import Servlet, Response
import svn_constants
def _IsBinaryMimetype(mimetype):
return any(
mimetype.startswith(prefix) for prefix in ['audio', 'image', 'video'])
class RenderServlet(Servlet):
'''Servlet which renders templates.
'''
class Delegate(object):
def CreateServerInstanceForChannel(self, channel):
raise NotImplementedError()
def __init__(self, request, delegate, default_channel='stable'):
Servlet.__init__(self, request)
self._delegate = delegate
self._default_channel = default_channel
def Get(self):
''' Render the page for a request.
'''
headers = self._request.headers
channel, path = BranchUtility.SplitChannelNameFromPath(self._request.path)
if path.split('/')[-1] == 'redirects.json':
return Response.Ok('')
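    # Requests that explicitly name the default channel are redirected to the bare
    # path, so canonical URLs never embed the channel name.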
if channel == self._default_channel:
return Response.Redirect('/' + path)
if channel is None:
channel = self._default_channel
server_instance = self._delegate.CreateServerInstanceForChannel(channel)
redirect = server_instance.redirector.Redirect(self._request.host, path)
if redirect is not None:
if (channel != self._default_channel and
not urlsplit(redirect).scheme in ('http', 'https')):
redirect = '/%s%s' % (channel, redirect)
return Response.Redirect(redirect)
canonical_path = server_instance.path_canonicalizer.Canonicalize(path)
redirect = canonical_path.lstrip('/')
if path != redirect:
if channel is not None:
redirect = '%s/%s' % (channel, canonical_path)
return Response.Redirect('/' + redirect)
templates = server_instance.template_data_source_factory.Create(
self._request, path)
content = None
content_type = None
try:
if fnmatch(path, 'extensions/examples/*.zip'):
content = server_instance.example_zipper.Create(
path[len('extensions/'):-len('.zip')])
content_type = 'application/zip'
elif path.startswith('extensions/examples/'):
mimetype = mimetypes.guess_type(path)[0] or 'text/plain'
content = server_instance.content_cache.GetFromFile(
'%s/%s' % (svn_constants.DOCS_PATH, path[len('extensions/'):]),
binary=_IsBinaryMimetype(mimetype))
content_type = mimetype
elif path.startswith('static/'):
mimetype = mimetypes.guess_type(path)[0] or 'text/plain'
content = server_instance.content_cache.GetFromFile(
('%s/%s' % (svn_constants.DOCS_PATH, path)),
binary=_IsBinaryMimetype(mimetype))
content_type = mimetype
elif path.endswith('.html'):
content = templates.Render(path)
content_type = 'text/html'
except FileNotFoundError:
logging.warning(traceback.format_exc())
content = None
headers = {'x-frame-options': 'sameorigin'}
if content is None:
doc_class = path.split('/', 1)[0]
content = templates.Render('%s/404' % doc_class)
if not content:
content = templates.Render('extensions/404')
return Response.NotFound(content, headers=headers)
if not content:
logging.error('%s had empty content' % path)
headers.update({
'content-type': content_type,
'cache-control': 'max-age=300',
})
return Response.Ok(content, headers=headers)
| 34.185185
| 78
| 0.681744
|
04b67322edd0a572e8de8323c9f14836dbe7c222
| 610
|
py
|
Python
|
mopo/static/__init__.py
|
xtwentian3/mopo
|
2bdf0db3862a4510b9511327ef0b23cc6ec10405
|
[
"MIT"
] | null | null | null |
mopo/static/__init__.py
|
xtwentian3/mopo
|
2bdf0db3862a4510b9511327ef0b23cc6ec10405
|
[
"MIT"
] | null | null | null |
mopo/static/__init__.py
|
xtwentian3/mopo
|
2bdf0db3862a4510b9511327ef0b23cc6ec10405
|
[
"MIT"
] | null | null | null |
import sys
import os
import importlib
import pdb
def import_fns(path, file, fns_name='StaticFns'):
full_path = os.path.join(path, file)
import_path = full_path.replace('/', '.')
module = importlib.import_module(import_path)
fns = getattr(module, fns_name)
return fns
# cwd = 'mopo/static'
cwd = '.'
files = os.listdir(cwd)
## remove __init__.py
files = filter(lambda x: '__' not in x and x[0] != '.', files)
## env.py --> env
files = map(lambda x: x.replace('.py', ''), files)
## {env: StaticFns, ... }
static_fns = {file: import_fns(cwd, file) for file in files}
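# Replace this package's module object in sys.modules with the dict itself, so importing
# the package yields the {env_name: StaticFns} mapping directly.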
sys.modules[__name__] = static_fns
| 22.592593
| 62
| 0.677049
|
f66c822db1c07e801d08726d00d41dd9d99d6104
| 270
|
py
|
Python
|
configs/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV3_Flat_ycbvPbr_SO/FlowNet512_1.5AugCosyAAEGray_AggressiveV3_Flat_Pbr_06_07TunaFishCan_bop_test.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 33
|
2021-12-15T07:11:47.000Z
|
2022-03-29T08:58:32.000Z
|
configs/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV3_Flat_ycbvPbr_SO/FlowNet512_1.5AugCosyAAEGray_AggressiveV3_Flat_Pbr_06_07TunaFishCan_bop_test.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 3
|
2021-12-15T11:39:54.000Z
|
2022-03-29T07:24:23.000Z
|
configs/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV3_Flat_ycbvPbr_SO/FlowNet512_1.5AugCosyAAEGray_AggressiveV3_Flat_Pbr_06_07TunaFishCan_bop_test.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | null | null | null |
_base_ = "./FlowNet512_1.5AugCosyAAEGray_AggressiveV3_Flat_Pbr_01_02MasterChefCan_bop_test.py"
OUTPUT_DIR = "output/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV3_Flat_ycbvPbr_SO/06_07TunaFishCan"
DATASETS = dict(TRAIN=("ycbv_007_tuna_fish_can_train_pbr",))
| 67.5
| 113
| 0.877778
|
7d1736071592c5faaeacc63fbac10d8c99aff338
| 5,028
|
py
|
Python
|
addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_lights.py
|
Cyp/glTF-Blender-IO
|
232018e779469912e68ae876a9e02ad4cd258b5b
|
[
"Apache-2.0"
] | null | null | null |
addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_lights.py
|
Cyp/glTF-Blender-IO
|
232018e779469912e68ae876a9e02ad4cd258b5b
|
[
"Apache-2.0"
] | null | null | null |
addons/io_scene_gltf2/blender/exp/gltf2_blender_gather_lights.py
|
Cyp/glTF-Blender-IO
|
232018e779469912e68ae876a9e02ad4cd258b5b
|
[
"Apache-2.0"
] | 1
|
2019-08-23T16:10:36.000Z
|
2019-08-23T16:10:36.000Z
|
# Copyright 2018 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
import math
from typing import Optional, List, Dict, Any
from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached
from io_scene_gltf2.io.com import gltf2_io_lights_punctual
from io_scene_gltf2.io.com import gltf2_io_debug
from io_scene_gltf2.blender.exp import gltf2_blender_gather_light_spots
from io_scene_gltf2.blender.exp import gltf2_blender_search_node_tree
@cached
def gather_lights_punctual(blender_lamp, export_settings) -> Optional[Dict[str, Any]]:
if not __filter_lights_punctual(blender_lamp, export_settings):
return None
light = gltf2_io_lights_punctual.Light(
color=__gather_color(blender_lamp, export_settings),
intensity=__gather_intensity(blender_lamp, export_settings),
spot=__gather_spot(blender_lamp, export_settings),
type=__gather_type(blender_lamp, export_settings),
range=__gather_range(blender_lamp, export_settings),
name=__gather_name(blender_lamp, export_settings),
extensions=__gather_extensions(blender_lamp, export_settings),
extras=__gather_extras(blender_lamp, export_settings)
)
return light.to_dict()
def __filter_lights_punctual(blender_lamp, export_settings) -> bool:
if blender_lamp.type in ["HEMI", "AREA"]:
gltf2_io_debug.print_console("WARNING", "Unsupported light source {}".format(blender_lamp.type))
return False
return True
def __gather_color(blender_lamp, export_settings) -> Optional[List[float]]:
emission_node = __get_cycles_emission_node(blender_lamp)
if emission_node is not None:
return emission_node.inputs["Color"].default_value
return list(blender_lamp.color)
def __gather_intensity(blender_lamp, _) -> Optional[float]:
emission_node = __get_cycles_emission_node(blender_lamp)
if emission_node is not None:
if blender_lamp.type != 'SUN':
# When using cycles, the strength should be influenced by a LightFalloff node
result = gltf2_blender_search_node_tree.from_socket(
emission_node.get("Strength"),
gltf2_blender_search_node_tree.FilterByType(bpy.types.ShaderNodeLightFalloff)
)
if result:
quadratic_falloff_node = result[0].shader_node
emission_strength = quadratic_falloff_node.inputs["Strength"].default_value / (math.pi * 4.0)
else:
gltf2_io_debug.print_console('WARNING',
'No quadratic light falloff node attached to emission strength property')
emission_strength = blender_lamp.energy
else:
emission_strength = emission_node.inputs["Strength"].default_value
return emission_strength
return blender_lamp.energy
def __gather_spot(blender_lamp, export_settings) -> Optional[gltf2_io_lights_punctual.LightSpot]:
if blender_lamp.type == "SPOT":
return gltf2_blender_gather_light_spots.gather_light_spot(blender_lamp, export_settings)
return None
def __gather_type(blender_lamp, _) -> str:
return {
"POINT": "point",
"SUN": "directional",
"SPOT": "spot"
}[blender_lamp.type]
def __gather_range(blender_lamp, export_settings) -> Optional[float]:
# TODO: calculate range from
# https://github.com/KhronosGroup/glTF/tree/master/extensions/2.0/Khronos/KHR_lights_punctual#range-property
return None
def __gather_name(blender_lamp, export_settings) -> Optional[str]:
return blender_lamp.name
def __gather_extensions(blender_lamp, export_settings) -> Optional[dict]:
return None
def __gather_extras(blender_lamp, export_settings) -> Optional[Any]:
return None
def __get_cycles_emission_node(blender_lamp) -> Optional[bpy.types.ShaderNodeEmission]:
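    # Walk the lamp's node tree: find the active output node, then search back from
    # its "Surface" input socket for a ShaderNodeEmission and return the first match.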
if blender_lamp.use_nodes and blender_lamp.node_tree:
for currentNode in blender_lamp.node_tree.nodes:
if isinstance(currentNode, bpy.types.ShaderNodeOutputLamp):
if not currentNode.is_active_output:
continue
result = gltf2_blender_search_node_tree.from_socket(
currentNode.inputs.get("Surface"),
gltf2_blender_search_node_tree.FilterByType(bpy.types.ShaderNodeEmission)
)
if not result:
continue
return result[0].shader_node
return None
| 38.090909
| 118
| 0.716985
|
34db0cd82d836345ef6de0b3d12580248640be03
| 13,693
|
py
|
Python
|
test/test_pinserver.py
|
valerio-vaccaro/blind_pin_server
|
8cf4e9b3e8ba8d7ee074253aac3154fec16d4a9e
|
[
"MIT"
] | 4
|
2021-01-18T07:58:28.000Z
|
2021-11-17T22:03:37.000Z
|
test/test_pinserver.py
|
valerio-vaccaro/blind_pin_server
|
8cf4e9b3e8ba8d7ee074253aac3154fec16d4a9e
|
[
"MIT"
] | 5
|
2021-01-05T14:30:30.000Z
|
2021-12-15T10:18:54.000Z
|
test/test_pinserver.py
|
valerio-vaccaro/blind_pin_server
|
8cf4e9b3e8ba8d7ee074253aac3154fec16d4a9e
|
[
"MIT"
] | 5
|
2021-02-19T09:30:16.000Z
|
2022-03-29T21:34:00.000Z
|
import unittest
import os
import json
import time
from multiprocessing import Process
from hmac import compare_digest
import requests
from ..client import PINClientECDH
from ..server import PINServerECDH
from ..pindb import PINDb
from ..flaskserver import app
from ..flaskserver import SESSION_LIFETIME
from wallycore import sha256, ec_sig_from_bytes, hex_from_bytes, hex_to_bytes,\
AES_KEY_LEN_256, EC_FLAG_ECDSA, EC_FLAG_RECOVERABLE
b2h = hex_from_bytes
h2b = hex_to_bytes
class PINServerTest(unittest.TestCase):
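    """End-to-end tests that drive the flask PIN server over HTTP.

    Each test performs the ECDH handshake with a fresh ephemeral client, signs and
    encrypts a pin_secret + entropy payload with the client's static key, posts it
    to set_pin/get_pin, and verifies the AES key carried in the decrypted response.
    """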
@staticmethod
def new_pin_secret():
return os.urandom(32)
@staticmethod
def new_entropy():
return os.urandom(32)
@classmethod
def post(cls, url='', data=None):
if data:
userdata = json.dumps(data)
else:
userdata = None
f = requests.post(cls.pinserver_url + '/' + url,
data=userdata)
if f.status_code != 200:
raise ValueError(f.status_code)
return f.json() if url else f.text
# Make new logical client static keys
@classmethod
def new_static_client_keys(cls):
private_key, public_key = PINClientECDH.generate_ec_key_pair()
# Cache the pinfile for this client key so we can ensure it is removed
pinfile = bytes(sha256(public_key))
cls.pinfiles.add(bytes(pinfile))
# Return the keys and the pin-filename
return private_key, public_key, pinfile
# setUpClass() runs up the webserver
@classmethod
def setUpClass(cls):
# The server public key the client would know
with open(PINServerECDH.STATIC_SERVER_PUBLIC_KEY_FILE, 'rb') as f:
cls.static_server_public_key = f.read()
# pinfiles that may be created, so we can ensure they are deleted
cls.pinfiles = set()
# Work out the server port and localhost url
svrport = os.getenv('PINSERVER_PORT', '5000')
cls.pinserver_url = 'http://127.0.0.1:' + svrport
# Start the flask server
cls.server = Process(target=app.run, kwargs={'port': svrport})
cls.server.start()
# Wait for server to start
while True:
try:
f = requests.get(cls.pinserver_url)
assert f.status_code == 200
break
except Exception:
pass
# tearDownClass() shuts down the webserver and tidies up pinfiles
@classmethod
def tearDownClass(cls):
# Close the web server
cls.server.terminate()
# Delete any pinfiles
for f in cls.pinfiles:
if PINDb.storage.exists(f):
PINDb.storage.remove(f)
# Helpers
# Start the client/server key-exchange handshake
def start_handshake(self, client):
handshake = self.post('start_handshake')
client.handshake(h2b(handshake['ske']), h2b(handshake['sig']))
return client
# Make a new ephemeral client and initialise with server handshake
def new_client_handshake(self):
client = PINClientECDH(self.static_server_public_key)
return self.start_handshake(client)
# Make the server call to get/set the pin - returns the decrypted response
def server_call(self, private_key, client, endpoint, pin_secret, entropy):
# Make and encrypt the payload (ie. pin secret)
ske, cke = client.get_key_exchange()
sig = ec_sig_from_bytes(private_key,
sha256(cke + pin_secret + entropy),
EC_FLAG_ECDSA | EC_FLAG_RECOVERABLE)
payload = pin_secret + entropy + sig
encrypted, hmac = client.encrypt_request_payload(payload)
# Make call and parse response
urldata = {'ske': b2h(ske),
'cke': b2h(cke),
'encrypted_data': b2h(encrypted),
'hmac_encrypted_data': b2h(hmac)}
response = self.post(endpoint, urldata)
encrypted = h2b(response['encrypted_key'])
hmac = h2b(response['hmac'])
# Return decrypted payload
return client.decrypt_response_payload(encrypted, hmac)
def get_pin(self, private_key, pin_secret, entropy):
# Create new ephemeral client, initiate handshake, and make call
client = self.new_client_handshake()
return self.server_call(
private_key, client, 'get_pin', pin_secret, entropy)
def set_pin(self, private_key, pin_secret, entropy):
# Create new ephemeral client, initiate handshake, and make call
client = self.new_client_handshake()
return self.server_call(
private_key, client, 'set_pin', pin_secret, entropy)
# Tests
def test_get_index(self):
# No index or similar
for path in ['index.htm', 'index.html', 'public/']:
f = requests.get(self.pinserver_url + '/' + path)
self.assertEqual(f.status_code, 404)
f = requests.post(self.pinserver_url + '/' + path)
self.assertEqual(f.status_code, 404)
def test_get_root_empty(self):
# Root is an empty document
f = requests.get(self.pinserver_url)
self.assertEqual(f.status_code, 200)
self.assertFalse(f.text)
# But get 405 if we try to POST
f = requests.post(self.pinserver_url)
self.assertEqual(f.status_code, 405)
def test_set_and_get_pin(self):
# Make ourselves a static key pair for this logical client
priv_key, _, _ = self.new_static_client_keys()
# The 'correct' client pin
pin_secret = self.new_pin_secret()
# Make a new client and set the pin secret to get a new aes key
aeskey_s = self.set_pin(priv_key, pin_secret, self.new_entropy())
self.assertEqual(len(aeskey_s), AES_KEY_LEN_256)
# Get key with a new client, with the correct pin secret (new entropy)
for attempt in range(5):
aeskey_g = self.get_pin(priv_key, pin_secret, self.new_entropy())
self.assertTrue(compare_digest(aeskey_g, aeskey_s))
def test_bad_guesses_clears_pin(self):
# Make ourselves a static key pair for this logical client
priv_key, _, pinfile = self.new_static_client_keys()
# The 'correct' client pin
pin_secret, entropy = self.new_pin_secret(), self.new_entropy()
# Set and verify the pin - ensure underlying file created
self.assertFalse(PINDb.storage.exists(pinfile))
aeskey_s = self.set_pin(priv_key, pin_secret, entropy)
self.assertEqual(len(aeskey_s), AES_KEY_LEN_256)
aeskey_g = self.get_pin(priv_key, pin_secret, entropy)
self.assertTrue(compare_digest(aeskey_g, aeskey_s))
self.assertTrue(PINDb.storage.exists(pinfile))
# Bad guesses at PIN
for attempt in range(3):
# Attempt to get with bad pin
bad_secret = os.urandom(32)
guesskey = self.get_pin(priv_key, bad_secret, entropy)
# Wrong pin should return junk aes-key
self.assertEqual(len(aeskey_s), len(guesskey))
self.assertFalse(compare_digest(aeskey_s, guesskey))
# after three failed attempts server deletes the file
self.assertFalse(PINDb.storage.exists(pinfile))
# Now even the correct pin will fail...
aeskey = self.get_pin(priv_key, bad_secret, entropy)
self.assertEqual(len(aeskey), len(aeskey_s))
self.assertFalse(compare_digest(aeskey, aeskey_s))
self.assertFalse(PINDb.storage.exists(pinfile))
def test_bad_pubkey_breaks(self):
# Make ourselves a static key pair for this logical client
priv_key, _, pinfile = self.new_static_client_keys()
# The 'correct' client pin
pin_secret, entropy = self.new_pin_secret(), self.new_entropy()
# Set and verify the pin - ensure underlying file created
self.assertFalse(PINDb.storage.exists(pinfile))
aeskey_s = self.set_pin(priv_key, pin_secret, entropy)
self.assertEqual(len(aeskey_s), AES_KEY_LEN_256)
aeskey_g = self.get_pin(priv_key, pin_secret, entropy)
self.assertTrue(compare_digest(aeskey_g, aeskey_s))
self.assertTrue(PINDb.storage.exists(pinfile))
# Bad attempts with bad pub_key
for attempt in range(3):
# Attempt to get with bad pub_key
bad_key = os.urandom(32)
guesskey = self.get_pin(bad_key, pin_secret, entropy)
# Wrong pin should return junk aes-key
self.assertEqual(len(aeskey_s), len(guesskey))
self.assertFalse(compare_digest(aeskey_s, guesskey))
# after three failed attempts server does nothing
self.assertTrue(PINDb.storage.exists(pinfile))
# The correct pin will continue to work
aeskey = self.get_pin(priv_key, pin_secret, entropy)
self.assertEqual(len(aeskey), len(aeskey_s))
self.assertTrue(compare_digest(aeskey, aeskey_s))
self.assertTrue(PINDb.storage.exists(pinfile))
def test_two_users_with_same_pin(self):
# Two users
clientA_private_key, _, _ = self.new_static_client_keys()
clientB_private_key, _, _ = self.new_static_client_keys()
# pin plus its salt/iv/entropy
pin_secret, entropy = self.new_pin_secret(), self.new_entropy()
# A and B use the same values... bizarre but should be fine
aeskey_sA = self.set_pin(clientA_private_key, pin_secret, entropy)
aeskey_sB = self.set_pin(clientB_private_key, pin_secret, entropy)
self.assertFalse(compare_digest(aeskey_sA, aeskey_sB))
aeskey_gA = self.get_pin(clientA_private_key, pin_secret, entropy)
self.assertTrue(compare_digest(aeskey_gA, aeskey_sA))
aeskey_gB = self.get_pin(clientB_private_key, pin_secret, entropy)
self.assertTrue(compare_digest(aeskey_gB, aeskey_sB))
self.assertFalse(compare_digest(aeskey_gA, aeskey_gB))
def test_rejects_on_bad_json(self):
# Create new ephemeral client, initiate handshake, and make call
client = self.new_client_handshake()
ske, cke = client.get_key_exchange()
# Make call with bad/missing parameters
urldata = {'ske': b2h(ske),
'cke': b2h(cke),
# 'encrypted_data' missing
'hmac_encrypted_data': 'abc123'}
with self.assertRaises(ValueError) as cm:
self.post('get_pin', urldata)
# Make call with not-even-json
urldata = 'This is not even json'
with self.assertRaises(ValueError) as cm:
self.post('get_pin', urldata)
def test_rejects_without_client_entropy(self):
# Make ourselves a static key pair for this logical client
priv_key, _, _ = self.new_static_client_keys()
# The 'correct' client pin but no salt/iv/entropy
pin_secret, entropy = self.new_pin_secret(), bytearray()
# Make a new client and set the pin secret to get a new aes key
with self.assertRaises(ValueError) as cm:
self.set_pin(priv_key, pin_secret, entropy)
self.assertEqual('500', str(cm.exception.args[0]))
with self.assertRaises(ValueError) as cm:
self.get_pin(priv_key, pin_secret, entropy)
self.assertEqual('500', str(cm.exception.args[0]))
def test_delayed_interaction(self):
# Make ourselves a static key pair for this logical client
priv_key, _, _ = self.new_static_client_keys()
# The 'correct' client pin plus its salt/iv/entropy
pin_secret = self.new_pin_secret()
# Set and verify the pin
aeskey_s = self.set_pin(priv_key, pin_secret, self.new_entropy())
aeskey_g = self.get_pin(priv_key, pin_secret, self.new_entropy())
self.assertTrue(compare_digest(aeskey_g, aeskey_s))
# If we delay in the server interaction it will fail with a 500 error
client = self.new_client_handshake()
time.sleep(SESSION_LIFETIME + 1) # Sufficiently long delay
with self.assertRaises(ValueError) as cm:
self.server_call(priv_key, client, 'get_pin', pin_secret,
self.new_entropy())
self.assertEqual('500', str(cm.exception.args[0]))
def test_cannot_reuse_client_session(self):
# Make ourselves a static key pair for this logical client
priv_key, _, _ = self.new_static_client_keys()
# The 'correct' client pin plus its salt/iv/entropy
pin_secret = self.new_pin_secret()
# Set pin
aeskey_s = self.set_pin(priv_key, pin_secret, self.new_entropy())
# Get/verify pin with a new client
client = self.new_client_handshake()
aeskey_g = self.server_call(priv_key, client, 'get_pin', pin_secret,
self.new_entropy())
self.assertTrue(compare_digest(aeskey_g, aeskey_s))
# Trying to reuse the session should fail with a 500 error
with self.assertRaises(ValueError) as cm:
self.server_call(priv_key, client, 'get_pin', pin_secret,
self.new_entropy())
self.assertEqual('500', str(cm.exception.args[0]))
# Not great, but we could reuse the client if we re-initiate handshake
# (But that would use same cke which is not ideal.)
self.start_handshake(client)
aeskey = self.server_call(priv_key, client, 'get_pin', pin_secret,
self.new_entropy())
self.assertTrue(compare_digest(aeskey, aeskey_s))
if __name__ == '__main__':
unittest.main()
| 37.825967
| 79
| 0.648434
|
be5d5d4851c0b3294a155ac7e273c99d207aaac5
| 11,178
|
pyw
|
Python
|
example_program.pyw
|
zach-king/GlyphyType
|
dd930730af4396ad7d5ac12d5011a35a1f5ab9bc
|
[
"MIT"
] | null | null | null |
example_program.pyw
|
zach-king/GlyphyType
|
dd930730af4396ad7d5ac12d5011a35a1f5ab9bc
|
[
"MIT"
] | null | null | null |
example_program.pyw
|
zach-king/GlyphyType
|
dd930730af4396ad7d5ac12d5011a35a1f5ab9bc
|
[
"MIT"
] | null | null | null |
from PyQt4 import QtCore, QtGui
class ScribbleArea(QtGui.QWidget):
"""
this scales the image but it's not good, too many refreshes really mess it up!!!
"""
def __init__(self, parent=None):
super(ScribbleArea, self).__init__(parent)
self.setAttribute(QtCore.Qt.WA_StaticContents)
self.modified = False
self.scribbling = False
self.myPenWidth = 1
self.myPenColor = QtCore.Qt.blue
imageSize = QtCore.QSize(500, 500)
# self.image = QtGui.QImage()
self.image = QtGui.QImage(imageSize, QtGui.QImage.Format_RGB32)
self.lastPoint = QtCore.QPoint()
def openImage(self, fileName):
loadedImage = QtGui.QImage()
if not loadedImage.load(fileName):
return False
w = loadedImage.width()
h = loadedImage.height()
self.mainWindow.resize(w, h)
# newSize = loadedImage.size().expandedTo(self.size())
# self.resizeImage(loadedImage, newSize)
self.image = loadedImage
self.modified = False
self.update()
return True
def saveImage(self, fileName, fileFormat):
visibleImage = self.image
self.resizeImage(visibleImage, self.size())
if visibleImage.save(fileName, fileFormat):
self.modified = False
return True
else:
return False
def setPenColor(self, newColor):
self.myPenColor = newColor
def setPenWidth(self, newWidth):
self.myPenWidth = newWidth
def clearImage(self):
self.image.fill(QtGui.qRgb(255, 255, 255))
self.modified = True
self.update()
def mousePressEvent(self, event):
# print "self.image.width() = %d" % self.image.width()
# print "self.image.height() = %d" % self.image.height()
# print "self.image.size() = %s" % self.image.size()
# print "self.size() = %s" % self.size()
# print "event.pos() = %s" % event.pos()
if event.button() == QtCore.Qt.LeftButton:
self.lastPoint = event.pos()
self.scribbling = True
def mouseMoveEvent(self, event):
if (event.buttons() & QtCore.Qt.LeftButton) and self.scribbling:
self.drawLineTo(event.pos())
def mouseReleaseEvent(self, event):
if event.button() == QtCore.Qt.LeftButton and self.scribbling:
self.drawLineTo(event.pos())
self.scribbling = False
def paintEvent(self, event):
painter = QtGui.QPainter(self)
painter.drawImage(event.rect(), self.image)
def resizeEvent(self, event):
# print "resize event"
# print "event = %s" % event
# print "event.oldSize() = %s" % event.oldSize()
# print "event.size() = %s" % event.size()
self.resizeImage(self.image, event.size())
# if self.width() > self.image.width() or self.height() > self.image.height():
# newWidth = max(self.width() + 128, self.image.width())
# newHeight = max(self.height() + 128, self.image.height())
# print "newWidth = %d, newHeight = %d" % (newWidth, newHeight)
# self.resizeImage(self.image, QtCore.QSize(newWidth, newHeight))
# self.update()
super(ScribbleArea, self).resizeEvent(event)
def drawLineTo(self, endPoint):
painter = QtGui.QPainter(self.image)
painter.setPen(QtGui.QPen(self.myPenColor, self.myPenWidth,
QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin))
painter.drawLine(self.lastPoint, endPoint)
self.modified = True
# rad = self.myPenWidth / 2 + 2
# self.update(QtCore.QRect(self.lastPoint, endPoint).normalized().adjusted(-rad, -rad, +rad, +rad))
self.update()
self.lastPoint = QtCore.QPoint(endPoint)
def resizeImage(self, image, newSize):
if image.size() == newSize:
return
# print "image.size() = %s" % repr(image.size())
# print "newSize = %s" % newSize
# this resizes the canvas without resampling the image
newImage = QtGui.QImage(newSize, QtGui.QImage.Format_RGB32)
newImage.fill(QtGui.qRgb(255, 255, 255))
painter = QtGui.QPainter(newImage)
painter.drawImage(QtCore.QPoint(0, 0), image)
## this resampled the image but it gets messed up with so many events...
## painter.setRenderHint(QtGui.QPainter.SmoothPixmapTransform, True)
## painter.setRenderHint(QtGui.QPainter.HighQualityAntialiasing, True)
#
# newImage = QtGui.QImage(newSize, QtGui.QImage.Format_RGB32)
# newImage.fill(QtGui.qRgb(255, 255, 255))
# painter = QtGui.QPainter(newImage)
# srcRect = QtCore.QRect(QtCore.QPoint(0,0), image.size())
# dstRect = QtCore.QRect(QtCore.QPoint(0,0), newSize)
## print "srcRect = %s" % srcRect
## print "dstRect = %s" % dstRect
# painter.drawImage(dstRect, image, srcRect)
self.image = newImage
def print_(self):
printer = QtGui.QPrinter(QtGui.QPrinter.HighResolution)
printDialog = QtGui.QPrintDialog(printer, self)
if printDialog.exec_() == QtGui.QDialog.Accepted:
painter = QtGui.QPainter(printer)
rect = painter.viewport()
size = self.image.size()
size.scale(rect.size(), QtCore.Qt.KeepAspectRatio)
painter.setViewport(rect.x(), rect.y(), size.width(), size.height())
painter.setWindow(self.image.rect())
painter.drawImage(0, 0, self.image)
painter.end()
def isModified(self):
return self.modified
def penColor(self):
return self.myPenColor
def penWidth(self):
return self.myPenWidth
class MainWindow(QtGui.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.saveAsActs = []
self.scribbleArea = ScribbleArea(self)
self.scribbleArea.clearImage()
self.scribbleArea.mainWindow = self # maybe not using this?
self.setCentralWidget(self.scribbleArea)
self.createActions()
self.createMenus()
self.setWindowTitle("Scribble")
self.resize(500, 500)
def closeEvent(self, event):
if self.maybeSave():
event.accept()
else:
event.ignore()
def open(self):
if self.maybeSave():
fileName = QtGui.QFileDialog.getOpenFileName(self, "Open File",
QtCore.QDir.currentPath())
if fileName:
self.scribbleArea.openImage(fileName)
def save(self):
action = self.sender()
fileFormat = action.data()
self.saveFile(fileFormat)
def penColor(self):
newColor = QtGui.QColorDialog.getColor(self.scribbleArea.penColor())
if newColor.isValid():
self.scribbleArea.setPenColor(newColor)
def penWidth(self):
newWidth, ok = QtGui.QInputDialog.getInteger(self, "Scribble",
"Select pen width:", self.scribbleArea.penWidth(), 1, 50, 1)
if ok:
self.scribbleArea.setPenWidth(newWidth)
def about(self):
QtGui.QMessageBox.about(self, "About Scribble",
"<p>The <b>Scribble</b> example shows how to use "
"QMainWindow as the base widget for an application, and how "
"to reimplement some of QWidget's event handlers to receive "
"the events generated for the application's widgets:</p>"
"<p> We reimplement the mouse event handlers to facilitate "
"drawing, the paint event handler to update the application "
"and the resize event handler to optimize the application's "
"appearance. In addition we reimplement the close event "
"handler to intercept the close events before terminating "
"the application.</p>"
"<p> The example also demonstrates how to use QPainter to "
"draw an image in real time, as well as to repaint "
"widgets.</p>")
def createActions(self):
self.openAct = QtGui.QAction("&Open...", self, shortcut="Ctrl+O",
triggered=self.open)
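        # Build one "Save As" action per image format that Qt can write.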
for format in QtGui.QImageWriter.supportedImageFormats():
format = str(format)
text = format.upper() + "..."
action = QtGui.QAction(text, self, triggered=self.save)
action.setData(format)
self.saveAsActs.append(action)
self.printAct = QtGui.QAction("&Print...", self,
triggered=self.scribbleArea.print_)
self.exitAct = QtGui.QAction("E&xit", self, shortcut="Ctrl+Q",
triggered=self.close)
self.penColorAct = QtGui.QAction("&Pen Color...", self,
triggered=self.penColor)
self.penWidthAct = QtGui.QAction("Pen &Width...", self,
triggered=self.penWidth)
self.clearScreenAct = QtGui.QAction("&Clear Screen", self,
shortcut="Ctrl+L", triggered=self.scribbleArea.clearImage)
self.aboutAct = QtGui.QAction("&About", self, triggered=self.about)
self.aboutQtAct = QtGui.QAction("About &Qt", self,
triggered=QtGui.qApp.aboutQt)
def createMenus(self):
self.saveAsMenu = QtGui.QMenu("&Save As", self)
for action in self.saveAsActs:
self.saveAsMenu.addAction(action)
fileMenu = QtGui.QMenu("&File", self)
fileMenu.addAction(self.openAct)
fileMenu.addMenu(self.saveAsMenu)
fileMenu.addAction(self.printAct)
fileMenu.addSeparator()
fileMenu.addAction(self.exitAct)
optionMenu = QtGui.QMenu("&Options", self)
optionMenu.addAction(self.penColorAct)
optionMenu.addAction(self.penWidthAct)
optionMenu.addSeparator()
optionMenu.addAction(self.clearScreenAct)
helpMenu = QtGui.QMenu("&Help", self)
helpMenu.addAction(self.aboutAct)
helpMenu.addAction(self.aboutQtAct)
self.menuBar().addMenu(fileMenu)
self.menuBar().addMenu(optionMenu)
self.menuBar().addMenu(helpMenu)
def maybeSave(self):
if self.scribbleArea.isModified():
ret = QtGui.QMessageBox.warning(self, "Scribble",
"The image has been modified.\n"
"Do you want to save your changes?",
QtGui.QMessageBox.Save | QtGui.QMessageBox.Discard |
QtGui.QMessageBox.Cancel)
if ret == QtGui.QMessageBox.Save:
return self.saveFile('png')
elif ret == QtGui.QMessageBox.Cancel:
return False
return True
def saveFile(self, fileFormat):
initialPath = QtCore.QDir.currentPath() + '/untitled.' + fileFormat
fileName = QtGui.QFileDialog.getSaveFileName(self, "Save As",
initialPath,
"%s Files (*.%s);;All Files (*)" % (fileFormat.upper(), fileFormat))
if fileName:
return self.scribbleArea.saveImage(fileName, fileFormat)
return False
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
| 35.150943
| 107
| 0.61299
|
5eae892104b1d574b63481bd1496293f84cf82eb
| 1,272
|
py
|
Python
|
locallibrary/urls.py
|
byronvhughey/django_local_library
|
1429b95b3a4fe38af3722c352a3d05c8b332cd9b
|
[
"MIT"
] | null | null | null |
locallibrary/urls.py
|
byronvhughey/django_local_library
|
1429b95b3a4fe38af3722c352a3d05c8b332cd9b
|
[
"MIT"
] | null | null | null |
locallibrary/urls.py
|
byronvhughey/django_local_library
|
1429b95b3a4fe38af3722c352a3d05c8b332cd9b
|
[
"MIT"
] | null | null | null |
"""locallibrary URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls import include
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic import RedirectView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^catalog/', include('catalog.urls')),
url(r'^$', RedirectView.as_view(url='/catalog/', permanent=True)),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# Add Django site authentication urls (for login, logout, password management)
urlpatterns += [
url('^accounts/', include('django.contrib.auth.urls')),
]
| 39.75
| 79
| 0.724843
|
hexsha: c3d9e228e57c118418379984383ae31e82ad75da | size: 3,319 | ext: py | lang: Python
max_stars_repo_path: easy_rec/python/input/tfrecord_input.py | max_stars_repo_name: xia-huang-411303/EasyRec | max_stars_repo_head_hexsha: 7b2050dddc0bfec9e551e2199a36414a3ee82588 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 61 | max_stars_repo_stars_event_min_datetime: 2021-08-19T06:10:03.000Z | max_stars_repo_stars_event_max_datetime: 2021-10-09T06:44:54.000Z
max_issues_repo_path: easy_rec/python/input/tfrecord_input.py | max_issues_repo_name: xia-huang-411303/EasyRec | max_issues_repo_head_hexsha: 7b2050dddc0bfec9e551e2199a36414a3ee82588 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 41 | max_issues_repo_issues_event_min_datetime: 2021-09-08T03:02:42.000Z | max_issues_repo_issues_event_max_datetime: 2021-09-29T09:00:57.000Z
max_forks_repo_path: easy_rec/python/input/tfrecord_input.py | max_forks_repo_name: xia-huang-411303/EasyRec | max_forks_repo_head_hexsha: 7b2050dddc0bfec9e551e2199a36414a3ee82588 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 11 | max_forks_repo_forks_event_min_datetime: 2021-08-20T06:19:08.000Z | max_forks_repo_forks_event_max_datetime: 2021-10-02T14:55:39.000Z
# -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
import logging
import tensorflow as tf
from easy_rec.python.input.input import Input
if tf.__version__ >= '2.0':
tf = tf.compat.v1
class TFRecordInput(Input):
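  # Input that parses serialized tf.train.Example records from TFRecord files.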
def __init__(self,
data_config,
feature_config,
input_path,
task_index=0,
task_num=1):
super(TFRecordInput, self).__init__(data_config, feature_config, input_path,
task_index, task_num)
self.feature_desc = {}
for x, t, d in zip(self._input_fields, self._input_field_types,
self._input_field_defaults):
d = self.get_type_defaults(t, d)
t = self.get_tf_type(t)
self.feature_desc[x] = tf.FixedLenFeature(
dtype=t, shape=1, default_value=d)
def _parse_tfrecord(self, example):
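    # Parse one serialized Example using the fixed-length feature spec built in __init__.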
try:
inputs = tf.parse_single_example(example, features=self.feature_desc)
except AttributeError:
inputs = tf.io.parse_single_example(example, features=self.feature_desc)
return inputs
def _build(self, mode, params):
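    # Build the tf.data pipeline: list files, shard/shuffle for training, then parse, batch and preprocess.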
file_paths = tf.gfile.Glob(self._input_path)
assert len(file_paths) > 0, 'match no files with %s' % self._input_path
num_parallel_calls = self._data_config.num_parallel_calls
data_compression_type = self._data_config.data_compression_type
if mode == tf.estimator.ModeKeys.TRAIN:
logging.info('train files[%d]: %s' %
(len(file_paths), ','.join(file_paths)))
dataset = tf.data.Dataset.from_tensor_slices(file_paths)
if self._data_config.shuffle:
# shuffle input files
dataset = dataset.shuffle(len(file_paths))
# too many readers read the same file will cause performance issues
# as the same data will be read multiple times
parallel_num = min(num_parallel_calls, len(file_paths))
dataset = dataset.interleave(
lambda x: tf.data.TFRecordDataset(
x, compression_type=data_compression_type),
cycle_length=parallel_num,
num_parallel_calls=parallel_num)
dataset = dataset.shard(self._task_num, self._task_index)
if self._data_config.shuffle:
dataset = dataset.shuffle(
self._data_config.shuffle_buffer_size,
seed=2020,
reshuffle_each_iteration=True)
dataset = dataset.repeat(self.num_epochs)
else:
logging.info('eval files[%d]: %s' %
(len(file_paths), ','.join(file_paths)))
dataset = tf.data.TFRecordDataset(
file_paths, compression_type=data_compression_type)
dataset = dataset.repeat(1)
dataset = dataset.map(
self._parse_tfrecord, num_parallel_calls=num_parallel_calls)
dataset = dataset.batch(self._data_config.batch_size)
dataset = dataset.prefetch(buffer_size=self._prefetch_size)
dataset = dataset.map(
map_func=self._preprocess, num_parallel_calls=num_parallel_calls)
dataset = dataset.prefetch(buffer_size=self._prefetch_size)
if mode != tf.estimator.ModeKeys.PREDICT:
dataset = dataset.map(lambda x:
(self._get_features(x), self._get_labels(x)))
else:
dataset = dataset.map(lambda x: (self._get_features(x)))
return dataset
avg_line_length: 37.292135 | max_line_length: 80 | alphanum_fraction: 0.662248
hexsha: f99ea854ace14a1770f64c6e2c33871b52ef5d31 | size: 7,515 | ext: py | lang: Python
max_stars_repo_path: r2r/env_config.py | max_stars_repo_name: google-research/valan | max_stars_repo_head_hexsha: 9fc6e38f411e6cb76408bf033cdc056ace980973 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 71 | max_stars_repo_stars_event_min_datetime: 2019-09-16T20:20:45.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-31T09:33:33.000Z
max_issues_repo_path: r2r/env_config.py | max_issues_repo_name: google-research/valan | max_issues_repo_head_hexsha: 9fc6e38f411e6cb76408bf033cdc056ace980973 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2020-01-16T12:05:37.000Z | max_issues_repo_issues_event_max_datetime: 2021-04-12T17:38:09.000Z
max_forks_repo_path: r2r/env_config.py | max_forks_repo_name: google-research/valan | max_forks_repo_head_hexsha: 9fc6e38f411e6cb76408bf033cdc056ace980973 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 16 | max_forks_repo_forks_event_min_datetime: 2019-09-25T07:00:43.000Z | max_forks_repo_forks_event_max_datetime: 2022-01-16T11:32:54.000Z
# coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default configuration used for R2R environment."""
from absl import flags
import numpy as np
from valan.framework import eval_metric
from valan.framework import hparam
from valan.r2r import constants
FLAGS = flags.FLAGS
# Default fixed params for env.
DEFAULT_ENV_CONFIG = {
# Base dir for scan data, eg., `scans`, `connections`.
'scan_base_dir': '', # Can be updated by FLAGS.scan_base_dir.
# Base dir for input JSON and vocab files.
'data_base_dir': '', # Can be updated by FLAGS.data_base_dir.
# Base dir for vocab.
'vocab_dir': '', # Can be updated by FLAGS.vocab_dir.
'vocab_file': 'vocab.txt',
    # Number of images for each panorama, typically 36.
'images_per_pano': 36,
'max_conns': 14,
# Max number of instruction tokens.
'instruction_len': 50,
'max_agent_actions': 12,
'reward_fn_type': constants.REWARD_DISTANCE_TO_GOAL,
# Field of View used to generate image features.
'fov': 0.17,
# Dir containing pre-generated image features.
'image_features_dir': '', # Can be updated by FLAGS.image_features_dir.
# Image feature dimension size. 1792 for EfficientNet B4.
'image_encoding_dim': 1792,
# Direction encoding dimension size.
'direction_encoding_dim': 256, # Must be a multiple of 8.
# The following are set in `get_default_env_config`.
'reward_fn': '',
}
### Different functions go here. All methods accept the following 4 args:
# path_history: A list of integers specifying pano ids (source) until the
# current step.
# next_pano: An integer specifying next pano id (destination).
# golden_path: A list containing string names of panos on the golden path.
# end_of_episode: True if this is the last transition in the episode.
# scan_info: A `ScanInfo` tuple. See constants.py.
def distance_to_goal(path_history, next_pano, golden_path, end_of_episode,
scan_info):
"""Rewards an agent based on how close it gets to the goal node.
If d(p, g) is the distance of pano `p` from goal node `g`, then
r(p1 --> p2) = 4 if end_of_episode and agent stopped correctly
= -4 if end_of_episode and agent did not stop correctly
= clip(d(p1, g) - d(p2, g), max=1, min=-1) otherwise
Args:
path_history: See above.
next_pano: See above.
golden_path: See above.
end_of_episode: See above.
scan_info: See above.
Returns:
A scalar float immediate reward for the transition
current_pano --> next_pano.
"""
current_pano = path_history[-1]
goal_pano_name = golden_path[-1]
if end_of_episode:
# If episode ended due to STOP node, then last valid node is
# `current_pano`.
last_node_id = (
next_pano if next_pano != constants.STOP_NODE_ID else current_pano)
last_node_name = scan_info.pano_id_to_name[last_node_id]
return 4. if last_node_name == goal_pano_name else -4.
current_pano_name = scan_info.pano_id_to_name[current_pano]
next_pano_name = scan_info.pano_id_to_name[next_pano]
delta_distance = scan_info.graph.get_distance(
current_pano_name, goal_pano_name) - scan_info.graph.get_distance(
next_pano_name, goal_pano_name)
return min(1., max(-1., delta_distance))
def dense_dtw(path_history, next_pano, golden_path, end_of_episode, scan_info):
"""Rewards an agent based on the difference in DTW after going to nex_pano.
Args:
path_history: See above.
next_pano: See above.
golden_path: See above.
end_of_episode: See above.
scan_info: See above.
Returns:
A scalar float immediate reward for the transition
current_pano --> next_pano.
"""
del end_of_episode
if next_pano in [constants.STOP_NODE_ID, constants.INVALID_NODE_ID]:
return 0.0
observed_pano_ids = path_history + [next_pano]
observed_pano_names = [
scan_info.pano_id_to_name[pano] for pano in observed_pano_ids
]
dtw_matrix = eval_metric.get_dtw_matrix(observed_pano_names, golden_path,
scan_info.graph.get_distance)
num_obs_panos = len(observed_pano_names)
num_golden_panos = len(golden_path)
previous_dtw = dtw_matrix[num_obs_panos - 1][num_golden_panos]
current_dtw = dtw_matrix[num_obs_panos][num_golden_panos]
return previous_dtw - current_dtw
def random_reward(path_history, next_pano, golden_path, end_of_episode,
scan_info):
"""Rewards by sampling a random value in (-1, 1) from a uniform distribution.
Args:
path_history: See above.
next_pano: See above.
golden_path: See above.
end_of_episode: See above.
scan_info: See above.
Returns:
A scalar float immediate reward sampled from a uniform dist for the
transition current_pano --> next_pano.
"""
del path_history, next_pano, golden_path, end_of_episode, scan_info
return np.random.uniform(-1, 1)
def goal_plus_random_reward(path_history, next_pano, golden_path,
end_of_episode, scan_info):
"""Rewards an agent based on the difference in DTW after going to nex_pano.
Args:
path_history: See above.
next_pano: See above.
golden_path: See above.
end_of_episode: See above.
scan_info: See above.
Returns:
A scalar float immediate reward for the transition
current_pano --> next_pano.
"""
goal_rwd = distance_to_goal(path_history, next_pano, golden_path,
end_of_episode, scan_info)
random_rwd = np.random.uniform(-1, 1)
return goal_rwd + random_rwd
class RewardFunction(object):
"""Specifies the RL reward function."""
### Registration happens here.
_REWARD_FN_REGISTRY = {
constants.REWARD_DISTANCE_TO_GOAL: distance_to_goal,
constants.REWARD_DENSE_DTW: dense_dtw,
constants.REWARD_RANDOM: random_reward,
constants.REWARD_GOAL_RANDOM: goal_plus_random_reward,
}
@staticmethod
def get_reward_fn(reward_fn_type):
if reward_fn_type not in RewardFunction._REWARD_FN_REGISTRY:
raise ValueError(
'Unsupported reward function type: %s. Please use one of %s or '
'add your reward function to the registry in this file' %
(reward_fn_type, RewardFunction._REWARD_FN_REGISTRY.keys()))
return RewardFunction._REWARD_FN_REGISTRY[reward_fn_type]
def get_default_env_config():
"""Returns default config using values from dict `DEFAULT_ENV_CONFIG`."""
config = hparam.HParams(**DEFAULT_ENV_CONFIG)
config.reward_fn = RewardFunction.get_reward_fn(config.reward_fn_type)
# Update directories if set in FLAGS.
if FLAGS.scan_base_dir:
config.scan_base_dir = FLAGS.scan_base_dir
if FLAGS.data_base_dir:
config.data_base_dir = FLAGS.data_base_dir
if FLAGS.vocab_dir:
config.vocab_dir = FLAGS.vocab_dir
if FLAGS.vocab_file:
config.vocab_file = FLAGS.vocab_file
if FLAGS.image_features_dir:
config.image_features_dir = FLAGS.image_features_dir
return config
avg_line_length: 35.616114 | max_line_length: 79 | alphanum_fraction: 0.720958