hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aceb544c697fede5dcc3fd37662f58511f3bad9e | 830 | py | Python | tests/settings.py | jannon/drf-any-permissions | 128d29f6e70148ee708724123301021bf8b5fd9a | [
"MIT"
] | null | null | null | tests/settings.py | jannon/drf-any-permissions | 128d29f6e70148ee708724123301021bf8b5fd9a | [
"MIT"
] | null | null | null | tests/settings.py | jannon/drf-any-permissions | 128d29f6e70148ee708724123301021bf8b5fd9a | [
"MIT"
] | null | null | null | # Import all of the settings from the global settings file.
# This allows us to have our own custom settings for running tests.
from django.conf.global_settings import *
import os
# Absolute path to the directory that holds this settings module.
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))

# Run the test suite with full debugging enabled.
DEBUG = True
TEMPLATE_DEBUG = True

ROOT_URLCONF = "tests.urls"

MIDDLEWARE_CLASSES = (
    "django.middleware.common.CommonMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
)

INSTALLED_APPS = [
    # Minimal Django contrib apps needed for auth-based permission tests.
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    # The package under test plus its test app.
    "rest_any_permissions",
    "tests",
]

DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": "drf_any_perms.db",
    }
}
| 23.714286 | 67 | 0.715663 |
aceb5587f28e0e000a8a477ed06c0df13c4120de | 730 | py | Python | electrum/version.py | zumbrunn/electrumfair | 41f08b7664f7c28f5cb76ad6a674093b3671d1e7 | [
"MIT"
] | 4 | 2017-01-13T00:08:14.000Z | 2019-04-21T02:51:33.000Z | electrum/version.py | zumbrunn/electrumfair | 41f08b7664f7c28f5cb76ad6a674093b3671d1e7 | [
"MIT"
] | 13 | 2017-02-20T16:24:37.000Z | 2021-04-21T08:29:34.000Z | electrum/version.py | zumbrunn/electrumfair | 41f08b7664f7c28f5cb76ad6a674093b3671d1e7 | [
"MIT"
] | 8 | 2017-01-14T14:45:56.000Z | 2020-01-21T13:30:39.000Z | ELECTRUMFAIR_VERSION = '3.3.4' # version of the client package
APK_VERSION = '3.3.4.0'    # read by buildozer.spec
PROTOCOL_VERSION = '1.4'   # protocol version requested

# The hash of the mnemonic seed must begin with this
SEED_PREFIX = '01'          # Standard wallet
SEED_PREFIX_SW = '100'      # Segwit wallet
SEED_PREFIX_2FA = '101'     # Two-factor authentication
SEED_PREFIX_2FA_SW = '102'  # Two-factor auth, using segwit


def seed_prefix(seed_type):
    """Return the hash prefix a mnemonic of *seed_type* must produce.

    Unknown seed types yield None, matching the historical fall-through
    behaviour of the original if/elif chain.
    """
    return {
        'standard': SEED_PREFIX,
        'segwit': SEED_PREFIX_SW,
        '2fa': SEED_PREFIX_2FA,
        '2fa_segwit': SEED_PREFIX_2FA_SW,
    }.get(seed_type)
| 34.761905 | 64 | 0.669863 |
aceb560d3c057ebbb9bf412a2c9ab850c6e2e865 | 669 | py | Python | features/fixtures.py | TomMalkin/Sqema | d7cf6f36a8ed9b6693cdc4f151ae4936ced7a0d7 | [
"MIT"
] | null | null | null | features/fixtures.py | TomMalkin/Sqema | d7cf6f36a8ed9b6693cdc4f151ae4936ced7a0d7 | [
"MIT"
] | 10 | 2019-07-07T06:21:15.000Z | 2019-10-29T00:09:44.000Z | features/fixtures.py | Harlekuin/Sqema | d7cf6f36a8ed9b6693cdc4f151ae4936ced7a0d7 | [
"MIT"
] | null | null | null | """BDD fixtures."""
import os
from behave import fixture
@fixture
def sqlite_database(context):
    """Behave fixture: keep temporary sqlite databases from leaking.

    Removes any leftover databases before the scenario runs and registers
    the same cleanup to run again once the scenario finishes.
    """
    sqlite_cleanup()  # start from a clean slate
    context.add_cleanup(sqlite_cleanup)
def sqlite_cleanup():
    """Delete every known temporary test database, ignoring missing files."""
    known_databases = (
        "/tmp/database.db",
        "/tmp/prod-database.db",
        "/tmp/dev-database.db",
        "/tmp/test-database.db",
        "/tmp/prod-database2.db",
        "/tmp/dev-database2.db",
        "/tmp/test-database2.db",
    )
    for db_path in known_databases:
        # OSError covers both "file does not exist" and permission issues;
        # cleanup is best-effort, so either is silently ignored.
        try:
            os.remove(db_path)
        except OSError:
            pass
| 20.90625 | 61 | 0.593423 |
aceb5682f663c7a168000cff399d6fbf7cfc8039 | 8,415 | py | Python | test/functional/proxy_test.py | otherdeniz/denizlite | b066f3a3bea93377039c9a2acf92e8582e627a28 | [
"MIT"
] | null | null | null | test/functional/proxy_test.py | otherdeniz/denizlite | b066f3a3bea93377039c9a2acf92e8582e627a28 | [
"MIT"
] | null | null | null | test/functional/proxy_test.py | otherdeniz/denizlite | b066f3a3bea93377039c9a2acf92e8582e627a28 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind with different proxy configuration.
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) an observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE  # Start after p2p and rpc ports


class ProxyTest(BitcoinTestFramework):
    def __init__(self):
        """Four nodes, each with a different -proxy/-onion configuration."""
        super().__init__()
        self.num_nodes = 4
        self.setup_clean_chain = False

    def setup_nodes(self):
        """Start the SOCKS5 proxy threads, then the nodes that use them."""
        self.have_ipv6 = test_ipv6_local()
        # Create two proxies on different ports
        # ... one unauthenticated
        self.conf1 = Socks5Configuration()
        # PID-based offset keeps concurrent test runs from colliding on ports.
        self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
        self.conf1.unauth = True
        self.conf1.auth = False
        # ... one supporting authenticated and unauthenticated (Tor)
        self.conf2 = Socks5Configuration()
        self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
        self.conf2.unauth = True
        self.conf2.auth = True
        if self.have_ipv6:
            # ... one on IPv6 with similar configuration
            self.conf3 = Socks5Configuration()
            self.conf3.af = socket.AF_INET6
            self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
            self.conf3.unauth = True
            self.conf3.auth = True
        else:
            self.log.warning("Testing without local IPv6 support")

        self.serv1 = Socks5Server(self.conf1)
        self.serv1.start()
        self.serv2 = Socks5Server(self.conf2)
        self.serv2.start()
        if self.have_ipv6:
            self.serv3 = Socks5Server(self.conf3)
            self.serv3.start()

        # Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
        args = [
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
            ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
            []
        ]
        if self.have_ipv6:
            args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
        self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args=args)

    def node_test(self, node, proxies, auth, test_onion=True):
        """Issue addnode(..., 'onetry') for each address family and check the
        SOCKS5 command each proxy in *proxies* receives.

        When *auth* is False, also assert no credentials were sent.
        Returns the list of captured Socks5Command objects.
        """
        rv = []
        # Test: outgoing IPv4 connection through node
        node.addnode("15.61.23.23:1234", "onetry")
        cmd = proxies[0].queue.get()
        assert(isinstance(cmd, Socks5Command))
        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, b"15.61.23.23")
        assert_equal(cmd.port, 1234)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)

        if self.have_ipv6:
            # Test: outgoing IPv6 connection through node
            node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
            cmd = proxies[1].queue.get()
            assert(isinstance(cmd, Socks5Command))
            # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
            assert_equal(cmd.port, 5443)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)

        if test_onion:
            # Test: outgoing onion connection through node
            node.addnode("bitcoinostk4e4re.onion:4127", "onetry")
            cmd = proxies[2].queue.get()
            assert(isinstance(cmd, Socks5Command))
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
            assert_equal(cmd.port, 4127)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)

        # Test: outgoing DNS name connection through node
        node.addnode("node.noumenon:4127", "onetry")
        cmd = proxies[3].queue.get()
        assert(isinstance(cmd, Socks5Command))
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, b"node.noumenon")
        assert_equal(cmd.port, 4127)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)

        return rv

    def run_test(self):
        """Exercise each node's proxy config, then verify getnetworkinfo."""
        # basic -proxy
        self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)

        # -proxy plus -onion
        self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)

        # -proxy plus -onion, -proxyrandomize
        rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
        # Check that credentials as used for -proxyrandomize connections are unique
        credentials = set((x.username,x.password) for x in rv)
        assert_equal(len(credentials), len(rv))

        if self.have_ipv6:
            # proxy on IPv6 localhost
            self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)

        def networks_dict(d):
            # Index the getnetworkinfo()['networks'] list by network name.
            r = {}
            for x in d['networks']:
                r[x['name']] = x
            return r

        # test RPC getnetworkinfo
        n0 = networks_dict(self.nodes[0].getnetworkinfo())
        for net in ['ipv4','ipv6','onion']:
            assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
            assert_equal(n0[net]['proxy_randomize_credentials'], True)
        assert_equal(n0['onion']['reachable'], True)

        n1 = networks_dict(self.nodes[1].getnetworkinfo())
        for net in ['ipv4','ipv6']:
            assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
            assert_equal(n1[net]['proxy_randomize_credentials'], False)
        assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
        assert_equal(n1['onion']['proxy_randomize_credentials'], False)
        assert_equal(n1['onion']['reachable'], True)

        n2 = networks_dict(self.nodes[2].getnetworkinfo())
        for net in ['ipv4','ipv6','onion']:
            assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
            assert_equal(n2[net]['proxy_randomize_credentials'], True)
        assert_equal(n2['onion']['reachable'], True)

        if self.have_ipv6:
            n3 = networks_dict(self.nodes[3].getnetworkinfo())
            for net in ['ipv4','ipv6']:
                assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
                assert_equal(n3[net]['proxy_randomize_credentials'], False)
            assert_equal(n3['onion']['reachable'], False)


if __name__ == '__main__':
    ProxyTest().main()
| 41.25 | 121 | 0.625074 |
aceb575305b600e7800a887e24c867105c412f08 | 12,499 | py | Python | third_party/webrtc/src/chromium/src/tools/json_schema_compiler/preview.py | bopopescu/webrtc-streaming-node | 727a441204344ff596401b0253caac372b714d91 | [
"MIT"
] | 27 | 2016-04-27T01:02:03.000Z | 2021-12-13T08:53:19.000Z | tools/json_schema_compiler/preview.py | emilio/chromium.src | 1bd0cf3997f947746c0fc5406a2466e7b5f6159e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2017-03-09T09:00:50.000Z | 2017-09-21T15:48:20.000Z | tools/json_schema_compiler/preview.py | emilio/chromium.src | 1bd0cf3997f947746c0fc5406a2466e7b5f6159e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 17 | 2016-04-27T02:06:39.000Z | 2019-12-18T08:07:00.000Z | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Server for viewing the compiled C++ code from tools/json_schema_compiler.
"""
import cc_generator
import code
import cpp_type_generator
import cpp_util
import h_generator
import idl_schema
import json_schema
import model
import optparse
import os
import shlex
import urlparse
from highlighters import (
pygments_highlighter, none_highlighter, hilite_me_highlighter)
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from cpp_namespace_environment import CppNamespaceEnvironment
from schema_loader import SchemaLoader
class CompilerHandler(BaseHTTPRequestHandler):
  """A HTTPRequestHandler that outputs the result of tools/json_schema_compiler.
  """
  def do_GET(self):
    """Serve a GET request: directories render the three-pane previewer,
    files render their compiled C++ (.h or .cc)."""
    parsed_url = urlparse.urlparse(self.path)
    request_path = self._GetRequestPath(parsed_url)
    chromium_favicon = 'http://codereview.chromium.org/static/favicon.ico'
    head = code.Code()
    head.Append('<link rel="icon" href="%s">' % chromium_favicon)
    head.Append('<link rel="shortcut icon" href="%s">' % chromium_favicon)
    body = code.Code()
    try:
      if os.path.isdir(request_path):
        self._ShowPanels(parsed_url, head, body)
      else:
        self._ShowCompiledFile(parsed_url, head, body)
    finally:
      # Always emit a complete HTML document, even when rendering raised.
      self.wfile.write('<html><head>')
      self.wfile.write(head.Render())
      self.wfile.write('</head><body>')
      self.wfile.write(body.Render())
      self.wfile.write('</body></html>')

  def _GetRequestPath(self, parsed_url, strip_nav=False):
    """Get the relative path from the current directory to the requested file.
    """
    path = parsed_url.path
    if strip_nav:
      path = parsed_url.path.replace('/nav', '')
    return os.path.normpath(os.curdir + path)

  def _ShowPanels(self, parsed_url, head, body):
    """Show the previewer frame structure.
    Code panes are populated via XHR after links in the nav pane are clicked.
    """
    (head.Append('<style>')
        .Append('body {')
        .Append(' margin: 0;')
        .Append('}')
        .Append('.pane {')
        .Append(' height: 100%;')
        .Append(' overflow-x: auto;')
        .Append(' overflow-y: scroll;')
        .Append(' display: inline-block;')
        .Append('}')
        .Append('#nav_pane {')
        .Append(' width: 20%;')
        .Append('}')
        .Append('#nav_pane ul {')
        .Append(' list-style-type: none;')
        .Append(' padding: 0 0 0 1em;')
        .Append('}')
        .Append('#cc_pane {')
        .Append(' width: 40%;')
        .Append('}')
        .Append('#h_pane {')
        .Append(' width: 40%;')
        .Append('}')
        .Append('</style>')
    )
    # Nav pane is rendered server-side; the h/cc panes start empty and are
    # filled by the Javascript below.
    body.Append(
        '<div class="pane" id="nav_pane">%s</div>'
        '<div class="pane" id="h_pane"></div>'
        '<div class="pane" id="cc_pane"></div>' %
        self._RenderNavPane(parsed_url.path[1:])
    )
    # The Javascript that interacts with the nav pane and panes to show the
    # compiled files as the URL or highlighting options change.
    body.Append('''<script type="text/javascript">
// Calls a function for each highlighter style <select> element.
function forEachHighlighterStyle(callback) {
var highlighterStyles =
document.getElementsByClassName('highlighter_styles');
for (var i = 0; i < highlighterStyles.length; ++i)
callback(highlighterStyles[i]);
}
// Called when anything changes, such as the highlighter or hashtag.
function updateEverything() {
var highlighters = document.getElementById('highlighters');
var highlighterName = highlighters.value;
// Cache in localStorage for when the page loads next.
localStorage.highlightersValue = highlighterName;
// Show/hide the highlighter styles.
var highlighterStyleName = '';
forEachHighlighterStyle(function(highlighterStyle) {
if (highlighterStyle.id === highlighterName + '_styles') {
highlighterStyle.removeAttribute('style')
highlighterStyleName = highlighterStyle.value;
} else {
highlighterStyle.setAttribute('style', 'display:none')
}
// Cache in localStorage for when the page next loads.
localStorage[highlighterStyle.id + 'Value'] = highlighterStyle.value;
});
// Populate the code panes.
function populateViaXHR(elementId, requestPath) {
var xhr = new XMLHttpRequest();
xhr.onreadystatechange = function() {
if (xhr.readyState != 4)
return;
if (xhr.status != 200) {
alert('XHR error to ' + requestPath);
return;
}
document.getElementById(elementId).innerHTML = xhr.responseText;
};
xhr.open('GET', requestPath, true);
xhr.send();
}
var targetName = window.location.hash;
targetName = targetName.substring('#'.length);
targetName = targetName.split('.', 1)[0]
if (targetName !== '') {
var basePath = window.location.pathname;
var query = 'highlighter=' + highlighterName + '&' +
'style=' + highlighterStyleName;
populateViaXHR('h_pane', basePath + '/' + targetName + '.h?' + query);
populateViaXHR('cc_pane', basePath + '/' + targetName + '.cc?' + query);
}
}
// Initial load: set the values of highlighter and highlighterStyles from
// localStorage.
(function() {
var cachedValue = localStorage.highlightersValue;
if (cachedValue)
document.getElementById('highlighters').value = cachedValue;
forEachHighlighterStyle(function(highlighterStyle) {
var cachedValue = localStorage[highlighterStyle.id + 'Value'];
if (cachedValue)
highlighterStyle.value = cachedValue;
});
})();
window.addEventListener('hashchange', updateEverything, false);
updateEverything();
</script>''')

  def _ShowCompiledFile(self, parsed_url, head, body):
    """Show the compiled version of a json or idl file given the path to the
    compiled file.
    """
    api_model = model.Model()
    request_path = self._GetRequestPath(parsed_url)
    (file_root, file_ext) = os.path.splitext(request_path)
    (filedir, filename) = os.path.split(file_root)
    schema_loader = SchemaLoader("./",
                                 filedir,
                                 self.server.include_rules,
                                 self.server.cpp_namespace_pattern)
    try:
      # Get main file.
      namespace = schema_loader.ResolveNamespace(filename)
      type_generator = cpp_type_generator.CppTypeGenerator(
          api_model,
          schema_loader,
          namespace)
      # Generate code
      if file_ext == '.h':
        cpp_code = (h_generator.HGenerator(type_generator)
            .Generate(namespace).Render())
      elif file_ext == '.cc':
        cpp_code = (cc_generator.CCGenerator(type_generator)
            .Generate(namespace).Render())
      else:
        self.send_error(404, "File not found: %s" % request_path)
        return
      # Do highlighting on the generated code
      (highlighter_param, style_param) = self._GetHighlighterParams(parsed_url)
      head.Append('<style>' +
          self.server.highlighters[highlighter_param].GetCSS(style_param) +
          '</style>')
      body.Append(self.server.highlighters[highlighter_param]
          .GetCodeElement(cpp_code, style_param))
    except IOError:
      self.send_error(404, "File not found: %s" % request_path)
      return
    except (TypeError, KeyError, AttributeError,
        AssertionError, NotImplementedError) as error:
      # Surface compiler failures in the pane, then re-raise so the full
      # traceback lands in the server log.
      body.Append('<pre>')
      body.Append('compiler error: %s' % error)
      body.Append('Check server log for more details')
      body.Append('</pre>')
      raise

  def _GetHighlighterParams(self, parsed_url):
    """Get the highlighting parameters from a parsed url.
    """
    query_dict = urlparse.parse_qs(parsed_url.query)
    return (query_dict.get('highlighter', ['pygments'])[0],
            query_dict.get('style', ['colorful'])[0])

  def _RenderNavPane(self, path):
    """Renders an HTML nav pane.
    This consists of a select element to set highlight style, and a list of all
    files at |path| with the appropriate onclick handlers to open either
    subdirectories or JSON files.
    """
    html = code.Code()
    # Highlighter chooser.
    html.Append('<select id="highlighters" onChange="updateEverything()">')
    for name, highlighter in self.server.highlighters.items():
      html.Append('<option value="%s">%s</option>' %
          (name, highlighter.DisplayName()))
    html.Append('</select>')
    html.Append('<br/>')
    # Style for each highlighter.
    # The correct highlighting will be shown by Javascript.
    for name, highlighter in self.server.highlighters.items():
      styles = sorted(highlighter.GetStyles())
      if not styles:
        continue
      html.Append('<select class="highlighter_styles" id="%s_styles" '
                  'onChange="updateEverything()">' % name)
      for style in styles:
        html.Append('<option>%s</option>' % style)
      html.Append('</select>')
    html.Append('<br/>')
    # The files, with appropriate handlers.
    html.Append('<ul>')
    # Make path point to a non-empty directory. This can happen if a URL like
    # http://localhost:8000 is navigated to.
    if path == '':
      path = os.curdir
    # Firstly, a .. link if this isn't the root.
    if not os.path.samefile(os.curdir, path):
      normpath = os.path.normpath(os.path.join(path, os.pardir))
      html.Append('<li><a href="/%s">%s/</a>' % (normpath, os.pardir))
    # Each file under path/
    for filename in sorted(os.listdir(path)):
      full_path = os.path.join(path, filename)
      _, file_ext = os.path.splitext(full_path)
      if os.path.isdir(full_path) and not full_path.endswith('.xcodeproj'):
        html.Append('<li><a href="/%s/">%s/</a>' % (full_path, filename))
      elif file_ext in ['.json', '.idl']:
        # cc/h panes will automatically update via the hash change event.
        html.Append('<li><a href="#%s">%s</a>' %
            (filename, filename))
    html.Append('</ul>')
    return html.Render()
class PreviewHTTPServer(HTTPServer, object):
  """HTTPServer that carries the previewer configuration for its handler.

  CompilerHandler reads these attributes via self.server on every request.
  """
  def __init__(self,
               server_address,
               handler,
               highlighters,
               include_rules,
               cpp_namespace_pattern):
    super(PreviewHTTPServer, self).__init__(server_address, handler)
    # name -> highlighter instance used to colorize generated C++.
    self.highlighters = highlighters
    # [path, namespace] pairs searched when resolving referenced objects.
    self.include_rules = include_rules
    # C++ namespace pattern for generated files, e.g. extensions::api.
    self.cpp_namespace_pattern = cpp_namespace_pattern
if __name__ == '__main__':
  parser = optparse.OptionParser(
      description='Runs a server to preview the json_schema_compiler output.',
      usage='usage: %prog [option]...')
  parser.add_option('-p', '--port', default='8000',
      help='port to run the server on')
  parser.add_option('-n', '--namespace', default='generated_api_schemas',
      help='C++ namespace for generated files. e.g extensions::api.')
  parser.add_option('-I', '--include-rules',
      help='A list of paths to include when searching for referenced objects,'
      ' with the namespace separated by a \':\'. Example: '
      '/foo/bar:Foo::Bar::%(namespace)s')
  (opts, argv) = parser.parse_args()

  def split_path_and_namespace(path_and_namespace):
    # Each -I rule is "path:namespace"; reject entries missing the colon.
    if ':' not in path_and_namespace:
      raise ValueError('Invalid include rule "%s". Rules must be of '
                       'the form path:namespace' % path_and_namespace)
    return path_and_namespace.split(':', 1)

  include_rules = []
  if opts.include_rules:
    # shlex handles quoting/whitespace in the rule list.
    include_rules = map(split_path_and_namespace,
                        shlex.split(opts.include_rules))

  try:
    print('Starting previewserver on port %s' % opts.port)
    print('The extension documentation can be found at:')
    print('')
    print(' http://localhost:%s/chrome/common/extensions/api' % opts.port)
    print('')

    highlighters = {
      'hilite': hilite_me_highlighter.HiliteMeHighlighter(),
      'none': none_highlighter.NoneHighlighter()
    }
    # pygments is optional; fall back silently if it is not installed.
    try:
      highlighters['pygments'] = pygments_highlighter.PygmentsHighlighter()
    except ImportError as e:
      pass

    server = PreviewHTTPServer(('', int(opts.port)),
                               CompilerHandler,
                               highlighters,
                               include_rules,
                               opts.namespace)
    server.serve_forever()
  except KeyboardInterrupt:
    # Ctrl-C: close the listening socket and exit cleanly.
    server.socket.close()
| 34.337912 | 80 | 0.645172 |
aceb57c59554597f16b8c8adc82babc2dcd98a98 | 250 | py | Python | Easy/replace_elements_with_greatest_elements_on_the_right_side.py | BrynjarGeir/LeetCode | dbd57e645c5398dec538b6466215b61491c8d1d9 | [
"MIT"
] | null | null | null | Easy/replace_elements_with_greatest_elements_on_the_right_side.py | BrynjarGeir/LeetCode | dbd57e645c5398dec538b6466215b61491c8d1d9 | [
"MIT"
] | null | null | null | Easy/replace_elements_with_greatest_elements_on_the_right_side.py | BrynjarGeir/LeetCode | dbd57e645c5398dec538b6466215b61491c8d1d9 | [
"MIT"
] | null | null | null | class Solution:
def replaceElements(self, arr: List[int]) -> List[int]:
n = len(arr); ans = [0]*n
for i in range(n):
if i == n-1: ans[i] = -1
else:
ans[i] = max(arr[i+1:])
return ans | 31.25 | 59 | 0.444 |
aceb58bc75700f1c45a63a8e383023222c60aae8 | 1,186 | py | Python | test/util/Dredd.py | Compositional/orders-aws | da3bc7a31df18d3a45c2907bde31857540d55516 | [
"Apache-2.0"
] | 1 | 2018-07-08T20:33:35.000Z | 2018-07-08T20:33:35.000Z | test/util/Dredd.py | Compositional/orders-aws | da3bc7a31df18d3a45c2907bde31857540d55516 | [
"Apache-2.0"
] | null | null | null | test/util/Dredd.py | Compositional/orders-aws | da3bc7a31df18d3a45c2907bde31857540d55516 | [
"Apache-2.0"
] | null | null | null | from util.Docker import Docker
from util.Api import Api
import os
import unittest
class Dredd:
    """Runs the Dredd OpenAPI contract tester against a service, via Docker."""

    image = 'weaveworksdemos/openapi:snapshot'
    container_name = ''

    def test_against_endpoint(self, service, api_endpoint, links=None, env=None, dump_streams=False):
        """Validate *service*'s OpenAPI spec against a live *api_endpoint*.

        :param service: spec name; /tmp/specs/<service>.json inside the container
        :param api_endpoint: URL of the running service under test
        :param links: optional container names to --link into the test container
        :param env: optional (name, value) pairs passed as --env entries
        :param dump_streams: forwarded to Docker().execute for verbose output
        :return: the captured output of the Dredd container run
        """
        self.container_name = Docker().random_container_name('openapi')
        command = ['docker', 'run',
                   '-h', 'openapi',
                   '--name', self.container_name,
                   '-v', "{0}:{1}".format(os.getcwd() + "/api-spec/", "/tmp/specs/")]
        # None defaults avoid the shared-mutable-default-argument pitfall;
        # plain for-loops replace list comprehensions used only for side effects.
        for link in (links or []):
            command.extend(["--link", link])
        for entry in (env or []):
            command.extend(["--env", "{}={}".format(entry[0], entry[1])])
        command.extend([Dredd.image,
                        "/tmp/specs/{0}.json".format(service),
                        api_endpoint,
                        "--level", "verbose",
                        "-f",
                        # fixed: stray .format(service) on a placeholder-free string
                        "/tmp/specs/hooks.js"])
        out = Docker().execute(command, dump_streams=dump_streams)
        Docker().kill_and_remove(self.container_name)
        return out
| 37.0625 | 97 | 0.507589 |
aceb59e25b0dd75eb65101581e909f12f91491dc | 6,440 | py | Python | benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py | vuanvin/pytorch | 9267fd8d7395074001ad7cf2a8f28082dbff6b0b | [
"Intel"
] | 5 | 2018-04-24T13:41:12.000Z | 2019-07-09T07:32:09.000Z | benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py | vuanvin/pytorch | 9267fd8d7395074001ad7cf2a8f28082dbff6b0b | [
"Intel"
] | 14 | 2021-10-14T06:58:50.000Z | 2021-12-17T11:51:07.000Z | benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py | vuanvin/pytorch | 9267fd8d7395074001ad7cf2a8f28082dbff6b0b | [
"Intel"
] | 7 | 2020-08-31T22:49:59.000Z | 2020-09-15T14:29:07.000Z | import torch
from torch.autograd import functional
import time
from argparse import ArgumentParser
from collections import defaultdict
from typing import NamedTuple, Callable, List, Any
import ppl_models
import vision_models
import audio_text_models
from utils import to_markdown_table, TimingResultType, InputsType, GetterType, VType
def get_task_func(task: str) -> Callable:
    """Resolve a benchmark task name to the callable that executes it.

    The four "vectorized" tasks are thin wrappers over functional.hessian /
    functional.jacobian; every other name resolves to the attribute of
    torch.autograd.functional with the same name.
    """

    def hessian_fwdrev(model, inp, strict=None):
        return functional.hessian(model, inp, strict=False, vectorize=True,
                                  outer_jacobian_strategy="forward-mode")

    def hessian_revrev(model, inp, strict=None):
        return functional.hessian(model, inp, strict=False, vectorize=True)

    def jacfwd(model, inp, strict=None):
        return functional.jacobian(model, inp, strict=False, vectorize=True,
                                   strategy="forward-mode")

    def jacrev(model, inp, strict=None):
        return functional.jacobian(model, inp, strict=False, vectorize=True)

    vectorized_wrappers = {
        "hessian_fwdrev": hessian_fwdrev,
        "hessian_revrev": hessian_revrev,
        "jacfwd": jacfwd,
        "jacrev": jacrev,
    }
    if task in vectorized_wrappers:
        return vectorized_wrappers[task]
    return getattr(functional, task)
# Listing of the different tasks
FAST_TASKS_NO_DOUBLE_BACK = [
    "vjp",
]

FAST_TASKS = FAST_TASKS_NO_DOUBLE_BACK + [
    "vhp",
    "jvp",
]

ALL_TASKS_NON_VECTORIZED = FAST_TASKS + [
    "hvp",
    "jacobian",
    "hessian"
]

# Tasks that internally require a double backward pass.
DOUBLE_BACKWARD_TASKS = ["jvp", "hvp", "vhp", "hessian"]

# Tasks implemented through the vectorized wrappers in get_task_func().
VECTORIZED_TASKS = ["hessian_fwdrev", "hessian_revrev", "jacfwd", "jacrev"]

ALL_TASKS = ALL_TASKS_NON_VECTORIZED + VECTORIZED_TASKS
# Model definition which contains:
# - name: a string with the model name.
# - getter: a function to get the model. It takes as input the device on which the model
# will run. It should return the forward function and the parameters (Tensors) used as
# input for the forward function. Note that the forward must *not* have any side effect.
# - tasks: the list of recommended tasks that can run in a reasonable amount of time with this model.
# - unsupported: the list of tasks that this model cannot run.
class ModelDef(NamedTuple):
    """Static description of one benchmarkable model."""
    name: str                # human-readable model name
    getter: GetterType       # device -> (forward_fn, params); forward must be side-effect free
    tasks: List[str]         # recommended tasks that run in reasonable time
    unsupported: List[str]   # tasks this model cannot run
# Registry of every model the benchmark can run, with its task lists.
MODELS = [
    ModelDef("resnet18", vision_models.get_resnet18, FAST_TASKS, []),
    ModelDef("fcn_resnet", vision_models.get_fcn_resnet, FAST_TASKS, []),
    ModelDef("detr", vision_models.get_detr, FAST_TASKS, []),
    ModelDef("ppl_simple_reg", ppl_models.get_simple_regression, ALL_TASKS, []),
    ModelDef("ppl_robust_reg", ppl_models.get_robust_regression, ALL_TASKS, []),
    ModelDef("wav2letter", audio_text_models.get_wav2letter, FAST_TASKS, []),
    # deepspeech cannot run the tasks that need a double backward pass.
    ModelDef("deepspeech", audio_text_models.get_deepspeech, FAST_TASKS_NO_DOUBLE_BACK, DOUBLE_BACKWARD_TASKS),
    ModelDef("transformer", audio_text_models.get_transformer, FAST_TASKS, []),
    ModelDef("multiheadattn", audio_text_models.get_multiheadattn, FAST_TASKS, []),
]
def get_v_for(model: Callable, inp: InputsType, task: str) -> VType:
    """Build the auxiliary vector ``v`` that *task* requires (None if unused)."""
    if task == "vjp":
        # v lives in the model's output space.
        return torch.rand_like(model(*inp))
    if task in ("jvp", "hvp", "vhp"):
        # v lives in the input space and mirrors the input's tuple-ness.
        if isinstance(inp, tuple):
            return tuple(torch.rand_like(entry) for entry in inp)
        return torch.rand_like(inp)
    return None
def run_once(model: Callable, inp: InputsType, task: str, v: VType) -> None:
    """Execute *task* once on (model, inp); the result is discarded."""
    task_fn = get_task_func(task)
    if v is None:
        task_fn(model, inp, strict=True)
    else:
        task_fn(model, inp, v=v, strict=True)
def run_model(model_getter: GetterType, args: Any, task: str) -> List[float]:
    """Time *task* on the model produced by *model_getter*.

    Runs one untimed warmup, then args.num_iters timed iterations.
    Returns the per-iteration wall-clock times in seconds.
    """
    if args.gpu == -1:
        device = torch.device("cpu")

        def noop():
            pass
        do_sync = noop
    else:
        device = torch.device("cuda:{}".format(args.gpu))
        # CUDA kernels launch asynchronously; synchronize around the timed region.
        do_sync = torch.cuda.synchronize

    model, inp = model_getter(device)
    v = get_v_for(model, inp, task)
    # Warmup
    run_once(model, inp, task, v)

    elapsed = []
    for it in range(args.num_iters):
        do_sync()
        start = time.time()
        run_once(model, inp, task, v)
        do_sync()
        elapsed.append(time.time() - start)

    return elapsed
def main():
    """Parse CLI flags, benchmark each (model, task) pair, report timings."""
    parser = ArgumentParser("Main script to benchmark functional API of the autograd.")
    parser.add_argument("--output", type=str, default="", help="Text file where to write the output")
    parser.add_argument("--num-iters", type=int, default=10)
    parser.add_argument("--gpu", type=int, default=-2, help="GPU to use, -1 for CPU and -2 for auto-detect")
    parser.add_argument("--run-slow-tasks", action="store_true", help="Run even the slow tasks")
    parser.add_argument("--model-filter", type=str, default="", help="Only run the models in this filter")
    parser.add_argument("--task-filter", type=str, default="", help="Only run the tasks in this filter")
    parser.add_argument("--num-threads", type=int, default=10,
                        help="Number of concurrent threads to use when running on cpu")
    parser.add_argument("--seed", type=int, default=0, help="The random seed to use.")
    args = parser.parse_args()

    # results[model_name][task] = (mean_seconds, variance)
    results: TimingResultType = defaultdict(defaultdict)
    torch.set_num_threads(args.num_threads)
    torch.set_num_interop_threads(args.num_threads)

    # This automatically seed cuda if it is available
    torch.manual_seed(args.seed)

    if args.gpu == -2:
        args.gpu = 0 if torch.cuda.is_available() else -1

    for name, model_getter, recommended_tasks, unsupported_tasks in MODELS:
        if args.model_filter and name not in args.model_filter:
            continue
        tasks = ALL_TASKS if args.run_slow_tasks else recommended_tasks
        for task in tasks:
            if task in unsupported_tasks:
                continue
            if args.task_filter and task not in args.task_filter:
                continue
            runtimes = run_model(model_getter, args, task)

            runtimes = torch.tensor(runtimes)
            mean, var = runtimes.mean(), runtimes.var()
            results[name][task] = (mean.item(), var.item())
            print("Results for model {} on task {}: {}s (var: {})".format(name, task, mean, var))

    if args.output:
        with open(args.output, "w") as f:
            f.write(to_markdown_table(results))


if __name__ == "__main__":
    main()
| 35.384615 | 115 | 0.670342 |
aceb5abb7a622445a81afa224e19634f8da7823a | 1,150 | py | Python | src/apiron/endpoint/json.py | chankeypathak/apiron | 0a2cbb0c6f27b44d46ea2b2720bb007b5f56b346 | [
"MIT"
] | null | null | null | src/apiron/endpoint/json.py | chankeypathak/apiron | 0a2cbb0c6f27b44d46ea2b2720bb007b5f56b346 | [
"MIT"
] | null | null | null | src/apiron/endpoint/json.py | chankeypathak/apiron | 0a2cbb0c6f27b44d46ea2b2720bb007b5f56b346 | [
"MIT"
] | null | null | null | import collections
from apiron.endpoint.endpoint import Endpoint
class JsonEndpoint(Endpoint):
    """
    An endpoint that returns :mimetype:`application/json`
    """

    def __init__(
        self, *args, path="/", default_method="GET", default_params=None, required_params=None, preserve_order=False
    ):
        """Create a JSON endpoint, optionally preserving response key order."""
        super().__init__(
            path=path, default_method=default_method, default_params=default_params, required_params=required_params
        )
        self.preserve_order = preserve_order

    def format_response(self, response):
        """
        Extracts JSON data from the response

        :param requests.Response response:
            The original response from :mod:`requests`
        :return:
            The response's JSON content
        :rtype:
            :class:`collections.OrderedDict` when ``preserve_order`` is ``True``,
            otherwise a plain :class:`dict`
        """
        pairs_hook = collections.OrderedDict if self.preserve_order else None
        return response.json(object_pairs_hook=pairs_hook)

    @property
    def required_headers(self):
        """Headers sent with every request so the service responds with JSON."""
        return {"Accept": "application/json"}
| 31.081081 | 116 | 0.650435 |
aceb5ba2ac6501b650a8c5986e598546aaf34d28 | 955 | py | Python | DiffManchester.py | itungsten/miscScripts | c43378247525a2b8a48b906b3a19cf9847e4c845 | [
"MIT"
] | null | null | null | DiffManchester.py | itungsten/miscScripts | c43378247525a2b8a48b906b3a19cf9847e4c845 | [
"MIT"
] | null | null | null | DiffManchester.py | itungsten/miscScripts | c43378247525a2b8a48b906b3a19cf9847e4c845 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#coding:utf-8
import re
#hex1 = 'AAAAA56A69AA55A95995A569AA95565556' # # 0x8893CA58
# Sample capture to decode, expressed as a hex string of Manchester symbols.
hex1 = 'AAAAA56A69AA556A965A5999596AA95656'
def bintohex(s1):
    """Convert a binary string to hex, one digit per 4-bit group.

    Prints the 4-bit groups and the resulting ID, and also returns the hex
    string (the original only printed it; returning it is backward
    compatible and makes the function usable programmatically). Trailing
    bits that do not fill a whole 4-bit group are discarded, as before.
    """
    groups = re.findall('.{4}', s1)
    print('每一个hex分隔:', groups)
    # One hex digit per 4-bit group; join avoids the quadratic += build-up.
    s2 = ''.join(format(int(group, 2), 'x') for group in groups)
    print('ID:', s2)
    return s2
def diffmqst(s):
    """Differential-Manchester decode the bit string *s*.

    Each 2-bit symbol decodes to '0' when it repeats the previous symbol
    and '1' when it changes; the initial reference symbol is '01'. The
    decoded bits are printed and then handed to bintohex().
    """
    pairs = re.findall('.{2}', s)
    decoded_bits = []
    prev = '01'
    for pair in pairs:
        decoded_bits.append('0' if pair == prev else '1')
        # differential: the reference is always the previous symbol
        prev = pair
    s1 = ''.join(decoded_bits)
    print('差分曼切斯特解码:', s1)
    bintohex(s1)
def mqst(s):
    """Plain Manchester decode of the hex-symbol string *s*.

    Cannot handle the differential variant. Prints both the direct
    decoding and the per-symbol bit-swapped decoding, then runs each
    through bintohex().
    """
    mdict = {'5': '00', '6': '01', '9': '10', 'A': '11'}
    forward_bits = []
    swapped_bits = []
    for ch in s:
        pair = mdict[ch]
        forward_bits.append(pair)
        swapped_bits.append(pair[::-1])
    a1 = ''.join(forward_bits)
    a2 = ''.join(swapped_bits)
    print('曼切斯特解码: ', a1)
    print('曼切斯特解码2: ', a2)
    bintohex(a1)
    bintohex(a2)
if __name__ == '__main__':
    # Differential Manchester works on the raw bit string of the capture.
    bin1 = bin(int(hex1,16))[2:]
    diffmqst(bin1)
mqst(hex1) | 19.1 | 60 | 0.491099 |
aceb5c9546b4f0d75f7346d84f67be6b18fc11e6 | 3,914 | py | Python | piccolo/apps/asgi/commands/new.py | teners/piccolo | e5c32a4810badf39fc61e465747b7343309d7e12 | [
"MIT"
] | null | null | null | piccolo/apps/asgi/commands/new.py | teners/piccolo | e5c32a4810badf39fc61e465747b7343309d7e12 | [
"MIT"
] | null | null | null | piccolo/apps/asgi/commands/new.py | teners/piccolo | e5c32a4810badf39fc61e465747b7343309d7e12 | [
"MIT"
] | null | null | null | from __future__ import annotations
import os
import shutil
import typing as t
import black # type: ignore
import colorama # type: ignore
from jinja2 import Environment, FileSystemLoader
# Location of the bundled Starlette project template, relative to this file.
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), "templates/starlette/")

# Choices presented to the user; the bracketed index shown next to each
# name is the number the user types to select it.
SERVERS = ["uvicorn", "Hypercorn"]
ROUTERS = ["starlette", "fastapi"]
def print_instruction(message: str):
    """Print *message* to stdout highlighted in cyan."""
    print(f"{colorama.Fore.CYAN}{message}{colorama.Fore.RESET}")
def get_options_string(options: t.List[str]):
    """Render *options* as a comma separated string of 'name [index]' pairs."""
    numbered = (f"{name} [{index}]" for index, name in enumerate(options))
    return ", ".join(numbered)
def get_routing_framework() -> str:
    """Interactively ask the user which routing framework to use.

    An empty answer falls back to option 0 (starlette); a non-numeric
    answer raises ValueError from int().
    """
    print_instruction("Which routing framework?")
    router = input(f"{get_options_string(ROUTERS)}\n") or 0
    return ROUTERS[int(router)]
def get_server() -> str:
    """Interactively ask the user which ASGI server to use.

    An empty answer falls back to option 0 (uvicorn); a non-numeric
    answer raises ValueError from int().
    """
    print_instruction("Which server?")
    server = input(f"{get_options_string(SERVERS)}\n") or 0
    return SERVERS[int(server)]
def new(root: str = ".", name: str = "piccolo_project"):
    """
    Create a basic ASGI app, including Piccolo, routing, and an admin.

    :param root:
        Where to create the app e.g. /my/folder. By default it creates the
        app in the current directory.
    :param name:
        The name of the app to create - this will be used to prepopulate
        things like the database name.

    """
    tree = os.walk(TEMPLATE_DIR)

    # Ask the user for their choices up front; every template is rendered
    # with this one context.
    template_context = {
        "router": get_routing_framework(),
        "server": get_server(),
        "project_identifier": name.replace(" ", "_").lower(),
    }

    for dir_path, sub_dir_names, file_names in tree:
        output_dir_path = os.path.join(root, dir_path.split(TEMPLATE_DIR)[-1])

        if not os.path.exists(output_dir_path):
            # os.path.basename instead of split("/") so this also works on
            # Windows-style paths.
            folder_name = os.path.basename(output_dir_path)
            # Skip private/hidden template folders (e.g. __pycache__).
            if folder_name.startswith(("_", ".")):
                continue
            # Bug fix: this previously created ``dir_path`` (the template
            # source directory, which already exists) instead of the
            # destination directory.
            os.mkdir(output_dir_path)

        for sub_dir_name in sub_dir_names:
            if sub_dir_name.startswith("_"):
                continue
            sub_dir_path = os.path.join(output_dir_path, sub_dir_name)
            if not os.path.exists(sub_dir_path):
                os.mkdir(sub_dir_path)

        for file_name in file_names:
            if file_name.startswith("_"):
                continue

            # Bug fix: ``rsplit(".")[0]`` returned the file *stem*, not the
            # extension, so compiled .pyc files were never skipped.
            extension = file_name.rsplit(".", 1)[-1]
            if extension in ("pyc",):
                continue

            if file_name.endswith(".jinja"):
                # Render ``foo.py.jinja`` -> ``foo.py`` with the context.
                output_file_name = file_name.replace(".jinja", "")
                template = Environment(
                    loader=FileSystemLoader(searchpath=dir_path)
                ).get_template(file_name)

                output_contents = template.render(**template_context)

                if output_file_name.endswith(".py"):
                    # Format generated Python with black so output is tidy.
                    try:
                        output_contents = black.format_str(
                            output_contents,
                            mode=black.FileMode(line_length=80),
                        )
                    except Exception as exception:
                        print(f"Problem processing {output_file_name}")
                        raise exception

                with open(
                    os.path.join(output_dir_path, output_file_name), "w"
                ) as f:
                    f.write(output_contents)
            else:
                # ``.jinja_raw`` files are copied verbatim, ending up as
                # ``.jinja`` templates inside the generated project.
                if file_name.endswith(".jinja_raw"):
                    output_file_name = file_name.replace(
                        ".jinja_raw", ".jinja"
                    )
                else:
                    output_file_name = file_name

                shutil.copy(
                    os.path.join(dir_path, file_name),
                    os.path.join(output_dir_path, output_file_name),
                )

    print(
        "Run `pip install -r requirements.txt` and `python main.py` to get "
        "started."
    )
| 31.564516 | 79 | 0.566173 |
aceb5ca5307914a9ed89e5aeec3b0c971021d70d | 16,614 | py | Python | Triplet.py | sirpuria/DeepScoresExamples | 3b2decb8b295354f7023437144dd0152f1cb8f6a | [
"MIT"
] | null | null | null | Triplet.py | sirpuria/DeepScoresExamples | 3b2decb8b295354f7023437144dd0152f1cb8f6a | [
"MIT"
] | null | null | null | Triplet.py | sirpuria/DeepScoresExamples | 3b2decb8b295354f7023437144dd0152f1cb8f6a | [
"MIT"
] | null | null | null |
import argparse
import sys, os
import imageio
import tensorflow as tf
import Classification_BatchDataset
import TensorflowUtils as utils
import pickle
import time
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, Flatten, Dense, Input, Lambda, Layer
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import Accuracy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers
from tensorflow.keras.regularizers import l2
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.utils import shuffle
from sklearn.metrics import roc_curve,roc_auc_score
import numpy as np
import numpy.random as rng
FLAGS = None
def loadimgs(path,n = 0):
    '''
    path => Path of train directory or test directory
    n    => starting class label; labels are assigned sequentially from n

    Returns (X, y, lang_dict): X stacks one image array per kept category,
    y holds one label per kept category, and lang_dict maps each alphabet
    to its [first, last] label.
    '''
    X=[]
    y = []
    cat_dict = {}
    lang_dict = {}
    curr_y = n
    # we load every alphabet seperately so we can isolate them later
    for alphabet in os.listdir(path):
        print("loading alphabet: " + alphabet)
        lang_dict[alphabet] = [curr_y,None]
        alphabet_path = os.path.join(path,alphabet)
        # every letter/category has it's own column in the array, so load seperately
        for letter in os.listdir(alphabet_path):
            cat_dict[curr_y] = (alphabet, letter)
            category_images=[]
            letter_path = os.path.join(alphabet_path, letter)
            # read all the images in the current category
            dirlist = os.listdir(letter_path)
            # NOTE(review): categories with fewer than 2 files are skipped
            # entirely and consume no label -- confirm this is intended.
            if len(dirlist)>1:
                for filename in dirlist:
                    image_path = os.path.join(letter_path, filename)
                    image = imageio.imread(image_path)
                    category_images.append(image)
                # print(len(category_images))
                y.append(curr_y)
                try:
                    uu = np.stack(category_images)
                    X.append(uu)
                # edge case  - last one
                except ValueError as e:
                    print(e)
                    print("error - category_images:", category_images)
                    print(letter)
                curr_y += 1
        lang_dict[alphabet][1] = curr_y - 1
    # np.stack requires every kept category to hold the same number of
    # equally-sized images -- TODO confirm for the dataset used.
    y = np.vstack(y)
    X = np.stack(X)
    return X,y,lang_dict
# def initialize_weights(shape, name=None):
# """
# The paper, http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf
# suggests to initialize CNN layer weights with mean as 0.0 and standard deviation of 0.01
# """
# return tf.random.normal(shape, mean = 0.0, stddev = 0.01)
#
# def initialize_bias(shape, name=None):
# """
# The paper, http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf
# suggests to initialize CNN layer bias with mean as 0.5 and standard deviation of 0.01
# """
# return tf.random.normal(shape, mean = 0.5, stddev = 0.01)
def build_network(input_shape, embeddingsize):
    '''
    Define the neural network to learn image similarity
    Input :
            input_shape : shape of input images
            embeddingsize : vectorsize used to encode our picture
    '''
    # NOTE: the original created custom RandomNormal initializers here but
    # never used them -- every layer below uses 'he_uniform' -- so those
    # dead locals were removed.
    network = Sequential([
        Conv2D(128, (7, 7), activation='relu',
               input_shape=input_shape,
               kernel_initializer='he_uniform',
               kernel_regularizer=l2(2e-4)),
        MaxPool2D(pool_size=(3, 3)),
        Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform',
               kernel_regularizer=l2(2e-4)),
        MaxPool2D(pool_size=(3, 3)),
        Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_uniform',
               kernel_regularizer=l2(2e-4)),
        MaxPool2D(pool_size=(3, 3)),
        Flatten(),
        Dense(4096, activation='relu',
              kernel_regularizer=l2(1e-3),
              kernel_initializer='he_uniform'),
        # No activation: the embedding is normalized onto the hypersphere below.
        Dense(embeddingsize, activation=None,
              kernel_regularizer=l2(1e-3),
              kernel_initializer='he_uniform'),
        # Force the encoding to live on the d-dimentional hypershpere
        Lambda(lambda x: tf.math.l2_normalize(x, axis=-1))
    ])
    return network
class TripletLossLayer(Layer):
    """Keras layer that computes the triplet margin loss from the three
    embedding tensors (anchor, positive, negative) and registers it via
    ``add_loss`` so the surrounding model needs no external target."""

    def __init__(self, alpha, **kwargs):
        """Store the margin *alpha*; remaining kwargs go to Layer."""
        self.alpha = alpha
        super().__init__(**kwargs)

    def triplet_loss(self, inputs):
        """Return sum over the batch of max(d(a,p) - d(a,n) + alpha, 0)."""
        anchor, positive, negative = inputs
        dist_ap = tf.math.reduce_sum(tf.math.square(anchor - positive), axis=-1)
        dist_an = tf.math.reduce_sum(tf.math.square(anchor - negative), axis=-1)
        hinge = tf.math.maximum(dist_ap - dist_an + self.alpha, 0)
        return tf.math.reduce_sum(hinge, axis=0)

    def call(self, inputs):
        """Compute the loss, register it on the layer, and return it."""
        loss = self.triplet_loss(inputs)
        self.add_loss(loss)
        return loss
def build_model(input_shape, network, margin=0.2):
    '''
    Define the Keras Model for training
        Input :
            input_shape : shape of input images
            network : Neural network to train outputing embeddings
            margin : minimal distance between Anchor-Positive and
                     Anchor-Negative for the loss function (alpha)
    '''
    # One input tensor per role in the triplet.
    triplet_inputs = [
        Input(input_shape, name="anchor_input"),
        Input(input_shape, name="positive_input"),
        Input(input_shape, name="negative_input"),
    ]

    # Run all three images through the shared embedding network.
    embeddings = [network(tensor) for tensor in triplet_inputs]

    # The loss layer is the model's only output.
    loss_output = TripletLossLayer(alpha=margin, name='triplet_loss_layer')(embeddings)

    return Model(inputs=triplet_inputs, outputs=loss_output)
def compute_dist(a, b):
    """Squared Euclidean distance between two embedding vectors."""
    delta = a - b
    return np.sum(np.square(delta))
def get_batch_random(batch_size, s="train"):
    """Create a batch of (anchor, positive, negative) triplets drawn uniformly.

    anchor/positive come from the same randomly chosen class, the negative
    from a guaranteed different class. Returns a list of three
    (batch_size, h, w, 1) arrays.

    Dead code removed: the original also computed
    ``rng.choice(n_classes, size=(batch_size,), replace=False)`` into a
    variable that was never read -- besides wasting work, it raised
    ValueError whenever batch_size > n_classes.
    """
    # Split selection; Xtrain/Xval are module globals set up in __main__.
    X = Xtrain if s == 'train' else Xval
    n_classes, n_examples, h, w = X.shape

    # One empty array each for anchors, positives and negatives.
    triplets = [np.zeros((batch_size, h, w, 1)) for _ in range(3)]

    for i in range(batch_size):
        anchor_class = np.random.randint(0, n_classes)
        num_samples_for_class = X[anchor_class].shape[0]
        # Two distinct examples of the anchor class.
        [idx_A, idx_P] = np.random.choice(num_samples_for_class, size=2, replace=False)
        # Adding a non-zero offset modulo n_classes guarantees a different class.
        negative_class = (anchor_class + np.random.randint(1, n_classes)) % n_classes
        num_samples_for_n_class = X[negative_class].shape[0]
        idx_N = np.random.randint(0, num_samples_for_n_class)

        triplets[0][i, :, :, :] = X[anchor_class, idx_A].reshape(h, w, 1)
        triplets[1][i, :, :, :] = X[anchor_class, idx_P].reshape(h, w, 1)
        triplets[2][i, :, :, :] = X[negative_class, idx_N].reshape(h, w, 1)
    return triplets
def get_batch_hard(draw_batch_size, hard_batchs_size, norm_batchs_size, network, s="train"):
    """
    Create batch of APN "hard" triplets.

    Arguments:
        draw_batch_size  -- number of initial randomly taken samples
        hard_batchs_size -- number of hardest samples to keep
        norm_batchs_size -- number of random samples to add
        network          -- model producing embeddings, used to rank hardness
        s                -- split name forwarded to get_batch_random
    Returns:
        triplets -- list of 3 tensors A, P, N of shape
                    (hard_batchs_size + norm_batchs_size, w, h, c)

    Dead code removed: the original selected the split array and unpacked
    its shape, and pre-filled ``studybatchloss`` with zeros, without ever
    using either.
    """
    # Step 1: pick a random batch to study.
    studybatch = get_batch_random(draw_batch_size, s)

    # Step 2: compute d(A,P) - d(A,N); the margin alpha is omitted because
    # only the ordering matters here.
    A = network.predict(studybatch[0])
    P = network.predict(studybatch[1])
    N = network.predict(studybatch[2])
    studybatchloss = np.sum(np.square(A - P), axis=1) - np.sum(np.square(A - N), axis=1)

    # Hardest (largest loss) rows first, then pad with random other rows.
    selection = np.argsort(studybatchloss)[::-1][:hard_batchs_size]
    selection2 = np.random.choice(np.delete(np.arange(draw_batch_size), selection),
                                  norm_batchs_size, replace=False)
    selection = np.append(selection, selection2)

    triplets = [studybatch[0][selection, :, :, :],
                studybatch[1][selection, :, :, :],
                studybatch[2][selection, :, :, :]]
    return triplets
def compute_probs(network, X, Y):
    '''
    Score every image pair by (negative) squared embedding distance.

    Input
        network : current NN to compute embeddings
        X : tensor of shape (c, n, h, w) containing pics to evaluate
        Y : true class labels (NOTE(review): indexed up to m = c*n below --
            confirm callers pass one label per flattened image)
    Returns
        probs : (m*(m-1)/2,) array of -d(i,j); higher means more similar
        y     : matching array with 1 for same-class pairs, 0 otherwise

    The unused ``size_embedding`` local from the original was removed.
    '''
    c, n, h, w = X.shape
    m = c * n
    # Flatten class/example axes and add the trailing channel axis.
    images = X.reshape(m, h, w, 1)

    nbevaluation = int(m * (m - 1) / 2)
    probs = np.zeros((nbevaluation))
    y = np.zeros((nbevaluation))

    embeddings = network.predict(images)

    k = 0
    for i in range(m):
        # Against all other images
        for j in range(i + 1, m):
            # Negated distance: it should be maximal for the right class.
            probs[k] = -compute_dist(embeddings[i, :], embeddings[j, :])
            y[k] = 1 if Y[i] == Y[j] else 0
            k += 1
    return probs, y
#probs,yprobs = compute_probs(network,x_test_origin[:10,:,:,:],y_test_origin[:10])
def compute_metrics(probs,yprobs):
    '''
    Compute ROC metrics for the pairwise similarity scores.

    Returns
        fpr : Increasing false positive rates such that element i is the false positive rate of predictions with score >= thresholds[i]
        tpr : Increasing true positive rates such that element i is the true positive rate of predictions with score >= thresholds[i].
        thresholds : Decreasing thresholds on the decision function used to compute fpr and tpr. thresholds[0] represents no instances being predicted and is arbitrarily set to max(y_score) + 1
        auc : Area Under the ROC Curve metric
    '''
    # calculate AUC
    auc = roc_auc_score(yprobs, probs)
    # calculate roc curve
    fpr, tpr, thresholds = roc_curve(yprobs, probs)

    return fpr, tpr, thresholds,auc
def generate(batch_size, s="train"):
    """A generator of batches, so model.fit_generator can be used.

    Bug fix: the original body called ``get_batch(batch_size, s)``, a name
    that is not defined anywhere in this file (only get_batch_random and
    get_batch_hard exist), so iterating the generator raised NameError.
    It now yields random triplet batches with ``None`` targets, matching
    how the triplet model is trained (``train_on_batch(triplets, None)``).
    """
    while True:
        triplets = get_batch_random(batch_size, s)
        yield (triplets, None)
def make_oneshot_task(N, s="val", language=None):
    """Create pairs of test image, support set for testing N way one-shot learning.

    Returns ([test_image, support_set], targets) where targets is a
    shuffled one-hot vector marking the support image that matches the
    test image's class.
    """
    if s == 'train':
        X = Xtrain
        categories = train_classes
    else:
        X = Xval
        categories = val_classes
    n_classes, n_examples,h, w = X.shape

    # One random example index per support-set slot.
    indices = rng.randint(0, n_examples,size=(N,))
    if language is not None: # if language is specified, select characters for that language
        low, high = categories[language]
        if N > high - low:
            raise ValueError("This language ({}) has less than {} letters".format(language, N))
        categories = rng.choice(range(low,high),size=(N,),replace=False)
    else: # if no language specified just pick a bunch of random letters
        categories = rng.choice(range(n_classes),size=(N,),replace=False)
    # The first sampled category is the "true" class of the test image.
    true_category = categories[0]
    # Two distinct examples: one as the query, one in the support set.
    ex1, ex2 = rng.choice(n_examples,replace=False,size=(2,))
    test_image = np.asarray([X[true_category,ex1,:,:]]*N).reshape(N, h,w,1)
    support_set = X[categories,indices,:,:]
    support_set[0,:,:] = X[true_category,ex2]
    support_set = support_set.reshape(N, h, w,1)
    targets = np.zeros((N,))
    targets[0] = 1
    # Shuffle all three together so the true class is at a random position.
    targets, test_image, support_set = shuffle(targets, test_image, support_set)
    pairs = [test_image,support_set]

    return pairs, targets
def test_oneshot(model, N, k, s = "val", verbose = 0):
    """Test average N way one-shot learning accuracy of the embedding
    network over k one-shot tasks.

    Bug fixes:
      * the inner loop reused ``i`` and shadowed the outer task counter;
      * a trial was counted correct when ``np.argmin(par) ==
        np.argmin(targets)``, but targets is one-hot (a single 1 among
        zeros), so argmin returns the first *wrong* index; the true class
        position is ``np.argmax(targets)``.
    """
    n_correct = 0
    if verbose:
        print("Evaluating model on {} random {} way one-shot learning tasks ... \n".format(k,N))
    for trial in range(k):
        inputs, targets = make_oneshot_task(N, s)
        # Embed the repeated query image and the support set.
        probs = model.predict(inputs[0])
        probs_test = model.predict(inputs[1])
        par = np.zeros(N)
        for j in range(N):
            par[j] = compute_dist(probs[j], probs_test[j])
        # Correct when the closest support image is the true category.
        if np.argmin(par) == np.argmax(targets):
            n_correct += 1
    percent_correct = (100.0 * n_correct / k)
    if verbose:
        print("Got an average of {}% {} way one-shot learning accuracy \n".format(percent_correct,N))
    return percent_correct
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str,
                        default='dataset',
                        help='Directory for storing input data')
    parser.add_argument('--batch_size', type=int,
                        default=128)
    parser.add_argument('--epochs', type=int,
                        default=50)
    parser.add_argument('--hard', default=2, type=int)
    parser.add_argument('--normal', default=2, type=int)
    FLAGS, unparsed = parser.parse_known_args()

    dataset_dir = FLAGS.data_dir
    batch_size=FLAGS.batch_size
    hard_n = FLAGS.hard
    normal_n = FLAGS.normal
    # epochs=FLAGS.epochs
    # mode=FLAGS.mode

    # Expected dataset layout: <data_dir>/{train,validate,test}/<class>/<images>
    train_dir = os.path.join(dataset_dir, 'train')
    validation_dir = os.path.join(dataset_dir, 'validate')
    test_dir = os.path.join(dataset_dir, 'test')
    # classes= os.listdir(train_dir)
    #####################################################################
    # Build the embedding network and wrap it in the triplet-loss model.
    network = build_network((220, 120, 1), embeddingsize=64)
    network_train = build_model((220, 120, 1),network)
    optimizer = Adam(learning_rate=0.00006)
    network_train.compile(optimizer=optimizer)
    network_train.summary()
    ######################################################################
    # Load the images, cache them as pickles, then read the caches back
    # into the module-level globals used by the batch helpers.
    X,y,c = loadimgs(train_dir)
    with open(os.path.join(dataset_dir,"train.pickle"), "wb") as f:
        pickle.dump((X,c),f)
    Xval,yval,cval=loadimgs(validation_dir)
    with open(os.path.join(dataset_dir,"val.pickle"), "wb") as f:
        pickle.dump((Xval,cval),f)
    with open(os.path.join(dataset_dir, "train.pickle"), "rb") as f:
        (Xtrain, train_classes) = pickle.load(f)
    with open(os.path.join(dataset_dir, "val.pickle"), "rb") as f:
        (Xval, val_classes) = pickle.load(f)

    # evaluate_every = 1 # interval for evaluating on one-shot tasks
    # n_iter = 7500 # No. of training iterations
    # N_way = 18 # how many classes for testing one-shot tasks
    # n_val = 100 # how many one-shot tasks to validate on
    # best = -1
    #
    # print("Starting training process!")
    # print("-------------------------------------")
    # t_start = time.time()
    # history = model.fit(generate(4, "train"), steps_per_epoch=10, epochs=1, validation_data=generate(4, "validate"))
    # print(history)

    # Train on hard triplets; targets are None because TripletLossLayer
    # computes the loss internally. Accuracy is spot-checked periodically.
    for i in range(1, 32000):
        triplets = get_batch_hard(batch_size, hard_n,normal_n, network )
        #print(triplets[0].shape)
        loss = network_train.train_on_batch(triplets, None)
        if i%100==0:
            print("Loss is {}".format(loss))
        if i%500 == 0:
            a = test_oneshot(network, 18, 100, verbose=True)
            print(a)
| 35.5 | 193 | 0.633141 |
aceb5caed722a9488624c179b60605df6733fcd0 | 1,679 | py | Python | test/test_transfer_request_input.py | displague/metal-python | 96e64e9ac41025d85ff6f61693165e29e1c366db | [
"MIT"
] | null | null | null | test/test_transfer_request_input.py | displague/metal-python | 96e64e9ac41025d85ff6f61693165e29e1c366db | [
"MIT"
] | 3 | 2021-09-27T05:10:36.000Z | 2021-09-27T06:10:57.000Z | test/test_transfer_request_input.py | displague/metal-python | 96e64e9ac41025d85ff6f61693165e29e1c366db | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Metal API
This is the API for Equinix Metal. The API allows you to programmatically interact with all of your Equinix Metal resources, including devices, networks, addresses, organizations, projects, and your user account. The official API docs are hosted at <https://metal.equinix.com/developers/api>. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@equinixmetal.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import metal
from metal.types.transfer_request_input import TransferRequestInput # noqa: E501
from metal.rest import ApiException
class TestTransferRequestInput(unittest.TestCase):
    """TransferRequestInput unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a TransferRequestInput fixture.

        When *include_optional* is False only required params are set;
        when True both required and optional params are set.
        """
        # model = metal.models.transfer_request_input.TransferRequestInput()  # noqa: E501
        if include_optional:
            return TransferRequestInput(target_organization_id='')
        return TransferRequestInput()

    def testTransferRequestInput(self):
        """Test TransferRequestInput"""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
    # Run the generated unit test stubs when executed directly.
    unittest.main()
| 31.679245 | 312 | 0.699226 |
aceb5ce700b39d069d99b428a6e1f4a87bc1bbcf | 150 | py | Python | core.py | DominikPutz/lecture-spring-2021 | cce0970e261d45cbc16b3955d0659ca295ed8fc2 | [
"Apache-2.0"
] | null | null | null | core.py | DominikPutz/lecture-spring-2021 | cce0970e261d45cbc16b3955d0659ca295ed8fc2 | [
"Apache-2.0"
] | null | null | null | core.py | DominikPutz/lecture-spring-2021 | cce0970e261d45cbc16b3955d0659ca295ed8fc2 | [
"Apache-2.0"
] | 3 | 2021-03-23T14:48:38.000Z | 2022-01-13T09:45:08.000Z | def add(x, y, z=0):
"""Add two (or three) objects"""
return x + y + z
def sub(x, y):
    """Return the result of subtracting y from x."""
    difference = x - y
    return difference
| 18.75 | 36 | 0.486667 |
aceb5d9210bda0df6eb889e2acdf22c4784dfb96 | 5,887 | py | Python | ark_nlp/nn/configuration/configuration_roformer.py | confstantine/nlp-task | cb152e885bc6f6f1243a12ad90b1c715eb548736 | [
"Apache-2.0"
] | 1 | 2021-12-27T04:48:40.000Z | 2021-12-27T04:48:40.000Z | ark_nlp/nn/configuration/configuration_roformer.py | confstantine/nlp-task | cb152e885bc6f6f1243a12ad90b1c715eb548736 | [
"Apache-2.0"
] | null | null | null | ark_nlp/nn/configuration/configuration_roformer.py | confstantine/nlp-task | cb152e885bc6f6f1243a12ad90b1c715eb548736 | [
"Apache-2.0"
] | 1 | 2021-12-27T04:49:35.000Z | 2021-12-27T04:49:35.000Z |
from transformers import PretrainedConfig
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class RoFormerConfig(PretrainedConfig):
    r"""
    Configuration class storing the hyper-parameters of a RoFormer model.

    Configuration objects inherit from :class:`~transformers.PretrainedConfig`
    and can be used to control the model outputs; read its documentation for
    the inherited options.

    NOTE(review): the original docstring was copied verbatim from
    ``AlbertConfig`` and documented parameters (``inner_group_num``,
    ``classifier_dropout_prob``, ...) that do not exist on this class; it
    has been rewritten to match the actual ``__init__`` signature.

    Args:
        vocab_size (:obj:`int`, optional, defaults to 50000):
            Vocabulary size: the number of different tokens representable
            by the ``inputs_ids`` passed to the model.
        embedding_size (:obj:`int`, optional, defaults to 768):
            Dimensionality of the vocabulary embeddings.
        hidden_size (:obj:`int`, optional, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (:obj:`int`, optional, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_hidden_groups (:obj:`int`, optional, defaults to 1):
            Number of groups for the hidden layers; parameters in the same
            group are shared.
        num_attention_heads (:obj:`int`, optional, defaults to 12):
            Number of attention heads for each attention layer.
        intermediate_size (:obj:`int`, optional, defaults to 3072):
            Dimensionality of the "intermediate" (feed-forward) layer.
        hidden_act (:obj:`str` or :obj:`function`, optional, defaults to "gelu"):
            The non-linear activation function in the encoder and pooler.
        hidden_dropout_prob (:obj:`float`, optional, defaults to 0.1):
            Dropout probability for all fully connected layers in the
            embeddings, encoder, and pooler.
        attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0.1):
            Dropout ratio for the attention probabilities.
        type_vocab_size (:obj:`int`, optional, defaults to 2):
            Vocabulary size of the ``token_type_ids``.
        initializer_range (:obj:`float`, optional, defaults to 0.02):
            Standard deviation of the truncated normal initializer for
            weight matrices.
        layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
            Epsilon used by the layer normalization layers.
        pad_token_id (:obj:`int`, optional, defaults to 0):
            Index of the padding token.

    Attributes:
        pretrained_config_archive_map (Dict[str, str]):
            A dictionary containing all the available pre-trained checkpoints.
    """

    pretrained_config_archive_map = ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "roformer"

    def __init__(self,
                 vocab_size=50000,
                 embedding_size=768,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_hidden_groups=1,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 pad_token_id=0,
                 **kwargs):
        # pad_token_id is consumed by the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps | 51.191304 | 122 | 0.650926 |
aceb5dcba6f716e11bc035d99a50b5eff5d4b399 | 7,778 | py | Python | flexget/utils/cached_input.py | Toilal/Flexget | b02ca4978698bdb10273c6fab8434c2170a0ae5f | [
"MIT"
] | null | null | null | flexget/utils/cached_input.py | Toilal/Flexget | b02ca4978698bdb10273c6fab8434c2170a0ae5f | [
"MIT"
] | null | null | null | flexget/utils/cached_input.py | Toilal/Flexget | b02ca4978698bdb10273c6fab8434c2170a0ae5f | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
import copy
import logging
import hashlib
from datetime import datetime, timedelta
from sqlalchemy import Column, Integer, String, DateTime, PickleType, Unicode, ForeignKey
from sqlalchemy.orm import relation
from flexget import db_schema
from flexget.manager import Session
from flexget.utils.database import safe_pickle_synonym
from flexget.utils.tools import parse_timedelta, TimedDict
from flexget.entry import Entry
from flexget.event import event
from flexget.plugin import PluginError
# Module-level logger for the input cache machinery.
log = logging.getLogger('input_cache')
# Versioned declarative base (schema version 0) for this plugin's tables.
Base = db_schema.versioned_base('input_cache', 0)
class InputCache(Base):
    """ORM table holding one cached run of an input plugin, identified by
    the plugin name plus a hash of its configuration."""

    __tablename__ = 'input_cache'

    id = Column(Integer, primary_key=True)
    # Name of the input plugin this cache belongs to.
    name = Column(Unicode)
    # Hash of the plugin configuration (see config_hash below).
    hash = Column(String)
    # Creation time; rows older than a week are removed by db_cleanup.
    added = Column(DateTime, default=datetime.now)
    # Cached entries; deleted together with their parent cache row.
    entries = relation('InputCacheEntry', backref='cache', cascade='all, delete, delete-orphan')
class InputCacheEntry(Base):
    """A single cached entry belonging to an :class:`InputCache` row."""

    __tablename__ = 'input_cache_entry'

    id = Column(Integer, primary_key=True)
    # Pickled entry data; the public ``entry`` synonym goes through
    # safe_pickle_synonym (presumably to keep unpicklable values out of
    # the database -- see flexget.utils.database).
    _entry = Column('entry', PickleType)
    entry = safe_pickle_synonym('_entry')

    cache_id = Column(Integer, ForeignKey('input_cache.id'), nullable=False)
@event('manager.db_cleanup')
def db_cleanup(manager, session):
    """Removes input caches that are more than a week old."""
    # Bulk delete; returns the number of rows removed.
    result = session.query(InputCache).filter(InputCache.added < datetime.now() - timedelta(days=7)).delete()
    if result:
        log.verbose('Removed %s old input caches.' % result)
def config_hash(config):
    """
    :param dict config: Configuration
    :return: MD5 hash for *config*
    """
    if isinstance(config, dict):
        # Top-level keys are sorted so that equal dicts always hash equally.
        hash_source = str(sorted(config.items()))
    else:
        hash_source = str(config)
    # Bug fix: hashlib.md5 requires bytes on Python 3; encoding the (ascii)
    # repr keeps the digest identical to the old str-based behaviour on
    # Python 2 for ascii input.
    return hashlib.md5(hash_source.encode('utf-8')).hexdigest()
class cached(object):
    """
    Implements transparent caching decorator @cached for inputs.

    Decorator has two parameters:

    * **name** in which the configuration is present in tasks configuration.
    * **key** in which the configuration has the cached resource identifier (ie. url).
      If the key is not given or present in the configuration :name: is expected to be a cache name (ie. url)

    Results are always cached in memory for 5 minutes; when *persist* is
    given they are additionally stored in the database for that duration,
    and the database copy is used as a fallback when the input fails.

    .. note:: Configuration assumptions may make this unusable in some (future) inputs
    """

    # In-memory cache shared by all decorated inputs; entries expire after 5 minutes.
    cache = TimedDict(cache_time='5 minutes')

    def __init__(self, name, persist=None):
        # Cast name to unicode to prevent sqlalchemy warnings when filtering
        # (NOTE: `unicode` makes this module Python 2 only).
        self.name = unicode(name)
        # Parse persist time; truthy only when a persist interval was supplied.
        self.persist = persist and parse_timedelta(persist)

    def __call__(self, func):

        def wrapped_func(*args, **kwargs):
            # get task from method parameters
            task = args[1]

            # detect api version: v2 inputs receive (self, task, config)
            api_ver = 1
            if len(args) == 3:
                api_ver = 2

            if api_ver == 1:
                # get name for a cache from tasks configuration
                if not self.name in task.config:
                    raise Exception('@cache config name %s is not configured in task %s' % (self.name, task.name))
                hash = config_hash(task.config[self.name])
            else:
                hash = config_hash(args[2])

            log.trace('self.name: %s' % self.name)
            log.trace('hash: %s' % hash)

            # Cache key combines the plugin name with its config hash.
            cache_name = self.name + '_' + hash
            log.debug('cache name: %s (has: %s)' % (cache_name, ', '.join(self.cache.keys())))

            if not task.options.nocache and cache_name in self.cache:
                # return from the cache
                log.trace('cache hit')
                # Deep-copy each entry so consumers cannot mutate the cached data.
                entries = []
                for entry in self.cache[cache_name]:
                    fresh = copy.deepcopy(entry)
                    entries.append(fresh)
                if entries:
                    log.verbose('Restored %s entries from cache' % len(entries))
                return entries
            else:
                if self.persist and not task.options.nocache:
                    # Check database cache for a fresh-enough persisted result.
                    with Session() as session:
                        db_cache = session.query(InputCache).filter(InputCache.name == self.name).\
                            filter(InputCache.hash == hash).\
                            filter(InputCache.added > datetime.now() - self.persist).\
                            first()
                        if db_cache:
                            entries = [Entry(e.entry) for e in db_cache.entries]
                            log.verbose('Restored %s entries from db cache' % len(entries))
                            # Store to in memory cache
                            self.cache[cache_name] = copy.deepcopy(entries)
                            return entries

                # Nothing was restored from db or memory cache, run the function
                log.trace('cache miss')
                # call input event
                try:
                    response = func(*args, **kwargs)
                except PluginError as e:
                    # If there was an error producing entries, but we have valid entries in the db cache, return those.
                    # (No freshness filter here: any persisted entries beat a hard failure.)
                    if self.persist and not task.options.nocache:
                        with Session() as session:
                            db_cache = session.query(InputCache).filter(InputCache.name == self.name).\
                                filter(InputCache.hash == hash).first()
                            if db_cache and db_cache.entries:
                                log.error('There was an error during %s input (%s), using cache instead.' %
                                          (self.name, e))
                                entries = [Entry(e.entry) for e in db_cache.entries]
                                log.verbose('Restored %s entries from db cache' % len(entries))
                                # Store to in memory cache
                                self.cache[cache_name] = copy.deepcopy(entries)
                                return entries
                    # If there was nothing in the db cache, re-raise the error.
                    raise
                if api_ver == 1:
                    # v1 inputs populate task.entries instead of returning a list.
                    response = task.entries
                if not isinstance(response, list):
                    log.warning('Input %s did not return a list, cannot cache.' % self.name)
                    return response

                # store results to cache
                log.debug('storing to cache %s %s entries' % (cache_name, len(response)))
                try:
                    self.cache[cache_name] = copy.deepcopy(response)
                except TypeError:
                    # might be caused because of backlog restoring some idiotic stuff, so not neccessarily a bug
                    log.critical('Unable to save task content into cache, if problem persists longer than a day please report this as a bug')
                if self.persist:
                    # Store to database, replacing any earlier row for this name+hash.
                    log.debug('Storing cache %s to database.' % cache_name)
                    with Session() as session:
                        db_cache = session.query(InputCache).filter(InputCache.name == self.name).\
                            filter(InputCache.hash == hash).first()
                        if not db_cache:
                            db_cache = InputCache(name=self.name, hash=hash)
                        db_cache.entries = [InputCacheEntry(entry=e) for e in response]
                        db_cache.added = datetime.now()
                        session.merge(db_cache)
            return response

        return wrapped_func
| 42.502732 | 141 | 0.567884 |
aceb5eaa2d0578b9bc06d205d196bde9b8840f85 | 1,166 | py | Python | simulation/constants.py | 5g-media/ss-cno-teleimmersive-game | 70fd71e17c005feb02e661f36bf4257817f91dde | [
"Apache-2.0"
] | null | null | null | simulation/constants.py | 5g-media/ss-cno-teleimmersive-game | 70fd71e17c005feb02e661f36bf4257817f91dde | [
"Apache-2.0"
] | null | null | null | simulation/constants.py | 5g-media/ss-cno-teleimmersive-game | 70fd71e17c005feb02e661f36bf4257817f91dde | [
"Apache-2.0"
] | null | null | null | mano = {
'ns': {
'id': '13987ea3-054a-459b-a24d-f4c76679edaf',
'name': 'ns_takis',
'nsd_name': 'cirros_2vnf_ns',
'nsd_id': 'd5c99561-ec46-4480-8377-b5b218b8b1e5'
},
'vnf': {
'id': 'abd00f09-dff1-40f1-be83-637a456ed400',
'short_name': None,
'vnfd_name': 'cirros_vnfd',
'name': None,
'vnfd_id': '16c40d2e-7a1b-4f22-9e50-3f7ede3e9fc4'
},
'vdu': {
'id': '99f76771-3a39-42ae-a09c-2f79f459a9c9',
'image_id': 'a46178eb-9f69-4e44-9058-c5eb5ded0aa3',
'ip_address': '192.168.207.2',
'flavor': {
'id': 'c46aecab-c6a9-4775-bde2-070377b8111f',
'disk': 10,
'swap': 0,
'vcpus': 1,
'ram': 4096,
'name': 'ubuntuvnf_vnfd-VM-flv',
'ephemeral': 0
},
'mgmt-interface': None,
'name': 'instance-00000009',
'status': 'running'
},
'vim': {
'uuid': '48eb5bd0-feaa-48ed-b0d7-6d2b8ad0385e',
'type': 'sim_vim',
'tag': 'sim_vim',
'name': 'devstack-ocata',
'url': 'http://192.168.1.147/identity/v3'
}
}
| 29.15 | 59 | 0.495712 |
aceb60f94b995a009781ab3e1b6fcd1f9a789bc1 | 2,177 | py | Python | ending_shorts.py | usingcaio/Locomotiva-Universal-da-Curiosidade- | 7db26004c0b70dd045c37faebd9a43b197087b70 | [
"MIT"
] | 1 | 2021-07-01T00:33:52.000Z | 2021-07-01T00:33:52.000Z | ending_shorts.py | Caiiooow/Locomotiva-Universal-da-Curiosidade- | 7db26004c0b70dd045c37faebd9a43b197087b70 | [
"MIT"
] | null | null | null | ending_shorts.py | Caiiooow/Locomotiva-Universal-da-Curiosidade- | 7db26004c0b70dd045c37faebd9a43b197087b70 | [
"MIT"
] | null | null | null | from manim import *
verd= "#3BC66B"
rox= "#862FD1"
cinz= "#363636"
azu= "#131E3E"
lar= "#FF862F"
class Vert(Scene):
def construct(self):
background = Rectangle(width = 14.5, height = 8, stroke_width = 0, fill_color = cinz, fill_opacity = 1)
self.add(background)
t1 = Tex("Assista o vídeo","na íntegra").scale(2).set_color(lar).arrange(DOWN).rotate(PI/2).to_edge(LEFT, buff=1)
t2 = Tex("Locomotiva", "Universal", "da Curiosidade").scale(2).arrange(DOWN).rotate(PI/2).to_edge(RIGHT, buff=2)
t2[0].set_color(verd).shift(0.5*RIGHT)
t2[2].set_color(rox)
logo = ImageMobject("LC_watermark.png").rotate(PI/2).scale(0.8).shift(LEFT)
self.wait()
self.play(LaggedStart(FadeIn(logo, shift=UP), Write(t1), lag_ratio=0.5), run_time=2)
self.play(FadeIn(t2[0], run_time=2), t2[0].animate.shift(0.5*LEFT),
LaggedStart(TransformFromCopy(t2[0], t2[1]), Write(t2[2:], run_time=1), lag_ratio=0.2))
self.wait()
self.play(FadeOut(t1), FadeOut(t2), FadeOut(logo))
self.wait()
class Horiz(Scene):
    """Horizontal (landscape) outro: channel name, watermark and call-to-action."""
    def construct(self):
        # Flat grey backdrop covering the whole frame.
        background = Rectangle(width = 14.5, height = 8, stroke_width = 0, fill_color = cinz, fill_opacity = 1)
        self.add(background)
        # Call-to-action banner along the top edge.
        t1 = Tex("Assista o vídeo na íntegra").scale(2).set_color(lar).to_edge(UP, buff=1)
        # Channel name, one word per line.
        a = Tex("Locomotiva", "Universal", "da Curiosidade").scale(2).arrange(DOWN)
        a[0].set_color(verd).shift(0.5*DOWN)
        a[2].set_color(rox)
        logo = ImageMobject("LC_watermark.png").scale(0.8).shift(1*DOWN).to_edge(LEFT, buff=1.5)
        self.wait(0.5)
        # First word fades in; the remaining two cascade from it.
        self.play(FadeIn(a[0], run_time=2), a[0].animate.shift(0.5*UP),
                  LaggedStart(TransformFromCopy(a[0], a[1]), Write(a[2:], run_time=1), lag_ratio=0.2))
        # Re-flow the title left-aligned while bringing in logo and banner.
        self.play(a.animate.arrange(DOWN, center=False, aligned_edge=LEFT).shift(2*RIGHT+1*DOWN),
                  FadeIn(logo, shift=DOWN+RIGHT), Create(t1))
        self.wait()
| 41.075472 | 122 | 0.58475 |
aceb615ec0174b07ae3e4e78dc682926defe2c0a | 3,406 | py | Python | main/forms.py | Durigy/shopping-list | 52fef15af494be4a726e747ed23b1a26313dc5d4 | [
"MIT"
] | null | null | null | main/forms.py | Durigy/shopping-list | 52fef15af494be4a726e747ed23b1a26313dc5d4 | [
"MIT"
] | null | null | null | main/forms.py | Durigy/shopping-list | 52fef15af494be4a726e747ed23b1a26313dc5d4 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField, IntegerField
from wtforms.validators import DataRequired, Length, EqualTo, ValidationError, Regexp, InputRequired, NumberRange, Email
from main.models import User
from flask_login import current_user
class RegistrationForm(FlaskForm):
    """Sign-up form; enforces unique username and e-mail via inline validators."""
    username = StringField('Username *', validators=[DataRequired(), Length(min=2, max=15)])
    email = StringField('Email *', validators=[DataRequired(), Email()])
    # NOTE(review): the password-complexity Regexp is commented out -- confirm
    # whether it should be re-enabled.
    password = PasswordField('Password *', validators=[DataRequired()]) #, Regexp('^(?=.*\d).{6,20}$', message='Your password should be between 6 and 20 Charaters long and contain at least 1 number')])
    confirm_password = PasswordField('Confirm Password *', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Register')

    # WTForms invokes validate_<fieldname> hooks automatically during validate().
    def validate_username(self, username):
        # NOTE(review): this check is case-sensitive, unlike UpdateAccountForm.
        user = User.query.filter_by(username=username.data).first()
        if user:
            raise ValidationError('Username already Taken. Please choose a different one.')

    def validate_email(self, email):
        email = User.query.filter_by(email=email.data).first()
        if email:
            raise ValidationError('Email already Used. Please Use a different one.')
class LoginForm(FlaskForm):
    """Login form; accepts either username or e-mail in a single field."""
    username_email = StringField('Username or Email')
    password = PasswordField('Password', validators=[DataRequired()]) #, Regexp('^(?=.*\d).{6,20}$', message='Your password should be between 6 and 20 Charaters long and contain at least 1 number')])
    remember = BooleanField('Remember Me')
    submit = SubmitField('Login')
class UpdateAccountForm(FlaskForm):
    """Profile edit form; uniqueness checks skip the user's own current values."""
    username = StringField('Username *', validators=[DataRequired(), Length(min=2, max=15)])
    email = StringField('Email *', validators=[DataRequired(), Email()])
    submit = SubmitField('Update Account')

    def validate_username(self, username):
        # Case-insensitive comparison lets users change only the casing of
        # their own name without tripping the uniqueness check.
        if username.data.lower() != current_user.username.lower():
            user = User.query.filter_by(username=username.data).first()
            if user:
                raise ValidationError('Username already Taken. Please choose a different one.')

    def validate_email(self, email):
        if email.data != current_user.email:
            email = User.query.filter_by(email=email.data).first()
            if email:
                raise ValidationError('Email already Used. Please Use a different one.')
class AddListForm(FlaskForm):
    """Form for creating a new shopping list."""
    name = StringField('Shopping List Name *', validators=[DataRequired()])
    submit = SubmitField('Create List')
class UpdateListForm(FlaskForm):
    """Form for renaming an existing shopping list."""
    list_name = StringField('Shopping List Name *', validators=[DataRequired()])
    submit = SubmitField('Updata List')
class AddListItemForm(FlaskForm):
    """Form for adding an item (name, quantity, priority) to a shopping list."""
    name = StringField('Item Name *', validators=[DataRequired()])
    quantity = IntegerField('Quantity *', validators=[DataRequired(), NumberRange(min=1)])
    priority = IntegerField('Priority *', validators=[DataRequired(), NumberRange(min=1)])
    submit = SubmitField('Add Item')
class UpdateListItemForm(FlaskForm):
    """Form for editing an existing shopping-list item."""
    name = StringField('Item Name *', validators=[DataRequired()])
    quantity = IntegerField('Quantity *', validators=[DataRequired(), NumberRange(min=1)])
    priority = IntegerField('Priority *', validators=[DataRequired(), NumberRange(min=1)])
submit = SubmitField('Update Item') | 50.088235 | 201 | 0.702584 |
aceb61de2a633412ecfbf2d96a1bb239024b47a3 | 1,144 | py | Python | AdventOfCode/Day06/interrogation.py | bfark13/PythonExercises | 6dfd5eb13923eeddaf285bf27164d014bb2e8167 | [
"MIT"
] | null | null | null | AdventOfCode/Day06/interrogation.py | bfark13/PythonExercises | 6dfd5eb13923eeddaf285bf27164d014bb2e8167 | [
"MIT"
] | null | null | null | AdventOfCode/Day06/interrogation.py | bfark13/PythonExercises | 6dfd5eb13923eeddaf285bf27164d014bb2e8167 | [
"MIT"
] | null | null | null | finput ='./AdventOfCode/Day06/invalid.txt'
finput = './AdventOfCode/Day06/valid.txt'
finput = './AdventOfCode/Day06/Input.txt'
questions =[]
questions2 = []
def getCountLetters(text):
    """Return a dict mapping each character of *text* to its occurrence count.

    The parameter is renamed from ``str`` so the builtin is no longer
    shadowed, and counting uses ``dict.get`` instead of a bare ``except``,
    which silently swallowed *every* exception rather than just KeyError.
    """
    dct = {}
    for ch in text:
        dct[ch] = dct.get(ch, 0) + 1
    return dct

def getAllSameCount(text, num_people):
    """Return how many characters occur exactly *num_people* times in *text*.

    Advent of Code day 6 part 2: a question counts only when everyone in the
    group answered it. The per-key debug print has been removed.
    """
    counts = getCountLetters(text)
    return sum(1 for c in counts.values() if c == num_people)
with open(finput, 'r') as file:
    lines = file.readlines()
    temp =""
    count = 0
    # Groups are separated by blank lines; accumulate one group's answers in
    # `temp` and the number of people in `count`.
    for line in lines:
        line =line.strip()
        if len(line) >= 1:
            count += 1
            temp += line
        else:
            # Part 1: distinct questions anyone in the group answered.
            questions.append(len(dict.fromkeys(list(temp))))
            # Part 2: questions every member of the group answered.
            questions2.append(getAllSameCount(temp,count))
            temp = ""
            count = 0
    # Flush the final group (the input may not end with a blank line).
    questions.append(len(dict.fromkeys(list(temp))))
    questions2.append(getAllSameCount(temp,count))
print(questions)
print(sum(questions))
print(questions2)
print(sum(questions2)) | 24.340426 | 60 | 0.578671 |
aceb624c373f05698eec6928829aa24b2aa850f7 | 3,108 | py | Python | resources/ycsb-helper-scripts/data_generator_utils/namespace_gen.py | dremio/YCSB | 0d7adbdbed59d2c43cc9f4fcb18c451df995d8f4 | [
"Apache-2.0"
] | null | null | null | resources/ycsb-helper-scripts/data_generator_utils/namespace_gen.py | dremio/YCSB | 0d7adbdbed59d2c43cc9f4fcb18c451df995d8f4 | [
"Apache-2.0"
] | null | null | null | resources/ycsb-helper-scripts/data_generator_utils/namespace_gen.py | dremio/YCSB | 0d7adbdbed59d2c43cc9f4fcb18c451df995d8f4 | [
"Apache-2.0"
] | null | null | null | import os
import random
import argparse
import logging
from data_generator_utils.fakeman import FakeMan
from data_generator_utils.jobs_gen import date_serializer
# Closed set of entity types a namespace container can take.
DATASET_TYPES = frozenset(["SPACE", "SOURCE", "HOME", "FOLDER", "DATASET"])
# Shared Faker instance used by every generator in this module.
fake = FakeMan.get_faker()
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
def generate_entity_paths(num_tables, num_cols):
    """Build the fixed entity paths: every prefix of
    ``/mysource/schema/table<i>/file<j>.txt`` for i in 1..num_tables and
    j in 1..num_cols.

    Deduplication previously went through ``list(set(...))``, whose ordering
    is arbitrary and varies between runs; an insertion-ordered dict is used
    instead so the result is deterministic while staying duplicate-free.
    """
    seen = {}
    for table in range(1, num_tables + 1):
        for col in range(1, num_cols + 1):
            prefix = ""
            for part in ("mysource", "schema", "table{}".format(table), "file{}.txt".format(col)):
                prefix += "/" + part
                seen[prefix] = None
    return list(seen)
def generate(entity_path_key):
    """Build one fake namespace entity keyed by *entity_path_key*.

    All field values come from Faker -- only the overall shape is meaningful;
    the content is random test data.
    """
    file_path = fake.file_path(depth=3)
    namespace_entity = {
        "entityPathKey": entity_path_key,
        "id": fake.pystr(),
        "childIds": [fake.pystr() for _ in range(2)],
        "container": {
            "uuid": fake.uuid4(),
            "type": {
                # One entity type drawn at random from DATASET_TYPES.
                "entityType": fake.random_sample(elements=DATASET_TYPES, length=1)[0],
                "entityId": fake.pystr()
            },
            "fullPathList": file_path.split("/"),
            "config": fake.pydict(5, True, str)
        },
        "attributes": [{
            "typeUrl": fake.uri(),
            "value": fake.text()
        } for _ in range(5)]
    }
    return namespace_entity
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--num-namespaces", action="store", dest="n",
                        help="Number of namespace entities to generate", type=int, default=1)
    args = parser.parse_args()
    namespaces = []
    # Flush the JSON buffer to disk every this-many entities.
    buffer_flush_max_size = 1000
    import json
    output_file_path = "namespaces_{}.json".format(args.n)
    ENTITY_PATHS = generate_entity_paths(10, 30)
    num_entity_paths_fixed = len(ENTITY_PATHS)
    # Always emit at least the fixed entity paths.
    if args.n < num_entity_paths_fixed:
        args.n = num_entity_paths_fixed
    if os.path.exists(output_file_path):
        os.remove(output_file_path)
    pseudo_random_entity_paths = []
    for ep in ENTITY_PATHS:
        pseudo_random_entity_paths.append(ep)
    # Pad with random file paths up to the requested count.
    for _ in range(args.n - num_entity_paths_fixed):
        pseudo_random_entity_paths.append(fake.file_path(depth=random.choice([2, 3, 4])))
    # Stream entities out as a JSON array, flushing in chunks.
    with open(output_file_path, 'w') as f:
        write_buffer = []
        f.write("[\n")
        for ep in pseudo_random_entity_paths:
            ns = generate(ep)
            write_buffer.append(json.dumps(ns, default=date_serializer))
            if len(write_buffer) == buffer_flush_max_size:
                # Each flushed chunk ends with a trailing comma, which the
                # seek hack below has to undo when nothing remains at the end.
                f.write("{},\n".format(",\n".join(write_buffer)))
                logging.info("Flushing buffer and writing to disk...")
                del write_buffer[:]
        if len(write_buffer) != 0:
            f.write("{}\n".format(",\n".join(write_buffer)))
            f.write("]")
        else:
            # NOTE(review): rewinds two bytes to overwrite the trailing ",\n";
            # fragile when nothing was written or with \r\n newlines -- confirm.
            f.seek(-2, 1)  # seek to end of file; f.seek(0, 2) is legal
            f.write("\n]")
| 33.419355 | 93 | 0.594916 |
aceb62e41af2817c32340fe452d04790dbd54062 | 87 | py | Python | ib_insync/version.py | txu2014/ib_insync | 7e99236e4c760280f117f6bbe64820b904cd5534 | [
"BSD-2-Clause"
] | 1 | 2019-10-14T14:40:03.000Z | 2019-10-14T14:40:03.000Z | ib_insync/version.py | txu2014/ib_insync | 7e99236e4c760280f117f6bbe64820b904cd5534 | [
"BSD-2-Clause"
] | null | null | null | ib_insync/version.py | txu2014/ib_insync | 7e99236e4c760280f117f6bbe64820b904cd5534 | [
"BSD-2-Clause"
] | null | null | null | __version_info__ = (0, 9, 56)
__version__ = '.'.join(str(v) for v in __version_info__)
| 29 | 56 | 0.701149 |
aceb6333fec562580ce3acc836ab497af1732b7f | 1,590 | py | Python | lib/surface/dns/record_sets/__init__.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | lib/surface/dns/record_sets/__init__.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | lib/surface/dns/record_sets/__init__.py | bshaffer/google-cloud-sdk | f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*- #
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gcloud dns record-sets command group."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class RecordSets(base.Group):
    # NOTE(review): this docstring is presumably surfaced verbatim as the
    # `gcloud dns record-sets` CLI help text -- treat its wording as
    # user-facing and confirm before editing.
    """Manage the record-sets within your managed-zones.
    Manage the record-sets within your managed-zones.
    ## EXAMPLES
    To import record-sets from a BIND zone file, run:
    $ {command} import --zone MANAGED_ZONE --zone-file-format ZONE_FILE
    To export record-sets in yaml format, run:
    $ {command} export --zone MANAGED_ZONE
    To see how to make scriptable changes to your record-sets through
    transactions, run:
    $ {command} transaction --help
    To list all changes, run:
    $ {command} changes list --zone MANAGED_ZONE
    To see change details, run:
    $ {command} changes describe CHANGE_ID --zone MANAGED_ZONE
    To see the list of all record-sets, run:
    $ {command} list --zone MANAGED_ZONE
    """
    # Command group only; subcommands are registered elsewhere.
    pass
| 27.413793 | 74 | 0.735849 |
aceb6339980b765c3bacd81d88b9c9a38df26135 | 906 | py | Python | wxyy/urls.py | balics/wxyy | bb1cc8fca8cb85f2c80378c83bfdb0f59c33108d | [
"MIT"
] | null | null | null | wxyy/urls.py | balics/wxyy | bb1cc8fca8cb85f2c80378c83bfdb0f59c33108d | [
"MIT"
] | null | null | null | wxyy/urls.py | balics/wxyy | bb1cc8fca8cb85f2c80378c83bfdb0f59c33108d | [
"MIT"
] | null | null | null | """wxyy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # WeChat app routes, delegated to wx_app.urls.
    path('wx/', include('wx_app.urls'))
]
urlpatterns += staticfiles_urlpatterns()
| 33.555556 | 77 | 0.720751 |
aceb635da3fe6f864d150df48133e9ea62d68e2b | 11,697 | py | Python | instamanager.py | denisafanasev/Social.Guard | 1f4040f7d02e5717069e229ef78ed4ab508e16eb | [
"MIT"
] | null | null | null | instamanager.py | denisafanasev/Social.Guard | 1f4040f7d02e5717069e229ef78ed4ab508e16eb | [
"MIT"
] | null | null | null | instamanager.py | denisafanasev/Social.Guard | 1f4040f7d02e5717069e229ef78ed4ab508e16eb | [
"MIT"
] | null | null | null | from instagram_private_api import Client, ClientCompatPatch, ClientError
from datetime import datetime
import time
import logging
import json
import ast
# File (under data/) holding the persistent list of users to keep blocked.
BLOCKED_USERS_CONTROL_LIST_FILENAME = "blockedlist.usr"
LOG_FILENAME = "socialguard.log"
LOGGING_LEVEL = logging.INFO
# Audit log (under data/) of every successful block action.
BLOCKING_LOG_FILENAME = "blocking.log"
# Base delay in seconds between Instagram API calls (rate-limit guard).
MAX_TIMEOUT = 20
class InstaManager:
    """Manages Instagram account connections and per-account blocked-user lists.

    NOTE(review): every attribute below is a *class-level* mutable container,
    so state is shared across all InstaManager instances -- confirm that only
    a single instance is ever created.
    """

    # account name -> instagram_private_api Client
    _connections = dict()
    # account name -> blocked users fetched from the API
    _blocked_list = dict()
    # Persistent "always keep blocked" list of user dicts.
    _blocked_control_list = []
    _blocked_control_list_filename = BLOCKED_USERS_CONTROL_LIST_FILENAME
    # account name -> timestamp of the last API call (for pacing).
    _is_timeout = dict()

    def __init__(self):
        """Constructor: set up logging and load the persistent blocked-users list."""
        logging.basicConfig(filename=LOG_FILENAME, level=LOGGING_LEVEL)
        logging.info("{}: session started".format(
            str(datetime.now())))
        self.read_blocked_users_control_list()  # Initially read the persistent blocked users list
        pass

    def _timeout_call(self, account_name):
        """
        Internal helper to align with the Instagram request-timing policy.
        @params:
            account_name   - Required  : account name for the API call
        """
        # NOTE(review): the sleep is MAX_TIMEOUT - max(0, 10 - elapsed), so it
        # *grows* toward MAX_TIMEOUT as more time has already passed since the
        # last call -- this looks inverted; confirm the intended pacing.
        time_diff = datetime.timestamp(
            datetime.now()) - self._is_timeout.get(account_name)
        time.sleep(MAX_TIMEOUT - max(0, 10 - time_diff))
        self._is_timeout[account_name] = datetime.timestamp(
            datetime.now())
        pass

    def read_blocked_users_list(self, account_name):
        """
        Fetch the full blocked-users list for *account_name* from the API,
        following pagination, and cache it in self._blocked_list.
        @params:
            account_name   - Required  : connected account to query
        """
        logging.info("{}:{}: getting blocked users list".format(str(datetime.now()), str(account_name)))
        _items = []
        try:
            _api = self._connections.get(account_name)
            rank_token = _api.generate_uuid()
            _result = _api.blocked_user_list(rank_token)
            _items.extend(_result.get('blocked_list', []))
            next_max_id = _result.get('next_max_id')
            # Keep paging (with rate-limit pauses) while more pages exist.
            while next_max_id:
                self._timeout_call(account_name)
                _result = _api.blocked_user_list(rank_token, max_id=next_max_id)
                _items.extend(_result.get('blocked_list', []))
                next_max_id = _result.get('next_max_id')
            self._blocked_list[account_name] = _items
            logging.info("{}:{}: getting blocked users list successed".format(
                str(datetime.now()), str(account_name)))
        except ClientError as e:
            # NOTE(review): the format string has 3 placeholders but 4 args
            # (account_name passed twice); the extra arg is silently ignored.
            logging.error("{}:{}: getting blocked users list failed: {}".format(str(datetime.now()), str(account_name), str(account_name), e.msg))
        pass

    def read_blocked_users_control_list(self):
        """
        Load the persistent blocked-users control list from its data file.
        Each line is the repr() of a user dict (written by save_user_list)
        and is parsed back with ast.literal_eval.
        """
        logging.info("{}: reading blocked control file".format(str(datetime.now())))
        self._blocked_control_list.clear()
        try:
            # NOTE(review): the file handle is never closed and mode 'rU' is
            # deprecated -- consider a `with open(...)` block.
            file_iter = open(
                "data/" + BLOCKED_USERS_CONTROL_LIST_FILENAME, 'rU')
            for line in file_iter:
                self._blocked_control_list.append(ast.literal_eval(line))
            logging.info("{}: reading blocked control file successed".format(str(datetime.now())))
        except Exception:
            logging.error("{}: reading blocked control file: failed".format(
                str(datetime.now())))
        pass

    def save_user_list(self, users, filename):
        """
        Write *users* to data/<filename>, one repr() per line.
        @params:
            users      - Required  : iterable of user dicts
            filename   - Required  : target file name under data/
        """
        try:
            with open("data/" + filename, 'w') as f:
                for item in users:
                    f.write(str(item) + "\n")
                f.close()  # redundant: the with-block already closes the file
            logging.info(
                "{}:{} - wrote successful".format(str(datetime.now()), filename))
        except Exception:
            logging.error(
                "{}:{} - wrote failed".format(str(datetime.now()), filename))
        pass

    def save_blocked_users_control_list(self):
        """
        Persist the in-memory blocked-users control list to its data file.
        """
        logging.info("{}: writing blocked control file".format(str(datetime.now())))
        self.save_user_list(self._blocked_control_list, BLOCKED_USERS_CONTROL_LIST_FILENAME)
        pass

    def get_blocked_users_control_list(self):
        """
        Return the in-memory blocked-users control list.
        """
        return self._blocked_control_list

    def merge_user_lists(self, user_list1, user_list2):
        """
        Merge two user lists, deduplicating on 'user_id'.
        Note: mutates and returns user_list1; O(n*m) pairwise scan.
        """
        _list = user_list1
        for element2 in user_list2:
            _check = False
            for element1 in user_list1:
                if element1['user_id'] == element2["user_id"]:
                    _check = True
                    break
            if not _check:
                _list.append(element2)
        return _list

    def diff_user_lists(self, user_list1, user_list2):
        """
        Return the users from user_list2 whose 'user_id' is not in user_list1.
        """
        _list = []
        for element2 in user_list2:
            _check = False
            for element1 in user_list1:
                if element1['user_id'] == element2["user_id"]:
                    _check = True
                    break
            if not _check:
                _list.append(element2)
        return _list

    def add_users_to_blocked_users_control_list(self, user_list):
        """
        Add to the control list those users from user_list not already present.
        """
        _to_add = self.diff_user_lists(self._blocked_control_list, user_list)
        self._blocked_control_list.extend(_to_add)
        pass

    def get_blocked_users_list(self, account_name):
        """Return the cached blocked-users list for the account
        (None if never fetched).
        """
        return self._blocked_list.get(account_name)

    def connect(self, account_name, password):
        """ Connect to an account in IS
        Trying to connect to the account
        and add a new connection to the conecction pool
        @params:
            account_name   - Required  : Instagram account name
            password       - Required  : account password
        :return: True on success, False on ClientError.
        """
        try:
            logging.info("{}:{}: connecting".format(
                str(datetime.now()), str(account_name)))
            self._is_timeout[account_name] = datetime.timestamp(
                datetime.now())
            api = Client(account_name, password)
            self._connections[account_name] = api
            logging.info("{}:{}: connected".format(
                str(datetime.now()), str(account_name)))
            # For each new connection initially read the blocked users list
            self.read_blocked_users_list(account_name)
            return True
        except ClientError as e:
            logging.error(
                "{}:{}: connected failed: {}".format(str(datetime.now()), account_name, e.msg))
            return False

    def get_connections(self):
        """
        Return the names of all connected accounts as a list.
        """
        result = []
        for key in self._connections.keys():
            result.append(key)
        return result

    def get_blocked_status(self, account_name, user):
        """
        Return whether *user* is blocked by *account_name*.

        NOTE(review): a ClientError also returns True, i.e. API failures are
        reported as "blocked" -- confirm that is the intended fail-safe.
        @params:
            account_name   - Required  : account name
            user           - Required  : user dict with 'user_id'/'username'
        """
        try:
            _api = self._connections.get(account_name)
            result = _api.friendships_show(user['user_id'])
            return result['blocking']
        except ClientError as e:
            logging.error("{}:{}: user {} id {} status check error: {}".format(str(datetime.now()), str(account_name), user['username'], user['user_id'], e.msg))
            return True

    def block_user(self, account_name, user):
        """
        Block *user* on behalf of *account_name*; on success append an entry
        to the blocking audit log (failures to write the log are ignored).
        @params:
            account_name   - Required  : account name
            user           - Required  : user dict with 'user_id'/'username'
        :return: False on ClientError, True otherwise (even if the API
            reported a non-ok status).
        """
        _api = self._connections.get(account_name)
        self._timeout_call(account_name)
        try:
            result = _api.friendships_block(user['user_id'])
            if result['status'] == 'ok':
                logging.info("{}:{}: user {}: blocked".format(str(datetime.now()), str(account_name), str(user['user_id'])))
                try:
                    with open("data/" + BLOCKING_LOG_FILENAME, 'a') as f:
                        f.write(str(datetime.now()) + ":" + account_name + ":" + str(user) + "\n")
                        f.close()  # redundant: the with-block already closes the file
                except Exception:
                    pass
            else:
                logging.info("{}:{}: user {}: block failed".format(
                    str(datetime.now()), str(account_name), str(user['user_id'])))
                pass
            pass
        except ClientError as e:
            logging.error("{}:{}: user {} id {} blocking error: {}".format(str(datetime.now()), str(account_name), user['username'], user['user_id'], e.msg))
            return False
        return True
| 40.334483 | 173 | 0.46046 |
aceb64354701fad1e040452606953ecaee98fbfd | 1,342 | py | Python | metasub_utils/packet_parse/tests/test_figures.py | MetaSUB/metasub_utils | c52c5dde816d710db5ac8dc6f8804bb795a992e4 | [
"MIT"
] | 8 | 2018-12-30T23:35:03.000Z | 2022-02-22T09:43:48.000Z | metasub_utils/packet_parse/tests/test_figures.py | MetaSUB/metasub_utils | c52c5dde816d710db5ac8dc6f8804bb795a992e4 | [
"MIT"
] | 5 | 2019-01-05T04:54:46.000Z | 2021-03-10T08:59:16.000Z | metasub_utils/packet_parse/tests/test_figures.py | MetaSUB/metasub_utils | c52c5dde816d710db5ac8dc6f8804bb795a992e4 | [
"MIT"
] | 2 | 2019-08-26T22:08:18.000Z | 2020-02-24T19:57:17.000Z | """Test suite for figures."""
import unittest
from unittest import TestCase
from os import environ
from metasub_utils.packet_parse.figs import MetaSUBFigures
# Disable alphabetical ordering of test methods.
unittest.TestLoader.sortTestMethodsUsing = None
# Location of the MetaSUB data packet; must be set in the environment
# (KeyError at import time if missing).
PACKET_DIR = environ['METASUB_DATA_PACKET_DIR']
# Module-level figure generator shared by every test below.
myfigs = MetaSUBFigures(PACKET_DIR)
class TestMetaSUBFigures(TestCase):
    """Smoke tests for the MetaSUB data-packet figures.

    Each test simply invokes one figure builder on the shared ``myfigs``
    instance and passes as long as no exception is raised.
    """
    # Presumably the nose multiprocess "shared fixtures" flag -- confirm.
    _multiprocess_shared_ = True

    def test_tbl1(self):
        myfigs.tbl1()

    def test_fig1_prevalence_curve(self):
        myfigs.fig1_prevalence_curve()

    def test_fig1_major_taxa_curves(self):
        myfigs.fig1_major_taxa_curves()

    def test_fig1_species_rarefaction(self):
        myfigs.fig1_species_rarefaction()

    def test_fig1_reference_comparisons(self):
        myfigs.fig1_reference_comparisons()

    def test_fig1_fraction_unclassified(self):
        myfigs.fig1_fraction_unclassified()

    def test_fig2_umap(self):
        myfigs.fig2_umap()

    def test_fig2_region_blocks(self):
        myfigs.fig2_region_blocks()

    # NOTE(review): the n_pcs parameter is never used (and test runners do
    # not pass it) -- consider removing it or forwarding it to the call.
    def test_fig2_pca_flows(self, n_pcs=100):
        myfigs.fig2_pca_flows()

    def test_fig5_amr_cooccur(self):
        myfigs.fig5_amr_cooccur()

    def test_fig5_amr_richness_by_city(self):
        myfigs.fig5_amr_richness_by_city()
myfigs.fig5_amr_rarefaction()
| 24.4 | 58 | 0.73994 |
aceb64dcd86781539adcd4403ad3bfa3b8e87478 | 1,320 | py | Python | tests/integration/actions/collections/test_welcome_interactive_ee.py | LaudateCorpus1/ansible-navigator | 28cdea13dba3e9039382eb993989db4b3e61b237 | [
"Apache-2.0"
] | null | null | null | tests/integration/actions/collections/test_welcome_interactive_ee.py | LaudateCorpus1/ansible-navigator | 28cdea13dba3e9039382eb993989db4b3e61b237 | [
"Apache-2.0"
] | null | null | null | tests/integration/actions/collections/test_welcome_interactive_ee.py | LaudateCorpus1/ansible-navigator | 28cdea13dba3e9039382eb993989db4b3e61b237 | [
"Apache-2.0"
] | null | null | null | """Tests for collections from welcome, interactive, with an EE.
"""
import pytest
from .base import BaseClass
# Base command under test.
CLI = "ansible-navigator --execution-environment true"

# (step index, keystrokes to send, description of the expected screen)
testdata = [
    (0, CLI, "ansible-navigator welcome screen"),
    (1, ":collections", "ansible-navigator collections top window"),
    (2, ":0", "Browse testorg.coll_1 plugins window"),
    (3, ":0", "lookup_1 plugin docs window"),
    (4, ":back", "Back to browse testorg.coll_1 plugins window"),
    (5, ":1", "mod_1 plugin docs window"),
    (6, ":back", "Back to browse testorg.coll_1 plugins window"),
    (7, ":back", "Back to ansible-navigator collections browse window"),
    (8, ":1", "Browse testorg.coll_2 plugins window"),
    (9, ":0", "lookup_2 plugin docs window"),
    (10, ":back", "Back to browse testorg.coll_2 plugins window"),
    (11, ":1", "mod_2 plugin docs window"),
    (12, ":back", "Back to browse testorg.coll_2 plugins window"),
    (13, ":back", "Back to ansible-navigator collections browse window"),
]
@pytest.mark.parametrize("index, user_input, comment", testdata)
class Test(BaseClass):
    # pylint: disable=too-few-public-methods
    """Run the tests for collections from welcome, interactive, with an EE."""
    # Configuration consumed by BaseClass: interactive mode, inside an
    # execution environment; presumably fixtures are compared rather than
    # regenerated while UPDATE_FIXTURES is False.
    TEST_FOR_MODE = "interactive"
    EXECUTION_ENVIRONMENT_TEST = True
    UPDATE_FIXTURES = False
| 36.666667 | 78 | 0.672727 |
aceb6571c9ceeb0cb5d15f96833c126aeb049e8c | 3,055 | py | Python | lib/JumpScale/baselib/jobcontroller/models/RunsCollection.py | Jumpscale/jumpscale_core8 | f80ac9b1ab99b833ee7adb17700dcf4ef35f3734 | [
"Apache-2.0"
] | 8 | 2016-04-14T14:04:57.000Z | 2020-06-09T00:24:34.000Z | lib/JumpScale/baselib/jobcontroller/models/RunsCollection.py | Jumpscale/jumpscale_core8 | f80ac9b1ab99b833ee7adb17700dcf4ef35f3734 | [
"Apache-2.0"
] | 418 | 2016-01-25T10:30:00.000Z | 2021-09-08T12:29:13.000Z | lib/JumpScale/baselib/jobcontroller/models/RunsCollection.py | Jumpscale/jumpscale_core8 | f80ac9b1ab99b833ee7adb17700dcf4ef35f3734 | [
"Apache-2.0"
] | 9 | 2016-04-21T07:21:17.000Z | 2022-01-24T10:35:54.000Z | from JumpScale import j
from JumpScale.baselib.jobcontroller.models.RunModel import RunModel
import capnp
from JumpScale.baselib.jobcontroller import model_job_capnp as ModelCapnp
class RunsCollection:
    """
    This class represent a collection of Runs
    It's used to list/find/create new Instance of Run Model object
    """

    def __init__(self):
        # connection to the key-value store index repository namespace
        self.category = "Job"
        self.namespace_prefix = 'runs'
        namespace = "%s:%s" % (self.namespace_prefix, self.category.lower())
        self._db = j.servers.kvs.getRedisStore(namespace, namespace, **j.atyourservice.config['redis'])
        # for now we do index same as database
        self._index = j.servers.kvs.getRedisStore(namespace, namespace, **j.atyourservice.config['redis'])

    def new(self):
        # Create a fresh, empty Run model (key assigned later).
        model = RunModel(
            capnp_schema=ModelCapnp.Run,
            category=self.category,
            db=self._db,
            index=self._index,
            key='',
            new=True)
        return model

    def get(self, key):
        # Load an existing Run model by its key.
        return RunModel(
            capnp_schema=ModelCapnp.Run,
            category=self.category,
            db=self._db,
            index=self._index,
            key=key,
            new=False)

    def exists(self, key):
        # True if a Run with this key is stored.
        return self._db.exists(key)

    def _list_keys(self, state="", fromEpoch=0, toEpoch=9999999999999, returnIndex=False):
        # Index entries look like "<state>:<lastModDate>": match the state via
        # regex, then filter the epoch range in Python.
        if state == "":
            state = ".*"
        epoch = ".*"
        regex = "%s:%s" % (state, epoch)
        res0 = self._index.list(regex, returnIndex=True)
        res1 = []
        for index, key in res0:
            epoch = int(index.split(":")[-1])
            if fromEpoch < epoch < toEpoch:
                if returnIndex:
                    res1.append((index, key))
                else:
                    res1.append(key)
        return res1

    def find(self, state="", repo="", fromEpoch=0, toEpoch=9999999999999):
        # NOTE(review): here `repo` is compared directly against
        # model.dbobj.repo, while delete() compares against repo.model.key --
        # one of the two is probably wrong; confirm the expected argument type.
        res = []
        keys = self._list_keys(state, fromEpoch, toEpoch)
        for key in keys:
            if repo:
                model = self.get(key)
                if model.dbobj.repo != repo:
                    continue
            res.append(self.get(key))
        return res

    def delete(self, state="", repo="", fromEpoch=0, toEpoch=9999999999999):
        # Remove matching runs from the index and db, plus their steps' jobs.
        # NOTE(review): `model` is only assigned inside `if repo:`, so calling
        # delete() without a repo raises NameError on the idx line -- confirm.
        for key in self._list_keys(state, fromEpoch, toEpoch):
            if repo:
                model = self.get(key)
                if model.dbobj.repo != repo.model.key:
                    continue
            idx = str(model.dbobj.state) + ':' + str(model.dbobj.lastModDate)
            self._index.index_remove(keys=idx)
            self._db.delete(key=key)
            # for job in model.jobs .. job. remove job
            for step in model.dbobj.steps:
                for job in step.jobs:
                    j.core.jobcontroller.db.jobs._db.delete(job.key)

    def destroy(self):
        # Drop the whole namespace: both the db and the index stores.
        self._db.destroy()
        self._index.destroy()
| 32.5 | 106 | 0.555483 |
aceb6741ada1b8a71241b200dd155b4ca2635d32 | 23,140 | py | Python | workers/repomirrorworker/test/test_repomirrorworker.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | null | null | null | workers/repomirrorworker/test/test_repomirrorworker.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | null | null | null | workers/repomirrorworker/test/test_repomirrorworker.py | dongboyan77/quay | 8018e5bd80f17e6d855b58b7d5f2792d92675905 | [
"Apache-2.0"
] | null | null | null | import pytest
import mock
import json
from functools import wraps
from app import storage
from data.registry_model.blobuploader import upload_blob, BlobUploadSettings
from image.docker.schema2.manifest import DockerSchema2ManifestBuilder
from data.registry_model import registry_model
from data.model.test.test_repo_mirroring import create_mirror_repo_robot
from data.model.user import retrieve_robot_token
from data.database import Manifest, RepoMirrorConfig, RepoMirrorStatus
from workers.repomirrorworker import delete_obsolete_tags
from workers.repomirrorworker.repomirrorworker import RepoMirrorWorker
from io import BytesIO
from data.model.image import find_create_or_link_image
from data.model.tag import create_or_update_tag_for_repo
from util.repomirror.skopeomirror import SkopeoResults, SkopeoMirror
from test.fixtures import *
def disable_existing_mirrors(func):
    """Decorator: disable every pre-existing RepoMirrorConfig for the duration
    of the wrapped test, then re-enable them.

    Re-enabling happens in a ``finally`` block so that a failing test cannot
    leave the fixture mirrors disabled for subsequent tests.
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        for mirror in RepoMirrorConfig.select():
            mirror.is_enabled = False
            mirror.save()
        try:
            func(*args, **kwargs)
        finally:
            for mirror in RepoMirrorConfig.select():
                mirror.is_enabled = True
                mirror.save()

    return wrapper
def _create_tag(repo, name):
    """Create tag ``name`` on the fixture mirror repository by uploading a
    minimal image-config blob and a schema-2 manifest that references it.

    NOTE(review): the ``repo`` parameter is unused -- the repository is always
    looked up as "mirror"/"repo"; confirm whether callers rely on that.
    """
    repo_ref = registry_model.lookup_repository("mirror", "repo")

    with upload_blob(repo_ref, storage, BlobUploadSettings(500, 500, 500)) as upload:
        app_config = {"TESTING": True}
        # Minimal config document; diff_ids stays empty because the single
        # layer added below is a urls-only (remote) layer.
        config_json = json.dumps(
            {
                "config": {"author": u"Repo Mirror",},
                "rootfs": {"type": "layers", "diff_ids": []},
                "history": [
                    {
                        "created": "2019-07-30T18:37:09.284840891Z",
                        "created_by": "base",
                        "author": u"Repo Mirror",
                    },
                ],
            }
        )
        upload.upload_chunk(app_config, BytesIO(config_json))
        blob = upload.commit_to_blob(app_config)

    builder = DockerSchema2ManifestBuilder()
    builder.set_config_digest(blob.digest, blob.compressed_size)
    builder.add_layer("sha256:abcd", 1234, urls=["http://hello/world"])
    manifest = builder.build()

    manifest, tag = registry_model.create_manifest_and_retarget_tag(
        repo_ref, manifest, name, storage
    )
@disable_existing_mirrors
@mock.patch("util.repomirror.skopeomirror.SkopeoMirror.run_skopeo")
def test_successful_mirror(run_skopeo_mock, initialized_db, app):
    """
    Basic test of successful mirror
    """
    mirror, repo = create_mirror_repo_robot(["latest", "7.1"])

    # Expected skopeo invocations, in order: inspect the remote tags, then
    # copy the single matching tag into the local mirror repository.
    skopeo_calls = [
        {
            "args": [
                "/usr/bin/skopeo",
                "inspect",
                "--tls-verify=True",
                u"docker://registry.example.com/namespace/repository:latest",
            ],
            "results": SkopeoResults(True, [], '{"RepoTags": ["latest"]}', ""),
        },
        {
            "args": [
                "/usr/bin/skopeo",
                "copy",
                "--src-tls-verify=True",
                "--dest-tls-verify=True",
                "--dest-creds",
                "%s:%s"
                % (mirror.internal_robot.username, retrieve_robot_token(mirror.internal_robot)),
                u"docker://registry.example.com/namespace/repository:latest",
                u"docker://localhost:5000/mirror/repo:latest",
            ],
            "results": SkopeoResults(True, [], "stdout", "stderr"),
        },
    ]

    def skopeo_test(args, proxy):
        # Fail loudly if skopeo runs more often than expected -- the old
        # pop-then-reference pattern raised a confusing UnboundLocalError in
        # that case.  An expectation is only consumed once it fully matched,
        # so a mismatch leaves ``skopeo_calls`` non-empty and the final
        # emptiness assertion fails as well.
        assert skopeo_calls, "unexpected skopeo invocation: %s" % (args,)
        skopeo_call = skopeo_calls[0]
        assert args == skopeo_call["args"]
        assert proxy == {}
        skopeo_calls.pop(0)
        return skopeo_call["results"]

    run_skopeo_mock.side_effect = skopeo_test

    worker = RepoMirrorWorker()
    worker._process_mirrors()

    # Every expected skopeo invocation must have been consumed.
    assert [] == skopeo_calls
@disable_existing_mirrors
@mock.patch("util.repomirror.skopeomirror.SkopeoMirror.run_skopeo")
def test_successful_disabled_sync_now(run_skopeo_mock, initialized_db, app):
    """
    Disabled mirrors still allow "sync now"
    """
    mirror, repo = create_mirror_repo_robot(["latest", "7.1"])
    # Disable the mirror but request an immediate one-off sync.
    mirror.is_enabled = False
    mirror.sync_status = RepoMirrorStatus.SYNC_NOW
    mirror.save()

    # Expected skopeo invocations: inspect the remote tags, then copy the
    # single matching tag.
    skopeo_calls = [
        {
            "args": [
                "/usr/bin/skopeo",
                "inspect",
                "--tls-verify=True",
                u"docker://registry.example.com/namespace/repository:latest",
            ],
            "results": SkopeoResults(True, [], '{"RepoTags": ["latest"]}', ""),
        },
        {
            "args": [
                "/usr/bin/skopeo",
                "copy",
                "--src-tls-verify=True",
                "--dest-tls-verify=True",
                "--dest-creds",
                "%s:%s"
                % (mirror.internal_robot.username, retrieve_robot_token(mirror.internal_robot)),
                u"docker://registry.example.com/namespace/repository:latest",
                u"docker://localhost:5000/mirror/repo:latest",
            ],
            "results": SkopeoResults(True, [], "stdout", "stderr"),
        },
    ]

    def skopeo_test(args, proxy):
        # Pop the next expected invocation and verify it; on a mismatch the
        # expectation is pushed back so the final emptiness assert fails too.
        try:
            skopeo_call = skopeo_calls.pop(0)
            assert args == skopeo_call["args"]
            assert proxy == {}

            return skopeo_call["results"]
        except Exception as e:
            skopeo_calls.append(skopeo_call)
            raise e

    run_skopeo_mock.side_effect = skopeo_test

    worker = RepoMirrorWorker()
    worker._process_mirrors()

    assert [] == skopeo_calls
@disable_existing_mirrors
@mock.patch("util.repomirror.skopeomirror.SkopeoMirror.run_skopeo")
def test_successful_mirror_verbose_logs(run_skopeo_mock, initialized_db, app, monkeypatch):
    """
    Basic test of successful mirror with verbose logs turned on
    """
    mirror, repo = create_mirror_repo_robot(["latest", "7.1"])

    # With DEBUGLOG set (see monkeypatch below) every skopeo invocation is
    # expected to carry the extra "--debug" flag.
    skopeo_calls = [
        {
            "args": [
                "/usr/bin/skopeo",
                "--debug",
                "inspect",
                "--tls-verify=True",
                u"docker://registry.example.com/namespace/repository:latest",
            ],
            "results": SkopeoResults(True, [], '{"RepoTags": ["latest"]}', ""),
        },
        {
            "args": [
                "/usr/bin/skopeo",
                "--debug",
                "copy",
                "--src-tls-verify=True",
                "--dest-tls-verify=True",
                "--dest-creds",
                "%s:%s"
                % (mirror.internal_robot.username, retrieve_robot_token(mirror.internal_robot)),
                u"docker://registry.example.com/namespace/repository:latest",
                u"docker://localhost:5000/mirror/repo:latest",
            ],
            "results": SkopeoResults(True, [], "Success", ""),
        },
    ]

    def skopeo_test(args, proxy):
        # Pop the next expected invocation and verify it; on a mismatch the
        # expectation is pushed back so the final emptiness assert fails too.
        try:
            skopeo_call = skopeo_calls.pop(0)
            assert args == skopeo_call["args"]
            assert proxy == {}

            return skopeo_call["results"]
        except Exception as e:
            skopeo_calls.append(skopeo_call)
            raise e

    run_skopeo_mock.side_effect = skopeo_test
    monkeypatch.setenv("DEBUGLOG", "true")

    worker = RepoMirrorWorker()
    worker._process_mirrors()

    assert [] == skopeo_calls
@disable_existing_mirrors
@mock.patch("util.repomirror.skopeomirror.SkopeoMirror.run_skopeo")
def test_rollback(run_skopeo_mock, initialized_db, app):
    """
    Tags in the repo:
    "updated" - this tag will be updated during the mirror
    "removed" - this tag will be removed during the mirror
    "created" - this tag will be created during the mirror
    """
    # NOTE(review): the docstring mentions a "removed" tag but the tag
    # actually created below is named "deleted" -- confirm which is intended.
    mirror, repo = create_mirror_repo_robot(["updated", "created", "zzerror"])
    _create_tag(repo, "updated")
    _create_tag(repo, "deleted")

    # Expected skopeo invocations: one inspect, then one copy per matching
    # tag; the final copy (":zzerror") fails to trigger the rollback path.
    skopeo_calls = [
        {
            "args": [
                "/usr/bin/skopeo",
                "inspect",
                "--tls-verify=True",
                u"docker://registry.example.com/namespace/repository:updated",
            ],
            "results": SkopeoResults(
                True, [], '{"RepoTags": ["latest", "updated", "created", "zzerror"]}', ""
            ),
        },
        {
            "args": [
                "/usr/bin/skopeo",
                "copy",
                "--src-tls-verify=True",
                "--dest-tls-verify=True",
                "--dest-creds",
                "%s:%s"
                % (mirror.internal_robot.username, retrieve_robot_token(mirror.internal_robot)),
                u"docker://registry.example.com/namespace/repository:created",
                u"docker://localhost:5000/mirror/repo:created",
            ],
            "results": SkopeoResults(True, [], "Success", ""),
        },
        {
            "args": [
                "/usr/bin/skopeo",
                "copy",
                "--src-tls-verify=True",
                "--dest-tls-verify=True",
                "--dest-creds",
                "%s:%s"
                % (mirror.internal_robot.username, retrieve_robot_token(mirror.internal_robot)),
                u"docker://registry.example.com/namespace/repository:updated",
                u"docker://localhost:5000/mirror/repo:updated",
            ],
            "results": SkopeoResults(True, [], "Success", ""),
        },
        {
            "args": [
                "/usr/bin/skopeo",
                "copy",
                "--src-tls-verify=True",
                "--dest-tls-verify=True",
                "--dest-creds",
                "%s:%s"
                % (mirror.internal_robot.username, retrieve_robot_token(mirror.internal_robot)),
                u"docker://registry.example.com/namespace/repository:zzerror",
                u"docker://localhost:5000/mirror/repo:zzerror",
            ],
            "results": SkopeoResults(False, [], "", "ERROR"),
        },
    ]

    def skopeo_test(args, proxy):
        # Verify the next expected invocation; additionally simulate the side
        # effect of a successful copy by creating the corresponding tag.
        try:
            skopeo_call = skopeo_calls.pop(0)
            assert args == skopeo_call["args"]
            assert proxy == {}

            if args[1] == "copy" and args[6].endswith(":updated"):
                _create_tag(repo, "updated")
            elif args[1] == "copy" and args[6].endswith(":created"):
                _create_tag(repo, "created")

            return skopeo_call["results"]
        except Exception as e:
            skopeo_calls.append(skopeo_call)
            raise e

    run_skopeo_mock.side_effect = skopeo_test

    worker = RepoMirrorWorker()
    worker._process_mirrors()

    assert [] == skopeo_calls

    # TODO: how to assert tag.retarget_tag() and tag.delete_tag() called?
def test_remove_obsolete_tags(initialized_db):
    """
    As part of the mirror, the set of tags on the remote repository is compared to the local
    existing tags. Those not present on the remote are removed locally.
    """
    mirror, repository = create_mirror_repo_robot(["updated", "created"], repo_name="removed")

    manifest = Manifest.get()
    image = find_create_or_link_image("removed", repository, None, {}, "local_us")
    tag = create_or_update_tag_for_repo(
        repository, "oldtag", image.docker_image_id, oci_manifest=manifest, reversion=True
    )

    incoming_tags = ["one", "two"]
    deleted_tags = delete_obsolete_tags(mirror, incoming_tags)

    # "oldtag" is not in the incoming set, so it must be the only tag removed.
    # Use a distinct loop variable: the original comprehension shadowed the
    # very ``tag`` it was being compared against.
    assert [deleted.name for deleted in deleted_tags] == [tag.name]
@disable_existing_mirrors
@mock.patch("util.repomirror.skopeomirror.SkopeoMirror.run_skopeo")
def test_mirror_config_server_hostname(run_skopeo_mock, initialized_db, app, monkeypatch):
    """
    Set REPO_MIRROR_SERVER_HOSTNAME to override SERVER_HOSTNAME config
    """
    mirror, repo = create_mirror_repo_robot(["latest", "7.1"])

    # DEBUGLOG adds "--debug" to every call; the copy destination must use
    # the overridden hostname instead of the configured SERVER_HOSTNAME.
    skopeo_calls = [
        {
            "args": [
                "/usr/bin/skopeo",
                "--debug",
                "inspect",
                "--tls-verify=True",
                u"docker://registry.example.com/namespace/repository:latest",
            ],
            "results": SkopeoResults(True, [], '{"RepoTags": ["latest"]}', ""),
        },
        {
            "args": [
                "/usr/bin/skopeo",
                "--debug",
                "copy",
                "--src-tls-verify=True",
                "--dest-tls-verify=True",
                "--dest-creds",
                "%s:%s"
                % (mirror.internal_robot.username, retrieve_robot_token(mirror.internal_robot)),
                u"docker://registry.example.com/namespace/repository:latest",
                u"docker://config_server_hostname/mirror/repo:latest",
            ],
            "results": SkopeoResults(True, [], "Success", ""),
        },
    ]

    def skopeo_test(args, proxy):
        # Fail loudly if skopeo runs more often than expected (the old
        # pop-then-reference pattern raised a confusing UnboundLocalError);
        # only consume an expectation once it fully matched.
        assert skopeo_calls, "unexpected skopeo invocation: %s" % (args,)
        skopeo_call = skopeo_calls[0]
        assert args == skopeo_call["args"]
        assert proxy == {}
        skopeo_calls.pop(0)
        return skopeo_call["results"]

    run_skopeo_mock.side_effect = skopeo_test
    monkeypatch.setenv("DEBUGLOG", "true")

    # Use the imported ``mock`` module explicitly: bare ``patch`` is not
    # imported in this module and only worked via a wildcard fixture import.
    with mock.patch.dict(
        "data.model.config.app_config", {"REPO_MIRROR_SERVER_HOSTNAME": "config_server_hostname"}
    ):
        worker = RepoMirrorWorker()
        worker._process_mirrors()

    assert [] == skopeo_calls
@disable_existing_mirrors
@mock.patch("util.repomirror.skopeomirror.SkopeoMirror.run_skopeo")
def test_quote_params(run_skopeo_mock, initialized_db, app):
    """
    Basic test of successful mirror
    """
    mirror, repo = create_mirror_repo_robot(["latest", "7.1"])
    # Shell metacharacters in the external reference and username must reach
    # skopeo quoted/escaped, never interpreted by a shell.
    mirror.external_reference = "& rm -rf /;/namespace/repository"
    mirror.external_registry_username = "`rm -rf /`"
    mirror.save()

    skopeo_calls = [
        {
            "args": [
                "/usr/bin/skopeo",
                "inspect",
                "--tls-verify=True",
                "--creds",
                u"`rm -rf /`",
                u"'docker://& rm -rf /;/namespace/repository:latest'",
            ],
            "results": SkopeoResults(True, [], '{"RepoTags": ["latest"]}', ""),
        },
        {
            "args": [
                "/usr/bin/skopeo",
                "copy",
                "--src-tls-verify=True",
                "--dest-tls-verify=True",
                "--dest-creds",
                "%s:%s"
                % (mirror.internal_robot.username, retrieve_robot_token(mirror.internal_robot)),
                "--src-creds",
                u"`rm -rf /`",
                u"'docker://& rm -rf /;/namespace/repository:latest'",
                u"docker://localhost:5000/mirror/repo:latest",
            ],
            "results": SkopeoResults(True, [], "stdout", "stderr"),
        },
    ]

    def skopeo_test(args, proxy):
        # Pop the next expected invocation and verify it; on a mismatch the
        # expectation is pushed back so the final emptiness assert fails too.
        try:
            skopeo_call = skopeo_calls.pop(0)
            assert args == skopeo_call["args"]
            assert proxy == {}

            return skopeo_call["results"]
        except Exception as e:
            skopeo_calls.append(skopeo_call)
            raise e

    run_skopeo_mock.side_effect = skopeo_test

    worker = RepoMirrorWorker()
    worker._process_mirrors()

    assert [] == skopeo_calls
@disable_existing_mirrors
@mock.patch("util.repomirror.skopeomirror.SkopeoMirror.run_skopeo")
def test_quote_params_password(run_skopeo_mock, initialized_db, app):
    """
    Basic test of successful mirror
    """
    mirror, repo = create_mirror_repo_robot(["latest", "7.1"])
    # Shell metacharacters in the reference, username and password must reach
    # skopeo quoted/escaped, never interpreted by a shell.
    mirror.external_reference = "& rm -rf /;/namespace/repository"
    mirror.external_registry_username = "`rm -rf /`"
    mirror.external_registry_password = '""$PATH\\"'
    mirror.save()

    skopeo_calls = [
        {
            "args": [
                "/usr/bin/skopeo",
                "inspect",
                "--tls-verify=True",
                "--creds",
                u'`rm -rf /`:""$PATH\\"',
                u"'docker://& rm -rf /;/namespace/repository:latest'",
            ],
            "results": SkopeoResults(True, [], '{"RepoTags": ["latest"]}', ""),
        },
        {
            "args": [
                "/usr/bin/skopeo",
                "copy",
                "--src-tls-verify=True",
                "--dest-tls-verify=True",
                "--dest-creds",
                u"%s:%s"
                % (mirror.internal_robot.username, retrieve_robot_token(mirror.internal_robot)),
                "--src-creds",
                u'`rm -rf /`:""$PATH\\"',
                u"'docker://& rm -rf /;/namespace/repository:latest'",
                u"docker://localhost:5000/mirror/repo:latest",
            ],
            "results": SkopeoResults(True, [], "stdout", "stderr"),
        },
    ]

    def skopeo_test(args, proxy):
        # Pop the next expected invocation and verify it; on a mismatch the
        # expectation is pushed back so the final emptiness assert fails too.
        try:
            skopeo_call = skopeo_calls.pop(0)
            assert args == skopeo_call["args"]
            assert proxy == {}

            return skopeo_call["results"]
        except Exception as e:
            skopeo_calls.append(skopeo_call)
            raise e

    run_skopeo_mock.side_effect = skopeo_test

    worker = RepoMirrorWorker()
    worker._process_mirrors()

    assert [] == skopeo_calls
@disable_existing_mirrors
@mock.patch("util.repomirror.skopeomirror.SkopeoMirror.run_skopeo")
def test_inspect_error_mirror(run_skopeo_mock, initialized_db, app):
    """
    Test for no tag for skopeo inspect. The mirror is processed four times, asserting that the remaining syncs
    decrement until next sync is bumped to the future, confirming the fourth is never processed.
    """

    def skopeo_test(args, proxy):
        # Pop the next expected invocation and verify it; on a mismatch the
        # expectation is pushed back so the per-call emptiness assert fails.
        try:
            skopeo_call = skopeo_calls.pop(0)
            assert args == skopeo_call["args"]
            assert proxy == {}

            return skopeo_call["results"]
        except Exception as e:
            skopeo_calls.append(skopeo_call)
            raise e

    run_skopeo_mock.side_effect = skopeo_test
    worker = RepoMirrorWorker()

    mirror, repo = create_mirror_repo_robot(["7.1"])

    # Call number 1: both inspects (for "7.1" and "latest") fail, so the
    # retry counter drops from 3 to 2.
    skopeo_calls = [
        {
            "args": [
                "/usr/bin/skopeo",
                "inspect",
                "--tls-verify=True",
                u"docker://registry.example.com/namespace/repository:7.1",
            ],
            "results": SkopeoResults(
                False,
                [],
                "",
                'time="2019-09-18T13:29:40Z" level=fatal msg="Error reading manifest 7.1 in registry.example.com/namespace/repository: manifest unknown: manifest unknown"',
            ),
        },
        {
            "args": [
                "/usr/bin/skopeo",
                "inspect",
                "--tls-verify=True",
                u"docker://registry.example.com/namespace/repository:latest",
            ],
            "results": SkopeoResults(
                False,
                [],
                "",
                'time="2019-09-18T13:29:40Z" level=fatal msg="Error reading manifest latest in registry.example.com/namespace/repository: manifest unknown: manifest unknown"',
            ),
        },
    ]
    worker._process_mirrors()
    mirror = RepoMirrorConfig.get_by_id(mirror.id)
    assert [] == skopeo_calls
    assert 2 == mirror.sync_retries_remaining

    # Call number 2: same failures again, retry counter drops to 1.
    skopeo_calls = [
        {
            "args": [
                "/usr/bin/skopeo",
                "inspect",
                "--tls-verify=True",
                u"docker://registry.example.com/namespace/repository:7.1",
            ],
            "results": SkopeoResults(
                False,
                [],
                "",
                'time="2019-09-18T13:29:40Z" level=fatal msg="Error reading manifest 7.1 in registry.example.com/namespace/repository: manifest unknown: manifest unknown"',
            ),
        },
        {
            "args": [
                "/usr/bin/skopeo",
                "inspect",
                "--tls-verify=True",
                u"docker://registry.example.com/namespace/repository:latest",
            ],
            "results": SkopeoResults(
                False,
                [],
                "",
                'time="2019-09-18T13:29:40Z" level=fatal msg="Error reading manifest latest in registry.example.com/namespace/repository: manifest unknown: manifest unknown"',
            ),
        },
    ]
    worker._process_mirrors()
    mirror = RepoMirrorConfig.get_by_id(mirror.id)
    assert [] == skopeo_calls
    assert 1 == mirror.sync_retries_remaining

    # Call number 3: final failure exhausts the retries; the counter resets
    # to 3 and the next sync is scheduled in the future.
    skopeo_calls = [
        {
            "args": [
                "/usr/bin/skopeo",
                "inspect",
                "--tls-verify=True",
                u"docker://registry.example.com/namespace/repository:7.1",
            ],
            "results": SkopeoResults(
                False,
                [],
                "",
                'time="2019-09-18T13:29:40Z" level=fatal msg="Error reading manifest 7.1 in registry.example.com/namespace/repository: manifest unknown: manifest unknown"',
            ),
        },
        {
            "args": [
                "/usr/bin/skopeo",
                "inspect",
                "--tls-verify=True",
                u"docker://registry.example.com/namespace/repository:latest",
            ],
            "results": SkopeoResults(
                False,
                [],
                "",
                'time="2019-09-18T13:29:40Z" level=fatal msg="Error reading manifest latest in registry.example.com/namespace/repository: manifest unknown: manifest unknown"',
            ),
        },
    ]
    worker._process_mirrors()
    mirror = RepoMirrorConfig.get_by_id(mirror.id)
    assert [] == skopeo_calls
    assert 3 == mirror.sync_retries_remaining

    # Call number 4: the mirror's next sync is now in the future, so the
    # worker must not process it -- both expectations remain unconsumed.
    skopeo_calls = [
        {
            "args": [
                "/usr/bin/skopeo",
                "inspect",
                "--tls-verify=True",
                u"docker://registry.example.com/namespace/repository:7.1",
            ],
            "results": SkopeoResults(
                False,
                [],
                "",
                'time="2019-09-18T13:29:40Z" level=fatal msg="Error reading manifest 7.1 in registry.example.com/namespace/repository: manifest unknown: manifest unknown"',
            ),
        },
        {
            "args": [
                "/usr/bin/skopeo",
                "inspect",
                "--tls-verify=True",
                u"docker://registry.example.com/namespace/repository:latest",
            ],
            "results": SkopeoResults(
                False,
                [],
                "",
                'time="2019-09-18T13:29:40Z" level=fatal msg="Error reading manifest latest in registry.example.com/namespace/repository: manifest unknown: manifest unknown"',
            ),
        },
    ]
    worker._process_mirrors()
    mirror = RepoMirrorConfig.get_by_id(mirror.id)
    assert 2 == len(skopeo_calls)
    assert 3 == mirror.sync_retries_remaining
| 33.009986 | 175 | 0.542308 |
aceb677e88a9bfb0b3bd3fe5a55042a9258d411c | 1,286 | py | Python | Three_Part_Moudule/Pandas/Pandas_2.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 2 | 2019-01-24T15:06:59.000Z | 2019-01-25T07:34:45.000Z | Three_Part_Moudule/Pandas/Pandas_2.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 1 | 2019-12-23T09:45:11.000Z | 2019-12-23T09:45:11.000Z | Three_Part_Moudule/Pandas/Pandas_2.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 1 | 2019-07-18T14:21:35.000Z | 2019-07-18T14:21:35.000Z | # 用loc和iloc进行选取
# For label-based indexing of DataFrame rows, pandas provides the special
# indexing operators loc and iloc.
# They let you select subsets of rows and columns from a DataFrame,
# NumPy-style, using axis labels (loc) or integer positions (iloc).
import pandas as pd
import numpy as np

data = pd.DataFrame(np.arange(16).reshape((4, 4)),
                    index=['Ohio', 'Colorado', 'Utah', 'New York'],
                    columns=['one', 'two', 'three', 'four'])
print(data)
'''
one two three four
Ohio 0 1 2 3
Colorado 4 5 6 7
Utah 8 9 10 11
New York 12 13 14 15
'''
print(data.loc['Colorado', ['two', 'three']])
'''
output:
two 5
three 6
Name: Colorado, dtype: int64
'''
# Then select by integer position with iloc:
print(data.iloc[2, [3, 0, 1]])
'''
output:
four 11
one 8
two 9
Name: Utah, dtype: int64'''
print(data.iloc[2])
'''
output:
one 8
two 9
three 10
four 11
Name: Utah, dtype: int64
'''
print(data.iloc[[1, 2], [3, 0, 1]])
'''
output:
four one two
Colorado 7 4 5
Utah 11 8 9
'''
# Both indexing functions also accept slices of one or several labels
print(data.loc[:'Utah', 'two'])
'''
output:
Ohio 1
Colorado 5
Utah 9
Name: two, dtype: int64
'''
print(data.iloc[:, :3][data.three > 5])
'''
one two three
Colorado 4 5 6
Utah 8 9 10
New York 12 13 14
'''
# Integer indexing
pd.Series(np.arange(3.))
| 17.861111 | 60 | 0.549767 |
aceb67f9d91cbc93b2e855a6a50955e678268281 | 762 | py | Python | django_rest_scaffold/management/commands/cleanmigrations.py | regisec/django-rest-scaffold | 8ba5c5ad3105812d4b0143f4e69bb687ba1ac166 | [
"BSD-3-Clause"
] | null | null | null | django_rest_scaffold/management/commands/cleanmigrations.py | regisec/django-rest-scaffold | 8ba5c5ad3105812d4b0143f4e69bb687ba1ac166 | [
"BSD-3-Clause"
] | 4 | 2019-08-05T05:00:45.000Z | 2021-06-10T19:28:56.000Z | django_rest_scaffold/management/commands/cleanmigrations.py | regisec/django-rest-scaffold | 8ba5c5ad3105812d4b0143f4e69bb687ba1ac166 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: UTF-8 -*-
"""
Created by Régis Eduardo Crestani <regis.crestani@gmail.com> on 19/06/2016.
"""
import os
from django.core.management.base import BaseCommand
from django_rest_scaffold.settings import DJANGO_REST_SCAFFOLD_SETTINGS as SETTINGS
class Command(BaseCommand):
    """Management command that deletes every generated migration file
    (keeping ``__init__.py``) under the configured apps folder."""

    def handle(self, *args, **options):
        print('Searching files to remove...')
        for root, _dirs, names in os.walk(SETTINGS['APPS_FOLDER']):
            # Only act inside ``migrations`` directories.
            if not root.endswith('migrations'):
                continue
            for name in names:
                if name == '__init__.py':
                    continue
                file_path = os.path.join(root, name)
                os.remove(file_path)
                print(' deleted %s' % file_path)
| 38.1 | 83 | 0.612861 |
aceb68733234a2c1b125cd4ad5eca2f1a8d2f80f | 6,199 | py | Python | alf/environments/suite_gym.py | breakds/alf | b3d60048daee2c9625ba44f778e49570d0d029a7 | [
"Apache-2.0"
] | 1 | 2021-11-17T17:08:04.000Z | 2021-11-17T17:08:04.000Z | alf/environments/suite_gym.py | ipsec/alf | 15fd71896eac5ad0987dbe14a9f630b32e0e131f | [
"Apache-2.0"
] | null | null | null | alf/environments/suite_gym.py | ipsec/alf | 15fd71896eac5ad0987dbe14a9f630b32e0e131f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import gym
import gym.spaces
import alf
from alf.environments import gym_wrappers, alf_wrappers, alf_gym_wrapper
@alf.configurable
def load(environment_name,
         env_id=None,
         discount=1.0,
         max_episode_steps=None,
         gym_env_wrappers=(),
         alf_env_wrappers=(),
         image_channel_first=True):
    """Create the named gym environment and wrap it for ALF via ``wrap_env``.

    The episode-length limit defaults to the one registered in the gym spec:
    when ``max_episode_steps`` is None, the spec's limit is used if present,
    otherwise no limit (0) is applied.

    Args:
        environment_name (str): name of the registered gym environment.
        env_id (int): (optional) ID of the environment.
        discount (float): discount to use for the environment.
        max_episode_steps (int): maximum episode length; ``None`` means "use
            the gym spec's default", 0 means "no limit".
        gym_env_wrappers (Iterable): gym wrapper classes applied directly to
            the gym environment.
        alf_env_wrappers (Iterable): ALF wrapper classes applied to the ALF
            environment.
        image_channel_first (bool): whether to transpose image channels to
            the first dimension.

    Returns:
        An AlfEnvironment instance.
    """
    spec = gym.spec(environment_name)
    env = spec.make()

    if max_episode_steps is None:
        max_episode_steps = (spec.max_episode_steps
                             if spec.max_episode_steps is not None else 0)

    return wrap_env(
        env,
        env_id=env_id,
        discount=discount,
        max_episode_steps=max_episode_steps,
        gym_env_wrappers=gym_env_wrappers,
        alf_env_wrappers=alf_env_wrappers,
        image_channel_first=image_channel_first)
@alf.configurable
def wrap_env(gym_env,
             env_id=None,
             discount=1.0,
             max_episode_steps=0,
             gym_env_wrappers=(),
             time_limit_wrapper=alf_wrappers.TimeLimit,
             normalize_action=True,
             clip_action=True,
             alf_env_wrappers=(),
             image_channel_first=True,
             auto_reset=True):
    """Wraps given gym environment with AlfGymWrapper.

    Note that by default a TimeLimit wrapper is used to limit episode lengths
    to the default benchmarks defined by the registered environments.

    Also note that all gym wrappers assume images are 'channel_last' by default,
    while PyTorch only supports 'channel_first' image inputs. To enable this
    transpose, 'image_channel_first' is set as True by default. ``gym_wrappers.ImageChannelFirst``
    is applied after all gym_env_wrappers and before the AlfGymWrapper.

    Args:
        gym_env (gym.Env): An instance of OpenAI gym environment.
        env_id (int): (optional) ID of the environment.
        discount (float): Discount to use for the environment.
        max_episode_steps (int): Used to create a TimeLimitWrapper. No limit is applied
            if set to 0. Usually set to ``gym_spec.max_episode_steps`` as done in ``load``.
            Note that a ``TimeLimit`` wrapper will be applied as the *last* Gym wrapper,
            so if you also use the ``FrameSkip`` Gym wrapper, then the actual max length
            of an episode will be ``skip*max_episode_steps``.
        gym_env_wrappers (Iterable): Iterable with references to gym_wrappers
            classes to use directly on the gym environment.
        time_limit_wrapper (AlfEnvironmentBaseWrapper): Wrapper that accepts
            (env, max_episode_steps) params to enforce a TimeLimit. Usually this
            should be left as the default, alf_wrappers.TimeLimit.
        normalize_action (bool): if True, will scale continuous actions to
            ``[-1, 1]`` to be better used by algorithms that compute entropies.
        clip_action (bool): If True, will clip continuous action to its bound specified
            by ``action_spec``. If ``normalize_action`` is also ``True``, this
            clipping happens after the normalization (i.e., clips to ``[-1, 1]``).
        alf_env_wrappers (Iterable): Iterable with references to alf_wrappers
            classes to use on the ALF environment.
        image_channel_first (bool): whether transpose image channels to first dimension.
            PyTorch only supports channel_first image inputs.
        auto_reset (bool): If True (default), reset the environment automatically after a
            terminal state is reached.

    Returns:
        An AlfEnvironment instance.
    """

    for wrapper in gym_env_wrappers:
        gym_env = wrapper(gym_env)

    # To apply channel_first transpose on gym (py) env
    if image_channel_first:
        gym_env = gym_wrappers.ImageChannelFirst(gym_env)

    if normalize_action:
        # normalize continuous actions to [-1, 1]
        gym_env = gym_wrappers.NormalizedAction(gym_env)

    if clip_action:
        # clip continuous actions according to gym_env.action_space
        gym_env = gym_wrappers.ContinuousActionClip(gym_env)

    env = alf_gym_wrapper.AlfGymWrapper(
        gym_env=gym_env,
        env_id=env_id,
        discount=discount,
        auto_reset=auto_reset,
    )

    if max_episode_steps > 0:
        env = time_limit_wrapper(env, max_episode_steps)

    for wrapper in alf_env_wrappers:
        env = wrapper(env)

    return env
| 40.51634 | 98 | 0.689466 |
aceb68a66cbff1f38bd2db637884befb7404110e | 65,461 | py | Python | lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestLldbGdbServer.py | dan-zheng/llvm-project | 6b792850da0345274758c9260fda5df5e57ab486 | [
"Apache-2.0"
] | null | null | null | lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestLldbGdbServer.py | dan-zheng/llvm-project | 6b792850da0345274758c9260fda5df5e57ab486 | [
"Apache-2.0"
] | null | null | null | lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestLldbGdbServer.py | dan-zheng/llvm-project | 6b792850da0345274758c9260fda5df5e57ab486 | [
"Apache-2.0"
] | null | null | null | """
Test case for testing the gdbremote protocol.
Tests run against debugserver and lldb-server (llgs).
lldb-server tests run where the lldb-server exe is
available.
This class will be broken into smaller test case classes by
gdb remote packet functional areas. For now it contains
the initial set of tests implemented.
"""
from __future__ import division, print_function
import unittest2
import gdbremote_testcase
import lldbgdbserverutils
from lldbsuite.support import seven
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test.lldbdwarf import *
from lldbsuite.test import lldbutil
class LldbGdbServerTestCase(gdbremote_testcase.GdbRemoteTestCaseBase, DwarfOpcodeParser):
    """Exercise the gdb-remote protocol against debugserver and lldb-server."""

    mydir = TestBase.compute_mydir(__file__)
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_exe_starts_debugserver(self):
        """Smoke test: debugserver starts and accepts a connection."""
        self.init_debugserver_test()
        server = self.connect_to_debug_monitor()
    @llgs_test
    def test_exe_starts_llgs(self):
        """Smoke test: lldb-server starts and accepts a connection."""
        self.init_llgs_test()
        server = self.connect_to_debug_monitor()
    def start_no_ack_mode(self):
        """Connect to the stub and switch the connection to no-ack mode."""
        server = self.connect_to_debug_monitor()
        self.assertIsNotNone(server)

        self.add_no_ack_remote_stream()
        self.expect_gdbremote_sequence()
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_start_no_ack_mode_debugserver(self):
        """debugserver variant of start_no_ack_mode."""
        self.init_debugserver_test()
        self.start_no_ack_mode()
    @llgs_test
    def test_start_no_ack_mode_llgs(self):
        """lldb-server variant of start_no_ack_mode."""
        self.init_llgs_test()
        self.start_no_ack_mode()
    def thread_suffix_supported(self):
        """Verify the stub accepts QThreadSuffixSupported (replies OK)."""
        server = self.connect_to_debug_monitor()
        self.assertIsNotNone(server)

        self.add_no_ack_remote_stream()
        self.test_sequence.add_log_lines(
            ["lldb-server < 26> read packet: $QThreadSuffixSupported#e4",
             "lldb-server < 6> send packet: $OK#9a"],
            True)

        self.expect_gdbremote_sequence()
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_thread_suffix_supported_debugserver(self):
        """debugserver variant of thread_suffix_supported."""
        self.init_debugserver_test()
        self.thread_suffix_supported()
    @llgs_test
    def test_thread_suffix_supported_llgs(self):
        """lldb-server variant of thread_suffix_supported."""
        self.init_llgs_test()
        self.thread_suffix_supported()
    def list_threads_in_stop_reply_supported(self):
        """Verify the stub accepts QListThreadsInStopReply (replies OK)."""
        server = self.connect_to_debug_monitor()
        self.assertIsNotNone(server)

        self.add_no_ack_remote_stream()
        self.test_sequence.add_log_lines(
            ["lldb-server < 27> read packet: $QListThreadsInStopReply#21",
             "lldb-server < 6> send packet: $OK#9a"],
            True)

        self.expect_gdbremote_sequence()
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_list_threads_in_stop_reply_supported_debugserver(self):
        """debugserver variant of list_threads_in_stop_reply_supported."""
        self.init_debugserver_test()
        self.list_threads_in_stop_reply_supported()
    @llgs_test
    def test_list_threads_in_stop_reply_supported_llgs(self):
        """lldb-server variant of list_threads_in_stop_reply_supported."""
        self.init_llgs_test()
        self.list_threads_in_stop_reply_supported()
    def c_packet_works(self):
        """Launch the inferior, continue it with $c and expect exit code 0."""
        launch_args = self.install_and_create_launch_args()

        server = self.connect_to_debug_monitor()
        self.assertIsNotNone(server)

        self.add_no_ack_remote_stream()
        self.add_verified_launch_packets(launch_args)
        self.test_sequence.add_log_lines(
            ["read packet: $c#63",
             "send packet: $W00#00"],
            True)

        self.expect_gdbremote_sequence()
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_c_packet_works_debugserver(self):
        """debugserver variant: build the inferior, then run c_packet_works."""
        self.init_debugserver_test()
        self.build()
        self.c_packet_works()
    @llgs_test
    def test_c_packet_works_llgs(self):
        """lldb-server variant: build the inferior, then run c_packet_works."""
        self.init_llgs_test()
        self.build()
        self.c_packet_works()
def inferior_print_exit(self):
    """Continue the inferior and check both its stdout and the exit packet."""
    run_args = self.install_and_create_launch_args()
    stub = self.connect_to_debug_monitor()
    self.assertIsNotNone(stub)

    # Have the inferior echo a known string before exiting.
    run_args += ["hello, world"]

    self.add_no_ack_remote_stream()
    self.add_verified_launch_packets(run_args)
    self.test_sequence.add_log_lines(
        ["read packet: $vCont;c#a8",
         {"type": "output_match",
          "regex": self.maybe_strict_output_regex(r"hello, world\r\n")},
         "send packet: $W00#00"],
        True)

    self.assertIsNotNone(self.expect_gdbremote_sequence())
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_inferior_print_exit_debugserver(self):
    """Inferior stdout + exit check, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.inferior_print_exit()
@skipIfWindows  # No pty support to test any inferior output
@llgs_test
@expectedFlakeyLinux("llvm.org/pr25652")
def test_inferior_print_exit_llgs(self):
    """Inferior stdout + exit check, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.inferior_print_exit()
def first_launch_stop_reply_thread_matches_first_qC(self):
    """Check the thread id in the first $? stop reply matches the first $qC.

    Captures the current-thread id from the $QC reply, then sends $? and
    requires the T-stop reply to repeat the same id (via the sequence
    matcher's "expect_captures").
    """
    launch_args = self.install_and_create_launch_args()
    server = self.connect_to_debug_monitor()
    self.assertIsNotNone(server)

    # build launch args
    launch_args += ["hello, world"]

    self.add_no_ack_remote_stream()
    self.add_verified_launch_packets(launch_args)
    self.test_sequence.add_log_lines(["read packet: $qC#00",
                                      {"direction": "send",
                                       "regex": r"^\$QC([0-9a-fA-F]+)#",
                                       # Capture the current thread id...
                                       "capture": {1: "thread_id"}},
                                      "read packet: $?#00",
                                      {"direction": "send",
                                       "regex": r"^\$T[0-9a-fA-F]{2}thread:([0-9a-fA-F]+)",
                                       # ...and require the stop reply to name the same thread.
                                       "expect_captures": {1: "thread_id"}}],
                                     True)
    self.expect_gdbremote_sequence()
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_first_launch_stop_reply_thread_matches_first_qC_debugserver(self):
    """Stop-reply/qC thread-id agreement, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.first_launch_stop_reply_thread_matches_first_qC()
@llgs_test
def test_first_launch_stop_reply_thread_matches_first_qC_llgs(self):
    """Stop-reply/qC thread-id agreement, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.first_launch_stop_reply_thread_matches_first_qC()
def attach_commandline_continue_app_exits(self):
    """Attach to the inferior, continue it to exit, and verify it died.

    After the $W00 exit packet, checks (where the platform allows) that
    the inferior process is truly gone, both via Popen.poll() and via a
    system-level liveness probe.
    """
    procs = self.prep_debug_monitor_and_inferior()
    self.test_sequence.add_log_lines(
        ["read packet: $vCont;c#a8",
         "send packet: $W00#00"],
        True)
    self.expect_gdbremote_sequence()

    # Wait a moment for completed and now-detached inferior process to
    # clear.
    time.sleep(1)

    if not lldb.remote_platform:
        # Process should be dead now. Reap results.
        poll_result = procs["inferior"].poll()
        self.assertIsNotNone(poll_result)

    # Where possible, verify at the system level that the process is not
    # running.
    self.assertFalse(
        lldbgdbserverutils.process_is_running(
            procs["inferior"].pid, False))
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_attach_commandline_continue_app_exits_debugserver(self):
    """Attach + continue-to-exit, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_attach()
    self.attach_commandline_continue_app_exits()
@expectedFailureNetBSD
@llgs_test
def test_attach_commandline_continue_app_exits_llgs(self):
    """Attach + continue-to-exit, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_attach()
    self.attach_commandline_continue_app_exits()
def qRegisterInfo_returns_one_valid_result(self):
    """Ask for register 0's info and validate the parsed response."""
    run_args = self.install_and_create_launch_args()
    stub = self.connect_to_debug_monitor()
    self.assertIsNotNone(stub)

    # Build the expected protocol stream.
    self.add_no_ack_remote_stream()
    self.add_verified_launch_packets(run_args)
    self.test_sequence.add_log_lines(
        ["read packet: $qRegisterInfo0#00",
         {"direction": "send",
          "regex": r"^\$(.+);#[0-9A-Fa-f]{2}",
          "capture": {1: "reginfo_0"}}],
        True)

    # Run the stream and pull out the captured reply.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    reply = context.get("reginfo_0")
    self.assertIsNotNone(reply)

    self.assert_valid_reg_info(
        lldbgdbserverutils.parse_reg_info_response(reply))
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qRegisterInfo_returns_one_valid_result_debugserver(self):
    """qRegisterInfo0 validity, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.qRegisterInfo_returns_one_valid_result()
@llgs_test
def test_qRegisterInfo_returns_one_valid_result_llgs(self):
    """qRegisterInfo0 validity, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.qRegisterInfo_returns_one_valid_result()
def qRegisterInfo_returns_all_valid_results(self):
    """Collect every qRegisterInfo reply and validate each entry."""
    run_args = self.install_and_create_launch_args()
    stub = self.connect_to_debug_monitor()
    self.assertIsNotNone(stub)

    # Build the expected protocol stream.
    self.add_no_ack_remote_stream()
    self.add_verified_launch_packets(run_args)
    self.add_register_info_collection_packets()

    # Run the stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Every register info entry the stub returned must validate.
    for entry in self.parse_register_info_packets(context):
        self.assert_valid_reg_info(entry)
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qRegisterInfo_returns_all_valid_results_debugserver(self):
    """All qRegisterInfo entries valid, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.qRegisterInfo_returns_all_valid_results()
@llgs_test
def test_qRegisterInfo_returns_all_valid_results_llgs(self):
    """All qRegisterInfo entries valid, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.qRegisterInfo_returns_all_valid_results()
def qRegisterInfo_contains_required_generics(self):
    """Ensure qRegisterInfo advertises the required generic registers.

    Collects all register info entries and checks that pc, sp, flags and
    (except on ppc64le, where the frame pointer aliases the stack
    pointer) fp generic registers are present.
    """
    launch_args = self.install_and_create_launch_args()
    server = self.connect_to_debug_monitor()
    self.assertIsNotNone(server)

    # Build the expected protocol stream.
    self.add_no_ack_remote_stream()
    self.add_verified_launch_packets(launch_args)
    self.add_register_info_collection_packets()

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Gather register info entries.
    reg_infos = self.parse_register_info_packets(context)

    # Collect all generic registers found.  Only membership is tested,
    # so a set is the right container (was a dict of dummy 1 values).
    generic_regs = {
        reg_info['generic'] for reg_info in reg_infos if 'generic' in reg_info}

    # Ensure we have a program counter register.
    self.assertIn('pc', generic_regs)

    # Ensure we have a frame pointer register. PPC64le's FP is the same as SP.
    if self.getArchitecture() != 'powerpc64le':
        self.assertIn('fp', generic_regs)

    # Ensure we have a stack pointer register.
    self.assertIn('sp', generic_regs)

    # Ensure we have a flags register.
    self.assertIn('flags', generic_regs)
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qRegisterInfo_contains_required_generics_debugserver(self):
    """Generic registers present, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.qRegisterInfo_contains_required_generics()
@llgs_test
def test_qRegisterInfo_contains_required_generics_llgs(self):
    """Generic registers present, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.qRegisterInfo_contains_required_generics()
def qRegisterInfo_contains_at_least_one_register_set(self):
    """Ensure qRegisterInfo reports at least one register set."""
    launch_args = self.install_and_create_launch_args()
    server = self.connect_to_debug_monitor()
    self.assertIsNotNone(server)

    # Build the expected protocol stream.
    self.add_no_ack_remote_stream()
    self.add_verified_launch_packets(launch_args)
    self.add_register_info_collection_packets()

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Gather register info entries.
    reg_infos = self.parse_register_info_packets(context)

    # Collect the distinct register set names.  Only the count matters,
    # so use a set (was a dict of dummy 1 values).
    register_sets = {
        reg_info['set'] for reg_info in reg_infos if 'set' in reg_info}
    self.assertGreaterEqual(len(register_sets), 1)
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qRegisterInfo_contains_at_least_one_register_set_debugserver(
        self):
    """At least one register set, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.qRegisterInfo_contains_at_least_one_register_set()
@llgs_test
def test_qRegisterInfo_contains_at_least_one_register_set_llgs(self):
    """At least one register set, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.qRegisterInfo_contains_at_least_one_register_set()
def targetHasAVX(self):
    """Return True if the target CPU advertises AVX support.

    Only Linux triples are actually probed (by scanning /proc/cpuinfo for
    an " avx " flag); all other platforms conservatively return True.
    For remote platforms the cpuinfo file is first fetched to the host
    and removed again on teardown.
    """
    triple = self.dbg.GetSelectedPlatform().GetTriple()

    # TODO other platforms, please implement this function
    if not re.match(".*-.*-linux", triple):
        return True

    # Need to do something different for non-Linux/Android targets
    if lldb.remote_platform:
        self.runCmd('platform get-file "/proc/cpuinfo" "cpuinfo"')
        cpuinfo_path = "cpuinfo"
        self.addTearDownHook(lambda: os.unlink("cpuinfo"))
    else:
        cpuinfo_path = "/proc/cpuinfo"

    # Use a context manager so the file handle is closed even if read()
    # raises (the original open/read/close leaked the handle on error).
    with open(cpuinfo_path, 'r') as f:
        cpuinfo = f.read()
    return " avx " in cpuinfo
def qRegisterInfo_contains_avx_registers(self):
    """Check the AVX register set is reported iff the target has AVX.

    Compares targetHasAVX() against the presence of the
    "Advanced Vector Extensions" register set in the qRegisterInfo
    replies.
    """
    launch_args = self.install_and_create_launch_args()
    server = self.connect_to_debug_monitor()
    self.assertIsNotNone(server)

    # Build the expected protocol stream.
    self.add_no_ack_remote_stream()
    self.add_verified_launch_packets(launch_args)
    self.add_register_info_collection_packets()

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Gather register info entries.
    reg_infos = self.parse_register_info_packets(context)

    # Collect the distinct register set names.  Only membership is
    # tested, so use a set (was a dict of dummy 1 values).
    register_sets = {
        reg_info['set'] for reg_info in reg_infos if 'set' in reg_info}
    self.assertEqual(
        self.targetHasAVX(),
        "Advanced Vector Extensions" in register_sets)
@expectedFailureAll(oslist=["windows"])  # no avx for now.
@expectedFailureNetBSD
@llgs_test
def test_qRegisterInfo_contains_avx_registers_llgs(self):
    """AVX register set reporting, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.qRegisterInfo_contains_avx_registers()
def qThreadInfo_contains_thread(self):
    """Check threadinfo reports exactly one thread for a fresh inferior."""
    procs = self.prep_debug_monitor_and_inferior()
    self.add_threadinfo_collection_packets()

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Gather threadinfo entries; a just-started inferior has one thread.
    tids = self.parse_threadinfo_packets(context)
    self.assertIsNotNone(tids)
    self.assertEqual(len(tids), 1)
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qThreadInfo_contains_thread_launch_debugserver(self):
    """Single-thread threadinfo after launch, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qThreadInfo_contains_thread()
@expectedFailureNetBSD
@llgs_test
def test_qThreadInfo_contains_thread_launch_llgs(self):
    """Single-thread threadinfo after launch, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qThreadInfo_contains_thread()
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qThreadInfo_contains_thread_attach_debugserver(self):
    """Single-thread threadinfo after attach, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_attach()
    self.qThreadInfo_contains_thread()
@expectedFailureAll(oslist=["windows"])  # expect one more thread stopped
@expectedFailureNetBSD
@llgs_test
def test_qThreadInfo_contains_thread_attach_llgs(self):
    """Single-thread threadinfo after attach, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_attach()
    self.qThreadInfo_contains_thread()
def qThreadInfo_matches_qC(self):
    """Check the sole threadinfo thread matches the $qC current thread."""
    procs = self.prep_debug_monitor_and_inferior()
    self.add_threadinfo_collection_packets()
    self.test_sequence.add_log_lines(
        ["read packet: $qC#00",
         {"direction": "send",
          "regex": r"^\$QC([0-9a-fA-F]+)#",
          "capture": {1: "thread_id"}}],
        True)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Threadinfo must report exactly one thread.
    tids = self.parse_threadinfo_packets(context)
    self.assertIsNotNone(tids)
    self.assertEqual(len(tids), 1)

    # $QC must have produced a valid hex thread id...
    qc_tid_hex = context.get("thread_id")
    self.assertIsNotNone(qc_tid_hex)

    # ...and it must be the very thread threadinfo listed.
    self.assertEqual(tids[0], int(qc_tid_hex, 16))
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qThreadInfo_matches_qC_launch_debugserver(self):
    """threadinfo/qC agreement after launch, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qThreadInfo_matches_qC()
@llgs_test
def test_qThreadInfo_matches_qC_launch_llgs(self):
    """threadinfo/qC agreement after launch, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qThreadInfo_matches_qC()
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qThreadInfo_matches_qC_attach_debugserver(self):
    """threadinfo/qC agreement after attach, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_attach()
    self.qThreadInfo_matches_qC()
@expectedFailureAll(oslist=["windows"])  # expect one more thread stopped
@expectedFailureNetBSD
@llgs_test
def test_qThreadInfo_matches_qC_attach_llgs(self):
    """threadinfo/qC agreement after attach, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_attach()
    self.qThreadInfo_matches_qC()
def p_returns_correct_data_size_for_each_qRegisterInfo(self):
    """Read every register with $p and check each reply's hex length.

    For each register reported by qRegisterInfo (skipping entries
    without a register set, which cannot be read through the normal
    register context), the $p reply must be two hex characters per byte,
    i.e. 2 * bitsize / 8 characters long.
    """
    procs = self.prep_debug_monitor_and_inferior()
    self.add_register_info_collection_packets()

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Gather register info entries.
    reg_infos = self.parse_register_info_packets(context)
    self.assertIsNotNone(reg_infos)
    self.assertTrue(len(reg_infos) > 0)

    byte_order = self.get_target_byte_order()

    # Read value for each register.
    reg_index = 0
    for reg_info in reg_infos:
        # Skip registers that don't have a register set. For x86, these are
        # the DRx registers, which have no LLDB-kind register number and thus
        # cannot be read via normal
        # NativeRegisterContext::ReadRegister(reg_info,...) calls.
        if "set" not in reg_info:
            continue

        # Clear existing packet expectations.
        self.reset_test_sequence()

        # Run the register query.
        self.test_sequence.add_log_lines(
            ["read packet: $p{0:x}#00".format(reg_index),
             {"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}}],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Verify the response length.
        p_response = context.get("p_response")
        self.assertIsNotNone(p_response)

        if "dynamic_size_dwarf_expr_bytes" in reg_info:
            self.updateRegInfoBitsize(reg_info, byte_order)
        # Use integer division so an int length is compared with an int
        # (plain / yields a float in Python 3).
        self.assertEqual(len(p_response), 2 * int(reg_info["bitsize"]) // 8)

        # Increment loop
        reg_index += 1
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_p_returns_correct_data_size_for_each_qRegisterInfo_launch_debugserver(
        self):
    """$p response sizes after launch, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.p_returns_correct_data_size_for_each_qRegisterInfo()
@expectedFailureNetBSD
@llgs_test
def test_p_returns_correct_data_size_for_each_qRegisterInfo_launch_llgs(
        self):
    """$p response sizes after launch, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.p_returns_correct_data_size_for_each_qRegisterInfo()
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_p_returns_correct_data_size_for_each_qRegisterInfo_attach_debugserver(
        self):
    """$p response sizes after attach, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_attach()
    self.p_returns_correct_data_size_for_each_qRegisterInfo()
@expectedFailureNetBSD
@llgs_test
def test_p_returns_correct_data_size_for_each_qRegisterInfo_attach_llgs(
        self):
    """$p response sizes after attach, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_attach()
    self.p_returns_correct_data_size_for_each_qRegisterInfo()
def Hg_switches_to_3_threads(self):
    """Verify $Hg can select each of three inferior threads.

    Starts the inferior with two extra worker threads, waits for all
    three to appear, then for each thread sets it current with $Hg and
    confirms $qC reports the same thread id.
    """
    # Startup the inferior with three threads (main + 2 new ones).
    procs = self.prep_debug_monitor_and_inferior(
        inferior_args=["thread:new", "thread:new"])

    # Let the inferior process have a few moments to start up the thread
    # when launched.  (The launch scenario has no time to run, so threads
    # won't be there yet.)
    self.run_process_then_stop(run_seconds=1)

    # Wait at most x seconds for 3 threads to be present.
    threads = self.wait_for_thread_count(3, timeout_seconds=self._WAIT_TIMEOUT)
    self.assertEqual(len(threads), 3)

    # verify we can $H to each thread, and $qC matches the thread we set.
    for thread in threads:
        # Change to each thread, verify current thread id.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: $Hg{0:x}#00".format(thread),  # Set current thread.
             "send packet: $OK#00",
             "read packet: $qC#00",
             {"direction": "send", "regex": r"^\$QC([0-9a-fA-F]+)#", "capture": {1: "thread_id"}}],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Verify the thread id.
        self.assertIsNotNone(context.get("thread_id"))
        self.assertEqual(int(context.get("thread_id"), 16), thread)
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_Hg_switches_to_3_threads_launch_debugserver(self):
    """$Hg thread selection after launch, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.Hg_switches_to_3_threads()
@expectedFailureAll(oslist=["windows"])  # expect 4 threads
@expectedFailureNetBSD
@llgs_test
def test_Hg_switches_to_3_threads_launch_llgs(self):
    """$Hg thread selection after launch, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.Hg_switches_to_3_threads()
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_Hg_switches_to_3_threads_attach_debugserver(self):
    """$Hg thread selection after attach, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_attach()
    self.Hg_switches_to_3_threads()
@expectedFailureAll(oslist=["windows"])  # expecting one more thread
@expectedFailureNetBSD
@llgs_test
def test_Hg_switches_to_3_threads_attach_llgs(self):
    """$Hg thread selection after attach, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_attach()
    self.Hg_switches_to_3_threads()
def Hc_then_Csignal_signals_correct_thread(self, segfault_signo):
    """Verify $Hc + $C delivers a signal to the selected thread.

    Runs an inferior whose worker threads each raise SIGSEGV.  For each
    faulting thread the test selects it with $Hc, continues with
    $C<SIGUSR1>, and checks — via the inferior's own printed output —
    that the handler ran on that same thread and that the thread
    survived past the SIGSEGV.

    segfault_signo: the stop-reply signal number expected for the
    segfault (debugserver and lldb-server report different values).
    """
    # NOTE only run this one in inferior-launched mode: we can't grab inferior stdout when running attached,
    # and the test requires getting stdout from the exe.

    NUM_THREADS = 3

    # Startup the inferior with three threads (main + NUM_THREADS-1 worker threads).
    # inferior_args=["thread:print-ids"]
    inferior_args = ["thread:segfault"]
    for i in range(NUM_THREADS - 1):
        # if i > 0:
        # Give time between thread creation/segfaulting for the handler to work.
        # inferior_args.append("sleep:1")
        inferior_args.append("thread:new")
    inferior_args.append("sleep:10")

    # Launch/attach.  (In our case, this should only ever be launched since
    # we need inferior stdout/stderr).
    procs = self.prep_debug_monitor_and_inferior(
        inferior_args=inferior_args)
    self.test_sequence.add_log_lines(["read packet: $c#63"], True)
    context = self.expect_gdbremote_sequence()

    # Let the inferior process have a few moments to start up the thread when launched.
    # context = self.run_process_then_stop(run_seconds=1)

    # Wait at most x seconds for all threads to be present.
    # threads = self.wait_for_thread_count(NUM_THREADS, timeout_seconds=5)
    # self.assertEquals(len(threads), NUM_THREADS)

    signaled_tids = {}
    print_thread_ids = {}

    # Switch to each thread, deliver a signal, and verify signal delivery
    for i in range(NUM_THREADS - 1):
        # Run until SIGSEGV comes in.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines([{"direction": "send",
                                           "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);",
                                           "capture": {1: "signo",
                                                       2: "thread_id"}}],
                                         True)

        context = self.expect_gdbremote_sequence(timeout_seconds=self._DEFAULT_TIMEOUT)
        self.assertIsNotNone(context)
        signo = context.get("signo")
        self.assertEqual(int(signo, 16), segfault_signo)

        # Ensure we haven't seen this tid yet.
        thread_id = int(context.get("thread_id"), 16)
        self.assertFalse(thread_id in signaled_tids)
        signaled_tids[thread_id] = 1

        # Send SIGUSR1 to the thread that signaled the SIGSEGV.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            [
                # Set the continue thread.
                # Set current thread.
                "read packet: $Hc{0:x}#00".format(thread_id),
                "send packet: $OK#00",

                # Continue sending the signal number to the continue thread.
                # The commented out packet is a way to do this same operation without using
                # a $Hc (but this test is testing $Hc, so we'll stick with the former).
                "read packet: $C{0:x}#00".format(lldbutil.get_signal_number('SIGUSR1')),
                # "read packet: $vCont;C{0:x}:{1:x};c#00".format(lldbutil.get_signal_number('SIGUSR1'), thread_id),

                # FIXME: Linux does not report the thread stop on the delivered signal (SIGUSR1 here). MacOSX debugserver does.
                # But MacOSX debugserver isn't guaranteeing the thread the signal handler runs on, so currently its an XFAIL.
                # Need to rectify behavior here. The linux behavior is more intuitive to me since we're essentially swapping out
                # an about-to-be-delivered signal (for which we already sent a stop packet) to a different signal.
                # {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                # "read packet: $c#63",
                {"type": "output_match", "regex": r"^received SIGUSR1 on thread id: ([0-9a-fA-F]+)\r\nthread ([0-9a-fA-F]+): past SIGSEGV\r\n", "capture": {1: "print_thread_id", 2: "post_handle_thread_id"}},
            ],
            True)

        # Run the sequence.
        context = self.expect_gdbremote_sequence(
            timeout_seconds=self._DEFAULT_TIMEOUT)
        self.assertIsNotNone(context)

        # Ensure the stop signal is the signal we delivered.
        # stop_signo = context.get("stop_signo")
        # self.assertIsNotNone(stop_signo)
        # self.assertEquals(int(stop_signo,16), lldbutil.get_signal_number('SIGUSR1'))

        # Ensure the stop thread is the thread to which we delivered the signal.
        # stop_thread_id = context.get("stop_thread_id")
        # self.assertIsNotNone(stop_thread_id)
        # self.assertEquals(int(stop_thread_id,16), thread_id)

        # Ensure we haven't seen this thread id yet. The inferior's
        # self-obtained thread ids are not guaranteed to match the stub
        # tids (at least on MacOSX).
        print_thread_id = context.get("print_thread_id")
        self.assertIsNotNone(print_thread_id)
        print_thread_id = int(print_thread_id, 16)
        self.assertFalse(print_thread_id in print_thread_ids)

        # Now remember this print (i.e. inferior-reflected) thread id and
        # ensure we don't hit it again.
        print_thread_ids[print_thread_id] = 1

        # Ensure post signal-handle thread id matches the thread that
        # initially raised the SIGSEGV.
        post_handle_thread_id = context.get("post_handle_thread_id")
        self.assertIsNotNone(post_handle_thread_id)
        post_handle_thread_id = int(post_handle_thread_id, 16)
        self.assertEqual(post_handle_thread_id, print_thread_id)
@unittest2.expectedFailure()
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_Hc_then_Csignal_signals_correct_thread_launch_debugserver(self):
    """$Hc/$C signal delivery, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    # Darwin debugserver translates some signals like SIGSEGV into some gdb
    # expectations about fixed signal numbers.
    self.Hc_then_Csignal_signals_correct_thread(self.TARGET_EXC_BAD_ACCESS)
@skipIfWindows  # no SIGSEGV support
@expectedFailureNetBSD
@llgs_test
def test_Hc_then_Csignal_signals_correct_thread_launch_llgs(self):
    """$Hc/$C signal delivery, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.Hc_then_Csignal_signals_correct_thread(
        lldbutil.get_signal_number('SIGSEGV'))
def m_packet_reads_memory(self):
    """Check $m reads back exactly the memory the inferior wrote.

    The inferior stores a known string and prints its address; the test
    interrupts the inferior, reads that address with $m, and compares
    the unhexlified reply against the original contents.
    """
    # This is the memory we will write into the inferior and then ensure we
    # can read back with $m.
    MEMORY_CONTENTS = "Test contents 0123456789 ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz"

    # Start up the inferior.
    procs = self.prep_debug_monitor_and_inferior(
        inferior_args=[
            "set-message:%s" %
            MEMORY_CONTENTS,
            "get-data-address-hex:g_message",
            "sleep:5"])

    # Run the process
    self.test_sequence.add_log_lines(
        [
            # Start running after initial stop.
            "read packet: $c#63",
            # Match output line that prints the memory address of the message buffer within the inferior.
            # Note we require launch-only testing so we can get inferior output.
            {"type": "output_match", "regex": self.maybe_strict_output_regex(r"data address: 0x([0-9a-fA-F]+)\r\n"),
             "capture": {1: "message_address"}},
            # Now stop the inferior.
            "read packet: {}".format(chr(3)),
            # And wait for the stop notification.
            {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}],
        True)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Grab the message address.
    self.assertIsNotNone(context.get("message_address"))
    message_address = int(context.get("message_address"), 16)

    # Grab contents from the inferior.
    self.reset_test_sequence()
    self.test_sequence.add_log_lines(
        ["read packet: $m{0:x},{1:x}#00".format(message_address, len(MEMORY_CONTENTS)),
         {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", "capture": {1: "read_contents"}}],
        True)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Ensure what we read from inferior memory is what we wrote.
    self.assertIsNotNone(context.get("read_contents"))
    read_contents = seven.unhexlify(context.get("read_contents"))
    self.assertEqual(read_contents, MEMORY_CONTENTS)
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_m_packet_reads_memory_debugserver(self):
    """$m memory read, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.m_packet_reads_memory()
@skipIfWindows  # No pty support to test any inferior output
@llgs_test
def test_m_packet_reads_memory_llgs(self):
    """$m memory read, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.m_packet_reads_memory()
def qMemoryRegionInfo_is_supported(self):
    """Check the stub replies OK to a bare $qMemoryRegionInfo query."""
    procs = self.prep_debug_monitor_and_inferior()

    # Ask if it supports $qMemoryRegionInfo.
    query = [
        "read packet: $qMemoryRegionInfo#00",
        "send packet: $OK#00",
    ]
    self.test_sequence.add_log_lines(query, True)

    self.expect_gdbremote_sequence()
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qMemoryRegionInfo_is_supported_debugserver(self):
    """qMemoryRegionInfo support probe, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qMemoryRegionInfo_is_supported()
@llgs_test
def test_qMemoryRegionInfo_is_supported_llgs(self):
    """qMemoryRegionInfo support probe, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qMemoryRegionInfo_is_supported()
def qMemoryRegionInfo_reports_code_address_as_executable(self):
    """Check qMemoryRegionInfo marks a code address readable+executable.

    The inferior prints the address of one of its functions; the region
    containing it must report r and x permissions and must contain the
    queried address.
    """
    # Start up the inferior.
    procs = self.prep_debug_monitor_and_inferior(
        inferior_args=["get-code-address-hex:hello", "sleep:5"])

    # Run the process
    self.test_sequence.add_log_lines(
        [
            # Start running after initial stop.
            "read packet: $c#63",
            # Match output line that prints the memory address of the message buffer within the inferior.
            # Note we require launch-only testing so we can get inferior output.
            {"type": "output_match", "regex": self.maybe_strict_output_regex(r"code address: 0x([0-9a-fA-F]+)\r\n"),
             "capture": {1: "code_address"}},
            # Now stop the inferior.
            "read packet: {}".format(chr(3)),
            # And wait for the stop notification.
            {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}],
        True)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Grab the code address.
    self.assertIsNotNone(context.get("code_address"))
    code_address = int(context.get("code_address"), 16)

    # Grab memory region info from the inferior.
    self.reset_test_sequence()
    self.add_query_memory_region_packets(code_address)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)
    mem_region_dict = self.parse_memory_region_packet(context)

    # Ensure there are no errors reported.
    self.assertFalse("error" in mem_region_dict)

    # Ensure code address is readable and executable.
    self.assertTrue("permissions" in mem_region_dict)
    self.assertTrue("r" in mem_region_dict["permissions"])
    self.assertTrue("x" in mem_region_dict["permissions"])

    # Ensure the start address and size encompass the address we queried.
    self.assert_address_within_memory_region(code_address, mem_region_dict)
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qMemoryRegionInfo_reports_code_address_as_executable_debugserver(
        self):
    """Code-region permissions, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qMemoryRegionInfo_reports_code_address_as_executable()
@skipIfWindows  # No pty support to test any inferior output
@llgs_test
def test_qMemoryRegionInfo_reports_code_address_as_executable_llgs(self):
    """Code-region permissions, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qMemoryRegionInfo_reports_code_address_as_executable()
def qMemoryRegionInfo_reports_stack_address_as_readable_writeable(self):
    """Check qMemoryRegionInfo marks a stack address readable+writeable.

    The inferior prints a stack address; the region containing it must
    report r and w permissions and must contain the queried address.
    """
    # Start up the inferior.
    procs = self.prep_debug_monitor_and_inferior(
        inferior_args=["get-stack-address-hex:", "sleep:5"])

    # Run the process
    self.test_sequence.add_log_lines(
        [
            # Start running after initial stop.
            "read packet: $c#63",
            # Match output line that prints the memory address of the message buffer within the inferior.
            # Note we require launch-only testing so we can get inferior output.
            {"type": "output_match", "regex": self.maybe_strict_output_regex(r"stack address: 0x([0-9a-fA-F]+)\r\n"),
             "capture": {1: "stack_address"}},
            # Now stop the inferior.
            "read packet: {}".format(chr(3)),
            # And wait for the stop notification.
            {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}],
        True)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Grab the address.
    self.assertIsNotNone(context.get("stack_address"))
    stack_address = int(context.get("stack_address"), 16)

    # Grab memory region info from the inferior.
    self.reset_test_sequence()
    self.add_query_memory_region_packets(stack_address)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)
    mem_region_dict = self.parse_memory_region_packet(context)

    # Ensure there are no errors reported.
    self.assertFalse("error" in mem_region_dict)

    # Ensure address is readable and writeable.
    self.assertTrue("permissions" in mem_region_dict)
    self.assertTrue("r" in mem_region_dict["permissions"])
    self.assertTrue("w" in mem_region_dict["permissions"])

    # Ensure the start address and size encompass the address we queried.
    self.assert_address_within_memory_region(
        stack_address, mem_region_dict)
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qMemoryRegionInfo_reports_stack_address_as_readable_writeable_debugserver(
        self):
    """Stack-region permissions, debugserver variant."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qMemoryRegionInfo_reports_stack_address_as_readable_writeable()
@skipIfWindows  # No pty support to test any inferior output
@llgs_test
def test_qMemoryRegionInfo_reports_stack_address_as_readable_writeable_llgs(
        self):
    """Stack-region permissions, lldb-server variant."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qMemoryRegionInfo_reports_stack_address_as_readable_writeable()
def qMemoryRegionInfo_reports_heap_address_as_readable_writeable(self):
    """Check qMemoryRegionInfo marks a heap address readable+writeable.

    The inferior prints a heap allocation's address; the region
    containing it must report r and w permissions and must contain the
    queried address.
    """
    # Start up the inferior.
    procs = self.prep_debug_monitor_and_inferior(
        inferior_args=["get-heap-address-hex:", "sleep:5"])

    # Run the process
    self.test_sequence.add_log_lines(
        [
            # Start running after initial stop.
            "read packet: $c#63",
            # Match output line that prints the memory address of the message buffer within the inferior.
            # Note we require launch-only testing so we can get inferior output.
            {"type": "output_match", "regex": self.maybe_strict_output_regex(r"heap address: 0x([0-9a-fA-F]+)\r\n"),
             "capture": {1: "heap_address"}},
            # Now stop the inferior.
            "read packet: {}".format(chr(3)),
            # And wait for the stop notification.
            {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}],
        True)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Grab the address.
    self.assertIsNotNone(context.get("heap_address"))
    heap_address = int(context.get("heap_address"), 16)

    # Grab memory region info from the inferior.
    self.reset_test_sequence()
    self.add_query_memory_region_packets(heap_address)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)
    mem_region_dict = self.parse_memory_region_packet(context)

    # Ensure there are no errors reported.
    self.assertFalse("error" in mem_region_dict)

    # Ensure address is readable and writeable.
    self.assertTrue("permissions" in mem_region_dict)
    self.assertTrue("r" in mem_region_dict["permissions"])
    self.assertTrue("w" in mem_region_dict["permissions"])

    # Ensure the start address and size encompass the address we queried.
    self.assert_address_within_memory_region(heap_address, mem_region_dict)
@debugserver_test
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qMemoryRegionInfo_reports_heap_address_as_readable_writeable_debugserver(
self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.qMemoryRegionInfo_reports_heap_address_as_readable_writeable()
@skipIfWindows # No pty support to test any inferior output
@llgs_test
def test_qMemoryRegionInfo_reports_heap_address_as_readable_writeable_llgs(
self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.qMemoryRegionInfo_reports_heap_address_as_readable_writeable()
def breakpoint_set_and_remove_work(self, want_hardware=False):
# Start up the inferior.
procs = self.prep_debug_monitor_and_inferior(
inferior_args=[
"get-code-address-hex:hello",
"sleep:1",
"call-function:hello"])
# Run the process
self.add_register_info_collection_packets()
self.add_process_info_collection_packets()
self.test_sequence.add_log_lines(
[ # Start running after initial stop.
"read packet: $c#63",
# Match output line that prints the memory address of the function call entry point.
# Note we require launch-only testing so we can get inferior otuput.
{"type": "output_match", "regex": self.maybe_strict_output_regex(r"code address: 0x([0-9a-fA-F]+)\r\n"),
"capture": {1: "function_address"}},
# Now stop the inferior.
"read packet: {}".format(chr(3)),
# And wait for the stop notification.
{"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}],
True)
# Run the packet stream.
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather process info - we need endian of target to handle register
# value conversions.
process_info = self.parse_process_info_response(context)
endian = process_info.get("endian")
self.assertIsNotNone(endian)
# Gather register info entries.
reg_infos = self.parse_register_info_packets(context)
(pc_lldb_reg_index, pc_reg_info) = self.find_pc_reg_info(reg_infos)
self.assertIsNotNone(pc_lldb_reg_index)
self.assertIsNotNone(pc_reg_info)
# Grab the function address.
self.assertIsNotNone(context.get("function_address"))
function_address = int(context.get("function_address"), 16)
# Get current target architecture
target_arch = self.getArchitecture()
# Set the breakpoint.
if (target_arch == "arm") or (target_arch == "aarch64"):
# TODO: Handle case when setting breakpoint in thumb code
BREAKPOINT_KIND = 4
else:
BREAKPOINT_KIND = 1
# Set default packet type to Z0 (software breakpoint)
z_packet_type = 0
# If hardware breakpoint is requested set packet type to Z1
if want_hardware == True:
z_packet_type = 1
self.reset_test_sequence()
self.add_set_breakpoint_packets(
function_address,
z_packet_type,
do_continue=True,
breakpoint_kind=BREAKPOINT_KIND)
# Run the packet stream.
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Verify the stop signal reported was the breakpoint signal number.
stop_signo = context.get("stop_signo")
self.assertIsNotNone(stop_signo)
self.assertEqual(int(stop_signo, 16),
lldbutil.get_signal_number('SIGTRAP'))
# Ensure we did not receive any output. If the breakpoint was not set, we would
# see output (from a launched process with captured stdio) printing a hello, world message.
# That would indicate the breakpoint didn't take.
self.assertEqual(len(context["O_content"]), 0)
# Verify that the PC for the main thread is where we expect it - right at the breakpoint address.
# This acts as a another validation on the register reading code.
self.reset_test_sequence()
self.test_sequence.add_log_lines(
[
# Print the PC. This should match the breakpoint address.
"read packet: $p{0:x}#00".format(pc_lldb_reg_index),
# Capture $p results.
{"direction": "send",
"regex": r"^\$([0-9a-fA-F]+)#",
"capture": {1: "p_response"}},
], True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Verify the PC is where we expect. Note response is in endianness of
# the inferior.
p_response = context.get("p_response")
self.assertIsNotNone(p_response)
# Convert from target endian to int.
returned_pc = lldbgdbserverutils.unpack_register_hex_unsigned(
endian, p_response)
self.assertEqual(returned_pc, function_address)
# Verify that a breakpoint remove and continue gets us the expected
# output.
self.reset_test_sequence()
# Add breakpoint remove packets
self.add_remove_breakpoint_packets(
function_address,
z_packet_type,
breakpoint_kind=BREAKPOINT_KIND)
self.test_sequence.add_log_lines(
[
# Continue running.
"read packet: $c#63",
# We should now receive the output from the call.
{"type": "output_match", "regex": r"^hello, world\r\n$"},
# And wait for program completion.
{"direction": "send", "regex": r"^\$W00(.*)#[0-9a-fA-F]{2}$"},
], True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
@debugserver_test
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_software_breakpoint_set_and_remove_work_debugserver(self):
self.init_debugserver_test()
if self.getArchitecture() == "arm":
# TODO: Handle case when setting breakpoint in thumb code
self.build(dictionary={'CFLAGS_EXTRAS': '-marm'})
else:
self.build()
self.set_inferior_startup_launch()
self.breakpoint_set_and_remove_work(want_hardware=False)
@skipIfWindows # No pty support to test any inferior output
@llgs_test
@expectedFlakeyLinux("llvm.org/pr25652")
def test_software_breakpoint_set_and_remove_work_llgs(self):
self.init_llgs_test()
if self.getArchitecture() == "arm":
# TODO: Handle case when setting breakpoint in thumb code
self.build(dictionary={'CFLAGS_EXTRAS': '-marm'})
else:
self.build()
self.set_inferior_startup_launch()
self.breakpoint_set_and_remove_work(want_hardware=False)
@debugserver_test
@skipUnlessPlatform(oslist=['linux'])
@expectedFailureAndroid
@skipIf(archs=no_match(['arm', 'aarch64']))
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_hardware_breakpoint_set_and_remove_work_debugserver(self):
self.init_debugserver_test()
if self.getArchitecture() == "arm":
# TODO: Handle case when setting breakpoint in thumb code
self.build(dictionary={'CFLAGS_EXTRAS': '-marm'})
else:
self.build()
self.set_inferior_startup_launch()
self.breakpoint_set_and_remove_work(want_hardware=True)
@llgs_test
@skipUnlessPlatform(oslist=['linux'])
@skipIf(archs=no_match(['arm', 'aarch64']))
def test_hardware_breakpoint_set_and_remove_work_llgs(self):
self.init_llgs_test()
if self.getArchitecture() == "arm":
# TODO: Handle case when setting breakpoint in thumb code
self.build(dictionary={'CFLAGS_EXTRAS': '-marm'})
else:
self.build()
self.set_inferior_startup_launch()
self.breakpoint_set_and_remove_work(want_hardware=True)
def qSupported_returns_known_stub_features(self):
# Start up the stub and start/prep the inferior.
procs = self.prep_debug_monitor_and_inferior()
self.add_qSupported_packets()
# Run the packet stream.
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Retrieve the qSupported features.
supported_dict = self.parse_qSupported_response(context)
self.assertIsNotNone(supported_dict)
self.assertTrue(len(supported_dict) > 0)
@debugserver_test
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qSupported_returns_known_stub_features_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.qSupported_returns_known_stub_features()
@llgs_test
def test_qSupported_returns_known_stub_features_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.qSupported_returns_known_stub_features()
def written_M_content_reads_back_correctly(self):
TEST_MESSAGE = "Hello, memory"
# Start up the stub and start/prep the inferior.
procs = self.prep_debug_monitor_and_inferior(
inferior_args=[
"set-message:xxxxxxxxxxxxxX",
"get-data-address-hex:g_message",
"sleep:1",
"print-message:"])
self.test_sequence.add_log_lines(
[
# Start running after initial stop.
"read packet: $c#63",
# Match output line that prints the memory address of the message buffer within the inferior.
# Note we require launch-only testing so we can get inferior otuput.
{"type": "output_match", "regex": self.maybe_strict_output_regex(r"data address: 0x([0-9a-fA-F]+)\r\n"),
"capture": {1: "message_address"}},
# Now stop the inferior.
"read packet: {}".format(chr(3)),
# And wait for the stop notification.
{"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Grab the message address.
self.assertIsNotNone(context.get("message_address"))
message_address = int(context.get("message_address"), 16)
# Hex-encode the test message, adding null termination.
hex_encoded_message = seven.hexlify(TEST_MESSAGE)
# Write the message to the inferior. Verify that we can read it with the hex-encoded (m)
# and binary (x) memory read packets.
self.reset_test_sequence()
self.test_sequence.add_log_lines(
["read packet: $M{0:x},{1:x}:{2}#00".format(message_address, len(TEST_MESSAGE), hex_encoded_message),
"send packet: $OK#00",
"read packet: $m{0:x},{1:x}#00".format(message_address, len(TEST_MESSAGE)),
"send packet: ${0}#00".format(hex_encoded_message),
"read packet: $x{0:x},{1:x}#00".format(message_address, len(TEST_MESSAGE)),
"send packet: ${0}#00".format(TEST_MESSAGE),
"read packet: $m{0:x},4#00".format(message_address),
"send packet: ${0}#00".format(hex_encoded_message[0:8]),
"read packet: $x{0:x},4#00".format(message_address),
"send packet: ${0}#00".format(TEST_MESSAGE[0:4]),
"read packet: $c#63",
{"type": "output_match", "regex": r"^message: (.+)\r\n$", "capture": {1: "printed_message"}},
"send packet: $W00#00",
], True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Ensure what we read from inferior memory is what we wrote.
printed_message = context.get("printed_message")
self.assertIsNotNone(printed_message)
self.assertEqual(printed_message, TEST_MESSAGE + "X")
@debugserver_test
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_written_M_content_reads_back_correctly_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.written_M_content_reads_back_correctly()
@skipIfWindows # No pty support to test any inferior output
@llgs_test
@expectedFlakeyLinux("llvm.org/pr25652")
def test_written_M_content_reads_back_correctly_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.written_M_content_reads_back_correctly()
def P_writes_all_gpr_registers(self):
# Start inferior debug session, grab all register info.
procs = self.prep_debug_monitor_and_inferior(inferior_args=["sleep:2"])
self.add_register_info_collection_packets()
self.add_process_info_collection_packets()
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Process register infos.
reg_infos = self.parse_register_info_packets(context)
self.assertIsNotNone(reg_infos)
self.add_lldb_register_index(reg_infos)
# Process endian.
process_info = self.parse_process_info_response(context)
endian = process_info.get("endian")
self.assertIsNotNone(endian)
# Pull out the register infos that we think we can bit flip
# successfully,.
gpr_reg_infos = [
reg_info for reg_info in reg_infos if self.is_bit_flippable_register(reg_info)]
self.assertTrue(len(gpr_reg_infos) > 0)
# Write flipped bit pattern of existing value to each register.
(successful_writes, failed_writes) = self.flip_all_bits_in_each_register_value(
gpr_reg_infos, endian)
# print("successful writes: {}, failed writes: {}".format(successful_writes, failed_writes))
self.assertTrue(successful_writes > 0)
# Note: as of this moment, a hefty number of the GPR writes are failing with E32 (everything except rax-rdx, rdi, rsi, rbp).
# Come back to this. I have the test rigged to verify that at least some
# of the bit-flip writes work.
@debugserver_test
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_P_writes_all_gpr_registers_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.P_writes_all_gpr_registers()
@llgs_test
def test_P_writes_all_gpr_registers_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.P_writes_all_gpr_registers()
def P_and_p_thread_suffix_work(self):
# Startup the inferior with three threads.
procs = self.prep_debug_monitor_and_inferior(
inferior_args=["thread:new", "thread:new"])
self.add_thread_suffix_request_packets()
self.add_register_info_collection_packets()
self.add_process_info_collection_packets()
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
endian = process_info.get("endian")
self.assertIsNotNone(endian)
reg_infos = self.parse_register_info_packets(context)
self.assertIsNotNone(reg_infos)
self.add_lldb_register_index(reg_infos)
reg_index = self.select_modifiable_register(reg_infos)
self.assertIsNotNone(reg_index)
reg_byte_size = int(reg_infos[reg_index]["bitsize"]) // 8
self.assertTrue(reg_byte_size > 0)
# Run the process a bit so threads can start up, and collect register
# info.
context = self.run_process_then_stop(run_seconds=1)
self.assertIsNotNone(context)
# Wait for 3 threads to be present.
threads = self.wait_for_thread_count(3, timeout_seconds=self._WAIT_TIMEOUT)
self.assertEqual(len(threads), 3)
expected_reg_values = []
register_increment = 1
next_value = None
# Set the same register in each of 3 threads to a different value.
# Verify each one has the unique value.
for thread in threads:
# If we don't have a next value yet, start it with the initial read
# value + 1
if not next_value:
# Read pre-existing register value.
self.reset_test_sequence()
self.test_sequence.add_log_lines(
["read packet: $p{0:x};thread:{1:x}#00".format(reg_index, thread),
{"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}},
], True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Set the next value to use for writing as the increment plus
# current value.
p_response = context.get("p_response")
self.assertIsNotNone(p_response)
next_value = lldbgdbserverutils.unpack_register_hex_unsigned(
endian, p_response)
# Set new value using P and thread suffix.
self.reset_test_sequence()
self.test_sequence.add_log_lines(
[
"read packet: $P{0:x}={1};thread:{2:x}#00".format(
reg_index,
lldbgdbserverutils.pack_register_hex(
endian,
next_value,
byte_size=reg_byte_size),
thread),
"send packet: $OK#00",
],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Save the value we set.
expected_reg_values.append(next_value)
# Increment value for next thread to use (we want them all
# different so we can verify they wrote to each thread correctly
# next.)
next_value += register_increment
# Revisit each thread and verify they have the expected value set for
# the register we wrote.
thread_index = 0
for thread in threads:
# Read pre-existing register value.
self.reset_test_sequence()
self.test_sequence.add_log_lines(
["read packet: $p{0:x};thread:{1:x}#00".format(reg_index, thread),
{"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}},
], True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Get the register value.
p_response = context.get("p_response")
self.assertIsNotNone(p_response)
read_value = lldbgdbserverutils.unpack_register_hex_unsigned(
endian, p_response)
# Make sure we read back what we wrote.
self.assertEqual(read_value, expected_reg_values[thread_index])
thread_index += 1
# Note: as of this moment, a hefty number of the GPR writes are failing
# with E32 (everything except rax-rdx, rdi, rsi, rbp).
@debugserver_test
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_P_and_p_thread_suffix_work_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.P_and_p_thread_suffix_work()
@skipIfWindows
@expectedFailureNetBSD
@llgs_test
def test_P_and_p_thread_suffix_work_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.P_and_p_thread_suffix_work()
| 41.067127 | 211 | 0.650189 |
aceb69ba3df19fea1355dbdab8e9478113fc1925 | 6,995 | py | Python | smartplug/models.py | kwarodom/bemoss_web_ui_1.2 | 332e3ef307cec146fdc3141169a4a64998b3fc20 | [
"Unlicense"
] | null | null | null | smartplug/models.py | kwarodom/bemoss_web_ui_1.2 | 332e3ef307cec146fdc3141169a4a64998b3fc20 | [
"Unlicense"
] | null | null | null | smartplug/models.py | kwarodom/bemoss_web_ui_1.2 | 332e3ef307cec146fdc3141169a4a64998b3fc20 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
# Authors: Kruthika Rathinavel
# Version: 1.2.1
# Email: kruthika@vt.edu
# Created: "2014-10-13 18:45:40"
# Updated: "2015-02-13 15:06:41"
# Copyright © 2014 by Virginia Polytechnic Institute and State University
# All rights reserved
#
# Virginia Polytechnic Institute and State University (Virginia Tech) owns the copyright for the BEMOSS software and its
# associated documentation ("Software") and retains rights to grant research rights under patents related to
# the BEMOSS software to other academic institutions or non-profit research institutions.
# You should carefully read the following terms and conditions before using this software.
# Your use of this Software indicates your acceptance of this license agreement and all terms and conditions.
#
# You are hereby licensed to use the Software for Non-Commercial Purpose only. Non-Commercial Purpose means the
# use of the Software solely for research. Non-Commercial Purpose excludes, without limitation, any use of
# the Software, as part of, or in any way in connection with a product or service which is sold, offered for sale,
# licensed, leased, loaned, or rented. Permission to use, copy, modify, and distribute this compilation
# for Non-Commercial Purpose to other academic institutions or non-profit research institutions is hereby granted
# without fee, subject to the following terms of this license.
#
# Commercial Use: If you desire to use the software for profit-making or commercial purposes,
# you agree to negotiate in good faith a license with Virginia Tech prior to such profit-making or commercial use.
# Virginia Tech shall have no obligation to grant such license to you, and may grant exclusive or non-exclusive
# licenses to others. You may contact the following by email to discuss commercial use:: vtippatents@vtip.org
#
# Limitation of Liability: IN NO EVENT WILL VIRGINIA TECH, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE
# THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
# CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO
# LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE
# OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF VIRGINIA TECH OR OTHER PARTY HAS BEEN ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGES.
#
# For full terms and conditions, please visit https://bitbucket.org/bemoss/bemoss_os.
#
# Address all correspondence regarding this license to Virginia Tech's electronic mail address: vtippatents@vtip.org
from django.db import models
from dashboard.models import Building_Zone, DeviceMetadata
#Plugload device information - for all plugload controllers in BEMOSS
class Plugload(models.Model):
plugload = models.ForeignKey(DeviceMetadata, primary_key=True, max_length=50)
status = models.CharField(max_length=3, null=True, blank=True)
power = models.FloatField(null=True, blank=True)
energy = models.FloatField(null=True, blank=True)
ip_address = models.IPAddressField(null=True, blank=True)
nickname = models.CharField(max_length=30, null=True, blank=True)
zone = models.ForeignKey(Building_Zone, null=True, blank=True)
network_status = models.CharField(max_length=7, null=True, blank=True)
other_parameters = models.CharField(max_length=200, null=True, blank=True)
last_scanned_time = models.DateTimeField(null=True, blank=True)
last_offline_time = models.DateTimeField(null=True, blank=True)
class Meta:
db_table = "plugload"
def __unicode__(self):
return self.plugload_id
def get_zone(self):
zone_req = Building_Zone.as_json(self.zone)
return zone_req
def data_as_json(self):
zone_req = Building_Zone.as_json(self.zone)
device_info = DeviceMetadata.objects.get(device_id=self.plugload_id)
metadata = DeviceMetadata.data_as_json(device_info)
return dict(
id=self.plugload_id,
status=self.status,
power=self.power,
energy=self.energy,
zone=zone_req,
nickname=self.nickname.encode('utf-8').title(),
device_type=metadata['device_type'].encode('utf-8'),
device_model_id=metadata['device_model_id'],
bemoss=metadata['bemoss'],
identifiable=metadata['identifiable'],
mac_address=metadata['mac_address'].encode('utf-8'),
vendor_name=metadata['vendor_name'].encode('utf-8'),
device_model=metadata['device_model'].encode('utf-8'))
def device_status(self):
zone_req = Building_Zone.as_json(self.zone)
device_info = DeviceMetadata.objects.get(device_id=self.plugload_id)
metadata = DeviceMetadata.data_as_json(device_info)
return dict(
id=self.plugload_id,
nickname=self.nickname.encode('utf-8').title(),
device_model=metadata['device_model'],
date_added=metadata['date_added'],
zone=zone_req,
bemoss=metadata['bemoss'],
network_status=self.network_status.capitalize(),
zone_nickname=zone_req['zone_nickname'],
last_scanned=self.last_scanned_time,
last_offline=self.last_offline_time)
def data_dashboard(self):
zone_req = Building_Zone.as_json(self.zone)
device_info = DeviceMetadata.objects.get(device_id=self.plugload_id)
metadata = DeviceMetadata.data_as_json(device_info)
return dict(
device_id=self.plugload_id,
device_type=metadata['device_type'].encode('utf-8'),
vendor_name=metadata['vendor_name'].encode('utf-8'),
device_model=metadata['device_model'].encode('utf-8'),
device_model_id=metadata['device_model_id'],
mac_address=metadata['mac_address'].encode('utf-8'),
nickname=self.nickname.encode('utf-8').title(),
date_added=metadata['date_added'],
identifiable=metadata['identifiable'],
zone_id=zone_req['id'],
bemoss=metadata['bemoss'],
zone_nickname=zone_req['zone_nickname'],
network_status=self.network_status.capitalize(),
last_scanned=self.last_scanned_time)
def data_side_nav(self):
zone_req = Building_Zone.as_json(self.zone)
device_info = DeviceMetadata.objects.get(device_id=self.plugload_id)
metadata = DeviceMetadata.data_as_json(device_info)
return dict(
device_id=self.plugload_id,
device_model_id=metadata['device_model_id'],
mac_address=metadata['mac_address'].encode('utf-8'),
nickname=self.nickname.encode('utf-8').title(),
zone_id=zone_req['id'],
bemoss=metadata['bemoss'],
zone_nickname=zone_req['zone_nickname'],
network_status=self.network_status.capitalize())
| 49.964286 | 120 | 0.71065 |
aceb6b35000f4ec763ebc71334ba1f648ac7d0fd | 5,794 | py | Python | migration/biashist_mig1.py | yokoyama-flogics/ibp_monitor_2 | 1a7df55a524ff3a7908df330e7e02c9f27e24ae0 | [
"BSD-2-Clause"
] | 3 | 2017-11-23T13:29:47.000Z | 2021-01-08T09:28:35.000Z | migration/biashist_mig1.py | yokoyama-flogics/ibp_monitor_2 | 1a7df55a524ff3a7908df330e7e02c9f27e24ae0 | [
"BSD-2-Clause"
] | null | null | null | migration/biashist_mig1.py | yokoyama-flogics/ibp_monitor_2 | 1a7df55a524ff3a7908df330e7e02c9f27e24ae0 | [
"BSD-2-Clause"
] | 2 | 2018-02-15T08:11:24.000Z | 2021-01-08T09:28:43.000Z | """
Bias History Migration Utility
BSD 2-Clause License
Copyright (c) 2017, Atsushi Yokoyama, Firmlogics (yokoyama@flogics.com)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import sys
# Set Python search path to the parent directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib.common import eprint
def biashist_mig_band(dbconn, recorder, offset_ms, bfo_offset_hz, filename,
ignore_err=False):
"""
Read lines from given filename (Monitor-1 biashist file) and insert them as
database records.
"""
from lib.config import BeaconConfigParser
from lib.ibp import mhz_to_freq_khz
import re
import sqlite3
from datetime import datetime
m = re.search('_(20[0-9]+)\.log', filename)
date_str = m.group(1)
for line in open(filename, 'r').readlines():
if line.rstrip() == 'END':
break
if line.rstrip() == '':
eprint('Found empty line. Skipped')
continue
# Parsing characteristic parameters from *.log file
m = re.match(
'([0-9:]+) [A-Z0-9]+ +(\d+)MHz SN: *([\d.-]+) Bias: *([\d.-]+)'
+ ' Ct: *(\d+) IF: *([\d-]+) +([\d.-]+)',
line)
try:
datetime_sec = (datetime.strptime(
date_str + ' ' + m.group(1),
'%Y%m%d %H:%M:%S')
- datetime.utcfromtimestamp(0)).total_seconds()
except:
eprint('Found illegal line "%s". Aborted')
raise
freq_khz = mhz_to_freq_khz(int(m.group(2)))
max_sn = float(m.group(3))
best_pos_hz = int(m.group(4))
total_ct = int(m.group(5))
bg_pos_hz = int(m.group(6))
bg_sn = float(m.group(7))
# print datetime_sec, freq_khz, max_sn, best_pos_hz, total_ct
# print bg_pos_hz, bg_sn
# Originally, trying to calculate true time by comparing bad_slot and
# true slot.
# m = re.search(r'_([A-Z0-9]+)_', filename)
# callsign = m.group(1)
# bad_slot = get_slot(datetime_sec, band)
# true_slot = callsign_to_slot(callsign)
# diff = (bad_slot - true_slot) % 18
# if diff < 2 or diff > 3:
# # print bad_slot, callsign
# print diff
c = dbconn.cursor()
try:
c.execute('''INSERT INTO
received(datetime, offset_ms, freq_khz, bfo_offset_hz, recorder,
char1_max_sn, char1_best_pos_hz, char1_total_ct,
char1_bg_pos_hz, char1_bg_sn)
VALUES(?,?,?,?,?,?,?,?,?,?)''',
(
datetime_sec,
offset_ms,
freq_khz,
bfo_offset_hz,
recorder,
max_sn,
best_pos_hz,
total_ct,
bg_pos_hz,
bg_sn
))
except sqlite3.IntegrityError as err:
if not ignore_err:
raise
elif err[0] != 'UNIQUE constraint failed: biashist.datetime':
raise
dbconn.commit()
def biashist_mig_all(ignore_err=False, debug=False):
from lib.config import BeaconConfigParser
from lib.fileio import connect_database
from fnmatch import fnmatch
import os
dbdir = BeaconConfigParser().getpath('Migration', 'dbdir')
recorder = BeaconConfigParser().get('Migration', 'recorder')
offset_ms = BeaconConfigParser().getint('Migration', 'offset_ms')
bfo_offset_hz = \
BeaconConfigParser().getint('Migration', 'bfo_offset_hz')
conn = connect_database()
for file in sorted(os.listdir(dbdir)):
if fnmatch(file, 'ibprec_*.log'):
if debug:
print "Migrating", file
biashist_mig_band(conn, recorder, offset_ms, bfo_offset_hz,
os.path.join(dbdir, file), ignore_err=ignore_err)
conn.close()
def main():
import argparse
import re
import sys
# Parse arguments
parser = argparse.ArgumentParser(
description='Bias History Migration Utility')
parser.add_argument('-d', '--debug',
action='store_true',
default=False,
help='enable debug')
parser.add_argument('--ignoreerr',
action='store_true',
default=False,
help='continue even error occurred when inserting records')
args = parser.parse_args()
biashist_mig_all(ignore_err=args.ignoreerr, debug=args.debug)
if __name__ == "__main__":
main()
| 34.284024 | 80 | 0.620815 |
aceb6bd727ffe561138de67b56ff94ca21f0694d | 392 | py | Python | numpy.example1.py | eltechno/python_course | f74abac7df3f9f41864afd06479389260c29ea3a | [
"MIT"
] | 4 | 2019-05-04T00:33:25.000Z | 2021-05-29T20:37:59.000Z | numpy.example1.py | eltechno/python_course | f74abac7df3f9f41864afd06479389260c29ea3a | [
"MIT"
] | null | null | null | numpy.example1.py | eltechno/python_course | f74abac7df3f9f41864afd06479389260c29ea3a | [
"MIT"
] | 3 | 2020-05-05T13:14:28.000Z | 2022-02-03T16:18:37.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Created by techno at 30/04/19
#Feature: #Enter feature name here
# Enter feature description here
#Scenario: # Enter scenario name here
# Enter steps here
import numpy as np
#simple array
numbers = np.array([2, 3, 5, 7, 11])
print(type(numbers))
print(numbers)
#Multidimensional Arguments
test = np.array([[1,2,3],[4,5,6]])
print(test)
| 18.666667 | 38 | 0.691327 |
aceb6cc11ee4572267695f9680248ca18c1651c0 | 6,537 | py | Python | testcode/test_scoutsuite_rails.py | ncc-erik-steringer/Aerides | 6f5c70447cd42667157586303abc9ea1313c4cff | [
"MIT"
] | 46 | 2022-02-03T13:25:10.000Z | 2022-03-17T17:51:25.000Z | testcode/test_scoutsuite_rails.py | ncc-erik-steringer/Aerides | 6f5c70447cd42667157586303abc9ea1313c4cff | [
"MIT"
] | null | null | null | testcode/test_scoutsuite_rails.py | ncc-erik-steringer/Aerides | 6f5c70447cd42667157586303abc9ea1313c4cff | [
"MIT"
] | null | null | null | # Copyright (c) 2022 Erik Steringer and NCC Group
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
from typing import Optional, Union
import unittest
def _get_item(ptr, itempath: str) -> Union[str, dict, list, None]:
"""Utility function. The ptr param should point at .services then follow the itempath (separated via '.') to
the expected object. Returns None if invalid (tries to avoid Error)."""
root = ptr
for element in itempath.split('.'):
if isinstance(root, dict):
if element in root:
root = root[element]
else:
return None
elif isinstance(root, list):
index = int(element)
if not (0 <= index < len(root)):
return None
root = root[index]
else:
return None
return root
class TestScoutSuiteExpected(unittest.TestCase):
    """Assertions against a Scout Suite report generated for the test AWS account.

    The common plumbing (locating a service's findings, collecting flagged
    items, mapping item paths to ARNs) is factored into private helpers so the
    individual tests stay declarative.
    """
    def setUp(self) -> None:
        # per https://github.com/nccgroup/ScoutSuite/wiki/Exporting-and-Programmatically-Accessing-the-Report
        with open('/tmp/artifacts/scout-dir/scoutsuite-results/scoutsuite_results_aws-000000000000.js') as fd:
            fd.readline()  # discard first line (JS assignment preamble before the JSON body)
            self.scoutdata = json.load(fd)  # type: dict
    def _findings(self, service: str) -> dict:
        """Return the findings dict for *service*, failing the test if the path is absent."""
        path = 'services.{}.findings'.format(service)
        ptr = _get_item(self.scoutdata, path)
        if ptr is None:
            self.fail('Expected path {} in Scout Suite data was not found'.format(path))
        return ptr
    def _flagged_items(self, findings: dict, finding_names) -> list:
        """Collect flagged item paths across the named findings.

        Only findings with flagged_items > 0 contribute, keeping both IAM
        tests consistent (the NotAction test previously skipped this guard).
        """
        items = []
        for finding_name in finding_names:
            finding_contents = findings.get(finding_name)
            if finding_contents is not None and finding_contents['flagged_items'] > 0:
                items.extend(finding_contents['items'])
        return items
    def _item_arns(self, items) -> list:
        """Map raw Scout Suite item paths to resource ARNs where resolvable."""
        listing = []
        for item in items:
            root = self.scoutdata.get('services')
            item_ref = _get_item(root, '.'.join(item.split('.')[:3]))  # type: Optional[dict]
            if item_ref is not None:
                listing.append(item_ref.get('arn'))
            else:
                # Fall back to the raw item path when the reference is broken.
                listing.append(item)
        return listing
    def test_ec2_no_ports_open_to_all(self):
        """Verify that none of the security groups have a port open to 0.0.0.0/0"""
        findings = self._findings('ec2')
        # look at all findings for "port is open", group them up, report
        issues = []
        for finding, data in findings.items():
            if 'ec2-security-group-opens' not in finding or 'port-to-all' not in finding:
                continue
            if data['flagged_items'] > 0:
                issues.append((finding, data))
        if issues:
            self.fail(
                'ScoutSuite reported the following EC2 Security Group findings:\n\n{}'.format(
                    '\n\n'.join(
                        ['{}\n{}'.format(x, '\n'.join(y['items'])) for x, y in issues]
                    )
                )
            )
    def test_iam_no_inline_passrole(self):
        """Verify there are no inline policies granting iam:PassRole for *"""
        findings = self._findings('iam')
        finding_names = (
            'iam-inline-role-policy-allows-iam-PassRole',
            'iam-inline-user-policy-allows-iam-PassRole',
            'iam-inline-group-policy-allows-iam-PassRole'
        )
        finding_items = self._flagged_items(findings, finding_names)
        if finding_items:
            self.fail(
                'The following IAM Users/Roles/Groups had an inline policy allowing '
                'iam:PassRole for all resources:\n\n{}'.format(
                    '\n'.join(['* {}'.format(x) for x in self._item_arns(finding_items)])
                )
            )
    def test_iam_no_inline_notaction(self):
        """Verify no inline IAM Policies (for Users/Roles/Groups) use the NotAction field"""
        findings = self._findings('iam')
        finding_names = (
            'iam-inline-role-policy-allows-NotActions',
            'iam-inline-user-policy-allows-NotActions',
            'iam-inline-group-policy-allows-NotActions'
        )
        finding_items = self._flagged_items(findings, finding_names)
        if finding_items:
            self.fail(
                'The following IAM Users/Roles/Groups had an inline policy that uses '
                'NotAction in a statement:\n\n{}'.format(
                    '\n'.join(['* {}'.format(x) for x in self._item_arns(finding_items)])
                )
            )
    def tearDown(self) -> None:
        del self.scoutdata
# Allow running this test module directly, outside a dedicated test runner.
if __name__ == '__main__':
    unittest.main()
| 40.602484 | 112 | 0.604559 |
aceb6dcae19f9cef7569cbf6385668fc4ba94f01 | 315 | py | Python | lessonsApp/models.py | glen-s-abraham/Elearning-platform | 3122cfcef9dd920fffb474da2a63c475fe08c27b | [
"MIT"
] | null | null | null | lessonsApp/models.py | glen-s-abraham/Elearning-platform | 3122cfcef9dd920fffb474da2a63c475fe08c27b | [
"MIT"
] | null | null | null | lessonsApp/models.py | glen-s-abraham/Elearning-platform | 3122cfcef9dd920fffb474da2a63c475fe08c27b | [
"MIT"
] | null | null | null | from django.db import models
from coursesApp.models import Courses
class Lessons(models.Model):
    """A single lesson file that belongs to a course.

    Deleting the parent course cascades and removes its lessons.
    """
    course = models.ForeignKey(Courses, on_delete=models.CASCADE)
    lessonname = models.CharField(max_length=100, unique=False)
    lesson = models.FileField(upload_to='Lessons/')

    def __str__(self):
        # Human-readable label used by the admin and shell.
        return str(self.lessonname)
aceb6e65cace647477cb6c7b10938a7146c6a73a | 223 | py | Python | ITMO_FS/filters/multivariate/__init__.py | lindlind/ITMO_FS | 8662b67a98bceaac800ccc8ea3230cc9f9a250e1 | [
"BSD-3-Clause"
] | 90 | 2020-05-29T14:10:51.000Z | 2022-03-21T16:40:02.000Z | ITMO_FS/filters/multivariate/__init__.py | lindlind/ITMO_FS | 8662b67a98bceaac800ccc8ea3230cc9f9a250e1 | [
"BSD-3-Clause"
] | 11 | 2020-04-25T15:35:39.000Z | 2021-06-29T21:00:05.000Z | ITMO_FS/filters/multivariate/__init__.py | lindlind/ITMO_FS | 8662b67a98bceaac800ccc8ea3230cc9f9a250e1 | [
"BSD-3-Clause"
] | 22 | 2020-07-08T17:37:46.000Z | 2022-03-25T10:13:15.000Z | from .DISRwithMassive import *
from .FCBF import *
from .MultivariateFilter import MultivariateFilter
from .measures import *
from .TraceRatioFisher import TraceRatioFisher
from .STIR import STIR
from .mimaga import MIMAGA
| 27.875 | 50 | 0.829596 |
aceb6f8531b97aa6ac0495e7fe178896b46c1d55 | 23,302 | py | Python | src/protean/port/dao.py | nadirhamid/protean | d31bc634e05c9221e82136bf18c2ceaa0982c1c8 | [
"BSD-3-Clause"
] | null | null | null | src/protean/port/dao.py | nadirhamid/protean | d31bc634e05c9221e82136bf18c2ceaa0982c1c8 | [
"BSD-3-Clause"
] | null | null | null | src/protean/port/dao.py | nadirhamid/protean | d31bc634e05c9221e82136bf18c2ceaa0982c1c8 | [
"BSD-3-Clause"
] | null | null | null | # Standard Library Imports
import logging
from abc import ABCMeta, abstractmethod
from typing import Any
# Protean
from protean.core.entity import BaseEntity
from protean.core.exceptions import (
ObjectNotFoundError,
TooManyObjectsError,
ValidationError,
)
from protean.core.field.basic import Auto, Field
from protean.core.queryset import QuerySet
from protean.globals import current_uow
from protean.utils.query import Q
# Module-level logger shared by all DAO lifecycle operations below.
logger = logging.getLogger("protean.repository")
class ResultSet(object):
    """Backend-agnostic container for the outcome of a DAO query.

    Wraps raw query results so that store-specific structures never leak
    into the domain layer. Supports truthiness checks, iteration, ``len()``
    and simple pagination helpers.
    """
    def __init__(self, offset: int, limit: int, total: int, items: list):
        self.offset = offset  # zero-indexed position of the first item
        self.limit = limit    # number of items per page
        self.total = total    # total matches for the query, across all pages
        self.items = items    # records belonging to the current page

    @property
    def has_prev(self):
        """``True`` when this non-empty page is preceded by earlier results."""
        if not self.items:
            return False
        return self.offset > 0

    @property
    def has_next(self):
        """``True`` when more results remain beyond this page."""
        return self.offset + self.limit < self.total

    @property
    def first(self):
        """The first item of the current page, or ``None`` when empty."""
        return self.items[0] if self.items else None

    def __bool__(self):
        """Truthy exactly when the page holds at least one item."""
        return len(self.items) > 0

    def __iter__(self):
        """Iterate over the items of the current page."""
        return iter(self.items)

    def __len__(self):
        """Number of items on the current page (not the grand total)."""
        return len(self.items)
class BaseDAO(metaclass=ABCMeta):
    """This is the baseclass for concrete DAO implementations.
    One part of this base class contains abstract methods to be overridden and implemented in each
    concrete database implementation. These methods are where the actual interaction with the database
    takes place. The other part contains fully-implemented object lifecycle methods to help in persistence
    and data fetch. These methods invoke the concrete methods of each database implementation to
    complete their function.
    The lifecycle methods of DAO handle the casting of a model object to a domain entity object, and vice versa.
    :param domain: the domain of the application this DAO is associated with.
    :param provider: the corresponding provider object of the database implementation, from whom the DAO can
        request and fetch sessions and connections.
    :param model_cls: the concrete model class associated with the DAO. The model class is a direct representation
        of an object in ORM/ODM terms or as represented by a python driver.
    :param entity_cls: the domain entity class associated with the DAO.
    """
    def __init__(self, domain, provider, entity_cls, model_cls):
        #: Holds a reference to the domain to which the DAO belongs to.
        self.domain = domain
        #: Holds a reference to the provider which supplies the DAO with live connections.
        self.provider = provider
        #: Holds a reference to the model class representation required by the ORM/ODM or the python database driver.
        self.model_cls = model_cls
        #: Holds a reference to the entity class associated with this DAO.
        self.entity_cls = entity_cls
        #: An empty query object that can be used to begin filtering/querying operations
        #: on the underlying data store.
        self.query = QuerySet(self, domain, self.entity_cls)
        #: The actual database document or table name associated with the DAO.
        #: This is used for deriving namespaces for storage containers, especially the default dict provider.
        self.schema_name = entity_cls.meta_.schema_name
        #: Tracks whether the DAO needs to operate outside any active Unit of Work transactions.
        self._outside_uow = False
    def _get_session(self):
        """Returns an active connection to the persistence store.
        - If there is an active transaction, the connection associated with the transaction (in the UoW) is returned
        - If the DAO has been explicitly instructed to work outside a UoW (with the help of `_outside_uow`), or if
        there are no active transactions, a new connection is retrieved from the provider and returned.
        """
        if current_uow and not self._outside_uow:
            return current_uow.get_session(self.provider.name)
        else:
            return self.provider.get_connection()
    def outside_uow(self) -> "BaseDAO":
        """When called, the DAO is instructed to work outside active transactions."""
        self._outside_uow = True
        # Return the DAO object to support method chaining
        return self
    ###############################
    # Repository-specific methods #
    ###############################
    @abstractmethod
    def _filter(
        self, criteria: Q, offset: int = 0, limit: int = 10, order_by: list = ()
    ) -> ResultSet:
        """
        Filter objects from the data store. Method must return a `ResultSet`
        object
        """
    @abstractmethod
    def _create(self, model_obj: Any):
        """Persist a new entity into the persistent store. Concrete implementation will be provided by
        the database DAO class.
        Method invocation should persist a new record in the data store.
        This method is invoked by the `create` wrapper and should not be called directly.
        Returns the persisted model object.
        :param model_obj: The model object supplied in an ORM/ODM/Python driver friendly/format
        """
    @abstractmethod
    def _update(self, model_obj: Any):
        """Update entity data in the persistence store. Concrete implementation will be provided by
        the database DAO class.
        Method invocation should update the existing data in the persistent store, by its unique identifier.
        This method is invoked by the DAO's `update` wrapper method and should not be called directly.
        Returns the updated model object.
        :param model_obj: The model object supplied in an ORM/ODM/Python driver friendly/format
        """
    @abstractmethod
    def _update_all(self, criteria: Q, *args, **kwargs):
        """Perform a bulk update on the persistent store. Concrete implementation will be provided by
        the database DAO class.
        Method invocation should update all objects satisfying `criteria` with attributes specified in
        `args` and `kwargs`.
        This method is invoked by Queryset's `update_all()` method and should not be called directly.
        .. warning:: The `update_all()` method is a “bulk” operation, which bypasses ORM/ODM unit-of-work automation
            in favor of greater performance.
        Returns the count of rows matched for the provided criteria.
        :param criteria: A Q object wrapping one or more levels of criteria/filters
        :param args: A dictionary object containing attribute data to be updated
        :param kwargs: Keyword args specifying attribute data to be updated
        """
    @abstractmethod
    def _delete(self):
        """Delete this entity from the persistence store. Concrete implementation will be provided by
        the database DAO class.
        Method invocation should delete existing record in the persistent store, by its unique identifier.
        This method is invoked by DAO's `delete` wrapper method and should not be called directly.
        Returns the deleted model object.
        """
    @abstractmethod
    def _delete_all(self, criteria: Q = None):
        """Perform a bulk delete on the persistent store. Concrete implementation will be provided by
        the database DAO class.
        Method invocation should update all objects satisfying `criteria`.
        This method is invoked by Queryset's `delete_all()` method and should not be called directly.
        .. warning:: The `delete_all()` method is a “bulk” operation, which bypasses ORM/ODM unit-of-work automation
            in favor of greater performance.
        Returns the count of rows matched for the provided criteria.
        :param criteria: A Q object wrapping one or more levels of criteria/filters. If no criteria is provided,
            then all records of the table/document are removed.
        """
    @abstractmethod
    def _raw(self, query: Any, data: Any = None):
        """Run raw query on Data source. Concrete implementation will be provided by
        the database DAO class.
        Method invocation should fetch all objects satisfying the raw query. Running a raw query on the data store
        should always returns entity instance objects. If the results were not synthesizable back into
        entity objects, an exception should be thrown.
        This method is invoked by Queryset's `raw()` method and should not be called directly.
        .. warning:: The `raw()` method bypasses ORM/ODM unit-of-work automation.
        Returns the result specified by the raw query.
        :param query: Raw query to be passed to the data store.
        :param data: Data to be passed to the data store as-is, along with the query
            (in case of update statements, for example).
        """
    ######################
    # Life-cycle methods #
    ######################
    def get(self, identifier: Any) -> BaseEntity:
        """Retrieve a specific Record from the Repository by its `identifier`.
        This method internally uses the `filter` method to fetch records.
        Returns exactly one record that matches the identifier.
        Throws `ObjectNotFoundError` if no record was found for the identifier.
        Throws `TooManyObjectsError` if multiple records were found for the identifier.
        :param identifier: id of the record to be fetched from the data store.
        """
        logger.debug(
            f"Lookup `{self.entity_cls.__name__}` object with identifier {identifier}"
        )
        # Filter on the ID field of the entity
        filters = {
            self.entity_cls.meta_.id_field.field_name: identifier,
        }
        results = self.query.filter(**filters).all()
        if not results:
            raise ObjectNotFoundError(
                f"`{self.entity_cls.__name__}` object with identifier {identifier} "
                f"does not exist."
            )
        if len(results) > 1:
            raise TooManyObjectsError(
                f"More than one object of `{self.entity_cls.__name__}` exist with identifier {identifier}",
            )
        # Return the first result, because `filter` would have returned an array
        return results.first
    def find_by(self, **kwargs) -> "BaseEntity":
        """Find a specific entity record that matches one or more criteria.
        This method internally uses the `filter` method to fetch records.
        Returns exactly one record that matches the identifier.
        Throws `ObjectNotFoundError` if no record was found for the identifier.
        Throws `TooManyObjectsError` if multiple records were found for the identifier.
        :param kwargs: named arguments of attribute names and values to filter on.
        """
        logger.debug(
            f"Lookup `{self.entity_cls.__name__}` object with values " f"{kwargs}"
        )
        # Filter for item in the data store
        results = self.query.filter(**kwargs).all()
        if not results:
            raise ObjectNotFoundError(
                f"`{self.entity_cls.__name__}` object with values {[item for item in kwargs.items()]} "
                f"does not exist."
            )
        if len(results) > 1:
            raise TooManyObjectsError(
                f"More than one object of `{self.entity_cls.__name__}` exist "
                f"with values {[item for item in kwargs.items()]}",
            )
        # Return the first result, because `filter` would have returned an array
        return results.first
    def exists(self, excludes_, **filters) -> bool:
        """Returns `True` if objects matching the provided filters were found. Else, returns False.
        This method internally uses the `filter` method to fetch records. But it can be overridden for better and
        quicker implementations supported by databases.
        :param filters: criteria to match records against
        :param excludes_: criteria that records should not satisfy
        """
        results = self.query.filter(**filters).exclude(**excludes_)
        # Invokes the __bool__ method on `ResultSet`.
        return bool(results)
    def create(self, *args, **kwargs) -> "BaseEntity":
        """Create a new record in the data store.
        Performs validations for unique attributes before creating the entity
        Returns the created entity object.
        Throws `ValidationError` for validation failures on attribute values or uniqueness constraints.
        :param args: Dictionary object containing the object's data.
        :param kwargs: named attribute names and values
        """
        logger.debug(
            f"Creating new `{self.entity_cls.__name__}` object using data {kwargs}"
        )
        try:
            # Build the entity from input arguments
            # Raises validation errors, if any, at this point
            entity_obj = self.entity_cls(*args, **kwargs)
            # Perform unique checks. Raises validation errors if unique constraints are violated.
            self._validate_unique(entity_obj)
            # Build the model object and persist into data store
            model_obj = self._create(self.model_cls.from_entity(entity_obj))
            # Reverse update auto fields into entity
            for field_name, field_obj in entity_obj.meta_.declared_fields.items():
                if isinstance(field_obj, Auto) and not getattr(entity_obj, field_name):
                    # The concrete DAO may return either a dict-like or an
                    # attribute-style model object; handle both shapes.
                    if isinstance(model_obj, dict):
                        field_val = model_obj[field_name]
                    else:
                        field_val = getattr(model_obj, field_name)
                    setattr(entity_obj, field_name, field_val)
            # Set Entity status to saved to let everybody know it has been persisted
            entity_obj.state_.mark_saved()
            return entity_obj
        except ValidationError as exc:
            logger.error(f"Failed creating entity because of {exc}")
            raise
    def save(self, entity_obj) -> "BaseEntity":
        """Create or update an entity in the data store, depending on its state. An identity for entity record is
        generated, if not already present.
        The primary difference between `save` and other lifecycle methods like `create` and `update` is that `save`
        accepts a fully formed entity object to persist, while the others accept attribute params to build the
        entity model from.
        Returns the created/updated entity object.
        Throws `ValidationError` for validation failures on attribute values or uniqueness constraints.
        :param entity_obj: Entity object to be persisted
        """
        logger.debug(f"Saving `{self.entity_cls.__name__}` object")
        try:
            # Build the model object and create it
            if entity_obj.state_.is_persisted:
                model_obj = self._update(self.model_cls.from_entity(entity_obj))
            else:
                # If this is a new entity, generate ID
                if entity_obj.state_.is_new:
                    if not getattr(
                        entity_obj, entity_obj.meta_.id_field.field_name, None
                    ):
                        setattr(
                            entity_obj,
                            entity_obj.meta_.id_field.field_name,
                            self.entity_cls.generate_identity(),
                        )
                model_obj = self._create(self.model_cls.from_entity(entity_obj))
            updated_entity_obj = self.model_cls.to_entity(model_obj)
            # Update the auto fields of the entity
            for field_name, field_obj in entity_obj.meta_.declared_fields.items():
                if isinstance(field_obj, Auto):
                    field_val = getattr(updated_entity_obj, field_name)
                    setattr(entity_obj, field_name, field_val)
            # Set Entity status to saved to let everybody know it has been persisted
            entity_obj.state_.mark_saved()
            return entity_obj
        except Exception as exc:
            logger.error(f"Failed saving entity because of {exc}")
            raise
    def update(self, entity_obj, *data, **kwargs) -> "BaseEntity":
        """Update a record in the data store.
        Performs validations for unique attributes before creating the entity.
        Supports both dictionary and keyword argument updates to the entity::
            >>> user.update({'age': 10})
            >>> user.update(age=10)
        Returns the updated entity object.
        Throws `ValidationError` for validation failures on attribute values or uniqueness constraints.
        :param entity_obj: The entity object to be updated
        :param data: Dictionary of values to be updated for the entity
        :param kwargs: keyword arguments of attribute pairs to be updated
        """
        logger.debug(
            f"Updating existing `{self.entity_cls.__name__}` object with id {entity_obj.id}"
        )
        try:
            # Update entity's data attributes
            entity_obj._update_data(*data, **kwargs)
            # Do unique checks
            self._validate_unique(entity_obj, create=False)
            self._update(self.model_cls.from_entity(entity_obj))
            # Set Entity status to saved to let everybody know it has been persisted
            entity_obj.state_.mark_saved()
            return entity_obj
        except Exception as exc:
            logger.error(f"Failed updating entity because of {exc}")
            raise
    def _validate_unique(self, entity_obj, create: bool = True):
        """Validate the unique constraints for the entity. Raise ValidationError, if constraints were violated.
        This method internally uses each field object's fail method to construct a valid error message.
        :param entity_obj: Entity object to be validated
        :param create: boolean value to indicate that the validation is part of a create operation
        """
        # Build the filters from the unique constraints
        filters, excludes = {}, {}
        # Construct filter criteria based on unique fields defined in Entity class
        for field_name, field_obj in self.entity_cls.meta_.unique_fields.items():
            lookup_value = getattr(entity_obj, field_name, None)
            # Ignore empty lookup values
            if lookup_value in Field.empty_values:
                continue
            # Ignore identifiers on updates
            if not create and field_obj.identifier:
                excludes[field_name] = lookup_value
                continue
            filters[field_name] = lookup_value
        # Lookup the objects by filters and raise error if objects exist
        for filter_key, lookup_value in filters.items():
            if self.exists(excludes, **{filter_key: lookup_value}):
                field_obj = self.entity_cls.meta_.declared_fields[filter_key]
                field_obj.fail(
                    "unique",
                    entity_name=self.entity_cls.__name__,
                    field_name=filter_key,
                )
    def delete(self, entity_obj) -> "BaseEntity":
        """Delete a record in the data store.
        Performs validations before data deletion.
        Returns the deleted entity object.
        Throws ObjectNotFoundError if the object was not found in the data store.
        :param entity_obj: Entity object to be deleted from data store
        """
        try:
            # Deleting twice is a no-op; only act on live entities.
            if not entity_obj.state_.is_destroyed:
                self._delete(self.model_cls.from_entity(entity_obj))
                # Set Entity status to destroyed to let everybody know the object is no longer referable
                entity_obj.state_.mark_destroyed()
            return entity_obj
        except Exception as exc:
            logger.error(f"Failed entity deletion because of {exc}")
            raise
    def delete_all(self) -> None:
        """Delete all records in this table/document in the persistent store.
        Does not perform validations before data deletion.
        Does not return confirmation of data deletion.
        """
        try:
            self._delete_all()
        except Exception as exc:
            logger.error(f"Failed deletion of all records because of {exc}")
            raise
class BaseLookup(metaclass=ABCMeta):
    """Base Lookup class to implement for each lookup
    Inspired by the lookup mechanism implemented in Django.
    Each lookup, which is simply a data comparison (like `name == 'John'`), is implemented as a subclass of this
    class, and has to implement the `as_expression()` method to provide the representation that the persistence
    store needs.
    Lookups are identified by their names, and the names are stored in the `lookup_name` class variable.
    """
    # Subclasses must override this with the lookup's registered identifier (e.g. "exact", "gt").
    lookup_name = None
    def __init__(self, source, target):
        """Source is LHS and Target is RHS of a comparison.
        For example, in the expression `name == 'John'`, `name` is source (LHS) and `'John'` is target (RHS).
        In other words, source is the key/column/attribute to be searched on, and target is the value present in the
        persistent store.
        """
        self.source, self.target = source, target
    def process_source(self):
        """This is a blank implementation that simply returns the source.
        Returns `source` (LHS of the expression).
        You can override this method to manipulate the source when necessary. For example, if you are using a
        data store that cannot perform case-insensitive queries, it may be useful to always compare in lowercase.
        """
        return self.source
    def process_target(self):
        """This is a blank implementation that simply returns the target.
        Returns `target` (RHS of the expression).
        You can override this method to manipulate the target when necessary. A good example of overriding this
        method is when you are using a data store that needs strings to be enclosed in single quotes.
        """
        return self.target
    @abstractmethod
    def as_expression(self):
        """This methods should return the source and the target in the format required by the persistence store.
        Concrete implementation for this method varies from database to database.
        """
        raise NotImplementedError
| 39.696763 | 117 | 0.652691 |
aceb6fa01804c2f1f4049ad3ec642f49e76212ab | 16,069 | py | Python | tests/core/contracts/test_contract_call_interface.py | happyuc-project/webu.py | 5a01124fc84d74df09a33d9dabe88b704cd5b6c6 | [
"MIT"
] | null | null | null | tests/core/contracts/test_contract_call_interface.py | happyuc-project/webu.py | 5a01124fc84d74df09a33d9dabe88b704cd5b6c6 | [
"MIT"
] | null | null | null | tests/core/contracts/test_contract_call_interface.py | happyuc-project/webu.py | 5a01124fc84d74df09a33d9dabe88b704cd5b6c6 | [
"MIT"
] | null | null | null | import pytest
from hexbytes import (
HexBytes,
)
from webu.exceptions import (
BadFunctionCallOutput,
BlockNumberOutofRange,
InvalidAddress,
)
from webu.utils.ens import (
contract_ens_addresses,
)
# Ignore warning in pyhappyuc 1.6 - will go away with the upgrade.
# Applied module-wide via pytestmark, so every test here suppresses it.
pytestmark = pytest.mark.filterwarnings("ignore:implicit cast from 'char *'")
def deploy(webu, Contract, args=None):
    """Deploy ``Contract`` through ``webu`` and return an instance bound to
    the freshly created address.

    Fails the calling test when no receipt is available or when no code was
    actually stored at the new address.
    """
    txn_hash = Contract.deploy(args=args)
    receipt = webu.eth.getTransactionReceipt(txn_hash)
    assert receipt is not None
    deployed = Contract(address=receipt['contractAddress'])
    code = webu.eth.getCode(deployed.address)
    assert len(code) > 0
    return deployed
@pytest.fixture()
def address_reflector_contract(webu, AddressReflectorContract):
    # Deployed instance of the address-reflecting helper contract.
    return deploy(webu, AddressReflectorContract)
@pytest.fixture()
def math_contract(webu, MathContract):
    # Deployed instance of the arithmetic helper contract.
    return deploy(webu, MathContract)
@pytest.fixture()
def string_contract(webu, StringContract):
    # Deployed with an initial string value of "Caqalai".
    return deploy(webu, StringContract, args=["Caqalai"])
@pytest.fixture()
def arrays_contract(webu, ArraysContract):
    # bytes_32 = [keccak('0'), keccak('1')]
    bytes32_array = [
        b'\x04HR\xb2\xa6p\xad\xe5@~x\xfb(c\xc5\x1d\xe9\xfc\xb9eB\xa0q\x86\xfe:\xed\xa6\xbb\x8a\x11m',  # noqa: E501
        b'\xc8\x9e\xfd\xaaT\xc0\xf2\x0cz\xdfa(\x82\xdf\tP\xf5\xa9Qc~\x03\x07\xcd\xcbLg/)\x8b\x8b\xc6',  # noqa: E501
    ]
    byte_arr = [b'\xff', b'\xff', b'\xff', b'\xff']
    return deploy(webu, ArraysContract, args=[bytes32_array, byte_arr])
@pytest.fixture()
def address_contract(webu, WithConstructorAddressArgumentsContract):
    # Deployed with a checksummed address as the constructor argument.
    return deploy(webu, WithConstructorAddressArgumentsContract, args=[
        "0xd3CdA913deB6f67967B99D67aCDFa1712C293601",
    ])
@pytest.fixture(params=[b'\x04\x06', '0x0406', '0406'])
def bytes_contract(webu, BytesContract, request):
    # Parametrized over the accepted input encodings of a bytes argument.
    return deploy(webu, BytesContract, args=[request.param])
@pytest.fixture()
def call_transaction():
    # Raw call payload: a 4-byte function selector and a target address.
    return {
        'data': '0x61bc221a',
        'to': '0xc305c901078781C232A2a521C2aF7980f8385ee9'
    }
@pytest.fixture(params=[
    '0x0406040604060406040604060406040604060406040604060406040604060406',
    '0406040604060406040604060406040604060406040604060406040604060406',
    HexBytes('0406040604060406040604060406040604060406040604060406040604060406'),
])
def bytes32_contract(webu, Bytes32Contract, request):
    # Parametrized over accepted encodings of a bytes32 constructor argument.
    return deploy(webu, Bytes32Contract, args=[request.param])
@pytest.fixture()
def undeployed_math_contract(webu, MathContract):
    # Contract object bound to an address that holds no code.
    empty_address = "0x000000000000000000000000000000000000dEaD"
    _undeployed_math_contract = MathContract(address=empty_address)
    return _undeployed_math_contract
@pytest.fixture()
def mismatched_math_contract(webu, StringContract, MathContract):
    # MathContract ABI pointed at an address where StringContract was deployed.
    deploy_txn = StringContract.deploy(args=["Caqalai"])
    deploy_receipt = webu.eth.getTransactionReceipt(deploy_txn)
    assert deploy_receipt is not None
    _mismatched_math_contract = MathContract(address=deploy_receipt['contractAddress'])
    return _mismatched_math_contract
@pytest.fixture()
def fallback_function_contract(webu, FallballFunctionContract):
    # Deployed instance of the fallback-function test contract.
    return deploy(webu, FallballFunctionContract)
def test_invalid_address_in_deploy_arg(webu, WithConstructorAddressArgumentsContract):
    # A non-checksummed address constructor argument must be rejected.
    with pytest.raises(InvalidAddress):
        WithConstructorAddressArgumentsContract.deploy(args=[
            "0xd3cda913deb6f67967b99d67acdfa1712c293601",
        ])
def test_call_with_no_arguments(math_contract, call):
    # Zero-argument constant function.
    result = call(contract=math_contract,
                  contract_function='return13')
    assert result == 13
def test_call_with_one_argument(math_contract, call):
    result = call(contract=math_contract,
                  contract_function='multiply7',
                  func_args=[3])
    assert result == 21
@pytest.mark.parametrize(
    'call_args,call_kwargs',
    (
        ((9, 7), {}),
        ((9,), {'b': 7}),
        (tuple(), {'a': 9, 'b': 7}),
    ),
)
def test_call_with_multiple_arguments(math_contract, call, call_args, call_kwargs):
    # Positional, mixed, and keyword-only argument passing must all work.
    result = call(contract=math_contract,
                  contract_function='add',
                  func_args=call_args,
                  func_kwargs=call_kwargs)
    assert result == 16
@pytest.mark.parametrize(
    'call_args,call_kwargs',
    (
        ((9, 7), {}),
        ((9,), {'b': 7}),
        (tuple(), {'a': 9, 'b': 7}),
    ),
)
def test_saved_method_call_with_multiple_arguments(math_contract, call_args, call_kwargs):
    # A bound contract function object can be stored and called later.
    math_contract_add = math_contract.functions.add(*call_args, **call_kwargs)
    result = math_contract_add.call()
    assert result == 16
def test_call_get_string_value(string_contract, call):
    result = call(contract=string_contract,
                  contract_function='getValue')
    # eth_abi.decode_api() does not assume implicit utf-8
    # encoding of string return values. Thus, we need to decode
    # ourselves for fair comparison.
    assert result == "Caqalai"
def test_call_read_string_variable(string_contract, call):
    # The constant holds every byte value 0x00-0xff; compare against the
    # backslash-replaced decoding of that full byte range.
    result = call(contract=string_contract,
                  contract_function='constValue')
    assert result == b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff".decode(errors='backslashreplace')  # noqa: E501
def test_call_get_bytes32_array(arrays_contract, call):
    result = call(contract=arrays_contract,
                  contract_function='getBytes32Value')
    # expected_bytes32_array = [keccak('0'), keccak('1')]
    expected_bytes32_array = [
        b'\x04HR\xb2\xa6p\xad\xe5@~x\xfb(c\xc5\x1d\xe9\xfc\xb9eB\xa0q\x86\xfe:\xed\xa6\xbb\x8a\x11m',  # noqa: E501
        b'\xc8\x9e\xfd\xaaT\xc0\xf2\x0cz\xdfa(\x82\xdf\tP\xf5\xa9Qc~\x03\x07\xcd\xcbLg/)\x8b\x8b\xc6',  # noqa: E501
    ]
    assert result == expected_bytes32_array
def test_call_get_bytes32_const_array(arrays_contract, call):
    result = call(contract=arrays_contract,
                  contract_function='getBytes32ConstValue')
    # expected_bytes32_array = [keccak('A'), keccak('B')]
    expected_bytes32_array = [
        b'\x03x?\xac.\xfe\xd8\xfb\xc9\xadD>Y.\xe3\x0ea\xd6_G\x11@\xc1\x0c\xa1U\xe97\xb45\xb7`',
        b'\x1fg[\xff\x07Q_]\xf9g7\x19N\xa9E\xc3lA\xe7\xb4\xfc\xef0{|\xd4\xd0\xe6\x02\xa6\x91\x11',
    ]
    assert result == expected_bytes32_array
def test_call_get_byte_array(arrays_contract, call):
    result = call(contract=arrays_contract,
                  contract_function='getByteValue')
    expected_byte_arr = [b'\xff', b'\xff', b'\xff', b'\xff']
    assert result == expected_byte_arr
def test_call_get_byte_const_array(arrays_contract, call):
    result = call(contract=arrays_contract,
                  contract_function='getByteConstValue')
    expected_byte_arr = [b'\x00', b'\x01']
    assert result == expected_byte_arr
def test_call_read_address_variable(address_contract, call):
    result = call(contract=address_contract,
                  contract_function='testAddr')
    assert result == "0xd3CdA913deB6f67967B99D67aCDFa1712C293601"
def test_init_with_ens_name_arg(webu, WithConstructorAddressArgumentsContract, call):
    # An ENS name passed as a constructor argument should resolve to its address.
    with contract_ens_addresses(
        WithConstructorAddressArgumentsContract,
        [("arg-name.eth", "0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413")],
    ):
        address_contract = deploy(webu, WithConstructorAddressArgumentsContract, args=[
            "arg-name.eth",
        ])
    result = call(contract=address_contract,
                  contract_function='testAddr')
    assert result == "0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413"
def test_call_read_bytes_variable(bytes_contract, call):
result = call(contract=bytes_contract, contract_function='constValue')
assert result == b"\x01\x23"
def test_call_get_bytes_value(bytes_contract, call):
result = call(contract=bytes_contract, contract_function='getValue')
assert result == b'\x04\x06'
def test_call_read_bytes32_variable(bytes32_contract, call):
result = call(contract=bytes32_contract, contract_function='constValue')
assert result == b"\x01\x23\x01\x23\x01\x23\x01\x23\x01\x23\x01\x23\x01\x23\x01\x23\x01\x23\x01\x23\x01\x23\x01\x23\x01\x23\x01\x23\x01\x23\x01\x23" # noqa
def test_call_get_bytes32_value(bytes32_contract, call):
result = call(contract=bytes32_contract, contract_function='getValue')
assert result == b'\x04\x06\x04\x06\x04\x06\x04\x06\x04\x06\x04\x06\x04\x06\x04\x06\x04\x06\x04\x06\x04\x06\x04\x06\x04\x06\x04\x06\x04\x06\x04\x06' # noqa
@pytest.mark.parametrize(
'value, expected',
[
(
'0x' + '11' * 20,
'0x' + '11' * 20,
),
(
'0xbb9bc244d798123fde783fcc1c72d3bb8c189413',
InvalidAddress,
),
(
'0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413',
'0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413',
),
]
)
def test_call_address_reflector_with_address(address_reflector_contract, value, expected, call):
if not isinstance(expected, str):
with pytest.raises(expected):
call(contract=address_reflector_contract,
contract_function='reflect',
func_args=[value])
else:
assert call(contract=address_reflector_contract,
contract_function='reflect',
func_args=[value]) == expected
@pytest.mark.parametrize(
'value, expected',
[
(
['0x' + '11' * 20, '0x' + '22' * 20],
['0x' + '11' * 20, '0x' + '22' * 20],
),
(
['0x' + '11' * 20, '0x' + 'aa' * 20],
InvalidAddress
),
(
[
'0xFeC2079e80465cc8C687fFF9EE6386ca447aFec4',
'0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413',
],
[
'0xFeC2079e80465cc8C687fFF9EE6386ca447aFec4',
'0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413',
],
),
]
)
def test_call_address_list_reflector_with_address(address_reflector_contract,
value,
expected,
call):
if not isinstance(expected, list):
with pytest.raises(expected):
call(contract=address_reflector_contract,
contract_function='reflect',
func_args=[value])
else:
assert call(contract=address_reflector_contract,
contract_function='reflect',
func_args=[value]) == expected
def test_call_address_reflector_single_name(address_reflector_contract, call):
with contract_ens_addresses(
address_reflector_contract,
[("dennisthepeasant.eth", "0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413")],
):
result = call(contract=address_reflector_contract,
contract_function='reflect',
func_args=['dennisthepeasant.eth'])
assert result == '0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413'
def test_call_address_reflector_name_array(address_reflector_contract, call):
names = [
'autonomouscollective.eth',
'wedonthavealord.eth',
]
addresses = [
'0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413',
'0xFeC2079e80465cc8C687fFF9EE6386ca447aFec4',
]
with contract_ens_addresses(address_reflector_contract, zip(names, addresses)):
result = call(contract=address_reflector_contract,
contract_function='reflect',
func_args=[names])
assert addresses == result
def test_call_reject_invalid_ens_name(address_reflector_contract, call):
with contract_ens_addresses(address_reflector_contract, []):
with pytest.raises(ValueError):
call(contract=address_reflector_contract,
contract_function='reflect',
func_args=['type0.eth'])
def test_call_missing_function(mismatched_math_contract, call):
expected_missing_function_error_message = "Could not decode contract function call"
with pytest.raises(BadFunctionCallOutput) as exception_info:
call(contract=mismatched_math_contract, contract_function='return13')
assert expected_missing_function_error_message in str(exception_info.value)
def test_call_undeployed_contract(undeployed_math_contract, call):
expected_undeployed_call_error_message = "Could not transact with/call contract function"
with pytest.raises(BadFunctionCallOutput) as exception_info:
call(contract=undeployed_math_contract, contract_function='return13')
assert expected_undeployed_call_error_message in str(exception_info.value)
def test_call_fallback_function(fallback_function_contract):
result = fallback_function_contract.fallback.call()
assert result == []
def test_throws_error_if_block_out_of_range(webu, math_contract):
webu.providers[0].make_request(method='evm_mine', params=[20])
with pytest.raises(BlockNumberOutofRange):
math_contract.functions.counter().call(block_identifier=50)
with pytest.raises(BlockNumberOutofRange):
math_contract.functions.counter().call(block_identifier=-50)
def test_accepts_latest_block(webu, math_contract):
webu.providers[0].make_request(method='evm_mine', params=[5])
math_contract.functions.increment().transact()
late = math_contract.functions.counter().call(block_identifier='latest')
pend = math_contract.functions.counter().call(block_identifier='pending')
assert late == 1
assert pend == 1
def test_accepts_block_hash_as_identifier(webu, math_contract):
blocks = webu.providers[0].make_request(method='evm_mine', params=[5])
math_contract.functions.increment().transact()
more_blocks = webu.providers[0].make_request(method='evm_mine', params=[5])
old = math_contract.functions.counter().call(block_identifier=blocks['result'][2])
new = math_contract.functions.counter().call(block_identifier=more_blocks['result'][2])
assert old == 0
assert new == 1
def test_neg_block_indexes_from_the_end(webu, math_contract):
webu.providers[0].make_request(method='evm_mine', params=[5])
math_contract.functions.increment().transact()
math_contract.functions.increment().transact()
webu.providers[0].make_request(method='evm_mine', params=[5])
output1 = math_contract.functions.counter().call(block_identifier=-7)
output2 = math_contract.functions.counter().call(block_identifier=-6)
assert output1 == 1
assert output2 == 2
def test_returns_data_from_specified_block(webu, math_contract):
start_num = webu.eth.getBlock('latest').number
webu.providers[0].make_request(method='evm_mine', params=[5])
math_contract.functions.increment().transact()
math_contract.functions.increment().transact()
output1 = math_contract.functions.counter().call(block_identifier=start_num + 6)
output2 = math_contract.functions.counter().call(block_identifier=start_num + 7)
assert output1 == 1
assert output2 == 2
| 37.720657 | 1,096 | 0.697305 |
aceb717b9b77b8bf1f5d3e691c6a3e6994de9cf6 | 4,015 | py | Python | smarts/zoo/worker.py | ehtnamuh/Smarts-Fork | 2cd7e85947e7feb7da46753f664ff5e8cc6ace6c | [
"MIT"
] | null | null | null | smarts/zoo/worker.py | ehtnamuh/Smarts-Fork | 2cd7e85947e7feb7da46753f664ff5e8cc6ace6c | [
"MIT"
] | null | null | null | smarts/zoo/worker.py | ehtnamuh/Smarts-Fork | 2cd7e85947e7feb7da46753f664ff5e8cc6ace6c | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Run an agent in it's own (independent) process.
What Agent code does is out of our direct control, we want to avoid any interactions with global state that might be present in the SMARTS process.
To protect and isolate Agents from any pollution of global state in the main SMARTS process, we spawn Agents in their fresh and independent python process.
This script is called from within SMARTS to instantiate a remote agent.
The protocal is as follows:
1. SMARTS calls: worker.py --port 5467 # sets a unique port per agent
2. worker.py will begin listening on port 5467.
3. SMARTS connects to (ip, 5467) as a client.
4. SMARTS calls `build()` rpc with `AgentSpec` as input.
5. worker.py recieves the `AgentSpec` instances and builds the Agent.
6. SMARTS calls `act()` rpc with observation as input and receives the actions as response from worker.py.
"""
import argparse
import importlib
import logging
import os
import signal
from concurrent import futures
import grpc
from smarts.zoo import worker_pb2_grpc, worker_servicer
# Front-load some expensive imports as to not block the simulation
modules = [
"smarts.core.utils.pybullet",
"smarts.core.utils.sumo",
"smarts.core.road_map",
"numpy",
"sklearn",
"shapely",
"scipy",
"trimesh",
"panda3d",
"gym",
]
for mod in modules:
try:
importlib.import_module(mod)
except ImportError:
if mod == "panda3d":
print(
"You need to install the panda3d dependency using pip install -e .[camera-obs] first"
)
pass
# End front-loaded imports
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(f"worker.py - pid({os.getpid()})")
def serve(port):
ip = "[::]"
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
worker_pb2_grpc.add_WorkerServicer_to_server(
worker_servicer.WorkerServicer(), server
)
server.add_insecure_port(f"{ip}:{port}")
server.start()
log.debug(f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Started serving.")
def stop_server(*args):
server.stop(0)
log.debug(
f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Received interrupt signal."
)
# Catch keyboard interrupt and terminate signal
signal.signal(signal.SIGINT, stop_server)
signal.signal(signal.SIGTERM, stop_server)
# Wait to receive server termination signal
server.wait_for_termination()
log.debug(f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Server exited")
if __name__ == "__main__":
parser = argparse.ArgumentParser("Run an agent in an independent process.")
parser.add_argument(
"--port",
type=int,
required=True,
help="Port to listen for remote client connections.",
)
args = parser.parse_args()
serve(args.port)
| 33.739496 | 155 | 0.70934 |
aceb7203522462d655570a427f8c1ea2c1a4ded1 | 24,809 | py | Python | python/cudf/tests/test_string.py | abellina/cudf | a8e886708023a2773722024bdacd686785a27500 | [
"Apache-2.0"
] | null | null | null | python/cudf/tests/test_string.py | abellina/cudf | a8e886708023a2773722024bdacd686785a27500 | [
"Apache-2.0"
] | 1 | 2020-10-23T17:44:07.000Z | 2020-10-23T17:44:07.000Z | python/cudf/tests/test_string.py | nartal1/cudf | 27656c1f7ab730548a21d37aaf3488a560abca7e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018, NVIDIA CORPORATION.
import pytest
from contextlib import ExitStack as does_not_raise
import numpy as np
import pandas as pd
import pyarrow as pa
from numba import cuda
from cudf import concat
from cudf.dataframe import DataFrame, Series
from cudf.dataframe.index import StringIndex, StringColumn
from cudf.tests.utils import assert_eq
from librmm_cffi import librmm as rmm
data_list = [
['AbC', 'de', 'FGHI', 'j', 'kLm'],
['nOPq', None, 'RsT', None, 'uVw'],
[None, None, None, None, None]
]
data_id_list = [
"no_nulls",
"some_nulls",
"all_nulls"
]
idx_list = [
None,
[10, 11, 12, 13, 14]
]
idx_id_list = [
"None_index",
"Set_index"
]
def raise_builder(flags, exceptions):
if any(flags):
return pytest.raises(exceptions)
else:
return does_not_raise()
@pytest.fixture(params=data_list, ids=data_id_list)
def data(request):
return request.param
@pytest.fixture(params=idx_list, ids=idx_id_list)
def index(request):
return request.param
@pytest.fixture
def ps_gs(data, index):
ps = pd.Series(data, index=index, dtype='str')
gs = Series(data, index=index, dtype='str')
return (ps, gs)
@pytest.mark.parametrize('construct', [list, np.array, pd.Series, pa.array])
def test_string_ingest(construct):
expect = ['a', 'a', 'b', 'c', 'a']
data = construct(expect)
got = Series(data)
assert got.dtype == np.dtype('object')
assert len(got) == 5
for idx, val in enumerate(expect):
assert expect[idx] == got[idx]
def test_string_export(ps_gs):
ps, gs = ps_gs
expect = ps
got = gs.to_pandas()
pd.testing.assert_series_equal(expect, got)
expect = np.array(ps)
got = gs.to_array()
np.testing.assert_array_equal(expect, got)
expect = pa.Array.from_pandas(ps)
got = gs.to_arrow()
assert pa.Array.equals(expect, got)
@pytest.mark.parametrize(
'item',
[
0,
2,
4,
slice(1, 3),
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 2, 3, 4, 4, 3, 2, 1, 0],
np.array([0, 1, 2, 3, 4]),
rmm.to_device(np.array([0, 1, 2, 3, 4]))
]
)
def test_string_get_item(ps_gs, item):
ps, gs = ps_gs
got = gs[item]
if isinstance(got, Series):
got = got.to_arrow()
if isinstance(item, cuda.devicearray.DeviceNDArray):
item = item.copy_to_host()
expect = ps.iloc[item]
if isinstance(expect, pd.Series):
expect = pa.Array.from_pandas(expect)
pa.Array.equals(expect, got)
else:
assert expect == got
@pytest.mark.parametrize(
'item',
[
[True] * 5,
[False] * 5,
np.array([True] * 5),
np.array([False] * 5),
rmm.to_device(np.array([True] * 5)),
rmm.to_device(np.array([False] * 5)),
list(np.random.randint(0, 2, 5).astype('bool')),
np.random.randint(0, 2, 5).astype('bool'),
rmm.to_device(np.random.randint(0, 2, 5).astype('bool'))
]
)
def test_string_bool_mask(ps_gs, item):
ps, gs = ps_gs
got = gs[item]
if isinstance(got, Series):
got = got.to_arrow()
if isinstance(item, cuda.devicearray.DeviceNDArray):
item = item.copy_to_host()
expect = ps[item]
if isinstance(expect, pd.Series):
expect = pa.Array.from_pandas(expect)
pa.Array.equals(expect, got)
else:
assert expect == got
@pytest.mark.parametrize('item', [0, slice(1, 3), slice(5)])
def test_string_repr(ps_gs, item):
ps, gs = ps_gs
got_out = gs[item]
expect_out = ps.iloc[item]
expect = str(expect_out)
got = str(got_out)
# if isinstance(expect_out, pd.Series):
# expect = expect.replace("object", "str")
assert expect == got
@pytest.mark.parametrize('dtype', ['int8', 'int16', 'int32', 'int64',
'float32', 'float64', 'bool',
'datetime64[ms]'])
def test_string_astype(dtype):
if dtype.startswith('int'):
data = ["1", "2", "3", "4", "5"]
elif dtype.startswith('float'):
data = ["1.0", "2.0", "3.0", "4.0", "5.0"]
elif dtype.startswith('bool'):
data = ["True", "False", "True", "False", "False"]
elif dtype.startswith('datetime64'):
data = [
"2019-06-04T00:00:00Z",
"2019-06-04T12:12:12Z",
"2019-06-03T00:00:00Z",
"2019-05-04T00:00:00Z",
"2018-06-04T00:00:00Z"
]
ps = pd.Series(data)
gs = Series(data)
# Pandas str --> bool typecasting always returns True if there's a string
if dtype.startswith('bool'):
expect = (ps == 'True')
else:
expect = ps.astype(dtype)
got = gs.astype(dtype)
assert_eq(expect, got)
@pytest.mark.parametrize('dtype', ['int8', 'int16', 'int32', 'int64',
'float32', 'float64', 'bool',
'datetime64[ms]'])
def test_string_empty_astype(dtype):
data = []
ps = pd.Series(data, dtype="str")
gs = Series(data, dtype="str")
expect = ps.astype(dtype)
got = gs.astype(dtype)
assert_eq(expect, got)
@pytest.mark.parametrize('dtype', ['int8', 'int16', 'int32', 'int64',
'float32', 'float64', 'bool',
'datetime64[ms]'])
def test_string_numeric_astype(dtype):
if dtype.startswith('bool'):
data = [1, 0, 1, 0, 1]
elif dtype.startswith('int'):
data = [1, 2, 3, 4, 5]
elif dtype.startswith('float'):
data = [1.0, 2.0, 3.0, 4.0, 5.0]
elif dtype.startswith('datetime64'):
data = [
1000000000,
2000000000,
3000000000,
4000000000,
5000000000
]
if dtype.startswith('datetime64'):
ps = pd.Series(data, dtype='datetime64[ns]')
gs = Series.from_pandas(ps)
else:
ps = pd.Series(data, dtype=dtype)
gs = Series(data, dtype=dtype)
# Pandas datetime64 --> str typecasting returns arbitrary format depending
# on the data, so making it consistent unless we choose to match the
# behavior
if dtype.startswith('datetime64'):
expect = ps.dt.strftime('%Y-%m-%dT%H:%M:%SZ')
else:
expect = ps.astype('str')
got = gs.astype('str')
assert_eq(expect, got)
@pytest.mark.parametrize('dtype', ['int8', 'int16', 'int32', 'int64',
'float32', 'float64', 'bool',
'datetime64[ms]'])
def test_string_empty_numeric_astype(dtype):
data = []
if dtype.startswith('datetime64'):
ps = pd.Series(data, dtype='datetime64[ns]')
else:
ps = pd.Series(data, dtype=dtype)
gs = Series(data, dtype=dtype)
expect = ps.astype('str')
got = gs.astype('str')
assert_eq(expect, got)
def test_string_concat():
data1 = ['a', 'b', 'c', 'd', 'e']
data2 = ['f', 'g', 'h', 'i', 'j']
ps1 = pd.Series(data1)
ps2 = pd.Series(data2)
gs1 = Series(data1)
gs2 = Series(data2)
expect = pd.concat([ps1, ps2])
got = concat([gs1, gs2])
assert_eq(expect, got)
@pytest.mark.parametrize('ascending', [True, False])
def test_string_sort(ps_gs, ascending):
ps, gs = ps_gs
expect = ps.sort_values(ascending=ascending)
got = gs.sort_values(ascending=ascending)
assert_eq(expect, got)
def test_string_len(ps_gs):
ps, gs = ps_gs
expect = ps.str.len()
got = gs.str.len()
# Can't handle nulls in Pandas so use PyArrow instead
# Pandas will return as a float64 so need to typecast to int32
expect = pa.array(expect, from_pandas=True).cast(pa.int32())
got = got.to_arrow()
assert pa.Array.equals(expect, got)
@pytest.mark.parametrize('others', [
None,
['f', 'g', 'h', 'i', 'j'],
('f', 'g', 'h', 'i', 'j'),
pd.Series(['f', 'g', 'h', 'i', 'j']),
pd.Index(['f', 'g', 'h', 'i', 'j']),
(['f', 'g', 'h', 'i', 'j'], ['f', 'g', 'h', 'i', 'j']),
[['f', 'g', 'h', 'i', 'j'], ['f', 'g', 'h', 'i', 'j']],
(
pd.Series(['f', 'g', 'h', 'i', 'j']),
['f', 'a', 'b', 'f', 'a'],
pd.Series(['f', 'g', 'h', 'i', 'j']),
['f', 'a', 'b', 'f', 'a'],
['f', 'a', 'b', 'f', 'a'],
pd.Index(['1', '2', '3', '4', '5']),
['f', 'a', 'b', 'f', 'a'],
pd.Index(['f', 'g', 'h', 'i', 'j'])
),
[
pd.Index(['f', 'g', 'h', 'i', 'j']),
['f', 'a', 'b', 'f', 'a'],
pd.Series(['f', 'g', 'h', 'i', 'j']),
['f', 'a', 'b', 'f', 'a'],
['f', 'a', 'b', 'f', 'a'],
pd.Index(['f', 'g', 'h', 'i', 'j']),
['f', 'a', 'b', 'f', 'a'],
pd.Index(['f', 'g', 'h', 'i', 'j'])
]
])
@pytest.mark.parametrize('sep', [None, '', ' ', '|', ',', '|||'])
@pytest.mark.parametrize('na_rep', [None, '', 'null', 'a'])
@pytest.mark.parametrize('index', [
['1', '2', '3', '4', '5'],
pd.Series(['1', '2', '3', '4', '5']),
pd.Index(['1', '2', '3', '4', '5'])
])
def test_string_cat(ps_gs, others, sep, na_rep, index):
ps, gs = ps_gs
pd_others = others
if isinstance(pd_others, pd.Series):
pd_others = pd_others.values
expect = ps.str.cat(others=pd_others, sep=sep, na_rep=na_rep)
got = gs.str.cat(others=others, sep=sep, na_rep=na_rep)
assert_eq(expect, got)
ps.index = index
gs.index = index
expect = ps.str.cat(others=ps.index, sep=sep, na_rep=na_rep)
got = gs.str.cat(others=gs.index, sep=sep, na_rep=na_rep)
assert_eq(expect, got)
expect = ps.str.cat(others=[ps.index] + [ps.index],
sep=sep, na_rep=na_rep)
got = gs.str.cat(others=[gs.index] + [gs.index],
sep=sep, na_rep=na_rep)
assert_eq(expect, got)
expect = ps.str.cat(others=(ps.index, ps.index),
sep=sep, na_rep=na_rep)
got = gs.str.cat(others=(gs.index, gs.index),
sep=sep, na_rep=na_rep)
assert_eq(expect, got)
@pytest.mark.xfail(raises=(NotImplementedError, AttributeError))
@pytest.mark.parametrize('sep', [None, '', ' ', '|', ',', '|||'])
def test_string_join(ps_gs, sep):
ps, gs = ps_gs
expect = ps.str.join(sep)
got = gs.str.join(sep)
assert_eq(expect, got)
@pytest.mark.parametrize('pat', [
r'(a)',
r'(f)',
r'([a-z])',
r'([A-Z])'
])
@pytest.mark.parametrize('expand', [True, False])
@pytest.mark.parametrize('flags,flags_raise', [
(0, 0),
(1, 1)
])
def test_string_extract(ps_gs, pat, expand, flags, flags_raise):
ps, gs = ps_gs
expectation = raise_builder([flags_raise], NotImplementedError)
with expectation:
expect = ps.str.extract(pat, flags=flags, expand=expand)
got = gs.str.extract(pat, flags=flags, expand=expand)
assert_eq(expect, got)
@pytest.mark.parametrize('pat,regex', [
('a', False),
('f', False),
(r'[a-z]', True),
(r'[A-Z]', True)
])
@pytest.mark.parametrize('case,case_raise', [
(True, 0),
(False, 1)
])
@pytest.mark.parametrize('flags,flags_raise', [
(0, 0),
(1, 1)
])
@pytest.mark.parametrize('na,na_raise', [
(np.nan, 0),
(None, 1),
('', 1)
])
def test_string_contains(ps_gs, pat, regex, case, case_raise, flags,
flags_raise, na, na_raise):
ps, gs = ps_gs
expectation = raise_builder(
[case_raise, flags_raise, na_raise],
NotImplementedError
)
with expectation:
expect = ps.str.contains(pat, case=case, flags=flags, na=na,
regex=regex)
got = gs.str.contains(pat, case=case, flags=flags, na=na, regex=regex)
expect = pa.array(expect, from_pandas=True).cast(pa.bool_())
got = got.to_arrow()
assert pa.Array.equals(expect, got)
# Pandas isn't respect the `n` parameter so ignoring it in test parameters
@pytest.mark.parametrize('pat,regex', [
('a', False),
('f', False),
(r'[a-z]', True),
(r'[A-Z]', True)
])
@pytest.mark.parametrize('repl', ['qwerty', '', ' '])
@pytest.mark.parametrize('case,case_raise', [
(None, 0),
(True, 1),
(False, 1)
])
@pytest.mark.parametrize('flags,flags_raise', [
(0, 0),
(1, 1)
])
def test_string_replace(ps_gs, pat, repl, case, case_raise, flags,
flags_raise, regex):
ps, gs = ps_gs
expectation = raise_builder(
[case_raise, flags_raise],
NotImplementedError
)
with expectation:
expect = ps.str.replace(pat, repl, case=case, flags=flags,
regex=regex)
got = gs.str.replace(pat, repl, case=case, flags=flags,
regex=regex)
assert_eq(expect, got)
def test_string_lower(ps_gs):
ps, gs = ps_gs
expect = ps.str.lower()
got = ps.str.lower()
assert_eq(expect, got)
@pytest.mark.parametrize('data', [
['a b', ' c ', ' d', 'e ', 'f'],
['a-b', '-c-', '---d', 'e---', 'f'],
['ab', 'c', 'd', 'e', 'f'],
[None, None, None, None, None]
])
@pytest.mark.parametrize('pat', [
None,
' ',
'-'
])
@pytest.mark.parametrize('n', [-1, 0, 1, 3, 10])
@pytest.mark.parametrize('expand,expand_raise', [
(True, 0),
(False, 1)
])
def test_string_split(data, pat, n, expand, expand_raise):
if data in (
['a b', ' c ', ' d', 'e ', 'f'],
) and pat is None:
pytest.xfail("None pattern split algorithm not implemented yet")
ps = pd.Series(data, dtype='str')
gs = Series(data, dtype='str')
expectation = raise_builder(
[expand_raise],
NotImplementedError
)
with expectation:
expect = ps.str.split(pat=pat, n=n, expand=expand)
got = gs.str.split(pat=pat, n=n, expand=expand)
assert_eq(expect, got)
@pytest.mark.parametrize('str_data,str_data_raise', [
([], 0),
(['a', 'b', 'c', 'd', 'e'], 0),
([None, None, None, None, None], 1)
])
@pytest.mark.parametrize('num_keys', [1, 2, 3])
@pytest.mark.parametrize('how,how_raise', [
('left', 0),
('right', 1),
('inner', 0),
('outer', 0)
])
def test_string_join_key(str_data, str_data_raise, num_keys, how, how_raise):
other_data = [1, 2, 3, 4, 5][:len(str_data)]
pdf = pd.DataFrame()
gdf = DataFrame()
for i in range(num_keys):
pdf[i] = pd.Series(str_data, dtype='str')
gdf[i] = Series(str_data, dtype='str')
pdf['a'] = other_data
gdf['a'] = other_data
pdf2 = pdf.copy()
gdf2 = gdf.copy()
expectation = raise_builder(
[how_raise, str_data_raise],
(NotImplementedError, AssertionError)
)
with expectation:
expect = pdf.merge(pdf2, on=list(range(num_keys)), how=how)
got = gdf.merge(gdf2, on=list(range(num_keys)), how=how)
if len(expect) == 0 and len(got) == 0:
expect = expect.reset_index(drop=True)
got = got[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize('str_data_nulls', [
['a', 'b', 'c'],
['a', 'b', 'f', 'g'],
['f', 'g', 'h', 'i', 'j'],
['f', 'g', 'h'],
[None, None, None, None, None],
[]
])
def test_string_join_key_nulls(str_data_nulls):
str_data = ['a', 'b', 'c', 'd', 'e']
other_data = [1, 2, 3, 4, 5]
other_data_nulls = [6, 7, 8, 9, 10][:len(str_data_nulls)]
pdf = pd.DataFrame()
gdf = DataFrame()
pdf['key'] = pd.Series(str_data, dtype='str')
gdf['key'] = Series(str_data, dtype='str')
pdf['vals'] = other_data
gdf['vals'] = other_data
pdf2 = pd.DataFrame()
gdf2 = DataFrame()
pdf2['key'] = pd.Series(str_data_nulls, dtype='str')
gdf2['key'] = Series(str_data_nulls, dtype='str')
pdf2['vals'] = pd.Series(other_data_nulls, dtype='int64')
gdf2['vals'] = Series(other_data_nulls, dtype='int64')
expect = pdf.merge(pdf2, on='key', how='left')
got = gdf.merge(gdf2, on='key', how='left')
if len(expect) == 0 and len(got) == 0:
expect = expect.reset_index(drop=True)
got = got[expect.columns]
expect["vals_y"] = expect["vals_y"].fillna(-1).astype('int64')
assert_eq(expect, got)
@pytest.mark.parametrize('str_data', [
[],
['a', 'b', 'c', 'd', 'e'],
[None, None, None, None, None]
])
@pytest.mark.parametrize('num_cols', [1, 2, 3])
@pytest.mark.parametrize('how,how_raise', [
('left', 0),
('right', 1),
('inner', 0),
('outer', 0)
])
def test_string_join_non_key(str_data, num_cols, how, how_raise):
other_data = [1, 2, 3, 4, 5][:len(str_data)]
pdf = pd.DataFrame()
gdf = DataFrame()
for i in range(num_cols):
pdf[i] = pd.Series(str_data, dtype='str')
gdf[i] = Series(str_data, dtype='str')
pdf['a'] = other_data
gdf['a'] = other_data
pdf2 = pdf.copy()
gdf2 = gdf.copy()
expectation = raise_builder(
[how_raise],
NotImplementedError
)
with expectation:
expect = pdf.merge(pdf2, on=['a'], how=how)
got = gdf.merge(gdf2, on=['a'], how=how)
if len(expect) == 0 and len(got) == 0:
expect = expect.reset_index(drop=True)
got = got[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize('str_data_nulls', [
['a', 'b', 'c'],
['a', 'b', 'f', 'g'],
['f', 'g', 'h', 'i', 'j'],
['f', 'g', 'h'],
[None, None, None, None, None],
[]
])
def test_string_join_non_key_nulls(str_data_nulls):
str_data = ['a', 'b', 'c', 'd', 'e']
other_data = [1, 2, 3, 4, 5]
other_data_nulls = [6, 7, 8, 9, 10][:len(str_data_nulls)]
pdf = pd.DataFrame()
gdf = DataFrame()
pdf['vals'] = pd.Series(str_data, dtype='str')
gdf['vals'] = Series(str_data, dtype='str')
pdf['key'] = other_data
gdf['key'] = other_data
pdf2 = pd.DataFrame()
gdf2 = DataFrame()
pdf2['vals'] = pd.Series(str_data_nulls, dtype='str')
gdf2['vals'] = Series(str_data_nulls, dtype='str')
pdf2['key'] = pd.Series(other_data_nulls, dtype='int64')
gdf2['key'] = Series(other_data_nulls, dtype='int64')
expect = pdf.merge(pdf2, on='key', how='left')
got = gdf.merge(gdf2, on='key', how='left')
if len(expect) == 0 and len(got) == 0:
expect = expect.reset_index(drop=True)
got = got[expect.columns]
assert_eq(expect, got)
def test_string_join_values_nulls():
left_dict = [
{'b': 'MATCH 1', 'a': 1.},
{'b': 'MATCH 1', 'a': 1.},
{'b': 'LEFT NO MATCH 1', 'a': -1.},
{'b': 'MATCH 2', 'a': 2.},
{'b': 'MATCH 2', 'a': 2.},
{'b': 'MATCH 1', 'a': 1.},
{'b': 'MATCH 1', 'a': 1.},
{'b': 'MATCH 2', 'a': 2.},
{'b': 'MATCH 2', 'a': 2.},
{'b': 'LEFT NO MATCH 2', 'a': -2.},
{'b': 'MATCH 3', 'a': 3.},
{'b': 'MATCH 3', 'a': 3.},
]
right_dict = [
{'b': 'RIGHT NO MATCH 1', 'c': -1.},
{'b': 'MATCH 3', 'c': 3.},
{'b': 'MATCH 2', 'c': 2.},
{'b': 'RIGHT NO MATCH 2', 'c': -2.},
{'b': 'RIGHT NO MATCH 3', 'c': -3.},
{'b': 'MATCH 1', 'c': 1.}
]
left_pdf = pd.DataFrame(left_dict)
right_pdf = pd.DataFrame(right_dict)
left_gdf = DataFrame.from_pandas(left_pdf)
right_gdf = DataFrame.from_pandas(right_pdf)
expect = left_pdf.merge(right_pdf, how='left', on='b')
got = left_gdf.merge(right_gdf, how='left', on='b')
expect = expect.sort_values(by=['a', 'b', 'c']).reset_index(drop=True)
got = got.sort_values(by=['a', 'b', 'c']).reset_index(drop=True)
assert_eq(expect, got)
@pytest.mark.parametrize('str_data', [
[],
['a', 'b', 'c', 'd', 'e'],
[None, None, None, None, None]
])
@pytest.mark.parametrize('num_keys', [1, 2, 3])
def test_string_groupby_key(str_data, num_keys):
other_data = [1, 2, 3, 4, 5][:len(str_data)]
pdf = pd.DataFrame()
gdf = DataFrame()
for i in range(num_keys):
pdf[i] = pd.Series(str_data, dtype='str')
gdf[i] = Series(str_data, dtype='str')
pdf['a'] = other_data
gdf['a'] = other_data
expect = pdf.groupby(list(range(num_keys)), as_index=False).count()
got = gdf.groupby(list(range(num_keys)), as_index=False).count()
expect = expect.sort_values([0]).reset_index(drop=True)
got = got.sort_values([0]).reset_index(drop=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize('str_data', [
[],
['a', 'b', 'c', 'd', 'e'],
[None, None, None, None, None]
])
@pytest.mark.parametrize('num_cols', [1, 2, 3])
def test_string_groupby_non_key(str_data, num_cols):
other_data = [1, 2, 3, 4, 5][:len(str_data)]
pdf = pd.DataFrame()
gdf = DataFrame()
for i in range(num_cols):
pdf[i] = pd.Series(str_data, dtype='str')
gdf[i] = Series(str_data, dtype='str')
pdf['a'] = other_data
gdf['a'] = other_data
expect = pdf.groupby('a', as_index=False).count()
got = gdf.groupby('a', as_index=False).count()
expect = expect.sort_values(['a']).reset_index(drop=True)
got = got.sort_values(['a']).reset_index(drop=True)
assert_eq(expect, got, check_dtype=False)
expect = pdf.groupby('a', as_index=False).max()
got = gdf.groupby('a', as_index=False).max()
expect = expect.sort_values(['a']).reset_index(drop=True)
got = got.sort_values(['a']).reset_index(drop=True)
if len(expect) == 0 and len(got) == 0:
for i in range(num_cols):
expect[i] = expect[i].astype('str')
assert_eq(expect, got, check_dtype=False)
expect = pdf.groupby('a', as_index=False).min()
got = gdf.groupby('a', as_index=False).min()
expect = expect.sort_values(['a']).reset_index(drop=True)
got = got.sort_values(['a']).reset_index(drop=True)
if len(expect) == 0 and len(got) == 0:
for i in range(num_cols):
expect[i] = expect[i].astype('str')
assert_eq(expect, got, check_dtype=False)
def test_string_groupby_key_index():
str_data = ['a', 'b', 'c', 'd', 'e']
other_data = [1, 2, 3, 4, 5]
pdf = pd.DataFrame()
gdf = DataFrame()
pdf['a'] = pd.Series(str_data, dtype="str")
gdf['a'] = Series(str_data, dtype="str")
pdf['b'] = other_data
gdf['b'] = other_data
expect = pdf.groupby('a').count()
got = gdf.groupby('a').count()
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize('scalar', [
'a',
None
])
def test_string_set_scalar(scalar):
pdf = pd.DataFrame()
pdf['a'] = [1, 2, 3, 4, 5]
gdf = DataFrame.from_pandas(pdf)
pdf['b'] = "a"
gdf['b'] = "a"
assert_eq(pdf['b'], gdf['b'])
assert_eq(pdf, gdf)
def test_string_index():
pdf = pd.DataFrame(np.random.rand(5, 5))
gdf = DataFrame.from_pandas(pdf)
stringIndex = ['a', 'b', 'c', 'd', 'e']
pdf.index = stringIndex
gdf.index = stringIndex
assert_eq(pdf, gdf)
stringIndex = np.array(['a', 'b', 'c', 'd', 'e'])
pdf.index = stringIndex
gdf.index = stringIndex
assert_eq(pdf, gdf)
stringIndex = StringIndex(['a', 'b', 'c', 'd', 'e'], name='name')
pdf.index = stringIndex
gdf.index = stringIndex
assert_eq(pdf, gdf)
stringIndex = StringColumn(['a', 'b', 'c', 'd', 'e'], name='name')
pdf.index = stringIndex
gdf.index = stringIndex
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
'item',
[
['Cbe', 'cbe', 'CbeD', 'Cb', 'ghi', 'Cb'],
['a', 'a', 'a', 'a', 'A'],
['A'],
['abc', 'xyz', None, 'ab', '123'],
[None, None, 'abc', None, 'abc'],
]
)
def test_string_unique(item):
ps = pd.Series(item)
gs = Series(item)
# Pandas `unique` returns a numpy array
pres = pd.Series(ps.unique())
# Nvstrings returns sorted unique with `None` placed before other strings
pres = pres.sort_values(na_position='first').reset_index(drop=True)
gres = gs.unique()
assert_eq(pres, gres)
def test_string_slice():
    """Slicing a string column must return a cudf Series, not another type."""
    frame = DataFrame({'a': ['hello', 'world']})
    assert isinstance(frame.a.str.slice(0, 2), Series)
def test_string_equality():
    """Element-wise == on string Series must match pandas (nulls → False)."""
    lhs = ['b', 'c', 'd', 'a', 'c']
    rhs = ['a', None, 'c', 'a', 'c']
    expect = pd.Series(lhs) == pd.Series(rhs)
    got = Series(lhs) == Series(rhs)
    # Device comparison produces null where an operand is null; fill with
    # False to line up with the pandas result.
    assert_eq(expect, got.fillna(False))
@pytest.mark.parametrize(
    'lhs',
    [
        ['Cbe', 'cbe', 'CbeD', 'Cb', 'ghi', 'Cb'],
        ['abc', 'xyz', 'a', 'ab', '123', '097']
    ]
)
@pytest.mark.parametrize(
    'rhs',
    [
        ['Cbe', 'cbe', 'CbeD', 'Cb', 'ghi', 'Cb'],
        ['a', 'a', 'a', 'a', 'A', 'z']
    ]
)
def test_string_binary_op_add(lhs, rhs):
    """String concatenation via ``+`` must match pandas element-wise."""
    assert_eq(pd.Series(lhs) + pd.Series(rhs),
              Series(lhs) + Series(rhs))
| 26.762675 | 78 | 0.551735 |
aceb7460ce1df64f39a535fe34de9a2423bfebc8 | 5,144 | py | Python | contrib/seeds/makeseeds.py | JTAG-Romney/gamecoin | 814089cc3e4bf524679566adc12c7a7b2771db6e | [
"MIT"
] | 2 | 2021-04-05T22:19:37.000Z | 2021-06-16T00:24:43.000Z | contrib/seeds/makeseeds.py | JTAG-Romney/gamecoin | 814089cc3e4bf524679566adc12c7a7b2771db6e | [
"MIT"
] | null | null | null | contrib/seeds/makeseeds.py | JTAG-Romney/gamecoin | 814089cc3e4bf524679566adc12c7a7b2771db6e | [
"MIT"
] | 1 | 2021-04-05T22:19:35.000Z | 2021-04-05T22:19:35.000Z | #!/usr/bin/env python
# Copyright (c) 2013-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512  # maximum number of seed entries to emit
MAX_SEEDS_PER_ASN=2  # cap on seeds sharing one autonomous system
MIN_BLOCKS = 500000  # minimum reported chain height for a node to qualify
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/GameCoin:0.8.4.2/)$")

def parseline(line):
    """Parse one whitespace-separated seeder status line into a stats dict.

    Expected columns (0-based): 0 address, 1 good flag, 2 last-success
    timestamp, 7 30-day uptime percentage (e.g. "100.00%"), 8 block
    height, 9 service flags (hex), 10 protocol version, 11 quoted
    user agent.

    Returns None for malformed lines, localhost addresses, and nodes the
    seeder marked bad.
    """
    sline = line.split()
    # sline[11] is read below, so 12 columns are required.  (The original
    # check ``< 11`` allowed 11-field lines through and crashed with an
    # IndexError when extracting the user agent.)
    if len(sline) < 12:
        return None
    m = PATTERN_IPV4.match(sline[0])
    sortkey = None
    ip = None
    if m is None:
        m = PATTERN_IPV6.match(sline[0])
        if m is None:
            m = PATTERN_ONION.match(sline[0])
            if m is None:
                return None
            else:
                net = 'onion'
                ipstr = sortkey = m.group(1)
                port = int(m.group(2))
        else:
            net = 'ipv6'
            if m.group(1) in ['::']: # Not interested in localhost
                return None
            ipstr = m.group(1)
            sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
            port = int(m.group(2))
    else:
        # Do IPv4 sanity check and pack the dotted quad into one integer.
        ip = 0
        for i in range(0,4):
            if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
                return None
            ip = ip + (int(m.group(i+2)) << (8*(3-i)))
        if ip == 0:
            return None
        net = 'ipv4'
        sortkey = ip
        ipstr = m.group(1)
        port = int(m.group(6))
    # Skip bad results.  sline[1] is a string, so the original comparison
    # against the integer 0 was always False and never skipped anything;
    # convert first.
    if int(sline[1]) == 0:
        return None
    # Extract uptime %.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent (strip the surrounding quotes).
    agent = sline[11][1:-1]
    # Extract service flags.
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'net': net,
        'ip': ipstr,
        'port': port,
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
        'sortkey': sortkey,
    }
def filtermultiport(ips):
    '''Filter out hosts with more nodes per IP'''
    # Group entries by their sort key (packed IPv4 int / address string);
    # any key that appears more than once is dropped entirely, since
    # multiple ports on one host suggest abuse.
    grouped = {}
    for entry in ips:
        grouped.setdefault(entry['sortkey'], []).append(entry)
    return [members[0] for members in grouped.values() if len(members) == 1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
    """Limit IPv4 seeds per autonomous system and in total.

    IPv4 entries are capped at ``max_per_asn`` per AS (looked up via the
    Cymru reverse-DNS service) and ``max_total`` overall; IPv6 and onion
    entries are appended unfiltered (no ASN lookup is attempted for them).
    """
    # Sift out ips by type
    ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
    ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
    ips_onion = [ip for ip in ips if ip['net'] == 'onion']
    # Filter IPv4 by ASN
    result = []
    asn_count = {}
    for ip in ips_ipv4:
        if len(result) == max_total:
            break
        try:
            asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
            if asn_count.get(asn, 0) == max_per_asn:
                continue
            asn_count[asn] = asn_count.get(asn, 0) + 1
            result.append(ip)
        # The original used a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; keep the lookup best-effort but
        # let real aborts propagate.
        except Exception:
            sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
    # TODO: filter IPv6 by ASN
    # Add back non-IPv4
    result.extend(ips_ipv6)
    result.extend(ips_onion)
    return result
def main():
    """Read the seeder dump from stdin, filter it, print seeds to stdout."""
    lines = sys.stdin.readlines()
    ips = [parseline(line) for line in lines]
    # Skip entries without a valid address.  (The original comment claimed
    # the opposite of what the filter does.)
    ips = [ip for ip in ips if ip is not None]
    # Enforce minimal number of blocks.
    ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
    # Require service bit 1.
    ips = [ip for ip in ips if (ip['service'] & 1) == 1]
    # Require at least 50% 30-day uptime.
    ips = [ip for ip in ips if ip['uptime'] > 50]
    # Require a known and recent user agent.
    ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
    # Sort by availability (and use last success as tie breaker)
    ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
    # Filter out hosts with multiple bitcoin ports, these are likely abusive
    ips = filtermultiport(ips)
    # Look up ASNs and limit results, both per ASN and globally.
    ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
    # Sort the results by IP address (for deterministic output).
    ips.sort(key=lambda x: (x['net'], x['sortkey']))
    for ip in ips:
        # print() calls (parenthesized single argument) work under both
        # Python 2 and 3; the original ``print x`` statements were
        # Python-2-only and made the file a SyntaxError on Python 3.
        if ip['net'] == 'ipv6':
            print('[%s]:%i' % (ip['ip'], ip['port']))
        else:
            print('%s:%i' % (ip['ip'], ip['port']))

if __name__ == '__main__':
    main()
| 31.950311 | 186 | 0.563764 |
aceb75e2f27c8ee56a39366f067492c957538b54 | 1,317 | py | Python | Tests/dice_roller_calculator.py | tim0901/Dice-Roller-Calculator | 66d3bc325b0097d7cfd5c5f6cf8fc435d24b05da | [
"MIT"
] | null | null | null | Tests/dice_roller_calculator.py | tim0901/Dice-Roller-Calculator | 66d3bc325b0097d7cfd5c5f6cf8fc435d24b05da | [
"MIT"
] | null | null | null | Tests/dice_roller_calculator.py | tim0901/Dice-Roller-Calculator | 66d3bc325b0097d7cfd5c5f6cf8fc435d24b05da | [
"MIT"
] | null | null | null | import ctypes
from ctypes import cdll
import os
# Load the native dice-roller library.  Windows needs winmode=0 so that
# standard DLL search paths are used when resolving dependencies.
if os.name == 'nt':
    roller = ctypes.CDLL('./dice_roller_calculator1.0.dll', winmode=0)
else:
    roller = ctypes.CDLL('./dice_roller_calculator1.0.so')
class Roller(object):
    """Thin ctypes wrapper around the native dice-roller lexer."""

    def __init__(self):
        # Opaque handle to the native Lexer instance.
        self.obj = roller.Lexer_new()

    def _lex(self, in_string, seed):
        """Run the native lexer on *in_string* with the given RNG seed.

        Returns a (formatted_output, total) tuple.  Extracted from the
        previously duplicated bodies of Roll and Roll_Test, which differed
        only in the seed argument.
        """
        total = ctypes.c_double(0.0)
        # Scratch buffer the native side writes its formatted output into.
        allocation = ctypes.create_string_buffer(128 * 1024)
        roller.Lexer_Lex.restype = ctypes.c_char_p
        out_string = roller.Lexer_Lex(self.obj, bytes(in_string, encoding='utf8'), allocation, 128 * 1024, ctypes.byref(total), ctypes.c_double(seed))
        return (out_string.decode('utf8'), total.value)

    # Evaluate an expression
    def Roll(self, in_string):
        return self._lex(in_string, 0.0)

    # This function overrides the random number generator seed for testing purposes.
    # Do not use in production - you will not get random numbers.
    def Roll_Test(self, in_string):
        return self._lex(in_string, 1.0)
aceb76f9211e730cf94cb6e3d61c3bd5ab1718b4 | 515 | py | Python | tt_core/migrations/0016_tasksuggestion_last_vote.py | probablytom/tweaktoday | d721f078d6ed0c3b7b6435788522dda1ed48ea12 | [
"MIT"
] | 1 | 2020-04-18T22:24:11.000Z | 2020-04-18T22:24:11.000Z | tt_core/migrations/0016_tasksuggestion_last_vote.py | probablytom/tweaktoday | d721f078d6ed0c3b7b6435788522dda1ed48ea12 | [
"MIT"
] | null | null | null | tt_core/migrations/0016_tasksuggestion_last_vote.py | probablytom/tweaktoday | d721f078d6ed0c3b7b6435788522dda1ed48ea12 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-04-02 10:55
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: add ``last_vote`` to ``TaskSuggestion``."""

    dependencies = [
        ('tt_core', '0015_tasksuggestion_suggestion_time'),
    ]

    operations = [
        migrations.AddField(
            model_name='tasksuggestion',
            name='last_vote',
            # auto_now_add needs a one-off default for pre-existing rows;
            # preserve_default=False keeps that default out of the model state.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
    ]
aceb778ba079a3e6efcb2d7098894c8dd9eacf18 | 2,828 | py | Python | saleor/graphql/shipping/tests/mutations/test_shipping_zone_delete.py | eanknd/saleor | 08aa724176be00d7aaf654f14e9ae99dd4327f97 | [
"CC-BY-4.0"
] | 1,392 | 2021-10-06T15:54:28.000Z | 2022-03-31T20:50:55.000Z | saleor/graphql/shipping/tests/mutations/test_shipping_zone_delete.py | eanknd/saleor | 08aa724176be00d7aaf654f14e9ae99dd4327f97 | [
"CC-BY-4.0"
] | 888 | 2021-10-06T10:48:54.000Z | 2022-03-31T11:00:30.000Z | saleor/graphql/shipping/tests/mutations/test_shipping_zone_delete.py | eanknd/saleor | 08aa724176be00d7aaf654f14e9ae99dd4327f97 | [
"CC-BY-4.0"
] | 538 | 2021-10-07T16:21:27.000Z | 2022-03-31T22:58:57.000Z | from unittest import mock
import graphene
import pytest
from django.utils.functional import SimpleLazyObject
from freezegun import freeze_time
from .....webhook.event_types import WebhookEventAsyncType
from .....webhook.payloads import generate_meta, generate_requestor
from ....tests.utils import get_graphql_content
DELETE_SHIPPING_ZONE_MUTATION = """
mutation deleteShippingZone($id: ID!) {
shippingZoneDelete(id: $id) {
shippingZone {
id
name
}
errors {
field
code
message
}
}
}
"""
def test_delete_shipping_zone(
    staff_api_client, permission_manage_shipping, shipping_zone
):
    """Deleting a shipping zone removes it from the database."""
    # given
    variables = {
        "id": graphene.Node.to_global_id("ShippingZone", shipping_zone.pk)
    }

    # when
    response = staff_api_client.post_graphql(
        DELETE_SHIPPING_ZONE_MUTATION,
        variables,
        permissions=[permission_manage_shipping],
    )
    payload = get_graphql_content(response)["data"]["shippingZoneDelete"]

    # then
    assert payload["shippingZone"]["name"] == shipping_zone.name
    with pytest.raises(shipping_zone._meta.model.DoesNotExist):
        shipping_zone.refresh_from_db()
@freeze_time("2022-05-12 12:00:00")
@mock.patch("saleor.plugins.webhook.plugin.get_webhooks_for_event")
@mock.patch("saleor.plugins.webhook.plugin.trigger_webhooks_async")
def test_delete_shipping_zone_trigger_webhook(
    mocked_webhook_trigger,
    mocked_get_webhooks_for_event,
    any_webhook,
    staff_api_client,
    permission_manage_shipping,
    shipping_zone,
    settings,
):
    """Deleting a shipping zone must fire the SHIPPING_ZONE_DELETED webhook.

    Note: ``mock.patch`` decorators inject arguments bottom-up, so
    ``mocked_webhook_trigger`` patches ``trigger_webhooks_async`` and
    ``mocked_get_webhooks_for_event`` patches ``get_webhooks_for_event``.
    """
    # given: enable the webhook plugin and make the event resolve to a webhook
    mocked_get_webhooks_for_event.return_value = [any_webhook]
    settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]

    shipping_zone_id = shipping_zone.id
    variables = {"id": graphene.Node.to_global_id("ShippingZone", shipping_zone_id)}

    # when
    response = staff_api_client.post_graphql(
        DELETE_SHIPPING_ZONE_MUTATION,
        variables,
        permissions=[permission_manage_shipping],
    )
    content = get_graphql_content(response)
    data = content["data"]["shippingZoneDelete"]

    # then: the mutation succeeded and the async webhook was triggered with
    # the deleted zone's payload, event type, and the acting requestor
    assert content["data"]["shippingZoneDelete"]["shippingZone"]
    assert data["errors"] == []
    mocked_webhook_trigger.assert_called_once_with(
        {
            "id": data["shippingZone"]["id"],
            "meta": generate_meta(
                requestor_data=generate_requestor(
                    SimpleLazyObject(lambda: staff_api_client.user)
                )
            ),
        },
        WebhookEventAsyncType.SHIPPING_ZONE_DELETED,
        [any_webhook],
        shipping_zone,
        SimpleLazyObject(lambda: staff_api_client.user),
    )
aceb78581a56f1d9a65c0bf09b22d4c26a128107 | 650 | py | Python | arrays/sorted-squared-array.py | shenoyrohit/data-structures-python | 16164c7751bb5296655c5bcd774601b7b252451b | [
"Apache-2.0"
] | null | null | null | arrays/sorted-squared-array.py | shenoyrohit/data-structures-python | 16164c7751bb5296655c5bcd774601b7b252451b | [
"Apache-2.0"
] | null | null | null | arrays/sorted-squared-array.py | shenoyrohit/data-structures-python | 16164c7751bb5296655c5bcd774601b7b252451b | [
"Apache-2.0"
] | null | null | null | # Time: O(N logN) | Space: O(N)
def sortedSquaredArray(array):
squared = []
for index in range(len(array)):
squared.append(array[index] * array[index])
squared.sort()
return squared
# Time: O(N) | Space: O(N)
def sortedSquaredArrayLinear(array):
    """Return the sorted squares of an already-sorted array in one pass.

    Two pointers walk inward from both ends; whichever end has the larger
    absolute value is squared and written into the output from the back.
    """
    squared = [0 for _ in array]
    left = 0
    right = len(array) - 1
    for ind in reversed(range(len(array))):
        small = array[left]
        large = array[right]
        # BUG FIX: the original compared abs(small) > abs(right) — i.e.
        # against the *index* — which gave wrong results for arrays with
        # negative values; compare against the value ``large`` instead.
        if abs(small) > abs(large):
            squared[ind] = small * small
            left += 1
        else:
            squared[ind] = large * large
            right -= 1
    return squared
aceb7a67b275ad775c604c7fcf337a912a795ec6 | 30,616 | py | Python | app.py | amasotti/GreekText_Webapp | ed320991c3ecc29266129f034ccb895d5b8f6697 | [
"Apache-2.0"
] | null | null | null | app.py | amasotti/GreekText_Webapp | ed320991c3ecc29266129f034ccb895d5b8f6697 | [
"Apache-2.0"
] | null | null | null | app.py | amasotti/GreekText_Webapp | ed320991c3ecc29266129f034ccb895d5b8f6697 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'Antonio Masotti'
__version__ = '1.0'
__date__ = '01.04.2019'
'''
DOC: Hauptdatei des Programms.
-------------------------------
In dieser Datei werden alle benötigten Module (sowohl meine als auch diejenigen aus Flask und SQLAlchemy) importiert.
Wenn diese Datei direkt ausgeführt wird, entweder über Python (python app.py) oder über Flask (flask run), sorgt sie dafür,
dass die richtigen HTML-Seiten geladen werden und befüllt diese mit dem mittels Python generierten Inhalt.
'''
################ Importe ###############################################
# Python und Flask Pakete, inklusiv SQLAlchemy für die Steuerung der SQLite-Datenbank
from common.grund_einstellungen import app, db
from flask import url_for,render_template,request,redirect
import csv # zur Erzeugung von csv Listen der Ergebnisse
import os
from urllib.parse import urlencode
from pathlib import Path
###### Meine Module ######
from common.mischmasch import * # wie der Name schon sagt, enthält dieses Modul alle meine Funktionen, die nicht deutlicher zu klassifizieren waren.
from models.db_klassen import Werken, Autoren, Wortlist # Die SQLAlchemy-Klassen, die den 3 Tabellen der Datenbank entsprechen.
from common.formulare import * # Python Klassen, mit denen die Formulare auf den HTML-Seiten generiert werden.
import common.Autoren.autoren_zeilen as vorbereitung_autoren # Befehle und Inhalte für die Erzeugung der Tabelle "Autoren".
from models.tables import * # Python-Klassen, mit denen die HTML-Tabellen zur Visualisierung der heruntergeladenen Daten generiert werden.
########################################################################
# The DB was initialised by the import from common.grund_einstellungen.
# Now the prepared "Autoren" table just has to be created and populated.
# Create all tables declared on the SQLAlchemy models.
db.create_all()
# Populate the "Autoren" table with the predefined author rows.
vorbereitung_autoren.add_neue_autoren()
################################################################
################################################################
######################### ######################
######################### HAUPTPROGRAMM ######################
######################### ######################
################################################################
################################################################
# Routing zu den einzelnen Seiten #
# der Rest dieser Datei sammelt die Infos und Inhalte aus den verschiedenen Pythonfunktionen und bildet die URLs, die im Browser abgerufen werden.
# Das eigentliche Routing geschieht mithilfe von den Python-Dekoratoren (Funktionen, die als Argumenten Funktionen nehmen, siehe Dokumentation.)
# Ich habe hier zwei Adressen für die Homepage definiert: ohne Prefix und mit /home.html als Prefix
# die Funktion homepage macht nichts anderes als die entsprechenden HTML-Datei abzurufen.
# Das Abrufen an sich erfolgt mittels der Funktion render_template(). Diese verlangt mindestens ein Argument: die HTML-template, die visualisiert werden muss.
# Andere mögliche Argumente sind die Python Variabeln (Strings, Liste, Dictionaries usw...), die für spezifische Seiten benötigt werden könnten.
@app.route('/')
@app.route("/home", methods=[ "GET", "POST" ])
def startseite():
    """Landing page: renders the static start template, no dynamic content."""
    return render_template('startseite.html')
#############################################################################################
#############################################################################################
#############################################################################################
# Seite zur Visualisierung der heruntergeladenen Texte
@app.route('/zeig_text')
def zeige_text():
    """Display a downloaded text in the browser, fetching it first if no
    local copy exists yet.

    Query parameters:
        gewaehlter_text -- name of the work (used to build the local path)
        link_zum_werk   -- Perseus URL the text can be downloaded from
    """
    # Small notice to the user.
    flash_extern(message="Aktuell habe ich geschafft, nur die Visualisierung für die Ilias von Homer zu optimieren. Ich bitte um Verständnis.")
    gewaehlter_text = request.args.get('gewaehlter_text')
    link = request.args.get('link_zum_werk')
    # Check whether a local copy of the text already exists.
    path = Path('common/static/assets/' + gewaehlter_text + '/' + gewaehlter_text + '_vollständig.txt')
    if not path.exists():
        if link is None:
            # Without a download link the text cannot be fetched; send the
            # user to the list of available works.  (The original called
            # render_template() here without returning its result, fell
            # through, and crashed on the unbound ``link`` below.)
            flash_extern(message="Der gesuchten Text wurde noch nicht heruntergeladen...")
            return redirect(url_for('allewerke'))
        # get_texte downloads the text; aufmachen=False keeps it in the
        # browser instead of opening an external editor.
        get_texte(link, aufmachen=False)
    # text_formatter reads the local file and prepares it for the browser.
    # (Called once instead of twice as in the original.)
    text_name, inhalt = text_formatter(gewaehlter_text)
    return render_template('zeige_text.html', text_name=text_name, inhalt=inhalt)
#############################################################################################
#############################################################################################
#############################################################################################
# Kleine Funktion zur Auflistung der verfügbaren Autoren
@app.route("/autorenList")
def autorenList():
    """List all searchable Perseus authors, split over two columns."""
    namen = [ "Aeschines", "Aeschylus", "Andocides", "Anna Komnene", "Antiphon", "Apollodorus",
              "Apollonius Rhodius", "Appian", "Aretaeus", "Aristophanes", "Aristotle", "Bacchylides",
              "Callimachus", "Demades", "Demosthenes", "Dinarchus", "Diodorus Siculus", "Diogenes Laertius",
              "Epictetus", "Euclid", "Euripides", "Flavius Josephus", "Galen", "Gorgias", "Herodotus", "Hesiod",
              "Hippocrates", "Homer", "Hyperides", "Isaeus", "Isocrates", "Lycurgus", "Lysias", "NA", "Old Oligarch",
              "Pausanias", "Pindar", "Plato", "Plutarch", "Polybius", "Sophocles", "Strabo",
              "Theocritus", "Theophrastus", "Thucydides", "Xenophon" ]
    # Fixed query-string halves around the author name.
    prefix = "http://artflsrv02.uchicago.edu/cgi-bin/perseus/search3torth?dbname=GreekApr19&word=&OUTPUT=conc&ORTHMODE=ORG&CONJUNCT=PHRASE&DISTANCE=3&author="
    suffix = "&title=&POLESPAN=5&THMPRTLIMIT=1&KWSS=1&KWSSPRLIM=500&trsortorder=author%2C+title&editor=&pubdate=&language=&shrtcite=&genre=&sortorder=author%2C+title&dgdivhead=&dgdivtype=&dgsubdivwho=&dgsubdivn=&dgsubdivtag=&dgsubdivtype="
    links = [(prefix + name + suffix, name) for name in namen]
    # Split the list in half so the template can render two columns.
    mitte = len(links) // 2
    return render_template('autorenList.html', autors=links[:mitte], autors2=links[mitte:])
#############################################################################################
#############################################################################################
#############################################################################################
# Die Funktion "alleautoren" zeigt alle Autoren, die in der Datenbank gespeichert wurden.
# Show every author stored in the database as an HTML table.
@app.route('/alleautoren')
def alleautoren():
    """Render a table of all authors in the ``Autoren`` table."""
    # Equivalent of: SELECT * FROM Autoren;
    autoren = Autoren.query.all()
    # One Item_Autor instance per table row.
    items_list = [ ]
    for autor in autoren:
        item = Item_Autor(id=autor.id_autor, name=autor.name, link=autor.link)
        items_list.append(item)
    # Assemble the rows into the flask_table HTML table.
    tabelle = Autoren_Tabelle(items_list)
    return render_template('alleautoren.html', table=tabelle)
#############################################################################################
#############################################################################################
#############################################################################################
# Die folgende Seite/Funktion erzeugt eine Liste aller in der Datenbank gespeicherten Werke
@app.route('/allewerke')
def allewerke():
# Der folgende Befehl illustriert die typische Syntax von SQLAlchemy. Das ist eine Query für die Klasse (d.h. SQL-Tabelle) der Werke.
# Ohne SQLAlchemy, hätten wir eine SQL Query (Select * from Werken) und die Kursor und Execute Befehle benötigt.
# Mit SQLAlchemy reduziert sich das ganze auf eine kurze Zeile.
# Darüber hinaus funktioniert dieser Befehl mit beliebigen Datenbanken (PostGresSQL, MySQL, SQLite, MongoDB ecc..)
werke = Werken.query.all()
# Der folgende Code generiert die HTML-Tabelle
items_list = [ ]
for work in werke:
# Der Parameter "download_link" wird einen Link erzeugen, der uns erlaubt den gewünschten Text herunterzuladen
download_link = url_for('herunterladen', link_zum_werk = work.titel_link)
# Der Parameter "aufmachen_link" wird einen Link erzeugen, der uns erlaubt, den gewünschten Text in einem Editor auf dem Rechner zu öffnen.
aufmachen_link = url_for('zeige_text', gewaehlter_text = work.titel, link_zum_werk = work.titel_link, autor=Autoren.query.get(work.autor))
link_unsere_wortliste = url_for('wortliste', autor=Autoren.query.get(work.autor), wl=work.wortliste_link, werk=work.titel)
# Jedes item ist eine Instanz der Klasse "Item_Werk". Diese stellt wiederum eine Zeile der HTML-Tabelle dar.
item = Item_Werk(id=work.id_werk,
titel=work.titel,
autor=Autoren.query.get(work.autor),
autor_id=Autoren.query.get(work.autor).id_autor,
download = download_link,
aufmachen = aufmachen_link,
verbesserte_wortliste = link_unsere_wortliste
)
items_list.append(item)
# Alle Instanzen der Klasse Item_Werk (spricht alle Zeilen der Tabelle) wurden in einer Liste gespeichert (mit der Methode .append).
# Jetzt können wir die Tabelle befüllen und den entsprechenden HTML-Code generieren.
tabelle = Werk_Tabelle(items_list,table_id='werke')
# Klein Alert
flash_extern(message="Die Funktionen zum Herunterladen und zur Visualisierung der Texte können etwas Zeit brauchen (je nachdem wie groß das Werk ist).")
# Die HTML-Seite wird geladen
return render_template('allenwerke_table.html', table=tabelle)
#############################################################################################
#############################################################################################
#############################################################################################
# Die Funktion "herunterladen" steuert den Download von Texten aus Perseus. Sie bedient sich der Funktion get_texte aus mischmasch.py
@app.route('/herunterladen')
def herunterladen():
# Link des gewählten Werkes
link = request.args.get("link_zum_werk")
# Abruf der Funktion "get_texte".
# Achtung!: Die Funktion ist sehr langsam (bis 40-50 Minuten Laufzeit)
get_texte(link)
# Wenn der Download fertig ist, wird die Datei geöffnet. Die Webapp lädt die Seite "allewerke".
return redirect(url_for('allewerke'))
#############################################################################################
#############################################################################################
#############################################################################################
# Eine einfache Funktion, um einen Werk aus der Datenbank zu löschen
@app.route('/loeschwerk', methods=[ "GET", "POST" ])
def loeschwerk():
    """Show a small form to delete a work by its primary key.

    On a valid submit the work is removed from the database and the user
    is redirected to the full list of works; otherwise the (empty) form
    is rendered.
    """
    form = LoeschWerk()
    if form.validate_on_submit():
        # Look up the work with the submitted id.
        werk = Werken.query.get(form.id_werk.data)
        if werk is None:
            # Guard: the original passed None straight to db.session.delete,
            # which raises for ids that do not exist.
            flash_extern(message="Kein Werk mit dieser ID vorhanden.")
            return redirect(url_for('loeschwerk'))
        # Delete the work and persist the change.
        db.session.delete(werk)
        db.session.commit()
        return redirect(url_for('allewerke'))
    # First visit (or invalid submit): render the delete form.
    return render_template('loeschwerk.html', form=form)
#############################################################################################
#############################################################################################
#############################################################################################
# Die nächste Funktion listet die Werke in der Datenbanktabelle "Werke" auf und bietet die Links zu den Wortlisten und Originaltexten
@app.route('/zeigWerken', methods=["GET", "POST"])
def zeigWerken():
    """List all works of a chosen author, storing new ones in the database.

    The author comes either from the ``autor`` query parameter or from the
    WaehleAutor form; until one is available the form page is returned.
    """
    # Author-selection form, result buffer, and a flag deciding whether the
    # form or the table is rendered.
    form = WaehleAutor()
    werk_tabelle = [ ]
    zeig_tabelle = False
    # Loop until an author name is known.  NOTE(review): every branch either
    # returns or sets zeig_tabelle=True, so this "loop" runs at most once.
    while zeig_tabelle == False:
        # Case 1: no ``autor`` query parameter — rely on the form.
        if request.args.get('autor') is None:
            # Case 2: the form was submitted — take the name and stop asking.
            if form.validate_on_submit():
                zeig_tabelle = True
                # Read the author name, then clear the field for a new search.
                autor_name = form.autor_name.data
                form.autor_name.data = ' '
            else:
                # Case 2.1: nothing submitted yet — render the empty form.
                flash_extern(message="das Aufladen der 'Verbesserte Wortliste' kann sehr lange dauern (bis 40 min). Jedes Wort wird im Internet gesucht, die nötige Infos werden extrapoliert und das ganze wird auf SQL-DB gespeichert (das ganze ist eine Schleife mit bis auf 50.000 Durchläufe!")
                zeig_tabelle = False
                return render_template('zeigWerke.html',form = form, werke_tabelle = werk_tabelle,zeig_tabelle=zeig_tabelle)
        # Case 3: the author arrived via the query string.
        if request.args.get('autor') is not None:
            autor_name = request.args.get('autor')
            zeig_tabelle = True
    # Fetch all works of the author (get_works from mischmasch.py returns
    # tuples: (title, text link, wordlist link)).
    werke_list = get_works(autor_name)
    werk_items = []
    # For each work: insert it into the database unless already present.
    for werk in werke_list:
        check = Werken.query.filter_by(titel_link = werk[1]).first()
        if check:
            print("Werk schon vorhanden in der Datenbank")
        else:
            autor_id = Autoren.query.filter_by(name = autor_name).first().id_autor
            item = Werken(autor = autor_id, titel = werk[0],titel_link=werk[1],wortliste_link=werk[2])
            db.session.add(item)
            db.session.commit()
        # Link to this application's enriched word list for the work.
        link_unsere_wortliste = url_for('wortliste', autor = autor_name, wl = werk[2], werk = werk[0])
        # One Item_PerseusWerk instance per HTML table row.
        item = Item_PerseusWerk(titel=werk[0],
                                autor = autor_name,
                                link_text=werk[1],
                                link_wortliste=werk[2],
                                link_unsere_wortliste=link_unsere_wortliste
                                )
        werk_items.append(item)
    # Assemble the collected rows into the HTML table and render it.
    werk_tabelle = Werke_aus_Perseus(werk_items,table_id='werkePerseus')
    return render_template('zeigWerke.html',form = form, werke_tabelle = werk_tabelle,zeig_tabelle=zeig_tabelle)
#############################################################################################
#############################################################################################
#############################################################################################
# Die folgende Funktion zeigt eine Tabelle mit allen gespeicherten bzw. gesuchten Wortformen.
@app.route('/wortliste', methods=[ "GET", "POST" ])
def wortliste():
    """Show (and persist) every analysed word form of one work.

    Query parameters: ``wl`` (Perseus wordlist URL), ``autor`` (author
    name), ``werk`` (work title).
    """
    wl = request.args.get('wl')
    autor = request.args.get('autor')
    werk = request.args.get('werk')
    # search_infos returns one tuple per word:
    # (word form, lemma, translation, morphological analysis).
    worter = search_infos(link=wl)
    for x in worter:
        # Skip word forms already stored.  NOTE(review): the filter uses
        # werk=x[3], but x[3] is the morphological analysis, not the work
        # title — presumably this should be werk=werk; verify.
        zeile = Wortlist.query.filter_by(wortform= x[0],werk=x[3]).first()
        if zeile:
            print(f"{x[0]} already thereeee!")
        else:
            # Store the new word form with all its attributes.
            zeile = Wortlist(autor = autor, wortform = x[0], lemma = x[1], übersetzung = x[2], morphobestimmung = x[3],werk = werk)
            db.session.add(zeile)
            db.session.commit()
    # Build the HTML table of all analysed word forms.
    tabelle_items = []
    for wortform in worter:
        tabelle_zeile = Wortlist_Item(wortform=wortform[0], lemma=wortform[1], übersetzung=wortform[2],morphobestimmung=wortform[3],autor=autor, werk=werk)
        tabelle_items.append(tabelle_zeile)
    wort_tabelle = WortlisteTabelle(tabelle_items,table_id="wortliste")
    return render_template('wortliste.html',tabelle=wort_tabelle, werk = werk, autore = autor)
#############################################################################################
#############################################################################################
#############################################################################################
# Die folgende Funktion lädt eine Suchseite, über die es möglich ist, gezielt nach morphologischen Kategorien zu suchen.
# die Suche nutzt die Spalte "morphologische Bestimmungen" in der Tabelle "Wortformen" in der Datenbank
@app.route('/sqlForm', methods=[ "GET", "POST" ])
def sqlForm():
    """Show the morphology search form; on submit, redirect to the result page."""
    search_form = SQLQuery()
    if not search_form.validate_on_submit():
        # No (valid) submission yet -> just render the form.
        return render_template('sqlForm.html', form=search_form)
    # Forward the requested morphological category as the 'query' parameter.
    return redirect(url_for('gesuchteforme', query=search_form.morpho.data))
#############################################################################################
#############################################################################################
#############################################################################################
# Die folgende Funktion findet und visualisiert alle Wortformen in der Datenbank, die der Query entsprechen
# oder alle Wortformen in der DB, wenn keine Query vorliegt.
@app.route('/gesuchteforme', methods=[ "GET", "POST" ])
def gesuchteforme():
    """List every stored word form, or only those matching the 'query' parameter."""
    query = request.args.get('query')
    if query is None:
        # No filter supplied: show the whole table; the template still
        # receives a (blank) query string.
        wortformen = Wortlist.query.all()
        query = " "
    else:
        wortformen = Wortlist.query.filter(
            Wortlist.morphobestimmung.contains(query)).all()
    # Convert each database row into a row object for the HTML table.
    tabelle_items = [
        Wortlist_Item(wortform=zeile.wortform,
                      lemma=zeile.lemma,
                      übersetzung=zeile.uebersetzung,
                      morphobestimmung=zeile.morphobestimmung,
                      autor=zeile.autor,
                      werk=zeile.werk)
        for zeile in wortformen
    ]
    wort_tabelle = WortlisteTabelle(tabelle_items, table_id="wortliste")
    return render_template('gesuchteforme.html', wort_tabelle=wort_tabelle, query=query)
#############################################################################################
#############################################################################################
#############################################################################################
# die Funktion bereinigt die Tabelle "Wortformen" in der Datenbank.
# Alle dort gespeicherten Informationen werden gelöscht
@app.route("/loeschFormen", methods=[ "GET", "POST" ])
def loeschFormen():
db.session.query(Wortlist).delete()
db.session.commit()
flash_extern(message="Die Tabelle mit den Wortformen wurde erfolgreich gelöscht")
return render_template('startseite.html')
#############################################################################################
#############################################################################################
#############################################################################################
# Funktion für den Download von Wortformen als csv Datei
@app.route('/csv_herunterladen', methods=[ "GET", "POST" ])
def csv_herunterladen():
    """Export the stored word forms (optionally filtered by 'query') as CSV.

    The rows are written to common/static/assets/lista.csv and the file is
    then opened with the operating system's default handler.
    """
    # With a 'query' parameter only matching word forms are exported,
    # otherwise the complete table is dumped.
    query = request.args.get('query')
    if query is None:
        wortformen = Wortlist.query.all()
    else:
        wortformen = Wortlist.query.filter(
            Wortlist.morphobestimmung.contains(query)).all()
        print(query)
    # Remember the current working directory so it can be restored later.
    HAUPTVERZEICHNIS = Path.cwd()
    # Target path of the CSV file inside the static assets folder.
    path_csv = HAUPTVERZEICHNIS / "common" / "static" / "assets" / "lista.csv"
    # 'with' guarantees the file is closed even if writing fails.
    # newline="" is the documented way to open files for csv.writer
    # (prevents blank lines between rows on Windows).
    with open(str(path_csv.resolve()), "w", encoding="UTF-8", newline="") as export:
        out = csv.writer(export)
        out.writerow(['id', 'autor', 'wortform', 'lemma', 'übersetzung', 'morpho'])
        for zeile in wortformen:
            out.writerow([zeile.id_wort, zeile.autor, zeile.wortform, zeile.lemma,
                          zeile.uebersetzung, zeile.morphobestimmung])
    # NOTE(review): os.system("lista.csv") opens the file only on Windows;
    # on other platforms this is a no-op/error -- confirm deployment target.
    verzeichnis_csv = str(path_csv.parents[0].resolve())
    os.chdir(verzeichnis_csv)
    os.system("lista.csv")
    # Switch back to the original working directory.
    os.chdir(str(HAUPTVERZEICHNIS))
    flash_extern("Die Liste wurde erfolgreich heruntergeladen")
    return render_template('startseite.html')
#############################################################################################
#############################################################################################
#############################################################################################
# Funktion für den Download der gesuchten bzw. gespeicherten Wortformen in einer pdf Datei
# Die Logik ist dieselbe wie bei der Funktion "csv_herunterladen";
@app.route('/TeX_herunterladen')
def TeX_herunterladen():
    """Export the stored/matching word forms as a compiled LaTeX/PDF document.

    Works like csv_herunterladen: an optional 'query' URL parameter restricts
    the export to word forms whose morphological description contains it.
    """
    AKTUELLE_VERZEICHNIS = Path.cwd()
    # Read the query once up front. The original code assigned 'query' only
    # inside the else-branch, which raised UnboundLocalError at the
    # 'if query is None' check whenever no query parameter was given.
    query = request.args.get('query')
    if query is None:
        wortformen = Wortlist.query.all()
    else:
        wortformen = Wortlist.query.filter(
            Wortlist.morphobestimmung.contains(query)).all()
    # Section title for the LaTeX file. Backslashes are escaped explicitly
    # ('\L' is an invalid escape sequence and a future SyntaxError).
    if query is None:
        query_tex = ''' {\\Large Query: alle Wortformen in der Datenbank } '''
    else:
        query_tex = "{\\Large Query: " + query + " }\n\n"
    # Build the body of the TeX file: one \item per word form.
    inhalt_tex = ""
    for form in wortformen:
        wf = form.wortform.replace("_", " ")
        lemma = form.lemma.replace("_", " ")
        uebersetzung = form.uebersetzung.replace("_", " ")
        mb = form.morphobestimmung.replace("_", " ")
        inhalt_tex = inhalt_tex + " " + "\\item \\eintrag{" + wf + "}{" + lemma + "}{" + uebersetzung + "}{" + mb + "}\n"
    # Wrap all entries in an itemize environment.
    inhalt_tex = "\\begin{itemize}\n " + inhalt_tex + " \\end{itemize}"
    # Write the generated fragment next to the LaTeX template.
    path_tex = Path.cwd() / "common" / "static" / "tex" / "Tex_formen.tex"
    with open(path_tex, "w", encoding="UTF-8") as tex_output:
        tex_output.write(query_tex)
        tex_output.write(inhalt_tex)
    # Compile the template (which includes the fragment) with XeLaTeX ...
    os.chdir(str(path_tex.parents[0].resolve()))
    befehl = "xelatex Vorlage_wortformen.tex"
    os.system(befehl)
    # ... and open the resulting PDF (Windows only; see csv_herunterladen).
    os.system("Vorlage_wortformen.pdf")
    os.chdir(str(AKTUELLE_VERZEICHNIS.resolve()))
    flash_extern(message="Eine pdf wurde erzeugt!")
    return render_template("startseite.html")
#############################################################################################
#############################################################################################
#############################################################################################
# Kleine Suchmaske auf der Homepage. Diese Fnktion liest das angegebene Wort im Formular und öffnet einen Link zu dem Perseus Greek Word Study Tool.
@app.route('/redirecting')
def redirect_zu_perseus():
    """Redirect the search-box input to the Perseus Greek Word Study Tool.

    Reads the user's input from the small form on the start page (via the
    request object) and opens the matching Perseus morphology page.
    """
    wortform = request.args.get('wortform')
    perseus_url = ("http://www.perseus.tufts.edu/hopper/morph?l="
                   + wortform + "&la=greek")
    return redirect(perseus_url)
#############################################################################################
#############################################################################################
#############################################################################################
# Technische Infos über diese WebApp
@app.route('/technisches')
def technisches():
    """Render the page with technical information about this web app."""
    return render_template('technisches.html')
#############################################################################################
#############################################################################################
#############################################################################################
# Routing zu einer festgelegten Seite, die immer abgerufen werden soll, wenn der Benutzer eine nicht-vorhandene Seite besuchen möchte.
@app.errorhandler(500) # the page exists, but a Python handler raised an error
@app.errorhandler(404) # the page does not exist
def keine_seite(e):
    """Fallback page for 404/500 errors.

    Flask calls this automatically when the user requests a page that does
    not exist (or when a handler fails). Try e.g. localhost:5000/djaklsjdkas
    while running the app locally (localhost:5000/home or 127.0.0.1:5000/home).

    :param e: the error object passed in by Flask.
    :return: the rendered error page together with a 404 status code.
    """
    return render_template('error_seite.html'), 404
#############################################################################################
#############################################################################################
#############################################################################################
#################################### STARTE DIE APP ##########################################
# Mit dem folgenden Befehl starten die App.
# Die Bedingung ist, dass die App nur startet, wenn man diese Datei mit Python ausführt.
if __name__ == "__main__":
app.jinja_env.cache = {}
# Setzen Sie debug auf "False", um die Debug-Statements auf der Konsole auszuschalten.
app.run(debug=True)
################################### ENDE #####################################################
| 53.901408 | 382 | 0.624053 |
aceb7a9ef2afd3c1472a71d88a0574be54fe332f | 9,660 | py | Python | part3/SpaceSciencePython_part3.py | ajpmaclean/SpaceScienceTutorial | ceddc5b3c3aa035ddc6c12b987a95c06b02ffe41 | [
"MIT"
] | 167 | 2020-04-21T21:04:14.000Z | 2022-03-29T15:07:52.000Z | part3/SpaceSciencePython_part3.py | ajpmaclean/SpaceScienceTutorial | ceddc5b3c3aa035ddc6c12b987a95c06b02ffe41 | [
"MIT"
] | 11 | 2020-05-19T18:49:24.000Z | 2021-06-08T01:51:29.000Z | part3/SpaceSciencePython_part3.py | ajpmaclean/SpaceScienceTutorial | ceddc5b3c3aa035ddc6c12b987a95c06b02ffe41 | [
"MIT"
] | 41 | 2020-05-03T06:13:17.000Z | 2022-02-12T17:32:51.000Z | # Import modules
import datetime
import spiceypy
import numpy as np
import pandas as pd
# Load the SPICE kernels via a meta file
spiceypy.furnsh('kernel_meta.txt')
# We want to compute miscellaneous positions w.r.t. the centre of
# the Sun for a certain time interval.
# First, we set an initial time in UTC.
INIT_TIME_UTC = datetime.datetime(year=2000, month=1, day=1, \
                                  hour=0, minute=0, second=0)
# Add a number of days; you can play around with the datetime variables; but
# leave it as it is for the first try, since other computations and comments
# are based on this value.
DELTA_DAYS = 10000
END_TIME_UTC = INIT_TIME_UTC + datetime.timedelta(days=DELTA_DAYS)
# Convert the datetime objects now to strings
INIT_TIME_UTC_STR = INIT_TIME_UTC.strftime('%Y-%m-%dT%H:%M:%S')
END_TIME_UTC_STR = END_TIME_UTC.strftime('%Y-%m-%dT%H:%M:%S')
# Print the starting and end times
print('Init time in UTC: %s' % INIT_TIME_UTC_STR)
print('End time in UTC: %s\n' % END_TIME_UTC_STR)
# Convert to Ephemeris Time (ET) using the SPICE function utc2et
INIT_TIME_ET = spiceypy.utc2et(INIT_TIME_UTC_STR)
END_TIME_ET = spiceypy.utc2et(END_TIME_UTC_STR)
# Create a numpy array that covers a time interval in delta = 1 day step
# NOTE(review): linspace with DELTA_DAYS samples yields DELTA_DAYS - 1
# intervals, i.e. a step slightly larger than one day -- confirm intended.
TIME_INTERVAL_ET = np.linspace(INIT_TIME_ET, END_TIME_ET, DELTA_DAYS)
#%%
# Using km is not intuitive. AU would scale it too severely. Since we compute
# the Solar System Barycentre (SSB) w.r.t. the Sun; and since we expect it to
# be close to the Sun, we scale the x, y, z component w.r.t the radius of the
# Sun. We extract the Sun radii (x, y, z components of the Sun ellipsoid) and
# use the x component
# (NAIF ID 10 is the Sun; bodvcd returns the dimension and the radii array.)
_, RADII_SUN = spiceypy.bodvcd(bodyid=10, item='RADII', maxn=3)
RADIUS_SUN = RADII_SUN[0]
#%%
# All our computed parameters, positions etc. shall be stored in a pandas
# dataframe. First, we create an empty one
SOLAR_SYSTEM_DF = pd.DataFrame()
# Set the column ET that stores all ETs
SOLAR_SYSTEM_DF.loc[:, 'ET'] = TIME_INTERVAL_ET
# The column UTC transforms all ETs back to a UTC format. The function
# spicepy.et2datetime is NOT an official part of SPICE (there you can find
# et2utc).
# However this function returns immediately a datetime object
SOLAR_SYSTEM_DF.loc[:, 'UTC'] = \
    SOLAR_SYSTEM_DF['ET'].apply(lambda x: spiceypy.et2datetime(et=x).date())
# Here, the position of the SSB, as seen from the Sun, is computed. Since
# spicepy.spkgps returns the position and the corresponding light time,
# we add the index [0] to obtain only the position array
# (targ=0 is the SSB, obs=10 the Sun -- NAIF ID codes).
SOLAR_SYSTEM_DF.loc[:, 'POS_SSB_WRT_SUN'] = \
    SOLAR_SYSTEM_DF['ET'].apply(lambda x: spiceypy.spkgps(targ=0, \
                                                          et=x, \
                                                          ref='ECLIPJ2000', \
                                                          obs=10)[0])
# Now the SSB position vector is scaled with the Sun's radius
SOLAR_SYSTEM_DF.loc[:, 'POS_SSB_WRT_SUN_SCALED'] = \
    SOLAR_SYSTEM_DF['POS_SSB_WRT_SUN'].apply(lambda x: x / RADIUS_SUN)
# Finally the distance between the Sun and the SSB is computed. The length
# (norm) of the vector needs to be determined with the SPICE function vnorm().
# numpy provides an identical function in: numpy.linalg.norm()
SOLAR_SYSTEM_DF.loc[:, 'SSB_WRT_SUN_SCALED_DIST'] = \
    SOLAR_SYSTEM_DF['POS_SSB_WRT_SUN_SCALED'].apply(lambda x: \
                                                    spiceypy.vnorm(x))
#%%
# Import the matplotlib library
from matplotlib import pyplot as plt
# Set a figure
FIG, AX = plt.subplots(figsize=(12, 8))
# Plot the distance between the Sun and the SSB
AX.plot(SOLAR_SYSTEM_DF['UTC'], SOLAR_SYSTEM_DF['SSB_WRT_SUN_SCALED_DIST'], \
        color='tab:blue')
# Set a label for the x and y axis and color the y ticks accordingly
AX.set_xlabel('Date in UTC')
AX.set_ylabel('SSB Dist. in Sun Radii', color='tab:blue')
AX.tick_params(axis='y', labelcolor='tab:blue')
# Set limits for the x and y axis
AX.set_xlim(min(SOLAR_SYSTEM_DF['UTC']), max(SOLAR_SYSTEM_DF['UTC']))
AX.set_ylim(0, 2)
# Set a grid
AX.grid(axis='x', linestyle='dashed', alpha=0.5)
# Saving the figure in high quality (300 dpi)
plt.savefig('SSB2SUN_DISTANCE.png', dpi=300)
# Additionally, we want to compute the position vector of all outer gas
# giants. We define a dictionary with a planet's barycentre abbreviation and
# corresponding NAIF ID code
NAIF_ID_DICT = {'JUP': 5, \
'SAT': 6, \
'URA': 7, \
'NEP': 8}
# Iterate through the dictionary and compute the position vector for each
# planet as seen from the Sun. Further, compute the phase angle between the
# SSB and the planet as seen from the Sun
for planets_name_key in NAIF_ID_DICT:
# Define the pandas dataframe column for each planet (position and phase
# angle). Each '%s' substring is replaced with the planets name as
# indicated after the "%"
planet_pos_col = 'POS_%s_WRT_SUN' % planets_name_key
planet_angle_col = 'PHASE_ANGLE_SUN_%s2SSB' % planets_name_key
# Get the corresponding NAIF ID of the planet's barycentre
planet_id = NAIF_ID_DICT[planets_name_key]
# Compute the planet's position as seen from the Sun.
SOLAR_SYSTEM_DF.loc[:, planet_pos_col] = \
SOLAR_SYSTEM_DF['ET'].apply(lambda x: \
spiceypy.spkgps(targ=planet_id, \
et=x, \
ref='ECLIPJ2000', \
obs=10)[0])
# Compute the phase angle between the SSB and the planet as seen from the
# Sun. Since we apply a lambda function on all columns we need to set
# axis=1, otherwise we get an error!
SOLAR_SYSTEM_DF.loc[:, planet_angle_col] = \
SOLAR_SYSTEM_DF.apply(lambda x: \
np.degrees(spiceypy.vsep(x[planet_pos_col], \
x['POS_SSB_WRT_SUN'])),\
axis=1)
#%%
# Let's verify the function vsep and compute the phase angle between the SSB
# and Jupiter as seen from the Sun (we use the very first array entries).
# Define a lambda function the computes the angle between two vectors
COMP_ANGLE = lambda vec1, vec2: np.arccos(np.dot(vec1, vec2) \
/ (np.linalg.norm(vec1) \
* np.linalg.norm(vec2)))
print('Phase angle between the SSB and Jupiter as seen from the Sun (first ' \
'array entry, lambda function): %s' % \
np.degrees(COMP_ANGLE(SOLAR_SYSTEM_DF['POS_SSB_WRT_SUN'].iloc[0], \
SOLAR_SYSTEM_DF['POS_JUP_WRT_SUN'].iloc[0])))
print('Phase angle between the SSB and Jupiter as seen from the Sun (first ' \
'array entry, SPICE vsep function): %s' % \
np.degrees(spiceypy.vsep(SOLAR_SYSTEM_DF['POS_SSB_WRT_SUN'].iloc[0], \
SOLAR_SYSTEM_DF['POS_JUP_WRT_SUN'].iloc[0])))
#%%
# Create a 4 axes plot where all 4 plots are vertically aligned and share the
# x axis (date in UTC)
FIG, (AX1, AX2, AX3, AX4) = plt.subplots(4, 1, sharex=True, figsize=(8, 20))
# We iterate through the planets (from Jupiter to Neptune) and plot the
# phase angle between the planet and the SSB, as seen from the Sun, in each
# axis individually
for ax_f, planet_abr, planet_name in zip([AX1, AX2, AX3, AX4], \
['JUP', 'SAT', 'URA', 'NEP'], \
['Jupiter', 'Saturn', 'Uranus', \
'Neptune']):
# First, we set the planet's name as the sub plot title (instead of
# setting a legend)
ax_f.set_title(planet_name, color='tab:orange')
# The distance between the SSB and the Sun is plotted.
ax_f.plot(SOLAR_SYSTEM_DF['UTC'], \
SOLAR_SYSTEM_DF['SSB_WRT_SUN_SCALED_DIST'], \
color='tab:blue')
# A y label is set and the color of labels and ticks are adjusted for
# better visibility
ax_f.set_ylabel('SSB Dist. in Sun Radii', color='tab:blue')
ax_f.tick_params(axis='y', labelcolor='tab:blue')
# Set x (based on the min and max date) and y limits (the SSB has varying
# distances between 0 and 2 Sun Radii)
ax_f.set_xlim(min(SOLAR_SYSTEM_DF['UTC']), max(SOLAR_SYSTEM_DF['UTC']))
ax_f.set_ylim(0, 2)
# We add now the phase angle values and copy the x axis for this purpose
ax_f_add = ax_f.twinx()
# Plot the phase angle between the SSB and planet as seen from the Sun
ax_f_add.plot(SOLAR_SYSTEM_DF['UTC'], \
SOLAR_SYSTEM_DF['PHASE_ANGLE_SUN_%s2SSB' % planet_abr], \
color='tab:orange', \
linestyle='-')
# Set the y label's name and color accordingly
ax_f_add.set_ylabel('Planet ph. ang. in deg', color='tab:orange')
ax_f_add.tick_params(axis='y', labelcolor='tab:orange')
# Invert the y axis and set the limits. We invert the axis so that a
# possible anti-correlation (large phase angle corresponds to a smaller
# distance between the Sun's centre and the SSB) becomes more obvious
ax_f_add.invert_yaxis()
ax_f_add.set_ylim(180, 0)
# Set a grid (only date)
ax_f.grid(axis='x', linestyle='dashed', alpha=0.5)
# Finally we set the x label ...
AX4.set_xlabel('Date in UTC')
# ... tight the figures a bit ...
FIG.tight_layout()
# ... reduce the distance between the axes ...
plt.subplots_adjust(hspace=0.2)
# ... and save the figure in high quality
plt.savefig('PLANETS_SUN_SSB_PHASE_ANGLE.png', dpi=300)
| 40.25 | 79 | 0.649896 |
aceb7acc41b21e5a6fc590e59b014a1d88b016c0 | 1,062 | py | Python | celery_proj/celery.py | Fansion/sharefun | 1e6bf742f5921d8dd464ba6074d6c18859118714 | [
"MIT"
] | 1 | 2015-01-17T03:17:48.000Z | 2015-01-17T03:17:48.000Z | celery_proj/celery.py | Fansion/sharefun | 1e6bf742f5921d8dd464ba6074d6c18859118714 | [
"MIT"
] | null | null | null | celery_proj/celery.py | Fansion/sharefun | 1e6bf742f5921d8dd464ba6074d6c18859118714 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from celery import Celery
from celery.schedules import crontab
from datetime import timedelta
# Celery application: broker and result backend both use the local Redis
# instance (db 0); task definitions live in celery_proj.tasks.
app = Celery('celery_proj',
             broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/0',
             include=['celery_proj.tasks'])
# Periodic task schedule; all crontab times are interpreted in the
# configured CELERY_TIMEZONE (Asia/Shanghai).
app.conf.update(
    CELERY_TIMEZONE='Asia/Shanghai',
    CELERYBEAT_SCHEDULE={
        # Run the crawler four times a day (00:00, 06:00, 12:00, 18:00).
        'crawller': {
            'task': 'celery_proj.tasks.crawller',
            #'schedule': timedelta(seconds=10)
            'schedule': crontab(hour='0,6,12,18', minute=0)
        },
        # Daily backup at midnight.
        'backup': {
            'task': 'celery_proj.tasks.backup',
            'schedule': crontab(hour='0', minute=0)
        },
        # Daily mail dispatch at 23:00.
        'send_mail': {
            'task': 'celery_proj.tasks.send_mail',
            'schedule': crontab(hour='23', minute=0)
        },
        # Sync with Douban at the top of every hour.
        'sync_with_douban': {
            'task': 'celery_proj.tasks.sync_with_douban',
            'schedule': crontab(minute=0)
        },
    }
)
# Allow starting a worker by executing this module directly.
if __name__ == '__main__':
    app.start()
| 27.230769 | 59 | 0.563089 |
aceb7b254277aa325ccca808177a48b91e6d1523 | 7,028 | py | Python | server/src/test/unit/voodoo/test_lock.py | zstars/weblabdeusto | 09bd9d93d483671bca67ee5c70a9c412eb5d352f | [
"BSD-2-Clause"
] | null | null | null | server/src/test/unit/voodoo/test_lock.py | zstars/weblabdeusto | 09bd9d93d483671bca67ee5c70a9c412eb5d352f | [
"BSD-2-Clause"
] | null | null | null | server/src/test/unit/voodoo/test_lock.py | zstars/weblabdeusto | 09bd9d93d483671bca67ee5c70a9c412eb5d352f | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
import unittest
import threading
import voodoo.lock as lock
# Timeout (seconds) used when an event is expected NOT to fire.
VERY_SHORT_TIMEOUT = 0.2
# Timeout (seconds) used when an event is expected to fire promptly.
SHORT_TIMEOUT = 1
class RWLockTestCase(unittest.TestCase):
    """Behavioural tests for voodoo.lock.RWLock.

    The tests use daemon helper threads plus threading.Event handshakes:
    waits with SHORT_TIMEOUT are expected to succeed, waits with
    VERY_SHORT_TIMEOUT are expected to elapse (i.e. the helper thread is
    expected to stay blocked on the lock).
    """
    def test_rw_lock_basic(self):
        """Uncontended acquire/release works for both lock views."""
        rwlock = lock.RWLock()
        rlock = rwlock.read_lock()
        wlock = rwlock.write_lock()
        rlock.acquire()
        rlock.release()
        wlock.acquire()
        wlock.release()
    def test_rw_lock_multiple_reads(self):
        """A second thread may hold the read lock concurrently."""
        rwlock = lock.RWLock()
        rlock = rwlock.read_lock()
        evt = threading.Event()
        rlock.acquire()
        class RLockThread(threading.Thread):
            def run(self):
                rlock.acquire()
                evt.set()
        thread = RLockThread()
        thread.setDaemon(True)
        thread.start()
        evt.wait(SHORT_TIMEOUT)
        self.assertTrue(evt.isSet())
        thread.join()
    def test_rw_lock_only_one_write(self):
        """A second writer blocks while the write lock is held."""
        rwlock = lock.RWLock()
        wlock = rwlock.write_lock()
        evt1 = threading.Event()
        evt2 = threading.Event()
        wlock.acquire()
        class RLockThread(threading.Thread):
            def run(self):
                evt1.set()
                wlock.acquire()
                evt2.set()
        thread = RLockThread()
        thread.setDaemon(True)
        thread.start()
        evt1.wait(SHORT_TIMEOUT)
        self.assertTrue(evt1.isSet())
        evt2.wait(VERY_SHORT_TIMEOUT)
        self.assertFalse(evt2.isSet())
        # Cleaning up
        wlock.release()
        thread.join()
    def test_rw_lock_reentrant_one_write(self):
        """The write lock is reentrant: a competing writer only proceeds
        after as many releases as acquires by the owner."""
        rwlock = lock.RWLock()
        wlock = rwlock.write_lock()
        evt1 = threading.Event()
        evt2 = threading.Event()
        wlock.acquire()
        class RLockThread(threading.Thread):
            def run(self):
                evt1.set()
                wlock.acquire()
                evt2.set()
        thread = RLockThread()
        thread.setDaemon(True)
        thread.start()
        evt1.wait(SHORT_TIMEOUT)
        self.assertTrue(evt1.isSet())
        evt2.wait(VERY_SHORT_TIMEOUT)
        self.assertFalse(evt2.isSet())
        # I don't have any problem
        wlock.acquire()
        # And I can release and the other thread is still there
        wlock.release()
        evt2.wait(VERY_SHORT_TIMEOUT)
        self.assertFalse(evt2.isSet())
        # The second time it works
        wlock.release()
        evt2.wait(SHORT_TIMEOUT)
        self.assertTrue(evt2.isSet())
        thread.join()
        # Everyone is now happy
    def test_rw_lock_reentrant_reading_while_write_locked(self):
        """The write-lock owner may also take the read lock."""
        rwlock = lock.RWLock()
        rlock = rwlock.read_lock()
        wlock = rwlock.write_lock()
        wlock.acquire()
        rlock.acquire()
        rlock.release()
        wlock.release()
        # Everyone is happy
    def test_rw_lock_no_write_when_reading(self):
        """A writer blocks while the read lock is held."""
        rwlock = lock.RWLock()
        rlock = rwlock.read_lock()
        wlock = rwlock.write_lock()
        evt1 = threading.Event()
        evt2 = threading.Event()
        rlock.acquire()
        class RLockThread(threading.Thread):
            def run(self):
                evt1.set()
                wlock.acquire()
                evt2.set()
        thread = RLockThread()
        thread.setDaemon(True)
        thread.start()
        evt1.wait(SHORT_TIMEOUT)
        self.assertTrue(evt1.isSet())
        evt2.wait(VERY_SHORT_TIMEOUT)
        self.assertFalse(evt2.isSet())
        # Cleaning
        rlock.release()
        thread.join()
    def test_rw_lock_no_read_when_writing(self):
        """A reader blocks while the write lock is held."""
        rwlock = lock.RWLock()
        rlock = rwlock.read_lock()
        wlock = rwlock.write_lock()
        evt1 = threading.Event()
        evt2 = threading.Event()
        wlock.acquire()
        class RLockThread(threading.Thread):
            def run(self):
                evt1.set()
                rlock.acquire()
                evt2.set()
        thread = RLockThread()
        thread.setDaemon(True)
        thread.start()
        evt1.wait(SHORT_TIMEOUT)
        self.assertTrue(evt1.isSet())
        evt2.wait(VERY_SHORT_TIMEOUT)
        self.assertFalse(evt2.isSet())
        # Cleaning
        wlock.release()
        thread.join()
    def test_rw_write_write(self):
        """The write lock can be re-taken after a full release."""
        rwlock = lock.RWLock()
        wlock = rwlock.write_lock()
        wlock.acquire()
        wlock.release()
        wlock.acquire()
        wlock.release()
    def test_rw_write_read(self):
        """The read lock can be taken once the write lock is released."""
        rwlock = lock.RWLock()
        rlock = rwlock.read_lock()
        wlock = rwlock.write_lock()
        wlock.acquire()
        wlock.release()
        rlock.acquire()
        rlock.release()
    def test_rw_lock_read_read_write(self):
        """A writer waits until every reader (two of them) has released."""
        rwlock = lock.RWLock()
        rlock = rwlock.read_lock()
        wlock = rwlock.write_lock()
        evt1r = threading.Event()
        evt2r = threading.Event()
        evt3r = threading.Event()
        evt4r = threading.Event()
        class RLockThread(threading.Thread):
            def run(self):
                evt1r.set()
                rlock.acquire()
                evt2r.set()
                evt3r.wait(SHORT_TIMEOUT * 2)
                rlock.release()
                evt4r.set()
        evt1w = threading.Event()
        evt2w = threading.Event()
        evt3w = threading.Event()
        class WLockThread(threading.Thread):
            def run(self):
                evt1w.set()
                evt2w.wait(SHORT_TIMEOUT * 2)
                wlock.acquire()
                evt3w.set()
        threadR = RLockThread()
        threadR.setDaemon(True)
        threadW = WLockThread()
        threadW.setDaemon(True)
        rlock.acquire()
        threadR.start()
        threadW.start()
        evt1r.wait(SHORT_TIMEOUT)
        self.assertTrue(evt1r.isSet())
        evt2r.wait(SHORT_TIMEOUT)
        self.assertTrue(evt2r.isSet())
        evt1w.wait(SHORT_TIMEOUT)
        self.assertTrue(evt1w.isSet())
        evt2w.set()
        evt3w.wait(VERY_SHORT_TIMEOUT)
        self.assertFalse(evt3w.isSet())
        # Everybody did an acquire
        # WLockThread is locked until
        # we do a couple of releases
        rlock.release()
        # Not yet...
        evt3w.wait(VERY_SHORT_TIMEOUT)
        self.assertFalse(evt3w.isSet())
        evt3r.set()
        evt4r.wait(VERY_SHORT_TIMEOUT)
        self.assertTrue(evt4r.isSet())
        # Now yes
        evt3w.wait(VERY_SHORT_TIMEOUT)
        self.assertTrue(evt3w.isSet())
def suite():
    # Build the suite from all test_* methods of RWLockTestCase.
    return unittest.makeSuite(RWLockTestCase)
if __name__ == '__main__':
    unittest.main()
| 24.921986 | 67 | 0.564741 |
aceb7b6faeb919840f7c3101679068fcee3242b0 | 4,192 | py | Python | python/demo/genotype.py | spiralgenetics/biograph | 33c78278ce673e885f38435384f9578bfbf9cdb8 | [
"BSD-2-Clause"
] | 16 | 2021-07-14T23:32:31.000Z | 2022-03-24T16:25:15.000Z | python/demo/genotype.py | spiralgenetics/biograph | 33c78278ce673e885f38435384f9578bfbf9cdb8 | [
"BSD-2-Clause"
] | 9 | 2021-07-20T20:39:47.000Z | 2021-09-16T20:57:59.000Z | python/demo/genotype.py | spiralgenetics/biograph | 33c78278ce673e885f38435384f9578bfbf9cdb8 | [
"BSD-2-Clause"
] | 9 | 2021-07-15T19:38:35.000Z | 2022-01-31T19:24:56.000Z | #!/usr/bin/python3
import argparse
from libspiral import *
parser = argparse.ArgumentParser(description='Genotype some folks.')
parser.add_argument('--ref', help='Which refernce to use')
parser.add_argument('--dels', help='The deletion list as a .bed file')
parser.add_argument('seqsets', metavar='seqset', nargs='+',
help='SEQSETs for each individual')
args = parser.parse_args()
print "Opening reference"
ref = reference(args.ref)
with open(args.dels, 'r') as content_file:
content = content_file.read()
dels = [x.split('\t') for x in content.split('\n')][:-1]
print "Loading SEQSETs"
seqsets = {}
read_len = 0
for f in args.seqsets:
seqsets[f] = seqset(f)
if read_len == 0:
read_len = seqsets[f].read_len
if read_len != seqsets[f].read_len:
raise "Mismatched read lengths"
min_overlap = int(.7*read_len)
def seek_dels(gb, d):
    # Assemble reads around one candidate deletion `d` (a BED row:
    # [contig, start, end, ...]) in seqset `gb` and return the structural
    # variants found in the first assembly result, or [] when assembly
    # produced nothing.
    print "Seeking", d
    # Search window of 3 read lengths on each side of the breakpoints.
    slop = int(3 * read_len)
    max_anchors = slop / 2
    max_steps = 10 * slop
    readset = gb.all_readset()
    left = int(d[1])
    right = int(d[2])
    # NOTE(review): `middle` is computed but never used.
    middle = int((left + right) / 2)
    left_range = ref.make_range(d[0], left - slop, left + slop, False)
    right_range = ref.make_range(d[0], right - slop, right + slop, False)
    left_anchor = find_anchors(gb, left_range, True, min_overlap, max_anchors, readset)
    right_anchor = find_anchors(gb, right_range, False, min_overlap, max_anchors, readset)
    results = assemble(left_anchor, right_anchor, min_overlap, max_steps, True, readset)
    if len(results) == 0:
        return []
    return [x for x in results[0] if x.is_structural]
# One entry per sample: the list of variants found for each deletion.
del_vars = []
for fn in args.seqsets:
    print "Computing variation in", fn
    del_vars += [[seek_dels(seqsets[fn], d) for d in dels]]
print "Zippering and flattening"
# Regroup per deletion (across samples) and flatten each group.
zip_vars = zip(*del_vars)
flat_vars = [[x for y in z for x in y] for z in zip_vars]
def normalize_asm(var):
    # Return the assembly sequence around the variant, padded by
    # `min_overlap` bases on each side, or "" when any sanity check fails
    # (too close to an assembly edge, wrong strand orientation, or
    # non-increasing reference coordinates).
    if var.assembly_begin < min_overlap:
        return ""
    if var.assembly_end + min_overlap > len(var.assembly_sequence):
        return ""
    if not var.left_forward or not var.right_forward:
        return ""
    if var.left_position >= var.right_position:
        return ""
    return str(var.assembly_sequence)[var.assembly_begin - min_overlap : var.assembly_end + min_overlap]
def normalize_list(l):
    # Collapse the variants found for one deletion into an allele list:
    # [reference_range, [alt_sequence, variant], ...]; returns [] when no
    # usable allele was assembled.
    if len(l) == 0:
        return []
    if len(l) > 2:
        return [] # This is questionable
    alts = {}
    ref_contig = l[0].left_contig
    # NOTE(review): these sentinels rely on Python 2 mixed-type ordering:
    # max(None, x) == x and min('X', int) == int. Under Python 3 both
    # comparisons raise TypeError -- this function is Python 2 only.
    ref_start = None
    ref_end = 'X'
    for v in l:
        vf = v.flip()
        asm = normalize_asm(vf)
        if asm == "":
            continue
        # Deduplicate alternate alleles by their padded sequence.
        alts[asm] = vf
        ref_start = max(ref_start, vf.left_position)
        ref_end = min(ref_end, vf.right_position)
    if ref_start == None:
        return []
    rr = ref.make_range(ref_contig, ref_start, ref_end, True)
    alleles = [rr]
    for k, v in alts.iteritems():
        alleles += [[sequence(k), v]]
    return alleles
print "Normalizing"
var_sets = [normalize_list(x) for x in flat_vars]
def genotype(gb, alls):
    # Call a genotype for one sample: returns a two-element list of allele
    # indices (0 = reference), [0, 0] when no alleles were assembled, or
    # None when the coverage pattern is not a clean one- or two-allele call.
    if len(alls) == 0:
        return [0,0]
    ref_cov = gb.coverage(alls[0], gb.all_readset())
    # Mean reference coverage (integer division under Python 2).
    alleles = [sum(ref_cov)/len(ref_cov)]
    for x in alls[1:]:
        alt_cov = gb.seq_coverage(x[0], gb.all_readset())
        # Minimum coverage after trimming 50 bases on each flank.
        # NOTE(review): assumes the alt sequence is longer than 100 bases
        # -- otherwise the slice is empty and min() raises. Confirm.
        alleles += [min(alt_cov[50:-50])]
    # One bit per allele: covered or not.
    bins = [int(bool(x)) for x in alleles]
    if sum(bins) == 0 or sum(bins) > 2:
        return None
    both = []
    for i in range(len(bins)):
        if bins[i]:
            both += [i]
    # A single covered allele means a homozygous call.
    if len(both) == 1:
        both = [both[0], both[0]]
    return both
print "Genotyping"
for i in range(len(var_sets)):
vs = var_sets[i]
d = dels[i]
line = ""
if len(vs) == 0:
line += "%s\t%d\t%d\t" % (d[0], int(d[1]), int(d[2]))
else:
v = vs[1][1]
line += "%s\t%d\t%d\t" % (v.left_contig, v.left_position, v.right_position-1)
#bits = []
#for a in vs[1:]:
# bits += [""]
#line += " ".join(bits) + "\t"
for fn in args.seqsets:
gt = genotype(seqsets[fn], vs)
if gt is None:
line += "?/?\t"
else:
line += "%d:%d\t" % (gt[0], gt[1])
print line
| 29.730496 | 104 | 0.595658 |
aceb7be21420acb9afaed3e0f1626161fe57efa1 | 98 | py | Python | hello.py | mark-solo/flsk-ttrl | 5e153444802c1bbde647837bbad813a53c806381 | [
"MIT"
] | null | null | null | hello.py | mark-solo/flsk-ttrl | 5e153444802c1bbde647837bbad813a53c806381 | [
"MIT"
] | null | null | null | hello.py | mark-solo/flsk-ttrl | 5e153444802c1bbde647837bbad813a53c806381 | [
"MIT"
] | null | null | null | from flask import Flask
app = Flask(__name__)


@app.route('/')
def hello():
    """Return a plain-text greeting for the site root.

    The original code read ``def hello:`` which is a SyntaxError -- a
    function definition requires a parameter list, even an empty one.
    """
    return 'Hello, World!'
aceb7c1c5f792db73d2770f29a2ef8de51b241f1 | 1,141 | py | Python | housemartin/processes/GetClimateStats/load_cache.py | cedadev/housemartin | 9d8c75e460a6dc46435760d15dc97ca9141e742f | [
"BSD-2-Clause"
] | null | null | null | housemartin/processes/GetClimateStats/load_cache.py | cedadev/housemartin | 9d8c75e460a6dc46435760d15dc97ca9141e742f | [
"BSD-2-Clause"
] | 1 | 2021-02-11T10:46:55.000Z | 2021-02-11T10:46:55.000Z | housemartin/processes/GetClimateStats/load_cache.py | cedadev/housemartin | 9d8c75e460a6dc46435760d15dc97ca9141e742f | [
"BSD-2-Clause"
] | null | null | null | """
load_cache.py
=============
Load all known assets into the cache.
Locations are held in:
"processes/local/GetClimateStats/asset_locations.txt"
"""
import time
# Process identifier
# NOTE(review): the first assignment is immediately overwritten; presumably
# one of the two lines is toggled by hand between runs -- confirm.
identifier = "GetFullClimateStats"
identifier = "GetClimateStats"
# Read in assets and process them
assets = []
n = 1
with open("asset_locations.txt") as reader:
    for line in reader:
        # Skip the header line (the one containing "LAT").
        if line.find("LAT") > -1: continue
        (lat, lon) = line.strip().split()
        # Each asset becomes "test_<NNN>,<lat>,<lon>".
        assets.append("test_%03d,%s,%s" % (n, lat, lon))
        n += 1
# GetClimateStats needs a scenario/time period; the full-stats process does not.
if identifier == "GetClimateStats":
    inputs = {"Experiment": "rcp45", "TimePeriod": "2035"}
else:
    inputs = {}
options = {"verbose": True, "similarity_threshold": 0.1}
# NOTE(review): ProcessTester is not imported anywhere in this script -- it
# must be injected by the execution environment, otherwise this raises
# NameError. Confirm how the script is run.
tester = ProcessTester(identifier)
# Resume point: processing starts once this asset id is reached.
# NOTE(review): first assignment is dead, as with `identifier` above.
first = "test_253"
first = "test_697"
do_run = False
do_run = True
for loc in assets:
    print("Running for: %s" % loc)
    # The two processes use differently named location parameters.
    if identifier == "GetClimateStats":
        inputs["Locations"] = loc
    else:
        inputs["Location"] = loc
    if loc.find(first) == 0 or do_run:
        do_run = True
        # Throttle requests to just over one per second.
        time.sleep(1.1)
        tester.runTest(identifier, inputs, {}, options)
| 19.338983 | 58 | 0.623138 |
aceb7c2f15afa31ddf403f731e7e68ed6a7d97ad | 2,864 | py | Python | virtual/Lib/site-packages/dash_core_components/LogoutButton.py | LeonZly90/LeonZly90 | 935a658814632beca84cab0af6c048dd762f8c56 | [
"MIT"
] | 2 | 2021-07-18T11:39:56.000Z | 2021-11-06T17:13:05.000Z | venv/Lib/site-packages/dash_core_components/LogoutButton.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | null | null | null | venv/Lib/site-packages/dash_core_components/LogoutButton.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | null | null | null | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class LogoutButton(Component):
    """A LogoutButton component.
Logout button to submit a form post request to the `logout_url` prop.
Usage is intended for dash-deployment-server authentication.
DDS usage:
`dcc.LogoutButton(logout_url=os.getenv('DASH_LOGOUT_URL'))`
Custom usage:
- Implement a login mechanism.
- Create a flask route with a post method handler.
`@app.server.route('/logout', methods=['POST'])`
- The logout route should perform what's necessary for the user to logout.
- If you store the session in a cookie, clear the cookie:
`rep = flask.Response(); rep.set_cookie('session', '', expires=0)`
- Create a logout button component and assign it the logout_url
`dcc.LogoutButton(logout_url='/logout')`
See https://dash.plotly.com/dash-core-components/logout_button
for more documentation and examples.
Keyword arguments:
- id (string; optional):
Id of the button.
- className (string; optional):
CSS class for the button.
- label (string; default 'Logout'):
Text of the button.
- loading_state (dict; optional):
Object that holds the loading state object coming from
dash-renderer.
`loading_state` is a dict with keys:
- component_name (string; optional):
Holds the name of the component that is loading.
- is_loading (boolean; optional):
Determines if the component is loading or not.
- prop_name (string; optional):
Holds which property is loading.
- logout_url (string; optional):
Url to submit a post logout request.
- method (string; default 'post'):
Http method to submit the logout form.
- style (dict; optional):
Style of the button."""

    # NOTE: this class comes from an auto-generated dash component file
    # ("AUTO GENERATED FILE - DO NOT EDIT"); regenerate rather than hand-edit.
    @_explicitize_args
    def __init__(self, id=Component.UNDEFINED, label=Component.UNDEFINED, logout_url=Component.UNDEFINED, style=Component.UNDEFINED, method=Component.UNDEFINED, className=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
        # Metadata consumed by the dash renderer / base Component machinery.
        self._prop_names = ['id', 'className', 'label', 'loading_state', 'logout_url', 'method', 'style']
        self._type = 'LogoutButton'
        self._namespace = 'dash_core_components'
        self._valid_wildcard_attributes = []
        self.available_properties = ['id', 'className', 'label', 'loading_state', 'logout_url', 'method', 'style']
        self.available_wildcard_properties = []
        # Collect only the keyword arguments the caller passed explicitly.
        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}
        # This component has no required props, so the loop below never fires;
        # it is kept by the generator for uniformity with other components.
        for k in []:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')
        super(LogoutButton, self).__init__(**args)
| 35.358025 | 237 | 0.689246 |
aceb7c3e6734eee4417c6e2045ec224498d7e16f | 75,141 | py | Python | src/genie/libs/parser/nxos/tests/test_show_spanning_tree.py | svautour/genieparser | 7416c9a4b44582be835a0646fb7fad92a5181c7d | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/nxos/tests/test_show_spanning_tree.py | svautour/genieparser | 7416c9a4b44582be835a0646fb7fad92a5181c7d | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/nxos/tests/test_show_spanning_tree.py | svautour/genieparser | 7416c9a4b44582be835a0646fb7fad92a5181c7d | [
"Apache-2.0"
] | null | null | null | #!/bin/env python
# -*- coding: utf-8 -*-
import unittest
from unittest.mock import Mock
from pyats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError, SchemaMissingKeyError
from genie.libs.parser.nxos.show_spanning_tree import (ShowSpanningTreeMst,
ShowSpanningTreeSummary,
ShowSpanningTreeDetail,
ShowErrdisableRecovery)
class TestShowSpanningTreeMst(unittest.TestCase):
    """Unit tests for the NXOS ShowSpanningTreeMst parser
    ('show spanning-tree mst detail'): golden CLI outputs are fed to the
    parser via a Mock device and compared against expected dicts."""

    dev1 = Device(name = 'deviceA')
    dev2 = Device(name = 'deviceB')

    # Raw CLI output returned by the mocked device's execute().
    output_1 = {'execute.return_value' :
    '''
P1# show spanning-tree mst detail
##### MST0   vlans mapped:   1-399,501-4094
Bridge        address 0023.04ee.be14  priority      32768 (32768 sysid 0)
Root          this switch for the CIST
Regional Root this switch
Operational   hello time 10, forward delay 30, max age 40, txholdcount 6
Configured    hello time 10, forward delay 30, max age 40, max hops    255
Po30 of MST0 is broken (Bridge Assurance Inconsistent, VPC Peer-link Inconsisten
t)
Port info             port id       128.4125  priority    128  cost   500
Designated root       address 0023.04ee.be14  priority  32768  cost     0
Design. regional root address 0023.04ee.be14  priority  32768  cost     0
Designated bridge     address 4055.3926.d8c1  priority  61440  port id 128.4125
Timers: message expires in 0 sec, forward delay 0, forward transitions 0
Bpdus sent 113, received 0
'''}

    # Expected parser result for output_1.
    # NOTE(review): 'mst_intances' and 'bpdu_recieved' are misspelled on
    # purpose -- they mirror the parser's schema keys; do not "fix" here.
    parsed_output_1 = {
        'mstp': {
            'mst_intances': {
                0: {
                    'mst_id': 0,
                    'vlans_mapped': '1-399,501-4094',
                    'bridge_address': '0023.04ee.be14',
                    'bridge_priority': 32768,
                    'sys_id': 0,
                    'root_for_cist' : 'this switch',
                    'regional_root': 'this switch',
                    'interfaces': {
                        'Port-channel30': {
                            'name': 'Port-channel30',
                            'port_state': 'broken',
                            'port_id': '128.4125',
                            'port_priority': 128,
                            'port_cost': 500,
                            'bridge_assurance_inconsistent': True,
                            'vpc_peer_link_inconsistent': True,
                            'designated_root_address': '0023.04ee.be14',
                            'designated_root_priority': 32768,
                            'designated_root_cost': 0,
                            'designated_regional_root_address': '0023.04ee.be14',
                            'designated_regional_root_priority': 32768,
                            'designated_regional_root_cost': 0,
                            'designated_bridge_address': '4055.3926.d8c1',
                            'designated_bridge_priority': 61440,
                            'designated_bridge_port_id': '128.4125',
                            'timers': {
                                'message_expires_in': 0,
                                'forward_delay': 0,
                                'forward_transitions': 0
                            },
                            'counters': {
                                'bpdu_sent': 113,
                                'bpdu_recieved': 0
                            }
                        }
                    },
                    'operational': {
                        'domain': 'operational',
                        'hello_time': 10,
                        'forwarding_delay': 30,
                        'max_age': 40,
                        'hold_count': 6
                    },
                    'configured': {
                        'domain': 'configured',
                        'hello_time': 10,
                        'forwarding_delay': 30,
                        'max_age': 40,
                        'max_hop': 255
                    }
                }
            }
        }
    }

    # Variant output: no "Regional Root" line, different operational timers.
    output_2 = {'execute.return_value' :
    '''
P1# show spanning-tree mst detail
##### MST0   vlans mapped:   1-399,501-4094
Bridge        address 0023.04ee.be14  priority      32768 (32768 sysid 0)
Root          this switch for the CIST
Operational   hello time 5, forward delay 20, max age 30, txholdcount 12
Configured    hello time 10, forward delay 30, max age 40, max hops    255
Po25 of MST0 is broken (Bridge Assurance Inconsistent, VPC Peer-link Inconsisten
t)
Port info             port id       128.4125  priority    128  cost   500
Designated root       address 0023.04ee.be14  priority  32768  cost     0
Design. regional root address 0023.04ee.be14  priority  32768  cost     0
Designated bridge     address 4055.3926.d8c1  priority  61440  port id 128.4125
Timers: message expires in 0 sec, forward delay 0, forward transitions 0
Bpdus sent 113, received 0
'''}

    # Expected parser result for output_2 (no 'regional_root' key expected).
    parsed_output_2 = {
        'mstp': {
            'mst_intances': {
                0: {
                    'mst_id': 0,
                    'vlans_mapped': '1-399,501-4094',
                    'bridge_address': '0023.04ee.be14',
                    'bridge_priority': 32768,
                    'sys_id': 0,
                    'root_for_cist' : 'this switch',
                    'interfaces': {
                        'Port-channel25': {
                            'name': 'Port-channel25',
                            'port_state': 'broken',
                            'port_id': '128.4125',
                            'port_priority': 128,
                            'port_cost': 500,
                            'bridge_assurance_inconsistent': True,
                            'vpc_peer_link_inconsistent': True,
                            'designated_root_address': '0023.04ee.be14',
                            'designated_root_priority': 32768,
                            'designated_root_cost': 0,
                            'designated_regional_root_address': '0023.04ee.be14',
                            'designated_regional_root_priority': 32768,
                            'designated_regional_root_cost': 0,
                            'designated_bridge_address': '4055.3926.d8c1',
                            'designated_bridge_priority': 61440,
                            'designated_bridge_port_id': '128.4125',
                            'timers': {
                                'message_expires_in': 0,
                                'forward_delay': 0,
                                'forward_transitions': 0
                            },
                            'counters': {
                                'bpdu_sent': 113,
                                'bpdu_recieved': 0
                            }
                        }
                    },
                    'operational': {
                        'domain': 'operational',
                        'hello_time': 5,
                        'forwarding_delay': 20,
                        'max_age': 30,
                        'hold_count': 12
                    },
                    'configured': {
                        'domain': 'configured',
                        'hello_time': 10,
                        'forwarding_delay': 30,
                        'max_age': 40,
                        'max_hop': 255
                    }
                }
            }
        }
    }

    # Whitespace-only output: the parser must raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ' '}

    def test_output_1(self):
        """Golden output with a Regional Root line parses as expected."""
        self.maxDiff = None
        self.dev1 = Mock(**self.output_1)
        obj = ShowSpanningTreeMst(device = self.dev1)
        parsed = obj.parse()
        self.assertEqual(parsed, self.parsed_output_1)

    def test_output_2(self):
        """Golden output without a Regional Root line parses as expected."""
        self.maxDiff = None
        self.dev1 = Mock(**self.output_2)
        obj = ShowSpanningTreeMst(device = self.dev1)
        parsed = obj.parse()
        self.assertEqual(parsed, self.parsed_output_2)

    def test_empty_output(self):
        """Empty device output raises SchemaEmptyParserError."""
        self.dev2 = Mock(**self.empty_output)
        obj = ShowSpanningTreeMst(device = self.dev2)
        with self.assertRaises(SchemaEmptyParserError):
            parsed = obj.parse()
class TestShowSpanningTreeSummary(unittest.TestCase):
    """Unit tests for the NXOS ShowSpanningTreeSummary parser
    ('show spanning-tree summary') covering mst and rapid-pvst modes."""

    dev_c3850 = Device(name = 'deviceA')
    dev2 = Device(name = 'deviceB')

    # MST-mode golden output (single instance, this switch is root).
    golden_output_mstp = {'execute.return_value' : '''
    P1# show spanning-tree summary
    Switch is in mst mode  (IEEE Standard)
    Root bridge for: MST0000
    Port Type Default is disable
    Edge Port [PortFast] BPDU Guard Default is disabled
    Edge Port [PortFast] BPDU Filter Default is disabled
    Bridge Assurance is enabled
    Loopguard Default is disabled
    Pathcost method used is long
    PVST Simulation is enabled
    vPC peer-switch is enabled (non-operational)
    STP-Lite is enabled
    Name                   Blocking Listening Learning Forwarding STP Active
    ---------------------- -------- --------- -------- ---------- ----------
    MST0000                       1         0        0          0          1
    ---------------------- -------- --------- -------- ---------- ----------
    1 mst                         1         0        0          0          1
    '''}

    # Expected parse of golden_output_mstp.
    golden_parsed_output_mstp = {
        'mode': {
            'mst': {
                'MST0000': {
                    'blocking': 1,
                    'listening': 0,
                    'learning': 0,
                    'forwarding': 0,
                    'stp_active': 1
                }
            }
        },
        'root_bridge_for': 'MST0000',
        'mst_type': 'IEEE Standard',
        'port_type_default': False,
        'bpdu_guard': False,
        'bpdu_filter': False,
        'bridge_assurance': True,
        'loop_guard': False,
        'path_cost_method': 'long',
        'pvst_simulation': True,
        'vpc_peer_switch': True,
        'vpc_peer_switch_status': 'non-operational',
        'stp_lite': True,
        'total_statistics': {
            'blockings': 1,
            'listenings': 0,
            'learnings': 0,
            'forwardings': 0,
            'stp_actives': 1
        }
    }

    # MST-mode variant: BPDU guard/filter enabled, two MST instances.
    golden_output_mstp_2 = {'execute.return_value' : '''
    P1# show spanning-tree summary
    Switch is in mst mode  (IEEE Standard)
    Root bridge for: MST0000
    Port Type Default is disable
    Edge Port [PortFast] BPDU Guard Default is enabled
    Edge Port [PortFast] BPDU Filter Default is enabled
    Bridge Assurance is enabled
    Loopguard Default is disabled
    Pathcost method used is long
    PVST Simulation is disabled
    vPC peer-switch is enabled (non-operational)
    STP-Lite is enabled
    Name                   Blocking Listening Learning Forwarding STP Active
    ---------------------- -------- --------- -------- ---------- ----------
    MST0                          3         0        0          9         12
    MST100                        3         0        0          1          4
    ---------------------- -------- --------- -------- ---------- ----------
    1 mst                         1         0        0          0          1
    '''}

    # Expected parse of golden_output_mstp_2.
    golden_parsed_output_mstp_2 = {
        'mode': {
            'mst': {
                'MST0': {
                    'blocking': 3,
                    'listening': 0,
                    'learning': 0,
                    'forwarding': 9,
                    'stp_active': 12
                },
                'MST100': {
                    'blocking': 3,
                    'listening': 0,
                    'learning': 0,
                    'forwarding': 1,
                    'stp_active': 4
                }
            }
        },
        'root_bridge_for': 'MST0000',
        'mst_type': 'IEEE Standard',
        'port_type_default': False,
        'bpdu_guard': True,
        'bpdu_filter': True,
        'bridge_assurance': True,
        'loop_guard': False,
        'path_cost_method': 'long',
        'pvst_simulation': False,
        'vpc_peer_switch': True,
        'vpc_peer_switch_status': 'non-operational',
        'stp_lite': True,
        'total_statistics': {
            'blockings': 1,
            'listenings': 0,
            'learnings': 0,
            'forwardings': 0,
            'stp_actives': 1
        }
    }

    # Whitespace-only output must raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ' '}

    # Rapid-PVST golden output with a multi-line "Root bridge for" list.
    golden_output_1 = {'execute.return_value': '''\
S1-R101# show spann sum
Switch is in rapid-pvst mode
Root bridge for: VLAN0109-VLAN0110, VLAN0122, VLAN0202-VLAN0205
VLAN0207-VLAN0209, VLAN0212-VLAN0215, VLAN0222-VLAN0224, VLAN0232-VLAN0234
VLAN0242, VLAN0244, VLAN0253-VLAN0254, VLAN0264, VLAN0274, VLAN0280
Port Type Default is disable
Edge Port [PortFast] BPDU Guard Default is disabled
Edge Port [PortFast] BPDU Filter Default is disabled
Bridge Assurance is enabled
Loopguard Default is disabled
Pathcost method used is short
STP-Lite is enabled
Name                   Blocking Listening Learning Forwarding STP Active
---------------------- -------- --------- -------- ---------- ----------
VLAN0109                      0         0        0          3          3
VLAN0110                      0         0        0          2          2
VLAN0122                      0         0        0          2          2
VLAN0202                      0         0        0          2          2
VLAN0203                      0         0        0          1          1
VLAN0204                      0         0        0          2          2
VLAN0205                      0         0        0          2          2
VLAN0207                      0         0        0          2          2
VLAN0208                      0         0        0          2          2
---------------------- -------- --------- -------- ---------- ----------
117 vlans                     0         0        0        280        280
DS1-R101# exit
'''
    }

    # Expected parse of golden_output_1; note the continuation lines of the
    # "Root bridge for" list are joined into one comma-separated string.
    golden_parsed_output_1 = {
        'bpdu_filter': False,
        'bpdu_guard': False,
        'bridge_assurance': True,
        'loop_guard': False,
        'mode': {
            'rapid-pvst': {
                'VLAN0109': {
                    'blocking': 0,
                    'forwarding': 3,
                    'learning': 0,
                    'listening': 0,
                    'stp_active': 3
                },
                'VLAN0110': {
                    'blocking': 0,
                    'forwarding': 2,
                    'learning': 0,
                    'listening': 0,
                    'stp_active': 2
                },
                'VLAN0122': {
                    'blocking': 0,
                    'forwarding': 2,
                    'learning': 0,
                    'listening': 0,
                    'stp_active': 2
                },
                'VLAN0202': {
                    'blocking': 0,
                    'forwarding': 2,
                    'learning': 0,
                    'listening': 0,
                    'stp_active': 2
                },
                'VLAN0203': {
                    'blocking': 0,
                    'forwarding': 1,
                    'learning': 0,
                    'listening': 0,
                    'stp_active': 1
                },
                'VLAN0204': {
                    'blocking': 0,
                    'forwarding': 2,
                    'learning': 0,
                    'listening': 0,
                    'stp_active': 2
                },
                'VLAN0205': {
                    'blocking': 0,
                    'forwarding': 2,
                    'learning': 0,
                    'listening': 0,
                    'stp_active': 2
                },
                'VLAN0207': {
                    'blocking': 0,
                    'forwarding': 2,
                    'learning': 0,
                    'listening': 0,
                    'stp_active': 2
                },
                'VLAN0208': {
                    'blocking': 0,
                    'forwarding': 2,
                    'learning': 0,
                    'listening': 0,
                    'stp_active': 2
                },
            }
        },
        'path_cost_method': 'short',
        'port_type_default': False,
        'root_bridge_for': 'VLAN0109-VLAN0110, VLAN0122, VLAN0202-VLAN0205, '
                           'VLAN0207-VLAN0209, VLAN0212-VLAN0215, VLAN0222-VLAN0224, '
                           'VLAN0232-VLAN0234, VLAN0242, VLAN0244, VLAN0253-VLAN0254, '
                           'VLAN0264, VLAN0274, VLAN0280',
        'stp_lite': True,
        'total_statistics': {
            'blockings': 0,
            'forwardings': 280,
            'learnings': 0,
            'listenings': 0,
            'stp_actives': 280
        }
    }

    # Rapid-PVST output with no root-bridge line and no per-VLAN table.
    golden_output_2 = {'execute.return_value': '''
        show spann sum
        Switch is in rapid-pvst mode
        Port Type Default is disable
        Edge Port [PortFast] BPDU Guard Default is disabled
        Edge Port [PortFast] BPDU Filter Default is disabled
        Bridge Assurance is enabled
        Loopguard Default is disabled
        Pathcost method used is short
        STP-Lite is enabled
    '''}

    # Expected parse of golden_output_2: mode present but with no VLAN entries.
    golden_parsed_output_2 = {
        'bpdu_filter': False,
        'bpdu_guard': False,
        'bridge_assurance': True,
        'loop_guard': False,
        'mode': {
            'rapid-pvst': {
            }
        },
        'path_cost_method': 'short',
        'port_type_default': False,
        'stp_lite': True,
    }

    def test_empty(self):
        """Empty device output raises SchemaEmptyParserError."""
        self.dev2 = Mock(**self.empty_output)
        obj = ShowSpanningTreeSummary(device=self.dev2)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()

    def test_golden_mst(self):
        """Single-instance MST summary parses as expected."""
        self.maxDiff = None
        self.dev_c3850 = Mock(**self.golden_output_mstp)
        obj = ShowSpanningTreeSummary(device=self.dev_c3850)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output,self.golden_parsed_output_mstp)

    def test_golden_mst_2(self):
        """Multi-instance MST summary parses as expected."""
        self.maxDiff = None
        self.dev_c3850 = Mock(**self.golden_output_mstp_2)
        obj = ShowSpanningTreeSummary(device=self.dev_c3850)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output,self.golden_parsed_output_mstp_2)

    def test_golden_1(self):
        """Rapid-PVST summary with wrapped root-bridge list parses as expected."""
        self.maxDiff = None
        self.dev_c3850 = Mock(**self.golden_output_1)
        obj = ShowSpanningTreeSummary(device=self.dev_c3850)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output,self.golden_parsed_output_1)

    def test_golden_2(self):
        """Rapid-PVST summary with no VLAN table parses as expected."""
        self.maxDiff = None
        self.dev_c3850 = Mock(**self.golden_output_2)
        obj = ShowSpanningTreeSummary(device=self.dev_c3850)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output_2)
class TestShowSpanningTreeDetail(unittest.TestCase):
    """Unit tests for the NXOS ShowSpanningTreeDetail parser
    ('show spanning-tree detail') covering mstp and rapid-pvst outputs."""

    dev_c3850 = Device(name = 'c3850')
    dev2 = Device(name = 'empty')

    # Whitespace-only output must raise SchemaEmptyParserError.
    empty_output = {'execute.return_value' : ' '}

    # MST detail with a single broken vPC peer-link port.
    golden_output_1 = {'execute.return_value': '''
    P1# show spanning-tree detail
    MST0000 is executing the mstp compatible Spanning Tree protocol
    Bridge Identifier has priority 32768, sysid 0, address 00e3.04ee.be14
    Configured hello time 10, max age 40, forward delay 30
    We are the root of the spanning tree
    Topology change flag not set, detected flag not set
    Number of topology changes 0 last change occurred 142:22:13 ago
    Times:  hold 1, topology change 70, notification 10
    hello 10, max age 40, forward delay 30
    Timers: hello 0, topology change 0, notification 0
    Port 4125 (port-channel30, vPC Peer-link) of MST0000 is broken (Bridge Assuran
    ce Inconsistent, VPC Peer-link Inconsistent)
    Port path cost 500, Port priority 128, Port Identifier 128.4125
    Designated root has priority 32768, address 0023.04ee.be14
    Designated bridge has priority 61440, address 4055.3926.d8c1
    Designated port id is 128.4125, designated path cost 0
    Timers: message age 0, forward delay 0, hold 0
    Number of transitions to forwarding state: 0
    The port type is network
    Link type is point-to-point by default, Internal
    PVST Simulation is enabled by default
    BPDU: sent 110, received 0
    '''}

    # Expected parse of golden_output_1.
    golden_parsed_output_1 = {
        'mstp': {
            'mst_instances': {
                0: {
                    'mst_id': 0,
                    'bridge_priority': 32768,
                    'bridge_sysid': 0,
                    'bridge_address': '00e3.04ee.be14',
                    'topology_change_flag': False,
                    'topology_detected_flag': False,
                    'topology_changes': 0,
                    'time_since_topology_change': '142:22:13',
                    'times': {
                        'hold': 1,
                        'topology_change': 70,
                        'notification': 10,
                        'max_age': 40,
                        'hello': 10,
                        'forwarding_delay': 30,
                    },
                    'timers' : {
                        'hello': 0,
                        'topology_change': 0,
                        'notification': 0,
                    },
                    'root_of_the_spanning_tree': True,
                    'interfaces': {
                        'Port-channel30': {
                            'name': 'Port-channel30',
                            'bridge_assurance_inconsistent': True,
                            'vpc_peer_link_inconsistent': True,
                            'port_num': 4125,
                            'status': 'broken',
                            'cost': 500,
                            'port_priority': 128,
                            'port_identifier': '128.4125',
                            'designated_root_priority': 32768,
                            'designated_root_address': '0023.04ee.be14',
                            'designated_bridge_priority': 61440,
                            'designated_bridge_address': '4055.3926.d8c1',
                            'designated_port_id': '128.4125',
                            'designated_path_cost': 0,
                            'timers': {
                                'message_age': 0,
                                'forward_delay': 0,
                                'hold': 0,
                            },
                            'port_type' : 'network',
                            'number_of_forward_transitions': 0,
                            'link_type': 'point-to-point',
                            'internal': True,
                            'pvst_simulation': True,
                            'counters': {
                                'bpdu_sent': 110,
                                'bpdu_received': 0
                            }
                        }
                    }
                }
            },
            'hello_time': 10,
            'max_age': 40,
            'forwarding_delay': 30
        }
    }

    # MST detail variant: includes "fex hello time" and a second port.
    golden_output_2 = {'execute.return_value' : '''
    P1# show spanning-tree detail
    MST0000 is executing the mstp compatible Spanning Tree protocol
    Bridge Identifier has priority 32768, sysid 0, address 0023.04ee.be14
    Configured hello time 10, fex hello time 10, max age 40, forward delay 30
    We are the root of the spanning tree
    Topology change flag not set, detected flag not set
    Number of topology changes 0 last change occurred 142:22:13 ago
    Times:  hold 1, topology change 70, notification 10
    hello 10, max age 40, forward delay 30
    Timers: hello 0, topology change 0, notification 0
    Port 4125 (port-channel30, vPC Peer-link) of MST0000 is broken (Bridge Assuran
    ce Inconsistent, VPC Peer-link Inconsistent)
    Port path cost 500, Port priority 128, Port Identifier 128.4125
    Designated root has priority 32768, address 0023.04ee.be14
    Designated bridge has priority 61440, address 4055.3926.d8c1
    Designated port id is 128.4125, designated path cost 0
    Timers: message age 0, forward delay 0, hold 0
    Number of transitions to forwarding state: 0
    The port type is network
    Link type is point-to-point by default, Internal
    PVST Simulation is enabled by default
    BPDU: sent 110, received 0
    Port 2390 (Port-channel14) of MST0 is broken (Bridge Assuran
    ce Inconsistent, VPC Peer-link Inconsistent)
    Port path cost 6660, Port priority 128, Port Identifier 128.2390.
    Designated root has priority 32768, address d8b1.9009.bf80
    Designated bridge has priority 32768, address d8b1.9009.bf80
    Designated port id is 128.2390, designated path cost 0
    Timers: message age 0, forward delay 0, hold 0
    Number of transitions to forwarding state: 0
    The port type is network
    Link type is point-to-point by default, Internal
    PVST Simulation is enabled by default
    BPDU: sent 138231, received 167393
    '''}

    # Expected parse of golden_output_2.
    # NOTE(review): '128.2390.' keeps the trailing dot exactly as printed in
    # the CLI "Port Identifier 128.2390." line above.
    golden_parsed_output_2 = {
        'mstp': {
            'mst_instances': {
                0: {
                    'mst_id': 0,
                    'bridge_priority': 32768,
                    'bridge_sysid': 0,
                    'bridge_address': '0023.04ee.be14',
                    'topology_change_flag': False,
                    'topology_detected_flag': False,
                    'time_since_topology_change': '142:22:13',
                    'topology_changes': 0,
                    'times': {
                        'hold': 1,
                        'topology_change': 70,
                        'notification': 10,
                        'max_age': 40,
                        'hello': 10,
                        'forwarding_delay': 30,
                    },
                    'timers' : {
                        'hello': 0,
                        'topology_change': 0,
                        'notification': 0,
                    },
                    'root_of_the_spanning_tree': True,
                    'interfaces': {
                        'Port-channel30': {
                            'name': 'Port-channel30',
                            'bridge_assurance_inconsistent': True,
                            'vpc_peer_link_inconsistent': True,
                            'port_num': 4125,
                            'status': 'broken',
                            'cost': 500,
                            'port_priority': 128,
                            'port_identifier': '128.4125',
                            'designated_root_priority': 32768,
                            'designated_root_address': '0023.04ee.be14',
                            'designated_bridge_priority': 61440,
                            'designated_bridge_address': '4055.3926.d8c1',
                            'designated_port_id': '128.4125',
                            'designated_path_cost': 0,
                            'timers': {
                                'message_age': 0,
                                'forward_delay': 0,
                                'hold': 0,
                            },
                            'port_type' : 'network',
                            'number_of_forward_transitions': 0,
                            'link_type': 'point-to-point',
                            'internal': True,
                            'pvst_simulation': True,
                            'counters': {
                                'bpdu_sent': 110,
                                'bpdu_received': 0
                            }
                        },
                        'Port-channel14': {
                            'name': 'Port-channel14',
                            'bridge_assurance_inconsistent': True,
                            'vpc_peer_link_inconsistent': True,
                            'port_num': 2390,
                            'status': 'broken',
                            'cost': 6660,
                            'port_priority': 128,
                            'port_identifier': '128.2390.',
                            'designated_root_priority': 32768,
                            'designated_root_address': 'd8b1.9009.bf80',
                            'designated_bridge_priority': 32768,
                            'designated_bridge_address': 'd8b1.9009.bf80',
                            'designated_port_id': '128.2390',
                            'designated_path_cost': 0,
                            'timers': {
                                'message_age': 0,
                                'forward_delay': 0,
                                'hold': 0,
                            },
                            'port_type' : 'network',
                            'number_of_forward_transitions': 0,
                            'link_type': 'point-to-point',
                            'internal': True,
                            'pvst_simulation': True,
                            'counters': {
                                'bpdu_sent': 138231,
                                'bpdu_received': 167393
                            }
                        }
                    }
                }
            },
            'hello_time': 10,
            'fex_hello_time': 10,
            'max_age': 40,
            'forwarding_delay': 30
        }
    }

    # Rapid-PVST detail: three VLANs, mixed root/designated ports, a shared
    # link, a root-guarded port and a "Topology change is set" flag.
    golden_output_3 = {'execute.return_value': '''\
DS1-R101# sh spanning-tree detail
 VLAN0109 is executing the rstp compatible Spanning Tree protocol
  Bridge Identifier has priority 20480, sysid 109, address 0023.04ee.be1f
  Configured hello time 2, max age 20, forward delay 15
  We are the root of the spanning tree
  Topology change flag not set, detected flag not set
  Number of topology changes 8 last change occurred 126:41:16 ago
          from port-channel31
  Times:  hold 1, topology change 35, notification 2
          hello 2, max age 20, forward delay 15
  Timers: hello 0, topology change 0, notification 0
 Port 4126 (port-channel31, vPC Peer-link) of VLAN0109 is root forwarding
   Port path cost 2, Port priority 128, Port Identifier 128.4126
   Designated root has priority 20589, address 0023.04ee.be1f
   Designated bridge has priority 0, address 0026.981e.c642
   Designated port id is 128.4126, designated path cost 0
   Timers: message age 3, forward delay 0, hold 0
   Number of transitions to forwarding state: 1
   The port type is network
   Link type is point-to-point by default
   BPDU: sent 3245614, received 3245744
 Port 4194 (port-channel99, vPC) of VLAN0109 is designated forwarding
   Port path cost 1, Port priority 128, Port Identifier 128.4194
   Designated root has priority 20589, address 0023.04ee.be1f
   Designated bridge has priority 20589, address 0026.981e.c642
   Designated port id is 128.4194, designated path cost 0
   Timers: message age 0, forward delay 0, hold 0
   Number of transitions to forwarding state: 0
   Link type is point-to-point by default
   Root guard is enabled
   BPDU: sent 2725887, received 0
 Port 4196 (port-channel101, vPC) of VLAN0109 is designated forwarding
   Port path cost 1, Port priority 128, Port Identifier 128.4196
   Designated root has priority 20589, address 0023.04ee.be1f
   Designated bridge has priority 20589, address 0026.981e.c642
   Designated port id is 128.4196, designated path cost 0
   Timers: message age 0, forward delay 0, hold 0
   Number of transitions to forwarding state: 0
   Link type is shared
   BPDU: sent 231106, received 0
 VLAN0110 is executing the rstp compatible Spanning Tree protocol
  Bridge Identifier has priority 20480, sysid 110, address 0023.04ee.be1f
  Configured hello time 2, max age 20, forward delay 15
  We are the root of the spanning tree
  Topology change flag not set, detected flag not set
  Number of topology changes 9 last change occurred 123:32:30 ago
          from port-channel31
  Times:  hold 1, topology change 35, notification 2
          hello 2, max age 20, forward delay 15
  Timers: hello 0, topology change 0, notification 0
 Port 4126 (port-channel31, vPC Peer-link) of VLAN0110 is root forwarding
   Port path cost 2, Port priority 128, Port Identifier 128.4126
   Designated root has priority 20590, address 0023.04ee.be1f
   Designated bridge has priority 0, address 0026.981e.c642
   Designated port id is 128.4126, designated path cost 0
   Timers: message age 3, forward delay 0, hold 0
   Number of transitions to forwarding state: 1
   The port type is network
   Link type is point-to-point by default
   BPDU: sent 3245614, received 3245745
 Port 4194 (port-channel99, vPC) of VLAN0110 is designated forwarding
   Port path cost 1, Port priority 128, Port Identifier 128.4194
   Designated root has priority 20590, address 0023.04ee.be1f
   Designated bridge has priority 20590, address 0026.981e.c642
   Designated port id is 128.4194, designated path cost 0
   Timers: message age 0, forward delay 0, hold 0
   Number of transitions to forwarding state: 0
   Link type is point-to-point by default
   Root guard is enabled
   BPDU: sent 2725886, received 0
 VLAN0122 is executing the rstp compatible Spanning Tree protocol
  Bridge Identifier has priority 20480, sysid 122, address 0023.04ee.be1f
  Configured hello time 2, max age 20, forward delay 15
  Topology change flag not set, detected flag not set
  Number of topology changes 9 last change occurred 123:10:02 ago
          from port-channel31
  Times:  hold 1, topology change 35, notification 2
          hello 2, max age 20, forward delay 15
  Timers: hello 0, topology change 0, notification 0
 Port 4126 (port-channel31, vPC Peer-link) of VLAN0122 is root forwarding
   Port path cost 2, Port priority 128, Port Identifier 128.4126
   Designated root has priority 20602, address 0023.04ee.be1f
   Designated bridge has priority 0, address 0026.981e.c642
   Designated port id is 128.4126, designated path cost 0
   Timers: message age 3, forward delay 0, hold 0
   Number of transitions to forwarding state: 1
   The port type is network
   Link type is point-to-point by default
   BPDU: sent 3245614, received 3245745
 Port 4194 (port-channel99, vPC) of VLAN0122 is designated forwarding
   Port path cost 1, Port priority 128, Port Identifier 128.4194
   Designated root has priority 20602, address 0023.04ee.be1f
   Designated bridge has priority 20602, address 0026.981e.c642
   Designated port id is 128.4194, designated path cost 0, Topology change is set
   Timers: message age 0, forward delay 0, hold 0
   Number of transitions to forwarding state: 0
   Link type is point-to-point by default
   Root guard is enabled
   BPDU: sent 2725887, received 0
    '''
    }

    # Expected parse of golden_output_3.  VLAN0122 has no
    # 'root_of_the_spanning_tree' key (no "We are the root" line above), and
    # its Port-channel99 carries 'topology_change': True.
    golden_parsed_output_3 = {
        'rapid_pvst': {
            'forwarding_delay': 15,
            'hello_time': 2,
            'max_age': 20,
            'vlans': {
                109: {
                    'bridge_address': '0023.04ee.be1f',
                    'bridge_priority': 20480,
                    'bridge_sysid': 109,
                    'interfaces': {
                        'Port-channel101': {
                            'cost': 1,
                            'counters': {
                                'bpdu_received': 0,
                                'bpdu_sent': 231106
                            },
                            'designated_bridge_address': '0026.981e.c642',
                            'designated_bridge_priority': 20589,
                            'designated_path_cost': 0,
                            'designated_port_id': '128.4196',
                            'designated_root_address': '0023.04ee.be1f',
                            'designated_root_priority': 20589,
                            'internal': False,
                            'link_type': 'shared',
                            'name': 'Port-channel101',
                            'number_of_forward_transitions': 0,
                            'port_identifier': '128.4196',
                            'port_num': 4196,
                            'port_priority': 128,
                            'status': 'designated',
                            'timers': {
                                'forward_delay': 0,
                                'hold': 0,
                                'message_age': 0
                            }
                        },
                        'Port-channel31': {
                            'cost': 2,
                            'counters': {
                                'bpdu_received': 3245744,
                                'bpdu_sent': 3245614
                            },
                            'designated_bridge_address': '0026.981e.c642',
                            'designated_bridge_priority': 0,
                            'designated_path_cost': 0,
                            'designated_port_id': '128.4126',
                            'designated_root_address': '0023.04ee.be1f',
                            'designated_root_priority': 20589,
                            'internal': False,
                            'link_type': 'point-to-point',
                            'name': 'Port-channel31',
                            'number_of_forward_transitions': 1,
                            'port_identifier': '128.4126',
                            'port_num': 4126,
                            'port_priority': 128,
                            'port_type': 'network',
                            'status': 'root',
                            'timers': {
                                'forward_delay': 0,
                                'hold': 0,
                                'message_age': 3
                            }
                        },
                        'Port-channel99': {
                            'cost': 1,
                            'counters': {
                                'bpdu_received': 0,
                                'bpdu_sent': 2725887
                            },
                            'designated_bridge_address': '0026.981e.c642',
                            'designated_bridge_priority': 20589,
                            'designated_path_cost': 0,
                            'designated_port_id': '128.4194',
                            'designated_root_address': '0023.04ee.be1f',
                            'designated_root_priority': 20589,
                            'internal': False,
                            'link_type': 'point-to-point',
                            'name': 'Port-channel99',
                            'number_of_forward_transitions': 0,
                            'port_identifier': '128.4194',
                            'port_num': 4194,
                            'port_priority': 128,
                            'root_guard': True,
                            'status': 'designated',
                            'timers': {
                                'forward_delay': 0,
                                'hold': 0,
                                'message_age': 0
                            }
                        }
                    },
                    'root_of_the_spanning_tree': True,
                    'time_since_topology_change': '126:41:16',
                    'timers': {
                        'hello': 0,
                        'notification': 0,
                        'topology_change': 0
                    },
                    'times': {
                        'forwarding_delay': 15,
                        'hello': 2,
                        'hold': 1,
                        'max_age': 20,
                        'notification': 2,
                        'topology_change': 35
                    },
                    'topology_change_flag': False,
                    'topology_changes': 8,
                    'topology_detected_flag': False,
                    'topology_from_port': 'port-channel31',
                    'vlan_id': 109
                },
                110: {
                    'bridge_address': '0023.04ee.be1f',
                    'bridge_priority': 20480,
                    'bridge_sysid': 110,
                    'interfaces': {
                        'Port-channel31': {
                            'cost': 2,
                            'counters': {
                                'bpdu_received': 3245745,
                                'bpdu_sent': 3245614
                            },
                            'designated_bridge_address': '0026.981e.c642',
                            'designated_bridge_priority': 0,
                            'designated_path_cost': 0,
                            'designated_port_id': '128.4126',
                            'designated_root_address': '0023.04ee.be1f',
                            'designated_root_priority': 20590,
                            'internal': False,
                            'link_type': 'point-to-point',
                            'name': 'Port-channel31',
                            'number_of_forward_transitions': 1,
                            'port_identifier': '128.4126',
                            'port_num': 4126,
                            'port_priority': 128,
                            'port_type': 'network',
                            'status': 'root',
                            'timers': {
                                'forward_delay': 0,
                                'hold': 0,
                                'message_age': 3
                            }
                        },
                        'Port-channel99': {
                            'cost': 1,
                            'counters': {
                                'bpdu_received': 0,
                                'bpdu_sent': 2725886
                            },
                            'designated_bridge_address': '0026.981e.c642',
                            'designated_bridge_priority': 20590,
                            'designated_path_cost': 0,
                            'designated_port_id': '128.4194',
                            'designated_root_address': '0023.04ee.be1f',
                            'designated_root_priority': 20590,
                            'internal': False,
                            'link_type': 'point-to-point',
                            'name': 'Port-channel99',
                            'number_of_forward_transitions': 0,
                            'port_identifier': '128.4194',
                            'port_num': 4194,
                            'port_priority': 128,
                            'root_guard': True,
                            'status': 'designated',
                            'timers': {
                                'forward_delay': 0,
                                'hold': 0,
                                'message_age': 0
                            }
                        }
                    },
                    'root_of_the_spanning_tree': True,
                    'time_since_topology_change': '123:32:30',
                    'timers': {
                        'hello': 0,
                        'notification': 0,
                        'topology_change': 0
                    },
                    'times': {
                        'forwarding_delay': 15,
                        'hello': 2,
                        'hold': 1,
                        'max_age': 20,
                        'notification': 2,
                        'topology_change': 35
                    },
                    'topology_change_flag': False,
                    'topology_changes': 9,
                    'topology_detected_flag': False,
                    'topology_from_port': 'port-channel31',
                    'vlan_id': 110
                },
                122: {
                    'bridge_address': '0023.04ee.be1f',
                    'bridge_priority': 20480,
                    'bridge_sysid': 122,
                    'interfaces': {
                        'Port-channel31': {
                            'cost': 2,
                            'counters': {
                                'bpdu_received': 3245745,
                                'bpdu_sent': 3245614
                            },
                            'designated_bridge_address': '0026.981e.c642',
                            'designated_bridge_priority': 0,
                            'designated_path_cost': 0,
                            'designated_port_id': '128.4126',
                            'designated_root_address': '0023.04ee.be1f',
                            'designated_root_priority': 20602,
                            'internal': False,
                            'link_type': 'point-to-point',
                            'name': 'Port-channel31',
                            'number_of_forward_transitions': 1,
                            'port_identifier': '128.4126',
                            'port_num': 4126,
                            'port_priority': 128,
                            'port_type': 'network',
                            'status': 'root',
                            'timers': {
                                'forward_delay': 0,
                                'hold': 0,
                                'message_age': 3
                            }
                        },
                        'Port-channel99': {
                            'cost': 1,
                            'counters': {
                                'bpdu_received': 0,
                                'bpdu_sent': 2725887
                            },
                            'topology_change': True,
                            'designated_bridge_address': '0026.981e.c642',
                            'designated_bridge_priority': 20602,
                            'designated_path_cost': 0,
                            'designated_port_id': '128.4194',
                            'designated_root_address': '0023.04ee.be1f',
                            'designated_root_priority': 20602,
                            'internal': False,
                            'link_type': 'point-to-point',
                            'name': 'Port-channel99',
                            'number_of_forward_transitions': 0,
                            'port_identifier': '128.4194',
                            'port_num': 4194,
                            'port_priority': 128,
                            'root_guard': True,
                            'status': 'designated',
                            'timers': {
                                'forward_delay': 0,
                                'hold': 0,
                                'message_age': 0
                            }
                        }
                    },
                    'time_since_topology_change': '123:10:02',
                    'timers': {
                        'hello': 0,
                        'notification': 0,
                        'topology_change': 0
                    },
                    'times': {
                        'forwarding_delay': 15,
                        'hello': 2,
                        'hold': 1,
                        'max_age': 20,
                        'notification': 2,
                        'topology_change': 35
                    },
                    'topology_change_flag': False,
                    'topology_changes': 9,
                    'topology_detected_flag': False,
                    'topology_from_port': 'port-channel31',
                    'vlan_id': 122
                }
            }
        }
    }

    def test_detail_output_1(self):
        """MST detail with a single broken port parses as expected."""
        self.maxDiff = None
        self.dev_c3850 = Mock(**self.golden_output_1)
        obj = ShowSpanningTreeDetail(device = self.dev_c3850)
        parsed = obj.parse()
        self.assertEqual(parsed, self.golden_parsed_output_1)

    def test_detail_output_2(self):
        """MST detail with fex hello time and two ports parses as expected."""
        self.maxDiff = None
        self.dev_c3850 = Mock(**self.golden_output_2)
        obj = ShowSpanningTreeDetail(device = self.dev_c3850)
        parsed = obj.parse()
        self.assertEqual(parsed, self.golden_parsed_output_2)

    def test_detail_output_empty(self):
        """Empty device output raises SchemaEmptyParserError."""
        self.dev2 = Mock(**self.empty_output)
        obj = ShowSpanningTreeDetail(device = self.dev2)
        with self.assertRaises(SchemaEmptyParserError):
            parsed = obj.parse()

    def test_detail_output_3(self):
        """Rapid-PVST multi-VLAN detail parses as expected."""
        self.maxDiff = None
        self.dev_c3850 = Mock(**self.golden_output_3)
        obj = ShowSpanningTreeDetail(device = self.dev_c3850)
        parsed = obj.parse()
        self.assertEqual(parsed, self.golden_parsed_output_3)
class TestShowErrdisabledRecovery(unittest.TestCase):
dev_n7k = Device(name='n7000')
dev2 = Device(name = 'empty')
empty_output = {'execute.return_value' : ' '}
golden_output = {'execute.return_value': '''
PE1# sh errdisable recovery
ErrDisable Reason Timer Status
----------------- ------------
link-flap disabled
udld disabled
bpduguard disabled
loopback disabled
storm-ctrl disabled
sec-violation disabled
psec-violation disabled
vpc-peerlink disabled
failed-port-state disabled
event-debug disabled
event-debug1 disabled
event-debug2 disabled
event-debug3 disabled
event-debug4 disabled
miscabling disabled
Timer interval: 300
PE1#
'''}
golden_parsed_output = {
'timer_interval': 300,
'errdisable_reason': {
'bpduguard': False,
'event-debug': False,
'event-debug1': False,
'event-debug2': False,
'event-debug3': False,
'event-debug4': False,
'failed-port-state': False,
'link-flap': False,
'loopback': False,
'miscabling': False,
'psec-violation': False,
'sec-violation': False,
'storm-ctrl': False,
'udld': False,
'vpc-peerlink': False
}
}
golden_output_2 = {'execute.return_value': '''
N95_2# sh errdisable recovery
ErrDisable Reason Timer Status
----------------- ------------
link-flap disabled
udld disabled
bpduguard disabled
loopback disabled
storm-ctrl disabled
dhcp-rate-lim disabled
arp-inspection disabled
sec-violation disabled
psec-violation disabled
vpc-peerlink disabled
port-state-failed disabled
event-debug disabled
event-debug1 disabled
event-debug2 disabled
event-debug3 disabled
event-debug4 disabled
ip-addr-conflict disabled
ipqos-mgr-error disabled
ethpm disabled
ipqos-compat-failure disabled
syserr based disabled
CMM miscabling disabled
md-mismatch disabled
hw-res-exhaustion disabled
reinit-no-flap disabled
dcbx-error disabled
vlan-membership-erro disabled
pause-rate-limit disabled
inline-power disabled
sw-failure disabled
elo-session-down disabled
elo-discovery-timeou disabled
elo-capabilties-conf disabled
elo-miswired disabled
elo-link-fault disabled
elo-dying-gasp disabled
elo-critical-event disabled
Timer interval: 300
N95_2#
'''}
golden_parsed_output_2 = {
'timer_interval': 300,
'errdisable_reason': {
'CMM miscabling': False,
'arp-inspection': False,
'bpduguard': False,
'dcbx-error': False,
'dhcp-rate-lim': False,
'elo-capabilties-conf': False,
'elo-critical-event': False,
'elo-discovery-timeou': False,
'elo-dying-gasp': False,
'elo-link-fault': False,
'elo-miswired': False,
'elo-session-down': False,
'ethpm': False,
'event-debug': False,
'event-debug1': False,
'event-debug2': False,
'event-debug3': False,
'event-debug4': False,
'hw-res-exhaustion': False,
'inline-power': False,
'ip-addr-conflict': False,
'ipqos-compat-failure': False,
'ipqos-mgr-error': False,
'link-flap': False,
'loopback': False,
'md-mismatch': False,
'pause-rate-limit': False,
'port-state-failed': False,
'psec-violation': False,
'reinit-no-flap': False,
'sec-violation': False,
'storm-ctrl': False,
'sw-failure': False,
'syserr based': False,
'udld': False,
'vlan-membership-erro': False,
'vpc-peerlink': False
}
}
def test_output_empty(self):
self.dev2 = Mock(**self.empty_output)
obj = ShowErrdisableRecovery(device=self.dev2)
with self.assertRaises(SchemaEmptyParserError):
parsed = obj.parse()
def test_golden_output_1(self):
self.maxDiff = None
self.dev_n7k = Mock(**self.golden_output)
obj = ShowErrdisableRecovery(device=self.dev_n7k)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output)
def test_golden_output_2(self):
self.maxDiff = None
self.dev_n7k = Mock(**self.golden_output_2)
obj = ShowErrdisableRecovery(device=self.dev_n7k)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_2)
if __name__ == '__main__':
unittest.main()
| 56.582078 | 340 | 0.334438 |
aceb7d62d6caf959c88e9041481e6315bdde3271 | 3,968 | py | Python | labeller/Video_writter.py | EveCharbie/rectangle-labelling | 8dd9921059d10bf0dd378cb7f81333f7d4f7ec3b | [
"MIT"
] | null | null | null | labeller/Video_writter.py | EveCharbie/rectangle-labelling | 8dd9921059d10bf0dd378cb7f81333f7d4f7ec3b | [
"MIT"
] | null | null | null | labeller/Video_writter.py | EveCharbie/rectangle-labelling | 8dd9921059d10bf0dd378cb7f81333f7d4f7ec3b | [
"MIT"
] | null | null | null |
import cv2
import numpy as np
from tqdm.notebook import tqdm
from skvideo.io import FFmpegWriter
import pickle
import subprocess as sp
def load_video_frames(video_file, num_frames=None):
video = cv2.VideoCapture(video_file)
frames = []
if num_frames is None:
num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
num_frames_update = 0
for i in tqdm(range(num_frames), desc='Loading video'):
ret, frame = video.read()
if type(frame) == np.ndarray:
frames.append(frame)
num_frames_update+=1
video.release()
return frames, num_frames_update
def write_labeled_video(output_file, frames_clone, fps):
video = FFmpegWriter(output_file, inputdict={'-r': str(fps)}, outputdict={'-r': str(fps)})
frames_small = np.array(frames_clone)
for frame_num in tqdm(np.arange(frames_small.shape[0])):
video.writeFrame(frames_small[frame_num, :, :])
video.close()
def draw_points_and_lines():
global points_labels, circle_colors, circle_radius, frame_counter, small_image, active_points, rectangle_color
for i in range(8):
if active_points[frame_counter, i] == True:
mouse_click_position = (int(points_labels[label_keys[i]][0, frame_counter]),
int(points_labels[label_keys[i]][1, frame_counter]))
cv2.circle(small_image, mouse_click_position, circle_radius, color=circle_colors[i], thickness=-1)
for j in neighbors[i]:
if active_points[frame_counter, j] == True:
line_position = (int(points_labels[label_keys[j]][0, frame_counter]), int(points_labels[label_keys[j]][1, frame_counter]))
cv2.line(small_image, mouse_click_position, line_position, rectangle_color, thickness=3)
return
############################### code beginning #######################################################################
global frame_counter
circle_radius = 5
rectangle_color = (1, 1, 1)
circle_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
(100, 0, 0), (0, 100, 0), (0, 0, 100), (100, 100, 0)]
Image_name = "Video"
Trackbar_name = "Frames"
ratio_image = 1.5
movie_file = 'PI world v1 ps1.mp4'
frames, num_frames = load_video_frames(movie_file)
frames_clone = frames.copy()
[points_labels, active_points] = pickle.load(open(f'output/{movie_file[:-4]}_labeling_points.pkl', "rb"))
label_keys = [key for key in points_labels.keys()]
neighbors = [[1, 5, 3, 7],
[0, 4, 2, 6],
[1, 5, 3, 7],
[0, 4, 2, 6],
[1, 5, 3, 7],
[0, 4, 2, 6],
[1, 5, 3, 7],
[0, 4, 2, 6],]
width, height, rgb = np.shape(frames[0])
output_file = f'output/{movie_file[:-4]}_labeled.mp4'
fps = 30
# video_writer = cv2.VideoWriter_fourcc(*'XVID') # ('M', 'P', 'E', 'G') # ('m', 'p', '4', 'v') # ('M', 'P', '4', 'V')
# out_video = cv2.VideoWriter(output_file, video_writer, fps, (width,height))
for frame_counter in range(num_frames):
small_image = cv2.resize(frames[frame_counter], (int(round(width / ratio_image)), int(round(height / ratio_image))))
draw_points_and_lines()
frames_clone[frame_counter] = small_image # frame_gbr
# out_video.write(small_image)
# out_video.release()
write_labeled_video(output_file, frames_clone, fps)
# #
# command = [ "ffmpeg", # on Linux ans Mac OS, # FFMPEG_BIN = "ffmpeg.exe" # on Windows
# '-y', # (optional) overwrite output file if it exists
# '-f', 'rawvideo',
# '-vcodec','rawvideo',
# '-s', '420x360', # size of one frame
# '-pix_fmt', 'rgb24',
# '-r', f'{fps}', # frames per second
# '-i', '-', # The imput comes from a pipe
# '-an', # Tells FFMPEG not to expect any audio
# '-vcodec', 'mpeg'",
# 'my_output_videofile.mp4' ]
#
# pipe = sp.Popen( command, stdin=sp.PIPE, stderr=sp.PIPE)
| 30.523077 | 142 | 0.605595 |
aceb7eabdbc27b0850c3337b44855c01d60618ec | 2,399 | py | Python | usr/lib/gcc/i686-pc-msys/7.3.0/libstdc++.dll.a-gdb.py | qinjidong/esp8266-v3.0-msys32 | 12f8d5f697276061b37a8fd322f0a30dec1597d3 | [
"Apache-2.0"
] | 1 | 2019-02-22T05:19:40.000Z | 2019-02-22T05:19:40.000Z | usr/lib/gcc/i686-pc-msys/7.3.0/libstdc++.dll.a-gdb.py | qinjidong/esp8266-v3.0-msys32 | 12f8d5f697276061b37a8fd322f0a30dec1597d3 | [
"Apache-2.0"
] | null | null | null | usr/lib/gcc/i686-pc-msys/7.3.0/libstdc++.dll.a-gdb.py | qinjidong/esp8266-v3.0-msys32 | 12f8d5f697276061b37a8fd322f0a30dec1597d3 | [
"Apache-2.0"
] | 1 | 2020-11-04T07:54:34.000Z | 2020-11-04T07:54:34.000Z | # -*- python -*-
# Copyright (C) 2009-2017 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/usr/share/gcc-7.3.0/python'
libdir = '/usr/lib/gcc/i686-pc-msys/7.3.0'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Call a function as a plain import would not execute body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
| 38.693548 | 79 | 0.719466 |
aceb7f4c8cb4de502095154884326b193702d0da | 157 | py | Python | APIs/Oauth/venv/lib/python3.8/site-packages/sanic_routing/__init__.py | clarencejlee/jdp | d3d31db0138ff06f2f5ec592d85317941af4f280 | [
"MIT"
] | null | null | null | APIs/Oauth/venv/lib/python3.8/site-packages/sanic_routing/__init__.py | clarencejlee/jdp | d3d31db0138ff06f2f5ec592d85317941af4f280 | [
"MIT"
] | null | null | null | APIs/Oauth/venv/lib/python3.8/site-packages/sanic_routing/__init__.py | clarencejlee/jdp | d3d31db0138ff06f2f5ec592d85317941af4f280 | [
"MIT"
] | null | null | null | from .group import RouteGroup
from .route import Route
from .router import BaseRouter
__version__ = "0.6.2"
__all__ = ("BaseRouter", "Route", "RouteGroup")
| 22.428571 | 47 | 0.745223 |
aceb7f6068bbd3e0dac319a620a29baf167b36d8 | 12,248 | py | Python | IreneUtility/models/blackjackgame.py | MujyKun/IreneUtility | 1790c80e220e2d8bf7901286177893e985ec4cf4 | [
"MIT"
] | 1 | 2021-07-08T05:06:29.000Z | 2021-07-08T05:06:29.000Z | IreneUtility/models/blackjackgame.py | MujyKun/IreneUtility | 1790c80e220e2d8bf7901286177893e985ec4cf4 | [
"MIT"
] | null | null | null | IreneUtility/models/blackjackgame.py | MujyKun/IreneUtility | 1790c80e220e2d8bf7901286177893e985ec4cf4 | [
"MIT"
] | 3 | 2021-07-09T16:24:17.000Z | 2021-11-11T02:06:51.000Z | import random
import discord.ext.commands
from . import Game as Game_Base, User, PlayingCard
from ..util import u_logger as log
from typing import List, Optional
import asyncio
class BlackJackGame(Game_Base):
"""BlackJack Game for two users."""
def __init__(self, *args, first_player, first_player_bet):
"""
:param args: utility_obj, ctx
:param first_player_bet: Amount the first player bet.
"""
super().__init__(*args)
self.first_player: User = first_player
self.second_player: Optional[User] = None
self.first_player.in_currency_game = True # set first person to be in a game.
# cards the users have
self.first_player_cards: List[PlayingCard] = []
self.second_player_cards: List[PlayingCard] = []
# whether the players are done.
self.first_player_stand = False
self.second_player_stand = False
# we need the display name of second player, so we will hold their Context.
self.second_player_ctx: Optional[discord.ext.commands.Context] = None
# player bets
self.first_player_bet: int = first_player_bet
self.second_player_bet: int = 0
self.deck = [i+1 for i in range(52)] # a deck containing the ids of each non-custom playing card.
self.bust_value = 21
async def check_message(self):
"""Check incoming messages in the text channel and determines if the player wants to hit or stand."""
if self.force_ended:
return
stop_phrases = ['stop', 'end', 'quit']
hit_phrases = ['hit', 'new']
stand_phrases = ['stand', 'stay']
def check_player_response(message):
"""Checks if it is a player's response and filters."""
if message.channel != self.channel:
return False
elif message.content.lower() in stop_phrases:
return True
elif message.content.lower() in hit_phrases or message.content.lower() in stand_phrases:
return True
else:
return False
try:
msg = await self.ex.client.wait_for('message', check=check_player_response, timeout=60)
await msg.add_reaction(self.ex.keys.check_emoji)
if msg.content.lower() in stop_phrases:
await self.end_game()
return
elif msg.content.lower() in hit_phrases:
# let the player hit
return await self.hit(msg.author.id == self.first_player.id)
elif msg.content.lower() in stand_phrases:
# let the player stand
return await self.stand(msg.author.id == self.first_player.id)
else:
raise self.ex.exceptions.ShouldNotBeHere("A condition was not properly checked. "
"-> BlackJackGame.check_message()")
except asyncio.TimeoutError:
if not self.force_ended:
await self.end_game()
async def end_game(self):
"""End the blackjack game."""
self.first_player_stand = True
self.second_player_stand = True
self.first_player.in_currency_game = False
try:
self.second_player.in_currency_game = False
except:
# a second player may not exist.
pass
if self.force_ended:
await self.channel.send(await self.ex.get_msg(self.host_id, 'biasgame', 'force_closed'))
self.force_ended = True
try:
self.ex.cache.blackjack_games.remove(self)
except Exception as e:
log.useless(f"{e} (Exception) - Could not find the BlackJack Game to remove from cache.",
method=BlackJackGame.end_game)
return True
async def hit(self, first_player=True):
"""
Let a player hit
:param first_player: True if it is the first player that wants to hit. Otherwise its the second player.
"""
if await self.check_standing(first_player):
return await self.stand(first_player) # msg that the user is already standing will be sent.
random_card = await self.choose_random_card()
self.first_player_cards.append(random_card) if first_player else self.second_player_cards.append(random_card)
user_score = await self.calculate_score(self.first_player_cards if first_player else self.second_player_cards)
user_id = self.first_player.id if first_player else self.second_player.id
msg = await self.ex.get_msg(self.host_ctx, "blackjack", "hit", [
["mention", f"<@{user_id}>"],
["string", random_card.card_name],
["integer", f"0{user_score}" if len(str(user_score)) == 1 else user_score]])
await random_card.send_file(self.channel, message=msg, url=False)
if user_score >= 35:
await self.stand(first_player)
async def stand(self, first_player=True):
"""
Let a player stand
:param first_player: True if it is the first player that wants to stand. Otherwise its the second player.
"""
if first_player:
name = self.host_ctx.author.display_name
if self.first_player_stand:
# player was already standing
return await self.channel.send(await self.ex.get_msg(self.host_ctx, "blackjack", "already_standing",
["name", name]))
self.first_player_stand = True
else:
name = self.second_player_ctx.author.display_name
if self.second_player_stand:
# player was already standing
return await self.channel.send(await self.ex.get_msg(self.host_ctx, "blackjack", "already_standing",
["name", name]))
self.second_player_stand = True
return await self.channel.send(await self.ex.get_msg(self.host_ctx, "blackjack", "now_standing",
["name", name]))
async def check_standing(self, first_player=True):
"""
Check if a player is standing.
:param first_player: True if it is the first player that wants to stand. Otherwise its the second player.
:return: True if the user is standing.
"""
return self.first_player_stand if first_player else self.second_player_stand
async def choose_random_card(self) -> PlayingCard:
"""Chooses a random card that is available in the deck."""
random_card_id = random.choice(self.deck)
# choose a custom playing card from the card id
random_card = random.choice(self.ex.cache.playing_cards[random_card_id])
# remove card if from deck so it is never accessed again
self.deck.remove(random_card_id)
return random_card
async def calculate_score(self, cards: List[PlayingCard]) -> int:
"""Calculate the score of a player.
:param cards: List of PlayingCards the user has in their deck.
:return: Score of the player
"""
total_card_value = 0
aces = 0
for card in cards:
await asyncio.sleep(0) # bare yield
total_card_value += card.value
if card.value == 11:
aces += 1
# handle aces by reducing the value from 11 to 1 if the total value is over the bust value
while aces > 0 and total_card_value > self.bust_value:
total_card_value -= 10
aces -= 1
return total_card_value
async def determine_winner(self) -> User:
"""Determine the winner of the blackjack game."""
first_player_score = await self.calculate_score(self.first_player_cards)
second_player_score = await self.calculate_score(self.second_player_cards)
if first_player_score == second_player_score:
# tie
return None
elif first_player_score > self.bust_value and second_player_score > self.bust_value:
# both busted
winner = self.first_player if (first_player_score - self.bust_value) < (
second_player_score - self.bust_value) else self.second_player
elif first_player_score <= self.bust_value > second_player_score:
# neither busted
winner = self.first_player if (self.bust_value - first_player_score) < (
self.bust_value - second_player_score) else self.second_player
elif first_player_score > self.bust_value >= second_player_score:
# player 1 busted
winner = self.second_player
elif second_player_score > self.bust_value >= first_player_score:
# player 2 busted
winner = self.first_player
else:
raise self.ex.exceptions.ShouldNotBeHere("A condition was not properly checked for in "
"BlackJackGame.determine_winner().")
return winner
async def deal_with_bets(self):
"""Properly deal with bets and appropriately remove/add the bets from the players balances."""
winner = await self.determine_winner()
if not winner:
return
if winner == self.first_player:
await self.first_player.update_balance(add=self.second_player_bet)
await self.second_player.update_balance(remove=self.second_player_bet)
else:
await self.first_player.update_balance(remove=self.first_player_bet)
await self.second_player.update_balance(add=self.first_player_bet)
async def announce_winner(self):
"""Announce the winner of the game."""
winner = await self.determine_winner()
first_player_score = await self.calculate_score(self.first_player_cards)
second_player_score = await self.calculate_score(self.second_player_cards)
if not winner:
# tie
return await self.channel.send(await self.ex.get_msg(self.host_ctx, "blackjack", "announce_tie", [
["name", self.host_ctx.author.display_name], ["name2", self.second_player_ctx.author.display_name],
["integer", first_player_score]]))
if winner == self.first_player:
# first player won
return await self.channel.send(await self.ex.get_msg(self.host_ctx, "blackjack", "announce_winner", [
["name", self.host_ctx.author.display_name], ["name2", self.second_player_ctx.author.display_name],
["integer", first_player_score], ["integer2", second_player_score]]))
else:
# second player won
return await self.channel.send(await self.ex.get_msg(self.host_ctx, "blackjack", "announce_winner", [
["name", self.second_player_ctx.author.display_name],
["name2", self.host_ctx.author.display_name],
["integer", second_player_score],
["integer2", first_player_score]]))
async def finalize_game(self):
"""Finalize the game by dealing with bets, announcing the winner, and officially ending the game."""
await self.announce_winner()
await self.deal_with_bets()
await self.end_game()
async def process_game(self):
"""Start the blackjack game."""
try:
self.second_player.in_currency_game = True
for i in range(2):
await asyncio.sleep(0) # bare yield
await self.hit(True)
await self.hit(False)
while not self.first_player_stand or not self.second_player_stand:
await asyncio.sleep(0)
await self.check_message()
# We can now properly end the game because both players have stood.
if not self.force_ended:
await self.finalize_game()
except Exception as e:
await self.channel.send(await self.ex.get_msg(self.host_id, 'biasgame', 'unexpected_error'))
log.console(f"{e} (Exception)", method=self.process_game)
| 43.279152 | 118 | 0.61757 |
aceb80a5f1b07b7100458c235c6fa46f0e57c4be | 737 | py | Python | lib/interpreter.py | underc0de/reaper | ec74c8de08a57edfd4df745aad9c8c216c57f9d8 | [
"MIT"
] | null | null | null | lib/interpreter.py | underc0de/reaper | ec74c8de08a57edfd4df745aad9c8c216c57f9d8 | [
"MIT"
] | null | null | null | lib/interpreter.py | underc0de/reaper | ec74c8de08a57edfd4df745aad9c8c216c57f9d8 | [
"MIT"
] | null | null | null | import aiml
class Interpreter:
def __init__(self, emitter):
print u'Initializing the interpreter...'
self.emitter = emitter
self.kernel = aiml.Kernel()
# Kernel setup
self.kernel.verbose(False)
self.kernel.setBotPredicate('name', 'Reaper')
self.kernel.setBotPredicate('master', 'Iegor')
self.kernel.setPredicate('secure', "yes")
self.kernel.learn('startup.xml')
self.kernel.setPredicate('secure', "no")
self.kernel.respond('bootstrap')
def learn(self, data):
# The machine will store the responses by the user and will learn how to reply better the next time...
# print 'interpreter#learn func'
# At the moment...
res = self.kernel.respond(data['text'])
self.emitter.emit('send_message', res)
| 23.774194 | 104 | 0.709634 |
aceb80d3a25d68732079c0615dd3da4186eb6deb | 2,669 | py | Python | pytools/indices.py | Krissmedt/imprunko | 94171d0d47171cc4b199cd52f5f29385cbff903e | [
"MIT"
] | null | null | null | pytools/indices.py | Krissmedt/imprunko | 94171d0d47171cc4b199cd52f5f29385cbff903e | [
"MIT"
] | null | null | null | pytools/indices.py | Krissmedt/imprunko | 94171d0d47171cc4b199cd52f5f29385cbff903e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
class Stagger:
staggers = {
'ex':[1,0,0],
'ey':[0,1,0],
'ez':[0,0,1],
'bx':[0,1,1],
'by':[1,0,1],
'bz':[1,1,0],
'jx':[1,0,0],
'jy':[0,1,0],
'jz':[0,0,1],
'rh':[0,0,0],
}
# compute transformation indices for going from
# variable at loc1 staggering to loc2 staggering
def x2y(self, loc1, loc2):
offs1 = self.staggers[loc1]
offs2 = self.staggers[loc2]
ret = [
-0.5*(offs2[0] - offs1[0]),
-0.5*(offs2[1] - offs1[1]),
-0.5*(offs2[2] - offs1[2]),
]
return ret
def at(self, stg, stg0='rh'):
offs = self.x2y(stg0, stg)
ret = Stagger(self.x, self.y ,self.z)
ret.x += offs[0]
ret.y += offs[1]
ret.z += offs[2]
return ret
def __init__(self, x,y,z):
self.x = x
self.y = y
self.z = z
#overloaded operators
#+ float/int
#- float/int
#* float/int
#/ float/int
#** float/int
#+ stg
#- stg
def __add__(self, o):
# always upcast return object to Stagger class
ret = Stagger(self.x, self.y ,self.z)
#add component wise
if isinstance(o, self.__class__):
ret.x += o.x
ret.y += o.y
ret.z += o.z
return ret
else:
raise TypeError("unsupported operand type(s) for +: '{}' and '{}'").format(self.__class__, type(o))
def __sub__(self, o):
# always upcast return object to Stagger class
ret = Stagger(self.x, self.y ,self.z)
#subtract component wise
if isinstance(o, self.__class__):
ret.x -= o.x
ret.y -= o.y
ret.z -= o.z
return ret
else:
raise TypeError("unsupported operand type(s) for -: '{}' and '{}'").format(self.__class__, type(o))
def ind2loc(gridI, tileI, conf):
# grid coordinates
i, j, k = gridI
Nx = conf.Nx
Ny = conf.Ny
Nz = conf.Nz
# tile coordinates
l, m, n = tileI
NxMesh = conf.NxMesh
NyMesh = conf.NyMesh
NzMesh = conf.NzMesh
# grid spacing; start point + step
xmin = conf.xmin
ymin = conf.ymin
zmin = conf.zmin
dx = 1.0 # conf.dx
dy = 1.0 # conf.dy
dz = 1.0 # conf.dz
# calculate coordinate extent
x = xmin + i * (NxMesh) * dx + l * dx
y = ymin + j * (NyMesh) * dy + m * dy
z = zmin + k * (NzMesh) * dz + n * dz
return [x, y, z]
| 18.534722 | 112 | 0.461221 |
aceb80d3de307a6a0e63200c9aabe2ba87f35494 | 5,772 | py | Python | src/utils.py | mmsbrggr/amber | 5ccd9ad5774b1d5725b2f327d5d0bb248b628aae | [
"MIT"
] | 1 | 2020-10-26T15:10:14.000Z | 2020-10-26T15:10:14.000Z | src/utils.py | probing-lab/amber | 5ccd9ad5774b1d5725b2f327d5d0bb248b628aae | [
"MIT"
] | null | null | null | src/utils.py | probing-lab/amber | 5ccd9ad5774b1d5725b2f327d5d0bb248b628aae | [
"MIT"
] | null | null | null | import math
from enum import Enum, auto
from diofant import *
from mora.core import Program, get_solution as get_expected
from mora.input import LOOP_GUARD_VAR
LOG_NOTHING = 0
LOG_ESSENTIAL = 10
LOG_VERBOSE = 20
LOG_LEVEL = LOG_ESSENTIAL
class Answer(Enum):
FALSE = auto()
TRUE = auto()
UNKNOWN = auto()
def is_true(self):
return self is Answer.TRUE
def is_known(self):
return self is not Answer.UNKNOWN
def __str__(self):
if self is Answer.TRUE:
return "Yes"
if self is Answer.FALSE:
return "No"
if self is Answer.UNKNOWN:
return "Maybe"
__COUNTER = 0
def unique_symbol(s: str, **args):
"""
Returns a symbol which every time has a different name
"""
global __COUNTER
s = symbols(s + str(__COUNTER), **args)
__COUNTER += 1
return s
def get_max_0(expression: Expr, n: Symbol):
"""
Returns the maximum positive 0 of a given expression or 0 if it does not exist
"""
n_real = symbols("n", real=True)
try:
exp_zeros = solve(expression.xreplace({n: n_real}), n_real)
if exp_zeros == [{}]:
return 0
exp_zeros = [z[n_real] for z in exp_zeros if z[n_real].is_real]
except NotImplementedError:
exp_zeros = []
exp_zeros = [math.ceil(float(z)) for z in exp_zeros] + [0]
return max(exp_zeros)
def get_monoms(poly: Poly):
"""
Returns for the list of monoms for a given polynomial
"""
monoms = []
for powers in poly.monoms():
m = prod(var ** power for var, power in zip(poly.gens, powers))
if m != 1:
monoms.append(m)
return monoms
def get_polarity(expression: Expr, n: Symbol):
"""
Given an expression in n, returns whether or not the expression is positive and negative for some values of n
"""
expression = simplify(expression)
if expression.is_number:
return expression > 0, expression < 0
max_0 = get_max_0(expression, n)
if max_0 > 0:
pos = True
neg = True
else:
expr_1 = expression.subs({n: 1})
pos = expr_1 > 0
pos = bool(pos) if pos.is_Boolean else True
neg = expr_1 < 0
neg = bool(neg) if neg.is_Boolean else True
return pos, neg
def get_signums_in_expression(expression: Expr) -> [Expr]:
"""
Given an expression it returns all expressions which occur within the signum function.
E.g for sign(x+2)*sign(y**3) it returns [x+1, y**3]
"""
if isinstance(expression, sign):
return [expression.args[0]]
signums = []
for arg in expression.args:
signums += get_signums_in_expression(arg)
return signums
def get_all_monom_powers(monom: Expr) -> [Number]:
"""
Returns the degrees of all variables in a given monomial in a list
"""
monom = monom.as_poly(monom.free_symbols)
return list(monom.degree_list())
def monom_is_deterministic(monom: Expr, program: Program):
"""
Returns true iff a given monomial is deterministic, that means all variables in the monomial are deterministic
"""
variables_deterministic = [not program.updates[m].is_probabilistic for m in monom.free_symbols]
return all(variables_deterministic)
def divide_monom_powers_by(monom: Expr, divisor):
"""
Returns the given monom where all powers a divided by divisor
"""
monom = monom.as_poly(monom.free_symbols)
powers = monom.monoms()[0]
vars_with_powers = [v ** (p // divisor) for v, p in zip(monom.gens, powers)]
return prod(vars_with_powers)
def separate_rvs_from_monom(monom: Expr, program: Program):
"""
Given a monomial returns a list of all random variables (together with their powers)
it contains and the remaining monomial
"""
if not program.contains_rvs:
return [], monom
monom = monom.as_poly(monom.free_symbols)
powers = monom.monoms()[0]
vars_with_powers = [(v, p) for v, p in zip(monom.gens, powers)]
m = sympify(1)
rvs = []
for v, p in vars_with_powers:
if program.updates[v].is_random_var and not hasattr(program.updates[v], "branches"):
rvs.append((v, p))
else:
m *= v ** p
return rvs, m
def set_log_level(log_level):
global LOG_LEVEL
LOG_LEVEL = log_level
def log(message, level):
"""
Logs a message depending on the log level
"""
if level <= LOG_LEVEL:
print(message)
def amber_limit(expr, n):
if n not in expr.free_symbols:
return expr
return limit(expr, n, oo)
def flatten_substitution_choices(subs_choices):
"""
For a given dict {expr: (expr1, expr2)} returns a list of all possible substitution arising from choosing to subs
expr by expr1 or expr2.
"""
subs_choices = subs_choices.copy()
if not subs_choices:
return [{}]
result = []
expr = next(iter(subs_choices.keys()))
choice1, choice2 = subs_choices.pop(expr)
remaining_choices_flat = flatten_substitution_choices(subs_choices)
for c in remaining_choices_flat:
c1 = c.copy()
c1[expr] = choice1
result.append(c1)
if choice1 != choice2:
c2 = c.copy()
c2[expr] = choice2
result.append(c2)
return result
def substitute_deterministic_variables(expr, program: Program):
    """
    Substitutes deterministic variables in a given expression with their
    closed-form representation in n.
    """
    for symbol, update in program.updates.items():
        # Bug fix: the original used "str(symbol) is not LOOP_GUARD_VAR",
        # which compares object identity of a freshly built string and is
        # (almost) always True, so the loop-guard variable was never skipped.
        # Value comparison with != is what is intended here.
        if str(symbol) != LOOP_GUARD_VAR and not update.is_probabilistic:
            closed_form = get_expected(program, symbol.as_poly(program.variables))
            expr = expr.xreplace({symbol: closed_form})
    return expr
| 27.226415 | 117 | 0.642931 |
aceb81196fca2a7b2cc5c1b1f0981f95cd3c0104 | 3,630 | py | Python | samples/snippets/pipeline_service/create_training_pipeline_tabular_forecasting_sample.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | 1 | 2022-03-30T05:23:29.000Z | 2022-03-30T05:23:29.000Z | samples/snippets/pipeline_service/create_training_pipeline_tabular_forecasting_sample.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | null | null | null | samples/snippets/pipeline_service/create_training_pipeline_tabular_forecasting_sample.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START aiplatform_create_training_pipeline_tabular_forecasting_sample]
from google.cloud import aiplatform
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value
def create_training_pipeline_tabular_forecasting_sample(
    project: str,
    display_name: str,
    dataset_id: str,
    model_display_name: str,
    target_column: str,
    time_series_identifier_column: str,
    time_column: str,
    time_series_attribute_columns: str,
    unavailable_at_forecast: str,
    available_at_forecast: str,
    forecast_horizon: int,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    """Creates an AutoML tabular forecasting training pipeline through the
    PipelineService client and prints the API response.

    The transformation column names below are hard-coded for the sample
    COVID dataset (date/state_name/county_fips_code/confirmed_cases/deaths).
    """
    # The AI Platform services require regional API endpoints.
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform.gapic.PipelineServiceClient(client_options=client_options)
    # set the columns used for training and their data types
    transformations = [
        {"auto": {"column_name": "date"}},
        {"auto": {"column_name": "state_name"}},
        {"auto": {"column_name": "county_fips_code"}},
        {"auto": {"column_name": "confirmed_cases"}},
        {"auto": {"column_name": "deaths"}},
    ]
    data_granularity = {"unit": "day", "quantity": 1}
    # the inputs should be formatted according to the training_task_definition yaml file
    training_task_inputs_dict = {
        # required inputs
        "targetColumn": target_column,
        "timeSeriesIdentifierColumn": time_series_identifier_column,
        "timeColumn": time_column,
        "transformations": transformations,
        "dataGranularity": data_granularity,
        "optimizationObjective": "minimize-rmse",
        "trainBudgetMilliNodeHours": 8000,
        "timeSeriesAttributeColumns": time_series_attribute_columns,
        "unavailableAtForecast": unavailable_at_forecast,
        "availableAtForecast": available_at_forecast,
        "forecastHorizon": forecast_horizon,
    }
    training_task_inputs = json_format.ParseDict(training_task_inputs_dict, Value())
    training_pipeline = {
        "display_name": display_name,
        "training_task_definition": "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_forecasting_1.0.0.yaml",
        "training_task_inputs": training_task_inputs,
        "input_data_config": {
            "dataset_id": dataset_id,
            # 80/10/10 train/validation/test split of the dataset.
            "fraction_split": {
                "training_fraction": 0.8,
                "validation_fraction": 0.1,
                "test_fraction": 0.1,
            },
        },
        "model_to_upload": {"display_name": model_display_name},
    }
    parent = f"projects/{project}/locations/{location}"
    response = client.create_training_pipeline(
        parent=parent, training_pipeline=training_pipeline
    )
    print("response:", response)
# [END aiplatform_create_training_pipeline_tabular_forecasting_sample]
| 39.456522 | 127 | 0.706887 |
aceb814c4f5ac39abb49da13d48a8c8c920ef5e9 | 4,871 | py | Python | scripts/quick_plot.py | vedantdhruv96/kharma | 1159aa53d060087e1723166ceb922bd634c14a97 | [
"BSD-3-Clause"
] | 7 | 2021-07-16T02:14:32.000Z | 2021-12-25T22:51:45.000Z | scripts/quick_plot.py | vedantdhruv96/kharma | 1159aa53d060087e1723166ceb922bd634c14a97 | [
"BSD-3-Clause"
] | 17 | 2020-05-04T15:49:40.000Z | 2022-03-29T22:42:39.000Z | scripts/quick_plot.py | vedantdhruv96/kharma | 1159aa53d060087e1723166ceb922bd634c14a97 | [
"BSD-3-Clause"
] | 6 | 2021-12-01T23:25:13.000Z | 2022-03-31T06:03:48.000Z | """
File: quick_plot.py
BSD 3-Clause License
Copyright (c) 2020, AFD Group at UIUC
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
################################################################################
# #
# PLOT ONE PRIMITIVE #
# #
################################################################################
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import cProfile
# TODO package interface...
import pyHARM
import pyHARM.ana.plot as pplt
from pyHARM import pretty
from pyHARM.ana.units import get_units_M87
import pyHARM.parameters as parameters
# TODO parse these instead of hard-coding
# Toggle between plotting in array (index) space and physical coordinates.
USEARRSPACE = True
if not USEARRSPACE:
    SIZE = 50
    #window = (0, SIZE, 0, SIZE)
    window = (-SIZE, SIZE, -SIZE, SIZE)
    # window=(-SIZE/4, SIZE/4, 0, SIZE)
else:
    window = (0, 1, 0, 1)
    #window = (-0.1, 1.1, -0.1, 1.1)
pdf_window = (-10, 0)
FIGX = 10
FIGY = 10
# Positional CLI arguments: dump file, parameter file, variable name;
# the last argument is always taken as the output name.
dumpfile = sys.argv[1]
parfile = sys.argv[2]
var = sys.argv[3]
# Optionally take extra name, otherwise just set it to var
name = sys.argv[-1]
# With an extra M_unit argument (argv[4]) the variable is scaled to CGS units.
if len(sys.argv) > 5:
    munit = float(sys.argv[4])
    cgs = get_units_M87(munit)
    print("Uisng M_unit: ", munit)
    unit = cgs[sys.argv[3]]
    print("Will multiply by unit {} with value {}".format(sys.argv[3], unit))
    name = var + "_units"
else:
    unit = 1
#params = {'include_ghost': True}
params = {}
parameters.parse_parthenon_dat(params, parfile)
parameters.fix(params)
dump = pyHARM.load_dump(dumpfile, params=params)
# Plot vectors in 4-pane layout
# fig = plt.figure(figsize=(FIGX, FIGY))
# plt.title(pretty(var))
# if var in ['jcon', 'jcov', 'ucon', 'ucov', 'bcon', 'bcov']:
#     axes = [plt.subplot(2, 2, i) for i in range(1, 5)]
#     for n in range(4):
#         pplt.plot_xy(axes[n], dump, np.log10(dump[var][n] * unit), arrayspace=USEARRSPACE, window=window)
# elif "pdf_" in var:
#     fig = plt.figure(figsize=(FIGX, FIGY))
#     d_var, d_var_bins = dump[var]
#     plt.plot(d_var_bins[:-1], d_var)
#     if "_log_" in var:
#         plt.xlabel("Log10 value")
#     elif "_ln_" in var:
#         plt.xlabel("Ln value")
#     else:
#         plt.xlabel("Value")
#     plt.ylabel("Frequency")
#     plt.savefig(name+".png", dpi=100)
#     plt.close(fig)
#     exit() # We already saved the figure, we don't need another
# else:
#     # TODO allow specifying vmin/max, average from command line or above
#     ax = plt.subplot(1, 1, 1)
#     pplt.plot_xy(ax, dump, dump[var] * unit, log=False, arrayspace=USEARRSPACE, window=window)
# plt.tight_layout()
# plt.savefig(name + "_xy.png", dpi=100)
# plt.close(fig)
# Plot XZ
fig = plt.figure(figsize=(FIGX, FIGY))
# 4-vector quantities get one log-scale panel per component; scalars get one panel.
if var in ['jcon', 'jcov', 'ucon', 'ucov', 'bcon', 'bcov']:
    axes = [plt.subplot(2, 2, i) for i in range(1, 5)]
    for n in range(4):
        pplt.plot_xz(axes[n], dump, np.log10(dump[var][n] * unit), arrayspace=USEARRSPACE, window=window)
else:
    ax = plt.subplot(1, 1, 1)
    pplt.plot_xz(ax, dump, dump[var] * unit, log=False, arrayspace=USEARRSPACE, window=window)
#pplt.overlay_field(ax, dump, nlines=5, arrayspace=USEARRSPACE)
plt.tight_layout()
plt.savefig(name + "_xz.png", dpi=100)
plt.close(fig)
| 34.302817 | 107 | 0.640115 |
aceb826adf925bf241fd6e5917dad03b1c6decf3 | 1,497 | py | Python | h2o-bindings/bin/custom/R/gen_maxrglm.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | null | null | null | h2o-bindings/bin/custom/R/gen_maxrglm.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 58 | 2021-10-01T12:43:37.000Z | 2021-12-08T22:58:43.000Z | h2o-bindings/bin/custom/R/gen_maxrglm.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | null | null | null | extensions = dict(
set_required_params="""
parms$training_frame <- training_frame
args <- .verify_dataxy(training_frame, x, y)
parms$ignored_columns <- args$x_ignore
parms$response_column <- args$y
""",
module="""
#' Extracts the best R2 values for all predictor subset size.
#'
#' @param model is a H2OModel with algorithm name of maxrglm
#' @export
h2o.get_best_r2_values<- function(model) {
if( is(model, "H2OModel") && (model@algorithm=='maxrglm'))
return(return(model@model$best_r2_values))
}
#' Extracts the subset of predictor names that yield the best R2 value for each predictor subset size.
#'
#' @param model is a H2OModel with algorithm name of maxrglm
#' @export
h2o.get_best_model_predictors<-function(model) {
if ( is(model, "H2OModel") && (model@algorithm=='maxrglm'))
return(model@model$best_model_predictors)
}
"""
)
# Doc strings for the generated R wrapper (preamble + runnable example).
doc = dict(
    preamble="""
H2O MaxRGLM is used to build test best model with one predictor, two predictors, ... up to max_predictor_number
specified in the algorithm parameters. The best model is the one with the highest R2 value.
""",
    examples="""
library(h2o)
h2o.init()
# Run MaxRGLM of VOL ~ all predictors
prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
prostate <- h2o.uploadFile(path = prostate_path)
prostate$CAPSULE <- as.factor(prostate$CAPSULE)
model <- h2o.maxrglm(y = "VOL", x = c("CAPSULE", "RACE", "AGE", "RACE", "DPROS", "DCAPS", "PSA", "GLEASON"), training_frame = prostate)
"""
)
| 33.266667 | 135 | 0.708751 |
aceb82ff117a2192749520af6f5ba8bb148eb1c3 | 4,925 | py | Python | sitemessage/tests/test_utils.py | furins/django-sitemessage | 4cdfa0e78eb122dea835c9c4ef845f44e3a5eb90 | [
"BSD-3-Clause"
] | 49 | 2015-01-26T01:31:22.000Z | 2022-02-01T19:10:55.000Z | sitemessage/tests/test_utils.py | furins/django-sitemessage | 4cdfa0e78eb122dea835c9c4ef845f44e3a5eb90 | [
"BSD-3-Clause"
] | 10 | 2015-11-13T09:38:53.000Z | 2021-03-14T11:22:35.000Z | sitemessage/tests/test_utils.py | furins/django-sitemessage | 4cdfa0e78eb122dea835c9c4ef845f44e3a5eb90 | [
"BSD-3-Clause"
] | 10 | 2015-03-16T09:01:47.000Z | 2021-03-14T10:10:27.000Z | from sitemessage.messages.base import MessageBase
from sitemessage.messengers.base import MessengerBase
from sitemessage.models import Message, Subscription
from sitemessage.toolbox import schedule_messages, recipients, send_scheduled_messages, prepare_dispatches
from sitemessage.utils import register_message_types, register_messenger_objects, \
get_registered_messenger_objects, get_registered_messenger_object, get_registered_message_types, \
override_message_type_for_app, get_message_type_for_app
from .testapp.sitemessages import WONDERLAND_DOMAIN, MessagePlainForTest, MessagePlainDynamicForTest, MessageForTest, \
MessengerForTest
def test_register_messengers():
    # A freshly registered messenger class must be discoverable by its alias.
    messenger_cls = type('MyMessenger', (MessengerBase,), {})  # type: MessengerBase
    register_messenger_objects(messenger_cls)
    registered = get_registered_messenger_objects()
    assert messenger_cls.get_alias() in registered
def test_register_message_types():
    # A freshly registered message type must appear in the type registry.
    message_cls = type('MyMessage', (MessageBase,), {})  # type: MessageBase
    register_message_types(message_cls)
    registered = get_registered_message_types()
    assert message_cls.get_alias() in registered
def test_recipients(user_create):
    # Plain strings and User objects are both resolved to addressed recipients.
    user = user_create(attributes=dict(username='myuser'))
    result = recipients('test_messenger', ['gogi', 'givi', user])
    expected_addresses = [
        f'gogi{WONDERLAND_DOMAIN}',
        f'givi{WONDERLAND_DOMAIN}',
        f'user_myuser{WONDERLAND_DOMAIN}',
    ]
    assert len(result) == len(expected_addresses)
    for recipient, address in zip(result, expected_addresses):
        assert recipient.address == address
        assert recipient.messenger == 'test_messenger'
def test_prepare_undispatched():
    # Two subscriptions to the same message type produce one dispatch each.
    Message.create('testplain', {MessageBase.SIMPLE_TEXT_ID: 'abc'})
    Subscription.create('fred', 'testplain', 'test_messenger')
    Subscription.create('colon', 'testplain', 'test_messenger')
    dispatches = prepare_dispatches()
    assert [dispatch.address for dispatch in dispatches] == ['fred', 'colon']
def test_send_scheduled_messages():
    # This dynamic message won't count: it doesn't fit the priority filter below.
    schedule_messages(
        MessagePlainDynamicForTest('my_dyn_msg'),
        recipients('test_messenger', ['three', 'four']))
    messenger = get_registered_messenger_object('test_messenger')  # type: MessengerForTest
    schedule_messages(MessagePlainForTest('my_message'), recipients(messenger, ['one', 'two']))
    send_scheduled_messages(priority=MessagePlainForTest.priority)
    sent = messenger.last_send
    assert sent['message_model'].cls == 'testplain'
    assert sent['message_cls'] == MessagePlainForTest
    dispatches = sent['dispatch_models']
    assert len(dispatches) == 2
    assert all(dispatch.message_cache == 'my_message' for dispatch in dispatches)
def test_send_scheduled_messages_dynamic_context():
    # Dynamic messages compile a per-recipient message cache.
    messenger = get_registered_messenger_object('test_messenger')  # type: MessengerForTest
    schedule_messages(MessagePlainDynamicForTest('my_dyn_msg'), recipients(messenger, ['three', 'four']))
    send_scheduled_messages()
    sent = messenger.last_send
    assert sent['message_model'].cls == 'testplain_dyn'
    assert sent['message_cls'] == MessagePlainDynamicForTest
    dispatches = sent['dispatch_models']
    assert len(dispatches) == 2
    assert dispatches[0].message_cache == f'my_dyn_msg -- three{WONDERLAND_DOMAIN}'
    assert dispatches[1].message_cache == f'my_dyn_msg -- four{WONDERLAND_DOMAIN}'
def test_schedule_message(user):
    # Default priority comes from the message class.
    message = MessagePlainForTest('schedule_func')
    model, _ = schedule_messages(message)[0]
    assert model.cls == message.get_alias()
    assert model.context == message.get_context()
    assert model.priority == MessagePlainForTest.priority
    assert not model.dispatches_ready
    # An explicit priority overrides the class default.
    message = MessagePlainForTest('schedule_func')
    model, _ = schedule_messages(message, priority=33)[0]
    assert model.cls == message.get_alias()
    assert model.context == message.get_context()
    assert model.priority == 33
    assert not model.dispatches_ready
    # Scheduling a bare string with recipients creates dispatches right away.
    model, dispatch_models = schedule_messages(
        'simple message',
        recipients('test_messenger', ['gogi', 'givi']), sender=user)[0]
    assert model.cls == 'plain'
    assert model.context == {'use_tpl': False, MessageBase.SIMPLE_TEXT_ID: 'simple message', 'tpl': None}
    assert model.sender == user
    assert model.dispatches_ready
    assert len(dispatch_models) == 2
    assert dispatch_models[0].address == f'gogi{WONDERLAND_DOMAIN}'
    assert dispatch_models[0].messenger == 'test_messenger'
def test_override_message_type_for_app():
    # Before the override, the default mapping is returned.
    assert get_message_type_for_app('myapp', 'testplain') is MessagePlainForTest
    # After overriding, the app-specific alias resolves to the new type.
    override_message_type_for_app('myapp', 'sometype', 'test_message')
    assert get_message_type_for_app('myapp', 'sometype') is MessageForTest
| 40.368852 | 119 | 0.742335 |
aceb8328ed63c098bca96679567a0daca33bb9f1 | 2,845 | py | Python | lib/xslt/numbers/en.py | zepheira/amara | d3ffe07d6e2266b34d72b012a82d572c8edbf1e7 | [
"Apache-2.0"
] | 6 | 2015-01-30T03:50:36.000Z | 2022-03-20T16:09:58.000Z | lib/xslt/numbers/en.py | zepheira/amara | d3ffe07d6e2266b34d72b012a82d572c8edbf1e7 | [
"Apache-2.0"
] | 2 | 2015-02-04T17:18:47.000Z | 2019-09-27T23:39:52.000Z | lib/xslt/numbers/en.py | zepheira/amara | d3ffe07d6e2266b34d72b012a82d572c8edbf1e7 | [
"Apache-2.0"
] | 6 | 2015-02-04T16:16:18.000Z | 2019-10-30T20:07:48.000Z | ########################################################################
# amara/xslt/numbers/en.py
from amara.xpath import datatypes
from amara.xslt.numbers import formatter
ASCII_DIGITS = '0123456789'
ASCII_LOWER = 'abcdefghijklmnopqrstuvwxyz'
ASCII_UPPER = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
class english_formatter(formatter):
    """xsl:number formatter for English output.

    Supports arabic ('1', '01', ...), roman ('I'/'i') and alphabetic
    ('A'/'a') format tokens.
    """
    language = 'en'

    # Pre-computed roman digit tables in ascending order of base
    # (1, 10, 100, 1000, ...). The combining overline (U+0305) marks the
    # "times 1000" digits of the extended roman system.
    _roman_digits = _roman_upper, _roman_lower = [], []
    for multiplier, combining in ((1, ''), (1000, u'\u0305')):
        for base, one, five, ten in ((1, u'I', u'V', u'X'),
                                     (10, u'X', u'L', u'C'),
                                     (100, u'C', u'D', u'M')):
            base *= multiplier
            one += combining
            five += combining
            ten += combining
            digits = (u'', one, one*2, one*3, one+five,
                      five, five+one, five+one*2, five+one*3, one+ten)
            _roman_upper.append((base, digits))
            _roman_lower.append((base, map(unicode.lower, digits)))
    _roman_max = base * len(_roman_upper[-1][1])

    def _alpha_sequence(self, number, alphabet):
        # Bijective base-N numbering: 1..size map to single letters and
        # size+1 starts 'aa', 'ab', ... Digits are prepended so the most
        # significant letter comes first (the original appended them,
        # yielding reversed sequences such as 'ba' for 28).
        size = len(alphabet)
        digits = ''
        while number > size:
            number, ordinal = divmod(number - 1, size)
            digits = alphabet[ordinal] + digits
        return alphabet[number - 1] + digits

    def _format(self, number, token, letter_value, separator, grouping):
        if token in ('I', 'i') and letter_value != 'alphabetic':
            # roman numerals
            if 0 < number < self._roman_max:
                result = []
                # Walk the digit tables from the largest base down so the most
                # significant digits are emitted first. (The original ascending
                # loop with a strict '>' test produced wrong digits and
                # IndexErrors for most inputs.)
                for base, digits in reversed(self._roman_digits[token == 'i']):
                    index, number = divmod(number, base)
                    result.append(digits[index])
                result = u''.join(result)
            else:
                # Out of representable range: fall back to arabic.
                result = '%d' % number
        elif token in ('A', 'a'):
            # alphabetic numbering
            alphabet = ASCII_LOWER if token == 'a' else ASCII_UPPER
            result = self._alpha_sequence(number, alphabet)
        else:
            # arabic numerals; the token length gives the zero-padding width
            if token[-1:] != '1':
                # Unsupported format token, fall back to '1'.
                # (The original had "token == '1'", a no-op comparison.)
                token = '1'
            result = '%0*d' % (len(token), number)
            if separator and grouping:
                # Insert the separator every `grouping` digits from the right.
                start = -len(result)  # original read undefined name `numeric`
                step = -grouping
                if start < step:
                    groups = []
                    for next in reversed(xrange(step, start, step)):
                        groups.append(result[start:next])
                        start = next
                    groups.append(result[start:])
                    result = separator.join(groups)
        return datatypes.string(result)
| 38.445946 | 72 | 0.491388 |
aceb83c0d3d2395962da4fc2421721ba925d0084 | 784 | py | Python | test/test_tiled-bak.py | levibaba/pytiled_parser | c0d464359a7255e1c764a623b8b472ab1fe98cc6 | [
"MIT"
] | null | null | null | test/test_tiled-bak.py | levibaba/pytiled_parser | c0d464359a7255e1c764a623b8b472ab1fe98cc6 | [
"MIT"
] | null | null | null | test/test_tiled-bak.py | levibaba/pytiled_parser | c0d464359a7255e1c764a623b8b472ab1fe98cc6 | [
"MIT"
] | null | null | null | import arcade
import arcade.tiled
import pprint
pp = pprint.PrettyPrinter(indent=4, compact=True, width=200)
class MyTestWindow(arcade.Window):
    """Arcade window that loads a Tiled map and draws all its layers each frame."""
    def __init__(self, width, height, title, map_name):
        super().__init__(width, height, title)
        self.layers = []
        my_map = arcade.tiled.read_tiled_map(map_name, 1)
        pp.pprint(my_map.layers_int_data)
        # One drawable sprite group per integer-data layer of the map.
        for layer in my_map.layers_int_data:
            self.layers.append(arcade.tiled.generate_sprites(
                my_map, layer, 1, "../arcade/arcade/examples/"))
    def on_draw(self):
        # Clear the frame and draw every pre-built layer in order.
        arcade.start_render()
        for layer in self.layers:
            layer.draw()
# Path to the example Tiled map used for this manual test.
MAP_NAME = '../arcade/arcade/examples/map_base64_gzip.tmx'
# Create the window and enter the arcade event loop (blocks until closed).
test = MyTestWindow(640, 800, "meme", MAP_NAME)
arcade.run()
| 27.034483 | 64 | 0.663265 |
aceb84dff0fb02c1e39d6dd5c57e448d703b9857 | 424 | py | Python | IFCtoLBD_Python/IFCtoLBD.py | Design-Computation-RWTH/IFCtoLBD | f3605907f47facebd694ea377b8e6c3e28583ed3 | [
"Apache-2.0"
] | 34 | 2018-06-18T06:34:54.000Z | 2022-03-18T11:04:08.000Z | IFCtoLBD_Python/IFCtoLBD.py | jyrkioraskari/IFCtoLBD | 6ae4def825557e009bfd44e04cdfb65bfccec696 | [
"Apache-2.0"
] | 18 | 2018-05-15T15:19:42.000Z | 2021-09-23T11:53:55.000Z | IFCtoLBD_Python/IFCtoLBD.py | Design-Computation-RWTH/IFCtoLBD | f3605907f47facebd694ea377b8e6c3e28583ed3 | [
"Apache-2.0"
] | 13 | 2018-05-24T09:21:55.000Z | 2022-03-13T21:45:58.000Z | # !/usr/bin/env python3
import jpype
# Enable Java imports
import jpype.imports
# Pull in types
from jpype.types import *
jpype.startJVM(classpath = ['jars/*'])
from org.linkedbuildingdata.ifc2lbd import IFCtoLBDConverter
lbdconverter = IFCtoLBDConverter("https://dot.dc.rwth-aachen.de/IFCtoLBDset", 3)
model=lbdconverter.convert("Duplex_A_20110505.ifc");
model.write(jpype.java.lang.System.out)
jpype.shutdownJVM() | 21.2 | 81 | 0.775943 |
aceb84fa6dfc34e9c2f9c50407ea3dcc239e5654 | 17,407 | py | Python | utils/losses.py | linleon1995/prior_guiding_network | 6d1298cbfa2f7391d0d7673f85a71c07d8a10dfe | [
"Apache-2.0"
] | 1 | 2021-07-14T12:08:59.000Z | 2021-07-14T12:08:59.000Z | utils/losses.py | linleon1995/prior_guiding_network | 6d1298cbfa2f7391d0d7673f85a71c07d8a10dfe | [
"Apache-2.0"
] | 2 | 2021-08-25T16:16:35.000Z | 2022-02-10T04:40:18.000Z | utils/losses.py | linleon1995/prior_guiding_network | 6d1298cbfa2f7391d0d7673f85a71c07d8a10dfe | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import six
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from core import preprocess_utils
from core import utils
_EPSILON = 1e-9
def get_label_weight_mask(labels, ignore_label, num_classes, label_weights=1.0, keep_class_dims=False):
  """Gets the label weight mask.
  Args:
    labels: A Tensor of labels with the shape of [-1].
    ignore_label: Integer, label to ignore.
    num_classes: Integer, the number of semantic classes.
    label_weights: A float or a list of weights. If it is a float, it means all
      the labels have the same weight. If it is a list of weights, then each
      element in the list represents the weight for the label of its index, for
      example, label_weights = [0.1, 0.5] means the weight for label 0 is 0.1
      and the weight for label 1 is 0.5.
    keep_class_dims: Boolean. If True the returned mask keeps an explicit
      class dimension (shape [-1, num_classes], suitable for per-class losses
      such as sigmoid cross entropy); if False the class dimension is reduced
      away (shape [-1]).
  Returns:
    A Tensor of label weights with the same shape of labels, each element is the
    weight for the label with the same index in labels and the element is 0.0
    if the label is to ignore.
  Raises:
    ValueError: If label_weights is neither a float nor a list, or if
      label_weights is a list and its length is not equal to num_classes.
  """
  if not isinstance(label_weights, (float, list)):
    raise ValueError(
        'The type of label_weights is invalid, it must be a float or a list.')
  if isinstance(label_weights, list) and len(label_weights) != num_classes:
    raise ValueError(
        'Length of label_weights must be equal to num_classes if it is a list, '
        'label_weights: %s, num_classes: %d.' % (label_weights, num_classes))
  not_ignore_mask = tf.not_equal(labels, ignore_label)
  not_ignore_mask = tf.cast(not_ignore_mask, tf.float32)
  if keep_class_dims:
    # Broadcast the [-1] ignore mask to [-1, num_classes].
    not_ignore_mask = tf.tile(tf.expand_dims(not_ignore_mask, axis=1), [1, num_classes])
  if isinstance(label_weights, float):
    # Uniform weighting: no per-class lookup needed.
    return not_ignore_mask * label_weights
  label_weights = tf.constant(label_weights, tf.float32)
  if keep_class_dims:
    # Every pixel gets the full per-class weight vector.
    all_classes_label = tf.tile(tf.expand_dims(tf.ones_like(labels, dtype=tf.float32), axis=1), [1, num_classes])
    weight_mask = label_weights * all_classes_label
  else:
    # Dot product
    weight_mask = tf.einsum('...y,y->...',
                            tf.one_hot(labels, num_classes, dtype=tf.float32),
                            label_weights)
  return tf.multiply(not_ignore_mask, weight_mask)
def _div_maybe_zero(total_loss, num_present):
  """Normalizes the total loss with the number of present pixels."""
  # The indicator factor forces the result to 0 when nothing is present;
  # the clamped denominator guards the division itself.
  has_present = tf.to_float(num_present > 0)
  safe_denominator = tf.maximum(1e-5, num_present)
  return has_present * tf.math.divide(total_loss, safe_denominator)
def add_sigmoid_cross_entropy_loss_for_each_scale(scales_to_logits,
                                                  labels,
                                                  num_classes,
                                                  ignore_label,
                                                  dilated_kernel=None,
                                                  loss_weight=1.0,
                                                  upsample_logits=True,
                                                  hard_example_mining_step=0,
                                                  top_k_percent_pixels=1.0,
                                                  gt_is_matting_map=False,
                                                  scope=None):
  """Adds sigmoid cross entropy loss for logits of each scale.

  Thin wrapper that delegates to
  add_softmax_cross_entropy_loss_for_each_scale with the sigmoid activation.
  Note: dilated_kernel is accepted for interface compatibility but is not
  forwarded or used.
  """
  add_softmax_cross_entropy_loss_for_each_scale(
      scales_to_logits=scales_to_logits,
      labels=labels,
      num_classes=num_classes,
      ignore_label=ignore_label,
      loss_weight=loss_weight,
      upsample_logits=upsample_logits,
      hard_example_mining_step=hard_example_mining_step,
      top_k_percent_pixels=top_k_percent_pixels,
      gt_is_matting_map=gt_is_matting_map,
      activation="sigmoid",
      scope=scope)
def add_sigmoid_dice_loss_for_each_scale(scales_to_logits,
                                         labels,
                                         num_classes,
                                         ignore_label,
                                         alpha=0.5,
                                         beta=0.5,
                                         loss_weight=1.0,
                                         scope=None):
  """Adds sigmoid dice loss for logits of each scale.

  Thin wrapper that delegates to add_softmax_dice_loss_for_each_scale with
  the sigmoid activation.
  """
  add_softmax_dice_loss_for_each_scale(
      scales_to_logits=scales_to_logits,
      labels=labels,
      num_classes=num_classes,
      ignore_label=ignore_label,
      alpha=alpha,
      beta=beta,
      loss_weight=loss_weight,
      activation="sigmoid",
      scope=scope)
def add_softmax_generalized_dice_loss_for_each_scale(scales_to_logits,
                                                     labels,
                                                     num_classes,
                                                     ignore_label,
                                                     alpha=0.5,
                                                     beta=0.5,
                                                     loss_weight=1.0,
                                                     scope=None):
  """Adds softmax genralized dice loss (GDL) for logits of each scale.
  Classes are weighted by the reciprocal of their squared area, so small
  structures contribute as much as large ones.
  Args:
    scales_to_logits: A map from logits names for different scales to logits,
      each with shape [batch, logits_height, logits_width, num_classes].
    labels: Groundtruth labels with shape [batch, image_height, image_width, 1].
    num_classes: Integer, number of target classes.
    ignore_label: Integer, label to ignore (not used by this loss body).
    alpha: Float. Unused here; kept for signature parity with the other dice
      losses in this module.
    beta: Float. Unused (see alpha).
    loss_weight: A float, multiplied into the per-class GDL weights.
    scope: String, the scope for the loss.
  Raises:
    ValueError: If labels is None.
  """
  if labels is None:
    raise ValueError('No label for softmax dice loss.')
  for scale, logits in scales_to_logits.items():
    loss_scope = None
    if scope:
      loss_scope = '%s_%s' % (scope, scale)
    shape = preprocess_utils.resolve_shape(labels, 4)
    # Upsample logits to the labels' spatial resolution.
    logits = tf.image.resize_bilinear(
        logits,
        shape[1:3],
        align_corners=True)
    scaled_labels = labels
    scaled_labels = tf.reshape(scaled_labels, shape=[-1, shape[1]*shape[2]])
    logits = tf.reshape(logits, shape=[-1, shape[1]*shape[2], num_classes])
    train_labels = tf.one_hot(
        scaled_labels, num_classes, on_value=1.0, off_value=0.0)
    # The reciprocal of label square for loss weight
    area = tf.reduce_sum(train_labels, axis=1)
    weights = tf.ones_like(area) / (tf.square(area)+_EPSILON)
    # Classes absent from the image would get a huge 1/eps weight; zero them
    # out so empty classes cannot dominate the loss.
    weights = tf.where(tf.greater(weights, tf.ones_like(weights)), tf.zeros_like(weights), weights)
    weights = weights * loss_weight
    with tf.name_scope(loss_scope, 'softmax_all_pixel_loss',
                       [logits, train_labels, weights]):
      # Compute the loss for all pixels.
      prediction = tf.nn.softmax(logits, 2)
      train_labels = tf.stop_gradient(
          train_labels, name='train_labels_stop_gradient')
      intersection = tf.reduce_sum(train_labels*prediction, axis=1)
      union = tf.reduce_sum(train_labels, axis=1) + tf.reduce_sum(prediction, axis=1)
      weighted_intersection = tf.reduce_sum(tf.multiply(intersection, weights), axis=1)
      weighted_union = tf.reduce_sum(tf.multiply(union, weights), axis=1)
      loss = 1 - 2*tf.reduce_mean((weighted_intersection+_EPSILON)/(weighted_union+_EPSILON))
      tf.losses.add_loss(loss)
def add_softmax_dice_loss_for_each_scale(scales_to_logits,
                                         labels,
                                         num_classes,
                                         ignore_label,
                                         alpha=0.5,
                                         beta=0.5,
                                         loss_weight=1.0,
                                         activation="softmax",
                                         scope=None):
  """Adds softmax dice loss for logits of each scale.
  Args:
    scales_to_logits: A map from logits names for different scales to logits,
      each with shape [batch, logits_height, logits_width, num_classes].
    labels: Groundtruth labels with shape [batch, image_height, image_width, 1].
    num_classes: Integer, number of target classes.
    ignore_label: Integer, label to ignore (not used by this loss body).
    alpha: Float. Unused here; kept for signature parity.
    beta: Float. Unused (see alpha).
    loss_weight: A float, applied uniformly to the per-class dice scores.
    activation: "softmax" or "sigmoid", selects how logits become predictions.
    scope: String, the scope for the loss.
  Raises:
    ValueError: If labels is None or activation is unknown.
  """
  if labels is None:
    raise ValueError('No label for softmax dice loss.')
  for scale, logits in scales_to_logits.items():
    loss_scope = None
    if scope:
      loss_scope = '%s_%s' % (scope, scale)
    logits = tf.image.resize_bilinear(logits,
                                      preprocess_utils.resolve_shape(labels, 4)[1:3],
                                      align_corners=True)
    # NOTE(review): `labels` is rebound to its flattened form here, so a
    # second loop iteration (more than one scale) would resize an already
    # flattened tensor — looks safe only for a single scale; confirm.
    labels = tf.reshape(labels, shape=[-1])
    weights = tf.constant(loss_weight, tf.float32)
    logits = tf.reshape(logits, shape=[-1, num_classes])
    train_labels = tf.one_hot(
        labels, num_classes, on_value=1.0, off_value=0.0)
    with tf.name_scope(loss_scope, '%s_all_pixel_loss' %activation,
                       [logits, train_labels, weights]):
      # Compute the loss for all pixels.
      if activation == "softmax":
        prediction = tf.nn.softmax(logits, 1)
      elif activation == "sigmoid":
        prediction = tf.nn.sigmoid(logits)
      else:
        raise ValueError("Unknown activation for prediction")
      train_labels = tf.stop_gradient(
          train_labels, name='train_labels_stop_gradient')
      intersection = tf.reduce_sum(train_labels*prediction, 0)
      union = tf.reduce_sum(train_labels, 0) + tf.reduce_sum(prediction, 0)
      # Per-class soft dice score, then 1 - mean dice as the loss.
      pixel_losses = (2*intersection+_EPSILON) / (union+_EPSILON)
      weighted_pixel_losses = tf.multiply(pixel_losses, weights)
      loss = 1 - tf.reduce_mean(weighted_pixel_losses)
      tf.losses.add_loss(loss)
def add_softmax_cross_entropy_loss_for_each_scale(scales_to_logits,
                                                  labels,
                                                  num_classes,
                                                  ignore_label,
                                                  loss_weight=1.0,
                                                  upsample_logits=True,
                                                  hard_example_mining_step=0,
                                                  top_k_percent_pixels=1.0,
                                                  gt_is_matting_map=False,
                                                  activation="softmax",
                                                  scope=None):
  """Adds softmax cross entropy loss for logits of each scale.
  Args:
    scales_to_logits: A map from logits names for different scales to logits.
      The logits have shape [batch, logits_height, logits_width, num_classes].
    labels: Groundtruth labels with shape [batch, image_height, image_width, 1].
    num_classes: Integer, number of target classes.
    ignore_label: Integer, label to ignore.
    loss_weight: A float or a list of loss weights. If it is a float, it means
      all the labels have the same weight. If it is a list of weights, then each
      element in the list represents the weight for the label of its index, for
      example, loss_weight = [0.1, 0.5] means the weight for label 0 is 0.1 and
      the weight for label 1 is 0.5.
    upsample_logits: Boolean, upsample logits or not.
    hard_example_mining_step: An integer, the training step in which the hard
      exampling mining kicks off. Note that we gradually reduce the mining
      percent to the top_k_percent_pixels. For example, if
      hard_example_mining_step = 100K and top_k_percent_pixels = 0.25, then
      mining percent will gradually reduce from 100% to 25% until 100K steps
      after which we only mine top 25% pixels.
    top_k_percent_pixels: A float, the value lies in [0.0, 1.0]. When its value
      < 1.0, only compute the loss for the top k percent pixels (e.g., the top
      20% pixels). This is useful for hard pixel mining.
    gt_is_matting_map: If true, the groundtruth is a matting map of confidence
      score. If false, the groundtruth is an integer valued class mask.
    activation: "softmax" or "sigmoid"; selects the cross-entropy variant and
      whether the weight mask keeps a per-class dimension.
    scope: String, the scope for the loss.
  Raises:
    ValueError: Label or logits is None, or groundtruth is matting map while
      label is not floating value.
  """
  if labels is None:
    raise ValueError('No label for softmax cross entropy loss.')
  # If input groundtruth is a matting map of confidence, check if the input
  # labels are floating point values.
  if gt_is_matting_map and not labels.dtype.is_floating:
    raise ValueError('Labels must be floats if groundtruth is a matting map.')
  for scale, logits in six.iteritems(scales_to_logits):
    loss_scope = None
    if scope:
      loss_scope = '%s_%s' % (scope, scale)
    if upsample_logits:
      # Label is not downsampled, and instead we upsample logits.
      logits = tf.image.resize_bilinear(
          logits,
          preprocess_utils.resolve_shape(labels, 4)[1:3],
          align_corners=True)
      scaled_labels = labels
    else:
      # Label is downsampled to the same size as logits.
      # When gt_is_matting_map = true, label downsampling with nearest neighbor
      # method may introduce artifacts. However, to avoid ignore_label from
      # being interpolated with other labels, we still perform nearest neighbor
      # interpolation.
      # TODO(huizhongc): Change to bilinear interpolation by processing padded
      # and non-padded label separately.
      if gt_is_matting_map:
        tf.logging.warning(
            'Label downsampling with nearest neighbor may introduce artifacts.')
      scaled_labels = tf.image.resize_nearest_neighbor(
          labels,
          preprocess_utils.resolve_shape(logits, 4)[1:3],
          align_corners=True)
    scaled_labels = tf.reshape(scaled_labels, shape=[-1])
    # Sigmoid cross entropy is computed per class, so the weight mask keeps a
    # class dimension; softmax reduces over classes and uses a flat mask.
    if activation == "sigmoid":
      keep_class_dims = True
      loss_func = tf.nn.sigmoid_cross_entropy_with_logits
    elif activation == "softmax":
      keep_class_dims = False
      loss_func = tf.nn.softmax_cross_entropy_with_logits_v2
    else:
      raise ValueError("Unknown activation for prediction")
    weights = get_label_weight_mask(
        scaled_labels, ignore_label, num_classes, label_weights=loss_weight, keep_class_dims=keep_class_dims)
    # Dimension of keep_mask is equal to the total number of pixels.
    keep_mask = tf.cast(
        tf.not_equal(scaled_labels, ignore_label), dtype=tf.float32)
    train_labels = None
    logits = tf.reshape(logits, shape=[-1, num_classes])
    if gt_is_matting_map:
      # When the groundtruth is integer label mask, we can assign class
      # dependent label weights to the loss. When the groundtruth is image
      # matting confidence, we do not apply class-dependent label weight (i.e.,
      # label_weight = 1.0).
      if loss_weight != 1.0:
        raise ValueError(
            'loss_weight must equal to 1 if groundtruth is matting map.')
      # Assign label value 0 to ignore pixels. The exact label value of ignore
      # pixel does not matter, because those ignore_value pixel losses will be
      # multiplied to 0 weight.
      train_labels = scaled_labels * keep_mask
      train_labels = tf.expand_dims(train_labels, 1)
      train_labels = tf.concat([1 - train_labels, train_labels], axis=1)
    else:
      train_labels = tf.one_hot(
          scaled_labels, num_classes, on_value=1.0, off_value=0.0)
    default_loss_scope = ('softmax_all_pixel_loss'
                          if top_k_percent_pixels == 1.0 else
                          'softmax_hard_example_mining')
    with tf.name_scope(loss_scope, default_loss_scope,
                       [logits, train_labels, weights]):
      # Compute the loss for all pixels.
      pixel_losses = loss_func(
          labels=tf.stop_gradient(
              train_labels, name='train_labels_stop_gradient'),
          logits=logits,
          name='pixel_losses')
      weighted_pixel_losses = tf.multiply(pixel_losses, weights)
      if top_k_percent_pixels == 1.0:
        # No hard mining: mean over the non-ignored pixels.
        total_loss = tf.reduce_sum(weighted_pixel_losses)
        num_present = tf.reduce_sum(keep_mask)
        loss = _div_maybe_zero(total_loss, num_present)
        tf.losses.add_loss(loss)
      else:
        num_pixels = tf.to_float(tf.shape(logits)[0])
        # Compute the top_k_percent pixels based on current training step.
        if hard_example_mining_step == 0:
          # Directly focus on the top_k pixels.
          top_k_pixels = tf.to_int32(top_k_percent_pixels * num_pixels)
        else:
          # Gradually reduce the mining percent to top_k_percent_pixels.
          global_step = tf.to_float(tf.train.get_or_create_global_step())
          ratio = tf.minimum(1.0, global_step / hard_example_mining_step)
          top_k_pixels = tf.to_int32(
              (ratio * top_k_percent_pixels + (1.0 - ratio)) * num_pixels)
        top_k_losses, _ = tf.nn.top_k(weighted_pixel_losses,
                                      k=top_k_pixels,
                                      sorted=True,
                                      name='top_k_percent_pixels')
        total_loss = tf.reduce_sum(top_k_losses)
        num_present = tf.reduce_sum(
            tf.to_float(tf.not_equal(top_k_losses, 0.0)))
        loss = _div_maybe_zero(total_loss, num_present)
        tf.losses.add_loss(loss)
aceb860cf5c6fd6f24967402be72c8200c4312e1 | 15,281 | py | Python | tests/test_zabbix_api.py | maxgrechnev/unimonapi | f8a8255a4142f880a4a4cd931c3a3f35676ce0db | [
"MIT"
] | null | null | null | tests/test_zabbix_api.py | maxgrechnev/unimonapi | f8a8255a4142f880a4a4cd931c3a3f35676ce0db | [
"MIT"
] | null | null | null | tests/test_zabbix_api.py | maxgrechnev/unimonapi | f8a8255a4142f880a4a4cd931c3a3f35676ce0db | [
"MIT"
] | null | null | null | import pytest
from mock import patch
from mock import call
from mock import MagicMock
from unimonapi import MonitoringAPI
from unimonapi import ZabbixAPI
from unimonapi import UnimonError
from unimonapi import Event
from unimonapi import HostGroup
@pytest.fixture(scope='module')
@patch('unimonapi.zabbix.zabbix_api.PyZabbixAPI')
def zabbix_api(mock):
zabbix_api = ZabbixAPI('http://zabbix-frontend', 'Admin', 'zabbix123', '/path/to/repo', 'install_win.sh', 'install_lin.sh', 'match_filter')
zabbix_api.mock = mock
zabbix_api.mock_instance = mock.return_value
return zabbix_api
def test_zabbix_api_init(zabbix_api):
assert isinstance(zabbix_api, MonitoringAPI)
zabbix_api.mock.assert_called_once_with('http://zabbix-frontend')
zabbix_api.mock_instance.login.assert_called_once_with('Admin', 'zabbix123')
@pytest.mark.parametrize('rules_number', [1, 5, pytest.param(0, marks=pytest.mark.xfail(raises=UnimonError))])
def test_zabbix_api_get_discovery_ip_range(zabbix_api, rules_number):
zabbix_api.mock_instance.drule.get = MagicMock()
zabbix_api.mock_instance.drule.get.return_value = rules_number * [{
'druleid': 'rule_id',
'iprange': '1.1.1.1',
}]
returned_ip_range = zabbix_api.get_discovery_ip_range()
zabbix_api.mock_instance.drule.get.assert_called_once()
assert returned_ip_range == '1.1.1.1'
@pytest.mark.parametrize('rules_number', [1, 5, pytest.param(0, marks=pytest.mark.xfail(raises=UnimonError))])
def test_zabbix_api_start_default_discovery(zabbix_api, rules_number):
zabbix_api.mock_instance.drule.get = MagicMock()
zabbix_api.mock_instance.drule.update = MagicMock()
zabbix_api.mock_instance.drule.get.return_value = rules_number * [{
'druleid': 'rule_id',
}]
zabbix_api.start_discovery()
zabbix_api.mock_instance.drule.get.assert_called_once()
assert zabbix_api.mock_instance.drule.update.call_count == rules_number
update_calls = [call(druleid='rule_id', status=0)] * rules_number
zabbix_api.mock_instance.drule.update.assert_has_calls(update_calls)
def test_zabbix_api_start_discovery_with_ip_range(zabbix_api):
zabbix_api.mock_instance.drule.get = MagicMock()
zabbix_api.mock_instance.drule.update = MagicMock()
zabbix_api.mock_instance.drule.get.return_value = [{
'druleid': 'rule_id',
}]
zabbix_api.start_discovery('1.1.1.1')
zabbix_api.mock_instance.drule.get.assert_called_once()
zabbix_api.mock_instance.drule.update.assert_called_once_with(druleid='rule_id', iprange='1.1.1.1', status=0)
@pytest.mark.parametrize('rules_number', [1, 5, pytest.param(0, marks=pytest.mark.xfail(raises=UnimonError))])
def test_zabbix_api_stop_discovery(zabbix_api, rules_number):
zabbix_api.mock_instance.drule.get = MagicMock()
zabbix_api.mock_instance.drule.update = MagicMock()
zabbix_api.mock_instance.drule.get.return_value = rules_number * [{
'druleid': 'rule_id',
}]
zabbix_api.stop_discovery()
zabbix_api.mock_instance.drule.get.assert_called_once()
assert zabbix_api.mock_instance.drule.update.call_count == rules_number
update_calls = [call(druleid='rule_id', status=1)] * rules_number
zabbix_api.mock_instance.drule.update.assert_has_calls(update_calls)
@pytest.mark.parametrize('problems_number', [1, 5])
def test_get_problems(zabbix_api, problems_number):
zabbix_api.mock_instance.problem.get = MagicMock()
zabbix_api.mock_instance.trigger.get = MagicMock()
zabbix_api.mock_instance.problem.get.return_value = problems_number * [{
'eventid': 'event_id',
'objectid': 'trigger_id',
'tags': [],
}]
zabbix_api.mock_instance.trigger.get.return_value = {
'trigger_id': {
'triggerid': 'trigger_id',
'description': 'High CPU usage',
'priority': 1,
'hosts': [{
'hostid': 'host_id',
'name': 'zabbix-server',
}],
'groups': [{
'groupid': 'group_id',
'name': 'Zabbix Servers',
}],
}
}
problems = zabbix_api.get_problems()
zabbix_api.mock_instance.problem.get.assert_called_once()
zabbix_api.mock_instance.trigger.get.assert_called()
assert len(problems) == problems_number
assert isinstance(problems[0], Event)
assert problems[0].type == Event.PROBLEM
assert problems[0].detailed
assert problems[0].severity == Event.INFO
assert problems[0].host == 'zabbix-server'
assert problems[0].text == 'High CPU usage'
assert problems[0].id == 'event_id'
@pytest.mark.parametrize(
('priority', 'severity'),
[
(1, Event.INFO),
(2, Event.WARNING),
(4, Event.CRITICAL)
],
)
def test_get_problems_with_priority(zabbix_api, priority, severity):
zabbix_api.mock_instance.problem.get.return_value = [{
'eventid': 'event_id',
'objectid': 'trigger_id',
'tags': [],
}]
zabbix_api.mock_instance.trigger.get.return_value = {
'trigger_id': {
'triggerid': 'trigger_id',
'description': 'High CPU usage',
'priority': priority,
'hosts': [{
'hostid': 'host_id',
'name': 'zabbix-server',
}],
'groups': [{
'groupid': 'group_id',
'name': 'Zabbix Servers',
}],
}
}
problems = zabbix_api.get_problems()
assert len(problems) == 1
assert problems[0].severity == severity
@pytest.mark.parametrize(
('tags_number', 'tag', 'tag_value', 'tag_string'),
[
(0, 'App', 'Zabbix', ''),
(1, 'App', '', ' [ App ]'),
(1, 'App', 'Zabbix', ' [ App:Zabbix ]'),
(3, 'App', '', ' [ App, App, App ]'),
],
)
def test_get_problems_with_tags(zabbix_api, tags_number, tag, tag_value, tag_string):
zabbix_api.mock_instance.problem.get.return_value = [{
'eventid': 'event_id',
'objectid': 'trigger_id',
'tags': tags_number * [{
'tag': tag,
'value': tag_value,
}],
}]
zabbix_api.mock_instance.trigger.get.return_value = {
'trigger_id': {
'triggerid': 'trigger_id',
'description': 'High CPU usage',
'priority': 1,
'hosts': [{
'hostid': 'host_id',
'name': 'zabbix-server',
}],
'groups': [{
'groupid': 'group_id',
'name': 'Zabbix Servers',
}],
}
}
problems = zabbix_api.get_problems()
assert len(problems) == 1
assert problems[0].text == 'High CPU usage' + tag_string
@pytest.mark.parametrize(
('severities', 'priorities'),
[
([Event.CRITICAL], [4,5]),
([Event.INFO, Event.WARNING], [0,1,2,3]),
(None, [0,1,2,3,4,5]),
([], []),
('abcd', []),
([99,555], []),
pytest.param(123, [], marks=pytest.mark.xfail(raises=TypeError)),
],
)
def test_get_problems_with_severity(zabbix_api, severities, priorities):
zabbix_api.mock_instance.problem.get.return_value = []
zabbix_api.mock_instance.trigger.get.return_value = {}
zabbix_api.get_problems(severities=severities)
args, kwargs = zabbix_api.mock_instance.problem.get.call_args
assert 'severities' in kwargs
assert kwargs['severities'] == priorities
def test_get_problems_with_groups(zabbix_api):
zabbix_api.mock_instance.problem.get.return_value = []
zabbix_api.mock_instance.trigger.get.return_value = {}
zabbix_api.get_problems(groups=['id_1', 'id_2'])
args, kwargs = zabbix_api.mock_instance.problem.get.call_args
assert 'groupids' in kwargs
assert kwargs['groupids'] == ['id_1', 'id_2']
def test_get_problems_dependent(zabbix_api):
zabbix_api.mock_instance.problem.get.return_value = [{
'eventid': 'event_id',
'objectid': 'trigger_id',
'tags': [],
}]
zabbix_api.mock_instance.trigger.get.return_value = {}
problems = zabbix_api.get_problems()
args, kwargs = zabbix_api.mock_instance.trigger.get.call_args
assert 'monitored' in kwargs
assert kwargs['monitored'] == 1
assert 'skipDependent' in kwargs
assert kwargs['skipDependent'] == 1
assert len(problems) == 0
def test_get_summary(zabbix_api):
zabbix_api.mock_instance.problem.get = MagicMock()
zabbix_api.mock_instance.trigger.get = MagicMock()
zabbix_api.mock_instance.problem.get.return_value = [
{
'eventid': 'critical_event_id',
'objectid': 'critical_trigger_id',
},
{
'eventid': 'warning_event_id',
'objectid': 'warning_trigger_id',
},
]
zabbix_api.mock_instance.trigger.get.return_value = {
'critical_trigger_id': {
'triggerid': 'critical_trigger_id',
'priority': 5,
'groups': [
{
'groupid': 'group_id_1',
'name': 'Group 1',
},
{
'groupid': 'group_id_2',
'name': 'Group 2',
},
],
},
'warning_trigger_id': {
'triggerid': 'warning_trigger_id',
'priority': 3,
'groups': [{
'groupid': 'group_id_1',
'name': 'Group 1',
}],
}
}
host_groups = zabbix_api.get_summary([Event.CRITICAL, Event.WARNING])
zabbix_api.mock_instance.problem.get.assert_called_once()
zabbix_api.mock_instance.trigger.get.assert_called()
assert len(host_groups) == 2
assert host_groups[0].id == 'group_id_1'
assert host_groups[0].severity == Event.CRITICAL
assert host_groups[0].problems == 2
assert host_groups[1].id == 'group_id_2'
assert host_groups[1].severity == Event.CRITICAL
assert host_groups[1].problems == 1
@pytest.mark.parametrize(
('os_type', 'bin_file'),
[
('Windows', 'install_win.sh'),
('Linux', 'install_lin.sh'),
pytest.param('Android', '', marks=pytest.mark.xfail(raises=UnimonError)),
],
)
def test_install_agent(zabbix_api, os_type, bin_file):
with patch('subprocess.Popen') as MockPopen:
MockPopen.return_value.communicate.return_value = ('stdout', 'stderr')
MockPopen.return_value.returncode = 777
return_code = zabbix_api.install_agent(os_type, 'my-host', 'root', '12345')
MockPopen.assert_called_once()
args, kwargs = MockPopen.call_args
assert args[0] == [bin_file, '/path/to/repo', 'my-host', 'root', '12345']
assert return_code == 777
def test_get_available_host_groups(zabbix_api):
zabbix_api.mock_instance.hostgroup.get = MagicMock()
zabbix_api.mock_instance.hostgroup.get.side_effect = (
[
{
'groupid': 'group_id_1',
'name': 'Host group',
},
{
'groupid': 'group_id_2',
'name': 'Template group',
}
],
[
{
'groupid': 'group_id_2',
'name': 'Template group',
}
],
)
returned_groups = zabbix_api.get_available_host_groups()
assert zabbix_api.mock_instance.hostgroup.get.call_count == 2
assert returned_groups == ['Host group']
@pytest.mark.parametrize(
('host_name', 'use_ip', 'ip', 'dns'),
[
('new-host', 0, '', 'new-host'),
('1.1.1.1', 1, '1.1.1.1', ''),
],
)
def test_add_host(zabbix_api, host_name, use_ip, ip, dns):
zabbix_api.mock_instance.hostgroup.get = MagicMock()
zabbix_api.mock_instance.template.get = MagicMock()
zabbix_api.mock_instance.host.create = MagicMock()
zabbix_api.mock_instance.hostgroup.get.return_value = [{'groupid': 'group_id'}]
zabbix_api.mock_instance.template.get.return_value = [{'templateid': 'template_id'}]
zabbix_api.mock_instance.host.create.return_value = {'hostids': ['new_host_id']}
returned_host_id = zabbix_api.add_host(host_name, ['Host group'])
zabbix_api.mock_instance.hostgroup.get.assert_called_once()
zabbix_api.mock_instance.template.get.assert_called_once()
args, kwargs = zabbix_api.mock_instance.template.get.call_args
assert kwargs['filter'] == {'host': 'match_filter Host group'}
zabbix_api.mock_instance.host.create.assert_called_once()
args, kwargs = zabbix_api.mock_instance.host.create.call_args
assert kwargs['host'] == host_name
assert kwargs['groups'] == [{'groupid': 'group_id'}]
assert kwargs['templates'] == [{'templateid': 'template_id'}]
assert kwargs['interfaces'][0]['useip'] == use_ip
assert kwargs['interfaces'][0]['ip'] == ip
assert kwargs['interfaces'][0]['dns'] == dns
assert returned_host_id == 'new_host_id'
def test_delete_host(zabbix_api):
zabbix_api.mock_instance.host.delete = MagicMock()
zabbix_api.delete_host('host_id')
zabbix_api.mock_instance.host.delete.assert_called_once_with('host_id')
def test_get_host_id(zabbix_api):
zabbix_api.mock_instance.host.get = MagicMock()
zabbix_api.mock_instance.host.get.return_value = [{'hostid': 'host_id'}]
returned_id = zabbix_api.get_host_id('my-host')
zabbix_api.mock_instance.host.get.assert_called_once()
assert returned_id == 'host_id'
def test_get_host_name(zabbix_api):
zabbix_api.mock_instance.host.get = MagicMock()
zabbix_api.mock_instance.host.get.return_value = [{
'hostid': 'host_id',
'host': 'my-host',
}]
returned_name = zabbix_api.get_host_name('host_id')
zabbix_api.mock_instance.host.get.assert_called_once()
assert returned_name == 'my-host'
# def test_export_config(zabbix_api):
# zabbix_api.mock_instance.configuration.export = MagicMock()
# zabbix_api.mock_instance.template.get = MagicMock()
# zabbix_api.mock_instance.host.get = MagicMock()
# zabbix_api.mock_instance.valuemap.get = MagicMock()
# zabbix_api.mock_instance.hostgroup.get = MagicMock()
# zabbix_api.mock_instance.usermacro.get = MagicMock()
# zabbix_api.mock_instance.drule.get = MagicMock()
# zabbix_api.mock_instance.action.get = MagicMock()
# zabbix_api.mock_instance.template.get.return_value = { 'template_id': { 'templateid': 'template_id' }}
# zabbix_api.mock_instance.host.get.return_value = { 'host_id': { 'hostid': 'host_id' }}
# zabbix_api.mock_instance.valuemap.get.return_value = { 'valuemap_id': { 'valuemapid': 'valuemap_id' }}
# zabbix_api.mock_instance.hostgroup.get.return_value = { 'group_id': { 'groupid': 'group_id' }}
# zabbix_api.mock_instance.configuration.export.return_value = {
# 'zabbix_export': {
# 'templates': [
# ],
# 'triggers': [
# ],
# 'value_maps': [
# ],
# 'hosts': [
# ],
# 'graphs': [
# ],
# 'version': [
# ],
# 'groups': [
# ],
# }
# }
# zabbix_api.export_config()
# zabbix_api.mock_instance.host.delete.assert_called_once_with('host_id')
| 35.620047 | 143 | 0.632485 |
aceb8692d3251dda6870d71f348b0b3458127449 | 1,382 | py | Python | demo.py | Zekhire/pcaflow | 75f7b8b1df1f1b2b244eb6e0377abaf8b1a5d278 | [
"RSA-MD"
] | 80 | 2015-06-23T04:51:27.000Z | 2022-03-09T08:15:36.000Z | demo.py | Zekhire/pcaflow | 75f7b8b1df1f1b2b244eb6e0377abaf8b1a5d278 | [
"RSA-MD"
] | 4 | 2017-06-18T03:40:48.000Z | 2018-10-31T09:55:32.000Z | demo.py | Zekhire/pcaflow | 75f7b8b1df1f1b2b244eb6e0377abaf8b1a5d278 | [
"RSA-MD"
] | 37 | 2015-08-20T03:26:28.000Z | 2022-03-08T10:54:49.000Z | #! /usr/bin/env python2
from pcaflow import PCAFlow
# To read images
from scipy.misc import imread
# To display
from matplotlib import pyplot as plt
from pcaflow.utils.viz_flow import viz_flow
PATH_PC_U = 'data/PC_U.npy'
PATH_PC_V = 'data/PC_V.npy'
PATH_COV = 'data/COV_SINTEL.npy'
PATH_COV_SUBLAYER = 'data/COV_SINTEL_SUBLAYER.npy'
### Compute using PCA-Flow.
#P = PCAFlow.PCAFlow(
# pc_file_u=PATH_PC_U,
# pc_file_v=PATH_PC_V,
# covfile=PATH_COV,
# preset='pcaflow_sintel',
# )
#
### Compute using PCA-Layers.
P = PCAFlow.PCAFlow(
pc_file_u=PATH_PC_U,
pc_file_v=PATH_PC_V,
covfile=PATH_COV,
covfile_sublayer=PATH_COV_SUBLAYER,
preset='pcalayers_sintel',
)
### Once the object is created, it can be used like this:
I1 = imread('image1.png')
I2 = imread('image2.png')
P.push_back(I1)
P.push_back(I2)
# Compute flow
u,v = P.compute_flow()
### Use this if you want to just get the motion descriptor
#u,v,data = P.compute_flow(return_additional=['weights',])
#descriptor = data['weights']
I_flow = viz_flow(u,v)
plt.figure()
plt.subplot(221)
plt.imshow(I1)
plt.title('First image')
plt.subplot(222)
plt.imshow(I_flow)
plt.title('Flow colormap')
plt.subplot(223)
plt.imshow(u)
plt.title('Horizontal component')
plt.subplot(224)
plt.imshow(v)
plt.title('Vertical component')
plt.show()
| 19.464789 | 58 | 0.696816 |
aceb87cc7153e1d39c6f040a1739f2a0aa138a1a | 20,172 | py | Python | tests/test_deprecation.py | szaydel/knack | cd590312b9b4ea1402d7a0a76814f6ca4da7fab8 | [
"MIT"
] | null | null | null | tests/test_deprecation.py | szaydel/knack | cd590312b9b4ea1402d7a0a76814f6ca4da7fab8 | [
"MIT"
] | null | null | null | tests/test_deprecation.py | szaydel/knack | cd590312b9b4ea1402d7a0a76814f6ca4da7fab8 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
try:
import mock
except ImportError:
from unittest import mock
from threading import Lock
from knack.arguments import ArgumentsContext
from knack.commands import CLICommand, CLICommandsLoader, CommandGroup
from tests.util import DummyCLI, redirect_io, disable_color
def example_handler(arg1, arg2=None, arg3=None):
""" Short summary here. Long summary here. Still long summary. """
pass
def example_arg_handler(arg1, opt1, arg2=None, opt2=None, arg3=None,
opt3=None, arg4=None, opt4=None, arg5=None, opt5=None):
pass
# pylint: disable=line-too-long
class TestCommandDeprecation(unittest.TestCase):
def setUp(self):
from knack.help_files import helps
class DeprecationTestCommandLoader(CLICommandsLoader):
def load_command_table(self, args):
super().load_command_table(args)
with CommandGroup(self, '', '{}#{{}}'.format(__name__)) as g:
g.command('cmd1', 'example_handler', deprecate_info=g.deprecate(redirect='alt-cmd1'))
g.command('cmd2', 'example_handler', deprecate_info=g.deprecate(redirect='alt-cmd2', hide='1.0.0'))
g.command('cmd3', 'example_handler', deprecate_info=g.deprecate(redirect='alt-cmd3', hide='0.1.0'))
g.command('cmd4', 'example_handler', deprecate_info=g.deprecate(redirect='alt-cmd4', expiration='1.0.0'))
g.command('cmd5', 'example_handler', deprecate_info=g.deprecate(redirect='alt-cmd5', expiration='0.1.0'))
with CommandGroup(self, 'grp1', '{}#{{}}'.format(__name__), deprecate_info=self.deprecate(redirect='alt-grp1')) as g:
g.command('cmd1', 'example_handler')
return self.command_table
def load_arguments(self, command):
with ArgumentsContext(self, '') as c:
c.argument('arg1', options_list=['--arg', '-a'], required=False, type=int, choices=[1, 2, 3])
c.argument('arg2', options_list=['-b'], required=True, choices=['a', 'b', 'c'])
super().load_arguments(command)
helps['grp1'] = """
type: group
short-summary: A group.
"""
self.cli_ctx = DummyCLI(commands_loader_cls=DeprecationTestCommandLoader)
@redirect_io
def test_deprecate_command_group_help(self):
""" Ensure deprecated commands appear (or don't appear) correctly in group help view. """
with self.assertRaises(SystemExit):
self.cli_ctx.invoke('-h'.split())
actual = self.io.getvalue()
expected = u"""
Group
{}
Subgroups:
grp1 [Deprecated] : A group.
Commands:
cmd1 [Deprecated] : Short summary here.
cmd2 [Deprecated] : Short summary here.
cmd4 [Deprecated] : Short summary here.
""".format(self.cli_ctx.name)
self.assertEqual(expected, actual)
@redirect_io
def test_deprecate_command_help_hidden(self):
""" Ensure hidden deprecated command can be used. """
with self.assertRaises(SystemExit):
self.cli_ctx.invoke('cmd3 -h'.split())
actual = self.io.getvalue()
expected = """
Command
{} cmd3 : Short summary here.
Long summary here. Still long summary.
This command has been deprecated and will be removed in a future release. Use 'alt-
cmd3' instead.
Arguments
-b [Required] : Allowed values: a, b, c.
--arg -a : Allowed values: 1, 2, 3.
--arg3
""".format(self.cli_ctx.name)
self.assertIn(expected, actual)
@redirect_io
def test_deprecate_command_plain_execute(self):
""" Ensure general warning displayed when running deprecated command. """
self.cli_ctx.invoke('cmd1 -b b'.split())
actual = self.io.getvalue()
expected = "This command has been deprecated and will be removed in a future release. Use 'alt-cmd1' instead."
self.assertIn(expected, actual)
@redirect_io
def test_deprecate_command_hidden_execute(self):
""" Ensure general warning displayed when running hidden deprecated command. """
self.cli_ctx.invoke('cmd3 -b b'.split())
actual = self.io.getvalue()
expected = "This command has been deprecated and will be removed in a future release. Use 'alt-cmd3' instead."
self.assertIn(expected, actual)
@redirect_io
def test_deprecate_command_expiring_execute(self):
""" Ensure specific warning displayed when running expiring deprecated command. """
self.cli_ctx.invoke('cmd4 -b b'.split())
actual = self.io.getvalue()
expected = "This command has been deprecated and will be removed in version '1.0.0'. Use 'alt-cmd4' instead."
self.assertIn(expected, actual)
@redirect_io
def test_deprecate_command_expiring_execute_no_color(self):
""" Ensure warning is displayed without color. """
self.cli_ctx.enable_color = False
self.cli_ctx.invoke('cmd4 -b b'.split())
actual = self.io.getvalue()
expected = "WARNING: This command has been deprecated and will be removed in version '1.0.0'"
self.assertIn(expected, actual)
@redirect_io
def test_deprecate_command_expired_execute(self):
""" Ensure expired command cannot be reached. """
with self.assertRaises(SystemExit):
self.cli_ctx.invoke('cmd5 -h'.split())
actual = self.io.getvalue()
expected = """cli: 'cmd5' is not in the 'cli' command group."""
self.assertIn(expected, actual)
@redirect_io
@disable_color
def test_deprecate_command_expired_execute_no_color(self):
""" Ensure error is displayed without color. """
with self.assertRaises(SystemExit):
self.cli_ctx.invoke('cmd5 -h'.split())
actual = self.io.getvalue()
expected = """ERROR: cli: 'cmd5' is not in the 'cli' command group."""
self.assertIn(expected, actual)
class TestCommandGroupDeprecation(unittest.TestCase):
def setUp(self):
from knack.help_files import helps
class DeprecationTestCommandLoader(CLICommandsLoader):
def load_command_table(self, args):
super().load_command_table(args)
with CommandGroup(self, 'group1', '{}#{{}}'.format(__name__), deprecate_info=self.deprecate(redirect='alt-group1')) as g:
g.command('cmd1', 'example_handler')
with CommandGroup(self, 'group2', '{}#{{}}'.format(__name__), deprecate_info=self.deprecate(redirect='alt-group2', hide='1.0.0')) as g:
g.command('cmd1', 'example_handler')
with CommandGroup(self, 'group3', '{}#{{}}'.format(__name__), deprecate_info=self.deprecate(redirect='alt-group3', hide='0.1.0')) as g:
g.command('cmd1', 'example_handler')
with CommandGroup(self, 'group4', '{}#{{}}'.format(__name__), deprecate_info=self.deprecate(redirect='alt-group4', expiration='1.0.0')) as g:
g.command('cmd1', 'example_handler')
with CommandGroup(self, 'group5', '{}#{{}}'.format(__name__), deprecate_info=self.deprecate(redirect='alt-group5', expiration='0.1.0')) as g:
g.command('cmd1', 'example_handler')
return self.command_table
def load_arguments(self, command):
with ArgumentsContext(self, '') as c:
c.argument('arg1', options_list=['--arg', '-a'], required=False, type=int, choices=[1, 2, 3])
c.argument('arg2', options_list=['-b'], required=True, choices=['a', 'b', 'c'])
super().load_arguments(command)
helps['group1'] = """
type: group
short-summary: A group.
"""
self.cli_ctx = DummyCLI(commands_loader_cls=DeprecationTestCommandLoader)
@redirect_io
def test_deprecate_command_group_help_plain(self):
""" Ensure help warnings appear for deprecated command group help. """
with self.assertRaises(SystemExit):
self.cli_ctx.invoke('group1 -h'.split())
actual = self.io.getvalue()
expected = """
Group
cli group1 : A group.
This command group has been deprecated and will be removed in a future release. Use
'alt-group1' instead.
Commands:
cmd1 : Short summary here.
""".format(self.cli_ctx.name)
self.assertEqual(expected, actual)
@redirect_io
def test_deprecate_command_group_help_hidden(self):
""" Ensure hidden deprecated command can be used. """
with self.assertRaises(SystemExit):
self.cli_ctx.invoke('group3 -h'.split())
actual = self.io.getvalue()
expected = """
Group
{} group3
This command group has been deprecated and will be removed in a future release. Use
'alt-group3' instead.
Commands:
cmd1 : Short summary here.
""".format(self.cli_ctx.name)
self.assertIn(expected, actual)
@redirect_io
def test_deprecate_command_group_help_expiring(self):
""" Ensure specific warning displayed when running expiring deprecated command. """
with self.assertRaises(SystemExit):
self.cli_ctx.invoke('group4 -h'.split())
actual = self.io.getvalue()
expected = """
Group
{} group4
This command group has been deprecated and will be removed in version '1.0.0'. Use
'alt-group4' instead.
""".format(self.cli_ctx.name)
self.assertIn(expected, actual)
@redirect_io
@disable_color
def test_deprecate_command_group_help_expiring_no_color(self):
""" Ensure warning is displayed without color. """
with self.assertRaises(SystemExit):
self.cli_ctx.invoke('group4 -h'.split())
actual = self.io.getvalue()
expected = """
Group
cli group4
WARNING: This command group has been deprecated and will be removed in version \'1.0.0\'. Use
'alt-group4' instead.
""".format(self.cli_ctx.name)
self.assertIn(expected, actual)
@redirect_io
def test_deprecate_command_group_expired(self):
""" Ensure expired command cannot be reached. """
with self.assertRaises(SystemExit):
self.cli_ctx.invoke('group5 -h'.split())
actual = self.io.getvalue()
expected = """The most similar choices to 'group5'"""
self.assertIn(expected, actual)
@redirect_io
def test_deprecate_command_implicitly(self):
""" Ensure help warning displayed for command deprecated because of a deprecated parent group. """
with self.assertRaises(SystemExit):
self.cli_ctx.invoke('group1 cmd1 -h'.split())
actual = self.io.getvalue()
expected = """
Command
{} group1 cmd1 : Short summary here.
Long summary here. Still long summary.
This command is implicitly deprecated because command group 'group1' is deprecated and
will be removed in a future release. Use 'alt-group1' instead.
""".format(self.cli_ctx.name)
self.assertIn(expected, actual)
class TestArgumentDeprecation(unittest.TestCase):
def setUp(self):
from knack.help_files import helps
class DeprecationTestCommandLoader(CLICommandsLoader):
def load_command_table(self, args):
super().load_command_table(args)
with CommandGroup(self, '', '{}#{{}}'.format(__name__)) as g:
g.command('arg-test', 'example_arg_handler')
return self.command_table
def load_arguments(self, command):
with ArgumentsContext(self, 'arg-test') as c:
c.argument('arg1', help='Arg1', deprecate_info=c.deprecate())
c.argument('opt1', help='Opt1', options_list=['--opt1', c.deprecate(redirect='--opt1', target='--alt1')])
c.argument('arg2', help='Arg2', deprecate_info=c.deprecate(hide='1.0.0'))
c.argument('opt2', help='Opt2', options_list=['--opt2', c.deprecate(redirect='--opt2', target='--alt2', hide='1.0.0')])
c.argument('arg3', help='Arg3', deprecate_info=c.deprecate(hide='0.1.0'))
c.argument('opt3', help='Opt3', options_list=['--opt3', c.deprecate(redirect='--opt3', target='--alt3', hide='0.1.0')])
c.argument('arg4', deprecate_info=c.deprecate(expiration='1.0.0'))
c.argument('opt4', options_list=['--opt4', c.deprecate(redirect='--opt4', target='--alt4', expiration='1.0.0')])
c.argument('arg5', deprecate_info=c.deprecate(expiration='0.1.0'))
c.argument('opt5', options_list=['--opt5', c.deprecate(redirect='--opt5', target='--alt5', expiration='0.1.0')])
super().load_arguments(command)
helps['grp1'] = """
type: group
short-summary: A group.
"""
self.cli_ctx = DummyCLI(commands_loader_cls=DeprecationTestCommandLoader)
@redirect_io
def test_deprecate_arguments_command_help(self):
""" Ensure deprecated arguments and options appear (or don't appear)
correctly in command help view. """
with self.assertRaises(SystemExit):
self.cli_ctx.invoke('arg-test -h'.split())
actual = self.io.getvalue()
expected = """
Command
{} arg-test
Arguments
--alt1 [Deprecated] [Required] : Opt1.
Option '--alt1' has been deprecated and will be removed in a future release. Use '--
opt1' instead.
--arg1 [Deprecated] [Required] : Arg1.
Argument 'arg1' has been deprecated and will be removed in a future release.
--opt1 [Required] : Opt1.
--alt2 [Deprecated] : Opt2.
Option '--alt2' has been deprecated and will be removed in a future release. Use '--
opt2' instead.
--alt4 [Deprecated]
Option '--alt4' has been deprecated and will be removed in version '1.0.0'. Use '--
opt4' instead.
--arg2 [Deprecated] : Arg2.
Argument 'arg2' has been deprecated and will be removed in a future release.
--arg4 [Deprecated]
Argument 'arg4' has been deprecated and will be removed in version '1.0.0'.
--opt2 : Opt2.
--opt3 : Opt3.
--opt4
--opt5
""".format(self.cli_ctx.name)
self.assertTrue(actual.startswith(expected))
@redirect_io
def test_deprecate_arguments_execute(self):
""" Ensure deprecated arguments can be used. """
self.cli_ctx.invoke('arg-test --arg1 foo --opt1 bar'.split())
actual = self.io.getvalue()
expected = "Argument 'arg1' has been deprecated and will be removed in a future release."
self.assertIn(expected, actual)
@redirect_io
def test_deprecate_arguments_execute_hidden(self):
""" Ensure hidden deprecated arguments can be used. """
self.cli_ctx.invoke('arg-test --arg1 foo --opt1 bar --arg3 bar'.split())
actual = self.io.getvalue()
expected = "Argument 'arg3' has been deprecated and will be removed in a future release."
self.assertIn(expected, actual)
@redirect_io
def test_deprecate_arguments_execute_expiring(self):
""" Ensure hidden deprecated arguments can be used. """
self.cli_ctx.invoke('arg-test --arg1 foo --opt1 bar --arg4 bar'.split())
actual = self.io.getvalue()
expected = "Argument 'arg4' has been deprecated and will be removed in version '1.0.0'."
self.assertIn(expected, actual)
@redirect_io
def test_deprecate_arguments_execute_expired(self):
""" Ensure expired deprecated arguments can't be used. """
with self.assertRaises(SystemExit):
self.cli_ctx.invoke('arg-test --arg1 foo --opt1 bar --arg5 foo'.split())
actual = self.io.getvalue()
expected = 'unrecognized arguments: --arg5 foo'
self.assertIn(expected, actual)
@redirect_io
def test_deprecate_options_execute(self):
""" Ensure deprecated options can be used with a warning. """
self.cli_ctx.invoke('arg-test --arg1 foo --alt1 bar'.split())
actual = self.io.getvalue()
expected = "Option '--alt1' has been deprecated and will be removed in a future release. Use '--opt1' instead."
self.assertIn(expected, actual)
@redirect_io
def test_deprecate_options_execute_non_deprecated(self):
""" Ensure non-deprecated options don't show warning. """
self.cli_ctx.invoke('arg-test --arg1 foo --opt1 bar'.split())
actual = self.io.getvalue()
expected = "Option '--alt1' has been deprecated and will be removed in a future release. Use '--opt1' instead."
self.assertNotIn(expected, actual)
@redirect_io
def test_deprecate_options_execute_hidden(self):
""" Ensure hidden deprecated options can be used with warning. """
self.cli_ctx.invoke('arg-test --arg1 foo --opt1 bar --alt3 bar'.split())
actual = self.io.getvalue()
expected = "Option '--alt3' has been deprecated and will be removed in a future release. Use '--opt3' instead."
self.assertIn(expected, actual)
@redirect_io
def test_deprecate_options_execute_hidden_non_deprecated(self):
""" Ensure hidden non-deprecated optionss can be used without warning. """
self.cli_ctx.invoke('arg-test --arg1 foo --opt1 bar --opt3 bar'.split())
actual = self.io.getvalue()
expected = "Option '--alt3' has been deprecated and will be removed in a future release. Use '--opt3' instead."
self.assertNotIn(expected, actual)
@redirect_io
def test_deprecate_options_execute_expired(self):
""" Ensure expired deprecated options can't be used. """
with self.assertRaises(SystemExit):
self.cli_ctx.invoke('arg-test --arg1 foo --opt1 bar --alt5 foo'.split())
actual = self.io.getvalue()
expected = 'unrecognized arguments: --alt5 foo'
self.assertIn(expected, actual)
@redirect_io
def test_deprecate_options_execute_expired_non_deprecated(self):
""" Ensure non-expired options can be used without warning. """
self.cli_ctx.invoke('arg-test --arg1 foo --opt1 bar --opt5 foo'.split())
actual = self.io.getvalue()
self.assertTrue(u'--alt5' not in actual and u'--opt5' not in actual)
@redirect_io
def test_deprecate_options_execute_expiring(self):
""" Ensure expiring options can be used with warning. """
self.cli_ctx.invoke('arg-test --arg1 foo --opt1 bar --alt4 bar'.split())
actual = self.io.getvalue()
expected = "Option '--alt4' has been deprecated and will be removed in version '1.0.0'. Use '--opt4' instead."
self.assertIn(expected, actual)
@redirect_io
@disable_color
def test_deprecate_options_execute_expiring_no_color(self):
""" Ensure error is displayed without color. """
self.cli_ctx.invoke('arg-test --arg1 foo --opt1 bar --alt4 bar'.split())
actual = self.io.getvalue()
expected = "WARNING: Option '--alt4' has been deprecated and will be removed in version '1.0.0'. Use '--opt4' instead."
self.assertIn(expected, actual)
@redirect_io
def test_deprecate_options_execute_expiring_non_deprecated(self):
""" Ensure non-expiring options can be used without warning. """
self.cli_ctx.invoke('arg-test --arg1 foo --opt1 bar --opt4 bar'.split())
actual = self.io.getvalue()
expected = "Option '--alt4' has been deprecated and will be removed in version '1.0.0'. Use '--opt4' instead."
self.assertNotIn(expected, actual)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 43.287554 | 157 | 0.630032 |
aceb882176c023e8d28086aa33dc905a8190482c | 6,173 | py | Python | saloha/mchain.py | mvilgelm/saloha | 2852f5b1329399cf09ef33c70be290d8be2da64e | [
"MIT"
] | 1 | 2020-01-20T17:17:32.000Z | 2020-01-20T17:17:32.000Z | saloha/mchain.py | majadoon/SlottedAloha | 2852f5b1329399cf09ef33c70be290d8be2da64e | [
"MIT"
] | null | null | null | saloha/mchain.py | majadoon/SlottedAloha | 2852f5b1329399cf09ef33c70be290d8be2da64e | [
"MIT"
] | 3 | 2020-01-20T16:43:53.000Z | 2021-03-28T06:04:32.000Z | '''
Markov chain model: finite sources, infinite retransmissions, uniform back-off
'''
__author__ = 'Mikhail Vilgelm'
import numpy as np
import math, os
import matplotlib.pyplot as plt
import matplotlib
from scipy import special
class SAlohaModel():
    """Markov chain model of slotted ALOHA: m finite sources, infinite
    retransmissions, uniform (geometric) back-off.

    The chain state is the backlog size n in {0, ..., m}.
    """

    def __init__(self, **kwargs):
        '''
        Parameters: number of nodes, total arrival rate, retransmission probability
        :param kwargs: must contain the keys 'm', 'lmb' and 'q_r'
        :return:
        '''
        self.m = kwargs['m']
        self.lmb = kwargs['lmb']
        self.q_r = kwargs['q_r']
        # Precompute the (m+1) x (m+1) transition matrix over backlog states.
        # Fix: the previous implementation seeded the matrix with an extra
        # empty list, which left a spurious trailing [] row after the loop.
        self.t_matrix = [
            [self.get_P_n_nplusi(i, j) for j in range(self.m + 1)]
            for i in range(self.m + 1)
        ]

    def q_a(self):
        '''
        Probability of transmission from an unbacklogged node
        :return:
        '''
        return 1 - math.exp(-self.lmb/self.m)

    def get_Q_a(self, i, n):
        '''
        Probability of i unbacklogged nodes attempting a transmission,
        given n as the size of the backlog
        :param i:
        :param n:
        :return:
        '''
        return special.binom(self.m-n, i) * ((1-self.q_a()) ** (self.m-n-i)) * (self.q_a() ** i)

    def get_Q_r(self, i, n):
        '''
        Probability of i backlogged nodes attempting a transmission,
        given n as the size of the backlog
        :param i:
        :param n:
        :return:
        '''
        return special.binom(n, i) * ((1-self.q_r) ** (n-i)) * (self.q_r ** i)

    def get_P_n_nplusi(self, n, nplusi):
        '''
        Transition probability from backlog state n to state nplusi
        :param n:
        :param nplusi:
        :return:
        '''
        i = nplusi - n
        if (i >= 2) and (i <= (self.m-n)):
            # Two or more new arrivals: all collide and join the backlog.
            return self.get_Q_a(i, n)
        elif i == 1:
            # One new arrival colliding with at least one retransmission.
            return self.get_Q_a(1, n)*(1 - self.get_Q_r(0, n))
        elif i == 0:
            # A lone new arrival succeeds, or no arrival and no single
            # successful retransmission.
            return self.get_Q_a(1, n)*self.get_Q_r(0, n) + \
                   self.get_Q_a(0, n)*(1 - self.get_Q_r(1, n))
        elif i == -1:
            # Exactly one retransmission succeeds and no new arrivals.
            return self.get_Q_a(0, n)*self.get_Q_r(1, n)
        else:
            return 0.0

    def get_p_n(self, n, p_values):
        '''
        Unnormalized steady-state probability of state n, computed from the
        probabilities of the lower states via the balance equations.
        :param n: state
        :param p_values: previously computed steady-state probabilities
        :return: current probability
        '''
        assert (len(p_values) == n)  # make sure we have enough states already computed
        base0 = (1.0/self.get_P_n_nplusi(n, n-1))
        base1 = p_values[n-1] * (1 - self.get_P_n_nplusi(n-1, n-1))
        for j in range(n-1):
            base1 -= p_values[j]*self.get_P_n_nplusi(j, n-1)
        return base0*base1

    def get_p_success(self, n):
        '''
        Probability of a successful transmission given backlog state
        :param n:
        :return:
        '''
        return (self.get_Q_a(1, n)*self.get_Q_r(0, n)) + \
               (self.get_Q_a(0, n)*self.get_Q_r(1, n))

    def get_transition_matrix(self):
        '''Return the precomputed (m+1) x (m+1) transition matrix.'''
        return self.t_matrix

    def print_t_matrix(self):
        '''Pretty-print the transition matrix, one row per line.'''
        for i in range(self.m+1):
            print([('%.2f '%(x,)) for x in self.t_matrix[i]])

    def get_p_values(self):
        '''Compute the normalized steady-state probabilities of all states.'''
        # dummy value -> after calculating all we will normalize anyways
        p_0 = 0.1
        # initialize with 0th state
        p_values = [p_0]
        # compute one after another
        for i in range(1, self.m+1):
            p_values.append(self.get_p_n(i, p_values))
        # get normalized values
        p_values = [v/sum(p_values) for v in p_values]
        return p_values

    def get_G(self, n):
        '''Expected number of transmission attempts (offered load) in state n.'''
        v = (self.m-n)*self.q_a() + n*self.q_r
        return v

    def get_P_succ_appr_n(self, n):
        '''Poisson approximation of the success probability in state n.'''
        return self.get_G(n)*math.exp(-self.get_G(n))

    def get_P_succ_appr_G(self, G):
        '''Poisson approximation of the success probability at offered load G.'''
        return G*math.exp(-G)

    def get_D_n(self, n):
        '''Expected drift of the backlog (arrivals minus departures) in state n.'''
        return (self.m-n)*self.q_a() - self.get_P_succ_appr_n(n)
def create_figure_with_params():
    """Apply the global plotting settings and open a new figure."""
    matplotlib.rcParams.update({'font.size': 14, 'figure.autolayout': True})
    plt.figure(figsize=(7, 4.5))
def plot_performance(metric="throughput"):
    """Sweep the total load for several retransmission probabilities and plot
    the selected metric.

    :param metric: either "throughput" or "delay"
    """
    create_figure_with_params()
    # simulation parameters:
    m = 10
    q_r_all = [0.05, 0.2, 0.3]
    lmb_all = [0.01+0.01*x for x in range(200)]
    # plot handles, one per q_r value
    p = []
    # for every q_r
    for q_r in q_r_all:
        # metrics
        delay = []
        expected_ns = []
        throughput = []
        # for all loads
        for lmb in lmb_all:
            # create model instance with parameters
            saloha = SAlohaModel(m=m, lmb=lmb, q_r=q_r)
            # Fix: previously printed the literal string 'lmb' instead of the
            # current load value.
            print('Load ---> ' + str(lmb))
            # print matrix nicely
            saloha.print_t_matrix()
            # get steady-state probabilities
            p_values = saloha.get_p_values()
            print(['%.2f'%(i,) for i in p_values])
            # expected number of backlogged nodes
            expected_n = sum([p_values[i]*i for i in range(len(p_values))])
            expected_ns.append(expected_n)
            print('E[n] ---> ' + str(expected_n))
            # expected delay (Little's law on the backlog)
            delay.append(expected_n/lmb)
            print('Delay --> ' + str(delay[-1]))
            # expected throughput
            T = sum([p_values[i]*saloha.get_p_success(i) for i in range(len(p_values))])
            throughput.append(T)
        # choose the plot you need
        if metric == "throughput":
            p.append(plt.plot(lmb_all, throughput, '-'))
        elif metric == "delay":
            p.append(plt.plot(lmb_all, delay))
    # plotting parameters
    plt.grid(True)
    plt.xlabel('Total load '+r'$\lambda$')
    # choose the label
    if metric == "throughput":
        plt.ylabel('Throughput '+r'$T$')
    elif metric == "delay":
        plt.ylabel('Delay')
    # pick up the legend
    plt.legend((r'$q_r$='+str(q_r_all[0]), r'$q_r$='+str(q_r_all[1]), r'$q_r$='+str(q_r_all[2])), loc=0)
if __name__=='__main__':
    # Run a throughput sweep and display the resulting figure.
    plot_performance("throughput")
    plt.show()
| 26.268085 | 104 | 0.547384 |
aceb8835a19fa0c7b1069629b9d066459dc497f2 | 6,711 | py | Python | pyreach/tools/lib/cv2_eventloop.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | 13 | 2021-09-01T01:10:22.000Z | 2022-03-05T10:01:52.000Z | pyreach/tools/lib/cv2_eventloop.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | null | null | null | pyreach/tools/lib/cv2_eventloop.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | 6 | 2021-09-20T21:17:53.000Z | 2022-03-14T18:42:48.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpenCV eventloop for thread-safe access to opencv GUI.
Makes it possible for multiple threads to interact with cv2 UI.
This gets rid of the problem here: https://github.com/opencv/opencv/issues/8407
Normally cv2 insists that the thread which invokes waitKey, must also be the
one to perform other UI operations such as imshow, namedWindow, etc.
This module frees the developer from ensuring it for a multi-threaded
application. Hence UI and waitKey can now be manipulated by two different,
independent, threads.
Example -
cv2e = cv2_threadsafe.get_instance()
# User thread 1: Get keys.
for key in cv2e.key_iterator():
print(key)
# User thread 2: Can independently get the same keys.
for key in cv2e.key_iterator():
print(key)
# User thread 3: Can modify cv UI without blocking.
cv2e.call(cv2.namedWindow, "my window")
cv2e.stop() # To stop the event loop.
NOTE: Non UI opencv methods, e.g. image manipulation etc. are safe to be called
directly without requiring this module.
"""
import collections
import threading
from typing import Any, Callable, Deque, List, Optional, Tuple
import cv2 # type: ignore # type: ignore
# How long to wait on cv2.waitKey() for each pass of the main event loop,
# in milliseconds.
_CV2_EVENTLOOP_MS = 100
# Key code on which the event loop will stop (27 == Esc).
_STOP_KEY = 27  # Esc
class _KeyIterator:
"""Is really an iterator that returns keys as int's.
Used as the return type for cv2e.key_iterator().
Iteration ends when the cv2e object stops.
"""
def __init__(self,
add_key_listener: Callable[[Callable[[Optional[int]], None]],
None], stop: Callable[[], None],
timeout: Optional[float]) -> None:
"""Create the _KeyIterator.
Args:
add_key_listener: the key listener, called when a key is pressed.
stop: the stop callback, called when the loop is stopped.
timeout: timeout for each loop of the iterator.
"""
self._keys: Deque[int] = collections.deque()
self._event = threading.Event()
self._lock = threading.Lock()
self._timeout = timeout
self._stopped = False
self._stop = stop
# Use the passed function to add the key listener.
add_key_listener(self._on_key)
def _on_key(self, c: Optional[int]) -> None:
"""Called on key press.
Args:
c: the key that was pressed. None if is stopping of the iterator.
"""
if c is None:
self._stopped = True
if self._stopped:
with self._lock:
self._event.set()
return
with self._lock:
self._event.set()
assert c is not None # To appease pytype.
self._keys.append(c)
def __iter__(self) -> '_KeyIterator':
return self
def __next__(self) -> int:
while True:
try:
if not self._event.wait(self._timeout):
# Timed out.
return -1
break
except KeyboardInterrupt:
self._stop()
with self._lock:
if self._stopped:
raise StopIteration()
c = self._keys.popleft()
if not self._keys:
self._event.clear()
return c
class _SafeCv2:
  """Global thread-safe CV2 access object.

  Owns a dedicated thread that runs the cv2 event loop; all cv2 UI calls are
  funneled through that thread via a command queue.
  """
  _key_listeners: List[Callable[[Optional[int]], None]]
  _command_queue: Deque[Tuple[Callable[..., None], Any, Any]]
  _running: bool
  _thread: threading.Thread

  def __init__(self) -> None:
    """Initialize the object and start the event-loop thread."""
    # Methods to be called on a new key. Currently used internally for
    # key_iterator(). The method gets a None when the event loop ends.
    self._key_listeners = []
    # OpenCV function calls to be processed.
    self._command_queue = collections.deque()
    # Used to request the thread to stop.
    self._running = True
    self._thread = threading.Thread(target=self._run)
    self._thread.start()

  def call(self, fn: Callable[..., None], *args: Any, **kwargs: Any) -> None:
    """Queues an opencv method to be called.

    Note that the method will not be called immediately. If the args change by
    the time the method is called, it will use the modified arguments.

    Args:
      fn: An opencv function, e.g. cv2.namedWindow
      *args: Arguments to the method.
      **kwargs: Keyword args.
    """
    self._command_queue.append((fn, args, kwargs))

  def stop(self) -> None:
    """Stop the iterators.

    Enqueues the internal _stop so it executes on the event-loop thread,
    alongside any other queued cv2 commands.
    """
    self._command_queue.append((self._stop, [], {}))

  def _add_key_listener(self, listener: Callable[[Optional[int]],
                                                 None]) -> None:
    """Add a key listener.

    Args:
      listener: the listener. Immediately notified with None if the loop has
        already stopped.
    """
    if not self._running:
      listener(None)
    else:
      self._key_listeners.append(listener)

  def key_iterator(self, timeout_secs: Optional[float] = None) -> _KeyIterator:
    """Iterates keys.

    Does not generate -1, unless timeout_secs is set (then -1 is yielded on
    each timeout).

    Multiple threads can use it if needed.

    Args:
      timeout_secs: How long to wait for key press in seconds. If None, will
        wait indefinitely. Will yield -1 whenever it times out.

    Returns:
      the key iterator.
    """
    return _KeyIterator(self._add_key_listener, self._stop, timeout_secs)

  def _stop(self) -> None:
    """Stop the loop: notify all listeners with None and tear down cv2 UI."""
    print('Stopping cv2 event loop.')
    self._running = False
    while self._key_listeners:
      listener = self._key_listeners.pop()
      listener(None)
    cv2.destroyAllWindows()

  def _run(self) -> None:
    """Run the openCV event loop."""
    while True:
      c = None
      try:
        c = cv2.waitKey(_CV2_EVENTLOOP_MS)
      except KeyboardInterrupt:
        # Treat Ctrl-C like the stop key so the loop shuts down cleanly.
        c = _STOP_KEY
      if not self._running:
        break
      # waitKey returns -1 when no key was pressed within the timeout.
      if c != -1:
        for handler in self._key_listeners:
          handler(c)
      # Drain queued cv2 UI commands so they all run on this thread.
      while self._command_queue:
        fn, args, kwargs = self._command_queue.popleft()
        fn(*args, **kwargs)
      if c == _STOP_KEY:
        self._stop()
# Singleton instance, lazily created by get_instance().
_INSTANCE: Optional[_SafeCv2] = None
def get_instance() -> _SafeCv2:
  """Lazy initializes instance, and returns it."""
  # NOTE(review): this lazy initialization is unguarded; if two threads race
  # here, two _SafeCv2 event loops could be created. Confirm callers invoke
  # this from a single thread first, or add a module-level lock.
  global _INSTANCE  # pylint: disable=global-statement
  if _INSTANCE is None:
    _INSTANCE = _SafeCv2()
  return _INSTANCE
| 28.802575 | 80 | 0.659961 |
aceb884bc8682a95909fa72a3351a4da10d38c69 | 188,958 | py | Python | sdks/python/apache_beam/dataframe/frames.py | melap/beam | 82c7c123e94631ff4589198a9c739cd3899fcf77 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 9 | 2016-09-28T18:25:24.000Z | 2019-05-09T12:28:29.000Z | sdks/python/apache_beam/dataframe/frames.py | mouseflow/beam | bee56a65ead45c765d4695cfc7f019eb0d073f64 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 28 | 2020-03-04T22:01:48.000Z | 2022-03-12T00:59:47.000Z | sdks/python/apache_beam/dataframe/frames.py | mouseflow/beam | bee56a65ead45c765d4695cfc7f019eb0d073f64 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2020-11-04T04:12:06.000Z | 2021-12-07T07:01:08.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analogs for :class:`pandas.DataFrame` and :class:`pandas.Series`:
:class:`DeferredDataFrame` and :class:`DeferredSeries`.
These classes are effectively wrappers around a `schema-aware`_
:class:`~apache_beam.pvalue.PCollection` that provide a set of operations
compatible with the `pandas`_ API.
Note that we aim for the Beam DataFrame API to be completely compatible with
the pandas API, but there are some features that are currently unimplemented
for various reasons. Pay particular attention to the **'Differences from
pandas'** section for each operation to understand where we diverge.
.. _schema-aware:
https://beam.apache.org/documentation/programming-guide/#what-is-a-schema
.. _pandas:
https://pandas.pydata.org/
"""
import collections
import inspect
import itertools
import math
import re
import warnings
from typing import List
from typing import Optional
import numpy as np
import pandas as pd
from pandas.core.groupby.generic import DataFrameGroupBy
from apache_beam.dataframe import expressions
from apache_beam.dataframe import frame_base
from apache_beam.dataframe import io
from apache_beam.dataframe import partitionings
__all__ = [
'DeferredSeries',
'DeferredDataFrame',
]
# (major, minor) version of the installed pandas, e.g. (1, 3), used for
# version-dependent feature gating below.
PD_VERSION = tuple(map(int, pd.__version__.split('.')[0:2]))
def populate_not_implemented(pd_type):
  """Class decorator factory that back-fills every public attribute of
  ``pd_type`` missing from the deferred type with a not-implemented
  placeholder, so unsupported pandas APIs raise a helpful error."""
  def wrapper(deferred_type):
    for attr in dir(pd_type):
      # Don't auto-define hidden methods or dunders
      if attr.startswith('_'):
        continue
      # Respect anything the deferred type already defines.
      if hasattr(deferred_type, attr):
        continue
      pd_value = getattr(pd_type, attr)
      if isinstance(pd_value, property) or inspect.isclass(pd_value):
        # Some of the properties on pandas types (cat, dt, sparse), are
        # actually attributes with class values, not properties
        placeholder = property(
            frame_base.not_implemented_method(attr, base_type=pd_type))
        setattr(deferred_type, attr, placeholder)
      elif callable(pd_value):
        placeholder = frame_base.not_implemented_method(
            attr, base_type=pd_type)
        setattr(deferred_type, attr, placeholder)
    return deferred_type

  return wrapper
def _fillna_alias(method):
  """Build a deferred-frame method that forwards to ``fillna`` with a fixed
  ``method=`` argument (used for the ffill/bfill/backfill/pad aliases)."""
  def wrapper(self, *args, **kwargs):
    return self.fillna(*args, method=method, **kwargs)

  wrapper.__name__ = method
  wrapper.__doc__ = (
      f'{method} is only supported for axis="columns". '
      'axis="index" is order-sensitive.')

  # Apply the standard deferred-frame decorator stack.
  with_docs = frame_base.with_docs_from(pd.DataFrame)
  to_kwargs = frame_base.args_to_kwargs(pd.DataFrame)
  with_defaults = frame_base.populate_defaults(pd.DataFrame)
  return with_docs(to_kwargs(with_defaults(wrapper)))
# Aggregations that can be computed per-partition and combined with the same
# aggregation ("lifted").
LIFTABLE_AGGREGATIONS = ['all', 'any', 'max', 'min', 'prod', 'sum']
# Aggregations whose per-partition results must be combined with sum.
LIFTABLE_WITH_SUM_AGGREGATIONS = ['size', 'count']
# Aggregations that require collecting the full grouped data on a single node.
UNLIFTABLE_AGGREGATIONS = [
    'mean',
    'median',
    'quantile',
    'describe',
    'sem',
    'mad',
    'skew',
    'kurt',
    'kurtosis',
    # TODO: The below all have specialized distributed
    # implementations, but they require tracking
    # multiple intermediate series, which is difficult
    # to lift in groupby
    'std',
    'var',
    'corr',
    'cov',
    'nunique',
]
ALL_AGGREGATIONS = (
    LIFTABLE_AGGREGATIONS + LIFTABLE_WITH_SUM_AGGREGATIONS +
    UNLIFTABLE_AGGREGATIONS)
def _agg_method(base, func):
  """Build a deferred-frame method named ``func`` that delegates to
  ``agg(func)``, documenting non-parallelizable aggregations."""
  def wrapper(self, *args, **kwargs):
    return self.agg(func, *args, **kwargs)

  wrapper.__name__ = func
  if func in UNLIFTABLE_AGGREGATIONS:
    wrapper.__doc__ = (
        f"``{func}`` cannot currently be parallelized. It will "
        "require collecting all data on a single node.")

  decorate = frame_base.with_docs_from(base)
  return decorate(wrapper)
# Docstring appended to head() and tail() (commonly used to peek at datasets),
# explaining why they are unsupported and what to use instead.
_PEEK_METHOD_EXPLANATION = (
    "because it is `order-sensitive "
    "<https://s.apache.org/dataframe-order-sensitive-operations>`_.\n\n"
    "If you want to peek at a large dataset consider using interactive Beam's "
    ":func:`ib.collect "
    "<apache_beam.runners.interactive.interactive_beam.collect>` "
    "with ``n`` specified, or :meth:`sample`. If you want to find the "
    "N largest elements, consider using :meth:`DeferredDataFrame.nlargest`.")
class DeferredDataFrameOrSeries(frame_base.DeferredFrame):
def _render_indexes(self):
if self.index.nlevels == 1:
return 'index=' + (
'<unnamed>' if self.index.name is None else repr(self.index.name))
else:
return 'indexes=[' + ', '.join(
'<unnamed>' if ix is None else repr(ix)
for ix in self.index.names) + ']'
__array__ = frame_base.wont_implement_method(
pd.Series, '__array__', reason="non-deferred-result")
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  @frame_base.maybe_inplace
  def drop(self, labels, axis, index, columns, errors, **kwargs):
    """drop is not parallelizable when dropping from the index and
    ``errors="raise"`` is specified. It requires collecting all data on a single
    node in order to detect if one of the index values is missing."""
    # Normalize the labels/axis form into explicit index=/columns= arguments,
    # mirroring pandas' own validation.
    if labels is not None:
      if index is not None or columns is not None:
        raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
      if axis in (0, 'index'):
        index = labels
        columns = None
      elif axis in (1, 'columns'):
        index = None
        columns = labels
      else:
        raise ValueError(
            "axis must be one of (0, 1, 'index', 'columns'), "
            "got '%s'" % axis)

    if columns is not None:
      # Compute the proxy based on just the columns that are dropped.
      proxy = self._expr.proxy().drop(columns=columns, errors=errors)
    else:
      proxy = self._expr.proxy()

    if index is not None and errors == 'raise':
      # In order to raise an error about missing index values, we'll
      # need to collect the entire dataframe.
      # TODO: This could be parallelized by putting index values in a
      # ConstantExpression and partitioning by index.
      requires = partitionings.Singleton(
          reason=(
              "drop(errors='raise', axis='index') is not currently "
              "parallelizable. This requires collecting all data on a single "
              f"node in order to detect if one of {index!r} is missing."))
    else:
      # Dropping columns (or ignoring missing labels) is partition-local.
      requires = partitionings.Arbitrary()

    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'drop',
            lambda df: df.drop(
                axis=axis,
                index=index,
                columns=columns,
                errors=errors,
                **kwargs), [self._expr],
            proxy=proxy,
            requires_partition_by=requires))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def droplevel(self, level, axis):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'droplevel',
lambda df: df.droplevel(level, axis=axis), [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()
if axis in (1, 'column') else partitionings.Singleton()))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def fillna(self, value, method, axis, limit, **kwargs):
"""When ``axis="index"``, both ``method`` and ``limit`` must be ``None``.
otherwise this operation is order-sensitive."""
# Default value is None, but is overriden with index.
axis = axis or 'index'
if axis in (0, 'index'):
if method is not None:
raise frame_base.WontImplementError(
f"fillna(method={method!r}, axis={axis!r}) is not supported "
"because it is order-sensitive. Only fillna(method=None) is "
f"supported with axis={axis!r}.",
reason="order-sensitive")
if limit is not None:
raise frame_base.WontImplementError(
f"fillna(limit={method!r}, axis={axis!r}) is not supported because "
"it is order-sensitive. Only fillna(limit=None) is supported with "
f"axis={axis!r}.",
reason="order-sensitive")
if isinstance(self, DeferredDataFrame) and isinstance(value,
DeferredSeries):
# If self is a DataFrame and value is a Series we want to broadcast value
# to all partitions of self.
# This is OK, as its index must be the same size as the columns set of
# self, so cannot be too large.
class AsScalar(object):
def __init__(self, value):
self.value = value
with expressions.allow_non_parallel_operations():
value_expr = expressions.ComputedExpression(
'as_scalar',
lambda df: AsScalar(df), [value._expr],
requires_partition_by=partitionings.Singleton())
get_value = lambda x: x.value
requires = partitionings.Arbitrary()
elif isinstance(value, frame_base.DeferredBase):
# For other DeferredBase combinations, use Index partitioning to
# co-locate on the Index
value_expr = value._expr
get_value = lambda x: x
requires = partitionings.Index()
else:
# Default case, pass value through as a constant, no particular
# partitioning requirement
value_expr = expressions.ConstantExpression(value)
get_value = lambda x: x
requires = partitionings.Arbitrary()
return frame_base.DeferredFrame.wrap(
# yapf: disable
expressions.ComputedExpression(
'fillna',
lambda df,
value: df.fillna(
get_value(value),
method=method,
axis=axis,
limit=limit,
**kwargs), [self._expr, value_expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=requires))
  # Conditionally define the fillna aliases, mirroring whichever of them the
  # installed pandas version provides.
  if hasattr(pd.DataFrame, 'ffill'):
    ffill = _fillna_alias('ffill')
  if hasattr(pd.DataFrame, 'bfill'):
    bfill = _fillna_alias('bfill')
  if hasattr(pd.DataFrame, 'backfill'):
    backfill = _fillna_alias('backfill')
  if hasattr(pd.DataFrame, 'pad'):
    pad = _fillna_alias('pad')
@frame_base.with_docs_from(pd.DataFrame)
def first(self, offset):
per_partition = expressions.ComputedExpression(
'first-per-partition',
lambda df: df.sort_index().first(offset=offset), [self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'first',
lambda df: df.sort_index().first(offset=offset), [per_partition],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.DataFrame)
def last(self, offset):
per_partition = expressions.ComputedExpression(
'last-per-partition',
lambda df: df.sort_index().last(offset=offset), [self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'last',
lambda df: df.sort_index().last(offset=offset), [per_partition],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Singleton()))
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def groupby(self, by, level, axis, as_index, group_keys, **kwargs):
    """``as_index`` and ``group_keys`` must both be ``True``.

    Aggregations grouping by a categorical column with ``observed=False`` set
    are not currently parallelizable
    (`BEAM-11190 <https://issues.apache.org/jira/browse/BEAM-11190>`_).
    """
    if not as_index:
      raise NotImplementedError('groupby(as_index=False)')
    if not group_keys:
      raise NotImplementedError('groupby(group_keys=False)')

    if axis in (1, 'columns'):
      # Grouping over columns is partition-local and needs no re-indexing.
      return _DeferredGroupByCols(
          expressions.ComputedExpression(
              'groupbycols',
              lambda df: df.groupby(by, axis=axis, **kwargs), [self._expr],
              requires_partition_by=partitionings.Arbitrary(),
              preserves_partition_by=partitionings.Arbitrary()))

    # For row-wise grouping, each branch below computes:
    #   to_group: an expression whose index is exactly the grouping keys,
    #   to_group_with_index: an expression with the grouping keys prepended
    #     to (or combined with) the original index,
    #   grouping_columns/grouping_indexes: which keys came from columns vs
    #     index levels.
    if level is None and by is None:
      raise TypeError("You have to supply one of 'by' and 'level'")
    elif level is not None:
      # Grouping by one or more existing index levels.
      if isinstance(level, (list, tuple)):
        grouping_indexes = level
      else:
        grouping_indexes = [level]

      grouping_columns = []

      index = self._expr.proxy().index

      # Translate to level numbers only
      grouping_indexes = [
          l if isinstance(l, int) else index.names.index(l)
          for l in grouping_indexes
      ]

      if index.nlevels == 1:
        to_group_with_index = self._expr
        to_group = self._expr
      else:
        levels_to_drop = [
            i for i in range(index.nlevels) if i not in grouping_indexes
        ]

        # Reorder so the grouped indexes are first
        to_group_with_index = self.reorder_levels(
            grouping_indexes + levels_to_drop)

        grouping_indexes = list(range(len(grouping_indexes)))
        levels_to_drop = list(range(len(grouping_indexes), index.nlevels))
        if levels_to_drop:
          to_group = to_group_with_index.droplevel(levels_to_drop)._expr
        else:
          to_group = to_group_with_index._expr
        to_group_with_index = to_group_with_index._expr
    elif callable(by):
      # Grouping by a function of the index: map the index through ``by``.
      def map_index(df):
        df = df.copy()
        df.index = df.index.map(by)
        return df

      to_group = expressions.ComputedExpression(
          'map_index',
          map_index, [self._expr],
          requires_partition_by=partitionings.Arbitrary(),
          preserves_partition_by=partitionings.Singleton())

      orig_nlevels = self._expr.proxy().index.nlevels

      def prepend_mapped_index(df):
        df = df.copy()

        index = df.index.to_frame()
        index.insert(0, None, df.index.map(by))

        df.index = pd.MultiIndex.from_frame(
            index, names=[None] + list(df.index.names))
        return df

      to_group_with_index = expressions.ComputedExpression(
          'map_index_keep_orig',
          prepend_mapped_index,
          [self._expr],
          requires_partition_by=partitionings.Arbitrary(),
          # Partitioning by the original indexes is preserved
          preserves_partition_by=partitionings.Index(
              list(range(1, orig_nlevels + 1))))

      grouping_columns = []
      # The index we need to group by is the last one
      grouping_indexes = [0]
    elif isinstance(by, DeferredSeries):
      # Grouping by another deferred Series: align on the index, then use the
      # Series' values as the (new) grouping index.
      if isinstance(self, DeferredSeries):

        def set_index(s, by):
          df = pd.DataFrame(s)
          df, by = df.align(by, axis=0, join='inner')
          return df.set_index(by).iloc[:, 0]

        def prepend_index(s, by):
          df = pd.DataFrame(s)
          df, by = df.align(by, axis=0, join='inner')
          return df.set_index([by, df.index]).iloc[:, 0]

      else:

        def set_index(df, by):  # type: ignore
          df, by = df.align(by, axis=0, join='inner')
          return df.set_index(by)

        def prepend_index(df, by):  # type: ignore
          df, by = df.align(by, axis=0, join='inner')
          return df.set_index([by, df.index])

      to_group = expressions.ComputedExpression(
          'set_index',
          set_index, [self._expr, by._expr],
          requires_partition_by=partitionings.Index(),
          preserves_partition_by=partitionings.Singleton())

      orig_nlevels = self._expr.proxy().index.nlevels
      to_group_with_index = expressions.ComputedExpression(
          'prependindex',
          prepend_index, [self._expr, by._expr],
          requires_partition_by=partitionings.Index(),
          preserves_partition_by=partitionings.Index(
              list(range(1, orig_nlevels + 1))))

      grouping_columns = []
      grouping_indexes = [0]
    elif isinstance(by, np.ndarray):
      raise frame_base.WontImplementError(
          "Grouping by a concrete ndarray is order sensitive.",
          reason="order-sensitive")
    elif isinstance(self, DeferredDataFrame):
      # Grouping a DataFrame by column and/or index-level labels.
      if not isinstance(by, list):
        by = [by]
      # Find the columns that we need to move into the index so we can group by
      # them
      column_names = self._expr.proxy().columns
      grouping_columns = list(set(by).intersection(column_names))
      index_names = self._expr.proxy().index.names
      for label in by:
        if label not in index_names and label not in self._expr.proxy().columns:
          raise KeyError(label)
      grouping_indexes = list(set(by).intersection(index_names))

      if grouping_indexes:
        if set(by) == set(index_names):
          to_group = self._expr
        elif set(by).issubset(index_names):
          to_group = self.droplevel(index_names.difference(by))._expr
        else:
          to_group = self.reset_index(grouping_indexes).set_index(by)._expr
      else:
        to_group = self.set_index(by)._expr

      if grouping_columns:
        # TODO(BEAM-11711): It should be possible to do this without creating an
        # expression manually, by using DeferredDataFrame.set_index, i.e.:
        #   to_group_with_index = self.set_index([self.index] +
        #                                        grouping_columns)._expr
        to_group_with_index = expressions.ComputedExpression(
            'move_grouped_columns_to_index',
            lambda df: df.set_index([df.index] + grouping_columns, drop=False),
            [self._expr],
            requires_partition_by=partitionings.Arbitrary(),
            preserves_partition_by=partitionings.Index(
                list(range(self._expr.proxy().index.nlevels))))
      else:
        to_group_with_index = self._expr

    else:
      raise NotImplementedError(by)

    # Partition by the grouping index so each group lands on a single node,
    # then perform the pandas groupby within each partition.
    return DeferredGroupBy(
        expressions.ComputedExpression(
            'groupbyindex',
            lambda df: df.groupby(
                level=list(range(df.index.nlevels)), **kwargs), [to_group],
            requires_partition_by=partitionings.Index(),
            preserves_partition_by=partitionings.Arbitrary()),
        kwargs,
        to_group,
        to_group_with_index,
        grouping_columns=grouping_columns,
        grouping_indexes=grouping_indexes)
  @property  # type: ignore
  @frame_base.with_docs_from(pd.DataFrame)
  def loc(self):
    # Label-based indexing; the actual indexing logic lives in _DeferredLoc.
    return _DeferredLoc(self)
  @property  # type: ignore
  @frame_base.with_docs_from(pd.DataFrame)
  def iloc(self):
    """Position-based indexing with `iloc` is order-sensitive in almost every
    case. Beam DataFrame users should prefer label-based indexing with `loc`.
    """
    # Positional indexing; _DeferredILoc enforces the order-sensitivity rules.
    return _DeferredILoc(self)
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  @frame_base.maybe_inplace
  def reset_index(self, level=None, **kwargs):
    """Dropping the entire index (e.g. with ``reset_index(level=None)``) is
    not parallelizable. It is also only guaranteed that the newly generated
    index values will be unique. The Beam DataFrame API makes no guarantee
    that the same index values as the equivalent pandas operation will be
    generated, because that implementation is order-sensitive."""
    # Normalize a scalar level to a list so the nlevels comparison below
    # works uniformly.
    if level is not None and not isinstance(level, (tuple, list)):
      level = [level]
    if level is None or len(level) == self._expr.proxy().index.nlevels:
      # TODO(BEAM-12182): Could do distributed re-index with offsets.
      requires_partition_by = partitionings.Singleton(
          reason=(
              f"reset_index(level={level!r}) drops the entire index and "
              "creates a new one, so it cannot currently be parallelized "
              "(BEAM-12182)."))
    else:
      # Only some levels are reset; the remaining index levels keep the data
      # partitionable.
      requires_partition_by = partitionings.Arbitrary()
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'reset_index',
            lambda df: df.reset_index(level=level, **kwargs), [self._expr],
            preserves_partition_by=partitionings.Singleton(),
            requires_partition_by=requires_partition_by))
abs = frame_base._elementwise_method('abs', base=pd.core.generic.NDFrame)
  @frame_base.with_docs_from(pd.core.generic.NDFrame)
  @frame_base.args_to_kwargs(pd.core.generic.NDFrame)
  @frame_base.populate_defaults(pd.core.generic.NDFrame)
  def astype(self, dtype, copy, errors):
    """astype is not parallelizable when ``errors="ignore"`` is specified.

    ``copy=False`` is not supported because it relies on memory-sharing
    semantics.

    ``dtype="category`` is not supported because the type of the output column
    depends on the data. Please use ``pd.CategoricalDtype`` with explicit
    categories instead.
    """
    requires = partitionings.Arbitrary()

    if errors == "ignore":
      # We need all data in order to ignore errors and propagate the original
      # data.
      requires = partitionings.Singleton(
          reason=(
              f"astype(errors={errors!r}) is currently not parallelizable, "
              "because all data must be collected on one node to determine if "
              "the original data should be propagated instead."))

    if not copy:
      raise frame_base.WontImplementError(
          f"astype(copy={copy!r}) is not supported because it relies on "
          "memory-sharing semantics that are not compatible with the Beam "
          "model.")

    if dtype == 'category':
      raise frame_base.WontImplementError(
          "astype(dtype='category') is not supported because the type of the "
          "output column depends on the data. Please use pd.CategoricalDtype "
          "with explicit categories instead.",
          reason="non-deferred-columns")

    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'astype',
            lambda df: df.astype(dtype=dtype, copy=copy, errors=errors),
            [self._expr],
            requires_partition_by=requires,
            preserves_partition_by=partitionings.Arbitrary()))
at_time = frame_base._elementwise_method(
'at_time', base=pd.core.generic.NDFrame)
between_time = frame_base._elementwise_method(
'between_time', base=pd.core.generic.NDFrame)
copy = frame_base._elementwise_method('copy', base=pd.core.generic.NDFrame)
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  @frame_base.maybe_inplace
  def replace(self, to_replace, value, limit, method, **kwargs):
    """``method`` is not supported in the Beam DataFrame API because it is
    order-sensitive. It cannot be specified.

    If ``limit`` is specified this operation is not parallelizable."""
    if method is not None and not isinstance(to_replace,
                                             dict) and value is None:
      # pandas only relies on method if to_replace is not a dictionary, and
      # value is None
      raise frame_base.WontImplementError(
          f"replace(method={method!r}) is not supported because it is "
          "order sensitive. Only replace(method=None) is supported.",
          reason="order-sensitive")

    if limit is None:
      requires_partition_by = partitionings.Arbitrary()
    else:
      # A global cap on the number of replacements requires seeing all data.
      requires_partition_by = partitionings.Singleton(
          reason=(
              f"replace(limit={limit!r}) cannot currently be parallelized. It "
              "requires collecting all data on a single node."))
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'replace',
            lambda df: df.replace(
                to_replace=to_replace,
                value=value,
                limit=limit,
                method=method,
                **kwargs), [self._expr],
            preserves_partition_by=partitionings.Arbitrary(),
            requires_partition_by=requires_partition_by))
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def tz_localize(self, ambiguous, **kwargs):
    """``ambiguous`` cannot be set to ``"infer"`` as its semantics are
    order-sensitive. Similarly, specifying ``ambiguous`` as an
    :class:`~numpy.ndarray` is order-sensitive, but you can achieve similar
    functionality by specifying ``ambiguous`` as a Series."""
    if isinstance(ambiguous, np.ndarray):
      # A raw ndarray would have to be matched to rows by position.
      raise frame_base.WontImplementError(
          "tz_localize(ambiguous=ndarray) is not supported because it makes "
          "this operation sensitive to the order of the data. Please use a "
          "DeferredSeries instead.",
          reason="order-sensitive")
    elif isinstance(ambiguous, frame_base.DeferredFrame):
      # A deferred Series can be joined by index, so co-partition the two
      # inputs and localize per partition.
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'tz_localize',
              lambda df,
              ambiguous: df.tz_localize(ambiguous=ambiguous, **kwargs),
              [self._expr, ambiguous._expr],
              requires_partition_by=partitionings.Index(),
              preserves_partition_by=partitionings.Singleton()))
    elif ambiguous == 'infer':
      # infer attempts to infer based on the order of the timestamps
      raise frame_base.WontImplementError(
          f"tz_localize(ambiguous={ambiguous!r}) is not allowed because it "
          "makes this operation sensitive to the order of the data.",
          reason="order-sensitive")

    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'tz_localize',
            lambda df: df.tz_localize(ambiguous=ambiguous, **kwargs),
            [self._expr],
            requires_partition_by=partitionings.Arbitrary(),
            preserves_partition_by=partitionings.Singleton()))
  @property # type: ignore
  @frame_base.with_docs_from(pd.DataFrame)
  def size(self):
    # Compute per-partition sizes, then sum them in a final non-parallel
    # stage. Returns a deferred scalar.
    sizes = expressions.ComputedExpression(
        'get_sizes',
        # Wrap scalar results in a Series for easier concatenation later
        lambda df: pd.Series(df.size),
        [self._expr],
        requires_partition_by=partitionings.Arbitrary(),
        preserves_partition_by=partitionings.Singleton())

    with expressions.allow_non_parallel_operations(True):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'sum_sizes',
              lambda sizes: sizes.sum(), [sizes],
              requires_partition_by=partitionings.Singleton(),
              preserves_partition_by=partitionings.Singleton()))
  def length(self):
    """Alternative to ``len(df)`` which returns a deferred result that can be
    used in arithmetic with :class:`DeferredSeries` or
    :class:`DeferredDataFrame` instances."""
    # Same two-stage pattern as `size`: per-partition lengths followed by a
    # global (single-node) sum.
    lengths = expressions.ComputedExpression(
        'get_lengths',
        # Wrap scalar results in a Series for easier concatenation later
        lambda df: pd.Series(len(df)),
        [self._expr],
        requires_partition_by=partitionings.Arbitrary(),
        preserves_partition_by=partitionings.Singleton())

    with expressions.allow_non_parallel_operations(True):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'sum_lengths',
              lambda lengths: lengths.sum(), [lengths],
              requires_partition_by=partitionings.Singleton(),
              preserves_partition_by=partitionings.Singleton()))
def __len__(self):
raise frame_base.WontImplementError(
"len(df) is not currently supported because it produces a non-deferred "
"result. Consider using df.length() instead.",
reason="non-deferred-result")
  @property # type: ignore
  @frame_base.with_docs_from(pd.DataFrame)
  def empty(self):
    # The whole frame is empty iff every partition is empty, so reduce the
    # per-partition flags with all().
    empties = expressions.ComputedExpression(
        'get_empties',
        # Wrap scalar results in a Series for easier concatenation later
        lambda df: pd.Series(df.empty),
        [self._expr],
        requires_partition_by=partitionings.Arbitrary(),
        preserves_partition_by=partitionings.Singleton())

    with expressions.allow_non_parallel_operations(True):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'check_all_empty',
              lambda empties: empties.all(), [empties],
              requires_partition_by=partitionings.Singleton(),
              preserves_partition_by=partitionings.Singleton()))
  @frame_base.with_docs_from(pd.DataFrame)
  def bool(self):
    # Returns a deferred scalar holding the single boolean element.
    # TODO: Documentation about DeferredScalar

    # Will throw if any partition has >1 element
    bools = expressions.ComputedExpression(
        'get_bools',
        # Wrap scalar results in a Series for easier concatenation later
        lambda df: pd.Series([], dtype=bool)
        if df.empty else pd.Series([df.bool()]),
        [self._expr],
        requires_partition_by=partitionings.Arbitrary(),
        preserves_partition_by=partitionings.Singleton())
    with expressions.allow_non_parallel_operations(True):
      # Will throw if overall dataset has != 1 element
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'combine_all_bools',
              lambda bools: bools.bool(), [bools],
              # proxy=bool() marks the result as a scalar bool.
              proxy=bool(),
              requires_partition_by=partitionings.Singleton(),
              preserves_partition_by=partitionings.Singleton()))
  @frame_base.with_docs_from(pd.DataFrame)
  def equals(self, other):
    # Co-partition both inputs by index, compare each partition, then AND the
    # per-partition results on a single node.
    intermediate = expressions.ComputedExpression(
        'equals_partitioned',
        # Wrap scalar results in a Series for easier concatenation later
        lambda df,
        other: pd.Series(df.equals(other)),
        [self._expr, other._expr],
        requires_partition_by=partitionings.Index(),
        preserves_partition_by=partitionings.Singleton())
    with expressions.allow_non_parallel_operations(True):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'aggregate_equals',
              lambda df: df.all(), [intermediate],
              requires_partition_by=partitionings.Singleton(),
              preserves_partition_by=partitionings.Singleton()))
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def sort_values(self, axis, **kwargs):
"""``sort_values`` is not implemented.
It is not implemented for ``axis=index`` because it imposes an ordering on
the dataset, and it likely will not be maintained (see
https://s.apache.org/dataframe-order-sensitive-operations).
It is not implemented for ``axis=columns`` because it makes the order of
the columns depend on the data (see
https://s.apache.org/dataframe-non-deferred-columns)."""
if axis in (0, 'index'):
# axis=index imposes an ordering on the DataFrame rows which we do not
# support
raise frame_base.WontImplementError(
"sort_values(axis=index) is not supported because it imposes an "
"ordering on the dataset which likely will not be preserved.",
reason="order-sensitive")
else:
# axis=columns will reorder the columns based on the data
raise frame_base.WontImplementError(
"sort_values(axis=columns) is not supported because the order of the "
"columns in the result depends on the data.",
reason="non-deferred-columns")
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  @frame_base.maybe_inplace
  def sort_index(self, axis, **kwargs):
    """``axis=index`` is not allowed because it imposes an ordering on the
    dataset, and we cannot guarantee it will be maintained (see
    https://s.apache.org/dataframe-order-sensitive-operations). Only
    ``axis=columns`` is allowed."""
    if axis in (0, 'index'):
      # axis=rows imposes an ordering on the DataFrame which we do not support
      raise frame_base.WontImplementError(
          "sort_index(axis=index) is not supported because it imposes an "
          "ordering on the dataset which we cannot guarantee will be "
          "preserved.",
          reason="order-sensitive")

    # axis=columns reorders the columns by name
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'sort_index',
            lambda df: df.sort_index(axis, **kwargs),
            [self._expr],
            requires_partition_by=partitionings.Arbitrary(),
            preserves_partition_by=partitionings.Arbitrary(),
        ))
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  @frame_base.maybe_inplace
  def where(self, cond, other, errors, **kwargs):
    """where is not parallelizable when ``errors="ignore"`` is specified."""
    requires = partitionings.Arbitrary()
    # Split arguments into deferred frames (must be co-partitioned inputs)
    # and concrete values (captured directly in the closure).
    deferred_args = {}
    actual_args = {}

    # TODO(bhulette): This is very similar to the logic in
    # frame_base.elementwise_method, can we unify it?
    if isinstance(cond, frame_base.DeferredFrame):
      deferred_args['cond'] = cond
      requires = partitionings.Index()
    else:
      actual_args['cond'] = cond

    if isinstance(other, frame_base.DeferredFrame):
      deferred_args['other'] = other
      requires = partitionings.Index()
    else:
      actual_args['other'] = other

    if errors == "ignore":
      # We need all data in order to ignore errors and propagate the original
      # data.
      requires = partitionings.Singleton(
          reason=(
              f"where(errors={errors!r}) is currently not parallelizable, "
              "because all data must be collected on one node to determine if "
              "the original data should be propagated instead."))

    actual_args['errors'] = errors

    def where_execution(df, *args):
      # Re-associate the now-concrete deferred inputs with their keyword
      # names before delegating to pandas.
      runtime_values = {
          name: value
          for (name, value) in zip(deferred_args.keys(), args)
      }
      return df.where(**runtime_values, **actual_args, **kwargs)

    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            "where",
            where_execution,
            [self._expr] + [df._expr for df in deferred_args.values()],
            requires_partition_by=requires,
            preserves_partition_by=partitionings.Index(),
        ))
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  @frame_base.maybe_inplace
  def mask(self, cond, **kwargs):
    """mask is not parallelizable when ``errors="ignore"`` is specified."""
    # mask is exactly where with the condition inverted.
    return self.where(~cond, **kwargs)
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def xs(self, key, axis, level, **kwargs):
    """Note that ``xs(axis='index')`` will raise a ``KeyError`` at execution
    time if the key does not exist in the index."""
    if axis in ('columns', 1):
      # Special case for axis=columns. This is a simple project that raises a
      # KeyError at construction time for missing columns.
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'xs',
              lambda df: df.xs(key, axis=axis, **kwargs), [self._expr],
              requires_partition_by=partitionings.Arbitrary(),
              preserves_partition_by=partitionings.Arbitrary()))
    elif axis not in ('index', 0):
      # Make sure that user's axis is valid
      raise ValueError(
          "axis must be one of ('index', 0, 'columns', 1). "
          f"got {axis!r}.")

    if not isinstance(key, tuple):
      key = (key, )

    key_size = len(key)
    # A one-row Series whose index is the key itself; partitioning it by
    # Index routes it to the partition that may contain the key.
    key_series = pd.Series([key], pd.MultiIndex.from_tuples([key]))
    key_expr = expressions.ConstantExpression(
        key_series, proxy=key_series.iloc[:0])

    if level is None:
      reindexed = self
    else:
      if not isinstance(level, list):
        level = [level]

      # If user specified levels, reindex so those levels are at the beginning.
      # Keep the others and preserve their order.
      level = [
          l if isinstance(l, int) else list(self.index.names).index(l)
          for l in level
      ]

      reindexed = self.reorder_levels(
          level + [i for i in range(self.index.nlevels) if i not in level])

    def xs_partitioned(frame, key):
      # `key` arrives as the routed one-row Series: empty in partitions that
      # cannot contain the key.
      if not len(key):
        # key is not in this partition, return empty dataframe
        return frame.iloc[:0].droplevel(list(range(key_size)))

      # key should be in this partition, call xs. Will raise KeyError if not
      # present.
      return frame.xs(key.item())

    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'xs',
            xs_partitioned,
            [reindexed._expr, key_expr],
            requires_partition_by=partitionings.Index(list(range(key_size))),
            # Drops index levels, so partitioning is not preserved
            preserves_partition_by=partitionings.Singleton()))
  @property
  def dtype(self):
    # dtype is known at construction time from the proxy; no pipeline work.
    return self._expr.proxy().dtype
isin = frame_base._elementwise_method('isin', base=pd.DataFrame)
combine_first = frame_base._elementwise_method(
'combine_first', base=pd.DataFrame)
combine = frame_base._proxy_method(
'combine',
base=pd.DataFrame,
requires_partition_by=expressions.partitionings.Singleton(
reason="combine() is not parallelizable because func might operate "
"on the full dataset."),
preserves_partition_by=expressions.partitionings.Singleton())
  @property # type: ignore
  @frame_base.with_docs_from(pd.DataFrame)
  def ndim(self):
    # Dimensionality is a property of the proxy, available without running
    # the pipeline.
    return self._expr.proxy().ndim
@property # type: ignore
@frame_base.with_docs_from(pd.DataFrame)
def index(self):
return _DeferredIndex(self)
@index.setter
def _set_index(self, value):
# TODO: assigning the index is generally order-sensitive, but we could
# support it in some rare cases, e.g. when assigning the index from one
# of a DataFrame's columns
raise NotImplementedError(
"Assigning an index is not yet supported. "
"Consider using set_index() instead.")
reindex = frame_base.wont_implement_method(
pd.DataFrame, 'reindex', reason="order-sensitive")
hist = frame_base.wont_implement_method(
pd.DataFrame, 'hist', reason="plotting-tools")
attrs = property(
frame_base.wont_implement_method(
pd.DataFrame, 'attrs', reason='experimental'))
reorder_levels = frame_base._proxy_method(
'reorder_levels',
base=pd.DataFrame,
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
resample = frame_base.wont_implement_method(
pd.DataFrame, 'resample', reason='event-time-semantics')
rolling = frame_base.wont_implement_method(
pd.DataFrame, 'rolling', reason='event-time-semantics')
to_xarray = frame_base.wont_implement_method(
pd.DataFrame, 'to_xarray', reason='non-deferred-result')
to_clipboard = frame_base.wont_implement_method(
pd.DataFrame, 'to_clipboard', reason="non-deferred-result")
swapaxes = frame_base.wont_implement_method(
pd.Series, 'swapaxes', reason="non-deferred-columns")
infer_object = frame_base.wont_implement_method(
pd.Series, 'infer_objects', reason="non-deferred-columns")
ewm = frame_base.wont_implement_method(
pd.Series, 'ewm', reason="event-time-semantics")
expanding = frame_base.wont_implement_method(
pd.Series, 'expanding', reason="event-time-semantics")
sparse = property(
frame_base.not_implemented_method(
'sparse', 'BEAM-12425', base_type=pd.DataFrame))
transform = frame_base._elementwise_method('transform', base=pd.DataFrame)
tz_convert = frame_base._proxy_method(
'tz_convert',
base=pd.DataFrame,
requires_partition_by=partitionings.Arbitrary(),
# Manipulates index, partitioning is not preserved
preserves_partition_by=partitionings.Singleton())
@populate_not_implemented(pd.Series)
@frame_base.DeferredFrame._register_for(pd.Series)
class DeferredSeries(DeferredDataFrameOrSeries):
  def __repr__(self):
    # Summarize name, dtype, and index structure without running the pipeline.
    return (
        f'DeferredSeries(name={self.name!r}, dtype={self.dtype}, '
        f'{self._render_indexes()})')
  @property # type: ignore
  @frame_base.with_docs_from(pd.Series)
  def name(self):
    # Series name is known from the proxy at construction time.
    return self._expr.proxy().name
  @name.setter
  def name(self, value):
    def fn(s):
      # Copy first so the upstream partition data is not mutated in place.
      s = s.copy()
      s.name = value
      return s

    # Renaming is purely metadata; it can run on any partitioning and
    # replaces this frame's underlying expression.
    self._expr = expressions.ComputedExpression(
        'series_set_name',
        fn, [self._expr],
        requires_partition_by=partitionings.Arbitrary(),
        preserves_partition_by=partitionings.Arbitrary())
  @property # type: ignore
  @frame_base.with_docs_from(pd.Series)
  def hasnans(self):
    # Any partition containing a NaN makes the whole Series hasnans=True,
    # so reduce per-partition flags with any().
    has_nans = expressions.ComputedExpression(
        'hasnans',
        lambda s: pd.Series(s.hasnans), [self._expr],
        requires_partition_by=partitionings.Arbitrary(),
        preserves_partition_by=partitionings.Singleton())

    with expressions.allow_non_parallel_operations():
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'combine_hasnans',
              lambda s: s.any(), [has_nans],
              requires_partition_by=partitionings.Singleton(),
              preserves_partition_by=partitionings.Singleton()))
  @property # type: ignore
  @frame_base.with_docs_from(pd.Series)
  def dtype(self):
    # Known from the proxy; no pipeline execution needed.
    return self._expr.proxy().dtype

  # pandas aliases Series.dtypes to Series.dtype; mirror that here.
  dtypes = dtype
  def __getitem__(self, key):
    """Indexing into a DeferredSeries.

    Supported: the null slice / Ellipsis (identity), label-based slices and
    callables, and boolean DeferredSeries masks. Positional and iterator
    keys are rejected as order-sensitive; scalar lookups are rejected as
    non-deferred.
    """
    if _is_null_slice(key) or key is Ellipsis:
      # Identity: no new expression needed.
      return self

    elif (isinstance(key, int) or _is_integer_slice(key)
          ) and self._expr.proxy().index._should_fallback_to_positional():
      # Integer keys would be interpreted positionally for this index type.
      raise frame_base.WontImplementError(
          "Accessing an item by an integer key is order sensitive for this "
          "Series.",
          reason="order-sensitive")

    elif isinstance(key, slice) or callable(key):
      # Label-based slices and callables apply independently per partition.
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              # yapf: disable
              'getitem',
              lambda df: df[key],
              [self._expr],
              requires_partition_by=partitionings.Arbitrary(),
              preserves_partition_by=partitionings.Arbitrary()))

    elif isinstance(key, DeferredSeries) and key._expr.proxy().dtype == bool:
      # Boolean mask: co-partition the data and the mask by index.
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              # yapf: disable
              'getitem',
              lambda df,
              indexer: df[indexer],
              [self._expr, key._expr],
              requires_partition_by=partitionings.Index(),
              preserves_partition_by=partitionings.Arbitrary()))

    elif pd.core.series.is_iterator(key) or pd.core.common.is_bool_indexer(key):
      raise frame_base.WontImplementError(
          "Accessing a DeferredSeries with an iterator is sensitive to the "
          "order of the data.",
          reason="order-sensitive")

    else:
      # We could consider returning a deferred scalar, but that might
      # be more surprising than a clear error.
      raise frame_base.WontImplementError(
          f"Indexing a series with key of type {type(key)} is not supported "
          "because it produces a non-deferred result.",
          reason="non-deferred-result")
  @frame_base.with_docs_from(pd.Series)
  def keys(self):
    # For a Series, keys() is simply its index.
    return self.index
# Series.T == transpose. Both are a no-op
T = frame_base._elementwise_method('T', base=pd.Series)
transpose = frame_base._elementwise_method('transpose', base=pd.Series)
shape = property(
frame_base.wont_implement_method(
pd.Series, 'shape', reason="non-deferred-result"))
  @frame_base.with_docs_from(pd.Series)
  @frame_base.args_to_kwargs(pd.Series)
  @frame_base.populate_defaults(pd.Series)
  def append(self, to_append, ignore_index, verify_integrity, **kwargs):
    """``ignore_index=True`` is not supported, because it requires generating an
    order-sensitive index."""
    if not isinstance(to_append, DeferredSeries):
      raise frame_base.WontImplementError(
          "append() only accepts DeferredSeries instances, received " +
          str(type(to_append)))
    if ignore_index:
      raise frame_base.WontImplementError(
          "append(ignore_index=True) is order sensitive because it requires "
          "generating a new index based on the order of the data.",
          reason="order-sensitive")

    if verify_integrity:
      # We can verify the index is non-unique within index partitioned data.
      requires = partitionings.Index()
    else:
      requires = partitionings.Arbitrary()

    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'append',
            lambda s,
            to_append: s.append(
                to_append, verify_integrity=verify_integrity, **kwargs),
            [self._expr, to_append._expr],
            requires_partition_by=requires,
            preserves_partition_by=partitionings.Arbitrary()))
  @frame_base.with_docs_from(pd.Series)
  @frame_base.args_to_kwargs(pd.Series)
  @frame_base.populate_defaults(pd.Series)
  def align(self, other, join, axis, level, method, **kwargs):
    """Aligning per-level is not yet supported. Only the default,
    ``level=None``, is allowed.

    Filling NaN values via ``method`` is not supported, because it is
    `order-sensitive
    <https://s.apache.org/dataframe-order-sensitive-operations>`_.
    Only the default, ``method=None``, is allowed."""
    if level is not None:
      raise NotImplementedError('per-level align')
    if method is not None:
      raise frame_base.WontImplementError(
          f"align(method={method!r}) is not supported because it is "
          "order sensitive. Only align(method=None) is supported.",
          reason="order-sensitive")
    # NOTE(review): `join` and `axis` are accepted but the concat below is
    # always an inner join — confirm whether other join modes should be
    # rejected explicitly.
    # We're using pd.concat here as expressions don't yet support
    # multiple return values.
    aligned = frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'align',
            lambda x,
            y: pd.concat([x, y], axis=1, join='inner'),
            [self._expr, other._expr],
            requires_partition_by=partitionings.Index(),
            preserves_partition_by=partitionings.Arbitrary()))
    return aligned.iloc[:, 0], aligned.iloc[:, 1]
argsort = frame_base.wont_implement_method(
pd.Series, 'argsort', reason="order-sensitive")
array = property(
frame_base.wont_implement_method(
pd.Series, 'array', reason="non-deferred-result"))
# We can't reliably predict the output type, it depends on whether `key` is:
# - not in the index (default_value)
# - in the index once (constant)
# - in the index multiple times (Series)
get = frame_base.wont_implement_method(
pd.Series, 'get', reason="non-deferred-columns")
ravel = frame_base.wont_implement_method(
pd.Series, 'ravel', reason="non-deferred-result")
slice_shift = frame_base.wont_implement_method(
pd.Series, 'slice_shift', reason="deprecated")
tshift = frame_base.wont_implement_method(
pd.Series, 'tshift', reason="deprecated")
rename = frame_base._proxy_method(
'rename',
base=pd.Series,
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
between = frame_base._elementwise_method('between', base=pd.Series)
add_suffix = frame_base._proxy_method(
'add_suffix',
base=pd.DataFrame,
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
add_prefix = frame_base._proxy_method(
'add_prefix',
base=pd.DataFrame,
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
  @frame_base.with_docs_from(pd.Series)
  @frame_base.args_to_kwargs(pd.Series)
  @frame_base.populate_defaults(pd.Series)
  def explode(self, ignore_index):
    # ignoring the index will not preserve it
    preserves = (
        partitionings.Singleton() if ignore_index else partitionings.Index())
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'explode',
            lambda s: s.explode(ignore_index), [self._expr],
            preserves_partition_by=preserves,
            requires_partition_by=partitionings.Arbitrary()))
  @frame_base.with_docs_from(pd.DataFrame)
  def dot(self, other):
    """``other`` must be a :class:`DeferredDataFrame` or :class:`DeferredSeries`
    instance. Computing the dot product with an array-like is not supported
    because it is order-sensitive."""
    left = self._expr
    if isinstance(other, DeferredSeries):
      # Promote the Series to a one-column DataFrame so both branches below
      # share the same partial-product/sum logic.
      right = expressions.ComputedExpression(
          'to_dataframe',
          pd.DataFrame, [other._expr],
          requires_partition_by=partitionings.Arbitrary(),
          preserves_partition_by=partitionings.Arbitrary())
      right_is_series = True
    elif isinstance(other, DeferredDataFrame):
      right = other._expr
      right_is_series = False
    else:
      raise frame_base.WontImplementError(
          "other must be a DeferredDataFrame or DeferredSeries instance. "
          "Passing a concrete list or numpy array is not supported. Those "
          "types have no index and must be joined based on the order of the "
          "data.",
          reason="order-sensitive")

    # Per-partition partial products, co-partitioned by index.
    dots = expressions.ComputedExpression(
        'dot',
        # Transpose so we can sum across rows.
        (lambda left, right: pd.DataFrame(left @ right).T),
        [left, right],
        requires_partition_by=partitionings.Index())
    with expressions.allow_non_parallel_operations(True):
      # Combine the partial products on a single node.
      sums = expressions.ComputedExpression(
          'sum',
          lambda dots: dots.sum(), #
          [dots],
          requires_partition_by=partitionings.Singleton())

      if right_is_series:
        # Undo the one-column-DataFrame promotion above.
        result = expressions.ComputedExpression(
            'extract',
            lambda df: df[0], [sums],
            requires_partition_by=partitionings.Singleton())
      else:
        result = sums

      return frame_base.DeferredFrame.wrap(result)

  # The @ operator delegates to dot().
  __matmul__ = dot
  @frame_base.with_docs_from(pd.Series)
  @frame_base.args_to_kwargs(pd.Series)
  @frame_base.populate_defaults(pd.Series)
  def nunique(self, **kwargs):
    # Count distinct values as the size of the deduplicated Series.
    # keep="any" is a Beam extension of drop_duplicates: any one of the
    # duplicates may be retained, avoiding an order-sensitive choice.
    return self.drop_duplicates(keep="any").size
  @frame_base.with_docs_from(pd.Series)
  @frame_base.args_to_kwargs(pd.Series)
  @frame_base.populate_defaults(pd.Series)
  def quantile(self, q, **kwargs):
    """quantile is not parallelizable. See
    `BEAM-12167 <https://issues.apache.org/jira/browse/BEAM-12167>`_ tracking
    the possible addition of an approximate, parallelizable implementation of
    quantile."""
    # TODO(BEAM-12167): Provide an option for approximate distributed
    # quantiles
    requires = partitionings.Singleton(
        reason=(
            "Computing quantiles across index cannot currently be "
            "parallelized. See BEAM-12167 tracking the possible addition of an "
            "approximate, parallelizable implementation of quantile."))

    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'quantile',
            lambda df: df.quantile(q=q, **kwargs), [self._expr],
            requires_partition_by=requires,
            preserves_partition_by=partitionings.Singleton()))
  @frame_base.with_docs_from(pd.Series)
  def std(self, *args, **kwargs):
    # Compute variance (deferred scalar) with same args, then sqrt it
    return self.var(*args, **kwargs).apply(lambda var: math.sqrt(var))
  @frame_base.with_docs_from(pd.Series)
  @frame_base.args_to_kwargs(pd.Series)
  @frame_base.populate_defaults(pd.Series)
  def var(self, axis, skipna, level, ddof, **kwargs):
    """Per-level aggregation is not yet supported (BEAM-11777). Only the
    default, ``level=None``, is allowed."""
    if level is not None:
      raise NotImplementedError("per-level aggregation")
    if skipna is None or skipna:
      self = self.dropna()  # pylint: disable=self-cls-assignment

    # See the online, numerically stable formulae at
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
    # and
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
    def compute_moments(x):
      # Per-partition count, scaled second moment, and sum.
      n = len(x)
      m = x.std(ddof=0)**2 * n
      s = x.sum()
      return pd.DataFrame(dict(m=[m], s=[s], n=[n]))

    def combine_moments(data):
      # Pairwise-merge the per-partition moments, then evaluate the variance
      # with the requested delta degrees of freedom.
      m = s = n = 0.0
      for datum in data.itertuples():
        if datum.n == 0:
          continue
        elif n == 0:
          m, s, n = datum.m, datum.s, datum.n
        else:
          delta = s / n - datum.s / datum.n
          m += datum.m + delta**2 * n * datum.n / (n + datum.n)
          s += datum.s
          n += datum.n
      if n <= ddof:
        return float('nan')
      else:
        return m / (n - ddof)

    moments = expressions.ComputedExpression(
        'compute_moments',
        compute_moments, [self._expr],
        requires_partition_by=partitionings.Arbitrary())
    with expressions.allow_non_parallel_operations(True):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'combine_moments',
              combine_moments, [moments],
              requires_partition_by=partitionings.Singleton()))
  @frame_base.with_docs_from(pd.Series)
  @frame_base.args_to_kwargs(pd.Series)
  @frame_base.populate_defaults(pd.Series)
  def corr(self, other, method, min_periods):
    """Only ``method='pearson'`` is currently parallelizable."""
    if method == 'pearson':  # Note that this is the default.
      # Align the two Series on their common index, then compute the
      # correlation from distributed moments.
      x, y = self.dropna().align(other.dropna(), 'inner')
      return x._corr_aligned(y, min_periods)

    else:
      reason = (
          f"Encountered corr(method={method!r}) which cannot be "
          "parallelized. Only corr(method='pearson') is currently "
          "parallelizable.")
      # The rank-based correlations are not obviously parallelizable, though
      # perhaps an approximation could be done with a knowledge of quantiles
      # and custom partitioning.
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'corr',
              lambda df,
              other: df.corr(other, method=method, min_periods=min_periods),
              [self._expr, other._expr],
              requires_partition_by=partitionings.Singleton(reason=reason)))
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def skew(self, axis, skipna, level, numeric_only, **kwargs):
if level is not None:
raise NotImplementedError("per-level aggregation")
if skipna is None or skipna:
self = self.dropna() # pylint: disable=self-cls-assignment
# See the online, numerically stable formulae at
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Higher-order_statistics
# Note that we are calculating the unbias (sample) version of skew here.
# See https://en.wikipedia.org/wiki/Skewness#Sample_skewness
# for more details.
def compute_moments(x):
n = len(x)
if n == 0:
m2, sum, m3 = 0, 0, 0
else:
m2 = x.std(ddof=0)**2 * n
sum = x.sum()
m3 = (((x - x.mean())**3).sum())
return pd.DataFrame(dict(m2=[m2], sum=[sum], n=[n], m3=[m3]))
def combine_moments(data):
m2 = sum = n = m3 = 0.0
for datum in data.itertuples():
if datum.n == 0:
continue
elif n == 0:
m2, sum, n, m3 = datum.m2, datum.sum, datum.n, datum.m3
else:
n_a, n_b = datum.n, n
sum_a, sum_b = datum.sum, sum
m2_a, m2_b = datum.m2, m2
mean_a, mean_b = sum_a / n_a, sum_b / n_b
delta = mean_b - mean_a
combined_n = n_a + n_b
m3 += datum.m3 + (
(delta**3 * ((n_a * n_b) * (n_a - n_b)) / ((combined_n)**2)) +
((3 * delta) * ((n_a * m2_b) - (n_b * m2_a)) / (combined_n)))
m2 += datum.m2 + delta**2 * n_b * n_a / combined_n
sum += datum.sum
n += datum.n
if n < 3:
return float('nan')
elif m2 == 0:
return float(0)
else:
return combined_n * math.sqrt(combined_n - 1) / (combined_n -
2) * m3 / (
m2**(3 / 2))
moments = expressions.ComputedExpression(
'compute_moments',
compute_moments, [self._expr],
requires_partition_by=partitionings.Arbitrary())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'combine_moments',
combine_moments, [moments],
requires_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def kurtosis(self, axis, skipna, level, numeric_only, **kwargs):
  """Compute sample kurtosis in a distributed fashion.

  Per-partition moments (count, sum, and the 2nd-4th central moments) are
  computed in parallel, then merged pairwise on a single node using the
  numerically stable streaming formulae.
  """
  if level is not None:
    raise NotImplementedError("per-level aggregation")
  if skipna is None or skipna:
    self = self.dropna()  # pylint: disable=self-cls-assignment

  # See the online, numerically stable formulae at
  # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Higher-order_statistics
  # kurtosis here calculated as sample kurtosis
  # https://en.wikipedia.org/wiki/Kurtosis#Sample_kurtosis
  def compute_moments(x):
    # Per-partition: count n, sum, and central moments m2, m3, m4.
    n = len(x)
    if n == 0:
      m2, sum, m3, m4 = 0, 0, 0, 0
    else:
      m2 = x.std(ddof=0)**2 * n
      sum = x.sum()
      m3 = (((x - x.mean())**3).sum())
      m4 = (((x - x.mean())**4).sum())
    return pd.DataFrame(dict(m2=[m2], sum=[sum], n=[n], m3=[m3], m4=[m4]))

  def combine_moments(data):
    # Merge the per-partition moment rows pairwise into global moments.
    m2 = sum = n = m3 = m4 = 0.0
    for datum in data.itertuples():
      if datum.n == 0:
        continue  # empty partition contributes nothing
      elif n == 0:
        # First non-empty partition: adopt its moments directly.
        m2, sum, n, m3, m4 = datum.m2, datum.sum, datum.n, datum.m3, datum.m4
      else:
        # Pairwise update: merge accumulated (b) with the new datum (a).
        n_a, n_b = datum.n, n
        m2_a, m2_b = datum.m2, m2
        m3_a, m3_b = datum.m3, m3
        sum_a, sum_b = datum.sum, sum
        mean_a, mean_b = sum_a / n_a, sum_b / n_b
        delta = mean_b - mean_a
        combined_n = n_a + n_b
        m4 += datum.m4 + (
            (delta**4) * (n_a * n_b) *
            ((n_a**2) - (n_a * n_b) + (n_b**2)) / combined_n**3) + (
                (6 * delta**2) * ((n_a**2 * m2_b) + (n_b**2 * m2_a)) /
                (combined_n**2)) + (
                    (4 * delta) * ((n_a * m3_b) - (n_b * m3_a)) / (combined_n))
        m3 += datum.m3 + (
            (delta**3 * ((n_a * n_b) * (n_a - n_b)) / ((combined_n)**2)) +
            ((3 * delta) * ((n_a * m2_b) - (n_b * m2_a)) / (combined_n)))
        m2 += datum.m2 + delta**2 * n_b * n_a / combined_n
        sum += datum.sum
        n += datum.n
    if n < 4:
      # Sample kurtosis is undefined for fewer than 4 observations.
      return float('nan')
    elif m2 == 0:
      return float(0)
    else:
      # NOTE: use the total count n here, not the loop-local combined_n.
      # combined_n is unbound when only a single non-empty partition was
      # merged (previously a NameError in that case); after the final
      # pairwise merge combined_n == n, so multi-partition results are
      # unchanged.
      return (((n + 1) * (n) * (n - 1)) / ((n - 2) * (n - 3))) * (
          m4 / (m2)**2) - ((3 * (n - 1)**2) / ((n - 2) * (n - 3)))

  moments = expressions.ComputedExpression(
      'compute_moments',
      compute_moments, [self._expr],
      requires_partition_by=partitionings.Arbitrary())
  with expressions.allow_non_parallel_operations(True):
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'combine_moments',
            combine_moments, [moments],
            requires_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.Series)
def kurt(self, *args, **kwargs):
  """Alias for :meth:`kurtosis`, matching pandas."""
  return self.kurtosis(*args, **kwargs)
def _corr_aligned(self, other, min_periods):
  # Correlation is the covariance normalized by both standard deviations.
  deferred_cov = self._cov_aligned(other, min_periods)
  return deferred_cov.apply(
      lambda c, sx, sy: c / (sx * sy), args=[self.std(), other.std()])
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def cov(self, other, min_periods, ddof):
  """Covariance of this series with ``other`` over their common index."""
  # Drop missing values on both sides, then inner-align so the pairwise
  # computation only sees rows present in both series.
  left, right = self.dropna().align(other.dropna(), 'inner')
  return left._cov_aligned(right, min_periods, ddof)
def _cov_aligned(self, other, min_periods, ddof=1):
  """Distributed covariance of two already-aligned deferred Series.

  Per-partition co-moments are computed in parallel, then merged on a
  single node with the pairwise streaming update formulae.
  """
  # Use the formulae from
  # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Covariance
  def compute_co_moments(x, y):
    # Per-partition: co-moment c = cov * (n - 1), plus the sums and count.
    n = len(x)
    if n <= 1:
      # A single point carries no covariance information.
      c = 0
    else:
      c = x.cov(y) * (n - 1)
    sx = x.sum()
    sy = y.sum()
    return pd.DataFrame(dict(c=[c], sx=[sx], sy=[sy], n=[n]))

  def combine_co_moments(data):
    # Merge the per-partition rows pairwise into global co-moments.
    c = sx = sy = n = 0.0
    for datum in data.itertuples():
      if datum.n == 0:
        continue  # empty partition contributes nothing
      elif n == 0:
        # First non-empty partition: adopt its moments directly.
        c, sx, sy, n = datum.c, datum.sx, datum.sy, datum.n
      else:
        # Pairwise co-moment update (see the Wikipedia link above).
        c += (
            datum.c + (sx / n - datum.sx / datum.n) *
            (sy / n - datum.sy / datum.n) * n * datum.n / (n + datum.n))
        sx += datum.sx
        sy += datum.sy
        n += datum.n
    if n < max(2, ddof, min_periods or 0):
      # Not enough observations for a meaningful covariance.
      return float('nan')
    else:
      return c / (n - ddof)

  moments = expressions.ComputedExpression(
      'compute_co_moments',
      compute_co_moments, [self._expr, other._expr],
      requires_partition_by=partitionings.Index())
  with expressions.allow_non_parallel_operations(True):
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'combine_co_moments',
            combine_co_moments, [moments],
            requires_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
@frame_base.maybe_inplace
def dropna(self, **kwargs):
  """Drop missing values; fully parallel (applied per partition)."""
  def apply_dropna(df):
    return df.dropna(**kwargs)

  return frame_base.DeferredFrame.wrap(
      expressions.ComputedExpression(
          'dropna',
          apply_dropna, [self._expr],
          preserves_partition_by=partitionings.Arbitrary(),
          requires_partition_by=partitionings.Arbitrary()))
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
@frame_base.maybe_inplace
def set_axis(self, labels, **kwargs):
  """Not supported: assigning a new index to a deferred Series."""
  # TODO: assigning the index is generally order-sensitive, but we could
  # support it in some rare cases, e.g. when assigning the index from one
  # of a DataFrame's columns
  raise NotImplementedError(
      "Assigning an index is not yet supported. "
      "Consider using set_index() instead.")
# Null checks are elementwise, so they parallelize trivially.
isnull = isna = frame_base._elementwise_method('isna', base=pd.Series)
notnull = notna = frame_base._elementwise_method('notna', base=pd.Series)

# These all produce concrete (non-deferred) results, which the Beam model
# cannot provide without materializing the entire dataset.
items = frame_base.wont_implement_method(
    pd.Series, 'items', reason="non-deferred-result")
iteritems = frame_base.wont_implement_method(
    pd.Series, 'iteritems', reason="non-deferred-result")
tolist = frame_base.wont_implement_method(
    pd.Series, 'tolist', reason="non-deferred-result")
to_numpy = frame_base.wont_implement_method(
    pd.Series, 'to_numpy', reason="non-deferred-result")
to_string = frame_base.wont_implement_method(
    pd.Series, 'to_string', reason="non-deferred-result")
def _wrap_in_df(self):
  # Promote this deferred Series into a one-column deferred DataFrame so
  # the DataFrame-based implementations can be reused.
  return frame_base.DeferredFrame.wrap(
      expressions.ComputedExpression(
          'wrap_in_df',
          lambda series: pd.DataFrame(series),
          [self._expr],
          requires_partition_by=partitionings.Arbitrary(),
          preserves_partition_by=partitionings.Arbitrary(),
      ))
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
@frame_base.maybe_inplace
def duplicated(self, keep):
  """Only ``keep=False`` and ``keep="any"`` are supported. Other values of
  ``keep`` make this an order-sensitive operation. Note ``keep="any"`` is
  a Beam-specific option that guarantees only one duplicate will be kept, but
  unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_
  duplicate element is kept."""
  # Delegate to the DataFrame implementation, then project the single
  # column back out as a Series.
  as_df = self._wrap_in_df()
  return as_df.duplicated(keep=keep)[as_df.columns[0]]
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
@frame_base.maybe_inplace
def drop_duplicates(self, keep):
  """Only ``keep=False`` and ``keep="any"`` are supported. Other values of
  ``keep`` make this an order-sensitive operation. Note ``keep="any"`` is
  a Beam-specific option that guarantees only one duplicate will be kept, but
  unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_
  duplicate element is kept."""
  # Delegate to the DataFrame implementation, then project the single
  # column back out as a Series.
  as_df = self._wrap_in_df()
  return as_df.drop_duplicates(keep=keep)[as_df.columns[0]]
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
@frame_base.maybe_inplace
def sample(self, **kwargs):
  """Only ``n`` and/or ``weights`` may be specified. ``frac``,
  ``random_state``, and ``replace=True`` are not yet supported.
  See `BEAM-12476 <https://issues.apache.org/jira/BEAM-12476>`_.

  Note that pandas will raise an error if ``n`` is larger than the length
  of the dataset, while the Beam DataFrame API will simply return the full
  dataset in that case."""
  # Delegate to the DataFrame implementation, then project the single
  # column back out as a Series.
  as_df = self._wrap_in_df()
  return as_df.sample(**kwargs)[as_df.columns[0]]
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def aggregate(self, func, axis, *args, **kwargs):
  """Some aggregation methods cannot be parallelized, and computing
  them will require collecting all data on a single machine."""
  if kwargs.get('skipna', False):
    # Eagerly generate a proxy to make sure skipna is a valid argument
    # for this aggregation method
    _ = self._expr.proxy().aggregate(func, axis, *args, **kwargs)
    kwargs.pop('skipna')
    # Implement skipna by dropping NaNs up front and re-dispatching.
    return self.dropna().aggregate(func, axis, *args, **kwargs)

  if isinstance(func, list) and len(func) > 1:
    # level arg is ignored for multiple aggregations
    _ = kwargs.pop('level', None)

    # Aggregate with each method separately, then stick them all together.
    rows = [self.agg([f], *args, **kwargs) for f in func]
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'join_aggregate',
            lambda *rows: pd.concat(rows), [row._expr for row in rows]))
  else:
    # We're only handling a single column. It could be 'func' or ['func'],
    # which produce different results. 'func' produces a scalar, ['func']
    # produces a single element Series.
    base_func = func[0] if isinstance(func, list) else func

    if (_is_numeric(base_func) and
        not pd.core.dtypes.common.is_numeric_dtype(self.dtype)):
      warnings.warn(
          f"Performing a numeric aggregation, {base_func!r}, on "
          f"Series {self._expr.proxy().name!r} with non-numeric type "
          f"{self.dtype!r}. This can result in runtime errors or surprising "
          "results.")

    if 'level' in kwargs:
      # Defer to groupby.agg for level= mode
      return self.groupby(
          level=kwargs.pop('level'), axis=axis).agg(func, *args, **kwargs)

    singleton_reason = None
    if 'min_count' in kwargs:
      # Eagerly generate a proxy to make sure min_count is a valid argument
      # for this aggregation method
      _ = self._expr.proxy().agg(func, axis, *args, **kwargs)

      singleton_reason = (
          "Aggregation with min_count= requires collecting all data on a "
          "single node.")

    # We have specialized distributed implementations for these
    if base_func in ('quantile',
                     'std',
                     'var',
                     'nunique',
                     'corr',
                     'cov',
                     'skew',
                     'kurt',
                     'kurtosis'):
      result = getattr(self, base_func)(*args, **kwargs)
      if isinstance(func, list):
        # ['func'] expects a single-element Series result, not a scalar.
        with expressions.allow_non_parallel_operations(True):
          return frame_base.DeferredFrame.wrap(
              expressions.ComputedExpression(
                  f'wrap_aggregate_{base_func}',
                  lambda x: pd.Series(x, index=[base_func]), [result._expr],
                  requires_partition_by=partitionings.Singleton(),
                  preserves_partition_by=partitionings.Singleton()))
      else:
        return result

    agg_kwargs = kwargs.copy()
    if ((_is_associative(base_func) or _is_liftable_with_sum(base_func)) and
        singleton_reason is None):
      # Liftable aggregation: pre-aggregate each partition in parallel,
      # then combine the partial results in a final aggregation below.
      intermediate = expressions.ComputedExpression(
          f'pre_aggregate_{base_func}',
          # Coerce to a Series, if the result is scalar we still want a Series
          # so we can combine and do the final aggregation next.
          lambda s: pd.Series(s.agg(func, *args, **kwargs)),
          [self._expr],
          requires_partition_by=partitionings.Arbitrary(),
          preserves_partition_by=partitionings.Singleton())
      allow_nonparallel_final = True
      if _is_associative(base_func):
        agg_func = func
      else:
        # Liftable-with-sum: partial results are combined by summing.
        agg_func = ['sum'] if isinstance(func, list) else 'sum'
    else:
      # Not liftable: the final aggregation sees the full data on one node.
      intermediate = self._expr
      allow_nonparallel_final = None  # i.e. don't change the value
      agg_func = func
      singleton_reason = (
          f"Aggregation function {func!r} cannot currently be "
          "parallelized. It requires collecting all data for "
          "this Series on a single node.")
    with expressions.allow_non_parallel_operations(allow_nonparallel_final):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              f'post_aggregate_{base_func}',
              lambda s: s.agg(agg_func, *args, **agg_kwargs), [intermediate],
              preserves_partition_by=partitionings.Singleton(),
              requires_partition_by=partitionings.Singleton(
                  reason=singleton_reason)))

# pandas-style short alias.
agg = aggregate
@property  # type: ignore
@frame_base.with_docs_from(pd.Series)
def axes(self):
  # A Series has a single axis: its index.
  return [self.index]
# Elementwise: applies independently to each partition.
clip = frame_base._elementwise_method('clip', base=pd.Series)

# Aggregations implemented via the (possibly lifted) _agg_method machinery.
all = _agg_method(pd.Series, 'all')
any = _agg_method(pd.Series, 'any')
# TODO(BEAM-12074): Document that Series.count(level=) will drop NaN's
count = _agg_method(pd.Series, 'count')
describe = _agg_method(pd.Series, 'describe')
min = _agg_method(pd.Series, 'min')
max = _agg_method(pd.Series, 'max')
prod = product = _agg_method(pd.Series, 'prod')
sum = _agg_method(pd.Series, 'sum')
mean = _agg_method(pd.Series, 'mean')
median = _agg_method(pd.Series, 'median')
sem = _agg_method(pd.Series, 'sem')
mad = _agg_method(pd.Series, 'mad')

# Operations that depend on element order cannot be supported, since Beam
# makes no ordering guarantees for distributed data.
argmax = frame_base.wont_implement_method(
    pd.Series, 'argmax', reason='order-sensitive')
argmin = frame_base.wont_implement_method(
    pd.Series, 'argmin', reason='order-sensitive')
cummax = frame_base.wont_implement_method(
    pd.Series, 'cummax', reason='order-sensitive')
cummin = frame_base.wont_implement_method(
    pd.Series, 'cummin', reason='order-sensitive')
cumprod = frame_base.wont_implement_method(
    pd.Series, 'cumprod', reason='order-sensitive')
cumsum = frame_base.wont_implement_method(
    pd.Series, 'cumsum', reason='order-sensitive')
diff = frame_base.wont_implement_method(
    pd.Series, 'diff', reason='order-sensitive')
interpolate = frame_base.wont_implement_method(
    pd.Series, 'interpolate', reason='order-sensitive')
searchsorted = frame_base.wont_implement_method(
    pd.Series, 'searchsorted', reason='order-sensitive')
shift = frame_base.wont_implement_method(
    pd.Series, 'shift', reason='order-sensitive')
pct_change = frame_base.wont_implement_method(
    pd.Series, 'pct_change', reason='order-sensitive')
is_monotonic = frame_base.wont_implement_method(
    pd.Series, 'is_monotonic', reason='order-sensitive')
is_monotonic_increasing = frame_base.wont_implement_method(
    pd.Series, 'is_monotonic_increasing', reason='order-sensitive')
is_monotonic_decreasing = frame_base.wont_implement_method(
    pd.Series, 'is_monotonic_decreasing', reason='order-sensitive')
asof = frame_base.wont_implement_method(
    pd.Series, 'asof', reason='order-sensitive')
first_valid_index = frame_base.wont_implement_method(
    pd.Series, 'first_valid_index', reason='order-sensitive')
last_valid_index = frame_base.wont_implement_method(
    pd.Series, 'last_valid_index', reason='order-sensitive')
autocorr = frame_base.wont_implement_method(
    pd.Series, 'autocorr', reason='order-sensitive')
iat = property(
    frame_base.wont_implement_method(
        pd.Series, 'iat', reason='order-sensitive'))

# head/tail get a custom explanation pointing users at alternatives.
head = frame_base.wont_implement_method(
    pd.Series, 'head', explanation=_PEEK_METHOD_EXPLANATION)
tail = frame_base.wont_implement_method(
    pd.Series, 'tail', explanation=_PEEK_METHOD_EXPLANATION)

filter = frame_base._elementwise_method('filter', base=pd.Series)

# These produce concrete (non-deferred) results.
memory_usage = frame_base.wont_implement_method(
    pd.Series, 'memory_usage', reason="non-deferred-result")
nbytes = frame_base.wont_implement_method(
    pd.Series, 'nbytes', reason="non-deferred-result")
to_list = frame_base.wont_implement_method(
    pd.Series, 'to_list', reason="non-deferred-result")
factorize = frame_base.wont_implement_method(
    pd.Series, 'factorize', reason="non-deferred-columns")

# In Series __contains__ checks the index
__contains__ = frame_base.wont_implement_method(
    pd.Series, '__contains__', reason="non-deferred-result")
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def nlargest(self, keep, **kwargs):
  """Only ``keep=False`` and ``keep="any"`` are supported. Other values of
  ``keep`` make this an order-sensitive operation. Note ``keep="any"`` is
  a Beam-specific option that guarantees only one duplicate will be kept, but
  unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_
  duplicate element is kept."""
  # TODO(robertwb): Document 'any' option.
  # TODO(robertwb): Consider (conditionally) defaulting to 'any' if no
  # explicit keep parameter is requested.
  if keep == 'any':
    # 'any' is a Beam extension; once ordering guarantees are dropped,
    # pandas' 'first' simply picks a single arbitrary winner.
    kwargs['keep'] = 'first'
  elif keep == 'all':
    kwargs['keep'] = keep
  else:
    raise frame_base.WontImplementError(
        f"nlargest(keep={keep!r}) is not supported because it is "
        "order sensitive. Only keep=\"all\" is supported.",
        reason="order-sensitive")

  # Take the n largest per partition, then the n largest of those globally.
  local_top = expressions.ComputedExpression(
      'nlargest-per-partition',
      lambda df: df.nlargest(**kwargs), [self._expr],
      preserves_partition_by=partitionings.Arbitrary(),
      requires_partition_by=partitionings.Arbitrary())
  with expressions.allow_non_parallel_operations(True):
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'nlargest',
            lambda df: df.nlargest(**kwargs), [local_top],
            preserves_partition_by=partitionings.Arbitrary(),
            requires_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def nsmallest(self, keep, **kwargs):
  """Only ``keep=False`` and ``keep="any"`` are supported. Other values of
  ``keep`` make this an order-sensitive operation. Note ``keep="any"`` is
  a Beam-specific option that guarantees only one duplicate will be kept, but
  unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_
  duplicate element is kept."""
  if keep == 'any':
    # 'any' is a Beam extension; once ordering guarantees are dropped,
    # pandas' 'first' simply picks a single arbitrary winner.
    kwargs['keep'] = 'first'
  elif keep == 'all':
    kwargs['keep'] = keep
  else:
    raise frame_base.WontImplementError(
        f"nsmallest(keep={keep!r}) is not supported because it is "
        "order sensitive. Only keep=\"all\" is supported.",
        reason="order-sensitive")

  # Take the n smallest per partition, then the n smallest of those.
  local_bottom = expressions.ComputedExpression(
      'nsmallest-per-partition',
      lambda df: df.nsmallest(**kwargs), [self._expr],
      preserves_partition_by=partitionings.Arbitrary(),
      requires_partition_by=partitionings.Arbitrary())
  with expressions.allow_non_parallel_operations(True):
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'nsmallest',
            lambda df: df.nsmallest(**kwargs), [local_bottom],
            preserves_partition_by=partitionings.Arbitrary(),
            requires_partition_by=partitionings.Singleton()))
@property  # type: ignore
@frame_base.with_docs_from(pd.Series)
def is_unique(self):
  """Deferred check that all values in the Series are distinct."""
  # Make the values their own index so value-equality becomes
  # index-equality, which Index partitioning can colocate.
  def set_index(s):
    s = s[:]  # shallow copy so the caller's data is not mutated
    s.index = s
    return s

  self_index = expressions.ComputedExpression(
      'set_index',
      set_index, [self._expr],
      requires_partition_by=partitionings.Arbitrary(),
      preserves_partition_by=partitionings.Singleton())

  # With equal values colocated, uniqueness is decidable per partition.
  is_unique_distributed = expressions.ComputedExpression(
      'is_unique_distributed',
      lambda s: pd.Series(s.is_unique), [self_index],
      requires_partition_by=partitionings.Index(),
      preserves_partition_by=partitionings.Singleton())

  # The global answer is the conjunction of per-partition answers.
  with expressions.allow_non_parallel_operations():
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'combine',
            lambda s: s.all(), [is_unique_distributed],
            requires_partition_by=partitionings.Singleton(),
            preserves_partition_by=partitionings.Singleton()))
# Plotting requires materialized local data.
plot = frame_base.wont_implement_method(
    pd.Series, 'plot', reason="plotting-tools")
pop = frame_base.wont_implement_method(
    pd.Series, 'pop', reason="non-deferred-result")

# Elementwise: applied per partition.
rename_axis = frame_base._elementwise_method('rename_axis', base=pd.Series)
round = frame_base._elementwise_method('round', base=pd.Series)

take = frame_base.wont_implement_method(
    pd.Series, 'take', reason='deprecated')
to_dict = frame_base.wont_implement_method(
    pd.Series, 'to_dict', reason="non-deferred-result")
to_frame = frame_base._elementwise_method('to_frame', base=pd.Series)
@frame_base.with_docs_from(pd.Series)
def unique(self, as_series=False):
  """unique is not supported by default because it produces a
  non-deferred result: an :class:`~numpy.ndarray`. You can use the
  Beam-specific argument ``unique(as_series=True)`` to get the result as
  a :class:`DeferredSeries`"""
  if as_series:
    # Deduplication currently runs on a single node.
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'unique',
            lambda df: pd.Series(df.unique()), [self._expr],
            preserves_partition_by=partitionings.Singleton(),
            requires_partition_by=partitionings.Singleton(
                reason="unique() cannot currently be parallelized.")))

  raise frame_base.WontImplementError(
      "unique() is not supported by default because it produces a "
      "non-deferred result: a numpy array. You can use the Beam-specific "
      "argument unique(as_series=True) to get the result as a "
      "DeferredSeries",
      reason="non-deferred-result")
@frame_base.with_docs_from(pd.Series)
def update(self, other):
  """In-place update from ``other``; rebinds this frame's expression."""
  def do_update(df, other_df):
    # Series.update mutates df in place and returns None, so `or df`
    # yields the updated frame.
    return df.update(other_df) or df

  self._expr = expressions.ComputedExpression(
      'update',
      do_update, [self._expr, other._expr],
      preserves_partition_by=partitionings.Arbitrary(),
      requires_partition_by=partitionings.Index())
# unstack's output columns depend on the data, which a deferred proxy
# cannot know ahead of time.
unstack = frame_base.wont_implement_method(
    pd.Series, 'unstack', reason='non-deferred-columns')
@frame_base.with_docs_from(pd.Series)
def value_counts(
    self,
    sort=False,
    normalize=False,
    ascending=False,
    bins=None,
    dropna=True):
  """``sort`` is ``False`` by default, and ``sort=True`` is not supported
  because it imposes an ordering on the dataset which likely will not be
  preserved.

  When ``bin`` is specified this operation is not parallelizable. See
  [BEAM-12441](https://issues.apache.org/jira/browse/BEAM-12441) tracking the
  possible addition of a distributed implementation."""
  if sort:
    raise frame_base.WontImplementError(
        "value_counts(sort=True) is not supported because it imposes an "
        "ordering on the dataset which likely will not be preserved.",
        reason="order-sensitive")

  if bins is not None:
    # Binning needs the global min/max, so collect onto a single node.
    # NOTE: fixed here — the args list [self._expr] was previously fused
    # onto the lambda as a subscript, leaving ComputedExpression without
    # its args argument.
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'value_counts',
            lambda s: s.value_counts(
                normalize=normalize, bins=bins, dropna=dropna),
            [self._expr],
            requires_partition_by=partitionings.Singleton(
                reason=(
                    "value_counts with bin specified requires collecting "
                    "the entire dataset to identify the range.")),
            preserves_partition_by=partitionings.Singleton(),
        ))

  if dropna:
    column = self.dropna()
  else:
    column = self

  # Count by grouping on the values themselves; size() yields the counts.
  result = column.groupby(column, dropna=dropna).size()

  # groupby.size() names the index, which we don't need
  result.index.name = None

  if normalize:
    return result / column.length()
  else:
    return result
# .values would materialize a concrete ndarray.
values = property(
    frame_base.wont_implement_method(
        pd.Series, 'values', reason="non-deferred-result"))

view = frame_base.wont_implement_method(
    pd.Series,
    'view',
    explanation=(
        "because it relies on memory-sharing semantics that are "
        "not compatible with the Beam model."))
@property  # type: ignore
@frame_base.with_docs_from(pd.Series)
def str(self):
  # Deferred counterpart of pandas' .str accessor.
  return _DeferredStringMethods(self._expr)
@property  # type: ignore
@frame_base.with_docs_from(pd.Series)
def cat(self):
  # Deferred counterpart of pandas' .cat accessor.
  return _DeferredCategoricalMethods(self._expr)
@property  # type: ignore
@frame_base.with_docs_from(pd.Series)
def dt(self):
  # Deferred counterpart of pandas' .dt accessor.
  return _DeferredDatetimeMethods(self._expr)
@frame_base.with_docs_from(pd.Series)
def mode(self, *args, **kwargs):
  """mode is not currently parallelizable. An approximate,
  parallelizable implementation of mode may be added in the future
  (`BEAM-12181 <https://issues.apache.org/jira/BEAM-12181>`_)."""
  #TODO(BEAM-12181): Can we add an approximate implementation?
  def compute_mode(df):
    return df.mode(*args, **kwargs)

  return frame_base.DeferredFrame.wrap(
      expressions.ComputedExpression(
          'mode',
          compute_mode,
          [self._expr],
          requires_partition_by=partitionings.Singleton(
              reason=(
                  "mode cannot currently be parallelized. See "
                  "BEAM-12181 tracking the possble addition of "
                  "an approximate, parallelizable implementation of mode.")),
          preserves_partition_by=partitionings.Singleton()))
# apply/map operate per element, so they parallelize trivially.
apply = frame_base._elementwise_method('apply', base=pd.Series)
map = frame_base._elementwise_method('map', base=pd.Series)
# TODO(BEAM-11636): Implement transform using type inference to determine the
# proxy
#transform = frame_base._elementwise_method('transform', base=pd.Series)
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def repeat(self, repeats, axis):
  """``repeats`` must be an ``int`` or a :class:`DeferredSeries`. Lists are
  not supported because they make this operation order-sensitive."""
  # Reject the unsupported list form up front.
  if isinstance(repeats, list):
    raise frame_base.WontImplementError(
        "repeat(repeats=) repeats must be an int or a DeferredSeries. "
        "Lists are not supported because they make this operation sensitive "
        "to the order of the data.",
        reason="order-sensitive")

  if isinstance(repeats, int):
    # A scalar repeat count applies uniformly per partition.
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'repeat',
            lambda series: series.repeat(repeats), [self._expr],
            requires_partition_by=partitionings.Arbitrary(),
            preserves_partition_by=partitionings.Arbitrary()))

  if isinstance(repeats, frame_base.DeferredBase):
    # Per-element counts must be index-aligned with the data.
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'repeat',
            lambda series, repeats_series: series.repeat(repeats_series),
            [self._expr, repeats._expr],
            requires_partition_by=partitionings.Index(),
            preserves_partition_by=partitionings.Arbitrary()))

  raise TypeError(
      "repeat(repeats=) value must be an int or a "
      f"DeferredSeries (encountered {type(repeats)}).")
@populate_not_implemented(pd.DataFrame)
@frame_base.DeferredFrame._register_for(pd.DataFrame)
class DeferredDataFrame(DeferredDataFrameOrSeries):
def __repr__(self):
  # Debug-friendly summary: column labels plus the index description.
  return (
      f'DeferredDataFrame(columns={list(self.columns)}, '
      f'{self._render_indexes()})')
@property  # type: ignore
@frame_base.with_docs_from(pd.DataFrame)
def columns(self):
  # Column labels are known statically from the proxy.
  return self._expr.proxy().columns
@columns.setter
def columns(self, columns):
  """Replace the column labels (an in-place operation, as in pandas)."""
  def set_columns(df):
    df = df.copy()
    df.columns = columns
    return df

  # A property setter's return value is discarded, so the previous
  # implementation (returning a wrapped DeferredFrame) silently did
  # nothing. Rebind self._expr instead, mirroring the other in-place
  # operations (update, insert).
  self._expr = expressions.ComputedExpression(
      'set_columns',
      set_columns, [self._expr],
      requires_partition_by=partitionings.Arbitrary(),
      preserves_partition_by=partitionings.Arbitrary())
@frame_base.with_docs_from(pd.DataFrame)
def keys(self):
  # For a DataFrame, keys() is the column labels (as in pandas).
  return self.columns
def __getattr__(self, name):
  # Column attribute access.
  if name in self._expr.proxy().columns:
    return self[name]
  else:
    # Fall back to normal attribute lookup (raises AttributeError).
    return object.__getattribute__(self, name)
def __getitem__(self, key):
  """Deferred selection: boolean-mask filtering, slicing (via loc), or
  column projection by name / list of names."""
  # TODO: Replicate pd.DataFrame.__getitem__ logic
  if isinstance(key, DeferredSeries) and key._expr.proxy().dtype == bool:
    # Boolean mask: row filtering, equivalent to df.loc[mask].
    return self.loc[key]

  elif isinstance(key, frame_base.DeferredBase):
    # Fail early if key is a DeferredBase as it interacts surprisingly with
    # key in self._expr.proxy().columns
    raise NotImplementedError(
        "Indexing with a non-bool deferred frame is not yet supported. "
        "Consider using df.loc[...]")

  elif isinstance(key, slice):
    if _is_null_slice(key):
      # df[:] is a no-op.
      return self
    elif _is_integer_slice(key):
      # This depends on the contents of the index.
      raise frame_base.WontImplementError(
          "Integer slices are not supported as they are ambiguous. Please "
          "use iloc or loc with integer slices.")
    else:
      return self.loc[key]

  elif (
      (isinstance(key, list) and all(key_column in self._expr.proxy().columns
                                     for key_column in key)) or
      key in self._expr.proxy().columns):
    # Column projection is elementwise per partition.
    return self._elementwise(lambda df: df[key], 'get_column')

  else:
    raise NotImplementedError(key)
def __contains__(self, key):
  # Membership means "is `key` a column of the proxy DataFrame?"
  return key in self._expr.proxy()
def __setitem__(self, key, value):
  """Deferred column assignment: by name, list of names, or boolean mask."""
  if isinstance(
      key, str) or (isinstance(key, list) and
                    all(isinstance(c, str)
                        for c in key)) or (isinstance(key, DeferredSeries) and
                                           key._expr.proxy().dtype == bool):
    # yapf: disable
    # Column assignment is elementwise per partition; inplace=True makes
    # _elementwise rebind self._expr rather than returning a new frame.
    return self._elementwise(
        lambda df, key, value: df.__setitem__(key, value),
        'set_column',
        (key, value),
        inplace=True)
  else:
    raise NotImplementedError(key)
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def align(self, other, join, axis, copy, level, method, **kwargs):
  """Aligning per level is not yet supported. Only the default,
  ``level=None``, is allowed.

  Filling NaN values via ``method`` is not supported, because it is
  `order-sensitive
  <https://s.apache.org/dataframe-order-sensitive-operations>`_. Only the
  default, ``method=None``, is allowed.

  ``copy=False`` is not supported because its behavior (whether or not it is
  an inplace operation) depends on the data."""
  if not copy:
    raise frame_base.WontImplementError(
        "align(copy=False) is not supported because it might be an inplace "
        "operation depending on the data. Please prefer the default "
        "align(copy=True).")
  if method is not None:
    raise frame_base.WontImplementError(
        f"align(method={method!r}) is not supported because it is "
        "order sensitive. Only align(method=None) is supported.",
        reason="order-sensitive")
  if kwargs:
    raise NotImplementedError('align(%s)' % ', '.join(kwargs.keys()))

  # Partitioning requirement depends on how alignment is performed.
  if level is not None:
    # Could probably get by partitioning on the used levels.
    requires_partition_by = partitionings.Singleton(reason=(
        f"align(level={level}) is not currently parallelizable. Only "
        "align(level=None) can be parallelized."))
  elif axis in ('columns', 1):
    # Column alignment never moves rows between partitions.
    requires_partition_by = partitionings.Arbitrary()
  else:
    # Row alignment needs matching index values colocated.
    requires_partition_by = partitionings.Index()
  return frame_base.DeferredFrame.wrap(
      expressions.ComputedExpression(
          'align',
          lambda df, other: df.align(other, join=join, axis=axis),
          [self._expr, other._expr],
          requires_partition_by=requires_partition_by,
          preserves_partition_by=partitionings.Arbitrary()))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def append(self, other, ignore_index, verify_integrity, sort, **kwargs):
  """``ignore_index=True`` is not supported, because it requires generating an
  order-sensitive index."""
  if not isinstance(other, DeferredDataFrame):
    raise frame_base.WontImplementError(
        "append() only accepts DeferredDataFrame instances, received " +
        str(type(other)))
  if ignore_index:
    raise frame_base.WontImplementError(
        "append(ignore_index=True) is order sensitive because it requires "
        "generating a new index based on the order of the data.",
        reason="order-sensitive")

  if verify_integrity:
    # We can verify the index is non-unique within index partitioned data.
    requires = partitionings.Index()
  else:
    requires = partitionings.Arbitrary()

  return frame_base.DeferredFrame.wrap(
      expressions.ComputedExpression(
          'append',
          lambda s, other: s.append(other, sort=sort,
                                    verify_integrity=verify_integrity,
                                    **kwargs),
          [self._expr, other._expr],
          requires_partition_by=requires,
          preserves_partition_by=partitionings.Arbitrary()
      )
  )
# get() is either a simple column projection (when the name exists) or a
# constant fall-back (default_value).
@frame_base.with_docs_from(pd.DataFrame)
def get(self, key, default_value=None):
  """Return column ``key`` if present, otherwise ``default_value``."""
  if key not in self.columns:
    return default_value
  return self[key]
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def set_index(self, keys, **kwargs):
  """``keys`` must be a ``str`` or ``List[str]``. Passing an Index or Series
  is not yet supported (`BEAM-11711
  <https://issues.apache.org/jira/browse/BEAM-11711>`_)."""
  # Normalize a bare column name to a one-element list.
  key_list = [keys] if isinstance(keys, str) else keys

  for candidate in key_list:
    if isinstance(candidate, (_DeferredIndex, frame_base.DeferredFrame)):
      raise NotImplementedError("set_index with Index or Series instances is "
                                "not yet supported (BEAM-11711).")

  # Changing the index invalidates any index-based partitioning.
  return frame_base.DeferredFrame.wrap(
      expressions.ComputedExpression(
          'set_index',
          lambda df: df.set_index(key_list, **kwargs),
          [self._expr],
          requires_partition_by=partitionings.Arbitrary(),
          preserves_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def set_axis(self, labels, axis, **kwargs):
  """Relabel an axis. Relabeling the index (axis=0) is not supported;
  relabeling columns (axis=1) is elementwise and fully parallel."""
  if axis in ('index', 0):
    # TODO: assigning the index is generally order-sensitive, but we could
    # support it in some rare cases, e.g. when assigning the index from one
    # of a DataFrame's columns
    raise NotImplementedError(
        "Assigning an index is not yet supported. "
        "Consider using set_index() instead.")
  else:
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'set_axis',
            lambda df: df.set_axis(labels, axis, **kwargs),
            [self._expr],
            requires_partition_by=partitionings.Arbitrary(),
            preserves_partition_by=partitionings.Arbitrary()))
@property  # type: ignore
@frame_base.with_docs_from(pd.DataFrame)
def axes(self):
  # A DataFrame has two axes: the row index and the column labels.
  return (self.index, self.columns)
@property  # type: ignore
@frame_base.with_docs_from(pd.DataFrame)
def dtypes(self):
  # Dtypes are known statically from the proxy.
  return self._expr.proxy().dtypes
@frame_base.with_docs_from(pd.DataFrame)
def assign(self, **kwargs):
  """``value`` must be a ``callable`` or :class:`DeferredSeries`. Other types
  make this operation order-sensitive."""
  # Validate every new column's value before deferring the operation.
  for name, value in kwargs.items():
    if callable(value) or isinstance(value, DeferredSeries):
      continue
    raise frame_base.WontImplementError(
        f"Unsupported value for new column '{name}': '{value}'. Only "
        "callables and DeferredSeries instances are supported. Other types "
        "make this operation sensitive to the order of the data",
        reason="order-sensitive")
  return self._elementwise(
      lambda df, *args, **kwargs: df.assign(*args, **kwargs),
      'assign',
      other_kwargs=kwargs)
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def explode(self, column, ignore_index):
  """Transform each list-like element of ``column`` into its own row."""
  # Discarding the index cannot preserve index-based partitioning.
  if ignore_index:
    preserves = partitionings.Singleton()
  else:
    preserves = partitionings.Index()

  return frame_base.DeferredFrame.wrap(
      expressions.ComputedExpression(
          'explode',
          lambda df: df.explode(column, ignore_index),
          [self._expr],
          preserves_partition_by=preserves,
          requires_partition_by=partitionings.Arbitrary()))
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def insert(self, value, **kwargs):
    """``value`` cannot be a ``List`` because aligning it with this
    DeferredDataFrame is order-sensitive."""
    if isinstance(value, list):
      raise frame_base.WontImplementError(
          "insert(value=list) is not supported because it joins the input "
          "list to the deferred DataFrame based on the order of the data.",
          reason="order-sensitive")
    # Lift a concrete pandas object into a deferred constant so it can be
    # aligned with this frame by index below.
    if isinstance(value, pd.core.generic.NDFrame):
      value = frame_base.DeferredFrame.wrap(
          expressions.ConstantExpression(value))
    if isinstance(value, frame_base.DeferredFrame):
      # Deferred input: co-locate by index, then insert per partition.
      # pd.DataFrame.insert mutates, so work on a copy inside the fn.
      def func_zip(df, value):
        df = df.copy()
        df.insert(value=value, **kwargs)
        return df
      inserted = frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'insert',
              func_zip,
              [self._expr, value._expr],
              requires_partition_by=partitionings.Index(),
              preserves_partition_by=partitionings.Arbitrary()))
    else:
      # Scalar input: a purely elementwise operation.
      def func_elementwise(df):
        df = df.copy()
        df.insert(value=value, **kwargs)
        return df
      inserted = frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'insert',
              func_elementwise,
              [self._expr],
              requires_partition_by=partitionings.Arbitrary(),
              preserves_partition_by=partitionings.Arbitrary()))
    # insert() is an in-place pandas API, so rebind this frame's expression
    # rather than returning a new frame.
    self._expr = inserted._expr
  @staticmethod
  @frame_base.with_docs_from(pd.DataFrame)
  def from_dict(*args, **kwargs):
    # The input is in-memory data, so the frame is built eagerly and embedded
    # in the deferred graph as a constant.
    return frame_base.DeferredFrame.wrap(
        expressions.ConstantExpression(pd.DataFrame.from_dict(*args, **kwargs)))
  @staticmethod
  @frame_base.with_docs_from(pd.DataFrame)
  def from_records(*args, **kwargs):
    # Like from_dict: eager construction from in-memory records, wrapped as a
    # constant expression.
    return frame_base.DeferredFrame.wrap(
        expressions.ConstantExpression(pd.DataFrame.from_records(*args,
                                                                 **kwargs)))
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  @frame_base.maybe_inplace
  def duplicated(self, keep, subset):
    """Only ``keep=False`` and ``keep="any"`` are supported. Other values of
    ``keep`` make this an order-sensitive operation. Note ``keep="any"`` is
    a Beam-specific option that guarantees only one duplicate will be kept, but
    unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_
    duplicate element is kept."""
    # TODO(BEAM-12074): Document keep="any"
    if keep == 'any':
      # "any" is implemented as pandas' "first" within each group; which
      # element ends up "first" is not guaranteed in a distributed run.
      keep = 'first'
    elif keep is not False:
      raise frame_base.WontImplementError(
          f"duplicated(keep={keep!r}) is not supported because it is "
          "sensitive to the order of the data. Only keep=False and "
          "keep=\"any\" are supported.",
          reason="order-sensitive")

    # Group by the full dedup key so duplicates are co-located.
    by = subset or list(self.columns)

    # Workaround a bug where groupby.apply() that returns a single-element
    # Series moves index label to column
    return self.groupby(by).apply(
        lambda df: pd.DataFrame(df.duplicated(keep=keep, subset=subset),
                                columns=[None]))[None]
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def drop_duplicates(self, keep, subset, ignore_index):
"""Only ``keep=False`` and ``keep="any"`` are supported. Other values of
``keep`` make this an order-sensitive operation. Note ``keep="any"`` is
a Beam-specific option that guarantees only one duplicate will be kept, but
unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_
duplicate element is kept."""
# TODO(BEAM-12074): Document keep="any"
if keep == 'any':
keep = 'first'
elif keep is not False:
raise frame_base.WontImplementError(
f"drop_duplicates(keep={keep!r}) is not supported because it is "
"sensitive to the order of the data. Only keep=False and "
"keep=\"any\" are supported.",
reason="order-sensitive")
if ignore_index is not False:
raise frame_base.WontImplementError(
"drop_duplicates(ignore_index=False) is not supported because it "
"requires generating a new index that is sensitive to the order of "
"the data.",
reason="order-sensitive")
by = subset or list(self.columns)
return self.groupby(by).apply(
lambda df: df.drop_duplicates(keep=keep, subset=subset)).droplevel(by)
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def aggregate(self, func, axis, *args, **kwargs):
    """Dispatch an aggregation, routing to specialized or distributed
    implementations depending on ``func`` and ``axis``."""
    # We have specialized implementations for these.
    if func in ('quantile',):
      return getattr(self, func)(*args, axis=axis, **kwargs)

    # In pandas<1.3.0, maps to a property, args are ignored
    if func in ('size',) and PD_VERSION < (1, 3):
      return getattr(self, func)

    # We also have specialized distributed implementations for these. They only
    # support axis=0 (implicitly) though. axis=1 should fall through
    if func in ('corr', 'cov') and axis in (0, 'index'):
      return getattr(self, func)(*args, **kwargs)

    if axis is None:
      # Aggregate across all elements by first aggregating across columns,
      # then across rows.
      return self.agg(func, *args, **dict(kwargs, axis=1)).agg(
          func, *args, **dict(kwargs, axis=0))
    elif axis in (1, 'columns'):
      # This is an easy elementwise aggregation.
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'aggregate',
              lambda df: df.agg(func, axis=1, *args, **kwargs),
              [self._expr],
              requires_partition_by=partitionings.Arbitrary()))
    elif len(self._expr.proxy().columns) == 0:
      # For this corner case, just colocate everything.
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'aggregate',
              lambda df: df.agg(func, *args, **kwargs),
              [self._expr],
              requires_partition_by=partitionings.Singleton()))
    else:
      # In the general case, we will compute the aggregation of each column
      # separately, then recombine.

      # First, handle any kwargs that cause a projection, by eagerly generating
      # the proxy, and only including the columns that are in the output.
      PROJECT_KWARGS = ('numeric_only', 'bool_only', 'include', 'exclude')
      proxy = self._expr.proxy().agg(func, axis, *args, **kwargs)

      if isinstance(proxy, pd.DataFrame):
        projected = self[list(proxy.columns)]
      elif isinstance(proxy, pd.Series):
        projected = self[list(proxy.index)]
      else:
        projected = self

      # Columns that would raise at execution time for a numeric aggregation.
      nonnumeric_columns = [name for (name, dtype) in projected.dtypes.items()
                            if not
                            pd.core.dtypes.common.is_numeric_dtype(dtype)]

      if _is_numeric(func) and nonnumeric_columns:
        if 'numeric_only' in kwargs and kwargs['numeric_only'] is False:
          # User has opted in to execution with non-numeric columns, they
          # will accept runtime errors
          pass
        else:
          raise frame_base.WontImplementError(
              f"Numeric aggregation ({func!r}) on a DataFrame containing "
              f"non-numeric columns ({*nonnumeric_columns,!r} is not "
              "supported, unless `numeric_only=` is specified.\n"
              "Use `numeric_only=True` to only aggregate over numeric "
              "columns.\nUse `numeric_only=False` to aggregate over all "
              "columns. Note this is not recommended, as it could result in "
              "execution time errors.")

      # The projection kwargs were already applied via the proxy above, so
      # drop them before delegating to the per-column aggregations.
      for key in PROJECT_KWARGS:
        if key in kwargs:
          kwargs.pop(key)

      if not isinstance(func, dict):
        col_names = list(projected._expr.proxy().columns)
        func_by_col = {col: func for col in col_names}
      else:
        func_by_col = func
        col_names = list(func.keys())
      aggregated_cols = []
      has_lists = any(isinstance(f, list) for f in func_by_col.values())
      for col in col_names:
        funcs = func_by_col[col]
        if has_lists and not isinstance(funcs, list):
          # If any of the columns do multiple aggregations, they all must use
          # "list" style output
          funcs = [funcs]
        aggregated_cols.append(projected[col].agg(funcs, *args, **kwargs))

      # The final shape is different depending on whether any of the columns
      # were aggregated by a list of aggregators.
      with expressions.allow_non_parallel_operations():
        if isinstance(proxy, pd.Series):
          return frame_base.DeferredFrame.wrap(
              expressions.ComputedExpression(
                  'join_aggregate',
                  lambda *cols: pd.Series(
                      {col: value for col, value in zip(col_names, cols)}),
                  [col._expr for col in aggregated_cols],
                  requires_partition_by=partitionings.Singleton()))
        elif isinstance(proxy, pd.DataFrame):
          return frame_base.DeferredFrame.wrap(
              expressions.ComputedExpression(
                  'join_aggregate',
                  lambda *cols: pd.DataFrame(
                      {col: value for col, value in zip(col_names, cols)}),
                  [col._expr for col in aggregated_cols],
                  requires_partition_by=partitionings.Singleton()))
        else:
          raise AssertionError("Unexpected proxy type for "
                               f"DataFrame.aggregate!: proxy={proxy!r}, "
                               f"type(proxy)={type(proxy)!r}")
agg = aggregate
applymap = frame_base._elementwise_method('applymap', base=pd.DataFrame)
add_prefix = frame_base._elementwise_method('add_prefix', base=pd.DataFrame)
add_suffix = frame_base._elementwise_method('add_suffix', base=pd.DataFrame)
memory_usage = frame_base.wont_implement_method(
pd.DataFrame, 'memory_usage', reason="non-deferred-result")
info = frame_base.wont_implement_method(
pd.DataFrame, 'info', reason="non-deferred-result")
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def clip(self, axis, **kwargs):
"""``lower`` and ``upper`` must be :class:`DeferredSeries` instances, or
constants. Array-like arguments are not supported because they are
order-sensitive."""
if any(isinstance(kwargs.get(arg, None), frame_base.DeferredFrame)
for arg in ('upper', 'lower')) and axis not in (0, 'index'):
raise frame_base.WontImplementError(
"axis must be 'index' when upper and/or lower are a DeferredFrame",
reason='order-sensitive')
return frame_base._elementwise_method('clip', base=pd.DataFrame)(self,
axis=axis,
**kwargs)
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def corr(self, method, min_periods):
    """Only ``method="pearson"`` can be parallelized. Other methods require
    collecting all data on a single worker (see
    https://s.apache.org/dataframe-non-parallel-operations for details).
    """
    if method == 'pearson':
      # Compute each pairwise column correlation as its own distributed
      # expression, then assemble them into the symmetric result matrix.
      proxy = self._expr.proxy().corr()
      columns = list(proxy.columns)
      args = []
      arg_indices = []
      for col1, col2 in itertools.combinations(columns, 2):
        arg_indices.append((col1, col2))
        args.append(self[col1].corr(self[col2], method=method,
                                    min_periods=min_periods))
      def fill_matrix(*args):
        # Build the full matrix: 1.0 on the diagonal, each pairwise result
        # mirrored across it.
        data = collections.defaultdict(dict)
        for col in columns:
          data[col][col] = 1.0
        for ix, (col1, col2) in enumerate(arg_indices):
          data[col1][col2] = data[col2][col1] = args[ix]
        return pd.DataFrame(data, columns=columns, index=columns)
      with expressions.allow_non_parallel_operations(True):
        return frame_base.DeferredFrame.wrap(
            expressions.ComputedExpression(
                'fill_matrix',
                fill_matrix,
                [arg._expr for arg in args],
                requires_partition_by=partitionings.Singleton(),
                proxy=proxy))

    else:
      # Non-pearson methods are rank-based and need the whole dataset.
      reason = (f"Encountered corr(method={method!r}) which cannot be "
                "parallelized. Only corr(method='pearson') is currently "
                "parallelizable.")
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'corr',
              lambda df: df.corr(method=method, min_periods=min_periods),
              [self._expr],
              requires_partition_by=partitionings.Singleton(reason=reason)))
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def cov(self, min_periods, ddof):
    """Compute the covariance matrix, one pairwise entry per distributed
    expression, then assemble the result on a single worker."""
    # NOTE(review): uses corr()'s proxy; presumably cov's result has the same
    # shape/dtype (columns x columns of floats) -- confirm this is intentional.
    proxy = self._expr.proxy().corr()
    columns = list(proxy.columns)
    args = []
    arg_indices = []
    # Diagonal entries: variance, computed as std squared.
    for col in columns:
      arg_indices.append((col, col))
      std = self[col].std(ddof)
      args.append(std.apply(lambda x: x*x, 'square'))
    # Off-diagonal entries: pairwise covariance over rows where both columns
    # are non-null.
    for ix, col1 in enumerate(columns):
      for col2 in columns[ix+1:]:
        arg_indices.append((col1, col2))
        # Note that this set may be different for each pair.
        no_na = self.loc[self[col1].notna() & self[col2].notna()]
        args.append(no_na[col1]._cov_aligned(no_na[col2], min_periods, ddof))
    def fill_matrix(*args):
      # Mirror each computed entry across the diagonal.
      data = collections.defaultdict(dict)
      for ix, (col1, col2) in enumerate(arg_indices):
        data[col1][col2] = data[col2][col1] = args[ix]
      return pd.DataFrame(data, columns=columns, index=columns)
    with expressions.allow_non_parallel_operations(True):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'fill_matrix',
              fill_matrix,
              [arg._expr for arg in args],
              requires_partition_by=partitionings.Singleton(),
              proxy=proxy))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def corrwith(self, other, axis, drop, method):
if axis in (1, 'columns'):
return self._elementwise(
lambda df, other: df.corrwith(other, axis=axis, drop=drop,
method=method),
'corrwith',
other_args=(other,))
if not isinstance(other, frame_base.DeferredFrame):
other = frame_base.DeferredFrame.wrap(
expressions.ConstantExpression(other))
if isinstance(other, DeferredSeries):
proxy = self._expr.proxy().corrwith(other._expr.proxy(), axis=axis,
drop=drop, method=method)
self, other = self.align(other, axis=0, join='inner')
col_names = proxy.index
other_cols = [other] * len(col_names)
elif isinstance(other, DeferredDataFrame):
proxy = self._expr.proxy().corrwith(
other._expr.proxy(), axis=axis, method=method, drop=drop)
self, other = self.align(other, axis=0, join='inner')
col_names = list(
set(self.columns)
.intersection(other.columns)
.intersection(proxy.index))
other_cols = [other[col_name] for col_name in col_names]
else:
# Raise the right error.
self._expr.proxy().corrwith(other._expr.proxy(), axis=axis, drop=drop,
method=method)
# Just in case something else becomes valid.
raise NotImplementedError('corrwith(%s)' % type(other._expr.proxy))
# Generate expressions to compute the actual correlations.
corrs = [
self[col_name].corr(other_col, method)
for col_name, other_col in zip(col_names, other_cols)]
# Combine the results
def fill_dataframe(*args):
result = proxy.copy(deep=True)
for col, value in zip(proxy.index, args):
result[col] = value
return result
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'fill_dataframe',
fill_dataframe,
[corr._expr for corr in corrs],
requires_partition_by=partitionings.Singleton(),
proxy=proxy))
cummax = frame_base.wont_implement_method(pd.DataFrame, 'cummax',
reason='order-sensitive')
cummin = frame_base.wont_implement_method(pd.DataFrame, 'cummin',
reason='order-sensitive')
cumprod = frame_base.wont_implement_method(pd.DataFrame, 'cumprod',
reason='order-sensitive')
cumsum = frame_base.wont_implement_method(pd.DataFrame, 'cumsum',
reason='order-sensitive')
# TODO(BEAM-12071): Consider adding an order-insensitive implementation for
# diff that relies on the index
diff = frame_base.wont_implement_method(pd.DataFrame, 'diff',
reason='order-sensitive')
interpolate = frame_base.wont_implement_method(pd.DataFrame, 'interpolate',
reason='order-sensitive')
pct_change = frame_base.wont_implement_method(
pd.DataFrame, 'pct_change', reason='order-sensitive')
asof = frame_base.wont_implement_method(
pd.DataFrame, 'asof', reason='order-sensitive')
first_valid_index = frame_base.wont_implement_method(
pd.DataFrame, 'first_valid_index', reason='order-sensitive')
last_valid_index = frame_base.wont_implement_method(
pd.DataFrame, 'last_valid_index', reason='order-sensitive')
iat = property(frame_base.wont_implement_method(
pd.DataFrame, 'iat', reason='order-sensitive'))
lookup = frame_base.wont_implement_method(
pd.DataFrame, 'lookup', reason='deprecated')
head = frame_base.wont_implement_method(pd.DataFrame, 'head',
explanation=_PEEK_METHOD_EXPLANATION)
tail = frame_base.wont_implement_method(pd.DataFrame, 'tail',
explanation=_PEEK_METHOD_EXPLANATION)
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def sample(self, n, frac, replace, weights, random_state, axis):
    """When ``axis='index'``, only ``n`` and/or ``weights`` may be specified.
    ``frac``, ``random_state``, and ``replace=True`` are not yet supported.
    See `BEAM-12476 <https://issues.apache.org/jira/BEAM-12476>`_.

    Note that pandas will raise an error if ``n`` is larger than the length
    of the dataset, while the Beam DataFrame API will simply return the full
    dataset in that case.

    sample is fully supported for axis='columns'."""
    if axis in (1, 'columns'):
      # Sampling on axis=columns just means projecting random columns
      # Eagerly generate proxy to determine the set of columns at construction
      # time
      proxy = self._expr.proxy().sample(n=n, frac=frac, replace=replace,
                                        weights=weights,
                                        random_state=random_state, axis=axis)
      # Then do the projection
      return self[list(proxy.columns)]

    # axis='index'
    if frac is not None or random_state is not None or replace:
      raise NotImplementedError(
          f"When axis={axis!r}, only n and/or weights may be specified. "
          "frac, random_state, and replace=True are not yet supported "
          f"(got frac={frac!r}, random_state={random_state!r}, "
          f"replace={replace!r}). See BEAM-12476.")

    if n is None:
      n = 1

    if isinstance(weights, str):
      # A string names a column of this frame to use as weights.
      weights = self[weights]

    # Attach a random key per row, then take the n largest keys globally;
    # this makes the sample independent of data order.
    tmp_weight_column_name = "___Beam_DataFrame_weights___"

    if weights is None:
      self_with_randomized_weights = frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'randomized_weights',
              lambda df: df.assign(**{tmp_weight_column_name:
                                      np.random.rand(len(df))}),
              [self._expr],
              requires_partition_by=partitionings.Arbitrary(),
              preserves_partition_by=partitionings.Arbitrary()))
    else:
      # See "Fast Parallel Weighted Random Sampling" by Efraimidis and Spirakis
      # https://www.cti.gr/images_gr/reports/99-06-02.ps
      def assign_randomized_weights(df, weights):
        # Zero-weight rows can never be sampled; drop them up front. The
        # `| pd.Series(...)` aligns weights with df's index, filling NaN
        # for rows that have no weight.
        non_zero_weights = (weights > 0) | pd.Series(dtype=bool, index=df.index)
        df = df.loc[non_zero_weights]
        weights = weights.loc[non_zero_weights]
        random_weights = np.log(np.random.rand(len(weights))) / weights
        return df.assign(**{tmp_weight_column_name: random_weights})
      self_with_randomized_weights = frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'randomized_weights',
              assign_randomized_weights,
              [self._expr, weights._expr],
              requires_partition_by=partitionings.Index(),
              preserves_partition_by=partitionings.Arbitrary()))

    # Keep the n rows with the largest random keys, then drop the helper
    # column from the result.
    return self_with_randomized_weights.nlargest(
        n=n, columns=tmp_weight_column_name, keep='any').drop(
            tmp_weight_column_name, axis=1)
  @frame_base.with_docs_from(pd.DataFrame)
  def dot(self, other):
    # We want to broadcast the right hand side to all partitions of the left.
    # This is OK, as its index must be the same size as the columns set of self,
    # so cannot be too large.
    class AsScalar(object):
      # Wrapper that hides the frame from partitioning logic so it is
      # broadcast to every partition as a single opaque value.
      def __init__(self, value):
        self.value = value

    if isinstance(other, frame_base.DeferredFrame):
      proxy = other._expr.proxy()
      with expressions.allow_non_parallel_operations():
        side = expressions.ComputedExpression(
            'as_scalar',
            lambda df: AsScalar(df),
            [other._expr],
            requires_partition_by=partitionings.Singleton())
    else:
      # Plain array-like input: the result width is the length of its rows.
      proxy = pd.DataFrame(columns=range(len(other[0])))
      side = expressions.ConstantExpression(AsScalar(other))

    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'dot',
            lambda left, right: left @ right.value,
            [self._expr, side],
            requires_partition_by=partitionings.Arbitrary(),
            preserves_partition_by=partitionings.Arbitrary(),
            proxy=proxy))

  __matmul__ = dot  # support the @ operator
  @frame_base.with_docs_from(pd.DataFrame)
  def mode(self, axis=0, *args, **kwargs):
    """mode with axis="columns" is not implemented because it produces
    non-deferred columns.

    mode with axis="index" is not currently parallelizable. An approximate,
    parallelizable implementation of mode may be added in the future
    (`BEAM-12181 <https://issues.apache.org/jira/BEAM-12181>`_)."""
    if axis == 1 or axis == 'columns':
      # Number of columns is max(number mode values for each row), so we can't
      # determine how many there will be before looking at the data.
      raise frame_base.WontImplementError(
          "mode(axis=columns) is not supported because it produces a variable "
          "number of columns depending on the data.",
          reason="non-deferred-columns")
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'mode',
            lambda df: df.mode(*args, **kwargs),
            [self._expr],
            #TODO(BEAM-12181): Can we add an approximate implementation?
            requires_partition_by=partitionings.Singleton(reason=(
                "mode(axis='index') cannot currently be parallelized. See "
                "BEAM-12181 tracking the possble addition of an approximate, "
                "parallelizable implementation of mode."
            )),
            preserves_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def dropna(self, axis, **kwargs):
"""dropna with axis="columns" specified cannot be parallelized."""
# TODO(robertwb): This is a common pattern. Generalize?
if axis in (1, 'columns'):
requires_partition_by = partitionings.Singleton(reason=(
"dropna(axis=1) cannot currently be parallelized. It requires "
"checking all values in each column for NaN values, to determine "
"if that column should be dropped."
))
else:
requires_partition_by = partitionings.Arbitrary()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'dropna',
lambda df: df.dropna(axis=axis, **kwargs),
[self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=requires_partition_by))
  def _eval_or_query(self, name, expr, inplace, **kwargs):
    """Shared implementation for :meth:`eval` and :meth:`query`.

    ``name`` selects which pandas method to invoke on each partition.
    Raises ``NotImplementedError`` for unsupported kwargs or ``@local``
    variable references in ``expr``. With ``inplace=True`` this rebinds
    ``self._expr`` and returns ``None``.
    """
    for key in ('local_dict', 'global_dict', 'level', 'target', 'resolvers'):
      if key in kwargs:
        raise NotImplementedError(f"Setting '{key}' is not yet supported")

    # look for '@<py identifier>'
    if re.search(r'\@[^\d\W]\w*', expr, re.UNICODE):
      raise NotImplementedError("Accessing locals with @ is not yet supported "
                                "(BEAM-11202)")

    # Both eval and query are row-local, so they are elementwise.
    result_expr = expressions.ComputedExpression(
        name,
        lambda df: getattr(df, name)(expr, **kwargs),
        [self._expr],
        requires_partition_by=partitionings.Arbitrary(),
        preserves_partition_by=partitionings.Arbitrary())

    if inplace:
      self._expr = result_expr
    else:
      return frame_base.DeferredFrame.wrap(result_expr)
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def eval(self, expr, inplace, **kwargs):
    """Accessing local variables with ``@<varname>`` is not yet supported
    (`BEAM-11202 <https://issues.apache.org/jira/browse/BEAM-11202>`_).

    Arguments ``local_dict``, ``global_dict``, ``level``, ``target``, and
    ``resolvers`` are not yet supported."""
    # Delegates to the shared eval/query implementation.
    return self._eval_or_query('eval', expr, inplace, **kwargs)
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def query(self, expr, inplace, **kwargs):
    """Accessing local variables with ``@<varname>`` is not yet supported
    (`BEAM-11202 <https://issues.apache.org/jira/browse/BEAM-11202>`_).

    Arguments ``local_dict``, ``global_dict``, ``level``, ``target``, and
    ``resolvers`` are not yet supported."""
    # Delegates to the shared eval/query implementation.
    return self._eval_or_query('query', expr, inplace, **kwargs)
isnull = isna = frame_base._elementwise_method('isna', base=pd.DataFrame)
notnull = notna = frame_base._elementwise_method('notna', base=pd.DataFrame)
items = frame_base.wont_implement_method(pd.DataFrame, 'items',
reason="non-deferred-result")
itertuples = frame_base.wont_implement_method(pd.DataFrame, 'itertuples',
reason="non-deferred-result")
iterrows = frame_base.wont_implement_method(pd.DataFrame, 'iterrows',
reason="non-deferred-result")
iteritems = frame_base.wont_implement_method(pd.DataFrame, 'iteritems',
reason="non-deferred-result")
  def _cols_as_temporary_index(self, cols, suffix=''):
    """Return ``(reindex, revert)`` helpers that temporarily make ``cols``
    the index.

    ``reindex(df)`` stashes the current index levels under unique temporary
    names and sets ``cols`` as the index; ``revert(df)`` undoes the swap and
    restores the original index names. Used by operations (e.g. join) that
    need Index partitioning on ``cols``.
    """
    original_index_names = list(self._expr.proxy().index.names)
    # Temporary names are namespaced to avoid colliding with user columns.
    new_index_names = [
        '__apache_beam_temp_%d_%s' % (ix, suffix)
        for (ix, _) in enumerate(original_index_names)]
    def reindex(df):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'reindex',
              lambda df:
              df.rename_axis(index=new_index_names, copy=False)
              .reset_index().set_index(cols),
              [df._expr],
              preserves_partition_by=partitionings.Singleton(),
              requires_partition_by=partitionings.Arbitrary()))
    def revert(df):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'join_restoreindex',
              lambda df:
              df.reset_index().set_index(new_index_names)
              .rename_axis(index=original_index_names, copy=False),
              [df._expr],
              preserves_partition_by=partitionings.Singleton(),
              requires_partition_by=partitionings.Arbitrary()))
    return reindex, revert
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def join(self, other, on, **kwargs):
    # Joining on a column: temporarily move that column into the index so the
    # join can run with Index partitioning, then restore the original index.
    if on is not None:
      reindex, revert = self._cols_as_temporary_index(on)
      return revert(reindex(self).join(other, **kwargs))

    # Normalize `other` to a list, remembering whether it already was one,
    # since pd.DataFrame.join accepts a single frame or a list of frames.
    if isinstance(other, list):
      other_is_list = True
    else:
      other = [other]
      other_is_list = False

    placeholder = object()

    # Deferred inputs become expression args; concrete inputs are captured
    # in const_others with `placeholder` marking the deferred slots.
    other_exprs = [
        df._expr for df in other if isinstance(df, frame_base.DeferredFrame)]
    const_others = [
        placeholder if isinstance(df, frame_base.DeferredFrame) else df
        for df in other]
    def fill_placeholders(values):
      # Re-interleave computed frames into their original positions.
      values = iter(values)
      filled = [
          next(values) if df is placeholder else df for df in const_others]
      if other_is_list:
        return filled
      else:
        return filled[0]
    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'join',
            lambda df, *deferred_others: df.join(
                fill_placeholders(deferred_others), **kwargs),
            [self._expr] + other_exprs,
            preserves_partition_by=partitionings.Arbitrary(),
            requires_partition_by=partitionings.Index()))
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def merge(
      self,
      right,
      on,
      left_on,
      right_on,
      left_index,
      right_index,
      suffixes,
      **kwargs):
    """merge is not parallelizable unless ``left_index`` or ``right_index`` is
    ``True``, because it requires generating an entirely new unique index.
    See notes on :meth:`DeferredDataFrame.reset_index`. It is recommended to
    move the join key for one of your columns to the index to avoid this issue.
    For an example see the enrich pipeline in
    :mod:`apache_beam.examples.dataframe.taxiride`.

    ``how="cross"`` is not yet supported.
    """
    self_proxy = self._expr.proxy()
    right_proxy = right._expr.proxy()
    # Validate with a pandas call.
    _ = self_proxy.merge(
        right_proxy,
        on=on,
        left_on=left_on,
        right_on=right_on,
        left_index=left_index,
        right_index=right_index,
        **kwargs)

    if kwargs.get('how', None) == 'cross':
      raise NotImplementedError("cross join is not yet implemented (BEAM-9547)")
    if not any([on, left_on, right_on, left_index, right_index]):
      # No join key specified: default to the pandas behavior of joining on
      # the shared column names.
      on = [col for col in self_proxy.columns if col in right_proxy.columns]
    if not left_on:
      left_on = on
    if left_on and not isinstance(left_on, list):
      left_on = [left_on]
    if not right_on:
      right_on = on
    if right_on and not isinstance(right_on, list):
      right_on = [right_on]

    # Move the join keys into the index (keeping the original columns) so
    # the merge can run with Index partitioning.
    if left_index:
      indexed_left = self
    else:
      indexed_left = self.set_index(left_on, drop=False)

    if right_index:
      indexed_right = right
    else:
      indexed_right = right.set_index(right_on, drop=False)

    if left_on and right_on:
      common_cols = set(left_on).intersection(right_on)
      if len(common_cols):
        # When merging on the same column name from both dfs, we need to make
        # sure only one df has the column. Otherwise we end up with
        # two duplicate columns, one with lsuffix and one with rsuffix.
        # It's safe to drop from either because the data has already been duped
        # to the index.
        indexed_right = indexed_right.drop(columns=common_cols)

    merged = frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'merge',
            lambda left, right: left.merge(right,
                                           left_index=True,
                                           right_index=True,
                                           suffixes=suffixes,
                                           **kwargs),
            [indexed_left._expr, indexed_right._expr],
            preserves_partition_by=partitionings.Arbitrary(),
            requires_partition_by=partitionings.Index()))

    # If the user did not ask for an index join, drop the temporary index we
    # introduced above (this is the non-parallelizable step).
    if left_index or right_index:
      return merged
    else:
      return merged.reset_index(drop=True)
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def nlargest(self, keep, **kwargs):
    """Only ``keep="all"`` and ``keep="any"`` are supported. Other values of
    ``keep`` make this an order-sensitive operation. Note ``keep="any"`` is
    a Beam-specific option that guarantees only one duplicate will be kept, but
    unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_
    duplicate element is kept."""
    if keep == 'any':
      # "any" maps to pandas' "first"; which element is "first" is arbitrary
      # in a distributed run, hence the weaker guarantee documented above.
      keep = 'first'
    elif keep != 'all':
      raise frame_base.WontImplementError(
          f"nlargest(keep={keep!r}) is not supported because it is "
          "order sensitive. Only keep=\"all\" is supported.",
          reason="order-sensitive")
    kwargs['keep'] = keep

    # Take the top n in each partition, then combine those candidates on a
    # single worker to select the global top n.
    per_partition = expressions.ComputedExpression(
        'nlargest-per-partition',
        lambda df: df.nlargest(**kwargs),
        [self._expr],
        preserves_partition_by=partitionings.Arbitrary(),
        requires_partition_by=partitionings.Arbitrary())
    with expressions.allow_non_parallel_operations(True):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'nlargest',
              lambda df: df.nlargest(**kwargs),
              [per_partition],
              preserves_partition_by=partitionings.Singleton(),
              requires_partition_by=partitionings.Singleton()))
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def nsmallest(self, keep, **kwargs):
    """Only ``keep="all"`` and ``keep="any"`` are supported. Other values of
    ``keep`` make this an order-sensitive operation. Note ``keep="any"`` is
    a Beam-specific option that guarantees only one duplicate will be kept, but
    unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_
    duplicate element is kept."""
    if keep == 'any':
      # "any" maps to pandas' "first"; which element is "first" is arbitrary
      # in a distributed run, hence the weaker guarantee documented above.
      keep = 'first'
    elif keep != 'all':
      raise frame_base.WontImplementError(
          f"nsmallest(keep={keep!r}) is not supported because it is "
          "order sensitive. Only keep=\"all\" is supported.",
          reason="order-sensitive")
    kwargs['keep'] = keep

    # Take the bottom n in each partition, then combine those candidates on
    # a single worker to select the global bottom n.
    per_partition = expressions.ComputedExpression(
        'nsmallest-per-partition',
        lambda df: df.nsmallest(**kwargs),
        [self._expr],
        preserves_partition_by=partitionings.Arbitrary(),
        requires_partition_by=partitionings.Arbitrary())
    with expressions.allow_non_parallel_operations(True):
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'nsmallest',
              lambda df: df.nsmallest(**kwargs),
              [per_partition],
              preserves_partition_by=partitionings.Singleton(),
              requires_partition_by=partitionings.Singleton()))
plot = frame_base.wont_implement_method(pd.DataFrame, 'plot',
reason="plotting-tools")
  @frame_base.with_docs_from(pd.DataFrame)
  def pop(self, item):
    # Grab the deferred column first, then rebind this frame's expression to
    # a projection without it -- pop mutates self in place, like pandas.
    result = self[item]

    self._expr = expressions.ComputedExpression(
        'popped',
        lambda df: df.drop(columns=[item]),
        [self._expr],
        preserves_partition_by=partitionings.Arbitrary(),
        requires_partition_by=partitionings.Arbitrary())
    return result
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.populate_defaults(pd.DataFrame)
  def quantile(self, q, axis, **kwargs):
    """``quantile(axis="index")`` is not parallelizable. See
    `BEAM-12167 <https://issues.apache.org/jira/browse/BEAM-12167>`_ tracking
    the possible addition of an approximate, parallelizable implementation of
    quantile.

    When using quantile with ``axis="columns"`` only a single ``q`` value can be
    specified."""
    if axis in (1, 'columns'):
      if isinstance(q, list):
        # Multiple q values would put one row per q in the output, i.e. the
        # output columns depend on the data -- not expressible as deferred.
        raise frame_base.WontImplementError(
            "quantile(axis=columns) with multiple q values is not supported "
            "because it transposes the input DataFrame. Note computing "
            "an individual quantile across columns (e.g. "
            f"df.quantile(q={q[0]!r}, axis={axis!r}) is supported.",
            reason="non-deferred-columns")
      else:
        requires = partitionings.Arbitrary()
    else:  # axis='index'
      # TODO(BEAM-12167): Provide an option for approximate distributed
      # quantiles
      requires = partitionings.Singleton(reason=(
          "Computing quantiles across index cannot currently be parallelized. "
          "See BEAM-12167 tracking the possible addition of an approximate, "
          "parallelizable implementation of quantile."
      ))

    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'quantile',
            lambda df: df.quantile(q=q, axis=axis, **kwargs),
            [self._expr],
            requires_partition_by=requires,
            preserves_partition_by=partitionings.Singleton()))
  @frame_base.with_docs_from(pd.DataFrame)
  @frame_base.args_to_kwargs(pd.DataFrame)
  @frame_base.maybe_inplace
  def rename(self, **kwargs):
    """rename is not parallelizable when ``axis="index"`` and
    ``errors="raise"``. It requires collecting all data on a single
    node in order to detect if one of the index values is missing."""
    # Decode which axes are being renamed from the mix of `index=`/`columns=`
    # mappers and the `axis=` keyword (pandas accepts either form).
    rename_index = (
        'index' in kwargs
        or kwargs.get('axis', None) in (0, 'index')
        or ('columns' not in kwargs and 'axis' not in kwargs))
    rename_columns = (
        'columns' in kwargs
        or kwargs.get('axis', None) in (1, 'columns'))

    if rename_index:
      # Technically, it's still partitioned by index, but it's no longer
      # partitioned by the hash of the index.
      preserves_partition_by = partitionings.Singleton()
    else:
      preserves_partition_by = partitionings.Index()

    if kwargs.get('errors', None) == 'raise' and rename_index:
      # TODO: We could do this in parallel by creating a ConstantExpression
      # with a series created from the mapper dict. Then Index() partitioning
      # would co-locate the necessary index values and we could raise
      # individually within each partition. Execution time errors are
      # discouraged anyway so probably not worth the effort.
      requires_partition_by = partitionings.Singleton(reason=(
          "rename(errors='raise', axis='index') requires collecting all "
          "data on a single node in order to detect missing index values."
      ))
    else:
      requires_partition_by = partitionings.Arbitrary()

    proxy = None
    if rename_index:
      # The proxy can't be computed by executing rename, it will error
      # renaming the index.
      if rename_columns:
        # Note if both are being renamed, index and columns must be specified
        # (not axis)
        proxy = self._expr.proxy().rename(**{k: v for (k, v) in kwargs.items()
                                             if not k == 'index'})
      else:
        # No change in columns, reuse proxy
        proxy = self._expr.proxy()

    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'rename',
            lambda df: df.rename(**kwargs),
            [self._expr],
            proxy=proxy,
            preserves_partition_by=preserves_partition_by,
            requires_partition_by=requires_partition_by))
rename_axis = frame_base._elementwise_method('rename_axis', base=pd.DataFrame)
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def round(self, decimals, *args, **kwargs):
  """Round values elementwise to the given number of decimals."""
  if isinstance(decimals, frame_base.DeferredFrame):
    # A deferred per-column decimals Series cannot be aligned under the
    # current partitioning model.
    raise NotImplementedError(
        "Passing a deferred series to round() is not supported, "
        "please use a concrete pd.Series instance or a dictionary")

  def apply_round(df):
    return df.round(decimals, *args, **kwargs)

  return frame_base.DeferredFrame.wrap(
      expressions.ComputedExpression(
          'round',
          apply_round,
          [self._expr],
          requires_partition_by=partitionings.Arbitrary(),
          preserves_partition_by=partitionings.Index()
      )
  )
# Selecting columns by dtype is row-independent, so it is elementwise.
select_dtypes = frame_base._elementwise_method('select_dtypes',
                                               base=pd.DataFrame)
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def shift(self, axis, freq, **kwargs):
  """shift with ``axis="index"`` is only supported with ``freq`` specified and
  ``fill_value`` undefined. Other configurations make this operation
  order-sensitive."""
  if axis in (1, 'columns'):
    # Shifting along columns operates within each row, so it is elementwise.
    preserves = partitionings.Arbitrary()
    proxy = None
  else:
    if freq is None or 'fill_value' in kwargs:
      fill_value = kwargs.get('fill_value', 'NOT SET')
      raise frame_base.WontImplementError(
          f"shift(axis={axis!r}) is only supported with freq defined, and "
          f"fill_value undefined (got freq={freq!r},"
          f"fill_value={fill_value!r}). Other configurations are sensitive "
          "to the order of the data because they require populating shifted "
          "rows with `fill_value`.",
          reason="order-sensitive")

    # proxy generation fails in pandas <1.2
    # Seems due to https://github.com/pandas-dev/pandas/issues/14811,
    # bug with shift on empty indexes.
    # Fortunately the proxy should be identical to the input.
    proxy = self._expr.proxy().copy()

    # index is modified, so no partitioning is preserved.
    preserves = partitionings.Singleton()

  return frame_base.DeferredFrame.wrap(
      expressions.ComputedExpression(
          'shift',
          lambda df: df.shift(axis=axis, freq=freq, **kwargs),
          [self._expr],
          proxy=proxy,
          preserves_partition_by=preserves,
          requires_partition_by=partitionings.Arbitrary()))
# shape requires counting all rows, so the result is not deferrable.
shape = property(frame_base.wont_implement_method(
    pd.DataFrame, 'shape', reason="non-deferred-result"))

# stack reshapes within each row's data; the resulting index differs from the
# input's, so no partitioning is preserved.
stack = frame_base._proxy_method(
    'stack',
    base=pd.DataFrame,
    requires_partition_by=partitionings.Arbitrary(),
    preserves_partition_by=partitionings.Singleton())

# Standard aggregations, all routed through _agg_method.
all = _agg_method(pd.DataFrame, 'all')
any = _agg_method(pd.DataFrame, 'any')
count = _agg_method(pd.DataFrame, 'count')
describe = _agg_method(pd.DataFrame, 'describe')
max = _agg_method(pd.DataFrame, 'max')
min = _agg_method(pd.DataFrame, 'min')
prod = product = _agg_method(pd.DataFrame, 'prod')
sum = _agg_method(pd.DataFrame, 'sum')
mean = _agg_method(pd.DataFrame, 'mean')
median = _agg_method(pd.DataFrame, 'median')
nunique = _agg_method(pd.DataFrame, 'nunique')
std = _agg_method(pd.DataFrame, 'std')
var = _agg_method(pd.DataFrame, 'var')
sem = _agg_method(pd.DataFrame, 'sem')
mad = _agg_method(pd.DataFrame, 'mad')
skew = _agg_method(pd.DataFrame, 'skew')
kurt = _agg_method(pd.DataFrame, 'kurt')
kurtosis = _agg_method(pd.DataFrame, 'kurtosis')

take = frame_base.wont_implement_method(pd.DataFrame, 'take',
                                        reason='deprecated')

# Conversions that materialize the whole frame locally are not deferrable.
to_records = frame_base.wont_implement_method(pd.DataFrame, 'to_records',
                                              reason="non-deferred-result")
to_dict = frame_base.wont_implement_method(pd.DataFrame, 'to_dict',
                                           reason="non-deferred-result")
to_numpy = frame_base.wont_implement_method(pd.DataFrame, 'to_numpy',
                                            reason="non-deferred-result")
to_string = frame_base.wont_implement_method(pd.DataFrame, 'to_string',
                                             reason="non-deferred-result")

to_sparse = frame_base.wont_implement_method(pd.DataFrame, 'to_sparse',
                                             reason="non-deferred-result")

# Transposing makes the output columns depend on the (data-dependent) index.
transpose = frame_base.wont_implement_method(
    pd.DataFrame, 'transpose', reason='non-deferred-columns')
T = property(frame_base.wont_implement_method(
    pd.DataFrame, 'T', reason='non-deferred-columns'))
@frame_base.with_docs_from(pd.DataFrame)
def unstack(self, *args, **kwargs):
  """unstack cannot be used on :class:`DeferredDataFrame` instances with
  multiple index levels, because the columns in the output depend on the
  data."""
  if self._expr.proxy().index.nlevels != 1:
    # With a MultiIndex the set of output columns is derived from the index
    # values, which the deferred model cannot know ahead of execution.
    # (Also fixes the previous error message's grammar: "a multiple indexes".)
    raise frame_base.WontImplementError(
        "unstack() is not supported on DataFrames with multiple index "
        "levels, because the columns in the output depend on the input data.",
        reason="non-deferred-columns")

  # Single index level: each output row is determined solely by its index
  # value, so Index partitioning suffices.
  return frame_base.DeferredFrame.wrap(
      expressions.ComputedExpression(
          'unstack',
          lambda df: df.unstack(*args, **kwargs),
          [self._expr],
          requires_partition_by=partitionings.Index()))
# update aligns other's rows by index, so both inputs must be partitioned by
# index; it mutates self in place (inplace=True).
update = frame_base._proxy_method(
    'update',
    inplace=True,
    base=pd.DataFrame,
    requires_partition_by=partitionings.Index(),
    preserves_partition_by=partitionings.Arbitrary())

# Materializing all values locally is not deferrable.
values = property(frame_base.wont_implement_method(
    pd.DataFrame, 'values', reason="non-deferred-result"))

style = property(frame_base.wont_implement_method(
    pd.DataFrame, 'style', reason="non-deferred-result"))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def melt(self, ignore_index, **kwargs):
  """``ignore_index=True`` is not supported, because it requires generating an
  order-sensitive index."""
  if ignore_index:
    raise frame_base.WontImplementError(
        "melt(ignore_index=True) is order sensitive because it requires "
        "generating a new index based on the order of the data.",
        reason="order-sensitive")

  def do_melt(df):
    # ignore_index=True was rejected above; always keep the existing index.
    return df.melt(ignore_index=False, **kwargs)

  return frame_base.DeferredFrame.wrap(
      expressions.ComputedExpression(
          'melt',
          do_melt,
          [self._expr],
          requires_partition_by=partitionings.Arbitrary(),
          preserves_partition_by=partitionings.Singleton()))
if hasattr(pd.DataFrame, 'value_counts'):
  @frame_base.with_docs_from(pd.DataFrame)
  def value_counts(self, subset=None, sort=False, normalize=False,
                   ascending=False, dropna=True):
    """``sort`` is ``False`` by default, and ``sort=True`` is not supported
    because it imposes an ordering on the dataset which likely will not be
    preserved."""
    if sort:
      raise frame_base.WontImplementError(
          "value_counts(sort=True) is not supported because it imposes an "
          "ordering on the dataset which likely will not be preserved.",
          reason="order-sensitive")

    # Counting occurrences is equivalent to grouping on the selected columns
    # and taking each group's size.
    group_cols = subset or list(self.columns)
    frame = self.dropna() if dropna else self
    counts = frame.groupby(group_cols, dropna=dropna).size()
    return counts / frame.length() if normalize else counts
# Attach the deferred I/O writers (all names starting with 'to_') from the io
# module onto both deferred frame types.
for io_func in dir(io):
  if io_func.startswith('to_'):
    setattr(DeferredDataFrame, io_func, getattr(io, io_func))
    setattr(DeferredSeries, io_func, getattr(io, io_func))


# Register filter as an elementwise DataFrame method.
for meth in ('filter', ):
  setattr(DeferredDataFrame, meth,
          frame_base._elementwise_method(meth, base=pd.DataFrame))
@populate_not_implemented(DataFrameGroupBy)
class DeferredGroupBy(frame_base.DeferredFrame):
  def __init__(self, expr, kwargs,
               ungrouped: expressions.Expression[pd.core.generic.NDFrame],
               ungrouped_with_index: expressions.Expression[pd.core.generic.NDFrame],  # pylint: disable=line-too-long
               grouping_columns,
               grouping_indexes,
               projection=None):
    """This object represents the result of::

        ungrouped.groupby(level=[grouping_indexes + grouping_columns],
                          **kwargs)[projection]

    :param expr: An expression to compute a pandas GroupBy object. Convenient
        for unliftable aggregations.
    :param ungrouped: An expression to compute the DataFrame pre-grouping, the
        (Multi)Index contains only the grouping columns/indexes.
    :param ungrouped_with_index: Same as ungrouped, except the index includes
        all of the original indexes as well as any grouping columns. This is
        important for operations that expose the original index, e.g. .apply(),
        but we only use it when necessary to avoid unnecessary data transfer
        and GBKs.
    :param grouping_columns: list of column labels that were in the original
        groupby(..) ``by`` parameter. Only relevant for grouped DataFrames.
    :param grouping_indexes: list of index names (or index level numbers) to be
        grouped.
    :param kwargs: Keywords args passed to the original groupby(..) call."""
    super().__init__(expr)
    self._ungrouped = ungrouped
    self._ungrouped_with_index = ungrouped_with_index
    self._projection = projection
    self._grouping_columns = grouping_columns
    self._grouping_indexes = grouping_indexes
    self._kwargs = kwargs

    if (self._kwargs.get('dropna', True) is False and
        self._ungrouped.proxy().index.nlevels > 1):
      raise NotImplementedError(
          "dropna=False does not work as intended in the Beam DataFrame API "
          "when grouping on multiple columns or indexes (See BEAM-12495).")

  def __getattr__(self, name):
    # Attribute access on a GroupBy is treated as a column projection
    # (gb.foo is equivalent to gb['foo']); record it in `projection`.
    return DeferredGroupBy(
        expressions.ComputedExpression(
            'groupby_project',
            lambda gb: getattr(gb, name), [self._expr],
            requires_partition_by=partitionings.Arbitrary(),
            preserves_partition_by=partitionings.Arbitrary()),
        self._kwargs,
        self._ungrouped,
        self._ungrouped_with_index,
        self._grouping_columns,
        self._grouping_indexes,
        projection=name)

  def __getitem__(self, name):
    # Same as __getattr__: projection of one or more columns from the groupby.
    return DeferredGroupBy(
        expressions.ComputedExpression(
            'groupby_project',
            lambda gb: gb[name], [self._expr],
            requires_partition_by=partitionings.Arbitrary(),
            preserves_partition_by=partitionings.Arbitrary()),
        self._kwargs,
        self._ungrouped,
        self._ungrouped_with_index,
        self._grouping_columns,
        self._grouping_indexes,
        projection=name)

  @frame_base.with_docs_from(DataFrameGroupBy)
  def agg(self, fn, *args, **kwargs):
    # Dispatch to the cheapest strategy the aggregation allows: two-stage
    # (liftable) aggregation where possible, otherwise single-stage.
    if _is_associative(fn):
      return _liftable_agg(fn)(self, *args, **kwargs)
    elif _is_liftable_with_sum(fn):
      return _liftable_agg(fn, postagg_meth='sum')(self, *args, **kwargs)
    elif _is_unliftable(fn):
      return _unliftable_agg(fn)(self, *args, **kwargs)
    elif callable(fn):
      # Arbitrary callables must see each whole group; require partitioning
      # by the grouping index.
      return DeferredDataFrame(
          expressions.ComputedExpression(
              'agg',
              lambda gb: gb.agg(fn, *args, **kwargs), [self._expr],
              requires_partition_by=partitionings.Index(),
              preserves_partition_by=partitionings.Singleton()))
    else:
      raise NotImplementedError(f"GroupBy.agg(func={fn!r})")

  @property
  def ndim(self):
    return self._expr.proxy().ndim

  @frame_base.with_docs_from(DataFrameGroupBy)
  def apply(self, func, *args, **kwargs):
    """Note that ``func`` will be called once during pipeline construction time
    with an empty pandas object, so take care if ``func`` has a side effect.

    When called with an empty pandas object, ``func`` is expected to return an
    object of the same type as what will be returned when the pipeline is
    processing actual data. If the result is a pandas object it should have the
    same type and name (for a Series) or column types and names (for
    a DataFrame) as the actual results."""
    project = _maybe_project_func(self._projection)
    grouping_indexes = self._grouping_indexes
    grouping_columns = self._grouping_columns

    # Unfortunately pandas does not execute func to determine the right proxy.
    # We run user func on a proxy here to detect the return type and generate
    # the proxy.
    fn_input = project(self._ungrouped_with_index.proxy().reset_index(
        grouping_columns, drop=True))
    result = func(fn_input)
    if isinstance(result, pd.core.generic.NDFrame):
      if result.index is fn_input.index:
        proxy = result
      else:
        proxy = result[:0]

        def index_to_arrays(index):
          return [index.get_level_values(level)
                  for level in range(index.nlevels)]

        # The final result will have the grouped indexes + the indexes from the
        # result
        proxy.index = pd.MultiIndex.from_arrays(
            index_to_arrays(self._ungrouped.proxy().index) +
            index_to_arrays(proxy.index),
            names=self._ungrouped.proxy().index.names + proxy.index.names)
    else:
      # The user fn returns some non-pandas type. The expected result is a
      # Series where each element is the result of one user fn call.
      dtype = pd.Series([result]).dtype
      proxy = pd.Series([], dtype=dtype, index=self._ungrouped.proxy().index)

    def do_partition_apply(df):
      # Remove columns from index, we only needed them there for partitioning
      df = df.reset_index(grouping_columns, drop=True)

      gb = df.groupby(level=grouping_indexes or None,
                      by=grouping_columns or None)

      gb = project(gb)

      return gb.apply(func, *args, **kwargs)

    return DeferredDataFrame(
        expressions.ComputedExpression(
            'apply',
            do_partition_apply,
            [self._ungrouped_with_index],
            proxy=proxy,
            requires_partition_by=partitionings.Index(grouping_indexes +
                                                      grouping_columns),
            preserves_partition_by=partitionings.Index(grouping_indexes)))

  @frame_base.with_docs_from(DataFrameGroupBy)
  def transform(self, fn, *args, **kwargs):
    """Note that ``func`` will be called once during pipeline construction time
    with an empty pandas object, so take care if ``func`` has a side effect.

    When called with an empty pandas object, ``func`` is expected to return an
    object of the same type as what will be returned when the pipeline is
    processing actual data. The result should have the same type and name (for
    a Series) or column types and names (for a DataFrame) as the actual
    results."""
    if not callable(fn):
      raise NotImplementedError(
          "String functions are not yet supported in transform.")

    if self._grouping_columns and not self._projection:
      grouping_columns = self._grouping_columns
      def fn_wrapper(x, *args, **kwargs):
        # Hide the grouping levels from the user fn; they are an
        # implementation detail of our partitioning.
        x = x.droplevel(grouping_columns)
        return fn(x, *args, **kwargs)
    else:
      fn_wrapper = fn

    project = _maybe_project_func(self._projection)

    # pandas cannot execute fn to determine the right proxy.
    # We run user fn on a proxy here to detect the return type and generate the
    # proxy.
    result = fn_wrapper(project(self._ungrouped_with_index.proxy()))
    parent_frame = self._ungrouped.args()[0].proxy()
    if isinstance(result, pd.core.generic.NDFrame):
      proxy = result[:0]
    else:
      # The user fn returns some non-pandas type. The expected result is a
      # Series where each element is the result of one user fn call.
      dtype = pd.Series([result]).dtype
      proxy = pd.Series([], dtype=dtype, name=project(parent_frame).name)

      if not isinstance(self._projection, list):
        proxy.name = self._projection

    # The final result will have the original indexes
    proxy.index = parent_frame.index

    levels = self._grouping_indexes + self._grouping_columns

    return DeferredDataFrame(
        expressions.ComputedExpression(
            'transform',
            lambda df: project(df.groupby(level=levels)).transform(
                fn_wrapper,
                *args,
                **kwargs).droplevel(self._grouping_columns),
            [self._ungrouped_with_index],
            proxy=proxy,
            requires_partition_by=partitionings.Index(levels),
            preserves_partition_by=partitionings.Index(self._grouping_indexes)))

  @frame_base.with_docs_from(DataFrameGroupBy)
  def filter(self, func=None, dropna=True):
    if func is None or not callable(func):
      raise TypeError("func must be specified and it must be callable")

    # Implemented on top of apply: each group either passes through unchanged,
    # is replaced with NaNs (dropna=False), or is emptied out (dropna=True).
    def apply_fn(df):
      if func(df):
        return df
      elif not dropna:
        result = df.copy()
        result.iloc[:, :] = np.nan
        return result
      else:
        return df.iloc[:0]

    return self.apply(apply_fn).droplevel(self._grouping_columns)

  @property  # type: ignore
  @frame_base.with_docs_from(DataFrameGroupBy)
  def dtypes(self):
    grouping_columns = self._grouping_columns
    return self.apply(lambda df: df.drop(grouping_columns, axis=1).dtypes)

  fillna = frame_base.wont_implement_method(
      DataFrameGroupBy, 'fillna', explanation=(
          "df.fillna() should be used instead. Only method=None is supported "
          "because other methods are order-sensitive. df.groupby(..).fillna() "
          "without a method is equivalent to df.fillna()."))

  # Directional fills depend on row order.
  ffill = frame_base.wont_implement_method(DataFrameGroupBy, 'ffill',
                                           reason="order-sensitive")
  bfill = frame_base.wont_implement_method(DataFrameGroupBy, 'bfill',
                                           reason="order-sensitive")
  pad = frame_base.wont_implement_method(DataFrameGroupBy, 'pad',
                                         reason="order-sensitive")
  backfill = frame_base.wont_implement_method(DataFrameGroupBy, 'backfill',
                                              reason="order-sensitive")

  aggregate = agg

  hist = frame_base.wont_implement_method(DataFrameGroupBy, 'hist',
                                          reason="plotting-tools")
  plot = frame_base.wont_implement_method(DataFrameGroupBy, 'plot',
                                          reason="plotting-tools")
  boxplot = frame_base.wont_implement_method(DataFrameGroupBy, 'boxplot',
                                             reason="plotting-tools")

  head = frame_base.wont_implement_method(
      DataFrameGroupBy, 'head', explanation=_PEEK_METHOD_EXPLANATION)
  tail = frame_base.wont_implement_method(
      DataFrameGroupBy, 'tail', explanation=_PEEK_METHOD_EXPLANATION)

  first = frame_base.not_implemented_method('first', base_type=DataFrameGroupBy)
  last = frame_base.not_implemented_method('last', base_type=DataFrameGroupBy)
  nth = frame_base.wont_implement_method(
      DataFrameGroupBy, 'nth', reason='order-sensitive')
  cumcount = frame_base.wont_implement_method(
      DataFrameGroupBy, 'cumcount', reason='order-sensitive')
  cummax = frame_base.wont_implement_method(
      DataFrameGroupBy, 'cummax', reason='order-sensitive')
  cummin = frame_base.wont_implement_method(
      DataFrameGroupBy, 'cummin', reason='order-sensitive')
  cumsum = frame_base.wont_implement_method(
      DataFrameGroupBy, 'cumsum', reason='order-sensitive')
  cumprod = frame_base.wont_implement_method(
      DataFrameGroupBy, 'cumprod', reason='order-sensitive')
  diff = frame_base.wont_implement_method(DataFrameGroupBy, 'diff',
                                          reason='order-sensitive')
  shift = frame_base.wont_implement_method(DataFrameGroupBy, 'shift',
                                           reason='order-sensitive')

  pct_change = frame_base.wont_implement_method(DataFrameGroupBy, 'pct_change',
                                                reason='order-sensitive')
  ohlc = frame_base.wont_implement_method(DataFrameGroupBy, 'ohlc',
                                          reason='order-sensitive')

  # TODO(BEAM-12169): Consider allowing this for categorical keys.
  __len__ = frame_base.wont_implement_method(
      DataFrameGroupBy, '__len__', reason="non-deferred-result")
  groups = property(frame_base.wont_implement_method(
      DataFrameGroupBy, 'groups', reason="non-deferred-result"))
  indices = property(frame_base.wont_implement_method(
      DataFrameGroupBy, 'indices', reason="non-deferred-result"))

  # Windowing over rows conflicts with Beam's own event-time windowing model.
  resample = frame_base.wont_implement_method(
      DataFrameGroupBy, 'resample', reason='event-time-semantics')
  rolling = frame_base.wont_implement_method(
      DataFrameGroupBy, 'rolling', reason='event-time-semantics')
  ewm = frame_base.wont_implement_method(
      DataFrameGroupBy, 'ewm', reason="event-time-semantics")
  expanding = frame_base.wont_implement_method(
      DataFrameGroupBy, 'expanding', reason="event-time-semantics")

  tshift = frame_base.wont_implement_method(
      DataFrameGroupBy, 'tshift', reason="deprecated")
def _maybe_project_func(projection: Optional[List[str]]):
  """Return a callable applying ``projection`` to a pandas object.

  The returned callable is the identity when ``projection`` is empty or
  ``None``; otherwise it selects the projected columns from its argument.
  """
  if not projection:
    return lambda obj: obj

  def project(df):
    return df[projection]

  return project
def _liftable_agg(meth, postagg_meth=None):
  """Create a liftable (two-stage) GroupBy aggregation method for ``meth``.

  The aggregation runs as a partial ("pre_combine") aggregation on arbitrary
  partitions followed by a final ("post_combine") aggregation on data
  partitioned by the grouping index. The final stage uses ``meth`` itself
  unless ``postagg_meth`` overrides it.
  """
  agg_name, _ = frame_base.name_and_func(meth)

  if postagg_meth is None:
    post_agg_name = agg_name
  else:
    post_agg_name, _ = frame_base.name_and_func(postagg_meth)

  @frame_base.with_docs_from(DataFrameGroupBy, name=agg_name)
  def wrapper(self, *args, **kwargs):
    assert isinstance(self, DeferredGroupBy)

    if 'min_count' in kwargs:
      # Fall back to the single-stage implementation when min_count is
      # specified.
      return _unliftable_agg(meth)(self, *args, **kwargs)

    to_group = self._ungrouped.proxy().index
    is_categorical_grouping = any(to_group.get_level_values(i).is_categorical()
                                  for i in self._grouping_indexes)
    groupby_kwargs = self._kwargs

    # Don't include un-observed categorical values in the preagg
    preagg_groupby_kwargs = groupby_kwargs.copy()
    preagg_groupby_kwargs['observed'] = True

    project = _maybe_project_func(self._projection)
    pre_agg = expressions.ComputedExpression(
        'pre_combine_' + agg_name,
        lambda df: getattr(
            project(
                df.groupby(level=list(range(df.index.nlevels)),
                           **preagg_groupby_kwargs)
            ),
            agg_name)(**kwargs),
        [self._ungrouped],
        requires_partition_by=partitionings.Arbitrary(),
        preserves_partition_by=partitionings.Arbitrary())

    post_agg = expressions.ComputedExpression(
        'post_combine_' + post_agg_name,
        lambda df: getattr(
            df.groupby(level=list(range(df.index.nlevels)),
                       **groupby_kwargs),
            post_agg_name)(**kwargs),
        [pre_agg],
        requires_partition_by=(partitionings.Singleton(reason=(
            "Aggregations grouped by a categorical column are not currently "
            "parallelizable (BEAM-11190)."
        ))
                               if is_categorical_grouping
                               else partitionings.Index()),
        preserves_partition_by=partitionings.Arbitrary())
    return frame_base.DeferredFrame.wrap(post_agg)

  return wrapper
def _unliftable_agg(meth):
  """Create a single-stage GroupBy aggregation method for ``meth``.

  The full aggregation runs in one step on data partitioned by the grouping
  index (or collected on a single node when grouping by a categorical column).
  """
  agg_name, _ = frame_base.name_and_func(meth)

  @frame_base.with_docs_from(DataFrameGroupBy, name=agg_name)
  def wrapper(self, *args, **kwargs):
    assert isinstance(self, DeferredGroupBy)

    to_group = self._ungrouped.proxy().index
    is_categorical_grouping = any(to_group.get_level_values(i).is_categorical()
                                  for i in self._grouping_indexes)

    groupby_kwargs = self._kwargs
    project = _maybe_project_func(self._projection)
    post_agg = expressions.ComputedExpression(
        agg_name,
        lambda df: getattr(project(
            df.groupby(level=list(range(df.index.nlevels)),
                       **groupby_kwargs),
        ), agg_name)(**kwargs),
        [self._ungrouped],
        requires_partition_by=(partitionings.Singleton(reason=(
            "Aggregations grouped by a categorical column are not currently "
            "parallelizable (BEAM-11190)."
        ))
                               if is_categorical_grouping
                               else partitionings.Index()),
        # Some aggregation methods (e.g. corr/cov) add additional index levels.
        # We only preserve the ones that existed _before_ the groupby.
        preserves_partition_by=partitionings.Index(
            list(range(self._ungrouped.proxy().index.nlevels))))
    return frame_base.DeferredFrame.wrap(post_agg)

  return wrapper
# Register the two-stage (liftable) aggregations...
for meth in LIFTABLE_AGGREGATIONS:
  setattr(DeferredGroupBy, meth, _liftable_agg(meth))
# ...the ones whose partial results are combined with a final 'sum'...
for meth in LIFTABLE_WITH_SUM_AGGREGATIONS:
  setattr(DeferredGroupBy, meth, _liftable_agg(meth, postagg_meth='sum'))
# ...and the single-stage (unliftable) aggregations.
for meth in UNLIFTABLE_AGGREGATIONS:
  if meth in ('kurt', 'kurtosis'):
    # pandas doesn't currently allow kurtosis on GroupBy:
    # https://github.com/pandas-dev/pandas/issues/40139
    continue
  setattr(DeferredGroupBy, meth, _unliftable_agg(meth))
def _check_str_or_np_builtin(agg_func, func_list):
  """Return True if ``agg_func`` matches one of ``func_list``, either given
  as the aggregation's string name or as the same-named numpy/builtin
  callable."""
  if agg_func in func_list:
    return True
  name = getattr(agg_func, '__name__', None)
  return name in func_list and agg_func.__module__ in ('numpy', 'builtins')
def _is_associative(agg_func):
  # Aggregations that can be lifted directly: a partial aggregate can be
  # re-aggregated with the same method.
  return _check_str_or_np_builtin(agg_func, LIFTABLE_AGGREGATIONS)


def _is_liftable_with_sum(agg_func):
  # Aggregations whose partial results are combined with a final 'sum'.
  return _check_str_or_np_builtin(agg_func, LIFTABLE_WITH_SUM_AGGREGATIONS)


def _is_unliftable(agg_func):
  # Aggregations that must run in a single stage on co-located data.
  return _check_str_or_np_builtin(agg_func, UNLIFTABLE_AGGREGATIONS)
# Aggregation names classified as numeric.
NUMERIC_AGGREGATIONS = ['max', 'min', 'prod', 'sum', 'mean', 'median', 'std',
                        'var', 'sem', 'mad', 'skew', 'kurt', 'kurtosis']


def _is_numeric(agg_func):
  # True if agg_func is (or names) one of the numeric aggregations.
  return _check_str_or_np_builtin(agg_func, NUMERIC_AGGREGATIONS)
@populate_not_implemented(DataFrameGroupBy)
class _DeferredGroupByCols(frame_base.DeferredFrame):
  """Deferred proxy for the column-wise view of a GroupBy; most operations
  are delegated elementwise to the underlying pandas GroupBy object."""
  # It's not clear that all of these make sense in Pandas either...
  agg = aggregate = frame_base._elementwise_method('agg', base=DataFrameGroupBy)
  any = frame_base._elementwise_method('any', base=DataFrameGroupBy)
  all = frame_base._elementwise_method('all', base=DataFrameGroupBy)
  boxplot = frame_base.wont_implement_method(
      DataFrameGroupBy, 'boxplot', reason="plotting-tools")
  describe = frame_base.not_implemented_method('describe',
                                               base_type=DataFrameGroupBy)
  diff = frame_base._elementwise_method('diff', base=DataFrameGroupBy)
  fillna = frame_base._elementwise_method('fillna', base=DataFrameGroupBy)
  filter = frame_base._elementwise_method('filter', base=DataFrameGroupBy)
  first = frame_base._elementwise_method('first', base=DataFrameGroupBy)
  get_group = frame_base._elementwise_method('get_group', base=DataFrameGroupBy)
  head = frame_base.wont_implement_method(
      DataFrameGroupBy, 'head', explanation=_PEEK_METHOD_EXPLANATION)
  hist = frame_base.wont_implement_method(
      DataFrameGroupBy, 'hist', reason="plotting-tools")
  idxmax = frame_base._elementwise_method('idxmax', base=DataFrameGroupBy)
  idxmin = frame_base._elementwise_method('idxmin', base=DataFrameGroupBy)
  last = frame_base._elementwise_method('last', base=DataFrameGroupBy)
  mad = frame_base._elementwise_method('mad', base=DataFrameGroupBy)
  max = frame_base._elementwise_method('max', base=DataFrameGroupBy)
  mean = frame_base._elementwise_method('mean', base=DataFrameGroupBy)
  median = frame_base._elementwise_method('median', base=DataFrameGroupBy)
  min = frame_base._elementwise_method('min', base=DataFrameGroupBy)
  nunique = frame_base._elementwise_method('nunique', base=DataFrameGroupBy)
  plot = frame_base.wont_implement_method(
      DataFrameGroupBy, 'plot', reason="plotting-tools")
  prod = frame_base._elementwise_method('prod', base=DataFrameGroupBy)
  quantile = frame_base._elementwise_method('quantile', base=DataFrameGroupBy)
  shift = frame_base._elementwise_method('shift', base=DataFrameGroupBy)
  size = frame_base._elementwise_method('size', base=DataFrameGroupBy)
  skew = frame_base._elementwise_method('skew', base=DataFrameGroupBy)
  std = frame_base._elementwise_method('std', base=DataFrameGroupBy)
  sum = frame_base._elementwise_method('sum', base=DataFrameGroupBy)
  tail = frame_base.wont_implement_method(
      DataFrameGroupBy, 'tail', explanation=_PEEK_METHOD_EXPLANATION)
  take = frame_base.wont_implement_method(
      DataFrameGroupBy, 'take', reason='deprecated')
  tshift = frame_base._elementwise_method('tshift', base=DataFrameGroupBy)
  var = frame_base._elementwise_method('var', base=DataFrameGroupBy)

  # Metadata accessors read directly from the proxy; no pipeline execution.
  @property  # type: ignore
  @frame_base.with_docs_from(DataFrameGroupBy)
  def groups(self):
    return self._expr.proxy().groups

  @property  # type: ignore
  @frame_base.with_docs_from(DataFrameGroupBy)
  def indices(self):
    return self._expr.proxy().indices

  @property  # type: ignore
  @frame_base.with_docs_from(DataFrameGroupBy)
  def ndim(self):
    return self._expr.proxy().ndim

  @property  # type: ignore
  @frame_base.with_docs_from(DataFrameGroupBy)
  def ngroups(self):
    return self._expr.proxy().ngroups
@populate_not_implemented(pd.core.indexes.base.Index)
class _DeferredIndex(object):
  """Deferred proxy for ``frame.index``.

  Only metadata accessors (names, name, ndim, dtype, nlevels) are supported;
  any other attribute access raises NotImplementedError.
  """
  def __init__(self, frame):
    # The deferred frame whose index this object represents.
    self._frame = frame

  @property
  def names(self):
    return self._frame._expr.proxy().index.names

  @names.setter
  def names(self, value):
    def set_index_names(df):
      df = df.copy()
      df.index.names = value
      return df

    # Rebind the owning frame's expression in place, mirroring pandas'
    # in-place semantics for assigning index names.
    self._frame._expr = expressions.ComputedExpression(
        'set_index_names',
        set_index_names,
        [self._frame._expr],
        requires_partition_by=partitionings.Arbitrary(),
        preserves_partition_by=partitionings.Arbitrary())

  @property
  def name(self):
    return self._frame._expr.proxy().index.name

  @name.setter
  def name(self, value):
    # Delegates to the names setter with a single-element list.
    self.names = [value]

  @property
  def ndim(self):
    return self._frame._expr.proxy().index.ndim

  @property
  def dtype(self):
    return self._frame._expr.proxy().index.dtype

  @property
  def nlevels(self):
    return self._frame._expr.proxy().index.nlevels

  def __getattr__(self, name):
    raise NotImplementedError('index.%s' % name)
@populate_not_implemented(pd.core.indexing._LocIndexer)
class _DeferredLoc(object):
  """Deferred proxy for label-based selection via ``frame.loc``."""
  def __init__(self, frame):
    self._frame = frame

  def __getitem__(self, key):
    if isinstance(key, tuple):
      rows, cols = key
      # (rows, cols) selection is done as two chained deferred selections.
      return self[rows][cols]
    elif isinstance(key, list) and key and isinstance(key[0], bool):
      # Aligned by numerical key.
      raise NotImplementedError(type(key))
    elif isinstance(key, list):
      # Select rows, but behaves poorly on missing values.
      raise NotImplementedError(type(key))
    elif isinstance(key, slice):
      args = [self._frame._expr]
      func = lambda df: df.loc[key]
    elif isinstance(key, frame_base.DeferredFrame):
      func = lambda df, key: df.loc[key]
      if pd.core.dtypes.common.is_bool_dtype(key._expr.proxy()):
        # Boolean indexer, just pass it in as-is
        args = [self._frame._expr, key._expr]
      else:
        # Likely a DeferredSeries of labels, overwrite the key's index with its
        # values so we can colocate them with the labels they're selecting
        def data_to_index(s):
          s = s.copy()
          s.index = s
          return s

        reindexed_expr = expressions.ComputedExpression(
            'data_to_index',
            data_to_index,
            [key._expr],
            requires_partition_by=partitionings.Arbitrary(),
            preserves_partition_by=partitionings.Singleton(),
        )
        args = [self._frame._expr, reindexed_expr]
    elif callable(key):

      def checked_callable_key(df):
        # Validate at execution time that the callable produces a supported
        # row indexer (slice or Series; boolean lists are rejected).
        computed_index = key(df)
        if isinstance(computed_index, tuple):
          row_index, _ = computed_index
        else:
          row_index = computed_index
        if isinstance(row_index, list) and row_index and isinstance(
            row_index[0], bool):
          raise NotImplementedError(type(row_index))
        elif not isinstance(row_index, (slice, pd.Series)):
          raise NotImplementedError(type(row_index))
        return computed_index

      args = [self._frame._expr]
      func = lambda df: df.loc[checked_callable_key]
    else:
      raise NotImplementedError(type(key))

    return frame_base.DeferredFrame.wrap(
        expressions.ComputedExpression(
            'loc',
            func,
            args,
            # Two inputs (frame + deferred key) must be co-located by index;
            # a single-input selection is elementwise.
            requires_partition_by=(
                partitionings.Index()
                if len(args) > 1
                else partitionings.Arbitrary()),
            preserves_partition_by=partitionings.Arbitrary()))

  __setitem__ = frame_base.not_implemented_method(
      'loc.setitem', base_type=pd.core.indexing._LocIndexer)
@populate_not_implemented(pd.core.indexing._iLocIndexer)
class _DeferredILoc(object):
  """Deferred proxy for positional selection via ``frame.iloc``.

  Only column selection with a full row slice (``df.iloc[:, cols]``) is
  supported; selecting or mutating rows by position is order-sensitive and
  therefore rejected.
  """
  def __init__(self, frame):
    self._frame = frame

  def __getitem__(self, index):
    if isinstance(index, tuple):
      rows, _ = index
      if rows != slice(None, None, None):
        # Error messages below fix a grammar typo ("it's" -> "its").
        raise frame_base.WontImplementError(
            "Using iloc to select rows is not supported because its "
            "position-based indexing is sensitive to the order of the data.",
            reason="order-sensitive")
      # Full row slice: only columns are selected, which is elementwise.
      return frame_base.DeferredFrame.wrap(
          expressions.ComputedExpression(
              'iloc',
              lambda df: df.iloc[index],
              [self._frame._expr],
              requires_partition_by=partitionings.Arbitrary(),
              preserves_partition_by=partitionings.Arbitrary()))
    else:
      raise frame_base.WontImplementError(
          "Using iloc to select rows is not supported because its "
          "position-based indexing is sensitive to the order of the data.",
          reason="order-sensitive")

  def __setitem__(self, index, value):
    raise frame_base.WontImplementError(
        "Using iloc to mutate a frame is not supported because its "
        "position-based indexing is sensitive to the order of the data.",
        reason="order-sensitive")
class _DeferredStringMethods(frame_base.DeferredBase):
@frame_base.with_docs_from(pd.core.strings.StringMethods)
@frame_base.args_to_kwargs(pd.core.strings.StringMethods)
@frame_base.populate_defaults(pd.core.strings.StringMethods)
def cat(self, others, join, **kwargs):
  """If defined, ``others`` must be a :class:`DeferredSeries` or a ``list`` of
  ``DeferredSeries``."""
  if others is None:
    # Concatenate series into a single String
    requires = partitionings.Singleton(reason=(
        "cat(others=None) concatenates all data in a Series into a single "
        "string, so it requires collecting all data on a single node."
    ))
    func = lambda df: df.str.cat(join=join, **kwargs)
    args = [self._expr]

  elif (isinstance(others, frame_base.DeferredBase) or
        (isinstance(others, list) and
         all(isinstance(other, frame_base.DeferredBase) for other in others))):
    if isinstance(others, frame_base.DeferredBase):
      others = [others]

    # Elementwise concatenation with other deferred Series requires all
    # inputs to be co-located by index.
    requires = partitionings.Index()

    def func(*args):
      return args[0].str.cat(others=args[1:], join=join, **kwargs)

    args = [self._expr] + [other._expr for other in others]

  else:
    raise frame_base.WontImplementError(
        "others must be None, DeferredSeries, or List[DeferredSeries] "
        f"(encountered {type(others)}). Other types are not supported "
        "because they make this operation sensitive to the order of the "
        "data.", reason="order-sensitive")

  return frame_base.DeferredFrame.wrap(
      expressions.ComputedExpression(
          'cat',
          func,
          args,
          requires_partition_by=requires,
          preserves_partition_by=partitionings.Arbitrary()))
@frame_base.with_docs_from(pd.core.strings.StringMethods)
@frame_base.args_to_kwargs(pd.core.strings.StringMethods)
def repeat(self, repeats):
"""``repeats`` must be an ``int`` or a :class:`DeferredSeries`. Lists are
not supported because they make this operation order-sensitive."""
if isinstance(repeats, int):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'repeat',
lambda series: series.str.repeat(repeats),
[self._expr],
# TODO(BEAM-11155): Defer to pandas to compute this proxy.
# Currently it incorrectly infers dtype bool, may require upstream
# fix.
proxy=self._expr.proxy(),
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()))
elif isinstance(repeats, frame_base.DeferredBase):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'repeat',
lambda series, repeats_series: series.str.repeat(repeats_series),
[self._expr, repeats._expr],
# TODO(BEAM-11155): Defer to pandas to compute this proxy.
# Currently it incorrectly infers dtype bool, may require upstream
# fix.
proxy=self._expr.proxy(),
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary()))
elif isinstance(repeats, list):
raise frame_base.WontImplementError(
"str.repeat(repeats=) repeats must be an int or a DeferredSeries. "
"Lists are not supported because they make this operation sensitive "
"to the order of the data.", reason="order-sensitive")
else:
raise TypeError("str.repeat(repeats=) value must be an int or a "
f"DeferredSeries (encountered {type(repeats)}).")
get_dummies = frame_base.wont_implement_method(
pd.core.strings.StringMethods, 'get_dummies',
reason='non-deferred-columns')
split = frame_base.wont_implement_method(
pd.core.strings.StringMethods, 'split',
reason='non-deferred-columns')
rsplit = frame_base.wont_implement_method(
pd.core.strings.StringMethods, 'rsplit',
reason='non-deferred-columns')
ELEMENTWISE_STRING_METHODS = [
'capitalize',
'casefold',
'contains',
'count',
'endswith',
'extract',
'findall',
'fullmatch',
'get',
'isalnum',
'isalpha',
'isdecimal',
'isdigit',
'islower',
'isnumeric',
'isspace',
'istitle',
'isupper',
'join',
'len',
'lower',
'lstrip',
'match',
'pad',
'partition',
'replace',
'rpartition',
'rstrip',
'slice',
'slice_replace',
'startswith',
'strip',
'swapcase',
'title',
'upper',
'wrap',
'zfill',
'__getitem__',
]
NON_ELEMENTWISE_STRING_METHODS = [
'extractall',
]
def make_str_func(method):
def func(df, *args, **kwargs):
try:
df_str = df.str
except AttributeError:
# If there's a non-string value in a Series passed to .str method, pandas
# will generally just replace it with NaN in the result. However if
# there are _only_ non-string values, pandas will raise:
#
# AttributeError: Can only use .str accessor with string values!
#
# This can happen to us at execution time if we split a partition that is
# only non-strings. This branch just replaces all those values with NaN
# in that case.
return df.map(lambda _: np.nan)
else:
return getattr(df_str, method)(*args, **kwargs)
return func
for method in ELEMENTWISE_STRING_METHODS:
if not hasattr(pd.core.strings.StringMethods, method):
# older versions (1.0.x) don't support some of these methods
continue
setattr(_DeferredStringMethods,
method,
frame_base._elementwise_method(make_str_func(method),
name=method,
base=pd.core.strings.StringMethods))
for method in NON_ELEMENTWISE_STRING_METHODS:
if not hasattr(pd.core.strings.StringMethods, method):
# older versions (1.0.x) don't support some of these methods
continue
setattr(_DeferredStringMethods,
method,
frame_base._proxy_method(
make_str_func(method),
name=method,
base=pd.core.strings.StringMethods,
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton()))
def make_cat_func(method):
def func(df, *args, **kwargs):
return getattr(df.cat, method)(*args, **kwargs)
return func
class _DeferredCategoricalMethods(frame_base.DeferredBase):
@property # type: ignore
@frame_base.with_docs_from(pd.core.arrays.categorical.CategoricalAccessor)
def categories(self):
return self._expr.proxy().cat.categories
@property # type: ignore
@frame_base.with_docs_from(pd.core.arrays.categorical.CategoricalAccessor)
def ordered(self):
return self._expr.proxy().cat.ordered
@property # type: ignore
@frame_base.with_docs_from(pd.core.arrays.categorical.CategoricalAccessor)
def codes(self):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'codes',
lambda s: s.cat.codes,
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary(),
)
)
remove_unused_categories = frame_base.wont_implement_method(
pd.core.arrays.categorical.CategoricalAccessor,
'remove_unused_categories', reason="non-deferred-columns")
ELEMENTWISE_CATEGORICAL_METHODS = [
'add_categories',
'as_ordered',
'as_unordered',
'remove_categories',
'rename_categories',
'reorder_categories',
'set_categories',
]
for method in ELEMENTWISE_CATEGORICAL_METHODS:
setattr(_DeferredCategoricalMethods,
method,
frame_base._elementwise_method(
make_cat_func(method), name=method,
base=pd.core.arrays.categorical.CategoricalAccessor))
class _DeferredDatetimeMethods(frame_base.DeferredBase):
@property # type: ignore
@frame_base.with_docs_from(pd.core.indexes.accessors.DatetimeProperties)
def tz(self):
return self._expr.proxy().dt.tz
@property # type: ignore
@frame_base.with_docs_from(pd.core.indexes.accessors.DatetimeProperties)
def freq(self):
return self._expr.proxy().dt.freq
@frame_base.with_docs_from(pd.core.indexes.accessors.DatetimeProperties)
def tz_localize(self, *args, ambiguous='infer', **kwargs):
"""``ambiguous`` cannot be set to ``"infer"`` as its semantics are
order-sensitive. Similarly, specifying ``ambiguous`` as an
:class:`~numpy.ndarray` is order-sensitive, but you can achieve similar
functionality by specifying ``ambiguous`` as a Series."""
if isinstance(ambiguous, np.ndarray):
raise frame_base.WontImplementError(
"tz_localize(ambiguous=ndarray) is not supported because it makes "
"this operation sensitive to the order of the data. Please use a "
"DeferredSeries instead.",
reason="order-sensitive")
elif isinstance(ambiguous, frame_base.DeferredFrame):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'tz_localize',
lambda s,
ambiguous: s.dt.tz_localize(*args, ambiguous=ambiguous, **kwargs),
[self._expr, ambiguous._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary()))
elif ambiguous == 'infer':
# infer attempts to infer based on the order of the timestamps
raise frame_base.WontImplementError(
f"tz_localize(ambiguous={ambiguous!r}) is not allowed because it "
"makes this operation sensitive to the order of the data.",
reason="order-sensitive")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'tz_localize',
lambda s: s.dt.tz_localize(*args, ambiguous=ambiguous, **kwargs),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()))
to_period = frame_base.wont_implement_method(
pd.core.indexes.accessors.DatetimeProperties, 'to_period',
reason="event-time-semantics")
to_pydatetime = frame_base.wont_implement_method(
pd.core.indexes.accessors.DatetimeProperties, 'to_pydatetime',
reason="non-deferred-result")
to_pytimedelta = frame_base.wont_implement_method(
pd.core.indexes.accessors.DatetimeProperties, 'to_pytimedelta',
reason="non-deferred-result")
def make_dt_property(method):
def func(df):
return getattr(df.dt, method)
return func
def make_dt_func(method):
def func(df, *args, **kwargs):
return getattr(df.dt, method)(*args, **kwargs)
return func
ELEMENTWISE_DATETIME_METHODS = [
'ceil',
'day_name',
'month_name',
'floor',
'isocalendar',
'round',
'normalize',
'strftime',
'tz_convert',
]
for method in ELEMENTWISE_DATETIME_METHODS:
if not hasattr(pd.core.indexes.accessors.DatetimeProperties, method):
# older versions (1.0.x) don't support some of these methods
continue
setattr(_DeferredDatetimeMethods,
method,
frame_base._elementwise_method(
make_dt_func(method),
name=method,
base=pd.core.indexes.accessors.DatetimeProperties))
ELEMENTWISE_DATETIME_PROPERTIES = [
'date',
'day',
'dayofweek',
'dayofyear',
'days_in_month',
'daysinmonth',
'hour',
'is_leap_year',
'is_month_end',
'is_month_start',
'is_quarter_end',
'is_quarter_start',
'is_year_end',
'is_year_start',
'microsecond',
'minute',
'month',
'nanosecond',
'quarter',
'second',
'time',
'timetz',
'week',
'weekday',
'weekofyear',
'year',
]
for method in ELEMENTWISE_DATETIME_PROPERTIES:
setattr(_DeferredDatetimeMethods,
method,
property(frame_base._elementwise_method(
make_dt_property(method),
name=method,
base=pd.core.indexes.accessors.DatetimeProperties)))
for base in ['add',
'sub',
'mul',
'div',
'truediv',
'floordiv',
'mod',
'divmod',
'pow',
'and',
'or']:
for p in ['%s', 'r%s', '__%s__', '__r%s__']:
# TODO: non-trivial level?
name = p % base
if hasattr(pd.Series, name):
setattr(
DeferredSeries,
name,
frame_base._elementwise_method(name, restrictions={'level': None},
base=pd.Series))
if hasattr(pd.DataFrame, name):
setattr(
DeferredDataFrame,
name,
frame_base._elementwise_method(name, restrictions={'level': None},
base=pd.DataFrame))
inplace_name = '__i%s__' % base
if hasattr(pd.Series, inplace_name):
setattr(
DeferredSeries,
inplace_name,
frame_base._elementwise_method(inplace_name, inplace=True,
base=pd.Series))
if hasattr(pd.DataFrame, inplace_name):
setattr(
DeferredDataFrame,
inplace_name,
frame_base._elementwise_method(inplace_name, inplace=True,
base=pd.DataFrame))
for name in ['lt', 'le', 'gt', 'ge', 'eq', 'ne']:
for p in '%s', '__%s__':
# Note that non-underscore name is used for both as the __xxx__ methods are
# order-sensitive.
setattr(DeferredSeries, p % name,
frame_base._elementwise_method(name, base=pd.Series))
setattr(DeferredDataFrame, p % name,
frame_base._elementwise_method(name, base=pd.DataFrame))
for name in ['__neg__', '__pos__', '__invert__']:
setattr(DeferredSeries, name,
frame_base._elementwise_method(name, base=pd.Series))
setattr(DeferredDataFrame, name,
frame_base._elementwise_method(name, base=pd.DataFrame))
DeferredSeries.multiply = DeferredSeries.mul # type: ignore
DeferredDataFrame.multiply = DeferredDataFrame.mul # type: ignore
DeferredSeries.subtract = DeferredSeries.sub # type: ignore
DeferredDataFrame.subtract = DeferredDataFrame.sub # type: ignore
DeferredSeries.divide = DeferredSeries.div # type: ignore
DeferredDataFrame.divide = DeferredDataFrame.div # type: ignore
def _slice_parts(s):
yield s.start
yield s.stop
yield s.step
def _is_null_slice(s):
return isinstance(s, slice) and all(x is None for x in _slice_parts(s))
def _is_integer_slice(s):
return isinstance(s, slice) and all(
x is None or isinstance(x, int)
for x in _slice_parts(s)) and not _is_null_slice(s)
| 39.74716 | 117 | 0.654489 |
aceb889c08c8b04a6d4de29cf5297800733620e3 | 28,517 | py | Python | dev/tools/leveleditor/direct/tkwidgets/Valuator.py | CrankySupertoon01/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 1 | 2021-02-13T22:40:50.000Z | 2021-02-13T22:40:50.000Z | dev/tools/leveleditor/direct/tkwidgets/Valuator.py | CrankySupertoonArchive/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 1 | 2018-07-28T20:07:04.000Z | 2018-07-30T18:28:34.000Z | dev/tools/leveleditor/direct/tkwidgets/Valuator.py | CrankySupertoonArchive/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 2 | 2019-12-02T01:39:10.000Z | 2021-02-13T22:41:00.000Z | """Undocumented Module"""
__all__ = ['Valuator', 'ValuatorGroup', 'ValuatorGroupPanel']
from direct.showbase.DirectObject import *
from direct.showbase.TkGlobal import *
from Tkinter import *
import tkColorChooser
import WidgetPropertiesDialog
import string, Pmw
from direct.directtools.DirectUtil import getTkColorString
VALUATOR_MINI = 'mini'
VALUATOR_FULL = 'full'
class Valuator(Pmw.MegaWidget):
sfBase = 3.0
sfDist = 7
deadband = 5
""" Base class for widgets used to interactively adjust numeric values """
def __init__(self, parent = None, **kw):
#define the megawidget options
INITOPT = Pmw.INITOPT
optiondefs = (
('state', NORMAL, self.setState),
# Widget appearance
('relief', GROOVE, None),
('borderwidth', 2, None),
('text', 'Valuator', self.setLabel),
# Initial and reset values
('value', 0.0, INITOPT),
('resetValue', 0.0, None),
# Behavior
('min', None, None),
('max', None, None),
('resolution', None, None),
('numDigits', 2, self.setEntryFormat),
# Enable/disable popup menu
('fAdjustable', 1, None),
# Actions
('command', None, None),
('commandData', [], None),
('fCommandOnInit', 0, INITOPT),
# Callbacks to execute when updating widget's value
('preCallback', None, None),
('postCallback', None, None),
# Extra data to be passed to callback function, needs to be a list
('callbackData', [], None),
)
self.defineoptions(kw, optiondefs)
# Initialize the superclass
Pmw.MegaWidget.__init__(self, parent)
# Current adjusted (for min/max/resolution) value
self.adjustedValue = self['value']
# Create the components
interior = self.interior()
interior.configure(relief = self['relief'], bd = self['borderwidth'])
# The Valuator
self.createValuator()
# Set valuator callbacks for mouse start/stop
self._valuator['preCallback'] = self._mouseDown
self._valuator['postCallback'] = self._mouseUp
# The Label
if self['text'] is not None:
self._label = self.createcomponent('label', (), None,
Label, (interior,),
text = self['text'],
font = ('MS Sans Serif', 12),
anchor = CENTER)
else:
self._label = None
# The entry
self._entryVal = StringVar()
self._entry = self.createcomponent('entry', (), None,
Entry, (interior,),
justify = RIGHT,
width = 12,
textvariable = self._entryVal)
self._entry.bind('<Return>', self.validateEntryInput)
self._entryBackground = self._entry.cget('background')
# Pack Valuator Widget
self.packValuator()
# Set reset value if none specified
if not kw.has_key('resetValue'):
self['resetValue'] = self['value']
if self['fAdjustable']:
# The popup menu
self._popupMenu = Menu(interior, tearoff = 0)
self.addValuatorMenuEntries()
self._popupMenu.add_command(label = 'Reset',
command = self.reset)
self._popupMenu.add_command(label = 'Set to Zero',
command = self.zero)
self._popupMenu.add_command(
label = 'Properties...',
command = self._popupPropertiesDialog)
# Add key bindings
if self._label:
self._label.bind(
'<ButtonPress-3>', self._popupValuatorMenu)
self._entry.bind(
'<ButtonPress-3>', self._popupValuatorMenu)
self._valuator._widget.bind(
'<ButtonPress-3>', self._popupValuatorMenu)
# A Dictionary of dictionaries for the popup property dialog
self.propertyDict = {
'state':
{'widget': self,
'type': 'string',
'help': 'Enter state: normal or disabled.'
},
'text':
{'widget': self,
'type': 'string',
'help': 'Enter label text.'
},
'min':
{ 'widget': self,
'type': 'real',
'fNone': 1,
'help': 'Minimum allowable value. Enter None for no minimum.'},
'max':
{ 'widget': self,
'type': 'real',
'fNone': 1,
'help': 'Maximum allowable value. Enter None for no maximum.'},
'numDigits':
{'widget': self,
'type': 'integer',
'help': 'Number of digits after decimal point.'
},
'resolution':
{'widget': self,
'type': 'real',
'fNone': 1,
'help':'Widget resolution. Enter None for no resolution .'
},
'resetValue':
{ 'widget': self,
'type': 'real',
'help': 'Enter value to set widget to on reset.'}
}
# Property list defines the display order of the properties
self.propertyList = [
'state', 'text', 'min', 'max', 'numDigits',
'resolution', 'resetValue']
# Add any valuator specific properties
self.addValuatorPropertiesToDialog()
# Make sure input variables processed
self.fInit = self['fCommandOnInit']
self.initialiseoptions(Valuator)
def set(self, value, fCommand = 1):
"""
Update widget's value by setting valuator, which will in
turn update the entry. fCommand flag (which is passed to the
valuator as commandData, which is then passed in turn to
self.setEntry) controls command execution.
"""
self._valuator['commandData'] = [fCommand]
self._valuator.set(value)
# Restore commandData to 1 so that interaction via valuator widget
# will result in command being executed, otherwise a set with
# commandData == 0 will stick and commands will not be executed
self._valuator['commandData'] = [1]
def get(self):
""" Return current widget value """
return self.adjustedValue
def setEntry(self, value, fCommand = 1):
"""
Update value displayed in entry, fCommand flag controls
command execution
"""
# Clamp value
if self['min'] is not None:
if value < self['min']:
value = self['min']
if self['max'] is not None:
if value > self['max']:
value = self['max']
# Round by resolution
if self['resolution'] is not None:
value = round(value / self['resolution']) * self['resolution']
# Format value and use it to set entry
self._entryVal.set(self.entryFormat % value)
# Update indicator (if any) to reflect new adjusted value
self._valuator.updateIndicator(value)
# Execute command if required
if fCommand and self.fInit and (self['command'] is not None):
apply(self['command'], [value] + self['commandData'])
# Record adjusted value
self.adjustedValue = value
# Once initialization is finished, allow commands to execute
self.fInit = 1
def setEntryFormat(self):
"""
Change the number of significant digits in entry
"""
# Create new format string
self.entryFormat = "%." + "%df" % self['numDigits']
# Update entry to reflect new format
self.setEntry(self.get())
# Pass info down to valuator to adjust valuator sensitivity
self._valuator['numDigits'] = self['numDigits']
def validateEntryInput(self, event):
""" Check validity of entry and if valid pass along to valuator """
input = self._entryVal.get()
try:
# Reset background
self._entry.configure(background = self._entryBackground)
# Get new value and check validity
newValue = string.atof(input)
# If OK, execute preCallback if one defined
self._preCallback()
# Call set to update valuator
self.set(newValue)
# Execute callback
self._postCallback()
# Update valuator to reflect adjusted value
# Don't execute command
self._valuator.set(self.adjustedValue, 0)
except ValueError:
# Invalid entry, flash background
self._entry.configure(background = 'Pink')
# Callbacks executed on mouse down/up
def _mouseDown(self):
""" Function to execute at start of mouse interaction """
# Execute pre interaction callback
self._preCallback()
def _mouseUp(self):
""" Function to execute at end of mouse interaction """
# Execute post interaction callback
self._postCallback()
# Update valuator to reflect adjusted value
# Don't execute command
self._valuator.set(self.adjustedValue, 0)
# Callback functions
def _preCallback(self):
if self['preCallback']:
apply(self['preCallback'], self['callbackData'])
def _postCallback(self):
# Exectute post callback if one defined
if self['postCallback']:
apply(self['postCallback'], self['callbackData'])
def setState(self):
""" Enable/disable widget """
if self['state'] == NORMAL:
self._entry['state'] = NORMAL
self._entry['background'] = self._entryBackground
self._valuator._widget['state'] = NORMAL
elif self['state'] == DISABLED:
self._entry['background'] = 'grey75'
self._entry['state'] = DISABLED
self._valuator._widget['state'] = DISABLED
def setLabel(self):
""" Update label's text """
if self._label:
self._label['text'] = self['text']
def zero(self):
"""
self.zero()
Set valuator to zero
"""
self.set(0.0)
def reset(self):
"""
self.reset()
Reset valuator to reset value
"""
self.set(self['resetValue'])
def mouseReset(self, event):
"""
Reset valuator to resetValue
"""
# If not over any canvas item
#if not self._widget.find_withtag(CURRENT):
self.reset()
# Popup dialog to adjust widget properties
def _popupValuatorMenu(self, event):
self._popupMenu.post(event.widget.winfo_pointerx(),
event.widget.winfo_pointery())
def _popupPropertiesDialog(self):
WidgetPropertiesDialog.WidgetPropertiesDialog(
self.propertyDict,
propertyList = self.propertyList,
title = 'Widget Properties',
parent = self.interior())
def addPropertyToDialog(self, property, pDict):
self.propertyDict[property] = pDict
self.propertyList.append(property)
# Virtual functions to be redefined by subclass
def createValuator(self):
""" Function used by subclass to create valuator geometry """
pass
def packValuator(self):
""" Function used by subclass to pack widget """
pass
def addValuatorMenuEntries(self):
""" Function used by subclass to add menu entries to popup menu """
pass
def addValuatorPropertiesToDialog(self):
""" Function used by subclass to add properties to property dialog """
pass
FLOATER = 'floater'
DIAL = 'dial'
ANGLEDIAL = 'angledial'
SLIDER = 'slider'
class ValuatorGroup(Pmw.MegaWidget):
def __init__(self, parent = None, **kw):
# Default group size
DEFAULT_DIM = 1
# Default value depends on *actual* group size, test for user input
DEFAULT_VALUE = [0.0] * kw.get('dim', DEFAULT_DIM)
DEFAULT_LABELS = map(lambda x: 'v[%d]' % x,
range(kw.get('dim', DEFAULT_DIM)))
#define the megawidget options
INITOPT = Pmw.INITOPT
optiondefs = (
('type', FLOATER, INITOPT),
('dim', DEFAULT_DIM, INITOPT),
('side', TOP, INITOPT),
# A list of initial values, one for each valuator
('value', DEFAULT_VALUE, INITOPT),
('min', None, INITOPT),
('max', None, INITOPT),
('resolution', None, INITOPT),
('numDigits', 2, self._setNumDigits),
# A tuple of labels, one for each valuator
('labels', DEFAULT_LABELS, self._updateLabels),
# The command to be executed when one of the valuators is updated
('command', None, None),
# Callbacks to execute when updating widget's value
('preCallback', None, None),
('postCallback', None, None),
# Extra data to be passed to callback function, needs to be a list
('callbackData', [], None),
)
self.defineoptions(kw, optiondefs)
# Initialize the toplevel widget
Pmw.MegaWidget.__init__(self, parent)
# Create the components
interior = self.interior()
# Get a copy of the initial value (making sure its a list)
self._value = list(self['value'])
# Create the valuators
self._valuatorList = []
for index in range(self['dim']):
# Add a group alias so you can configure the valuators via:
# fg.configure(Valuator_XXX = YYY)
if self['type'] == DIAL:
import Dial
valuatorType = Dial.Dial
elif self['type'] == ANGLEDIAL:
import Dial
valuatorType = Dial.AngleDial
elif self['type'] == SLIDER:
import Slider
valuatorType = Slider.Slider
else:
import Floater
valuatorType = Floater.Floater
f = self.createcomponent(
'valuator%d' % index, (), 'valuator', valuatorType,
(interior,), value = self._value[index],
min = self['min'], max = self['max'],
resolution = self['resolution'],
text = self['labels'][index],
command = lambda val, i = index: self._valuatorSetAt(i, val),
preCallback = self._preCallback,
postCallback = self._postCallback,
callbackData = [self],
)
f.pack(side = self['side'], expand = 1, fill = X)
self._valuatorList.append(f)
# Make sure valuators are initialized
self.set(self['value'], fCommand = 0)
# Make sure input variables processed
self.initialiseoptions(ValuatorGroup)
# This is the command is used to set the groups value
def set(self, value, fCommand = 1):
for i in range(self['dim']):
self._value[i] = value[i]
# Update valuator, but don't execute its command
self._valuatorList[i].set(value[i], 0)
if fCommand and (self['command'] is not None):
self['command'](self._value)
def setAt(self, index, value):
# Update valuator and execute its command
self._valuatorList[index].set(value)
# This is the command used by the valuator
def _valuatorSetAt(self, index, value):
self._value[index] = value
if self['command']:
self['command'](self._value)
def get(self):
return self._value
def getAt(self, index):
return self._value[index]
def _setNumDigits(self):
self['valuator_numDigits'] = self['numDigits']
self.formatString = '%0.' + '%df' % self['numDigits']
def _updateLabels(self):
if self['labels']:
for index in range(self['dim']):
self._valuatorList[index]['text'] = self['labels'][index]
def _preCallback(self, valGroup):
# Execute pre callback
if self['preCallback']:
apply(self['preCallback'], valGroup.get())
def _postCallback(self, valGroup):
# Execute post callback
if self['postCallback']:
apply(self['postCallback'], valGroup.get())
def __len__(self):
return self['dim']
def __repr__(self):
str = '[' + self.formatString % self._value[0]
for val in self._value[1:]:
str += ', ' + self.formatString % val
str += ']'
return str
class ValuatorGroupPanel(Pmw.MegaToplevel):
def __init__(self, parent = None, **kw):
# Default group size
DEFAULT_DIM = 1
# Default value depends on *actual* group size, test for user input
DEFAULT_VALUE = [0.0] * kw.get('dim', DEFAULT_DIM)
DEFAULT_LABELS = map(lambda x: 'v[%d]' % x,
range(kw.get('dim', DEFAULT_DIM)))
#define the megawidget options
INITOPT = Pmw.INITOPT
optiondefs = (
('type', FLOATER, INITOPT),
('dim', DEFAULT_DIM, INITOPT),
('side', TOP, INITOPT),
('title', 'Valuator Group', None),
# A list of initial values, one for each floater
('value', DEFAULT_VALUE, INITOPT),
('min', None, INITOPT),
('max', None, INITOPT),
('resolution', None, INITOPT),
# A tuple of labels, one for each floater
('labels', DEFAULT_LABELS, self._updateLabels),
('numDigits', 2, self._setNumDigits),
# The command to be executed when one of the floaters is updated
('command', None, self._setCommand),
# Callbacks to execute when updating widget's value
('preCallback', None, self._setPreCallback),
('postCallback', None, self._setPostCallback),
# Extra data to be passed to callback function, needs to be a list
('callbackData', [], self._setCallbackData),
# Destroy or withdraw
('fDestroy', 0, INITOPT)
)
self.defineoptions(kw, optiondefs)
# Initialize the toplevel widget
Pmw.MegaToplevel.__init__(self, parent)
# Create the components
interior = self.interior()
# The Menu Bar
self.balloon = Pmw.Balloon()
menubar = self.createcomponent('menubar', (), None,
Pmw.MenuBar, (interior,),
balloon = self.balloon)
menubar.pack(fill=X)
# ValuatorGroup Menu
menubar.addmenu('Valuator Group', 'Valuator Group Operations')
menubar.addmenuitem(
'Valuator Group', 'command', 'Reset the Valuator Group panel',
label = 'Reset',
command = lambda s = self: s.reset())
if self['fDestroy']:
dismissCommand = self.destroy
else:
dismissCommand = self.withdraw
menubar.addmenuitem(
'Valuator Group', 'command', 'Dismiss Valuator Group panel',
label = 'Dismiss', command = dismissCommand)
menubar.addmenu('Help', 'Valuator Group Help Operations')
self.toggleBalloonVar = IntVar()
self.toggleBalloonVar.set(0)
menubar.addmenuitem('Help', 'checkbutton',
'Toggle balloon help',
label = 'Balloon Help',
variable = self.toggleBalloonVar,
command = self.toggleBalloon)
# Create the valuator group
self.valuatorGroup = self.createcomponent(
'valuatorGroup',
(('valuator', 'valuatorGroup_valuator'),),
None, ValuatorGroup,
(interior,),
type = self['type'],
dim = self['dim'],
value = self['value'],
min = self['min'],
max = self['max'],
resolution = self['resolution'],
labels = self['labels'],
command = self['command'])
self.valuatorGroup.pack(expand = 1, fill = X)
# Make sure input variables processed
self.initialiseoptions(ValuatorGroupPanel)
def toggleBalloon(self):
if self.toggleBalloonVar.get():
self.balloon.configure(state = 'balloon')
else:
self.balloon.configure(state = 'none')
def _updateLabels(self):
self.valuatorGroup['labels'] = self['labels']
def _setNumDigits(self):
self.valuatorGroup['numDigits'] = self['numDigits']
def _setCommand(self):
self.valuatorGroup['command'] = self['command']
def _setPreCallback(self):
self.valuatorGroup['preCallback'] = self['preCallback']
def _setPostCallback(self):
self.valuatorGroup['postCallback'] = self['postCallback']
def _setCallbackData(self):
self.valuatorGroup['callbackData'] = self['callbackData']
def reset(self):
self.set(self['value'])
Pmw.forwardmethods(ValuatorGroupPanel, ValuatorGroup, 'valuatorGroup')
def rgbPanel(nodePath, callback = None, style = 'mini'):
def onRelease(r, g, b, a, nodePath = nodePath):
messenger.send('RGBPanel_setColor', [nodePath, r, g, b, a])
def popupColorPicker():
# Can pass in current color with: color = (255, 0, 0)
color = tkColorChooser.askcolor(
parent = vgp.interior(),
# Initialize it to current color
initialcolor = tuple(vgp.get()[:3]))[0]
if color:
vgp.set((color[0], color[1], color[2], vgp.getAt(3)))
def printToLog():
c=nodePath.getColor()
print "Vec4(%.3f, %.3f, %.3f, %.3f)"%(c[0], c[1], c[2], c[3])
# Check init color
if nodePath.hasColor():
initColor = nodePath.getColor() * 255.0
else:
initColor = Vec4(255)
# Create entry scale group
vgp = ValuatorGroupPanel(title = 'RGBA Panel: ' + nodePath.getName(),
dim = 4,
labels = ['R','G','B','A'],
value = [int(initColor[0]),
int(initColor[1]),
int(initColor[2]),
int(initColor[3])],
type = 'slider',
valuator_style = style,
valuator_min = 0,
valuator_max = 255,
valuator_resolution = 1,
# Destroy not withdraw panel on dismiss
fDestroy = 1)
# Update menu button
vgp.component('menubar').component('Valuator Group-button')['text'] = (
'RGBA Panel')
# Set callback
vgp['postCallback'] = onRelease
# Add a print button which will also serve as a color tile
pButton = Button(vgp.interior(), text = 'Print to Log',
bg = getTkColorString(initColor),
command = printToLog)
pButton.pack(expand = 1, fill = BOTH)
# Update menu
menu = vgp.component('menubar').component('Valuator Group-menu')
# Some helper functions
# Clear color
menu.insert_command(index = 1, label = 'Clear Color',
command = lambda: nodePath.clearColor())
# Set Clear Transparency
menu.insert_command(index = 2, label = 'Set Transparency',
command = lambda: nodePath.setTransparency(1))
menu.insert_command(
index = 3, label = 'Clear Transparency',
command = lambda: nodePath.clearTransparency())
# System color picker
menu.insert_command(index = 4, label = 'Popup Color Picker',
command = popupColorPicker)
menu.insert_command(index = 5, label = 'Print to log',
command = printToLog)
def setNodePathColor(color):
nodePath.setColor(color[0]/255.0, color[1]/255.0,
color[2]/255.0, color[3]/255.0)
# Update color chip button
pButton['bg'] = getTkColorString(color)
# Execute callback to pass along color info
if callback:
callback(color)
vgp['command'] = setNodePathColor
return vgp
def lightRGBPanel(light, style = 'mini'):
    """Create a 4-slider (R, G, B, A) control panel for a Panda3D light.

    Builds a ValuatorGroupPanel whose sliders drive light.setColor(), with
    a color-chip button that prints the equivalent setColor() call to the
    log and a menu entry that opens the system color picker.

    Arguments:
        light: object exposing getName()/getColor()/setColor() -- presumably
            a Panda3D light NodePath; confirm against callers.
        style: valuator widget style passed through to ValuatorGroupPanel
            (default 'mini').

    Returns the ValuatorGroupPanel so the caller can manage the window.
    """
    # Color picker for lights
    def popupColorPicker():
        # Ask the system color chooser for an RGB triple; alpha is kept
        # from the current slider state.
        # Can pass in current color with: color = (255, 0, 0)
        color = tkColorChooser.askcolor(
            parent = vgp.interior(),
            # Initialize it to current color
            initialcolor = tuple(vgp.get()[:3]))[0]
        if color:
            # Preserve the existing alpha channel (slider index 3)
            vgp.set((color[0], color[1], color[2], vgp.getAt(3)))
    def printToLog():
        # Emit a copy-pasteable setColor() call for the current light color
        n = light.getName()
        c=light.getColor()
        print n + (".setColor(Vec4(%.3f, %.3f, %.3f, %.3f))" %
            (c[0], c[1], c[2], c[3]))
    # Check init color -- light colors are 0..1 floats, sliders are 0..255
    initColor = light.getColor() * 255.0
    # Create entry scale group
    vgp = ValuatorGroupPanel(title = 'RGBA Panel: ' + light.getName(),
                             dim = 4,
                             labels = ['R','G','B','A'],
                             value = [int(initColor[0]),
                                      int(initColor[1]),
                                      int(initColor[2]),
                                      int(initColor[3])],
                             type = 'slider',
                             valuator_style = style,
                             valuator_min = 0,
                             valuator_max = 255,
                             valuator_resolution = 1,
                             # Destroy not withdraw panel on dismiss
                             fDestroy = 1)
    # Update menu button
    vgp.component('menubar').component('Valuator Group-button')['text'] = (
        'Light Control Panel')
    # Add a print button which will also serve as a color tile
    pButton = Button(vgp.interior(), text = 'Print to Log',
                     bg = getTkColorString(initColor),
                     command = printToLog)
    pButton.pack(expand = 1, fill = BOTH)
    # Update menu
    menu = vgp.component('menubar').component('Valuator Group-menu')
    # System color picker
    # NOTE(review): insertion indices 4 and 5 assume the menu already has
    # entries at indices 0-3 -- confirm against ValuatorGroupPanel's menu.
    menu.insert_command(index = 4, label = 'Popup Color Picker',
                        command = popupColorPicker)
    menu.insert_command(index = 5, label = 'Print to log',
                        command = printToLog)
    def setLightColor(color):
        # Slider callback: convert 0..255 slider values back to 0..1 floats
        light.setColor(Vec4(color[0]/255.0, color[1]/255.0,
                            color[2]/255.0, color[3]/255.0))
        # Update color chip button
        pButton['bg'] = getTkColorString(color)
    vgp['command'] = setLightColor
    return vgp
| 38.329301 | 81 | 0.520461 |
aceb88d656c03f87506d152f83828d577bada1ae | 461 | py | Python | summary.py | hito0512/yolov4-tiny-keras | 7de08f794d6d5c048a33d9af38d3b4743cb0b048 | [
"MIT"
] | 141 | 2020-07-02T09:21:22.000Z | 2022-03-21T15:01:19.000Z | summary.py | hito0512/yolov4-tiny-keras | 7de08f794d6d5c048a33d9af38d3b4743cb0b048 | [
"MIT"
] | 18 | 2020-07-01T21:17:26.000Z | 2022-03-30T11:11:48.000Z | summary.py | hito0512/yolov4-tiny-keras | 7de08f794d6d5c048a33d9af38d3b4743cb0b048 | [
"MIT"
] | 54 | 2020-07-07T07:18:12.000Z | 2022-03-18T21:28:53.000Z | #--------------------------------------------#
# 该部分代码用于看网络结构
#--------------------------------------------#
from nets.yolo import yolo_body
if __name__ == "__main__":
input_shape = [416, 416, 3]
anchors_mask = [[3, 4, 5], [1, 2, 3]]
num_classes = 80
model = yolo_body(input_shape, anchors_mask, num_classes, phi = 0)
model.summary()
# for i,layer in enumerate(model.layers):
# print(i,layer.name)
| 28.8125 | 71 | 0.468547 |
aceb89feead143c4f525ab34f3a5bbe45d36afff | 1,347 | py | Python | setup.py | zeitcodes/django-paginator | 2978deb60e2575e0a5245c78e68ce155108acfff | [
"BSD-3-Clause"
] | null | null | null | setup.py | zeitcodes/django-paginator | 2978deb60e2575e0a5245c78e68ce155108acfff | [
"BSD-3-Clause"
] | null | null | null | setup.py | zeitcodes/django-paginator | 2978deb60e2575e0a5245c78e68ce155108acfff | [
"BSD-3-Clause"
] | null | null | null | import os
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
def read_file(filename):
    """Read a file into a string.

    The path is resolved relative to this module's directory; an absolute
    ``filename`` is used as-is (``os.path.join`` discards the base when the
    second component is absolute).

    Returns the file contents, or an empty string if the file cannot be
    opened or read.
    """
    path = os.path.abspath(os.path.dirname(__file__))
    filepath = os.path.join(path, filename)
    try:
        # Context manager guarantees the handle is closed promptly
        # (the original relied on garbage collection to close it).
        with open(filepath) as fobj:
            return fobj.read()
    except IOError:
        # IOError is an alias of OSError on Python 3; missing or
        # unreadable files degrade to an empty string by design.
        return ''
def get_readme():
    """Return the README file contents. Supports text,rst, and markdown"""
    candidates = ('README', 'README.rst', 'README.md')
    # Read the first README variant present in the working directory;
    # fall back to an empty string when none exists.
    found = [candidate for candidate in candidates if os.path.exists(candidate)]
    return read_file(found[0]) if found else ''
# Package metadata and build configuration for django-paginator.
setup(
    name = 'django-paginator',
    # Version string comes from the package itself; spaces (e.g. in a
    # "1.0 alpha" style version) are normalized to dashes.
    version = __import__('paginator').get_version().replace(' ', '-'),
    url = 'https://bitbucket.org/nextscreenlabs/django-paginator',
    author = 'Jason Christa',
    author_email = 'jason@zeitcode.com',
    description = 'A template tag that works with Django\'s Paginator class',
    # Long description is pulled from whichever README variant exists.
    long_description = get_readme(),
    packages = find_packages(exclude=['tests']),
    include_package_data = True,
    # NOTE(review): read_file returns one string, not a list of
    # requirement lines -- setuptools tolerates this only for a single
    # requirement per line; confirm requirements.txt format.
    install_requires = read_file('requirements.txt'),
    classifiers = [
        'Environment :: Web Environment',
        'License :: OSI Approved :: BSD License',
        'Framework :: Django',
        'Programming Language :: Python',
    ],
)
| 31.325581 | 78 | 0.628805 |
aceb8a52b9088c02cefa8e3755e636ae6f450f76 | 8,744 | py | Python | pyfesom2/pfinterp.py | JiuShiNewBee/mypyfesom2 | d84adad116888f83b89813e1a86ce8a233171138 | [
"MIT"
] | null | null | null | pyfesom2/pfinterp.py | JiuShiNewBee/mypyfesom2 | d84adad116888f83b89813e1a86ce8a233171138 | [
"MIT"
] | null | null | null | pyfesom2/pfinterp.py | JiuShiNewBee/mypyfesom2 | d84adad116888f83b89813e1a86ce8a233171138 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import argparse
from collections import OrderedDict
import numpy as np
from .load_mesh_data import get_data, load_mesh, ind_for_depth
from .regriding import fesom2regular
from .ut import mask_ne, set_standard_attrs
import xarray as xr
def pfinterp():
    """Command-line tool: interpolate FESOM2 output to a regular lon/lat grid.

    All inputs come from ``sys.argv`` (see the argparse options below);
    there are no parameters and no return value. The side effect is a
    netCDF file written to the path given by ``--ofile``.
    """
    # ---- command-line interface ------------------------------------------
    parser = argparse.ArgumentParser(
        prog="pfinterp", description="Interpolates FESOM2 data to regular grid."
    )
    parser.add_argument("meshpath", help="Path to the mesh folder")
    parser.add_argument("result_path", help="Path to the results")
    parser.add_argument(
        "variable", default="temp", help="Name of the variable inside the file"
    )
    parser.add_argument(
        "--years",
        "-y",
        default="1948",
        type=str,
        help="Years as a string. Options are one year, coma separated years, range in a form of 1948:2000 or * for everything.",
    )
    parser.add_argument(
        "--depths", "-d", default="0", type=str, help="Depths in meters. \
        Closest values from model levels will be taken.\
        Several options available: number - e.g. '100',\
        coma separated list - e.g. '0,10,100,200',\
        -1 - all levels will be selected."
    )
    parser.add_argument(
        "--box",
        "-b",
        nargs=4,
        type=float,
        default=[-180.0, 180.0, -80.0, 90.0],
        help="Map boundaries in -180 180 -90 90 format that will be used for interpolation.",
        metavar=("LONMIN", "LONMAX", "LATMIN", "LATMAX"),
    )
    parser.add_argument(
        "--res",
        "-r",
        nargs=2,
        type=int,
        default=(360, 170),
        help="Number of points along each axis that will be used for interpolation (for lon and lat).",
        metavar=("N_POINTS_LON", "N_POINTS_LAT"),
    )
    parser.add_argument(
        "--influence",
        "-i",
        default=80000,
        type=float,
        help="Radius of influence for interpolation, in meters.",
    )
    parser.add_argument(
        "--timesteps",
        "-t",
        default="-1",
        type=str,
        help="Explicitly define timesteps of the input fields. There are several oprions:\
            '-1' - all time steps, number - one time step (e.g. '5'), numbers - coma separated (e.g. '0, 3, 8, 10'), slice - e.g. '5:10',\
            slice with steps - e.g. '8:-1:12'.",
    )
    parser.add_argument(
        "--quiet",
        "-q",
        action="store_true",
        help="If present additional information will not be printed.",
    )
    parser.add_argument(
        "--ofile",
        "-o",
        default="out.nc",
        type=str,
        help="Path to the output file. Default is ./out.nc.",
    )
    parser.add_argument(
        "--abg",
        nargs=3,
        type=float,
        default=(0.0, 0.0, 0.0),
        help="Alpha, beta and gamma Euler angles. If you plots look rotated, you use wrong abg values. Usually nessesary only during the first use of the mesh.",
    )
    parser.add_argument(
        "--interp",
        choices=["nn", "idist", "linear", "cubic"],
        default="nn",
        help="Interpolation method. Options are nn - nearest neighbor (KDTree implementation, fast), idist - inverse distance (KDTree implementation, decent speed), linear (scipy implementation, slow) and cubic (scipy implementation, slowest and give strange results on corarse meshes).",
    )
    parser.add_argument(
        "-k",
        type=int,
        default=1,
        help="k-th nearest neighbors to use. Only used when interpolation method (--interp) is idist",
    )
    args = parser.parse_args()
    # args.func(args)
    if not args.quiet:
        print("Mesh path: {}".format(args.meshpath))
        print("Input file path: {}".format(args.result_path))
        print("Name of the variable: {}".format(args.variable))
        print("Years: {}".format(args.years))
        print("Depths: {}".format(args.depths))
        print("Bounding box: {}".format(args.box))
        print("Number of points along sides: {}".format(args.res))
        print("Radius of influence (in m.): {}".format(args.influence))
        print("Nearest neighbors to use: {}".format(args.k))
        print("Timesteps index: {}".format(args.timesteps))
        print("Quiet?: {}".format(args.quiet))
        print("Output file: {}".format(args.ofile))
        print("Euler angles of mesh rotation: {}".format(args.abg))
        print("Interpolation method: {}".format(args.interp))

    # ---- parse the --years string into a list/range of years -------------
    # NOTE(review): the '*' option advertised in the --years help text is
    # not handled here (int('*') would raise ValueError), and a 'a:b' range
    # excludes the end year -- TODO confirm intended behavior.
    years = args.years
    if len(years.split(":")) == 2:
        y = range(int(years.split(":")[0]), int(years.split(":")[1]))
    elif len(years.split(",")) > 1:
        y = list(map(int, years.split(",")))
    else:
        y = [int(years)]
    years = y

    # ---- parse the --timesteps string into a slice/list/index ------------
    # args.timesteps = [0,1]
    timesteps = args.timesteps
    # BUGFIX: the 'a:b:c' case below was originally a separate `if`, which
    # detached the following `elif` chain from the 'a:b' case, so input like
    # '5:10' fell through to int('5:10') and raised ValueError. The chain is
    # now a single if/elif so each format is handled exactly once.
    if len(timesteps.split(":")) == 2:
        y = slice(int(timesteps.split(":")[0]), int(timesteps.split(":")[1]))
    elif len(timesteps.split(":")) == 3:
        y = slice(int(timesteps.split(":")[0]),
                  int(timesteps.split(":")[1]),
                  int(timesteps.split(":")[2]))
    elif len(timesteps.split(",")) > 1:
        y = list(map(int, timesteps.split(",")))
    elif int(timesteps) == -1:
        # -1 means "all records" downstream in get_data
        y = -1
    else:
        y = [int(timesteps)]
    timesteps = y
    print("timesteps {}".format(timesteps))

    # ---- load the FESOM mesh ---------------------------------------------
    mesh = load_mesh(args.meshpath, abg=args.abg, usepickle=True, usejoblib=False)

    # ---- parse the --depths string and map depths to model level indices -
    depths = args.depths
    if len(depths.split(",")) > 1:
        depths = list(map(int, depths.split(",")))
    elif int(depths) == -1:
        depths = [-1]
    else:
        depths = [int(depths)]
    print(depths)
    if depths[0] == -1:
        # all model levels
        dind = range(mesh.zlev.shape[0])
        realdepth = mesh.zlev
    else:
        # closest model level for each requested depth
        dind = []
        realdepth = []
        for depth in depths:
            ddepth = ind_for_depth(depth, mesh)
            dind.append(ddepth)
            realdepth.append(mesh.zlev[ddepth])
    print(dind)
    print(realdepth)

    # ---- lazily open the requested variable ------------------------------
    data = get_data(
        result_path=args.result_path,
        variable=args.variable,
        years=years,
        mesh=mesh,
        runid="fesom",
        records=timesteps,
        depth=None,
        how=None,
        ncfile=None,
        compute=False,
    )
    # select levels; 2D variables have one level less than mesh.zlev,
    # hence the trim of the last index when dind is too long
    if len(dind) <= data.shape[2]:
        data = data.isel(nz1=dind)
    elif len(dind) > data.shape[2]:
        dind = dind[:-1]
        realdepth = realdepth[:-1]
        data = data.isel(nz1=dind)

    # ---- build the regular target grid -----------------------------------
    left, right, down, up = args.box
    lonNumber, latNumber = args.res
    lonreg = np.linspace(left, right, lonNumber)
    latreg = np.linspace(down, up, latNumber)
    lonreg2, latreg2 = np.meshgrid(lonreg, latreg)

    # ---- allocate the output DataArray (time, depth, lat, lon) -----------
    dshape = data.shape
    empty_data = np.empty((dshape[0], dshape[2], latNumber, lonNumber ))
    da = xr.DataArray(empty_data, dims=['time', 'depth_coord', 'lat', 'lon'],
                      coords={'time':data.time,
                              'depth_coord':realdepth,
                              'lat':latreg2[:,0].flatten(),
                              'lon':lonreg2[0,:].flatten()},
                      name=args.variable,
                      attrs=data.attrs)
    da = set_standard_attrs(da)
    # land mask on the regular grid (True over land)
    m2 = mask_ne(lonreg2, latreg2)

    # ---- interpolate each (time, depth) slice to the regular grid --------
    for timestep in range(da.time.shape[0]):
        for depth_ind in range(da.depth_coord.shape[0]):
            interp_data = fesom2regular(
                data[timestep,:,depth_ind].values,
                mesh,
                lonreg2,
                latreg2,
                distances_path=None,
                inds_path=None,
                qhull_path=None,
                how=args.interp,
                k=args.k,
                radius_of_influence=args.influence,
                n_jobs=2,
                dumpfile=True,
                basepath=None,
            )
            # mask land and exact zeros (fill) before storing
            interp_data = np.ma.masked_where(m2, interp_data)
            interp_data = np.ma.masked_equal(interp_data, 0)
            da[timestep, depth_ind,:,:] = interp_data[:]

    # ---- write the result ------------------------------------------------
    da.to_netcdf(args.ofile)
# parser.set_defaults(func=pfinterp)
if __name__ == "__main__":
    # Script entry point: argument parsing happens inside pfinterp() itself.
    # The commented-out lines are leftovers from a subcommand-style dispatch
    # that was never enabled.
    # args = parser.parse_args()
    # args.func(args)
    pfinterp()
| 36.282158 | 288 | 0.521958 |
aceb8a9f062199c5dbb92895b1cdade1a8696637 | 9,236 | py | Python | SMBcorr/mar_extrap_seasonal.py | SmithB/SMBcorr | 7c35cf8383058986fd82b28beab3c5580f9f8667 | [
"MIT"
] | null | null | null | SMBcorr/mar_extrap_seasonal.py | SmithB/SMBcorr | 7c35cf8383058986fd82b28beab3c5580f9f8667 | [
"MIT"
] | null | null | null | SMBcorr/mar_extrap_seasonal.py | SmithB/SMBcorr | 7c35cf8383058986fd82b28beab3c5580f9f8667 | [
"MIT"
] | 1 | 2020-08-06T19:48:52.000Z | 2020-08-06T19:48:52.000Z | #!/usr/bin/env python
u"""
mar_extrap_seasonal.py
Written by Tyler Sutterley (06/2020)
Interpolates seasonal MAR products to times and coordinates
Seasonal files are climatology files for each day of the year
Uses fast nearest-neighbor search algorithms
https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.BallTree.html
https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KDTree.html
and inverse distance weighted interpolation to extrapolate spatially
INPUTS:
DIRECTORY: full path to the MAR data directory
<path_to_mar>/MARv3.11/Greenland/ERA_1958-2019-15km/daily_15km
<path_to_mar>/MARv3.11/Greenland/NCEP1_1948-2020_20km/daily_20km
<path_to_mar>/MARv3.10/Greenland/NCEP1_1948-2019_20km/daily_20km
<path_to_mar>/MARv3.9/Greenland/ERA_1958-2018_10km/daily_10km
EPSG: projection of input spatial coordinates
tdec: dates to interpolate in year-decimal
X: x-coordinates to interpolate in projection EPSG
Y: y-coordinates to interpolate in projection EPSG
OPTIONS:
XNAME: x-coordinate variable name in MAR netCDF4 file
YNAME: x-coordinate variable name in MAR netCDF4 file
TIMENAME: time variable name in MAR netCDF4 file
VARIABLE: MAR product to interpolate
RANGE: start year and end year of seasonal file
SIGMA: Standard deviation for Gaussian kernel
SEARCH: nearest-neighbor search algorithm (BallTree or KDTree)
NN: number of nearest-neighbor points to use
POWER: inverse distance weighting power
FILL_VALUE: output fill_value for invalid points
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
scipy: Scientific Tools for Python
https://docs.scipy.org/doc/
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
pyproj: Python interface to PROJ library
https://pypi.org/project/pyproj/
scikit-learn: Machine Learning in Python
https://scikit-learn.org/stable/index.html
https://github.com/scikit-learn/scikit-learn
UPDATE HISTORY:
Written 06/2020
"""
from __future__ import print_function
import sys
import os
import re
import pyproj
import netCDF4
import numpy as np
import scipy.spatial
import scipy.ndimage
import scipy.interpolate
from sklearn.neighbors import KDTree, BallTree
#-- PURPOSE: read and interpolate a seasonal field of MAR outputs
def extrapolate_mar_seasonal(DIRECTORY, EPSG, VERSION, tdec, X, Y,
    XNAME=None, YNAME=None, TIMENAME='TIME', VARIABLE='SMB',
    RANGE=[2000,2019], SIGMA=1.5, SEARCH='BallTree', NN=10, POWER=2.0,
    FILL_VALUE=None):
    """Read a seasonal (day-of-year climatology) MAR file, smooth and
    accumulate the requested VARIABLE, then extrapolate its cumulative
    value to the (tdec, X, Y) query points.

    Time interpolation is linear between the two bracketing days of the
    climatological year; spatial extrapolation uses inverse-distance
    weighting over the NN nearest ice-covered model cells.

    Arguments:
        DIRECTORY: path containing the 'MARseasonal*<RANGE>.nc' file
        EPSG: projection string of the input X/Y coordinates (proj4 +init form)
        VERSION: unused here -- kept for interface parity with sibling readers
        tdec: dates in year-decimal (only the fractional year is used)
        X, Y: query coordinates in the EPSG projection
        XNAME/YNAME/TIMENAME: netCDF coordinate variable names
        VARIABLE: MAR product name (e.g. 'SMB')
        RANGE: [start, end] years encoded in the seasonal filename
        SIGMA: Gaussian smoothing standard deviation (grid cells)
        SEARCH: 'BallTree' or 'KDTree' nearest-neighbor structure
        NN: number of neighbors for inverse-distance weighting
        POWER: inverse-distance weighting exponent
        FILL_VALUE: fill value for invalid output points

    Returns a masked array of length len(tdec) with extrapolated values.
    """
    #-- regular expression pattern for MAR dataset
    rx = re.compile('MARseasonal(.*?){0}-{1}.nc$'.format(*RANGE))
    #-- find mar seasonal file for RANGE (exactly one match expected;
    #-- the unpacking raises ValueError otherwise)
    FILE, = [f for f in os.listdir(DIRECTORY) if rx.match(f)]
    #-- Open the MAR NetCDF file for reading (first pass: dimensions only)
    with netCDF4.Dataset(os.path.join(DIRECTORY,FILE), 'r') as fileID:
        nx = len(fileID.variables[XNAME][:])
        ny = len(fileID.variables[YNAME][:])
        #-- add 1 to use january 1st as day 366
        nt = len(fileID.variables[TIMENAME][:]) + 1
    #-- python dictionary with file variables
    fd = {}
    #-- day-of-year converted to fractional years (0 .. ~1)
    fd['TIME'] = np.arange(nt)/365.0
    #-- create a masked array with all data
    #-- NOTE(review): np.bool/np.float/np.int below are removed in
    #-- NumPy >= 1.24 -- this code requires an older NumPy.
    fd[VARIABLE] = np.ma.zeros((nt,ny,nx),fill_value=FILL_VALUE)
    fd[VARIABLE].mask = np.zeros((nt,ny,nx),dtype=np.bool)
    #-- python dictionary with gaussian filtered variables
    gs = {}
    #-- use a gaussian filter to smooth each model field
    gs[VARIABLE] = np.ma.zeros((nt,ny,nx), fill_value=FILL_VALUE)
    gs[VARIABLE].mask = np.ones((nt,ny,nx), dtype=np.bool)
    #-- calculate cumulative sum of gaussian filtered values
    cumulative = np.zeros((ny,nx))
    gs['CUMULATIVE'] = np.ma.zeros((nt,ny,nx), fill_value=FILL_VALUE)
    gs['CUMULATIVE'].mask = np.ones((nt,ny,nx), dtype=np.bool)
    #-- Open the MAR NetCDF file for reading (second pass: data)
    with netCDF4.Dataset(os.path.join(DIRECTORY,FILE), 'r') as fileID:
        #-- surface type
        SRF=fileID.variables['SRF'][:]
        #-- indices of specified ice mask (SRF == 4 is permanent ice)
        i,j=np.nonzero(SRF == 4)
        #-- ice fraction (percent -> fraction)
        FRA=fileID.variables['FRA'][:]/100.0
        #-- Get data from netCDF variable and remove singleton dimensions
        tmp=np.squeeze(fileID.variables[VARIABLE][:])
        #-- combine sectors for multi-layered data
        if (np.ndim(tmp) == 4):
            #-- create mask for combining data
            MASK=np.zeros((nt,ny,nx))
            MASK[:,i,j]=FRA[:,0,i,j]
            #-- combine data: weighted blend of the two sectors by ice fraction
            fd[VARIABLE][:-1,:,:] = MASK*tmp[:,0,:,:] + \
                (1.0-MASK)*tmp[:,1,:,:]
        else:
            #-- copy data
            fd[VARIABLE][:-1,:,:] = tmp.copy()
        #-- use january 1st as time 366 (wrap the climatological year)
        fd[VARIABLE][-1,:,:] = np.copy(fd[VARIABLE][0,:,:])
        #-- verify mask object for interpolating data (mask out non-ice)
        surf_mask = np.broadcast_to(SRF, (nt,ny,nx))
        fd[VARIABLE].mask[:,:,:] |= (surf_mask != 4)
        #-- combine mask object through time to create a single mask
        fd['MASK']=1.0-np.any(fd[VARIABLE].mask,axis=0).astype(np.float)
        #-- MAR coordinates
        fd['LON']=fileID.variables['LON'][:,:].copy()
        fd['LAT']=fileID.variables['LAT'][:,:].copy()
        #-- convert x and y coordinates to meters (file stores km)
        fd['x']=1000.0*fileID.variables[XNAME][:].copy()
        fd['y']=1000.0*fileID.variables[YNAME][:].copy()
    #-- use a gaussian filter to smooth mask
    gs['MASK']=scipy.ndimage.gaussian_filter(fd['MASK'],SIGMA,
        mode='constant',cval=0)
    #-- indices of smoothed ice mask
    ii,jj = np.nonzero(np.ceil(gs['MASK']) == 1.0)
    #-- for each time
    for t in range(nt):
        #-- replace fill values before smoothing data
        temp1 = np.zeros((ny,nx))
        i,j = np.nonzero(~fd[VARIABLE].mask[t,:,:])
        temp1[i,j] = fd[VARIABLE][t,i,j].copy()
        #-- smooth spatial field
        temp2 = scipy.ndimage.gaussian_filter(temp1, SIGMA,
            mode='constant', cval=0)
        #-- scale output smoothed field (renormalize by the smoothed mask)
        gs[VARIABLE].data[t,ii,jj] = temp2[ii,jj]/gs['MASK'][ii,jj]
        #-- replace valid values with original
        gs[VARIABLE].data[t,i,j] = temp1[i,j]
        #-- set mask variables for time
        gs[VARIABLE].mask[t,ii,jj] = False
        #-- calculate cumulative
        cumulative[ii,jj] += gs[VARIABLE][t,ii,jj]
        gs['CUMULATIVE'].data[t,ii,jj] = np.copy(cumulative[ii,jj])
        gs['CUMULATIVE'].mask[t,ii,jj] = False
    #-- convert MAR latitude and longitude to input coordinates (EPSG)
    #-- NOTE(review): the '+init=' proj4 syntax is deprecated in pyproj 2+
    proj1 = pyproj.Proj("+init={0}".format(EPSG))
    proj2 = pyproj.Proj("+init=EPSG:{0:d}".format(4326))
    xg,yg = pyproj.transform(proj2, proj1, fd['LON'], fd['LAT'])
    #-- construct search tree from original points
    #-- can use either BallTree or KDTree algorithms
    #-- NOTE(review): i,j here are whatever the *last* loop iteration left
    #-- them as (valid cells of the final time slice), and the same i,j are
    #-- used for var1/var2 below -- presumably all time slices share one
    #-- valid-cell set; confirm this assumption holds for the data.
    xy1 = np.concatenate((xg[i,j,None],yg[i,j,None]),axis=1)
    tree = BallTree(xy1) if (SEARCH == 'BallTree') else KDTree(xy1)
    #-- calculate the modulus of the time in year-decimal
    tmod = tdec % 1
    #-- number of output data points
    npts = len(tdec)
    #-- output interpolated arrays of output variable
    extrap = np.ma.zeros((npts),fill_value=FILL_VALUE,dtype=np.float)
    extrap.mask = np.ones((npts),dtype=np.bool)
    #-- initially set all values to fill value
    extrap.data[:] = extrap.fill_value
    #-- find indices for linearly interpolating in time
    f = scipy.interpolate.interp1d(fd['TIME'], np.arange(nt), kind='linear')
    date_indice = f(tmod).astype(np.int)
    #-- for each unique model date
    #-- linearly interpolate in time between two model maps
    #-- then use inverse distance weighting to extrapolate in space
    for k in np.unique(date_indice):
        kk, = np.nonzero(date_indice==k)
        count = np.count_nonzero(date_indice==k)
        #-- query the search tree to find the NN closest points
        xy2 = np.concatenate((X[kk,None],Y[kk,None]),axis=1)
        dist,indices = tree.query(xy2, k=NN, return_distance=True)
        #-- normalized weights if POWER > 0 (typically between 1 and 3)
        #-- in the inverse distance weighting
        power_inverse_distance = dist**(-POWER)
        s = np.sum(power_inverse_distance, axis=1)
        w = power_inverse_distance/np.broadcast_to(s[:,None],(count,NN))
        #-- variable for times before and after tdec
        var1 = gs['CUMULATIVE'][k,i,j]
        var2 = gs['CUMULATIVE'][k+1,i,j]
        #-- linearly interpolate to date
        dt = (tmod[kk] - fd['TIME'][k])/(fd['TIME'][k+1] - fd['TIME'][k])
        #-- spatially extrapolate using inverse distance weighting
        extrap.data[kk] = (1.0-dt)*np.sum(w*var1[indices],axis=1) + \
            dt*np.sum(w*var2[indices], axis=1)
    #-- complete mask if any invalid in data
    invalid, = np.nonzero((extrap.data == extrap.fill_value) |
        np.isnan(extrap.data))
    extrap.mask[invalid] = True
    #-- return the interpolated values
    return extrap
| 44.191388 | 81 | 0.644435 |
aceb8abb3a7f9c1f24c32b4684d1bc40821f37e3 | 14,273 | py | Python | magpie/montecarlo/box2ring.py | knaidoo29/magpie | efab3c2666aab2c928ca12a631758bc1b43c149c | [
"MIT"
] | null | null | null | magpie/montecarlo/box2ring.py | knaidoo29/magpie | efab3c2666aab2c928ca12a631758bc1b43c149c | [
"MIT"
] | null | null | null | magpie/montecarlo/box2ring.py | knaidoo29/magpie | efab3c2666aab2c928ca12a631758bc1b43c149c | [
"MIT"
] | null | null | null | import numpy as np
from .. import coords
from .. import polar
from .. import randoms
from .. import utils
class Box2Ring:
    """Remaps pixels given on 2D grid to polar grid.

    Note
    ----
    p is used as a shorthand for phi in the naming of variables.

    Example
    -------
    import numpy as np
    import magpie

    # Initialise magpie Box2Ring class
    b2r = magpie.mc.Box2Ring()

    # Setup the box grid
    b2r.setup_box(-20., 20., 100, -20., 20., 100)

    # Create an 'interesting' function for remapping
    # I'm using a Bessel function of order 5 multiplied by a sine wave
    f = np.zeros(np.shape(b2r.x2d))
    r, phi = magpie.coords.cart2polar(b2r.x2d, b2r.y2d)
    f = special.jv(5, r)*np.sin(phi*5)

    # Construct polar grid
    b2r.setup_polar_lin(0., 20., 50, 150, center=[0., 0.])

    # Calculate monte carlo weights for remapping
    b2r.get_weights()

    # The weights for a particular remapping can be stored.

    # Now, let's remap the function from a linear cartesian grid to polar coordinates
    f_polar = b2r.remap(f)
    """

    def __init__(self):
        """Initialises the class."""
        # Cartesian grid: bin edges, bin centers, bin widths and 2D center meshes
        self.xedges = None
        self.yedges = None
        self.xmid = None
        self.ymid = None
        self.dx = None
        self.dy = None
        self.x2d = None
        self.y2d = None
        # Polar grid: radial/angular edges, centers, widths and 2D meshes
        self.redges = None
        self.pedges = None
        self.rmid = None
        self.pmid = None
        self.dr = None
        self.dp = None
        self.r2d = None
        self.p2d = None
        # Polar grid placement: origin in cartesian coordinates and rotation
        self.center = None
        self.phi_shift = None
        # Monte Carlo weights: per polar pixel, the cartesian pixel indices
        # and overlap fractions (filled by get_weights)
        self.ind_xs = None
        self.ind_ys = None
        self.ind_ws = None
        # Cached areas for polar2radial and the in-bound mask
        self.area2d = None
        self.pixarea = None
        self.mask_in_bound = None

    def setup_box(self, xmin, xmax, numx, ymin, ymax, numy):
        """Setups the box grid.

        Parameters
        ----------
        xmin : float
            Minimum x.
        xmax : float
            Maximum x.
        numx : int
            Number of bins along x-axis.
        ymin : float
            Minimum y.
        ymax : float
            Maximum y.
        numy : int
            Number of bins along y-axis.
        """
        assert numx > 0, "numx must be greater than zero."
        assert numy > 0, "numy must be greater than zero."
        self.xedges = np.linspace(xmin, xmax, numx+1)
        self.yedges = np.linspace(ymin, ymax, numy+1)
        # bin centers from edges
        self.xmid = 0.5*(self.xedges[1:] + self.xedges[:-1])
        self.ymid = 0.5*(self.yedges[1:] + self.yedges[:-1])
        self.dx = self.xedges[1] - self.xedges[0]
        self.dy = self.yedges[1] - self.yedges[0]
        self.x2d, self.y2d = np.meshgrid(self.xmid, self.ymid)

    def setup_polar_lin(self, rmin, rmax, numr, nump, pmin=0., pmax=2.*np.pi,
                        center=[0., 0.], phi_shift=0.):
        """Setups the polar grid.

        Parameters
        ----------
        rmin : float
            Minimum r.
        rmax : float
            Maximum r.
        numr : int
            Number of bins along r-axis.
        nump : int
            Number of bins along phi-axis.
        pmin : float
            Minimum phi (default=0).
        pmax : float
            Maximum phi (default=2pi).
        center : list
            Center point of polar coordinate grid.
        phi_shift : float
            Rotation to the polar coordinate grid, given in radians within a range
            of 0 and 2pi.
        """
        assert rmin >= 0., "rmin must be greater or equal to zero."
        assert rmin < rmax, "rmin must be smaller than rmax."
        assert numr > 0, "numr must be greater than zero."
        assert pmin >= 0., "pmin must be greater or equal to zero."
        assert pmin < 2.*np.pi, "pmin must be smaller than 2pi."
        assert pmin < pmax, "pmin must be smaller than pmax."
        assert pmax > 0., "pmax must be greater than zero."
        assert pmax <= 2.*np.pi, "pmax must be smaller of equal to 2pi."
        assert nump > 0, "nump must be greater than zero."
        assert len(center) == 2, "center list must have length 2."
        self.redges = np.linspace(rmin, rmax, numr+1)
        self.pedges = np.linspace(pmin, pmax, nump+1)
        self.rmid = 0.5*(self.redges[1:] + self.redges[:-1])
        self.pmid = 0.5*(self.pedges[1:] + self.pedges[:-1])
        self.dr = self.rmid[1] - self.rmid[0]
        self.dp = self.pmid[1] - self.pmid[0]
        self.r2d, self.p2d = np.meshgrid(self.rmid, self.pmid)
        self.center = center
        self.phi_shift = phi_shift

    def setup_polar_log(self, rmin, rmax, numr, nump, pmin=0., pmax=2.*np.pi,
                        center=[0., 0.], phi_shift=0., addzero=True):
        """Setups the polar grid with logarithmic radial bins.

        Parameters
        ----------
        rmin : float
            Minimum r, must be r > 0.
        rmax : float
            Maximum r.
        numr : int
            Number of bins along r-axis.
        nump : int
            Number of bins along phi-axis.
        pmin : float
            Minimum phi (default=0).
        pmax : float
            Maximum phi (default=2pi).
        center : list
            Center point of polar coordinate grid.
        phi_shift : float
            Rotation to the polar coordinate grid, given in radians within a range
            of 0 and 2pi.
        addzero : bool
            Adds r=0 to the radial edges.
        """
        assert rmin > 0., "rmin must be greater than zero."
        assert rmin < rmax, "rmin must be smaller than rmax."
        assert numr > 0, "numr must be greater than zero."
        assert pmin >= 0., "pmin must be greater or equal to zero."
        assert pmin < 2.*np.pi, "pmin must be smaller than 2pi."
        assert pmin < pmax, "pmin must be smaller than pmax."
        assert pmax > 0., "pmax must be greater than zero."
        assert pmax <= 2.*np.pi, "pmax must be smaller of equal to 2pi."
        assert nump > 0, "nump must be greater than zero."
        assert len(center) == 2, "center list must have length 2."
        if addzero == True:
            # prepend r=0 so the innermost bin spans [0, rmin]
            self.redges = np.zeros(numr+1)
            self.redges[1:] = np.logspace(np.log10(rmin), np.log10(rmax), numr)
        else:
            self.redges = np.logspace(np.log10(rmin), np.log10(rmax), numr+1)
        self.pedges = np.linspace(pmin, pmax, nump+1)
        self.rmid = 0.5*(self.redges[1:] + self.redges[:-1])
        self.pmid = 0.5*(self.pedges[1:] + self.pedges[:-1])
        # dr is an array here (log bins have unequal widths), unlike
        # the scalar dr set in setup_polar_lin
        self.dr = self.redges[1:] - self.redges[:-1]
        self.dp = self.pmid[1] - self.pmid[0]
        self.r2d, self.p2d = np.meshgrid(self.rmid, self.pmid)
        self.center = center
        self.phi_shift = phi_shift

    def get_weights(self, mc_size=10000, verbose=True):
        """Calculates Monte Carlo pixel weights for remapping from cartesian grid
        to polar coordinate grid.

        Parameters
        ----------
        mc_size : int
            Size of the Monte Carlo random points for calculating weights from Monte
            Carlo integration.
        verbose : bool
            If true will output a progress bar.
        """
        assert mc_size > 1, "mc_size must be greater than 1, ideally greater than 100."
        ind_xs = []
        ind_ys = []
        ind_ws = []
        # loop over polar pixels: i indexes phi rows, j indexes radial bins
        for i in range(0, len(self.r2d)):
            _ind_xs = []
            _ind_ys = []
            _ind_ws = []
            for j in range(0, len(self.r2d[0])):
                rmin, rmax = self.redges[j], self.redges[j+1]
                pmin, pmax = self.pedges[i], self.pedges[i+1]
                # draw uniform randoms inside the polar pixel
                rrand, prand = randoms.randoms_polar(mc_size, rmin, rmax, pmin, pmax)
                if self.phi_shift != 0.:
                    # apply the grid rotation and wrap angles back to [0, 2pi)
                    prand += self.phi_shift
                    condition = np.where(prand > 2.*np.pi)
                    prand[condition] -= 2.*np.pi
                    condition = np.where(prand < 0.)
                    prand[condition] += 2.*np.pi
                xrand, yrand = coords.polar2cart(rrand, prand, center=self.center)
                # histogram the random points onto the cartesian grid; the
                # counts give the fractional overlap of this polar pixel
                # with each cartesian pixel
                H, _ = np.histogramdd((xrand, yrand), bins=[len(self.xedges)-1, len(self.yedges)-1],
                                      range=[[self.xedges[0], self.xedges[-1]],[self.yedges[0], self.yedges[-1]]])
                condition = np.where(H != 0.)
                # NOTE(review): axes are swapped here (ind_x takes
                # condition[1]) -- this pairs with the f[ind_xs, ind_ys]
                # indexing in remap(); confirm against the meshgrid layout
                # of x2d/y2d before changing either side.
                ind_x = condition[1]
                ind_y = condition[0]
                ind_w = H[condition]/float(mc_size)
                _ind_xs.append(ind_x)
                _ind_ys.append(ind_y)
                _ind_ws.append(ind_w)
            ind_xs.append(np.array(_ind_xs, dtype=object))
            ind_ys.append(np.array(_ind_ys, dtype=object))
            ind_ws.append(np.array(_ind_ws, dtype=object))
            if verbose == True:
                utils.progress_bar(i, len(self.r2d), explanation='Calculating weights')
        self.ind_xs = np.array(ind_xs, dtype=object)
        self.ind_ys = np.array(ind_ys, dtype=object)
        self.ind_ws = np.array(ind_ws, dtype=object)

    def remap(self, f, w=None, verbose=True):
        """Remaps 2d grid data f onto polar coordinate grid.

        Parameters
        ----------
        f : 2darray
            2d pixel data.
        w : 2darray
            Weights.
        verbose : bool
            If true will output a progress bar.

        Returns
        -------
        f_polar : 2darray
            Remapped 2d data onto polar coordinate grid. Pixels with zero
            total weight (fully outside the cartesian box) are set to NaN.
        """
        assert np.shape(f) == np.shape(self.x2d), "Shape of f does not match stored cartesian grid."
        if w is not None:
            assert np.shape(w) == np.shape(self.x2d), "Shape of w does not match stored cartesian grid."
        f_polar = np.zeros(np.shape(self.r2d))
        for i in range(0, len(self.r2d)):
            for j in range(0, len(self.r2d[0])):
                if w is None:
                    # weighted mean of overlapping cartesian pixels using
                    # the Monte Carlo overlap fractions only
                    if np.sum(self.ind_ws[i, j]) != 0.:
                        f_polar[i, j] = np.sum(self.ind_ws[i, j]*f[self.ind_xs[i, j], self.ind_ys[i, j]])
                        f_polar[i, j] /= np.sum(self.ind_ws[i, j])
                    else:
                        f_polar[i, j] = np.nan
                else:
                    # additionally weight by the user-supplied w (e.g. a mask)
                    if np.sum(self.ind_ws[i, j]) != 0.:
                        f_polar[i, j] = np.sum(self.ind_ws[i, j]*w[self.ind_xs[i, j], self.ind_ys[i, j]]*f[self.ind_xs[i, j], self.ind_ys[i, j]])
                        f_polar[i, j] /= np.sum(self.ind_ws[i, j]*w[self.ind_xs[i, j], self.ind_ys[i, j]])
                    else:
                        f_polar[i, j] = np.nan
            if verbose == True:
                utils.progress_bar(i, len(self.r2d), explanation='Remapping')
        return f_polar

    def get_mask_in_bound(self, verbose=True):
        """Returns a mask showing whether the polar grid remains inside the boundaries
        of the initial cartesian grid.

        Parameters
        ----------
        verbose : bool
            If true will print progress, etc.
        """
        assert self.x2d is not None, "Cartesian grid is not defined."
        assert self.r2d is not None, "Polar grid is not defined."
        if verbose == True:
            print("Construct mask_in_bound...")
        # remap a field of ones; polar pixels outside the box come back
        # as NaN and are zeroed in the mask
        mask = np.ones(np.shape(self.x2d))
        mask_in_bound = self.remap(mask, verbose=verbose)
        condition = np.where(np.isfinite(mask_in_bound) == False)
        mask_in_bound[condition] = 0.
        self.mask_in_bound = mask_in_bound

    def rotate_polar(self, f_polar, phi_shift):
        """Rotates polar coordinate grid by phi_shift.

        Parameters
        ----------
        f_polar : 2darray
            Polar coordinate gridded data.
        phi_shift : float
            Rotation to be applied, given in radians within a range of 0 and 2pi.

        Returns
        -------
        f_polar_rot : 2darray
            Rotated polar coordinate data.
        """
        assert np.shape(f_polar) == np.shape(self.p2d), "Shape of f_polar does not match stored polar coordinate grid."
        return coords.rotate_polar(self.pedges, f_polar, phi_shift)

    def polar2radial(self, f_polar, sigma=None, w=None, verbose=False):
        """Calculates the radial mean of data provided in a polar coordinate grid
        which originates from a 2D cartesian grid.

        Parameters
        ----------
        f_polar : ndarray
            2D array of a function f in polar coordinates.
        sigma : ndarray
            2D array of the noise for function f in polar coordinates.
        w : ndarray
            2D array containing weights for each pixel in polar grid, ideal for adding
            a binary mask.
        verbose : bool
            If true will print progress, etc.

        Returns
        -------
        f_radial : array
            Radial profile of f.
        sigma_radial : array
            If sigma is provided then the radial errors are outputted.
        """
        # lazily compute and cache bin areas needed for the radial average
        if self.area2d is None:
            self.area2d = polar.get_polar_area2d(self.redges, self.pedges)
        if self.pixarea is None:
            self.pixarea = polar.get_pixarea(self.xedges, self.yedges)
        assert np.shape(f_polar) == np.shape(self.p2d), "Shape of f_polar does not match stored polar coordinate grid."
        if self.mask_in_bound is None:
            self.get_mask_in_bound(verbose=verbose)
        if self.mask_in_bound.all() != 1.:
            # zero out-of-bound pixels and fold the in-bound mask into w so
            # they do not contribute to the radial mean
            condition = np.where(self.mask_in_bound != 1.)
            f_polar = np.copy(f_polar)
            f_polar[condition] = 0.
            if sigma is not None:
                sigma = np.copy(sigma)
                sigma[condition] = 0.
            if w is not None:
                # NOTE(review): this multiplies the caller's array in place
                # (w *= ...) -- side effect on the passed-in weights.
                w *= self.mask_in_bound
            else:
                w = self.mask_in_bound
        if sigma is None:
            f_radial = polar.polar2radial(f_polar, self.area2d, self.pixarea, sigma=sigma, w=w)
            return f_radial
        else:
            assert np.shape(sigma) == np.shape(self.p2d), "Shape of sigma does not match stored polar coordinate grid."
            f_radial, sigma_radial = polar.polar2radial(f_polar, self.area2d, self.pixarea, sigma=sigma, w=w)
            return f_radial, sigma_radial

    def clean(self):
        """Cleans by reinitialising the class."""
        self.__init__()
| 37.560526 | 145 | 0.552231 |
aceb8b1f3479df3558d15eba0166ba099cc710d5 | 5,180 | py | Python | ParlAI/projects/controllable_dialogue/controllable_seq2seq/nidf.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 163 | 2019-06-23T14:07:57.000Z | 2022-02-25T23:06:07.000Z | ParlAI/projects/controllable_dialogue/controllable_seq2seq/nidf.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 8 | 2019-07-24T12:41:31.000Z | 2022-02-10T00:17:20.000Z | ParlAI/projects/controllable_dialogue/controllable_seq2seq/nidf.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 31 | 2019-06-26T01:21:07.000Z | 2021-09-06T17:23:24.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This file contains code to compute NIDF measures, used for specificity control.
"""
from parlai.core.params import ParlaiParser
from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
from parlai.core.worlds import create_task
from parlai.core.utils import TimeLogger
from collections import Counter
import os
import math
import pickle
CONTROLLABLE_DIR = 'controllable_dialogue'
PARLAI_FORMAT_DIR = 'controllable_dialogue/ConvAI2_parlaiformat'
def get_word_counts(opt, count_inputs):
"""Goes through the dataset specified in opt and gets word counts.
Inputs:
count_inputs: If True, include both input and reply when counting words
and utterances. Otherwise, only include reply text.
Returns:
word_counter_per_sent: a Counter mapping each word to the number of
utterances in which it appears.
num_sents: int. number of utterances counted
"""
# Create repeat label agent and assign it to the specified task
agent = RepeatLabelAgent(opt)
world = create_task(opt, agent)
# Count word frequency for all words in dataset
word_counter_per_sent = Counter()
num_sents = 0
count = 0
log_timer = TimeLogger()
while True:
count += 1
world.parley()
reply = world.acts[0].get('labels', world.acts[0].get('eval_labels'))[0]
words = reply.split()
words_no_dups = list(set(words)) # remove duplicates
word_counter_per_sent.update(words_no_dups)
num_sents += 1
# Optionally count words in input text
if count_inputs:
input = world.acts[0]['text']
input = input.split('\n')[-1] # e.g. in ConvAI2, this removes persona
words = input.split()
words_no_dups = list(set(words)) # remove duplicates
word_counter_per_sent.update(words_no_dups)
num_sents += 1
if log_timer.time() > opt['log_every_n_secs']:
text, _log = log_timer.log(world.total_parleys, world.num_examples())
print(text)
if world.epoch_done():
print('EPOCH DONE')
break
return word_counter_per_sent, num_sents
def learn_nidf(opt):
"""
Go through ConvAI2 and Twitter data, and count word frequences.
Save word2count.pkl, which contains word2count, and total num_sents.
These are both needed to calculate NIDF later.
"""
opt['log_every_n_secs'] = 2
print('Counting words in Twitter train set...')
opt['datatype'] = 'train:ordered'
opt['task'] = 'twitter'
wc1, ns1 = get_word_counts(opt, count_inputs=True)
print('Counting words in Twitter val set...')
opt['datatype'] = 'valid'
opt['task'] = 'twitter'
wc2, ns2 = get_word_counts(opt, count_inputs=True)
opt['task'] = 'fromfile:parlaiformat'
print('Counting words in ConvAI2 train set...')
opt['datatype'] = 'train:ordered'
opt['fromfile_datapath'] = os.path.join(
opt['datapath'], PARLAI_FORMAT_DIR, 'train.txt'
)
# Don't include inputs because ConvAI2 train set reverses every conversation
wc3, ns3 = get_word_counts(opt, count_inputs=False)
print('Counting words in ConvAI2 val set...')
opt['datatype'] = 'valid'
opt['fromfile_datapath'] = os.path.join(
opt['datapath'], PARLAI_FORMAT_DIR, 'valid.txt'
)
wc4, ns4 = get_word_counts(opt, count_inputs=True)
# Merge word counts
word_counter = Counter()
for wc in [wc1, wc2, wc3, wc4]:
for word, count in wc.items():
word_counter[word] += count
num_sents = ns1 + ns2 + ns3 + ns4
# Write word2count and num_sents to file
word2count_fp = os.path.join(opt['datapath'], CONTROLLABLE_DIR, 'word2count.pkl')
print("Saving word count stats to %s..." % word2count_fp)
data = {"word2count": word_counter, "num_sents": num_sents}
with open(word2count_fp, "wb") as f:
pickle.dump(data, f)
def load_word2nidf(opt):
"""
Loads word count stats from word2count.pkl file in data/controllable_dialogue,
computes NIDF for all words, and returns the word2nidf dictionary.
Returns:
word2nidf: dict mapping words to their NIDF score (float between 0 and 1)
"""
word2count_fp = os.path.join(opt['datapath'], CONTROLLABLE_DIR, 'word2count.pkl')
print("Loading word count stats from %s..." % word2count_fp)
with open(word2count_fp, "rb") as f:
data = pickle.load(f)
num_sents = data['num_sents']
print('num_sents: ', num_sents)
word2count = data['word2count']
min_c = min(word2count.values()) # max count
max_c = max(word2count.values()) # min count
word2nidf = {
w: (math.log(max_c) - math.log(c)) / (math.log(max_c) - math.log(min_c))
for w, c in word2count.items()
}
print("Done loading word2nidf dictionary.")
return word2nidf
if __name__ == '__main__':
parser = ParlaiParser()
opt = parser.parse_args()
learn_nidf(opt)
| 32.993631 | 85 | 0.667181 |
aceb8b45f64e5c2373b69959de70a8d8418efdb3 | 513 | py | Python | corehq/ex-submodules/casexml/apps/stock/migrations/0002_delete_stocktransaction.py | akashkj/commcare-hq | b00a62336ec26cea1477dfb8c048c548cc462831 | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/ex-submodules/casexml/apps/stock/migrations/0002_delete_stocktransaction.py | akashkj/commcare-hq | b00a62336ec26cea1477dfb8c048c548cc462831 | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/ex-submodules/casexml/apps/stock/migrations/0002_delete_stocktransaction.py | akashkj/commcare-hq | b00a62336ec26cea1477dfb8c048c548cc462831 | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | # Generated by Django 2.2.24 on 2021-09-22 22:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('stock', '0001_squashed_0002_stockreport_server_date'),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=[], # table to be deleted later
state_operations=[
migrations.DeleteModel(
name='StockTransaction',
),
]
),
]
| 23.318182 | 64 | 0.576998 |
aceb8b5767b4e2d622c76d30e3df9dfa6acba3bc | 1,009 | py | Python | algos/kth-smallest.py | asaini/algo-py | e9d18ef82d14e6304430bbd8b065430e76aa7eb8 | [
"MIT"
] | 1 | 2015-10-01T21:17:10.000Z | 2015-10-01T21:17:10.000Z | algos/kth-smallest.py | asaini/algo-py | e9d18ef82d14e6304430bbd8b065430e76aa7eb8 | [
"MIT"
] | null | null | null | algos/kth-smallest.py | asaini/algo-py | e9d18ef82d14e6304430bbd8b065430e76aa7eb8 | [
"MIT"
] | null | null | null | def kth_smallest(array, start, end, k):
"""
"""
print "kth_smallest ", array, start, end, k
if k > 0 and k < end - start + 1:
pos = partition(array, start, end)
if pos - start == k - 1:
return array[pos]
if pos - start > k - 1:
return kth_smallest(array, start, pos - 1, k)
return kth_smallest(array, pos + 1, end, k - pos + start - 1)
return -1
def partition(array, start, end):
"""
Standard partition process of QuickSort(). It considers the last
element as pivot and moves all smaller element to left of it
and greater elements to right
"""
print "partition ", array, start, end
last = array[end]
i = start
for j in range(start, end):
if array[j] <= last:
# Swap elements at i and j
array[i], array[j] = array[j], array[i]
i = i + 1
# Swap the last element to where it belongs
array[i], array[end] = array[end], array[i]
return i
if __name__ == '__main__':
array = [12, 3, 5, 7, 4, 19, 26, 11]
n = len(array)
print kth_smallest(array, 0, n - 1, 3); | 22.931818 | 66 | 0.627354 |
aceb8d4057e7416affd0037f7e68909e30cebd0c | 1,345 | py | Python | steroidsornot/_nbdev.py | cedarmora/steroids-or-not | afa951dad1c18e0d2ccb24faea0f3efef6ec1178 | [
"Apache-2.0"
] | null | null | null | steroidsornot/_nbdev.py | cedarmora/steroids-or-not | afa951dad1c18e0d2ccb24faea0f3efef6ec1178 | [
"Apache-2.0"
] | null | null | null | steroidsornot/_nbdev.py | cedarmora/steroids-or-not | afa951dad1c18e0d2ccb24faea0f3efef6ec1178 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"PrawClient": "00_firsttry.ipynb",
"storage_path": "00_firsttry.ipynb",
"natural_path": "00_firsttry.ipynb",
"bad_natural_path": "00_firsttry.ipynb",
"steroids_path": "00_firsttry.ipynb",
"bad_steroids_path": "00_firsttry.ipynb",
"uncertain_path": "00_firsttry.ipynb",
"irrelevant_path": "00_firsttry.ipynb",
"get_thumbnail_index": "00_firsttry.ipynb",
"get_thumbnail": "00_firsttry.ipynb",
"get_images": "00_firsttry.ipynb",
"SubmissionsHandler": "03_submissionshandler.ipynb",
"create_db": "05_database.ipynb",
"mapper_registry": "05_database.ipynb",
"Base": "05_database.ipynb",
"database_path": "05_database.ipynb",
"engine": "05_database.ipynb",
"Session": "05_database.ipynb",
"Label": "06_submission.ipynb",
"Submission": "06_submission.ipynb"}
modules = ["firsttry.py",
"pushshift.py",
"submissionshandler.py",
"database.py",
"submission.py"]
doc_url = "https://cedarmora.github.io/steroidsornot/"
git_url = "https://github.com/cedarmora/steroidsornot/tree/master/"
def custom_doc_links(name): return None
| 36.351351 | 67 | 0.628996 |
aceb8dc15bfbb19a0deb6d9f1fb076747dbe9e85 | 66,052 | py | Python | selfdrive/car/hyundai/values.py | matt12eagles/dragonpilot | b88aa72960edb9f24303fde9a373446bbc24b27b | [
"MIT"
] | 251 | 2019-07-12T05:14:20.000Z | 2022-03-30T21:05:22.000Z | selfdrive/car/hyundai/values.py | matt12eagles/dragonpilot | b88aa72960edb9f24303fde9a373446bbc24b27b | [
"MIT"
] | 31 | 2020-03-06T01:50:25.000Z | 2022-03-11T19:09:08.000Z | selfdrive/car/hyundai/values.py | matt12eagles/dragonpilot | b88aa72960edb9f24303fde9a373446bbc24b27b | [
"MIT"
] | 284 | 2019-07-29T13:14:19.000Z | 2022-03-30T17:26:47.000Z | # flake8: noqa
from cereal import car
from selfdrive.car import dbc_dict
Ecu = car.CarParams.Ecu
from common.params import Params
# Steer torque limits
class CarControllerParams:
ACCEL_MIN = -3.5 # m/s
ACCEL_MAX = 2.0 # m/s
def __init__(self, CP):
if Params().get('dp_hkg_smart_mdps') == b'1':
self.STEER_MAX = 384
elif CP.carFingerprint in [CAR.SONATA, CAR.PALISADE, CAR.SANTA_FE, CAR.VELOSTER, CAR.GENESIS_G70,
CAR.IONIQ_EV_2020, CAR.KIA_CEED, CAR.KIA_SELTOS, CAR.ELANTRA_2021,
CAR.ELANTRA_HEV_2021, CAR.SONATA_HYBRID, CAR.KONA_HEV, CAR.SANTA_FE_2022, CAR.KIA_K5_2021, CAR.KONA_EV, CAR.KONA, CAR.IONIQ_HEV_2022]:
self.STEER_MAX = 384
else:
self.STEER_MAX = 255
self.STEER_DELTA_UP = 3
self.STEER_DELTA_DOWN = 7
self.STEER_DRIVER_ALLOWANCE = 50
self.STEER_DRIVER_MULTIPLIER = 2
self.STEER_DRIVER_FACTOR = 1
class CAR:
# Hyundai
ELANTRA = "HYUNDAI ELANTRA 2017"
ELANTRA_2021 = "HYUNDAI ELANTRA 2021"
ELANTRA_HEV_2021 = "HYUNDAI ELANTRA HYBRID 2021"
ELANTRA_GT_I30 = "HYUNDAI I30 N LINE 2019 & GT 2018 DCT"
HYUNDAI_GENESIS = "HYUNDAI GENESIS 2015-2016"
IONIQ = "HYUNDAI IONIQ HYBRID 2017-2019"
IONIQ_HEV_2022 = "HYUNDAI IONIQ HYBRID 2020-2022"
IONIQ_EV_LTD = "HYUNDAI IONIQ ELECTRIC LIMITED 2019"
IONIQ_EV_2020 = "HYUNDAI IONIQ ELECTRIC 2020"
IONIQ_PHEV = "HYUNDAI IONIQ PHEV 2020"
KONA = "HYUNDAI KONA 2020"
KONA_EV = "HYUNDAI KONA ELECTRIC 2019"
KONA_HEV = "HYUNDAI KONA HYBRID 2020"
SANTA_FE = "HYUNDAI SANTA FE 2019"
SANTA_FE_2022 = "HYUNDAI SANTA FE 2022"
SONATA = "HYUNDAI SONATA 2020"
SONATA_LF = "HYUNDAI SONATA 2019"
PALISADE = "HYUNDAI PALISADE 2020"
VELOSTER = "HYUNDAI VELOSTER 2019"
SONATA_HYBRID = "HYUNDAI SONATA HYBRID 2021"
# Kia
KIA_FORTE = "KIA FORTE E 2018 & GT 2021"
KIA_K5_2021 = "KIA K5 2021"
KIA_NIRO_EV = "KIA NIRO EV 2020"
KIA_NIRO_HEV = "KIA NIRO HYBRID 2019"
KIA_NIRO_HEV_2021 = "KIA NIRO HYBRID 2021"
KIA_OPTIMA = "KIA OPTIMA SX 2019 & 2016"
KIA_OPTIMA_H = "KIA OPTIMA HYBRID 2017 & SPORTS 2019"
KIA_SELTOS = "KIA SELTOS 2021"
KIA_SORENTO = "KIA SORENTO GT LINE 2018"
KIA_STINGER = "KIA STINGER GT2 2018"
KIA_CEED = "KIA CEED INTRO ED 2019"
# Genesis
GENESIS_G70 = "GENESIS G70 2018"
GENESIS_G80 = "GENESIS G80 2017"
GENESIS_G90 = "GENESIS G90 2017"
class Buttons:
NONE = 0
RES_ACCEL = 1
SET_DECEL = 2
GAP_DIST = 3
CANCEL = 4
FINGERPRINTS = {
CAR.ELANTRA: [{
66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 897: 8, 832: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1345: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 2001: 8, 2003: 8, 2004: 8, 2009: 8, 2012: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
}],
CAR.ELANTRA_GT_I30: [{
66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1193: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1952: 8, 1960: 8, 1988: 8, 2000: 8, 2001: 8, 2005: 8, 2008: 8, 2009: 8, 2013: 8, 2017: 8, 2025: 8
},
{
66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 897: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1440: 8, 1456: 4, 1470: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8
},
{
66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 897: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1486: 8, 1487: 8, 1491: 8, 1960: 8, 1990: 8, 1998: 8, 2000: 8, 2001: 8, 2004: 8, 2005: 8, 2008: 8, 2009: 8, 2012: 8, 2013: 8, 2015: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
}],
CAR.HYUNDAI_GENESIS: [{
67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1024: 2, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1342: 6, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 5, 1407: 8, 1419: 8, 1427: 6, 1434: 2, 1456: 4
},
{
67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1024: 2, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 3, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1378: 4, 1379: 8, 1384: 5, 1407: 8, 1419: 8, 1427: 6, 1434: 2, 1456: 4
},
{
67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1268: 8, 1280: 1, 1281: 3, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 5, 1407: 8, 1419: 8, 1427: 6, 1434: 2, 1437: 8, 1456: 4
},
{
67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1378: 4, 1379: 8, 1384: 5, 1407: 8, 1425: 2, 1427: 6, 1437: 8, 1456: 4
},
{
67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 5, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1437: 8, 1456: 4
}],
CAR.SANTA_FE: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8
},
{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 764: 8, 809: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1186: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8
},
{
67: 8, 68: 8, 80: 4, 160: 8, 161: 8, 272: 8, 288: 4, 339: 8, 356: 8, 357: 8, 399: 8, 544: 8, 608: 8, 672: 8, 688: 5, 704: 1, 790: 8, 809: 8, 848: 8, 880: 8, 898: 8, 900: 8, 901: 8, 904: 8, 1056: 8, 1064: 8, 1065: 8, 1072: 8, 1075: 8, 1087: 8, 1088: 8, 1151: 8, 1200: 8, 1201: 8, 1232: 4, 1264: 8, 1265: 8, 1266: 8, 1296: 8, 1306: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1348: 8, 1349: 8, 1369: 8, 1370: 8, 1371: 8, 1407: 8, 1415: 8, 1419: 8, 1440: 8, 1442: 4, 1461: 8, 1470: 8
}],
CAR.SONATA: [
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 549: 8, 550: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 8, 865: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 908: 8, 909: 8, 912: 7, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1089: 5, 1096: 8, 1107: 5, 1108: 8, 1114: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1184: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1330: 8, 1339: 8, 1342: 6, 1343: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1379: 8, 1384: 8, 1394: 8, 1407: 8, 1419: 8, 1427: 6, 1446: 8, 1456: 4, 1460: 8, 1470: 8, 1485: 8, 1504: 3, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8},
],
CAR.SONATA_LF: [
{66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1397: 8, 1407: 8, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 2000: 8, 2001: 8, 2004: 8, 2005: 8, 2008: 8, 2009: 8, 2012: 8, 2013: 8, 2014: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8},
],
CAR.KIA_OPTIMA: [{
64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 558: 8, 593: 8, 608: 8, 640: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 909: 8, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1186: 2, 1191: 2, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1268: 8, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1492: 8, 1530: 8, 1532: 5, 1792: 8, 1872: 8, 1937: 8, 1953: 8, 1968: 8, 1988: 8, 1996: 8, 2000: 8, 2001: 8, 2004: 8, 2008: 8, 2009: 8, 2012: 8, 2015: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8, 1371: 8, 1397: 8, 1961: 8
}],
CAR.KIA_SORENTO: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1384: 8, 1407: 8, 1411: 8, 1419: 8, 1425: 2, 1427: 6, 1444: 8, 1456: 4, 1470: 8, 1489: 1
}],
CAR.KIA_STINGER: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8
}],
CAR.GENESIS_G80: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1024: 2, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1456: 4, 1470: 8
},
{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 3, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1437: 8, 1456: 4, 1470: 8
},
{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 8, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1193: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1437: 8, 1456: 4, 1470: 8
}],
CAR.GENESIS_G90: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 3, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1456: 4, 1470: 8, 1988: 8, 2000: 8, 2003: 8, 2004: 8, 2005: 8, 2008: 8, 2011: 8, 2012: 8, 2013: 8
}],
CAR.IONIQ_EV_2020: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 524: 8, 544: 7, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1164: 8, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2005: 8, 2008: 8, 2012: 8, 2013: 8
}],
CAR.IONIQ: [{
68:8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 524: 8, 544: 8, 576:8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1164: 8, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1473: 8, 1476: 8, 1507: 8, 1535: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2005: 8, 2008: 8, 2012: 8, 2013: 8
}],
CAR.KONA_EV: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 549: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1307: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 1157: 4, 1193: 8, 1379: 8, 1988: 8, 1996: 8
}],
CAR.KIA_NIRO_EV: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 516: 8, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1260: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 1990: 8, 1998: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8
}],
CAR.KIA_OPTIMA_H: [{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1236: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
},
{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 909: 8, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1420: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
}],
CAR.PALISADE: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 547: 8, 548: 8, 549: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2005: 8, 2008: 8, 2012: 8
}],
}
FW_VERSIONS = {
CAR.IONIQ: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00AEhe SCC H-CUP 1.01 1.01 96400-G2000 ',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00AE MDPS C 1.00 1.07 56310/G2301 4AEHC107',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00AEH MFC AT EUR LHD 1.00 1.00 95740-G2400 180222',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x816H6F2051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U3H1051\x00\x00\xf1\x006U3H0_C2\x00\x006U3H1051\x00\x00HAE0G16US2\x00\x00\x00\x00',
],
},
CAR.IONIQ_PHEV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\000AEhe SCC FHCUP 1.00 1.02 99110-G2100 ',
b'\xf1\x00AEhe SCC F-CUP 1.00 1.00 99110-G2200 ',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\000AE MDPS C 1.00 1.01 56310/G2510 4APHC101',
b'\xf1\x00AE MDPS C 1.00 1.01 56310/G2560 4APHC101',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\000AEP MFC AT USA LHD 1.00 1.01 95740-G2600 190819',
b'\xf1\x00AEP MFC AT EUR RHD 1.00 1.01 95740-G2600 190819',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x816H6F6051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U3J9051\000\000\xf1\0006U3H1_C2\000\0006U3J9051\000\000PAE0G16NL0\x82zT\xd2',
b'\xf1\x816U3J8051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J8051\x00\x00PAETG16UL0\x00\x00\x00\x00',
],
},
CAR.IONIQ_EV_2020: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00AEev SCC F-CUP 1.00 1.01 99110-G7000 ',
b'\xf1\x00AEev SCC F-CUP 1.00 1.00 99110-G7200 ',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00AE MDPS C 1.00 1.01 56310/G7310 4APEC101',
b'\xf1\x00AE MDPS C 1.00 1.01 56310/G7560 4APEC101',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00AEE MFC AT EUR LHD 1.00 1.01 95740-G2600 190819',
b'\xf1\x00AEE MFC AT EUR LHD 1.00 1.03 95740-G2500 190516',
b'\xf1\x00AEE MFC AT EUR RHD 1.00 1.01 95740-G2600 190819',
],
},
CAR.IONIQ_EV_LTD: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00AEev SCC F-CUP 1.00 1.00 96400-G7000 ',
b'\xf1\x00AEev SCC F-CUP 1.00 1.00 96400-G7100 ',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00AE MDPS C 1.00 1.02 56310G7300\x00 4AEEC102',
b'\xf1\x00AE MDPS C 1.00 1.04 56310/G7501 4AEEC104',
b'\xf1\x00AE MDPS C 1.00 1.03 56310/G7300 4AEEC103',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00AEE MFC AT EUR LHD 1.00 1.00 95740-G7200 160418',
b'\xf1\x00AEE MFC AT USA LHD 1.00 1.00 95740-G2400 180222',
b'\xf1\x00AEE MFC AT EUR LHD 1.00 1.00 95740-G2300 170703',
],
},
CAR.IONIQ_HEV_2022: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00AEhe SCC F-CUP 1.00 1.00 99110-G2600 ',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00AE MDPS C 1.00 1.01 56310G2510\x00 4APHC101',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00AEH MFC AT USA LHD 1.00 1.00 95740-G2700 201027',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x816H6G5051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HAE0G16NL2\x00\x00\x00\x00',
],
},
CAR.SONATA: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00DN8_ SCC F-CU- 1.00 1.00 99110-L0000 ',
b'\xf1\x00DN8_ SCC F-CUP 1.00 1.00 99110-L0000 ',
b'\xf1\x00DN8_ SCC F-CUP 1.00 1.02 99110-L1000 ',
b'\xf1\x00DN8_ SCC FHCUP 1.00 1.00 99110-L0000 ',
b'\xf1\x00DN8_ SCC FHCUP 1.00 1.01 99110-L1000 ',
b'\xf1\x00DN89110-L0000 \xaa\xaa\xaa\xaa\xaa\xaa\xaa ',
b'\xf1\x00DN8 1.00 99110-L0000 \xaa\xaa\xaa\xaa\xaa\xaa\xaa ',
b'\xf1\x00DN8 1.00 99110-L0000 \xaa\xaa\xaa\xaa\xaa\xaa\xaa\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x8799110L0000\xf1\x00DN8_ SCC FHCUP 1.00 1.00 99110-L0000 ',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00DN ESC \a 106 \a\x01 58910-L0100',
b'\xf1\x00DN ESC \x01 102\x19\x04\x13 58910-L1300',
b'\xf1\x00DN ESC \x03 100 \x08\x01 58910-L0300',
b'\xf1\x00DN ESC \x06 104\x19\x08\x01 58910-L0100',
b'\xf1\x00DN ESC \x07 104\x19\x08\x01 58910-L0100',
b'\xf1\x00DN ESC \x08 103\x19\x06\x01 58910-L1300',
b'\xf1\x8758910-L0100\xf1\x00DN ESC \a 106 \a\x01 58910-L0100',
b'\xf1\x8758910-L0100\xf1\x00DN ESC \x06 104\x19\x08\x01 58910-L0100',
b'\xf1\x8758910-L0100\xf1\x00DN ESC \x06 106 \x07\x01 58910-L0100',
b'\xf1\x8758910-L0100\xf1\x00DN ESC \x07 104\x19\x08\x01 58910-L0100',
b'\xf1\x8758910-L0300\xf1\x00DN ESC \x03 100 \x08\x01 58910-L0300',
b'\xf1\x00DN ESC \x06 106 \x07\x01 58910-L0100',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x82DNCVN5GMCCXXXF0A',
b'\xf1\x81HM6M1_0a0_F00',
b'\xf1\x82DNBVN5GMCCXXXDCA',
b'\xf1\x82DNBWN5TMDCXXXG2E',
b'\xf1\x82DNCVN5GMCCXXXG2B',
b'\xf1\x87391162M003',
b'\xf1\x87391162M013',
b'HM6M1_0a0_F00',
b'HM6M2_0a0_BD0',
b'HM6M1_0a0_G20',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00DN8 MDPS C 1.00 1.01 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 4DNAC101',
b'\xf1\x00DN8 MDPS C 1.00 1.01 56310-L0010 4DNAC101',
b'\xf1\x00DN8 MDPS C 1.00 1.01 56310L0010\x00 4DNAC101',
b'\xf1\x00DN8 MDPS R 1.00 1.00 57700-L0000 4DNAP100',
b'\xf1\x87\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x00DN8 MDPS C 1.00 1.01 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 4DNAC101',
b'\xf1\x8756310-L0010\xf1\x00DN8 MDPS C 1.00 1.01 56310-L0010 4DNAC101',
b'\xf1\x8756310-L0210\xf1\x00DN8 MDPS C 1.00 1.01 56310-L0210 4DNAC101',
b'\xf1\x8756310-L1010\xf1\x00DN8 MDPS C 1.00 1.03 56310-L1010 4DNDC103',
b'\xf1\x8756310-L1030\xf1\x00DN8 MDPS C 1.00 1.03 56310-L1030 4DNDC103',
b'\xf1\x8756310L0010\x00\xf1\x00DN8 MDPS C 1.00 1.01 56310L0010\x00 4DNAC101',
b'\xf1\x8756310L0210\x00\xf1\x00DN8 MDPS C 1.00 1.01 56310L0210\x00 4DNAC101',
b'\xf1\x8757700-L0000\xf1\x00DN8 MDPS R 1.00 1.00 57700-L0000 4DNAP100',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00DN8 MFC AT KOR LHD 1.00 1.02 99211-L1000 190422',
b'\xf1\x00DN8 MFC AT RUS LHD 1.00 1.03 99211-L1000 190705',
b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.00 99211-L0000 190716',
b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.01 99211-L0000 191016',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB1\xe3\xc10\xa1',
b'\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc',
b'\xf1\x00HT6TA260BLHT6TA800A1TDN8C20KS4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x00HT6TA260BLHT6TA810A1TDN8M25GS0\x00\x00\x00\x00\x00\x00\xaa\x8c\xd9p',
b'\xf1\x00HT6WA250BLHT6WA910A1SDN8G25NB1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x00HT6WA250BLHT6WA910A1SDN8G25NB1\x00\x00\x00\x00\x00\x00\x96\xa1\xf1\x92',
b'\xf1\x00HT6WA280BLHT6WAD10A1SDN8G25NB2\x00\x00\x00\x00\x00\x00\x08\xc9O:',
b'\xf1\x00T02601BL T02730A1 VDN8T25XXX730NS5\xf7_\x92\xf5',
b'\xf1\x87954A02N060\x00\x00\x00\x00\x00\xf1\x81T02730A1 \xf1\x00T02601BL T02730A1 VDN8T25XXX730NS5\xf7_\x92\xf5',
b'\xf1\x87SALDBA3510954GJ3ww\x87xUUuWx\x88\x87\x88\x87w\x88wvfwfc_\xf9\xff\x98wO\xffl\xe0\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA3573534GJ3\x89\x98\x89\x88EUuWgwvwwwwww\x88\x87xTo\xfa\xff\x86f\x7f\xffo\x0e\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA3601464GJ3\x88\x88\x88\x88ffvggwvwvw\x87gww\x87wvo\xfb\xff\x98\x88\x7f\xffjJ\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA3753044GJ3UUeVff\x86hwwwwvwwgvfgfvo\xf9\xfffU_\xffC\xae\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA3873834GJ3fefVwuwWx\x88\x97\x88w\x88\x97xww\x87wU_\xfb\xff\x86f\x8f\xffN\x04\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA4525334GJ3\x89\x99\x99\x99fevWh\x88\x86\x88fwvgw\x88\x87xfo\xfa\xffuDo\xff\xd1>\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA4626804GJ3wwww\x88\x87\x88xx\x88\x87\x88wwgw\x88\x88\x98\x88\x95_\xf9\xffuDo\xff|\xe7\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA4803224GJ3wwwwwvwg\x88\x88\x98\x88wwww\x87\x88\x88xu\x9f\xfc\xff\x87f\x8f\xff\xea\xea\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA6347404GJ3wwwwff\x86hx\x88\x97\x88\x88\x88\x88\x88vfgf\x88?\xfc\xff\x86Uo\xff\xec/\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA6901634GJ3UUuWVeVUww\x87wwwwwvUge\x86/\xfb\xff\xbb\x99\x7f\xff]2\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA7077724GJ3\x98\x88\x88\x88ww\x97ygwvwww\x87ww\x88\x87x\x87_\xfd\xff\xba\x99o\xff\x99\x01\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALFBA4195874GJ2EVugvf\x86hgwvwww\x87wgw\x86wc_\xfb\xff\x98\x88\x8f\xff\xe23\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SAMDBA8054504GJ3gw\x87xffvgffffwwwweUVUf?\xfc\xffvU_\xff\xddl\xf1\x89HT6WAD10A1\xf1\x82SDN8G25NB2\x00\x00\x00\x00\x00\x00',
],
},
CAR.SONATA_LF: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00LF__ SCC F-CUP 1.00 1.00 96401-C2200 ',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00LF ESC \f 11 \x17\x01\x13 58920-C2610',
b'\xf1\x00LF ESC \t 11 \x17\x01\x13 58920-C2610',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81606D5051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606D5K51\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606G1051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00LFF LKAS AT USA LHD 1.00 1.01 95740-C1000 E51',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87LAHSGN012918KF10\x98\x88x\x87\x88\x88x\x87\x88\x88\x98\x88\x87w\x88w\x88\x88\x98\x886o\xf6\xff\x98w\x7f\xff3\x00\xf1\x816W3B1051\x00\x00\xf1\x006W351_C2\x00\x006W3B1051\x00\x00TLF0T20NL2\x00\x00\x00\x00',
b'\xf1\x87LAHSGN012918KF10\x98\x88x\x87\x88\x88x\x87\x88\x88\x98\x88\x87w\x88w\x88\x88\x98\x886o\xf6\xff\x98w\x7f\xff3\x00\xf1\x816W3B1051\x00\x00\xf1\x006W351_C2\x00\x006W3B1051\x00\x00TLF0T20NL2H\r\xbdm',
b'\xf1\x87\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf1\x816T6B4051\x00\x00\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\xb0\x9f\xee\xf5',
b'\xf1\x87\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf1\x816T6B4051\x00\x00\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\x00\x00\x00\x00',
b'\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\xb0\x9f\xee\xf5',
],
},
CAR.SANTA_FE: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00TM__ SCC F-CUP 1.00 1.01 99110-S2000 ',
b'\xf1\x00TM__ SCC F-CUP 1.00 1.02 99110-S2000 ',
b'\xf1\x00TM__ SCC F-CUP 1.00 1.03 99110-S2000 ',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00TM ESC \r 100\x18\x031 58910-S2650',
b'\xf1\x00TM ESC \r 103\x18\x11\x08 58910-S2650',
b'\xf1\x00TM ESC \r 104\x19\a\b 58910-S2650',
b'\xf1\x00TM ESC \x02 100\x18\x030 58910-S2600',
b'\xf1\x00TM ESC \x02 102\x18\x07\x01 58910-S2600',
b'\xf1\x00TM ESC \x02 103\x18\x11\x07 58910-S2600',
b'\xf1\x00TM ESC \x02 104\x19\x07\x07 58910-S2600',
b'\xf1\x00TM ESC \x03 103\x18\x11\x07 58910-S2600',
b'\xf1\x00TM ESC \x0c 103\x18\x11\x08 58910-S2650',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81606EA051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606G1051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606G3051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00TM MDPS C 1.00 1.00 56340-S2000 8409',
b'\xf1\x00TM MDPS C 1.00 1.00 56340-S2000 8A12',
b'\xf1\x00TM MDPS C 1.00 1.01 56340-S2000 9129',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00TM MFC AT USA LHD 1.00 1.00 99211-S2000 180409',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87LBJSGA7082574HG0\x87www\x98\x88\x88\x88\x99\xaa\xb9\x9afw\x86gx\x99\xa7\x89co\xf8\xffvU_\xffR\xaf\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2T20NS1\x00\xa6\xe0\x91',
b'\xf1\x87LBKSGA0458404HG0vfvg\x87www\x89\x99\xa8\x99y\xaa\xa7\x9ax\x88\xa7\x88t_\xf9\xff\x86w\x8f\xff\x15x\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2T20NS1\x00\x00\x00\x00',
b'\xf1\x87LDJUEA6010814HG1\x87w\x87x\x86gvw\x88\x88\x98\x88gw\x86wx\x88\x97\x88\x85o\xf8\xff\x86f_\xff\xd37\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4T20NS0\xf8\x19\x92g',
b'\xf1\x87LDJUEA6458264HG1ww\x87x\x97x\x87\x88\x88\x99\x98\x89g\x88\x86xw\x88\x97x\x86o\xf7\xffvw\x8f\xff3\x9a\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4T20NS0\xf8\x19\x92g',
b'\xf1\x87LDKUEA2045844HG1wwww\x98\x88x\x87\x88\x88\xa8\x88x\x99\x97\x89x\x88\xa7\x88U\x7f\xf8\xffvfO\xffC\x1e\xf1\x816W3E0051\x00\x00\xf1\x006W351_C2\x00\x006W3E0051\x00\x00TTM4T20NS3\x00\x00\x00\x00',
b'\xf1\x87LDKUEA9993304HG1\x87www\x97x\x87\x88\x99\x99\xa9\x99x\x99\xa7\x89w\x88\x97x\x86_\xf7\xffwwO\xffl#\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4T20NS1R\x7f\x90\n',
b'\xf1\x87LDLUEA6061564HG1\xa9\x99\x89\x98\x87wwwx\x88\x97\x88x\x99\xa7\x89x\x99\xa7\x89sO\xf9\xffvU_\xff<\xde\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS50\xcb\xc3\xed',
b'\xf1\x87LDLUEA6159884HG1\x88\x87hv\x99\x99y\x97\x89\xaa\xb8\x9ax\x99\x87\x89y\x99\xb7\x99\xa7?\xf7\xff\x97wo\xff\xf3\x05\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS5\x00\x00\x00\x00',
b'\xf1\x87LDLUEA6852664HG1\x97wWu\x97www\x89\xaa\xc8\x9ax\x99\x97\x89x\x99\xa7\x89SO\xf7\xff\xa8\x88\x7f\xff\x03z\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS50\xcb\xc3\xed',
b'\xf1\x87LDLUEA6898374HG1fevW\x87wwwx\x88\x97\x88h\x88\x96\x88x\x88\xa7\x88ao\xf9\xff\x98\x99\x7f\xffD\xe2\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS5\x00\x00\x00\x00',
b'\xf1\x87LDLUEA6898374HG1fevW\x87wwwx\x88\x97\x88h\x88\x96\x88x\x88\xa7\x88ao\xf9\xff\x98\x99\x7f\xffD\xe2\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS50\xcb\xc3\xed',
b'\xf1\x87SBJWAA5842214GG0\x88\x87\x88xww\x87x\x89\x99\xa8\x99\x88\x99\x98\x89w\x88\x87xw_\xfa\xfffU_\xff\xd1\x8d\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x98{|\xe3',
b'\xf1\x87SBJWAA5890864GG0\xa9\x99\x89\x98\x98\x87\x98y\x89\x99\xa8\x99w\x88\x87xww\x87wvo\xfb\xffuD_\xff\x9f\xb5\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x98{|\xe3',
b'\xf1\x87SBJWAA6562474GG0ffvgeTeFx\x88\x97\x88ww\x87www\x87w\x84o\xfa\xff\x87fO\xff\xc2 \xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x00\x00\x00\x00',
b'\xf1\x87SBJWAA6562474GG0ffvgeTeFx\x88\x97\x88ww\x87www\x87w\x84o\xfa\xff\x87fO\xff\xc2 \xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x98{|\xe3',
b'\xf1\x87SBJWAA7780564GG0wvwgUUeVwwwwx\x88\x87\x88wwwwd_\xfc\xff\x86f\x7f\xff\xd7*\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS2F\x84<\xc0',
b'\xf1\x87SBJWAA8278284GG0ffvgUU\x85Xx\x88\x87\x88x\x88w\x88ww\x87w\x96o\xfd\xff\xa7U_\xff\xf2\xa0\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS2F\x84<\xc0',
b'\xf1\x87SBLWAA4363244GG0wvwgwv\x87hgw\x86ww\x88\x87xww\x87wdo\xfb\xff\x86f\x7f\xff3$\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS6\x00\x00\x00\x00',
b'\xf1\x87SBLWAA6622844GG0wwwwff\x86hwwwwx\x88\x87\x88\x88\x88\x88\x88\x98?\xfd\xff\xa9\x88\x7f\xffn\xe5\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS7u\x1e{\x1c',
b'\xf1\x87SDJXAA7656854GG1DEtWUU\x85X\x88\x88\x98\x88w\x88\x87xx\x88\x87\x88\x96o\xfb\xff\x86f\x7f\xff.\xca\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4G24NS2\x00\x00\x00\x00',
b'\xf1\x87SDKXAA2443414GG1vfvgwv\x87h\x88\x88\x88\x88ww\x87wwwww\x99_\xfc\xffvD?\xffl\xd2\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4G24NS6\x00\x00\x00\x00',
b'\xf1\x87SBLWAA4899564GG0VfvgUU\x85Xx\x88\x87\x88vfgf\x87wxwvO\xfb\xff\x97f\xb1\xffSB\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS7\x00\x00\x00\x00',
],
},
CAR.SANTA_FE_2022: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00TM__ SCC F-CUP 1.00 1.00 99110-S1500 ',
b'\xf1\x8799110S1500\xf1\x00TM__ SCC F-CUP 1.00 1.00 99110-S1500 ',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00TM ESC \x02 101 \x08\x04 58910-S2GA0',
b'\xf1\x00TM ESC \x03 101 \x08\x02 58910-S2DA0',
b'\xf1\x8758910-S2DA0\xf1\x00TM ESC \x03 101 \x08\x02 58910-S2DA0',
b'\xf1\x8758910-S2GA0\xf1\x00TM ESC \x02 101 \x08\x04 58910-S2GA0',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x82TMBZN5TMD3XXXG2E',
b'\xf1\x82TACVN5GSI3XXXH0A',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00TM MDPS C 1.00 1.02 56370-S2AA0 0B19',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00TMA MFC AT MEX LHD 1.00 1.01 99211-S2500 210205',
b'\xf1\x00TMA MFC AT USA LHD 1.00 1.00 99211-S2500 200720',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x00T02601BL T02730A1 VTMPT25XXX730NS2\xa6\x06\x88\xf7',
b'\xf1\x87SDMXCA8653204GN1EVugEUuWwwwwww\x87wwwwwv/\xfb\xff\xa8\x88\x9f\xff\xa5\x9c\xf1\x89HT6WAD00A1\xf1\x82STM4G25NH1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87954A02N250\x00\x00\x00\x00\x00\xf1\x81T02730A1 \xf1\x00T02601BL T02730A1 VTMPT25XXX730NS2\xa6\x06\x88\xf7',
],
},
CAR.KIA_STINGER: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00CK__ SCC F_CUP 1.00 1.01 96400-J5100 ',
b'\xf1\x00CK__ SCC F_CUP 1.00 1.03 96400-J5100 ',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81606DE051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81640E0051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x82CKJN3TMSDE0B\x00\x00\x00\x00',
b'\xf1\x82CKKN3TMD_H0A\x00\x00\x00\x00',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00CK MDPS R 1.00 1.04 57700-J5200 4C2CL104',
b'\xf1\x00CK MDPS R 1.00 1.04 57700-J5220 4C2VL104',
b'\xf1\x00CK MDPS R 1.00 1.04 57700-J5420 4C4VL104',
b'\xf1\x00CK MDPS R 1.00 1.06 57700-J5420 4C4VL106',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00CK MFC AT USA LHD 1.00 1.03 95740-J5000 170822',
b'\xf1\x00CK MFC AT USA LHD 1.00 1.04 95740-J5000 180504',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87VCJLE17622572DK0vd6D\x99\x98y\x97vwVffUfvfC%CuT&Dx\x87o\xff{\x1c\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0',
b'\xf1\x87VDHLG17000192DK2xdFffT\xa5VUD$DwT\x86wveVeeD&T\x99\xba\x8f\xff\xcc\x99\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0',
b'\xf1\x87VDHLG17000192DK2xdFffT\xa5VUD$DwT\x86wveVeeD&T\x99\xba\x8f\xff\xcc\x99\xf1\x89E21\x00\x00\x00\x00\x00\x00\x00\xf1\x82SCK0T33NB0',
b'\xf1\x87VDHLG17034412DK2vD6DfVvVTD$D\x99w\x88\x98EDEDeT6DgfO\xff\xc3=\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0',
b'\xf1\x87VDHLG17118862DK2\x8awWwgu\x96wVfUVwv\x97xWvfvUTGTx\x87o\xff\xc9\xed\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00SCK0T33NB0\x88\xa2\xe6\xf0',
b'\xf1\x87VDKLJ18675252DK6\x89vhgwwwwveVU\x88w\x87w\x99vgf\x97vXfgw_\xff\xc2\xfb\xf1\x89E25\x00\x00\x00\x00\x00\x00\x00\xf1\x82TCK0T33NB2',
b'\xf1\x87WAJTE17552812CH4vfFffvfVeT5DwvvVVdFeegeg\x88\x88o\xff\x1a]\xf1\x81E21\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E21\x00\x00\x00\x00\x00\x00\x00TCK2T20NB1\x19\xd2\x00\x94',
],
},
CAR.PALISADE: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\000LX2_ SCC F-CUP 1.00 1.05 99110-S8100 ',
b'\xf1\x00LX2 SCC FHCUP 1.00 1.04 99110-S8100 ',
b'\xf1\x00LX2_ SCC FHCUP 1.00 1.04 99110-S8100 ',
b'\xf1\x00LX2_ SCC FHCUP 1.00 1.05 99110-S8100 ',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00LX ESC \x01 103\x19\t\x10 58910-S8360',
b'\xf1\x00LX ESC \x01 103\x31\t\020 58910-S8360',
b'\xf1\x00LX ESC \x0b 101\x19\x03\x17 58910-S8330',
b'\xf1\x00LX ESC \x0b 102\x19\x05\x07 58910-S8330',
b'\xf1\x00LX ESC \x0b 103\x19\t\x07 58910-S8330',
b'\xf1\x00LX ESC \x0b 103\x19\t\x10 58910-S8360',
b'\xf1\x00LX ESC \x0b 104 \x10\x16 58910-S8360',
b'\xf1\x00ON ESC \x0b 100\x18\x12\x18 58910-S9360',
b'\xf1\x00ON ESC \x0b 101\x19\t\x08 58910-S9360',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81640J0051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81640K0051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00LX2 MDPS C 1,00 1,03 56310-S8020 4LXDC103', # modified firmware
b'\xf1\x00LX2 MDPS C 1.00 1.03 56310-S8020 4LXDC103',
b'\xf1\x00ON MDPS C 1.00 1.00 56340-S9000 8B13',
b'\xf1\x00ON MDPS C 1.00 1.01 56340-S9000 9201',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.03 99211-S8100 190125',
b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.05 99211-S8100 190909',
b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.07 99211-S8100 200422',
b'\xf1\x00LX2 MFC AT USA LHD 1.00 1.08 99211-S8100 200903',
b'\xf1\x00ON MFC AT USA LHD 1.00 1.01 99211-S9100 181105',
b'\xf1\x00ON MFC AT USA LHD 1.00 1.03 99211-S9100 200720',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x00bcsh8p54 U872\x00\x00\x00\x00\x00\x00TON4G38NB1\x96z28',
b'\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00TON4G38NB2[v\\\xb6',
b'\xf1\x87LBLUFN650868KF36\xa9\x98\x89\x88\xa8\x88\x88\x88h\x99\xa6\x89fw\x86gw\x88\x97x\xaa\x7f\xf6\xff\xbb\xbb\x8f\xff+\x82\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8',
b'\xf1\x87LBLUFN655162KF36\x98\x88\x88\x88\x98\x88\x88\x88x\x99\xa7\x89x\x99\xa7\x89x\x99\x97\x89g\x7f\xf7\xffwU_\xff\xe9!\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8',
b'\xf1\x87LBLUFN731381KF36\xb9\x99\x89\x98\x98\x88\x88\x88\x89\x99\xa8\x99\x88\x99\xa8\x89\x88\x88\x98\x88V\177\xf6\xff\x99w\x8f\xff\xad\xd8\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\000bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8',
b'\xf1\x87LDKVBN382172KF26\x98\x88\x88\x88\xa8\x88\x88\x88x\x99\xa7\x89\x87\x88\x98x\x98\x99\xa9\x89\xa5_\xf6\xffDDO\xff\xcd\x16\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7',
b'\xf1\x87LDKVBN424201KF26\xba\xaa\x9a\xa9\x99\x99\x89\x98\x89\x99\xa8\x99\x88\x99\x98\x89\x88\x99\xa8\x89v\x7f\xf7\xffwf_\xffq\xa6\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7',
b'\xf1\x87LDKVBN540766KF37\x87wgv\x87w\x87xx\x99\x97\x89v\x88\x97h\x88\x88\x88\x88x\x7f\xf6\xffvUo\xff\xd3\x01\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7',
b'\xf1\x87LDLVBN560098KF26\x86fff\x87vgfg\x88\x96xfw\x86gfw\x86g\x95\xf6\xffeU_\xff\x92c\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB2\xafL]\xe7',
b'\xf1\x87LDLVBN645817KF37\x87www\x98\x87xwx\x99\x97\x89\x99\x99\x99\x99g\x88\x96x\xb6_\xf7\xff\x98fo\xff\xe2\x86\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN662115KF37\x98\x88\x88\x88\xa8\x88\x88\x88x\x99\x97\x89x\x99\xa7\x89\x88\x99\xa8\x89\x88\x7f\xf7\xfffD_\xff\xdc\x84\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN667933KF37\xb9\x99\x89\x98\xb9\x99\x99\x99x\x88\x87\x88w\x88\x87x\x88\x88\x98\x88\xcbo\xf7\xffe3/\xffQ!\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN673087KF37\x97www\x86fvgx\x99\x97\x89\x99\xaa\xa9\x9ag\x88\x86x\xe9_\xf8\xff\x98w\x7f\xff"\xad\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN681363KF37\x98\x88\x88\x88\x97x\x87\x88y\xaa\xa7\x9a\x88\x88\x98\x88\x88\x88\x88\x88vo\xf6\xffvD\x7f\xff%v\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN713890KF26\xb9\x99\x89\x98\xa9\x99\x99\x99x\x99\x97\x89\x88\x99\xa8\x89\x88\x99\xb8\x89Do\xf7\xff\xa9\x88o\xffs\r\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN733215KF37\x99\x98y\x87\x97wwwi\x99\xa6\x99x\x99\xa7\x89V\x88\x95h\x86o\xf7\xffeDO\xff\x12\xe7\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN750044KF37\xca\xa9\x8a\x98\xa7wwwy\xaa\xb7\x9ag\x88\x96x\x88\x99\xa8\x89\xb9\x7f\xf6\xff\xa8w\x7f\xff\xbe\xde\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN752612KF37\xba\xaa\x8a\xa8\x87w\x87xy\xaa\xa7\x9a\x88\x99\x98\x89x\x88\x97\x88\x96o\xf6\xffvU_\xffh\x1b\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDLVBN755553KF37\x87xw\x87\x97w\x87xy\x99\xa7\x99\x99\x99\xa9\x99Vw\x95gwo\xf6\xffwUO\xff\xb5T\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX4G38NB3X\xa8\xc08',
b'\xf1\x87LDMVBN778156KF37\x87vWe\xa9\x99\x99\x99y\x99\xb7\x99\x99\x99\x99\x99x\x99\x97\x89\xa8\x7f\xf8\xffwf\x7f\xff\x82_\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6',
b'\xf1\x87LDMVBN780576KF37\x98\x87hv\x97x\x97\x89x\x99\xa7\x89\x88\x99\x98\x89w\x88\x97x\x98\x7f\xf7\xff\xba\x88\x8f\xff\x1e0\xf1\x81U922\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U922\x00\x00\x00\x00\x00\x00SLX4G38NB4\xd6\xe8\xd7\xa6',
b"\xf1\x87LBLUFN622950KF36\xa8\x88\x88\x88\x87w\x87xh\x99\x96\x89\x88\x99\x98\x89\x88\x99\x98\x89\x87o\xf6\xff\x98\x88o\xffx'\xf1\x81U891\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U891\x00\x00\x00\x00\x00\x00SLX2G38NB3\xd1\xc3\xf8\xa8",
],
},
CAR.VELOSTER: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00JS__ SCC H-CUP 1.00 1.02 95650-J3200 ',
b'\xf1\x00JS__ SCC HNCUP 1.00 1.02 95650-J3100 ',
],
(Ecu.esp, 0x7d1, None): [b'\xf1\x00\x00\x00\x00\x00\x00\x00', ],
(Ecu.engine, 0x7e0, None): [
b'\x01TJS-JNU06F200H0A',
b'\x01TJS-JDK06F200H0A',
],
(Ecu.eps, 0x7d4, None): [b'\xf1\x00JSL MDPS C 1.00 1.03 56340-J3000 8308', ],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00JS LKAS AT USA LHD 1.00 1.02 95740-J3000 K32',
b'\xf1\x00JS LKAS AT KOR LHD 1.00 1.03 95740-J3000 K33',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJS0T16NS1\xba\x02\xb8\x80',
b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJS0T16NS1\x00\x00\x00\x00',
b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJS0T16KS2\016\xba\036\xa2',
],
},
CAR.GENESIS_G70: {
(Ecu.fwdRadar, 0x7d0, None): [b'\xf1\x00IK__ SCC F-CUP 1.00 1.02 96400-G9100 ', ],
(Ecu.engine, 0x7e0, None): [b'\xf1\x81640F0051\x00\x00\x00\x00\x00\x00\x00\x00', ],
(Ecu.eps, 0x7d4, None): [b'\xf1\x00IK MDPS R 1.00 1.06 57700-G9420 4I4VL106', ],
(Ecu.fwdCamera, 0x7c4, None): [b'\xf1\x00IK MFC AT USA LHD 1.00 1.01 95740-G9000 170920', ],
(Ecu.transmission, 0x7e1, None): [b'\xf1\x87VDJLT17895112DN4\x88fVf\x99\x88\x88\x88\x87fVe\x88vhwwUFU\x97eFex\x99\xff\xb7\x82\xf1\x81E25\x00\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 E25\x00\x00\x00\x00\x00\x00\x00SIK0T33NB2\x11\x1am\xda', ],
},
CAR.KONA: {
(Ecu.fwdRadar, 0x7d0, None): [b'\xf1\x00OS__ SCC F-CUP 1.00 1.00 95655-J9200 ', ],
(Ecu.esp, 0x7d1, None): [b'\xf1\x816V5RAK00018.ELF\xf1\x00\x00\x00\x00\x00\x00\x00', ],
(Ecu.engine, 0x7e0, None): [b'"\x01TOS-0NU06F301J02', ],
(Ecu.eps, 0x7d4, None): [b'\xf1\x00OS MDPS C 1.00 1.05 56310J9030\x00 4OSDC105', ],
(Ecu.fwdCamera, 0x7c4, None): [b'\xf1\x00OS9 LKAS AT USA LHD 1.00 1.00 95740-J9300 g21', ],
(Ecu.transmission, 0x7e1, None): [b'\xf1\x816U2VE051\x00\x00\xf1\x006U2V0_C2\x00\x006U2VE051\x00\x00DOS4T16NS3\x00\x00\x00\x00', ],
},
CAR.KIA_CEED: {
(Ecu.fwdRadar, 0x7D0, None): [b'\xf1\000CD__ SCC F-CUP 1.00 1.02 99110-J7000 ', ],
(Ecu.esp, 0x7D4, None): [b'\xf1\000CD MDPS C 1.00 1.06 56310-XX000 4CDEC106', ],
(Ecu.fwdCamera, 0x7C4, None): [b'\xf1\000CD LKAS AT EUR LHD 1.00 1.01 99211-J7000 B40', ],
(Ecu.engine, 0x7E0, None): [b'\001TCD-JECU4F202H0K', ],
(Ecu.transmission, 0x7E1, None): [b'\xf1\x816U2V7051\000\000\xf1\0006U2V0_C2\000\0006U2V7051\000\000DCD0T14US1\000\000\000\000', ],
(Ecu.esp, 0x7D1, None): [b'\xf1\000CD ESC \003 102\030\b\005 58920-J7350', ],
},
CAR.KIA_FORTE: {
(Ecu.eps, 0x7D4, None): [
b'\xf1\x00BD MDPS C 1.00 1.08 56310M6300\x00 4BDDC108',
],
(Ecu.fwdCamera, 0x7C4, None): [
b'\xf1\x00BD LKAS AT USA LHD 1.00 1.04 95740-M6000 J33',
],
(Ecu.fwdRadar, 0x7D0, None): [
b'\xf1\x00BD__ SCC H-CUP 1.00 1.02 99110-M6000 ',
],
(Ecu.engine, 0x7e0, None): [
b'\x01TBDM1NU06F200H01',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x816VGRAH00018.ELF\xf1\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b"\xf1\x816U2VC051\x00\x00\xf1\x006U2V0_C2\x00\x006U2VC051\x00\x00DBD0T16SS0\xcf\x1e'\xc3",
],
},
CAR.KIA_K5_2021: {
(Ecu.fwdRadar, 0x7D0, None): [
b'\xf1\000DL3_ SCC FHCUP 1.00 1.03 99110-L2000 ',
b'\xf1\x8799110L2000\xf1\000DL3_ SCC FHCUP 1.00 1.03 99110-L2000 ',
],
(Ecu.eps, 0x7D4, None): [
b'\xf1\x8756310-L3110\xf1\000DL3 MDPS C 1.00 1.01 56310-L3110 4DLAC101',
],
(Ecu.fwdCamera, 0x7C4, None): [
b'\xf1\000DL3 MFC AT USA LHD 1.00 1.03 99210-L3000 200915',
],
(Ecu.esp, 0x7D1, None): [
b'\xf1\000DL ESC \006 101 \004\002 58910-L3200',
b'\xf1\x8758910-L3200\xf1\000DL ESC \006 101 \004\002 58910-L3200',
],
(Ecu.engine, 0x7E0, None): [
b'\xf1\x87391212MKT0\xf1\xa00240',
b'\xf1\x87391212MKT0',
],
(Ecu.transmission, 0x7E1, None): [
b'\xf1\000bcsh8p54 U913\000\000\000\000\000\000TDL2T16NB1ia\v\xb8',
b'\xf1\x87SALFEA5652514GK2UUeV\x88\x87\x88xxwg\x87ww\x87wwfwvd/\xfb\xffvU_\xff\x93\xd3\xf1\x81U913\000\000\000\000\000\000\xf1\000bcsh8p54 U913\000\000\000\000\000\000TDL2T16NB1ia\v\xb8',
],
},
CAR.KONA_EV: {
(Ecu.esp, 0x7D1, None): [
b'\xf1\x00OS IEB \r 105\x18\t\x18 58520-K4000',
b'\xf1\x00OS IEB \x01 212 \x11\x13 58520-K4000',
b'\xf1\x00OS IEB \x02 212 \x11\x13 58520-K4000',
b'\xf1\x00OS IEB \x03 210 \x02\x14 58520-K4000',
b'\xf1\x00OS IEB \x03 212 \x11\x13 58520-K4000',
],
(Ecu.fwdCamera, 0x7C4, None): [
b'\xf1\x00OE2 LKAS AT EUR LHD 1.00 1.00 95740-K4200 200',
b'\xf1\x00OSE LKAS AT EUR LHD 1.00 1.00 95740-K4100 W40',
b'\xf1\x00OSE LKAS AT EUR RHD 1.00 1.00 95740-K4100 W40',
b'\xf1\x00OSE LKAS AT KOR LHD 1.00 1.00 95740-K4100 W40',
b'\xf1\x00OSE LKAS AT USA LHD 1.00 1.00 95740-K4300 W50',
],
(Ecu.eps, 0x7D4, None): [
b'\xf1\x00OS MDPS C 1.00 1.03 56310/K4550 4OEDC103',
b'\xf1\x00OS MDPS C 1.00 1.04 56310K4000\x00 4OEDC104',
b'\xf1\x00OS MDPS C 1.00 1.04 56310K4050\x00 4OEDC104',
],
(Ecu.fwdRadar, 0x7D0, None): [
b'\xf1\x00OSev SCC F-CUP 1.00 1.00 99110-K4000 ',
b'\xf1\x00OSev SCC F-CUP 1.00 1.00 99110-K4100 ',
b'\xf1\x00OSev SCC F-CUP 1.00 1.01 99110-K4000 ',
b'\xf1\x00OSev SCC FNCUP 1.00 1.01 99110-K4000 ',
],
},
CAR.KIA_NIRO_EV: {
(Ecu.fwdRadar, 0x7D0, None): [
b'\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4000 ',
b'\xf1\x00DEev SCC F-CUP 1.00 1.03 96400-Q4100 ',
b'\xf1\x00OSev SCC F-CUP 1.00 1.01 99110-K4000 ',
b'\xf1\x8799110Q4000\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4000 ',
b'\xf1\x8799110Q4100\xf1\x00DEev SCC F-CUP 1.00 1.00 99110-Q4100 ',
b'\xf1\x8799110Q4500\xf1\000DEev SCC F-CUP 1.00 1.00 99110-Q4500 ',
],
(Ecu.esp, 0x7D1, None): [
b'\xf1\x00OS IEB \r 212 \x11\x13 58520-K4000',
],
(Ecu.eps, 0x7D4, None): [
b'\xf1\x00DE MDPS C 1.00 1.05 56310Q4000\x00 4DEEC105',
b'\xf1\x00DE MDPS C 1.00 1.05 56310Q4100\x00 4DEEC105',
b'\xf1\x00OS MDPS C 1.00 1.04 56310K4050\x00 4OEDC104',
],
(Ecu.fwdCamera, 0x7C4, None): [
b'\xf1\000DEE MFC AT EUR LHD 1.00 1.00 99211-Q4100 200706',
b'\xf1\x00DEE MFC AT EUR LHD 1.00 1.00 99211-Q4000 191211',
b'\xf1\x00DEE MFC AT USA LHD 1.00 1.00 99211-Q4000 191211',
b'\xf1\x00DEE MFC AT USA LHD 1.00 1.03 95740-Q4000 180821',
b'\xf1\x00OSE LKAS AT EUR LHD 1.00 1.00 95740-K4100 W40',
],
},
CAR.KIA_NIRO_HEV: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x816H6F4051\000\000\000\000\000\000\000\000',
],
(Ecu.transmission, 0x7e1, None): [
b"\xf1\x816U3J2051\000\000\xf1\0006U3H0_C2\000\0006U3J2051\000\000PDE0G16NS2\xf4\'\\\x91",
b'\xf1\x816U3J2051\000\000\xf1\0006U3H0_C2\000\0006U3J2051\000\000PDE0G16NS2\000\000\000\000',
],
(Ecu.eps, 0x7D4, None): [
b'\xf1\000DE MDPS C 1.00 1.09 56310G5301\000 4DEHC109',
],
(Ecu.fwdCamera, 0x7C4, None): [
b'\xf1\000DEP MFC AT USA LHD 1.00 1.01 95740-G5010 170424',
],
(Ecu.fwdRadar, 0x7D0, None): [
b'\xf1\000DEhe SCC H-CUP 1.01 1.02 96400-G5100 ',
],
},
CAR.KIA_NIRO_HEV_2021: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x816H6G5051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HDE0G16NL3\x00\x00\x00\x00',
b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HDE0G16NL3\xb9\xd3\xfaW',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00DE MDPS C 1.00 1.01 56310G5520\x00 4DEPC101',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00DEH MFC AT USA LHD 1.00 1.07 99211-G5000 201221',
],
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00DEhe SCC FHCUP 1.00 1.00 99110-G5600 ',
],
},
CAR.KIA_SELTOS: {
(Ecu.fwdRadar, 0x7d0, None): [b'\xf1\x8799110Q5100\xf1\000SP2_ SCC FHCUP 1.01 1.05 99110-Q5100 ',],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x8758910-Q5450\xf1\000SP ESC \a 101\031\t\005 58910-Q5450',
b'\xf1\x8758910-Q5450\xf1\000SP ESC \t 101\031\t\005 58910-Q5450',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81616D2051\000\000\000\000\000\000\000\000',
b'\xf1\x81616D5051\000\000\000\000\000\000\000\000',
b'\001TSP2KNL06F100J0K',
b'\001TSP2KNL06F200J0K',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\000SP2 MDPS C 1.00 1.04 56300Q5200 ',
b'\xf1\000SP2 MDPS C 1.01 1.05 56300Q5200 ',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\000SP2 MFC AT USA LHD 1.00 1.04 99210-Q5000 191114',
b'\xf1\000SP2 MFC AT USA LHD 1.00 1.05 99210-Q5000 201012',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87CZLUB49370612JF7h\xa8y\x87\x99\xa7hv\x99\x97fv\x88\x87x\x89x\x96O\xff\x88\xff\xff\xff.@\xf1\x816V2C2051\000\000\xf1\0006V2B0_C2\000\0006V2C2051\000\000CSP4N20NS3\000\000\000\000',
b'\xf1\x87954A22D200\xf1\x81T01950A1 \xf1\000T0190XBL T01950A1 DSP2T16X4X950NS6\xd30\xa5\xb9',
b'\xf1\x87954A22D200\xf1\x81T01950A1 \xf1\000T0190XBL T01950A1 DSP2T16X4X950NS8\r\xfe\x9c\x8b',
],
},
CAR.KIA_OPTIMA: {
(Ecu.fwdRadar, 0x7d0, None): [b'\xf1\x00JF__ SCC F-CUP 1.00 1.00 96400-D4110 '],
(Ecu.esp, 0x7d1, None): [b'\xf1\x00JF ESC \v 11 \x18\x030 58920-D5180',],
(Ecu.engine, 0x7e0, None): [
b'\x01TJFAJNU06F201H03',
b'\xf1\x89F1JF600AISEIU702\xf1\x82F1JF600AISEIU702',
],
(Ecu.eps, 0x7d4, None): [b'\xf1\x00TM MDPS C 1.00 1.00 56340-S2000 8409'],
(Ecu.fwdCamera, 0x7c4, None): [b'\xf1\x00JFA LKAS AT USA LHD 1.00 1.02 95895-D5000 h31'],
(Ecu.transmission, 0x7e1, None): [b'\xf1\x816U2V8051\x00\x00\xf1\x006U2V0_C2\x00\x006U2V8051\x00\x00DJF0T16NL0\t\xd2GW'],
},
CAR.ELANTRA_2021: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00CN7_ SCC FHCUP 1.00 1.01 99110-AA000 ',
b'\xf1\x00CN7_ SCC F-CUP 1.00 1.01 99110-AA000 ',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x87\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x00CN7 MDPS C 1.00 1.06 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 4CNDC106',
b'\xf1\x8756310AA050\x00\xf1\x00CN7 MDPS C 1.00 1.06 56310AA050\x00 4CNDC106',
b'\xf1\x8756310/AA070\xf1\x00CN7 MDPS C 1.00 1.06 56310/AA070 4CNDC106\xf1\xa01.06',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00CN7 MFC AT USA LHD 1.00 1.00 99210-AB000 200819',
b'\xf1\x00CN7 MFC AT USA LHD 1.00 1.03 99210-AA000 200819',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x8758910-AB800\xf1\x00CN ESC \t 101 \x10\x03 58910-AB800',
b'\xf1\x00CN ESC \t 101 \x10\x03 58910-AB800',
b'\xf1\x8758910-AA800\xf1\x00CN ESC \t 104 \x08\x03 58910-AA800\xf1\xa01.04',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x00HT6WA280BLHT6VA640A1CCN0N20NS5\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x00HT6WA280BLHT6VA640A1CCN0N20NS5\x00\x00\x00\x00\x00\x00\xe8\xba\xce\xfa',
b'\xf1\x87CXMQFM2135005JB2E\xb9\x89\x98W\xa9y\x97h\xa9\x98\x99wxvwh\x87\177\xffx\xff\xff\xff,,\xf1\x89HT6VA640A1\xf1\x82CCN0N20NS5\x00\x00\x00\x00\x00\x00',
b'\xf1\x87CXMQFM1916035JB2\x88vvgg\x87Wuwgev\xa9\x98\x88\x98h\x99\x9f\xffh\xff\xff\xff\xa5\xee\xf1\x89HT6VA640A1\xf1\x82CCN0N20NS5\x00\x00\x00\x00\x00\x00',
b'\xf1\x87CXLQF40189012JL2f\x88\x86\x88\x88vUex\xb8\x88\x88\x88\x87\x88\x89fh?\xffz\xff\xff\xff\x08z\xf1\x89HT6VA640A1\xf1\x82CCN0N20NS5\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x82CNCWD0AMFCXCSFFA',
b'\xf1\x82CNCWD0AMFCXCSFFB',
b'\xf1\x82CNCVD0AMFCXCSFFB',
],
},
CAR.ELANTRA_HEV_2021: {
(Ecu.fwdCamera, 0x7c4, None) : [
b'\xf1\000CN7HMFC AT USA LHD 1.00 1.03 99210-AA000 200819'
],
(Ecu.fwdRadar, 0x7d0, None) : [
b'\xf1\000CNhe SCC FHCUP 1.00 1.01 99110-BY000 '
],
(Ecu.eps, 0x7d4, None) :[
b'\xf1\x8756310/BY050\xf1\000CN7 MDPS C 1.00 1.02 56310/BY050 4CNHC102'
],
(Ecu.transmission, 0x7e1, None) :[
b'\xf1\0006U3L0_C2\000\0006U3K3051\000\000HCN0G16NS0\xb9?A\xaa',
b'\xf1\0006U3L0_C2\000\0006U3K3051\000\000HCN0G16NS0\000\000\000\000',
b'\xf1\x816U3K3051\000\000\xf1\0006U3L0_C2\000\0006U3K3051\000\000HCN0G16NS0\xb9?A\xaa',
b'\xf1\x816U3K3051\000\000\xf1\0006U3L0_C2\000\0006U3K3051\000\000HCN0G16NS0\000\000\000\000'
],
(Ecu.engine, 0x7e0, None) : [
b'\xf1\x816H6G5051\000\000\000\000\000\000\000\000'
]
},
CAR.KONA_HEV: {
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00OS IEB \x01 104 \x11 58520-CM000',
],
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00OShe SCC FNCUP 1.00 1.01 99110-CM000 ',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00OS MDPS C 1.00 1.00 56310CM030\x00 4OHDC100',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00OSH LKAS AT KOR LHD 1.00 1.01 95740-CM000 l31',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HOS0G16DS1\x16\xc7\xb0\xd9',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x816H6F6051\x00\x00\x00\x00\x00\x00\x00\x00',
]
},
CAR.SONATA_HYBRID: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\000DNhe SCC FHCUP 1.00 1.02 99110-L5000 ',
b'\xf1\x8799110L5000\xf1\000DNhe SCC FHCUP 1.00 1.02 99110-L5000 ',
b'\xf1\000DNhe SCC F-CUP 1.00 1.02 99110-L5000 ',
b'\xf1\x8799110L5000\xf1\000DNhe SCC F-CUP 1.00 1.02 99110-L5000 ',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x8756310-L5500\xf1\x00DN8 MDPS C 1.00 1.02 56310-L5500 4DNHC102',
b'\xf1\x8756310-L5450\xf1\x00DN8 MDPS C 1.00 1.02 56310-L5450 4DNHC102',
b'\xf1\x8756310-L5450\xf1\000DN8 MDPS C 1.00 1.03 56310-L5450 4DNHC103',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00DN8HMFC AT USA LHD 1.00 1.04 99211-L1000 191016',
b'\xf1\x00DN8HMFC AT USA LHD 1.00 1.05 99211-L1000 201109',
b'\xf1\000DN8HMFC AT USA LHD 1.00 1.06 99211-L1000 210325',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\000PSBG2333 E14\x00\x00\x00\x00\x00\x00\x00TDN2H20SA6N\xc2\xeeW',
b'\xf1\x87959102T250\000\000\000\000\000\xf1\x81E09\000\000\000\000\000\000\000\xf1\000PSBG2323 E09\000\000\000\000\000\000\000TDN2H20SA5\x97R\x88\x9e',
b'\xf1\000PSBG2323 E09\000\000\000\000\000\000\000TDN2H20SA5\x97R\x88\x9e',
b'\xf1\000PSBG2333 E16\000\000\000\000\000\000\000TDN2H20SA7\0323\xf9\xab',
b'\xf1\x87PCU\000\000\000\000\000\000\000\000\000\xf1\x81E16\000\000\000\000\000\000\000\xf1\000PSBG2333 E16\000\000\000\000\000\000\000TDN2H20SA7\0323\xf9\xab',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x87391162J012',
b'\xf1\x87391162J013',
],
},
}
# Maps a CAN checksum algorithm name to the list of platforms whose messages
# use it; platforms absent from both lists use the remaining/default scheme.
CHECKSUM = {
  "crc8": [CAR.SANTA_FE, CAR.SONATA, CAR.PALISADE, CAR.KIA_SELTOS, CAR.ELANTRA_2021, CAR.ELANTRA_HEV_2021, CAR.SONATA_HYBRID, CAR.SANTA_FE_2022, CAR.KIA_K5_2021],
  "6B": [CAR.KIA_SORENTO, CAR.HYUNDAI_GENESIS],
}
# Per-platform signal-routing quirks. Values are sets of CAR platforms.
FEATURES = {
  # which message has the gear
  "use_cluster_gears": {CAR.ELANTRA, CAR.ELANTRA_GT_I30, CAR.KONA},
  "use_tcu_gears": {CAR.KIA_OPTIMA, CAR.SONATA_LF, CAR.VELOSTER},
  "use_elect_gears": {CAR.KIA_NIRO_EV, CAR.KIA_NIRO_HEV, CAR.KIA_NIRO_HEV_2021, CAR.KIA_OPTIMA_H, CAR.IONIQ_EV_LTD, CAR.KONA_EV, CAR.IONIQ, CAR.IONIQ_EV_2020, CAR.IONIQ_PHEV, CAR.ELANTRA_HEV_2021, CAR.SONATA_HYBRID, CAR.KONA_HEV, CAR.IONIQ_HEV_2022},
  # these cars use the FCA11 message for the AEB and FCW signals, all others use SCC12
  "use_fca": {CAR.SONATA, CAR.SONATA_HYBRID, CAR.ELANTRA, CAR.ELANTRA_2021, CAR.ELANTRA_HEV_2021, CAR.ELANTRA_GT_I30, CAR.KIA_STINGER, CAR.IONIQ_EV_2020, CAR.IONIQ_PHEV, CAR.KONA_EV, CAR.KIA_FORTE, CAR.KIA_NIRO_EV, CAR.PALISADE, CAR.GENESIS_G70, CAR.KONA, CAR.SANTA_FE, CAR.KIA_SELTOS, CAR.KONA_HEV, CAR.SANTA_FE_2022, CAR.KIA_K5_2021, CAR.IONIQ_HEV_2022},
}
# these cars use a different gas signal
HYBRID_CAR = {CAR.IONIQ_PHEV, CAR.ELANTRA_HEV_2021, CAR.KIA_NIRO_HEV, CAR.KIA_NIRO_HEV_2021, CAR.SONATA_HYBRID, CAR.KONA_HEV, CAR.IONIQ, CAR.IONIQ_HEV_2022}

EV_CAR = {CAR.IONIQ_EV_2020, CAR.IONIQ_EV_LTD, CAR.KONA_EV, CAR.KIA_NIRO_EV}
# If 0x500 is present on bus 1 it probably has a Mando radar outputting radar points.
# If no points are outputted by default it might be possible to turn it on using selfdrive/debug/hyundai_enable_radar_points.py
# Maps each supported platform to its DBC pair: (powertrain-bus DBC name,
# radar-bus DBC name or None when no parsed radar DBC is available).
DBC = {
  CAR.ELANTRA: dbc_dict('hyundai_kia_generic', None),
  CAR.ELANTRA_2021: dbc_dict('hyundai_kia_generic', None),
  CAR.ELANTRA_HEV_2021: dbc_dict('hyundai_kia_generic', None),
  CAR.ELANTRA_GT_I30: dbc_dict('hyundai_kia_generic', None),
  CAR.GENESIS_G70: dbc_dict('hyundai_kia_generic', None),
  CAR.GENESIS_G80: dbc_dict('hyundai_kia_generic', None),
  CAR.GENESIS_G90: dbc_dict('hyundai_kia_generic', None),
  CAR.HYUNDAI_GENESIS: dbc_dict('hyundai_kia_generic', None),
  CAR.IONIQ_PHEV: dbc_dict('hyundai_kia_generic', None),
  CAR.IONIQ_EV_2020: dbc_dict('hyundai_kia_generic', None),
  CAR.IONIQ_EV_LTD: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
  CAR.IONIQ: dbc_dict('hyundai_kia_generic', None),
  CAR.IONIQ_HEV_2022: dbc_dict('hyundai_kia_generic', None),
  CAR.KIA_FORTE: dbc_dict('hyundai_kia_generic', None),
  CAR.KIA_K5_2021: dbc_dict('hyundai_kia_generic', None),
  CAR.KIA_NIRO_EV: dbc_dict('hyundai_kia_generic', None),
  CAR.KIA_NIRO_HEV: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
  CAR.KIA_NIRO_HEV_2021: dbc_dict('hyundai_kia_generic', None),
  CAR.KIA_OPTIMA: dbc_dict('hyundai_kia_generic', None),
  CAR.KIA_OPTIMA_H: dbc_dict('hyundai_kia_generic', None),
  CAR.KIA_SELTOS: dbc_dict('hyundai_kia_generic', None),
  CAR.KIA_SORENTO: dbc_dict('hyundai_kia_generic', None), # Has 0x5XX messages, but different format
  CAR.KIA_STINGER: dbc_dict('hyundai_kia_generic', None),
  CAR.KONA: dbc_dict('hyundai_kia_generic', None),
  CAR.KONA_EV: dbc_dict('hyundai_kia_generic', None),
  CAR.KONA_HEV: dbc_dict('hyundai_kia_generic', None),
  CAR.SANTA_FE: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
  CAR.SANTA_FE_2022: dbc_dict('hyundai_kia_generic', None),
  CAR.SONATA: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
  CAR.SONATA_LF: dbc_dict('hyundai_kia_generic', None), # Has 0x5XX messages, but different format
  CAR.PALISADE: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
  CAR.VELOSTER: dbc_dict('hyundai_kia_generic', None),
  CAR.KIA_CEED: dbc_dict('hyundai_kia_generic', None),
  CAR.SONATA_HYBRID: dbc_dict('hyundai_kia_generic', 'hyundai_kia_mando_front_radar'),
}

# Driver steering-torque magnitude above which the driver is considered to be
# overriding — value is in raw CAN torque units; TODO confirm against carstate.
STEER_THRESHOLD = 150
| 72.664466 | 928 | 0.638966 |
aceb8ddbf3b4134e92cbbdf8c84d92e9cdd6199c | 971 | py | Python | lambda/lambda/urls.py | Rory-Sullivan/lambda | 44e7ea6273958b2e666b1d59bc6eac54915b8b8c | [
"MIT"
] | 1 | 2020-10-26T13:09:27.000Z | 2020-10-26T13:09:27.000Z | lambda/lambda/urls.py | Rory-Sullivan/lambda | 44e7ea6273958b2e666b1d59bc6eac54915b8b8c | [
"MIT"
] | 5 | 2020-10-24T20:08:13.000Z | 2021-06-10T19:05:24.000Z | lambda/lambda/urls.py | Rory-Sullivan/lambda | 44e7ea6273958b2e666b1d59bc6eac54915b8b8c | [
"MIT"
] | 1 | 2020-10-19T14:35:24.000Z | 2020-10-19T14:35:24.000Z | """lambda URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Project URL routing: each app owns its own ``urls.py`` and is mounted under
# its prefix below. Django tries these patterns in order, top to bottom.
urlpatterns = [
    path("teams/", include("teams.urls")),
    path("tasks/", include("tasks.urls")),
    path("projects/", include("projects.urls")),
    path("users/", include("users.urls")),
    path("", include("base.urls")),
    path("admin/", admin.site.urls),
]
| 35.962963 | 77 | 0.681771 |
aceb8e4a8095b872efc17e2a1cbee372ba3b68ed | 444 | py | Python | turtletrans/translate.py | wrwrwr/turtle-trans | 3b906bfaa45912d16a8b8877649388539b02eb75 | [
"MIT"
] | null | null | null | turtletrans/translate.py | wrwrwr/turtle-trans | 3b906bfaa45912d16a8b8877649388539b02eb75 | [
"MIT"
] | null | null | null | turtletrans/translate.py | wrwrwr/turtle-trans | 3b906bfaa45912d16a8b8877649388539b02eb75 | [
"MIT"
] | null | null | null | """
Some common utilities.
"""
from turtle import Turtle
def turtle_subclass(name):
    """Build and return a new, empty subclass of ``Turtle`` called *name*."""
    bases = (Turtle,)
    namespace = {}
    return type(name, bases, namespace)
def translate_methods(cls, translations):
    """Attach alias names for existing methods of *cls*.

    *translations* maps an existing method name to an iterable of alias
    names; every alias is bound to the same underlying function object.
    """
    for original_name, alias_names in translations.items():
        target = getattr(cls, original_name)
        for alias_name in alias_names:
            setattr(cls, alias_name, target)
| 20.181818 | 53 | 0.623874 |
aceb8fa6f93154f3ebfcad7df13f03f9e4b88cb7 | 860 | py | Python | games/WAVES/extractors/QuestionAnswered.py | mjgm97/opengamedata-core | 10bbaaf8ffc43855298ecb8ad0940acc59475cc1 | [
"MIT"
] | null | null | null | games/WAVES/extractors/QuestionAnswered.py | mjgm97/opengamedata-core | 10bbaaf8ffc43855298ecb8ad0940acc59475cc1 | [
"MIT"
] | 2 | 2022-01-04T19:41:13.000Z | 2022-02-04T20:36:51.000Z | games/WAVES/extractors/QuestionAnswered.py | mjgm97/opengamedata-core | 10bbaaf8ffc43855298ecb8ad0940acc59475cc1 | [
"MIT"
] | 1 | 2021-12-17T15:53:16.000Z | 2021-12-17T15:53:16.000Z | from schemas import Event
from typing import Any, List, Union
# local imports
from extractors.Feature import Feature
from schemas.Event import Event
class QuestionAnswered(Feature):
    """Feature capturing the answer given to one specific in-game question.

    ``count_index`` selects which question this instance tracks.
    """

    def __init__(self, name:str, description:str, count_index:int):
        super().__init__(name=name, description=description, count_index=count_index)
        # Most recent answer observed for this question; None until one is seen.
        self._answer = None

    def GetEventTypes(self) -> List[str]:
        # "CUSTOM.3" is the raw event type carrying question-answer data.
        return ["CUSTOM.3"]

    def GetFeatureValues(self) -> List[Any]:
        return [self._answer]

    def _extractFromEvent(self, event:Event) -> None:
        data = event.event_data
        if data['question'] == self._count_index:
            self._answer = data['answered']

    def MinVersion(self) -> Union[str,None]:
        return None

    def MaxVersion(self) -> Union[str,None]:
        return None
| 30.714286 | 91 | 0.681395 |
aceb90c59a3708a6386da60dde7b4d3a2270d0f2 | 1,675 | py | Python | howdimain/urls.py | pydjdev78/abc-for-app | a7b9852f1e51f2e901fe00092931a1e8a2bca913 | [
"MIT"
] | null | null | null | howdimain/urls.py | pydjdev78/abc-for-app | a7b9852f1e51f2e901fe00092931a1e8a2bca913 | [
"MIT"
] | 9 | 2021-03-19T02:30:36.000Z | 2022-01-13T02:37:39.000Z | howdimain/urls.py | pydjdev78/abc-for-app | a7b9852f1e51f2e901fe00092931a1e8a2bca913 | [
"MIT"
] | null | null | null | """noticeboard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
Examples for Django version 1.0:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
# Project URL routing. Several apps are mounted at the root prefix; Django
# tries the patterns in order until one matches.
urlpatterns = [
    path('', include('boards.urls')),
    path('', include('accounts.urls')),
    path('', include('newsfeed.urls')),
    path('', include('stock.urls')),
    path('martor/', include('martor.urls')),
]

# Serve uploaded media and static files from Django itself in development
# only; in production the front-end web server should serve these paths.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 39.880952 | 82 | 0.704478 |
aceb91038a3242d45a672803271e262fb622710e | 1,902 | py | Python | meiduo_34/mall/apps/areas/views.py | codedaliu/meiduo-store | 4654ff9737b941ce9945a0c92187beea27bb0fc3 | [
"MIT"
] | null | null | null | meiduo_34/mall/apps/areas/views.py | codedaliu/meiduo-store | 4654ff9737b941ce9945a0c92187beea27bb0fc3 | [
"MIT"
] | null | null | null | meiduo_34/mall/apps/areas/views.py | codedaliu/meiduo-store | 4654ff9737b941ce9945a0c92187beea27bb0fc3 | [
"MIT"
] | null | null | null | from django.shortcuts import render
# Create your views here.
from rest_framework.views import APIView
from areas.models import Area
from areas.serailizers import AreaSerializer, SubsAreaSerialzier
"""
id name parent_id
1000 北京省 null
1010 北京市 1000
1011 昌平区 1010
1012 海淀区 1010
1013 朝阳区 1010
"""
"""
1. 获取省份信息的时候
select * from tb_areas where parent_id is null;
2. 获取市的信息的时候
3. 获取区县的信息的时候
select * from tb_areas where parent_id=110000;
select * from tb_areas where parent_id=110100;
"""
# class AreaProviceAPIView(APIView):
# #获取省份信息
# def get(self,request):
# #
# pass
#
#
# class AreaDistrictAPIView(APIView):
# # 获取市,区县信息
# def get(self, request):
# pass
# http://127.0.0.1:8000/areas/infos/ 省份信息 list 方法
# http://127.0.0.1:8000/areas/infos/110000 市区县信息 retrieve
from rest_framework.viewsets import ReadOnlyModelViewSet
from rest_framework_extensions.cache.mixins import ListCacheResponseMixin
from rest_framework_extensions.cache.mixins import RetrieveCacheResponseMixin
from rest_framework_extensions.cache.mixins import CacheResponseMixin
class AreaModelViewSet(CacheResponseMixin,ReadOnlyModelViewSet):
    """Read-only, cached viewset for administrative areas.

    ``list`` returns the provinces (top-level areas); ``retrieve`` returns a
    single area of any level together with its sub-areas (via the serializer).
    """
    # Disable pagination for this viewset — area lists are returned whole.
    pagination_class = None
    # queryset = Area.objects.all()                # all areas
    # queryset = Area.objects.filter(parent=None)  # provinces only

    def get_queryset(self):
        # Return a different data source depending on the current action.
        if self.action == 'list':
            # Equivalent: Area.objects.filter(parent__isnull=True)
            return Area.objects.filter(parent=None)
        else:
            return Area.objects.all()

    # serializer_class = AreaSerializer
    def get_serializer_class(self):
        # Return a different serializer depending on the current action.
        if self.action == 'list':
            return AreaSerializer
        else:
            return SubsAreaSerialzier
| 22.915663 | 77 | 0.671399 |
aceb91f7add1b625ae60e389b6eb83ff42590d36 | 16,660 | py | Python | petastorm/pytorch.py | acmore/petastorm | dac335220223c857fece29ef55827b7531372171 | [
"Apache-2.0"
] | null | null | null | petastorm/pytorch.py | acmore/petastorm | dac335220223c857fece29ef55827b7531372171 | [
"Apache-2.0"
] | 1 | 2020-10-26T13:29:04.000Z | 2021-01-20T11:43:56.000Z | petastorm/pytorch.py | acmore/petastorm | dac335220223c857fece29ef55827b7531372171 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017-2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import decimal
# Must import pyarrow before torch. See: https://github.com/uber/petastorm/blob/master/docs/troubleshoot.rst
import re
import logging
import numpy as np
from six import PY2
from torch.utils.data.dataloader import default_collate
import torch
from packaging import version
from petastorm.reader_impl.shuffling_buffer import RandomShufflingBuffer, NoopShufflingBuffer
from petastorm.reader_impl.pytorch_shuffling_buffer import BatchedRandomShufflingBuffer, BatchedNoopShufflingBuffer
# True when running under PyTorch older than 1.1; used below to work around
# missing int8 tensor support in those versions.
_TORCH_BEFORE_1_1 = version.parse(torch.__version__) < version.parse('1.1.0')  # type: ignore

# String-like classes for isinstance checks, Python 2/3 compatible.
if PY2:
    _string_classes = basestring  # noqa: F821
else:
    _string_classes = (str, bytes)

logger = logging.getLogger(__name__)
def _sanitize_pytorch_types(row_as_dict):
"""Promotes values types in a dictionary to the types supported by pytorch. Raises an error if type is clear error
if the type can not be promoted.
The parameter is modified in-place.
int8, uint16 are promoted to int32; uint32 -> int64;
numpy string_, unicode_, object arrays are not supported.
:param dict[str,obj] row_as_dict: a dictionary of key-value pairs. The values types are promoted to
pytorch compatible.
:return: None
"""
for name, value in row_as_dict.items():
# PyTorch supported types are: double, float, float16, int64, int32, and uint8
if isinstance(value, np.ndarray):
if value.dtype == np.int8 and _TORCH_BEFORE_1_1:
row_as_dict[name] = value.astype(np.int16)
elif value.dtype == np.uint16:
row_as_dict[name] = value.astype(np.int32)
elif value.dtype == np.uint32:
row_as_dict[name] = value.astype(np.int64)
elif value.dtype == np.bool_:
row_as_dict[name] = value.astype(np.uint8)
elif re.search('[SaUO]', value.dtype.str):
raise TypeError('Pytorch does not support arrays of string or object classes. '
'Found in field {}.'.format(name))
elif isinstance(value, np.bool_):
row_as_dict[name] = np.uint8(value)
elif value is None:
raise TypeError('Pytorch does not support nullable fields. Found None in {}'.format(name))
def decimal_friendly_collate(batch):
    """A wrapper on top of ``default_collate`` function that allows decimal.Decimal types to be collated.

    We use ``decimal.Decimal`` types in petastorm dataset to represent timestamps. PyTorch's ``default_collate``
    implementation does not support collating ``decimal.Decimal`` types. ``decimal_friendly_collate`` collates
    ``decimal.Decimal`` separately and then combines with the rest of the fields collated by a standard
    ``default_collate``.

    :param batch: A list of dictionaries to collate
    :return: A dictionary of lists/pytorch.Tensor types
    """

    if isinstance(batch[0], decimal.Decimal):
        return batch
    # NOTE: ``collections.Mapping``/``collections.Sequence`` aliases were
    # removed in Python 3.10; the ABCs must be referenced via collections.abc.
    elif isinstance(batch[0], collections.abc.Mapping):
        return {key: decimal_friendly_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], _string_classes):
        # Strings are sequences too, so this check must precede the Sequence one.
        return batch
    elif isinstance(batch[0], collections.abc.Sequence):
        transposed = zip(*batch)
        return [decimal_friendly_collate(samples) for samples in transposed]
    else:
        return default_collate(batch)
# Error raised when a second iteration pass is started before the previous
# pass has been fully consumed (see LoaderBase.__iter__ below).
_PARALLEL_ITER_ERROR = "You must finish a full pass of Petastorm DataLoader before making another pass from the \
beginning.If you do need to terminate early and restart from beginning, please re-create the reader and the data \
loader."
class LoaderBase(object):
    """Common iteration state machine shared by the data loaders below.

    Tracks whether an iteration pass is in flight (``_in_iter``) and remembers
    the first error raised during a pass (``_error``) so subsequent passes fail
    fast. Subclasses implement ``_iter_impl`` and provide ``self.reader``.
    """

    def __init__(self):
        # None: never iterated; True: a pass is running; False: a pass finished.
        self._in_iter = None
        # First exception raised during iteration, if any.
        self._error = None

    def __iter__(self):
        if self._error is not None:
            raise RuntimeError('Cannot start a new iteration because last time iteration failed with error {err}.'
                               .format(err=repr(self._error)))

        if self._in_iter is not None and self._in_iter == True:  # noqa: E712
            raise RuntimeError(_PARALLEL_ITER_ERROR)

        if self._in_iter is not None:
            # A previous pass completed: rewind the reader for a new epoch.
            self.reader.reset()
            logger.warning('Start a new pass of Petastorm DataLoader, reset underlying Petastorm reader to position 0.')
        self._in_iter = True

        try:
            for batch in self._iter_impl():
                yield batch
        except Exception as e:
            self._error = e
            logger.error('Iteration on Petastorm DataLoader raise error: %s', repr(e))
            raise
        finally:
            self._in_iter = False
class DataLoader(LoaderBase):
    """
    A data loader adaptor for ``torch.utils.data.DataLoader``.

    This class iterates and returns items from the Reader in batches.

    This loader can be used as an iterator and will terminate when the reader used in the construction of the class
    runs out of samples.
    """

    def __init__(self, reader, batch_size=1, collate_fn=decimal_friendly_collate,
                 shuffling_queue_capacity=0):
        """
        Initializes a data loader object, with a default collate.

        Number of epochs is defined by the configuration of the reader argument.

        An optional shuffling queue is created if shuffling_queue_capacity is greater than 0. No samples will be
        returned to a user by the ``DataLoader`` until the queue is full. After that, batches of `batch_size`
        will be created by uniformly sampling the shuffling queue. Once no more samples are available from the data
        reader, the shuffling queue is allowed to be consumed till no further samples are available.

        Note that the last returned batch could have fewer than ``batch_size`` samples.

        NOTE: if you are using ``make_batch_reader``, this shuffling queue will be randomizing the order of the
        entire batches and not changing the order of elements within a batch. This is likely not what you intend to do.

        :param reader: petastorm Reader instance
        :param batch_size: the number of items to return per batch; factored into the len() of this reader
        :param collate_fn: an optional callable to merge a list of samples to form a mini-batch.
        :param shuffling_queue_capacity: Queue capacity is passed to the underlying :class:`tf.RandomShuffleQueue`
          instance. If set to 0, no shuffling will be done.
        """
        super(DataLoader, self).__init__()
        self.reader = reader
        self.batch_size = batch_size
        self.collate_fn = collate_fn

        # _batch_acc accumulates samples for a single batch.
        self._batch_acc = []
        self.shuffling_queue_capacity = shuffling_queue_capacity
        self._in_iter = None

    def _iter_impl(self):
        """
        The Data Loader iterator stops the for-loop when reader runs out of samples.
        """
        # As we iterate over incoming samples, we are going to store them in `self._batch_acc`, until we have a batch of
        # the requested batch_size ready.

        keys = None
        if self.shuffling_queue_capacity > 0:
            # We can not know what is the reasonable number to use for the extra capacity, so we set a huge number
            # and give up on the unbound growth protection mechanism.
            min_after_dequeue = self.shuffling_queue_capacity - 1
            self._shuffling_buffer = RandomShufflingBuffer(self.shuffling_queue_capacity,
                                                           min_after_retrieve=min_after_dequeue,
                                                           extra_capacity=100000000)
        else:
            self._shuffling_buffer = NoopShufflingBuffer()

        for row in self.reader:
            # Default collate does not work nicely on namedtuples and treat them as lists
            # Using dict will result in the yielded structures being dicts as well
            row_as_dict = row._asdict()

            keys = row_as_dict.keys()

            # Promote some types that are incompatible with pytorch to be pytorch friendly.
            _sanitize_pytorch_types(row_as_dict)

            # Add rows to shuffling buffer
            if not self.reader.is_batched_reader:
                self._shuffling_buffer.add_many([row_as_dict])
            else:
                # Transposition:
                #   row_as_dict:        {'a': [1,2,3], 'b':[4,5,6]}
                #   row_group_as_tuple: [(1, 4), (2, 5), (3, 6)]
                # The order within a tuple is defined by key order in 'keys'
                row_group_as_tuple = list(zip(*(row_as_dict[k] for k in keys)))

                # Adding data as 'row-by-row' into a shuffling buffer. This is a pretty
                # slow implementation though. Probably can comeup with a faster way to shuffle,
                # perhaps at the expense of a larger memory consumption...
                self._shuffling_buffer.add_many(row_group_as_tuple)

            # _yield_batches will emit as much batches as are allowed by the shuffling_buffer (RandomShufflingBuffer
            # will avoid underflowing below a certain number of samples to guarantee some samples decorrelation)
            for batch in self._yield_batches(keys):
                yield batch

        # Once reader can not read new rows, we might still have a bunch of rows waiting in the shuffling buffer.
        # Telling shuffling buffer that we are finished allows to deplete the buffer completely, regardless its
        # min_after_dequeue setting.
        self._shuffling_buffer.finish()

        for batch in self._yield_batches(keys):
            yield batch

        # Yield the last and partial batch
        if self._batch_acc:
            yield self.collate_fn(self._batch_acc)

    def _yield_batches(self, keys):
        """Drain rows currently retrievable from the shuffling buffer and yield
        every full batch produced (collated via ``self.collate_fn``)."""
        while self._shuffling_buffer.can_retrieve():
            post_shuffled_row = self._shuffling_buffer.retrieve()
            if not isinstance(post_shuffled_row, dict):
                # This is for the case of batched reads. Here we restore back the
                # dictionary format of records
                post_shuffled_row = dict(zip(keys, post_shuffled_row))

            self._batch_acc.append(post_shuffled_row)

            # Batch is ready? Collate and emmit
            if len(self._batch_acc) == self.batch_size:
                yield self.collate_fn(self._batch_acc)
                self._batch_acc = []

    # Functions needed to treat data loader as a context manager
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Stop the reader's worker pool and wait for it to finish.
        self.reader.stop()
        self.reader.join()
class BatchedDataLoader(LoaderBase):
    """
    Same as DataLoader except it uses torch-based shuffling buffers which enable batched buffering
    (significantly faster for small data).
    """

    def __init__(self, reader, batch_size=1, transform_fn=None,
                 shuffling_queue_capacity=0):
        """
        Initializes a data loader object.

        Number of epochs is defined by the configuration of the reader argument.

        An optional shuffling queue is created if shuffling_queue_capacity is greater than 0. No samples will be
        returned to a user by the ``BatchedDataLoader`` until the queue is full. After that, batches of `batch_size`
        will be created by uniformly sampling the shuffling queue. Once no more samples are available from the data
        reader, the shuffling queue is allowed to be consumed till no further samples are available.

        Note that the last returned batch could have fewer than ``batch_size`` samples.

        NOTE: if you are using ``make_batch_reader``, this shuffling queue will be randomizing the order of the
        entire batches and not changing the order of elements within a batch. This is likely not what you intend to do.

        This class does not support special types that are not supported in PyTorch (decimal/string).

        :param reader: petastorm Reader instance
        :param batch_size: the number of items to return per batch; factored into the len() of this reader
        :param transform_fn: an optional callable to convert batches from the reader to PyTorch tensors
        :param shuffling_queue_capacity: Queue capacity is passed to the underlying :class:`tf.RandomShuffleQueue`
          instance. If set to 0, no shuffling will be done.
        """
        super(BatchedDataLoader, self).__init__()
        self.reader = reader
        self.batch_size = batch_size
        # Per-column conversion into tensors; defaults to torch.as_tensor.
        self.transform_fn = transform_fn or torch.as_tensor

        # NOTE(review): unlike DataLoader, _batch_acc is never used in this
        # class — batching happens inside the shuffling buffer.
        self._batch_acc = []
        self.shuffling_queue_capacity = shuffling_queue_capacity
        self._in_iter = None

    def _iter_impl(self):
        """
        The Data Loader iterator stops the for-loop when reader runs out of samples.
        """
        keys = None
        if self.shuffling_queue_capacity > 0:
            # We can not know what is the reasonable number to use for the extra capacity, so we set a huge number
            # and give up on the unbound growth protection mechanism.
            # To keep the same behavior as DataLoader, we need to increase the shuffling_queue_capacity
            min_after_dequeue = self.shuffling_queue_capacity - 1
            shuffling_queue_capacity = min_after_dequeue + self.batch_size
            self._shuffling_buffer = BatchedRandomShufflingBuffer(
                shuffling_queue_capacity,
                min_after_retrieve=min_after_dequeue,
                extra_capacity=100000000,
                batch_size=self.batch_size
            )
        else:
            self._shuffling_buffer = BatchedNoopShufflingBuffer(batch_size=self.batch_size)

        for row in self.reader:
            # Default collate does not work nicely on namedtuples and treat them as lists
            # Using dict will result in the yielded structures being dicts as well
            row_as_dict = row._asdict()

            keys = row_as_dict.keys()

            # Promote some types that are incompatible with pytorch to be pytorch friendly.
            _sanitize_pytorch_types(row_as_dict)

            # Add rows to shuffling buffer, converting each column to a tensor.
            # Non-batched readers produce scalar fields, so wrap them in a list
            # to give every column a leading batch dimension.
            for k, v in row_as_dict.items():
                if not self.reader.is_batched_reader:
                    row_as_dict[k] = self.transform_fn([v])
                else:
                    row_as_dict[k] = self.transform_fn(v)
            self._shuffling_buffer.add_many(row_as_dict.values())

            # _yield_batches will emit as much batches as are allowed by the shuffling_buffer (RandomShufflingBuffer
            # will avoid underflowing below a certain number of samples to guarantee some samples decorrelation)
            for batch in self._yield_batches(keys):
                yield batch

        # Once reader can not read new rows, we might still have a bunch of rows waiting in the shuffling buffer.
        # Telling shuffling buffer that we are finished allows to deplete the buffer completely, regardless its
        # min_after_dequeue setting.
        self._shuffling_buffer.finish()

        for batch in self._yield_batches(keys):
            yield batch

    def _yield_batches(self, keys):
        """Yield every batch currently retrievable from the shuffling buffer,
        restoring the dict-of-columns shape when necessary."""
        while self._shuffling_buffer.can_retrieve():
            batch = self._shuffling_buffer.retrieve()
            if not isinstance(batch, dict):
                # This is for the case of batched reads. Here we restore back the
                # dictionary format of records
                batch = dict(zip(keys, batch))
            yield batch

    # Functions needed to treat data loader as a context manager
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Stop the reader's worker pool and wait for it to finish.
        self.reader.stop()
        self.reader.join()
aceb921231f7e1d4293c76a7397298cb5428ae2d | 4,415 | py | Python | src/read_paper.py | lubianat/wikidata_bib | 33d19d13876405819a87ef449a7375b2f750d003 | [
"Unlicense"
] | 12 | 2020-11-05T15:00:08.000Z | 2021-12-13T11:42:49.000Z | src/read_paper.py | lubianat/wikidata_bib | 33d19d13876405819a87ef449a7375b2f750d003 | [
"Unlicense"
] | 15 | 2020-11-23T20:03:46.000Z | 2021-12-17T13:10:45.000Z | src/read_paper.py | lubianat/wikidata_bib | 33d19d13876405819a87ef449a7375b2f750d003 | [
"Unlicense"
] | 1 | 2021-02-04T18:38:07.000Z | 2021-02-04T18:38:07.000Z | #!/usr/bin/python3
import sys
from helper import wikidata2df
from mdutils.mdutils import MdUtils
import pandas as pd
import urllib.parse
import os.path
import rdflib
from datetime import date, datetime
import wbib.queries
def main():
    """Log a read paper identified by the Wikidata Q-id in ``sys.argv[1]``.

    Fetches the work's metadata from Wikidata, appends it to ``read.csv``,
    writes a Markdown note skeleton under ``notes/``, records the read in
    ``read.ttl`` and regenerates the dashboard.
    """

    def get_title_df(wikidata_id):
        """Return a DataFrame with the work's label, date, DOI and full-text URL."""
        query = (
            """
   SELECT ?item ?itemLabel ?date ?doi ?url
   WHERE
   {
   VALUES ?item {wd:"""
            + wikidata_id
            + """}
   OPTIONAL {?item wdt:P577 ?date}.
   OPTIONAL {?item wdt:P356 ?doi} .
   OPTIONAL {?item wdt:P953 ?url}
   SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
   }
   """
        )
        return wikidata2df(query)

    def create_markdown(file_path, title, publication_date="None", doi="", url=""):
        """Write the Markdown note skeleton (citation, headers and links)."""
        mdFile = MdUtils(file_name=file_path, title=title)
        mdFile.new_line(" [@wikidata:" + wikidata_id + "]")
        mdFile.new_line()
        if publication_date != "None":
            mdFile.new_line("Publication date : " + str(publication_date))
            mdFile.new_line()
        mdFile.new_header(1, "Highlights")
        mdFile.new_header(1, "Comments")
        mdFile.new_header(2, "Tags")
        mdFile.new_header(1, "Links")
        mdFile.new_line(
            f" * [Scholia Profile](https://scholia.toolforge.org/work/{wikidata_id})"
        )
        mdFile.new_line(f" * [Wikidata](https://www.wikidata.org/wiki/{wikidata_id})")
        mdFile.new_line(
            " * [Author Disambiguator](https://author-disambiguator.toolforge.org/work_item_oauth.php?id="
            + wikidata_id
            + "&batch_id=&match=1&author_list_id=&doit=Get+author+links+for+work)"
        )
        if doi != "":
            mdFile.new_line(f" * [DOI](https://doi.org/{doi})")
        if url != "":
            mdFile.new_line(f" * [Full text URL]({url})")
        mdFile.new_line()
        mdFile.create_md_file()

    def update_turtle(wikidata_id):
        """Record the note link and today's read date for the work in read.ttl."""
        g = rdflib.Graph()
        g.parse("read.ttl", format="ttl")
        wb = rdflib.Namespace("https://github.com/lubianat/wikidata_bib/tree/main/")
        wbn = rdflib.Namespace(
            "https://github.com/lubianat/wikidata_bib/tree/main/notes/"
        )
        wd = rdflib.Namespace("http://www.wikidata.org/entity/")

        s = rdflib.term.URIRef(wd + wikidata_id)
        p1 = rdflib.term.URIRef(wb + "has_notes")
        o1 = rdflib.term.URIRef(wbn + wikidata_id + ".md")
        g.add((s, p1, o1))

        today = date.today()
        # "/11" is the Wikibase date precision suffix for day precision.
        d1 = today.strftime("+%Y-%m-%dT00:00:00Z/11")
        p2 = rdflib.term.URIRef(wb + "read_in")
        o2 = rdflib.term.Literal(d1)
        g.add((s, p2, o2))

        # Serialize once, after both triples are added (the intermediate write
        # in the original was immediately overwritten).
        g.serialize(destination="read.ttl", format="turtle")

    def update_csv(df):
        """Append the work to read.csv, dropping duplicate rows."""
        df_stored = pd.read_csv("read.csv")
        # pandas >= 2.0 removed DataFrame.append; use pd.concat instead.
        new_row = pd.DataFrame(
            [{"human_id": df["itemLabel"][0], "wikidata_id": df["item"][0]}]
        )
        df_stored = pd.concat([df_stored, new_row], ignore_index=True).drop_duplicates()
        print(df_stored)
        df_stored.to_csv("read.csv", index=False)

    wikidata_id = sys.argv[1]
    print("======= Getting title from Wikidata =======")
    df = get_title_df(wikidata_id)
    update_csv(df)
    title = df["itemLabel"][0]

    try:
        publication_date = df["date"][0]
        date_in_dateformat = datetime.strptime(publication_date, "%Y-%m-%dT00:00:00Z")
        publication_date = date_in_dateformat.strftime("%d of %B, %Y")
    except (KeyError, IndexError, ValueError):
        # No publication date on Wikidata (or an unexpected date format).
        publication_date = "None"

    try:
        doi = df["doi"][0]
    except (KeyError, IndexError):
        doi = ""

    try:
        text_url = df["url"][0]
    except (KeyError, IndexError):
        text_url = ""

    file_path = "notes/" + wikidata_id
    print("======= Creating markdown =======")
    create_markdown(file_path, title, publication_date, doi, text_url)
    update_turtle(wikidata_id)

    print("======= Updating dashboard =======")
    # HACK: runs the dashboard script in this interpreter via exec(); only safe
    # because the script is part of this repository, not external input.
    exec(open("src/update_dashboard.py").read())
    print("======= Done =======")
if __name__ == "__main__":
    # Validate explicitly instead of via assert: asserts are stripped under
    # ``python -O`` and ``sys.argv[1][0]`` raises IndexError on an empty arg.
    wikidata_id = sys.argv[1]
    if not wikidata_id.startswith("Q"):
        raise SystemExit("Expected a Wikidata Q-id (e.g. Q42), got: " + wikidata_id)
    filename = "notes/" + wikidata_id + ".md"
    if os.path.isfile(filename):
        print("Article has already been read")
    else:
        main()
| 29.630872 | 106 | 0.579388 |
aceb93060e72182e6a2231f300dc1e0363ffef36 | 5,413 | py | Python | restkit/contrib/wsgi_proxy.py | gelnior/restkit | 6a8507afde15d576318ce24134016511b5e14331 | [
"MIT"
] | null | null | null | restkit/contrib/wsgi_proxy.py | gelnior/restkit | 6a8507afde15d576318ce24134016511b5e14331 | [
"MIT"
] | null | null | null | restkit/contrib/wsgi_proxy.py | gelnior/restkit | 6a8507afde15d576318ce24134016511b5e14331 | [
"MIT"
] | 1 | 2019-04-28T11:04:14.000Z | 2019-04-28T11:04:14.000Z | # -*- coding: utf-8 -
#
# This file is part of restkit released under the MIT license.
# See the NOTICE for more information.
import urllib.parse
# Both branches of the old py2/py3 fallback import the same module after
# the 2to3 conversion, so the try/except is dead weight.
from io import StringIO
from restkit.client import Client
from restkit.conn import MAX_BODY
from restkit.util import rewrite_location
# HTTP methods the proxy forwards; anything else is answered with 403.
ALLOWED_METHODS = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE']

# 64 KiB buffer size (not referenced elsewhere in this module).
BLOCK_SIZE = 4096 * 16

# Message raised when WebOb has already consumed the request body
# (it then reports a Content-Length of -1).
WEBOB_ERROR = ("Content-Length is set to -1. This usually mean that WebOb has "
               "already parsed the content body. You should set the Content-Length "
               "header to the correct value before forwarding your request to the "
               "proxy: ``req.content_length = str(len(req.body));`` "
               "req.get_response(proxy)")
class Proxy(object):
    """A WSGI proxy which redirects the request to SERVER_NAME:SERVER_PORT
    and sends the HTTP_HOST header."""

    def __init__(self, manager=None, allowed_methods=ALLOWED_METHODS,
                 strip_script_name=True, **kwargs):
        self.allowed_methods = allowed_methods
        self.strip_script_name = strip_script_name
        self.client = Client(**kwargs)

    def extract_uri(self, environ):
        """Build the ``scheme://host:port`` base URI for the outgoing request."""
        port = None
        scheme = environ['wsgi.url_scheme']
        if 'SERVER_NAME' in environ:
            host = environ['SERVER_NAME']
        else:
            host = environ['HTTP_HOST']
        if ':' in host:
            host, port = host.split(':')
        if not port:
            if 'SERVER_PORT' in environ:
                port = environ['SERVER_PORT']
            else:
                port = '443' if scheme == 'https' else '80'
        return '%s://%s:%s' % (scheme, host, port)

    def __call__(self, environ, start_response):
        method = environ['REQUEST_METHOD']
        if method not in self.allowed_methods:
            # PEP 3333: headers must be a list of tuples and the body an
            # iterable of *bytes* (the original passed () and a str).
            start_response('403 Forbidden', [])
            return [b'']

        # Rebuild the request path, optionally dropping the mount prefix.
        if self.strip_script_name:
            path_info = ''
        else:
            path_info = environ['SCRIPT_NAME']
        path_info += environ['PATH_INFO']

        query_string = environ['QUERY_STRING']
        if query_string:
            path_info += '?' + query_string

        host_uri = self.extract_uri(environ)
        uri = host_uri + path_info

        # Translate WSGI HTTP_* environ keys back into header names.
        new_headers = {}
        for k, v in environ.items():
            if k.startswith('HTTP_'):
                k = k[5:].replace('_', '-').title()
                new_headers[k] = v

        ctype = environ.get("CONTENT_TYPE")
        if ctype:  # truthiness already implies "is not None"
            new_headers['Content-Type'] = ctype

        clen = environ.get('CONTENT_LENGTH')
        # Per PEP 3333 request headers live in the environ under an HTTP_
        # prefix; the bare 'transfer-encoding' key the original read can
        # never be present there.
        te = environ.get('HTTP_TRANSFER_ENCODING', '').lower()
        if not clen and te != 'chunked':
            new_headers['transfer-encoding'] = 'chunked'
        elif clen:
            new_headers['Content-Length'] = clen

        if new_headers.get('Content-Length', '0') == '-1':
            raise ValueError(WEBOB_ERROR)

        response = self.client.request(uri, method, body=environ['wsgi.input'],
                                       headers=new_headers)

        if 'location' in response:
            if self.strip_script_name:
                prefix_path = environ['SCRIPT_NAME']
            else:
                # Previously ``prefix_path`` was referenced unbound in this
                # branch (NameError); None matches rewrite_location's
                # no-prefix behavior — confirm against restkit.util.
                prefix_path = None
            new_location = rewrite_location(host_uri, response.location,
                                            prefix_path=prefix_path)
            headers = []
            for k, v in response.headerslist:
                if k.lower() == 'location':
                    v = new_location
                headers.append((k, v))
        else:
            headers = response.headerslist

        start_response(response.status, headers)

        if method == "HEAD":
            # HEAD responses carry no body; an empty iterable suffices.
            return []
        return response.tee()
class TransparentProxy(Proxy):
    """A proxy which derives its target from the HTTP_HOST environ value."""

    def extract_uri(self, environ):
        """Build ``scheme://host:port`` from HTTP_HOST, defaulting the port."""
        port = None
        scheme = environ['wsgi.url_scheme']
        host = environ['HTTP_HOST']
        if ':' in host:
            host, port = host.split(':')
        if not port:
            # Conditional expression instead of the fragile ``and/or`` trick.
            port = '443' if scheme == 'https' else '80'
        return '%s://%s:%s' % (scheme, host, port)
class HostProxy(Proxy):
    """A proxy which forwards every request to one fixed upstream URI."""

    def __init__(self, uri, **kwargs):
        super(HostProxy, self).__init__(**kwargs)
        self.uri = uri.rstrip('/')
        parsed = urllib.parse.urlparse(self.uri)
        self.scheme = parsed[0]
        self.net_loc = parsed[1]

    def extract_uri(self, environ):
        """Force HTTP_HOST to the upstream netloc and return the fixed URI."""
        environ['HTTP_HOST'] = self.net_loc
        return self.uri
def get_config(local_config):
    """Translate a paste-deploy ``local_config`` dict into proxy kwargs."""
    config = {}
    methods = local_config.get('allowed_methods')
    if methods:
        # Paste gives a whitespace-separated string; normalize to upper-case.
        config['allowed_methods'] = [m.upper() for m in methods.split()]
    if local_config.get('strip_script_name', 'true').lower() in ('false', '0'):
        config['strip_script_name'] = False
    config['max_connections'] = int(local_config.get('max_connections', '5'))
    return config
def make_proxy(global_config, **local_config):
    """TransparentProxy entry_point"""
    return TransparentProxy(**get_config(local_config))
def make_host_proxy(global_config, uri=None, **local_config):
    """HostProxy entry_point"""
    # The previous code crashed with an opaque AttributeError when the
    # mandatory ``uri`` setting was missing; fail with a clear message.
    if uri is None:
        raise ValueError("make_host_proxy requires a 'uri' setting")
    uri = uri.rstrip('/')
    config = get_config(local_config)
    return HostProxy(uri, **config)
| 31.47093 | 80 | 0.599667 |
aceb9334be90f1d3145b58cfe988f537459950c8 | 845 | py | Python | recipes/Python/361527_Emit_Qt_signals_nQObject/recipe-361527.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/361527_Emit_Qt_signals_nQObject/recipe-361527.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/361527_Emit_Qt_signals_nQObject/recipe-361527.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | import qt
import weakref
_emitterCache = weakref.WeakKeyDictionary()
def emitter(ob):
"""Returns a QObject surrogate for *ob*, to use in Qt signaling.
This function enables you to connect to and emit signals from (almost)
any python object with having to subclass QObject.
>>> class A(object):
... def notify(self, *args):
... QObject.emit(emitter(self), PYSIGNAL('test'), args)
...
>>> ob = A()
>>> def myhandler(*args): print 'got', args
...
>>> QObject.connect(emitter(ob), PYSIGNAL('test'), myhandler)
... True
>>> ob.notify('hello')
got ('hello',)
>>> QObject.emit(emitter(ob), PYSIGNAL('test'), (42, 'abc',))
got (42, 'abc')
"""
if ob not in _emitterCache:
_emitterCache[ob] = qt.QObject()
return _emitterCache[ob]
| 27.258065 | 74 | 0.585799 |
aceb9399ae33f498b050dfa3c2767fcb0c5af232 | 364 | py | Python | molecool/tests/test_molecool.py | JoaoRodrigues/mfb_2020 | 95090e9cc0dfe1307418fbcaa527ee5c42684211 | [
"BSD-3-Clause"
] | 1 | 2020-02-05T19:24:08.000Z | 2020-02-05T19:24:08.000Z | molecool/tests/test_molecool.py | JoaoRodrigues/mfb_2020 | 95090e9cc0dfe1307418fbcaa527ee5c42684211 | [
"BSD-3-Clause"
] | null | null | null | molecool/tests/test_molecool.py | JoaoRodrigues/mfb_2020 | 95090e9cc0dfe1307418fbcaa527ee5c42684211 | [
"BSD-3-Clause"
] | 1 | 2020-02-05T19:24:38.000Z | 2020-02-05T19:24:38.000Z | """
Unit and regression test for the molecool package.
"""
# Import package, test suite, and other packages as needed
# import molecool
# import pytest
import sys
def test_molecool_imported():
"""Sample test, will always pass so long as import statement worked"""
assert "molecool" in sys.modules
def test_are_waffles_great():
    # Trivially-true placeholder so the suite always has one passing test.
    waffles_are_great = True
    assert waffles_are_great is True
aceb9431e6ca789e8b7a1897931de1a38e9b0c81 | 16,604 | py | Python | pygears/core/gear.py | Risto97/pygears | 19393e85101a16762cb3bbbf3010946ef69217f2 | [
"MIT"
] | null | null | null | pygears/core/gear.py | Risto97/pygears | 19393e85101a16762cb3bbbf3010946ef69217f2 | [
"MIT"
] | null | null | null | pygears/core/gear.py | Risto97/pygears | 19393e85101a16762cb3bbbf3010946ef69217f2 | [
"MIT"
] | null | null | null | import copy
import inspect
import functools
import sys
from pygears.conf import PluginBase, bind, registry, core_log, safe_bind
from pygears.typing import Any
from .hier_node import NamedHierNode
from .infer_ftypes import TypeMatchError, infer_ftypes, type_is_specified
from .intf import Intf
from .partial import Partial
from .port import InPort, OutPort
from .util import doublewrap
code_map = {}
class TooManyArguments(Exception):
    """Raised when a gear is invoked with an argument count its signature
    cannot accept."""
    pass


class GearTypeNotSpecified(Exception):
    """Raised when a gear's output type cannot be fully resolved."""
    pass


class GearArgsNotSpecified(Exception):
    """Raised when a gear input argument is unresolved or has an
    unresolved type."""
    pass
def check_arg_num(argnames, varargsname, args):
    """Verify that *args* matches the arity implied by *argnames*.

    With no varargs the count must match exactly; with varargs only a
    shortfall is an error.
    """
    too_few = len(args) < len(argnames)
    too_many = (not varargsname) and (len(args) > len(argnames))
    if too_few or too_many:
        balance = "few" if too_few else "many"
        raise TooManyArguments(f"Too {balance} arguments provided.")
def check_arg_specified(args):
    """Resolve gear positional arguments into interfaces.

    Non-``Intf`` values are lifted into ``const`` gears so every argument
    becomes an interface with a fully specified type.

    Returns a tuple ``(resolved_args, const_args_gears)`` where the second
    element lists the ``const`` gear instances created along the way.

    Raises:
        GearArgsNotSpecified: if an argument is an unresolved ``Partial``,
            cannot be wrapped as a constant, or has an unresolved type.
    """
    args_res = []
    const_args_gears = []
    for i, a in enumerate(args):
        if isinstance(a, Partial):
            raise GearArgsNotSpecified(f"Unresolved input arg {i}")
        if not isinstance(a, Intf):
            # Local import, presumably to avoid a circular dependency with
            # pygears.common — confirm.
            from pygears.common import const
            try:
                a = const(val=a)
                # The const gear just instantiated is the newest child of
                # the current module.
                const_args_gears.append(module().child[-1])
            except GearTypeNotSpecified:
                raise GearArgsNotSpecified(f"Unresolved input arg {i}")
        args_res.append(a)
        if not type_is_specified(a.dtype):
            raise GearArgsNotSpecified(
                f"Input arg {i} has unresolved type {repr(a.dtype)}")
    return tuple(args_res), const_args_gears
def get_obj_var_name(frame, obj):
    """Return the name *obj* is bound to in *frame*'s locals, or None.

    Identity (``is``) comparison is used, so distinct-but-equal objects do
    not match. The original for/else ended in a bare ``None`` expression
    that evaluated nothing; replaced with an explicit return.
    """
    for var_name, var_obj in frame.f_locals.items():
        if obj is var_obj:
            return var_name
    return None
# def assign_intf_var_name(intf):
# import os
# if getattr(intf, 'var_name', None) is not None:
# return
# for frame, *_ in reversed(inspect.stack()):
# is_internal = frame.f_code.co_filename.startswith(
# os.path.dirname(__file__))
# is_boltons = 'boltons' in frame.f_code.co_filename
# if not is_internal and not is_boltons:
# var_name = get_obj_var_name(frame, intf)
# if var_name is not None:
# print(f'{intf}: {var_name} in {frame.f_code.co_filename}')
# intf.var_name = var_name
# return
# else:
# intf.var_name = None
def find_current_gear_frame():
    """Return the stack frame executing the innermost gear function.

    Returns None when no gear is being elaborated or the frame is not on
    the current call stack.
    """
    # The module already imports ``inspect`` at the top level; the local
    # re-import the original did was redundant and has been removed.
    code_map = registry('gear/code_map')
    if not code_map:
        return None

    for frame, *_ in inspect.stack():
        # Match by code-object identity against the innermost gear.
        if frame.f_code is code_map[-1].func.__code__:
            return frame
    return None
class create_hier:
    """Context manager making *gear* the current hierarchy scope.

    On exit the previous scope (the gear's parent) is restored; if the
    body raised, the partially built gear is cleared.
    """

    def __init__(self, gear):
        self.gear = gear

    def __enter__(self):
        bind('gear/current_module', self.gear)
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        bind('gear/current_module', self.gear.parent)
        if exception_type is not None:
            self.gear.clear()
class Gear(NamedHierNode):
    """A node of the gear hierarchy created for every gear invocation.

    Wraps the user's gear function: checks and resolves its arguments,
    infers parameter and port types, and connects the input and output
    interfaces. ``__new__`` returns the resolved output interface(s), not
    the Gear instance itself.
    """

    def __new__(cls, func, meta_kwds, *args, name=None, __base__=None, **kwds):
        # Fall back to the wrapped function's (or base definition's) name.
        if name is None:
            if __base__ is None:
                name = func.__name__
            else:
                name = __base__.__name__

        kwds_comb = kwds.copy()
        kwds_comb.update(meta_kwds)

        gear = super().__new__(cls)

        try:
            gear.__init__(func, *args, name=name, **kwds_comb)
        except Exception as e:
            # Detach the partially constructed node before re-raising.
            gear.remove()
            raise e

        # Alternatives may be guarded by an 'enablement' expression; an
        # alternative whose guard failed is discarded with a TypeMatchError
        # so the dispatcher can try the next one.
        if not gear.params.pop('enablement'):
            gear.remove()
            raise TypeMatchError(
                f'Enablement condition failed for "{gear.name}" alternative'
                f' "{gear.definition.__module__}.{gear.definition.__name__}": '
                f'{meta_kwds["enablement"]}')

        return gear.resolve()

    def __init__(self, func, *args, name=None, intfs=None, outnames=[],
                 **kwds):
        # NOTE: the mutable [] default is safe only because it is copied
        # below and never mutated in place.
        super().__init__(name, registry('gear/current_module'))

        self.in_ports = []
        self.out_ports = []
        self.const_args_gears = []

        self.func = func
        self.__doc__ = func.__doc__

        self.outnames = outnames.copy()
        if intfs is None:
            self.fix_intfs = []
        elif isinstance(intfs, Intf):
            self.fix_intfs = [intfs]
        else:
            self.fix_intfs = intfs.copy()

        self.args = args
        self.resolved = False

        argspec = inspect.getfullargspec(func)
        self.argnames = argspec.args
        self.varargsname = argspec.varargs
        self.annotations = argspec.annotations
        self.kwdnames = argspec.kwonlyargs

        try:
            check_arg_num(self.argnames, self.varargsname, self.args)
        except TooManyArguments as e:
            # BUGFIX: the original constructed the re-wrapped exception but
            # never raised it, silently swallowing the arity error (compare
            # the GearArgsNotSpecified branch below, which does raise).
            raise TooManyArguments(f'{e}, for the module {self.name}')

        try:
            self.args, self.const_args_gears = check_arg_specified(self.args)
        except GearArgsNotSpecified as e:
            raise GearArgsNotSpecified(
                f'{str(e)}, when instantiating {self.name}')

        # Parameter resolution order: keyword-only defaults, then explicit
        # keyword arguments, then per-argument type annotations.
        self.params = {}
        if isinstance(argspec.kwonlydefaults, dict):
            self.params.update(argspec.kwonlydefaults)
        self.params.update(kwds)

        self.params.update({
            a: (self.annotations[a] if a in self.annotations else Any)
            for a in self.argnames
        })

        self._handle_return_annot()
        self._expand_varargs()

        self.in_ports = [
            InPort(self, i, name) for i, name in enumerate(self.argnames)
        ]

        for i, a in enumerate(self.args):
            try:
                a.connect(self.in_ports[i])
            except AttributeError:
                raise GearArgsNotSpecified(
                    f"Input arg {i} for module {self.name} was not"
                    f" resolved to interface, instead {repr(a)} received")

        self.infer_params()

    def _handle_return_annot(self):
        """Normalize the 'return' annotation into self.params['return'].

        A dict annotation also supplies the output port names.
        """
        if "return" in self.annotations:
            ret_anot = self.annotations["return"]
            if isinstance(ret_anot, dict):
                self.outnames = tuple(ret_anot.keys())
                self.params['return'] = tuple(ret_anot.values())
            else:
                self.params['return'] = ret_anot
        else:
            self.params['return'] = None

    def _expand_varargs(self):
        """Materialize *varargs into per-position named params and args."""
        if self.varargsname:
            vararg_type_list = []
            if self.varargsname in self.annotations:
                vararg_type = self.annotations[self.varargsname]
            else:
                vararg_type = Any
            # Append the types of the self.varargsname
            for i, a in enumerate(self.args[len(self.argnames):]):
                if isinstance(vararg_type, str):
                    # If vararg_type is a template string, it can be made
                    # dependent on the arguments position
                    type_tmpl_i = vararg_type.format(i).encode()
                else:
                    # Vararg is not a template and should be passed as is
                    type_tmpl_i = vararg_type
                argname = f'{self.varargsname}{i}'

                vararg_type_list.append(argname)
                self.params[argname] = type_tmpl_i
                self.argnames.append(argname)

            self.params[self.varargsname] = (
                f'({", ".join(vararg_type_list)}, )'.encode())

    def remove(self):
        """Disconnect all ports and detach this node from the hierarchy."""
        for p in self.in_ports:
            if p.producer is not None:
                try:
                    p.producer.disconnect(p)
                except ValueError:
                    # Port was never registered with the producer.
                    pass

        for p in self.out_ports:
            if p.producer is not None:
                p.producer.disconnect(p)

        # Constants created on our behalf go away with us.
        for g in self.const_args_gears:
            g.remove()

        try:
            super().remove()
        except ValueError:
            pass

    @property
    def definition(self):
        """The Partial gear definition this instance was built from."""
        return self.params['definition']

    @property
    def dout(self):
        """Producer interface(s) of the output port(s)."""
        if len(self.intfs) > 1:
            return tuple(p.producer for p in self.out_ports)
        else:
            return self.out_ports[0].producer

    @property
    def tout(self):
        """Output data type(s)."""
        if len(self.intfs) > 1:
            return tuple(i.dtype for i in self.intfs)
        else:
            return self.intfs[0].dtype

    def set_ftype(self, ft, i):
        self.dtype_templates[i] = ft

    def is_specified(self):
        """True when every output interface has a fully resolved type."""
        for i in self.intfs:
            if not type_is_specified(i.dtype):
                return False
        else:
            return True

    def get_arg_types(self):
        return tuple(a.dtype for a in self.args)

    def get_type(self):
        """Output type: tuple for multiple outputs, None for none."""
        if len(self.intfs) > 1:
            return tuple(i.dtype for i in self.intfs)
        elif len(self.intfs) == 1:
            return self.intfs[0].dtype
        else:
            return None

    def infer_params(self):
        """Run template type inference over params given the arg types."""
        arg_types = {
            name: arg.dtype
            for name, arg in zip(self.argnames, self.args)
        }

        try:
            self.params = infer_ftypes(
                self.params,
                arg_types,
                namespace=self.func.__globals__,
                allow_incomplete=False)
        except TypeMatchError as e:
            raise TypeMatchError(f'{str(e)}, of the module "{self.name}"')

    def resolve(self):
        """Elaborate the gear and wire its interfaces.

        Returns the free output interface(s) of the gear, or None when the
        gear has no outputs (or all outputs were fixed by the caller).
        """
        for port in self.in_ports:
            Intf(port.dtype).source(port)

        is_async_gen = bool(
            self.func.__code__.co_flags & inspect.CO_ASYNC_GENERATOR)

        # Only plain functions (hierarchical gears) are invoked during
        # elaboration; coroutines/generators describe simulation behavior.
        func_ret = tuple()
        if (not inspect.iscoroutinefunction(self.func)
                and not inspect.isgeneratorfunction(self.func)
                and not is_async_gen):
            func_ret = self.resolve_func()

        out_dtype = tuple()
        if func_ret:
            out_dtype = tuple(r.dtype for r in func_ret)
        elif self.params['return'] is not None:
            if not isinstance(self.params['return'], tuple):
                out_dtype = (self.params['return'], )
            else:
                out_dtype = self.params['return']

        # Auto-name any outputs that were not named explicitly, preferring
        # the variable name the returned interface was bound to.
        dflt_dout_name = registry('gear/naming/default_out_name')
        for i in range(len(self.outnames), len(out_dtype)):
            if func_ret and hasattr(func_ret[i], 'var_name'):
                self.outnames.append(func_ret[i].var_name)
            else:
                self.outnames.append(
                    dflt_dout_name
                    if len(out_dtype) == 1 else f'{dflt_dout_name}{i}')

        self.out_ports = [
            OutPort(self, i, name) for i, name in enumerate(self.outnames)
        ]

        # Connect internal interfaces
        if func_ret:
            for i, r in enumerate(func_ret):
                r.connect(self.out_ports[i])
        else:
            for dtype, port in zip(out_dtype, self.out_ports):
                Intf(dtype).connect(port)

        # Connect output interfaces: caller-fixed interfaces are used where
        # given, fresh ones are created (and returned) otherwise.
        self.intfs = []
        out_intfs = []
        if isinstance(self.fix_intfs, dict):
            for i, (name, dt) in enumerate(zip(self.outnames, out_dtype)):
                if name in self.fix_intfs:
                    intf = self.fix_intfs[name]
                else:
                    intf = Intf(dt)
                    out_intfs.append(intf)
                self.intfs.append(intf)
        elif self.fix_intfs:
            self.intfs = self.fix_intfs
        else:
            self.intfs = [Intf(dt) for dt in out_dtype]
            out_intfs = self.intfs

        assert len(self.intfs) == len(out_dtype)

        for intf, port in zip(self.intfs, self.out_ports):
            intf.source(port)

        for name, dtype in zip(self.outnames, out_dtype):
            self.params[name] = dtype

        if not self.is_specified():
            raise GearTypeNotSpecified(
                f"Output type of the module {self.name}"
                f" could not be resolved, and resulted in {repr(out_dtype)}")

        # Warn about child outputs that ended up connected to nothing.
        for c in self.child:
            for p in c.out_ports:
                intf = p.consumer
                if intf not in self.intfs and not intf.consumers:
                    core_log().warning(f'{c.name}.{p.basename} left dangling.')

        if len(out_intfs) > 1:
            return tuple(out_intfs)
        elif len(out_intfs) == 1:
            return out_intfs[0]
        else:
            return None

    def resolve_func(self):
        """Invoke the gear function inside this gear's hierarchy scope.

        A profiling hook captures the function's locals on return so the
        interfaces created inside can be named after their variables.
        Returns the function result normalized to a tuple.
        """
        with create_hier(self):
            func_args = [p.consumer for p in self.in_ports]
            func_kwds = {
                k: self.params[k]
                for k in self.kwdnames if k in self.params
            }
            self.func_locals = {}

            code_map = registry('gear/code_map')
            code_map.append(self)

            def tracer(frame, event, arg):
                if event == 'return':
                    for cm in code_map:
                        if frame.f_code is cm.func.__code__:
                            cm.func_locals = frame.f_locals.copy()

            # tracer is activated on next call, return or exception
            if registry('gear/current_module').parent == registry(
                    'gear/hier_root'):
                sys.setprofile(tracer)

            ret = self.func(*func_args, **func_kwds)

            code_map.pop()

            if registry('gear/current_module').parent == registry(
                    'gear/hier_root'):
                sys.setprofile(None)

            # Propagate local variable names onto the interfaces they hold.
            for name, val in self.func_locals.items():
                if isinstance(val, Intf):
                    val.var_name = name

            # if not any([isinstance(c, Gear) for c in self.child]):
            #     self.clear()

            if ret is None:
                ret = tuple()
            elif not isinstance(ret, tuple):
                ret = (ret, )

            return ret
def alternative(*base_gear_defs):
    """Decorator factory registering a gear as an alternative of others.

    The decorated gear definition's function is appended to the
    ``alternatives`` list of every base definition's function.
    """
    def gear_decorator(gear_def):
        for base in base_gear_defs:
            alt_list = getattr(base.func, 'alternatives', [])
            alt_list.append(gear_def.func)
            base.func.alternatives = alt_list
        return gear_def

    return gear_decorator
@doublewrap
def gear(func, gear_cls=Gear, **meta_kwds):
    """Decorator turning a plain function into a gear definition.

    Builds a wrapper function (via FunctionBuilder) whose body instantiates
    *gear_cls* with the original function and the collected meta
    parameters, and returns that wrapper as a ``Partial``.
    """
    from pygears.core.funcutils import FunctionBuilder
    fb = FunctionBuilder.from_func(func)
    fb.filename = '<string>'

    # Add defaults from GearExtraParams registry
    for k, v in registry('gear/params/extra').items():
        if k not in fb.kwonlyargs:
            fb.kwonlyargs.append(k)
            fb.kwonlydefaults[k] = copy.copy(v)

    fb.body = (f"return gear_cls(gear_func, meta_kwds, "
               f"{fb.get_invocation_str()})")

    # Add defaults from GearMetaParams registry
    for k, v in registry('gear/params/meta').items():
        if k not in meta_kwds:
            meta_kwds[k] = copy.copy(v)

    execdict = {
        'gear_cls': gear_cls,
        'meta_kwds': meta_kwds,
        'gear_func': func
    }
    execdict.update(func.__globals__)
    execdict_keys = list(execdict.keys())
    execdict_values = list(execdict.values())

    def formatannotation(annotation, base_module=None):
        # Render an annotation as the name it is bound to in execdict,
        # falling back to its repr.
        try:
            return execdict_keys[execdict_values.index(annotation)]
        except ValueError:
            # NOTE(review): ``isinstance(str, bytes)`` is always False, so
            # this always takes the first branch. Looks like a py2->py3
            # conversion artifact of ``isinstance(annotation, str)`` —
            # confirm intended behavior before changing.
            if not isinstance(str, bytes):
                return '"b' + repr(annotation) + '"'
            else:
                return annotation

    gear_func = fb.get_func(
        execdict=execdict, formatannotation=formatannotation)

    functools.update_wrapper(gear_func, func)

    p = Partial(gear_func)
    meta_kwds['definition'] = p
    p.meta_kwds = meta_kwds

    return p
def module():
    """Return the gear currently being elaborated (the active scope)."""
    return registry('gear/current_module')
class GearPlugin(PluginBase):
    """Registers the gear registry defaults and resets them between runs."""

    @classmethod
    def bind(cls):
        # Seed every gear-related registry entry with its default value.
        safe_bind('gear/naming', {'default_out_name': 'dout'})
        safe_bind('gear/hier_root', NamedHierNode(''))
        safe_bind('gear/current_module', cls.registry['gear']['hier_root'])
        safe_bind('gear/code_map', [])
        safe_bind('gear/params/meta', {'enablement': True})
        safe_bind('gear/params/extra', {
            'name': None,
            'intfs': [],
            'outnames': [],
            '__base__': None
        })

    @classmethod
    def reset(cls):
        # Recreate the hierarchy root and clear per-run elaboration state.
        safe_bind('gear/hier_root', NamedHierNode(''))
        safe_bind('gear/current_module', cls.registry['gear']['hier_root'])
        safe_bind('gear/code_map', [])
| 30.466055 | 79 | 0.56378 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.