| hexsha | size | ext | lang | max_stars_repo_path | max_stars_repo_name | max_stars_repo_head_hexsha | max_stars_repo_licenses | max_stars_count | max_stars_repo_stars_event_min_datetime | max_stars_repo_stars_event_max_datetime | max_issues_repo_path | max_issues_repo_name | max_issues_repo_head_hexsha | max_issues_repo_licenses | max_issues_count | max_issues_repo_issues_event_min_datetime | max_issues_repo_issues_event_max_datetime | max_forks_repo_path | max_forks_repo_name | max_forks_repo_head_hexsha | max_forks_repo_licenses | max_forks_count | max_forks_repo_forks_event_min_datetime | max_forks_repo_forks_event_max_datetime | content | avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
31b1491cb1218b11a46facdc384274b8e0baf48c
| 860
|
py
|
Python
|
test/unit/rules/resources/dynamodb/test_billing_mode.py
|
tomislacker/cfn-python-lint
|
f209ddfef9bcc1a005adfebcfcc16220b18deddb
|
[
"MIT-0"
] | 1
|
2020-05-08T20:12:31.000Z
|
2020-05-08T20:12:31.000Z
|
test/unit/rules/resources/dynamodb/test_billing_mode.py
|
tomislacker/cfn-python-lint
|
f209ddfef9bcc1a005adfebcfcc16220b18deddb
|
[
"MIT-0"
] | null | null | null |
test/unit/rules/resources/dynamodb/test_billing_mode.py
|
tomislacker/cfn-python-lint
|
f209ddfef9bcc1a005adfebcfcc16220b18deddb
|
[
"MIT-0"
] | 1
|
2020-12-01T14:54:28.000Z
|
2020-12-01T14:54:28.000Z
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from test.unit.rules import BaseRuleTestCase
from cfnlint.rules.resources.dynamodb.BillingMode import BillingMode # pylint: disable=E0401
class TestBillingMode(BaseRuleTestCase):
"""Test BillingMode"""
def setUp(self):
"""Setup"""
super(TestBillingMode, self).setUp()
self.collection.register(BillingMode())
self.success_templates = [
'test/fixtures/templates/good/resources/dynamodb/billing_mode.yaml'
]
def test_file_positive(self):
"""Test Positive"""
self.helper_file_positive()
def test_file_negative_alias(self):
"""Test failure"""
self.helper_file_negative(
'test/fixtures/templates/bad/resources/dynamodb/billing_mode.yaml', 3)
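# A minimal sketch of how a single test module like this is typically run locally,
# assuming pytest (or the stdlib unittest runner) is available in the environment:
#
#   pytest test/unit/rules/resources/dynamodb/test_billing_mode.py -q
#   python -m unittest test.unit.rules.resources.dynamodb.test_billing_mode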
| 30.714286
| 93
| 0.686047
|
8bd46bb3dd76883746a69195c7e016be18a7828a
| 36,235
|
py
|
Python
|
src/urllib3/connectionpool.py
|
perfectykills/urllib3-pyqt5
|
5d01683120051e78ed46467c1f24649970e82c0a
|
[
"MIT"
] | null | null | null |
src/urllib3/connectionpool.py
|
perfectykills/urllib3-pyqt5
|
5d01683120051e78ed46467c1f24649970e82c0a
|
[
"MIT"
] | null | null | null |
src/urllib3/connectionpool.py
|
perfectykills/urllib3-pyqt5
|
5d01683120051e78ed46467c1f24649970e82c0a
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import errno
import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout
import socket
from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HeaderParsingError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
NewConnectionError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .packages.six.moves import queue
from .packages.rfc3986.normalizers import normalize_host
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection,
HTTPSConnection,
VerifiedHTTPSConnection,
HTTPException,
BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.request import set_file_position
from .util.response import assert_header_parsing
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import get_host, Url, NORMALIZABLE_SCHEMES
from .util.queue import LifoQueue
from PyQt5.QtCore import QObject
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = QObject()
# Pool objects
class ConnectionPool(QObject):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
if not host:
raise LocationValueError("No host specified.")
self.host = _normalize_host(host, scheme=self.scheme)
self._proxy_host = host.lower()
self.port = port
def __str__(self):
return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# Return False to re-raise any potential exceptions
return False
def close(self):
"""
Close all pooled connections and disable the pool.
"""
pass
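    # A minimal usage sketch, not taken from the project's own docs ("example.com" is
    # only an illustrative host): because __enter__/__exit__ are defined above, a pool
    # can be used as a context manager so close() runs even if the request raises.
    #
    #   with HTTPConnectionPool("example.com", maxsize=2) as pool:
    #       response = pool.request("GET", "/")
    #       print(response.status)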
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
        been run, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
        :class:`urllib3.connectionpool.ProxyManager`
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
        instead, see :class:`urllib3.connectionpool.ProxyManager`
:param \\**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
"""
scheme = "http"
ConnectionCls = HTTPConnection
ResponseCls = HTTPResponse
def __init__(
self,
host,
port=None,
strict=False,
timeout=Timeout.DEFAULT_TIMEOUT,
maxsize=1,
block=False,
headers=None,
retries=None,
_proxy=None,
_proxy_headers=None,
**conn_kw
):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault("socket_options", [])
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.debug(
"Starting new HTTP connection (%d): %s:%s",
self.num_connections,
self.host,
self.port or "80",
)
conn = self.ConnectionCls(
host=self.host,
port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict,
**self.conn_kw
)
return conn
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except queue.Empty:
if self.block:
raise EmptyPoolError(
self,
"Pool reached maximum size and no more " "connections are allowed.",
)
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.debug("Resetting dropped connection: %s", self.host)
conn.close()
if getattr(conn, "auto_open", 1) == 0:
# This is a proxied connection that has been mutated by
# httplib._tunnel() and cannot be reused (since it would
# attempt to bypass the proxy)
conn = None
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except queue.Full:
# This should never happen if self.block == True
log.warning("Connection pool is full, discarding connection: %s", self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
pass
def _prepare_proxy(self, conn):
# Nothing to do for HTTP connections.
pass
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
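    # A small illustrative note (an assumption about intended usage, with example
    # numbers): callers may pass either a plain number or a Timeout object, e.g.
    #
    #   pool.urlopen("GET", "/", timeout=2.5)
    #   pool.urlopen("GET", "/", timeout=Timeout(connect=2.0, read=7.0))
    #
    # Both forms end up here and are normalized to a fresh Timeout clone.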
def _raise_timeout(self, err, url, timeout_value):
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
# See the above comment about EAGAIN in Python 3. In Python 2 we have
# to specifically catch it and throw the timeout error
if hasattr(err, "errno") and err.errno in _blocking_errnos:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if "timed out" in str(err) or "did not complete (read)" in str(
err
): # Python < 2.7.4
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
def _make_request(
self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
):
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
# Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
if chunked:
conn.request_chunked(method, url, **httplib_request_kw)
else:
conn.request(method, url, **httplib_request_kw)
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if getattr(conn, "sock", None):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout
)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try:
# Python 2.7, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError:
# Python 3
try:
httplib_response = conn.getresponse()
except Exception as e:
# Remove the TypeError from the exception chain in Python 3;
# otherwise it looks like a programming error was the cause.
six.raise_from(e, None)
except (SocketTimeout, BaseSSLError, SocketError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
log.debug(
'%s://%s:%s "%s %s %s" %s %s',
self.scheme,
self.host,
self.port,
method,
url,
http_version,
httplib_response.status,
httplib_response.length,
)
try:
assert_header_parsing(httplib_response.msg)
except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3
log.warning(
"Failed to parse headers (url=%s): %s",
self._absolute_url(url),
hpe,
exc_info=True,
)
return httplib_response
def _absolute_url(self, path):
return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
def close(self):
"""
Close all pooled connections and disable the pool.
"""
if self.pool is None:
return
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except queue.Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith("/"):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
if host is not None:
host = _normalize_host(host, scheme=scheme)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(
self,
method,
url,
body=None,
headers=None,
retries=None,
redirect=True,
assert_same_host=True,
timeout=_Default,
pool_timeout=None,
release_conn=None,
chunked=False,
body_pos=None,
**response_kw
):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param int body_pos:
Position to seek to in file-like body in the event of a retry or
redirect. Typically this won't need to be set because urllib3 will
auto-populate the value when needed.
:param \\**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = response_kw.get("preload_content", True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
conn = None
# Track whether `conn` needs to be released before
# returning/raising/recursing. Update this variable if necessary, and
# leave `release_conn` constant throughout the function. That way, if
# the function recurses, the original value of `release_conn` will be
# passed down into the recursive call, and its value will be respected.
#
# See issue #651 [1] for details.
#
# [1] <https://github.com/shazow/urllib3/issues/651>
release_this_conn = release_conn
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == "http":
headers = headers.copy()
headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
# Keep track of whether we cleanly exited the except block. This
# ensures we do proper cleanup in finally.
clean_exit = False
# Rewind body position, if needed. Record current position
# for future rewinds in the event of a redirect/retry.
body_pos = set_file_position(body, body_pos)
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout)
conn.timeout = timeout_obj.connect_timeout
is_new_proxy_conn = self.proxy is not None and not getattr(
conn, "sock", None
)
if is_new_proxy_conn:
self._prepare_proxy(conn)
# Make the request on the httplib connection object.
httplib_response = self._make_request(
conn,
method,
url,
timeout=timeout_obj,
body=body,
headers=headers,
chunked=chunked,
)
# If we're going to release the connection in ``finally:``, then
# the response doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = conn if not release_conn else None
# Pass method to Response for length checking
response_kw["request_method"] = method
# Import httplib's response into our own wrapper object
response = self.ResponseCls.from_httplib(
httplib_response,
pool=self,
connection=response_conn,
retries=retries,
**response_kw
)
# Everything went great!
clean_exit = True
except queue.Empty:
# Timed out by queue.
raise EmptyPoolError(self, "No pool connections are available.")
except (
TimeoutError,
HTTPException,
SocketError,
ProtocolError,
BaseSSLError,
SSLError,
CertificateError,
) as e:
# Discard the connection for these exceptions. It will be
# replaced during the next _get_conn() call.
clean_exit = False
if isinstance(e, (BaseSSLError, CertificateError)):
e = SSLError(e)
elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
e = ProxyError("Cannot connect to proxy.", e)
elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError("Connection aborted.", e)
retries = retries.increment(
method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
)
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if not clean_exit:
# We hit some kind of exception, handled or otherwise. We need
# to throw the connection away unless explicitly told not to.
# Close the connection, set the variable to None, and make sure
# we put the None back in the pool to avoid leaking it.
conn = conn and conn.close()
release_this_conn = True
if release_this_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warning(
"Retrying (%r) after connection " "broken by '%r': %s",
retries,
err,
url,
)
return self.urlopen(
method,
url,
body,
headers,
retries,
redirect,
assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
body_pos=body_pos,
**response_kw
)
def drain_and_release_conn(response):
try:
# discard any remaining response body, the connection will be
# released back to the pool once the entire response is read
response.read()
except (
TimeoutError,
HTTPException,
SocketError,
ProtocolError,
BaseSSLError,
SSLError,
):
pass
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = "GET"
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
# Drain and release the connection for this response, since
# we're not returning it to be released manually.
drain_and_release_conn(response)
raise
return response
# drain and return the connection to the pool before recursing
drain_and_release_conn(response)
retries.sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
return self.urlopen(
method,
redirect_location,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
body_pos=body_pos,
**response_kw
)
# Check if we should retry the HTTP response.
has_retry_after = bool(response.getheader("Retry-After"))
if retries.is_retry(method, response.status, has_retry_after):
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
# Drain and release the connection for this response, since
# we're not returning it to be released manually.
drain_and_release_conn(response)
raise
return response
# drain and return the connection to the pool before recursing
drain_and_release_conn(response)
retries.sleep(response)
log.debug("Retry: %s", url)
return self.urlopen(
method,
url,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
body_pos=body_pos,
**response_kw
)
return response
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`.HTTPSConnection`.
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
the connection socket into an SSL socket.
"""
scheme = "https"
ConnectionCls = HTTPSConnection
def __init__(
self,
host,
port=None,
strict=False,
timeout=Timeout.DEFAULT_TIMEOUT,
maxsize=1,
block=False,
headers=None,
retries=None,
_proxy=None,
_proxy_headers=None,
key_file=None,
cert_file=None,
cert_reqs=None,
key_password=None,
ca_certs=None,
ssl_version=None,
assert_hostname=None,
assert_fingerprint=None,
ca_cert_dir=None,
**conn_kw
):
HTTPConnectionPool.__init__(
self,
host,
port,
strict,
timeout,
maxsize,
block,
headers,
retries,
_proxy,
_proxy_headers,
**conn_kw
)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.key_password = key_password
self.ca_certs = ca_certs
self.ca_cert_dir = ca_cert_dir
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(
key_file=self.key_file,
key_password=self.key_password,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
)
conn.ssl_version = self.ssl_version
return conn
def _prepare_proxy(self, conn):
"""
Establish tunnel connection early, because otherwise httplib
would improperly set Host: header to proxy's IP:port.
"""
conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
conn.connect()
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.debug(
"Starting new HTTPS connection (%d): %s:%s",
self.num_connections,
self.host,
self.port or "443",
)
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
raise SSLError(
"Can't connect to HTTPS URL because the SSL " "module is not available."
)
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
conn = self.ConnectionCls(
host=actual_host,
port=actual_port,
timeout=self.timeout.connect_timeout,
strict=self.strict,
cert_file=self.cert_file,
key_file=self.key_file,
key_password=self.key_password,
**self.conn_kw
)
return self._prepare_conn(conn)
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
super(HTTPSConnectionPool, self)._validate_conn(conn)
# Force connect early to allow us to validate the connection.
if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
conn.connect()
if not conn.is_verified:
warnings.warn(
(
"Unverified HTTPS request is being made. "
"Adding certificate verification is strongly advised. See: "
"https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
"#ssl-warnings"
),
InsecureRequestWarning,
)
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \\**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
port = port or port_by_scheme.get(scheme, 80)
if scheme == "https":
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
def _normalize_host(host, scheme):
"""
Normalize hosts for comparisons and use with sockets.
"""
# httplib doesn't like it when we include brackets in IPv6 addresses
# Specifically, if we include brackets but also pass the port then
# httplib crazily doubles up the square brackets on the Host header.
# Instead, we need to make sure we never pass ``None`` as the port.
# However, for backward compatibility reasons we can't actually
# *assert* that. See http://bugs.python.org/issue28539
if host.startswith("[") and host.endswith("]"):
host = host.strip("[]")
if scheme in NORMALIZABLE_SCHEMES:
host = normalize_host(host)
return host
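# A brief illustration of the helper above (the values are examples, not taken from the
# project's docs): bracketed IPv6 literals lose their brackets so httplib does not double
# them up in the Host header, and hosts of normalizable schemes are lowercased.
#
#   _normalize_host("[2001:DB8::1]", scheme="http")   # -> "2001:db8::1"
#   _normalize_host("Example.COM", scheme="http")     # -> "example.com"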
| 34.841346
| 99
| 0.59368
|
a84fd3572717a7c1126090679403169e719ec280
| 2,673
|
py
|
Python
|
research/object_detection/tpu_exporters/export_saved_model_tpu_lib_tf1_test.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | 1
|
2021-05-22T12:50:50.000Z
|
2021-05-22T12:50:50.000Z
|
object_detection/tpu_exporters/export_saved_model_tpu_lib_tf1_test.py
|
DemonDamon/mask-detection-based-on-tf2odapi
|
192ae544169c1230c21141c033800aa1bd94e9b6
|
[
"MIT"
] | null | null | null |
object_detection/tpu_exporters/export_saved_model_tpu_lib_tf1_test.py
|
DemonDamon/mask-detection-based-on-tf2odapi
|
192ae544169c1230c21141c033800aa1bd94e9b6
|
[
"MIT"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for object detection's TPU exporter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.tpu_exporters import export_saved_model_tpu_lib
from object_detection.utils import tf_version
flags = tf.app.flags
FLAGS = flags.FLAGS
def get_path(path_suffix):
return os.path.join(tf.resource_loader.get_data_files_path(), 'testdata',
path_suffix)
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class ExportSavedModelTPUTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('ssd', get_path('ssd/ssd_pipeline.config'), 'image_tensor', True, 20),
('faster_rcnn',
get_path('faster_rcnn/faster_rcnn_resnet101_atrous_coco.config'),
'image_tensor', True, 20))
def testExportAndLoad(self,
pipeline_config_file,
input_type='image_tensor',
use_bfloat16=False,
repeat=1):
input_placeholder_name = 'placeholder_tensor'
export_dir = os.path.join(FLAGS.test_tmpdir, 'tpu_saved_model')
if tf.gfile.Exists(export_dir):
tf.gfile.DeleteRecursively(export_dir)
ckpt_path = None
export_saved_model_tpu_lib.export(pipeline_config_file, ckpt_path,
export_dir, input_placeholder_name,
input_type, use_bfloat16)
inputs = np.random.rand(256, 256, 3)
tensor_dict_out = export_saved_model_tpu_lib.run_inference_from_saved_model(
inputs, export_dir, input_placeholder_name, repeat)
for k, v in tensor_dict_out.items():
tf.logging.info('{}: {}'.format(k, v))
if __name__ == '__main__':
tf.test.main()
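# A minimal sketch of invoking this test module directly (assuming a TF1.x environment
# with the object_detection package on PYTHONPATH; under TF2 the whole class is skipped
# by the decorator above):
#
#   python export_saved_model_tpu_lib_tf1_test.py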
| 37.125
| 81
| 0.668911
|
8540a505322d5f71a92064fee9061673c674b5d3
| 106
|
py
|
Python
|
sparv/modules/saldo/__init__.py
|
heatherleaf/sparv-pipeline
|
0fe5f27d0d82548ecc6cb21a69289668aac54cf1
|
[
"MIT"
] | 17
|
2018-09-21T07:01:45.000Z
|
2022-02-24T23:26:49.000Z
|
sparv/modules/saldo/__init__.py
|
heatherleaf/sparv-pipeline
|
0fe5f27d0d82548ecc6cb21a69289668aac54cf1
|
[
"MIT"
] | 146
|
2018-11-13T19:13:25.000Z
|
2022-03-31T09:57:56.000Z
|
sparv/modules/saldo/__init__.py
|
heatherleaf/sparv-pipeline
|
0fe5f27d0d82548ecc6cb21a69289668aac54cf1
|
[
"MIT"
] | 5
|
2019-02-14T00:50:38.000Z
|
2021-03-29T15:37:41.000Z
|
"""SALDO-related annotations."""
from . import compound, nst_comp_model, saldo, saldo_model, stats_model
| 26.5
| 71
| 0.773585
|
784fee27ec83c80903d6becec35fc6273d1f861c
| 5,514
|
py
|
Python
|
tests/gold_tests/logging/log-filter.test.py
|
zhaorun/trafficserver
|
757256129811441f29eea288b1d7e19bc54fab9c
|
[
"Apache-2.0"
] | null | null | null |
tests/gold_tests/logging/log-filter.test.py
|
zhaorun/trafficserver
|
757256129811441f29eea288b1d7e19bc54fab9c
|
[
"Apache-2.0"
] | null | null | null |
tests/gold_tests/logging/log-filter.test.py
|
zhaorun/trafficserver
|
757256129811441f29eea288b1d7e19bc54fab9c
|
[
"Apache-2.0"
] | null | null | null |
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = '''
Test log filter.
'''
# Only on Linux (why??)
Test.SkipUnless(
Condition.IsPlatform("linux")
)
# Define default ATS
ts = Test.MakeATSProcess("ts")
# Microserver
server = Test.MakeOriginServer("server")
request_header = {'timestamp': 100, "headers": "GET /test-1 HTTP/1.1\r\nHost: test-1\r\n\r\n", "body": ""}
response_header = {'timestamp': 100,
"headers": "HTTP/1.1 200 OK\r\nTest: 1\r\nContent-Type: application/json\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", "body": "Test 1"}
server.addResponse("sessionlog.json", request_header, response_header)
server.addResponse("sessionlog.json",
{'timestamp': 101, "headers": "GET /test-2 HTTP/1.1\r\nHost: test-2\r\n\r\n", "body": ""},
{'timestamp': 101, "headers": "HTTP/1.1 200 OK\r\nTest: 2\r\nContent-Type: application/jason\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", "body": "Test 2"}
)
server.addResponse("sessionlog.json",
{'timestamp': 102, "headers": "GET /test-3 HTTP/1.1\r\nHost: test-3\r\n\r\n", "body": ""},
{'timestamp': 102, "headers": "HTTP/1.1 200 OK\r\nTest: 3\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", "body": "Test 3"}
)
server.addResponse("sessionlog.json",
{'timestamp': 103, "headers": "GET /test-4 HTTP/1.1\r\nHost: test-4\r\n\r\n", "body": ""},
{'timestamp': 103, "headers": "HTTP/1.1 200 OK\r\nTest: 4\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", "body": "Test 4"}
)
server.addResponse("sessionlog.json",
{'timestamp': 104, "headers": "GET /test-5 HTTP/1.1\r\nHost: test-5\r\n\r\n", "body": ""},
{'timestamp': 104, "headers": "HTTP/1.1 200 OK\r\nTest: 5\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", "body": "Test 5"}
)
ts.Disk.records_config.update({
'proxy.config.net.connections_throttle': 100,
'proxy.config.http.cache.http': 0
})
# setup some config file for this server
ts.Disk.remap_config.AddLine(
'map / http://localhost:{}/'.format(server.Variables.Port)
)
ts.Disk.logging_yaml.AddLines(
'''
logging:
filters:
- name: queryparamescaper_cquuc
action: WIPE_FIELD_VALUE
condition: cquuc CASE_INSENSITIVE_CONTAIN password,secret,access_token,session_redirect,cardNumber,code,query,search-query,prefix,keywords,email,handle
formats:
- name: custom
format: '%<cquuc>'
logs:
- filename: filter-test
format: custom
filters:
- queryparamescaper_cquuc
'''.split("\n")
)
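# The YAML above wires up a single custom log: the queryparamescaper_cquuc filter is
# meant to apply WIPE_FIELD_VALUE whenever the client request URL (cquuc) contains one
# of the listed query parameter names, so values such as password or email are scrubbed
# before the '%<cquuc>' format writes the URL to filter-test.log.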
# #########################################################################
# at the end of the different test run a custom log file should exist
# Because of this we expect the testruns to pass the real test is if the
# customlog file exists and passes the format check
Test.Disk.File(os.path.join(ts.Variables.LOGDIR, 'filter-test.log'),
exists=True, content='gold/filter-test.gold')
# first test is a miss for default
tr = Test.AddTestRun()
# Wait for the micro server
tr.Processes.Default.StartBefore(server)
# Delay on readiness of our ssl ports
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.Processes.Default.Command = 'curl --verbose --header "Host: test-1" "http://localhost:{0}/test-1?name=value&email=123@gmail.com"' .format(
ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --verbose --header "Host: test-2" "http://localhost:{0}/test-2?email=123@gmail.com&name=password"' .format(
ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --verbose --header "Host: test-3" "http://localhost:{0}/test-3?trivial=password&name1=val1&email=123@gmail.com"' .format(
ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --verbose --header "Host: test-4" "http://localhost:{0}/test-4?trivial=password&email=&name=handle&session_redirect=wiped_string"' .format(
ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --verbose --header "Host: test-5" "http://localhost:{0}/test-5?trivial=password&email=123@gmail.com&email=456@gmail.com&session_redirect=wiped_string&email=789@gmail.com&name=value"' .format(
ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr = Test.AddTestRun()
tr.DelayStart = 10
tr.Processes.Default.Command = 'echo "Delay for log flush"'
tr.Processes.Default.ReturnCode = 0
| 44.112
| 228
| 0.673558
|
4c752c722ef9f3bd15c73d4917e51758c0eb8e2c
| 2,157
|
py
|
Python
|
source/conf.py
|
junisky/doc
|
a5a8b65edfb6466677ea36d88f864c93fa8a5df9
|
[
"MIT"
] | null | null | null |
source/conf.py
|
junisky/doc
|
a5a8b65edfb6466677ea36d88f864c93fa8a5df9
|
[
"MIT"
] | null | null | null |
source/conf.py
|
junisky/doc
|
a5a8b65edfb6466677ea36d88f864c93fa8a5df9
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'junisky'
copyright = '2021, junisky'
author = 'junisky'
# The full version, including alpha/beta/rc tags
release = '1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'ko'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
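# A hedged example of how this configuration is commonly extended (the extension names
# below are standard Sphinx modules, shown only as an illustration; they are not enabled
# in this project):
#
#   extensions = [
#       'sphinx.ext.autodoc',
#       'sphinx.ext.napoleon',
#   ]
#
# Note that html_theme = 'sphinx_rtd_theme' above assumes the sphinx-rtd-theme package
# is installed in the build environment.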
| 34.238095
| 79
| 0.676866
|
1e18c69a09a301223512c5ea1a53991e7dcb044a
| 1,039
|
py
|
Python
|
Python/queue-reconstruction-by-height.py
|
se77enn/LeetCode-Solution
|
d29ef5358cae592b63952c3d293897a176fb75e1
|
[
"MIT"
] | 1
|
2020-10-27T03:22:31.000Z
|
2020-10-27T03:22:31.000Z
|
Python/queue-reconstruction-by-height.py
|
se77enn/LeetCode-Solution
|
d29ef5358cae592b63952c3d293897a176fb75e1
|
[
"MIT"
] | null | null | null |
Python/queue-reconstruction-by-height.py
|
se77enn/LeetCode-Solution
|
d29ef5358cae592b63952c3d293897a176fb75e1
|
[
"MIT"
] | 1
|
2021-03-22T18:58:23.000Z
|
2021-03-22T18:58:23.000Z
|
# Time: O(n * sqrt(n))
# Space: O(n)
class Solution(object):
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
people.sort(key=lambda h_k: (-h_k[0], h_k[1]))
blocks = [[]]
for p in people:
index = p[1]
for i, block in enumerate(blocks):
if index <= len(block):
break
index -= len(block)
block.insert(index, p)
if len(block) * len(block) > len(people):
                # split oversized blocks so each block stays near sqrt(n) in size
                # (// keeps the index an int under both Python 2 and 3)
                blocks.insert(i+1, block[len(block)//2:])
                del block[len(block)//2:]
return [p for block in blocks for p in block]
class Solution2(object):
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
people.sort(key=lambda h_k1: (-h_k1[0], h_k1[1]))
result = []
for p in people:
result.insert(p[1], p)
return result
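# A small self-check using the well-known LeetCode example for this problem; it only
# runs when the file is executed directly, so importing the module is unaffected.
if __name__ == "__main__":
    people = [[7, 0], [4, 4], [7, 1], [5, 0], [6, 1], [5, 2]]
    expected = [[5, 0], [7, 0], [5, 2], [6, 1], [4, 4], [7, 1]]
    assert Solution().reconstructQueue([list(p) for p in people]) == expected
    assert Solution2().reconstructQueue([list(p) for p in people]) == expected
    print("queue reconstruction OK:", expected)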
| 25.975
| 57
| 0.485082
|
f8d454c48db24502fc7410180cd849711f08c44b
| 171
|
py
|
Python
|
chat/routing.py
|
hamraa/channels-demo
|
9bcc3395bdc8062f55ef15052be1d2a5b8af2631
|
[
"MIT"
] | 1
|
2022-01-06T16:57:48.000Z
|
2022-01-06T16:57:48.000Z
|
chat/routing.py
|
hamraa/channels-demo
|
9bcc3395bdc8062f55ef15052be1d2a5b8af2631
|
[
"MIT"
] | null | null | null |
chat/routing.py
|
hamraa/channels-demo
|
9bcc3395bdc8062f55ef15052be1d2a5b8af2631
|
[
"MIT"
] | null | null | null |
from django.urls import re_path
from . import consumers
websocket_urlpatterns = [
re_path(r'ws/chat/(?P<room_name>\w+)/$', consumers.AsyncChatConsumer.as_asgi()),
]
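# A sketch of how these patterns are typically mounted in the project's asgi.py (this is
# the standard Channels wiring, shown here as an illustration rather than taken from this
# repository):
#
#   from channels.auth import AuthMiddlewareStack
#   from channels.routing import ProtocolTypeRouter, URLRouter
#   from django.core.asgi import get_asgi_application
#   import chat.routing
#
#   application = ProtocolTypeRouter({
#       "http": get_asgi_application(),
#       "websocket": AuthMiddlewareStack(
#           URLRouter(chat.routing.websocket_urlpatterns)
#       ),
#   })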
| 21.375
| 84
| 0.730994
|
b57a883822ff08ece5dc8d8a11d0eb4d087db883
| 13,496
|
py
|
Python
|
students/models.py
|
tnemisteam/cdf-steps
|
78896eebd08ba9975a2dece97f73dca9aa781238
|
[
"MIT"
] | null | null | null |
students/models.py
|
tnemisteam/cdf-steps
|
78896eebd08ba9975a2dece97f73dca9aa781238
|
[
"MIT"
] | null | null | null |
students/models.py
|
tnemisteam/cdf-steps
|
78896eebd08ba9975a2dece97f73dca9aa781238
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.db.models.fields import *
from baseapp.models import *
import caching.base
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill
from django.db.models.signals import post_save, post_delete
from imagekit import ImageSpec, register
from imagekit.processors import ResizeToFill
from django.conf import settings as django_settings
import os
from django.core.exceptions import ValidationError
class Thumbnail(ImageSpec):
processors = [ResizeToFill(150, 150)]
format = 'JPEG'
options = {'quality': 60}
register.generator('students:thumbnail', Thumbnail)
class Child_detail(caching.base.CachingMixin, models.Model):
def save(self):
if not self.unique_id_no:
self.unique_id_no = (
self.school.school_code * 100000) + (
self.school.student_id_count + 1)
super(Child_detail, self).save()
def validate_image(fieldfile_obj):
filesize = fieldfile_obj.file.size
kb_limit = 50
Kilobyte_limit = 1024 *50
if filesize > Kilobyte_limit:
raise ValidationError("Max file size is %sKB" % str(kb_limit))
def get_path(instance, filename):
import random
import string
random=''.join([random.choice(string.ascii_letters + string.digits) for n in xrange(32)])
extension = filename.split('.')[-1]
a = instance.block.block_name.replace(" ", ".")
a.replace("(", '_')
a.replace(")", '_')
try:
child = Child_detail.objects.get(unique_id_no=instance.unique_id_no)
path=django_settings.MEDIA_ROOT+"/"+str(child.photograph)
os.remove(path)
        except Exception:
pass
dir = "images/child_pics/%s/%s" % (a, instance.school.school_code)
name = str(instance.unique_id_no)+"_"+random
return "%s/%s.%s" % (dir, name, extension)
name = models.CharField(default='', max_length=200)
name_tamil = models.CharField(default='', max_length=200,blank=True, null=True)
aadhaar_id = models.CharField(max_length=3)
aadhaar_eid_number = models.CharField(max_length=50,blank=True, null=True)
aadhaar_uid_number = models.BigIntegerField(blank=True, null=True)
photograph = ProcessedImageField(upload_to=get_path,
processors=[ResizeToFill(200, 200)],
format='JPEG',
options={'quality': 60},
blank=True,
null=True,
validators=[validate_image])
photo = ProcessedImageField(upload_to=get_path,
processors=[ResizeToFill(125, 125)],
format='JPEG',
options={'quality': 60},
blank=True,
null=True,
)
gender = models.CharField(max_length=15)
dob = models.DateField(default='1990-01-01')
community = models.ForeignKey(Community,blank=True,null=True)
community_certificate = models.CharField(max_length=3,blank=True,null=True)
community_certificate_no = models.CharField(max_length=200,blank=True,null=True)
community_certificate_date = models.DateField(blank=True,null=True,default='1990-01-01')
nativity_certificate = models.CharField(max_length=3,blank=True,null=True)
religion = models.ForeignKey(Religion)
mothertounge = models.ForeignKey(Language)
phone_number = BigIntegerField(default=0, blank=True, null=True)
email = models.CharField(max_length=100, blank=True, null=True)
child_differently_abled = models.CharField(max_length=3)
differently_abled = models.CharField(max_length=1000,blank=True,null=True)
child_admitted_under_reservation = models.CharField(max_length=3,blank=True,null=True)
weaker_section = models.CharField(max_length=3)
weaker_section_income_certificate_no = models.CharField(max_length=200,blank=True,null=True)
child_disadvantaged_group = models.CharField(max_length=3,blank=True,null=True)
disadvantaged_group = models.CharField(max_length=1000,blank=True,null=True)
subcaste = ChainedForeignKey(Sub_Castes, chained_field='community',
chained_model_field='community',
auto_choose=True,
blank=True,
null=True)
nationality = models.ForeignKey(Nationality)
child_status = models.CharField(max_length=200,blank=True, null=True)
house_address = models.CharField(default='', max_length=1000,blank=True, null=True)
native_district = models.CharField(max_length=50, blank=True, null=True)
pin_code = models.PositiveIntegerField(default=6, blank=True, null=True)
blood_group = models.CharField(max_length=10, blank=True, null=True)
height = models.PositiveIntegerField(max_length=3, default=0, blank=True, null=True)
weight = models.PositiveIntegerField(default=0, blank=True, null=True)
mother_name = models.CharField(default='', max_length=100, blank=True, null=True)
mother_occupation = models.CharField(max_length=50, blank=True, null=True)
father_name = models.CharField(default='', max_length=100, blank=True, null=True)
father_occupation = models.CharField(max_length=50, blank=True, null=True)
parent_income = models.PositiveIntegerField(default=0, blank=True, null=True)
guardian_name = models.CharField(default='', max_length=100, blank=True, null=True)
class_studying = models.ForeignKey(Class_Studying)
class_section = models.CharField(max_length=30)
group_code = models.ForeignKey(Group_code, blank=True, null=True)
attendance_status = models.CharField(max_length=30, blank=True, null=True)
sport_participation = models.CharField(max_length=20, blank=True, null=True)
laptop_issued = models.CharField(max_length=3,blank=True,null=True)
laptop_slno = models.CharField(max_length=200,blank=True,null=True)
education_medium = models.ForeignKey(Education_medium)
state = models.ForeignKey(State)
district = models.ForeignKey(District)
block = models.ForeignKey(Block)
unique_id_no = models.BigIntegerField(blank=True, null=True)
school = models.ForeignKey(School)
staff_id = models.CharField(max_length=30)
student_admitted_section = models.CharField(max_length=100,blank=True, null=True)
school_admission_no = models.CharField(max_length=100)
bank = models.ForeignKey(Bank, blank=True, null=True)
bank_account_no = models.BigIntegerField(default='', blank=True, null=True)
bank_ifsc_code = models.BigIntegerField(default='', blank=True, null=True)
sports_player = models.CharField(max_length=3)
sports_name = models.CharField(max_length=1000,blank=True,null=True)
# govt_schemes_status = models.CharField(max_length=5)
schemes = models.CharField(max_length=1000,blank=True,null=True)
academic_year = models.ForeignKey(Academic_Year)
scholarship_from_other_source = models.CharField(max_length=3)
scholarship_details = models.CharField(max_length=1000,blank=True,null=True)
scholarship_other_details = models.CharField(max_length=1000,blank=True,null=True)
bus_pass = models.CharField(max_length=3)
bus_from_route = models.CharField(max_length=50,blank=True,null=True)
bus_to_route = models.CharField(max_length=50,blank=True,null=True)
bus_route_no = models.CharField(max_length=5,blank=True,null=True)
transfer_flag = models.PositiveIntegerField(
default=0, blank=True, null=True)
transfer_date = models.DateField(blank=True, null=True)
nutritious_meal_flag = models.CharField(default='', max_length=5, blank=True, null=True)
modification_flag = models.PositiveIntegerField(
default=0, blank=True, null=True)
verification_flag = models.PositiveIntegerField(
default=0, blank=True, null=True)
created_date = models.DateTimeField(auto_now_add=True, editable=False)
modified_date = models.DateTimeField(auto_now=True)
objects = caching.base.CachingManager()
# history = HistoricalRecords()
def __unicode__(self):
return u'%s %s %s %s' % (self.unique_id_no, self.name, self.staff_id,
self.verification_flag)
class Child_family_detail(caching.base.CachingMixin, models.Model):
child_key = models.ForeignKey(Child_detail)
si_no = models.PositiveIntegerField()
block = models.ForeignKey(Block)
sibling_name = models.CharField(max_length=50)
sibling_relationship = models.CharField(max_length=20)
sibling_age = models.IntegerField(max_length=3)
sibling_status = models.CharField(max_length=50, blank=True, null=True)
sibling_studying = models.CharField(max_length=50, blank=True, null=True)
sibling_studying_same_school = models.CharField(max_length=3)
staff_id = models.CharField(max_length=30)
created_date = models.DateTimeField(auto_now_add=True, editable=False)
modified_date = models.DateTimeField(auto_now=True)
# history = HistoricalRecords()
objects = caching.base.CachingManager()
def __unicode__(self):
return u'%s %s' % (self.child_key, self.sibling_name)
"""
Model for Old school
"""
class Child_Transfer_History(models.Model):
child_key = models.ForeignKey(Child_detail)
old_school = models.ForeignKey(School)
tc_issue_date = models.DateField()
created_date = models.DateTimeField(auto_now_add=True, editable=False)
modified_date = models.DateTimeField(auto_now=True)
# history = HistoricalRecords()
# objects = caching.base.CachingManager()
def __unicode__(self):
return u'%s %s' % (self.child_key, self.old_school)
def ensure_stud_count_increase(sender, instance, **kwargs):
if kwargs.get('created', True):
school = School.objects.get(
school_code=instance.school.school_code)
school.student_id_count += 1
school.save()
post_save.connect(ensure_stud_count_increase, sender=Child_detail)
"""
Model for school child count
"""
class School_child_count(models.Model):
school = models.ForeignKey(School)
one = models.PositiveIntegerField()
two = models.PositiveIntegerField()
three = models.PositiveIntegerField()
four = models.PositiveIntegerField()
five = models.PositiveIntegerField()
six = models.PositiveIntegerField()
seven = models.PositiveIntegerField()
eight = models.PositiveIntegerField()
nine = models.PositiveIntegerField()
ten = models.PositiveIntegerField()
eleven = models.PositiveIntegerField()
twelve = models.PositiveIntegerField()
total_count = models.PositiveIntegerField()
def school_child_count_increase(sender, instance, **kwargs):
# import ipdb;ipdb.set_trace()
if kwargs.get('created', True):
try:
child = School_child_count.objects.get(school=instance.school)
except School_child_count.DoesNotExist:
child=School_child_count.objects.create(school=instance.school,one=0,two=0,three=0,four=0,five=0,six=0,seven=0,eight=0,nine=0,ten=0,eleven=0,twelve=0,total_count=0)
class_studying= instance.class_studying
if str(class_studying)=='I':
child.one += 1
elif str(class_studying)=='II':
child.two += 1
elif str(class_studying)=='III':
child.three += 1
elif str(class_studying)=='IV':
child.four += 1
elif str(class_studying)=='V':
child.five += 1
elif str(class_studying)=='VI':
child.six += 1
elif str(class_studying)=='VII':
child.seven += 1
elif str(class_studying)=='VIII':
child.eight += 1
elif str(class_studying)=='IX':
child.nine+= 1
elif str(class_studying)=='X':
child.ten += 1
elif str(class_studying)=='XI':
child.eleven += 1
elif str(class_studying)=='XII':
child.twelve += 1
child.total_count += 1
child.save()
post_save.connect(school_child_count_increase, sender=Child_detail)
def school_child_count_decrease(sender, instance, **kwargs):
child = School_child_count.objects.get(school=instance.school)
class_studying= instance.class_studying
if str(class_studying)=='I':
child.one -=1
elif str(class_studying)=='II':
child.two -=1
elif str(class_studying)=='III':
child.three -=1
elif str(class_studying)=='IV':
child.four -=1
elif str(class_studying)=='V':
child.five -=1
elif str(class_studying)=='VI':
child.six -=1
elif str(class_studying)=='VII':
child.seven -=1
elif str(class_studying)=='VIII':
child.eight -=1
elif str(class_studying)=='IX':
child.nine-=1
elif str(class_studying)=='X':
child.ten -=1
elif str(class_studying)=='XI':
child.eleven -=1
elif str(class_studying)=='XII':
child.twelve -=1
child.total_count -= 1
child.save()
post_delete.connect(school_child_count_decrease, sender=Child_detail)
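# A possible refactoring sketch (not part of the original models): the two signal
# handlers above repeat the same class-name-to-field mapping, which could be made
# table-driven, e.g.
#
#   CLASS_FIELD_MAP = {'I': 'one', 'II': 'two', 'III': 'three', 'IV': 'four',
#                      'V': 'five', 'VI': 'six', 'VII': 'seven', 'VIII': 'eight',
#                      'IX': 'nine', 'X': 'ten', 'XI': 'eleven', 'XII': 'twelve'}
#
#   def _adjust_count(counts, class_studying, delta):
#       field = CLASS_FIELD_MAP.get(str(class_studying))
#       if field:
#           setattr(counts, field, getattr(counts, field) + delta)
#       counts.total_count += delta
#       counts.save()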
"""
Model for Parent's Annual Income
"""
class Parent_annual_income(models.Model):
income = models.CharField(max_length=50)
| 43.118211
| 176
| 0.679238
|
d0c12fcc98685367ba3a03554e853f2875656c14
| 5,814
|
py
|
Python
|
server-py/room.py
|
pwmarcz/minefield
|
f4055da45251b79185d26fff7de7ccb153b5c171
|
[
"MIT"
] | 22
|
2015-03-22T21:36:57.000Z
|
2021-11-08T12:37:33.000Z
|
server-py/room.py
|
pwmarcz/minefield
|
f4055da45251b79185d26fff7de7ccb153b5c171
|
[
"MIT"
] | 10
|
2015-03-22T20:21:07.000Z
|
2021-05-09T09:59:02.000Z
|
server-py/room.py
|
pwmarcz/minefield
|
f4055da45251b79185d26fff7de7ccb153b5c171
|
[
"MIT"
] | 6
|
2015-03-26T04:23:13.000Z
|
2021-04-06T05:55:12.000Z
|
import unittest
import logging
from game import Game
from utils import make_key
logger = logging.getLogger('room')
class Room(object):
def __init__(self, nicks=['P1', 'P2'], game_class=Game):
self.game = game_class(callback=self.send_to_player)
self.nicks = nicks
self.players = [None, None]
self.messages = [[], []]
self.keys = self.make_keys()
self.aborted = False
self.id = None
def init_from_data(self, data):
self.game = Game.from_data(data['game'], callback=self.send_to_player)
del data['game']
self.players = [None, None]
super(Room, self).init_from_data(data)
def start_game(self):
logger.info('[room %s] starting', self.id)
self.game.start()
def make_keys(self):
return (make_key(), make_key())
def send_to_player(self, idx, msg_type, **msg):
self.messages[idx].append((msg_type, msg))
if self.players[idx]:
logger.info('[room %s] send to %d: %s %r', self.id, idx, msg_type, msg)
self.players[idx].send(msg_type, **msg)
def add_player(self, idx, player, n_received=0):
assert not self.players[idx]
self.players[idx] = player
player.set_room(self, idx)
self.replay_messages(idx, n_received)
def remove_player(self, idx):
self.players[idx] = None
def replay_messages(self, idx, n_received):
messages = self.messages[idx]
for msg_type, msg in messages[n_received:]:
# we don't replay move info, only send the last one to the player
if msg_type in ['start_move', 'end_move']:
continue
logger.info('[room %s] replay to %d: %s %r', self.id, idx, msg_type, msg)
self.players[idx].send('replay', msg={'type': msg_type, **msg})
self.game.send_move(idx)
def send_to_game(self, idx, msg_type, **msg):
logger.info('[room %s] receive from %d: %s %r', self.id, idx, msg_type, msg)
try:
handler = getattr(self.game, 'on_'+msg_type)
handler(idx, **msg)
except:
logger.exception('exception after receiving')
self.abort()
def beat(self):
if self.finished:
return
try:
self.game.beat()
except:
logger.exception('exception in beat')
self.abort()
def abort(self):
self.aborted = True
for idx in range(2):
if self.players[idx]:
self.players[idx].shutdown()
@property
def finished(self):
return self.aborted or self.game.finished
class RoomTest(unittest.TestCase):
class MockGame(object):
def __init__(self, nicks=None, east=None, callback=None):
self.callback = callback
self.started = False
def start(self):
assert not self.started
self.started = True
def on_ping(self, idx, **msg):
self.callback(1-idx, 'pong', **msg)
def on_crash(self, idx, **msg):
raise RuntimeError('crashed')
def send_move(self, idx):
pass
class MockPlayer(object):
def __init__(self):
self.messages = []
self.finished = False
self.room = None
self.idx = None
def send(self, msg_type, **msg):
self.messages.append((msg_type, msg))
def shutdown(self):
self.finished = True
def set_room(self, room, idx):
self.room = room
self.idx = idx
def create_room(self, players=[None, None]):
room = Room(game_class=self.MockGame)
for idx, player in enumerate(players):
if player:
room.add_player(idx, player)
room.start_game()
return room
def test_create(self):
room = self.create_room()
self.assertIsInstance(room.game, self.MockGame)
self.assertTrue(room.game.started)
def test_send_immediately(self):
player0 = self.MockPlayer()
room = self.create_room([player0, None])
room.game.callback(0, 'ping_0')
self.assertEquals(len(player0.messages), 1)
self.assertEquals(player0.messages[0][0], 'ping_0')
player1 = self.MockPlayer()
room.add_player(1, player1)
room.game.callback(1, 'ping_1')
self.assertEquals(len(player1.messages), 1)
self.assertEquals(player1.messages[0][0], 'ping_1')
def test_replay_after_connect(self):
room = self.create_room()
room.game.callback(0, 'a')
room.game.callback(0, 'b')
room.game.callback(1, 'c')
room.game.callback(1, 'd')
room.game.callback(0, 'e')
player0 = self.MockPlayer()
room.add_player(0, player0, n_received=1)
self.assertEquals(len(player0.messages), 2)
self.assertEquals(player0.messages[0], ('replay', {'msg': {'type': 'b'}}))
self.assertEquals(player0.messages[1], ('replay', {'msg': {'type': 'e'}}))
def test_send_to_game(self):
player0 = self.MockPlayer()
player1 = self.MockPlayer()
room = self.create_room([player0, player1])
room.send_to_game(1, 'ping')
self.assertEquals(len(player0.messages), 1)
self.assertEquals(player0.messages[0][0], 'pong')
def test_send_and_crash(self):
player0 = self.MockPlayer()
player1 = self.MockPlayer()
room = self.create_room([player0, player1])
room.send_to_game(0, 'crash')
self.assertTrue(player0.finished)
self.assertTrue(player1.finished)
if __name__ == '__main__':
#logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(name)s: %(message)s')
unittest.main()
| 31.427027
| 92
| 0.586171
|
50194a34ec8b1f5aefa20e42f423939adafc4627
| 365
|
py
|
Python
|
Pics/migrations/0007_auto_20191013_1408.py
|
IreneMercy/Instapic
|
98e3f48bffbc7c182cecb5460972872865f6e336
|
[
"MIT"
] | null | null | null |
Pics/migrations/0007_auto_20191013_1408.py
|
IreneMercy/Instapic
|
98e3f48bffbc7c182cecb5460972872865f6e336
|
[
"MIT"
] | 8
|
2019-12-05T00:36:46.000Z
|
2022-02-10T10:32:41.000Z
|
Pics/migrations/0007_auto_20191013_1408.py
|
IreneMercy/Instapic
|
98e3f48bffbc7c182cecb5460972872865f6e336
|
[
"MIT"
] | 1
|
2020-07-05T19:24:28.000Z
|
2020-07-05T19:24:28.000Z
|
# Generated by Django 2.2 on 2019-10-13 11:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Pics', '0006_auto_20191013_1405'),
]
operations = [
migrations.RenameField(
model_name='profile',
old_name='image',
new_name='profile_photo',
),
]
| 19.210526
| 45
| 0.589041
|
a07b2632346dbd2688db31cea1a1ec4f24dec322
| 613
|
py
|
Python
|
Python/tests.py
|
ayan59dutta/Small-Scripts
|
b560d7800445ee1f35d23eaf92900b87a902b238
|
[
"Unlicense"
] | 2
|
2018-04-13T18:41:47.000Z
|
2021-11-19T13:20:06.000Z
|
Python/tests.py
|
ayan59dutta/Small-Scripts
|
b560d7800445ee1f35d23eaf92900b87a902b238
|
[
"Unlicense"
] | 4
|
2018-03-29T17:12:35.000Z
|
2018-04-13T18:57:16.000Z
|
Python/tests.py
|
ayan59dutta/Small-Scripts
|
b560d7800445ee1f35d23eaf92900b87a902b238
|
[
"Unlicense"
] | 2
|
2018-03-30T13:22:50.000Z
|
2019-10-24T18:57:18.000Z
|
#!/usr/bin/env python3
import unittest
# Imports for caesar.py tests
from caesar import decrypt
from caesar import encrypt
# Unit Tests for caesar.py
class CaesarTest(unittest.TestCase):
def test_decrypt_key(self):
self.assertEqual('I am a good boy', encrypt('L dp d jrrg erb',-3))
def test_encrypt(self):
self.assertEqual('L dp d jrrg erb', encrypt('I am a good boy', 3))
def test_decrypt_big(self):
self.assertEqual('THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG', decrypt('QEB NRFZH YOLTK CLU GRJMP LSBO QEB IXWV ALD'))
if __name__ == '__main__':
unittest.main()
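# A minimal sketch, reconstructed from the assertions above, of what the imported
# caesar.encrypt/decrypt might look like (the real caesar.py may differ). The names
# are prefixed so they do not shadow the imports in this test module.
def _sketch_encrypt(text, key=3):
    out = []
    for ch in text:
        if ch.isupper():
            out.append(chr((ord(ch) - ord('A') + key) % 26 + ord('A')))
        elif ch.islower():
            out.append(chr((ord(ch) - ord('a') + key) % 26 + ord('a')))
        else:
            out.append(ch)
    return ''.join(out)
def _sketch_decrypt(text, key=-3):
    # test_decrypt_big expects a forward shift of 3, i.e. undoing a shift of -3
    return _sketch_encrypt(text, -key)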
| 24.52
| 127
| 0.696574
|
98da3f35ef2deccde2028103bca8757b28a3af36
| 222
|
py
|
Python
|
watchdog_kj_kultura/organizations/apps.py
|
watchdogpolska/watchdog-kj-kultura
|
ea1a5c52ef2a174c012cc08eff5fdd7aa3b911b0
|
[
"MIT"
] | null | null | null |
watchdog_kj_kultura/organizations/apps.py
|
watchdogpolska/watchdog-kj-kultura
|
ea1a5c52ef2a174c012cc08eff5fdd7aa3b911b0
|
[
"MIT"
] | 138
|
2016-12-10T19:18:18.000Z
|
2019-06-10T19:32:40.000Z
|
watchdog_kj_kultura/organizations/apps.py
|
watchdogpolska/watchdog-kj-kultura
|
ea1a5c52ef2a174c012cc08eff5fdd7aa3b911b0
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class OrganizationsConfig(AppConfig):
name = 'watchdog_kj_kultura.organizations'
verbose_name = _("Organizations module")
| 27.75
| 55
| 0.801802
|
3d35e5e000a6bb40f519026206eee82216e1b4e0
| 4,694
|
py
|
Python
|
sdk/logic/azure-mgmt-logic/azure/mgmt/logic/models/edifact_delimiter_override_py3.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/logic/azure-mgmt-logic/azure/mgmt/logic/models/edifact_delimiter_override_py3.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 226
|
2019-07-24T07:57:21.000Z
|
2019-10-15T01:07:24.000Z
|
sdk/logic/azure-mgmt-logic/azure/mgmt/logic/models/edifact_delimiter_override_py3.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EdifactDelimiterOverride(Model):
"""The Edifact delimiter override settings.
All required parameters must be populated in order to send to Azure.
:param message_id: The message id.
:type message_id: str
:param message_version: The message version.
:type message_version: str
:param message_release: The message release.
:type message_release: str
:param data_element_separator: Required. The data element separator.
:type data_element_separator: int
:param component_separator: Required. The component separator.
:type component_separator: int
:param segment_terminator: Required. The segment terminator.
:type segment_terminator: int
:param repetition_separator: Required. The repetition separator.
:type repetition_separator: int
:param segment_terminator_suffix: Required. The segment terminator suffix.
Possible values include: 'NotSpecified', 'None', 'CR', 'LF', 'CRLF'
:type segment_terminator_suffix: str or
~azure.mgmt.logic.models.SegmentTerminatorSuffix
:param decimal_point_indicator: Required. The decimal point indicator.
Possible values include: 'NotSpecified', 'Comma', 'Decimal'
:type decimal_point_indicator: str or
~azure.mgmt.logic.models.EdifactDecimalIndicator
:param release_indicator: Required. The release indicator.
:type release_indicator: int
:param message_association_assigned_code: The message association assigned
code.
:type message_association_assigned_code: str
:param target_namespace: The target namespace on which this delimiter
settings has to be applied.
:type target_namespace: str
"""
_validation = {
'data_element_separator': {'required': True},
'component_separator': {'required': True},
'segment_terminator': {'required': True},
'repetition_separator': {'required': True},
'segment_terminator_suffix': {'required': True},
'decimal_point_indicator': {'required': True},
'release_indicator': {'required': True},
}
_attribute_map = {
'message_id': {'key': 'messageId', 'type': 'str'},
'message_version': {'key': 'messageVersion', 'type': 'str'},
'message_release': {'key': 'messageRelease', 'type': 'str'},
'data_element_separator': {'key': 'dataElementSeparator', 'type': 'int'},
'component_separator': {'key': 'componentSeparator', 'type': 'int'},
'segment_terminator': {'key': 'segmentTerminator', 'type': 'int'},
'repetition_separator': {'key': 'repetitionSeparator', 'type': 'int'},
'segment_terminator_suffix': {'key': 'segmentTerminatorSuffix', 'type': 'SegmentTerminatorSuffix'},
'decimal_point_indicator': {'key': 'decimalPointIndicator', 'type': 'EdifactDecimalIndicator'},
'release_indicator': {'key': 'releaseIndicator', 'type': 'int'},
'message_association_assigned_code': {'key': 'messageAssociationAssignedCode', 'type': 'str'},
'target_namespace': {'key': 'targetNamespace', 'type': 'str'},
}
def __init__(self, *, data_element_separator: int, component_separator: int, segment_terminator: int, repetition_separator: int, segment_terminator_suffix, decimal_point_indicator, release_indicator: int, message_id: str=None, message_version: str=None, message_release: str=None, message_association_assigned_code: str=None, target_namespace: str=None, **kwargs) -> None:
super(EdifactDelimiterOverride, self).__init__(**kwargs)
self.message_id = message_id
self.message_version = message_version
self.message_release = message_release
self.data_element_separator = data_element_separator
self.component_separator = component_separator
self.segment_terminator = segment_terminator
self.repetition_separator = repetition_separator
self.segment_terminator_suffix = segment_terminator_suffix
self.decimal_point_indicator = decimal_point_indicator
self.release_indicator = release_indicator
self.message_association_assigned_code = message_association_assigned_code
self.target_namespace = target_namespace
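# Illustrative usage sketch: constructing the model with its required parameters.
# The separator code points and enum values below are placeholders only (taken from
# the docstring's documented options), not recommended EDIFACT settings.
if __name__ == '__main__':
    example_override = EdifactDelimiterOverride(
        data_element_separator=43,    # ASCII '+'
        component_separator=58,       # ASCII ':'
        segment_terminator=39,        # ASCII "'"
        repetition_separator=42,      # ASCII '*'
        segment_terminator_suffix='CR',
        decimal_point_indicator='Decimal',
        release_indicator=63,         # ASCII '?'
        message_id='ORDERS',          # optional field, hypothetical value
    )
    print(example_override.data_element_separator)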
| 51.582418
| 376
| 0.697912
|
46e8ae2bdca053ee87cb93f3897ba819e617d26f
| 7,440
|
py
|
Python
|
ludolph/message.py
|
erigones/Ludolph
|
f46d2c0484db2b54b491be40b6324be8c64243db
|
[
"BSD-3-Clause"
] | 33
|
2015-01-12T16:48:22.000Z
|
2022-01-24T09:45:56.000Z
|
ludolph/message.py
|
erigones/Ludolph
|
f46d2c0484db2b54b491be40b6324be8c64243db
|
[
"BSD-3-Clause"
] | 36
|
2015-01-24T19:23:27.000Z
|
2020-10-03T20:04:47.000Z
|
ludolph/message.py
|
erigones/Ludolph
|
f46d2c0484db2b54b491be40b6324be8c64243db
|
[
"BSD-3-Clause"
] | 10
|
2015-01-24T19:27:20.000Z
|
2022-02-24T16:47:54.000Z
|
"""
Ludolph: Monitoring Jabber bot
Copyright (C) 2012-2015 Erigones, s. r. o.
This file is part of Ludolph.
See the file LICENSE for copying permission.
"""
import logging
import re
from datetime import datetime, timedelta
from sleekxmpp.xmlstream import ET
from sleekxmpp.stanza import Message
try:
from xml.etree.ElementTree import ParseError
except ImportError:
from xml.parsers.expat import ExpatError as ParseError
__all__ = ('red', 'green', 'blue', 'IncomingLudolphMessage', 'OutgoingLudolphMessage')
logger = logging.getLogger(__name__)
r = re.compile
TEXT2BODY = (
(r(r'\*\*(.+?)\*\*'), r'*\1*'),
(r(r'__(.+?)__'), r'\1'),
(r(r'\^\^(.+?)\^\^'), r'\1'),
(r(r'~~(.+?)~~'), r'\1'),
(r(r'%{(.+?)}(.+)%'), r'\2'),
(r(r'\[\[(.+?)\|(.+?)\]\]'), r'\1'),
)
TEXT2HTML = (
('&', '&'),
('<', '<'),
('>', '>'),
("'", '''),
('"', '"'),
(r(r'\*\*(.+?)\*\*'), r'<b>\1</b>'),
(r(r'__(.+?)__'), r'<i>\1</i>'),
(r(r'\^\^(.+?)\^\^'), r'<sup>\1</sup>'),
(r(r'~~(.+?)~~'), r'<sub>\1</sub>'),
(r(r'\[\[(.+?)\|(.+?)\]\]'), r'<a href="\1">\2</a>'),
(r(r'%{(.+?)}(.+?)%'), r'<span style="\1">\2</span>'),
(r(r'(ERROR)'), r'<span style="color:#FF0000;">\1</span>'),
(r(r'(PROBLEM|OFF)'), r'<span style="color:#FF0000;"><strong>\1</strong></span>'),
(r(r'(OK|ON)'), r'<span style="color:#00FF00;"><strong>\1</strong></span>'),
(r(r'([Dd]isaster)'), r'<span style="color:#FF0000;"><strong>\1</strong></span>'),
(r(r'([Cc]ritical)'), r'<span style="color:#FF3300;"><strong>\1</strong></span>'),
(r(r'([Hh]igh)'), r'<span style="color:#FF6600;"><strong>\1</strong></span>'),
(r(r'([Aa]verage)'), r'<span style="color:#FF9900;"><strong>\1</strong></span>'),
(r(r'([Ww]arning)'), r'<span style="color:#FFCC00;"><strong>\1</strong></span>'),
# (r(r'([Ii]nformation)'), r'<span style="color:#FFFF00;"><strong>\1</strong></span>'),
(r(r'(Monitored)'), r'<span style="color:#00FF00;"><strong>\1</strong></span>'),
(r(r'(Not\ monitored)'), r'<span style="color:#FF0000;"><strong>\1</strong></span>'),
('\n', '<br/>\n'),
)
class MessageError(Exception):
"""
Error while creating new XMPP message.
"""
pass
def red(s):
return '%%{color:#FF0000}%s%%' % s
def green(s):
return '%%{color:#00FF00}%s%%' % s
def blue(s):
return '%%{color:#0000FF}%s%%' % s
# noinspection PyAttributeOutsideInit
class IncomingLudolphMessage(Message):
"""
SleekXMPP Message object wrapper.
"""
_ludolph_attrs = ('reply_output', 'stream_output')
@classmethod
def wrap_msg(cls, msg):
"""Inject our properties into original Message object"""
if isinstance(msg, cls):
raise TypeError('Message object is already wrapped')
obj = cls()
obj.__class__ = type(msg.__class__.__name__, (cls, msg.__class__), {})
obj.__dict__ = msg.__dict__
return obj
def dump(self):
data = {}
# The underlying ElementBase object does not implement the dict interface properly
for k in self.interfaces:
v = self.get(k, None)
if v is not None:
data[k] = str(v)
# Add our custom attributes
for i in self._ludolph_attrs:
data[i] = getattr(self, i)
return data
@classmethod
def load(cls, data):
from ludolph.bot import get_xmpp
obj = cls(stream=get_xmpp().client)
# First set our custom attributes
for i in cls._ludolph_attrs:
try:
setattr(obj, i, data.pop(i))
except KeyError:
continue
# The all other ElementBase items
for k, v in data.items():
obj[k] = v
return obj
def _get_ludolph_attr(self, attr, default, set_default=False):
try:
return getattr(self, attr)
except AttributeError:
if set_default:
setattr(self, attr, default)
return default
def get_reply_output(self, default=True, set_default=False):
return self._get_ludolph_attr('_reply_output_', default, set_default=set_default)
def set_reply_output(self, value):
self._reply_output_ = value
reply_output = property(get_reply_output, set_reply_output)
def get_stream_output(self, default=False, set_default=False):
return self._get_ludolph_attr('_stream_output_', default, set_default=set_default)
def set_stream_output(self, value):
self._stream_output_ = value
stream_output = property(get_stream_output, set_stream_output)
class OutgoingLudolphMessage(object):
"""
Creating and sending bots messages (replies).
"""
def __init__(self, mbody, mhtml=None, mtype=None, msubject=None, delay=None, timestamp=None):
"""
Construct message body in plain text and html.
"""
self.mtype = mtype
self.msubject = msubject
if mbody is not None:
self.mbody = self._text2body(str(mbody))
if mhtml is None and mbody is not None:
self.mhtml = self._text2html(str(mbody))
else:
self.mhtml = str(mhtml)
if delay:
timestamp = datetime.utcnow() + timedelta(seconds=delay)
self.timestamp = timestamp
@staticmethod
def _replace(replist, text):
"""
Helper for replacing text parts according to replist.
"""
for rx, te in replist:
# noinspection PyProtectedMember
if isinstance(rx, re._pattern_type):
try:
text = rx.sub(te, text)
except re.error as exc:
logger.error('Regexp error during message text replacement: %s', exc)
else:
text = text.replace(rx, te)
return text
def _text2body(self, text):
"""
Remove tags from text.
"""
body = self._replace(TEXT2BODY, text.strip())
return body
def _text2html(self, text):
"""
Convert text to html.
"""
html = self._replace(TEXT2HTML, text.strip())
html = '<div>\n' + html + '\n</div>'
try:
# noinspection PyUnresolvedReferences
return ET.XML(html)
except (ParseError, SyntaxError) as e:
logger.error('Could not parse html: %s', e)
return None
@classmethod
def create(cls, mbody, **kwargs):
"""
Return LudolphMessage instance.
"""
if isinstance(mbody, cls):
return mbody
return cls(mbody, **kwargs)
def send(self, xmpp, mto, mfrom=None, mnick=None):
"""
Send a new message.
"""
msg = xmpp.client.make_message(mto, self.mbody, msubject=self.msubject, mtype=self.mtype, mhtml=self.mhtml,
mfrom=mfrom, mnick=mnick)
if self.timestamp:
msg['delay'].set_stamp(self.timestamp)
return msg.send()
def reply(self, msg, clear=True):
"""
Send a reply to incoming msg.
"""
msg.reply(self.mbody, clear=clear)
msg['html']['body'] = self.mhtml
if self.timestamp:
msg['delay'].set_stamp(self.timestamp)
return msg.send()
LudolphMessage = OutgoingLudolphMessage # Backward compatibility
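# Illustrative sketch of how a replacement table in the style of TEXT2BODY above
# turns the bot's lightweight markup into plain text. The rules and sample string
# here are a reduced demo, not the full table used by the class.
if __name__ == '__main__':
    _demo_rules = (
        (re.compile(r'\*\*(.+?)\*\*'), r'*\1*'),       # **bold**      -> *bold*
        (re.compile(r'\[\[(.+?)\|(.+?)\]\]'), r'\1'),  # [[url|label]] -> url
    )
    _demo_text = '**PROBLEM** on [[http://example.org|web01]]'
    for _rx, _repl in _demo_rules:
        _demo_text = _rx.sub(_repl, _demo_text)
    print(_demo_text)  # -> *PROBLEM* on http://example.org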
| 29.0625
| 115
| 0.557258
|
61016253d65208ae4a9dd7de8bc18e95b89738ac
| 984
|
py
|
Python
|
standard_library/threads/demo_consumer_producer.py
|
2581676612/python
|
b309564a05838b23044bb8112fd4ef71307266b6
|
[
"MIT"
] | 112
|
2017-09-19T17:38:38.000Z
|
2020-05-27T18:00:27.000Z
|
standard_library/threads/demo_consumer_producer.py
|
tomoncle/Python-notes
|
ce675486290c3d1c7c2e4890b57e3d0c8a1228cc
|
[
"MIT"
] | null | null | null |
standard_library/threads/demo_consumer_producer.py
|
tomoncle/Python-notes
|
ce675486290c3d1c7c2e4890b57e3d0c8a1228cc
|
[
"MIT"
] | 56
|
2017-09-20T01:24:12.000Z
|
2020-04-16T06:19:31.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/4/29 14:44
# @Author : tom.lee
# @Site :
# @File : consumer_producer.py
# @Software: PyCharm
import time
import threading
import Queue
class Consumer(threading.Thread):
def __init__(self, queue):
threading.Thread.__init__(self)
self._queue = queue
def run(self):
while True:
msg = self._queue.get()
if isinstance(msg, str) and msg == 'quit':
break
print "I'm a thread, and I received %s!!" % msg
self._queue.task_done()
print 'Bye byes!'
def producer():
queue = Queue.Queue()
worker = Consumer(queue)
worker.start()
start_time = time.time()
# While under 5 seconds..
while time.time() - start_time < 5:
queue.put('something at %s' % time.time())
time.sleep(1)
queue.put('quit')
worker.join()
if __name__ == '__main__':
print 'test'
producer()
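# A rough Python 3 equivalent of the producer/consumer demo above (illustrative
# sketch only, requires Python 3): the stdlib module is `queue` and print is a
# function; a plain Thread with a target replaces the Consumer subclass.
import queue
def _consumer3(q):
    while True:
        msg = q.get()
        if msg == 'quit':
            q.task_done()
            break
        print("I'm a thread, and I received %s!!" % msg)
        q.task_done()
    print('Bye byes!')
def _producer3():
    q = queue.Queue()
    worker = threading.Thread(target=_consumer3, args=(q,))
    worker.start()
    start_time = time.time()
    while time.time() - start_time < 5:
        q.put('something at %s' % time.time())
        time.sleep(1)
    q.put('quit')
    worker.join()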
| 21.391304
| 59
| 0.569106
|
f20734a7d1a08c44b84b414beed794b22624cacc
| 477
|
py
|
Python
|
training/size_convertion.py
|
ndl-lab/ssd_keras
|
94c951cbe7b3d8c59a3ef5b2db26af5aa2099cc4
|
[
"CC-BY-4.0",
"MIT"
] | 4
|
2019-08-23T13:12:44.000Z
|
2019-12-24T05:40:17.000Z
|
training/size_convertion.py
|
ndl-lab/ssd_keras
|
94c951cbe7b3d8c59a3ef5b2db26af5aa2099cc4
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
training/size_convertion.py
|
ndl-lab/ssd_keras
|
94c951cbe7b3d8c59a3ef5b2db26af5aa2099cc4
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
import numpy as np
import os
import glob
import pandas as pd
import cv2
from xml.etree import ElementTree
import random
import shutil
import glob
from PIL import Image
for dirname in os.listdir("tmp"):
print(dirname)
for fpath in glob.glob(os.path.join("fullsizeimg",dirname,"original","*")):
img=Image.open(fpath)
img_resize = img.resize((300, 300),Image.LANCZOS)
img_resize.save(os.path.join("300_300img",dirname+"_"+os.path.basename(fpath)))
| 28.058824
| 87
| 0.721174
|
16470838ce12928e0b9945d343c1bc7d3952caee
| 235
|
py
|
Python
|
20211125_PwnIntro/3_java/solution.py
|
alessandro-massarenti/Cybersec2021
|
3d6dcc4b255dd425b1be66d440df1d94d5ea5ac0
|
[
"BSD-3-Clause"
] | 15
|
2021-10-01T16:10:48.000Z
|
2022-02-19T20:45:35.000Z
|
20211125_PwnIntro/3_java/solution.py
|
alessandro-massarenti/Cybersec2021
|
3d6dcc4b255dd425b1be66d440df1d94d5ea5ac0
|
[
"BSD-3-Clause"
] | null | null | null |
20211125_PwnIntro/3_java/solution.py
|
alessandro-massarenti/Cybersec2021
|
3d6dcc4b255dd425b1be66d440df1d94d5ea5ac0
|
[
"BSD-3-Clause"
] | 2
|
2021-11-06T08:32:41.000Z
|
2021-12-11T16:18:54.000Z
|
from pwn import *
context.binary = "./java"
p = process()
p.sendline(b"java" + b"A" * 28 + p64(context.binary.functions["bash"].address + 0x32))
p.sendline(b"cat flag.txt")
log.success(p.recvline_regex(rb".*{.*}.*").decode("ascii"))
| 26.111111
| 86
| 0.655319
|
eca90e337e79d729395c28aaf83ac96fbce7ebd4
| 1,952
|
py
|
Python
|
tests/cli/helpers/language.py
|
cugu-stars/plaso
|
a205f8e52dfe4c239aeae5558d572806b7b00e81
|
[
"Apache-2.0"
] | 2
|
2019-10-23T03:37:59.000Z
|
2020-08-14T17:09:26.000Z
|
tests/cli/helpers/language.py
|
cugu-stars/plaso
|
a205f8e52dfe4c239aeae5558d572806b7b00e81
|
[
"Apache-2.0"
] | null | null | null |
tests/cli/helpers/language.py
|
cugu-stars/plaso
|
a205f8e52dfe4c239aeae5558d572806b7b00e81
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the language CLI arguments helper."""
import argparse
import unittest
from plaso.cli import tools
from plaso.cli.helpers import language
from plaso.lib import errors
from tests.cli import test_lib as cli_test_lib
class LanguageArgumentsHelperTest(cli_test_lib.CLIToolTestCase):
"""Tests for the language CLI arguments helper."""
# pylint: disable=no-member,protected-access
_EXPECTED_OUTPUT = """\
usage: cli_helper.py [--language LANGUAGE]
Test argument parser.
optional arguments:
--language LANGUAGE The preferred language identifier for Windows Event Log
message strings. Use "--language list" to see a list of
available language identifiers. Note that formatting
will fall back on en-US (LCID 0x0409) if the preferred
language is not available in the database of message
string templates.
"""
def testAddArguments(self):
"""Tests the AddArguments function."""
argument_parser = argparse.ArgumentParser(
prog='cli_helper.py', description='Test argument parser.',
add_help=False,
formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)
language.LanguageArgumentsHelper.AddArguments(argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_OUTPUT)
def testParseOptions(self):
"""Tests the ParseOptions function."""
options = cli_test_lib.TestOptions()
options.preferred_language = 'is'
test_tool = tools.CLITool()
language.LanguageArgumentsHelper.ParseOptions(options, test_tool)
self.assertEqual(test_tool._preferred_language, options.preferred_language)
with self.assertRaises(errors.BadConfigObject):
language.LanguageArgumentsHelper.ParseOptions(options, None)
if __name__ == '__main__':
unittest.main()
| 31.483871
| 79
| 0.720287
|
6445b6a74b119c1876423ab6d483861eb080e4bc
| 16,508
|
py
|
Python
|
NetBuilder.py
|
xxxyyyqqq12345/NetBuilder
|
7ff2c441c397aa44b0e4435639609c4cb155f969
|
[
"MIT"
] | null | null | null |
NetBuilder.py
|
xxxyyyqqq12345/NetBuilder
|
7ff2c441c397aa44b0e4435639609c4cb155f969
|
[
"MIT"
] | null | null | null |
NetBuilder.py
|
xxxyyyqqq12345/NetBuilder
|
7ff2c441c397aa44b0e4435639609c4cb155f969
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
from Graph import *
class NNetLayers(object):
tfConvs=[tf.layers.conv1d,tf.layers.conv2d,tf.layers.conv3d]
tfConvsT=[tf.layers.conv1d,tf.layers.conv2d_transpose,tf.layers.conv3d_transpose]
tfMPools=[tf.layers.max_pooling1d,tf.layers.max_pooling2d,tf.layers.max_pooling3d]
tfAPools=[tf.layers.average_pooling1d,tf.layers.average_pooling2d,tf.layers.average_pooling3d]
tflayer_map={"dense":tf.layers.dense,"conv":tfConvs,"conv1d":tf.layers.conv1d,"conv2d":tf.layers.conv2d,
"conv3d":tf.layers.conv3d,"conv_t":tfConvsT,"conv1d_t":tf.layers.conv1d,
"conv2d_t":tf.layers.conv2d_transpose,"conv3d_t":tf.layers.conv3d_transpose,"dropout":tf.layers.dropout,
"Maxpool":tfMPools,"Averagepool":tfAPools,"Apool":tfAPools,"Avpool":tfAPools,"flatten":tf.layers.Flatten,
"LSTM":tf.contrib.rnn.LayerNormBasicLSTMCell,"lstm":tf.contrib.rnn.LayerNormBasicLSTMCell,
"+":tf.math.add,"/":tf.math.divide,"-":tf.math.subtract,"*":tf.math.multiply,"Input":tf.placeholder,
"input":tf.placeholder,"Inputs":tf.placeholder,"inputs":tf.placeholder,
"mean_squared_error":tf.losses.mean_squared_error,"MSE":tf.losses.mean_squared_error,
"softmax_cross_entropy":tf.losses.softmax_cross_entropy,"SoftCE":tf.losses.softmax_cross_entropy,
"sigmoid_cross_entropy":tf.losses.sigmoid_cross_entropy,"SigtCE":tf.losses.sigmoid_cross_entropy
}
tfDenseArg=["inputs","units","activation","use_bias","kernel_initializer","bias_initializer","kernel_regularizer",
"bias_regularizer","activity_regularizer","kernel_constraint","bias_constraint","trainable","name","reuse"]
tfConvArg=["inputs","filters","kernel_size","strides","padding","data_format","dilation_rate","activation","use_bias",
"kernel_initializer","bias_initializer","kernel_regularizer","bias_regularizer","activity_regularizer",
"kernel_constraint","bias_constraint","trainable","name","reuse"]
tfConvTArg=["inputs","filters","kernel_size","strides","padding","data_format","activation","use_bias",
"kernel_initializer","bias_initializer","kernel_regularizer","bias_regularizer","activity_regularizer",
"kernel_constraint","bias_constraint","trainable","name","reuse"]
tfDropoutArg=["inputs","rate","noise_shape","seed","training","name"]
tfFlattenArg=["inputs","name"]
tfPoolArg=["inputs","pool_size","strides","padding","data_format","name"]
tfoperatorargs=["x","y","name"]
tfoperatorinputargs=["dtype","shape","name"]
tfMSE=["labels","predictions","weights","scope","loss_collection","reduction"]
tfSCE=["labels","logits","weights","label_smoothing","scope","loss_collection","reduction"]
tfArgList={tf.layers.dense:tfDenseArg,tf.layers.conv1d:tfConvArg,tf.layers.conv2d:tfConvArg,tf.layers.conv3d:tfConvArg,
tf.layers.conv2d_transpose:tfConvTArg,tf.layers.conv3d_transpose:tfConvTArg,tf.layers.dropout:tfDropoutArg,
tf.layers.Flatten:tfFlattenArg,tf.layers.max_pooling1d:tfPoolArg,tf.layers.max_pooling2d:tfPoolArg,
tf.layers.max_pooling3d:tfPoolArg,tf.layers.average_pooling1d:tfPoolArg,tf.layers.average_pooling2d:tfPoolArg,
tf.layers.average_pooling3d:tfPoolArg,tf.math.add:tfoperatorargs,tf.math.divide:tfoperatorargs,
tf.math.multiply:tfoperatorargs,tf.math.subtract:tfoperatorargs,tf.placeholder:tfoperatorinputargs,
tf.losses.mean_squared_error:tfMSE,tf.losses.softmax_cross_entropy:tfSCE,
tf.losses.sigmoid_cross_entropy:tfSCE
}
inputx2=["+","-","*","/","mean_squared_error","softmax_cross_entropy","sigmoid_cross_entropy","MSE","SoftCE","SigtCE"]
Input_variation=["Input","Inputs","input","inputs"]
def __init__(self,ntype,dim=None,net_specification={},custom_func=None,name=None):
self.type=ntype
self.dim=dim
self.net_specification=net_specification
self.custom_func=custom_func
self.values=None
self.name=name
self.trainning_var=None
self.W=None
self.b=None
def build_layer(self,inp,pack):
self.pack=pack
if pack is "tf":
return self.build_tf(inp)
elif pack is "keras":
return self.build_keras(inp)
elif pack is "pytorch":
return self.build_pytorch(inp)
def build_keras(self,inp):
pass
def build_pytorch(self,inp):
pass
def build_tf(self,inp):
layer_func=self._get_func("tf",self.type,self.dim)
args=self._fill_func(inp,layer_func)
if type(args) is list:
self.layer=layer_func(*args)
        elif type(args) is dict:
self.layer=layer_func(**args)
return self.layer
def _get_func(self,pack,ntype,dim=None):
if pack is "tf":
if self.custom_func is not None:
return self.custom_func
lfunc=self.tflayer_map[ntype]
if type(lfunc) is list:
lfunc=lfunc[dim-1]
elif pack is "keras":
pass
else:
pass
return lfunc
def get_input_length(self):
if self.type in self.Input_variation:
return 0
elif self.type in self.inputx2:
return 2
else:
return 1
def _fill_func(self,Input,layer_func):
input_len=self.get_input_length()
inp=[i.layer for i in Input]
assert(len(inp)==input_len)
if type(self.net_specification) is list:
args=inp+self.net_specification
elif type(self.net_specification) is dict:
args={}
if "inputs" in self.tfArgList[layer_func]:
args["inputs"]=inp[0]
if "x" in self.tfArgList[layer_func]:
args["x"]=inp[0]
if "labels" in self.tfArgList[layer_func]:
args["labels"]=inp[0]
if "y" in self.tfArgList[layer_func]:
args["y"]=inp[1]
if "predictions" in self.tfArgList[layer_func]:
args["predictions"]=inp[1]
if "logits" in self.tfArgList[layer_func]:
args["logits"]=inp[1]
if self.custom_func is not None:
return args
if self.custom_func is not None:
return args
#check everything is alright and only feed required args
for arg in self.tfArgList[layer_func]:
if arg in self.net_specification:
args[arg]=self.net_specification[arg]
if "training" in self.tfArgList[layer_func] and self.trainning_var is not None:
args["training"]=self.trainning_var
return args
def set_train_var(self,var):
self.trainning_var=var
def helpfunc(self,pack=None,ntype=None):
if ntype is None:
ntype=self.type
if pack is None or pack is "tf":
print("Tensorflow Parameters:")
for i in self.tfArgList[self._get_func("tf",ntype)]:
print(" "+i)
elif pack is None or pack is "keras":
pass
def save_weights(self,args={}):
if self.pack is "tf":
assert("sess" in args)
self._save_weights_tf(args["sess"])
elif self.pack is "keras":
self._save_weights_keras()
elif self.pack is "pytorch":
self._save_weights_pytorch()
def _save_weights_tf(self,sess):
Name=self.layer.name.partition(":")[0].partition("/")[0]
W=[v for v in tf.trainable_variables() if v.name == Name+"/kernel:0"]
b=[v for v in tf.trainable_variables() if v.name == Name+"/bias:0"]
if len(W)>0:
self.W=W[0].eval(sess)
self.b=b[0].eval(sess)
def assign_weights(self,args={}):
if self.pack is "tf":
assert("sess" in args)
self._assign_weights_tf(args["sess"])
elif self.pack is "keras":
self._assign_weights_keras()
elif self.pack is "pytorch":
self._assign_weights_pytorch()
def _assign_weights_tf(self,sess):
Name=self.layer.name.partition(":")[0].partition("/")[0]
W=[v for v in tf.trainable_variables() if v.name == Name+"/kernel:0"]
b=[v for v in tf.trainable_variables() if v.name == Name+"/bias:0"]
if self.W is not None:
W[0].load(self.W,sess)
if self.b is not None:
b[0].load(self.b,sess)
def _assign_weights_keras(self):
pass
def _assign_weights_pytorch(self):
pass
class Optimizer(object):
Optimizers_tf={"Adam":tf.train.AdamOptimizer,"AdamOptimizer":tf.train.AdamOptimizer,
"Adagrad":tf.train.AdagradOptimizer,"AdagradOptimizer":tf.train.AdagradOptimizer,
"Adadelta":tf.train.AdadeltaOptimizer,"AdadeltaOptimizer":tf.train.AdadeltaOptimizer}
Optimizers_keras={}
Optimizers_pythorch={}
Optimizers={"tf":Optimizers_tf,"keras":Optimizers_keras,"pythorch":Optimizers_pythorch}
def __init__(self,opttype,inputs,args,ntype=None):
self.ntype=ntype
self.opttype=opttype
self.inputs=inputs
self.args=args
self.built=0
if ntype is not None:
self.build(ntype)
def build(self,ntype):
if not self.built:
if type(self.inputs) is list:
Input=[Inp.layer for Inp in self.inputs]
else:
Input=self.inputs.layer
self.ntype=ntype
if type(self.args) is list:
self.optimizer=self.Optimizers[self.ntype][self.opttype](*self.args).minimize(Input)
elif type(self.args) is dict:
self.optimizer=self.Optimizers[self.ntype][self.opttype](**self.args).minimize(Input)
self.built=1
class NNet(object):
def __init__(self,inputs,outputs,Net,name=None):
self.name=name
self.inputs=inputs # dict or list of inputs
self.outputs=outputs
self.Net=Net
self.optimizers={}
self.loss={} #loss functions
self.layers={}
for v in self.Net.V:
self.layers[v.name]=v
self.ntype=None
self.net_built=0
def build_net(self,pack):
self.ntype=pack
if pack is "tf":
self._build_tf_net()
elif pack is "keras":
self._build_keras_net()
elif pack is "pytorch":
self._build_pytorch_net()
for optimizer in self.optimizers:
self.optimizers[optimizer].build(self.ntype)
def create_optimizer(self,name,optimizer_type,inputs,args={"learning_rate":0.01}):
if self.optimizers is None:
self.optimizers={}
if type(inputs) is list:
Input=[]
for inp in inputs:
Input+=[self.layers[inp]]
else:
Input=self.layers[inputs]
self.optimizers[name]=Optimizer(optimizer_type,Input,args,self.ntype)
def display(self):
#to change away from nx library
G=nx.DiGraph()
for e in self.Net.E:
G.add_edge(e[0].name,e[1].name)
nx.draw(G, with_labels = True)
plt.show()
def _build_tf_net(self):
self._is_training=tf.placeholder_with_default(True,shape=())
built_layer_list=[]
for inp in self.inputs:
self.inputs[inp].build_layer([],"tf")
built_layer_list+=[self.inputs[inp]]
forward_net=self.Net.get_forward_graph()
for layers in forward_net:
for layer in layers:
layer.set_train_var(self._is_training)
if layer not in built_layer_list:
InputFrom=self.Net.E_in[layer]
layer_input=InputFrom
layer.build_layer(layer_input,"tf")
self.net_built=1
def _build_keras_net(self):
raise Exception("not implemented for keras yet")
def _build_pytorch_net(self):
raise Exception("not implemented for pytorch yet")
def save_net(self):
pass
def load_net(self):
pass
def init_weights(self,args={}):
for name,layer in self.layers.items():
layer.assign_weights(args)
def run_net(self,inputs,outputs=None):
if not self.net_built:
raise Exception("Net not built, run build_net(tool_type)")
if self.ntype is "tf":
out=self._run_net_tf(inputs)
elif self.ntype is "keras":
pass
elif self.ntype is "pytorch":
pass
return out
def _run_net_tf(self,inputs,outputs=None):
#implement selecting outputs
#to change maybe to remove sess elsewhere
sess = tf.Session()
sess.run(tf.global_variables_initializer())
#reassign all weights
self.init_weights({"sess":sess})
feed_dict={}
feed_dict[self._is_training]=False
for inp in inputs:
if inp in self.inputs:
feed_dict[self.inputs[inp].layer]=inputs[inp]
if type(self.outputs) is list:
tf_out=[out.layer for out in self.outputs]
elif type(self.outputs) is dict:
tf_out=[out.layer for out in self.outputs.values]
out=sess.run(tf_out,feed_dict=feed_dict)
sess.close()
return out
def train_net(self,inputs,optimizer_name,outputs=None,batch_size=1,test_perc=0,epochs=1):
if not self.net_built:
raise Exception("Net not built, run build_net(tool_type)")
if self.ntype is "tf":
out=self._train_net_tf(inputs,optimizer_name,outputs,batch_size,test_perc,epochs)
elif self.ntype is "keras":
pass
elif self.ntype is "pytorch":
pass
return out
def _train_net_tf(self,inputs,optimizer_name,outputs=None,batch_size=1,test_perc=0,epochs=1):
"""
inputs: a matrix of inputs, the 1st dimension is assumed to be the batch size
optimizer_name: name of the optimizer function used
outputs: actual outputs for training purpose, None if not used
batch_size: batch_size of every train step
test_perc: percentage of test set, test ratio+train ratio=1
epochs: number of trainning epochs
"""
sess = tf.Session()
sess.run(tf.global_variables_initializer())
exp_set_size=None
for inp in inputs:
if exp_set_size is not None:
assert(inputs[inp].shape[0]==exp_set_size)
else:
exp_set_size=inputs[inp].shape[0]
Opt=self.optimizers[optimizer_name].optimizer
run_vals=[Opt]
#reassign all weights
self.init_weights({"sess":sess})
if outputs is None:
if type(self.outputs) is list:
run_vals+=[out.layer for out in self.outputs]
elif type(self.outputs) is dict:
run_vals+=[out.layer for out in self.outputs.values]
else:
if type(outputs) is list:
run_vals+=[self.layers[out].layer for out in outputs]
elif type(outputs) is dict:
run_vals+=[self.layers[out].layer for out in outputs.values]
if test_perc==0:
Test_Set=[]
train_range=exp_set_size-1
else:
Test_Set=[]
train_range=exp_set_size-1
#to change and implement test_perc
last_outs=[]
for i in range(epochs):
inp_set=np.random.choice(train_range, batch_size, replace=False)
feed_dict={}
feed_dict[self._is_training]=True
for inp in inputs:
if inp in self.inputs:
feed_dict[self.inputs[inp].layer]=inputs[inp][inp_set]
out=sess.run(run_vals, feed_dict=feed_dict)
out=out[1:len(out)]
last_outs+=[out]
if len(last_outs)>10:
last_outs=last_outs[1:len(last_outs)]
for name,layer in self.layers.items():
layer.save_weights({"sess":sess})
sess.close()
return last_outs
| 42.437018
| 125
| 0.606797
|
e0c16832154470689bcfa03e85670610fa9db21b
| 5,022
|
py
|
Python
|
deploy.py
|
shusain93/dehydrated-dreamhost-hook
|
3b49bbf4395a092499b73017b0c853a6433d24d9
|
[
"MIT"
] | null | null | null |
deploy.py
|
shusain93/dehydrated-dreamhost-hook
|
3b49bbf4395a092499b73017b0c853a6433d24d9
|
[
"MIT"
] | 1
|
2020-11-15T16:45:42.000Z
|
2020-11-15T16:45:42.000Z
|
deploy.py
|
shusain93/dehydrated-dreamhost-hook
|
3b49bbf4395a092499b73017b0c853a6433d24d9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Deploys new Let's Encrypt certificate.
Copyright (c) 2016 Erin Morelli
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
"""
from __future__ import print_function
import filecmp
import os
import sys
import shutil
import subprocess
import yaml
# Define letsencrypt.sh defaults
LETSENCRYPT_ROOT = '/etc/dehydrated/certs/{domain}/{pem}.pem'
# Set user config file path
CONFIG_FILE = os.path.join(
os.path.expanduser('~'), '.config', 'dehydrated', 'deploy.conf')
# Set generic error message template
ERROR = ' + ERROR: Could not locate {name} files:\n\t{files}'
def parse_config():
"""Parse the user config file."""
print(
'# INFO: Using deployment config file {0}'.format(CONFIG_FILE),
file=sys.stdout
)
# Make sure file exists
if not os.path.exists(CONFIG_FILE):
sys.exit(ERROR.format(name='deployment config', files=CONFIG_FILE))
# Parse YAML config file
    return yaml.safe_load(open(CONFIG_FILE, 'r'))
def deploy_file(file_type, old_file, new_file):
"""Deploy new file and store old file."""
# If the two files are the same, bail
if filecmp.cmp(old_file, new_file):
print(
' + WARNING: {0} matches new {1}, skipping deployment'.format(
old_file,
file_type
),
file=sys.stdout
)
return False
# Get old file information
stat = os.stat(old_file)
# Rename existing file
os.rename(old_file, '{0}.bak'.format(old_file))
    # Copy new file
shutil.copy(new_file, old_file)
# Update file ownership
os.chown(old_file, stat.st_uid, stat.st_gid)
# Update file permissions
os.chmod(old_file, stat.st_mode)
print(
        ' + Successfully deployed new {0} to {1}'.format(file_type, old_file),
file=sys.stdout
)
return True
def deploy_domain(domain, config):
"""Deploy new certs for a given domain."""
print('Deploying new files for: {0}'.format(domain), file=sys.stdout)
deployed = False
# Deploy new certs for each location
for location in config:
# Loop through file types
for file_type in location.keys():
# Get the new version of this file
new_file = LETSENCRYPT_ROOT.format(domain=domain, pem=file_type)
# Make sure it exists
if not os.path.exists(new_file):
sys.exit(
ERROR.format(
name='new {0}'.format(file_type),
files=new_file
)
)
# Get the old version
old_file = location[file_type]
# Make sure it exists
if not os.path.exists(old_file):
sys.exit(
ERROR.format(
name='old {0}'.format(file_type),
files=old_file
)
)
# Deploy new file
deploy_success = deploy_file(file_type, old_file, new_file)
# Set deploy status
if deploy_success:
deployed = True
return deployed
def run_deployment():
"""Main wrapper function."""
print('Starting new file deployment', file=sys.stdout)
# Get user deploy config
config = parse_config()
    # Monitor for new deployments
saw_new_deployments = False
# Iterate over domains
for domain in config['domains'].keys():
# Deploy new files for the domain
deployed = deploy_domain(domain, config['domains'][domain])
if deployed:
saw_new_deployments = True
# Only run post-deployment actions if we saw new deploys
if saw_new_deployments:
# Run post deployment actions
print('Starting post-deployment actions', file=sys.stdout)
for action in config['post_actions']:
print(' + Attempting action: {0}'.format(action), file=sys.stdout)
try:
# Attempt action
status = subprocess.call(action, shell=True)
# Return result
print(
' + Action exited with status {0}'.format(status),
file=sys.stdout
)
except OSError as error:
# Catch errors
print(' + ERROR: {0}'.format(error), file=sys.stderr)
print('New file deployment done.', file=sys.stdout)
if __name__ == '__main__':
run_deployment()
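# Illustrative sketch of the structure parse_config() expects in deploy.conf, shown
# as the parsed Python object. The 'domains' and 'post_actions' keys come from the
# code above; the per-location keys must match the {pem}.pem names produced by
# dehydrated, and the paths/commands below are hypothetical.
_EXAMPLE_CONFIG = {
    'domains': {
        'example.com': [
            {
                'fullchain': '/etc/nginx/ssl/example.com/fullchain.pem',  # hypothetical path
                'privkey': '/etc/nginx/ssl/example.com/privkey.pem',      # hypothetical path
            },
        ],
    },
    'post_actions': [
        'systemctl reload nginx',  # hypothetical command
    ],
}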
| 27.9
| 78
| 0.607129
|
ce288d8f965086e32006ac63d641074c31d7cea4
| 2,528
|
py
|
Python
|
app/management/commands/train_item_cf.py
|
skeptycal/albedo
|
50f76f080c7a834cb75ac42daa56a1895af2e595
|
[
"MIT"
] | 1
|
2021-12-30T17:09:14.000Z
|
2021-12-30T17:09:14.000Z
|
app/management/commands/train_item_cf.py
|
00mjk/albedo
|
be94cad3e806616850af985e5befffa2f0898a21
|
[
"MIT"
] | 1
|
2021-06-10T23:12:19.000Z
|
2021-06-10T23:12:19.000Z
|
app/management/commands/train_item_cf.py
|
00mjk/albedo
|
be94cad3e806616850af985e5befffa2f0898a21
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from django.core.management.base import BaseCommand
from sklearn.metrics.pairwise import pairwise_distances
import numpy as np
import pandas as pd
from app.utils_repo import prepare_user_item_df
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('-u', '--username', action='store', dest='username', required=True)
def handle(self, *args, **options):
active_user = options['username']
print(self.style.SUCCESS('Active user: @{0}'.format(active_user)))
user_item_df = prepare_user_item_df(min_stargazers_count=500)
n_users, n_items = user_item_df.shape
print(self.style.SUCCESS('Build the utility matrix:'))
print(self.style.SUCCESS('The number of users: {0}'.format(n_users)))
print(self.style.SUCCESS('The number of items: {0}'.format(n_items)))
print(self.style.SUCCESS('Calculate similarities of items'))
user_item_matrix = user_item_df.as_matrix()
item_user_matrix = user_item_df.T.as_matrix()
similarity_method = 'cosine'
filename = 'caches/item-similarities-{0}x{1}-{2}.pickle'.format(n_users, n_items, similarity_method)
try:
item_similarities = np.load(open(filename, 'rb'))
except IOError:
item_similarities = 1 - pairwise_distances(item_user_matrix, metric=similarity_method)
np.save(open(filename, 'wb'), item_similarities)
print(self.style.SUCCESS('Calculate predictions'))
users_array = user_item_df.index
items_array = user_item_df.columns
predictions = user_item_matrix.dot(item_similarities) / np.abs(item_similarities).sum(axis=1)
prediction_df = pd.DataFrame(predictions, index=users_array, columns=items_array)
print('item_user_matrix: {0}'.format(item_user_matrix.shape))
print('item_similarities: {0}'.format(item_similarities.shape))
print('predictions: {0}'.format(predictions.shape))
user_starred = user_item_df.loc[active_user, :][user_item_df.loc[active_user, :] == 1]
user_unstarred = prediction_df.loc[active_user, :].drop(user_starred.index)
user_unstarred.sort_values(ascending=False)
recommendations = user_unstarred.sort_values(ascending=False).index.tolist()
print(self.style.SUCCESS('Recommended repositories:'))
for i, repo in enumerate(recommendations[:100]):
print(self.style.SUCCESS('{0:02d}. https://github.com/{1}'.format(i + 1, repo)))
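# A tiny self-contained sketch of the prediction step used above (illustrative only):
# a user's star vector weighted by item-item cosine similarities. It reuses the
# module's numpy/sklearn imports and never runs under Django.
if __name__ == '__main__':
    toy_user_item = np.array([[1., 0., 1.],
                              [0., 1., 1.]])                      # 2 users x 3 items
    toy_item_sim = 1 - pairwise_distances(toy_user_item.T, metric='cosine')
    toy_pred = toy_user_item.dot(toy_item_sim) / np.abs(toy_item_sim).sum(axis=1)
    print(toy_pred.shape)  # (2, 3): one predicted score per user/item pair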
| 41.442623
| 108
| 0.69462
|
71984fb5c8d0c7fcc50afc77418691f478616baa
| 1,023
|
py
|
Python
|
stars_etc/multipleSnowflakesrandomColours.py
|
cmulliss/turtles-doing-things
|
70c8241bcf6d3b37104a59e92b5cf5a002fcb0bf
|
[
"CC0-1.0"
] | null | null | null |
stars_etc/multipleSnowflakesrandomColours.py
|
cmulliss/turtles-doing-things
|
70c8241bcf6d3b37104a59e92b5cf5a002fcb0bf
|
[
"CC0-1.0"
] | null | null | null |
stars_etc/multipleSnowflakesrandomColours.py
|
cmulliss/turtles-doing-things
|
70c8241bcf6d3b37104a59e92b5cf5a002fcb0bf
|
[
"CC0-1.0"
] | null | null | null |
# Multiple snowflakes
from turtle import *
import random
mode('logo')
shape('turtle')
color('white')
bgcolor('blue')
pensize(2)
shapesize(1)
speed(600)
colours = ['cyan', 'white', 'green', "red", 'yellow', 'red', 'orange', 'pink','purple', 'magenta','grey', 'turquoise' ]
#beginning of snowflake function
def snowflake(flakeSize):
color(random.choice(colours))
dFull = random.randint(4, 80)
dBranch = (dFull / 6)
dRem = (dFull / 2)
for i in range(6):
fd(dFull)
for i in range(3):
bk(dBranch)
lt(60)
fd(dBranch)
bk(dBranch)
rt(120)
fd(dBranch)
bk(dBranch)
lt(60)
bk(dRem)
rt(60)
#end of snowflake function
#loop to create multiple snowflakes in different locations
for i in range(20):
x = random.randint(-400, 400)
y = random.randint(-400, 400)
flakeSize = random.randint(1, 100)
penup()
goto(x, y)
pendown()
snowflake(flakeSize)
| 20.058824
| 119
| 0.57087
|
940a97c96fce4d5f9a8030c86500227dd4e0fe04
| 3,039
|
py
|
Python
|
test/pyaz/group/lock/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
test/pyaz/group/lock/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | 9
|
2021-09-24T16:37:24.000Z
|
2021-12-24T00:39:19.000Z
|
test/pyaz/group/lock/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
import json, subprocess
from ... pyaz_utils import get_cli_name, get_params
def create(name, lock_type, resource_group, __RESOURCE_PROVIDER_NAMESPACE=None, notes=None, __PARENT_RESOURCE_PATH=None, __RESOURCE_TYPE=None, __RESOURCE_NAME=None):
params = get_params(locals())
command = "az group lock create " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def delete(name=None, resource_group=None, __RESOURCE_PROVIDER_NAMESPACE=None, __PARENT_RESOURCE_PATH=None, __RESOURCE_TYPE=None, __RESOURCE_NAME=None, ids=None):
params = get_params(locals())
command = "az group lock delete " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def list(resource_group=None, __RESOURCE_PROVIDER_NAMESPACE=None, __PARENT_RESOURCE_PATH=None, __RESOURCE_TYPE=None, __RESOURCE_NAME=None, filter_string=None):
params = get_params(locals())
command = "az group lock list " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def show(name=None, resource_group=None, __RESOURCE_PROVIDER_NAMESPACE=None, __PARENT_RESOURCE_PATH=None, __RESOURCE_TYPE=None, __RESOURCE_NAME=None, ids=None):
params = get_params(locals())
command = "az group lock show " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def update(name=None, resource_group=None, __RESOURCE_PROVIDER_NAMESPACE=None, notes=None, __PARENT_RESOURCE_PATH=None, __RESOURCE_TYPE=None, __RESOURCE_NAME=None, lock_type=None, ids=None):
params = get_params(locals())
command = "az group lock update " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
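# Illustrative usage sketch (requires the Azure CLI on PATH and an authenticated
# session); all resource names below are hypothetical.
if __name__ == '__main__':
    create(name='demo-lock', lock_type='CanNotDelete', resource_group='demo-rg')
    print(list(resource_group='demo-rg'))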
| 41.067568
| 190
| 0.699572
|
bfb0edb73bb137813640133aefffa7dedbf66fa5
| 839
|
py
|
Python
|
utils.py
|
FanLgchen/JWT-Certification
|
3f840696a5a296f0ea9e5400625bbcc502779323
|
[
"MIT"
] | null | null | null |
utils.py
|
FanLgchen/JWT-Certification
|
3f840696a5a296f0ea9e5400625bbcc502779323
|
[
"MIT"
] | null | null | null |
utils.py
|
FanLgchen/JWT-Certification
|
3f840696a5a296f0ea9e5400625bbcc502779323
|
[
"MIT"
] | null | null | null |
import jwt as jwt
from flask import current_app
def generate_jwt(payload, expiry, secret=None):
"""
生成jwt
:param payload: dict 载荷
:param expiry: datetime 有效期
:param secret: 密钥
:return: jwt
"""
_payload = {'exp': expiry}
_payload.update(payload)
if not secret:
secret = current_app.config['JWT_SECRET']
# 获取token
token = jwt.encode(_payload, secret, algorithm='HS256')
# 返回token
return token.decode()
def verify_jwt(token, secret=None):
"""
检验jwt
:param token: jwt
:param secret: 密钥
:return: dict: payload
"""
if not secret:
secret = current_app.config['JWT_SECRET']
try:
# 校验jwt
payload = jwt.decode(token, secret, algorithm=['HS256'])
except jwt.PyJWTError:
payload = None
return payload
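# Illustrative round-trip sketch. Passing `secret` explicitly avoids needing a Flask
# application context; it assumes PyJWT 1.x, as the token.decode() call above implies.
if __name__ == '__main__':
    from datetime import datetime, timedelta
    demo_token = generate_jwt({'user_id': 42},
                              datetime.utcnow() + timedelta(hours=1),
                              secret='demo-secret')
    print(verify_jwt(demo_token, secret='demo-secret'))  # e.g. {'exp': ..., 'user_id': 42}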
| 17.479167
| 64
| 0.61025
|
99fabbea20b8537db72f4eb26799884a13f22ad3
| 24,350
|
py
|
Python
|
arc/mainTest.py
|
mefuller/ARC
|
69b877bd2e6063bc51410de6c65129a476a97bae
|
[
"MIT"
] | 30
|
2019-02-02T01:45:40.000Z
|
2022-03-20T13:03:27.000Z
|
arc/mainTest.py
|
mefuller/ARC
|
69b877bd2e6063bc51410de6c65129a476a97bae
|
[
"MIT"
] | 434
|
2018-12-24T18:00:07.000Z
|
2022-03-13T15:55:26.000Z
|
arc/mainTest.py
|
mefuller/ARC
|
69b877bd2e6063bc51410de6c65129a476a97bae
|
[
"MIT"
] | 19
|
2019-01-04T19:05:45.000Z
|
2022-03-20T13:03:28.000Z
|
#!/usr/bin/env python3
# encoding: utf-8
"""
This module contains unit tests for the arc.main module
"""
import os
import shutil
import unittest
from arc.common import ARC_PATH
from arc.exceptions import InputError
from arc.imports import settings
from arc.main import ARC, StatmechEnum, process_adaptive_levels
from arc.species.species import ARCSpecies
servers = settings['servers']
class TestEnumerationClasses(unittest.TestCase):
"""
Contains unit tests for various enumeration classes.
"""
def test_statmech_enum(self):
"""Test the StatmechEnum class"""
self.assertEqual(StatmechEnum('arkane').value, 'arkane')
with self.assertRaises(ValueError):
StatmechEnum('wrong')
class TestARC(unittest.TestCase):
"""
Contains unit tests for the ARC class
"""
@classmethod
def setUpClass(cls):
"""
A method that is run before all unit tests in this class.
"""
cls.maxDiff = None
cls.servers = servers.keys()
cls.job_types1 = {'conformers': True,
'opt': True,
'fine_grid': False,
'freq': True,
'sp': True,
'rotors': False,
'orbitals': False,
'lennard_jones': False,
'bde': True,
}
def test_as_dict(self):
"""Test the as_dict() method of ARC"""
spc1 = ARCSpecies(label='spc1',
smiles='CC',
compute_thermo=False,
)
arc0 = ARC(project='arc_test',
job_types=self.job_types1,
species=[spc1],
level_of_theory='ccsd(t)-f12/cc-pvdz-f12//b3lyp/6-311+g(3df,2p)',
three_params=False,
)
arc0.freq_level.args['keyword']['general'] = 'scf=(NDump=30)'
restart_dict = arc0.as_dict()
long_thermo_description = restart_dict['species'][0]['long_thermo_description']
self.assertIn('Bond corrections:', long_thermo_description)
self.assertIn("'C-C': 1", long_thermo_description)
self.assertIn("'C-H': 6", long_thermo_description)
# mol.atoms are not tested since all id's (including connectivity) changes depending on how the test is run.
expected_dict = {'T_count': 50,
'T_max': None,
'T_min': None,
'allow_nonisomorphic_2d': False,
'arkane_level_of_theory': {'basis': 'cc-pvdz-f12',
'method': 'ccsd(t)-f12',
'method_type': 'wavefunction',
'software': 'molpro'},
'calc_freq_factor': True,
'compute_transport': False,
'conformer_level': {'basis': 'def2svp',
'compatible_ess': ['gaussian', 'terachem'],
'method': 'wb97xd',
'method_type': 'dft',
'software': 'gaussian'},
'e_confs': 5.0,
'ess_settings': {'gaussian': ['local', 'server2'],
'molpro': ['local', 'server2'],
'onedmin': ['server1'],
'orca': ['local'],
'qchem': ['server1'],
'terachem': ['server1']},
'freq_level': {'basis': '6-311+g(3df,2p)',
'method': 'b3lyp',
'method_type': 'dft',
'software': 'gaussian'},
'freq_scale_factor': 0.967,
'irc_level': {'basis': 'def2tzvp',
'compatible_ess': ['gaussian', 'terachem'],
'method': 'wb97xd',
'method_type': 'dft',
'software': 'gaussian'},
'job_memory': 14,
'job_types': {'bde': True,
'conformers': True,
'fine': False,
'freq': True,
'irc': True,
'onedmin': False,
'opt': True,
'orbitals': False,
'rotors': False,
'sp': True},
'kinetics_adapter': 'arkane',
'max_job_time': 120,
'n_confs': 10,
'opt_level': {'basis': '6-311+g(3df,2p)',
'method': 'b3lyp',
'method_type': 'dft',
'software': 'gaussian'},
'output': {},
'project': 'arc_test',
'reactions': [],
'running_jobs': {},
'sp_level': {'basis': 'cc-pvdz-f12',
'method': 'ccsd(t)-f12',
'method_type': 'wavefunction',
'software': 'molpro'},
'species': [{'arkane_file': None,
'bond_corrections': {'C-C': 1, 'C-H': 6},
'charge': 0,
'compute_thermo': False,
'consider_all_diastereomers': True,
'force_field': 'MMFF94s',
'is_ts': False,
'label': 'spc1',
'long_thermo_description': long_thermo_description,
'mol': {'atoms': restart_dict['species'][0]['mol']['atoms'],
'multiplicity': 1,
'props': {}},
'multiplicity': 1,
'number_of_rotors': 0}],
'thermo_adapter': 'arkane',
'three_params': False}
# import pprint # left intentionally for debugging
# print(pprint.pprint(restart_dict))
self.assertEqual(restart_dict, expected_dict)
def test_from_dict(self):
"""Test the from_dict() method of ARC"""
restart_dict = {'composite_method': '',
'conformer_level': 'b97-d3/6-311+g(d,p)',
'freq_level': 'wb97x-d3/6-311+g(d,p)',
'freq_scale_factor': 0.96,
'opt_level': 'wb97x-d3/6-311+g(d,p)',
'output': {},
'project': 'testing_from_dict',
'reactions': [],
'scan_level': '',
'sp_level': 'ccsd(t)-f12/cc-pvqz-f12',
'species': [{'bond_corrections': {'C-C': 1, 'C-H': 6},
'charge': 1,
'conformer_energies': [],
'conformers': [],
'external_symmetry': 1,
'compute_thermo': False,
'is_ts': False,
'label': 'testing_spc1',
'mol': '1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}\n2 C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}'
'\n3 H u0 p0 c0 {1,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 '
'c0 {2,S}\n7 H u0 p0 c0 {2,S}\n8 H u0 p0 c0 {2,S}\n',
'multiplicity': 1,
'neg_freqs_trshed': [],
'number_of_rotors': 0,
'opt_level': '',
'optical_isomers': 1,
'rotors_dict': {},
'xyzs': []}],
'three_params': False,
'project_directory': os.path.join(ARC_PATH, 'Projects',
'arc_project_for_testing_delete_after_usage_test_from_dict'),
}
arc1 = ARC(project='wrong', freq_scale_factor=0.95)
self.assertEqual(arc1.freq_scale_factor, 0.95) # user input
arc2 = ARC(**restart_dict)
self.assertEqual(arc2.freq_scale_factor, 0.96) # loaded from the restart dict
self.assertEqual(arc2.project, 'testing_from_dict')
self.assertIn('arc_project_for_testing_delete_after_usage', arc2.project_directory)
self.assertTrue(arc2.job_types['fine'])
self.assertTrue(arc2.job_types['rotors'])
self.assertEqual(arc2.sp_level.simple(), 'ccsd(t)-f12/cc-pvqz-f12')
self.assertEqual(arc2.level_of_theory, '')
self.assertEqual(arc2.species[0].label, 'testing_spc1')
self.assertFalse(arc2.species[0].is_ts)
self.assertEqual(arc2.species[0].charge, 1)
self.assertFalse(arc2.three_params)
def test_from_dict_specific_job(self):
"""Test the from_dict() method of ARC"""
restart_dict = {'specific_job_type': 'bde',
'project': 'unit_test_specific_job',
'project_directory': os.path.join(ARC_PATH, 'Projects', 'unit_test_specific_job'),
}
arc1 = ARC(**restart_dict)
job_type_expected = {'conformers': False, 'opt': True, 'freq': True, 'sp': True, 'rotors': False,
'orbitals': False, 'bde': True, 'onedmin': False, 'fine': True, 'irc': False}
self.assertEqual(arc1.job_types, job_type_expected)
def test_check_project_name(self):
"""Test project name invalidity"""
with self.assertRaises(InputError):
ARC(project='ar c')
with self.assertRaises(InputError):
ARC(project='ar:c')
with self.assertRaises(InputError):
ARC(project='ar<c')
with self.assertRaises(InputError):
ARC(project='ar%c')
def test_determine_model_chemistry_and_freq_scale_factor(self):
"""Test determining the model chemistry and the frequency scaling factor"""
arc0 = ARC(project='arc_model_chemistry_test', level_of_theory='CBS-QB3')
self.assertEqual(str(arc0.arkane_level_of_theory), "cbs-qb3, software: gaussian (composite)")
self.assertEqual(arc0.freq_scale_factor, 1.00386) # 0.99 * 1.014 = 1.00386
arc1 = ARC(project='arc_model_chemistry_test',
level_of_theory='cbs-qb3-paraskevas')
self.assertEqual(str(arc1.arkane_level_of_theory), 'cbs-qb3-paraskevas, software: gaussian (composite)')
self.assertEqual(arc1.freq_scale_factor, 1.00386) # 0.99 * 1.014 = 1.00386
self.assertEqual(arc1.bac_type, 'p')
arc2 = ARC(project='arc_model_chemistry_test',
level_of_theory='ccsd(t)-f12/cc-pvtz-f12//m06-2x/cc-pvtz')
self.assertEqual(str(arc2.arkane_level_of_theory), 'ccsd(t)-f12/cc-pvtz-f12, software: molpro (wavefunction)')
self.assertEqual(arc2.freq_scale_factor, 0.955)
arc3 = ARC(project='arc_model_chemistry_test',
sp_level='ccsd(t)-f12/cc-pvtz-f12', opt_level='wb97xd/def2tzvp')
self.assertEqual(str(arc3.arkane_level_of_theory), 'ccsd(t)-f12/cc-pvtz-f12, software: molpro (wavefunction)')
self.assertEqual(arc3.freq_scale_factor, 0.988)
def test_determine_model_chemistry_for_job_types(self):
"""Test determining the model chemistry specification dictionary for job types"""
# Test conflicted inputs: specify both level_of_theory and composite_method
with self.assertRaises(InputError):
ARC(project='test', level_of_theory='ccsd(t)-f12/cc-pvtz-f12//wb97x-d/aug-cc-pvtz',
composite_method='cbs-qb3')
# Test illegal level of theory specification (method contains multiple slashes)
with self.assertRaises(ValueError):
ARC(project='test', level_of_theory='dlpno-mp2-f12/D/cc-pVDZ(fi/sf/fw)//b3lyp/G/def2svp')
# Test illegal job level specification (method contains multiple slashes)
with self.assertRaises(ValueError):
ARC(project='test', opt_level='b3lyp/d/def2tzvp/def2tzvp/c')
# Test illegal job level specification (method contains empty space)
with self.assertRaises(ValueError):
ARC(project='test', opt_level='b3lyp/def2tzvp def2tzvp/c')
# Test direct job level specification conflicts with level of theory specification
with self.assertRaises(InputError):
ARC(project='test', level_of_theory='b3lyp/sto-3g', opt_level='wb97xd/def2tzvp')
# Test deduce levels from default method from settings.py
arc1 = ARC(project='test')
self.assertEqual(arc1.opt_level.simple(), 'wb97xd/def2tzvp')
self.assertEqual(arc1.freq_level.simple(), 'wb97xd/def2tzvp')
self.assertEqual(arc1.sp_level.simple(), 'ccsd(t)-f12/cc-pvtz-f12')
# Test deduce levels from composite method specification
arc2 = ARC(project='test', composite_method='cbs-qb3')
self.assertIsNone(arc2.opt_level)
self.assertIsNone(arc2.sp_level)
self.assertIsNone(arc2.orbitals_level)
self.assertEqual(arc2.freq_level.simple(), 'b3lyp/cbsb7')
self.assertEqual(arc2.scan_level.simple(), 'b3lyp/cbsb7')
self.assertEqual(arc2.composite_method.simple(), 'cbs-qb3')
# Test deduce levels from level of theory specification
arc3 = ARC(project='test', level_of_theory='ccsd(t)-f12/cc-pvtz-f12//wb97m-v/def2tzvpd')
self.assertEqual(arc3.opt_level.simple(), 'wb97m-v/def2tzvpd')
self.assertEqual(arc3.freq_level.simple(), 'wb97m-v/def2tzvpd')
self.assertEqual(arc3.sp_level.simple(), 'ccsd(t)-f12/cc-pvtz-f12')
self.assertEqual(arc3.scan_level.simple(), 'wb97m-v/def2tzvpd')
self.assertIsNone(arc3.orbitals_level)
arc4 = ARC(project='test', opt_level='wb97x-d3/6-311++G(3df,3pd)', freq_level='m062x/def2-tzvpp',
sp_level='ccsd(t)f12/aug-cc-pvqz', calc_freq_factor=False)
self.assertEqual(arc4.opt_level.simple(), 'wb97x-d3/6-311++g(3df,3pd)')
self.assertEqual(arc4.freq_level.simple(), 'm062x/def2-tzvpp')
self.assertEqual(arc4.sp_level.simple(), 'ccsd(t)f12/aug-cc-pvqz')
# Test deduce freq level from opt level
arc7 = ARC(project='test', opt_level='wb97xd/aug-cc-pvtz', calc_freq_factor=False)
self.assertEqual(arc7.opt_level.simple(), 'wb97xd/aug-cc-pvtz')
self.assertEqual(arc7.freq_level.simple(), 'wb97xd/aug-cc-pvtz')
        # Test that a level not supported by Arkane does not raise an error if compute_thermo is False
arc8 = ARC(project='test', sp_level='method/unsupported', calc_freq_factor=False, compute_thermo=False)
self.assertEqual(arc8.sp_level.simple(), 'method/unsupported')
self.assertEqual(arc8.freq_level.simple(), 'wb97xd/def2tzvp')
# Test that a level not supported by Arkane does raise an error if compute_thermo is True (default)
with self.assertRaises(ValueError):
ARC(project='test', sp_level='method/unsupported', calc_freq_factor=False)
# Test dictionary format specification with auxiliary basis and DFT dispersion
arc9 = ARC(project='test', opt_level={},
freq_level={'method': 'B3LYP/G', 'basis': 'cc-pVDZ(fi/sf/fw)', 'auxiliary_basis': 'def2-svp/C',
'dispersion': 'DEF2-tzvp/c'},
sp_level={'method': 'DLPNO-CCSD(T)-F12', 'basis': 'cc-pVTZ-F12',
'auxiliary_basis': 'aug-cc-pVTZ/C cc-pVTZ-F12-CABS'},
calc_freq_factor=False, compute_thermo=False)
self.assertEqual(arc9.opt_level.simple(), 'wb97xd/def2tzvp')
self.assertEqual(str(arc9.freq_level), 'b3lyp/g/cc-pvdz(fi/sf/fw), auxiliary_basis: def2-svp/c, '
'dispersion: def2-tzvp/c, software: gaussian (dft)')
self.assertEqual(str(arc9.sp_level),
'dlpno-ccsd(t)-f12/cc-pvtz-f12, auxiliary_basis: aug-cc-pvtz/c cc-pvtz-f12-cabs, '
'software: orca (wavefunction)')
# Test using default frequency and orbital level for composite job, also forbid rotors job
arc10 = ARC(project='test', composite_method='cbs-qb3', calc_freq_factor=False,
job_types={'rotors': False, 'orbitals': True})
self.assertEqual(arc10.freq_level.simple(), 'b3lyp/cbsb7')
self.assertIsNone(arc10.scan_level)
self.assertEqual(arc10.orbitals_level.simple(), 'b3lyp/cbsb7')
# Test using specified frequency, scan, and orbital for composite job
arc11 = ARC(project='test', composite_method='cbs-qb3', freq_level='wb97xd/6-311g', scan_level='apfd/def2svp',
orbitals_level='hf/sto-3g', job_types={'orbitals': True}, calc_freq_factor=False)
self.assertEqual(arc11.scan_level.simple(), 'apfd/def2svp')
self.assertEqual(arc11.freq_level.simple(), 'wb97xd/6-311g')
self.assertEqual(arc11.orbitals_level.simple(), 'hf/sto-3g')
# Test using default frequency and orbital level for job specified from level of theory, also forbid rotors job
arc12 = ARC(project='test', level_of_theory='b3lyp/sto-3g', calc_freq_factor=False,
job_types={'rotors': False, 'orbitals': True}, compute_thermo=False)
self.assertIsNone(arc12.scan_level)
self.assertEqual(arc12.orbitals_level.simple(), 'wb97x-d3/def2tzvp')
# Test using specified scan level
arc13 = ARC(project='test', level_of_theory='b3lyp/sto-3g', calc_freq_factor=False, scan_level='apfd/def2svp',
job_types={'rotors': True}, compute_thermo=False)
self.assertEqual(arc13.scan_level.simple(), 'apfd/def2svp')
# Test specifying semi-empirical and force-field methods using dictionary
arc14 = ARC(project='test', opt_level={'method': 'AM1'}, freq_level={'method': 'PM6'},
sp_level={'method': 'AMBER'}, calc_freq_factor=False, compute_thermo=False)
self.assertEqual(arc14.opt_level.simple(), 'am1')
self.assertEqual(arc14.freq_level.simple(), 'pm6')
self.assertEqual(arc14.sp_level.simple(), 'amber')
def test_determine_unique_species_labels(self):
"""Test the determine_unique_species_labels method"""
spc0 = ARCSpecies(label='spc0', smiles='CC', compute_thermo=False)
spc1 = ARCSpecies(label='spc1', smiles='CC', compute_thermo=False)
spc2 = ARCSpecies(label='spc2', smiles='CC', compute_thermo=False)
arc0 = ARC(project='arc_test', job_types=self.job_types1, species=[spc0, spc1, spc2],
level_of_theory='ccsd(t)-f12/cc-pvdz-f12//b3lyp/6-311+g(3df,2p)')
self.assertEqual(arc0.unique_species_labels, ['spc0', 'spc1', 'spc2'])
spc3 = ARCSpecies(label='spc0', smiles='CC', compute_thermo=False)
arc0.species.append(spc3)
with self.assertRaises(ValueError):
arc0.determine_unique_species_labels()
def test_add_hydrogen_for_bde(self):
"""Test the add_hydrogen_for_bde method"""
spc0 = ARCSpecies(label='spc0', smiles='CC', compute_thermo=False)
arc0 = ARC(project='arc_test', job_types=self.job_types1, species=[spc0],
level_of_theory='ccsd(t)-f12/cc-pvdz-f12//b3lyp/6-311+g(3df,2p)')
arc0.add_hydrogen_for_bde()
self.assertEqual(len(arc0.species), 1)
spc1 = ARCSpecies(label='spc1', smiles='CC', compute_thermo=False, bdes=['all_h'])
arc1 = ARC(project='arc_test', job_types=self.job_types1, species=[spc1],
level_of_theory='ccsd(t)-f12/cc-pvdz-f12//b3lyp/6-311+g(3df,2p)')
arc1.add_hydrogen_for_bde()
self.assertEqual(len(arc1.species), 2)
self.assertIn('H', [spc.label for spc in arc1.species])
def test_process_adaptive_levels(self):
"""Test processing the adaptive levels"""
adaptive_levels_1 = {(1, 5): {('opt', 'freq'): 'wb97xd/6-311+g(2d,2p)',
('sp',): 'ccsd(t)-f12/aug-cc-pvtz-f12'},
(6, 15): {('opt', 'freq'): 'b3lyp/cbsb7',
'sp': 'dlpno-ccsd(t)/def2-tzvp'},
(16, 30): {('opt', 'freq'): 'b3lyp/6-31g(d,p)',
'sp': {'method': 'wb97xd', 'basis': '6-311+g(2d,2p)'}},
(31, 'inf'): {('opt', 'freq'): 'b3lyp/6-31g(d,p)',
'sp': 'b3lyp/6-311+g(d,p)'}}
processed_1 = process_adaptive_levels(adaptive_levels_1)
self.assertEqual(processed_1[(6, 15)][('sp',)].simple(), 'dlpno-ccsd(t)/def2-tzvp')
self.assertEqual(processed_1[(16, 30)][('sp',)].simple(), 'wb97xd/6-311+g(2d,2p)')
# test non dict
with self.assertRaises(InputError):
process_adaptive_levels(4)
# wrong atom range
with self.assertRaises(InputError):
process_adaptive_levels({5: {('opt', 'freq'): 'wb97xd/6-311+g(2d,2p)',
('sp',): 'ccsd(t)-f12/aug-cc-pvtz-f12'},
(6, 'inf'): {('opt', 'freq'): 'b3lyp/6-31g(d,p)',
'sp': 'b3lyp/6-311+g(d,p)'}})
        # no 'inf' upper bound in the atom ranges
with self.assertRaises(InputError):
process_adaptive_levels({(1, 5): {('opt', 'freq'): 'wb97xd/6-311+g(2d,2p)',
('sp',): 'ccsd(t)-f12/aug-cc-pvtz-f12'},
(6, 75): {('opt', 'freq'): 'b3lyp/6-31g(d,p)',
'sp': 'b3lyp/6-311+g(d,p)'}})
# adaptive level not a dict
with self.assertRaises(InputError):
process_adaptive_levels({(1, 5): {('opt', 'freq'): 'wb97xd/6-311+g(2d,2p)',
('sp',): 'ccsd(t)-f12/aug-cc-pvtz-f12'},
(6, 'inf'): 'b3lyp/6-31g(d,p)'})
# non-consecutive atom ranges
with self.assertRaises(InputError):
process_adaptive_levels({(1, 5): {('opt', 'freq'): 'wb97xd/6-311+g(2d,2p)',
('sp',): 'ccsd(t)-f12/aug-cc-pvtz-f12'},
(15, 'inf'): {('opt', 'freq'): 'b3lyp/6-31g(d,p)',
'sp': 'b3lyp/6-311+g(d,p)'}})
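        # Hedged note (not part of the original test): after process_adaptive_levels,
        # plain string keys such as 'sp' are normalized to tuples like ('sp',) and the
        # level specifications become objects exposing .simple(), so one would expect
        #   processed_1[(31, 'inf')][('sp',)].simple() == 'b3lyp/6-311+g(d,p)'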
@classmethod
def tearDownClass(cls):
"""
A function that is run ONCE after all unit tests in this class.
Delete all project directories created during these unit tests
"""
projects = ['arc_project_for_testing_delete_after_usage_test_from_dict',
'arc_model_chemistry_test', 'arc_test', 'test', 'unit_test_specific_job', 'wrong']
for project in projects:
project_directory = os.path.join(ARC_PATH, 'Projects', project)
shutil.rmtree(project_directory, ignore_errors=True)
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| 54.842342
| 120
| 0.522587
|
8882edc18e59bc6723d5b4e7475655a6c0848a60
| 387
|
py
|
Python
|
processData.py
|
ganesh-cc/ticket_update
|
b01689e99422704080cd9a54d50be9ccc695fde0
|
[
"Unlicense"
] | null | null | null |
processData.py
|
ganesh-cc/ticket_update
|
b01689e99422704080cd9a54d50be9ccc695fde0
|
[
"Unlicense"
] | null | null | null |
processData.py
|
ganesh-cc/ticket_update
|
b01689e99422704080cd9a54d50be9ccc695fde0
|
[
"Unlicense"
] | null | null | null |
import freshdeskService
from collections import defaultdict
name_mapping = {
    "480XXXX7506": "agent1",
    "480XXXX2697": "agent2",
    "480XXXX4635": "agent3",
    "480XXXX0364": "agent4"
}
fin = {}
fd = defaultdict(list)
def dataMapper(fd):
    # Map each masked phone-number key in `fd` to its agent name from name_mapping.
    for k in fd:
        for ke, va in name_mapping.items():
            if k == ke:
                key = name_mapping.get(ke)
                value = fd.get(k)
                fin[key] = value
    return fin
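# Hedged usage sketch (keys and ticket ids are hypothetical, masked like the mapping above):
#   fd = {"480XXXX7506": [101, 102], "480XXXX2697": [103]}
#   dataMapper(fd)  # -> {"agent1": [101, 102], "agent2": [103]}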
| 13.344828
| 37
| 0.669251
|
c6299197bc4d1378357665bae7afc2dc03af904a
| 726
|
py
|
Python
|
World 1/If...Else/ex034 - Multiple Increase.py
|
MiguelChichorro/PythonExercises
|
3b2726e7d9ef92c1eb6b977088692c42a2a7b86e
|
[
"MIT"
] | 2
|
2021-04-23T19:18:06.000Z
|
2021-05-15T17:45:21.000Z
|
World 1/If...Else/ex034 - Multiple Increase.py
|
MiguelChichorro/PythonExercises
|
3b2726e7d9ef92c1eb6b977088692c42a2a7b86e
|
[
"MIT"
] | 1
|
2021-05-14T00:29:23.000Z
|
2021-05-14T00:29:23.000Z
|
World 1/If...Else/ex034 - Multiple Increase.py
|
MiguelChichorro/PythonExercises
|
3b2726e7d9ef92c1eb6b977088692c42a2a7b86e
|
[
"MIT"
] | 1
|
2021-05-14T00:19:33.000Z
|
2021-05-14T00:19:33.000Z
|
from time import sleep
colors = {"clean": "\033[m",
"red": "\033[31m",
"green": "\033[32m",
"yellow": "\033[33m",
"blue": "\033[34m",
"purple": "\033[35m",
"cian": "\033[36m"}
sal = int(input("Enter your salary:"))
print("{}Loading...{}".format(colors["green"], colors["clean"]))
sleep(2)
if sal > 1250:
newsal = sal + (sal * 0.10)
print("{}Your new salary with 10% increase is US${}{}".format(colors["green"], newsal, colors["clean"]))
else:
newsal = sal + (sal * 0.15)
print("{}Your new salary with 15% increase is US${}{}".format(colors["green"], newsal, colors["clean"]))
print("{}Congratulations!!!{}".format(colors["yellow"], colors["clean"]))
| 38.210526
| 108
| 0.553719
|
cb58aab54a57076131aef487d1d60c71a4c53dca
| 10,964
|
py
|
Python
|
src/m3_nested_loops_in_printing.py
|
khoslarr/18-LoopsWithinLoops
|
d18a4fd071779f9958b442e08503c4a8e1183454
|
[
"MIT"
] | null | null | null |
src/m3_nested_loops_in_printing.py
|
khoslarr/18-LoopsWithinLoops
|
d18a4fd071779f9958b442e08503c4a8e1183454
|
[
"MIT"
] | null | null | null |
src/m3_nested_loops_in_printing.py
|
khoslarr/18-LoopsWithinLoops
|
d18a4fd071779f9958b442e08503c4a8e1183454
|
[
"MIT"
] | null | null | null |
"""
This project demonstrates NESTED LOOPS (i.e., loops within loops)
in the context of PRINTING on the CONSOLE.
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Rishav Khosla.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
""" Calls the other functions to test them. """
run_test_rectangle_of_stars()
run_test_triangle_of_stars()
run_test_decreasing_exclamation_marks()
run_test_alternating_brackets()
run_test_triangle_same_number_in_each_row()
run_test_triangle_all_numbers_in_each_row()
def run_test_rectangle_of_stars():
""" Tests the rectangle_of_stars function. """
print()
print('--------------------------------------------')
print('Testing the RECTANGLE_OF_STARS function:')
print('--------------------------------------------')
print('Test 1 of rectangle_of_stars: (3, 5)')
rectangle_of_stars(3, 5)
print('Test 2 of rectangle_of_stars: (4, 11)')
rectangle_of_stars(4, 11)
print('Test 3 of rectangle_of_stars: (6, 2)')
rectangle_of_stars(6, 2)
def rectangle_of_stars(r, c):
"""
Prints a rectangle of stars (asterisks), with r rows and c columns.
For example, when r = 3 and c = 5:
*****
*****
*****
Preconditions: r and c are non-negative integers.
"""
# ------------------------------------------------------------------
# DONE: 2. Implement and test this function.
# Some tests are already written for you (above).
#
# *** Unless your instructor directs you otherwise,
# see the video
# nested_loops_in_PRINTING.mp4
# in Preparation for Session 18
# ** NOW **
# and follow along in that video as you do this problem.
# (Pause the video when it completes this problem.)
# ***
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# ------------------------------------------------------------------
for j in range(r):
for k in range(c):
print('*', end='')
print()
def run_test_triangle_of_stars():
""" Tests the triangle_of_stars function. """
print()
print('-------------------------------------------')
print('Testing the TRIANGLE_OF_STARS function:')
print('-------------------------------------------')
print('Test 1 of triangle_of_stars: (5)')
triangle_of_stars(5)
print('Test 2 of triangle_of_stars: (1)')
triangle_of_stars(1)
print('Test 3 of triangle_of_stars: (3)')
triangle_of_stars(3)
print('Test 4 of triangle_of_stars: (6)')
triangle_of_stars(6)
def triangle_of_stars(r):
"""
Prints a triangle of stars (asterisks), with r rows.
-- The first row is 1 star,
the second is 2 stars,
the third is 3 stars, and so forth.
For example, when r = 5:
*
**
***
****
*****
Precondition: r is a non-negative integer.
"""
# ------------------------------------------------------------------
# DONE: 3. Implement and test this function.
# Some tests are already written for you (above).
#
# *** Unless your instructor directs you otherwise,
# see the video
# nested_loops_in_PRINTING.mp4
# in Preparation for Session 18
# ** NOW **
# and follow along in that video as you do this problem.
# (Continue the video from where you paused it
# in the previous problem.)
# ***
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# ------------------------------------------------------------------
for j in range(r):
for k in range(j+1):
print('*', end='')
print()
def run_test_decreasing_exclamation_marks():
""" Tests the decreasing_exclamation_marks function. """
print()
print('----------------------------------------------------------')
print('Testing the DECREASING_EXCLAMATION_MARKS function:')
print('----------------------------------------------------------')
print('Test 1 of decreasing_exclamation_marks: (5, 2)')
decreasing_exclamation_marks(5, 2)
print('Test 2 of decreasing_exclamation_marks: (3, 1)')
decreasing_exclamation_marks(3, 1)
print('Test 3 of decreasing_exclamation_marks: (4, 4)')
decreasing_exclamation_marks(4, 4)
print('Test 4 of decreasing_exclamation_marks: (8, 6)')
decreasing_exclamation_marks(8, 6)
def decreasing_exclamation_marks(m, n):
"""
Prints exclamation marks: m on the first row,
m-1 on the next row, m-2 on the next, etc, until n on the last row.
For example, when m = 5 and n = 2:
!!!!!
!!!!
!!!
!!
Precondition: m and n are positive integers with m >= n.
"""
# ------------------------------------------------------------------
# DONE: 4. Implement and test this function.
# Some tests are already written for you (above).
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# ------------------------------------------------------------------
for j in range(m-n+1):
for k in range(m-j):
print('!', end='')
print()
def run_test_alternating_brackets():
""" Tests the alternating_brackets function. """
print()
print('----------------------------------------------------------')
print('Testing the ALTERNATING_BRACKETS function:')
print('----------------------------------------------------------')
print('Test 1 of alternating_brackets: (5, 2)')
alternating_brackets(5, 2)
print('Test 2 of alternating_brackets: (3, 1)')
alternating_brackets(3, 1)
print('Test 3 of alternating_brackets: (4, 4)')
alternating_brackets(4, 4)
print('Test 4 of alternating_brackets: (8, 6)')
alternating_brackets(8, 6)
def alternating_brackets(m, n):
"""
Prints alternating left/right square brackets: m on the first row,
m-1 on the next row, m-2 on the next, etc, until n on the last row.
For example, when m = 5 and n = 2:
[][][
[][]
[][
[]
Precondition: m and n are positive integers with m >= n.
"""
# ------------------------------------------------------------------
# DONE: 5. Implement and test this function.
# Some tests are already written for you (above).
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# ------------------------------------------------------------------
for j in range(m-n+1):
for k in range(m-j):
if k % 2 == 0:
print('[', end='')
else:
print(']', end='')
print()
def run_test_triangle_same_number_in_each_row():
""" Tests the triangle_same_number_in_each_row function. """
print()
print('----------------------------------------------------------')
print('Testing the TRIANGLE_SAME_NUMBER_IN_EACH_ROW function:')
print('----------------------------------------------------------')
print('Test 1 of triangle_same_number_in_each_row: (5)')
triangle_same_number_in_each_row(5)
print('Test 2 of triangle_same_number_in_each_row: (1)')
triangle_same_number_in_each_row(1)
print('Test 3 of triangle_same_number_in_each_row: (3)')
triangle_same_number_in_each_row(3)
print('Test 4 of triangle_same_number_in_each_row: (6)')
triangle_same_number_in_each_row(6)
def triangle_same_number_in_each_row(r):
"""
Prints a triangle of numbers, with r rows.
The first row is 1, the 2nd is 22, the 3rd is 333, etc.
For example, when r = 5:
1
22
333
4444
55555
Precondition: r is a non-negative integer.
"""
# ------------------------------------------------------------------
# DONE: 6. Implement and test this function.
# Some tests are already written for you (above).
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# ------------------------------------------------------------------
for j in range(r):
        for k in range(j + 1):
            print(j + 1, end='')
print()
def run_test_triangle_all_numbers_in_each_row():
""" Tests the triangle_all_numbers_in_each_row function. """
print()
print('----------------------------------------------------------')
print('Testing the TRIANGLE_ALL_NUMBERS_IN_EACH_ROW function:')
print('----------------------------------------------------------')
print('Test 1 of triangle_all_numbers_in_each_row: (5)')
triangle_all_numbers_in_each_row(5)
print('Test 2 of triangle_all_numbers_in_each_row: (1)')
triangle_all_numbers_in_each_row(1)
print('Test 3 of triangle_all_numbers_in_each_row: (3)')
triangle_all_numbers_in_each_row(3)
print('Test 4 of triangle_all_numbers_in_each_row: (6)')
triangle_all_numbers_in_each_row(6)
def triangle_all_numbers_in_each_row(r):
"""
Prints a triangle of numbers, with r rows.
The first row is 1, the 2nd is 12, the 3rd is 123, etc.
For example, when r = 5:
1
12
123
1234
12345
Precondition: r is a non-negative integer.
"""
# ------------------------------------------------------------------
# DONE: 7. Implement and test this function.
# Some tests are already written for you (above).
#
# IMPLEMENTATION RESTRICTION:
# ** You may NOT use string multiplication **
# in this or the other problems in this module, as doing so
# would defeat the goal of providing practice at loops within loops.
# ------------------------------------------------------------------
for j in range(r):
for k in range(j+1):
print(k+1, end='')
print()
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
| 34.049689
| 74
| 0.530828
|
0ee22d74b57c3bc124ab3e4f95bb482e9d22fde5
| 9,249
|
py
|
Python
|
docs/source/conf.py
|
guidj/jsonuri-py
|
0e266be86a52dc93d792b05e458e07f0b279e490
|
[
"Apache-2.0"
] | null | null | null |
docs/source/conf.py
|
guidj/jsonuri-py
|
0e266be86a52dc93d792b05e458e07f0b279e490
|
[
"Apache-2.0"
] | 5
|
2015-06-17T05:34:55.000Z
|
2018-01-29T19:01:11.000Z
|
docs/source/conf.py
|
guidj/jsonuri-py
|
0e266be86a52dc93d792b05e458e07f0b279e490
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# JsonUri documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 19 01:41:24 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'JsonURI'
copyright = '2015-2018, Guilherme Dinis J'
author = 'Guilherme Dinis J'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.1'
# The full version, including alpha/beta/rc tags.
release = '0.2.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'JsonUridoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'JsonUri.tex', 'JsonUri Documentation',
'Guilherme Dinis J', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'jsonuri', 'JsonUri Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'JsonUri', 'JsonUri Documentation',
author, 'JsonUri', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
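# Hedged usage note (not part of the generated file): with this configuration, running
# "sphinx-build -b html docs/source docs/build" from the project root would render the
# HTML docs with the alabaster theme and the autodoc/viewcode extensions enabled.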
| 32.00346
| 79
| 0.716618
|
fc255974b5b6637d2a3dbaa153aa8c94b3c781d9
| 656
|
py
|
Python
|
leetcode/1133-last-substring-in-lexicographical-order.py
|
Magic07/online-judge-solutions
|
02a289dd7eb52d7eafabc97bd1a043213b65f70a
|
[
"MIT"
] | null | null | null |
leetcode/1133-last-substring-in-lexicographical-order.py
|
Magic07/online-judge-solutions
|
02a289dd7eb52d7eafabc97bd1a043213b65f70a
|
[
"MIT"
] | null | null | null |
leetcode/1133-last-substring-in-lexicographical-order.py
|
Magic07/online-judge-solutions
|
02a289dd7eb52d7eafabc97bd1a043213b65f70a
|
[
"MIT"
] | null | null | null |
class Solution:
def lastSubstring(self, s: str) -> str:
        # Two-pointer scan: `currentMax` marks the start of the best suffix found
        # so far, `start` is the current candidate, and `length` is the length of
        # their common prefix.
        currentMax = 0
        start = 1
        length = 0
        while start + length < len(s):
            if s[currentMax + length] == s[start + length]:
                length += 1
                continue
            if s[currentMax + length] > s[start + length]:
                # The candidate loses; skip past everything it already matched.
                start = start + length + 1
            else:
                # The candidate wins; it becomes the new best suffix start.
                currentMax = start
                start += 1
            length = 0
        return s[currentMax:]
# Ref: https://leetcode.com/problems/last-substring-in-lexicographical-order/discuss/363662/Short-python-code-O(n)-time-and-O(1)-space-with-proof-and-visualization
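# Hedged example (not part of the submission): Solution().lastSubstring("abab")
# returns "bab", the lexicographically largest suffix of "abab".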
| 36.444444
| 163
| 0.545732
|
125f37a1d1eb1f4462372874a95427fd3d6be79b
| 3,406
|
py
|
Python
|
.dev_scripts/convert_train_benchmark_script.py
|
mrzhuzhe/mmdetection
|
c04ca2c2a65500bc248a5d2ab6ace5b15f00064d
|
[
"Apache-2.0"
] | null | null | null |
.dev_scripts/convert_train_benchmark_script.py
|
mrzhuzhe/mmdetection
|
c04ca2c2a65500bc248a5d2ab6ace5b15f00064d
|
[
"Apache-2.0"
] | null | null | null |
.dev_scripts/convert_train_benchmark_script.py
|
mrzhuzhe/mmdetection
|
c04ca2c2a65500bc248a5d2ab6ace5b15f00064d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark model json to script')
parser.add_argument(
'txt_path', type=str, help='txt path output by benchmark_filter')
parser.add_argument(
'--partition',
type=str,
default='openmmlab',
help='slurm partition name')
parser.add_argument(
'--max-keep-ckpts',
type=int,
default=1,
help='The maximum checkpoints to keep')
parser.add_argument(
'--run', action='store_true', help='run script directly')
parser.add_argument(
'--out', type=str, help='path to save model benchmark script')
args = parser.parse_args()
return args
def main():
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
assert args.out.endswith('.sh'), \
f'Expected out file path suffix is .sh, but get .{out_suffix}'
assert args.out or args.run, \
('Please specify at least one operation (save/run/ the '
'script) with the argument "--out" or "--run"')
partition = args.partition # cluster name
root_name = './tools'
train_script_name = osp.join(root_name, 'slurm_train.sh')
# stdout is no output
stdout_cfg = '>/dev/null'
max_keep_ckpts = args.max_keep_ckpts
commands = []
with open(args.txt_path, 'r') as f:
model_cfgs = f.readlines()
for i, cfg in enumerate(model_cfgs):
cfg = cfg.strip()
if len(cfg) == 0:
continue
# print cfg name
echo_info = f'echo \'{cfg}\' &'
commands.append(echo_info)
commands.append('\n')
fname, _ = osp.splitext(osp.basename(cfg))
out_fname = osp.join(root_name, 'work_dir', fname)
# default setting
if cfg.find('16x') >= 0:
command_info = f'GPUS=16 GPUS_PER_NODE=8 ' \
f'CPUS_PER_TASK=2 {train_script_name} '
elif cfg.find('gn-head_4x4_1x_coco.py') >= 0 or \
cfg.find('gn-head_4x4_2x_coco.py') >= 0:
command_info = f'GPUS=4 GPUS_PER_NODE=4 ' \
f'CPUS_PER_TASK=2 {train_script_name} '
else:
command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \
f'CPUS_PER_TASK=2 {train_script_name} '
command_info += f'{partition} '
command_info += f'{fname} '
command_info += f'{cfg} '
command_info += f'{out_fname} '
if max_keep_ckpts:
command_info += f'--cfg-options ' \
f'checkpoint_config.max_keep_ckpts=' \
f'{max_keep_ckpts}' + ' '
command_info += f'{stdout_cfg} &'
commands.append(command_info)
if i < len(model_cfgs):
commands.append('\n')
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str)
if args.run:
os.system(command_str)
if __name__ == '__main__':
main()
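# Hedged illustration (the config path is hypothetical, not from the source): for an
# input line 'configs/retinanet/retinanet_r50_fpn_1x_coco.py', the generated script
# would contain roughly:
#   echo 'configs/retinanet/retinanet_r50_fpn_1x_coco.py' &
#   GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab \
#       retinanet_r50_fpn_1x_coco configs/retinanet/retinanet_r50_fpn_1x_coco.py \
#       ./tools/work_dir/retinanet_r50_fpn_1x_coco \
#       --cfg-options checkpoint_config.max_keep_ckpts=1 >/dev/null &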
| 34.06
| 75
| 0.525837
|
39c4f0f926dab2b8a69d373d2799595996223947
| 1,510
|
py
|
Python
|
tests/test_intersection.py
|
abdessamad14/zellij
|
ac49d4abc3779eb4bd11e425f502fd1de96cc0b8
|
[
"Apache-2.0"
] | 12
|
2017-07-10T16:44:49.000Z
|
2021-11-09T21:24:57.000Z
|
tests/test_intersection.py
|
abdessamad14/zellije
|
ac49d4abc3779eb4bd11e425f502fd1de96cc0b8
|
[
"Apache-2.0"
] | 2
|
2017-09-05T16:13:21.000Z
|
2018-05-05T19:46:46.000Z
|
tests/test_intersection.py
|
abdessamad14/zellije
|
ac49d4abc3779eb4bd11e425f502fd1de96cc0b8
|
[
"Apache-2.0"
] | 3
|
2017-08-16T00:13:29.000Z
|
2021-11-09T21:29:58.000Z
|
from hypothesis import assume, given
from hypothesis.strategies import builds, lists, integers, tuples
from zellij.defuzz import Defuzzer
from zellij.euclid import collinear, Segment, BadGeometry
from zellij.intersection import segment_intersections
from zellij.postulates import all_pairs
nums = integers(min_value=-1000, max_value=1000)
points = tuples(nums, nums)
segments = builds(lambda l: Segment(*l), lists(points, min_size=2, max_size=2, unique=True))
@given(lists(segments, min_size=2, max_size=100, unique=True))
def test_intersections(segments):
defuzz = Defuzzer().defuzz
# Check that none of our segment pairs are pathological, and collect the
# true answers the hard way, by checking pair-wise.
true = set()
for s1, s2 in all_pairs(segments):
try:
ipt = s1.intersect(s2)
if ipt is not None:
true.add(defuzz(ipt))
except BadGeometry:
# If two segments don't have an answer, then don't use this test
# case.
assume(False)
# Run the actual function we care about.
isects = segment_intersections(segments)
for pt, segs in isects.items():
# Property: the answer should be in the true answers we found the hard
# way.
assert defuzz(pt) in true
# Property: every intersection should be collinear with the segment it
# claims to be part of.
for seg in segs:
s1, s2 = seg
assert collinear(s1, pt, s2)
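# Hedged illustration (assumed behaviour, not taken from the source): for two crossing
# segments such as Segment((0, 0), (2, 2)) and Segment((0, 2), (2, 0)),
# segment_intersections([...]) would be expected to map their crossing point (1, 1)
# to the segments that pass through it.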
| 34.318182
| 92
| 0.669536
|
013efe4985b14efa1224cd76e8e3be655ae6f076
| 3,945
|
py
|
Python
|
airflow/providers/elasticsearch/backport_provider_setup.py
|
gtossou/airflow
|
0314a3a218f864f78ec260cc66134e7acae34bc5
|
[
"Apache-2.0"
] | 2
|
2020-12-03T01:29:54.000Z
|
2020-12-03T01:30:06.000Z
|
airflow/providers/elasticsearch/backport_provider_setup.py
|
gtossou/airflow
|
0314a3a218f864f78ec260cc66134e7acae34bc5
|
[
"Apache-2.0"
] | null | null | null |
airflow/providers/elasticsearch/backport_provider_setup.py
|
gtossou/airflow
|
0314a3a218f864f78ec260cc66134e7acae34bc5
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE\
# OVERWRITTEN WHEN RUNNING
#
# ./breeze prepare-provider-readme
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `SETUP_TEMPLATE.py.jinja2` IN the `provider_packages` DIRECTORY
"""Setup.py for the apache-airflow-backport-providers-elasticsearch package."""
import logging
import os
import sys
from os.path import dirname
from setuptools import find_packages, setup
logger = logging.getLogger(__name__)
version = '2020.10.29'
my_dir = dirname(__file__)
try:
with open(
os.path.join(my_dir, 'airflow/providers/elasticsearch/BACKPORT_PROVIDER_README.md'), encoding='utf-8'
) as f:
long_description = f.read()
except FileNotFoundError:
long_description = ''
def do_setup(version_suffix_for_pypi=''):
"""Perform the package apache-airflow-backport-providers-elasticsearch setup."""
setup(
name='apache-airflow-backport-providers-elasticsearch',
description='Backport provider package '
'apache-airflow-backport-providers-elasticsearch for Apache Airflow',
long_description=long_description,
long_description_content_type='text/markdown',
license='Apache License 2.0',
version=version + version_suffix_for_pypi,
packages=find_packages(include=['airflow.providers.elasticsearch*']),
zip_safe=False,
install_requires=['apache-airflow~=1.10'],
setup_requires=['setuptools', 'wheel'],
extras_require={},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='dev@airflow.apache.org',
url='http://airflow.apache.org/',
download_url=('https://archive.apache.org/dist/airflow/backport-providers'),
python_requires='~=3.6',
project_urls={
'Documentation': 'https://airflow.apache.org/docs/',
'Bug Tracker': 'https://github.com/apache/airflow/issues',
'Source Code': 'https://github.com/apache/airflow',
},
)
#
# Note that --version-suffix-for-pypi should only be used in case we generate RC packages for PyPI
# Those packages should have actual RC version in order to be published even if source version
# should be the final one.
#
if __name__ == "__main__":
suffix = ''
if len(sys.argv) > 1 and sys.argv[1] == "--version-suffix-for-pypi":
if len(sys.argv) < 3:
print("ERROR! --version-suffix-for-pypi needs parameter!", file=sys.stderr)
sys.exit(1)
suffix = sys.argv[2]
sys.argv = [sys.argv[0]] + sys.argv[3:]
do_setup(version_suffix_for_pypi=suffix)
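# Hedged usage sketch (not from the source): building an RC wheel might look like
#   python backport_provider_setup.py --version-suffix-for-pypi rc1 sdist bdist_wheel
# which would build version 2020.10.29rc1, while omitting the flag builds 2020.10.29.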
| 37.571429
| 109
| 0.677567
|
701a4065c3659ab1b245837fcaa77fbdc5b775c4
| 730
|
py
|
Python
|
jichu/api.py
|
theoturner/Fetch-Jichu
|
a6f34a0c6519b0a5d5a006acd13fc677ab7fbdbe
|
[
"Apache-2.0"
] | null | null | null |
jichu/api.py
|
theoturner/Fetch-Jichu
|
a6f34a0c6519b0a5d5a006acd13fc677ab7fbdbe
|
[
"Apache-2.0"
] | null | null | null |
jichu/api.py
|
theoturner/Fetch-Jichu
|
a6f34a0c6519b0a5d5a006acd13fc677ab7fbdbe
|
[
"Apache-2.0"
] | 1
|
2019-10-22T15:29:32.000Z
|
2019-10-22T15:29:32.000Z
|
from flask import Flask, render_template, request, redirect, url_for
from contract import ContractDeployer
app = Flask(__name__)
# Load secret key using local config / env var in production
app.config["SECRET_KEY"] = "session-cookie-signing-key"
txhash = ''
@app.route("/")
def index():
return render_template("index.html", txhash = txhash)
@app.route("/deploy", methods = ["POST"])
def deploy():
    # Server-side cookie would get around this global; dropping Quart, no async!
global txhash
address = request.form['address']
contract = request.form['contract']
cd = ContractDeployer(address, 8000)
txhash = cd.deploy(contract)
return redirect(url_for('index'))
if __name__ == "__main__":
app.run()
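# Hedged usage sketch (the form values are hypothetical): with the app on Flask's
# default port, the form on "/" posts to /deploy, roughly equivalent to
#   curl -X POST -d "address=0x..." -d "contract=..." http://127.0.0.1:5000/deploy
# after which the index page renders the returned transaction hash.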
| 30.416667
| 76
| 0.706849
|
59a400d46130b8c8c86037cc3ec3c807b4828675
| 5,314
|
py
|
Python
|
coherence_timeseries.py
|
olliestephenson/dpm-rnn-public
|
bc4247fd7126eac66dd07c50149a62be72a316ec
|
[
"MIT"
] | 13
|
2021-07-03T09:58:55.000Z
|
2022-03-30T13:36:30.000Z
|
coherence_timeseries.py
|
olliestephenson/dpm-rnn-public
|
bc4247fd7126eac66dd07c50149a62be72a316ec
|
[
"MIT"
] | null | null | null |
coherence_timeseries.py
|
olliestephenson/dpm-rnn-public
|
bc4247fd7126eac66dd07c50149a62be72a316ec
|
[
"MIT"
] | 5
|
2021-02-18T16:54:25.000Z
|
2022-03-31T08:02:54.000Z
|
import os
import numpy as np
import torch
from torch.utils.data import Dataset
from enum import Enum
class Mode(Enum):
DEPLOY = 0
TRAIN = 1
TEST = 2
class Coherence_Timeseries(Dataset):
data_dim = 1 # coherence values are scalar (dimension = 1)
mode = Mode.DEPLOY # default mode
def __init__(self, data_config):
# Check fields in data_config
assert 'path' in data_config and isinstance(data_config['path'], str)
assert 'shape' in data_config and isinstance(data_config['shape'], list) and len(data_config['shape']) == 2
assert 'length' in data_config and isinstance(data_config['length'], int)
assert 'event_index' in data_config and isinstance(data_config['event_index'], int)
# Load data
assert data_config['path'][-4:] == '.npy'
self.data = np.load(data_config['path'])
assert isinstance(self.data, np.ndarray)
# Check dataset.shape
assert len(self.data.shape) == 3
assert self.data.shape == (data_config['shape'][0], data_config['shape'][1], data_config['length'])
self.dataset_shape = (self.data.shape[0], self.data.shape[1])
self.sequence_length = self.data.shape[2]
# Check event_index
assert 0 <= data_config['event_index'] < self.sequence_length
self.event_index = data_config['event_index']
# Flatten data
self.data = np.reshape(self.data, (-1, self.data.shape[-1]))
self.data = np.expand_dims(self.data, axis=2) # last dimension 1 since coherence values are scalars
def remove_nans(self):
"""Remove sequences with nan values from dataset."""
nans = np.isnan(self.data)
nan_count = np.sum(np.sum(nans, axis=-1), axis=-1)
self.not_nan_inds = np.where(nan_count == 0)[0]
self.data = self.data[self.not_nan_inds]
def unbound(self,transform):
"""
        Transform coherence values into an unbounded range with an inverse sigmoid.
        Can transform either the coherence or the squared coherence; the transform on
        squared coherence closely matches the Cramér-Rao bound on phase variance (see paper).
        Further transforms can also be added here.
"""
if transform == 'logit_squared':
# Convert to higher precision to avoid divide by zero error in log
# Don't seem to need this with logit transform
self.data = np.float64(self.data)
# Make sure all values in [0,1] range first
eps = 1e-6 # small epsilon value
self.data[self.data <= 0.0] = eps
self.data[self.data >= 1.0] = 1.0-eps
# Apply inverse sigmoid
print('Using transform: {}'.format(transform))
if transform == 'logit':
self.data = np.log(self.data/(1-self.data))
elif transform == 'logit_squared':
self.data = np.log(np.square(self.data)/(1.0-np.square(self.data)))
else:
raise Exception('Data transform not defined')
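        # Hedged numeric check (not part of the original method): for a coherence value
        # of 0.8, 'logit' gives log(0.8 / 0.2) ≈ 1.386, while 'logit_squared' gives
        # log(0.64 / 0.36) ≈ 0.575.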
def create_test_set(self, train_split=0.8, seed=128):
"""
Create test dataset.
This is memory efficient and doesn't duplicate self.data
The training set is: self.data[self.shuffle_inds[:self.train_set_size]]
The test set is: self.data[self.shuffle_inds[self.train_set_size:]]
Args:
train_split: proportion of data to use for training, rest for test.
seed: seed to fix randomness.
"""
np.random.seed(seed) # fix randomness
self.shuffle_inds = np.random.permutation(len(self.data)) # shuffle a random permutation
self.train_set_size = int(train_split*len(self.data)) # set training set size
def deploy(self):
self.mode = Mode.DEPLOY
def train(self):
self.mode = Mode.TRAIN
def test(self):
self.mode = Mode.TEST
def __len__(self):
"""
Length of dataset.
Must override this method when extending Dataset object.
"""
if self.mode == Mode.DEPLOY:
return len(self.data)
elif self.mode == Mode.TRAIN:
return self.train_set_size
elif self.mode == Mode.TEST:
return len(self.data)-self.train_set_size
else:
raise NotImplementedError
def __getitem__(self, index):
"""
For getting data with indices.
Must override this method when extending Dataset object.
Return:
(preseismic timeseries, coseismic coherence)
"""
if self.mode == Mode.DEPLOY:
batch_preseismic = self.data[index,:self.event_index]
batch_coseismic = self.data[index,self.event_index]
elif self.mode == Mode.TRAIN:
train_index = self.shuffle_inds[index]
batch_preseismic = self.data[train_index,:self.event_index]
batch_coseismic = self.data[train_index,self.event_index]
elif self.mode == Mode.TEST:
test_index = self.shuffle_inds[index+self.train_set_size]
batch_preseismic = self.data[test_index,:self.event_index]
batch_coseismic = self.data[test_index,self.event_index]
else:
raise NotImplementedError
return torch.tensor(batch_preseismic).float(), torch.tensor(batch_coseismic).float()
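    # Hedged usage sketch (path and numbers are hypothetical):
    #   cfg = {'path': 'coh.npy', 'shape': [100, 200], 'length': 50, 'event_index': 40}
    #   ds = Coherence_Timeseries(cfg)
    #   ds.remove_nans(); ds.unbound('logit_squared'); ds.create_test_set(); ds.train()
    #   loader = torch.utils.data.DataLoader(ds, batch_size=256)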
| 37.160839
| 122
| 0.629093
|
d3825a517263d7c77e0670ea7e6260ca8ae1d204
| 7,178
|
bzl
|
Python
|
apple/internal/binary_support.bzl
|
acecilia/rules_apple
|
3a295d2a2702800d77e2c583f3a77262e764823e
|
[
"Apache-2.0"
] | 1
|
2022-02-16T12:42:23.000Z
|
2022-02-16T12:42:23.000Z
|
apple/internal/binary_support.bzl
|
acecilia/rules_apple
|
3a295d2a2702800d77e2c583f3a77262e764823e
|
[
"Apache-2.0"
] | 11
|
2019-10-15T23:03:57.000Z
|
2020-06-14T16:10:12.000Z
|
apple/internal/binary_support.bzl
|
acecilia/rules_apple
|
3a295d2a2702800d77e2c583f3a77262e764823e
|
[
"Apache-2.0"
] | 7
|
2019-07-04T14:23:54.000Z
|
2020-04-27T08:52:51.000Z
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary creation support functions."""
load(
"@build_bazel_rules_apple//apple/internal:entitlement_rules.bzl",
"entitlements",
)
load(
"@build_bazel_rules_apple//apple/internal:exported_symbols_lists_rules.bzl",
"exported_symbols_lists",
)
load(
"@build_bazel_rules_apple//apple/internal:swift_support.bzl",
"swift_runtime_linkopts",
)
def _create_swift_runtime_linkopts_target(
name,
deps,
is_static,
is_test,
tags,
testonly):
"""Creates a build target to propagate Swift runtime linker flags.
Args:
name: The name of the base target.
deps: The list of dependencies of the base target.
is_static: True to use the static Swift runtime, or False to use the
dynamic Swift runtime.
is_test: True to make sure test specific linkopts are propagated.
tags: Tags to add to the created targets.
testonly: Whether the target should be testonly.
Returns:
A build label that can be added to the deps of the binary target.
"""
swift_runtime_linkopts_name = name + ".swift_runtime_linkopts"
swift_runtime_linkopts(
name = swift_runtime_linkopts_name,
is_static = is_static,
is_test = is_test,
testonly = testonly,
tags = tags,
deps = deps,
)
return ":" + swift_runtime_linkopts_name
def _add_entitlements_and_swift_linkopts(
name,
platform_type,
product_type,
include_entitlements = True,
is_stub = False,
link_swift_statically = False,
is_test = False,
exported_symbols_lists = None,
**kwargs):
"""Adds entitlements and Swift linkopts targets for a bundle target.
This function creates an entitlements target to ensure that a binary
created using the `link_multi_arch_binary` API or by copying a stub
executable gets signed appropriately.
Similarly, for bundles with user-provided binaries, this function also
adds any Swift linkopts that are necessary for it to link correctly.
Args:
name: The name of the bundle target, from which the targets' names
will be derived.
platform_type: The platform type of the bundle.
product_type: The product type of the bundle.
include_entitlements: True/False, indicates whether to include an entitlements target.
Defaults to True.
is_stub: True/False, indicates whether the function is being called for a bundle that uses a
stub executable.
link_swift_statically: True/False, indicates whether the static versions of the Swift standard
libraries should be used during linking. Only used if include_swift_linkopts is True.
is_test: True/False, indicates if test specific linker flags should be propagated.
exported_symbols_lists: A list of text files representing exported symbols lists that should
      be linked with the final binary.
**kwargs: The arguments that were passed into the top-level macro.
Returns:
A modified copy of `**kwargs` that should be passed to the bundling rule.
"""
bundling_args = dict(kwargs)
tags = bundling_args.get("tags", None)
testonly = bundling_args.get("testonly", None)
additional_deps = []
if include_entitlements:
entitlements_value = bundling_args.get("entitlements")
provisioning_profile = bundling_args.get("provisioning_profile")
entitlements_name = "%s_entitlements" % name
entitlements(
name = entitlements_name,
bundle_id = bundling_args.get("bundle_id"),
entitlements = entitlements_value,
platform_type = platform_type,
product_type = product_type,
provisioning_profile = provisioning_profile,
tags = tags,
testonly = testonly,
validation_mode = bundling_args.get("entitlements_validation"),
)
# Replace the `entitlements` attribute with the preprocessed entitlements.
bundling_args["entitlements"] = ":" + entitlements_name
if not is_stub:
# Also add the target as a dependency if the target is not a stub, since it may
# propagate linkopts.
additional_deps.append(":{}".format(entitlements_name))
exported_symbols_list_deps = _add_exported_symbols_lists(
name,
exported_symbols_lists,
)
deps = bundling_args.get("deps", [])
if not is_stub:
# Propagate the linker flags that dynamically link the Swift runtime, if Swift was used. If
# it wasn't, this target propagates no linkopts.
additional_deps.append(
_create_swift_runtime_linkopts_target(
name,
deps,
link_swift_statically,
bool(is_test or testonly),
tags = tags,
testonly = testonly,
),
)
all_deps = deps + additional_deps + exported_symbols_list_deps
if all_deps:
bundling_args["deps"] = all_deps
return bundling_args
def _add_exported_symbols_lists(name, exported_symbols_lists_value):
"""Adds one or more exported symbols lists to a bundle target.
These lists are references to files that provide a list of global symbol names that will remain
as global symbols in the output file. All other global symbols will be treated as if they were
marked as __private_extern__ (aka visibility=hidden) and will not be global in the output file.
See the man page documentation for ld(1) on macOS for more details.
Args:
name: The name of the bundle target, from which the binary target's name will be derived.
exported_symbols_lists_value: A list of text files representing exported symbols lists that
      should be linked with the final binary.
Returns:
      A list of dependency labels for the exported symbols lists (possibly empty).
"""
if exported_symbols_lists_value:
exported_symbols_lists_name = "%s_exported_symbols_lists" % name
exported_symbols_lists(
name = exported_symbols_lists_name,
lists = exported_symbols_lists_value,
)
exported_symbols_list_deps = [":" + exported_symbols_lists_name]
else:
exported_symbols_list_deps = []
return exported_symbols_list_deps
# Define the loadable module that lists the exported symbols in this file.
binary_support = struct(
add_entitlements_and_swift_linkopts = _add_entitlements_and_swift_linkopts,
)
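# Usage sketch (not part of the original file): a top-level bundling macro can
# thread its kwargs through the helper above before invoking the underlying rule.
# `apple_product_type.application` and `_my_application_rule` are placeholders, and
# the exact argument list should be checked against the definition above:
#
#     def my_application(name, **kwargs):
#         bundling_args = binary_support.add_entitlements_and_swift_linkopts(
#             name,
#             platform_type = "ios",
#             product_type = apple_product_type.application,
#             link_swift_statically = True,
#             exported_symbols_lists = kwargs.pop("exported_symbols_lists", []),
#             **kwargs
#         )
#         _my_application_rule(name = name, **bundling_args)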
| 38.180851
| 100
| 0.690582
|
b126aeab73d672fb64e7e9f736b3cc2c4ee0a256
| 5,270
|
py
|
Python
|
salt/_compat.py
|
moniker-dns/salt
|
0e1cd880dc7831b9f937a213dd90cc32e2a09884
|
[
"Apache-2.0"
] | 1
|
2016-03-13T09:05:15.000Z
|
2016-03-13T09:05:15.000Z
|
salt/_compat.py
|
moniker-dns/salt
|
0e1cd880dc7831b9f937a213dd90cc32e2a09884
|
[
"Apache-2.0"
] | null | null | null |
salt/_compat.py
|
moniker-dns/salt
|
0e1cd880dc7831b9f937a213dd90cc32e2a09884
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Salt compatibility code
'''
# pylint: disable=W0611
# Import python libs
import sys
import types
try:
import cPickle as pickle
except ImportError:
import pickle
try:
# Python >2.5
import xml.etree.cElementTree as ElementTree
except ImportError:
try:
# Python >2.5
import xml.etree.ElementTree as ElementTree
except ImportError:
try:
# normal cElementTree install
import cElementTree as ElementTree
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as ElementTree
except ImportError:
raise
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
MAX_SIZE = sys.maxsize
else:
MAX_SIZE = sys.maxint
# pylint: disable=C0103
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
long = int
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
long = long
if PY3:
def callable(obj):
return any('__call__' in klass.__dict__ for klass in type(obj).__mro__)
else:
callable = callable
def text_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``binary_type``, return
``s.decode(encoding, errors)``, otherwise return ``s``
'''
if isinstance(s, binary_type):
return s.decode(encoding, errors)
return s
def bytes_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``s``
'''
if isinstance(s, text_type):
return s.encode(encoding, errors)
return s
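# Illustrative round-trip (not part of the original module): text_ and bytes_ are
# symmetric, so on both Python 2 and 3:
#     bytes_(u'saltstack')          # -> b'saltstack'
#     text_(bytes_(u'saltstack'))   # -> u'saltstack'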
if PY3:
def ascii_native_(s):
if isinstance(s, text_type):
s = s.encode('ascii')
return str(s, 'ascii', 'strict')
else:
def ascii_native_(s):
if isinstance(s, text_type):
s = s.encode('ascii')
return str(s)
ascii_native_.__doc__ = '''
Python 3: If ``s`` is an instance of ``text_type``, return
``s.encode('ascii')``, otherwise return ``str(s, 'ascii', 'strict')``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode('ascii')``, otherwise return ``str(s)``
'''
if PY3:
def native_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``text_type``, return
``s``, otherwise return ``str(s, encoding, errors)``
'''
if isinstance(s, text_type):
return s
return str(s, encoding, errors)
else:
def native_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``str(s)``
'''
if isinstance(s, text_type):
return s.encode(encoding, errors)
return str(s)
native_.__doc__ = '''
Python 3: If ``s`` is an instance of ``text_type``, return ``s``, otherwise
return ``str(s, encoding, errors)``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``str(s)``
'''
if PY3:
# pylint: disable=E0611
from urllib.parse import urlparse
from urllib.parse import urlunparse
from urllib.error import URLError
import http.server as BaseHTTPServer
from urllib.error import HTTPError
from urllib.parse import quote as url_quote
from urllib.parse import quote_plus as url_quote_plus
from urllib.parse import unquote as url_unquote
from urllib.parse import urlencode as url_encode
from urllib.request import urlopen as url_open
from urllib.request import HTTPPasswordMgrWithDefaultRealm as url_passwd_mgr
from urllib.request import HTTPBasicAuthHandler as url_auth_handler
from urllib.request import build_opener as url_build_opener
from urllib.request import install_opener as url_install_opener
url_unquote_text = url_unquote
url_unquote_native = url_unquote
else:
from urlparse import urlparse
from urlparse import urlunparse
import BaseHTTPServer
from urllib2 import HTTPError, URLError
from urllib import quote as url_quote
from urllib import quote_plus as url_quote_plus
from urllib import unquote as url_unquote
from urllib import urlencode as url_encode
from urllib2 import urlopen as url_open
from urllib2 import HTTPPasswordMgrWithDefaultRealm as url_passwd_mgr
from urllib2 import HTTPBasicAuthHandler as url_auth_handler
from urllib2 import build_opener as url_build_opener
from urllib2 import install_opener as url_install_opener
def url_unquote_text(v, encoding='utf-8', errors='replace'):
v = url_unquote(v)
return v.decode(encoding, errors)
def url_unquote_native(v, encoding='utf-8', errors='replace'):
return native_(url_unquote_text(v, encoding, errors))
if PY3:
zip = zip
else:
from future_builtins import zip
if PY3:
from io import StringIO
else:
from StringIO import StringIO
if PY3:
import queue as Queue
else:
import Queue
# pylint: enable=C0103
| 28.333333
| 80
| 0.663378
|
b28a39c4dde1a8fc70edad7bc8121516ad6030e5
| 6,669
|
py
|
Python
|
Room.py
|
AldoAbdn/Polar-Simulator-2017
|
1824cc1faa76fbaed8e4fa143679f1aed8685590
|
[
"MIT"
] | null | null | null |
Room.py
|
AldoAbdn/Polar-Simulator-2017
|
1824cc1faa76fbaed8e4fa143679f1aed8685590
|
[
"MIT"
] | null | null | null |
Room.py
|
AldoAbdn/Polar-Simulator-2017
|
1824cc1faa76fbaed8e4fa143679f1aed8685590
|
[
"MIT"
] | null | null | null |
import pygame
from pygame.locals import *
from EventManager import EventManager
from EventHandlers import RoomEventHandlers
from SoundManager import SoundManager
from Crate import Crate
from Wall import Wall
from WarehouseKeeper import WarehouseKeeper
from Tile import Diamond
from Tile import Tile
from Grid import Grid
class Room(object):
"""These are the 'levels' of the game. There will be 5 in total"""
def __init__(self,level,map,roomRes,imagePath=""):
self.roomResolution = roomRes
self.level = level
self.moves = 0
self.grid = Grid(map, roomRes)
self.player = self.grid.getPlayer()
self.eventHandlers = RoomEventHandlers(self)
#Getters
def getMoves(self):
return self.moves
def getLevel(self):
return self.level
def getEventHandlers(self):
return self.eventHandlers
def getGrid(self):
return self.grid
def __getResolution__(self):
return self.roomResolution
def __getPlayer__(self):
return self.player
#Setters
def setMoves(self, value):
self.moves = value
def __setLevel__(self, value):
self.level = value
def __setEventHandlers__(self, value):
self.eventHandlers = value
def __setResolution__(self,value):
self.roomResolution = value
def __setGrid(self, value):
self.grid = value
def __setPlayer__(self, value):
self.player = value
#Draw
def draw(self, surface):
self.grid.draw(surface) #Draws all the sprites
def clickPlayerMove(self, pos):
possiblePlayerCoords = self.grid.predictPlayerPossibleCoords()
clickedItem = self.grid.getItemByPos(pos)
if clickedItem:
clickedItemCoords = clickedItem.getCoordinates()
for coords in possiblePlayerCoords:
if clickedItemCoords == coords:
direction = self.grid.convertCoordsToDirection(coords)
if direction:
self.playerMove(direction)
return
    #This is what is called when a WASD key is pressed; checks whether the player, or the player and a crate, can move
def playerMove(self, direction):
playerMove = self.__spriteMoved__(direction)
if playerMove:
self.__incrementMoves__()
pygame.event.post(EventManager.Events["Player Move"]) #If the player moves, call player move event to update count
roomOver = self.__isRoomOver__()
if roomOver:
pygame.event.post(EventManager.Events["Room Over"])
#Used to predict where a sprite will be if it were to move a certain direction
    def __predictCoordinates__(self, currentCoordinates, direction):
        if direction.lower() == "up":
            possibleCoordinates = (currentCoordinates[0] - 1, currentCoordinates[1])
        elif direction.lower() == "down":
            possibleCoordinates = (currentCoordinates[0] + 1, currentCoordinates[1])
        elif direction.lower() == "left":
            possibleCoordinates = (currentCoordinates[0], currentCoordinates[1] - 1)
        elif direction.lower() == "right":
            possibleCoordinates = (currentCoordinates[0], currentCoordinates[1] + 1)
        else:
            possibleCoordinates = currentCoordinates #Unknown direction: predict no movement instead of raising UnboundLocalError
        return possibleCoordinates
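    # Worked example (illustrative, not part of the original file): with the
    # (row, column) convention above, moving "up" from (3, 5) predicts (2, 5) and
    # moving "right" predicts (3, 6); callers then check the grid at that cell.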
#Takes in a sprite, checks if hits a wall. Moves sprite depending on type
def __spriteMoved__(self, direction):
if not self.player.getMoving() and not self.grid.crateMoving():
#Predicts next possible position based on direction
currentCoordinates = self.player.getCoordinates() #Gets current coordinates
possibleCoordinates = self.__predictCoordinates__(currentCoordinates,direction)
            roomOver = False
            canMove = None
            # try: #Catches out of range exception if player tries to move out of grid and there is no wall
if possibleCoordinates[0] >= 0 and possibleCoordinates[0] < self.grid.rows and possibleCoordinates[1] >= 0 and possibleCoordinates[1] < self.grid.cols:
items = self.grid.getItems(possibleCoordinates[0], possibleCoordinates[1])
if any(isinstance(x, Wall) for x in items):
return False
elif any(isinstance(x, Crate) for x in items):
crate = None
for possibleCrate in items:
if isinstance(possibleCrate, Crate):
crate = possibleCrate
break
crateCurrentCoordinates = crate.getCoordinates()
cratePossibleCoordinates = self.__predictCoordinates__(crateCurrentCoordinates, direction)
if cratePossibleCoordinates[0] >= 0 and cratePossibleCoordinates[0] < self.grid.rows and cratePossibleCoordinates[1] >= 0 and cratePossibleCoordinates[1] < self.grid.cols:
items = self.grid.getItems(cratePossibleCoordinates[0], cratePossibleCoordinates[1])
if any(isinstance(x, Wall) for x in items) or any(isinstance(x, Crate) for x in items):
return False
elif any(isinstance(x, Diamond) for x in items):
crate.toggleActive(True)
else:
crate.toggleActive(False)
self.player.setCoordinates(possibleCoordinates)
crate.setCoordinates(cratePossibleCoordinates)
return True
else:
self.player.setCoordinates(possibleCoordinates)
return True
else:
return False
def __incrementMoves__(self): #Increments moves for the room
self.moves += 1
def __isRoomOver__(self): #Checks for room over, compares crate positions to diamond positions
crates = self.grid.getCrates()
diamonds = self.grid.getDiamonds()
counter = 0
#Compares the coordinates of each crate to each diamond in a room, adds to a counter if they are in the same position
for i in range(0, len(crates)):
for j in range(0, len(diamonds)):
if crates[i].getCoordinates() == diamonds[j].getCoordinates():
counter += 1
#If all crates are on diamonds, room over return true
if counter == len(crates):
return True
else:
return False
| 40.664634
| 192
| 0.606838
|
35cfe3e9fb76f502a6b60ce3f92e37e261cfc81a
| 6,096
|
py
|
Python
|
qa/rpc-tests/proxy_test.py
|
MichaelHDesigns/Bitcoin-Token-Core
|
61f7837fe83994cb9b76d28a55e75dc9fa86f1fd
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/proxy_test.py
|
MichaelHDesigns/Bitcoin-Token-Core
|
61f7837fe83994cb9b76d28a55e75dc9fa86f1fd
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/proxy_test.py
|
MichaelHDesigns/Bitcoin-Token-Core
|
61f7837fe83994cb9b76d28a55e75dc9fa86f1fd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
import traceback, sys
from binascii import hexlify
import time, os
from socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework import BitcoinTestFramework
from util import *
'''
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) an observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
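# For example, node 0 below is started with -proxy pointing at the unauthenticated
# SOCKS5 server (serv1), so every addnode() issued for it in node_test() should
# arrive at serv1 as an atyp DOMAINNAME request with no username/password.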
class ProxyTest(BitcoinTestFramework):
def __init__(self):
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
def setup_nodes(self):
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
return start_nodes(4, self.options.tmpdir, extra_args=[
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0']
])
def node_test(self, node, proxies, auth):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing onion connection through node
node.addnode("btctvj7kcklujarx.onion:88888", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "btctvj7kcklujarx.onion")
assert_equal(cmd.port, 88888)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), 4)
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False)
if __name__ == '__main__':
ProxyTest().main()
| 41.753425
| 145
| 0.652887
|
519e7cc48cab3c9573d007ce00af528a04335789
| 1,322
|
py
|
Python
|
commit_data.py
|
iwangjian/ByteCup2018
|
c59c6a495f81c493eaaf7fda710c8acd7ef148b9
|
[
"MIT"
] | 80
|
2018-09-08T01:11:36.000Z
|
2022-01-18T13:41:30.000Z
|
commit_data.py
|
BoostMom/ByteCup2018
|
c59c6a495f81c493eaaf7fda710c8acd7ef148b9
|
[
"MIT"
] | 3
|
2018-12-02T15:08:05.000Z
|
2020-02-10T04:11:28.000Z
|
commit_data.py
|
BoostMom/ByteCup2018
|
c59c6a495f81c493eaaf7fda710c8acd7ef148b9
|
[
"MIT"
] | 21
|
2018-10-27T07:40:25.000Z
|
2022-03-28T12:30:01.000Z
|
import os
import argparse
def process_decoded(args):
if not os.path.exists(args.result_dir):
os.mkdir(args.result_dir)
punct = ["/", "`", "+", "-", ";", "-lrb-", "-rrb-", "``", "|", "~", """]
for file in os.listdir(args.decode_dir):
file_path = os.path.join(args.decode_dir, file)
file_id = int(str(file).split('.')[0]) + 1
res_file = str(file_id) + '.txt'
res_path = os.path.join(args.result_dir, res_file)
temp = []
with open(file_path, 'r') as fr:
text = fr.read().strip()
data = text.split(" ")
for word in data:
                if word not in punct:
temp.append(word)
with open(res_path, 'w', encoding='utf-8') as fw:
fw.write(" ".join(temp))
fw.write('\n')
print("Finished: %s" % args.result_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert decoded files to commit files')
parser.add_argument('--decode_dir', action='store', required=True,
help='directory of decoded summaries')
parser.add_argument('--result_dir', action='store', required=True,
help='directory of submission')
args = parser.parse_args()
process_decoded(args)
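# Usage sketch (illustrative; the directory names below are placeholders):
#     python commit_data.py --decode_dir decoded/ --result_dir submission/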
| 33.897436
| 89
| 0.553707
|
886c36e0f2c94072803f50ab03678d988783fba7
| 349
|
py
|
Python
|
JustPy/04.Handling/mail.py
|
sarincr/Python-Web-Frameworks-and-Template-Engines
|
a830cafb0075539527688ea3c12e3e2d97d3519f
|
[
"MIT"
] | null | null | null |
JustPy/04.Handling/mail.py
|
sarincr/Python-Web-Frameworks-and-Template-Engines
|
a830cafb0075539527688ea3c12e3e2d97d3519f
|
[
"MIT"
] | null | null | null |
JustPy/04.Handling/mail.py
|
sarincr/Python-Web-Frameworks-and-Template-Engines
|
a830cafb0075539527688ea3c12e3e2d97d3519f
|
[
"MIT"
] | null | null | null |
import justpy as jp
def my_click(self, msg):
self.text = 'I was clicked'
print(msg.event_type)
print(msg['event_type'])
print(msg)
def event_demo():
wp = jp.WebPage()
d = jp.P(text='Not clicked yet', a=wp, classes='text-xl m-2 p-2 bg-blue-500 text-white')
d.on('click', my_click)
return wp
jp.justpy(event_demo)
| 20.529412
| 92
| 0.636103
|
e14ddb6cae4737692f361260b3fece66704c6fc7
| 836
|
py
|
Python
|
visualize.py
|
nime6/voxeldc-gan
|
e11e8248dc8ce0e3930de0f2867ef0fb395d5934
|
[
"MIT"
] | 146
|
2016-09-30T01:41:23.000Z
|
2022-03-10T15:16:45.000Z
|
visualize.py
|
nime6/voxeldc-gan
|
e11e8248dc8ce0e3930de0f2867ef0fb395d5934
|
[
"MIT"
] | 7
|
2017-03-02T14:13:32.000Z
|
2020-01-07T07:22:00.000Z
|
visualize.py
|
nime6/voxeldc-gan
|
e11e8248dc8ce0e3930de0f2867ef0fb395d5934
|
[
"MIT"
] | 40
|
2017-01-20T13:36:33.000Z
|
2020-04-07T04:25:28.000Z
|
import tensorflow as tf
import numpy as np
import util
import config
from model import *
netG = Generator()
z = tf.placeholder(tf.float32, [config.batch_size, config.nz])
train = tf.placeholder(tf.bool)
x = netG(z, train, config.nsf, config.nvx)
t_vars = tf.trainable_variables()
varsG = [var for var in t_vars if var.name.startswith('G')]
saver = tf.train.Saver(varsG + tf.moving_average_variables())
config_proto = tf.ConfigProto()
config_proto.gpu_options.allow_growth = True
with tf.Session(config=config_proto) as sess:
saver.restore(sess, config.params_path)
batch_z = np.random.uniform(-1, 1, [config.batch_size, config.nz]).astype(np.float32)
x_g = sess.run(x, feed_dict={z:batch_z, train:False})
for i, data in enumerate(x_g):
util.save_binvox("out/{0}.binvox".format(i), data[:, :, :, 0] > 0.9)
| 29.857143
| 89
| 0.715311
|
d9dadb00c503d68ce7fa353d6d1f317a72e28769
| 929
|
py
|
Python
|
azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_09_01/operations/__init__.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_09_01/operations/__init__.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_09_01/operations/__init__.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-08-28T14:36:47.000Z
|
2018-08-28T14:36:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .registries_operations import RegistriesOperations
from .operations import Operations
from .replications_operations import ReplicationsOperations
from .webhooks_operations import WebhooksOperations
from .runs_operations import RunsOperations
from .tasks_operations import TasksOperations
__all__ = [
'RegistriesOperations',
'Operations',
'ReplicationsOperations',
'WebhooksOperations',
'RunsOperations',
'TasksOperations',
]
| 34.407407
| 76
| 0.662002
|
f717953473fd5d44045b712a4f139e641a8c661b
| 27,730
|
py
|
Python
|
InstallOpenface/fix_sklearn/label.py
|
s123600g/openfaceInstallscript
|
962b4b89c5626318b5701d7297d49df3423b0fe4
|
[
"MIT"
] | null | null | null |
InstallOpenface/fix_sklearn/label.py
|
s123600g/openfaceInstallscript
|
962b4b89c5626318b5701d7297d49df3423b0fe4
|
[
"MIT"
] | null | null | null |
InstallOpenface/fix_sklearn/label.py
|
s123600g/openfaceInstallscript
|
962b4b89c5626318b5701d7297d49df3423b0fe4
|
[
"MIT"
] | null | null | null |
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Joel Nothman <joel.nothman@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import sparse_min_max
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
See also
--------
sklearn.preprocessing.OneHotEncoder : encode categorical integer features
using a one-hot aka one-of-K scheme.
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
y = column_or_1d(y, warn=True)
classes = np.unique(y)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
# raise ValueError("y contains new labels: %s" % str(diff))
raise ValueError("y contains previously unseen labels: % s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
# if diff:
# raise ValueError("y contains new labels: %s" % str(diff))
if len(diff):
raise ValueError("y contains previously unseen labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
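# Example of the patched unseen-label check above (illustrative, not part of the
# upstream scikit-learn sources):
#     le = LabelEncoder().fit(["a", "b"])
#     le.transform(["a", "c"])  # ValueError: y contains previously unseen labels: ['c']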
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
sklearn.preprocessing.OneHotEncoder : encode categorical integer features
using a one-hot aka one-of-K scheme.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : array of shape [n_samples,] or [n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def fit_transform(self, y):
"""Fit label binarizer and transform multi-class labels to binary
labels.
The output of transform is sometimes referred to as
the 1-of-K coding scheme.
Parameters
----------
y : array or sparse matrix of shape [n_samples,] or \
[n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
return self.fit(y).transform(y)
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as
the 1-of-K coding scheme.
Parameters
----------
y : array or sparse matrix of shape [n_samples,] or \
[n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when ``Y`` contains the output of decision_function
(classifier).
Use 0.5 when ``Y`` contains the output of predict_proba.
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if n_classes == 1:
if sparse_output:
return sp.csr_matrix((n_samples, 1), dtype=int)
else:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = np.in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = Y.astype(int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = Y.data.astype(int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
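# Worked example (illustrative): with classes = [1, 2, 6] and a dense score row
# [0.1, 0.7, 0.2], the per-row argmax is column 1, so that row inverts to label 2.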
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
return np.repeat(classes[0], len(y))
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> from sklearn.preprocessing import MultiLabelBinarizer
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
See also
--------
sklearn.preprocessing.OneHotEncoder : encode categorical integer features
using a one-hot aka one-of-K scheme.
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
# ensure yt.indices keeps its current dtype
yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype,
copy=False)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
check_is_fitted(self, 'classes_')
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
check_is_fitted(self, 'classes_')
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| 33.329327
| 87
| 0.588136
|
d8d281e08b8a9f30580b7ba5501ec6efe951376e
| 17,695
|
py
|
Python
|
experiments/net_rec.py
|
small-yellow-duck/titanic-ede
|
0a3e1b352ccff83a8308e190955bf839be9d2ef5
|
[
"BSD-3-Clause"
] | 1
|
2019-07-24T14:15:38.000Z
|
2019-07-24T14:15:38.000Z
|
experiments/net_rec.py
|
small-yellow-duck/titanic-ede
|
0a3e1b352ccff83a8308e190955bf839be9d2ef5
|
[
"BSD-3-Clause"
] | null | null | null |
experiments/net_rec.py
|
small-yellow-duck/titanic-ede
|
0a3e1b352ccff83a8308e190955bf839be9d2ef5
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- encoding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from torch.nn.parameter import Parameter
def get_embedding_weight(num_embeddings, embedding_dim, use_cuda):
t = torch.rand(num_embeddings, embedding_dim)
t = t - torch.mean(t, 0).view(1, -1).repeat(num_embeddings, 1)
if use_cuda:
return Parameter(t).cuda()
else:
return Parameter(t)
class VariationalEmbedding(nn.Module):
def __init__(self, n_tokens, embdim, _weight=None, _spread=None):
super(VariationalEmbedding, self).__init__()
self.weight = Parameter(_weight)
self.spread = Parameter(_spread)
self.n_tokens = self.weight.size(0)
self.embdim = embdim
assert list(_weight.shape) == [n_tokens, embdim], \
'Shape of weight does not match num_embeddings and embedding_dim'
def reparameterize(self, mu, logvar):
if self.training:
std = logvar.mul(0.5).exp_()
eps = Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
else:
return mu
def forward(self, indices):
mu = F.embedding(indices, self.weight)
std = F.embedding(indices, self.spread)
return self.reparameterize(mu, std)
class EmbeddingToIndex(nn.Module):
def __init__(self, n_tokens, embdim, _weight=None):
super(EmbeddingToIndex, self).__init__()
self.weight = Parameter(_weight)
self.n_tokens = self.weight.size(0)
self.embdim = embdim
assert list(_weight.shape) == [n_tokens, embdim], \
'Shape of weight does not match num_embeddings and embedding_dim'
def forward(self, X):
mb_size = X.size(0)
adotb = torch.matmul(X, self.weight.permute(1, 0))
if len(X.size()) == 3:
seqlen = X.size(1)
adota = torch.matmul(X.view(-1, seqlen, 1, self.embdim),
X.view(-1, seqlen, self.embdim, 1))
adota = adota.view(-1, seqlen, 1).repeat(1, 1, self.n_tokens)
else: # (X.size()) == 2:
adota = torch.matmul(X.view(-1, 1, self.embdim), X.view(-1, self.embdim, 1))
adota = adota.view(-1, 1).repeat(1, self.n_tokens)
bdotb = torch.bmm(self.weight.unsqueeze(-1).permute(0, 2, 1), self.weight.unsqueeze(-1)).permute(1, 2, 0)
if len(X.size()) == 3:
bdotb = bdotb.repeat(mb_size, seqlen, 1)
else:
bdotb = bdotb.reshape(1, self.n_tokens).repeat(mb_size, 1)
dist = adota - 2 * adotb + bdotb
return torch.min(dist, dim=len(dist.size()) - 1)[1]
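# Illustrative sketch (not part of the original file): EmbeddingToIndex inverts an
# embedding by nearest-neighbour search, expanding ||x - w||^2 = x.x - 2*x.w + w.w
# and taking the argmin over the vocabulary; with distinct embedding rows it
# recovers the original indices, e.g.:
#     w = torch.eye(4)
#     to_index = EmbeddingToIndex(4, 4, _weight=w.clone())
#     to_index(F.embedding(torch.tensor([2, 0]), w))  # -> tensor([2, 0])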
class MakeMissing(nn.Module):
def __init__(self, dropoutrate):
super(MakeMissing, self).__init__()
self._name = 'MakeMissing'
self.dropoutrate = dropoutrate
def forward(self, inputs):
rvals = torch.rand(inputs.size()).cuda()
r = torch.gt(torch.rand(inputs.size()).cuda(), self.dropoutrate * torch.ones_like(inputs)).float()
return r * inputs - rvals * (torch.ones_like(r) - r)
class ReLUSigmoid(nn.Module):
def __init__(self):
super(ReLUSigmoid, self).__init__()
self._name = 'ReLUSigmoid'
def forward(self, inputs):
r = torch.lt(inputs[:, 0].cuda(), torch.zeros_like(inputs[:, 0])).float()
# rvals = torch.rand(inputs[:, 0].size()).cuda()
# return -r*rvals + (torch.ones_like(r)-r)*torch.sigmoid(inputs[:, 1])
return -r * torch.sigmoid(inputs[:, 1]) + (torch.ones_like(r) - r) * torch.sigmoid(inputs[:, 1])
class PlaceHolder(nn.Module):
def __init__(self):
super(PlaceHolder, self).__init__()
self._name = 'place_holder'
def forward(self, inputs):
if len(inputs.size()) == 1:
return inputs.view(-1, 1)
else:
return inputs
# choose an item from a tuple or list
class SelectItem(nn.Module):
def __init__(self, item_index):
super(SelectItem, self).__init__()
self._name = 'selectitem'
self.item_index = item_index
def forward(self, inputs):
return inputs[self.item_index]
class SwapAxes(nn.Module):
def __init__(self, reordered_axes):
super(SwapAxes, self).__init__()
self._name = 'SwapAxes'
self.reordered_axes = reordered_axes
def forward(self, inputs):
return inputs.permute(self.reordered_axes)
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
self._name = 'Flatten'
def forward(self, inputs):
return inputs.reshape(inputs.size(0), -1)
class Reshape(nn.Module):
def __init__(self, out_shape):
super(Reshape, self).__init__()
self._name = 'Reshape'
self.out_shape = out_shape
def forward(self, inputs):
return inputs.reshape((inputs.size(0),) + self.out_shape)
class Repeat(nn.Module):
def __init__(self, viewshape, repeat):
super(Repeat, self).__init__()
self._name = 'Repeat'
self.viewshape = viewshape # (input.size(0), 1, -1)
self.repeat = repeat # (1, self.maxlen, 1)
def forward(self, input):
return input.view((input.size(0),) + self.viewshape).repeat((1,) + self.repeat)
class DropAll(nn.Module):
def __init__(self, dropoutrate):
super(DropAll, self).__init__()
self._name = 'DropAll'
self.dropoutrate = dropoutrate
def forward(self, inputs):
if self.training:
r = torch.lt(torch.rand((inputs.size(0))).cuda(), self.dropoutrate).float()
r = r.reshape([-1] + [1 for i in inputs.size()[1:]]).repeat((1,) + inputs.size()[1:])
return (1 - r) * inputs # -r*torch.ones_like(inputs) + (1-r)*inputs
else:
return inputs
def calc_input_size(input_dict, recurrent_hidden_size, num_layers, bidirectional, continuous_input_dim):
input_size = 0
if bidirectional:
bidir = 2
else:
bidir = 1
for inp in input_dict['discrete']:
input_size += continuous_input_dim
print(inp, continuous_input_dim)
for inp in input_dict['continuous']:
input_size += continuous_input_dim
print(inp, continuous_input_dim)
for inp in input_dict['text']:
input_size += bidir * recurrent_hidden_size * num_layers # continuous_input_dim #
print(inp, continuous_input_dim)
for inp in input_dict['onehot']:
input_size += input_dict['onehot'][inp] # continuous_input_dim #
print(inp, input_dict['onehot'][inp]) # continuous_input_dim
return input_size
class Decoder(nn.Module):
def __init__(self, input_dict, maxlens, dim=24, recurrent_hidden_size=8):
super(Decoder, self).__init__()
self._name = 'decoder'
self.dim = dim
self.recurrent_hidden_size = recurrent_hidden_size
self.input_dict = input_dict
self.maxlens = maxlens
self.hidden_dim = 8 * self.dim
self.bidirectional_rnn = True
self.num_layers = 2
self.preprocess = nn.Sequential(
nn.utils.weight_norm(nn.Linear(self.dim, int(1 * self.hidden_dim / 2)), dim=None),
# nn.Linear(self.dim, int(self.hidden_dim/2)),
nn.ReLU(True),
nn.utils.weight_norm(nn.Linear(int(1 * self.hidden_dim / 2), int(self.hidden_dim / 1)), dim=None),
nn.ReLU(True),
)
self.main = nn.Sequential(
nn.utils.weight_norm(nn.Conv2d(int(self.hidden_dim / 1), int(self.hidden_dim / 1), 1)),
# nn.Conv2d(self.hidden_dim, self.hidden_dim, 1),
nn.ReLU(True),
nn.utils.weight_norm(nn.Conv2d(int(self.hidden_dim / 1), int(self.hidden_dim / 1), 1)),
# nn.Conv2d(self.hidden_dim, self.hidden_dim, 1),
nn.ReLU(True),
Flatten()
)
self.outputs = {dtype: {} for dtype in self.input_dict.keys()}
for col in self.input_dict['discrete']:
# self.outputs['discrete'][col] = nn.utils.weight_norm(nn.Linear(self.hidden_dim, 1))
self.outputs['discrete'][col] = self.get_numeric(self.hidden_dim)
self.add_module(col + '_output', self.outputs['discrete'][col])
for col in self.input_dict['continuous']:
# self.outputs['continuous'][col] = nn.utils.weight_norm(nn.Linear(self.hidden_dim, 1))
self.outputs['continuous'][col] = self.get_numeric(self.hidden_dim)
self.add_module(col + '_output', self.outputs['continuous'][col])
for col, emb_size in self.input_dict['text'].items():
# self.outputs['text'][col] = nn.GRU(self.hidden_dim, emb_size, bidirectional=False, batch_first=True)
self.outputs['text'][col] = self.get_rec(emb_size, self.maxlens[col])
self.add_module(col + '_output', self.outputs['text'][col])
for col, emb_size in self.input_dict['onehot'].items():
# self.outputs['onehot'][col] = nn.utils.weight_norm(nn.Linear(self.hidden_dim, emb_size))
self.outputs['onehot'][col] = self.get_lin(self.hidden_dim, emb_size)
self.add_module(col + '_output', self.outputs['onehot'][col])
def get_numeric(self, hidden_size):
net = nn.Sequential(
nn.utils.weight_norm(nn.Linear(hidden_size, 1)),
# Reshape((hidden_size, 1)),
# nn.utils.weight_norm(nn.Conv1d(hidden_size, self.dim, 1)),
# nn.ReLU(),
# nn.utils.weight_norm(nn.Conv1d(self.dim, 1, 1)),
# Flatten(),
nn.Tanh()
# ReLUSigmoid()
)
return net
def get_lin(self, hidden_size, emb_size):
net = nn.Sequential(
nn.utils.weight_norm(nn.Linear(hidden_size, emb_size)),
)
return net
def get_rec(self, emb_size, string_len):
if self.bidirectional_rnn:
scale = 2
else:
scale = 1
net = nn.Sequential(
# nn.utils.weight_norm(nn.Linear(self.hidden_dim, self.dim)),
# nn.Tanh(),
Reshape((self.hidden_dim, 1)),
nn.utils.weight_norm(nn.Conv1d(self.hidden_dim, self.dim, 1)),
nn.Tanh(),
Flatten(),
Repeat((1, -1), (string_len, 1)),
nn.GRU(self.dim, emb_size, batch_first=True, bidirectional=self.bidirectional_rnn,
num_layers=self.num_layers), # ,
SelectItem(0)
)
return net
def forward(self, input):
x = self.preprocess(input)
x = x.view(x.size(0), -1, 1, 1)
x = self.main(x)
x = x.view(input.size(0), -1)
output_dict = {}
for col in self.input_dict['discrete']:
output_dict[col] = self.outputs['discrete'][col](x)
for col in self.input_dict['continuous']:
output_dict[col] = self.outputs['continuous'][col](x)
for col, emb_size in self.input_dict['text'].items():
# n_tokens = self.input_dict['text'][col]
# print(x.size())
# xrep = x2.view(input.size(0), 1, -1).repeat(1, self.maxlens[col], 1)
# print('xrep_'+col, xrep.size())
# output_dict[col] = self.outputs['text'][col](xrep)[:, :, -emb_size:] #[0]
output_dict[col] = self.outputs['text'][col](x)[:, :, -emb_size:] # [0]
# print(col, output_dict[col].size())
for col in self.input_dict['onehot'].keys():
output_dict[col] = self.outputs['onehot'][col](x)
return output_dict
# https://github.com/neale/Adversarial-Autoencoder/blob/master/generators.py
class Encoder(nn.Module):
# can't turn dropout off completely because otherwise the loss -> NaN....
# batchnorm does not seem to help things...
def __init__(self, input_dict, dim=24, recurrent_hidden_size=8, sigmoidout=False):
super(Encoder, self).__init__()
self._name = 'encoder'
self.sigmoidout = sigmoidout
self.input_dict = input_dict
self.dim = dim
if self.sigmoidout:
self.outputdim = 1
else:
self.outputdim = dim
self.hidden_dim = 8 * self.dim
self.recurrent_hidden_size = recurrent_hidden_size
self.dropout = 0.0 #2.0 / self.outputdim # 0.0625 #0.03125 #0.015625# 0.125 #
self.bidirectional_rnn = True
self.num_layers = 2
self.continuous_input_dim = 1
self.input_size = calc_input_size(self.input_dict, self.recurrent_hidden_size, self.num_layers,
self.bidirectional_rnn, self.continuous_input_dim)
# self.rec = {inp: nn.GRU(emb_size, self.recurrent_hidden_size, bidirectional=False) for inp, emb_size in self.input_dict['text'].items()}
# for inp, layer in self.rec.items():
# self.add_module(inp+'_rec', layer)
self.input_layers = {}
for datatype in self.input_dict.keys():
if datatype == 'text':
for inp, emb_size in self.input_dict[datatype].items():
self.input_layers[inp] = self.get_rec(emb_size) #
self.add_module(inp + '_input', self.input_layers[inp])
elif datatype == 'onehot':
for inp, emb_size in self.input_dict[datatype].items():
self.input_layers[inp] = self.get_onehot() # PlaceHolder()
self.add_module(inp + '_input', self.input_layers[inp])
else:
for inp in self.input_dict[datatype]:
self.input_layers[inp] = self.get_lin() # PlaceHolder()
self.add_module(inp + '_input', self.input_layers[inp])
        self.main = {}
        self.main[0] = nn.Dropout(p=self.dropout)
        self.main[1] = nn.utils.weight_norm(nn.Conv2d(self.input_size, int(self.hidden_dim / 1), 1, dilation=1, stride=1))
        self.main[2] = nn.ReLU(True)
        self.main[3] = nn.utils.weight_norm(nn.Conv2d(self.input_size + int(self.hidden_dim / 1), int(self.hidden_dim / 1), 1, dilation=1, stride=1))
        self.main[4] = nn.ReLU(True)
        # self.main is a plain dict, so register its layers explicitly; otherwise their
        # parameters would not be tracked by .parameters(), .cuda(), state_dict(), etc.
        for idx, layer in self.main.items():
            self.add_module('main_%d' % idx, layer)
self.getmu = nn.Sequential(
nn.utils.weight_norm(nn.Linear(int(self.hidden_dim / 1), int(1 * self.hidden_dim / 2))),
nn.ReLU(True),
nn.utils.weight_norm(nn.Linear(int(1 * self.hidden_dim / 2), self.outputdim)),
)
self.getlogvar = nn.Sequential(
nn.utils.weight_norm(nn.Linear(int(self.hidden_dim / 1), int(1 * self.hidden_dim / 2))),
nn.ReLU(True),
nn.utils.weight_norm(nn.Linear(int(1 * self.hidden_dim / 2), self.outputdim)),
)
    def get_main(self, x0):
        # Chain the layers stored in self.main. Layer 3's in_channels
        # (input_size + hidden_dim) imply a dense-style skip connection, so the
        # original input is concatenated with layer 1's activations before it.
        x = self.main[2](self.main[1](self.main[0](x0)))
        x = self.main[4](self.main[3](torch.cat([x0, x], dim=1)))
        return x
def get_rec(self, emb_size):
if self.bidirectional_rnn:
bidir = 2
else:
bidir = 1
net = nn.Sequential(
nn.GRU(emb_size, self.recurrent_hidden_size, batch_first=True, bidirectional=self.bidirectional_rnn,
num_layers=self.num_layers), # nn.Dropout(p=self.dropout)
SelectItem(1),
SwapAxes((1, 0, 2)),
Flatten(),
)
return net
def get_lin(self):
net = nn.Sequential(
PlaceHolder(),
# Reshape((1, 1)),
# nn.utils.weight_norm(nn.Conv1d(1, self.outputdim, 1)),
# nn.ReLU(),
# nn.utils.weight_norm(nn.Conv1d(self.outputdim, 1, 1)),
# nn.ReLU(),
# Flatten()
)
return net
def get_onehot(self):
net = nn.Sequential(
PlaceHolder()
)
return net
def reparameterize(self, mu, logvar):
if self.training:
std = logvar.mul(0.5).exp_()
# std = 0.5*torch.ones_like(mu)
eps = Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
else:
return mu
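    # The reparameterization above keeps sampling differentiable during training:
    # z = mu + eps * exp(0.5 * logvar) with eps ~ N(0, 1), while in eval mode the
    # mean mu is returned directly.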
def forward(self, inputs):
preprocessed_inputs = []
for inp in self.input_dict['discrete']:
# preprocessed_inputs.append(inputs[inp])
preprocessed_inputs.append(self.input_layers[inp](inputs[inp]))
for inp in self.input_dict['continuous']:
# preprocessed_inputs.append(inputs[inp])
t = self.input_layers[inp](inputs[inp])
preprocessed_inputs.append(t)
for inp in self.input_dict['text'].keys():
# GRU returns output and hn - we just want hn
# print('inp '+inp, inputs[inp].size())
# t = self.input_layers[inp](inputs[inp])[1].permute(1, 0, 2)
t = self.input_layers[inp](inputs[inp])
# print(t.size())
# t = torch.squeeze(t)
preprocessed_inputs.append(t.reshape(inputs[inp].size(0), -1))
for inp in self.input_dict['onehot'].keys():
# preprocessed_inputs.append(inputs[inp])
preprocessed_inputs.append(self.input_layers[inp](inputs[inp]))
# for p in preprocessed_inputs:
# print(p.size(), p.dtype)
x = torch.cat(preprocessed_inputs, 1)
x = x.view(x.size(0), -1, 1, 1)
        x = self.get_main(x)
x = x.view(x.size(0), -1)
mu = self.getmu(x)
'''
if self.sigmoidout:
x = torch.sigmoid(x)
#print('sigmoid')
else:
x = torch.tanh(x)
#print('tanh')
'''
# print('enc out ', self.sigmoidout, x.size())
return mu, self.getlogvar(x)
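# A rough usage sketch of the autoencoder pieces above (hypothetical names; the
# Decoder constructor arguments are defined earlier in this module and are not
# repeated here). `input_dict` describes the columns as {'discrete': [...],
# 'continuous': [...], 'text': {col: emb_size}, 'onehot': {col: n}} and `batch`
# is a dict of tensors keyed the same way:
#
#   encoder = Encoder(input_dict, dim=24)
#   mu, logvar = encoder(batch)
#   z = encoder.reparameterize(mu, logvar)
#   reconstruction = decoder(z)   # dict of per-column outputs from Decoder.forward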
| 36.559917
| 147
| 0.586324
|
6bcc38c2c21b6f3a3b0caf51bcc0b9113e5424f4
| 2,811
|
py
|
Python
|
src/emcr/skeleton.py
|
shashankvadrevu/EMCR
|
6ec548e3b9e1f97606b24e91807d655e2a180674
|
[
"MIT"
] | null | null | null |
src/emcr/skeleton.py
|
shashankvadrevu/EMCR
|
6ec548e3b9e1f97606b24e91807d655e2a180674
|
[
"MIT"
] | null | null | null |
src/emcr/skeleton.py
|
shashankvadrevu/EMCR
|
6ec548e3b9e1f97606b24e91807d655e2a180674
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a skeleton file that can serve as a starting point for a Python
console script. To run this script uncomment the following lines in the
[options.entry_points] section in setup.cfg:
console_scripts =
fibonacci = emcr.skeleton:run
Then run `python setup.py install` which will install the command `fibonacci`
inside your current environment.
Besides console scripts, the header (i.e. until _logger...) of this file can
also be used as template for Python modules.
Note: This skeleton file can be safely removed if not needed!
"""
import argparse
import sys
import logging
from emcr import __version__
__author__ = "shashankvadrevu"
__copyright__ = "shashankvadrevu"
__license__ = "mit"
_logger = logging.getLogger(__name__)
def fib(n):
"""Fibonacci example function
Args:
n (int): integer
Returns:
int: n-th Fibonacci number
"""
assert n > 0
a, b = 1, 1
for i in range(n-1):
a, b = b, a+b
return a
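# For example: fib(1) == 1, fib(5) == 5, fib(10) == 55.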
def parse_args(args):
"""Parse command line parameters
Args:
args ([str]): command line parameters as list of strings
Returns:
:obj:`argparse.Namespace`: command line parameters namespace
"""
parser = argparse.ArgumentParser(
description="Just a Fibonnaci demonstration")
parser.add_argument(
'--version',
action='version',
version='EMCR {ver}'.format(ver=__version__))
parser.add_argument(
dest="n",
help="n-th Fibonacci number",
type=int,
metavar="INT")
parser.add_argument(
'-v',
'--verbose',
dest="loglevel",
help="set loglevel to INFO",
action='store_const',
const=logging.INFO)
parser.add_argument(
'-vv',
'--very-verbose',
dest="loglevel",
help="set loglevel to DEBUG",
action='store_const',
const=logging.DEBUG)
return parser.parse_args(args)
def setup_logging(loglevel):
"""Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages
"""
logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
logging.basicConfig(level=loglevel, stream=sys.stdout,
format=logformat, datefmt="%Y-%m-%d %H:%M:%S")
def main(args):
"""Main entry point allowing external calls
Args:
args ([str]): command line parameter list
"""
args = parse_args(args)
setup_logging(args.loglevel)
_logger.debug("Starting crazy calculations...")
print("The {}-th Fibonacci number is {}".format(args.n, fib(args.n)))
_logger.info("Script ends here")
def run():
"""Entry point for console_scripts
"""
main(sys.argv[1:])
if __name__ == "__main__":
run()
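# Example invocations (assumes the package is installed and the console_scripts
# entry point from the module docstring is enabled):
#
#   $ fibonacci 10 -v
#   The 10-th Fibonacci number is 55
#
#   $ python -m emcr.skeleton 10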
| 24.025641
| 77
| 0.635005
|
cb8bef2677dd3a83ffd137ece313c87f279d6f31
| 4,737
|
py
|
Python
|
demo_site/settings.py
|
williwacker/django-qr-code
|
17aba885a82a7fde1ac784e9c183f4c7e40cd7eb
|
[
"BSD-3-Clause"
] | null | null | null |
demo_site/settings.py
|
williwacker/django-qr-code
|
17aba885a82a7fde1ac784e9c183f4c7e40cd7eb
|
[
"BSD-3-Clause"
] | null | null | null |
demo_site/settings.py
|
williwacker/django-qr-code
|
17aba885a82a7fde1ac784e9c183f4c7e40cd7eb
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Django settings for qr_code_demo project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from distutils.version import StrictVersion
import django
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from qr_code.qrcode import constants
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8l4)()f1&tg*dtxh6whlew#k-d5&79npe#j_dg9l0b)m8^g#8u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'qr_code',
'qr_code_demo'
]
django_version = StrictVersion(django.get_version())
if django_version >= StrictVersion('1.10'):
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
else:
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'demo_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'demo_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Caches.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'qr-code': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'qr-code-cache',
'TIMEOUT': 3600
}
}
# Django QR Code specific options.
QR_CODE_CACHE_ALIAS = 'qr-code'
QR_CODE_URL_PROTECTION = {
constants.TOKEN_LENGTH: 30, # Optional random token length for URL protection. Defaults to 20.
constants.SIGNING_KEY: 'my-secret-signing-key', # Optional signing key for URL token. Uses SECRET_KEY if not defined.
constants.SIGNING_SALT: 'my-signing-salt', # Optional signing salt for URL token.
constants.ALLOWS_EXTERNAL_REQUESTS_FOR_REGISTERED_USER: False # Tells whether a registered user can request the QR code URLs from outside a site that uses this app. It can be a boolean value used for any user, or a callable that takes a user as parameter. Defaults to False (nobody can access the URL without the security token).
}
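# A minimal template sketch that this configuration supports (assuming the
# template tags documented by django-qr-code; adjust to the installed version):
#
#   {% load qr_code %}
#   {% qr_from_text "Hello World!" size="m" %}
#
# QR code images served through the app's URLs are cached via the 'qr-code'
# cache alias defined above, and those URLs are protected by QR_CODE_URL_PROTECTION.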
| 31.370861
| 335
| 0.709943
|
5551d6fa219aee9b3ced7d5f7daceb7fb28b837a
| 3,239
|
py
|
Python
|
plugins/holland.backup.mysql_lvm/holland/backup/mysql_lvm/plugin/raw/util.py
|
Alibloke/holland
|
e630b511a95ed8e36205e8300e632018918223ff
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/holland.backup.mysql_lvm/holland/backup/mysql_lvm/plugin/raw/util.py
|
Alibloke/holland
|
e630b511a95ed8e36205e8300e632018918223ff
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/holland.backup.mysql_lvm/holland/backup/mysql_lvm/plugin/raw/util.py
|
Alibloke/holland
|
e630b511a95ed8e36205e8300e632018918223ff
|
[
"BSD-3-Clause"
] | null | null | null |
"""Utility functions to help out the mysql-lvm plugin"""
import os
import tempfile
import logging
from holland.core.backup import BackupError
from holland.lib.compression import open_stream
from holland.backup.mysql_lvm.actions import (
FlushAndLockMySQLAction,
RecordMySQLReplicationAction,
InnodbRecoveryAction,
TarArchiveAction,
DirArchiveAction,
)
from holland.backup.mysql_lvm.plugin.common import log_final_snapshot_size, connect_simple
from holland.backup.mysql_lvm.plugin.innodb import MySQLPathInfo, check_innodb
LOG = logging.getLogger(__name__)
def setup_actions(snapshot, config, client, snap_datadir, spooldir):
"""Setup actions for a LVM snapshot based on the provided
configuration.
Optional actions:
* MySQL locking
* InnoDB recovery
* Recording MySQL replication
"""
mysql = connect_simple(config["mysql:client"])
if mysql.show_variable("have_innodb") == "YES":
try:
pathinfo = MySQLPathInfo.from_mysql(mysql)
finally:
mysql.close()
try:
check_innodb(pathinfo, ensure_subdir_of_datadir=True)
except BackupError:
if not config["mysql-lvm"]["force-innodb-backup"]:
raise
if config["mysql-lvm"]["lock-tables"]:
extra_flush = config["mysql-lvm"]["extra-flush-tables"]
act = FlushAndLockMySQLAction(client, extra_flush)
snapshot.register("pre-snapshot", act, priority=100)
snapshot.register("post-snapshot", act, priority=100)
if config["mysql-lvm"].get("replication", True):
repl_cfg = config.setdefault("mysql:replication", {})
act = RecordMySQLReplicationAction(client, repl_cfg)
snapshot.register("pre-snapshot", act, 0)
if config["mysql-lvm"]["innodb-recovery"]:
mysqld_config = dict(config["mysqld"])
mysqld_config["datadir"] = snap_datadir
if not mysqld_config["tmpdir"]:
mysqld_config["tmpdir"] = tempfile.gettempdir()
ib_log_size = client.show_variable("innodb_log_file_size")
mysqld_config["innodb-log-file-size"] = ib_log_size
act = InnodbRecoveryAction(mysqld_config)
snapshot.register("post-mount", act, priority=100)
if config["mysql-lvm"]["archive-method"] == "dir":
try:
backup_datadir = os.path.join(spooldir, "backup_data")
os.mkdir(backup_datadir)
except OSError as exc:
raise BackupError("Unable to create archive directory '%s': %s" % (backup_datadir, exc))
act = DirArchiveAction(snap_datadir, backup_datadir, config["tar"])
snapshot.register("post-mount", act, priority=50)
else:
try:
archive_stream = open_stream(
os.path.join(spooldir, "backup.tar"), "w", **config["compression"]
)
except OSError as exc:
raise BackupError(
"Unable to create archive file '%s': %s"
% (os.path.join(spooldir, "backup.tar"), exc)
)
act = TarArchiveAction(snap_datadir, archive_stream, config["tar"])
snapshot.register("post-mount", act, priority=50)
snapshot.register("pre-remove", log_final_snapshot_size)
| 39.987654
| 100
| 0.658845
|
6a15b6798016b88098740182d6aaba11969bb524
| 11,660
|
py
|
Python
|
notify_sdk/model/easy_flow/target_info_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
notify_sdk/model/easy_flow/target_info_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
notify_sdk/model/easy_flow/target_info_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: target_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from notify_sdk.model.cmdb import cluster_info_pb2 as notify__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2
from notify_sdk.model.easy_flow import version_info_pb2 as notify__sdk_dot_model_dot_easy__flow_dot_version__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='target_info.proto',
package='easy_flow',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flow'),
serialized_pb=_b('\n\x11target_info.proto\x12\teasy_flow\x1a(notify_sdk/model/cmdb/cluster_info.proto\x1a-notify_sdk/model/easy_flow/version_info.proto\"\x9b\x04\n\nTargetInfo\x12\x10\n\x08targetId\x18\x01 \x01(\t\x12\x12\n\ntargetName\x18\x02 \x01(\t\x12\x12\n\ninstanceId\x18\x03 \x01(\t\x12\"\n\x07\x63luster\x18\x04 \x01(\x0b\x32\x11.cmdb.ClusterInfo\x12\x38\n\x0cinstanceInfo\x18\x05 \x03(\x0b\x32\".easy_flow.TargetInfo.InstanceInfo\x12:\n\roperationInfo\x18\x06 \x03(\x0b\x32#.easy_flow.TargetInfo.OperationInfo\x1a\x8b\x01\n\x0cInstanceInfo\x12\x13\n\x0bversionName\x18\x01 \x01(\t\x12+\n\x0bversionInfo\x18\x02 \x01(\x0b\x32\x16.easy_flow.VersionInfo\x12\x11\n\tpackageId\x18\x03 \x01(\t\x12\x13\n\x0binstallPath\x18\x04 \x01(\t\x12\x11\n\tversionId\x18\x05 \x01(\t\x1a\xaa\x01\n\rOperationInfo\x12\x11\n\toperation\x18\x01 \x01(\t\x12-\n\rversionToInfo\x18\x02 \x01(\x0b\x32\x16.easy_flow.VersionInfo\x12/\n\x0fversionFromInfo\x18\x03 \x01(\x0b\x32\x16.easy_flow.VersionInfo\x12\x13\n\x0binstallPath\x18\x04 \x01(\t\x12\x11\n\tpackageId\x18\x05 \x01(\tBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flowb\x06proto3')
,
dependencies=[notify__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2.DESCRIPTOR,notify__sdk_dot_model_dot_easy__flow_dot_version__info__pb2.DESCRIPTOR,])
_TARGETINFO_INSTANCEINFO = _descriptor.Descriptor(
name='InstanceInfo',
full_name='easy_flow.TargetInfo.InstanceInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='versionName', full_name='easy_flow.TargetInfo.InstanceInfo.versionName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionInfo', full_name='easy_flow.TargetInfo.InstanceInfo.versionInfo', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='easy_flow.TargetInfo.InstanceInfo.packageId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='installPath', full_name='easy_flow.TargetInfo.InstanceInfo.installPath', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionId', full_name='easy_flow.TargetInfo.InstanceInfo.versionId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=349,
serialized_end=488,
)
_TARGETINFO_OPERATIONINFO = _descriptor.Descriptor(
name='OperationInfo',
full_name='easy_flow.TargetInfo.OperationInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operation', full_name='easy_flow.TargetInfo.OperationInfo.operation', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionToInfo', full_name='easy_flow.TargetInfo.OperationInfo.versionToInfo', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionFromInfo', full_name='easy_flow.TargetInfo.OperationInfo.versionFromInfo', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='installPath', full_name='easy_flow.TargetInfo.OperationInfo.installPath', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='easy_flow.TargetInfo.OperationInfo.packageId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=491,
serialized_end=661,
)
_TARGETINFO = _descriptor.Descriptor(
name='TargetInfo',
full_name='easy_flow.TargetInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='targetId', full_name='easy_flow.TargetInfo.targetId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targetName', full_name='easy_flow.TargetInfo.targetName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceId', full_name='easy_flow.TargetInfo.instanceId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster', full_name='easy_flow.TargetInfo.cluster', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceInfo', full_name='easy_flow.TargetInfo.instanceInfo', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operationInfo', full_name='easy_flow.TargetInfo.operationInfo', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TARGETINFO_INSTANCEINFO, _TARGETINFO_OPERATIONINFO, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=122,
serialized_end=661,
)
_TARGETINFO_INSTANCEINFO.fields_by_name['versionInfo'].message_type = notify__sdk_dot_model_dot_easy__flow_dot_version__info__pb2._VERSIONINFO
_TARGETINFO_INSTANCEINFO.containing_type = _TARGETINFO
_TARGETINFO_OPERATIONINFO.fields_by_name['versionToInfo'].message_type = notify__sdk_dot_model_dot_easy__flow_dot_version__info__pb2._VERSIONINFO
_TARGETINFO_OPERATIONINFO.fields_by_name['versionFromInfo'].message_type = notify__sdk_dot_model_dot_easy__flow_dot_version__info__pb2._VERSIONINFO
_TARGETINFO_OPERATIONINFO.containing_type = _TARGETINFO
_TARGETINFO.fields_by_name['cluster'].message_type = notify__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2._CLUSTERINFO
_TARGETINFO.fields_by_name['instanceInfo'].message_type = _TARGETINFO_INSTANCEINFO
_TARGETINFO.fields_by_name['operationInfo'].message_type = _TARGETINFO_OPERATIONINFO
DESCRIPTOR.message_types_by_name['TargetInfo'] = _TARGETINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TargetInfo = _reflection.GeneratedProtocolMessageType('TargetInfo', (_message.Message,), {
'InstanceInfo' : _reflection.GeneratedProtocolMessageType('InstanceInfo', (_message.Message,), {
'DESCRIPTOR' : _TARGETINFO_INSTANCEINFO,
'__module__' : 'target_info_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.TargetInfo.InstanceInfo)
})
,
'OperationInfo' : _reflection.GeneratedProtocolMessageType('OperationInfo', (_message.Message,), {
'DESCRIPTOR' : _TARGETINFO_OPERATIONINFO,
'__module__' : 'target_info_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.TargetInfo.OperationInfo)
})
,
'DESCRIPTOR' : _TARGETINFO,
'__module__' : 'target_info_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.TargetInfo)
})
_sym_db.RegisterMessage(TargetInfo)
_sym_db.RegisterMessage(TargetInfo.InstanceInfo)
_sym_db.RegisterMessage(TargetInfo.OperationInfo)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 46.64
| 1,148
| 0.761407
|
a560a32700f691dc73b420e6e6856a4d4560d976
| 19,233
|
py
|
Python
|
edexOsgi/com.raytheon.uf.common.aviation/utility/common_static/base/aviation/python/GridData.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
edexOsgi/com.raytheon.uf.common.aviation/utility/common_static/base/aviation/python/GridData.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
edexOsgi/com.raytheon.uf.common.aviation/utility/common_static/base/aviation/python/GridData.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | 1
|
2021-10-30T00:03:05.000Z
|
2021-10-30T00:03:05.000Z
|
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
#
# Name:
# GridData.py
# GFS1-NHD:A7800.0000-SCRIPT;1.25
#
# Status:
# DELIVERED
#
# History:
#
# Revision AWIPS II NJENSEN
# Updated to work with AWIPS II and directly retrieve GFE data
#
# Revision 1.25 (DELIVERED)
# Created: 29-OCT-2008 12:51:49 OBERFIEL
# Non-operationally important weather now ignored, e.g. FR ==
# frost
#
# Revision 1.24 (DELIVERED)
# Created: 18-APR-2008 14:53:34 OBERFIEL
# Updated header in TAF viewer
#
# Revision 1.23 (DELIVERED)
# Created: 13-JUN-2006 14:33:00 BLI
# Fixed for unable to handle missing cigHt
#
# Revision 1.22 (DELIVERED)
# Created: 12-JUN-2006 16:00:51 BLI
# Fixed for missing CigHt
#
# Revision 1.21 (DELIVERED)
# Created: 16-MAY-2006 12:06:55 BLI
# multiplied cig with 100
#
# Revision 1.20 (DELIVERED)
# Created: 04-MAY-2006 09:54:40 BLI
# Grids are now displaying 'CigHt' instead of 'CigCat'.
#
# Revision 1.19 (DELIVERED)
# Created: 29-JAN-2006 14:26:50 TROJAN
# Fixed fog key in the symbol translation dictionary
#
# Revision 1.18 (APPROVED)
# Created: 29-JAN-2006 12:16:16 TROJAN
# stdr 958
#
# Revision 1.17 (DELIVERED)
# Created: 06-JUL-2005 20:50:38 TROJAN
# spr 6910
#
# Revision 1.16 (DELIVERED)
# Created: 07-MAY-2005 11:33:53 OBERFIEL
# Added Item Header Block
#
# Revision 1.15 (DELIVERED)
# Created: 04-APR-2005 15:51:06 TROJAN
# spr 6775
#
# Revision 1.14 (APPROVED)
# Created: 21-MAR-2005 15:32:31 TROJAN
# spr 6733
#
# Revision 1.13 (DELIVERED)
# Created: 07-MAR-2005 22:43:02 TROJAN
# spr 6710
#
# Revision 1.12 (APPROVED)
# Created: 04-MAR-2005 15:22:45 TROJAN
# spr 6699
#
# Revision 1.11 (APPROVED)
# Created: 15-FEB-2005 18:12:21 TROJAN
# spr 6561
#
# Revision 1.10 (DELIVERED)
# Created: 04-FEB-2005 19:12:38 BLI
# Added variable wind
#
# Revision 1.9 (APPROVED)
# Created: 01-FEB-2005 20:30:15 BLI
# Fixed to include intensity
#
# Revision 1.8 (APPROVED)
# Created: 01-FEB-2005 18:59:02 BLI
# Fixed it to include prob30 group
#
# Revision 1.7 (APPROVED)
# Created: 01-FEB-2005 18:45:18 BLI
# Make GridViewer call a new taf formater
#
# Revision 1.6 (APPROVED)
# Created: 24-JAN-2005 15:51:13 TROJAN
# spr 6259
#
# Revision 1.5 (APPROVED)
# Created: 24-NOV-2004 19:15:00 OBERFIEL
# Fixed logic problem while processing new format IFPS grid
# files
#
# Revision 1.4 (APPROVED)
# Created: 21-OCT-2004 19:36:11 TROJAN
# spr 6420
#
# Revision 1.3 (APPROVED)
# Created: 30-SEP-2004 20:22:10 TROJAN
# stdr 873
#
# Revision 1.2 (APPROVED)
# Created: 19-AUG-2004 20:42:56 OBERFIEL
# Code change
#
# Revision 1.1 (APPROVED)
# Created: 01-JUL-2004 14:40:40 OBERFIEL
# date and time created -2147483647/-2147483648/-2147481748
# -2147483648:-2147483648:-2147483648 by oberfiel
#
# Change Document History:
# 1:
# Change Document: GFS1-NHD_SPR_7383
# Action Date: 06-NOV-2008 15:25:22
# Relationship Type: In Response to
# Status: TEST
# Title: AvnFPS: Lack of customization in QC check
#
# <pre>
#
# Retrieves data at airport locations as points from GFE and builds a data structure
# from that data. Partially ported from AWIPS 1 GridData.py.
#
# SOFTWARE HISTORY
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# Initial creation.
# Mar 07, 2013 1735 rferrel Changes to obtain grid data for a list of sites.
# Apr 23, 2014 3006 randerso Fix Wx parsing, handling of missing pdcs
# Feb 23, 2018 7227 njensen Request Vsby, PredHgt, CigHgt to get variable
# visibility and ceiling in NDFD TAFs
#
##
#
##
# This is a base file that is not intended to be overridden.
##
import logging, time, ConfigParser
import Avn, AvnLib, AvnParser, JUtil, cPickle
import PointDataView, GfeValues
# These variables are unchanged from AWIPS 1 AvnFPS
_Missing = ''
_WxCode = {'L': 'DZ', 'R': 'RA', 'RW': 'SHRA', 'ZL': 'FZDZ', 'ZR': 'FZRA', \
'IP': 'PL', 'IPW': 'SHPL', 'S': 'SN', 'SP': 'SG', 'SW': 'SHSN', \
'K': 'FU', 'IC': 'IC', 'BR': 'BR', 'FG': 'FG', 'H': 'HZ', \
'BS': 'BLSN', 'BD': 'BLDU'}
_IntCode = {'--': '-', '-': '-', 'm': '', '+': '+'}
_Keys = ['Temp', 'DwptT', 'WDir', 'WSpd', 'WGust', 'Obvis', 'Vsby', \
'Sky', 'PrdHt', 'CigHt', 'Tstm', 'PTyp1', 'Ints1', 'Prob1', 'PTyp2', \
'Ints2', 'Prob2', 'PTyp3', 'Ints3', 'Prob3']
_NumHours = 36
# Parameters to request from GFE. These were determined by the parameters
# requested in the AWIPS 1 AvnFPS IFPS2AvnFPS.py's _makeProduct().
Parameters = ['Sky', 'T', 'Td', 'Wind', 'WindGust', 'PoP', 'Wx',
'Vsby', 'PredHgt', 'CigHgt']
# Translations of AvnFPS parameter names to GFE parameter names
Translate = { 'Sky':'Sky', 'Temp':'T', 'DwptT':'Td', 'WDir':'WindDir', 'WSpd':'WindSpd', 'WGust':'WindGust',
'PoP1h':'PoP', 'Obvis':'Wx', 'PoP':'PoP', 'Tstm':'Wx', 'Tint':'Wx', 'PTyp1':'Wx', 'Prob1':'Wx', 'Ints1':'Wx',
'PTyp2':'Wx', 'Prob2':'Wx', 'Ints2':'Wx', 'PTyp3':'Wx', 'Prob3':'Wx', 'Ints3':'Wx',
'Vsby': 'Vsby', 'PrdHt': 'PredHgt', 'CigHt': 'CigHgt' }
# 'PrdCt': 'PredHgtCat', 'CigCt': 'CigHgtCat'
_Logger = logging.getLogger(Avn.CATEGORY)
##############################################################################
# This function is unchanged from the AWIPS 1 AvnFPS function.
def _wxcode(c):
return _WxCode.get(c, _Missing)
# This function is unchanged from the AWIPS 1 AvnFPS function.
def _intcode(c):
return _IntCode.get(c, _Missing)
# This function is unchanged from the AWIPS 1 AvnFPS function.
def _vis(c):
try:
v = int(c)
if v < 999:
return v/100.0
except ValueError:
pass
return 999
# This function is unchanged from the AWIPS 1 AvnFPS function.
def _cldHgt(c):
try:
v = int(c)
if v < 999:
return AvnLib.fixCldBase(v)
except ValueError:
pass
return 999
# This function is unchanged from the AWIPS 1 AvnFPS function.
def _cig(c):
try:
if c != '999':
v = int(c)/100
else:
v = int(c)
if v < 0:
v = 250
if v < 999:
return AvnLib.fixCldBase(v)
except ValueError:
pass
return 999
# This function is unchanged from the AWIPS 1 AvnFPS function.
def _skycode(c):
try:
ic = int(c)
if ic == 0:
return 'SKC'
elif ic < 3:
return 'FEW'
elif ic < 6:
return 'SCT'
elif ic < 10:
return 'BKN'
elif ic == 10:
return 'OVC'
else:
return ''
except ValueError:
return ''
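# Example mappings for _skycode: '0' -> 'SKC', '2' -> 'FEW', '5' -> 'SCT',
# '8' -> 'BKN', '10' -> 'OVC'; non-numeric input or values above 10 yield ''.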
# This function is unchanged from the AWIPS 1 AvnFPS function.
def _stripmsng(c):
if c == '999':
return _Missing
else:
return c
# This function is unchanged from the AWIPS 1 AvnFPS function.
def _winddir(c):
try:
v = int(c)
if 0 <= v <= 36:
return 10*v
except ValueError:
pass
return None
# This function is unchanged from the AWIPS 1 AvnFPS function.
def _windspd(c):
try:
v = int(c)
if 0 <= v <= 200:
return v
except ValueError:
pass
return None
# This function is ported from AWIPS 1 AvnFPS to return the same
# data structure as AWIPS 1 AvnFPS returned.
def _getData(pdc, firstTime):
organizedData = {}
data = []
if pdc is not None :
for i in range(pdc.getCurrentSz()):
jpdv = pdc.readRandom(i)
jpdv.setContainer(pdc)
pdv = PointDataView.PointDataView(jpdv)
fcstHr = round(((pdv['time'] - firstTime) / 1000) / 3600)
organizedData[fcstHr] = pdv
for n in range(_NumHours):
dd = {'time': 3600.0*n+(firstTime / 1000)}
dd = _createRecord(dd, organizedData[n])
data.append(dd)
else :
return None
return {'issuetime': (firstTime / 1000), 'record': data}
# This function is a port of AWIPS 1 AvnFPS' function _cvt(itime, d).
def _createRecord(dd, pdv):
for k in Translate:
try:
arg = pdv[Translate[k]]
        except Exception:
            # Parameter not available for this hour: keep the '999' missing
            # sentinel and skip the conversions below, which all need `arg`.
            dd[k] = '999'
            continue
if k in ['Temp', 'DwptT']:
v = int(GfeValues.scalarValue(arg))
elif k == 'Obvis':
v = _wxcode(GfeValues.obvisValue(arg))
elif k == 'Sky':
v = _skycode(GfeValues.skyValue(arg))
elif k == 'WDir':
v = _winddir(GfeValues.windDirValue(arg))
elif k in ['WSpd', 'WGust']:
if k == 'WSpd':
v = GfeValues.windMagValue(arg)
elif k == 'WGust':
v = GfeValues.scalarValue(arg)
v = _windspd(v)
elif k == 'PrdHt':
v = _cldHgt(GfeValues.scalarValue(arg))
elif k == 'CigHt':
v = _cig(GfeValues.scalarValue(arg))
elif k == 'Vsby':
v = _vis(GfeValues.vsbyValue(arg))
elif k == 'Tstm':
v = _stripmsng(GfeValues.wxTstm(arg))
elif k == 'Tint':
v = _stripmsng(GfeValues.wxTstmInt(arg))
elif k[:4] == 'Prob':
v = _stripmsng(GfeValues.wxValCov(arg, int(k[4])))
elif k[:4] == 'PTyp':
v = _wxcode(GfeValues.wxVal(arg, int(k[4])))
elif k[:4] == 'Ints':
v = _intcode(GfeValues.wxValInst(arg, int(k[4])))
else:
v = _stripmsng(arg)
dd[k] = v
for ptype, pints, pprob in [('PTyp1','Ints1','Prob1'),
('PTyp2','Ints2','Prob2'),
('PTyp3', 'Ints3', 'Prob3')]:
try:
if dd[ptype] == _Missing:
dd[pints] = dd[pprob] = _Missing
except KeyError:
pass
return dd
# This function is unchanged from the AWIPS 1 AvnFPS function.
def _setPop(config,prbStr,dt):
if dt<=9*3600:
return config['before9hr'][prbStr]
else:
return config['after9hr'][prbStr]
# This function is unchanged from the AWIPS 1 AvnFPS function.
def _makePeriod(prbConfig,rec,itime):
grp = {'time': {'from': rec['time'], 'to': rec['time']+3600.0}}
dd = rec.get('WDir')
ff = rec.get('WSpd')
if not None in (dd, ff):
gg = rec.get('WGust')
if gg is None or gg <= ff:
gg = None
if gg is not None and gg > ff:
grp['wind'] = {'dd': dd, 'ff': ff, 'gg': gg, \
'str': '%03d%02dG%02dKT' % (dd, ff, gg)}
else:
grp['wind'] = {'dd': dd, 'ff': ff, 'str': '%03d%02dKT' % (dd, ff)}
pot = 0
if 'Tstm' in rec and rec['Tstm']:
pot = _setPop(prbConfig,rec['Tstm'],rec['time']-itime)
max_pop = 0
p_str = ""
for i in range(3):
pstr = 'Prob%d' % i; istr = 'Ints%d' % i; tstr = 'PTyp%d' % i
if pstr in rec and istr in rec and tstr in rec:
if rec[pstr]:
pop = _setPop(prbConfig,rec[pstr],rec['time']-itime)
if pop > max_pop:
max_pop = pop
p_str = rec[istr]+rec[tstr]
grp['pcp'] = {'str': p_str, 'pop': max_pop, 'pot': pot}
h = rec.get('PrdHt')
cig = rec.get('CigHt')
if cig:
cig = cig*100
else:
cig = 3000
if cig < 0:
cig = Avn.UNLIMITED
if h is None or h == 999:
h = 300
else:
h = AvnLib.fixCldBase(h)
s = rec.get('Sky', None)
if s:
cld = s
cover = {'SKC': 0, 'FEW': 1, 'SCT': 2, 'BKN': 3, 'OVC': 4}
if cld == 'SKC':
grp['sky'] = {'str': 'SKC', 'cig': Avn.UNLIMITED, \
'cover': cover[cld]}
else:
grp['sky'] = {'str': '%s%03d' % (cld, h), 'cig': cig, \
'cover': cover[cld]}
# 3.2 formatter adds CB when TS is present
# if 'Tstm' in rec and rec['Tstm']:
# grp['sky']['str'] = grp['sky']['str'] + 'CB'
obv = rec.get('Obvis')
v = rec.get('Vsby')
if v is None:
if obv:
grp['vsby'] = {'str': '6SM', 'value': 6.0}
else:
grp['vsby'] = {'str': 'P6SM', 'value': 99.0}
elif v >= 6.5:
grp['vsby'] = {'str': 'P6SM', 'value': 99.0}
else:
grp['vsby'] = AvnLib.fixTafVsby(v)
if not obv and grp['pcp']['str'] == '':
if v <= 0.6:
obv = 'FG'
else:
obv = 'BR'
grp['obv'] = {'str': obv}
return grp
# This function is unchanged from the AWIPS 1 AvnFPS function.
def _readPrbConf():
conf=ConfigParser.ConfigParser()
conf.read(Avn.getTafPath('XXXX', 'grid_prob.cfg'))
prb_conf={'before9hr': {},'after9hr': {}}
wxkeys = ['S', 'IS', 'WS', 'SC', 'NM', 'O', 'C', 'D', 'WP', 'L']
for wx in wxkeys:
prb_conf['before9hr'][wx] = conf.getint('before9hr',wx)
prb_conf['after9hr'][wx] = conf.getint('after9hr',wx)
return prb_conf
# This function is ported from AWIPS 1 AvnFPS to return the same
# data structure as AWIPS 1 AvnFPS returned.
def makeData(siteID, timeSeconds):
data = retrieveData(siteID, timeSeconds)
return formatData(siteID, timeSeconds, data)
# This function was extracted from AWIPS 1 AvnFPS' makeData(ident, text).
def formatData(siteID, timeSeconds, data):
if data is None or data['issuetime'] < time.time() - 86400:
msg = 'Grid data is not available'
_Logger.info(msg)
raise Avn.AvnError(msg)
d = __formatData(data, siteID)
return d
# This function was extracted from AWIPS 1 AvnFPS' makeData(ident, text).
def __formatData(data, siteID):
if data is not None:
prbConfig=_readPrbConf()
itime = data['issuetime']
d = {'itime': {'value': itime, 'str': time.strftime('%d%H%MZ', \
time.gmtime(itime))}, 'ident': {'str': siteID}}
d['group'] = [_makePeriod(prbConfig,data['record'][n],itime) \
for n in range(_NumHours)]
return d
# This function is unchanged from the AWIPS 1 AvnFPS function.
def makeTable(siteID, timeSeconds):
def _tostr(x):
s = str(x)
if s in ['-1', '999']:
s = ''
return s
data = retrieveData(siteID, timeSeconds)
if data is None or data.get('issuetime', 0.0) < time.time() - 86400:
        msg = 'Grid data for %s is not available' % siteID
_Logger.info(msg)
raise Avn.AvnError(msg)
rpt = {'header': '', 'hours': [], 'element': [], 'data': []}
itime = data['issuetime']
rpt = ['%s NDFD Guidance %s' % (siteID, \
time.strftime('%x %H%M UTC', time.gmtime(itime)))]
rpt.append('hour ' + ' '.join([time.strftime('%H', \
time.gmtime(itime+3600.0*n)) for n in range(_NumHours)]))
try:
for k in _Keys:
if not k in data['record'][0]:
continue
tok = [_tostr(data['record'][n][k]) for n in range(_NumHours)]
rpt.append('%-5s' % k + '%+5s' * _NumHours % tuple(tok))
return Avn.Bunch(data=__formatData(data, siteID), rpt=rpt)
except KeyError:
_Logger.info('Grid data for %s is not available', siteID)
        msg = 'Grid data for %s is not available' % siteID
raise Avn.AvnError(msg)
def retrieveData(siteID, timeSeconds, parameters=Parameters):
results = _retrieveMapData([siteID], timeSeconds, parameters)
return results[siteID]
def retrieveMapData(siteIDs, timeSeconds, parameters=Parameters):
r = _retrieveMapData(siteIDs, timeSeconds, parameters=Parameters)
results = {}
for siteID in siteIDs:
results[siteID] = cPickle.dumps(r[siteID])
return JUtil.pyDictToJavaMap(results)
# New function in AWIPS 2. Determine the latitude and longitude of each site
# in siteIDs and then makes a request to send to EDEX for GFE data.
def _retrieveMapData(siteIDs, timeSeconds, parameters=Parameters):
import JUtil
from com.raytheon.uf.common.dataplugin.gfe.request import GetPointDataRequest
from com.vividsolutions.jts.geom import Coordinate
from com.raytheon.viz.aviation.guidance import GuidanceUtil
from com.raytheon.uf.viz.core.localization import LocalizationManager
gfeSiteId = LocalizationManager.getInstance().getCurrentSite()
task = GetPointDataRequest()
task.setSiteID(gfeSiteId)
db = gfeSiteId + '_GRID__Official_00000000_0000'
task.setDatabaseID(db)
for siteID in siteIDs:
config = AvnParser.getTafSiteCfg(siteID)
lat = config['geography']['lat']
lon = config['geography']['lon']
c = Coordinate(float(lon), float(lat))
task.addCoordinate(c)
task.setNumberHours(_NumHours)
task.setStartTime(long(timeSeconds * 1000))
for p in parameters:
task.addParameter(p)
pdcs = GuidanceUtil.getGFEPointsData(task)
results = {}
    if pdcs is None:
        for siteID in siteIDs:
            _Logger.info('Data not available for %s', siteID)
            results[siteID] = None
return results
for i, siteID in enumerate(siteIDs):
data = None
if i < pdcs.getSize():
pdc = pdcs.getContainer(i)
data = _getData(pdc, timeSeconds * 1000)
if data is None:
_Logger.info('Data not available for %s', siteID)
results[siteID] = data
return results
###############################################################################
| 34.283422
| 122
| 0.542089
|
83318e398572b3738fc257571cdb6bc759a514c1
| 7,490
|
py
|
Python
|
addon-fEVR/rootfs/var/www/html/cgi-bin/events.py
|
BeardedTek/BeardedTek-hassio-addons
|
1f3468417d98c0939a405770a4efedf7e52a43df
|
[
"MIT"
] | 2
|
2021-12-26T03:03:48.000Z
|
2022-03-07T03:11:45.000Z
|
addon-fEVR/rootfs/var/www/html/cgi-bin/events.py
|
BeardedTek/BeardedTek-hassio-addons
|
1f3468417d98c0939a405770a4efedf7e52a43df
|
[
"MIT"
] | 1
|
2021-12-18T21:48:20.000Z
|
2022-03-07T17:51:35.000Z
|
addon-fEVR/rootfs/var/www/html/cgi-bin/events.py
|
BeardedTek/BeardedTek-hassio-addons
|
1f3468417d98c0939a405770a4efedf7e52a43df
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# fEVR (Frigate Event Video Recorder) Events Menu
#
# Copyright (C) 2021-2022 The Bearded Tek (http://www.beardedtek.com) William Kenny
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
stub='../stub/event.html'
class events:
def __init__(self):
from os.path import basename
self.script = basename(__file__)
from logit import logit
self.error = logit()
self.getOptions()
self.noResults = False
def getOptions(self):
import cgi
fieldStorage = cgi.FieldStorage()
self.count=""
self.order=""
self.selectors={}
self.extraOptions={}
for key in fieldStorage.keys():
item = fieldStorage.getvalue(key)
if key == 'count':
self.count = item
elif key == 'order':
self.order = item
            elif key in ['camera','type','ack','clip','snap','score']:
                self.selectors[key] = item
            else:
                self.extraOptions[key] = item
def getEvent(self,event,thumbSize=180,location='/var/www/html/events/'):
eventPATH = f"{location}{event['id']}"
snapPATH = f"{eventPATH}{self.frigate['snap']}"
thumbPATH= f"{eventPATH}/thumb.jpg"
clipPATH = f"{location}{event['id']}{self.frigate['clip']}"
if not os.path.exists(thumbPATH):
if not os.path.exists(eventPATH):
os.makedirs(eventPATH)
snapURL = f"{self.frigate['url']}{self.frigate['api']}{event['id']}{self.frigate['snap']}"
clipURL = f"{self.frigate['url']}{self.frigate['api']}{event['id']}{self.frigate['clip']}"
import requests
snap = requests.get(snapURL, allow_redirects=True)
open(snapPATH,'wb').write(snap.content)
self.resizeImg(snapPATH,thumbPATH,thumbSize)
clip = requests.get(clipURL, allow_redirects=True)
open(clipPATH,'wb').write(clip.content)
def resizeImg(self,img,thumbPATH,height=180,ratio=(16/9)):
# Resizes an image from the filesystem
size = (int((height*ratio)),height)
from PIL import Image as CompressImage
picture = CompressImage.open(img)
thumb = picture.resize(size)
thumb.save(thumbPATH,"JPEG",optimize=True)
def convertTZ(self,dt_str):
from datetime import datetime
from dateutil import tz
import pytz
format = "%Y-%m-%d %H:%M:%S"
dt_utc = datetime.strptime(dt_str,format)
dt_utc = dt_utc.replace(tzinfo=pytz.UTC)
return dt_utc.astimezone(pytz.timezone('America/Anchorage'))
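    # convertTZ (above) interprets the naive "%Y-%m-%d %H:%M:%S" string as UTC
    # and converts it to America/Anchorage local time; the target timezone is
    # currently hard-coded rather than taken from configuration.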
def noEvents(self):
if os.path.isfile(stub):
with open(stub) as eventStub:
url = "/install.html"
thumbURL = "/img/not_available.jpg"
caption = "No Events Found"
data = eventStub.read()
data = data.replace('##EVENT_URL##',url)
data = data.replace('##EVENT_IMG##',thumbURL)
data = data.replace('##EVENT_CAPTION##',caption)
if data:
return data
def generateEventDiv(self,event):
from datetime import datetime
time = datetime.fromtimestamp(int(event['id'].split('.')[0]))
ftime = str(self.convertTZ(str(time)))
event['time'] = ftime[ftime.index('-')+1:]
if os.path.isfile(stub):
with open(stub) as eventStub:
if str(event['ack']).lower() != "true":
newMarker = "NEW"
else:
newMarker = ""
url = f"/cgi-bin/event.py?id={event['id']}"
thumbURL = f"/events/{event['id']}/thumb.jpg"
caption = f"{event['time']}<br/>\n {event['type']} detected in {event['camera']}"
data = eventStub.read()
data = data.replace('##EVENT_URL##',url)
data = data.replace('##EVENT_IMG##',thumbURL)
data = data.replace('##EVENT_CAPTION##',caption)
data = data.replace('##NEW##',newMarker)
if data:
return data
def getStub(self,stub):
if os.path.isfile(stub):
with open(stub) as Stub:
return Stub.read()
def getEvents(self,count=False,selectors=False,order=False):
        # Build the WHERE clause; multiple selectors are combined with AND.
        wheres = ""
        if selectors:
            for key in selectors:
                self.error.execute(selectors[key],src=self.script)
                self.error.execute(f"COUNT: {count}",src=self.script)
                if wheres:
                    wheres += f""" AND {key}='{selectors[key]}'"""
                else:
                    wheres = f"""WHERE {key}='{selectors[key]}'"""
        sql = """SELECT * FROM events """
        if wheres:
            sql += wheres + " "
        if order:
            if order['field'] and order['direction']:
                sql += f"""ORDER BY {order['field']} {order['direction']}"""
        else:
            sql += """ORDER BY event DESC"""
        if count:
            sql += f""" LIMIT {str(count)};"""
from fetch import fetchEvent
from sqlite import sqlite
fsqlite = sqlite()
fsqlite.open(self.fevr['db'])
self.error.execute(sql,src=self.script)
items = fsqlite.retrieve(sql)
self.error.execute(f"# ITEMS: {len(items)}",src=self.script)
data =""
for row in items:
event = {}
event['id'] = row[1]
event['camera'] = row[2]
event['type'] = row[3]
event['ack'] = row[4]
event['clip'] = row[5]
event['snap'] = row[6]
event['score'] = row[7]
fetchevent = fetchEvent(self.frigate,event['id'])
fetchevent.execute()
data += self.generateEventDiv(event)
if len(items) < 1:
self.noResults = True
return data
def execute(self):
from config import Config
fconfig = Config()
print('content-type: text/html; charset=UTF-8\n\n')
print()
if fconfig.exists:
self.debug = fconfig.debug
self.config = fconfig.config
self.frigate = self.config['frigate']
self.fevr = self.config['fevr']
self.error.execute(self.frigate,src=self.script)
self.error.execute(self.fevr,src=self.script)
content = self.getEvents(self.count,self.selectors,self.order)
else:
content = self.getStub("/var/www/html/config.html")
if self.noResults:
content = self.noEvents()
header = self.getStub("/var/www/html/stub/eventsHeader.html")
footer = self.getStub("/var/www/html/stub/eventsFooter.html")
print(f"{header}{content}{footer}")
def main():
fevents = events()
fevents.execute()
main()
| 41.381215
| 102
| 0.557677
|
274400daaf7a8f1786e00c9d36a6514fcfbb666b
| 1,276
|
py
|
Python
|
predict.py
|
KelvinKim1/promotion_lab
|
7e2847146383dc8948f9004a1217c1263de4ab56
|
[
"MIT"
] | null | null | null |
predict.py
|
KelvinKim1/promotion_lab
|
7e2847146383dc8948f9004a1217c1263de4ab56
|
[
"MIT"
] | null | null | null |
predict.py
|
KelvinKim1/promotion_lab
|
7e2847146383dc8948f9004a1217c1263de4ab56
|
[
"MIT"
] | null | null | null |
import regression_tree
import Bayes_classifier
import sys
import csv
data = []
with open("training_set.csv") as csvfile, open("test_set.csv") as csvfile2, open("predicted.csv","w") as t:
readCSV = csv.reader(csvfile, delimiter=',')
readCSV2 = csv.reader(csvfile2, delimiter=',')
temp = csv.writer(t,delimiter=',')
for row in readCSV:
data.append(list(row))
tree = regression_tree.buildtree(data, min_gain = 0.001, min_samples = 5)
result = Bayes_classifier.make_naive_bayes_classfier(data)
instance = 0
count1 = 0
count2 = 0
for row in readCSV2:
instance+=1
probability = regression_tree.classify(row,tree)
if probability < 0.5:
predicted1 = 0
else:
predicted1 = 1
predicted2 = Bayes_classifier.naive_bayes_classify(row,result)
if predicted1 == int(row[-1]):
count1+=1
if predicted2 == int(row[-1]):
count2+=1
output = [instance,row[-1],predicted1, probability]
temp.writerow(output)
print("Accuracy of decision tree is ")
print(count1/instance)
print("Accuracy of naive Bayes classfier is ")
print(count2/instance)
csvfile.close()
csvfile2.close()
t.close()
| 25.019608
| 107
| 0.626176
|
2c2ad689e2c1ae754f37ee6ed5ba2c30ff57db1d
| 2,459
|
py
|
Python
|
recipe_scrapers/hellofresh.py
|
chrisbubernak/recipe-scrapers
|
1617a542f6e728552886aebd29f14e97feeb8f5c
|
[
"MIT"
] | 1
|
2021-02-07T17:48:09.000Z
|
2021-02-07T17:48:09.000Z
|
recipe_scrapers/hellofresh.py
|
chrisbubernak/recipe-scrapers
|
1617a542f6e728552886aebd29f14e97feeb8f5c
|
[
"MIT"
] | null | null | null |
recipe_scrapers/hellofresh.py
|
chrisbubernak/recipe-scrapers
|
1617a542f6e728552886aebd29f14e97feeb8f5c
|
[
"MIT"
] | null | null | null |
import re
from ._abstract import AbstractScraper
from ._utils import get_minutes, normalize_string, get_yields
class HelloFresh(AbstractScraper):
@classmethod
def host(self, domain="com"):
return f"hellofresh.{domain}"
def title(self):
return self.soup.find("h1").get_text()
def total_time(self):
return get_minutes(
self.soup.find(
"span", {"data-translation-id": "recipe-detail.preparation-time"}
).parent.parent
)
def yields(self):
return get_yields(
self.soup.find(
"span",
{"data-translation-id": "recipe-detail.recipe-detail.serving-amount"},
)
.parent.parent.find("div", {"variant": "secondary"})
.contents[0]
)
def ingredients(self):
ingredients_container = self.soup.find(
"div", {"data-test-id": "recipeDetailFragment.ingredients"}
)
ingredients = ingredients_container.findAll("p")
return [
" ".join(
[
normalize_string(ingredient_first_part.get_text()),
normalize_string(ingredient_second_part.get_text()),
]
).strip()
for ingredient_first_part, ingredient_second_part in zip(
ingredients[0::2], ingredients[1::2]
)
]
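    # Each ingredient is rendered as two consecutive <p> tags (typically the
    # amount and the name), so zipping the even- and odd-indexed paragraphs
    # reassembles them into single "amount name" strings.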
def instructions(self):
instructions_regex = re.compile(r"recipeDetailFragment.instructions.step-(\d)")
instructions_container = self.soup.findAll(
"div", {"data-test-id": instructions_regex}
)
instructions = [subdiv.findAll("p") for subdiv in instructions_container]
instructions = sum(instructions, []) # flatten
return "\n".join(
[
" ".join(
[
str(instruction_order) + ")",
normalize_string(instruction.get_text()),
]
)
for instruction_order, instruction in zip(
range(1, len(instructions) + 1), instructions
)
]
)
def image(self):
container = self.soup.find("div", {"class": "recipe-header-left"})
if not container:
return None
image = container.find("img", {"src": True})
return image["src"] if image else None
| 29.626506
| 87
| 0.53233
|
72571bf4fad4e7b4e8346f4b64a7455ee6ce740c
| 30,036
|
py
|
Python
|
zerver/tests/test_docs.py
|
yuroitaki/zulip
|
95303a9929424b55a1f7c7cce9313c4619a9533b
|
[
"Apache-2.0"
] | 1
|
2022-01-26T14:45:16.000Z
|
2022-01-26T14:45:16.000Z
|
zerver/tests/test_docs.py
|
jai2201/zulip
|
95303a9929424b55a1f7c7cce9313c4619a9533b
|
[
"Apache-2.0"
] | null | null | null |
zerver/tests/test_docs.py
|
jai2201/zulip
|
95303a9929424b55a1f7c7cce9313c4619a9533b
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
from typing import TYPE_CHECKING, Any, Dict, Sequence
from unittest import mock, skipUnless
from urllib.parse import urlsplit
import orjson
from django.conf import settings
from django.test import override_settings
from django.utils.timezone import now as timezone_now
from corporate.models import Customer, CustomerPlan
from zerver.context_processors import get_apps_page_url
from zerver.lib.integrations import INTEGRATIONS
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import HostRequestMock
from zerver.models import Realm, get_realm
from zerver.views.documentation import add_api_uri_context
if TYPE_CHECKING:
from django.test.client import _MonkeyPatchedWSGIResponse as TestHttpResponse
class DocPageTest(ZulipTestCase):
def get_doc(self, url: str, subdomain: str) -> "TestHttpResponse":
if url[0:23] == "/integrations/doc-html/":
return self.client_get(url, subdomain=subdomain, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
return self.client_get(url, subdomain=subdomain)
def print_msg_if_error(self, url: str, response: "TestHttpResponse") -> None: # nocoverage
if response.status_code == 200:
return
print("Error processing URL:", url)
if response.get("Content-Type") == "application/json":
content = orjson.loads(response.content)
print()
print("======================================================================")
print("ERROR: {}".format(content.get("msg")))
print()
def _test(
self,
url: str,
expected_content: str,
extra_strings: Sequence[str] = [],
landing_missing_strings: Sequence[str] = [],
landing_page: bool = True,
doc_html_str: bool = False,
) -> None:
# Test the URL on the "zephyr" subdomain
result = self.get_doc(url, subdomain="zephyr")
self.print_msg_if_error(url, result)
self.assertEqual(result.status_code, 200)
self.assertIn(expected_content, str(result.content))
for s in extra_strings:
self.assertIn(s, str(result.content))
if not doc_html_str:
self.assert_in_success_response(
['<meta name="robots" content="noindex,nofollow" />'], result
)
# Test the URL on the root subdomain
result = self.get_doc(url, subdomain="")
self.print_msg_if_error(url, result)
self.assertEqual(result.status_code, 200)
self.assertIn(expected_content, str(result.content))
if not doc_html_str:
self.assert_in_success_response(
['<meta name="robots" content="noindex,nofollow" />'], result
)
for s in extra_strings:
self.assertIn(s, str(result.content))
if not landing_page:
return
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
# Test the URL on the root subdomain with the landing page setting
result = self.get_doc(url, subdomain="")
self.print_msg_if_error(url, result)
self.assertEqual(result.status_code, 200)
self.assertIn(expected_content, str(result.content))
for s in extra_strings:
self.assertIn(s, str(result.content))
for s in landing_missing_strings:
self.assertNotIn(s, str(result.content))
if not doc_html_str:
# Every page has a meta-description
self.assert_in_success_response(['<meta name="description" content="'], result)
self.assert_not_in_success_response(
['<meta name="robots" content="noindex,nofollow" />'], result
)
# Test the URL on the "zephyr" subdomain with the landing page setting
result = self.get_doc(url, subdomain="zephyr")
self.print_msg_if_error(url, result)
self.assertEqual(result.status_code, 200)
self.assertIn(expected_content, str(result.content))
for s in extra_strings:
self.assertIn(s, str(result.content))
if not doc_html_str:
self.assert_in_success_response(
['<meta name="robots" content="noindex,nofollow" />'], result
)
def test_api_doc_endpoints(self) -> None:
# We extract the set of /api/ endpoints to check by parsing
# the /api/ page sidebar for links starting with /api/.
api_page_raw = str(self.client_get("/api/").content)
ENDPOINT_REGEXP = re.compile(r"href=\"/api/\s*(.*?)\"")
endpoint_list_set = set(re.findall(ENDPOINT_REGEXP, api_page_raw))
endpoint_list = [f"/api/{endpoint}" for endpoint in endpoint_list_set]
# Validate that the parsing logic isn't broken, since if it
# broke, the below would become a noop.
self.assertGreater(len(endpoint_list), 70)
for endpoint in endpoint_list:
self._test(endpoint, "", doc_html_str=True)
result = self.client_get(
"/api/nonexistent-page",
follow=True,
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(result.status_code, 404)
def test_doc_endpoints(self) -> None:
self._test("/api/", "The Zulip API")
self._test("/api/api-keys", "be careful with it")
self._test("/api/installation-instructions", "No download required!")
self._test("/api/send-message", "steal away your hearts")
self._test("/api/render-message", "**foo**")
self._test("/api/get-streams", "include_public")
self._test("/api/get-stream-id", "The name of the stream to access.")
self._test("/api/get-subscriptions", "Get all streams that the user is subscribed to.")
self._test("/api/get-users", "client_gravatar")
self._test("/api/register-queue", "apply_markdown")
self._test("/api/get-events", "dont_block")
self._test("/api/delete-queue", "Delete a previously registered queue")
self._test("/api/update-message", "propagate_mode")
self._test("/api/get-own-user", "does not accept any parameters.")
self._test("/api/subscribe", "authorization_errors_fatal")
self._test("/api/create-user", "zuliprc-admin")
self._test("/api/unsubscribe", "not_removed")
if settings.ZILENCER_ENABLED:
self._test("/team/", "industry veterans")
self._test("/history/", "Cambridge, Massachusetts")
# Test the i18n version of one of these pages.
self._test("/en/history/", "Cambridge, Massachusetts")
if settings.ZILENCER_ENABLED:
self._test("/apps/", "Apps for every platform.")
self._test("/features/", "Beautiful messaging")
self._test("/use-cases/", "Use cases and customer stories")
self._test("/hello/", "Chat for distributed teams", landing_missing_strings=["Log in"])
self._test("/development-community/", "Zulip development community")
self._test("/why-zulip/", "Why Zulip?")
self._test("/for/open-source/", "for open source projects")
self._test("/for/events/", "for conferences and events")
self._test("/for/education/", "education pricing")
self._test("/case-studies/tum/", "Technical University of Munich")
self._test("/case-studies/ucsd/", "UCSD")
self._test("/case-studies/rust/", "Rust programming language")
self._test("/case-studies/recurse-center/", "Recurse Center")
self._test("/case-studies/lean/", "Lean theorem prover")
self._test("/case-studies/idrift/", "Case study: iDrift AS")
self._test("/case-studies/asciidoctor/", "Case study: Asciidoctor")
self._test("/for/research/", "for research")
self._test("/for/business/", "Communication efficiency represents")
self._test("/for/communities/", "Zulip for communities")
self._test("/self-hosting/", "Self-host Zulip")
self._test("/security/", "TLS encryption")
self._test("/attribution/", "Attributions")
self._test("/devlogin/", "Normal users", landing_page=False)
self._test("/devtools/", "Useful development URLs")
self._test("/errors/404/", "Page not found")
self._test("/errors/5xx/", "Internal server error")
self._test("/emails/", "manually generate most of the emails by clicking")
result = self.client_get(
"/integrations/doc-html/nonexistent_integration",
follow=True,
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(result.status_code, 404)
result = self.client_get("/new-user/")
self.assertEqual(result.status_code, 301)
self.assertIn("hello", result["Location"])
result = self.client_get("/developer-community/")
self.assertEqual(result.status_code, 301)
self.assertIn("development-community", result["Location"])
result = self.client_get("/for/companies/", follow=True)
self.assert_in_success_response(["Communication efficiency represents"], result)
def test_portico_pages_open_graph_metadata(self) -> None:
# Why Zulip
url = "/why-zulip/"
title = '<meta property="og:title" content="Team chat with first-class threading" />'
description = '<meta property="og:description" content="Most team chats are overwhelming'
self._test(url, title, doc_html_str=True)
self._test(url, description, doc_html_str=True)
# Features
url = "/features/"
title = '<meta property="og:title" content="Zulip features" />'
description = '<meta property="og:description" content="First class threading'
self._test(url, title, doc_html_str=True)
self._test(url, description, doc_html_str=True)
def test_integration_doc_endpoints(self) -> None:
self._test(
"/integrations/",
"native integrations.",
extra_strings=[
"And hundreds more through",
"Zapier",
"IFTTT",
],
)
for integration in INTEGRATIONS.keys():
url = f"/integrations/doc-html/{integration}"
self._test(url, "", doc_html_str=True)
def test_integration_pages_open_graph_metadata(self) -> None:
url = "/integrations/doc/github"
title = '<meta property="og:title" content="Connect GitHub to Zulip" />'
description = '<meta property="og:description" content="Zulip comes with over'
self._test(url, title, doc_html_str=True)
self._test(url, description, doc_html_str=True)
# Test category pages
url = "/integrations/communication"
title = '<meta property="og:title" content="Connect your Communication tools to Zulip" />'
description = '<meta property="og:description" content="Zulip comes with over'
self._test(url, title, doc_html_str=True)
self._test(url, description, doc_html_str=True)
# Test integrations page
url = "/integrations/"
title = '<meta property="og:title" content="Connect the tools you use to Zulip" />'
description = '<meta property="og:description" content="Zulip comes with over'
self._test(url, title, doc_html_str=True)
self._test(url, description, doc_html_str=True)
def test_doc_html_str_non_ajax_call(self) -> None:
# We don't need to test all the pages for 404
        for integration in list(INTEGRATIONS.keys())[:5]:
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
url = f"/en/integrations/doc-html/{integration}"
result = self.client_get(url, subdomain="", follow=True)
self.assertEqual(result.status_code, 404)
result = self.client_get(url, subdomain="zephyr", follow=True)
self.assertEqual(result.status_code, 404)
url = f"/en/integrations/doc-html/{integration}"
result = self.client_get(url, subdomain="", follow=True)
self.assertEqual(result.status_code, 404)
result = self.client_get(url, subdomain="zephyr", follow=True)
self.assertEqual(result.status_code, 404)
result = self.client_get("/integrations/doc-html/nonexistent_integration", follow=True)
self.assertEqual(result.status_code, 404)
def test_electron_detection(self) -> None:
result = self.client_get("/accounts/password/reset/")
        # TODO: Ideally, this would report the specific browser rather than just "Mozilla".
self.assertTrue('data-platform="Mozilla"' in result.content.decode())
result = self.client_get("/accounts/password/reset/", HTTP_USER_AGENT="ZulipElectron/1.0.0")
self.assertTrue('data-platform="ZulipElectron"' in result.content.decode())
class HelpTest(ZulipTestCase):
def test_help_settings_links(self) -> None:
result = self.client_get("/help/change-the-time-format")
self.assertEqual(result.status_code, 200)
self.assertIn(
'Go to <a href="/#settings/display-settings">Display settings</a>', str(result.content)
)
# Check that the sidebar was rendered properly.
self.assertIn("Getting started with Zulip", str(result.content))
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
result = self.client_get("/help/change-the-time-format", subdomain="")
self.assertEqual(result.status_code, 200)
self.assertIn("<strong>Display settings</strong>", str(result.content))
self.assertNotIn("/#settings", str(result.content))
def test_help_relative_links_for_gear(self) -> None:
result = self.client_get("/help/analytics")
self.assertIn('<a href="/stats">Usage statistics</a>', str(result.content))
self.assertEqual(result.status_code, 200)
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
result = self.client_get("/help/analytics", subdomain="")
self.assertEqual(result.status_code, 200)
self.assertIn("<strong>Usage statistics</strong>", str(result.content))
self.assertNotIn("/stats", str(result.content))
def test_help_relative_links_for_stream(self) -> None:
result = self.client_get("/help/message-a-stream-by-email")
self.assertIn('<a href="/#streams/subscribed">Subscribed streams</a>', str(result.content))
self.assertEqual(result.status_code, 200)
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
result = self.client_get("/help/message-a-stream-by-email", subdomain="")
self.assertEqual(result.status_code, 200)
self.assertIn("<strong>Manage streams</strong>", str(result.content))
self.assertNotIn("/#streams", str(result.content))
class IntegrationTest(ZulipTestCase):
def test_check_if_every_integration_has_logo_that_exists(self) -> None:
for integration in INTEGRATIONS.values():
path = urlsplit(integration.logo_url).path
self.assertTrue(os.path.isfile(settings.DEPLOY_ROOT + path), integration.name)
def test_api_url_view_subdomains_base(self) -> None:
context: Dict[str, Any] = {}
add_api_uri_context(context, HostRequestMock())
self.assertEqual(context["api_url_scheme_relative"], "testserver/api")
self.assertEqual(context["api_url"], "http://testserver/api")
self.assertTrue(context["html_settings_links"])
@override_settings(ROOT_DOMAIN_LANDING_PAGE=True)
def test_api_url_view_subdomains_homepage_base(self) -> None:
context: Dict[str, Any] = {}
add_api_uri_context(context, HostRequestMock())
self.assertEqual(context["api_url_scheme_relative"], "yourZulipDomain.testserver/api")
self.assertEqual(context["api_url"], "http://yourZulipDomain.testserver/api")
self.assertFalse(context["html_settings_links"])
def test_api_url_view_subdomains_full(self) -> None:
context: Dict[str, Any] = {}
request = HostRequestMock(host="mysubdomain.testserver")
add_api_uri_context(context, request)
self.assertEqual(context["api_url_scheme_relative"], "mysubdomain.testserver/api")
self.assertEqual(context["api_url"], "http://mysubdomain.testserver/api")
self.assertTrue(context["html_settings_links"])
def test_html_settings_links(self) -> None:
context: Dict[str, Any] = {}
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
add_api_uri_context(context, HostRequestMock())
self.assertEqual(context["settings_html"], "Zulip settings page")
self.assertEqual(context["subscriptions_html"], "streams page")
context = {}
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
add_api_uri_context(context, HostRequestMock(host="mysubdomain.testserver"))
self.assertEqual(context["settings_html"], '<a href="/#settings">Zulip settings page</a>')
self.assertEqual(
context["subscriptions_html"], '<a target="_blank" href="/#streams">streams page</a>'
)
context = {}
add_api_uri_context(context, HostRequestMock())
self.assertEqual(context["settings_html"], '<a href="/#settings">Zulip settings page</a>')
self.assertEqual(
context["subscriptions_html"], '<a target="_blank" href="/#streams">streams page</a>'
)
class AboutPageTest(ZulipTestCase):
@skipUnless(settings.ZILENCER_ENABLED, "requires zilencer")
def test_endpoint(self) -> None:
with self.settings(CONTRIBUTOR_DATA_FILE_PATH="zerver/tests/fixtures/authors.json"):
result = self.client_get("/team/")
self.assert_in_success_response(["Our amazing community"], result)
self.assert_in_success_response(["2017-11-20"], result)
self.assert_in_success_response(["timabbott", "showell", "gnprice", "rishig"], result)
with mock.patch("zerver.views.portico.open", side_effect=FileNotFoundError) as m:
result = self.client_get("/team/")
self.assertEqual(result.status_code, 200)
self.assert_in_success_response(["Never ran"], result)
m.assert_called_once()
with self.settings(ZILENCER_ENABLED=False):
result = self.client_get("/team/")
self.assertEqual(result.status_code, 301)
self.assertEqual(result["Location"], "https://zulip.com/team/")
class SmtpConfigErrorTest(ZulipTestCase):
def test_smtp_error(self) -> None:
result = self.client_get("/config-error/smtp")
self.assertEqual(result.status_code, 200)
self.assert_in_success_response(["email configuration"], result)
class PlansPageTest(ZulipTestCase):
def test_plans_auth(self) -> None:
root_domain = ""
result = self.client_get("/plans/", subdomain=root_domain)
self.assert_in_success_response(["Self-host Zulip"], result)
self.assert_not_in_success_response(["/upgrade#sponsorship"], result)
self.assert_in_success_response(["/accounts/go/?next=%2Fupgrade%23sponsorship"], result)
non_existent_domain = "moo"
result = self.client_get("/plans/", subdomain=non_existent_domain)
self.assertEqual(result.status_code, 404)
self.assert_in_response("does not exist", result)
realm = get_realm("zulip")
realm.plan_type = Realm.PLAN_TYPE_STANDARD_FREE
realm.save(update_fields=["plan_type"])
result = self.client_get("/plans/", subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "/accounts/login/?next=/plans")
guest_user = "polonius"
self.login(guest_user)
result = self.client_get("/plans/", subdomain="zulip", follow=True)
self.assertEqual(result.status_code, 404)
organization_member = "hamlet"
self.login(organization_member)
result = self.client_get("/plans/", subdomain="zulip")
self.assert_in_success_response(["Current plan"], result)
self.assert_in_success_response(["/upgrade#sponsorship"], result)
self.assert_not_in_success_response(["/accounts/go/?next=%2Fupgrade%23sponsorship"], result)
# Test root domain, with login on different domain
result = self.client_get("/plans/", subdomain="")
# TODO: works in manual testing, but I suspect something is funny in
# the test environment
# self.assert_in_success_response(["Sign up now"], result)
def test_CTA_text_by_plan_type(self) -> None:
sign_up_now = "Create organization"
upgrade_to_standard = "Upgrade to Standard"
current_plan = "Current plan"
sponsorship_pending = "Sponsorship pending"
# Root domain
result = self.client_get("/plans/", subdomain="")
self.assert_in_success_response([sign_up_now, upgrade_to_standard], result)
self.assert_not_in_success_response([current_plan, sponsorship_pending], result)
realm = get_realm("zulip")
realm.plan_type = Realm.PLAN_TYPE_SELF_HOSTED
realm.save(update_fields=["plan_type"])
with self.settings(PRODUCTION=True):
result = self.client_get("/plans/", subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "https://zulip.com/plans")
self.login("iago")
# SELF_HOSTED should hide the local plans page, even if logged in
result = self.client_get("/plans/", subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual(result["Location"], "https://zulip.com/plans")
# But in the development environment, it renders a page
result = self.client_get("/plans/", subdomain="zulip")
self.assert_in_success_response([sign_up_now, upgrade_to_standard], result)
self.assert_not_in_success_response([current_plan, sponsorship_pending], result)
realm.plan_type = Realm.PLAN_TYPE_LIMITED
realm.save(update_fields=["plan_type"])
result = self.client_get("/plans/", subdomain="zulip")
self.assert_in_success_response([current_plan, upgrade_to_standard], result)
self.assert_not_in_success_response([sign_up_now, sponsorship_pending], result)
with self.settings(FREE_TRIAL_DAYS=60):
result = self.client_get("/plans/", subdomain="zulip")
self.assert_in_success_response([current_plan, "Start 60 day free trial"], result)
self.assert_not_in_success_response(
[sign_up_now, sponsorship_pending, upgrade_to_standard], result
)
realm.plan_type = Realm.PLAN_TYPE_STANDARD_FREE
realm.save(update_fields=["plan_type"])
result = self.client_get("/plans/", subdomain="zulip")
self.assert_in_success_response([current_plan], result)
self.assert_not_in_success_response(
[sign_up_now, upgrade_to_standard, sponsorship_pending], result
)
realm.plan_type = Realm.PLAN_TYPE_STANDARD
realm.save(update_fields=["plan_type"])
result = self.client_get("/plans/", subdomain="zulip")
self.assert_in_success_response([current_plan], result)
self.assert_not_in_success_response(
[sign_up_now, upgrade_to_standard, sponsorship_pending], result
)
customer = Customer.objects.create(realm=get_realm("zulip"), stripe_customer_id="cus_id")
plan = CustomerPlan.objects.create(
customer=customer,
tier=CustomerPlan.STANDARD,
status=CustomerPlan.FREE_TRIAL,
billing_cycle_anchor=timezone_now(),
billing_schedule=CustomerPlan.MONTHLY,
)
result = self.client_get("/plans/", subdomain="zulip")
self.assert_in_success_response(["Current plan (free trial)"], result)
self.assert_not_in_success_response(
[sign_up_now, upgrade_to_standard, sponsorship_pending], result
)
realm.plan_type = Realm.PLAN_TYPE_LIMITED
realm.save()
customer.sponsorship_pending = True
customer.save()
plan.delete()
result = self.client_get("/plans/", subdomain="zulip")
self.assert_in_success_response([current_plan], result)
self.assert_in_success_response([current_plan, sponsorship_pending], result)
self.assert_not_in_success_response([sign_up_now, upgrade_to_standard], result)
class AppsPageTest(ZulipTestCase):
def test_get_apps_page_url(self) -> None:
with self.settings(ZILENCER_ENABLED=False):
apps_page_url = get_apps_page_url()
self.assertEqual(apps_page_url, "https://zulip.com/apps/")
with self.settings(ZILENCER_ENABLED=True):
apps_page_url = get_apps_page_url()
self.assertEqual(apps_page_url, "/apps/")
def test_apps_view(self) -> None:
result = self.client_get("/apps")
self.assertEqual(result.status_code, 301)
self.assertTrue(result["Location"].endswith("/apps/"))
with self.settings(ZILENCER_ENABLED=False):
result = self.client_get("/apps/")
self.assertEqual(result.status_code, 301)
self.assertTrue(result["Location"] == "https://zulip.com/apps/")
with self.settings(ZILENCER_ENABLED=False):
result = self.client_get("/apps/linux")
self.assertEqual(result.status_code, 301)
self.assertTrue(result["Location"] == "https://zulip.com/apps/")
with self.settings(ZILENCER_ENABLED=True):
result = self.client_get("/apps/")
self.assertEqual(result.status_code, 200)
html = result.content.decode()
self.assertIn("Apps for every platform.", html)
def test_app_download_link_view(self) -> None:
return_value = "https://desktop-download.zulip.com/v5.4.3/Zulip-Web-Setup-5.4.3.exe"
with mock.patch(
"zerver.views.portico.get_latest_github_release_download_link_for_platform",
return_value=return_value,
) as m:
result = self.client_get("/apps/download/windows")
m.assert_called_once_with("windows")
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"] == return_value)
result = self.client_get("/apps/download/plan9")
self.assertEqual(result.status_code, 404)
class PrivacyTermsTest(ZulipTestCase):
def test_terms_and_policies_index(self) -> None:
with self.settings(POLICIES_DIRECTORY="corporate/policies"):
response = self.client_get("/policies/")
self.assert_in_success_response(["Terms and policies"], response)
def test_custom_terms_of_service_template(self) -> None:
not_configured_message = "This server is an installation"
with self.settings(POLICIES_DIRECTORY="zerver/policies_absent"):
response = self.client_get("/policies/terms")
self.assert_in_response(not_configured_message, response)
with self.settings(POLICIES_DIRECTORY="corporate/policies"):
response = self.client_get("/policies/terms")
self.assert_in_success_response(["Kandra Labs"], response)
def test_custom_privacy_policy_template(self) -> None:
not_configured_message = "This server is an installation"
with self.settings(POLICIES_DIRECTORY="zerver/policies_absent"):
response = self.client_get("/policies/privacy")
self.assert_in_response(not_configured_message, response)
with self.settings(POLICIES_DIRECTORY="corporate/policies"):
response = self.client_get("/policies/privacy")
self.assert_in_success_response(["Kandra Labs"], response)
def test_custom_privacy_policy_template_with_absolute_url(self) -> None:
"""Verify that using our recommended production default of an absolute path
like /etc/zulip/policies/ works."""
current_dir = os.path.dirname(os.path.abspath(__file__))
abs_path = os.path.abspath(
os.path.join(current_dir, "..", "..", "templates/corporate/policies")
)
with self.settings(POLICIES_DIRECTORY=abs_path):
response = self.client_get("/policies/privacy")
self.assert_in_success_response(["Kandra Labs"], response)
with self.settings(POLICIES_DIRECTORY=abs_path):
response = self.client_get("/policies/nonexistent")
self.assert_in_response("No such page", response)
def test_redirects_from_older_urls(self) -> None:
with self.settings(POLICIES_DIRECTORY="corporate/policies"):
result = self.client_get("/privacy/", follow=True)
self.assert_in_success_response(["Kandra Labs"], result)
with self.settings(POLICIES_DIRECTORY="corporate/policies"):
result = self.client_get("/terms/", follow=True)
self.assert_in_success_response(["Kandra Labs"], result)
def test_no_nav(self) -> None:
        # Test the ?nav=0 feature of /privacy and /terms, which is designed
        # to comply with the Apple App Store's draconian policy that
        # ToS/Privacy pages linked from an iOS app must have no links to
        # the rest of the site if pricing information appears anywhere
        # else on the site.
        # We don't currently have such a link on these pages at all; the
        # first assertion below would change if we adjusted that design.
response = self.client_get("/policies/terms")
self.assert_not_in_success_response(["Back to Zulip"], response)
response = self.client_get("/policies/terms", {"nav": "no"})
self.assert_not_in_success_response(["Back to Zulip"], response)
response = self.client_get("/policies/privacy", {"nav": "no"})
self.assert_not_in_success_response(["Back to Zulip"], response)
| 47.07837
| 100
| 0.659475
|
a7df4eec6c025af7efb1f55e70c01bdf539d5584
| 983
|
py
|
Python
|
budgetportal/migrations/0005_department-unique-together-constraints.py
|
fluenty/datamanager
|
97ba9d58d4527b7d61b730ea4896f09a56e6ae60
|
[
"MIT"
] | null | null | null |
budgetportal/migrations/0005_department-unique-together-constraints.py
|
fluenty/datamanager
|
97ba9d58d4527b7d61b730ea4896f09a56e6ae60
|
[
"MIT"
] | null | null | null |
budgetportal/migrations/0005_department-unique-together-constraints.py
|
fluenty/datamanager
|
97ba9d58d4527b7d61b730ea4896f09a56e6ae60
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-14 10:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('budgetportal', '0004_government-unique-together-constraints'),
]
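    # Make department name, slug and vote_number each unique per government.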
operations = [
migrations.AlterField(
model_name='department',
name='name',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='department',
name='slug',
field=models.SlugField(max_length=200),
),
migrations.AlterField(
model_name='department',
name='vote_number',
field=models.IntegerField(),
),
migrations.AlterUniqueTogether(
name='department',
unique_together=set([('government', 'slug'), ('government', 'name'), ('government', 'vote_number')]),
),
]
| 28.085714
| 113
| 0.581892
|
13ed68b0a627c01009fc291acf7e79224dc3d799
| 25,549
|
py
|
Python
|
pajbot/modules/playsound.py
|
sadlyfell/bullbot
|
b6ef96f61678fab4a245d8ccddf9d1ae7aae9fee
|
[
"MIT"
] | null | null | null |
pajbot/modules/playsound.py
|
sadlyfell/bullbot
|
b6ef96f61678fab4a245d8ccddf9d1ae7aae9fee
|
[
"MIT"
] | null | null | null |
pajbot/modules/playsound.py
|
sadlyfell/bullbot
|
b6ef96f61678fab4a245d8ccddf9d1ae7aae9fee
|
[
"MIT"
] | null | null | null |
import json
import logging
import re
from argparse import ArgumentParser
from pajbot.managers.db import DBManager
from pajbot.models.playsound import Playsound
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
log = logging.getLogger(__name__)
class PlaysoundModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "Playsound"
DESCRIPTION = "Play a sound on stream with !playsound"
CATEGORY = "Feature"
SETTINGS = [
ModuleSetting(
key="point_cost",
label="Point cost",
type="number",
required=True,
placeholder="Point cost",
default=200,
constraints={"min_value": 0, "max_value": 999999},
),
ModuleSetting(
key="token_cost",
label="Token cost",
type="number",
required=True,
placeholder="Token cost",
default=0,
constraints={"min_value": 0, "max_value": 15},
),
ModuleSetting(
key="global_cd",
label="Global playsound cooldown (seconds)",
type="number",
required=True,
placeholder="",
default=10,
constraints={"min_value": 0, "max_value": 600},
),
ModuleSetting(
key="default_sample_cd",
label="Default per-sample cooldown (seconds)",
type="number",
required=True,
placeholder="",
default=30,
constraints={"min_value": 0, "max_value": 600},
),
ModuleSetting(
key="global_volume",
label="Global volume (0-100)",
type="number",
required=True,
placeholder="",
default=40,
constraints={"min_value": 0, "max_value": 100},
),
ModuleSetting(key="sub_only", label="Subscribers only", type="boolean", required=True, default=False),
ModuleSetting(key="can_whisper", label="Command can be whispered", type="boolean", required=True, default=True),
ModuleSetting(
key="confirmation_whisper",
label="Send user a whisper when sound was successfully played",
type="boolean",
required=True,
default=True,
),
ModuleSetting(
key="global_cd_whisper",
label="Send user a whisper playsounds are on global cooldown",
type="boolean",
required=True,
default=True,
),
]
def __init__(self, bot):
super().__init__(bot)
# this is for the "Test on stream" button on the admin page
if bot:
bot.socket_manager.add_handler("playsound.play", self.on_web_playsound)
self.sample_cooldown = []
self.global_cooldown = False
# when a "Test on stream" is triggered via the Web UI.
def on_web_playsound(self, data):
# on playsound test triggered by the Web UI
# this works even if the module is not enabled.
playsound_name = data["name"]
with DBManager.create_session_scope() as session:
playsound = session.query(Playsound).filter(Playsound.name == playsound_name).one_or_none()
if playsound is None:
log.warning("Web UI tried to play invalid playsound. Ignoring.")
return
payload = {
"link": playsound.link,
"volume": int(round(playsound.volume * self.settings["global_volume"] / 100)),
}
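            # Example: a sample stored with volume 80 under a global_volume
            # setting of 40 is emitted at int(round(80 * 40 / 100)) == 32.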
log.debug("Playsound module is emitting payload: {}".format(json.dumps(payload)))
self.bot.websocket_manager.emit("play_sound", payload)
def reset_global_cd(self):
self.global_cooldown = False
def play_sound(self, **options):
bot = options["bot"]
message = options["message"]
source = options["source"]
playsoundURL = "https://{}/playsounds".format(self.bot.config["web"]["domain"])
if not message:
bot.say("Playsounds can be tried out at {}".format(playsoundURL))
return False
playsound_name = message.split(" ")[0].lower()
with DBManager.create_session_scope() as session:
# load playsound from the database
playsound = session.query(Playsound).filter(Playsound.name == playsound_name).one_or_none()
if playsound is None:
bot.whisper(
source.username,
"The playsound you gave does not exist. Check out all the valid playsounds here: "
+ "{}".format(playsoundURL),
)
return False
if self.global_cooldown and source.username not in ["admiralbulldog", "datguy1"]:
if self.settings["global_cd_whisper"]:
bot.whisper(
source.username,
"Another user played a sample too recently. Please try again after the global cooldown "
+ "of {} seconds has run out.".format(self.settings["global_cd"]),
)
return False
cooldown = playsound.cooldown
if cooldown is None:
cooldown = self.settings["default_sample_cd"]
if playsound_name in self.sample_cooldown and source.username not in ["admiralbulldog", "datguy1"]:
bot.whisper(
source.username,
"The playsound {0} was played too recently. ".format(playsound.name)
+ "Please wait until its cooldown of {} seconds has run out.".format(cooldown),
)
return False
cost = playsound.cost
if cost is None:
cost = self.settings["point_cost"]
if not source.can_afford(cost):
bot.whisper(
source.username,
"You need {0} points to play this playsound, ".format(cost) + "you have {}.".format(source.points),
)
return False
if not playsound.enabled:
bot.whisper(
source.username,
"The playsound you gave is disabled. Check out all the valid playsounds here: "
"https://{}/playsounds".format(self.bot.config["web"]["domain"]),
)
return False
payload = {
"link": playsound.link,
"volume": int(round(playsound.volume * self.settings["global_volume"] / 100)),
}
log.debug("Playsound module is emitting payload: {}".format(json.dumps(payload)))
bot.websocket_manager.emit("play_sound", payload)
if source.username not in ["datguy1", "admiralbulldog"]:
source.points = source.points - cost
if self.settings["confirmation_whisper"]:
bot.whisper(source.username, "Successfully played the sound {} on stream!".format(playsound_name))
self.global_cooldown = True
self.sample_cooldown.append(playsound.name)
bot.execute_delayed(cooldown, self.sample_cooldown.remove, (playsound.name,))
bot.execute_delayed(self.settings["global_cd"], self.reset_global_cd, ())
@staticmethod
def parse_playsound_arguments(message):
"""
Available options:
--volume VOLUME
--cooldown COOLDOWN
--enabled/--disabled
"""
parser = ArgumentParser()
parser.add_argument("--volume", dest="volume", type=int)
# we parse this manually so we can allow "none" and things like that to unset it
parser.add_argument("--cost", dest="cost", type=str)
parser.add_argument("--cooldown", dest="cooldown", type=str)
parser.add_argument("--enabled", dest="enabled", action="store_true")
parser.add_argument("--disabled", dest="enabled", action="store_false")
parser.set_defaults(volume=None, cooldown=None, enabled=None)
try:
args, unknown = parser.parse_known_args(message.split())
except SystemExit:
return False, False, False
except:
log.exception("Unhandled exception in add_command")
return False, False, False
# Strip options of any values that are set as None
options = {k: v for k, v in vars(args).items() if v is not None}
if len(unknown) < 1:
# no name
return False, False, False
name = unknown[0]
link = None if len(unknown) < 2 else " ".join(unknown[1:])
return options, name, link
@staticmethod
def validate_name(name):
return name is not None
re_valid_links = re.compile("^https://\\S*$")
@staticmethod
def validate_link(link):
return link is not None and PlaysoundModule.re_valid_links.match(link)
def update_link(self, bot, source, playsound, link):
if link is not None:
if not self.validate_link(link):
bot.whisper(
source.username,
"Error: Invalid link. Valid links must start with https:// " "and cannot contain spaces",
)
return False
playsound.link = link
return True
@staticmethod
def validate_volume(volume):
return volume is not None and 0 <= volume <= 100
def update_volume(self, bot, source, playsound, parsed_options):
if "volume" in parsed_options:
if not self.validate_volume(parsed_options["volume"]):
bot.whisper(source.username, "Error: Volume must be between 0 and 100.")
return False
playsound.volume = parsed_options["volume"]
return True
@staticmethod
def validate_cooldown(cooldown):
return cooldown is None or cooldown >= 0
def update_cooldown(self, bot, source, playsound, parsed_options):
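        # Passing "--cooldown none" clears the per-sample override so the
        # module-wide default_sample_cd setting applies again; any other
        # value must parse as a non-negative integer.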
if "cooldown" in parsed_options:
if parsed_options["cooldown"].lower() == "none":
cooldown_int = None
else:
try:
cooldown_int = int(parsed_options["cooldown"])
except ValueError:
bot.whisper(source.username, 'Error: Cooldown must be a number or the string "none".')
return False
if not self.validate_cooldown(cooldown_int):
bot.whisper(source.username, "Error: Cooldown must be positive.")
return False
playsound.cooldown = cooldown_int
return True
@staticmethod
def validate_cost(cost):
return cost is None or cost >= 0
def update_cost(self, bot, source, playsound, parsed_options):
if "cost" in parsed_options:
if parsed_options["cost"].lower() == "none":
cost_int = None
else:
try:
cost_int = int(parsed_options["cost"])
except ValueError:
bot.whisper(source.username, 'Error: Cost must be a number or the string "none".')
return False
if not self.validate_cost(cost_int):
bot.whisper(source.username, "Error: Cost must be positive.")
return False
playsound.cost = cost_int
return True
@staticmethod
def update_enabled(bot, source, playsound, parsed_options):
if "enabled" in parsed_options:
playsound.enabled = parsed_options["enabled"]
return True
def add_playsound_command(self, **options):
"""Method for creating playsounds.
Usage: !add playsound PLAYSOUNDNAME LINK [options]
Multiple options available:
--volume VOLUME
--cooldown COOLDOWN
--enabled/--disabled
"""
bot = options["bot"]
message = options["message"]
source = options["source"]
options, name, link = self.parse_playsound_arguments(message)
        # The parser does not enforce that a link is present, because the
        # edit command does not strictly require one; so in addition to the
        # "False" error value, link is also checked for None here.
if options is False or name is False or link is False or link is None:
bot.whisper(
source.username,
"Invalid usage. Correct syntax: !add playsound <name> <link> "
+ "[--volume 0-100] [--cost 10000/none] [--cooldown 60/none] [--enabled/--disabled]",
)
return
with DBManager.create_session_scope() as session:
count = session.query(Playsound).filter(Playsound.name == name).count()
if count > 0:
bot.whisper(
source.username,
"A Playsound with that name already exists. Use !edit playsound "
+ "or !remove playsound to edit or delete it.",
)
return
playsound = Playsound(name=name)
if not self.update_link(bot, source, playsound, link):
return
if not self.update_volume(bot, source, playsound, options):
return
if not self.update_cost(bot, source, playsound, options):
return
if not self.update_cooldown(bot, source, playsound, options):
return
if not self.update_enabled(bot, source, playsound, options):
return
session.add(playsound)
bot.whisper(source.username, "Successfully added your playsound.")
def edit_playsound_command(self, **options):
"""Method for editing playsounds.
Usage: !edit playsound PLAYSOUNDNAME [LINK] [options]
Multiple options available:
--volume VOLUME
--cooldown COOLDOWN
--enabled/--disabled
"""
bot = options["bot"]
message = options["message"]
source = options["source"]
options, name, link = self.parse_playsound_arguments(message)
if options is False or name is False or link is False:
bot.whisper(
source.username,
"Invalid usage. Correct syntax: !edit playsound <name> [link] "
+ "[--volume 0-100] [--cost 10000/none] [--cooldown 60/none] [--enabled/--disabled]",
)
return
with DBManager.create_session_scope() as session:
playsound = session.query(Playsound).filter(Playsound.name == name).one_or_none()
if playsound is None:
bot.whisper(
source.username,
"No playsound with that name exists. You can create playsounds with "
"!add playsound <name> <link> [options].",
)
return
if not self.update_link(bot, source, playsound, link):
return
if not self.update_volume(bot, source, playsound, options):
return
if not self.update_cost(bot, source, playsound, options):
return
if not self.update_cooldown(bot, source, playsound, options):
return
if not self.update_enabled(bot, source, playsound, options):
return
session.add(playsound)
bot.whisper(source.username, "Successfully edited your playsound.")
@staticmethod
def remove_playsound_command(**options):
"""Method for removing playsounds.
Usage: !edit playsound PLAYSOUNDNAME
"""
bot = options["bot"]
message = options["message"]
source = options["source"]
playsound_name = message.split(" ")[0].lower()
# check for empty string
if not playsound_name:
bot.whisper(source.username, "Invalid usage. Correct syntax: !remove playsound <name>")
return
with DBManager.create_session_scope() as session:
playsound = session.query(Playsound).filter(Playsound.name == playsound_name).one_or_none()
if playsound is None:
bot.whisper(source.username, "No playsound with that name exists.")
return
session.delete(playsound)
bot.whisper(source.username, "Successfully deleted your playsound.")
@staticmethod
def debug_playsound_command(**options):
"""Method for debugging (printing info about) playsounds.
Usage: !debug playsound PLAYSOUNDNAME
"""
bot = options["bot"]
message = options["message"]
source = options["source"]
playsound_name = message.split(" ")[0].lower()
# check for empty string
if not playsound_name:
bot.whisper(source.username, "Invalid usage. Correct syntax: !debug playsound <name>")
return
with DBManager.create_session_scope() as session:
playsound = session.query(Playsound).filter(Playsound.name == playsound_name).one_or_none()
if playsound is None:
bot.whisper(source.username, "No playsound with that name exists.")
return
bot.whisper(
source.username,
"name={}, link={}, volume={}, cost={}, cooldown={}, enabled={}".format(
playsound.name,
playsound.link,
playsound.volume,
playsound.cost,
playsound.cooldown,
playsound.enabled,
),
)
def load_commands(self, **options):
from pajbot.models.command import Command
from pajbot.models.command import CommandExample
self.commands["playsound"] = Command.raw_command(
self.play_sound,
cost=0,
sub_only=self.settings["sub_only"],
delay_all=0,
delay_user=0,
description="Play a sound on stream!",
can_execute_with_whisper=self.settings["can_whisper"],
examples=[
CommandExample(
None,
'Play the "doot" sample',
chat="user:!playsound doot\n" "bot>user:Successfully played the sound doot on stream!",
).parse()
],
)
self.commands["playsound"].long_description = 'Playsounds can be tried out <a href="/playsounds">here</a>'
self.commands["add"] = Command.multiaction_command(
level=100,
delay_all=0,
delay_user=0,
default=None,
command="add",
commands={
"playsound": Command.raw_command(
self.add_playsound_command,
level=500,
delay_all=0,
delay_user=0,
description="Creates a new playsound",
examples=[
CommandExample(
None,
"Create a new playsound",
chat="user:!add playsound doot https://i.nuuls.com/Bb4aX.mp3\n"
"bot>user:Successfully created your playsound",
description='Creates the "doot" playsound with the given link.',
).parse(),
CommandExample(
None,
"Create a new playsound and sets volume",
chat="user:!add playsound doot https://i.nuuls.com/Bb4aX.mp3 --volume 50\n"
"bot>user:Successfully created your playsound",
description='Creates the "doot" playsound with the given link and 50% volume.',
).parse(),
CommandExample(
None,
"Create a new playsound and sets cooldown",
chat="user:!add playsound doot https://i.nuuls.com/Bb4aX.mp3 --cooldown 60\n"
"bot>user:Successfully created your playsound",
description='Creates the "doot" playsound with the given link and 1 minute cooldown.',
).parse(),
CommandExample(
None,
"Create a new playsound and disable it",
chat="user:!add playsound doot https://i.nuuls.com/Bb4aX.mp3 --disabled\n"
"bot>user:Successfully created your playsound",
description='Creates the "doot" playsound with the given link and initially disables it.',
).parse(),
],
)
},
)
self.commands["edit"] = Command.multiaction_command(
level=100,
delay_all=0,
delay_user=0,
default=None,
command="edit",
commands={
"playsound": Command.raw_command(
self.edit_playsound_command,
level=500,
delay_all=0,
delay_user=0,
description="Edits an existing playsound",
examples=[
CommandExample(
None,
"Edit an existing playsound's link",
chat="user:!edit playsound doot https://i.nuuls.com/Bb4aX.mp3\n"
"bot>user:Successfully edited your playsound",
description='Updates the link of the "doot" playsound.',
).parse(),
CommandExample(
None,
"Edit an existing playsound's volume",
chat="user:!edit playsound doot --volume 50\n"
"bot>user:Successfully edited your playsound",
description='Updates the volume of the "doot" playsound to 50%.',
).parse(),
CommandExample(
None,
"Edit an existing playsound's cooldown",
chat="user:!edit playsound doot --cooldown 60\n"
"bot>user:Successfully edited your playsound",
description='Updates the cooldown of the "doot" playsound to 1 minute.',
).parse(),
CommandExample(
None,
"Disable an existing playsound",
chat="user:!edit playsound doot --disabled\n" "bot>user:Successfully edited your playsound",
description='Disables the "doot" playsound.',
).parse(),
CommandExample(
None,
"Enable an existing playsound",
chat="user:!edit playsound doot --enabled\n" "bot>user:Successfully edited your playsound",
description='Enables the "doot" playsound.',
).parse(),
],
)
},
)
self.commands["remove"] = Command.multiaction_command(
level=100,
delay_all=0,
delay_user=0,
default=None,
command="remove",
commands={
"playsound": Command.raw_command(
self.remove_playsound_command,
level=500,
delay_all=0,
delay_user=0,
description="Removes an existing playsound",
examples=[
CommandExample(
None,
"Remove an existing playsound",
chat="user:!remove playsound doot\n" "bot>user:Successfully removed your playsound",
description='Removes the "doot" playsound.',
).parse()
],
)
},
)
self.commands["debug"] = Command.multiaction_command(
level=100,
delay_all=0,
delay_user=0,
default=None,
command="debug",
commands={
"playsound": Command.raw_command(
self.debug_playsound_command,
level=250,
delay_all=0,
delay_user=0,
description="Prints data about a playsound",
examples=[
CommandExample(
None,
'Get information about the "doot" playsound',
chat="user:!debug playsound doot\n"
"bot>user: name=doot, link=https://i.nuuls.com/Bb4aX.mp3, volume=100, "
"cooldown=None, enabled=True",
).parse()
],
)
},
)
| 38.887367
| 120
| 0.526205
|
6df7d2565de18251cbc1741f5efc0493428dacfd
| 182
|
py
|
Python
|
hTools2.roboFontExt/lib/Scripts/selected glyphs/transform/gridfit.py
|
frankrolf/hTools2_extension
|
9d73b8640c85209853a72f8d4b167768de5e0d60
|
[
"BSD-3-Clause"
] | 2
|
2019-12-18T16:12:07.000Z
|
2019-12-21T01:19:23.000Z
|
hTools2.roboFontExt/lib/Scripts/selected glyphs/transform/gridfit.py
|
frankrolf/hTools2_extension
|
9d73b8640c85209853a72f8d4b167768de5e0d60
|
[
"BSD-3-Clause"
] | null | null | null |
hTools2.roboFontExt/lib/Scripts/selected glyphs/transform/gridfit.py
|
frankrolf/hTools2_extension
|
9d73b8640c85209853a72f8d4b167768de5e0d60
|
[
"BSD-3-Clause"
] | null | null | null |
# [h] fit to grid dialog
import hTools2.dialogs.glyphs.gridfit
import importlib
importlib.reload(hTools2.dialogs.glyphs.gridfit)
hTools2.dialogs.glyphs.gridfit.roundToGridDialog()
| 22.75
| 50
| 0.824176
|
c961e7968ad6a762f86b400d59107435a0f7337a
| 926
|
py
|
Python
|
cosypose/simulator/textures.py
|
ompugao/cosypose
|
4e471c16f19d5ee632668cd52eaa57b562f287d6
|
[
"MIT"
] | 202
|
2020-08-19T19:28:03.000Z
|
2022-03-29T07:10:47.000Z
|
cosypose/simulator/textures.py
|
ompugao/cosypose
|
4e471c16f19d5ee632668cd52eaa57b562f287d6
|
[
"MIT"
] | 66
|
2020-08-24T09:28:05.000Z
|
2022-03-31T07:11:06.000Z
|
cosypose/simulator/textures.py
|
ompugao/cosypose
|
4e471c16f19d5ee632668cd52eaa57b562f287d6
|
[
"MIT"
] | 66
|
2020-08-19T19:28:05.000Z
|
2022-03-18T20:47:55.000Z
|
import numpy as np
from collections import defaultdict
import pybullet as pb
def apply_random_textures(body, texture_ids, per_link=False, np_random=np.random):
data = body.visual_shape_data
visual_shapes_ids = [t[1] for t in data]
n_shapes = defaultdict(lambda: 0)
for i in visual_shapes_ids:
n_shapes[i] += 1
    for link_id, link_n_shapes in n_shapes.items():
        texture_id = np_random.choice(texture_ids)
        for link_shape_id in range(link_n_shapes):
if per_link:
texture_id = np_random.choice(texture_ids)
specular = np_random.randint(0, 1000)
pb.changeVisualShape(body._body_id, link_id, link_shape_id,
textureUniqueId=texture_id, rgbaColor=[1, 1, 1, 1],
physicsClientId=body._client.client_id,
specularColor=specular * np.ones(3))
return
| 38.583333
| 84
| 0.62635
|
fa50320b27220309d4d11b8fd1707c8fc39e59d1
| 2,202
|
py
|
Python
|
src/features.py
|
sebastiani/versa
|
019503c568b9e62aec05dc29eb75db9ff7c1ab9c
|
[
"MIT"
] | null | null | null |
src/features.py
|
sebastiani/versa
|
019503c568b9e62aec05dc29eb75db9ff7c1ab9c
|
[
"MIT"
] | null | null | null |
src/features.py
|
sebastiani/versa
|
019503c568b9e62aec05dc29eb75db9ff7c1ab9c
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from utilities import conv2d_pool_block, dense_block, conv2d_transpose_layer, dense_layer
def extract_features(images, output_size, use_batch_norm, dropout_keep_prob):
"""
Based on the architecture described in 'Matching Networks for One-Shot Learning'
http://arxiv.org/abs/1606.04080.pdf.
:param images: batch of images.
:param output_size: dimensionality of the output features.
:param use_batch_norm: whether to use batch normalization or not.
:param dropout_keep_prob: keep probability parameter for dropout.
:return: features.
"""
# 4X conv2d + pool blocks
h = conv2d_pool_block(images, use_batch_norm, dropout_keep_prob, 'fe_block_1')
h = conv2d_pool_block(h, use_batch_norm, dropout_keep_prob, 'fe_block_2')
h = conv2d_pool_block(h, use_batch_norm, dropout_keep_prob, 'fe_block_3')
h = conv2d_pool_block(h, use_batch_norm, dropout_keep_prob, 'fe_block_4')
# flatten output
h = tf.contrib.layers.flatten(h)
# dense layer
h = dense_block(h, output_size, use_batch_norm, dropout_keep_prob, 'fe_dense')
return h
def generate_views(angles, adaptation_inputs):
"""
Based on the architecture described in 'Matching Networks for One-Shot Learning'
http://arxiv.org/abs/1606.04080.pdf.
    :param angles: batch of orientations.
:param adaptation_inputs: batch of adaptation_inputs.
:return: batch of generated views.
"""
h = tf.concat([angles, adaptation_inputs], -1)
h = dense_layer(inputs=h, output_size=512, activation=tf.nn.relu, use_bias=False, name='generate_dense_1')
h = dense_layer(inputs=h, output_size=1024, activation=tf.nn.relu, use_bias=False, name='generate_dense_2')
h = tf.reshape(h, shape=[-1, 2, 2, 256])
h = conv2d_transpose_layer(inputs=h, filters=128, activation=tf.nn.relu, name='generate_deconv_1')
h = conv2d_transpose_layer(inputs=h, filters=64, activation=tf.nn.relu, name='generate_deconv_2')
h = conv2d_transpose_layer(inputs=h, filters=32, activation=tf.nn.relu, name='generate_deconv_3')
h = conv2d_transpose_layer(inputs=h, filters=1, activation=tf.nn.sigmoid, name='generate_deconv_4')
return h
| 40.777778
| 111
| 0.737057
|
24add0adfd598625649d003ed0767b0d6acadca4
| 804
|
py
|
Python
|
04_jump_the_five/jump.py
|
claracoo/tiny_python_projects
|
120a414e3b6125b537ce94f1fdec7faa9cf59b1f
|
[
"MIT"
] | null | null | null |
04_jump_the_five/jump.py
|
claracoo/tiny_python_projects
|
120a414e3b6125b537ce94f1fdec7faa9cf59b1f
|
[
"MIT"
] | null | null | null |
04_jump_the_five/jump.py
|
claracoo/tiny_python_projects
|
120a414e3b6125b537ce94f1fdec7faa9cf59b1f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Jump the Five"""
import argparse
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Jump the Five',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('text', metavar='str', help='Input text')
return parser.parse_args()
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
jumper = {'1': '9', '2': '8', '3': '7', '4': '6', '5': '0',
'6': '4', '7': '3', '8': '2', '9': '1', '0': '5'}
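    # e.g. "867-5309" translates to "243-0751": each digit "jumps" across
    # the 5 on a phone keypad, and 5 and 0 swap with each other.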
print(args.text.translate(str.maketrans(jumper)))
# --------------------------------------------------
if __name__ == '__main__':
main()
| 25.125
| 65
| 0.458955
|
bc8fc1b51b55e07f3f5c533ec2e8a318aea38cb1
| 18,819
|
py
|
Python
|
log_complete/model_698.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_complete/model_698.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_complete/model_698.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 174500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
| 91.354369
| 710
| 0.806525
|
75d746fd45140bb01d6d31ebc50bbb45880e0865
| 103,634
|
py
|
Python
|
flakylib.py
|
R-Mussabayev/flakylib
|
dc79ec1b1210ca91aac64fec28ba94fa087cc90e
|
[
"MIT"
] | 3
|
2020-07-08T18:31:31.000Z
|
2022-03-03T10:26:53.000Z
|
flakylib.py
|
R-Mussabayev/flakylib
|
dc79ec1b1210ca91aac64fec28ba94fa087cc90e
|
[
"MIT"
] | null | null | null |
flakylib.py
|
R-Mussabayev/flakylib
|
dc79ec1b1210ca91aac64fec28ba94fa087cc90e
|
[
"MIT"
] | null | null | null |
# Flaky Clustering Library v0.1
# Big MSSC (Minimum Sum-Of-Squares Clustering)
# Nenad Mladenovic, Rustam Mussabayev, Alexander Krassovitskiy
# rmusab@gmail.com
# v0.11 - 06/09/2021 - Revision of VNS logic in Membership_Shaking_VNS and Center_Shaking_VNS
# v0.1 - 30/06/2021 - Bug fixing in multi_portion_mssc
# v0.09 - 10/11/2020 - Revision of shake_centers logic
# v0.08 - 18/09/2020 - Bug fixing;
# v0.07 - 19/07/2020 - New functionality: distance matrices calculation routines with GPU support; different distance metrics; revision of optimal number of clusters routine;
# v0.06 - 05/06/2020 - New functionality: method sequencing;
# v0.05 - 04/06/2020 - New functionality: Simple center shaking VNS, Membership shaking VNS, Iterative extra center insertion/deletion, procedure for choosing the new n additional centers for existing ones using the k-means++ logic;
# v0.04 - 17/03/2020 - Different initialization modes were added to "Decomposition/aggregation k-means";
# v0.03 - 13/03/2020 - New functionality: k-means++;
# v0.02 - 10/03/2020 - New functionality: Decomposition/aggregation k-means;
# v0.01 - 27/02/2020 - Initial release: multiprocessing k-means.
import math
import time
import pickle
import threading
import cupy as cp
import numpy as np
import numba as nb
from itertools import cycle
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from numba import njit, prange, objmode, cuda
def load_obj(name):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
def save_obj(obj, name):
pickle.dump(obj,open(name + '.pkl', 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
def normalization(X):
X_min = np.amin(X, axis=0)
X = X - X_min
X_max = np.amax(X, axis=0)
if X_max.ndim == 1:
X_max[X_max == 0.0] = 1.0
elif X_max.ndim == 0:
if X_max == 0.0:
X_max = 1.0
else:
X_max = 1.0
X = X / X_max
return X
@njit(parallel=True)
def normalization1D(X, min_scaling = True):
assert X.ndim == 1
n = X.shape[0]
X_min = np.inf
for i in range(n):
if X[i] < X_min:
X_min = X[i]
X_max = np.NINF
if min_scaling:
for i in range(n):
X[i] -= X_min
if X[i] > X_max:
X_max = X[i]
else:
for i in range(n):
if X[i] > X_max:
X_max = X[i]
if X_max != 0:
for i in prange(n):
X[i] = X[i]/X_max
@njit(parallel=True)
def normalization2D(X, min_scaling = True):
assert X.ndim == 2
n, m = X.shape
for i in prange(m):
min_val = np.inf
for j in range(n):
if X[j,i] < min_val:
min_val = X[j,i]
max_val = np.NINF
if min_scaling:
for j in range(n):
X[j,i] -= min_val
if X[j,i] > max_val:
max_val = X[j,i]
else:
for j in range(n):
if X[j,i] > max_val:
max_val = X[j,i]
if max_val != 0.0:
for j in range(n):
X[j,i] = X[j,i]/max_val
# Generate isotropic Gaussian blobs
def gaussian_blobs(n_features = 2, n_samples = 1000, n_clusters = 5, cluster_std = 0.1):
true_centers = np.random.rand(n_clusters, n_features)
X, labels = make_blobs(n_samples=n_samples, centers=true_centers, cluster_std=cluster_std)
N = np.concatenate((true_centers,X))
N = normalization(N)
true_centers = N[:n_clusters]
X = N[n_clusters:]
return X, true_centers, labels
def draw_dataset(X, true_centers, original_labels, title = 'DataSet'):
n_clusters = len(true_centers)
plt.rcParams['figure.figsize'] = [10,10]
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
if original_labels.shape[0] == X.shape[0]:
for k, col in zip(range(n_clusters), colors):
my_members = original_labels == k
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
if true_centers.shape[0] > 0:
cluster_center = true_centers[k]
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=14)
else:
plt.plot(X[:, 0], X[:, 1], '.')
    plt.title(title)
plt.show()
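# Illustrative usage sketch (assumption: the helper name _example_blobs is not part of the original library):
# generate a small 2D blob dataset with gaussian_blobs and visualize it with draw_dataset.
def _example_blobs():
    X, true_centers, labels = gaussian_blobs(n_features=2, n_samples=500, n_clusters=4, cluster_std=0.05)
    draw_dataset(X, true_centers, labels, title='Gaussian blobs example')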
def generate_grid_nodes(coordinates1D, n_dimensions=3):
coordinatesND = n_dimensions*[coordinates1D]
mesh = np.array(np.meshgrid(*coordinatesND))
grid_nodes = mesh.T.reshape(-1, n_dimensions)
return grid_nodes
def generate_blobs_on_grid(n_samples=3000, grid_size=3, n_features=3, standard_deviation = 0.1):
assert grid_size > 0
cell_size = 1/grid_size
half_cell_size = cell_size/2
coordinates1D = np.linspace(half_cell_size, 1.0-half_cell_size, grid_size)
true_centroids = generate_grid_nodes(coordinates1D, n_features)
samples, sample_membership = make_blobs(n_samples=n_samples, centers=true_centroids, cluster_std=standard_deviation)
mask = np.all((samples >= 0.0) & (samples <= 1.0) , axis = 1)
samples = samples[mask]
sample_membership = sample_membership[mask]
return samples, sample_membership, true_centroids
@njit(inline='always')
def condensed_size(matrix_size):
return int((matrix_size*(matrix_size-1))/2)
@njit(inline='always')
def condensed_idx(i,j,n):
return int(i*n + j - i*(i+1)/2 - i - 1)
@njit(inline='always')
def regular_idx(condensed_idx, n):
i = int(math.ceil((1/2.) * (- (-8*condensed_idx + 4 *n**2 -4*n - 7)**0.5 + 2*n -1) - 1))
ii = i+1
j = int(n - (ii * (n - 1 - ii) + (ii*(ii + 1))/2) + condensed_idx)
return i,j
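# Illustrative sketch (assumption, not original code): round trip between the regular (i, j)
# indices of an n x n symmetric matrix and the index into its condensed upper-triangle form.
def _example_condensed_indexing(n=5):
    for i in range(n):
        for j in range(i + 1, n):
            k = condensed_idx(i, j, n)        # position inside the condensed vector
            assert regular_idx(k, n) == (i, j)
    return condensed_size(n)                  # length of the condensed vector: n*(n-1)/2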
@njit(parallel = True)
def row_of_condensed_matrix(row_ind, condensed_matrix):
assert row_ind > -1
condensed_len = len(condensed_matrix)
size = matrix_size(condensed_matrix)
assert row_ind < size
out = np.empty(size, dtype = condensed_matrix.dtype)
out[row_ind] = 0
if row_ind < size-1:
ind1 = condensed_idx(row_ind, row_ind+1, size)
ind2 = condensed_idx(row_ind, size-1, size)
out[row_ind+1:size] = condensed_matrix[ind1:ind2+1]
for i in prange(0,row_ind):
out[i] = condensed_matrix[condensed_idx(i,row_ind,size)]
return out
@njit
def matrix_size(condensed_matrix):
n = math.ceil((condensed_matrix.shape[0] * 2)**.5)
if (condensed_matrix.ndim != 1) or (n * (n - 1) / 2 != condensed_matrix.shape[0]):
raise ValueError('Incompatible vector size.')
return n
@njit(inline='always')
def matrix_element(i, j, N, condensed_matrix, diagonal_value):
if j > i:
return condensed_matrix[condensed_idx(i,j,N)]
elif j < i:
return condensed_matrix[condensed_idx(j,i,N)]
else:
return diagonal_value
# Extraction of submatrix from condensed_matrix where
# rows, cols - indices of rows and columns which must be included to submatrix
# diagonal_value - diagonal value in the original full square matrix
# Functionality is similar to Advanced Indexing in Numpy: submatrix = matrix[rows][:,cols]
def _submatrix(condensed_matrix, rows, cols, diagonal_value):
N = matrix_size(condensed_matrix)
if (condensed_matrix.ndim != 1) or (rows.ndim != 1) or (cols.ndim != 1) or ((N * (N - 1) / 2) != condensed_matrix.shape[0]):
raise ValueError('Incompatible vector size.')
if (N > 0) and (condensed_matrix.ndim == 1) and (rows.ndim == 1) and (cols.ndim == 1) and ((N * (N - 1) / 2) == condensed_matrix.shape[0]):
if len(rows) == 0:
new_rows = np.arange(N)
else:
new_rows = rows
if len(cols) == 0:
new_cols = np.arange(N)
else:
new_cols = cols
n_rows = len(new_rows)
n_cols = len(new_cols)
        chunk = np.empty((n_rows, n_cols), dtype = condensed_matrix.dtype)
for i in prange(n_rows):
for j in range(n_cols):
chunk[i,j] = matrix_element(new_rows[i],new_cols[j],N,condensed_matrix,diagonal_value)
else:
chunk = np.empty((0, 0), dtype = condensed_matrix.dtype)
return chunk
submatrix = njit(parallel=False)(_submatrix)
submatrix_parallel = njit(parallel=True)(_submatrix)
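# Illustrative sketch (assumption, not original code): build a small condensed symmetric matrix
# by hand and check that submatrix() matches NumPy advanced indexing on the full square form.
def _example_submatrix():
    n = 6
    full = np.random.rand(n, n)
    full = (full + full.T) / 2.0              # make it symmetric
    np.fill_diagonal(full, 0.0)
    cond = np.empty(condensed_size(n))
    for i in range(n):
        for j in range(i + 1, n):
            cond[condensed_idx(i, j, n)] = full[i, j]
    rows = np.array([0, 2, 5])
    cols = np.array([1, 3])
    block = submatrix(cond, rows, cols, 0.0)  # equivalent to full[rows][:, cols]
    assert np.allclose(block, full[rows][:, cols])
    return block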
# Squared Euclidean distance (standard implementation)
@njit(inline='always')
def euclidian2_distance(u, v):
d = u.dtype.type(0.0)
for i in range(u.shape[0]):
d += (u[i] - v[i]) ** 2.0
return d
@njit(inline='always')
def cosine_distance(u, v):
n = u.shape[0]
udotv = 0.
u_norm = 0.
v_norm = 0.
for i in range(n):
udotv += u[i] * v[i]
u_norm += u[i] * u[i]
v_norm += v[i] * v[i]
if (u_norm == 0.) or (v_norm == 0.):
d = 1.
else:
d = abs(1.-udotv / (u_norm * v_norm) ** .5) # Return absolute value to avoid small negative value due to rounding
return u.dtype.type(d)
@njit(inline='always')
def calc_distance(u, v, distance_measure=0):
if distance_measure == 0:
d = euclidian2_distance(u, v)
else:
d = cosine_distance(u, v)
return d
@cuda.jit
def distance_matrix_gpu(X, out, distance_measure=0):
i,j = cuda.grid(2)
n = X.shape[0]
if (i < n) and (j < n):
if j > i:
d = calc_distance(X[i], X[j], distance_measure)
out[i,j] = d
out[j,i] = d
elif i == j:
out[i,j] = 0.
@cuda.jit
def distance_matrix_XY_gpu(X, Y, out, distance_measure=0):
i,j = cuda.grid(2)
nX = X.shape[0]
nY = Y.shape[0]
if (i < nX) and (j < nY):
out[i,j] = calc_distance(X[i], Y[j], distance_measure)
@cuda.jit
def distance_matrix_condensed_gpu(X, out, distance_measure=0):
i,j = cuda.grid(2)
n = X.shape[0]
if (i < n) and (j > i) and (j < n):
out[condensed_idx(i,j,n)] = calc_distance(X[i], X[j], distance_measure)
@cuda.jit
def distance_matrix_XY_part_of_symmetric_gpu(start_row, start_col, X, Y, out, distance_measure=0):
i,j = cuda.grid(2)
nX = X.shape[0]
nY = Y.shape[0]
global_i = start_row + i
global_j = start_col + j
if (i < nX) and (j < nY) and (global_i < global_j):
out[i,j] = calc_distance(X[i], Y[j], distance_measure)
@njit(parallel=True)
def distance_matrix_euclidean2_cpu(X):
n = X.shape[0]
out = np.dot(X, X.T)
for i in prange(n):
for j in range(i+1,n):
out[i,j] = out[i,i] - 2.*out[i,j] + out[j,j]
out[j,i] = out[i,j]
np.fill_diagonal(out, 0.)
return out
@njit(parallel=True)
def distance_matrix_euclidean2_XY_cpu(X,Y):
nX = X.shape[0]
nY = Y.shape[0]
out = np.dot(X, Y.T)
NX = np.sum(X*X, axis=1)
NY = np.sum(Y*Y, axis=1)
for i in prange(nX):
for j in range(nY):
out[i,j] = NX[i] - 2.*out[i,j] + NY[j]
return out
@njit(parallel=True)
def distance_matrix_euclidean2_XY_weighted_cpu(X, Y, weightsX, weightsY):
nX = X.shape[0]
nY = Y.shape[0]
n_weightsX = weightsX.shape[0]
n_weightsY = weightsY.shape[0]
weighted = (n_weightsX > 0) and (n_weightsY > 0)
out = np.dot(X, Y.T)
NX = np.sum(X*X, axis=1)
NY = np.sum(Y*Y, axis=1)
if weighted:
for i in prange(nX):
for j in range(nY):
out[i,j] = (NX[i] - 2. * out[i,j] + NY[j]) * weightsX[i] * weightsY[j]
else:
for i in prange(nX):
for j in range(nY):
out[i,j] = NX[i] - 2. * out[i,j] + NY[j]
return out
@njit(parallel=True)
def distance_matrix_euclidean2_condensed_cpu(X):
n = X.shape[0]
condensed_len = int((n*(n-1))/2)
out = np.empty(condensed_len, dtype = X.dtype)
gram = np.dot(X, X.T)
for i in prange(n):
for j in range(i+1,n):
out[condensed_idx(i,j,n)] = gram[i,i] - 2.*gram[i,j] + gram[j,j]
return out
@njit(parallel=True)
def distance_matrix_cosine_cpu(X):
n = X.shape[0]
out = np.dot(X, X.T)
for i in prange(n):
for j in range(i+1,n):
if out[i,i]==0. or out[j,j]==0.:
out[i,j] = 1.
else:
out[i,j] = abs(1.-out[i,j] / (out[i,i] * out[j,j]) ** .5) # Return absolute value to avoid small negative value due to rounding
out[j,i] = out[i,j]
np.fill_diagonal(out, 0.)
return out
@njit(parallel=True)
def distance_matrix_cosine_XY_cpu(X, Y):
nX = X.shape[0]
nY = Y.shape[0]
out = np.dot(X, Y.T)
NX = np.sum(X*X, axis=1)
NY = np.sum(Y*Y, axis=1)
for i in prange(nX):
for j in range(nY):
if NX[i]==0. or NY[j]==0.:
out[i,j] = 1.
else:
out[i,j] = abs(1.0-out[i,j] / (NX[i] * NY[j]) ** .5) # Return absolute value to avoid small negative value due to rounding
return out
@njit(parallel=True)
def distance_matrix_cosine_XY_weighted_cpu(X, Y, weightsX, weightsY):
nX = X.shape[0]
nY = Y.shape[0]
n_weightsX = weightsX.shape[0]
n_weightsY = weightsY.shape[0]
weighted = (n_weightsX > 0) and (n_weightsY > 0)
out = np.dot(X, Y.T)
NX = np.sum(X*X, axis=1)
NY = np.sum(Y*Y, axis=1)
if weighted:
for i in prange(nX):
for j in range(nY):
if NX[i]==0. or NY[j]==0.:
out[i,j] = 1. * weightsX[i] * weightsY[j]
else:
out[i,j] = abs(1.0-out[i,j] / (NX[i] * NY[j]) ** .5) * weightsX[i] * weightsY[j]
else:
for i in prange(nX):
for j in range(nY):
if NX[i]==0. or NY[j]==0.:
out[i,j] = 1.
else:
out[i,j] = abs(1.0-out[i,j] / (NX[i] * NY[j]) ** .5)
return out
@njit(parallel=True)
def distance_matrix_cosine_condensed_cpu(X):
n = X.shape[0]
condensed_len = int((n*(n-1))/2)
gram = np.dot(X, X.T)
out = np.empty(condensed_len, dtype = X.dtype)
for i in prange(n):
for j in range(i+1,n):
if gram[i,i]==0. or gram[j,j]==0.:
out[condensed_idx(i,j,n)] = 1.
else:
out[condensed_idx(i,j,n)] = 1.-gram[i,j] / (gram[i,i] * gram[j,j]) ** .5
return out
@njit(parallel=True)
def distance_matrix_cpu(X, distance_measure=0):
n = X.shape[0]
out = np.empty((n,n), dtype = X.dtype)
for i in prange(n):
u = X[i]
out[i,i] = 0.0
for j in range(i+1, n):
d = calc_distance(u, X[j], distance_measure)
out[i,j] = d
out[j,i] = d
return out
@njit(parallel=True)
def distance_matrix_XY_cpu(X, Y, distance_measure=0):
nX = X.shape[0]
nY = Y.shape[0]
out = np.empty((nX, nY), dtype = X.dtype)
for i in prange(nX):
u = X[i]
for j in range(0, nY):
out[i,j] = calc_distance(u, Y[j], distance_measure)
return out
@njit(parallel=True)
def distance_matrix_condensed_cpu(X, distance_measure=0):
n = X.shape[0]
condensed_len = int((n*(n-1))/2)
out = np.empty(condensed_len, dtype = X.dtype)
for i in prange(n):
u = X[i]
for j in range(i+1, n):
out[condensed_idx(i,j,n)] = calc_distance(u, X[j], distance_measure)
return out
def pairwise_distances_cpu(X, Y = None, distance_measure=0, condensed = True):
assert ((Y is None) or (X.dtype == Y.dtype)) and (X.dtype == np.float32 or X.dtype == np.float64)
if distance_measure == 0:
if Y is None:
if condensed:
D = distance_matrix_euclidean2_condensed_cpu(X)
else:
D = distance_matrix_euclidean2_cpu(X)
else:
D = distance_matrix_euclidean2_XY_cpu(X,Y)
else:
if Y is None:
if condensed:
D = distance_matrix_cosine_condensed_cpu(X)
else:
D = distance_matrix_cosine_cpu(X)
else:
D = distance_matrix_cosine_XY_cpu(X,Y)
return D
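# Illustrative usage sketch (assumption, not original code): CPU pairwise distances in condensed
# and full form; distance_measure=0 gives squared Euclidean distances, 1 gives cosine distances.
def _example_pairwise_distances_cpu():
    X = np.random.rand(100, 8)
    Y = np.random.rand(20, 8)
    D_condensed = pairwise_distances_cpu(X, condensed=True)         # shape (100*99/2,)
    D_full = pairwise_distances_cpu(X, condensed=False)             # shape (100, 100)
    D_xy_cosine = pairwise_distances_cpu(X, Y, distance_measure=1)  # shape (100, 20)
    return D_condensed, D_full, D_xy_cosine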
@cuda.jit
def distance_matrix_euclidean2_XY_gpu(X, Y, NX, NY, out):
i,j = cuda.grid(2)
nX = X.shape[0]
nY = Y.shape[0]
if (i < nX) and (j < nY):
out[i,j] = NX[i] - 2.*out[i,j] + NY[j]
# Diagonal must be filled separately by zeros
@cuda.jit
def distance_matrix_euclidean2_gpu(X, out):
i,j = cuda.grid(2)
n = X.shape[0]
if (i < n) and (j < n) and (j > i):
d = out[i,i] - 2.*out[i,j] + out[j,j]
out[i,j] = d
out[j,i] = d
@cuda.jit
def distance_matrix_euclidean2_condensed_gpu(X, gram, out):
i,j = cuda.grid(2)
n = X.shape[0]
if (i < n) and (j > i) and (j < n):
out[condensed_idx(i,j,n)] = gram[i,i] - 2.*gram[i,j] + gram[j,j]
@cuda.jit
def distance_matrix_cosine_XY_gpu(X, Y, NX, NY, out):
i,j = cuda.grid(2)
nX = X.shape[0]
nY = Y.shape[0]
if (i < nX) and (j < nY):
if NX[i]==0. or NY[j]==0.:
out[i,j] = 1.
else:
out[i,j] = 1.-out[i,j] / (NX[i] * NY[j]) ** .5
# Diagonal must be filled separately by zeros
@cuda.jit
def distance_matrix_cosine_gpu(X, out):
i,j = cuda.grid(2)
n = X.shape[0]
if (i < n) and (j < n) and (j > i):
if out[i,i]==0. or out[j,j]==0.:
out[i,j] = 1.
else:
out[i,j] = 1.-out[i,j] / (out[i,i] * out[j,j]) ** .5
@cuda.jit
def distance_matrix_cosine_condensed_gpu(X, gram, out):
i,j = cuda.grid(2)
n = X.shape[0]
if (i < n) and (j > i) and (j < n):
if gram[i,i]==0. or gram[j,j]==0.:
out[condensed_idx(i,j,n)] = 1.
else:
out[condensed_idx(i,j,n)] = 1.-gram[i,j] / (gram[i,i] * gram[j,j]) ** .5
def pairwise_distances_gpu(X, Y = None, distance_measure=0, condensed = True, gpu_device_id = 0, threads_per_block = (4, 16)):
assert (len(X.shape) == 2) and (X.shape[0] > 0)
available_gpu_ids = set([gpu.id for gpu in nb.cuda.gpus.lst])
assert (gpu_device_id > -1) and (gpu_device_id in available_gpu_ids)
assert ((Y is None) or (X.dtype == Y.dtype)) and (X.dtype == np.float32 or X.dtype == np.float64)
gpu = nb.cuda.select_device(gpu_device_id)
cp.cuda.Device(gpu_device_id).use()
nX = X.shape[0]
X_gpu = cp.asarray(X)
if Y is None:
nY = 0
grid_dim = (int(nX/threads_per_block[0] + 1), int(nX/threads_per_block[1] + 1))
if condensed:
            condensed_len = condensed_size(nX)
gram_gpu = X_gpu.dot(X_gpu.T)
out_gpu = cp.empty(condensed_len, dtype = X_gpu.dtype)
if distance_measure == 0:
distance_matrix_euclidean2_condensed_gpu[grid_dim, threads_per_block](X_gpu, gram_gpu, out_gpu)
else:
distance_matrix_cosine_condensed_gpu[grid_dim, threads_per_block](X_gpu, gram_gpu, out_gpu)
else:
out_gpu = X_gpu.dot(X_gpu.T)
if distance_measure == 0:
distance_matrix_euclidean2_gpu[grid_dim, threads_per_block](X_gpu, out_gpu)
else:
distance_matrix_cosine_gpu[grid_dim, threads_per_block](X_gpu, out_gpu)
cp.fill_diagonal(out_gpu, 0.)
else:
assert (len(Y.shape) == 2) and (Y.shape[0] > 0) and (X.shape[1]==Y.shape[1])
nY = Y.shape[0]
Y_gpu = cp.asarray(Y)
grid_dim = (int(nX/threads_per_block[0] + 1), int(nY/threads_per_block[1] + 1))
out_gpu = cp.dot(X_gpu, Y_gpu.T)
NX_gpu = cp.sum(X_gpu*X_gpu, axis=1)
NY_gpu = cp.sum(Y_gpu*Y_gpu, axis=1)
if distance_measure == 0:
distance_matrix_euclidean2_XY_gpu[grid_dim, threads_per_block](X_gpu, Y_gpu, NX_gpu, NY_gpu, out_gpu)
else:
distance_matrix_cosine_XY_gpu[grid_dim, threads_per_block](X_gpu, Y_gpu, NX_gpu, NY_gpu, out_gpu)
out = cp.asnumpy(out_gpu)
return out
# gpu_device_id - ID of the GPU device that will be used to perform the calculations
# if gpu_device_id = -1 then the CPU will be used for the calculations
def distance_matrix(X, Y = None, distance_measure=0, condensed = True, gpu_device_id = -1, threads_per_block = (4, 16)):
if gpu_device_id > -1:
        out = pairwise_distances_gpu(X, Y, distance_measure, condensed, gpu_device_id, threads_per_block)
else:
out = pairwise_distances_cpu(X, Y, distance_measure, condensed)
return out
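# Illustrative usage sketch (assumption, not original code): the dispatcher defaults to the CPU
# path; passing a valid gpu_device_id (e.g. 0) routes the same call through the CUDA kernels.
def _example_distance_matrix_dispatch():
    X = np.random.rand(200, 16).astype(np.float32)
    D_cpu = distance_matrix(X, condensed=True, gpu_device_id=-1)    # condensed form, CPU
    # D_gpu = distance_matrix(X, condensed=True, gpu_device_id=0)   # same call on GPU 0, if available
    return D_cpu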
# # https://stackoverflow.com/questions/58002793/how-can-i-use-multiple-gpus-in-cupy
# # https://github.com/numba/numba/blob/master/numba/cuda/tests/cudapy/test_multigpu.py
# # usage
# def pairwise_distances_multigpu(X, Y = None, distance_measure=0, devices = [], memory_usage = 0.95, threads_per_block = (4, 16)):
# assert ((Y is None) or (X.dtype == Y.dtype)) and (X.dtype == np.float32 or X.dtype == np.float64)
# assert memory_usage > 0. and memory_usage <= 1.
# nX = X.shape[0]
# n_devices = len(devices)
# available_devices = [gpu.id for gpu in nb.cuda.gpus.lst]
# if n_devices == 0:
# used_devices = available_devices
# else:
# used_devices = list(set(devices).intersection(set(available_devices)))
# n_used_devices = len(used_devices)
# capacities = np.empty(n_used_devices)
# for i in range(n_used_devices):
# capacities[i] = nb.cuda.current_context(used_devices[i]).get_memory_info().free * memory_usage
# full_capacity = np.sum(capacities)
# fractions = capacities / full_capacity
# n_elements = condensed_size(nX)
# if X.dtype == np.float32:
# n_bytes = n_elements * 4
# else:
# n_bytes = n_elements * 8
# n_portions = n_bytes / full_capacity
######################################
#Multi-GPU distance matrix calculation
######################################
# Split the dataset into portions and calculate the distance matrix for each portion on multiple GPUs in parallel.
# X: a feature array (one sample per row);
# Y: an optional second feature array;
# D: a distance matrix D such that D_{i, j} is the distance between the ith and jth vectors of the given matrix X, if Y is None. If Y is not None, then D_{i, j} is the distance between the ith array from X and the jth array from Y.
# sizeX: portion size for X;
# sizeY: portion size for Y;
# If condensed = True, then the condensed representation of matrix D will be used;
# distance_measure: 0 - squared Euclidean distance; otherwise - cosine distance;
# gpu_device_ids: list of GPU ids that will be used for calculations
def distance_matrix_multi_gpu(X, Y = None, sizeX = None, sizeY = None, condensed = True, distance_measure=0, gpu_device_ids = [], show_progress = False, threads_per_block = (4, 16)):
@njit(parallel = True)
def aggregation_1D_1D(out, size, sub_mat, row, col):
sub_mat_condensed_len = sub_mat.shape[0]
sub_mat_size = matrix_size(sub_mat)
for i in prange(sub_mat_condensed_len):
x,y = regular_idx(i, sub_mat_size)
x += row
y += col
if x < y:
out[condensed_idx(x,y,size)] = sub_mat[i]
@njit(parallel = True)
def aggregation_1D_2D(out, size, sub_mat, row, col):
n_rows, n_cols = sub_mat.shape
for i in prange(n_rows):
for j in range(n_cols):
x = row + i
y = col + j
if x < y:
out[condensed_idx(x,y,size)] = sub_mat[i,j]
@njit(parallel = True)
def aggregation_2D_1D(out, sub_mat, row, col):
sub_mat_condensed_len = sub_mat.shape[0]
sub_mat_size = matrix_size(sub_mat)
for i in prange(sub_mat_condensed_len):
x,y = regular_idx(i, sub_mat_size)
x += row
y += col
out[x,y] = sub_mat[i]
out[y,x] = sub_mat[i]
@njit(parallel = True)
def aggregation_2D_2D_symmetric(out, sub_mat, row, col):
n_rows, n_cols = sub_mat.shape
for i in prange(n_rows):
for j in range(n_cols):
x = row + i
y = col + j
if x < y:
out[x,y] = sub_mat[i,j]
out[y,x] = sub_mat[i,j]
@njit(parallel = True)
def aggregation_2D_2D_asymmetric(out, sub_mat, row, col):
n_rows, n_cols = sub_mat.shape
for i in prange(n_rows):
for j in range(n_cols):
x = row + i
y = col + j
out[x,y] = sub_mat[i,j]
def calc_submatrix(X, Y, i, j, row1, row2, col1, col2, out, threads_per_block, distance_measure):
NX = X.shape[0]
nX = row2-row1
nY = col2-col1
symmetric = Y is None
is_condensed_out = out.ndim == 1
stream = cuda.stream()
grid_dim = (int(nX/threads_per_block[0] + 1), int(nY/threads_per_block[1] + 1))
X_cu = cuda.to_device(X[row1:row2], stream=stream)
if symmetric and (row1 < col2-1):
if (i == j) and (nX == nY):
sub_mat_cu = cuda.device_array(shape=int((nX*(nX-1))/2), dtype = X_cu.dtype, stream=stream)
distance_matrix_condensed_gpu[grid_dim, threads_per_block, stream](X_cu, sub_mat_cu, distance_measure)
sub_mat = sub_mat_cu.copy_to_host(stream=stream)
if is_condensed_out:
aggregation_1D_1D(out, NX, sub_mat, row1, col1)
else:
aggregation_2D_1D(out, sub_mat, row1, col1)
else:
Y_cu = cuda.to_device(X[col1:col2], stream=stream)
sub_mat_cu = cuda.device_array(shape=(nX,nY), dtype = X_cu.dtype, stream=stream)
distance_matrix_XY_part_of_symmetric_gpu[grid_dim, threads_per_block, stream](row1, col1, X_cu, Y_cu, sub_mat_cu, distance_measure)
sub_mat = sub_mat_cu.copy_to_host(stream=stream)
if is_condensed_out:
aggregation_1D_2D(out, NX, sub_mat, row1, col1)
else:
aggregation_2D_2D_symmetric(out, sub_mat, row1, col1)
elif (not symmetric):
Y_cu = cuda.to_device(Y[col1:col2], stream=stream)
sub_mat_cu = cuda.device_array(shape=(nX,nY), dtype = Y_cu.dtype, stream=stream)
distance_matrix_XY_gpu[grid_dim, threads_per_block, stream](X_cu, Y_cu, sub_mat_cu, distance_measure)
sub_mat = sub_mat_cu.copy_to_host(stream=stream)
aggregation_2D_2D_asymmetric(out, sub_mat, row1, col1)
def calc_portion(portion, bounds, X, Y, out, device_id, threads_per_block, distance_measure):
if device_id > -1:
gpu = nb.cuda.select_device(device_id)
for B in [bounds[i] for i in portion]:
calc_submatrix(X, Y, B[0], B[1], B[2], B[3], B[4], B[5], out, threads_per_block, distance_measure)
if show_progress:
print(device_id, B)
available_gpu_device_ids = [gpu.id for gpu in nb.cuda.gpus.lst]
symmetric = Y is None
assert (len(X.shape) == 2) and (X.shape[0] > 0)
assert symmetric or ((len(Y.shape) == 2) and (Y.shape[0] > 0) and (Y.shape[1]==X.shape[1]))
NX = X.shape[0]
if (sizeX is None) or (sizeX < 1) or (sizeX > NX):
sizeX = NX
else:
sizeX = int(sizeX)
n_partsX = math.ceil(NX / sizeX)
if symmetric:
NY = NX
sizeY = sizeX
n_partsY = n_partsX
else:
NY = Y.shape[0]
if (sizeY is None) or (sizeY < 1) or (sizeY > NY):
sizeY = NY
else:
sizeY = int(sizeY)
n_partsY = math.ceil(NY / sizeY)
if condensed and symmetric:
out = np.empty(shape = int((NX*(NX-1))/2), dtype = X.dtype)
else:
out = np.empty((NX,NY), dtype = X.dtype)
bounds = []
for i in range(n_partsX):
row1 = i*sizeX
row2 = min(row1 + sizeX, NX)
for j in range(n_partsY):
col1 = j*sizeY
col2 = min(col1 + sizeY, NY)
bounds.append((i, j, row1, row2, col1, col2))
n_bounds = len(bounds)
if n_bounds > 0:
used_gpu_device_ids = list(set(gpu_device_ids).intersection(set(available_gpu_device_ids)))
n_gpu = len(used_gpu_device_ids)
if (n_gpu > 0) and (n_bounds > 1):
sequence = np.random.permutation(n_bounds)
if n_bounds < n_gpu:
n_portions = n_bounds
portion_size = 1
else:
n_portions = n_gpu
portion_size = n_bounds // n_gpu
portions = []
for i in range(n_portions):
a = i * portion_size
if i < n_portions-1:
b = a + portion_size
else:
b = n_bounds
portions.append(sequence[a:b])
threads = [threading.Thread(target=calc_portion, args=(portions[i], bounds, X, Y, out, used_gpu_device_ids[i], threads_per_block, distance_measure)) for i in range(n_portions)]
for th in threads:
th.start()
for th in threads:
th.join()
else:
for B in bounds:
calc_submatrix(X, Y, B[0], B[1], B[2], B[3], B[4], B[5], out, threads_per_block, distance_measure)
return out
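# Illustrative usage sketch (assumption, not original code): chunked computation of a large
# condensed distance matrix split across GPUs 0 and 1; actually running it requires CUDA devices.
def _example_distance_matrix_multi_gpu():
    X = np.random.rand(20000, 32).astype(np.float32)
    D = distance_matrix_multi_gpu(X, sizeX=5000, condensed=True,
                                  distance_measure=0, gpu_device_ids=[0, 1])
    return D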
@njit
def search_sorted(X, val):
n = X.shape[0]
ind = -1
if n > 0:
for i in range(0,n):
if val <= X[i]:
ind = i
break
if ind == -1:
ind = n
return ind
@njit
def cum_sum(X, cumsum):
summ = 0.0
for i in range(X.shape[0]):
if math.isnan(X[i]):
cumsum[i] = X[i]
else:
summ += X[i]
cumsum[i] = summ
@njit
def random_choice(cumsum):
potential = cumsum[cumsum.shape[0]-1]
rand_val = np.random.random_sample() * potential
ind = search_sorted(cumsum, rand_val)
max_ind = cumsum.shape[0]-1
if ind > max_ind:
ind = max_ind
return ind
@njit
def cum_search(X, vals, out):
n = X.shape[0]
n_vals = vals.shape[0]
assert n>0 and n_vals == out.shape[0] and n_vals>0
cum_sum = 0.0
ind_vals = 0
sorted_inds = np.argsort(vals)
for i in range(n):
if not math.isnan(X[i]):
cum_sum += X[i]
while vals[sorted_inds[ind_vals]] <= cum_sum:
out[sorted_inds[ind_vals]] = i
ind_vals += 1
if ind_vals == n_vals:
return
out[sorted_inds[ind_vals: n_vals]] = n-1
@njit(parallel = True)
def k_means_pp_naive(samples, sample_weights, n_clusters, distance_measure=0):
n_samples, n_features = samples.shape
n_sample_weights, = sample_weights.shape
assert ((n_samples == n_sample_weights) or (n_sample_weights == 0))
centroid_inds = np.full(n_clusters, -1)
if (n_samples > 0) and (n_features > 0):
if n_sample_weights > 0:
weights = np.copy(sample_weights)
else:
weights = np.full(n_samples, 1.0)
cumsum = np.empty(n_samples)
cum_sum(weights, cumsum)
new_centroid = random_choice(cumsum)
n_centroids = 0
centroid_inds[n_centroids] = new_centroid
n_centroids += 1
while n_centroids < n_clusters:
for i in prange(n_samples):
weights[i] = 0.0
for i in prange(n_samples):
min_dist = np.inf
for j in range(n_centroids):
dist = calc_distance(samples[i], samples[centroid_inds[j]], distance_measure)
                    if n_sample_weights > 0:
                        dist *= sample_weights[i]*sample_weights[centroid_inds[j]]
if dist < min_dist:
min_dist = dist
if min_dist < np.inf:
weights[i] = min_dist
cum_sum(weights, cumsum)
new_centroid = random_choice(cumsum)
centroid_inds[n_centroids] = new_centroid
n_centroids += 1
return centroid_inds
@njit(parallel=True)
def additional_centers_naive(samples, sample_weights, centroids, n_additional_centers=1, distance_measure=0):
n_samples, n_features = samples.shape
n_sample_weights, = sample_weights.shape
n_centers = centroids.shape[0]
assert ((n_samples == n_sample_weights) or (n_sample_weights == 0))
center_inds = np.full(n_additional_centers, -1)
nondegenerate_mask = np.sum(np.isnan(centroids), axis = 1) == 0
n_nondegenerate_clusters = np.sum(nondegenerate_mask)
if (n_samples > 0) and (n_features > 0) and (n_additional_centers > 0):
cumsum = np.empty(n_samples)
n_added_centers = 0
if n_nondegenerate_clusters == 0:
if n_sample_weights > 0:
cum_sum(sample_weights, cumsum)
center_inds[0] = random_choice(cumsum)
else:
center_inds[0] = np.random.randint(n_samples)
n_added_centers += 1
nondegenerate_centroids = centroids[nondegenerate_mask]
weights = np.empty(n_samples)
for c in range(n_added_centers, n_additional_centers):
for i in prange(n_samples):
weights[i] = 0.0
for i in prange(n_samples):
min_dist = np.inf
for j in range(n_nondegenerate_clusters):
dist = calc_distance(samples[i], nondegenerate_centroids[j], distance_measure)
if dist < min_dist:
min_dist = dist
for j in range(n_added_centers):
dist = calc_distance(samples[i], samples[center_inds[j]], distance_measure)
if dist < min_dist:
min_dist = dist
if min_dist < np.inf:
                    if n_sample_weights > 0:
                        weights[i] = min_dist * sample_weights[i]
                    else:
                        weights[i] = min_dist
cum_sum(weights, cumsum)
new_centroid = random_choice(cumsum)
center_inds[c] = new_centroid
n_added_centers += 1
return center_inds
# k-means++ : algorithm for choosing the initial cluster centers (or "seeds") for the k-means clustering algorithm
# Note: the samples should be well shuffled before seeding (open question flagged by the author).
@njit(parallel=True)
def k_means_pp(samples, sample_weights, n_centers=3, n_candidates=3, distance_measure=0):
n_samples, n_features = samples.shape
n_sample_weights, = sample_weights.shape
assert ((n_samples == n_sample_weights) or (n_sample_weights == 0))
center_inds = np.full(n_centers, -1)
if (n_samples > 0) and (n_features > 0) and (n_centers > 0):
if (n_candidates <= 0) or (n_candidates is None):
n_candidates = 2 + int(np.log(n_centers))
if n_sample_weights > 0:
cumsum = np.empty(n_samples)
cum_sum(sample_weights, cumsum)
center_inds[0] = random_choice(cumsum)
else:
center_inds[0] = np.random.randint(n_samples)
#dist_mat = np.empty((1,n_samples))
indices = np.full(1, center_inds[0])
if distance_measure == 1:
dist_mat = distance_matrix_cosine_XY_weighted_cpu(samples[indices], samples, sample_weights[indices], sample_weights)
else:
dist_mat = distance_matrix_euclidean2_XY_weighted_cpu(samples[indices], samples, sample_weights[indices], sample_weights)
closest_dist_sq = dist_mat[0]
current_pot = 0.0
for i in prange(n_samples):
current_pot += closest_dist_sq[i]
candidate_ids = np.full(n_candidates, -1)
#distance_to_candidates = np.empty((n_candidates,n_samples))
candidates_pot = np.empty(n_candidates)
for c in range(1,n_centers):
rand_vals = np.random.random_sample(n_candidates) * current_pot
cum_search(closest_dist_sq, rand_vals, candidate_ids)
if distance_measure == 1:
distance_to_candidates = distance_matrix_cosine_XY_weighted_cpu(samples[candidate_ids], samples, sample_weights[candidate_ids], sample_weights)
else:
distance_to_candidates = distance_matrix_euclidean2_XY_weighted_cpu(samples[candidate_ids], samples, sample_weights[candidate_ids], sample_weights)
for i in prange(n_candidates):
candidates_pot[i] = 0.0
for j in range(n_samples):
distance_to_candidates[i,j] = min(distance_to_candidates[i,j],closest_dist_sq[j])
candidates_pot[i] += distance_to_candidates[i,j]
best_candidate = np.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
for i in prange(n_samples):
closest_dist_sq[i] = distance_to_candidates[best_candidate][i]
center_inds[c] = candidate_ids[best_candidate]
return center_inds
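# Illustrative usage sketch (assumption, not original code): k-means++ seeding with unit sample
# weights; the returned indices point into `samples` and can be used as initial centroids.
def _example_k_means_pp_seeding():
    samples, _, _ = generate_blobs_on_grid(n_samples=2000, grid_size=3, n_features=2)
    sample_weights = np.ones(samples.shape[0])
    center_inds = k_means_pp(samples, sample_weights, n_centers=9, n_candidates=3)
    return samples[center_inds]               # one seed row per cluster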
# choosing the new n additional centers for existing ones using the k-means++ logic
@njit(parallel=True)
def additional_centers(samples, sample_weights, centroids, n_additional_centers=3, n_candidates=3, distance_measure=0):
n_samples, n_features = samples.shape
n_sample_weights, = sample_weights.shape
n_centers = centroids.shape[0]
assert ((n_samples == n_sample_weights) or (n_sample_weights == 0))
center_inds = np.full(n_additional_centers, -1)
nondegenerate_mask = np.sum(np.isnan(centroids), axis = 1) == 0
n_nondegenerate_clusters = np.sum(nondegenerate_mask)
if (n_samples > 0) and (n_features > 0) and (n_additional_centers > 0):
if (n_candidates <= 0) or (n_candidates is None):
n_candidates = 2 + int(np.log(n_nondegenerate_clusters+n_additional_centers))
if n_nondegenerate_clusters > 0:
closest_dist_sq = np.full(n_samples, np.inf)
#distance_to_centroids = np.empty((n_nondegenerate_clusters, n_samples))
centroid_weights = np.ones(n_nondegenerate_clusters)
if distance_measure == 1:
distance_to_centroids = distance_matrix_cosine_XY_weighted_cpu(centroids[nondegenerate_mask], samples, centroid_weights, sample_weights)
else:
distance_to_centroids = distance_matrix_euclidean2_XY_weighted_cpu(centroids[nondegenerate_mask], samples, centroid_weights, sample_weights)
current_pot = 0.0
for i in prange(n_samples):
for j in range(n_nondegenerate_clusters):
closest_dist_sq[i] = min(distance_to_centroids[j,i],closest_dist_sq[i])
current_pot += closest_dist_sq[i]
n_added_centers = 0
else:
if n_sample_weights > 0:
cumsum = np.empty(n_samples)
cum_sum(sample_weights, cumsum)
center_inds[0] = random_choice(cumsum)
else:
center_inds[0] = np.random.randint(n_samples)
#dist_mat = np.empty((1,n_samples))
indices = np.full(1, center_inds[0])
if distance_measure == 1:
dist_mat = distance_matrix_cosine_XY_weighted_cpu(samples[indices], samples, sample_weights[indices], sample_weights)
else:
dist_mat = distance_matrix_euclidean2_XY_weighted_cpu(samples[indices], samples, sample_weights[indices], sample_weights)
closest_dist_sq = dist_mat[0]
current_pot = 0.0
for i in prange(n_samples):
current_pot += closest_dist_sq[i]
n_added_centers = 1
candidate_ids = np.full(n_candidates, -1)
#distance_to_candidates = np.empty((n_candidates,n_samples))
candidates_pot = np.empty(n_candidates)
for c in range(n_added_centers, n_additional_centers):
rand_vals = np.random.random_sample(n_candidates) * current_pot
cum_search(closest_dist_sq, rand_vals, candidate_ids)
if distance_measure == 1:
distance_to_candidates = distance_matrix_cosine_XY_weighted_cpu(samples[candidate_ids], samples, sample_weights[candidate_ids], sample_weights)
else:
distance_to_candidates = distance_matrix_euclidean2_XY_weighted_cpu(samples[candidate_ids], samples, sample_weights[candidate_ids], sample_weights)
for i in prange(n_candidates):
candidates_pot[i] = 0.0
for j in range(n_samples):
distance_to_candidates[i,j] = min(distance_to_candidates[i,j],closest_dist_sq[j])
candidates_pot[i] += distance_to_candidates[i,j]
best_candidate = np.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
for i in prange(n_samples):
closest_dist_sq[i] = distance_to_candidates[best_candidate][i]
center_inds[c] = candidate_ids[best_candidate]
return center_inds
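# Illustrative usage sketch (assumption, not original code): extending an existing set of
# centroids with two extra centers chosen by the same k-means++ rule.
def _example_additional_centers():
    samples, _, true_centroids = generate_blobs_on_grid(n_samples=2000, grid_size=2, n_features=2)
    sample_weights = np.ones(samples.shape[0])
    extra_inds = additional_centers(samples, sample_weights, true_centroids,
                                    n_additional_centers=2, n_candidates=3)
    return np.vstack((true_centroids, samples[extra_inds]))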
@njit
def check_shapes(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives):
assert samples.ndim == 2
n_samples, n_features = samples.shape
assert (sample_weights.shape == (n_samples,)) or (sample_weights.shape == (0,))
assert sample_membership.shape == (n_samples,)
assert (sample_objectives.shape == (n_samples,)) or (sample_objectives.shape[0] == 0)
assert (centroids.ndim == 2) and (centroids.shape[1] == n_features)
n_clusters = centroids.shape[0]
assert centroid_sums.shape == (n_clusters, n_features)
assert centroid_counts.shape == (n_clusters, )
assert centroid_objectives.shape == (n_clusters,)
@njit
def empty_state(n_samples, n_features, n_clusters):
sample_weights = np.ones(n_samples)
sample_membership = np.full(n_samples, -1)
sample_objectives = np.full(n_samples, np.nan)
centroids = np.full((n_clusters, n_features), np.nan)
centroid_sums = np.full((n_clusters, n_features), np.nan)
centroid_counts = np.full(n_clusters, 0.0)
centroid_objectives = np.full(n_clusters, np.nan)
return sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives
@njit
def sub_sections(n_samples, n_sections):
    n_samples = int(abs(n_samples)) # TODO: extend this approach to the other functions
n_sections = int(abs(n_sections))
samples_per_section, n_extras = divmod(n_samples, n_sections)
if samples_per_section == 0:
n_sections = n_extras
points = np.full(n_sections, samples_per_section)
for i in range(n_extras):
points[i] += 1
cumsum = 0
for i in range(n_sections):
cumsum += points[i]
points[i] = cumsum
sections = np.empty((n_sections,2), dtype = points.dtype)
start_ind = 0
for i in range(n_sections):
sections[i,0] = start_ind
sections[i,1] = points[i]
start_ind = points[i]
return sections
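# Illustrative sketch (assumption, not original code): splitting 10 samples into 4 nearly equal
# contiguous [start, end) ranges, as used to divide the work between Numba threads.
def _example_sub_sections():
    ranges = sub_sections(10, 4)
    # ranges -> [[0, 3], [3, 6], [6, 8], [8, 10]]
    return ranges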
@njit
def random_membership(n_samples, n_clusters, sample_membership):
sample_membership[:] = np.random.randint(0, n_clusters, n_samples)
@njit(parallel = True)
def initialize_by_membership(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives):
n_samples, n_features = samples.shape
n_clusters = centroids.shape[0]
n_sample_weights = sample_weights.shape[0]
centroids.fill(np.nan)
centroid_sums.fill(0.0)
centroid_counts.fill(0.0)
centroid_objectives.fill(0.0)
thread_ranges = sub_sections(n_samples, nb.config.NUMBA_NUM_THREADS)
n_threads = thread_ranges.shape[0]
thread_centroid_sums = np.zeros((n_threads,n_clusters,n_features))
thread_centroid_counts = np.zeros((n_threads,n_clusters))
if n_sample_weights > 0:
for i in prange(n_threads):
for j in range(thread_ranges[i,0],thread_ranges[i,1]):
centroid_ind = sample_membership[j]
for k in range(n_features):
thread_centroid_sums[i,centroid_ind,k] += sample_weights[j] * samples[j,k]
thread_centroid_counts[i,centroid_ind] += sample_weights[j]
else:
for i in prange(n_threads):
for j in range(thread_ranges[i,0],thread_ranges[i,1]):
centroid_ind = sample_membership[j]
for k in range(n_features):
thread_centroid_sums[i,centroid_ind,k] += samples[j,k]
thread_centroid_counts[i,centroid_ind] += 1.0
for i in range(n_threads):
for j in range(n_clusters):
centroid_counts[j] += thread_centroid_counts[i,j]
for k in range(n_features):
centroid_sums[j,k] += thread_centroid_sums[i,j,k]
for i in range(n_clusters):
if centroid_counts[i] > 0.0:
for j in range(n_features):
centroids[i,j] = centroid_sums[i,j] / centroid_counts[i]
objective = 0.0
for i in range(n_samples):
centroid_ind = sample_membership[i]
dist = 0.0
for j in range(n_features):
dist += (centroids[centroid_ind,j]-samples[i,j])**2
if n_sample_weights > 0:
sample_objectives[i] = sample_weights[i]*dist
else:
sample_objectives[i] = dist
centroid_objectives[centroid_ind] += dist
objective += dist
return objective
@njit
def cluster_objective_change(sample, sample_weight, centroid, centroid_count):
n_features = sample.shape[0]
dist = 0.0
for i in range(n_features):
dist += (centroid[i] - sample[i])**2
return centroid_count/(centroid_count+sample_weight)*dist
@njit
def reallocation_effect(sample, sample_weight, origin_centroid, origin_centroid_counts, destination_centroid, destination_centroid_counts):
origin_objective_change = cluster_objective_change(sample,
-sample_weight,
origin_centroid,
origin_centroid_counts)
destination_objective_change = cluster_objective_change(sample,
sample_weight,
destination_centroid,
destination_centroid_counts)
objective_change = destination_objective_change - origin_objective_change
return objective_change
# Hartigan–Wong method
# first-improvement strategy
# https://en.wikipedia.org/wiki/K-means_clustering#Hartigan%E2%80%93Wong_method
@njit
def h_means_first(samples, sample_weights, sample_membership, centroids, centroid_sums, centroid_counts, centroid_objectives, objective, max_iters = 300, tol=0.0001):
n_samples, n_features = samples.shape
n_clusters = centroids.shape[0]
n_sample_weights = sample_weights.shape[0]
sample_objectives = np.full(0, np.nan)
check_shapes(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives)
improved = True
n_iters = 0
tolerance = np.inf
while improved and (objective > 0.0) and (n_iters < max_iters) and (tolerance > tol):
improved = False
previous_objective = objective
for i in range(n_samples):
sample = samples[i]
if n_sample_weights > 0:
sample_weight = sample_weights[i]
else:
sample_weight = 1.0
weighted_sample = sample*sample_weight
cluster_i = sample_membership[i]
count_i = centroid_counts[cluster_i]
if count_i-sample_weight != 0.0:
objective_change_i = cluster_objective_change(sample, -sample_weight, centroids[cluster_i], count_i)
else:
objective_change_i = centroid_objectives[cluster_i]
best_cluster_j = -1
best_objective_change = np.inf
best_objective_change_j = np.nan
for cluster_j in range(n_clusters):
if cluster_j != cluster_i:
count_j = centroid_counts[cluster_j]
if count_j+sample_weight != 0.0:
objective_change_j = cluster_objective_change(sample, sample_weight, centroids[cluster_j], count_j)
objective_change = objective_change_j - objective_change_i
if objective_change < best_objective_change:
best_cluster_j = cluster_j
best_objective_change_j = objective_change_j
best_objective_change = objective_change
if (best_cluster_j > -1) and (best_objective_change < 0.0):
centroid_sums[cluster_i] -= weighted_sample
centroid_counts[cluster_i] -= sample_weight
if centroid_counts[cluster_i] != 0.0:
centroids[cluster_i] = centroid_sums[cluster_i]/centroid_counts[cluster_i]
else:
centroids[cluster_i].fill(np.nan)
centroid_objectives[cluster_i] -= objective_change_i
sample_membership[i] = best_cluster_j
centroid_sums[best_cluster_j] += weighted_sample
centroid_counts[best_cluster_j] += sample_weight
if centroid_counts[best_cluster_j] != 0.0:
centroids[best_cluster_j] = centroid_sums[best_cluster_j]/centroid_counts[best_cluster_j]
else:
centroids[best_cluster_j].fill(np.nan)
centroid_objectives[best_cluster_j] += best_objective_change_j
objective += best_objective_change
improved = True
n_iters += 1
tolerance = 1 - objective/previous_objective
return objective, n_iters
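# Illustrative sketch (assumption, not original code): one first-improvement Hartigan-Wong pass
# starting from a random membership, using the state helpers defined above.
def _example_h_means_first():
    samples, _, _ = generate_blobs_on_grid(n_samples=1000, grid_size=2, n_features=2)
    n_samples, n_features = samples.shape
    n_clusters = 4
    (sample_weights, sample_membership, sample_objectives,
     centroids, centroid_sums, centroid_counts, centroid_objectives) = empty_state(n_samples, n_features, n_clusters)
    sample_membership[:] = np.random.randint(0, n_clusters, n_samples)   # random initial labels
    objective = initialize_by_membership(samples, sample_weights, sample_membership, sample_objectives,
                                         centroids, centroid_sums, centroid_counts, centroid_objectives)
    objective, n_iters = h_means_first(samples, sample_weights, sample_membership,
                                       centroids, centroid_sums, centroid_counts, centroid_objectives, objective)
    return objective, n_iters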
# Hartigan–Wong method
# best-improvement strategy
@njit(parallel = True)
def h_means_best(samples, sample_weights, sample_membership, centroids, centroid_sums, centroid_counts, centroid_objectives, objective, max_iters = 3000, tol=0.0001):
n_samples, n_features = samples.shape
n_clusters = centroids.shape[0]
n_sample_weights = sample_weights.shape[0]
sample_objectives = np.full(0,np.nan)
check_shapes(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives)
improved = True
n_iters = 0
tolerance = np.inf
objective_changes = np.full(n_samples, 0.0)
new_cluster_inds = np.full(n_samples, -1)
while improved and (objective > 0.0) and (n_iters < max_iters) and (tolerance > tol):
improved = False
previous_objective = objective
objective_changes.fill(0.0)
new_cluster_inds.fill(-1)
for i in prange(n_samples):
p_sample = samples[i]
if n_sample_weights > 0:
p_sample_weight = sample_weights[i]
else:
p_sample_weight = 1.0
p_cluster_i = sample_membership[i]
p_count_i = centroid_counts[p_cluster_i]
if p_count_i-p_sample_weight != 0.0:
p_objective_change_i = cluster_objective_change(p_sample, -p_sample_weight, centroids[p_cluster_i], p_count_i)
else:
p_objective_change_i = centroid_objectives[p_cluster_i]
p_best_cluster_j = -1
p_best_objective_change = np.inf
p_best_objective_change_j = np.nan
for p_cluster_j in range(n_clusters):
if p_cluster_j != p_cluster_i:
p_count_j = centroid_counts[p_cluster_j]
if p_count_j+p_sample_weight != 0.0:
p_objective_change_j = cluster_objective_change(p_sample, p_sample_weight, centroids[p_cluster_j], p_count_j)
p_objective_change = p_objective_change_j - p_objective_change_i
if p_objective_change < p_best_objective_change:
p_best_cluster_j = p_cluster_j
p_best_objective_change_j = p_objective_change_j
p_best_objective_change = p_objective_change
if (p_best_cluster_j > -1) and (p_best_objective_change < 0.0):
objective_changes[i] = p_best_objective_change
new_cluster_inds[i] = p_best_cluster_j
best_sample_ind = -1
best_objective_change = np.inf
for i in range(n_samples):
if objective_changes[i] < best_objective_change:
best_sample_ind = i
best_objective_change = objective_changes[i]
if (best_sample_ind > -1) and (new_cluster_inds[best_sample_ind] > -1) and (best_objective_change < 0.0):
sample = samples[best_sample_ind]
if n_sample_weights > 0:
sample_weight = sample_weights[best_sample_ind]
else:
sample_weight = 1.0
weighted_sample = sample_weight * samples[best_sample_ind]
cluster_i = sample_membership[best_sample_ind]
cluster_j = new_cluster_inds[best_sample_ind]
if centroid_counts[cluster_j]+sample_weight != 0.0:
if centroid_counts[cluster_i]-sample_weight != 0.0:
objective_change_i = cluster_objective_change(sample, -sample_weight, centroids[cluster_i], centroid_counts[cluster_i])
else:
objective_change_i = centroid_objectives[cluster_i]
centroid_objectives[cluster_i] -= objective_change_i
objective_change_j = cluster_objective_change(sample, sample_weight, centroids[cluster_j], centroid_counts[cluster_j])
centroid_objectives[cluster_j] += objective_change_j
centroid_sums[cluster_i] -= weighted_sample
centroid_counts[cluster_i] -= sample_weight
if centroid_counts[cluster_i] != 0.0:
centroids[cluster_i] = centroid_sums[cluster_i]/centroid_counts[cluster_i]
else:
centroids[cluster_i].fill(np.nan)
sample_membership[best_sample_ind] = cluster_j
centroid_sums[cluster_j] += weighted_sample
centroid_counts[cluster_j] += sample_weight
if centroid_counts[cluster_j] != 0.0:
centroids[cluster_j] = centroid_sums[cluster_j]/centroid_counts[cluster_j]
else:
centroids[cluster_j].fill(np.nan)
objective += best_objective_change
improved = True
n_iters += 1
tolerance = 1 - objective/previous_objective
return objective, n_iters
def _assignment(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_objectives):
n_samples, n_features = samples.shape
n_centroids = centroids.shape[0]
n_sample_weights = sample_weights.shape[0]
n_sample_membership = sample_membership.shape[0]
n_sample_objectives = sample_objectives.shape[0]
n_centroid_objectives = centroid_objectives.shape[0]
if n_centroid_objectives > 0:
centroid_objectives.fill(np.nan)
objective = 0.0
n_changed_membership = 0
for i in prange(n_samples):
min_dist2 = np.inf
min_ind = -1
for j in range(n_centroids):
if not np.isnan(centroids[j,0]):
dist2 = 0.0
for h in range(n_features):
dist2 += (centroids[j,h] - samples[i,h])**2
if dist2 < min_dist2:
min_dist2 = dist2
min_ind = j
if min_ind == -1: min_dist2 = np.nan
if (n_sample_membership > 0) and (sample_membership[i] != min_ind):
n_changed_membership += 1
sample_membership[i] = min_ind
if n_sample_weights > 0:
sample_objective = sample_weights[i]*min_dist2
else:
sample_objective = min_dist2
if n_sample_objectives > 0:
sample_objectives[i] = sample_objective
if (n_centroid_objectives > 0) and (min_ind > -1):
if np.isnan(centroid_objectives[min_ind]):
centroid_objectives[min_ind] = sample_objective
else:
centroid_objectives[min_ind] += sample_objective
objective += sample_objective
return objective, n_changed_membership
assignment = njit(parallel=False)(_assignment)
assignment_parallel = njit(parallel=True)(_assignment)
@njit(parallel = False)
def update_centroids(samples, sample_weights, sample_membership, centroids, centroid_sums, centroid_counts):
n_samples, n_features = samples.shape
n_clusters = centroids.shape[0]
n_sample_weights = sample_weights.shape[0]
for i in range(n_clusters):
centroid_counts[i] = 0.0
for j in range(n_features):
centroid_sums[i,j] = 0.0
centroids[i,j] = np.nan
if n_sample_weights > 0:
for i in range(n_samples):
centroid_ind = sample_membership[i]
for j in range(n_features):
centroid_sums[centroid_ind,j] += sample_weights[i] * samples[i,j]
centroid_counts[centroid_ind] += sample_weights[i]
else:
for i in range(n_samples):
centroid_ind = sample_membership[i]
for j in range(n_features):
centroid_sums[centroid_ind,j] += samples[i,j]
centroid_counts[centroid_ind] += 1.0
for i in range(n_clusters):
if centroid_counts[i] > 0.0:
for j in range(n_features):
centroids[i,j] = centroid_sums[i,j] / centroid_counts[i]
@njit(parallel = True)
def update_centroids_parallel(samples, sample_weights, sample_membership, centroids, centroid_sums, centroid_counts):
n_samples, n_features = samples.shape
n_clusters = centroids.shape[0]
n_sample_weights = sample_weights.shape[0]
for i in range(n_clusters):
centroid_counts[i] = 0.0
for j in range(n_features):
centroid_sums[i,j] = 0.0
thread_ranges = sub_sections(n_samples, nb.config.NUMBA_NUM_THREADS)
n_threads = thread_ranges.shape[0]
thread_centroid_sums = np.zeros((n_threads,n_clusters,n_features))
thread_centroid_counts = np.zeros((n_threads,n_clusters))
if n_sample_weights > 0:
for i in prange(n_threads):
for j in range(thread_ranges[i,0],thread_ranges[i,1]):
centroid_ind = sample_membership[j]
for k in range(n_features):
thread_centroid_sums[i,centroid_ind,k] += sample_weights[j] * samples[j,k]
thread_centroid_counts[i,centroid_ind] += sample_weights[j]
else:
for i in prange(n_threads):
for j in range(thread_ranges[i,0],thread_ranges[i,1]):
centroid_ind = sample_membership[j]
for k in range(n_features):
thread_centroid_sums[i,centroid_ind,k] += samples[j,k]
thread_centroid_counts[i,centroid_ind] += 1.0
for i in range(n_threads):
for j in range(n_clusters):
centroid_counts[j] += thread_centroid_counts[i,j]
for k in range(n_features):
centroid_sums[j,k] += thread_centroid_sums[i,j,k]
for i in range(n_clusters):
if centroid_counts[i] > 0.0:
for j in range(n_features):
centroids[i,j] = centroid_sums[i,j] / centroid_counts[i]
else:
for j in range(n_features):
centroids[i,j] = np.nan
centroid_sums[i,j] = np.nan
@njit
def k_means(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives, max_iters = 300, tol=0.0001, parallel = True):
n_samples, n_features = samples.shape
n_clusters = centroids.shape[0]
check_shapes(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives)
objective = np.inf
n_iters = 0
sample_membership.fill(-1)
sample_objectives.fill(np.nan)
centroid_objectives.fill(np.nan)
if (n_samples > 0) and (n_features > 0) and (n_clusters > 0):
n_changed_membership = 1
objective_previous = np.inf
tolerance = np.inf
while True:
if parallel:
objective, n_changed_membership = assignment_parallel(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_objectives)
else:
objective, n_changed_membership = assignment(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_objectives)
tolerance = 1 - objective/objective_previous
objective_previous = objective
n_iters += 1
if (n_iters >= max_iters) or (n_changed_membership <= 0) or (tolerance <= tol) or (objective <= 0.0):
break
if parallel:
update_centroids_parallel(samples, sample_weights, sample_membership, centroids, centroid_sums, centroid_counts)
else:
update_centroids(samples, sample_weights, sample_membership, centroids, centroid_sums, centroid_counts)
return objective, n_iters
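# Illustrative usage sketch for k_means (commented out, not executed here); buffer sizes
# are arbitrary example values and all buffers are preallocated by the caller. An empty
# sample_weights array means unweighted samples.
#
#   n, d, k = 10000, 2, 8
#   samples = np.random.rand(n, d)
#   sample_weights = np.full(0, 0.0)
#   sample_membership = np.full(n, -1)
#   sample_objectives = np.full(n, np.nan)
#   centroids = samples[np.random.choice(n, k, replace=False)].copy()
#   centroid_sums = np.full((k, d), np.nan)
#   centroid_counts = np.full(k, 0.0)
#   centroid_objectives = np.full(k, np.nan)
#   objective, n_iters = k_means(samples, sample_weights, sample_membership,
#                                sample_objectives, centroids, centroid_sums,
#                                centroid_counts, centroid_objectives,
#                                max_iters=300, tol=0.0001, parallel=True)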
@njit
def k_h_means(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives, k_max_iters = 300, h_max_iters = 300, k_tol=0.0001, h_tol=0.0001):
k_objective, k_iters = k_means(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives, k_max_iters, k_tol, True)
update_centroids_parallel(samples, sample_weights, sample_membership, centroids, centroid_sums, centroid_counts)
h_objective, h_iters = h_means_first(samples, sample_weights, sample_membership, centroids, centroid_sums, centroid_counts, centroid_objectives, k_objective, h_max_iters, h_tol)
return h_objective, k_iters+h_iters
# Local search heuristic for the minimum sum-of-squares clustering problem that iteratively
# inserts an extra center, re-runs the local search, and deletes a surplus centroid. New centers
# are inserted using k-means++ logic; the centroid to delete is drawn with probability
# proportional to the centroid objective values, so the worst (largest-objective) centers are
# the most likely to be removed.
@njit(parallel = True)
def iterative_extra_center_insertion_deletion(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives, objective, local_max_iters = 300, local_tol=0.0001, max_iters = 300, tol=0.0001, max_cpu_time=10, n_candidates=3, printing=False):
n_samples, n_features = samples.shape
n_sample_weights, = sample_weights.shape
n_centers = centroids.shape[0]
with objmode(start_time = 'float64'):
start_time = time.perf_counter()
n_iters = 0
n_local_iters = 0
cpu_time = 0.0
if (n_samples > 0) and (n_features > 0) and (n_centers > 0):
n_centers_ext = n_centers + 1
ext_sample_membership = np.full(n_samples, -1)
ext_sample_objectives = np.full(n_samples, np.nan)
ext_centroids = np.full((n_centers_ext, n_features), np.nan)
ext_centroid_sums = np.full((n_centers_ext, n_features), np.nan)
ext_centroid_counts = np.full(n_centers_ext, 0.0)
ext_centroid_objectives = np.full(n_centers_ext, np.nan)
best_sample_membership = np.full(n_samples, -1)
best_sample_objectives = np.full(n_samples, np.nan)
best_centroids = np.full((n_centers_ext, n_features), np.nan)
best_centroid_sums = np.full((n_centers_ext, n_features), np.nan)
best_centroid_counts = np.full(n_centers_ext, 0.0)
best_centroid_objectives = np.full(n_centers_ext, np.nan)
best_objective = objective
best_excess_centroid_ind = -1
for i in prange(n_samples):
best_sample_membership[i] = sample_membership[i]
best_sample_objectives[i] = sample_objectives[i]
for i in range(n_centers):
best_centroid_counts[i] = centroid_counts[i]
best_centroid_objectives[i] = centroid_objectives[i]
for j in range(n_features):
best_centroids[i,j] = centroids[i,j]
best_centroid_sums[i,j] = centroid_sums[i,j]
n_iters = 0
tolerance = np.inf
cumsum = np.empty(n_centers_ext)
with objmode(current_time = 'float64'):
current_time = time.perf_counter()
cpu_time = current_time - start_time
if printing:
with objmode:
print ('%-30s%-15s%-15s' % ('objective', 'n_iters', 'cpu_time'))
while (cpu_time < max_cpu_time) and (n_iters < max_iters) and (tolerance > tol):
for i in prange(n_samples):
ext_sample_membership[i] = best_sample_membership[i]
for i in range(n_centers_ext):
for j in range(n_features):
ext_centroids[i,j] = best_centroids[i,j]
degenerate_mask = ext_centroid_counts == 0.0
n_degenerate = np.sum(degenerate_mask)
new_center_inds = additional_centers(samples, sample_weights, centroids, n_degenerate, n_candidates, distance_measure=0)
ext_centroids[degenerate_mask,:] = samples[new_center_inds,:]
ext_objective, ext_n_iters = k_means(samples, sample_weights, ext_sample_membership, ext_sample_objectives, ext_centroids, ext_centroid_sums, ext_centroid_counts, ext_centroid_objectives, local_max_iters, local_tol, True)
cum_sum(ext_centroid_objectives, cumsum)
excess_centroid_ind = random_choice(cumsum)
for i in range(n_features):
ext_centroids[excess_centroid_ind,i] = np.nan
ext_objective, ext_n_iters = k_means(samples, sample_weights, ext_sample_membership, ext_sample_objectives, ext_centroids, ext_centroid_sums, ext_centroid_counts, ext_centroid_objectives, local_max_iters, local_tol, True)
with objmode(current_time = 'float64'):
current_time = time.perf_counter()
cpu_time = current_time - start_time
if ext_objective < best_objective:
tolerance = 1 - ext_objective/best_objective
best_objective = ext_objective
for i in prange(n_samples):
best_sample_membership[i] = ext_sample_membership[i]
best_sample_objectives[i] = ext_sample_objectives[i]
for i in range(n_centers_ext):
best_centroid_counts[i] = ext_centroid_counts[i]
best_centroid_objectives[i] = ext_centroid_objectives[i]
for j in range(n_features):
best_centroids[i,j] = ext_centroids[i,j]
best_centroid_sums[i,j] = ext_centroid_sums[i,j]
best_excess_centroid_ind = excess_centroid_ind
n_local_iters = ext_n_iters
if printing:
with objmode:
print ('%-30f%-15i%-15.2f' % (best_objective, n_iters, cpu_time))
n_iters += 1
if best_excess_centroid_ind > -1:
objective = best_objective
for i in prange(n_samples):
sample_objectives[i] = best_sample_objectives[i]
if best_sample_membership[i] <= best_excess_centroid_ind:
sample_membership[i] = best_sample_membership[i]
else:
sample_membership[i] = best_sample_membership[i]-1
for i in range(n_centers_ext):
if i <= best_excess_centroid_ind:
ind = i
else:
ind = i-1
if i != best_excess_centroid_ind:
centroid_counts[ind] = best_centroid_counts[i]
centroid_objectives[ind] = best_centroid_objectives[i]
for j in range(n_features):
centroids[ind,j] = best_centroids[i,j]
centroid_sums[ind,j] = best_centroid_sums[i,j]
if printing:
with objmode:
print ('%-30f%-15i%-15.2f' % (best_objective, n_iters, cpu_time))
return objective, n_iters, n_local_iters
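# Illustrative continuation of the k_means sketch above (commented out, not executed here):
# the insertion/deletion heuristic refines an existing solution in place, so it is called
# with the buffers and objective produced by a previous k_means run.
#
#   objective, n_iters, n_local_iters = iterative_extra_center_insertion_deletion(
#       samples, sample_weights, sample_membership, sample_objectives,
#       centroids, centroid_sums, centroid_counts, centroid_objectives,
#       objective, max_cpu_time=5.0, printing=True)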
@njit
def empty_solution(sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives):
empty_sample_membership = np.empty_like(sample_membership)
empty_sample_objectives = np.empty_like(sample_objectives)
empty_centroids = np.empty_like(centroids)
empty_centroid_sums = np.empty_like(centroid_sums)
empty_centroid_counts = np.empty_like(centroid_counts)
empty_centroid_objectives = np.empty_like(centroid_objectives)
return empty_sample_membership, empty_sample_objectives, empty_centroids, empty_centroid_sums, empty_centroid_counts, empty_centroid_objectives
@njit(parallel = True)
def copy_solution(sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives, copy_sample_membership, copy_sample_objectives, copy_centroids, copy_centroid_sums, copy_centroid_counts, copy_centroid_objectives):
for i in prange(sample_membership.shape[0]):
copy_sample_membership[i] = sample_membership[i]
copy_sample_objectives[i] = sample_objectives[i]
for i in range(centroids.shape[0]):
copy_centroid_counts[i] = centroid_counts[i]
copy_centroid_objectives[i] = centroid_objectives[i]
for j in range(centroids.shape[1]):
copy_centroids[i,j] = centroids[i,j]
copy_centroid_sums[i,j] = centroid_sums[i,j]
@njit
def shake_membership(n_reallocations, n_samples, n_clusters, sample_membership):
for i in range(n_reallocations):
sample_membership[np.random.randint(n_samples)] = np.random.randint(n_clusters)
# TODO: consider accepting only significant improvements (i.e. above a given threshold)
# rather than arbitrarily small ones; this would require an additional parameter.
@njit(parallel = True)
def Membership_Shaking_VNS(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives, objective, k_max_iters=300, h_max_iters=300, k_tol=0.0001, h_tol=0.0001, kmax=5, max_cpu_time=10, max_iters=100, printing=False):
n_samples, n_features = samples.shape
n_centers = centroids.shape[0]
cpu_time = 0.0
n_iters = 0
k = 1
n_iters_k = 0
if printing:
with objmode:
print ('%-30s%-7s%-15s%-15s%-15s' % ('objective', 'k', 'n_iters', 'n_iters_k', 'cpu_time'))
with objmode(start_time = 'float64'):
start_time = time.perf_counter()
best_sample_membership, best_sample_objectives, best_centroids, best_centroid_sums, best_centroid_counts, best_centroid_objectives = empty_solution(sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives)
best_objective = objective
copy_solution(sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives, best_sample_membership, best_sample_objectives, best_centroids, best_centroid_sums, best_centroid_counts, best_centroid_objectives)
with objmode(current_time = 'float64'):
current_time = time.perf_counter()
cpu_time = current_time- start_time
while (cpu_time < max_cpu_time) and (n_iters < max_iters):
# neighborhood solution
for i in prange(n_samples):
sample_membership[i] = best_sample_membership[i]
shake_membership(k, n_samples, n_centers, sample_membership)
objective = initialize_by_membership(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives)
objective, n_local_iters = k_h_means(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives, k_max_iters, h_max_iters, k_tol, h_tol)
with objmode(current_time = 'float64'):
current_time = time.perf_counter()
cpu_time = current_time - start_time
if objective < best_objective:
best_objective = objective
copy_solution(sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives, best_sample_membership, best_sample_objectives, best_centroids, best_centroid_sums, best_centroid_counts, best_centroid_objectives)
if printing:
with objmode:
print ('%-30f%-7i%-15i%-15i%-15.2f' % (best_objective, k, n_iters, n_iters_k, cpu_time))
k = 1
n_iters_k += 1
else:
if k < kmax:
k += 1
n_iters += 1
objective = best_objective
copy_solution(best_sample_membership, best_sample_objectives, best_centroids, best_centroid_sums, best_centroid_counts, best_centroid_objectives, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives)
if printing:
with objmode:
print ('%-30f%-7i%-15i%-15i%-15.2f' % (best_objective, k, n_iters, n_iters_k, cpu_time))
return objective, n_iters
# The parameter "shaking_mode" is used to define the used logic of centroid-to-entity reallocations:
# 0 - "Lumped mode" - finding a center with the worst objective and k-1 closest to it other centers, then replace these k centers with new ones (like in J-means);
# 1 - "Scatter mode" - finding a k centers distributed in the space and not connected to each other, then replace them with new ones (like in J-means);
# 2 - finding the k centers with the worst objectives and replace each of them with a random internal entity from their corresponding clusters (like I-means in [Nilolaev and Mladenovic 2015])
# 3 - replace all nondegenerate centers with a random internal entity from their corresponding clusters
@njit
def shake_centers(k, samples, sample_weights, sample_membership, centroids, centroid_counts, centroid_objectives, n_candidates=3, shaking_mode=1, fully_random = False):
n_samples, n_features = samples.shape
n_sample_weights, = sample_weights.shape
n_centers = centroids.shape[0]
degenerate_mask = np.sum(np.isnan(centroids), axis = 1) > 0
nondegenerate_mask = ~degenerate_mask
n_degenerate = np.sum(degenerate_mask)
n_nondegenerate = n_centers-n_degenerate
if (k > 0) and (n_nondegenerate > 0):
if k > n_nondegenerate:
k = n_nondegenerate
nondegenerate_inds = np.arange(n_centers)[nondegenerate_mask]
nondegenerate_objectives = centroid_objectives[nondegenerate_mask]
sum_of_centroid_objectives = np.sum(nondegenerate_objectives)
if shaking_mode == 0:
if fully_random:
target = nondegenerate_inds[np.random.randint(nondegenerate_inds.shape[0])]
else:
rand_val = np.random.random_sample(1) * sum_of_centroid_objectives
target_ind = np.full(1, -1)
cum_search(nondegenerate_objectives, rand_val, target_ind)
target = nondegenerate_inds[target_ind[0]]
if target > -1:
if k-1 > 0:
centroid_weights = np.empty(0)
dists = distance_matrix_euclidean2_XY_weighted_cpu(centroids[np.array([target])], centroids[nondegenerate_mask], centroid_weights, centroid_weights)[0]
replaced_inds = np.argsort(dists)[:k]
else:
replaced_inds = np.full(1, target)
centroids[nondegenerate_inds[replaced_inds],:] = np.nan
if fully_random:
additional_center_inds = np.random.choice(n_samples, k, replace=False)
else:
additional_center_inds = additional_centers(samples, sample_weights, centroids, k, n_candidates, distance_measure=0)
centroids[nondegenerate_inds[replaced_inds],:] = samples[additional_center_inds,:]
elif shaking_mode == 1:
if fully_random:
replaced_inds = nondegenerate_inds[np.random.choice(nondegenerate_inds.shape[0], k, replace=False)]
else:
rand_vals = np.random.random_sample(k) * sum_of_centroid_objectives
replaced_inds = np.full(k, -1)
cum_search(nondegenerate_objectives, rand_vals, replaced_inds)
centroids[nondegenerate_inds[replaced_inds],:] = np.nan
if fully_random:
additional_center_inds = np.random.choice(n_samples, k, replace=False)
else:
additional_center_inds = additional_centers(samples, sample_weights, centroids, k, n_candidates, distance_measure=0)
centroids[nondegenerate_inds[replaced_inds],:] = samples[additional_center_inds,:]
elif shaking_mode == 2:
if fully_random:
replaced_inds = nondegenerate_inds[np.random.choice(nondegenerate_inds.shape[0], k, replace=False)]
else:
rand_vals = np.random.random_sample(k) * sum_of_centroid_objectives
replaced_inds = np.full(k, -1)
cum_search(nondegenerate_objectives, rand_vals, replaced_inds)
sample_inds = np.arange(n_samples)
target_inds = nondegenerate_inds[replaced_inds]
for i in range(k):
candidate_inds = sample_inds[sample_membership == target_inds[i]]
sample_ind = candidate_inds[np.random.randint(candidate_inds.shape[0])]
centroids[target_inds[i],:] = samples[sample_ind,:]
elif shaking_mode == 3:
sample_inds = np.arange(n_samples)
for i in nondegenerate_inds:
candidate_inds = sample_inds[sample_membership == i]
sample_ind = candidate_inds[np.random.randint(candidate_inds.shape[0])]
centroids[i,:] = samples[sample_ind,:]
else:
raise KeyError
if n_degenerate > 0:
additional_center_inds = additional_centers(samples, sample_weights, centroids, n_degenerate, n_candidates, distance_measure=0)
centroids[degenerate_mask,:] = samples[additional_center_inds,:]
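# Illustrative sketch (commented out, not executed here): shake_centers perturbs the
# current centroids in place; k is the neighbourhood size and shaking_mode selects one of
# the reallocation strategies described in the comment above.
#
#   shake_centers(2, samples, sample_weights, sample_membership,
#                 centroids, centroid_counts, centroid_objectives,
#                 n_candidates=3, shaking_mode=1, fully_random=False)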
# Simple center shaking VNS
@njit(parallel = True)
def Center_Shaking_VNS(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives, objective, local_max_iters=300, local_tol=0.0001, kmax=3, max_cpu_time=10, max_iters=100, n_candidates=3, shaking_mode = 0, fully_random=False, printing=False):
n_samples, n_features = samples.shape
n_sample_weights, = sample_weights.shape
n_centers = centroids.shape[0]
cpu_time = 0.0
n_iters = 0
k = 1
n_iters_k = 0
if printing:
with objmode:
print ('%-30s%-7s%-15s%-15s%-15s' % ('objective', 'k', 'n_iters', 'n_iters_k', 'cpu_time'))
with objmode(start_time = 'float64'):
start_time = time.perf_counter()
best_objective = objective
best_n_local_iters = 0
if (n_samples > 0) and (n_features > 0) and (n_centers > 0):
# Empty Neighborhood Solution
neighborhood_sample_membership = np.full(n_samples, -1)
neighborhood_sample_objectives = np.full(n_samples, np.nan)
neighborhood_centroids = np.full((n_centers, n_features), np.nan)
neighborhood_centroid_sums = np.full((n_centers, n_features), np.nan)
neighborhood_centroid_counts = np.full(n_centers, 0.0)
neighborhood_centroid_objectives = np.full(n_centers, np.nan)
# Empty Best Solution
best_sample_membership = np.full(n_samples, -1)
best_sample_objectives = np.full(n_samples, np.nan)
best_centroids = np.full((n_centers, n_features), np.nan)
best_centroid_sums = np.full((n_centers, n_features), np.nan)
best_centroid_counts = np.full(n_centers, 0.0)
best_centroid_objectives = np.full(n_centers, np.nan)
# Best Solution is the Current One
for i in prange(n_samples):
best_sample_membership[i] = sample_membership[i]
best_sample_objectives[i] = sample_objectives[i]
for i in range(n_centers):
best_centroid_counts[i] = centroid_counts[i]
best_centroid_objectives[i] = centroid_objectives[i]
for j in range(n_features):
best_centroids[i,j] = centroids[i,j]
best_centroid_sums[i,j] = centroid_sums[i,j]
with objmode(current_time = 'float64'):
current_time = time.perf_counter()
cpu_time = current_time - start_time
k = 1
while (cpu_time < max_cpu_time) and (n_iters < max_iters):
# Neighborhood Solution
for i in prange(n_samples):
neighborhood_sample_membership[i] = best_sample_membership[i]
neighborhood_sample_objectives[i] = best_sample_objectives[i]
for i in range(n_centers):
neighborhood_centroid_counts[i] = best_centroid_counts[i]
neighborhood_centroid_objectives[i] = best_centroid_objectives[i]
for j in range(n_features):
neighborhood_centroids[i,j] = best_centroids[i,j]
neighborhood_centroid_sums[i,j] = best_centroid_sums[i,j]
shake_centers(k, samples, sample_weights, sample_membership, neighborhood_centroids, neighborhood_centroid_counts, neighborhood_centroid_objectives, n_candidates, shaking_mode, fully_random)
# Local Search Initialized by Neighborhood Solution
neighborhood_objective, neighborhood_n_iters = k_means(samples, sample_weights, neighborhood_sample_membership, neighborhood_sample_objectives, neighborhood_centroids, neighborhood_centroid_sums, neighborhood_centroid_counts, neighborhood_centroid_objectives, local_max_iters, local_tol, True)
with objmode(current_time = 'float64'):
current_time = time.perf_counter()
cpu_time = current_time - start_time
# Check for the Best
if neighborhood_objective < best_objective:
best_objective = neighborhood_objective
best_n_local_iters = neighborhood_n_iters
if printing:
with objmode:
print ('%-30f%-7i%-15i%-15i%-15.2f' % (best_objective, k, n_iters, n_iters_k, cpu_time))
k = 1
n_iters_k += 1
# Remember the Best Solution
for i in prange(n_samples):
best_sample_membership[i] = neighborhood_sample_membership[i]
best_sample_objectives[i] = neighborhood_sample_objectives[i]
for i in range(n_centers):
best_centroid_counts[i] = neighborhood_centroid_counts[i]
best_centroid_objectives[i] = neighborhood_centroid_objectives[i]
for j in range(n_features):
best_centroids[i,j] = neighborhood_centroids[i,j]
best_centroid_sums[i,j] = neighborhood_centroid_sums[i,j]
else:
if k < kmax:
k += 1
n_iters += 1
# Replace Current Solution by the Best One
for i in prange(n_samples):
sample_membership[i] = best_sample_membership[i]
sample_objectives[i] = best_sample_objectives[i]
for i in range(n_centers):
centroid_counts[i] = best_centroid_counts[i]
centroid_objectives[i] = best_centroid_objectives[i]
for j in range(n_features):
centroids[i,j] = best_centroids[i,j]
centroid_sums[i,j] = best_centroid_sums[i,j]
# if printing:
# with objmode:
# print ('%-30f%-7i%-15i%-15i%-15.2f' % (best_objective, k, n_iters, n_iters_k, cpu_time))
return best_objective, n_iters, best_n_local_iters
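# Illustrative sketch (commented out, not executed here): both VNS variants refine an
# existing solution in place, starting from buffers filled by k_means; Center_Shaking_VNS
# perturbs the centroids while Membership_Shaking_VNS perturbs the assignment vector.
#
#   objective, n_iters, n_local_iters = Center_Shaking_VNS(
#       samples, sample_weights, sample_membership, sample_objectives,
#       centroids, centroid_sums, centroid_counts, centroid_objectives,
#       objective, kmax=3, max_cpu_time=10, printing=True)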
# TODO: for large data, consider supporting a sparse input dataset (or leave it to the caller to pass one).
# TODO: extend this procedure so that the distance metric can be selected.
# TODO: use k-medoids instead of k-means so that a full precomputed distance matrix can be used.
#
# The idea of the algorithm is inspired by:
# Likas A., Vlassis N., Verbeek J.J. The global k-means clustering algorithm //
# Pattern Recognition 36 (2003), pp. 451 – 461
@njit(parallel=True)
def number_of_clusters(samples, min_num=-1, max_num=-1, max_iters=300, tol=0.0001):
n_samples = samples.shape[0]
n_features = samples.shape[1]
if min_num < 2 or min_num > n_samples:
min_num = 2
if max_num < 0 or max_num > n_samples:
max_num = n_samples
if n_samples > 0 and n_features > 0 and min_num < max_num:
objectives = np.full(max_num, 0.0)
used_samples = np.full(n_samples, False)
global_centroid = np.reshape(np.sum(samples, axis=0) / n_samples, (1, samples.shape[1]))
D = distance_matrix_euclidean2_XY_cpu(global_centroid, samples)
medoid_ind = np.argmin(D[0])
n_centroids = 1
sample_weights, sample_membership, sample_objectives, centroids2, centroid_sums, centroid_counts, centroid_objectives = empty_state(n_samples, n_features, n_centroids)
centroids2[0, :] = samples[medoid_ind, :]
centroids = np.full((max_num, n_features), np.nan)
centroids[0, :] = samples[medoid_ind, :]
used_samples[medoid_ind] = True
objectives[0], _ = assignment(samples, sample_weights, sample_membership, sample_objectives, centroids2, centroid_objectives)
local_objectives = np.empty(n_samples)
for i in range(1, max_num):
local_objectives.fill(np.inf)
for j in prange(n_samples):
if not used_samples[j]:
sample_weights, sample_membership, sample_objectives, centroids2, centroid_sums, centroid_counts, centroid_objectives = empty_state(n_samples, n_features, n_centroids)
centroids2 = np.concatenate((centroids[:n_centroids], np.reshape(samples[j], (1, samples.shape[1]))))
local_objectives[j], _ = assignment(samples, sample_weights, sample_membership, sample_objectives, centroids2, centroid_objectives)
min_ind = np.argmin(local_objectives)
used_samples[min_ind] = True
centroids[n_centroids, :] = samples[min_ind, :]
objectives[n_centroids] = local_objectives[min_ind]
n_centroids += 1
cluster_nums = np.arange(min_num, max_num)
drop_rates = np.empty(cluster_nums.shape[0])
for i in range(min_num - 1, max_num - 1):
p1 = objectives[i - 1]
p2 = objectives[i]
p3 = objectives[i + 1]
d1 = abs(p1 - p2)
d2 = abs(p2 - p3)
#d1 = p1 - p2
#d2 = p2 - p3
if d2 != 0.0:
drop_rates[i - min_num + 1] = d1 / d2
else:
drop_rates[i - min_num + 1] = 0.0
n_clusters = cluster_nums[np.argmax(drop_rates)]
else:
n_clusters = -1
cluster_nums = np.full(0, -1)
drop_rates = np.empty(0)
objectives = np.empty(0)
return n_clusters, cluster_nums, drop_rates, objectives
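# Worked example (illustrative, commented out): suppose the incremental objectives for
# 1..4 centroids come out as [100.0, 40.0, 35.0, 33.0] with min_num=2 and max_num=4.
# Then cluster_nums = [2, 3] and the drop rates are |100-40|/|40-35| = 12.0 for k=2 and
# |40-35|/|35-33| = 2.5 for k=3, so n_clusters = 2 is returned.
#
#   n_clusters, cluster_nums, drop_rates, objectives = number_of_clusters(samples)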
@njit
def method_sequencing(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives, objective, method_sequence, time_sequence, max_iters_sequence, kmax_sequence, local_max_iters=300, local_tol=0.00001, n_candidates=3, shaking_mode = 0, printing=False):
assert method_sequence.ndim == 1 and time_sequence.ndim == 1 and kmax_sequence.ndim == 1
sequence_size = method_sequence.shape[0]
assert time_sequence.shape[0] == sequence_size and max_iters_sequence.shape[0] == sequence_size and kmax_sequence.shape[0] == sequence_size
methods = {0,1,2,3,4,5,6}
for i in range(sequence_size):
method = method_sequence[i]
assert method in methods
if method == 1:
if printing: print('H-means (first improvement strategy):')
objective, n_iters = h_means_first(samples, sample_weights, sample_membership, centroids, centroid_sums, centroid_counts, centroid_objectives, objective, local_max_iters, local_tol)
if printing:
print(objective)
print()
elif method == 2:
if printing: print('H-means (best-improvement strategy):')
objective, n_iters = h_means_best(samples, sample_weights, sample_membership, centroids, centroid_sums, centroid_counts, centroid_objectives, objective, local_max_iters, local_tol)
if printing:
print(objective)
print()
elif method == 3:
if printing: print('K-H-means:')
objective, n_iters = k_h_means(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives, local_max_iters, local_max_iters, local_tol, local_tol)
if printing:
print(objective)
print()
elif method == 4:
if printing: print('Center Shaking VNS:')
objective, n_iters, n_local_iters = Center_Shaking_VNS(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives, objective, local_max_iters, local_tol, kmax_sequence[i], time_sequence[i], max_iters_sequence[i], n_candidates, shaking_mode, False, printing)
if printing: print()
elif method == 5:
if printing: print('Membership Shaking VNS:')
objective, n_iters = Membership_Shaking_VNS(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives, objective, local_max_iters, local_max_iters, local_tol, local_tol, kmax_sequence[i], time_sequence[i], max_iters_sequence[i], printing)
if printing: print()
elif method == 6:
if printing: print('Extra Center Insertion/Deletion:')
objective, n_iters, n_local_iters = iterative_extra_center_insertion_deletion(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives, objective, local_max_iters, local_tol, max_iters_sequence[i], local_tol, time_sequence[i], n_candidates)
if printing: print()
else:
if printing: print('K-means:')
objective, n_iters = k_means(samples, sample_weights, sample_membership, sample_objectives, centroids, centroid_sums, centroid_counts, centroid_objectives, local_max_iters, local_tol, True)
if printing:
print(objective)
print()
return objective
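# Illustrative sketch (commented out, not executed here): the four sequences are parallel
# arrays with one entry per stage. Method codes: 0 = k-means, 1 = H-means (first
# improvement), 2 = H-means (best improvement), 3 = K-H-means, 4 = Center Shaking VNS,
# 5 = Membership Shaking VNS, 6 = extra center insertion/deletion.
#
#   method_sequence = np.array([0, 4])        # k-means, then Center Shaking VNS
#   time_sequence = np.array([10.0, 30.0])    # per-stage CPU-time budget, seconds
#   max_iters_sequence = np.array([300, 100])
#   kmax_sequence = np.array([0, 3])
#   objective = method_sequencing(samples, sample_weights, sample_membership,
#                                 sample_objectives, centroids, centroid_sums,
#                                 centroid_counts, centroid_objectives, np.inf,
#                                 method_sequence, time_sequence,
#                                 max_iters_sequence, kmax_sequence, printing=True)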
# Parallel multi-portion Minimum Sum-of-Squares Clustering (MSSC)
@njit(parallel = True)
def multi_portion_mssc(samples, sample_weights, centers, method_sequence, time_sequence, max_iters_sequence, kmax_sequence, n_clusters = 3, portion_size = -1, n_portions = 3, init_method = 1, local_max_iters=300, local_tol=0.0001, n_candidates=3):
n_samples, n_features = samples.shape
n_centers, n_center_features = centers.shape
n_sample_weights, = sample_weights.shape
init_methods = {0,1,2}
assert ((n_samples == n_sample_weights) or (n_sample_weights == 0))
assert ((init_method != 2) or (n_features == n_center_features))
assert (((init_method != 2) and (n_centers == 0)) or (n_centers == n_clusters))
assert (portion_size == -1) or (portion_size <= n_samples)
assert init_method in init_methods
collected_centroids = np.full((0, 0, 0), np.nan)
collected_centroid_counts = np.full((0, 0), 0.0)
collected_centroid_objectives = np.full((0, 0), np.nan)
collected_objectives = np.full(0, np.nan)
if (n_samples > 0) and (n_features > 0) and (n_clusters > 0) and (n_portions > 0) and ((portion_size > 0) or (portion_size == -1)) and (portion_size < n_samples):
collected_centroids = np.full((n_portions, n_clusters, n_features), np.nan)
collected_centroid_counts = np.full((n_portions, n_clusters), 0.0)
collected_centroid_objectives = np.full((n_portions, n_clusters), np.nan)
collected_objectives = np.full(n_portions, np.nan)
if portion_size == -1:
p_samples = samples
p_n_samples = n_samples
p_sample_weights = sample_weights
for i in prange(n_portions):
if portion_size > 0:
p_inds = np.random.choice(n_samples, portion_size, replace = False)
p_samples = samples[p_inds]
p_n_samples = portion_size
if n_sample_weights > 0:
p_sample_weights = sample_weights[p_inds]
else:
p_sample_weights = np.full(0, 0.0)
if init_method == 1:
collected_centroids[i] = p_samples[k_means_pp(p_samples, p_sample_weights, n_clusters, n_candidates, distance_measure=0)]
elif init_method == 2:
collected_centroids[i] = np.copy(centers)
else:
collected_centroids[i] = np.random.rand(n_clusters, n_features)
p_sample_membership = np.empty_like(p_inds)
p_sample_objectives = np.empty(p_n_samples)
p_centroid_sums = np.empty((n_clusters, n_features))
collected_objectives[i] = method_sequencing(p_samples, p_sample_weights, p_sample_membership, p_sample_objectives, collected_centroids[i], p_centroid_sums, collected_centroid_counts[i], collected_centroid_objectives[i], np.inf, method_sequence, time_sequence, max_iters_sequence, kmax_sequence, local_max_iters, local_tol, n_candidates, False)
return collected_centroids, collected_centroid_counts, collected_centroid_objectives, collected_objectives
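# Illustrative sketch (commented out, not executed here; assumes samples has more than
# 2000 rows): each of the n_portions random portions is clustered independently and in
# parallel, and the per-portion centroids, counts, centroid objectives and total
# objectives are returned stacked along the first axis.
#
#   centers = np.empty((0, samples.shape[1]))     # only used when init_method == 2
#   cc, ccounts, cobjs, objs = multi_portion_mssc(
#       samples, sample_weights, centers, method_sequence, time_sequence,
#       max_iters_sequence, kmax_sequence, n_clusters=8,
#       portion_size=2000, n_portions=4)
#   # cc.shape == (4, 8, samples.shape[1])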
@njit(parallel = True)
def decomposition_aggregation_mssc(samples, sample_weights, method_sequence, time_sequence, max_iters_sequence, kmax_sequence, n_clusters = 3, portion_size = -1, n_portions = 3, init_method = 1, local_max_iters=300, local_tol=0.0001, n_candidates=3, aggregation_method = 0, basis_n_init = 3):
n_samples, n_features = samples.shape
n_sample_weights, = sample_weights.shape
final_objective = np.inf
final_n_iters = 0
final_sample_membership = np.full(n_samples, -1)
final_sample_objectives = np.full(n_samples, np.nan)
final_centroids = np.full((n_clusters,n_features), np.nan)
final_centroid_sums = np.full((n_clusters,n_features), np.nan)
final_centroid_counts = np.full(n_clusters, 0.0)
final_centroid_objectives = np.full(n_clusters, np.nan)
if (n_samples > 0) and (n_features > 0) and (n_portions > 0) and (portion_size > 0) and (portion_size <= n_samples) and (basis_n_init > 0):
centers = np.empty((0, n_features))
centroids, centroid_counts, centroid_objectives, objectives = multi_portion_mssc(samples, sample_weights, centers, method_sequence, time_sequence, max_iters_sequence, kmax_sequence, n_clusters, portion_size, n_portions, init_method, local_max_iters, local_tol, n_candidates)
full_objectives = np.empty_like(objectives)
sample_membership = np.full(0, 0)
sample_objectives = np.full(0, 0.0)
centroid_objectives = np.empty((n_portions, n_clusters))
for i in prange(n_portions):
full_objectives[i], n_changed_membership = assignment(samples, sample_weights, sample_membership, sample_objectives, centroids[i], centroid_objectives[i])
if aggregation_method == 0:
min_ind = np.argmin(full_objectives)
final_centroids[:,:] = centroids[min_ind,:,:]
else:
n_basis_samples = np.sum(centroid_counts > 0.0)
basis_samples = np.empty((n_basis_samples, n_features), dtype = samples.dtype)
basis_weights = np.empty(n_basis_samples)
ind = 0
for i in range(n_portions):
for j in range(n_clusters):
if centroid_counts[i,j] > 0.0:
basis_samples[ind] = centroids[i,j]
#basis_weights[ind] = centroid_objectives[i,j]*full_objectives[i]
#basis_weights[ind] = centroid_objectives[i,j]
#basis_weights[ind] = centroid_objectives[i,j]/centroid_counts[i,j] #!!!!!!!!!
basis_weights[ind] = full_objectives[i]
#basis_weights[ind] = (centroid_objectives[i,j]*full_objectives[i])/centroid_counts[i,j]
ind += 1
normalization1D(basis_weights, True)
for i in range(n_basis_samples):
basis_weights[i] = np.exp(1-basis_weights[i])
basis_objectives = np.empty(basis_n_init)
basis_centroids = np.full((basis_n_init, n_clusters, n_features), np.nan)
for i in prange(basis_n_init):
basis_sample_membership = np.full(n_basis_samples, -1)
basis_sample_objectives = np.full(n_basis_samples, np.nan)
basis_centroid_sums = np.full((n_clusters,n_features), np.nan)
basis_centroid_counts = np.full(n_clusters, 0.0)
basis_centroid_objectives = np.full(n_clusters, np.nan)
basis_centroids[i,:,:] = basis_samples[k_means_pp(basis_samples, basis_weights, n_clusters, n_candidates, distance_measure=0)][:,:]
basis_objectives[i] = method_sequencing(basis_samples, basis_weights, basis_sample_membership, basis_sample_objectives, basis_centroids[i], basis_centroid_sums, basis_centroid_counts, basis_centroid_objectives, np.inf, method_sequence, time_sequence, max_iters_sequence, kmax_sequence, local_max_iters, local_tol, n_candidates, False)
min_ind = np.argmin(basis_objectives)
final_centroids[:,:] = basis_centroids[min_ind,:,:]
final_objective = method_sequencing(samples, sample_weights, final_sample_membership, final_sample_objectives, final_centroids, final_centroid_sums, final_centroid_counts, final_centroid_objectives, np.inf, method_sequence, time_sequence, max_iters_sequence, kmax_sequence, local_max_iters, local_tol, n_candidates, False)
return final_objective, final_sample_membership, final_sample_objectives, final_centroids, final_centroid_sums, final_centroid_counts, final_centroid_objectives
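# Illustrative end-to-end sketch (commented out, not executed here; parameter values are
# arbitrary): decompose the dataset into random portions, cluster each portion, aggregate
# the portion centroids into a weighted basis set (aggregation_method=1) or keep the best
# portion solution (aggregation_method=0), and refine the result on the full dataset.
#
#   (final_objective, final_sample_membership, final_sample_objectives, final_centroids,
#    final_centroid_sums, final_centroid_counts, final_centroid_objectives) = \
#       decomposition_aggregation_mssc(samples, sample_weights, method_sequence,
#                                      time_sequence, max_iters_sequence, kmax_sequence,
#                                      n_clusters=8, portion_size=2000, n_portions=4,
#                                      aggregation_method=1)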
| 41.974079
| 355
| 0.607947
|
d258d57a19f8c4d94fa0aab9a02b2d728f8ab239
| 3,132
|
py
|
Python
|
zunis_lib/zunis/utils/function_wrapper.py
|
NGoetz/zunis
|
a2d3ba2392ef802349723465c6559e8eb407db54
|
[
"MIT"
] | 18
|
2020-08-20T15:29:12.000Z
|
2022-03-21T09:41:44.000Z
|
zunis_lib/zunis/utils/function_wrapper.py
|
NGoetz/zunis
|
a2d3ba2392ef802349723465c6559e8eb407db54
|
[
"MIT"
] | 13
|
2020-08-25T10:53:58.000Z
|
2022-03-25T15:25:10.000Z
|
zunis_lib/zunis/utils/function_wrapper.py
|
NGoetz/zunis
|
a2d3ba2392ef802349723465c6559e8eb407db54
|
[
"MIT"
] | 2
|
2020-09-07T17:17:36.000Z
|
2020-12-30T01:22:23.000Z
|
"""Function wrappers to provide API-compatible functions"""
import torch
def wrap_numpy_hypercube_batch_function(f):
"""Take a function that evaluates on numpy batches with values in the unit hypercube
and return a function that evaluates on pytorch batches in the unit hypercube
"""
def torchf(x):
npx = x.detach().cpu().numpy()
npfx = f(npx)
return torch.tensor(npfx, device=x.device)
return torchf
def wrap_numpy_compact_batch_function(f, dimensions):
"""Take a function that evaluates on numpy batches with values in compact intervals provided
    as a list of shape (d,2) where each element is the pair (lower,upper) of interval boundaries,
    and return a function that evaluates on pytorch batches in the unit hypercube,
weighted by the proper Jacobian factor to preserve integrals.
"""
tdim = torch.tensor(dimensions)
assert tdim.shape[1] == 2, "argument dimensions is expected to have shape (N,2)"
assert torch.all(tdim[:, 1] > tdim[:, 0]), "Each dimension is expected to be (a,b) with a<b"
starts = tdim[:, 0]
lengths = tdim[:, 1] - tdim[:, 0]
jac = torch.prod(lengths).cpu().item()
def torchf(x):
npx = (x * lengths.to(x.device) + starts.to(x.device)).detach().cpu().numpy()
npfx = f(npx)
return (torch.tensor(npfx) * jac).to(x.device)
return torchf
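# Example (illustrative, commented out): wrap a numpy batch function defined on
# [-1, 1] x [0, 2] so it can be integrated over the unit hypercube; the Jacobian
# factor is (1 - (-1)) * (2 - 0) = 4.
#
#   import numpy as np
#   def np_f(x):                                  # x: numpy array of shape (batch, 2)
#       return np.exp(-(x ** 2).sum(axis=1))
#   torch_f = wrap_numpy_compact_batch_function(np_f, [(-1., 1.), (0., 2.)])
#   values = torch_f(torch.rand(100, 2))          # tensor of shape (100,)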
def wrap_compact_arguments_function(f, dimensions):
"""Take a function that evaluates on a sequence of arguments with values in compact intervals provided
as a list of shape (d,2) where each element is the pair (lower,upper) of interval boundaries and
return a function that evaluates on pytorch batches in the unit hypercube,
weighted by the proper Jacobian factor to preserve integrals.
Explicitly: f(x_1,x_2,...,x_N) where x_i are numbers in [dimensions[i][0], dimensions[i][1]] returns a single float.
"""
tdim = torch.tensor(dimensions)
assert tdim.shape[1] == 2, "argument dimensions is expected to have shape (N,2)"
assert torch.all(tdim[:, 1] > tdim[:, 0]), "Each dimension is expected to be (a,b) with a<b"
starts = tdim[:, 0]
lengths = tdim[:, 1] - tdim[:, 0]
jac = torch.prod(lengths).item()
def torchf(x):
lxs = (x * lengths.to(x.device) + starts.to(x.device)).detach().cpu().tolist()
fxs = torch.zeros(x.shape[0], device=x.device)
for i, lx in enumerate(lxs):
fxs[i] = f(*lx)
return fxs * jac
return torchf
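# Example (illustrative, commented out): wrap a scalar function of two arguments defined
# on [0, 3] x [0, 3]; each batch row is unpacked into f(x1, x2) and the result is scaled
# by the Jacobian factor 9.
#
#   def f(x1, x2):
#       return x1 * x2
#   torch_f = wrap_compact_arguments_function(f, [(0., 3.), (0., 3.)])
#   values = torch_f(torch.rand(100, 2))          # tensor of shape (100,)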
def wrap_hypercube_arguments_function(f):
"""Take a function that evaluates on a sequence of arguments with values in the unit hypercube and
return a function that evaluates on pytorch batches in the unit hypercube,
weighted by the proper Jacobian factor to preserve integrals.
Explicitly: f(x_1,x_2,...,x_N) where x_i are numbers in [0, 1] returns a single float.
"""
def torchf(x):
lxs = x.detach().cpu().tolist()
fxs = torch.zeros(x.shape[0], device=x.device)
for i, lx in enumerate(lxs):
fxs[i] = f(*lx)
return fxs
return torchf
| 39.64557
| 120
| 0.6606
|
1737012686d95b66a64c0594f35e9dc641baecc5
| 47,655
|
py
|
Python
|
calico/felix/plugins/fiptgenerator.py
|
ozdanborne/felix
|
5eff313e6498b3a7d775aa16cb09fd4578331701
|
[
"Apache-2.0"
] | 6
|
2016-10-18T04:04:25.000Z
|
2016-10-18T04:06:49.000Z
|
calico/felix/plugins/fiptgenerator.py
|
ozdanborne/felix
|
5eff313e6498b3a7d775aa16cb09fd4578331701
|
[
"Apache-2.0"
] | 1
|
2021-06-01T21:45:37.000Z
|
2021-06-01T21:45:37.000Z
|
calico/felix/plugins/fiptgenerator.py
|
ozdanborne/felix
|
5eff313e6498b3a7d775aa16cb09fd4578331701
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.fiptgenerator.py
~~~~~~~~~~~~
Default implementation of the Felix iptables Generator plugin. This module is
responsible for generating the vast majority of rules that get programmed to
iptables. Specifically this includes:
- the per endpoint chains
- the per profile chains
- the global Felix PREROUTING, INPUT and FORWARD chains.
Exceptions are the single rules that get inserted into the top level kernel
chains (owned instead by the felix.frules module) and the dispatch chains
that fan packets out to the appropriate endpoint chains
(owned instead by felix.dispatch).
This module is loaded by core Felix as a plugin via the
calico.felix.iptables_generator entrypoint, making it theoretically possible
for alternative implementations to be loaded instead. However, this interface
is currently HIGHLY EXPERIMENTAL. It should not be considered stable, and may
change significantly, or be removed completely, in future releases.
"""
import logging
import re
import itertools
import syslog
from calico.common import KNOWN_RULE_KEYS
from calico.datamodel_v1 import TieredPolicyId
from calico.felix import futils
from calico.felix.fplugin import FelixPlugin
from calico.felix.profilerules import UnsupportedICMPType
from calico.felix.frules import (CHAIN_TO_ENDPOINT, CHAIN_FROM_ENDPOINT,
CHAIN_TO_PREFIX, CHAIN_FROM_PREFIX,
CHAIN_PREROUTING, CHAIN_POSTROUTING,
CHAIN_INPUT, CHAIN_FORWARD,
FELIX_PREFIX, CHAIN_FIP_DNAT, CHAIN_FIP_SNAT,
CHAIN_TO_IFACE, CHAIN_FROM_IFACE,
CHAIN_OUTPUT, CHAIN_FAILSAFE_IN,
CHAIN_FAILSAFE_OUT)
CHAIN_PROFILE_PREFIX = FELIX_PREFIX + "p-"
_log = logging.getLogger(__name__)
# Maximum number of port entries in a "multiport" match rule. Ranges count for
# 2 entries.
MAX_MULTIPORT_ENTRIES = 15
# The default syslog level that packets get logged at when using the log
# action.
DEFAULT_PACKET_LOG_LEVEL = syslog.LOG_NOTICE
class FelixIptablesGenerator(FelixPlugin):
"""
Felix plugin responsible for generating the actual rules that get written
to iptables.
"""
def __init__(self):
self.IFACE_PREFIX = None
self.IFACE_MATCH = None
self.DEFAULT_INPUT_CHAIN_ACTION = None
self.METADATA_IP = None
self.METADATA_PORT = None
self.IPTABLES_MARK_ACCEPT = None
self.IPTABLES_MARK_NEXT_TIER = None
self.IPTABLES_MARK_ENDPOINTS = None
self.FAILSAFE_INBOUND_PORTS = None
self.FAILSAFE_OUTBOUND_PORTS = None
self.ACTION_ON_DROP = None
def store_and_validate_config(self, config):
# We don't have any plugin specific parameters, but we need to save
# off any other global config that we're interested in at this point.
super(FelixIptablesGenerator, self).store_and_validate_config(config)
self.IFACE_PREFIX = config.IFACE_PREFIX
self.IFACE_MATCH = [prefix + "+" for prefix in self.IFACE_PREFIX]
self.METADATA_IP = config.METADATA_IP
self.METADATA_PORT = config.METADATA_PORT
self.DEFAULT_INPUT_CHAIN_ACTION = config.DEFAULT_INPUT_CHAIN_ACTION
self.IPTABLES_MARK_ACCEPT = config.IPTABLES_MARK_ACCEPT
self.IPTABLES_MARK_NEXT_TIER = config.IPTABLES_MARK_NEXT_TIER
self.IPTABLES_MARK_ENDPOINTS = config.IPTABLES_MARK_ENDPOINTS
self.FAILSAFE_INBOUND_PORTS = config.FAILSAFE_INBOUND_PORTS
self.FAILSAFE_OUTBOUND_PORTS = config.FAILSAFE_OUTBOUND_PORTS
self.ACTION_ON_DROP = config.ACTION_ON_DROP
def raw_rpfilter_failed_chain(self, ip_version):
"""
Generate the RAW felix-PREROUTING chain -- currently only IPv6.
Returns a list of iptables fragments with which to program the
felix-PREROUTING chain that is invoked from the IPv6 RAW PREROUTING
kernel chain. Note this chain is ONLY INVOKED in the case that packets
fail the rpfilter match.
The list returned here should be the complete set of rules required
as any existing chain will be overwritten.
:param ip_version. Currently always 6.
:returns Tuple: list of rules, set of deps.
"""
# We only program this chain for IPv6
assert ip_version == 6
chain = self.drop_rules(ip_version,
CHAIN_PREROUTING,
None,
"IPv6 rpfilter failed")
return chain, {}
def nat_prerouting_chain(self, ip_version):
"""
Generate the NAT felix-PREROUTING chain.
Returns a list of iptables fragments with which to program the
felix-PREROUTING chain which is unconditionally invoked from the
NAT PREROUTING chain.
Note that the list returned here should be the complete set of rules
required as any existing chain will be overwritten.
:param ip_version.
:returns Tuple: list of rules, set of deps.
"""
chain = ["--append %s --jump %s" % (CHAIN_PREROUTING, CHAIN_FIP_DNAT)]
deps = set([CHAIN_FIP_DNAT])
if ip_version == 4 and self.METADATA_IP is not None:
# Need to expose the metadata server on a link-local.
# DNAT tcp -- any any anywhere 169.254.169.254
# tcp dpt:http to:127.0.0.1:9697
chain.append(
"--append " + CHAIN_PREROUTING + " "
"--protocol tcp "
"--dport 80 "
"--destination 169.254.169.254/32 "
"--jump DNAT --to-destination %s:%s" %
(self.METADATA_IP, self.METADATA_PORT))
return chain, deps
def nat_postrouting_chain(self, ip_version):
"""
Generate the NAT felix-POSTROUTING chain.
Returns a list of iptables fragments with which to program the
felix-POSTROUTING chain which is unconditionally invoked from the
NAT POSTROUTING chain.
Note that the list returned here should be the complete set of rules
required as any existing chain will be overwritten.
:param ip_version.
:returns Tuple: list of rules, set of deps.
"""
chain = ["--append %s --jump %s" % (CHAIN_POSTROUTING, CHAIN_FIP_SNAT)]
deps = set([CHAIN_FIP_SNAT])
return chain, deps
def filter_input_chain(self, ip_version, hosts_set_name=None):
"""
Generate the IPv4/IPv6 FILTER felix-INPUT chains.
Returns a list of iptables fragments with which to program the
felix-INPUT chain that is unconditionally invoked from both the IPv4
and IPv6 FILTER INPUT kernel chains.
Note that the list returned here should be the complete set of rules
required as any existing chain will be overwritten.
:param ip_version. Whether this is the IPv4 or IPv6 FILTER table.
:returns Tuple: list of rules, set of deps.
"""
if ip_version == 4:
metadata_addr = self.METADATA_IP
metadata_port = self.METADATA_PORT
dhcp_src_port = 68
dhcp_dst_port = 67
else:
metadata_addr = None
metadata_port = None
dhcp_src_port = 546
dhcp_dst_port = 547
chain = []
deps = set()
if hosts_set_name:
# IP-in-IP enabled, drop any IP-in-IP packets that are not from
# other Calico hosts.
_log.info("IPIP enabled, dropping IPIP packets from non-Calico "
"hosts.")
            # The ipencap protocol uses the ID "4". Some versions of iptables
# can't understand protocol names.
chain.extend(self.drop_rules(
ip_version,
CHAIN_INPUT,
"--protocol 4 --match set "
"! --match-set %s src" % hosts_set_name,
None)
)
# Allow established connections via the conntrack table.
chain.extend(self.drop_rules(ip_version,
CHAIN_INPUT,
"--match conntrack --ctstate INVALID",
None))
chain.append("--append %s --match conntrack "
"--ctstate RELATED,ESTABLISHED --jump ACCEPT" %
CHAIN_INPUT)
chain.append(
"--append {chain} --jump MARK --set-mark 0/{mark}".format(
chain=CHAIN_INPUT, mark=self.IPTABLES_MARK_ENDPOINTS)
)
for iface_match in self.IFACE_MATCH:
chain.append(
"--append {chain} --in-interface {iface} "
"--jump MARK --set-mark {mark}/{mark}".format(
chain=CHAIN_INPUT, iface=iface_match,
mark=self.IPTABLES_MARK_ENDPOINTS)
)
# Incoming traffic on host endpoints.
chain.append(
"--append {chain} --goto {goto} --match mark "
"--mark 0/{mark}".format(
chain=CHAIN_INPUT, goto=CHAIN_FROM_IFACE,
mark=self.IPTABLES_MARK_ENDPOINTS)
)
deps.add(CHAIN_FROM_IFACE)
# To act as a router for IPv6, we have to accept various types of
# ICMPv6 messages, as follows:
#
# - 130: multicast listener query.
# - 131: multicast listener report.
# - 132: multicast listener done.
# - 133: router solicitation, which an endpoint uses to request
# configuration information rather than waiting for an
# unsolicited router advertisement.
# - 135: neighbor solicitation.
# - 136: neighbor advertisement.
if ip_version == 6:
for icmp_type in ["130", "131", "132", "133", "135", "136"]:
chain.append("--append %s --jump ACCEPT "
"--protocol ipv6-icmp "
"--icmpv6-type %s" %
(CHAIN_INPUT, icmp_type))
if metadata_addr is not None:
_log.info("Metadata address specified, whitelisting metadata "
"service")
chain.append(
"--append %s --protocol tcp "
"--destination %s --dport %s --jump ACCEPT" %
(CHAIN_INPUT, metadata_addr, metadata_port)
)
# Special-case: allow DHCP.
chain.append(
"--append %s --protocol udp --sport %d "
"--dport %s --jump ACCEPT" %
(CHAIN_INPUT, dhcp_src_port, dhcp_dst_port)
)
# Special-case: allow DNS.
dns_dst_port = 53
chain.append(
"--append %s --protocol udp --dport %s --jump ACCEPT" %
(CHAIN_INPUT, dns_dst_port)
)
if self.DEFAULT_INPUT_CHAIN_ACTION != "DROP":
# Optimisation: the from-ENDPOINT chain signals acceptance of a
# packet by RETURNing. If we're going to drop the packet
# anyway, don't bother applying the from-ENDPOINT chain.
_log.info("Default endpoint->host action set to %s, felix will "
"apply per-endpoint policy to packets in the INPUT "
"chain.",
self.DEFAULT_INPUT_CHAIN_ACTION)
chain.append(
"--append %s --jump %s" %
(CHAIN_INPUT, CHAIN_FROM_ENDPOINT)
)
deps.add(CHAIN_FROM_ENDPOINT)
if self.DEFAULT_INPUT_CHAIN_ACTION != "RETURN":
# Optimisation: RETURN is the default if the packet reaches the end
# of the chain so no need to program it.
if self.DEFAULT_INPUT_CHAIN_ACTION == "DROP":
chain.extend(self.drop_rules(ip_version,
CHAIN_INPUT,
None,
"Drop all packets from "
"endpoints to the host"))
else:
chain.append(
"--append %s --jump %s" %
(CHAIN_INPUT, self.DEFAULT_INPUT_CHAIN_ACTION)
)
return chain, deps
def filter_output_chain(self, ip_version, hosts_set_name=None):
"""
Generate the IPv4/IPv6 FILTER felix-OUTPUT chains.
Returns a list of iptables fragments with which to program the
felix-OUTPUT chain that is unconditionally invoked from both the IPv4
and IPv6 FILTER OUTPUT kernel chains.
Note that the list returned here should be the complete set of rules
required as any existing chain will be overwritten.
:param ip_version. Whether this is the IPv4 or IPv6 FILTER table.
:returns Tuple: list of rules, set of deps.
"""
chain = []
deps = set()
# Allow established connections via the conntrack table.
chain.extend(self.drop_rules(ip_version,
CHAIN_OUTPUT,
"--match conntrack --ctstate INVALID",
None))
chain.append("--append %s --match conntrack "
"--ctstate RELATED,ESTABLISHED --jump ACCEPT" %
CHAIN_OUTPUT)
chain.append(
"--append {chain} --jump MARK --set-mark 0/{mark}".format(
chain=CHAIN_OUTPUT, mark=self.IPTABLES_MARK_ENDPOINTS)
)
# Outgoing traffic on host endpoints.
for iface_match in self.IFACE_MATCH:
chain.append(
"--append {chain} --out-interface {iface} "
"--jump MARK --set-mark {mark}/{mark}".format(
chain=CHAIN_OUTPUT, iface=iface_match,
mark=self.IPTABLES_MARK_ENDPOINTS)
)
chain.append(
"--append {chain} --goto {goto} --match mark "
"--mark 0/{mark}".format(
chain=CHAIN_OUTPUT, goto=CHAIN_TO_IFACE,
mark=self.IPTABLES_MARK_ENDPOINTS)
)
deps.add(CHAIN_TO_IFACE)
return chain, deps
def filter_forward_chain(self, ip_version):
"""
Generate the IPv4/IPv6 FILTER felix-FORWARD chains.
Returns a list of iptables fragments with which to program the
felix-FORWARD chain that is unconditionally invoked from both the IPv4
and IPv6 FILTER FORWARD kernel chains.
Note that the list returned here should be the complete set of rules
required as any existing chain will be overwritten.
:param ip_version. Whether this is the IPv4 or IPv6 FILTER table.
:returns Tuple: list of rules, set of deps.
"""
forward_chain = []
for iface_match in self.IFACE_MATCH:
forward_chain.extend(self.drop_rules(
ip_version, CHAIN_FORWARD,
"--in-interface %s --match conntrack --ctstate "
"INVALID" % iface_match, None))
forward_chain.extend(
self.drop_rules(
ip_version, CHAIN_FORWARD,
"--out-interface %s --match conntrack --ctstate "
"INVALID" % iface_match, None))
forward_chain.extend([
# First, a pair of conntrack rules, which accept established
# flows to/from workload interfaces.
"--append %s --in-interface %s --match conntrack "
"--ctstate RELATED,ESTABLISHED --jump ACCEPT" %
(CHAIN_FORWARD, iface_match),
"--append %s --out-interface %s --match conntrack "
"--ctstate RELATED,ESTABLISHED --jump ACCEPT" %
(CHAIN_FORWARD, iface_match),
# Then, for traffic from a workload interface, jump to the
# from endpoint chain. It will either DROP the packet or,
# if policy allows, return it to this chain for further
# processing.
"--append %s --jump %s --in-interface %s" %
(CHAIN_FORWARD, CHAIN_FROM_ENDPOINT, iface_match),
# Then, for traffic to a workload interface, jump to the
# "to endpoint" chain. Note: a packet that's going from one
# endpoint to another on the same host will go through both
# the "from" and "to" chains.
"--append %s --jump %s --out-interface %s" %
(CHAIN_FORWARD, CHAIN_TO_ENDPOINT, iface_match),
# Finally, if the packet is from/to a workload and it passes
# both the "from" and "to" chains without being dropped, it
# must be allowed by policy; ACCEPT it.
"--append %s --jump ACCEPT --in-interface %s" %
(CHAIN_FORWARD, iface_match),
"--append %s --jump ACCEPT --out-interface %s" %
(CHAIN_FORWARD, iface_match),
])
return forward_chain, set([CHAIN_FROM_ENDPOINT, CHAIN_TO_ENDPOINT])
def endpoint_chain_names(self, endpoint_suffix):
"""
Returns the set of chains belonging to a given endpoint. This is used
e.g. to identify the set of chains that should be deleted to clean up
        an endpoint.
:param endpoint_suffix: The suffix of the endpoint we want to know
the chains for.
:returns set[string]: the set of chain names
"""
to_chain_name = (CHAIN_TO_PREFIX + endpoint_suffix)
from_chain_name = (CHAIN_FROM_PREFIX + endpoint_suffix)
return set([to_chain_name, from_chain_name])
def host_endpoint_updates(self, ip_version, endpoint_id, suffix,
profile_ids, pol_ids_by_tier):
return self.endpoint_updates(
ip_version=ip_version,
endpoint_id=endpoint_id,
suffix=suffix,
mac=None,
profile_ids=profile_ids,
pol_ids_by_tier=pol_ids_by_tier,
to_direction="outbound",
from_direction="inbound",
with_failsafe=True,
)
def endpoint_updates(self, ip_version, endpoint_id, suffix, mac,
profile_ids, pol_ids_by_tier, to_direction="inbound",
from_direction="outbound", with_failsafe=False):
"""
Generate a set of iptables updates that will program all of the chains
needed for a given endpoint.
For each endpoint the following two top level chains must be defined
as they are referenced from the dispatch chains programmed by core
Felix.
- CHAIN_TO_PREFIX + endpoint_suffix
- CHAIN_FROM_PREFIX + endpoint_suffix
:param ip_version. Whether these are for the IPv4 or IPv6 iptables.
:param endpoint_id: The endpoint's ID.
:param suffix: The endpoint's suffix.
:param mac: The endpoint's MAC address
:param profile_ids: the set of profile_ids associated with this
endpoint
:param OrderedDict pol_ids_by_tier: ordered dict mapping tier name
to list of profiles.
:returns Tuple: updates, deps
"""
to_chain_name = (CHAIN_TO_PREFIX + suffix)
from_chain_name = (CHAIN_FROM_PREFIX + suffix)
to_chain, to_deps = self._build_to_or_from_chain(
ip_version,
endpoint_id,
profile_ids,
pol_ids_by_tier,
to_chain_name,
to_direction,
with_failsafe=with_failsafe,
)
from_chain, from_deps = self._build_to_or_from_chain(
ip_version,
endpoint_id,
profile_ids,
pol_ids_by_tier,
from_chain_name,
from_direction,
expected_mac=mac,
with_failsafe=with_failsafe,
)
updates = {to_chain_name: to_chain, from_chain_name: from_chain}
deps = {to_chain_name: to_deps, from_chain_name: from_deps}
return updates, deps
def failsafe_in_chain(self):
updates = []
for port in self.FAILSAFE_INBOUND_PORTS:
updates.append("--append %s --protocol tcp --dport %s "
"--jump ACCEPT" %
(CHAIN_FAILSAFE_IN, port))
deps = set()
return updates, deps
def failsafe_out_chain(self):
updates = []
for port in self.FAILSAFE_OUTBOUND_PORTS:
updates.append("--append %s --protocol tcp --dport %s "
"--jump ACCEPT" %
(CHAIN_FAILSAFE_OUT, port))
deps = set()
return updates, deps
def profile_chain_names(self, profile_id):
"""
Returns the set of chains belonging to a given profile. This is used
e.g. to identify the set of chains that should be deleted to clean up
a profile.
:param profile_id: The profile ID we want to know the chains for.
:returns set[string]: the set of chain names
"""
return set([self._profile_to_chain_name("inbound", profile_id),
self._profile_to_chain_name("outbound", profile_id)])
def profile_updates(self, profile_id, profile, ip_version, tag_to_ipset,
selector_to_ipset, comment_tag=None):
"""
Generate a set of iptables updates that will program all of the chains
needed for a given profile.
:returns Tuple: updates, deps
"""
# Generates an inbound and an outbound chain for each profile.
# Within each chain, the logic is as follows:
# * If we hit an allow rule, we'll return with the Accept mark bit set
# to indicate that we matched.
# * If we hit a deny rule, we'll drop the packet immediately.
# * If we reach the end of the chain, we'll return with no mark set.
updates = {}
deps = {}
for direction in ("inbound", "outbound"):
chain_name = self._profile_to_chain_name(direction, profile_id)
rules_key = "%s_rules" % direction
rules = profile.get(rules_key, [])
fragments = []
for r in rules:
rule_version = r.get('ip_version')
if rule_version is None or rule_version == ip_version:
fragments.extend(self._rule_to_iptables_fragments(
chain_name,
r,
ip_version,
tag_to_ipset,
selector_to_ipset))
updates[chain_name] = fragments
return updates, deps
def logged_drop_rules(self, ip_version, chain_name, rule_spec=None,
comment=None, ipt_action="--append", log_pfx=None):
"""
Return a list of iptables updates that can be applied to a chain to
drop packets that meet a given rule_spec, with optional log.
:param ip_version. Whether these are for the IPv4 or IPv6 iptables.
:param chain_name: the chain that the updates will be applied to
:param rule_spec: the rule spec (e.g. match criteria). May be None
to drop all packets.
:param comment: any comment that should be associated with the
rule. May be None to not include a comment.
:param ipt_action: the action that should be used to apply the rule
(e.g. --append or --insert)
:param log_pfx: If not None, the rules will trigger an iptables LOG
action with this log prefix before dropping the packet.
:return list: a list of iptables fragments of the form
[ipt_action] [chain_name] [rule_spec] [action] [comment] e.g.
--append my_chain --match conntrack --ctstate INVALID --jump DROP
"""
drop_rules = self.drop_rules(ip_version, chain_name,
rule_spec=rule_spec, comment=comment,
ipt_action=ipt_action)
if log_pfx is not None:
log_target = self._log_target(log_pfx=log_pfx)
log_rule = " ".join(
[ipt_action, chain_name, rule_spec, "--jump", log_target]
)
drop_rules[0:0] = [log_rule]
return drop_rules
def drop_rules(self, ip_version, chain_name, rule_spec=None, comment=None,
ipt_action="--append"):
"""
Return a list of iptables updates that can be applied to a chain to
drop packets that meet a given rule_spec.
:param ip_version. Whether these are for the IPv4 or IPv6 iptables.
:param chain_name: the chain that the updates will be applied to
:param rule_spec: the rule spec (e.g. match criteria). May be None
to drop all packets.
:param comment: any comment that should be associated with the
rule. May be None to not include a comment.
:param ipt_action: the action that should be used to apply the rule
(e.g. --append or --insert)
:return list: a list of iptables fragments of the form
[ipt_action] [chain_name] [rule_spec] [action] [comment] e.g.
--append my_chain --match conntrack --ctstate INVALID --jump DROP
"""
comment_str = None
if comment is not None:
comment = comment[:255] # Limit imposed by iptables.
            assert re.match(r'[\w: ]{,255}$', comment), \
                "Invalid comment %r" % comment
comment_str = '-m comment --comment "%s"' % comment
rules = []
if self.ACTION_ON_DROP.startswith("LOG-"):
# log-and-accept, log-and-drop.
log_spec = '--jump LOG --log-prefix "calico-drop: " --log-level 4'
log_rule = " ".join(
[p for p in [ipt_action, chain_name, rule_spec, log_spec,
comment_str] if p is not None]
)
rules.append(log_rule)
if self.ACTION_ON_DROP.endswith("ACCEPT"):
action_spec = (
'--jump ACCEPT -m comment '
'--comment "!SECURITY DISABLED! DROP overridden to ACCEPT"'
)
else:
assert self.ACTION_ON_DROP.endswith("DROP")
action_spec = "--jump DROP"
drop_rule = " ".join(
[p for p in [ipt_action, chain_name, rule_spec, action_spec,
comment_str] if p is not None]
)
rules.append(drop_rule)
return rules
def _build_to_or_from_chain(self, ip_version, endpoint_id, profile_ids,
prof_ids_by_tier, chain_name, direction,
expected_mac=None, with_failsafe=False):
"""
Generate the necessary set of iptables fragments for a to or from
chain for a given endpoint.
:param ip_version. Whether this chain is for IPv4 or IPv6 iptables.
:param endpoint_id: The endpoint's ID.
:param profile_ids: The set of profile_ids associated with this
endpoint.
:param chain_name: The name of the chain to generate.
:param direction: One of "inbound" or "outbound".
:param expected_mac: The expected source MAC address. If not None
then the chain will explicitly drop any packets that do not have this
expected source MAC address.
:returns Tuple: chain, deps. Chain is a list of fragments that can
be submitted to iptables to program the requested chain. Deps is a
set containing names of chains that this endpoint chain depends on.
"""
if with_failsafe:
if direction == "inbound":
failsafe_chain = CHAIN_FAILSAFE_IN
else:
failsafe_chain = CHAIN_FAILSAFE_OUT
chain = [
"--append %(chain)s --jump %(failsafe_chain)s" % {
"chain": chain_name,
"failsafe_chain": failsafe_chain
}
]
deps = {failsafe_chain}
else:
chain = []
deps = set()
# Ensure the Accept MARK is set to 0 when we start so that unmatched
# packets will be dropped.
chain.append(
"--append %(chain)s --jump MARK --set-mark 0/%(mark)s" % {
'chain': chain_name,
'mark': self.IPTABLES_MARK_ACCEPT
}
)
if expected_mac:
_log.debug("Policing source MAC: %s", expected_mac)
chain.extend(self.drop_rules(
ip_version,
chain_name,
"--match mac ! --mac-source %s" % expected_mac,
"Incorrect source MAC"))
# Tiered policies come first.
# Each tier must either accept the packet outright or pass it to the
# next tier for further processing.
for tier, pol_ids in prof_ids_by_tier.iteritems():
# Zero the "next-tier packet" mark. Then process each policy
# in turn.
chain.append('--append %(chain)s '
'--jump MARK --set-mark 0/%(mark)s '
'--match comment --comment "Start of tier %(tier)s"' %
{
"chain": chain_name,
"mark": self.IPTABLES_MARK_NEXT_TIER,
"tier": tier,
})
for pol_id in pol_ids:
policy_chain = self._profile_to_chain_name(direction, pol_id)
deps.add(policy_chain)
# Only process the profile if none of the previous profiles
# set the next-tier mark.
chain.append("--append %(chain)s "
"--match mark --mark 0/%(mark)s "
"--jump %(pol_chain)s" %
{
"chain": chain_name,
"mark": self.IPTABLES_MARK_NEXT_TIER,
"pol_chain": policy_chain,
})
# If the policy accepted the packet, it sets the Accept
# MARK==1. Immediately RETURN the packet to signal that it's
# been accepted.
chain.append('--append %(chain)s '
'--match mark --mark %(mark)s/%(mark)s '
'--match comment '
'--comment "Return if policy accepted" '
'--jump RETURN' %
{
"chain": chain_name,
"mark": self.IPTABLES_MARK_ACCEPT,
})
chain.extend(self.drop_rules(
ip_version,
chain_name,
"--match mark --mark 0/%s" % self.IPTABLES_MARK_NEXT_TIER,
comment="Drop if no policy in tier passed"
))
# Then, jump to each directly-referenced profile in turn. The profile
# will do one of the following:
#
# * DROP the packet; in which case we won't see it again.
# * RETURN the packet with Accept MARK==1, indicating it accepted the
# packet. In which case, we RETURN and skip further profiles.
# * RETURN the packet with Accept MARK==0, indicating it did not match
# the packet. In which case, we carry on and process the next
# profile.
for profile_id in profile_ids:
policy_chain = self._profile_to_chain_name(direction, profile_id)
deps.add(policy_chain)
chain.append("--append %s --jump %s" % (chain_name, policy_chain))
# If the profile accepted the packet, it sets Accept MARK==1.
# Immediately RETURN the packet to signal that it's been accepted.
chain.append(
'--append %(chain)s --match mark --mark %(mark)s/%(mark)s '
'--match comment --comment "Profile accepted packet" '
'--jump RETURN' % {
'chain': chain_name,
'mark': self.IPTABLES_MARK_ACCEPT
}
)
# Default drop rule.
chain.extend(
self.drop_rules(
ip_version,
chain_name,
None,
"Packet did not match any profile (endpoint %s)" % endpoint_id
)
)
return chain, deps
def _profile_to_chain_name(self, inbound_or_outbound, profile_id):
"""
Returns the name of the chain to use for a given profile (and
direction).
The profile ID that we are supplied might be (far) too long for us
to use, but truncating it is dangerous (for example, in OpenStack
the profile is the ID of each security group in use, joined with
underscores). Hence we make a unique string out of it and use that.
:param inbound_or_outbound: Either "inbound" or "outbound".
:param profile_id: The profile ID we want to know a name for.
:returns string: The name of the chain
"""
if isinstance(profile_id, TieredPolicyId):
profile_id = "%s/%s" % (profile_id.tier, profile_id.policy_id)
profile_string = futils.uniquely_shorten(profile_id, 16)
return CHAIN_PROFILE_PREFIX + "%s-%s" % (profile_string,
inbound_or_outbound[:1])
def _rule_to_iptables_fragments(self, chain_name, rule, ip_version,
tag_to_ipset, selector_to_ipset):
"""
Convert a rule dict to a list of iptables fragments suitable to use
with iptables-restore.
        Most rules result in a list containing one item.
:param str chain_name: Name of the chain this rule belongs to (used in
the --append)
:param dict[str,str|list|int] rule: Rule dict.
:param ip_version. Whether these are for the IPv4 or IPv6 iptables.
:param dict[str] tag_to_ipset: dictionary mapping from tag key to ipset
name.
:param dict[SelectorExpression,str] selector_to_ipset: dict mapping
from selector to the name of the ipset that represents it.
:return list[str]: iptables --append fragments.
"""
# Check we've not got any unknown fields.
unknown_keys = set(rule.keys()) - KNOWN_RULE_KEYS
assert not unknown_keys, "Unknown keys: %s" % ", ".join(unknown_keys)
# Ports are special, we have a limit on the number of ports that can go
# in one rule so we need to break up rules with a lot of ports into
# chunks. We take the cross product of the chunks to cover all the
        # combinations. If there are no ports, or only a few, then the cross
        # product ends up with only one entry.
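        # For example (illustrative numbers): 20 source ports split into chunks
        # of 15 and 5, with a single chunk of destination ports, would yield
        # two (src_chunk, dst_chunk) combinations, each emitted as its own
        # copy of the rule.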
src_ports = rule.get("src_ports", [])
dst_ports = rule.get("dst_ports", [])
src_port_chunks = self._split_port_lists(src_ports)
dst_port_chunks = self._split_port_lists(dst_ports)
# Only need a shallow copy so we can replace ports.
rule_copy = dict(rule)
try:
fragments = []
for src_ports, dst_ports in itertools.product(src_port_chunks,
dst_port_chunks):
rule_copy["src_ports"] = src_ports
rule_copy["dst_ports"] = dst_ports
frags = self._rule_to_iptables_fragments_inner(
chain_name,
rule_copy,
ip_version,
tag_to_ipset,
selector_to_ipset)
fragments.extend(frags)
return fragments
except Exception as e:
# Defensive: isolate failures to parse the rule (which has already
# passed validation by this point) to this chain.
_log.exception("Failed to parse rules: %r", e)
return self.drop_rules(ip_version,
chain_name,
None,
"ERROR failed to parse rules")
def _split_port_lists(self, ports):
"""
Splits a list of ports and port ranges into chunks that are
small enough to use with the multiport match.
:param list[str|int] ports: list of ports or ranges, specified with
":"; for example, '1024:6000'
:return list[list[str]]: list of chunks. If the input is empty, then
returns a list containing a single empty list.
"""
chunks = []
chunk = []
entries_in_chunk = 0
for port_or_range in ports:
port_or_range = str(port_or_range) # Defensive, support ints too.
if ":" in port_or_range:
# This is a range, which counts for 2 entries.
num_entries = 2
else:
# Just a port.
num_entries = 1
if entries_in_chunk + num_entries > MAX_MULTIPORT_ENTRIES:
chunks.append(chunk)
chunk = []
entries_in_chunk = 0
chunk.append(port_or_range)
entries_in_chunk += num_entries
if chunk or not chunks:
chunks.append(chunk)
return chunks
def _rule_to_iptables_fragments_inner(self, chain_name, rule, ip_version,
tag_to_ipset, selector_to_ipset):
"""
Convert a rule dict to iptables fragments suitable to use with
iptables-restore.
:param str chain_name: Name of the chain this rule belongs to (used in
the --append)
:param dict[str,str|list|int] rule: Rule dict.
:param ip_version. Whether these are for the IPv4 or IPv6 iptables.
:param dict[str] tag_to_ipset: dictionary mapping from tag key to ipset
name.
:param dict[SelectorExpression,str] selector_to_ipset: dict mapping
from selector to the name of the ipset that represents it.
:returns list[str]: list of iptables --append fragments.
"""
# Check we've not got any unknown fields.
_log.debug("converting rule %s to iptables fragments", rule)
unknown_keys = set(rule.keys()) - KNOWN_RULE_KEYS
assert not unknown_keys, "Unknown keys: %s" % ", ".join(unknown_keys)
# Build up the update in chunks and join them below.
rule_spec = []
append = lambda *args: rule_spec.extend(args)
proto = rule.get("protocol")
for neg_pfx in ["", "!"]:
maybe_neg_proto = rule.get(neg_pfx + "protocol")
if maybe_neg_proto:
append(neg_pfx, "--protocol", str(maybe_neg_proto))
for dirn in ["src", "dst"]:
# Some params use the long-form of the name.
direction = "source" if dirn == "src" else "destination"
# Network (CIDR).
net_key = neg_pfx + dirn + "_net"
if net_key in rule and rule[net_key] is not None:
ip_or_cidr = rule[net_key]
if (":" in ip_or_cidr) == (ip_version == 6):
append(neg_pfx, "--%s" % direction, ip_or_cidr)
# Tag, which maps to an ipset.
tag_key = neg_pfx + dirn + "_tag"
if tag_key in rule and rule[tag_key] is not None:
ipset_name = tag_to_ipset[rule[tag_key]]
append("--match set",
neg_pfx, "--match-set", ipset_name, dirn)
# Selector, likewise.
sel_key = neg_pfx + dirn + "_selector"
if sel_key in rule and rule[sel_key] is not None:
ipset_name = selector_to_ipset[rule[sel_key]]
append("--match set",
neg_pfx, "--match-set", ipset_name, dirn)
# Port lists/ranges, which we map to multiport.
ports_key = neg_pfx + dirn + "_ports"
ports = rule.get(ports_key)
if ports: # Ignore empty list.
                    # Can only match on ports if the (non-negated) protocol is
                    # set to a supported value.
assert proto in ["tcp", "udp"], \
"Protocol %s not supported with %s (%s)" % \
(proto, ports_key, rule)
if neg_pfx == '':
# Positive match; caller has already chunked the
# ports list up into blocks of suitable length for
# _ports_to_multiport. We only see one chunk.
ports_str = self._ports_to_multiport(ports)
append("--match multiport", "--%s-ports" % direction,
ports_str)
else:
# This is a negative match. While an individual
# multi-port match can only match 15 ports we can
# supply multiple multi-port matches, which will be
# and-ed together. (This doesn't work for positive
# matches because we need those to be or-ed together.)
port_chunks = self._split_port_lists(ports)
for chunk in port_chunks:
ports_str = self._ports_to_multiport(chunk)
append("--match multiport",
"!", "--%s-ports" % direction,
ports_str)
icmp_type = rule.get(neg_pfx + "icmp_type")
icmp_code = rule.get(neg_pfx + "icmp_code")
if icmp_type is not None:
_log.debug("ICMP type set to %s, checking for a more "
"detailed code", icmp_type)
if icmp_type == 255:
# Temporary work-around for this issue:
# https://github.com/projectcalico/felix/issues/451
# This exception will be caught by the caller, which will
# replace this rule with a DROP rule. That's arguably
# better than forbidding this case in the validation
# routine, which would replace the whole chain with a DROP.
_log.error("Kernel doesn't support matching on ICMP type "
"255.")
raise UnsupportedICMPType()
assert isinstance(icmp_type, int), "ICMP type should be an int"
if icmp_code is not None:
_log.debug("ICMP code set to %s", icmp_code)
assert isinstance(icmp_code, int), "ICMP code should be " \
"an int"
icmp_filter = "%s/%s" % (icmp_type, icmp_code)
else:
icmp_filter = icmp_type
if proto == "icmp" and ip_version == 4:
append("--match icmp", neg_pfx, "--icmp-type", icmp_filter)
elif ip_version == 6:
assert proto == "icmpv6"
# Note variant spelling of icmp[v]6
append("--match icmp6",
neg_pfx, "--icmpv6-type", icmp_filter)
action = rule.get("action", "allow")
extra_rules = []
if action in {"allow", "next-tier"}:
if action == "allow":
mark_bit = self.IPTABLES_MARK_ACCEPT
else:
mark_bit = self.IPTABLES_MARK_NEXT_TIER
# allow and next-tier require two rules, one to mark the packet
# so the parent chain knows what happened and a second rule to
# return to the parent chain if the packet was marked.
ipt_target = "MARK --set-mark %(mark)s/%(mark)s" % {
"mark": mark_bit
}
mark_match_fragment = (
"--append %(chain)s --match mark --mark %(mark)s/%(mark)s " %
{
"chain": chain_name,
"mark": mark_bit,
}
)
if "log_prefix" in rule:
# We've been asked to log when we hit this rule.
extra_rules.append(
mark_match_fragment + "--jump " +
self._log_target(rule=rule)
)
extra_rules.append(mark_match_fragment + "--jump RETURN")
elif action == "log":
ipt_target = self._log_target(rule=rule)
elif action == "deny":
ipt_target = "DROP"
else:
# Validation should prevent unknown actions from getting this
# far.
raise ValueError("Unknown rule action %s" % action)
rule_spec_str = " ".join(str(x) for x in rule_spec if x != "")
if ipt_target == "DROP":
rules = self.logged_drop_rules(ip_version, chain_name,
rule_spec_str,
log_pfx=rule.get("log_prefix"))
else:
rules = [" ".join(["--append", chain_name, rule_spec_str,
"--jump", ipt_target])]
if extra_rules:
rules.extend(extra_rules)
return rules
def _log_target(self, rule=None, log_pfx=None):
"""
:return: an iptables logging target "LOG --log-prefix ..." for the
given rule or explicit prefix.
"""
if log_pfx is None:
log_pfx = rule.get("log_prefix", "calico-packet")
log_target = (
'LOG --log-prefix "%s: " --log-level %s' %
(log_pfx, DEFAULT_PACKET_LOG_LEVEL)
)
return log_target
def _ports_to_multiport(self, ports):
"""
Convert a list of ports and ranges into a multiport match string.
:param list[int|str] ports: List of ports as per the datamodel.
"""
ports_str = ','.join([str(p) for p in ports])
        # Check that the output has at most 15 port numbers in it, which is a
# limit imposed by iptables. Ranges, such as "1234:5678" count as 2
# numbers.
assert (ports_str.count(",") + ports_str.count(":") + 1) <= 15, \
"Too many ports (%s)" % ports_str
return ports_str
| 43.009928
| 79
| 0.562333
|
409f4fe3277097ce43757facb093414115570ce0
| 182
|
py
|
Python
|
app/ssp_module/errors.py
|
khushalt/ssp_india
|
02d760ae25917e86ea03ac5f5845068d7d987bdb
|
[
"MIT"
] | 1
|
2020-07-04T14:42:49.000Z
|
2020-07-04T14:42:49.000Z
|
app/ssp_module/errors.py
|
khushalt/ssp_india
|
02d760ae25917e86ea03ac5f5845068d7d987bdb
|
[
"MIT"
] | null | null | null |
app/ssp_module/errors.py
|
khushalt/ssp_india
|
02d760ae25917e86ea03ac5f5845068d7d987bdb
|
[
"MIT"
] | null | null | null |
from flask import render_template
from app.ssp_module import create_app
@create_app().errorhandler(404)
def not_found_error(error):
return render_template('error/404.html'), 404
| 30.333333
| 49
| 0.807692
|
e969021a6cfb1ee2142d5bf0508049c43785e67f
| 5,913
|
py
|
Python
|
sct_custom/spinalcordtoolbox/scripts/sct_crop_image.py
|
nidebroux/lumbosacral_segmentation
|
3217960c6f0f5c3886dfdf46e1286ad2f737f4aa
|
[
"Unlicense",
"MIT"
] | 1
|
2021-09-07T08:52:21.000Z
|
2021-09-07T08:52:21.000Z
|
sct_custom/spinalcordtoolbox/scripts/sct_crop_image.py
|
nidebroux/lumbosacral_segmentation
|
3217960c6f0f5c3886dfdf46e1286ad2f737f4aa
|
[
"Unlicense",
"MIT"
] | null | null | null |
sct_custom/spinalcordtoolbox/scripts/sct_crop_image.py
|
nidebroux/lumbosacral_segmentation
|
3217960c6f0f5c3886dfdf46e1286ad2f737f4aa
|
[
"Unlicense",
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# CLI script to crop an image.
#
# Copyright (c) 2019 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Julien Cohen-Adad
#
# About the license: see the file LICENSE.TXT
import sys
import os
from spinalcordtoolbox.cropping import ImageCropper, BoundingBox
from spinalcordtoolbox.image import Image, add_suffix
from spinalcordtoolbox.utils import SCTArgumentParser, Metavar, init_sct, display_viewer_syntax, set_global_loglevel
def get_parser():
parser = SCTArgumentParser(
description="Tools to crop an image. Either via command line or via a Graphical User Interface (GUI). See "
"example usage at the end.",
epilog="EXAMPLES:\n"
"- To crop an image using the GUI (this does not allow to crop along the right-left dimension):\n"
"sct_crop_image -i t2.nii.gz -g 1\n\n"
"- To crop an image using a binary mask:\n"
"sct_crop_image -i t2.nii.gz -m mask.nii.gz\n\n"
"- To crop an image using a reference image:\n"
"sct_crop_image -i t2.nii.gz -ref mt1.nii.gz\n\n"
"- To crop an image by specifying min/max (you don't need to specify all dimensions). In the example "
"below, cropping will occur between x=5 and x=60, and between z=5 and z=zmax-1\n"
"sct_crop_image -i t2.nii.gz -xmin 5 -xmax 60 -zmin 5 -zmax -2\n\n"
)
mandatoryArguments = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatoryArguments.add_argument(
'-i',
required=True,
help="Input image. Example: t2.nii.gz",
metavar=Metavar.file,
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
'-h',
'--help',
action='help',
help="Show this help message and exit")
optional.add_argument(
'-o',
help="Output image. By default, the suffix '_crop' will be added to the input image.",
metavar=Metavar.str,
)
optional.add_argument(
'-g',
type=int,
help="0: Cropping via command line | 1: Cropping via GUI. Has priority over -m.",
choices=(0, 1),
default=0,
)
optional.add_argument(
'-m',
help="Binary mask that will be used to extract bounding box for cropping the image. Has priority over -ref.",
metavar=Metavar.file,
)
optional.add_argument(
'-ref',
help="Image which dimensions (in the physical coordinate system) will be used as a reference to crop the "
"input image. Only works for 3D images. Has priority over min/max method.",
metavar=Metavar.file,
)
optional.add_argument(
'-xmin',
type=int,
default=0,
help="Lower bound for cropping along X.",
metavar=Metavar.int,
)
optional.add_argument(
'-xmax',
type=int,
default=-1,
help="Higher bound for cropping along X. Setting '-1' will crop to the maximum dimension (i.e. no change), "
"'-2' will crop to the maximum dimension minus 1 slice, etc.",
metavar=Metavar.int,
)
optional.add_argument(
'-ymin',
type=int,
default=0,
help="Lower bound for cropping along Y.",
metavar=Metavar.int,
)
optional.add_argument(
'-ymax',
type=int,
default=-1,
help="Higher bound for cropping along Y. Follows the same rules as xmax.",
metavar=Metavar.int,
)
optional.add_argument(
'-zmin',
type=int,
default=0,
help="Lower bound for cropping along Z.",
metavar=Metavar.int,
)
optional.add_argument(
'-zmax',
type=int,
default=-1,
help="Higher bound for cropping along Z. Follows the same rules as xmax.",
metavar=Metavar.int,
)
optional.add_argument(
'-b',
type=int,
default=None,
help="If this flag is declared, the image will not be cropped (i.e. the dimension will not change). Instead, "
"voxels outside the bounding box will be set to the value specified by this flag. For example, to have "
"zeros outside the bounding box, use: '-b 0'",
metavar=Metavar.int,
)
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
return parser
def main(argv=None):
"""
Main function
:param argv:
:return:
"""
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_global_loglevel(verbose=verbose)
# initialize ImageCropper
cropper = ImageCropper(Image(arguments.i))
cropper.verbose = verbose
# Switch across cropping methods
if arguments.g:
cropper.get_bbox_from_gui()
elif arguments.m:
cropper.get_bbox_from_mask(Image(arguments.m))
elif arguments.ref:
cropper.get_bbox_from_ref(Image(arguments.ref))
else:
cropper.get_bbox_from_minmax(
BoundingBox(arguments.xmin, arguments.xmax,
arguments.ymin, arguments.ymax,
arguments.zmin, arguments.zmax)
)
# Crop image
img_crop = cropper.crop(background=arguments.b)
# Write cropped image to file
if arguments.o is None:
fname_out = add_suffix(arguments.i, '_crop')
else:
fname_out = arguments.o
img_crop.save(fname_out)
display_viewer_syntax([arguments.i, fname_out])
if __name__ == "__main__":
init_sct()
main(sys.argv[1:])
| 32.489011
| 118
| 0.611703
|
1b21b3ce9aa871f15ec056aca3271ed0019d0b68
| 1,589
|
gyp
|
Python
|
third_party/fontconfig/fontconfig.gyp
|
iplo/Chain
|
8bc8943d66285d5258fffc41bed7c840516c4422
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 231
|
2015-01-08T09:04:44.000Z
|
2021-12-30T03:03:10.000Z
|
third_party/fontconfig/fontconfig.gyp
|
JasonEric/chromium
|
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2017-02-14T21:55:58.000Z
|
2017-02-14T21:55:58.000Z
|
third_party/fontconfig/fontconfig.gyp
|
JasonEric/chromium
|
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 268
|
2015-01-21T05:53:28.000Z
|
2022-03-25T22:09:01.000Z
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'fontconfig',
'type': '<(component)',
'dependencies' : [
'../zlib/zlib.gyp:zlib',
'../../build/linux/system.gyp:freetype2',
'../libxml/libxml.gyp:libxml',
],
'defines': [
'HAVE_CONFIG_H',
'FC_CACHEDIR="/var/cache/fontconfig"',
'FONTCONFIG_PATH="/etc/fonts"',
],
'sources': [
'src/src/fcarch.h',
'src/src/fcatomic.c',
'src/src/fcblanks.c',
'src/src/fccache.c',
'src/src/fccfg.c',
'src/src/fccharset.c',
'src/src/fccompat.c',
'src/src/fcdbg.c',
'src/src/fcdefault.c',
'src/src/fcdir.c',
'src/src/fcformat.c',
'src/src/fcfreetype.c',
'src/src/fcfs.c',
'src/src/fchash.c',
'src/src/fcinit.c',
'src/src/fclang.c',
'src/src/fclist.c',
'src/src/fcmatch.c',
'src/src/fcmatrix.c',
'src/src/fcname.c',
'src/src/fcobjs.c',
'src/src/fcpat.c',
'src/src/fcserialize.c',
'src/src/fcstat.c',
'src/src/fcstr.c',
'src/src/fcxml.c',
'src/src/ftglue.h',
'src/src/ftglue.c',
],
'include_dirs': [
'src',
'include',
'include/src',
],
'direct_dependent_settings': {
'include_dirs': [
'src',
],
},
},
],
}
| 25.222222
| 72
| 0.488357
|
bcfbfe486a266b015cb48714e8753a1d0909ad34
| 1,126
|
py
|
Python
|
UltronRoBo/utils/filter_groups.py
|
UltronRoBo/UltronRoBoAssistant
|
874dcf725d453ffabd85543533d2a07676af4d65
|
[
"MIT"
] | null | null | null |
UltronRoBo/utils/filter_groups.py
|
UltronRoBo/UltronRoBoAssistant
|
874dcf725d453ffabd85543533d2a07676af4d65
|
[
"MIT"
] | null | null | null |
UltronRoBo/utils/filter_groups.py
|
UltronRoBo/UltronRoBoAssistant
|
874dcf725d453ffabd85543533d2a07676af4d65
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2021 UltronRoBo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
karma_positive_group = 3
karma_negative_group = 4
| 41.703704
| 78
| 0.801066
|
f63217b8619e680626a8582b22c6519590b2800c
| 227
|
py
|
Python
|
Programmers/[cutz]hate same num.py
|
cutz-j/AlgorithmStudy
|
de0f81220e29bd5e109d174800f507b12a3bee36
|
[
"MIT"
] | 3
|
2019-11-26T14:31:01.000Z
|
2020-01-10T18:19:46.000Z
|
Programmers/[cutz]hate same num.py
|
cutz-j/AlgorithmStudy
|
de0f81220e29bd5e109d174800f507b12a3bee36
|
[
"MIT"
] | null | null | null |
Programmers/[cutz]hate same num.py
|
cutz-j/AlgorithmStudy
|
de0f81220e29bd5e109d174800f507b12a3bee36
|
[
"MIT"
] | null | null | null |
import sys
def solution(arr):
    # Remove consecutive duplicate values while preserving order
    # Up to 10**6 elements --> O(n)
answer = [arr[0]]
for a in arr[1:]:
if a == answer[-1]:
continue
else:
answer.append(a)
return answer
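# Illustrative check (hypothetical input):
# >>> solution([1, 1, 3, 3, 0, 1, 1])
# [1, 3, 0, 1]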
| 18.916667
| 28
| 0.45815
|
9b14420a0061ef096b38daa64cd2955d62ae77e1
| 21,427
|
py
|
Python
|
lms/resources/_js_config/__init__.py
|
hypothesis/lms
|
722dac444dc1e73298eea5193f871f3ddefe46fd
|
[
"BSD-2-Clause"
] | 38
|
2017-12-30T23:49:53.000Z
|
2022-02-15T21:07:49.000Z
|
lms/resources/_js_config/__init__.py
|
hypothesis/lms
|
722dac444dc1e73298eea5193f871f3ddefe46fd
|
[
"BSD-2-Clause"
] | 1,733
|
2017-11-09T18:46:05.000Z
|
2022-03-31T11:05:50.000Z
|
lms/resources/_js_config/__init__.py
|
hypothesis/lms
|
722dac444dc1e73298eea5193f871f3ddefe46fd
|
[
"BSD-2-Clause"
] | 10
|
2018-07-11T17:12:46.000Z
|
2022-01-07T20:00:23.000Z
|
import functools
from enum import Enum
from typing import List, Optional
from lms.models import ApplicationInstance, GroupInfo, HUser
from lms.resources._js_config.file_picker_config import FilePickerConfig
from lms.services import HAPIError
from lms.validation.authentication import BearerTokenSchema
from lms.views.helpers import via_url
class JSConfig:
"""The config for the app's JavaScript code."""
class Mode(str, Enum):
OAUTH2_REDIRECT_ERROR = "oauth2-redirect-error"
BASIC_LTI_LAUNCH = "basic-lti-launch"
CONTENT_ITEM_SELECTION = "content-item-selection"
ERROR_DIALOG = "error-dialog"
class ErrorCode(str, Enum):
BLACKBOARD_MISSING_INTEGRATION = "blackboard_missing_integration"
CANVAS_INVALID_SCOPE = "canvas_invalid_scope"
REUSED_CONSUMER_KEY = "reused_consumer_key"
def __init__(self, context, request):
self._context = context
self._request = request
self._authority = request.registry.settings["h_authority"]
self._grading_info_service = request.find_service(name="grading_info")
self._lti_user = request.lti_user
@property
def _h_user(self):
return self._lti_user.h_user
@property
def _consumer_key(self):
return self._lti_user.oauth_consumer_key
def _application_instance(self):
"""
Return the current request's ApplicationInstance.
:raise ApplicationInstanceNotFound: if request.lti_user.oauth_consumer_key isn't in the DB
"""
return self._request.find_service(name="application_instance").get_current()
def add_document_url(self, document_url):
"""
Set the document to the document at the given document_url.
This configures the frontend to inject the Via iframe with this URL as
its src immediately, without making any further API requests to get the
Via URL.
:raise HTTPBadRequest: if a request param needed to generate the config
is missing
"""
if document_url.startswith("blackboard://"):
self._config["api"]["viaUrl"] = {
"authUrl": self._request.route_url("blackboard_api.oauth.authorize"),
"path": self._request.route_path(
"blackboard_api.files.via_url",
course_id=self._request.params["context_id"],
_query={"document_url": document_url},
),
}
elif document_url.startswith("canvas://"):
self._config["api"]["viaUrl"] = {
"authUrl": self._request.route_url("canvas_api.oauth.authorize"),
"path": self._request.route_path(
"canvas_api.files.via_url",
resource_link_id=self._request.params["resource_link_id"],
),
}
else:
self._config["viaUrl"] = via_url(self._request, document_url)
self._add_canvas_speedgrader_settings(document_url=document_url)
def add_vitalsource_launch_config(self, book_id, cfi=None):
vitalsource_svc = self._request.find_service(name="vitalsource")
launch_url, launch_params = vitalsource_svc.get_launch_params(
book_id, cfi, self._request.lti_user
)
self._config["vitalSource"] = {
"launchUrl": launch_url,
"launchParams": launch_params,
}
self._add_canvas_speedgrader_settings(
vitalsource_book_id=book_id, vitalsource_cfi=cfi
)
def asdict(self):
"""
Return the configuration for the app's JavaScript code.
:raise HTTPBadRequest: if a request param needed to generate the config
is missing
:rtype: dict
"""
return self._config
def enable_oauth2_redirect_error_mode(
self,
auth_route: str,
error_code=None,
error_details: Optional[dict] = None,
canvas_scopes: List[str] = None,
):
"""
Configure the frontend to show the "Authorization failed" dialog.
This is shown when authorizing with a third-party OAuth API like the
Canvas API or the Blackboard API fails after the redirect to the
third-party authorization endpoint.
:param error_code: Code identifying a particular error
:param error_details: JSON-serializable technical details about the error
:param auth_route: route for the "Try again" button in the dialog
:param canvas_scopes: List of scopes that were requested
"""
if self._lti_user:
bearer_token = BearerTokenSchema(self._request).authorization_param(
self._lti_user
)
auth_url = self._request.route_url(
auth_route,
_query=[("authorization", bearer_token)],
)
else:
auth_url = None
self._config.update(
{
"mode": JSConfig.Mode.OAUTH2_REDIRECT_ERROR,
"OAuth2RedirectError": {
"authUrl": auth_url,
"errorCode": error_code,
"canvasScopes": canvas_scopes or [],
},
}
)
if error_details:
self._config["OAuth2RedirectError"]["errorDetails"] = error_details
def enable_error_dialog_mode(self, error_code, error_details=None):
self._config.update(
{
"mode": JSConfig.Mode.ERROR_DIALOG,
"errorDialog": {"errorCode": error_code},
}
)
if error_details:
self._config["errorDialog"]["errorDetails"] = error_details
def enable_lti_launch_mode(self):
"""
Put the JavaScript code into "LTI launch" mode.
This mode launches an assignment.
:raise ApplicationInstanceNotFound: if request.lti_user.oauth_consumer_key isn't in the DB
"""
self._config["mode"] = JSConfig.Mode.BASIC_LTI_LAUNCH
self._config["api"]["sync"] = self._sync_api()
# The config object for the Hypothesis client.
# Our JSON-RPC server passes this to the Hypothesis client over
# postMessage.
self._config["hypothesisClient"] = self._hypothesis_client
self._config["rpcServer"] = {
"allowedOrigins": self._request.registry.settings["rpc_allowed_origins"]
}
def _create_assignment_api(self):
if not self._context.is_canvas:
return None
return {
"path": self._request.route_path("api.assignments.create"),
"data": {
"ext_lti_assignment_id": self._request.params["ext_lti_assignment_id"],
"course_id": self._request.params["custom_canvas_course_id"],
},
}
def enable_content_item_selection_mode(self, form_action, form_fields):
"""
Put the JavaScript code into "content item selection" mode.
This mode shows teachers an assignment configuration UI where they can
choose the document to be annotated for the assignment.
:param form_action: the HTML `action` attribute for the URL that we'll
submit the user's chosen document to
:param form_fields: the fields (keys and values) to include in the
HTML form that we submit
:raise ApplicationInstanceNotFound: if request.lti_user.oauth_consumer_key isn't in the DB
"""
args = self._context, self._request, self._application_instance()
self._config.update(
{
"mode": JSConfig.Mode.CONTENT_ITEM_SELECTION,
"filePicker": {
"formAction": form_action,
"formFields": form_fields,
"createAssignmentAPI": self._create_assignment_api(),
# The "content item selection" that we submit to Canvas's
# content_item_return_url is actually an LTI launch URL with
# the selected document URL or file_id as a query parameter. To
# construct these launch URLs our JavaScript code needs the
# base URL of our LTI launch endpoint.
"ltiLaunchUrl": self._request.route_url("lti_launches"),
# Specific config for pickers
"blackboard": FilePickerConfig.blackboard_config(*args),
"canvas": FilePickerConfig.canvas_config(*args),
"google": FilePickerConfig.google_files_config(*args),
"microsoftOneDrive": FilePickerConfig.microsoft_onedrive(*args),
"vitalSource": FilePickerConfig.vital_source_config(*args),
},
}
)
def maybe_enable_grading(self):
"""Enable our LMS app's built-in assignment grading UI, if appropriate."""
if not self._lti_user.is_instructor:
# Only instructors can grade assignments.
return
if "lis_outcome_service_url" not in self._request.params:
# Only "gradeable" assignments can be graded.
# Assignments that don't have the lis_outcome_service_url param
# aren't set as gradeable in the LMS.
return
if self._context.is_canvas:
# Don't show our built-in grader in Canvas because it has its own
# "SpeedGrader" and we support that instead.
return
self._config["grading"] = {
"enabled": True,
"courseName": self._request.params.get("context_title"),
"assignmentName": self._request.params.get("resource_link_title"),
"students": list(self._get_students()),
}
def maybe_set_focused_user(self):
"""
Configure the Hypothesis client to focus on a particular user.
If there is a focused_user request param then add the necessary
Hypothesis client config to get the client to focus on the particular
user identified by the focused_user param, showing only that user's
annotations and not others.
In practice the focused_user param is only ever present in Canvas
SpeedGrader launches. We add a focused_user query param to the
SpeedGrader LTI launch URLs that we submit to Canvas for each student
when the student launches an assignment. Later, Canvas uses these URLs
to launch us when a teacher grades the assignment in SpeedGrader.
In theory, though, the focused_user param could work outside of Canvas
as well if we ever want it to.
:raise ApplicationInstanceNotFound: if request.lti_user.oauth_consumer_key isn't in the DB
"""
focused_user = self._request.params.get("focused_user")
if not focused_user:
return
self._hypothesis_client["focus"] = {"user": {"username": focused_user}}
# Unfortunately we need to pass the user's current display name to the
# Hypothesis client, and we need to make a request to the h API to
# retrieve that display name.
try:
display_name = (
self._request.find_service(name="h_api")
.get_user(focused_user)
.display_name
)
except HAPIError:
display_name = "(Couldn't fetch student name)"
self._hypothesis_client["focus"]["user"]["displayName"] = display_name
def _add_canvas_speedgrader_settings(self, **kwargs):
"""
Add config used by the JS to call our record_canvas_speedgrader_submission API.
:raise HTTPBadRequest: if a request param needed to generate the config
is missing
"""
lis_result_sourcedid = self._request.params.get("lis_result_sourcedid")
lis_outcome_service_url = self._request.params.get("lis_outcome_service_url")
# Don't set the Canvas submission params in non-Canvas LMS's.
if not self._context.is_canvas:
return
# When a Canvas assignment is launched by a teacher or other
# non-gradeable user there's no lis_result_sourcedid in the LTI
# launch params.
# Don't post submission to Canvas for these cases.
if not lis_result_sourcedid:
return
# When a Canvas assignment isn't gradeable there's no
# lis_outcome_service_url.
# Don't post submission to Canvas for these cases.
if not lis_outcome_service_url:
return
self._config["canvas"]["speedGrader"] = {
"submissionParams": {
"h_username": self._h_user.username,
"lis_result_sourcedid": lis_result_sourcedid,
"lis_outcome_service_url": lis_outcome_service_url,
"learner_canvas_user_id": self._request.params["custom_canvas_user_id"],
"group_set": self._request.params.get("group_set"),
                # Canvas doesn't send the right value for this on speed grader
                # launches, sending instead the same value as for "context_id"
"resource_link_id": self._request.params.get("resource_link_id"),
# Canvas doesn't send this value at all on speed grader submissions
"ext_lti_assignment_id": self._request.params.get(
"ext_lti_assignment_id"
),
**kwargs,
},
}
def _auth_token(self):
"""Return the authToken setting."""
return BearerTokenSchema(self._request).authorization_param(self._lti_user)
@property
@functools.lru_cache()
def _config(self):
"""
Return the current configuration dict.
This method populates the default parameters used by all frontend
        apps. The `enable_xxx_mode` methods configure the specific parameters
needed by a particular frontend mode.
:raise HTTPBadRequest: if a request param needed to generate the config
is missing
:rtype: dict
"""
        # This is a lazily-computed property so that, if it's going to raise an
        # exception, that doesn't happen until someone actually reads it.
# If it instead crashed in JSConfig.__init__() that would happen
# earlier in the request processing pipeline and could change the error
# response.
#
# We cache this property (@functools.lru_cache()) so that it's
# mutable. You can do self._config["foo"] = "bar" and the mutation will
# be preserved.
config = {
# Settings to do with the API that the backend provides for the
# frontend to call.
"api": {
# The auth token that the JavaScript code will use to
# authenticate itself to the API.
"authToken": self._auth_token()
},
"canvas": {},
# Some debug information, currently used in the Gherkin tests.
"debug": {"tags": []},
# Tell the JavaScript code whether we're in "dev" mode.
"dev": self._request.registry.settings["dev"],
# What "mode" to put the JavaScript code in.
# For example in "basic-lti-launch" mode the JavaScript code
# launches its BasicLtiLaunchApp, whereas in
# "content-item-selection" mode it launches its FilePickerApp.
"mode": None,
}
if self._lti_user:
config["debug"]["tags"].append(
"role:instructor" if self._lti_user.is_instructor else "role:learner"
)
return config
def _get_students(self):
"""
Yield the student dicts for the request.
Yield one student dict for each student who has launched the assignment
and had grading info recorded for them.
"""
grading_infos = self._grading_info_service.get_by_assignment(
oauth_consumer_key=self._consumer_key,
context_id=self._request.params.get("context_id"),
resource_link_id=self._request.params.get("resource_link_id"),
)
# Yield a "student" dict for each GradingInfo.
for grading_info in grading_infos:
h_user = HUser(
username=grading_info.h_username,
display_name=grading_info.h_display_name,
)
yield {
"userid": h_user.userid(self._authority),
"lmsId": grading_info.user_id,
"displayName": h_user.display_name,
"LISResultSourcedId": grading_info.lis_result_sourcedid,
"LISOutcomeServiceUrl": grading_info.lis_outcome_service_url,
}
@property
@functools.lru_cache()
def _hypothesis_client(self):
"""
Return the config object for the Hypothesis client.
:raise HTTPBadRequest: if a request param needed to generate the config
is missing
:raise ApplicationInstanceNotFound: if request.lti_user.oauth_consumer_key isn't in the DB
"""
        # This is a lazily-computed property so that, if it's going to raise an
        # exception, that doesn't happen until someone actually reads it.
# If it instead crashed in JSConfig.__init__() that would happen
# earlier in the request processing pipeline and could change the error
# response.
#
# We cache this property (@functools.lru_cache()) so that it's
# mutable. You can do self._hypothesis_client["foo"] = "bar" and the
# mutation will be preserved.
if not self._application_instance().provisioning:
return {}
api_url = self._request.registry.settings["h_api_url_public"]
# Generate a short-lived login token for the Hypothesis client.
grant_token_svc = self._request.find_service(name="grant_token")
grant_token = grant_token_svc.generate_token(self._h_user)
return {
# For documentation of these Hypothesis client settings see:
# https://h.readthedocs.io/projects/client/en/latest/publishers/config/#configuring-the-client-using-json
"services": [
{
"allowFlagging": False,
"allowLeavingGroups": False,
"apiUrl": api_url,
"authority": self._authority,
"enableShareLinks": False,
"grantToken": grant_token,
"groups": self._groups(),
}
]
}
def _groups(self):
if self._context.canvas_sections_enabled or self._context.is_group_launch:
return "$rpc:requestGroups"
return [self._context.h_group.groupid(self._authority)]
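    # e.g. (illustrative) a plain course launch returns a single h groupid such
    # as ["group:<pubid>@<authority>"], while sections/groups launches return
    # the "$rpc:requestGroups" sentinel so the client fetches groups over RPC.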
def _canvas_sync_api(self):
req = self._request
sync_api_config = {
"authUrl": req.route_url("canvas_api.oauth.authorize"),
"path": req.route_path("canvas_api.sync"),
"data": {
"lms": {
"tool_consumer_instance_guid": req.params[
"tool_consumer_instance_guid"
],
},
"course": {
"context_id": req.params["context_id"],
"custom_canvas_course_id": req.params["custom_canvas_course_id"],
"group_set": req.params.get("group_set"),
},
"group_info": {
key: value
for key, value in req.params.items()
if key in GroupInfo.columns()
},
},
}
if "learner_canvas_user_id" in req.params:
sync_api_config["data"]["learner"] = {
"canvas_user_id": req.params["learner_canvas_user_id"],
"group_set": req.params.get("group_set"),
}
return sync_api_config
def _blackboard_sync_api(self):
req = self._request
return {
"authUrl": req.route_url("blackboard_api.oauth.authorize"),
"path": req.route_path("blackboard_api.sync"),
"data": {
"lms": {
"tool_consumer_instance_guid": req.params[
"tool_consumer_instance_guid"
],
},
"course": {
"context_id": req.params["context_id"],
},
"assignment": {
"resource_link_id": req.params["resource_link_id"],
},
"group_info": {
key: value
for key, value in req.params.items()
if key in GroupInfo.columns()
},
},
}
def _sync_api(self):
if self._context.is_canvas and (
self._context.canvas_sections_enabled
or self._context.canvas_is_group_launch
):
return self._canvas_sync_api()
if (
self._application_instance().product
== ApplicationInstance.Product.BLACKBOARD
and self._context.is_blackboard_group_launch
):
return self._blackboard_sync_api()
return None
| 38.817029
| 117
| 0.601344
|
cb7c5b7df70ada1b943c479aede913bb7f43898a
| 1,328
|
py
|
Python
|
setup.py
|
CIGIHub/newsletter_generator
|
2af3e183ffedce55ad26bf998c488282f94d477e
|
[
"MIT"
] | null | null | null |
setup.py
|
CIGIHub/newsletter_generator
|
2af3e183ffedce55ad26bf998c488282f94d477e
|
[
"MIT"
] | null | null | null |
setup.py
|
CIGIHub/newsletter_generator
|
2af3e183ffedce55ad26bf998c488282f94d477e
|
[
"MIT"
] | null | null | null |
import os
from setuptools import setup
from setuptools import find_packages
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='newsletter-generator',
version='0.1',
packages=find_packages(),
include_package_data=True,
install_requires=[
"django>=1.7",
],
license='MIT License',
description='Newsletter Generator.',
long_description=README,
url='http://dm.cigionline.org/',
author='Caroline Simpson',
author_email='csimpson@cigionline.org',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| 32.390244
| 78
| 0.636295
|
f3bb25ac43535b771a6d2ab70dc603b83d78d59a
| 4,519
|
py
|
Python
|
test/generic_distributed_util_test.py
|
jerryzh168/ClassyVision-1
|
6acfb00a77487a9015803fbaad805330081293a9
|
[
"MIT"
] | 1
|
2020-04-13T03:50:26.000Z
|
2020-04-13T03:50:26.000Z
|
test/generic_distributed_util_test.py
|
pkassotis/ClassyVision
|
e8704ecaa59a15dbb2f4b0724e85d6e5cb2f704e
|
[
"MIT"
] | null | null | null |
test/generic_distributed_util_test.py
|
pkassotis/ClassyVision
|
e8704ecaa59a15dbb2f4b0724e85d6e5cb2f704e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import unittest
from itertools import product
from typing import Any, Dict, List
import torch
from classy_vision.generic.distributed_util import _PRIMARY_RANK, broadcast_object
from torch.multiprocessing import Event, Process, Queue
def init_and_run_process(
rank, world_size, filename, fn, input, q, wait_event, backend="gloo"
):
torch.distributed.init_process_group(
backend, init_method=f"file://{filename}", rank=rank, world_size=world_size
)
r = fn(*input)
q.put(r)
wait_event.wait()
return
def run_in_process_group(filename: str, calls: List[Dict[str, Any]]):
if torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
processes = []
q = Queue()
wait_event = Event()
    # Start one process per call.
for rank, call in enumerate(calls):
p = Process(
target=init_and_run_process,
args=(
rank,
call["world_size"],
filename,
call["function"],
call["inputs"],
q,
wait_event,
),
)
p.start()
processes.append(p)
# fetch the results from the queue before joining, the background processes
# need to be alive if the queue contains tensors. See
# https://discuss.pytorch.org/t/using-torch-tensor-over-multiprocessing-queue-process-fails/2847/3 # noqa: B950
results = []
for _ in range(len(processes)):
results.append(q.get())
wait_event.set()
for p in processes:
p.join()
return results
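# Illustrative usage (hypothetical values): run_in_process_group would be called
# with a temp filename and one {"world_size": N, "function": fn, "inputs": [...]}
# dict per simulated rank; it returns the per-rank results in whatever order
# they come off the queue.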
class TestDistributedUtil(unittest.TestCase):
@staticmethod
def _get_test_objects():
return [
{"a": 12, "b": [2, 3, 4], "tensor": torch.randn(10, 10)},
None,
{"tensor": torch.randn(10000, 10000)}, # 400 MB
]
def test_broadcast_object(self):
world_size = 3
for use_disk, obj in product([True, False], self._get_test_objects()):
filename = tempfile.NamedTemporaryFile(delete=True).name
inputs = [None] * world_size
inputs[0] = obj # only the master worker has the object
calls = [
{
"world_size": world_size,
"function": broadcast_object,
"inputs": [i, _PRIMARY_RANK, use_disk],
}
for i in inputs
]
results = run_in_process_group(filename, calls)
# check that all replicas got identical objects
self.assertEqual(len(results), world_size)
for result in results:
if isinstance(obj, dict):
for key in obj:
if key == "tensor":
self.assertTrue(torch.allclose(result[key], obj[key]))
else:
self.assertEqual(result[key], obj[key])
else:
self.assertEqual(result, obj)
def test_broadcast_object_pick_source(self):
world_size = 3
for use_disk, obj in product([True, False], self._get_test_objects()):
filename = tempfile.NamedTemporaryFile(delete=True).name
inputs = [None] * world_size
source_rank = 1
inputs[source_rank] = obj # only the rank 1 worker has the object
calls = [
{
"world_size": world_size,
"function": broadcast_object,
"inputs": [i, source_rank, use_disk],
}
for i in inputs
]
results = run_in_process_group(filename, calls)
# check that all replicas got identical objects
self.assertEqual(len(results), world_size)
for result in results:
if isinstance(obj, dict):
for key in obj:
if key == "tensor":
self.assertTrue(torch.allclose(result[key], obj[key]))
else:
self.assertEqual(result[key], obj[key])
else:
self.assertEqual(result, obj)
| 32.985401
| 116
| 0.55322
|
f609b5fb8bc157283e76ef0f5e2a04ed34fef994
| 13,556
|
py
|
Python
|
doc/source/conf.py
|
walterst/bipy
|
49a3a9c33fd0c8c12bbb8f5cbb7e171f78fc4a96
|
[
"BSD-3-Clause"
] | null | null | null |
doc/source/conf.py
|
walterst/bipy
|
49a3a9c33fd0c8c12bbb8f5cbb7e171f78fc4a96
|
[
"BSD-3-Clause"
] | null | null | null |
doc/source/conf.py
|
walterst/bipy
|
49a3a9c33fd0c8c12bbb8f5cbb7e171f78fc4a96
|
[
"BSD-3-Clause"
] | 1
|
2018-09-21T01:58:43.000Z
|
2018-09-21T01:58:43.000Z
|
import shutil
import glob
import sys
import os
import sphinx_bootstrap_theme
import skbio
# NOTE: parts of this file were taken from scipy's doc/source/conf.py. See
# scikit-bio/licenses/scipy.txt for scipy's license.
# If readthedocs.org is building the project, delete the generated/ directory,
# which is created by autosummary when generating the API reference. For some
# reason, when RTD rebuilds the docs (i.e., not starting from a fresh build),
# some links are not generated correctly if generated/ already exists. Links to
# modules/subpackages work, but links aren't created for classes, methods,
# attributes, etc. and we get a bunch of warnings. This does not happen when
# building the docs locally or on Travis.
#
# Code to check whether RTD is building our project is taken from
# http://read-the-docs.readthedocs.org/en/latest/faq.html
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
generated_path = os.path.join(os.path.dirname(__file__), 'generated')
if os.path.exists(generated_path):
shutil.rmtree(generated_path)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../sphinxext/numpydoc'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.1'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'numpydoc',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.autosummary'
]
# Determine whether matplotlib has a recent enough version of the
# plot_directive.
try:
from matplotlib.sphinxext import plot_directive
except ImportError:
use_matplotlib_plot_directive = False
else:
try:
use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
except AttributeError:
use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
extensions.append('matplotlib.sphinxext.plot_directive')
else:
raise RuntimeError("You need a recent enough version of matplotlib")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'scikit-bio'
copyright = u'2014--, scikit-bio development team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = skbio.__version__
# The full version, including alpha/beta/rc tags.
release = skbio.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# Exclude this file since it is only used by autosummary to generate other RST
# files during the build process, and it will generate sphinx errors and
# warnings otherwise.
exclude_patterns = ['_templates/autosummary/*.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': 'scikit-bio docs',
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo".
'bootswatch_theme': 'spacelab',
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': False
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static/']
html_style = 'skbio.css'
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-biodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'scikit-bio.tex', u'scikit-bio Documentation',
u'scikit-bio development team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'scikit-bio', u'scikit-bio Documentation',
[u'scikit-bio development team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'scikit-bio', u'scikit-bio Documentation',
u'scikit-bio development team', 'scikit-bio',
'Core objects, functions and statistics for working with biological data '
'in Python.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for autosummary ----------------------------------------------
autosummary_generate = glob.glob('*.rst')
# -- Options for numpydoc -------------------------------------------------
# Generate plots for example sections
numpydoc_use_plots = True
#------------------------------------------------------------------------------
# Plot
#------------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
import scipy as sp
np.random.seed(123)
"""
plot_include_source = True
#plot_formats = [('png', 96), 'pdf']
#plot_html_show_formats = False
import math
phi = (math.sqrt(5) + 1)/2
font_size = 13*72/96.0 # 13 px
plot_rcparams = {
'font.size': font_size,
'axes.titlesize': font_size,
'axes.labelsize': font_size,
'xtick.labelsize': font_size,
'ytick.labelsize': font_size,
'legend.fontsize': font_size,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
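# Illustrative arithmetic: with phi = (sqrt(5) + 1) / 2 ~= 1.618, the
# 'figure.figsize' above works out to roughly (4.85, 3.0) inches, and
# font_size = 13 * 72 / 96.0 = 9.75 pt (a 13 px font at 96 dpi).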
if not use_matplotlib_plot_directive:
import matplotlib
matplotlib.rcParams.update(plot_rcparams)
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print "NOTE: linkcode extension not found -- no links to source generated"
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.findsource(obj)
except:
lineno = None
if lineno:
linespec = "#L%d" % (lineno + 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(skbio.__file__))
if 'dev' in skbio.__version__:
return "http://github.com/biocore/scikit-bio/blob/master/skbio/%s%s" % (
fn, linespec)
else:
return "http://github.com/biocore/scikit-bio/blob/%s/skbio/%s%s" % (
skbio.__version__, fn, linespec)
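# Illustrative result (hypothetical module/object names): for a development
# build, linkcode_resolve('py', {'module': 'skbio.foo', 'fullname': 'Bar'})
# returns a URL of the form
# http://github.com/biocore/scikit-bio/blob/master/skbio/<relative path>#L<line>,
# while a released version links to the matching version tag instead.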
#------------------------------------------------------------------------------
# linkcheck
#------------------------------------------------------------------------------
# Link-checking on Travis sometimes times out.
linkcheck_timeout = 30
| 31.163218 | 80 | 0.673134 |
681419994324038e4930e366c07abd5f6ae43dc1 | 1,555 | py | Python |
stubs.min/Autodesk/Revit/DB/__init___parts/CurtainCell.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | ["MIT"] | 1 | 2017-07-25T14:30:18.000Z | 2017-07-25T14:30:18.000Z |
stubs.min/Autodesk/Revit/DB/__init___parts/CurtainCell.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | ["MIT"] | null | null | null |
stubs.min/Autodesk/Revit/DB/__init___parts/CurtainCell.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | ["MIT"] | null | null | null |
class CurtainCell(APIObject,IDisposable):
""" Represents a CurtainCell within Autodesk Revit. """
def Dispose(self):
""" Dispose(self: APIObject,A_0: bool) """
pass
def ReleaseManagedResources(self,*args):
""" ReleaseManagedResources(self: APIObject) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: APIObject) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
CurveLoops=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The cell boundaries on the reference face. The boundaries can have more than one CurveLoop. Each item in the returned array represents a CurveLoop containing 3 or more than 3 edges.
Get: CurveLoops(self: CurtainCell) -> CurveArrArray
"""
PlanarizedCurveLoops=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The planarized curve loops for cell boundaries. The boundaries can have more than one CurveLoop. Each item in the returned array represents a CurveLoop containing 3 or more than 3 edges.
Get: PlanarizedCurveLoops(self: CurtainCell) -> CurveArrArray
"""
| 43.194444 | 215 | 0.725402 |
620a33e4fc35345e615b41847b46ef5b53c016b9 | 214 | py | Python |
dosing_rl_gym/__init__.py | strongio/dosing-rl-gym | e9f0553080830dc621e97e0652c68b86788b7296 | ["MIT"] | 6 | 2020-01-30T11:31:53.000Z | 2021-12-02T10:35:27.000Z |
dosing_rl_gym/__init__.py | strongio/dosing-rl-gym | e9f0553080830dc621e97e0652c68b86788b7296 | ["MIT"] | null | null | null |
dosing_rl_gym/__init__.py | strongio/dosing-rl-gym | e9f0553080830dc621e97e0652c68b86788b7296 | ["MIT"] | 3 | 2019-11-13T15:56:14.000Z | 2021-04-12T07:20:23.000Z |
from gym.envs.registration import register
register(
id='Diabetic-v0',
entry_point='dosing_rl_gym.envs:Diabetic0Env'
)
register(
id='Diabetic-v1',
entry_point='dosing_rl_gym.envs:Diabetic1Env'
)
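# Usage sketch (assumes the `gym` package is installed; importing this module
# runs the register() calls above):
#
#     import gym
#     import dosing_rl_gym  # noqa: F401
#     env = gym.make('Diabetic-v0')
#     observation = env.reset()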
| 16.461538 | 49 | 0.733645 |
79e36026c8411796fc4a9913d922d28f169892ba | 89,274 | py | Python |
lib/sqlalchemy/orm/strategies.py | rgargente/sqlalchemy | 2734439fff953a7bb8aecdedb5f851441b5122e9 | ["MIT"] | null | null | null |
lib/sqlalchemy/orm/strategies.py | rgargente/sqlalchemy | 2734439fff953a7bb8aecdedb5f851441b5122e9 | ["MIT"] | null | null | null |
lib/sqlalchemy/orm/strategies.py | rgargente/sqlalchemy | 2734439fff953a7bb8aecdedb5f851441b5122e9 | ["MIT"] | null | null | null |
# orm/strategies.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""sqlalchemy.orm.interfaces.LoaderStrategy
implementations, and related MapperOptions."""
from __future__ import absolute_import
import collections
import itertools
from . import attributes
from . import exc as orm_exc
from . import interfaces
from . import loading
from . import properties
from . import query
from . import unitofwork
from . import util as orm_util
from .base import _DEFER_FOR_STATE
from .base import _RAISE_FOR_STATE
from .base import _SET_DEFERRED_EXPIRED
from .interfaces import LoaderStrategy
from .interfaces import StrategizedProperty
from .session import _state_session
from .state import InstanceState
from .util import _none_set
from .util import aliased
from .. import event
from .. import exc as sa_exc
from .. import inspect
from .. import log
from .. import sql
from .. import util
from ..sql import util as sql_util
from ..sql import visitors
def _register_attribute(
prop,
mapper,
useobject,
compare_function=None,
typecallable=None,
callable_=None,
proxy_property=None,
active_history=False,
impl_class=None,
**kw
):
listen_hooks = []
uselist = useobject and prop.uselist
if useobject and prop.single_parent:
listen_hooks.append(single_parent_validator)
if prop.key in prop.parent.validators:
fn, opts = prop.parent.validators[prop.key]
listen_hooks.append(
lambda desc, prop: orm_util._validator_events(
desc, prop.key, fn, **opts
)
)
if useobject:
listen_hooks.append(unitofwork.track_cascade_events)
# need to assemble backref listeners
# after the singleparentvalidator, mapper validator
if useobject:
backref = prop.back_populates
if backref:
listen_hooks.append(
lambda desc, prop: attributes.backref_listeners(
desc, backref, uselist
)
)
# a single MapperProperty is shared down a class inheritance
# hierarchy, so we set up attribute instrumentation and backref event
# for each mapper down the hierarchy.
# typically, "mapper" is the same as prop.parent, due to the way
# the configure_mappers() process runs, however this is not strongly
# enforced, and in the case of a second configure_mappers() run the
# mapper here might not be prop.parent; also, a subclass mapper may
# be called here before a superclass mapper. That is, can't depend
# on mappers not already being set up so we have to check each one.
for m in mapper.self_and_descendants:
if prop is m._props.get(
prop.key
) and not m.class_manager._attr_has_impl(prop.key):
desc = attributes.register_attribute_impl(
m.class_,
prop.key,
parent_token=prop,
uselist=uselist,
compare_function=compare_function,
useobject=useobject,
trackparent=useobject
and (
prop.single_parent
or prop.direction is interfaces.ONETOMANY
),
typecallable=typecallable,
callable_=callable_,
active_history=active_history,
impl_class=impl_class,
send_modified_events=not useobject or not prop.viewonly,
doc=prop.doc,
**kw
)
for hook in listen_hooks:
hook(desc, prop)
@properties.ColumnProperty.strategy_for(instrument=False, deferred=False)
class UninstrumentedColumnLoader(LoaderStrategy):
"""Represent a non-instrumented MapperProperty.
The polymorphic_on argument of mapper() often results in this,
if the argument is against the with_polymorphic selectable.
"""
__slots__ = ("columns",)
def __init__(self, parent, strategy_key):
super(UninstrumentedColumnLoader, self).__init__(parent, strategy_key)
self.columns = self.parent_property.columns
def setup_query(
self,
context,
query_entity,
path,
loadopt,
adapter,
column_collection=None,
**kwargs
):
for c in self.columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
pass
@log.class_logger
@properties.ColumnProperty.strategy_for(instrument=True, deferred=False)
class ColumnLoader(LoaderStrategy):
"""Provide loading behavior for a :class:`.ColumnProperty`."""
__slots__ = "columns", "is_composite"
def __init__(self, parent, strategy_key):
super(ColumnLoader, self).__init__(parent, strategy_key)
self.columns = self.parent_property.columns
self.is_composite = hasattr(self.parent_property, "composite_class")
def setup_query(
self,
context,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
**kwargs
):
for c in self.columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
fetch = self.columns[0]
if adapter:
fetch = adapter.columns[fetch]
memoized_populators[self.parent_property] = fetch
def init_class_attribute(self, mapper):
self.is_class_level = True
coltype = self.columns[0].type
# TODO: check all columns ? check for foreign key as well?
active_history = (
self.parent_property.active_history
or self.columns[0].primary_key
or mapper.version_id_col in set(self.columns)
)
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=coltype.compare_values,
active_history=active_history,
)
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
# look through list of columns represented here
# to see which, if any, is present in the row.
for col in self.columns:
if adapter:
col = adapter.columns[col]
getter = result._getter(col, False)
if getter:
populators["quick"].append((self.key, getter))
break
else:
populators["expire"].append((self.key, True))
@log.class_logger
@properties.ColumnProperty.strategy_for(query_expression=True)
class ExpressionColumnLoader(ColumnLoader):
def __init__(self, parent, strategy_key):
super(ExpressionColumnLoader, self).__init__(parent, strategy_key)
def setup_query(
self,
context,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
**kwargs
):
if loadopt and "expression" in loadopt.local_opts:
columns = [loadopt.local_opts["expression"]]
for c in columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
fetch = columns[0]
if adapter:
fetch = adapter.columns[fetch]
memoized_populators[self.parent_property] = fetch
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
# look through list of columns represented here
# to see which, if any, is present in the row.
if loadopt and "expression" in loadopt.local_opts:
columns = [loadopt.local_opts["expression"]]
for col in columns:
if adapter:
col = adapter.columns[col]
getter = result._getter(col, False)
if getter:
populators["quick"].append((self.key, getter))
break
else:
populators["expire"].append((self.key, True))
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=self.columns[0].type.compare_values,
accepts_scalar_loader=False,
)
@log.class_logger
@properties.ColumnProperty.strategy_for(deferred=True, instrument=True)
@properties.ColumnProperty.strategy_for(
deferred=True, instrument=True, raiseload=True
)
@properties.ColumnProperty.strategy_for(do_nothing=True)
class DeferredColumnLoader(LoaderStrategy):
"""Provide loading behavior for a deferred :class:`.ColumnProperty`."""
__slots__ = "columns", "group", "raiseload"
def __init__(self, parent, strategy_key):
super(DeferredColumnLoader, self).__init__(parent, strategy_key)
if hasattr(self.parent_property, "composite_class"):
raise NotImplementedError(
"Deferred loading for composite " "types not implemented yet"
)
self.raiseload = self.strategy_opts.get("raiseload", False)
self.columns = self.parent_property.columns
self.group = self.parent_property.group
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
# for a DeferredColumnLoader, this method is only used during a
# "row processor only" query; see test_deferred.py ->
# tests with "rowproc_only" in their name. As of the 1.0 series,
# loading._instance_processor doesn't use a "row processing" function
# to populate columns, instead it uses data in the "populators"
# dictionary. Normally, the DeferredColumnLoader.setup_query()
# sets up that data in the "memoized_populators" dictionary
# and "create_row_processor()" here is never invoked.
if not self.is_class_level:
if self.raiseload:
set_deferred_for_local_state = (
self.parent_property._raise_column_loader
)
else:
set_deferred_for_local_state = (
self.parent_property._deferred_column_loader
)
populators["new"].append((self.key, set_deferred_for_local_state))
else:
populators["expire"].append((self.key, False))
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=self.columns[0].type.compare_values,
callable_=self._load_for_state,
load_on_unexpire=False,
)
def setup_query(
self,
context,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
only_load_props=None,
**kw
):
if (
(
loadopt
and "undefer_pks" in loadopt.local_opts
and set(self.columns).intersection(
self.parent._should_undefer_in_wildcard
)
)
or (
loadopt
and self.group
and loadopt.local_opts.get(
"undefer_group_%s" % self.group, False
)
)
or (only_load_props and self.key in only_load_props)
):
self.parent_property._get_strategy(
(("deferred", False), ("instrument", True))
).setup_query(
context,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
**kw
)
elif self.is_class_level:
memoized_populators[self.parent_property] = _SET_DEFERRED_EXPIRED
elif not self.raiseload:
memoized_populators[self.parent_property] = _DEFER_FOR_STATE
else:
memoized_populators[self.parent_property] = _RAISE_FOR_STATE
def _load_for_state(self, state, passive):
if not state.key:
return attributes.ATTR_EMPTY
if not passive & attributes.SQL_OK:
return attributes.PASSIVE_NO_RESULT
localparent = state.manager.mapper
if self.group:
toload = [
p.key
for p in localparent.iterate_properties
if isinstance(p, StrategizedProperty)
and isinstance(p.strategy, DeferredColumnLoader)
and p.group == self.group
]
else:
toload = [self.key]
# narrow the keys down to just those which have no history
group = [k for k in toload if k in state.unmodified]
session = _state_session(state)
if session is None:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"deferred load operation of attribute '%s' cannot proceed"
% (orm_util.state_str(state), self.key)
)
if self.raiseload:
self._invoke_raise_load(state, passive, "raise")
query = session.query(localparent)
if (
loading.load_on_ident(
query, state.key, only_load_props=group, refresh_state=state
)
is None
):
raise orm_exc.ObjectDeletedError(state)
return attributes.ATTR_WAS_SET
def _invoke_raise_load(self, state, passive, lazy):
raise sa_exc.InvalidRequestError(
"'%s' is not available due to raiseload=True" % (self,)
)
class LoadDeferredColumns(object):
"""serializable loader object used by DeferredColumnLoader"""
def __init__(self, key, raiseload=False):
self.key = key
self.raiseload = raiseload
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
localparent = state.manager.mapper
prop = localparent._props[key]
if self.raiseload:
strategy_key = (
("deferred", True),
("instrument", True),
("raiseload", True),
)
else:
strategy_key = (("deferred", True), ("instrument", True))
strategy = prop._get_strategy(strategy_key)
return strategy._load_for_state(state, passive)
class AbstractRelationshipLoader(LoaderStrategy):
"""LoaderStratgies which deal with related objects."""
__slots__ = "mapper", "target", "uselist", "entity"
def __init__(self, parent, strategy_key):
super(AbstractRelationshipLoader, self).__init__(parent, strategy_key)
self.mapper = self.parent_property.mapper
self.entity = self.parent_property.entity
self.target = self.parent_property.target
self.uselist = self.parent_property.uselist
@log.class_logger
@properties.RelationshipProperty.strategy_for(do_nothing=True)
class DoNothingLoader(LoaderStrategy):
"""Relationship loader that makes no change to the object's state.
Compared to NoLoader, this loader does not initialize the
collection/attribute to empty/none; the usual default LazyLoader will
take effect.
"""
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="noload")
@properties.RelationshipProperty.strategy_for(lazy=None)
class NoLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=None".
"""
__slots__ = ()
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=True,
typecallable=self.parent_property.collection_class,
)
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
def invoke_no_load(state, dict_, row):
if self.uselist:
attributes.init_state_collection(state, dict_, self.key)
else:
dict_[self.key] = None
populators["new"].append((self.key, invoke_no_load))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy=True)
@properties.RelationshipProperty.strategy_for(lazy="select")
@properties.RelationshipProperty.strategy_for(lazy="raise")
@properties.RelationshipProperty.strategy_for(lazy="raise_on_sql")
@properties.RelationshipProperty.strategy_for(lazy="baked_select")
class LazyLoader(AbstractRelationshipLoader, util.MemoizedSlots):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=True", that is loads when first accessed.
"""
__slots__ = (
"_lazywhere",
"_rev_lazywhere",
"use_get",
"is_aliased_class",
"_bind_to_col",
"_equated_columns",
"_rev_bind_to_col",
"_rev_equated_columns",
"_simple_lazy_clause",
"_raise_always",
"_raise_on_sql",
"_bakery",
)
def __init__(self, parent, strategy_key):
super(LazyLoader, self).__init__(parent, strategy_key)
self._raise_always = self.strategy_opts["lazy"] == "raise"
self._raise_on_sql = self.strategy_opts["lazy"] == "raise_on_sql"
self.is_aliased_class = inspect(self.entity).is_aliased_class
join_condition = self.parent_property._join_condition
(
self._lazywhere,
self._bind_to_col,
self._equated_columns,
) = join_condition.create_lazy_clause()
(
self._rev_lazywhere,
self._rev_bind_to_col,
self._rev_equated_columns,
) = join_condition.create_lazy_clause(reverse_direction=True)
self.logger.info("%s lazy loading clause %s", self, self._lazywhere)
# determine if our "lazywhere" clause is the same as the mapper's
# get() clause. then we can just use mapper.get()
#
# TODO: the "not self.uselist" can be taken out entirely; a m2o
# load that populates for a list (very unusual, but is possible with
# the API) can still set for "None" and the attribute system will
# populate as an empty list.
self.use_get = (
not self.is_aliased_class
and not self.uselist
and self.entity._get_clause[0].compare(
self._lazywhere,
use_proxies=True,
equivalents=self.mapper._equivalent_columns,
)
)
if self.use_get:
for col in list(self._equated_columns):
if col in self.mapper._equivalent_columns:
for c in self.mapper._equivalent_columns[col]:
self._equated_columns[c] = self._equated_columns[col]
self.logger.info(
"%s will use query.get() to " "optimize instance loads", self
)
def init_class_attribute(self, mapper):
self.is_class_level = True
active_history = (
self.parent_property.active_history
or self.parent_property.direction is not interfaces.MANYTOONE
or not self.use_get
)
# MANYTOONE currently only needs the
# "old" value for delete-orphan
# cascades. the required _SingleParentValidator
# will enable active_history
# in that case. otherwise we don't need the
# "old" value during backref operations.
_register_attribute(
self.parent_property,
mapper,
useobject=True,
callable_=self._load_for_state,
typecallable=self.parent_property.collection_class,
active_history=active_history,
)
def _memoized_attr__simple_lazy_clause(self):
criterion, bind_to_col = (self._lazywhere, self._bind_to_col)
params = []
def visit_bindparam(bindparam):
bindparam.unique = False
visitors.traverse(criterion, {}, {"bindparam": visit_bindparam})
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
params.append(
(
bindparam.key,
bind_to_col[bindparam._identifying_key],
None,
)
)
elif bindparam.callable is None:
params.append((bindparam.key, None, bindparam.value))
criterion = visitors.cloned_traverse(
criterion, {}, {"bindparam": visit_bindparam}
)
return criterion, params
def _generate_lazy_clause(self, state, passive):
criterion, param_keys = self._simple_lazy_clause
if state is None:
return sql_util.adapt_criterion_to_null(
criterion, [key for key, ident, value in param_keys]
)
mapper = self.parent_property.parent
o = state.obj() # strong ref
dict_ = attributes.instance_dict(o)
if passive & attributes.INIT_OK:
passive ^= attributes.INIT_OK
params = {}
for key, ident, value in param_keys:
if ident is not None:
if passive and passive & attributes.LOAD_AGAINST_COMMITTED:
value = mapper._get_committed_state_attr_by_column(
state, dict_, ident, passive
)
else:
value = mapper._get_state_attr_by_column(
state, dict_, ident, passive
)
params[key] = value
return criterion, params
def _invoke_raise_load(self, state, passive, lazy):
raise sa_exc.InvalidRequestError(
"'%s' is not available due to lazy='%s'" % (self, lazy)
)
def _load_for_state(self, state, passive):
if not state.key and (
(
not self.parent_property.load_on_pending
and not state._load_pending
)
or not state.session_id
):
return attributes.ATTR_EMPTY
pending = not state.key
primary_key_identity = None
if (not passive & attributes.SQL_OK and not self.use_get) or (
not passive & attributes.NON_PERSISTENT_OK and pending
):
return attributes.PASSIVE_NO_RESULT
if (
# we were given lazy="raise"
self._raise_always
# the no_raise history-related flag was not passed
and not passive & attributes.NO_RAISE
and (
# if we are use_get and related_object_ok is disabled,
# which means we are at most looking in the identity map
# for history purposes or otherwise returning
# PASSIVE_NO_RESULT, don't raise. This is also a
# history-related flag
not self.use_get
or passive & attributes.RELATED_OBJECT_OK
)
):
self._invoke_raise_load(state, passive, "raise")
session = _state_session(state)
if not session:
if passive & attributes.NO_RAISE:
return attributes.PASSIVE_NO_RESULT
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"lazy load operation of attribute '%s' cannot proceed"
% (orm_util.state_str(state), self.key)
)
# if we have a simple primary key load, check the
# identity map without generating a Query at all
if self.use_get:
primary_key_identity = self._get_ident_for_use_get(
session, state, passive
)
if attributes.PASSIVE_NO_RESULT in primary_key_identity:
return attributes.PASSIVE_NO_RESULT
elif attributes.NEVER_SET in primary_key_identity:
return attributes.NEVER_SET
if _none_set.issuperset(primary_key_identity):
return None
if self.key in state.dict:
return attributes.ATTR_WAS_SET
# look for this identity in the identity map. Delegate to the
# Query class in use, as it may have special rules for how it
# does this, including how it decides what the correct
# identity_token would be for this identity.
instance = session._identity_lookup(
self.entity,
primary_key_identity,
passive=passive,
lazy_loaded_from=state,
)
if instance is not None:
return instance
elif (
not passive & attributes.SQL_OK
or not passive & attributes.RELATED_OBJECT_OK
):
return attributes.PASSIVE_NO_RESULT
return self._emit_lazyload(
session, state, primary_key_identity, passive
)
def _get_ident_for_use_get(self, session, state, passive):
instance_mapper = state.manager.mapper
if passive & attributes.LOAD_AGAINST_COMMITTED:
get_attr = instance_mapper._get_committed_state_attr_by_column
else:
get_attr = instance_mapper._get_state_attr_by_column
dict_ = state.dict
return [
get_attr(state, dict_, self._equated_columns[pk], passive=passive)
for pk in self.mapper.primary_key
]
@util.dependencies("sqlalchemy.ext.baked")
def _memoized_attr__bakery(self, baked):
return baked.bakery(size=50)
@util.dependencies("sqlalchemy.orm.strategy_options")
def _emit_lazyload(
self, strategy_options, session, state, primary_key_identity, passive
):
# emit lazy load now using BakedQuery, to cut way down on the overhead
# of generating queries.
# there are two big things we are trying to guard against here:
#
# 1. two different lazy loads that need to have a different result,
# being cached on the same key. The results between two lazy loads
# can be different due to the options passed to the query, which
# take effect for descendant objects. Therefore we have to make
# sure paths and load options generate good cache keys, and if they
# don't, we don't cache.
# 2. a lazy load that gets cached on a key that includes some
# "throwaway" object, like a per-query AliasedClass, meaning
# the cache key will never be seen again and the cache itself
# will fill up. (the cache is an LRU cache, so while we won't
# run out of memory, it will perform terribly when it's full. A
# warning is emitted if this occurs.) We must prevent the
# generation of a cache key that is including a throwaway object
# in the key.
# note that "lazy='select'" and "lazy=True" make two separate
# lazy loaders. Currently the LRU cache is local to the LazyLoader,
# however add ourselves to the initial cache key just to future
# proof in case it moves
q = self._bakery(lambda session: session.query(self.entity), self)
q.add_criteria(
lambda q: q._adapt_all_clauses()._with_invoke_all_eagers(False),
self.parent_property,
)
if not self.parent_property.bake_queries:
q.spoil(full=True)
if self.parent_property.secondary is not None:
q.add_criteria(
lambda q: q.select_from(
self.mapper, self.parent_property.secondary
)
)
pending = not state.key
# don't autoflush on pending
if pending or passive & attributes.NO_AUTOFLUSH:
q.add_criteria(lambda q: q.autoflush(False))
if state.load_options:
# here, if any of the options cannot return a cache key,
# the BakedQuery "spoils" and caching will not occur. a path
# that features Cls.attribute.of_type(some_alias) will cancel
# caching, for example, since "some_alias" is user-defined and
# is usually a throwaway object.
effective_path = state.load_path[self.parent_property]
q._add_lazyload_options(state.load_options, effective_path)
if self.use_get:
if self._raise_on_sql:
self._invoke_raise_load(state, passive, "raise_on_sql")
return (
q(session)
.with_post_criteria(lambda q: q._set_lazyload_from(state))
._load_on_pk_identity(
session.query(self.mapper), primary_key_identity
)
)
if self.parent_property.order_by:
q.add_criteria(
lambda q: q.order_by(
*util.to_list(self.parent_property.order_by)
)
)
for rev in self.parent_property._reverse_property:
# reverse props that are MANYTOONE are loading *this*
# object from get(), so don't need to eager out to those.
if (
rev.direction is interfaces.MANYTOONE
and rev._use_get
and not isinstance(rev.strategy, LazyLoader)
):
q.add_criteria(
lambda q: q.options(
strategy_options.Load.for_existing_path(
q._current_path[rev.parent]
).lazyload(rev.key)
)
)
lazy_clause, params = self._generate_lazy_clause(state, passive)
if self.key in state.dict:
return attributes.ATTR_WAS_SET
if pending:
if util.has_intersection(orm_util._none_set, params.values()):
return None
elif util.has_intersection(orm_util._never_set, params.values()):
return None
if self._raise_on_sql:
self._invoke_raise_load(state, passive, "raise_on_sql")
q.add_criteria(lambda q: q.filter(lazy_clause))
# set parameters in the query such that we don't overwrite
# parameters that are already set within it
def set_default_params(q):
params.update(q._params)
q._params = params
return q
result = (
q(session)
.with_post_criteria(lambda q: q._set_lazyload_from(state))
.with_post_criteria(set_default_params)
.all()
)
if self.uselist:
return result
else:
l = len(result)
if l:
if l > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for lazily-loaded attribute '%s' "
% self.parent_property
)
return result[0]
else:
return None
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
key = self.key
if not self.is_class_level:
# we are not the primary manager for this attribute
# on this class - set up a
# per-instance lazyloader, which will override the
# class-level behavior.
# this currently only happens when using a
# "lazyload" option on a "no load"
# attribute - "eager" attributes always have a
# class-level lazyloader installed.
set_lazy_callable = (
InstanceState._instance_level_callable_processor
)(mapper.class_manager, LoadLazyAttribute(key, self), key)
populators["new"].append((self.key, set_lazy_callable))
elif context.populate_existing or mapper.always_refresh:
def reset_for_lazy_callable(state, dict_, row):
# we are the primary manager for this attribute on
# this class - reset its
# per-instance attribute state, so that the class-level
# lazy loader is
# executed when next referenced on this instance.
# this is needed in
# populate_existing() types of scenarios to reset
# any existing state.
state._reset(dict_, key)
populators["new"].append((self.key, reset_for_lazy_callable))
class LoadLazyAttribute(object):
"""serializable loader object used by LazyLoader"""
def __init__(self, key, initiating_strategy):
self.key = key
self.strategy_key = initiating_strategy.strategy_key
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
instance_mapper = state.manager.mapper
prop = instance_mapper._props[key]
strategy = prop._strategies[self.strategy_key]
return strategy._load_for_state(state, passive)
class PostLoader(AbstractRelationshipLoader):
"""A relationship loader that emits a second SELECT statement."""
def _immediateload_create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
return self.parent_property._get_strategy(
(("lazy", "immediate"),)
).create_row_processor(
context, path, loadopt, mapper, result, adapter, populators
)
@properties.RelationshipProperty.strategy_for(lazy="immediate")
class ImmediateLoader(PostLoader):
__slots__ = ()
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def setup_query(
self,
context,
entity,
path,
loadopt,
adapter,
column_collection=None,
parentmapper=None,
**kwargs
):
pass
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
def load_immediate(state, dict_, row):
state.get_impl(self.key).get(state, dict_)
populators["delayed"].append((self.key, load_immediate))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="subquery")
class SubqueryLoader(PostLoader):
__slots__ = ("join_depth",)
def __init__(self, parent, strategy_key):
super(SubqueryLoader, self).__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def setup_query(
self,
context,
entity,
path,
loadopt,
adapter,
column_collection=None,
parentmapper=None,
**kwargs
):
if not context.query._enable_eagerloads or context.refresh_state:
return
elif context.query._yield_per:
context.query._no_yield_per("subquery")
path = path[self.parent_property]
# build up a path indicating the path from the leftmost
# entity to the thing we're subquery loading.
with_poly_entity = path.get(
context.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
effective_entity = with_poly_entity
else:
effective_entity = self.entity
subq_path = context.attributes.get(
("subquery_path", None), orm_util.PathRegistry.root
)
subq_path = subq_path + path
# if not via query option, check for
# a cycle
if not path.contains(context.attributes, "loader"):
if self.join_depth:
if (
(
context.query._current_path.length
if context.query._current_path
else 0
)
+ path.length
) / 2 > self.join_depth:
return
elif subq_path.contains_mapper(self.mapper):
return
(
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
) = self._get_leftmost(subq_path)
orig_query = context.attributes.get(
("orig_query", SubqueryLoader), context.query
)
# generate a new Query from the original, then
# produce a subquery from it.
left_alias = self._generate_from_original_query(
orig_query,
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
entity.entity_zero,
)
# generate another Query that will join the
# left alias to the target relationships.
# basically doing a longhand
# "from_self()". (from_self() itself not quite industrial
# strength enough for all contingencies...but very close)
q = orig_query.session.query(effective_entity)
q._attributes = {
("orig_query", SubqueryLoader): orig_query,
("subquery_path", None): subq_path,
}
q = q._set_enable_single_crit(False)
to_join, local_attr, parent_alias = self._prep_for_joins(
left_alias, subq_path
)
q = q.order_by(*local_attr)
q = q.add_columns(*local_attr)
q = self._apply_joins(
q, to_join, left_alias, parent_alias, effective_entity
)
q = self._setup_options(q, subq_path, orig_query, effective_entity)
q = self._setup_outermost_orderby(q)
# add new query to attributes to be picked up
# by create_row_processor
path.set(context.attributes, "subquery", q)
def _get_leftmost(self, subq_path):
subq_path = subq_path.path
subq_mapper = orm_util._class_to_mapper(subq_path[0])
# determine attributes of the leftmost mapper
if (
self.parent.isa(subq_mapper)
and self.parent_property is subq_path[1]
):
leftmost_mapper, leftmost_prop = self.parent, self.parent_property
else:
leftmost_mapper, leftmost_prop = subq_mapper, subq_path[1]
leftmost_cols = leftmost_prop.local_columns
leftmost_attr = [
getattr(
subq_path[0].entity, leftmost_mapper._columntoproperty[c].key
)
for c in leftmost_cols
]
return leftmost_mapper, leftmost_attr, leftmost_prop
def _generate_from_original_query(
self,
orig_query,
leftmost_mapper,
leftmost_attr,
leftmost_relationship,
orig_entity,
):
# reformat the original query
# to look only for significant columns
q = orig_query._clone().correlate(None)
# set the query's "FROM" list explicitly to what the
# FROM list would be in any case, as we will be limiting
# the columns in the SELECT list which may no longer include
# all entities mentioned in things like WHERE, JOIN, etc.
if not q._from_obj:
q._set_select_from(
list(
set(
[
ent["entity"]
for ent in orig_query.column_descriptions
if ent["entity"] is not None
]
)
),
False,
)
# select from the identity columns of the outer (specifically, these
# are the 'local_cols' of the property). This will remove
# other columns from the query that might suggest the right entity
# which is why we do _set_select_from above.
target_cols = q._adapt_col_list(leftmost_attr)
q._set_entities(target_cols)
distinct_target_key = leftmost_relationship.distinct_target_key
if distinct_target_key is True:
q._distinct = True
elif distinct_target_key is None:
# if target_cols refer to a non-primary key or only
# part of a composite primary key, set the q as distinct
for t in set(c.table for c in target_cols):
if not set(target_cols).issuperset(t.primary_key):
q._distinct = True
break
if q._order_by is False:
q._order_by = leftmost_mapper.order_by
# don't need ORDER BY if no limit/offset
if q._limit is None and q._offset is None:
q._order_by = None
# the original query now becomes a subquery
# which we'll join onto.
embed_q = q.with_labels().subquery()
left_alias = orm_util.AliasedClass(
leftmost_mapper, embed_q, use_mapper_path=True
)
return left_alias
def _prep_for_joins(self, left_alias, subq_path):
# figure out what's being joined. a.k.a. the fun part
to_join = []
pairs = list(subq_path.pairs())
for i, (mapper, prop) in enumerate(pairs):
if i > 0:
# look at the previous mapper in the chain -
# if it is as or more specific than this prop's
# mapper, use that instead.
# note we have an assumption here that
# the non-first element is always going to be a mapper,
# not an AliasedClass
prev_mapper = pairs[i - 1][1].mapper
to_append = prev_mapper if prev_mapper.isa(mapper) else mapper
else:
to_append = mapper
to_join.append((to_append, prop.key))
# determine the immediate parent class we are joining from,
# which needs to be aliased.
if len(to_join) < 2:
# in the case of a one level eager load, this is the
# leftmost "left_alias".
parent_alias = left_alias
else:
info = inspect(to_join[-1][0])
if info.is_aliased_class:
parent_alias = info.entity
else:
# alias a plain mapper as we may be
# joining multiple times
parent_alias = orm_util.AliasedClass(
info.entity, use_mapper_path=True
)
local_cols = self.parent_property.local_columns
local_attr = [
getattr(parent_alias, self.parent._columntoproperty[c].key)
for c in local_cols
]
return to_join, local_attr, parent_alias
def _apply_joins(
self, q, to_join, left_alias, parent_alias, effective_entity
):
ltj = len(to_join)
if ltj == 1:
to_join = [
getattr(left_alias, to_join[0][1]).of_type(effective_entity)
]
elif ltj == 2:
to_join = [
getattr(left_alias, to_join[0][1]).of_type(parent_alias),
getattr(parent_alias, to_join[-1][1]).of_type(
effective_entity
),
]
elif ltj > 2:
middle = [
(
orm_util.AliasedClass(item[0])
if not inspect(item[0]).is_aliased_class
else item[0].entity,
item[1],
)
for item in to_join[1:-1]
]
inner = []
while middle:
item = middle.pop(0)
attr = getattr(item[0], item[1])
if middle:
attr = attr.of_type(middle[0][0])
else:
attr = attr.of_type(parent_alias)
inner.append(attr)
to_join = (
[getattr(left_alias, to_join[0][1]).of_type(inner[0].parent)]
+ inner
+ [
getattr(parent_alias, to_join[-1][1]).of_type(
effective_entity
)
]
)
for attr in to_join:
q = q.join(attr, from_joinpoint=True)
return q
def _setup_options(self, q, subq_path, orig_query, effective_entity):
# propagate loader options etc. to the new query.
# these will fire relative to subq_path.
q = q._with_current_path(subq_path)
q = q._conditional_options(*orig_query._with_options)
if orig_query._populate_existing:
q._populate_existing = orig_query._populate_existing
return q
def _setup_outermost_orderby(self, q):
if self.parent_property.order_by:
# if there's an ORDER BY, alias it the same
# way joinedloader does, but we have to pull out
# the "eagerjoin" from the query.
# this really only picks up the "secondary" table
# right now.
eagerjoin = q._from_obj[0]
eager_order_by = eagerjoin._target_adapter.copy_and_process(
util.to_list(self.parent_property.order_by)
)
q = q.order_by(*eager_order_by)
return q
class _SubqCollections(object):
"""Given a :class:`.Query` used to emit the "subquery load",
provide a load interface that executes the query at the
first moment a value is needed.
"""
_data = None
def __init__(self, subq):
self.subq = subq
def get(self, key, default):
if self._data is None:
self._load()
return self._data.get(key, default)
def _load(self):
self._data = dict(
(k, [vv[0] for vv in v])
for k, v in itertools.groupby(self.subq, lambda x: x[1:])
)
def loader(self, state, dict_, row):
if self._data is None:
self._load()
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
if context.refresh_state:
return self._immediateload_create_row_processor(
context, path, loadopt, mapper, result, adapter, populators
)
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
path = path[self.parent_property]
subq = path.get(context.attributes, "subquery")
if subq is None:
return
assert subq.session is context.session, (
"Subquery session doesn't refer to that of "
"our context. Are there broken context caching "
"schemes being used?"
)
local_cols = self.parent_property.local_columns
# cache the loaded collections in the context
# so that inheriting mappers don't re-load when they
# call upon create_row_processor again
collections = path.get(context.attributes, "collections")
if collections is None:
collections = self._SubqCollections(subq)
path.set(context.attributes, "collections", collections)
if adapter:
local_cols = [adapter.columns[c] for c in local_cols]
if self.uselist:
self._create_collection_loader(
context, result, collections, local_cols, populators
)
else:
self._create_scalar_loader(
context, result, collections, local_cols, populators
)
def _create_collection_loader(
self, context, result, collections, local_cols, populators
):
tuple_getter = result._tuple_getter(local_cols)
def load_collection_from_subq(state, dict_, row):
collection = collections.get(tuple_getter(row), ())
state.get_impl(self.key).set_committed_value(
state, dict_, collection
)
def load_collection_from_subq_existing_row(state, dict_, row):
if self.key not in dict_:
load_collection_from_subq(state, dict_, row)
populators["new"].append((self.key, load_collection_from_subq))
populators["existing"].append(
(self.key, load_collection_from_subq_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append((self.key, collections.loader))
def _create_scalar_loader(
self, context, result, collections, local_cols, populators
):
tuple_getter = result._tuple_getter(local_cols)
def load_scalar_from_subq(state, dict_, row):
collection = collections.get(tuple_getter(row), (None,))
if len(collection) > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' " % self
)
scalar = collection[0]
state.get_impl(self.key).set_committed_value(state, dict_, scalar)
def load_scalar_from_subq_existing_row(state, dict_, row):
if self.key not in dict_:
load_scalar_from_subq(state, dict_, row)
populators["new"].append((self.key, load_scalar_from_subq))
populators["existing"].append(
(self.key, load_scalar_from_subq_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append((self.key, collections.loader))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="joined")
@properties.RelationshipProperty.strategy_for(lazy=False)
class JoinedLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
using joined eager loading.
"""
__slots__ = "join_depth", "_aliased_class_pool"
def __init__(self, parent, strategy_key):
super(JoinedLoader, self).__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
self._aliased_class_pool = []
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def setup_query(
self,
context,
query_entity,
path,
loadopt,
adapter,
column_collection=None,
parentmapper=None,
chained_from_outerjoin=False,
**kwargs
):
"""Add a left outer join to the statement that's being constructed."""
if not context.query._enable_eagerloads:
return
elif context.query._yield_per and self.uselist:
context.query._no_yield_per("joined collection")
path = path[self.parent_property]
with_polymorphic = None
user_defined_adapter = (
self._init_user_defined_eager_proc(loadopt, context)
if loadopt
else False
)
if user_defined_adapter is not False:
(
clauses,
adapter,
add_to_collection,
) = self._setup_query_on_user_defined_adapter(
context, query_entity, path, adapter, user_defined_adapter
)
else:
# if not via query option, check for
# a cycle
if not path.contains(context.attributes, "loader"):
if self.join_depth:
if path.length / 2 > self.join_depth:
return
elif path.contains_mapper(self.mapper):
return
(
clauses,
adapter,
add_to_collection,
chained_from_outerjoin,
) = self._generate_row_adapter(
context,
query_entity,
path,
loadopt,
adapter,
column_collection,
parentmapper,
chained_from_outerjoin,
)
with_poly_entity = path.get(
context.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
with_polymorphic = inspect(
with_poly_entity
).with_polymorphic_mappers
else:
with_polymorphic = None
path = path[self.entity]
loading._setup_entity_query(
context,
self.mapper,
query_entity,
path,
clauses,
add_to_collection,
with_polymorphic=with_polymorphic,
parentmapper=self.mapper,
chained_from_outerjoin=chained_from_outerjoin,
)
if with_poly_entity is not None and None in set(
context.secondary_columns
):
raise sa_exc.InvalidRequestError(
"Detected unaliased columns when generating joined "
"load. Make sure to use aliased=True or flat=True "
"when using joined loading with with_polymorphic()."
)
def _init_user_defined_eager_proc(self, loadopt, context):
# check if the opt applies at all
if "eager_from_alias" not in loadopt.local_opts:
# nope
return False
path = loadopt.path.parent
# the option applies. check if the "user_defined_eager_row_processor"
# has been built up.
adapter = path.get(
context.attributes, "user_defined_eager_row_processor", False
)
if adapter is not False:
# just return it
return adapter
# otherwise figure it out.
alias = loadopt.local_opts["eager_from_alias"]
root_mapper, prop = path[-2:]
if alias is not None:
if isinstance(alias, str):
alias = prop.target.alias(alias)
adapter = sql_util.ColumnAdapter(
alias, equivalents=prop.mapper._equivalent_columns
)
else:
if path.contains(context.attributes, "path_with_polymorphic"):
with_poly_entity = path.get(
context.attributes, "path_with_polymorphic"
)
adapter = orm_util.ORMAdapter(
with_poly_entity,
equivalents=prop.mapper._equivalent_columns,
)
else:
adapter = context.query._polymorphic_adapters.get(
prop.mapper, None
)
path.set(
context.attributes, "user_defined_eager_row_processor", adapter
)
return adapter
def _setup_query_on_user_defined_adapter(
self, context, entity, path, adapter, user_defined_adapter
):
# apply some more wrapping to the "user defined adapter"
# if we are setting up the query for SQL render.
adapter = entity._get_entity_clauses(context.query, context)
if adapter and user_defined_adapter:
user_defined_adapter = user_defined_adapter.wrap(adapter)
path.set(
context.attributes,
"user_defined_eager_row_processor",
user_defined_adapter,
)
elif adapter:
user_defined_adapter = adapter
path.set(
context.attributes,
"user_defined_eager_row_processor",
user_defined_adapter,
)
add_to_collection = context.primary_columns
return user_defined_adapter, adapter, add_to_collection
def _gen_pooled_aliased_class(self, context):
# keep a local pool of AliasedClass objects that get re-used.
# we need one unique AliasedClass per query per appearance of our
# entity in the query.
if inspect(self.entity).is_aliased_class:
alt_selectable = inspect(self.entity).selectable
else:
alt_selectable = None
key = ("joinedloader_ac", self)
if key not in context.attributes:
context.attributes[key] = idx = 0
else:
context.attributes[key] = idx = context.attributes[key] + 1
if idx >= len(self._aliased_class_pool):
to_adapt = orm_util.AliasedClass(
self.mapper,
alias=alt_selectable.alias(flat=True)
if alt_selectable is not None
else None,
flat=True,
use_mapper_path=True,
)
# load up the .columns collection on the Alias() before
# the object becomes shared among threads. this prevents
# races for column identities.
inspect(to_adapt).selectable.c
self._aliased_class_pool.append(to_adapt)
return self._aliased_class_pool[idx]
def _generate_row_adapter(
self,
context,
entity,
path,
loadopt,
adapter,
column_collection,
parentmapper,
chained_from_outerjoin,
):
with_poly_entity = path.get(
context.attributes, "path_with_polymorphic", None
)
if with_poly_entity:
to_adapt = with_poly_entity
else:
to_adapt = self._gen_pooled_aliased_class(context)
clauses = inspect(to_adapt)._memo(
("joinedloader_ormadapter", self),
orm_util.ORMAdapter,
to_adapt,
equivalents=self.mapper._equivalent_columns,
adapt_required=True,
allow_label_resolve=False,
anonymize_labels=True,
)
assert clauses.aliased_class is not None
if self.parent_property.uselist:
context.multi_row_eager_loaders = True
innerjoin = (
loadopt.local_opts.get("innerjoin", self.parent_property.innerjoin)
if loadopt is not None
else self.parent_property.innerjoin
)
if not innerjoin:
# if this is an outer join, all non-nested eager joins from
# this path must also be outer joins
chained_from_outerjoin = True
context.create_eager_joins.append(
(
self._create_eager_join,
entity,
path,
adapter,
parentmapper,
clauses,
innerjoin,
chained_from_outerjoin,
)
)
add_to_collection = context.secondary_columns
path.set(context.attributes, "eager_row_processor", clauses)
return clauses, adapter, add_to_collection, chained_from_outerjoin
def _create_eager_join(
self,
context,
query_entity,
path,
adapter,
parentmapper,
clauses,
innerjoin,
chained_from_outerjoin,
):
if parentmapper is None:
localparent = query_entity.mapper
else:
localparent = parentmapper
# whether or not the Query will wrap the selectable in a subquery,
# and then attach eager load joins to that (i.e., in the case of
# LIMIT/OFFSET etc.)
should_nest_selectable = (
context.multi_row_eager_loaders
and context.query._should_nest_selectable
)
query_entity_key = None
if (
query_entity not in context.eager_joins
and not should_nest_selectable
and context.from_clause
):
indexes = sql_util.find_left_clause_that_matches_given(
context.from_clause, query_entity.selectable
)
if len(indexes) > 1:
# for the eager load case, I can't reproduce this right
# now. For query.join() I can.
raise sa_exc.InvalidRequestError(
"Can't identify which query entity in which to joined "
"eager load from. Please use an exact match when "
"specifying the join path."
)
if indexes:
clause = context.from_clause[indexes[0]]
# join to an existing FROM clause on the query.
# key it to its list index in the eager_joins dict.
# Query._compile_context will adapt as needed and
# append to the FROM clause of the select().
query_entity_key, default_towrap = indexes[0], clause
if query_entity_key is None:
query_entity_key, default_towrap = (
query_entity,
query_entity.selectable,
)
towrap = context.eager_joins.setdefault(
query_entity_key, default_towrap
)
if adapter:
if getattr(adapter, "aliased_class", None):
# joining from an adapted entity. The adapted entity
# might be a "with_polymorphic", so resolve that to our
# specific mapper's entity before looking for our attribute
# name on it.
efm = inspect(adapter.aliased_class)._entity_for_mapper(
localparent
if localparent.isa(self.parent)
else self.parent
)
# look for our attribute on the adapted entity, else fall back
# to our straight property
onclause = getattr(efm.entity, self.key, self.parent_property)
else:
onclause = getattr(
orm_util.AliasedClass(
self.parent, adapter.selectable, use_mapper_path=True
),
self.key,
self.parent_property,
)
else:
onclause = self.parent_property
assert clauses.aliased_class is not None
attach_on_outside = (
not chained_from_outerjoin
or not innerjoin
or innerjoin == "unnested"
or query_entity.entity_zero.represents_outer_join
)
if attach_on_outside:
# this is the "classic" eager join case.
eagerjoin = orm_util._ORMJoin(
towrap,
clauses.aliased_class,
onclause,
isouter=not innerjoin
or query_entity.entity_zero.represents_outer_join
or (chained_from_outerjoin and isinstance(towrap, sql.Join)),
_left_memo=self.parent,
_right_memo=self.mapper,
)
else:
# all other cases are innerjoin=='nested' approach
eagerjoin = self._splice_nested_inner_join(
path, towrap, clauses, onclause
)
context.eager_joins[query_entity_key] = eagerjoin
# send a hint to the Query as to where it may "splice" this join
eagerjoin.stop_on = query_entity.selectable
if not parentmapper:
# for parentclause that is the non-eager end of the join,
# ensure all the parent cols in the primaryjoin are actually
# in the
# columns clause (i.e. are not deferred), so that aliasing applied
# by the Query propagates those columns outward.
# This has the effect
# of "undefering" those columns.
for col in sql_util._find_columns(
self.parent_property.primaryjoin
):
if localparent.persist_selectable.c.contains_column(col):
if adapter:
col = adapter.columns[col]
context.primary_columns.append(col)
if self.parent_property.order_by:
context.eager_order_by += (
eagerjoin._target_adapter.copy_and_process
)(util.to_list(self.parent_property.order_by))
def _splice_nested_inner_join(
self, path, join_obj, clauses, onclause, splicing=False
):
if splicing is False:
# first call is always handed a join object
# from the outside
assert isinstance(join_obj, orm_util._ORMJoin)
elif isinstance(join_obj, sql.selectable.FromGrouping):
return self._splice_nested_inner_join(
path, join_obj.element, clauses, onclause, splicing
)
elif not isinstance(join_obj, orm_util._ORMJoin):
if path[-2] is splicing:
return orm_util._ORMJoin(
join_obj,
clauses.aliased_class,
onclause,
isouter=False,
_left_memo=splicing,
_right_memo=path[-1].mapper,
)
else:
# only here if splicing == True
return None
target_join = self._splice_nested_inner_join(
path, join_obj.right, clauses, onclause, join_obj._right_memo
)
if target_join is None:
right_splice = False
target_join = self._splice_nested_inner_join(
path, join_obj.left, clauses, onclause, join_obj._left_memo
)
if target_join is None:
# should only return None when recursively called,
# e.g. splicing==True
assert (
splicing is not False
), "assertion failed attempting to produce joined eager loads"
return None
else:
right_splice = True
if right_splice:
# for a right splice, attempt to flatten out
# a JOIN b JOIN c JOIN .. to avoid needless
# parenthesis nesting
if not join_obj.isouter and not target_join.isouter:
eagerjoin = join_obj._splice_into_center(target_join)
else:
eagerjoin = orm_util._ORMJoin(
join_obj.left,
target_join,
join_obj.onclause,
isouter=join_obj.isouter,
_left_memo=join_obj._left_memo,
)
else:
eagerjoin = orm_util._ORMJoin(
target_join,
join_obj.right,
join_obj.onclause,
isouter=join_obj.isouter,
_right_memo=join_obj._right_memo,
)
eagerjoin._target_adapter = target_join._target_adapter
return eagerjoin
def _create_eager_adapter(self, context, result, adapter, path, loadopt):
user_defined_adapter = (
self._init_user_defined_eager_proc(loadopt, context)
if loadopt
else False
)
if user_defined_adapter is not False:
decorator = user_defined_adapter
# user defined eagerloads are part of the "primary"
# portion of the load.
# the adapters applied to the Query should be honored.
if context.adapter and decorator:
decorator = decorator.wrap(context.adapter)
elif context.adapter:
decorator = context.adapter
else:
decorator = path.get(context.attributes, "eager_row_processor")
if decorator is None:
return False
if self.mapper._result_has_identity_key(result, decorator):
return decorator
else:
# no identity key - don't return a row
# processor, will cause a degrade to lazy
return False
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
our_path = path[self.parent_property]
eager_adapter = self._create_eager_adapter(
context, result, adapter, our_path, loadopt
)
if eager_adapter is not False:
key = self.key
_instance = loading._instance_processor(
self.mapper,
context,
result,
our_path[self.entity],
eager_adapter,
)
if not self.uselist:
self._create_scalar_loader(context, key, _instance, populators)
else:
self._create_collection_loader(
context, key, _instance, populators
)
else:
self.parent_property._get_strategy(
(("lazy", "select"),)
).create_row_processor(
context, path, loadopt, mapper, result, adapter, populators
)
def _create_collection_loader(self, context, key, _instance, populators):
def load_collection_from_joined_new_row(state, dict_, row):
# note this must unconditionally clear out any existing collection.
# an existing collection would be present only in the case of
# populate_existing().
collection = attributes.init_state_collection(state, dict_, key)
result_list = util.UniqueAppender(
collection, "append_without_event"
)
context.attributes[(state, key)] = result_list
inst = _instance(row)
if inst is not None:
result_list.append(inst)
def load_collection_from_joined_existing_row(state, dict_, row):
if (state, key) in context.attributes:
result_list = context.attributes[(state, key)]
else:
# appender_key can be absent from context.attributes
# with isnew=False when self-referential eager loading
# is used; the same instance may be present in two
# distinct sets of result columns
collection = attributes.init_state_collection(
state, dict_, key
)
result_list = util.UniqueAppender(
collection, "append_without_event"
)
context.attributes[(state, key)] = result_list
inst = _instance(row)
if inst is not None:
result_list.append(inst)
def load_collection_from_joined_exec(state, dict_, row):
_instance(row)
populators["new"].append(
(self.key, load_collection_from_joined_new_row)
)
populators["existing"].append(
(self.key, load_collection_from_joined_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append(
(self.key, load_collection_from_joined_exec)
)
def _create_scalar_loader(self, context, key, _instance, populators):
def load_scalar_from_joined_new_row(state, dict_, row):
# set a scalar object instance directly on the parent
# object, bypassing InstrumentedAttribute event handlers.
dict_[key] = _instance(row)
def load_scalar_from_joined_existing_row(state, dict_, row):
# call _instance on the row, even though the object has
# been created, so that we further descend into properties
existing = _instance(row)
# conflicting value already loaded, this shouldn't happen
if key in dict_:
if existing is not dict_[key]:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' "
% self
)
else:
# this case is when one row has multiple loads of the
# same entity (e.g. via aliasing), one has an attribute
# that the other doesn't.
dict_[key] = existing
def load_scalar_from_joined_exec(state, dict_, row):
_instance(row)
populators["new"].append((self.key, load_scalar_from_joined_new_row))
populators["existing"].append(
(self.key, load_scalar_from_joined_existing_row)
)
if context.invoke_all_eagers:
populators["eager"].append(
(self.key, load_scalar_from_joined_exec)
)
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="selectin")
class SelectInLoader(PostLoader, util.MemoizedSlots):
__slots__ = (
"join_depth",
"omit_join",
"_parent_alias",
"_query_info",
"_fallback_query_info",
"_bakery",
)
query_info = collections.namedtuple(
"queryinfo",
[
"load_only_child",
"load_with_join",
"in_expr",
"pk_cols",
"zero_idx",
"child_lookup_cols",
],
)
_chunksize = 500
def __init__(self, parent, strategy_key):
super(SelectInLoader, self).__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
is_m2o = self.parent_property.direction is interfaces.MANYTOONE
if self.parent_property.omit_join is not None:
self.omit_join = self.parent_property.omit_join
else:
lazyloader = self.parent_property._get_strategy(
(("lazy", "select"),)
)
if is_m2o:
self.omit_join = lazyloader.use_get
else:
self.omit_join = self.parent._get_clause[0].compare(
lazyloader._rev_lazywhere,
use_proxies=True,
equivalents=self.parent._equivalent_columns,
)
if self.omit_join:
if is_m2o:
self._query_info = self._init_for_omit_join_m2o()
self._fallback_query_info = self._init_for_join()
else:
self._query_info = self._init_for_omit_join()
else:
self._query_info = self._init_for_join()
def _init_for_omit_join(self):
pk_to_fk = dict(
self.parent_property._join_condition.local_remote_pairs
)
pk_to_fk.update(
(equiv, pk_to_fk[k])
for k in list(pk_to_fk)
for equiv in self.parent._equivalent_columns.get(k, ())
)
pk_cols = fk_cols = [
pk_to_fk[col] for col in self.parent.primary_key if col in pk_to_fk
]
if len(fk_cols) > 1:
in_expr = sql.tuple_(*fk_cols)
zero_idx = False
else:
in_expr = fk_cols[0]
zero_idx = True
return self.query_info(False, False, in_expr, pk_cols, zero_idx, None)
def _init_for_omit_join_m2o(self):
pk_cols = self.mapper.primary_key
if len(pk_cols) > 1:
in_expr = sql.tuple_(*pk_cols)
zero_idx = False
else:
in_expr = pk_cols[0]
zero_idx = True
lazyloader = self.parent_property._get_strategy((("lazy", "select"),))
lookup_cols = [lazyloader._equated_columns[pk] for pk in pk_cols]
return self.query_info(
True, False, in_expr, pk_cols, zero_idx, lookup_cols
)
def _init_for_join(self):
self._parent_alias = aliased(self.parent.class_)
pa_insp = inspect(self._parent_alias)
pk_cols = [
pa_insp._adapt_element(col) for col in self.parent.primary_key
]
if len(pk_cols) > 1:
in_expr = sql.tuple_(*pk_cols)
zero_idx = False
else:
in_expr = pk_cols[0]
zero_idx = True
return self.query_info(False, True, in_expr, pk_cols, zero_idx, None)
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
@util.dependencies("sqlalchemy.ext.baked")
def _memoized_attr__bakery(self, baked):
return baked.bakery(size=50)
def create_row_processor(
self, context, path, loadopt, mapper, result, adapter, populators
):
if context.refresh_state:
return self._immediateload_create_row_processor(
context, path, loadopt, mapper, result, adapter, populators
)
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
selectin_path = (
context.query._current_path or orm_util.PathRegistry.root
) + path
if not orm_util._entity_isa(path[-1], self.parent):
return
if loading.PostLoad.path_exists(context, selectin_path, self.key):
return
path_w_prop = path[self.parent_property]
selectin_path_w_prop = selectin_path[self.parent_property]
# build up a path indicating the path from the leftmost
# entity to the thing we're subquery loading.
with_poly_entity = path_w_prop.get(
context.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
effective_entity = with_poly_entity
else:
effective_entity = self.entity
if not path_w_prop.contains(context.attributes, "loader"):
if self.join_depth:
if selectin_path_w_prop.length / 2 > self.join_depth:
return
elif selectin_path_w_prop.contains_mapper(self.mapper):
return
loading.PostLoad.callable_for_path(
context,
selectin_path,
self.parent,
self.key,
self._load_for_path,
effective_entity,
)
@util.dependencies("sqlalchemy.ext.baked")
def _load_for_path(
self, baked, context, path, states, load_only, effective_entity
):
if load_only and self.key not in load_only:
return
query_info = self._query_info
if query_info.load_only_child:
our_states = collections.defaultdict(list)
none_states = []
mapper = self.parent
for state, overwrite in states:
state_dict = state.dict
related_ident = tuple(
mapper._get_state_attr_by_column(
state,
state_dict,
lk,
passive=attributes.PASSIVE_NO_FETCH,
)
for lk in query_info.child_lookup_cols
)
# if the loaded parent objects do not have the foreign key
# to the related item loaded, then degrade into the joined
# version of selectinload
if attributes.PASSIVE_NO_RESULT in related_ident:
query_info = self._fallback_query_info
break
# organize states into lists keyed to particular foreign
# key values.
if None not in related_ident:
our_states[related_ident].append(
(state, state_dict, overwrite)
)
else:
# For FK values that have None, add them to a
# separate collection that will be populated separately
none_states.append((state, state_dict, overwrite))
# note the above conditional may have changed query_info
if not query_info.load_only_child:
our_states = [
(state.key[1], state, state.dict, overwrite)
for state, overwrite in states
]
pk_cols = query_info.pk_cols
in_expr = query_info.in_expr
if not query_info.load_with_join:
# in "omit join" mode, the primary key column and the
# "in" expression are in terms of the related entity. So
# if the related entity is polymorphic or otherwise aliased,
# we need to adapt our "pk_cols" and "in_expr" to that
# entity. in non-"omit join" mode, these are against the
# parent entity and do not need adaption.
insp = inspect(effective_entity)
if insp.is_aliased_class:
pk_cols = [insp._adapt_element(col) for col in pk_cols]
in_expr = insp._adapt_element(in_expr)
q = self._bakery(
lambda session: session.query(
query.Bundle("pk", *pk_cols), effective_entity
),
self,
)
if not query_info.load_with_join:
# the Bundle we have in the "omit_join" case is against raw, non
# annotated columns, so to ensure the Query knows its primary
# entity, we add it explicitly. If we made the Bundle against
# annotated columns, we hit a performance issue in this specific
# case, which is detailed in issue #4347.
q.add_criteria(lambda q: q.select_from(effective_entity))
else:
# in the non-omit_join case, the Bundle is against the annotated/
# mapped column of the parent entity, but the #4347 issue does not
# occur in this case.
pa = self._parent_alias
q.add_criteria(
lambda q: q.select_from(pa).join(
getattr(pa, self.parent_property.key).of_type(
effective_entity
)
)
)
if query_info.load_only_child:
q.add_criteria(
lambda q: q.filter(
in_expr.in_(sql.bindparam("primary_keys", expanding=True))
)
)
else:
q.add_criteria(
lambda q: q.filter(
in_expr.in_(sql.bindparam("primary_keys", expanding=True))
).order_by(*pk_cols)
)
orig_query = context.query
q._add_lazyload_options(
orig_query._with_options, path[self.parent_property]
)
if orig_query._populate_existing:
q.add_criteria(lambda q: q.populate_existing())
if self.parent_property.order_by:
if not query_info.load_with_join:
eager_order_by = self.parent_property.order_by
if insp.is_aliased_class:
eager_order_by = [
insp._adapt_element(elem) for elem in eager_order_by
]
q.add_criteria(lambda q: q.order_by(*eager_order_by))
else:
def _setup_outermost_orderby(q):
# imitate the same method that subquery eager loading uses,
# looking for the adapted "secondary" table
eagerjoin = q._from_obj[0]
return q.order_by(
*eagerjoin._target_adapter.copy_and_process(
util.to_list(self.parent_property.order_by)
)
)
q.add_criteria(_setup_outermost_orderby)
if query_info.load_only_child:
self._load_via_child(
our_states, none_states, query_info, q, context
)
else:
self._load_via_parent(our_states, query_info, q, context)
def _load_via_child(self, our_states, none_states, query_info, q, context):
uselist = self.uselist
# this sort is really for the benefit of the unit tests
our_keys = sorted(our_states)
while our_keys:
chunk = our_keys[0 : self._chunksize]
our_keys = our_keys[self._chunksize :]
data = {
k: v
for k, v in q(context.session).params(
primary_keys=[
key[0] if query_info.zero_idx else key for key in chunk
]
)
}
for key in chunk:
# for a real foreign key and no concurrent changes to the
# DB while running this method, "key" is always present in
# data. However, for primaryjoins without real foreign keys
# a non-None primaryjoin condition may still refer to no
# related object.
related_obj = data.get(key, None)
for state, dict_, overwrite in our_states[key]:
if not overwrite and self.key in dict_:
continue
state.get_impl(self.key).set_committed_value(
state,
dict_,
related_obj if not uselist else [related_obj],
)
# populate none states with empty value / collection
for state, dict_, overwrite in none_states:
if not overwrite and self.key in dict_:
continue
# note it's OK if this is a uselist=True attribute, the empty
# collection will be populated
state.get_impl(self.key).set_committed_value(state, dict_, None)
def _load_via_parent(self, our_states, query_info, q, context):
uselist = self.uselist
_empty_result = () if uselist else None
while our_states:
chunk = our_states[0 : self._chunksize]
our_states = our_states[self._chunksize :]
primary_keys = [
key[0] if query_info.zero_idx else key
for key, state, state_dict, overwrite in chunk
]
data = {
k: [vv[1] for vv in v]
for k, v in itertools.groupby(
q(context.session).params(primary_keys=primary_keys),
lambda x: x[0],
)
}
for key, state, state_dict, overwrite in chunk:
if not overwrite and self.key in state_dict:
continue
collection = data.get(key, _empty_result)
if not uselist and collection:
if len(collection) > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded "
"attribute '%s' " % self
)
state.get_impl(self.key).set_committed_value(
state, state_dict, collection[0]
)
else:
# note that empty tuple set on uselist=False sets the
# value to None
state.get_impl(self.key).set_committed_value(
state, state_dict, collection
)
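# Hedged, illustrative sketch (not part of the original strategies module):
# the user-facing query pattern that SelectInLoader implements.  The model
# names below are hypothetical and only an in-memory SQLite database is used.
def _demo_selectin_loading():
    from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session, relationship, selectinload
    Base = declarative_base()
    class Parent(Base):
        __tablename__ = "parent"
        id = Column(Integer, primary_key=True)
        children = relationship("Child")
    class Child(Base):
        __tablename__ = "child"
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey("parent.id"))
        name = Column(String)
    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = Session(engine)
    session.add_all(
        [Parent(children=[Child(name="a"), Child(name="b")]), Parent()]
    )
    session.commit()
    # One SELECT loads the parents; a second SELECT loads the children with
    # an IN clause over the parent identifiers, issued in chunks of
    # SelectInLoader._chunksize rows.
    parents = (
        session.query(Parent).options(selectinload(Parent.children)).all()
    )
    return [(parent.id, len(parent.children)) for parent in parents]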
def single_parent_validator(desc, prop):
def _do_check(state, value, oldvalue, initiator):
if value is not None and initiator.key == prop.key:
hasparent = initiator.hasparent(attributes.instance_state(value))
if hasparent and oldvalue is not value:
raise sa_exc.InvalidRequestError(
"Instance %s is already associated with an instance "
"of %s via its %s attribute, and is only allowed a "
"single parent."
% (orm_util.instance_str(value), state.class_, prop)
)
return value
def append(state, value, initiator):
return _do_check(state, value, None, initiator)
def set_(state, value, oldvalue, initiator):
return _do_check(state, value, oldvalue, initiator)
event.listen(
desc, "append", append, raw=True, retval=True, active_history=True
)
event.listen(desc, "set", set_, raw=True, retval=True, active_history=True)
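# Hedged, illustrative sketch (not part of the original strategies module):
# the user-facing behaviour enforced by single_parent_validator above.  The
# model names are hypothetical and no database connection is required.
def _demo_single_parent_validator():
    from sqlalchemy import Column, ForeignKey, Integer
    from sqlalchemy.exc import InvalidRequestError
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import configure_mappers, relationship
    Base = declarative_base()
    class Child(Base):
        __tablename__ = "child"
        id = Column(Integer, primary_key=True)
    class Parent(Base):
        __tablename__ = "parent"
        id = Column(Integer, primary_key=True)
        child_id = Column(Integer, ForeignKey("child.id"))
        # delete-orphan cascade on a many-to-one requires single_parent=True,
        # which installs the append/set validators defined above
        child = relationship(
            Child, single_parent=True, cascade="all, delete-orphan"
        )
    configure_mappers()
    shared = Child()
    first, second = Parent(), Parent()
    first.child = shared
    try:
        second.child = shared  # a second parent for the same child
    except InvalidRequestError as error:
        return error  # "...is only allowed a single parent."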
| 34.736965 | 79 | 0.578153 |
17ce1ce266694cb465d80ed7bccf45dac81bc227 | 11,220 | py | Python
| hummingbot/market/bittrex/bittrex_order_book_tracker.py | fakecoinbase/Grandthraxslashhumming_extended | 54e959520f515697b5a88d3fc3636e702ea381e5 | ["Apache-2.0"] | 2 | 2020-08-02T17:56:53.000Z | 2021-02-12T18:47:04.000Z
| hummingbot/market/bittrex/bittrex_order_book_tracker.py | Grandthrax/humming_extended | 54e959520f515697b5a88d3fc3636e702ea381e5 | ["Apache-2.0"] | 1 | 2019-06-05T23:13:35.000Z | 2019-06-05T23:13:35.000Z
| hummingbot/market/bittrex/bittrex_order_book_tracker.py | Grandthrax/humming_extended | 54e959520f515697b5a88d3fc3636e702ea381e5 | ["Apache-2.0"] | 2 | 2020-08-02T17:59:03.000Z | 2021-09-05T22:19:28.000Z |
#!/usr/bin/env python
import asyncio
import bisect
import logging
import time
from collections import (
defaultdict,
deque,
)
from typing import (
Optional,
Dict,
List,
Set,
Deque,
)
from hummingbot.core.data_type.order_book_message import (
OrderBookMessageType,
OrderBookMessage,
)
from hummingbot.core.event.events import TradeType
from hummingbot.logger import HummingbotLogger
from hummingbot.core.data_type.order_book_tracker import OrderBookTracker, OrderBookTrackerDataSourceType
from hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource
from hummingbot.market.bittrex.bittrex_active_order_tracker import BittrexActiveOrderTracker
from hummingbot.market.bittrex.bittrex_api_order_book_data_source import BittrexAPIOrderBookDataSource
from hummingbot.market.bittrex.bittrex_order_book import BittrexOrderBook
from hummingbot.market.bittrex.bittrex_order_book_message import BittrexOrderBookMessage
from hummingbot.market.bittrex.bittrex_order_book_tracker_entry import BittrexOrderBookTrackerEntry
class BittrexOrderBookTracker(OrderBookTracker):
_btobt_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._btobt_logger is None:
cls._btobt_logger = logging.getLogger(__name__)
return cls._btobt_logger
def __init__(
self,
data_source_type: OrderBookTrackerDataSourceType = OrderBookTrackerDataSourceType.EXCHANGE_API,
trading_pairs: Optional[List[str]] = None,
):
super().__init__(data_source_type=data_source_type)
self._ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
self._data_source: Optional[OrderBookTrackerDataSource] = None
self._order_book_snapshot_stream: asyncio.Queue = asyncio.Queue()
self._order_book_diff_stream: asyncio.Queue = asyncio.Queue()
self._process_msg_deque_task: Optional[asyncio.Task] = None
self._past_diffs_windows: Dict[str, Deque] = {}
self._order_books: Dict[str, BittrexOrderBook] = {}
self._saved_message_queues: Dict[str, Deque[BittrexOrderBookMessage]] = defaultdict(lambda: deque(maxlen=1000))
self._active_order_trackers: Dict[str, BittrexActiveOrderTracker] = defaultdict(BittrexActiveOrderTracker)
self._trading_pairs: Optional[List[str]] = trading_pairs
self._order_book_stream_listener_task: Optional[asyncio.Task] = None
@property
def data_source(self) -> OrderBookTrackerDataSource:
if not self._data_source:
if self._data_source_type is OrderBookTrackerDataSourceType.EXCHANGE_API:
self._data_source = BittrexAPIOrderBookDataSource(trading_pairs=self._trading_pairs)
else:
raise ValueError(f"data_source_type {self._data_source_type} is not supported.")
return self._data_source
@property
def exchange_name(self) -> str:
return "bittrex"
async def _refresh_tracking_tasks(self):
"""
Starts tracking for any new trading pairs, and stop tracking for any inactive trading pairs.
"""
tracking_trading_pair: Set[str] = set(
[key for key in self._tracking_tasks.keys() if not self._tracking_tasks[key].done()]
)
available_pairs: Dict[str, BittrexOrderBookTrackerEntry] = await self.data_source.get_tracking_pairs()
available_trading_pair: Set[str] = set(available_pairs.keys())
new_trading_pair: Set[str] = available_trading_pair - tracking_trading_pair
deleted_trading_pair: Set[str] = tracking_trading_pair - available_trading_pair
for trading_pair in new_trading_pair:
order_book_tracker_entry: BittrexOrderBookTrackerEntry = available_pairs[trading_pair]
self._active_order_trackers[trading_pair] = order_book_tracker_entry.active_order_tracker
self._order_books[trading_pair] = order_book_tracker_entry.order_book
self._tracking_message_queues[trading_pair] = asyncio.Queue()
self._tracking_tasks[trading_pair] = asyncio.ensure_future(self._track_single_book(trading_pair))
self.logger().info(f"Started order book tracking for {trading_pair}.")
for trading_pair in deleted_trading_pair:
self._tracking_tasks[trading_pair].cancel()
del self._tracking_tasks[trading_pair]
del self._order_books[trading_pair]
del self._active_order_trackers[trading_pair]
del self._tracking_message_queues[trading_pair]
self.logger().info(f"Stopped order book tracking for {trading_pair}.")
async def _order_book_diff_router(self):
"""
Route the real-time order book diff messages to the correct order book.
"""
last_message_timestamp: float = time.time()
message_queued: int = 0
message_accepted: int = 0
message_rejected: int = 0
while True:
try:
ob_message: BittrexOrderBookMessage = await self._order_book_diff_stream.get()
trading_pair: str = ob_message.trading_pair
if trading_pair not in self._tracking_message_queues:
message_queued += 1
                    # Save diff messages received before snapshots are ready
self._saved_message_queues[trading_pair].append(ob_message)
continue
message_queue: asyncio.Queue = self._tracking_message_queues[trading_pair]
# Check the order book's initial update ID. If it's larger, don't bother.
order_book: BittrexOrderBook = self._order_books[trading_pair]
if order_book.snapshot_uid > ob_message.update_id:
message_rejected += 1
continue
await message_queue.put(ob_message)
message_accepted += 1
if len(ob_message.content["f"]) != 0:
for trade in ob_message.content["f"]:
trade_type = float(TradeType.SELL.value) if trade["OT"].upper() == "SELL" \
else float(TradeType.BUY.value)
self._order_book_trade_stream.put_nowait(OrderBookMessage(OrderBookMessageType.TRADE, {
"trading_pair": ob_message.trading_pair,
"trade_type": trade_type,
"trade_id": trade["FI"],
"update_id": trade["T"],
"price": trade["R"],
"amount": trade["Q"]
}, timestamp=trade["T"]))
# Log some statistics
now: float = time.time()
if int(now / 60.0) > int(last_message_timestamp / 60.0):
                    self.logger().debug(
                        f"Diff messages processed: {message_accepted}, "
                        f"rejected: {message_rejected}, "
                        f"queued: {message_queued}"
                    )
message_accepted = 0
message_rejected = 0
message_queued = 0
last_message_timestamp = now
except asyncio.CancelledError:
raise
except Exception:
self.logger().network(
f"Unexpected error routing order book messages.",
exc_info=True,
app_warning_msg=f"Unexpected error routing order book messages. Retrying after 5 seconds.",
)
await asyncio.sleep(5.0)
async def _track_single_book(self, trading_pair: str):
past_diffs_window: Deque[BittrexOrderBookMessage] = deque()
self._past_diffs_windows[trading_pair] = past_diffs_window
message_queue: asyncio.Queue = self._tracking_message_queues[trading_pair]
order_book: BittrexOrderBook = self._order_books[trading_pair]
active_order_tracker: BittrexActiveOrderTracker = self._active_order_trackers[trading_pair]
last_message_timestamp = order_book.snapshot_uid
diff_message_accepted: int = 0
while True:
try:
message: BittrexOrderBookMessage = None
save_messages: Deque[BittrexOrderBookMessage] = self._saved_message_queues[trading_pair]
# Process saved messages first if there are any
if len(save_messages) > 0:
message = save_messages.popleft()
elif message_queue.qsize() > 0:
message = await message_queue.get()
else:
                    # Wait to receive some diff messages
await asyncio.sleep(3)
continue
# Processes diff stream
if message.type is OrderBookMessageType.DIFF:
bids, asks = active_order_tracker.convert_diff_message_to_order_book_row(message)
order_book.apply_diffs(bids, asks, message.update_id)
past_diffs_window.append(message)
while len(past_diffs_window) > self.PAST_DIFF_WINDOW_SIZE:
past_diffs_window.popleft()
diff_message_accepted += 1
# Output some statistics periodically.
now: float = message.update_id
if now > last_message_timestamp:
self.logger().debug(f"Processed {diff_message_accepted} order book diffs for {trading_pair}")
diff_message_accepted = 0
last_message_timestamp = now
# Processes snapshot stream
elif message.type is OrderBookMessageType.SNAPSHOT:
past_diffs: List[BittrexOrderBookMessage] = list(past_diffs_window)
# only replay diffs later than snapshot, first update active order with snapshot then replay diffs
replay_position = bisect.bisect_right(past_diffs, message)
replay_diffs = past_diffs[replay_position:]
s_bids, s_asks = active_order_tracker.convert_snapshot_message_to_order_book_row(message)
order_book.apply_snapshot(s_bids, s_asks, message.update_id)
for diff_message in replay_diffs:
d_bids, d_asks = active_order_tracker.convert_diff_message_to_order_book_row(diff_message)
order_book.apply_diffs(d_bids, d_asks, diff_message.update_id)
self.logger().debug("Processed order book snapshot for %s.", trading_pair)
except asyncio.CancelledError:
raise
except Exception:
self.logger().network(
f"Unexpected error processing order book messages for {trading_pair}.",
exc_info=True,
app_warning_msg=f"Unexpected error processing order book messages. Retrying after 5 seconds.",
)
await asyncio.sleep(5.0)
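# Illustrative instantiation sketch (not part of the original module).  The
# trading pair below is an arbitrary example; starting and polling the
# tracker is handled by the surrounding hummingbot runtime.
def _build_demo_tracker() -> BittrexOrderBookTracker:
    return BittrexOrderBookTracker(trading_pairs=["BTC-USDT"])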
| 48.571429 | 119 | 0.638948 |
9c6abf8233686ef9c771045703fdf4b5eaa98a4e | 2,686 | py | Python
| cloudmesh/common/systeminfo.py | aporlowski/cloudmesh-common | ff991c07eb17b899742a1535665118a33d679f2d | ["Apache-2.0"] | null | null | null |
| cloudmesh/common/systeminfo.py | aporlowski/cloudmesh-common | ff991c07eb17b899742a1535665118a33d679f2d | ["Apache-2.0"] | null | null | null |
| cloudmesh/common/systeminfo.py | aporlowski/cloudmesh-common | ff991c07eb17b899742a1535665118a33d679f2d | ["Apache-2.0"] | null | null | null |
import platform
import sys
import os
from pathlib import Path
from cloudmesh.common.util import readfile
from collections import OrderedDict
import pip
import psutil
import humanize
def sys_user():
if "COLAB_GPU" in os.environ:
return "collab"
elif sys.platform == "win32":
return os.environ["USERNAME"]
else:
return os.environ["USER"]
def get_platform():
if sys.platform == "darwin":
return "macos"
elif sys.platform == "win32":
return "windows"
try:
content = readfile('/etc/os-release')
if sys.platform == 'linux' and "raspbian" in content:
return "raspberry"
else:
return sys.platform
except:
return sys.platform
def systeminfo():
uname = platform.uname()
mem = psutil.virtual_memory()
# noinspection PyPep8
def add_binary(value):
try:
r = humanize.naturalsize(value, binary=True)
except:
r = ""
return r
data = OrderedDict({
'uname.system': uname.system,
'uname.node': uname.node,
'uname.release': uname.release,
'uname.version': uname.version,
'uname.machine': uname.machine,
'uname.processor': uname.processor,
'sys.platform': sys.platform,
'python': sys.version,
'python.version': sys.version.split(" ", 1)[0],
'python.pip': pip.__version__,
'user': sys_user(),
'mem.percent': str(mem.percent) + " %",
})
for attribute in ["total",
"available",
"used",
"free",
"active",
"inactive",
"wired"
]:
try:
data[f"mem.{attribute}"] = \
humanize.naturalsize(getattr(mem, attribute), binary=True)
except:
pass
# svmem(total=17179869184, available=6552825856, percent=61.9,
if data['sys.platform'] == 'darwin':
data['platform.version'] = platform.mac_ver()[0]
elif data['sys.platform'] == 'win32':
data['platform.version'] = platform.win32_ver()
else:
data['platform.version'] = uname.version
try:
release_files = Path("/etc").glob("*release")
for filename in release_files:
content = readfile(filename.resolve()).splitlines()
for line in content:
if "=" in line:
attribute, value = line.split("=", 1)
attribute = attribute.replace(" ", "")
data[attribute] = value
except:
pass
return dict(data)
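# Hedged usage sketch (not part of the original systeminfo module): printing
# a few of the keys collected above.  Assumes the psutil and humanize
# dependencies that the module already imports are installed.
def _print_systeminfo_summary():
    info = systeminfo()
    for key in ("uname.system", "python.version", "user", "mem.percent"):
        print(f"{key:16} {info.get(key, 'N/A')}")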
| 27.690722 | 74 | 0.538347 |
ca25a67cc33477e6815dd3de8155b2d2d72696c6 | 4,504 | py | Python
| cgc/ThreeColors.py | Jfeatherstone/ColorGlass | f242541df614a8eea97c43d3480c779e92660ebb | ["MIT"] | null | null | null |
| cgc/ThreeColors.py | Jfeatherstone/ColorGlass | f242541df614a8eea97c43d3480c779e92660ebb | ["MIT"] | null | null | null |
| cgc/ThreeColors.py | Jfeatherstone/ColorGlass | f242541df614a8eea97c43d3480c779e92660ebb | ["MIT"] | null | null | null |
from .Wavefunction import Wavefunction
import numpy as np
from scipy.fft import ifft2, fft2
from scipy.linalg import expm
class Nucleus(Wavefunction):
_wilsonLine = None
_adjointWilsonLine = None
# Some variables to keep track of what has been calculated/generated so far
# allowing us to avoid redundant computations
_wilsonLineExists = False
_adjointWilsonLineExists = False
# The Gell-Mann matrices, for use in calculating the adjoint representation of the wilson line
# specific to using 3 color charges
# First entry is the identity, latter 8 are the proper Gell-Mann matrices
_gell_mann = np.array([[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, 1, 0], [1, 0, 0], [0, 0, 0]],
[[0, -1.j, 0], [1.j, 0, 0], [0, 0, 0]],
[[1, 0, 0], [0, -1, 0], [0, 0, 0]],
[[0, 0, 1], [0, 0, 0], [1, 0, 0]],
[[0, 0, -1.j], [0, 0, 0], [1.j, 0, 0]],
[[0, 0, 0], [0, 0, 1], [0, 1, 0]],
                           [[0, 0, 0], [0, 0, -1.j], [0, 1.j, 0]],
[[1/np.sqrt(3), 0, 0], [0, 1/np.sqrt(3), 0], [0, 0, -2/np.sqrt(3)]]
], dtype='complex')
def __init__(self, N, delta, mu, fftNormalization=None, M=.5, g=1):
r"""
Constructor
-----------
Wrapper for `super.__init__` with `colorCharges` = 3.
Parameters
----------
N : positive integer
The size of the square lattice to simulate
delta : positive float
The distance between adjacent lattice sites
mu : positive float
The scaling for the random gaussian distribution that generates the color charge density
fftNormalization : None | "backward" | "ortho" | "forward"
Normalization procedure used when computing fourier transforms; see [scipy documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.fft.html) for more information
M : float
Experimental parameter in the laplace equation for the gauge field
g : float
Parameter in the laplace equation for the gauge field
"""
super().__init__(3, N, delta, mu, fftNormalization, M, g) # Super constructor with colorCharges=3
def wilsonLine(self):
"""
Calculate the Wilson line by numerically computing the
exponential of the gauge field times the Gell-mann matrices.
Numerical calculation is done using [scipy's expm](https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.expm.html)
If the line already exists, it is simply returned and no calculation is done.
"""
if self._wilsonLineExists:
return self._wilsonLine
# Make sure the gauge field has already been calculated
if not self._gaugeFieldExists:
self.gaugeField()
# We have a 3x3 matrix at each lattice point
self._wilsonLine = np.zeros([self.N, self.N, 3, 3], dtype='complex')
for i in range(self.N):
for j in range(self.N):
# Numerical form for SU(n)
self._wilsonLine[i,j] = expm(1.j*sum([self._gaugeField[k,i,j]*self._gell_mann[k+1] for k in range(self.gluonDOF)]))
self._wilsonLineExists = True
return self._wilsonLine
def adjointWilsonLine(self):
"""
Calculate the Wilson line in the adjoint representation.
If the line already exists, it is simply returned and no calculation is done.
"""
if self._adjointWilsonLineExists:
return self._adjointWilsonLine
# Make sure the wilson line has already been calculated
if not self._wilsonLineExists:
self.wilsonLine()
self._adjointWilsonLine = np.zeros([self.gluonDOF+1, self.gluonDOF+1, self.N, self.N], dtype='complex')
for a in range(self.gluonDOF+1):
for b in range(self.gluonDOF+1):
for i in range(self.N):
for j in range(self.N):
V = self._wilsonLine[i,j]
Vdag = np.conjugate(np.transpose(V))
self._adjointWilsonLine[a,b,i,j] = .5 * np.trace(np.dot(np.dot(self._gell_mann[a], V), np.dot(self._gell_mann[b], Vdag)))
self._adjointWilsonLineExists = True
return self._adjointWilsonLine
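# Hedged usage sketch (not part of the original ThreeColors module).  The
# lattice parameters are arbitrary example values; the gauge field itself is
# produced by the parent Wavefunction class.
def _demo_nucleus_wilson_line():
    nucleus = Nucleus(N=16, delta=0.1, mu=1.0)
    V = nucleus.wilsonLine()  # shape (N, N, 3, 3), one SU(3) matrix per site
    site = V[0, 0]
    # exp(i * Hermitian) should be unitary up to numerical error
    deviation = np.abs(site @ site.conj().T - np.eye(3)).max()
    return V.shape, deviation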
| 37.22314 | 195 | 0.574822 |
e43767ef2b648d0d5d57c00f38ccbd38390e38da | 19,134 | py | Python
| tb_check.py | N1kYan/vssil | 214363f1a924414415cfef940404d8057f6912e7 | ["MIT"] | 6,139 | 2017-06-13T02:43:20.000Z | 2022-03-30T23:56:28.000Z
| tb_check.py | N1kYan/vssil | 214363f1a924414415cfef940404d8057f6912e7 | ["MIT"] | 3,239 | 2017-06-14T15:49:50.000Z | 2022-03-31T23:43:15.000Z
| tb_check.py | N1kYan/vssil | 214363f1a924414415cfef940404d8057f6912e7 | ["MIT"] | 1,882 | 2017-06-15T01:33:54.000Z | 2022-03-30T15:27:25.000Z |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Self-diagnosis script for TensorBoard.
Instructions: Save this script to your local machine, then execute it in
the same environment (virtualenv, Conda, etc.) from which you normally
run TensorBoard. Read the output and follow the directions.
"""
# This script may only depend on the Python standard library. It is not
# built with Bazel and should not assume any third-party dependencies.
import collections
import errno
import functools
import hashlib
import inspect
import logging
import os
import pipes
import shlex
import socket
import subprocess
import sys
import tempfile
import textwrap
import traceback
# A *check* is a function (of no arguments) that performs a diagnostic,
# writes log messages, and optionally yields suggestions. Each check
# runs in isolation; exceptions will be caught and reported.
CHECKS = []
# A suggestion to the end user.
# headline (str): A short description, like "Turn it off and on
# again". Should be imperative with no trailing punctuation. May
# contain inline Markdown.
# description (str): A full enumeration of the steps that the user
# should take to accept the suggestion. Within this string, prose
# should be formatted with `reflow`. May contain Markdown.
Suggestion = collections.namedtuple("Suggestion", ("headline", "description"))
def check(fn):
"""Decorator to register a function as a check.
Checks are run in the order in which they are registered.
Args:
fn: A function that takes no arguments and either returns `None` or
returns a generator of `Suggestion`s. (The ability to return
`None` is to work around the awkwardness of defining empty
generator functions in Python.)
Returns:
A wrapped version of `fn` that returns a generator of `Suggestion`s.
"""
@functools.wraps(fn)
def wrapper():
result = fn()
return iter(()) if result is None else result
CHECKS.append(wrapper)
return wrapper
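# Illustrative example only (not part of the upstream diagnosis script): a
# custom check written against the `check` decorator documented above.  The
# environment variable name is purely hypothetical.
@check
def example_custom_check():
    """Report a hypothetical TENSORBOARD_EXAMPLE_FLAG environment variable."""
    value = os.environ.get("TENSORBOARD_EXAMPLE_FLAG")
    logging.info("TENSORBOARD_EXAMPLE_FLAG: %r", value)
    if value is not None:
        yield Suggestion(
            "Unset `TENSORBOARD_EXAMPLE_FLAG`",
            "This variable is only used to illustrate the check API; unset "
            "it before re-running the diagnosis.",
        )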
def reflow(paragraph):
return textwrap.fill(textwrap.dedent(paragraph).strip())
def pip(args):
"""Invoke command-line Pip with the specified args.
Returns:
A bytestring containing the output of Pip.
"""
# Suppress the Python 2.7 deprecation warning.
PYTHONWARNINGS_KEY = "PYTHONWARNINGS"
old_pythonwarnings = os.environ.get(PYTHONWARNINGS_KEY)
new_pythonwarnings = "%s%s" % (
"ignore:DEPRECATION",
",%s" % old_pythonwarnings if old_pythonwarnings else "",
)
command = [sys.executable, "-m", "pip", "--disable-pip-version-check"]
command.extend(args)
try:
os.environ[PYTHONWARNINGS_KEY] = new_pythonwarnings
return subprocess.check_output(command)
finally:
if old_pythonwarnings is None:
del os.environ[PYTHONWARNINGS_KEY]
else:
os.environ[PYTHONWARNINGS_KEY] = old_pythonwarnings
def which(name):
"""Return the path to a binary, or `None` if it's not on the path.
Returns:
A bytestring.
"""
binary = "where" if os.name == "nt" else "which"
try:
return subprocess.check_output([binary, name])
except subprocess.CalledProcessError:
return None
def sgetattr(attr, default):
"""Get an attribute off the `socket` module, or use a default."""
sentinel = object()
result = getattr(socket, attr, sentinel)
if result is sentinel:
print("socket.%s does not exist" % attr)
return default
else:
print("socket.%s = %r" % (attr, result))
return result
@check
def autoidentify():
"""Print the Git hash of this version of `diagnose_tensorboard.py`.
Given this hash, use `git cat-file blob HASH` to recover the
relevant version of the script.
"""
module = sys.modules[__name__]
try:
source = inspect.getsource(module).encode("utf-8")
except TypeError:
logging.info("diagnose_tensorboard.py source unavailable")
else:
# Git inserts a length-prefix before hashing; cf. `git-hash-object`.
blob = b"blob %d\0%s" % (len(source), source)
hash = hashlib.sha1(blob).hexdigest()
logging.info("diagnose_tensorboard.py version %s", hash)
@check
def general():
logging.info("sys.version_info: %s", sys.version_info)
logging.info("os.name: %s", os.name)
na = type("N/A", (object,), {"__repr__": lambda self: "N/A"})
logging.info(
"os.uname(): %r",
getattr(os, "uname", na)(),
)
logging.info(
"sys.getwindowsversion(): %r",
getattr(sys, "getwindowsversion", na)(),
)
@check
def package_management():
conda_meta = os.path.join(sys.prefix, "conda-meta")
logging.info("has conda-meta: %s", os.path.exists(conda_meta))
logging.info("$VIRTUAL_ENV: %r", os.environ.get("VIRTUAL_ENV"))
@check
def installed_packages():
freeze = pip(["freeze", "--all"]).decode("utf-8").splitlines()
packages = {line.split("==")[0]: line for line in freeze}
packages_set = frozenset(packages)
# For each of the following families, expect exactly one package to be
# installed.
expect_unique = [
frozenset(
[
"tensorboard",
"tb-nightly",
"tensorflow-tensorboard",
]
),
frozenset(
[
"tensorflow",
"tensorflow-gpu",
"tf-nightly",
"tf-nightly-2.0-preview",
"tf-nightly-gpu",
"tf-nightly-gpu-2.0-preview",
]
),
frozenset(
[
"tensorflow-estimator",
"tensorflow-estimator-2.0-preview",
"tf-estimator-nightly",
]
),
]
salient_extras = frozenset(["tensorboard-data-server"])
found_conflict = False
for family in expect_unique:
actual = family & packages_set
for package in actual:
logging.info("installed: %s", packages[package])
if len(actual) == 0:
logging.warning("no installation among: %s", sorted(family))
elif len(actual) > 1:
logging.warning("conflicting installations: %s", sorted(actual))
found_conflict = True
for package in sorted(salient_extras & packages_set):
logging.info("installed: %s", packages[package])
if found_conflict:
preamble = reflow(
"""
Conflicting package installations found. Depending on the order
of installations and uninstallations, behavior may be undefined.
Please uninstall ALL versions of TensorFlow and TensorBoard,
then reinstall ONLY the desired version of TensorFlow, which
will transitively pull in the proper version of TensorBoard. (If
you use TensorBoard without TensorFlow, just reinstall the
appropriate version of TensorBoard directly.)
"""
)
packages_to_uninstall = sorted(
frozenset().union(*expect_unique) & packages_set
)
commands = [
"pip uninstall %s" % " ".join(packages_to_uninstall),
"pip install tensorflow # or `tensorflow-gpu`, or `tf-nightly`, ...",
]
message = "%s\n\nNamely:\n\n%s" % (
preamble,
"\n".join("\t%s" % c for c in commands),
)
yield Suggestion("Fix conflicting installations", message)
wit_version = packages.get("tensorboard-plugin-wit")
if wit_version == "tensorboard-plugin-wit==1.6.0.post2":
# This is only incompatible with TensorBoard prior to 2.2.0, but
# we just issue a blanket warning so that we don't have to pull
# in a `pkg_resources` dep to parse the version.
preamble = reflow(
"""
Versions of the What-If Tool (`tensorboard-plugin-wit`)
prior to 1.6.0.post3 are incompatible with some versions of
TensorBoard. Please upgrade this package to the latest
version to resolve any startup errors:
"""
)
command = "pip install -U tensorboard-plugin-wit"
message = "%s\n\n\t%s" % (preamble, command)
yield Suggestion("Upgrade `tensorboard-plugin-wit`", message)
@check
def tensorboard_python_version():
from tensorboard import version
logging.info("tensorboard.version.VERSION: %r", version.VERSION)
@check
def tensorflow_python_version():
import tensorflow as tf
logging.info("tensorflow.__version__: %r", tf.__version__)
logging.info("tensorflow.__git_version__: %r", tf.__git_version__)
@check
def tensorboard_data_server_version():
try:
import tensorboard_data_server
except ImportError:
logging.info("no data server installed")
return
path = tensorboard_data_server.server_binary()
logging.info("data server binary: %r", path)
if path is None:
return
try:
subprocess_output = subprocess.run(
[path, "--version"],
capture_output=True,
check=True,
)
except subprocess.CalledProcessError as e:
logging.info("failed to check binary version: %s", e)
else:
logging.info(
"data server binary version: %s", subprocess_output.stdout.strip()
)
@check
def tensorboard_binary_path():
logging.info("which tensorboard: %r", which("tensorboard"))
@check
def addrinfos():
sgetattr("has_ipv6", None)
family = sgetattr("AF_UNSPEC", 0)
socktype = sgetattr("SOCK_STREAM", 0)
protocol = 0
flags_loopback = sgetattr("AI_ADDRCONFIG", 0)
flags_wildcard = sgetattr("AI_PASSIVE", 0)
hints_loopback = (family, socktype, protocol, flags_loopback)
infos_loopback = socket.getaddrinfo(None, 0, *hints_loopback)
print("Loopback flags: %r" % (flags_loopback,))
print("Loopback infos: %r" % (infos_loopback,))
hints_wildcard = (family, socktype, protocol, flags_wildcard)
infos_wildcard = socket.getaddrinfo(None, 0, *hints_wildcard)
print("Wildcard flags: %r" % (flags_wildcard,))
print("Wildcard infos: %r" % (infos_wildcard,))
@check
def readable_fqdn():
# May raise `UnicodeDecodeError` for non-ASCII hostnames:
# https://github.com/tensorflow/tensorboard/issues/682
try:
logging.info("socket.getfqdn(): %r", socket.getfqdn())
except UnicodeDecodeError as e:
try:
binary_hostname = subprocess.check_output(["hostname"]).strip()
except subprocess.CalledProcessError:
binary_hostname = b"<unavailable>"
is_non_ascii = not all(
0x20
<= (ord(c) if not isinstance(c, int) else c)
<= 0x7E # Python 2
for c in binary_hostname
)
if is_non_ascii:
message = reflow(
"""
Your computer's hostname, %r, contains bytes outside of the
printable ASCII range. Some versions of Python have trouble
working with such names (https://bugs.python.org/issue26227).
Consider changing to a hostname that only contains printable
ASCII bytes.
"""
% (binary_hostname,)
)
yield Suggestion("Use an ASCII hostname", message)
else:
message = reflow(
"""
Python can't read your computer's hostname, %r. This can occur
if the hostname contains non-ASCII bytes
(https://bugs.python.org/issue26227). Consider changing your
hostname, rebooting your machine, and rerunning this diagnosis
script to see if the problem is resolved.
"""
% (binary_hostname,)
)
yield Suggestion("Use a simpler hostname", message)
raise e
@check
def stat_tensorboardinfo():
# We don't use `manager._get_info_dir`, because (a) that requires
# TensorBoard, and (b) that creates the directory if it doesn't exist.
path = os.path.join(tempfile.gettempdir(), ".tensorboard-info")
logging.info("directory: %s", path)
try:
stat_result = os.stat(path)
except OSError as e:
if e.errno == errno.ENOENT:
# No problem; this is just fine.
logging.info(".tensorboard-info directory does not exist")
return
else:
raise
logging.info("os.stat(...): %r", stat_result)
logging.info("mode: 0o%o", stat_result.st_mode)
if stat_result.st_mode & 0o777 != 0o777:
preamble = reflow(
"""
The ".tensorboard-info" directory was created by an old version
of TensorBoard, and its permissions are not set correctly; see
issue #2010. Change that directory to be world-accessible (may
require superuser privilege):
"""
)
# This error should only appear on Unices, so it's okay to use
# Unix-specific utilities and shell syntax.
quote = getattr(shlex, "quote", None) or pipes.quote # Python <3.3
command = "chmod 777 %s" % quote(path)
message = "%s\n\n\t%s" % (preamble, command)
yield Suggestion('Fix permissions on "%s"' % path, message)
@check
def source_trees_without_genfiles():
roots = list(sys.path)
if "" not in roots:
# Catch problems that would occur in a Python interactive shell
# (where `""` is prepended to `sys.path`) but not when
# `diagnose_tensorboard.py` is run as a standalone script.
roots.insert(0, "")
def has_tensorboard(root):
return os.path.isfile(os.path.join(root, "tensorboard", "__init__.py"))
def has_genfiles(root):
sample_genfile = os.path.join("compat", "proto", "summary_pb2.py")
return os.path.isfile(os.path.join(root, "tensorboard", sample_genfile))
def is_bad(root):
return has_tensorboard(root) and not has_genfiles(root)
tensorboard_roots = [root for root in roots if has_tensorboard(root)]
bad_roots = [root for root in roots if is_bad(root)]
logging.info(
"tensorboard_roots (%d): %r; bad_roots (%d): %r",
len(tensorboard_roots),
tensorboard_roots,
len(bad_roots),
bad_roots,
)
if bad_roots:
if bad_roots == [""]:
message = reflow(
"""
Your current directory contains a `tensorboard` Python package
that does not include generated files. This can happen if your
current directory includes the TensorBoard source tree (e.g.,
you are in the TensorBoard Git repository). Consider changing
to a different directory.
"""
)
else:
preamble = reflow(
"""
Your Python path contains a `tensorboard` package that does
not include generated files. This can happen if your current
directory includes the TensorBoard source tree (e.g., you are
in the TensorBoard Git repository). The following directories
from your Python path may be problematic:
"""
)
roots = []
realpaths_seen = set()
for root in bad_roots:
label = repr(root) if root else "current directory"
realpath = os.path.realpath(root)
if realpath in realpaths_seen:
# virtualenvs on Ubuntu install to both `lib` and `local/lib`;
# explicitly call out such duplicates to avoid confusion.
label += " (duplicate underlying directory)"
realpaths_seen.add(realpath)
roots.append(label)
message = "%s\n\n%s" % (
preamble,
"\n".join(" - %s" % s for s in roots),
)
yield Suggestion(
"Avoid `tensorboard` packages without genfiles", message
)
# Prefer to include this check last, as its output is long.
@check
def full_pip_freeze():
logging.info(
"pip freeze --all:\n%s", pip(["freeze", "--all"]).decode("utf-8")
)
def set_up_logging():
# Manually install handlers to prevent TensorFlow from stomping the
# default configuration if it's imported:
# https://github.com/tensorflow/tensorflow/issues/28147
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
logger.addHandler(handler)
def main():
set_up_logging()
print("### Diagnostics")
print()
print("<details>")
print("<summary>Diagnostics output</summary>")
print()
markdown_code_fence = "``````" # seems likely to be sufficient
print(markdown_code_fence)
suggestions = []
for (i, check) in enumerate(CHECKS):
if i > 0:
print()
print("--- check: %s" % check.__name__)
try:
suggestions.extend(check())
except Exception:
traceback.print_exc(file=sys.stdout)
pass
print(markdown_code_fence)
print()
print("</details>")
for suggestion in suggestions:
print()
print("### Suggestion: %s" % suggestion.headline)
print()
print(suggestion.description)
print()
print("### Next steps")
print()
if suggestions:
print(
reflow(
"""
Please try each suggestion enumerated above to determine whether
it solves your problem. If none of these suggestions works,
please copy ALL of the above output, including the lines
containing only backticks, into your GitHub issue or comment. Be
sure to redact any sensitive information.
"""
)
)
else:
print(
reflow(
"""
No action items identified. Please copy ALL of the above output,
including the lines containing only backticks, into your GitHub
issue or comment. Be sure to redact any sensitive information.
"""
)
)
if __name__ == "__main__":
main()
| 33.865487 | 82 | 0.610902 |
20e7f581435e325d89fb845110fc48bbbdf3cb72 | 2,465 | py | Python
| opentelemetry-api/setup.py | ThePumpingLemma/opentelemetry-python | 9ed98eb9320b9064e43c3b43ee7c4990eec3657a | ["Apache-2.0"] | null | null | null |
| opentelemetry-api/setup.py | ThePumpingLemma/opentelemetry-python | 9ed98eb9320b9064e43c3b43ee7c4990eec3657a | ["Apache-2.0"] | null | null | null |
| opentelemetry-api/setup.py | ThePumpingLemma/opentelemetry-python | 9ed98eb9320b9064e43c3b43ee7c4990eec3657a | ["Apache-2.0"] | null | null | null |
# Copyright 2019, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import setuptools
BASE_DIR = os.path.dirname(__file__)
VERSION_FILENAME = os.path.join(
BASE_DIR, "src", "opentelemetry", "util", "version.py"
)
PACKAGE_INFO = {}
with open(VERSION_FILENAME) as f:
exec(f.read(), PACKAGE_INFO)
setuptools.setup(
name="opentelemetry-api",
version=PACKAGE_INFO["__version__"],
author="OpenTelemetry Authors",
author_email="cncf-opentelemetry-contributors@lists.cncf.io",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
description="OpenTelemetry Python API",
include_package_data=True,
long_description=open("README.rst").read(),
long_description_content_type="text/x-rst",
install_requires=[
"typing; python_version<'3.5'",
"aiocontextvars; python_version<'3.7' and python_version>='3.5'",
],
extras_require={},
license="Apache-2.0",
package_dir={"": "src"},
packages=setuptools.find_namespace_packages(
where="src", include="opentelemetry.*"
),
url=(
"https://github.com/open-telemetry/opentelemetry-python"
"/tree/master/opentelemetry-api"
),
zip_safe=False,
entry_points={
"opentelemetry_context": [
"contextvars_context = "
"opentelemetry.context.contextvars_context:"
"ContextVarsRuntimeContext",
"threadlocal_context = "
"opentelemetry.context.threadlocal_context:"
"ThreadLocalRuntimeContext",
]
},
)
| 33.767123 | 74 | 0.664909 |
33d63c5c5b02e606550a6c37cb50f553f4f798a5 | 282 | py | Python
| solutions/Luck_check.py | AlexGameAndWebDev/CodeWars-Python | 222b8244e9f248dbb4e5fabd390dd4cce446dc84 | ["MIT"] | 44 | 2015-05-24T13:46:22.000Z | 2022-03-22T10:40:10.000Z
| solutions/Luck_check.py | badruu/CodeWars-Python | 222b8244e9f248dbb4e5fabd390dd4cce446dc84 | ["MIT"] | 3 | 2016-09-10T07:14:02.000Z | 2021-09-14T12:16:25.000Z
| solutions/Luck_check.py | badruu/CodeWars-Python | 222b8244e9f248dbb4e5fabd390dd4cce446dc84 | ["MIT"] | 48 | 2016-04-03T04:48:33.000Z | 2022-03-14T23:32:17.000Z |
"""
Luck check
http://www.codewars.com/kata/5314b3c6bb244a48ab00076c/train/python
"""
def luck_check(string):
i, j, total = 0, len(string) - 1, 0
while i != j and i < j:
total += (int(string[i]) - int(string[j]))
i += 1
j -= 1
return total == 0
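# Illustrative usage (not part of the kata solution above).  A ticket is
# "lucky" when the digit sums of its two halves match; the middle digit of an
# odd-length ticket is ignored.
if __name__ == "__main__":
    assert luck_check("683179")      # 6+8+3 == 1+7+9
    assert not luck_check("683000")  # 17 != 0
    assert luck_check("12021")       # middle digit ignored: 1+2 == 2+1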
| 21.692308 | 66 | 0.556738 |
aa75c2452fa34b449c1595bc69b1e225d220c8d1 | 541 | py | Python
| glashammer/utils/log.py | passy/glashammer-rdrei | 9e56952d70b961d8945707469aad9cfe97c4e7b7 | ["MIT"] | 1 | 2016-07-04T15:23:59.000Z | 2016-07-04T15:23:59.000Z
| glashammer/utils/log.py | passy/glashammer-rdrei | 9e56952d70b961d8945707469aad9cfe97c4e7b7 | ["MIT"] | null | null | null |
| glashammer/utils/log.py | passy/glashammer-rdrei | 9e56952d70b961d8945707469aad9cfe97c4e7b7 | ["MIT"] | null | null | null |
from logging import getLogger
def add_log_handler(handler):
root_logger = getLogger('')
root_logger.addHandler(handler)
def debug(msg):
# Racy
from glashammer.utils import emit_event
emit_event('log', 'debug', msg)
def info(msg):
# Racy
from glashammer.utils import emit_event
    emit_event('log', 'info', msg)
def warning(msg):
# Racy
from glashammer.utils import emit_event
emit_event('log', 'warning', msg)
def error(msg):
# Racy
from glashammer.utils import emit_event
emit_event('log', 'error', msg)
| 16.90625 | 43 | 0.665434 |
0b14731419a7f2ec299cdbe928ffacc7ee1f1165 | 1,278 | py | Python
| dataset.py | NB-prog/multiclass-classificationDistilBert | ed64da14ac59e1c2992a8eb80e45befee6f02879 | ["MIT"] | null | null | null |
| dataset.py | NB-prog/multiclass-classificationDistilBert | ed64da14ac59e1c2992a8eb80e45befee6f02879 | ["MIT"] | null | null | null |
| dataset.py | NB-prog/multiclass-classificationDistilBert | ed64da14ac59e1c2992a8eb80e45befee6f02879 | ["MIT"] | null | null | null |
import config
import torch
from torch.utils.data import Dataset, DataLoader
import transformers
from transformers import DistilBertModel, DistilBertTokenizer
class Triage(Dataset):
def __init__(self, dataframe):
self.len = len(dataframe)
self.data = dataframe
self.tokenizer = config.tokenizer
self.max_len = config.MAX_LEN
def __getitem__(self, index):
title = str(self.data.TITLE[index])
title = " ".join(title.split())
inputs = self.tokenizer.encode_plus(
title,
None,
add_special_tokens=True,
max_length=self.max_len,
padding=True,
return_token_type_ids=True,
truncation=True
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
padding_length = self.max_len - len(ids)
ids = ids + ([0] * padding_length)
mask = mask + ([0] * padding_length)
return {
'ids': torch.tensor(ids, dtype=torch.long),
'mask': torch.tensor(mask, dtype=torch.long),
'targets': torch.tensor(self.data.ENCODE_CAT[index], dtype=torch.long)
}
def __len__(self):
return self.len
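# Illustrative usage sketch (not part of the original dataset module).
# Assumes a pandas DataFrame with the TITLE and ENCODE_CAT columns expected
# above, and that the imported `config` module provides tokenizer and MAX_LEN.
def _build_demo_loader() -> DataLoader:
    import pandas as pd
    frame = pd.DataFrame(
        {"TITLE": ["example headline", "another headline"], "ENCODE_CAT": [0, 1]}
    )
    return DataLoader(Triage(frame), batch_size=2, shuffle=True)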
| 31.170732 | 83 | 0.576682 |
ddf20b6a36293ec87eda1b7bbedccaf358e26505 | 1,242 | py | Python
| load_testing/ial2_sign_up.locustfile.py | 18F/identity-loadtes | b437643f21bb47a1b2c7eff82e9bd6569e9f081b | ["CC0-1.0"] | null | null | null |
| load_testing/ial2_sign_up.locustfile.py | 18F/identity-loadtes | b437643f21bb47a1b2c7eff82e9bd6569e9f081b | ["CC0-1.0"] | 18 | 2020-03-25T21:44:21.000Z | 2021-12-16T00:47:59.000Z
| load_testing/ial2_sign_up.locustfile.py | 18F/identity-loadtes | b437643f21bb47a1b2c7eff82e9bd6569e9f081b | ["CC0-1.0"] | 2 | 2021-01-12T10:08:38.000Z | 2021-08-24T17:35:40.000Z |
from locust import HttpUser, TaskSet, task, between
from common_flows import flow_ial2_proofing, flow_sign_up, flow_helper
import logging
class IAL2SignUpLoad(TaskSet):
# Preload drivers license data
license_front = flow_helper.load_fixture("mock-front.jpeg")
license_back = flow_helper.load_fixture("mock-back.jpeg")
def on_start(self):
logging.info("*** Starting Sign-Up and IAL2 proof load tests ***")
def on_stop(self):
logging.info("*** Ending IAL2 Sign-Up load tests ***")
""" @task(<weight>) : value=3 executes 3x as often as value=1 """
""" Things inside task are synchronous. Tasks are async """
@task(1)
def sign_up_and_proof_load_test(self):
# Sign up flow
flow_sign_up.do_sign_up(self)
# Get /account page
flow_helper.do_request(self, "get", "/account", "/account", "")
# IAL2 Proofing flow
flow_ial2_proofing.do_ial2_proofing(self)
# Get the /account page now
flow_helper.do_request(self, "get", "/account", "/account", "")
# Now log out
flow_helper.do_request(self, "get", "/logout", "/", "")
class WebsiteUser(HttpUser):
tasks = [IAL2SignUpLoad]
wait_time = between(5, 9)
| 29.571429 | 74 | 0.652979 |
015b1a814579cf43b5516d3c75b393af0664560e | 1,016 | py | Python
| esp32-pulsetimer/test-pt.py | tve/mpy-lib | 9f102459c61a5be424291a277e421bd1fc16843a | ["MIT"] | 6 | 2020-02-27T11:17:54.000Z | 2020-12-04T10:14:26.000Z
| esp32-pulsetimer/test-pt.py | tve/mpy-lib | 9f102459c61a5be424291a277e421bd1fc16843a | ["MIT"] | 4 | 2020-07-29T14:07:04.000Z | 2021-05-19T05:10:33.000Z
| esp32-pulsetimer/test-pt.py | tve/mpy-lib | 9f102459c61a5be424291a277e421bd1fc16843a | ["MIT"] | 3 | 2020-05-16T08:15:16.000Z | 2021-09-30T10:39:37.000Z |
import time, machine
out_pin = 21
in_pin = 22
print("Testing pulsetimer, outputting on pin %d and reading pulsetimer on pin %d" % (out_pin, in_pin))
print("Loading pulsetimer module")
time.sleep_ms(100)
import pulsetimer
print("Instantiating output pin")
out = machine.Pin(out_pin, machine.Pin.OUT, value=0)
print("Setting up pulsetimer")
pt_pin = machine.Pin(in_pin, machine.Pin.IN)
def dummy(p):
pass
q = []
def pulse(t):
q.append(t)
pt_pin.irq(dummy, machine.Pin.IRQ_RISING)
pulsetimer.set_time_handler(in_pin, pulse)
if True:
print("Testing positive edge")
assert len(q) == 0
out(1)
time.sleep_ms(100)
print(q)
assert len(q) == 1
print("Testing negative edge")
out(0)
time.sleep_ms(100)
assert len(q) == 1
print("Testing 10 pulses")
for i in range(10):
out(1)
out(0)
time.sleep_ms(20)
time.sleep_ms(100)
print([time.ticks_diff(q[i], q[i - 1]) for i in range(1, len(q))])
assert len(q) == 11
print("DONE")
| 17.824561
| 99
| 0.647638
|
f0aaeb810c20debffc4af960dcd2634ce2c58be8
| 4,476
|
py
|
Python
|
lite/examples/speech_commands/ml/train.py
|
samirma/examples
|
eab959e9033aff9b08ac8e8f90715f71ce7861ec
|
[
"Apache-2.0"
] | null | null | null |
lite/examples/speech_commands/ml/train.py
|
samirma/examples
|
eab959e9033aff9b08ac8e8f90715f71ce7861ec
|
[
"Apache-2.0"
] | null | null | null |
lite/examples/speech_commands/ml/train.py
|
samirma/examples
|
eab959e9033aff9b08ac8e8f90715f71ce7861ec
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.callbacks import TensorBoard
from callbacks import ConfusionMatrixCallback
from model import speech_model, prepare_model_settings
from generator import AudioProcessor, prepare_words_list
from classes import get_classes
from utils import data_gen
parser = argparse.ArgumentParser(description='set input arguments')
parser.add_argument(
'-sample_rate',
action='store',
dest='sample_rate',
type=int,
default=16000,
help='Sample rate of audio')
parser.add_argument(
'-batch_size',
action='store',
dest='batch_size',
type=int,
default=32,
help='Size of the training batch')
parser.add_argument(
'-output_representation',
action='store',
dest='output_representation',
type=str,
default='raw',
help='raw, spec, mfcc or mfcc_and_raw')
parser.add_argument(
'-data_dirs',
'--list',
dest='data_dirs',
nargs='+',
required=True,
help='<Required> The list of data directories. e.g., data/train')
args = parser.parse_args()
parser.print_help()
print('input args: ', args)
if __name__ == '__main__':
data_dirs = args.data_dirs
output_representation = args.output_representation
sample_rate = args.sample_rate
batch_size = args.batch_size
classes = get_classes(wanted_only=True)
model_settings = prepare_model_settings(
label_count=len(prepare_words_list(classes)),
sample_rate=sample_rate,
clip_duration_ms=1000,
window_size_ms=30.0,
window_stride_ms=10.0,
dct_coefficient_count=80,
num_log_mel_features=60,
output_representation=output_representation)
print(model_settings)
ap = AudioProcessor(
data_dirs=data_dirs,
wanted_words=classes,
silence_percentage=13.0,
unknown_percentage=60.0,
validation_percentage=10.0,
testing_percentage=0.0,
model_settings=model_settings,
output_representation=output_representation)
  # data_gen expects a TensorFlow session, but none is created in this snippet;
  # add one here (assuming TF 1.x-style graph execution via tf.compat.v1).
  sess = tf.compat.v1.Session()
  train_gen = data_gen(ap, sess, batch_size=batch_size, mode='training')
val_gen = data_gen(ap, sess, batch_size=batch_size, mode='validation')
model = speech_model(
'conv_1d_time_stacked',
model_settings['fingerprint_size']
if output_representation != 'raw' else model_settings['desired_samples'],
# noqa
num_classes=model_settings['label_count'],
**model_settings)
# embed()
checkpoints_path = os.path.join('checkpoints', 'conv_1d_time_stacked_model')
if not os.path.exists(checkpoints_path):
os.makedirs(checkpoints_path)
callbacks = [
ConfusionMatrixCallback(
val_gen,
ap.set_size('validation') // batch_size,
wanted_words=prepare_words_list(get_classes(wanted_only=True)),
all_words=prepare_words_list(classes),
label2int=ap.word_to_index),
ReduceLROnPlateau(
monitor='val_categorical_accuracy',
mode='max',
factor=0.5,
patience=4,
verbose=1,
min_lr=1e-5),
TensorBoard(log_dir='logs'),
ModelCheckpoint(
os.path.join(checkpoints_path,
'ep-{epoch:03d}-vl-{val_loss:.4f}.hdf5'),
save_best_only=True,
monitor='val_categorical_accuracy',
mode='max')
]
model.fit_generator(
train_gen,
steps_per_epoch=ap.set_size('training') // batch_size,
epochs=100,
verbose=1,
callbacks=callbacks)
eval_res = model.evaluate_generator(val_gen,
ap.set_size('validation') // batch_size)
print(eval_res)
| 31.521127
| 80
| 0.691466
|
51f5f52d487bac13720b21b2e472aabcc4ea0532
| 261
|
py
|
Python
|
pgmpy/utils/decorators.py
|
predictive-analytics-lab/pgmpy
|
6c2a31641adc72793acd130d007190fdb1632271
|
[
"MIT"
] | null | null | null |
pgmpy/utils/decorators.py
|
predictive-analytics-lab/pgmpy
|
6c2a31641adc72793acd130d007190fdb1632271
|
[
"MIT"
] | null | null | null |
pgmpy/utils/decorators.py
|
predictive-analytics-lab/pgmpy
|
6c2a31641adc72793acd130d007190fdb1632271
|
[
"MIT"
] | null | null | null |
def convert_args_tuple(func):
def _convert_param_to_tuples(obj, variable, parents=tuple(), complete_samples_only=None):
parents = tuple(parents)
return func(obj, variable, parents, complete_samples_only)
return _convert_param_to_tuples
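# --- Illustrative sketch (not part of the original file) ---
# Hypothetical usage of the decorator above: callers may pass `parents` as any
# iterable (list, set, generator) and the wrapped method always receives a tuple.
# The class and method names below are made up for illustration.
class _StateCounter:
    @convert_args_tuple
    def state_counts(self, variable, parents=tuple(), complete_samples_only=None):
        return variable, parents

# _StateCounter().state_counts("A", parents=["B", "C"]) returns ("A", ("B", "C"))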
| 37.285714
| 93
| 0.758621
|
1264a8bb915513b19aa6ec501f841ec3f3944db5
| 7,481
|
py
|
Python
|
pybind/nos/v7_1_0/rbridge_id/route_map/content/set_/ip/global_/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/nos/v7_1_0/rbridge_id/route_map/content/set_/ip/global_/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/nos/v7_1_0/rbridge_id/route_map/content/set_/ip/global_/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import next_global_hop
class global_(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-rbridge - based on the path /rbridge-id/route-map/content/set/ip/global. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Next hop Global IP address
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__next_global_hop',)
_yang_name = 'global'
_rest_name = 'global'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__next_global_hop = YANGDynClass(base=YANGListType("next_hop",next_global_hop.next_global_hop, yang_name="next-global-hop", rest_name="next-global-hop", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='next-hop', extensions={u'tailf-common': {u'callpoint': u'pbrglobalnexthop-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="next-global-hop", rest_name="next-global-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pbrglobalnexthop-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'rbridge-id', u'route-map', u'content', u'set', u'ip', u'global']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'rbridge-id', u'route-map', u'set', u'ip', u'global']
def _get_next_global_hop(self):
"""
Getter method for next_global_hop, mapped from YANG variable /rbridge_id/route_map/content/set/ip/global/next_global_hop (list)
"""
return self.__next_global_hop
def _set_next_global_hop(self, v, load=False):
"""
Setter method for next_global_hop, mapped from YANG variable /rbridge_id/route_map/content/set/ip/global/next_global_hop (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_next_global_hop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_next_global_hop() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("next_hop",next_global_hop.next_global_hop, yang_name="next-global-hop", rest_name="next-global-hop", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='next-hop', extensions={u'tailf-common': {u'callpoint': u'pbrglobalnexthop-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="next-global-hop", rest_name="next-global-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pbrglobalnexthop-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """next_global_hop must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("next_hop",next_global_hop.next_global_hop, yang_name="next-global-hop", rest_name="next-global-hop", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='next-hop', extensions={u'tailf-common': {u'callpoint': u'pbrglobalnexthop-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="next-global-hop", rest_name="next-global-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pbrglobalnexthop-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)""",
})
self.__next_global_hop = t
if hasattr(self, '_set'):
self._set()
def _unset_next_global_hop(self):
self.__next_global_hop = YANGDynClass(base=YANGListType("next_hop",next_global_hop.next_global_hop, yang_name="next-global-hop", rest_name="next-global-hop", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='next-hop', extensions={u'tailf-common': {u'callpoint': u'pbrglobalnexthop-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="next-global-hop", rest_name="next-global-hop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pbrglobalnexthop-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)
next_global_hop = __builtin__.property(_get_next_global_hop, _set_next_global_hop)
_pyangbind_elements = {'next_global_hop': next_global_hop, }
| 59.373016
| 825
| 0.725972
|
cd63b278f1d4c6a14a58216501e618ab745f04bd
| 1,619
|
py
|
Python
|
atst/forms/forms.py
|
philip-dds/atst
|
a227044ccf464dd0e3144dd74cecfafe8d6841b9
|
[
"MIT"
] | 1
|
2020-01-16T16:15:52.000Z
|
2020-01-16T16:15:52.000Z
|
atst/forms/forms.py
|
philip-dds/atst
|
a227044ccf464dd0e3144dd74cecfafe8d6841b9
|
[
"MIT"
] | null | null | null |
atst/forms/forms.py
|
philip-dds/atst
|
a227044ccf464dd0e3144dd74cecfafe8d6841b9
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from flask import current_app, request as http_request
import re
from atst.utils.flash import formatted_flash as flash
EMPTY_LIST_FIELD = ["", None]
def remove_empty_string(value):
# only return strings that contain non whitespace characters
if value and re.search(r"\S", value):
return value.strip()
else:
return None
class BaseForm(FlaskForm):
def __init__(self, formdata=None, **kwargs):
# initialize the form with data from the cache
formdata = formdata or {}
cached_data = current_app.form_cache.from_request(http_request)
cached_data.update(formdata)
super().__init__(cached_data, **kwargs)
@property
def data(self):
# remove 'csrf_token' key/value pair
# remove empty strings and None from list fields
# prevent values that are not an option in a RadioField from being saved to the DB
_data = super().data
_data.pop("csrf_token", None)
for field in _data:
if _data[field].__class__.__name__ == "list":
_data[field] = [el for el in _data[field] if el not in EMPTY_LIST_FIELD]
if self[field].__class__.__name__ == "RadioField":
choices = [el[0] for el in self[field].choices]
if _data[field] not in choices:
_data[field] = None
return _data
def validate(self, *args, flash_invalid=True, **kwargs):
valid = super().validate(*args, **kwargs)
if not valid and flash_invalid:
flash("form_errors")
return valid
| 34.446809
| 90
| 0.639284
|
20b514c63c39a8e277d8f178ee401ad7385f98c1
| 17,111
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/ShowInterfaces/cli/equal/golden_output_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/iosxe/tests/ShowInterfaces/cli/equal/golden_output_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/iosxe/tests/ShowInterfaces/cli/equal/golden_output_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
expected_output = {
"Port-channel12": {
"flow_control": {"send": False, "receive": False},
"err_disabled": False,
"suspended": False,
"type": "EtherChannel",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d23h",
"out_interface_resets": 2,
"in_mac_pause_frames": 0,
"out_collision": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 2000,
"in_rate_pkts": 2,
},
"in_watchdog": 0,
"out_deferred": 0,
"out_mac_pause_frames": 0,
"in_pkts": 961622,
"in_multicast_pkts": 4286699522,
"in_runts": 0,
"out_unknown_protocl_drops": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_lost_carrier": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 72614643,
"in_crc_errors": 0,
"out_no_carrier": 0,
"in_with_dribble": 0,
"in_broadcast_pkts": 944818,
"out_pkts": 39281,
"out_late_collision": 0,
"out_octets": 6235318,
"in_overrun": 0,
"out_babble": 0,
},
"auto_negotiate": True,
"phys_address": "0057.d2ff.422a",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"oper_status": "up",
"arp_type": "arpa",
"rxload": "1/255",
"duplex_mode": "full",
"link_type": "auto",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 2000,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 0,
"queue_strategy": "fifo",
},
"encapsulations": {
"encapsulation": "qinq virtual lan",
"first_dot1q": "10",
"second_dot1q": "20",
},
"last_input": "never",
"last_output": "1d22h",
"line_protocol": "up",
"mac_address": "0057.d2ff.422a",
"connected": True,
"port_channel": {
"port_channel_member": True,
"port_channel_member_intfs": ["GigabitEthernet1/0/2"],
},
"arp_timeout": "04:00:00",
"bandwidth": 1000000,
"port_speed": "1000mb/s",
"enabled": True,
"mtu": 1500,
"delay": 10,
"reliability": "255/255",
},
"GigabitEthernet1/0/1": {
"flow_control": {"send": False, "receive": False},
"err_disabled": False,
"suspended": False,
"type": "Gigabit Ethernet",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d02h",
"out_interface_resets": 2,
"in_mac_pause_frames": 0,
"out_collision": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 30,
"out_rate": 0,
"in_rate": 0,
"in_rate_pkts": 0,
},
"in_watchdog": 0,
"out_deferred": 0,
"out_mac_pause_frames": 0,
"in_pkts": 12127,
"in_multicast_pkts": 4171,
"in_runts": 0,
"out_unknown_protocl_drops": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_lost_carrier": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 2297417,
"in_crc_errors": 0,
"out_no_carrier": 0,
"in_with_dribble": 0,
"in_broadcast_pkts": 4173,
"out_pkts": 12229,
"out_late_collision": 0,
"out_octets": 2321107,
"in_overrun": 0,
"out_babble": 0,
},
"phys_address": "0057.d2ff.428c",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"description": "desc",
"oper_status": "down",
"arp_type": "arpa",
"rxload": "1/255",
"duplex_mode": "auto",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 375,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 40,
"queue_strategy": "fifo",
},
"ipv4": {"10.1.1.1/24": {"prefix_length": "24", "ip": "10.1.1.1"}},
"encapsulations": {"encapsulation": "arpa"},
"last_input": "never",
"last_output": "04:39:18",
"line_protocol": "down",
"mac_address": "0057.d2ff.428c",
"connected": False,
"port_channel": {"port_channel_member": False},
"media_type": "10/100/1000BaseTX",
"bandwidth": 768,
"port_speed": "1000mb/s",
"enabled": False,
"arp_timeout": "04:00:00",
"mtu": 1500,
"delay": 3330,
"reliability": "255/255",
},
"GigabitEthernet3": {
"flow_control": {"send": False, "receive": False},
"type": "CSR vNIC",
"auto_negotiate": True,
"duplex_mode": "full",
"link_type": "auto",
"media_type": "RJ45",
"port_speed": "1000mbps",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "never",
"out_interface_resets": 1,
"in_mac_pause_frames": 0,
"out_collision": 0,
"in_crc_errors": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 0,
"in_rate_pkts": 0,
},
"in_watchdog": 0,
"out_deferred": 0,
"out_mac_pause_frames": 0,
"in_pkts": 6,
"in_multicast_pkts": 0,
"in_runts": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 480,
"out_unknown_protocl_drops": 0,
"out_no_carrier": 0,
"out_lost_carrier": 0,
"in_broadcast_pkts": 0,
"out_pkts": 28,
"out_late_collision": 0,
"out_octets": 7820,
"in_overrun": 0,
"out_babble": 0,
},
"phys_address": "5254.00ff.0e7e",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"reliability": "255/255",
"arp_type": "arpa",
"rxload": "1/255",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 375,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 40,
"queue_strategy": "fifo",
},
"ipv4": {
"192.168.154.1/24": {"prefix_length": "24", "ip": "192.168.154.1"},
"unnumbered": {"interface_ref": "Loopback0"},
},
"encapsulations": {"encapsulation": "arpa"},
"last_output": "00:00:27",
"line_protocol": "up",
"mac_address": "5254.00ff.0e7e",
"oper_status": "up",
"port_channel": {"port_channel_member": False},
"arp_timeout": "04:00:00",
"bandwidth": 1000000,
"enabled": True,
"mtu": 1500,
"delay": 10,
"last_input": "never",
},
"Loopback0": {
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 75,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 0,
"queue_strategy": "fifo",
},
"mtu": 1514,
"encapsulations": {"encapsulation": "loopback"},
"last_output": "never",
"type": "Loopback",
"line_protocol": "up",
"oper_status": "up",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d04h",
"out_interface_resets": 0,
"out_collision": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 0,
"in_rate_pkts": 0,
},
"in_pkts": 0,
"in_multicast_pkts": 0,
"in_runts": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 0,
"in_crc_errors": 0,
"out_unknown_protocl_drops": 0,
"in_broadcast_pkts": 0,
"out_pkts": 72,
"out_octets": 5760,
"in_overrun": 0,
"in_abort": 0,
},
"reliability": "255/255",
"bandwidth": 8000000,
"port_channel": {"port_channel_member": False},
"enabled": True,
"ipv4": {"192.168.154.1/24": {"prefix_length": "24", "ip": "192.168.154.1"}},
"rxload": "1/255",
"delay": 5000,
"last_input": "1d02h",
},
"Vlan100": {
"type": "Ethernet SVI",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d04h",
"out_interface_resets": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 0,
"in_rate_pkts": 0,
},
"in_pkts": 50790,
"in_multicast_pkts": 0,
"in_runts": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 3657594,
"in_crc_errors": 0,
"out_unknown_protocl_drops": 0,
"in_broadcast_pkts": 0,
"out_pkts": 72,
"out_octets": 5526,
"in_overrun": 0,
},
"phys_address": "0057.d2ff.4279",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 375,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 40,
"queue_strategy": "fifo",
},
"txload": "1/255",
"reliability": "255/255",
"arp_type": "arpa",
"rxload": "1/255",
"output_hang": "never",
"ipv4": {"192.168.234.1/24": {"prefix_length": "24", "ip": "192.168.234.1"}},
"encapsulations": {"encapsulation": "arpa"},
"last_output": "1d03h",
"line_protocol": "up",
"mac_address": "0057.d2ff.4279",
"oper_status": "up",
"port_channel": {"port_channel_member": False},
"arp_timeout": "04:00:00",
"bandwidth": 1000000,
"enabled": True,
"mtu": 1500,
"delay": 10,
"last_input": "never",
},
"GigabitEthernet1/0/2": {
"flow_control": {"send": False, "receive": False},
"err_disabled": False,
"suspended": False,
"type": "Gigabit Ethernet",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d02h",
"out_interface_resets": 5,
"in_mac_pause_frames": 0,
"out_collision": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 3000,
"in_rate_pkts": 5,
},
"in_watchdog": 0,
"out_deferred": 0,
"out_mac_pause_frames": 0,
"in_pkts": 545526,
"in_multicast_pkts": 535961,
"in_runts": 0,
"out_unknown_protocl_drops": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_lost_carrier": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 41210298,
"in_crc_errors": 0,
"out_no_carrier": 0,
"in_with_dribble": 0,
"in_broadcast_pkts": 535996,
"out_pkts": 23376,
"out_late_collision": 0,
"out_octets": 3642296,
"in_overrun": 0,
"out_babble": 0,
},
"phys_address": "0057.d2ff.422a",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"oper_status": "up",
"arp_type": "arpa",
"media_type": "10/100/1000BaseTX",
"rxload": "1/255",
"duplex_mode": "full",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 2000,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 40,
"queue_strategy": "fifo",
},
"encapsulations": {"encapsulation": "arpa"},
"last_input": "never",
"last_output": "00:00:02",
"line_protocol": "up",
"mac_address": "0057.d2ff.422a",
"connected": True,
"port_channel": {
"port_channel_member": True,
"port_channel_int": "Port-channel12",
},
"arp_timeout": "04:00:00",
"bandwidth": 1000000,
"port_speed": "1000mb/s",
"enabled": True,
"mtu": 1500,
"delay": 10,
"reliability": "255/255",
},
"GigabitEthernet0/0/4": {
"arp_timeout": "04:00:00",
"arp_type": "arpa",
"bandwidth": 1000000,
"auto_negotiate": True,
"counters": {
"in_broadcast_pkts": 0,
"in_crc_errors": 0,
"in_errors": 0,
"in_frame": 0,
"in_giants": 0,
"in_ignored": 0,
"in_mac_pause_frames": 0,
"in_multicast_pkts": 0,
"in_no_buffer": 0,
"in_octets": 0,
"in_overrun": 0,
"in_pkts": 0,
"in_runts": 0,
"in_throttles": 0,
"in_watchdog": 0,
"last_clear": "never",
"out_babble": 0,
"out_collision": 0,
"out_deferred": 0,
"out_errors": 0,
"out_interface_resets": 1,
"out_late_collision": 0,
"out_lost_carrier": 0,
"out_mac_pause_frames": 0,
"out_no_carrier": 0,
"out_octets": 0,
"out_pkts": 0,
"out_underruns": 0,
"out_unknown_protocl_drops": 0,
"rate": {
"in_rate": 0,
"in_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"out_rate_pkts": 0,
},
},
"delay": 10,
"duplex_mode": "full",
"link_type": "auto",
"port_speed": "1000mbps",
"media_type": "unknown",
"enabled": False,
"encapsulations": {"encapsulation": "arpa"},
"flow_control": {"receive": False, "send": False},
"last_input": "never",
"last_output": "never",
"line_protocol": "down",
"mac_address": "380e.4dff.dc72",
"phys_address": "380e.4dff.dc72",
"mtu": 1500,
"oper_status": "down",
"output_hang": "never",
"port_channel": {"port_channel_member": False},
"queues": {
"input_queue_drops": 0,
"input_queue_flushes": 0,
"input_queue_max": 375,
"input_queue_size": 0,
"output_queue_max": 40,
"output_queue_size": 0,
"queue_strategy": "fifo",
"total_output_drop": 0,
},
"reliability": "255/255",
"rxload": "1/255",
"txload": "1/255",
"type": "BUILT-IN-2T+6X1GE",
},
}
| 31.804833
| 85
| 0.453919
|
5445646be3f26f6a69b259975e86cdb28353313a
| 1,254
|
py
|
Python
|
tests/unit/notifier/test_slack_notifer.py
|
steveYeah/amqpeek
|
0bcbedb2c96a4d77306e6143fac450ad620aad0d
|
[
"MIT"
] | 3
|
2016-10-30T23:30:25.000Z
|
2019-03-13T13:44:05.000Z
|
tests/unit/notifier/test_slack_notifer.py
|
steveYeah/amqpeek
|
0bcbedb2c96a4d77306e6143fac450ad620aad0d
|
[
"MIT"
] | 7
|
2016-10-13T11:53:05.000Z
|
2020-10-02T15:34:54.000Z
|
tests/unit/notifier/test_slack_notifer.py
|
steveYeah/amqpeek
|
0bcbedb2c96a4d77306e6143fac450ad620aad0d
|
[
"MIT"
] | 2
|
2017-05-03T10:18:20.000Z
|
2017-05-18T05:29:01.000Z
|
"""Tests the slack notifier module."""
from unittest.mock import patch
import pytest
from amqpeek.notifier import SlackNotifier
class TestSlackNotifier(object):
"""Tests for the SlackNotifier class."""
@pytest.fixture
def slack_notifier_args(self) -> dict:
"""Some default args to use for these tests."""
return {"api_key": "my_key", "username": "test", "channel": "#general"}
@pytest.fixture
def slack_notifier(self, slack_notifier_args: dict) -> SlackNotifier:
"""Patch the slack notifier."""
with patch("amqpeek.notifier.Slacker"):
return SlackNotifier(**slack_notifier_args)
def test_notify(
self,
slack_notifier: SlackNotifier,
slack_notifier_args: dict,
message_args: dict,
) -> None:
"""Test the notfiy method calls slack correctly."""
slack_notifier.notify(
subject=message_args["subject"], message=message_args["message"]
)
slack_notifier.slack.chat.post_message.assert_called_once_with(
channel=slack_notifier_args["channel"],
text="{}: {}".format(message_args["subject"], message_args["message"]),
username=slack_notifier_args["username"],
)
| 31.35
| 83
| 0.649123
|
8e7b321484c32cb61222cb2fd645aefc315d8112
| 1,589
|
py
|
Python
|
tests/wycheproof/test_x448.py
|
wdscxsj/cryptography
|
94590a9aecc9e5ef6fc8eda52bae43643a4c44bd
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 4,492
|
2015-01-02T23:02:52.000Z
|
2022-03-31T12:59:57.000Z
|
tests/wycheproof/test_x448.py
|
wdscxsj/cryptography
|
94590a9aecc9e5ef6fc8eda52bae43643a4c44bd
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 3,692
|
2015-01-01T03:16:56.000Z
|
2022-03-31T19:20:25.000Z
|
tests/wycheproof/test_x448.py
|
wdscxsj/cryptography
|
94590a9aecc9e5ef6fc8eda52bae43643a4c44bd
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 1,155
|
2015-01-09T00:48:05.000Z
|
2022-03-31T23:46:43.000Z
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import binascii
import pytest
from cryptography.hazmat.primitives.asymmetric.x448 import (
X448PrivateKey,
X448PublicKey,
)
from .utils import wycheproof_tests
@pytest.mark.supported(
only_if=lambda backend: backend.x448_supported(),
skip_message="Requires OpenSSL with X448 support",
)
@wycheproof_tests("x448_test.json")
def test_x448(backend, wycheproof):
assert set(wycheproof.testgroup.items()) == {
("curve", "curve448"),
("type", "XdhComp"),
}
private_key = X448PrivateKey.from_private_bytes(
binascii.unhexlify(wycheproof.testcase["private"])
)
public_key_bytes = binascii.unhexlify(wycheproof.testcase["public"])
if len(public_key_bytes) == 57:
assert wycheproof.acceptable
assert wycheproof.has_flag("NonCanonicalPublic")
with pytest.raises(ValueError):
X448PublicKey.from_public_bytes(public_key_bytes)
return
public_key = X448PublicKey.from_public_bytes(public_key_bytes)
assert wycheproof.valid or wycheproof.acceptable
expected = binascii.unhexlify(wycheproof.testcase["shared"])
if expected == b"\x00" * 56:
assert wycheproof.acceptable
# OpenSSL returns an error on all zeros shared key
with pytest.raises(ValueError):
private_key.exchange(public_key)
else:
assert private_key.exchange(public_key) == expected
| 30.557692
| 79
| 0.713656
|
e029b55e773f1ab54a0fe9ede69f7a4a3d1ae9e2
| 375
|
py
|
Python
|
base/tasks.py
|
0xelectron/mhtportal-web
|
bd05069d6245e86d4ae887cacf33b04ef9476816
|
[
"MIT"
] | null | null | null |
base/tasks.py
|
0xelectron/mhtportal-web
|
bd05069d6245e86d4ae887cacf33b04ef9476816
|
[
"MIT"
] | 5
|
2019-10-20T06:17:36.000Z
|
2021-06-10T18:13:29.000Z
|
base/tasks.py
|
0xelectron/mhtportal-web
|
bd05069d6245e86d4ae887cacf33b04ef9476816
|
[
"MIT"
] | 2
|
2019-05-11T17:25:25.000Z
|
2019-10-12T17:59:47.000Z
|
import logging
import requests
from celery import shared_task
logger = logging.getLogger(__name__)
@shared_task
def send_sms_async(url, params=None):
try:
if params:
requests.post(url, data=params)
else:
requests.get(url)
except requests.RequestException as e:
logger.exception('While sending sms using requests')
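# --- Illustrative sketch (not part of the original file) ---
# Hypothetical enqueueing of the task above from application code; the URL and
# payload keys are placeholders, and a running Celery broker/worker is required
# for .delay() to actually execute anything.
def notify_by_sms(gateway_url, recipient, text):
    # Queue the HTTP call on a worker instead of blocking the caller.
    send_sms_async.delay(gateway_url, params={"to": recipient, "text": text})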
| 19.736842
| 60
| 0.68
|
ffd3816d8cc9a364e7823e48f7eb3be36c2eef11
| 8,807
|
py
|
Python
|
tests/test_model.py
|
toni-moreno/loudml
|
0252b6792393fc46f2dc9c1da25dd89fc27a5fa4
|
[
"Apache-2.0"
] | 245
|
2018-01-30T08:11:53.000Z
|
2022-03-26T07:17:42.000Z
|
tests/test_model.py
|
robcowart/loudml
|
0008baef02259a8ae81dd210d3f91a51ffc9ed9f
|
[
"Apache-2.0"
] | 620
|
2018-01-28T22:58:24.000Z
|
2022-03-13T13:40:42.000Z
|
tests/test_model.py
|
robcowart/loudml
|
0008baef02259a8ae81dd210d3f91a51ffc9ed9f
|
[
"Apache-2.0"
] | 97
|
2018-03-06T14:44:26.000Z
|
2022-03-24T01:57:31.000Z
|
import unittest
from loudml import (
errors,
)
from loudml.model import (
Feature,
flatten_features,
Model,
)
class TestModel(unittest.TestCase):
def invalid_feature(self, **kwargs):
with self.assertRaises(errors.Invalid):
Feature(**kwargs)
def test_validate_feature(self):
# Valid
Feature(
name="foo",
field="bar",
metric="avg",
)
Feature(
name="foo",
field="bar",
metric="avg",
measurement="baz",
default=0,
)
Feature(
name="foo",
field="prefix.bar",
metric="avg",
measurement="prefix.baz",
)
# Invalid
self.invalid_feature(
name="foo/invalid",
field="bar",
metric="avg",
)
self.invalid_feature(
metric="avg",
field="bar",
)
self.invalid_feature(
name="foo",
field="bar",
)
self.invalid_feature(
name="foo",
metric="avg",
)
self.invalid_feature(
name="foo",
metric="avg",
field="bar",
default="invalid",
)
def invalid_model(self, **kwargs):
with self.assertRaises(errors.Invalid):
Model(**kwargs)
def test_validate_model(self):
# Valid
Model(
settings={
'name': "foo",
'type': "generic",
'features': [
{
'name': 'bar',
'field': 'baz',
'metric': 'avg',
},
{
'name': 'bar',
'field': 'baz',
'metric': 'count',
}
],
}
)
Model(
settings={
'name': "foo",
'type': "generic",
'features': [
{
'name': 'bar',
'field': 'baz',
'metric': 'avg',
},
],
'routing': 'cux',
}
)
Model(
settings={
'name': "foo",
'type': "generic",
'features': [
{
'name': 'bar',
'measurement': 'prefix.measurement',
'field': 'prefix.baz',
'metric': 'avg',
},
],
'routing': 'cux',
}
)
# Invalid
self.invalid_model(
settings={
'type': 'generic',
'features': [
{
'name': 'bar',
'field': 'baz',
'metric': 'avg',
},
],
}
)
self.invalid_model(
settings={
'name': 'foo',
'type': 'generic',
'features': [],
}
)
self.invalid_model(
settings={
'name': "foo",
'type': "generic",
'features': [
{
'name': 'bar',
'field': 'baz',
'metric': 'avg',
'io': 'i',
},
],
}
)
self.invalid_model(
settings={
'name': "foo",
'type': "generic",
'features': [
{
'name': 'bar',
'field': 'baz',
'metric': 'count',
'io': 'o',
}
],
}
)
self.invalid_model(
settings={
'name': 'foo/invalid',
'type': 'generic',
'features': [
{
'name': 'bar',
'field': 'baz',
'metric': 'avg',
},
],
}
)
def test_flatten_features(self):
res = flatten_features([
{
'name': 'foo',
'field': 'foo',
'metric': 'avg',
},
])
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['io'], 'io')
res = flatten_features([
{
'name': 'foo',
'field': 'foo',
'metric': 'avg',
'io': 'i',
},
{
'name': 'bar',
'field': 'bar',
'metric': 'avg',
'io': 'io',
},
{
'name': 'baz',
'field': 'baz',
'metric': 'avg',
'io': 'o',
},
])
self.assertEqual(res, [
{
'name': 'bar',
'field': 'bar',
'metric': 'avg',
'io': 'io',
},
{
'name': 'baz',
'field': 'baz',
'metric': 'avg',
'io': 'o',
},
{
'name': 'foo',
'field': 'foo',
'metric': 'avg',
'io': 'i',
},
])
res = flatten_features({
'io': [
{
'name': 'foo',
'field': 'foo',
'metric': 'avg',
},
],
'o': [
{
'name': 'bar',
'field': 'bar',
'metric': 'avg',
},
],
'i': [
{
'name': 'baz',
'field': 'baz',
'metric': 'avg',
},
]
})
self.assertEqual(res, [
{
'name': 'foo',
'field': 'foo',
'metric': 'avg',
'io': 'io',
},
{
'name': 'bar',
'field': 'bar',
'metric': 'avg',
'io': 'o',
},
{
'name': 'baz',
'field': 'baz',
'metric': 'avg',
'io': 'i',
},
])
def test_agg_id(self):
model = Model(
settings={
'name': "foo",
'type': "generic",
'features': [
{
'name': 'f1',
'measurement': 'm',
'field': 'f',
'metric': 'avg',
},
{
'name': 'f2',
'measurement': 'm',
'field': 'f',
'metric': 'avg',
'match_all': [
{'key': 'key', 'value': 'value'}
],
},
{
'name': 'f3',
'measurement': 'm',
'field': 'f',
'metric': 'avg',
'match_all': [
{'key': 'key', 'value': 'value'}
],
},
{
'name': 'f4',
'measurement': 'm',
'field': 'f',
'metric': 'avg',
'match_all': [
{'key': 'key', 'value': 'value2'}
],
},
{
'name': 'f5',
'field': 'f',
'metric': 'avg',
},
],
}
)
agg_ids = [feature.agg_id for feature in model.features]
self.assertEqual(agg_ids[0], 'm')
self.assertEqual(
agg_ids[1], 'm_ced1b023686195d411caee8450821ff77ed0c5eb')
self.assertEqual(agg_ids[2], agg_ids[1])
self.assertEqual(
agg_ids[3], 'm_7359bacde7a306a62e35501cc9bb905e6b2c6f72')
self.assertEqual(agg_ids[4], 'all')
| 26.368263
| 69
| 0.269899
|
b4dc32f904e7ddc460920e869406ae35587ad393
| 3,553
|
py
|
Python
|
bigbench/api/model.py
|
colinzhaoust/BIG-bench
|
34cf4e737879f2fed7642c8df7648fef47cc463b
|
[
"Apache-2.0"
] | null | null | null |
bigbench/api/model.py
|
colinzhaoust/BIG-bench
|
34cf4e737879f2fed7642c8df7648fef47cc463b
|
[
"Apache-2.0"
] | null | null | null |
bigbench/api/model.py
|
colinzhaoust/BIG-bench
|
34cf4e737879f2fed7642c8df7648fef47cc463b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model abstraction for BIG-bench.
"""
import abc
from typing import List, Union, Optional
class Model(abc.ABC):
"""Abstract language model class for BIG-bench"""
@abc.abstractmethod
def generate_text(
self,
inputs: Union[str, List[str]],
max_length: int,
stop_string: Optional[str],
output_regex: Optional[str],
) -> Union[str, List[str]]:
"""Generates text for given inputs.
Args:
inputs: String or list of strings as inputs for model.
max_length: Maximum string length of output.
stop_string: If specified, model output will be truncated to the shortest
string which includes stop_string.
      output_regex: If specified, the first match to the Python regular
expression output_regex in the model output will be returned. If there is
no match, an empty string will be returned.
Returns:
String or list of strings generated by model.
"""
@abc.abstractmethod
def cond_log_prob(
self,
inputs: Union[str, List[str]],
targets: Union[List[str], List[List[str]]],
absolute_normalization: Optional[bool],
) -> Union[List[float], List[List[float]]]:
"""Computes conditional log probabilities of targets given inputs.
Args:
`inputs`: A single string input or a list of string inputs.
`targets`: Possible string outputs for each input. If input is a
string, this is a list `[t_1, t_2, ..., t_n]` of possible string
outputs. If input is a list of strings, then this is a nested
list `[[t_1, t_2, ..., t_n], ...]` with length equal to `len(inputs)`.
`absolute_normalization`: When True, the function returns the log
probability of unconstrained generation or the target sequence. When
False (default), log probabilities are normalized so that the probabilities
of generating `targets` sum to 1. Note that setting `absolute_normalization`
to True restricts the class of models that can be evaluated to those that
can assign absolute probabilities to sequences.
Returns:
If a single string input is provided, returns a list of
log-probabilities `[lp_1, lp_2, ..., lp_n]` predicted by the model,
where `lp_i = log(prob(t_i | input)` is the conditional log-prob
to generate target `t_i` given input. If a list of string inputs
was provided, returns a list of such elements of the form
`[[lp_1, lp_2, ..., lp_n], ...]`, where each element contains the
log-probabilities for the corresponding input and targets.
In this case, the length of the returned list is `len(input)`.
If conditional probabilities are not supported by the model, the
model returns None.
"""
| 42.807229
| 89
| 0.649029
|
9bc6914528c2ea8cbf07bea7aa7ac6ea43ac798d
| 6,233
|
py
|
Python
|
test/test_simple_macd.py
|
alexanu/TradingBot
|
9fabb6a0be51fbc1dba5a30605a7c29127f29fa2
|
[
"MIT"
] | null | null | null |
test/test_simple_macd.py
|
alexanu/TradingBot
|
9fabb6a0be51fbc1dba5a30605a7c29127f29fa2
|
[
"MIT"
] | null | null | null |
test/test_simple_macd.py
|
alexanu/TradingBot
|
9fabb6a0be51fbc1dba5a30605a7c29127f29fa2
|
[
"MIT"
] | null | null | null |
import os
import sys
import inspect
import pytest
import json
from datetime import datetime as dt
import pandas as pd
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, "{}/src".format(parentdir))
from Strategies.SimpleMACD import SimpleMACD
from Utility.Utils import TradeDirection
from Interfaces.Market import Market
from Components.Broker import Broker, Interval
from Components.IGInterface import IGInterface
from Components.AVInterface import AVInterface
from common.MockRequests import (
ig_request_login,
ig_request_set_account,
ig_request_market_info,
ig_request_prices,
av_request_macd_ext,
av_request_prices,
)
@pytest.fixture
def config():
"""
    Returns a dict with config parameters for the strategy and SimpleMACD
"""
# Read configuration file
try:
with open("config/config.json", "r") as file:
config = json.load(file)
config["alpha_vantage"]["enable"] = True
config["alpha_vantage"]["api_timeout"] = 0
except IOError:
exit()
return config
@pytest.fixture
def credentials():
"""
    Returns a dict with credential parameters
"""
return {
"username": "user",
"password": "pwd",
"api_key": "12345",
"account_id": "12345",
"av_api_key": "12345",
}
@pytest.fixture
def broker(config, credentials, requests_mock):
"""
Initialise the strategy with mock services
"""
ig_request_login(requests_mock)
ig_request_set_account(requests_mock)
services = {
"ig_index": IGInterface(config, credentials),
"alpha_vantage": AVInterface(credentials["av_api_key"], config),
}
return Broker(config, services)
def create_mock_market(broker, requests_mock):
ig_request_market_info(requests_mock)
data = broker.get_market_info("mock")
market = Market()
market.epic = data["epic"]
market.id = data["market_id"]
market.name = data["name"]
market.bid = data["bid"]
market.offer = data["offer"]
market.high = data["high"]
market.low = data["low"]
market.stop_distance_min = data["stop_distance_min"]
return market
def datafram_from_json(filepath):
"""Load a json file and return a dataframe"""
try:
with open(filepath, "r") as file:
px = pd.DataFrame.from_dict(
json.load(file)["Technical Analysis: MACDEXT"],
orient="index",
dtype=float,
)
px.index = pd.to_datetime(px.index)
return px
except IOError:
exit()
def test_find_trade_signal_buy(config, broker, requests_mock):
strategy = SimpleMACD(config, broker)
data = datafram_from_json("test/test_data/alpha_vantage/mock_macd_ext_buy.json")
# Create a mock market data from the json file
market = create_mock_market(broker, requests_mock)
# Call function to test
tradeDir, limit, stop = strategy.find_trade_signal(market, data)
assert tradeDir is not None
assert limit is not None
assert stop is not None
assert tradeDir == TradeDirection.BUY
def test_find_trade_signal_sell(config, broker, requests_mock):
strategy = SimpleMACD(config, broker)
data = datafram_from_json("test/test_data/alpha_vantage/mock_macd_ext_sell.json")
# Create a mock market data from the json file
market = create_mock_market(broker, requests_mock)
tradeDir, limit, stop = strategy.find_trade_signal(market, data)
assert tradeDir is not None
assert limit is not None
assert stop is not None
assert tradeDir == TradeDirection.SELL
def test_find_trade_signal_hold(config, broker, requests_mock):
strategy = SimpleMACD(config, broker)
data = datafram_from_json("test/test_data/alpha_vantage/mock_macd_ext_hold.json")
# Create a mock market data from the json file
market = create_mock_market(broker, requests_mock)
tradeDir, limit, stop = strategy.find_trade_signal(market, data)
assert tradeDir is not None
assert limit is None
assert stop is None
assert tradeDir == TradeDirection.NONE
def test_find_trade_signal_exception(config):
# TODO provide wrong data and assert exception thrown
assert True
def test_calculate_stop_limit(config, broker):
strategy = SimpleMACD(config, broker)
limit, stop = strategy.calculate_stop_limit(TradeDirection.BUY, 100, 100, 10, 10)
assert limit == 110
assert stop == 90
limit, stop = strategy.calculate_stop_limit(TradeDirection.SELL, 100, 100, 10, 10)
assert limit == 90
assert stop == 110
limit, stop = strategy.calculate_stop_limit(TradeDirection.NONE, 100, 100, 10, 10)
assert limit is None
assert stop is None
def test_generate_signals_from_dataframe(config, broker):
strategy = SimpleMACD(config, broker)
data = datafram_from_json("test/test_data/alpha_vantage/mock_macd_ext_hold.json")
px = strategy.generate_signals_from_dataframe(data)
assert "positions" in px
assert len(px) > 26
# TODO add more checks
def test_get_trade_direction_from_signals(config, broker, requests_mock):
strategy = SimpleMACD(config, broker)
data = datafram_from_json("test/test_data/alpha_vantage/mock_macd_ext_buy.json")
dataframe = strategy.generate_signals_from_dataframe(data)
tradeDir = strategy.get_trade_direction_from_signals(dataframe)
    # BUY because the mock response loads the buy test json
assert tradeDir == TradeDirection.BUY
def test_backtest(config, broker, requests_mock):
av_request_macd_ext(requests_mock, data="mock_macd_ext_buy.json")
av_request_prices(requests_mock)
strategy = SimpleMACD(config, broker)
# Create a mock market data from the json file
market = create_mock_market(broker, requests_mock)
result = strategy.backtest(
market,
dt.strptime("2018-01-01", "%Y-%m-%d"),
dt.strptime("2018-06-01", "%Y-%m-%d"),
)
assert "balance" in result
assert result["balance"] is not None
assert result["balance"] == 997.9299999999998
assert "trades" in result
assert len(result["trades"]) == 8
| 29.966346
| 86
| 0.703995
|
eac14dc3b5c17390abd3dfa617466ff9c11b15c1
| 790
|
py
|
Python
|
tgpush.py
|
Verrickt/zju-dailyhealth-autocheck
|
43854717852727c640c3360e289aa54a36be7694
|
[
"MIT"
] | 5
|
2021-12-22T04:27:03.000Z
|
2021-12-23T03:42:50.000Z
|
tgpush.py
|
Verrickt/zju-dailyhealth-autocheck
|
43854717852727c640c3360e289aa54a36be7694
|
[
"MIT"
] | null | null | null |
tgpush.py
|
Verrickt/zju-dailyhealth-autocheck
|
43854717852727c640c3360e289aa54a36be7694
|
[
"MIT"
] | null | null | null |
from requests import post
import os
"""
Telegram (TG) message push module.
"""
def post_tg(config,message):
TG_TOKEN = config['bot_token']
CHAT_ID = config['chat_id']
PROXY = config['proxy']
telegram_message = f"{message}"
params = (
('chat_id', CHAT_ID),
('text', telegram_message),
        ('parse_mode', "Markdown"),  # can be "Html" or "Markdown"
('disable_web_page_preview', "yes")
)
telegram_url = "https://api.telegram.org/bot" + TG_TOKEN + "/sendMessage"
proxies = {
'https':PROXY,
'http': PROXY
}
telegram_req = post(telegram_url, params=params,proxies=proxies)
telegram_status = telegram_req.status_code
if telegram_status == 200:
print(f"INFO: Telegram Message sent")
else:
print("Telegram Error")
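# --- Illustrative sketch (not part of the original file) ---
# Hypothetical caller: the token/chat id values are placeholders, and the
# config keys match exactly what post_tg() reads above.
def notify_example():
    config = {"bot_token": "123456:ABC-DEF", "chat_id": "-1001234567890", "proxy": None}
    post_tg(config, "daily health check finished")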
| 23.235294
| 77
| 0.61519
|
ab88d9cbaf815ac2d205a77dd23961825d2ec06a
| 1,916
|
py
|
Python
|
3.Understanding Convolutions/3_image.py
|
OwenGranot/Deep-Learning
|
436cc00783c7aeef527f1f06b6550e6d0ab944e0
|
[
"MIT"
] | null | null | null |
3.Understanding Convolutions/3_image.py
|
OwenGranot/Deep-Learning
|
436cc00783c7aeef527f1f06b6550e6d0ab944e0
|
[
"MIT"
] | null | null | null |
3.Understanding Convolutions/3_image.py
|
OwenGranot/Deep-Learning
|
436cc00783c7aeef527f1f06b6550e6d0ab944e0
|
[
"MIT"
] | null | null | null |
# Disable debugging logs (to get rid of CUDA warnings)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
from scipy import signal
from scipy import misc
import matplotlib.pyplot as plt
from PIL import Image
image = Image.open('bird.jpg')
# convert("L") translates color images into b/w
image_gr = image.convert("L")
print("\n Original type: %r \n \n" % image_gr) # Original type: <PIL.Image.Image image mode=L size=1920x1440 at 0x247634E89D0>
# convert image to a matrix with values from 0 to 255 (uint8)
arr = np.asarray(image_gr)
print("After conversion to numerical representation: \n\n %r" % arr)
### Plot image
imgplot = plt.imshow(arr)
imgplot.set_cmap('gray') #you can experiment different colormaps (Greys,winter,autumn)
print("\n Input image converted to gray scale: \n")
# plt.show(imgplot) I have no idea why this isn't showing
'''
Now we can use an edge detector kernel
'''
kernel = np.array([[0,1,0],
[1, -4, 1],
[0, 1, 0],
])
grad = signal.convolve2d(arr, kernel, mode="same", boundary="symm")
print('GRADIENT MAGNITUDE - Feature map')
fig, aux = plt.subplots(figsize=(10, 10))
aux.imshow(np.absolute(grad), cmap='gray')
'''
If we change the kernel and start to analyze the outputs, we would
be acting as a CNN. The difference is that a NN does all this work automatically,
as in the kernel adjustment using different weights.
In addition, we can understand how biases affect the behaviour of feature maps.
Please note that when you are dealing with most real applications
of CNNs, you usually convert the pixel values to a range from 0 to 1.
This process is called normalization.
'''
grad_biases = np.absolute(grad) + 100
grad_biases[grad_biases > 255] = 255
print("GRADIENT MAGNITUDE - Feature map")
fig, aux = plt.subplots(figsize=(10, 10))
aux.imshow(np.absolute(grad_biases), cmap='gray')
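# --- Illustrative sketch (not part of the original file) ---
# The comment above mentions normalization (scaling pixel values to the 0-1
# range) but never shows it; a minimal example using the grayscale array `arr`
# and edge-detector `kernel` already defined in this script:
arr_normalized = arr.astype(np.float32) / 255.0  # uint8 [0, 255] -> float [0.0, 1.0]
grad_normalized = signal.convolve2d(arr_normalized, kernel, mode="same", boundary="symm")
print("Normalized input range: %.2f - %.2f" % (arr_normalized.min(), arr_normalized.max()))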
| 29.476923
| 126
| 0.709812
|