hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace382d3e0de0972f1c5f330e99422df50a72c45 | 2,146 | py | Python | auth_service/config.py | play-learn-grow-together/auth-service | 477ab45d0270bdeeab9a63ad768efd702c4cb622 | [
"MIT"
] | null | null | null | auth_service/config.py | play-learn-grow-together/auth-service | 477ab45d0270bdeeab9a63ad768efd702c4cb622 | [
"MIT"
] | null | null | null | auth_service/config.py | play-learn-grow-together/auth-service | 477ab45d0270bdeeab9a63ad768efd702c4cb622 | [
"MIT"
] | null | null | null | import os
# Directory containing this application package.
APP_DIR = os.path.dirname(__file__)
# Fallback Flask secret key, used only when SECRET_KEY is absent from the
# environment.  NOTE(review): shipping a hard-coded secret (and the default
# DB password below) is unsafe for production; these defaults should only
# ever be exercised in local development.
DEFAULT_SECRET_KEY = "Uphooh4CheiQuoosez8Shieb9aesu1taeHa6cheiThuud2taijoh0kei2ush2sie"
# Database connection settings; each is overridable via the environment.
DB_HOST = os.environ.get("DB_HOST", "localhost")
DB_PORT = os.environ.get("DB_PORT", "5432")
DB_USER = os.environ.get("DB_USER", "oidc")
DB_PASSWORD = os.environ.get("DB_PASSWORD", "secret11!")
DB_DATABASE = os.environ.get("DB_DATABASE", "oidc")
DB_ENGINE = os.environ.get("DB_ENGINE", "postgres")
# SQLAlchemy dialect+driver scheme; default is PostgreSQL via psycopg2,
# switched to MySQL Connector when DB_ENGINE=mysql.
# NOTE(review): modern SQLAlchemy spells the dialect "postgresql" (the
# "postgres" alias was removed in 1.4) — confirm against the pinned version.
DB_SCHEME = "postgres+psycopg2"
if DB_ENGINE == "mysql":
    DB_SCHEME = "mysql+mysqlconnector"
# Full SQLAlchemy connection URI assembled from the parts above.
SQLALCHEMY_DATABASE_URI = (
    f"{DB_SCHEME}://"
    f"{DB_USER}:{DB_PASSWORD}@"
    f"{DB_HOST}:{DB_PORT}/"
    f"{DB_DATABASE}"
)
class Config:
    """Base configuration shared by every deployment environment."""

    # Prefer the environment value; note that `or` also sends an *empty*
    # SECRET_KEY env var to the built-in default.
    SECRET_KEY = os.environ.get("SECRET_KEY") or DEFAULT_SECRET_KEY
    SQLALCHEMY_DATABASE_URI = SQLALCHEMY_DATABASE_URI
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    OAUTH2_REFRESH_TOKEN_GENERATOR = True
    # Any non-empty value of MOCK_USER_SERVICE enables the mock backend.
    MOCK_USER_SERVICE = bool(os.environ.get('MOCK_USER_SERVICE'))
    USER_API_URL = ""

    @staticmethod
    def init_app(app):
        """Hook for subclasses to perform app-specific initialisation."""
        pass
class DevelopmentConfig(Config):
    """Configuration for local development (Flask debug mode enabled)."""

    DEBUG = True
class TestingConfig(Config):
    """Configuration used when running the test suite."""

    TESTING = True
class ProductionConfig(Config):
    """Base production configuration; subclasses add logging transports."""

    @classmethod
    def init_app(cls, app):
        Config.init_app(app)
class DockerConfig(ProductionConfig):
    """Production configuration for containerized deployments.

    Attaches an INFO-level handler writing to stderr so log output is
    captured by the container runtime (e.g. ``docker logs``).
    """

    @classmethod
    def init_app(cls, app):
        ProductionConfig.init_app(app)

        # Log to stderr; imported lazily so module import stays side-effect
        # free until an app is actually initialised.
        import logging
        from logging import StreamHandler

        stderr_handler = StreamHandler()
        stderr_handler.setLevel(logging.INFO)
        app.logger.addHandler(stderr_handler)
class UnixConfig(ProductionConfig):
    """Production configuration for plain Unix hosts.

    Attaches an INFO-level handler that forwards records to the local
    syslog daemon.
    """

    @classmethod
    def init_app(cls, app):
        ProductionConfig.init_app(app)

        # Log to syslog; imported lazily, mirroring DockerConfig.
        import logging
        from logging.handlers import SysLogHandler

        syslog = SysLogHandler()
        syslog.setLevel(logging.INFO)
        app.logger.addHandler(syslog)
# Registry mapping a configuration name (e.g. taken from an environment
# variable at app-factory time) to its class; "default" is used when no
# explicit name is supplied.
config = {
    "development": DevelopmentConfig,
    "testing": TestingConfig,
    "production": ProductionConfig,
    "docker": DockerConfig,
    "unix": UnixConfig,
    "default": DevelopmentConfig,
}
| 23.582418 | 87 | 0.698509 |
ace3832c1d3adcd331225a83c5463a65cdd8c06a | 77,476 | py | Python | swift/proxy/controllers/base.py | larsbutler/swift | 216d68eaa861b0607f1a05828f757f19cb8e6b64 | [
"Apache-2.0"
] | null | null | null | swift/proxy/controllers/base.py | larsbutler/swift | 216d68eaa861b0607f1a05828f757f19cb8e6b64 | [
"Apache-2.0"
] | null | null | null | swift/proxy/controllers/base.py | larsbutler/swift | 216d68eaa861b0607f1a05828f757f19cb8e6b64 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: swift_conn
# You'll see swift_conn passed around a few places in this file. This is the
# source bufferedhttp connection of whatever it is attached to.
# It is used when early termination of reading from the connection should
# happen, such as when a range request is satisfied but there's still more the
# source connection would like to send. To prevent having to read all the data
# that could be left, the source connection can be .close() and then reads
# commence to empty out any buffers.
# These shenanigans are to ensure all related objects can be garbage
# collected. We've seen objects hang around forever otherwise.
from six.moves.urllib.parse import quote
import os
import time
import functools
import inspect
import itertools
import operator
from copy import deepcopy
from sys import exc_info
from swift import gettext_ as _
from eventlet import sleep
from eventlet.timeout import Timeout
import six
from swift.common.wsgi import make_pre_authed_env
from swift.common.utils import Timestamp, config_true_value, \
public, split_path, list_from_csv, GreenthreadSafeIterator, \
GreenAsyncPile, quorum_size, parse_content_type, \
document_iters_to_http_response_body
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ChunkReadTimeout, ChunkWriteTimeout, \
ConnectionTimeout, RangeAlreadyComplete
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.http import is_informational, is_success, is_redirection, \
is_server_error, HTTP_OK, HTTP_PARTIAL_CONTENT, HTTP_MULTIPLE_CHOICES, \
HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVICE_UNAVAILABLE, \
HTTP_INSUFFICIENT_STORAGE, HTTP_UNAUTHORIZED, HTTP_CONTINUE, HTTP_GONE
from swift.common.swob import Request, Response, Range, \
HTTPException, HTTPRequestedRangeNotSatisfiable, HTTPServiceUnavailable, \
status_map
from swift.common.request_helpers import strip_sys_meta_prefix, \
strip_user_meta_prefix, is_user_meta, is_sys_meta, is_sys_or_user_meta, \
http_response_to_document_iters, is_object_transient_sysmeta, \
strip_object_transient_sysmeta_prefix
from swift.common.storage_policy import POLICIES
# How long cached account/container existence info stays fresh before the
# proxy re-HEADs the backend; backends may override these via the
# X-Backend-Recheck-*-Existence response headers (see set_info_cache()).
DEFAULT_RECHECK_ACCOUNT_EXISTENCE = 60  # seconds
DEFAULT_RECHECK_CONTAINER_EXISTENCE = 60  # seconds
def update_headers(response, headers):
    """
    Helper function to update headers in the response.

    :param response: swob.Response object
    :param headers: dictionary headers
    """
    # Headers that the response manages itself and must never be clobbered
    # by a backend's values.
    skipped = ('date', 'content-length', 'content-type',
               'connection', 'x-put-timestamp', 'x-delete-after')
    pairs = headers.items() if hasattr(headers, 'items') else headers
    for name, value in pairs:
        if name == 'etag':
            # Backends may quote their etags; store the bare value.
            response.headers[name] = value.replace('"', '')
        elif name not in skipped:
            response.headers[name] = value
def source_key(resp):
    """
    Provide the timestamp of the swift http response as a floating
    point value.  Used as a sort key.

    :param resp: bufferedhttp response object
    """
    # Prefer the backend timestamp, then the put timestamp, then the plain
    # timestamp; fall back to epoch zero when none are present.
    for header in ('x-backend-timestamp', 'x-put-timestamp', 'x-timestamp'):
        value = resp.getheader(header)
        if value:
            return Timestamp(value)
    return Timestamp(0)
def delay_denial(func):
    """
    Decorator to declare which methods should have any swift.authorize call
    delayed. This is so the method can load the Request object up with
    additional information that may be needed by the authorization system.

    :param func: function for which authorization will be delayed
    """
    # Marker attribute only; the wrapped callable is returned unchanged.
    setattr(func, 'delay_denial', True)
    return func
def _prep_headers_to_info(headers, server_type):
    """
    Helper method that iterates once over a dict of headers,
    converting all keys to lower case and separating
    into subsets containing user metadata, system metadata
    and other headers.
    """
    user_meta, sys_meta, remainder = {}, {}, {}
    for raw_key, value in dict(headers).items():
        key = raw_key.lower()
        if is_user_meta(server_type, key):
            user_meta[strip_user_meta_prefix(server_type, key)] = value
        elif is_sys_meta(server_type, key):
            sys_meta[strip_sys_meta_prefix(server_type, key)] = value
        else:
            remainder[key] = value
    return remainder, user_meta, sys_meta
def headers_to_account_info(headers, status_int=HTTP_OK):
    """
    Construct a cacheable dict of account info based on response headers.
    """
    other, meta, sysmeta = _prep_headers_to_info(headers, 'account')
    info = {
        'status': status_int,
        # 'container_count' anomaly:
        # Previous code sometimes expects an int sometimes a string
        # Current code aligns to str and None, yet translates to int in
        # deprecated functions as needed
        'container_count': other.get('x-account-container-count'),
        'total_object_count': other.get('x-account-object-count'),
        'bytes': other.get('x-account-bytes-used'),
        'meta': meta,
        'sysmeta': sysmeta,
    }
    if is_success(status_int):
        fake_listing = config_true_value(
            other.get('x-backend-fake-account-listing'))
        info['account_really_exists'] = not fake_listing
    return info
def headers_to_container_info(headers, status_int=HTTP_OK):
    """
    Construct a cacheable dict of container info based on response headers.
    """
    other, meta, sysmeta = _prep_headers_to_info(headers, 'container')
    # CORS settings live in user metadata on the container.
    cors = {
        'allow_origin': meta.get('access-control-allow-origin'),
        'expose_headers': meta.get('access-control-expose-headers'),
        'max_age': meta.get('access-control-max-age'),
    }
    return {
        'status': status_int,
        'read_acl': other.get('x-container-read'),
        'write_acl': other.get('x-container-write'),
        'sync_key': other.get('x-container-sync-key'),
        'object_count': other.get('x-container-object-count'),
        'bytes': other.get('x-container-bytes-used'),
        'versions': other.get('x-versions-location'),
        'storage_policy': other.get('x-backend-storage-policy-index', '0'),
        'cors': cors,
        'meta': meta,
        'sysmeta': sysmeta,
    }
def headers_to_object_info(headers, status_int=HTTP_OK):
    """
    Construct a cacheable dict of object info based on response headers.
    """
    headers, meta, sysmeta = _prep_headers_to_info(headers, 'object')
    transient_sysmeta = {}
    # dict.iteritems() does not exist on Python 3; this module otherwise
    # supports both interpreters via six, so use items() here (headers is
    # a plain dict returned by _prep_headers_to_info).
    for key, val in headers.items():
        if is_object_transient_sysmeta(key):
            key = strip_object_transient_sysmeta_prefix(key.lower())
            transient_sysmeta[key] = val
    info = {'status': status_int,
            'length': headers.get('content-length'),
            'type': headers.get('content-type'),
            'etag': headers.get('etag'),
            'meta': meta,
            'sysmeta': sysmeta,
            'transient_sysmeta': transient_sysmeta
            }
    return info
def cors_validation(func):
    """
    Decorator to check if the request is a CORS request and if so, if it's
    valid.

    :param func: function to check
    """
    @functools.wraps(func)
    def wrapped(*a, **kw):
        controller = a[0]
        req = a[1]

        # The logic here was interpreted from
        #   http://www.w3.org/TR/cors/#resource-requests

        # Is this a CORS request?
        req_origin = req.headers.get('Origin', None)
        if req_origin:
            # Yes, this is a CORS request so test if the origin is allowed
            container_info = \
                controller.container_info(controller.account_name,
                                          controller.container_name, req)
            cors_info = container_info.get('cors', {})

            # Call through to the decorated method
            resp = func(*a, **kw)

            # In strict mode a disallowed origin gets the response back
            # with no CORS headers added at all.
            if controller.app.strict_cors_mode and \
                    not controller.is_origin_allowed(cors_info, req_origin):
                return resp

            # Expose,
            #  - simple response headers,
            #    http://www.w3.org/TR/cors/#simple-response-header
            #  - swift specific: etag, x-timestamp, x-trans-id
            #  - user metadata headers
            #  - headers provided by the user in
            #    x-container-meta-access-control-expose-headers
            if 'Access-Control-Expose-Headers' not in resp.headers:
                expose_headers = set([
                    'cache-control', 'content-language', 'content-type',
                    'expires', 'last-modified', 'pragma', 'etag',
                    'x-timestamp', 'x-trans-id'])
                for header in resp.headers:
                    if header.startswith('X-Container-Meta') or \
                            header.startswith('X-Object-Meta'):
                        expose_headers.add(header.lower())
                if cors_info.get('expose_headers'):
                    expose_headers = expose_headers.union(
                        [header_line.strip().lower()
                         for header_line in
                         cors_info['expose_headers'].split(' ')
                         if header_line.strip()])
                resp.headers['Access-Control-Expose-Headers'] = \
                    ', '.join(expose_headers)

            # The user agent won't process the response if the Allow-Origin
            # header isn't included.
            # NOTE(review): this indexes cors_info['allow_origin'] directly,
            # which would raise KeyError if container_info carried no 'cors'
            # entry; in practice headers_to_container_info() always supplies
            # the key — confirm no other producers exist.
            if 'Access-Control-Allow-Origin' not in resp.headers:
                if cors_info['allow_origin'] and \
                        cors_info['allow_origin'].strip() == '*':
                    resp.headers['Access-Control-Allow-Origin'] = '*'
                else:
                    resp.headers['Access-Control-Allow-Origin'] = req_origin

            return resp
        else:
            # Not a CORS request so make the call as normal
            return func(*a, **kw)

    return wrapped
def get_object_info(env, app, path=None, swift_source=None):
    """
    Get the info structure for an object, based on env and app.
    This is useful to middlewares.

    .. note::

        This call bypasses auth. Success does not imply that the request has
        authorization to the object.
    """
    version, account, container, obj = split_path(
        path or env['PATH_INFO'], 4, 4, True)
    info = _get_object_info(app, env, account, container, obj,
                            swift_source=swift_source)
    if info:
        # Never hand callers the cached dict itself; they may mutate it.
        info = deepcopy(info)
    else:
        info = headers_to_object_info({}, 0)

    # Normalize numeric fields from str/None to int for consumers.
    for field in ('length',):
        raw = info.get(field)
        info[field] = 0 if raw is None else int(raw)
    return info
def get_container_info(env, app, swift_source=None):
    """
    Get the info structure for a container, based on env and app.
    This is useful to middlewares.

    .. note::

        This call bypasses auth. Success does not imply that the request has
        authorization to the container.

    :param env: the environment used by the current request
    :param app: the application object
    :param swift_source: swift source logged for any subrequests made while
                         retrieving the container info
    """
    (version, account, container, unused) = \
        split_path(env['PATH_INFO'], 3, 4, True)

    # Check in environment cache and in memcache (in that order)
    info = _get_info_from_caches(app, env, account, container)

    if not info:
        # Cache miss; go HEAD the container and populate the caches
        env.setdefault('swift.infocache', {})
        # Before checking the container, make sure the account exists.
        #
        # If it is an autocreateable account, just assume it exists; don't
        # HEAD the account, as a GET or HEAD response for an autocreateable
        # account is successful whether the account actually has .db files
        # on disk or not.
        is_autocreate_account = account.startswith(
            getattr(app, 'auto_create_account_prefix', '.'))
        if not is_autocreate_account:
            account_info = get_account_info(env, app, swift_source)
            if not account_info or not is_success(account_info['status']):
                return headers_to_container_info({}, 0)

        req = _prepare_pre_auth_info_request(
            env, ("/%s/%s/%s" % (version, account, container)),
            (swift_source or 'GET_CONTAINER_INFO'))
        resp = req.get_response(app)
        # Check in infocache to see if the proxy (or anyone else) already
        # populated the cache for us. If they did, just use what's there.
        #
        # See similar comment in get_account_info() for justification.
        info = _get_info_from_infocache(env, account, container)
        if info is None:
            info = set_info_cache(app, env, account, container, resp)

    if info:
        info = deepcopy(info)  # avoid mutating what's in swift.infocache
    else:
        info = headers_to_container_info({}, 0)

    # Old data format in memcache immediately after a Swift upgrade; clean
    # it up so consumers of get_container_info() aren't exposed to it.
    info.setdefault('storage_policy', '0')
    if 'object_count' not in info and 'container_size' in info:
        info['object_count'] = info.pop('container_size')

    # Normalize numeric fields from str/None to int for consumers.
    for field in ('bytes', 'object_count'):
        if info.get(field) is None:
            info[field] = 0
        else:
            info[field] = int(info[field])

    return info
def get_account_info(env, app, swift_source=None):
    """
    Get the info structure for an account, based on env and app.
    This is useful to middlewares.

    .. note::

        This call bypasses auth. Success does not imply that the request has
        authorization to the account.

    :param env: the environment used by the current request
    :param app: the application object
    :param swift_source: swift source logged for any subrequests made while
                         retrieving the account info
    :raises ValueError: when path doesn't contain an account
    """
    (version, account, _junk, _junk) = \
        split_path(env['PATH_INFO'], 2, 4, True)

    # Check in environment cache and in memcache (in that order)
    info = _get_info_from_caches(app, env, account)

    # Cache miss; go HEAD the account and populate the caches
    if not info:
        env.setdefault('swift.infocache', {})
        req = _prepare_pre_auth_info_request(
            env, "/%s/%s" % (version, account),
            (swift_source or 'GET_ACCOUNT_INFO'))
        resp = req.get_response(app)
        # Check in infocache to see if the proxy (or anyone else) already
        # populated the cache for us. If they did, just use what's there.
        #
        # The point of this is to avoid setting the value in memcached
        # twice. Otherwise, we're needlessly sending requests across the
        # network.
        #
        # If the info didn't make it into the cache, we'll compute it from
        # the response and populate the cache ourselves.
        #
        # Note that this is taking "exists in infocache" to imply "exists in
        # memcache". That's because we're trying to avoid superfluous
        # network traffic, and checking in memcache prior to setting in
        # memcache would defeat the purpose.
        info = _get_info_from_infocache(env, account)
        if info is None:
            info = set_info_cache(app, env, account, None, resp)

    if info:
        info = info.copy()  # avoid mutating what's in swift.infocache
    else:
        info = headers_to_account_info({}, 0)

    # Normalize numeric fields from str/None to int for consumers.
    for field in ('container_count', 'bytes', 'total_object_count'):
        if info.get(field) is None:
            info[field] = 0
        else:
            info[field] = int(info[field])

    return info
def get_cache_key(account, container=None, obj=None):
    """
    Get the keys for both memcache and env['swift.infocache'] (cache_key)
    where info about accounts, containers, and objects is cached

    :param account: The name of the account
    :param container: The name of the container (or None if account)
    :param obj: The name of the object (or None if account or container)
    :returns: a string cache_key
    """
    # The key is unique per account/container/object, so an env copied to
    # form a new request cannot accidentally reuse stale info cached for a
    # different entity.
    if obj:
        if not account or not container:
            raise ValueError('Object cache key requires account and container')
        return 'object/%s/%s/%s' % (account, container, obj)
    if container:
        if not account:
            raise ValueError('Container cache key requires account')
        return 'container/%s/%s' % (account, container)
    return 'account/%s' % account
def set_info_cache(app, env, account, container, resp):
    """
    Cache info in both memcache and env.

    :param app: the application object
    :param env: the WSGI environment
    :param account: the unquoted account name
    :param container: the unquoted container name or None
    :param resp: the response received or None if info cache should be cleared
    :returns: the info that was placed into the cache, or None if the
              request status was not in (404, 410, 2xx).
    """
    infocache = env.setdefault('swift.infocache', {})

    cache_time = None
    if container and resp:
        cache_time = int(resp.headers.get(
            'X-Backend-Recheck-Container-Existence',
            DEFAULT_RECHECK_CONTAINER_EXISTENCE))
    elif resp:
        cache_time = int(resp.headers.get(
            'X-Backend-Recheck-Account-Existence',
            DEFAULT_RECHECK_ACCOUNT_EXISTENCE))
    cache_key = get_cache_key(account, container)

    if resp:
        if resp.status_int in (HTTP_NOT_FOUND, HTTP_GONE):
            # Cache "doesn't exist" for only a tenth of the normal time so
            # a freshly created entity becomes visible quickly.
            cache_time *= 0.1
        elif not is_success(resp.status_int):
            # Other error statuses are never cached.
            cache_time = None

    # Next actually set both memcache and the env cache
    memcache = getattr(app, 'memcache', None) or env.get('swift.cache')
    if not cache_time:
        # Either resp was None or the status was uncacheable: treat this
        # call as an invalidation of both cache layers.
        infocache.pop(cache_key, None)
        if memcache:
            memcache.delete(cache_key)
        return

    if container:
        info = headers_to_container_info(resp.headers, resp.status_int)
    else:
        info = headers_to_account_info(resp.headers, resp.status_int)
    if memcache:
        memcache.set(cache_key, info, time=cache_time)
    infocache[cache_key] = info
    return info
def set_object_info_cache(app, env, account, container, obj, resp):
    """
    Cache object info in the WSGI environment, but not in memcache. Caching
    in memcache would lead to cache pressure and mass evictions due to the
    large number of objects in a typical Swift cluster. This is a
    per-request cache only.

    :param app: the application object
    :param env: the WSGI environment
    :param account: the unquoted account name
    :param container: the unquoted container name
    :param obj: the unquoted object name
    :param resp: a GET or HEAD response received from an object server, or
                 None if info cache should be cleared
    :returns: the object info
    """
    cache_key = get_cache_key(account, container, obj)

    if not resp:
        # Clearing the cache. Previously a None resp with no
        # 'swift.infocache' in env fell through to resp.headers below and
        # raised AttributeError; clearing is now safe either way.
        if 'swift.infocache' in env:
            env['swift.infocache'].pop(cache_key, None)
        return

    info = headers_to_object_info(resp.headers, resp.status_int)
    env.setdefault('swift.infocache', {})[cache_key] = info
    return info
def clear_info_cache(app, env, account, container=None):
    """
    Clear the cached info in both memcache and env

    :param app: the application object
    :param env: the WSGI environment
    :param account: the account name
    :param container: the container name or None if setting info for accounts
    """
    # Passing resp=None makes set_info_cache invalidate both cache layers.
    set_info_cache(app, env, account, container, None)
def _get_info_from_infocache(env, account, container=None):
    """
    Get cached account or container information from request-environment
    cache (swift.infocache).

    :param env: the environment used by the current request
    :param account: the account name
    :param container: the container name
    :returns: a dictionary of cached info on cache hit, None on miss
    """
    cache_key = get_cache_key(account, container)
    if 'swift.infocache' not in env:
        return None
    return env['swift.infocache'].get(cache_key)
def _get_info_from_memcache(app, env, account, container=None):
    """
    Get cached account or container information from memcache

    :param app: the application object
    :param env: the environment used by the current request
    :param account: the account name
    :param container: the container name

    :returns: a dictionary of cached info on cache hit, None on miss. Also
              returns None if memcache is not in use.
    """
    cache_key = get_cache_key(account, container)
    memcache = getattr(app, 'memcache', None) or env.get('swift.cache')
    if memcache:
        info = memcache.get(cache_key)
        if info:
            # Values deserialized from memcache come back as unicode;
            # encode one level of nested string values in place so the
            # rest of the proxy sees UTF-8 bytes (Python 2 convention).
            for key in info:
                if isinstance(info[key], six.text_type):
                    info[key] = info[key].encode("utf-8")
                elif isinstance(info[key], dict):
                    for subkey, value in info[key].items():
                        if isinstance(value, six.text_type):
                            info[key][subkey] = value.encode("utf-8")
            # Seed the per-request cache so later lookups skip memcache.
            env.setdefault('swift.infocache', {})[cache_key] = info
        return info
    return None
def _get_info_from_caches(app, env, account, container=None):
    """
    Get the cached info from env or memcache (if used) in that order.
    Used for both account and container info.

    :param app: the application object
    :param env: the environment used by the current request
    :returns: the cached info or None if not cached
    """
    cached = _get_info_from_infocache(env, account, container)
    if cached is not None:
        return cached
    return _get_info_from_memcache(app, env, account, container)
def _prepare_pre_auth_info_request(env, path, swift_source):
    """
    Prepares a pre authed request to obtain info using a HEAD.

    :param env: the environment used by the current request
    :param path: The unquoted request path
    :param swift_source: value for swift.source in WSGI environment
    :returns: the pre authed request
    """
    # Set the env for the pre_authed call without a query string
    newenv = make_pre_authed_env(env, 'HEAD', path, agent='Swift',
                                 query_string='', swift_source=swift_source)

    # This is a sub request for container metadata- drop the Origin header
    # from the request so that it is not treated as a CORS request.
    newenv.pop('HTTP_ORIGIN', None)

    # ACLs are only shown to account owners, so let's make sure this request
    # looks like it came from the account owner.
    newenv['swift_owner'] = True

    # Note that Request.blank expects quoted path
    return Request.blank(quote(path), environ=newenv)
def get_info(app, env, account, container=None, swift_source=None):
    """
    Get info about accounts or containers

    Note: This call bypasses auth. Success does not imply that the
    request has authorization to the info.

    :param app: the application object
    :param env: the environment used by the current request
    :param account: The unquoted name of the account
    :param container: The unquoted name of the container (or None if account)
    :param swift_source: swift source logged for any subrequests made while
                         retrieving the account or container info
    :returns: information about the specified entity in a dictionary. See
      get_account_info and get_container_info for details on what's in the
      dictionary.
    """
    env.setdefault('swift.infocache', {})

    # Dispatch to the container or account path depending on whether a
    # container name was supplied.
    if container:
        path = '/v1/%s/%s' % (account, container)
        fetch = get_container_info
    else:
        path = '/v1/%s' % (account,)
        fetch = get_account_info

    path_env = env.copy()
    path_env['PATH_INFO'] = path
    return fetch(path_env, app, swift_source=swift_source)
def _get_object_info(app, env, account, container, obj, swift_source=None):
    """
    Get the info about object

    Note: This call bypasses auth. Success does not imply that the
    request has authorization to the info.

    :param app: the application object
    :param env: the environment used by the current request
    :param account: The unquoted name of the account
    :param container: The unquoted name of the container
    :param obj: The unquoted name of the object
    :param swift_source: swift source logged for any subrequest made while
                         retrieving the object info
    :returns: the cached info or None if cannot be retrieved
    """
    cache_key = get_cache_key(account, container, obj)
    info = env.get('swift.infocache', {}).get(cache_key)
    if info:
        return info
    # Not in cache, let's try the object servers
    path = '/v1/%s/%s/%s' % (account, container, obj)
    req = _prepare_pre_auth_info_request(env, path, swift_source)
    resp = req.get_response(app)
    # Unlike get_account_info() and get_container_info(), we don't save
    # things in memcache, so we can store the info without network traffic,
    # *and* the proxy doesn't cache object info for us, so there's no chance
    # that the object info would be in the environment. Thus, we just
    # compute the object info based on the response and stash it in
    # swift.infocache.
    info = set_object_info_cache(app, env, account, container, obj, resp)
    return info
def close_swift_conn(src):
    """
    Force close the http connection to the backend.

    :param src: the response from the backend
    """
    # The backends reply with "Connection: close", so the response object
    # (src) is the sole owner of the socket; src.swift_conn holds no
    # reference to it, and response.close() may only decrement a reference
    # counter. nuke_from_orbit() issues a real close() syscall on the
    # underlying socket — it's the only way to be sure.
    try:
        src.nuke_from_orbit()
    except Exception:
        # Best-effort teardown; a failure here must never propagate.
        pass
def bytes_to_skip(record_size, range_start):
    """
    Assume an object is composed of N records, where the first N-1 are all
    the same size and the last is at most that large, but may be smaller.

    When a range request is made, it might start with a partial record. This
    must be discarded, lest the consumer get bad data. This is particularly
    true of suffix-byte-range requests, e.g. "Range: bytes=-12345" where the
    size of the object is unknown at the time the request is made.

    This function computes the number of bytes that must be discarded to
    ensure only whole records are yielded. Erasure-code decoding needs this.

    This function could have been inlined, but it took enough tries to get
    right that some targeted unit tests were desirable, hence its extraction.
    """
    partial = range_start % record_size
    # The outer modulo maps "already aligned" (partial == 0) to zero bytes
    # skipped instead of a full record.
    return (record_size - partial) % record_size
class ResumingGetter(object):
def __init__(self, app, req, server_type, node_iter, partition, path,
backend_headers, concurrency=1, client_chunk_size=None,
newest=None):
self.app = app
self.node_iter = node_iter
self.server_type = server_type
self.partition = partition
self.path = path
self.backend_headers = backend_headers
self.client_chunk_size = client_chunk_size
self.skip_bytes = 0
self.used_nodes = []
self.used_source_etag = ''
self.concurrency = concurrency
# stuff from request
self.req_method = req.method
self.req_path = req.path
self.req_query_string = req.query_string
if newest is None:
self.newest = config_true_value(req.headers.get('x-newest', 'f'))
else:
self.newest = newest
# populated when finding source
self.statuses = []
self.reasons = []
self.bodies = []
self.source_headers = []
self.sources = []
# populated from response headers
self.start_byte = self.end_byte = self.length = None
def fast_forward(self, num_bytes):
"""
Will skip num_bytes into the current ranges.
:params num_bytes: the number of bytes that have already been read on
this request. This will change the Range header
so that the next req will start where it left off.
:raises ValueError: if invalid range header
:raises HTTPRequestedRangeNotSatisfiable: if begin + num_bytes
> end of range + 1
:raises RangeAlreadyComplete: if begin + num_bytes == end of range + 1
"""
if 'Range' in self.backend_headers:
req_range = Range(self.backend_headers['Range'])
begin, end = req_range.ranges[0]
if begin is None:
# this is a -50 range req (last 50 bytes of file)
end -= num_bytes
if end == 0:
# we sent out exactly the first range's worth of bytes, so
# we're done with it
raise RangeAlreadyComplete()
else:
begin += num_bytes
if end is not None and begin == end + 1:
# we sent out exactly the first range's worth of bytes, so
# we're done with it
raise RangeAlreadyComplete()
if end is not None and (begin > end or end < 0):
raise HTTPRequestedRangeNotSatisfiable()
req_range.ranges = [(begin, end)] + req_range.ranges[1:]
self.backend_headers['Range'] = str(req_range)
else:
self.backend_headers['Range'] = 'bytes=%d-' % num_bytes
def pop_range(self):
"""
Remove the first byterange from our Range header.
This is used after a byterange has been completely sent to the
client; this way, should we need to resume the download from another
object server, we do not re-fetch byteranges that the client already
has.
If we have no Range header, this is a no-op.
"""
if 'Range' in self.backend_headers:
try:
req_range = Range(self.backend_headers['Range'])
except ValueError:
# there's a Range header, but it's garbage, so get rid of it
self.backend_headers.pop('Range')
return
begin, end = req_range.ranges.pop(0)
if len(req_range.ranges) > 0:
self.backend_headers['Range'] = str(req_range)
else:
self.backend_headers.pop('Range')
def learn_size_from_content_range(self, start, end, length):
"""
If client_chunk_size is set, makes sure we yield things starting on
chunk boundaries based on the Content-Range header in the response.
Sets our Range header's first byterange to the value learned from
the Content-Range header in the response; if we were given a
fully-specified range (e.g. "bytes=123-456"), this is a no-op.
If we were given a half-specified range (e.g. "bytes=123-" or
"bytes=-456"), then this changes the Range header to a
semantically-equivalent one *and* it lets us resume on a proper
boundary instead of just in the middle of a piece somewhere.
"""
if length == 0:
return
if self.client_chunk_size:
self.skip_bytes = bytes_to_skip(self.client_chunk_size, start)
if 'Range' in self.backend_headers:
try:
req_range = Range(self.backend_headers['Range'])
new_ranges = [(start, end)] + req_range.ranges[1:]
except ValueError:
new_ranges = [(start, end)]
else:
new_ranges = [(start, end)]
self.backend_headers['Range'] = (
"bytes=" + (",".join("%s-%s" % (s if s is not None else '',
e if e is not None else '')
for s, e in new_ranges)))
def is_good_source(self, src):
"""
Indicates whether or not the request made to the backend found
what it was looking for.
:param src: the response from the backend
:returns: True if found, False if not
"""
if self.server_type == 'Object' and src.status == 416:
return True
return is_success(src.status) or is_redirection(src.status)
def response_parts_iter(self, req):
source, node = self._get_source_and_node()
it = None
if source:
it = self._get_response_parts_iter(req, node, source)
return it
    def _get_response_parts_iter(self, req, node, source):
        """
        Yield one dict per document part of the backend response, each with
        'start_byte', 'end_byte', 'entity_length', 'headers' and a
        'part_iter' over that part's bytes.

        Handles mid-stream backend failures by fetching a replacement
        source via _get_source_and_node() and fast-forwarding past bytes
        already delivered to the client.
        """
        # Someday we can replace this [mess] with python 3's "nonlocal"
        # (single-element lists let the nested closures rebind these).
        source = [source]
        node = [node]

        try:
            client_chunk_size = self.client_chunk_size
            node_timeout = self.app.node_timeout
            if self.server_type == 'Object':
                node_timeout = self.app.recoverable_node_timeout

            # This is safe; it sets up a generator but does not call next()
            # on it, so no IO is performed.
            parts_iter = [
                http_response_to_document_iters(
                    source[0], read_chunk_size=self.app.object_chunk_size)]

            def get_next_doc_part():
                # Advance to the next MIME part, replacing the backend
                # source on read timeout if another node can serve us.
                while True:
                    try:
                        # This call to next() performs IO when we have a
                        # multipart/byteranges response; it reads the MIME
                        # boundary and part headers.
                        #
                        # If we don't have a multipart/byteranges response,
                        # but just a 200 or a single-range 206, then this
                        # performs no IO, and either just returns source or
                        # raises StopIteration.
                        with ChunkReadTimeout(node_timeout):
                            # if StopIteration is raised, it escapes and is
                            # handled elsewhere
                            start_byte, end_byte, length, headers, part = next(
                                parts_iter[0])
                        return (start_byte, end_byte, length, headers, part)
                    except ChunkReadTimeout:
                        new_source, new_node = self._get_source_and_node()
                        if new_source:
                            self.app.exception_occurred(
                                node[0], _('Object'),
                                _('Trying to read during GET (retrying)'))
                            # Close-out the connection as best as possible.
                            if getattr(source[0], 'swift_conn', None):
                                close_swift_conn(source[0])
                            source[0] = new_source
                            node[0] = new_node
                            # This is safe; it sets up a generator but does
                            # not call next() on it, so no IO is performed.
                            parts_iter[0] = http_response_to_document_iters(
                                new_source,
                                read_chunk_size=self.app.object_chunk_size)
                        else:
                            # NOTE(review): under PEP 479 (py3.7+) a
                            # StopIteration raised inside a generator turns
                            # into RuntimeError; fine for the py2-era target
                            # this code was written for — confirm on py3.
                            raise StopIteration()

            def iter_bytes_from_response_part(part_file):
                # Stream one part's bytes to the client, re-chunking to
                # client_chunk_size and resuming from a new backend node on
                # read timeout.
                nchunks = 0
                buf = ''
                # Counts bytes handed to the client (or skipped), so that a
                # resume can fast_forward() past them.
                bytes_used_from_backend = 0
                while True:
                    try:
                        with ChunkReadTimeout(node_timeout):
                            chunk = part_file.read(self.app.object_chunk_size)
                            nchunks += 1
                            buf += chunk
                    except ChunkReadTimeout:
                        exc_type, exc_value, exc_traceback = exc_info()
                        if self.newest or self.server_type != 'Object':
                            # Resuming is only safe for plain object GETs.
                            six.reraise(exc_type, exc_value, exc_traceback)
                        try:
                            self.fast_forward(bytes_used_from_backend)
                        except (HTTPException, ValueError):
                            six.reraise(exc_type, exc_value, exc_traceback)
                        except RangeAlreadyComplete:
                            break
                        buf = ''
                        new_source, new_node = self._get_source_and_node()
                        if new_source:
                            self.app.exception_occurred(
                                node[0], _('Object'),
                                _('Trying to read during GET (retrying)'))
                            # Close-out the connection as best as possible.
                            if getattr(source[0], 'swift_conn', None):
                                close_swift_conn(source[0])
                            source[0] = new_source
                            node[0] = new_node
                            # This is safe; it just sets up a generator but
                            # does not call next() on it, so no IO is
                            # performed.
                            parts_iter[0] = http_response_to_document_iters(
                                new_source,
                                read_chunk_size=self.app.object_chunk_size)
                            try:
                                _junk, _junk, _junk, _junk, part_file = \
                                    get_next_doc_part()
                            except StopIteration:
                                # Tried to find a new node from which to
                                # finish the GET, but failed. There's
                                # nothing more to do here.
                                return
                        else:
                            six.reraise(exc_type, exc_value, exc_traceback)
                    else:
                        # Discard bytes the client already has (set by
                        # learn_size_from_content_range for chunk
                        # alignment).
                        if buf and self.skip_bytes:
                            if self.skip_bytes < len(buf):
                                buf = buf[self.skip_bytes:]
                                bytes_used_from_backend += self.skip_bytes
                                self.skip_bytes = 0
                            else:
                                self.skip_bytes -= len(buf)
                                bytes_used_from_backend += len(buf)
                                buf = ''

                        if not chunk:
                            # Backend EOF: flush whatever is buffered.
                            if buf:
                                with ChunkWriteTimeout(
                                        self.app.client_timeout):
                                    bytes_used_from_backend += len(buf)
                                    yield buf
                                buf = ''
                            break

                        if client_chunk_size is not None:
                            # Emit fixed-size chunks to the client.
                            while len(buf) >= client_chunk_size:
                                client_chunk = buf[:client_chunk_size]
                                buf = buf[client_chunk_size:]
                                with ChunkWriteTimeout(
                                        self.app.client_timeout):
                                    yield client_chunk
                                bytes_used_from_backend += len(client_chunk)
                        else:
                            with ChunkWriteTimeout(self.app.client_timeout):
                                yield buf
                            bytes_used_from_backend += len(buf)
                            buf = ''

                        # This is for fairness; if the network is outpacing
                        # the CPU, we'll always be able to read and write
                        # data without encountering an EWOULDBLOCK, and so
                        # eventlet will not switch greenthreads on its own.
                        # We do it manually so that clients don't starve.
                        #
                        # The number 5 here was chosen by making stuff up.
                        # It's not every single chunk, but it's not too big
                        # either, so it seemed like it would probably be an
                        # okay choice.
                        #
                        # Note that we may trampoline to other greenthreads
                        # more often than once every 5 chunks, depending on
                        # how blocking our network IO is; the explicit sleep
                        # here simply provides a lower bound on the rate of
                        # trampolining.
                        if nchunks % 5 == 0:
                            sleep()

            part_iter = None
            try:
                while True:
                    start_byte, end_byte, length, headers, part = \
                        get_next_doc_part()
                    self.learn_size_from_content_range(
                        start_byte, end_byte, length)
                    part_iter = iter_bytes_from_response_part(part)
                    yield {'start_byte': start_byte, 'end_byte': end_byte,
                           'entity_length': length, 'headers': headers,
                           'part_iter': part_iter}
                    self.pop_range()
            except StopIteration:
                # Ran out of parts normally; mark this as not a client
                # disconnect so GeneratorExit below stays quiet.
                req.environ['swift.non_client_disconnect'] = True
            finally:
                if part_iter:
                    part_iter.close()

        except ChunkReadTimeout:
            self.app.exception_occurred(node[0], _('Object'),
                                        _('Trying to read during GET'))
            raise
        except ChunkWriteTimeout:
            self.app.logger.warning(
                _('Client did not read from proxy within %ss') %
                self.app.client_timeout)
            self.app.logger.increment('client_timeouts')
        except GeneratorExit:
            if not req.environ.get('swift.non_client_disconnect'):
                self.app.logger.warning(_('Client disconnected on read'))
        except Exception:
            self.app.logger.exception(_('Trying to send to client'))
            raise
        finally:
            # Close-out the connection as best as possible.
            if getattr(source[0], 'swift_conn', None):
                close_swift_conn(source[0])
@property
def last_status(self):
if self.statuses:
return self.statuses[-1]
else:
return None
@property
def last_headers(self):
if self.source_headers:
return self.source_headers[-1]
else:
return None
    def _make_node_request(self, node, node_timeout, logger_thread_locals):
        """
        Send the backend request to a single node, recording the outcome in
        self.statuses/reasons/bodies/source_headers (and self.sources on
        success).

        :returns: True when a good source was found and no further nodes
                  need to be queried (never True with self.newest, so that
                  every node gets asked); False otherwise
        """
        self.app.logger.thread_locals = logger_thread_locals
        if node in self.used_nodes:
            # Already tried this node on a previous attempt/resume.
            return False
        start_node_timing = time.time()
        try:
            with ConnectionTimeout(self.app.conn_timeout):
                conn = http_connect(
                    node['ip'], node['port'], node['device'],
                    self.partition, self.req_method, self.path,
                    headers=self.backend_headers,
                    query_string=self.req_query_string)
            self.app.set_node_timing(node, time.time() - start_node_timing)

            with Timeout(node_timeout):
                possible_source = conn.getresponse()
                # See NOTE: swift_conn at top of file about this.
                possible_source.swift_conn = conn
        except (Exception, Timeout):
            self.app.exception_occurred(
                node, self.server_type,
                _('Trying to %(method)s %(path)s') %
                {'method': self.req_method, 'path': self.req_path})
            return False
        if self.is_good_source(possible_source):
            # 404 if we know we don't have a synced copy
            # (X-PUT-Timestamp of 0 means deleted/never written).
            if not float(possible_source.getheader('X-PUT-Timestamp', 1)):
                self.statuses.append(HTTP_NOT_FOUND)
                self.reasons.append('')
                self.bodies.append('')
                self.source_headers.append([])
                close_swift_conn(possible_source)
            else:
                if self.used_source_etag:
                    # On resume, only accept a source serving the same
                    # object (or same EC fragment archive) we started with.
                    src_headers = dict(
                        (k.lower(), v) for k, v in
                        possible_source.getheaders())

                    if self.used_source_etag != src_headers.get(
                            'x-object-sysmeta-ec-etag',
                            src_headers.get('etag', '')).strip('"'):
                        self.statuses.append(HTTP_NOT_FOUND)
                        self.reasons.append('')
                        self.bodies.append('')
                        self.source_headers.append([])
                        return False

                self.statuses.append(possible_source.status)
                self.reasons.append(possible_source.reason)
                # Body left unread (None): it will be streamed later.
                self.bodies.append(None)
                self.source_headers.append(possible_source.getheaders())
                self.sources.append((possible_source, node))
                if not self.newest:  # one good source is enough
                    return True
        else:
            self.statuses.append(possible_source.status)
            self.reasons.append(possible_source.reason)
            self.bodies.append(possible_source.read())

            self.source_headers.append(possible_source.getheaders())
            if possible_source.status == HTTP_INSUFFICIENT_STORAGE:
                self.app.error_limit(node, _('ERROR Insufficient Storage'))
            elif is_server_error(possible_source.status):
                self.app.error_occurred(
                    node, _('ERROR %(status)d %(body)s '
                            'From %(type)s Server') %
                    {'status': possible_source.status,
                     'body': self.bodies[-1][:1024],
                     'type': self.server_type})
        return False
    def _get_source_and_node(self):
        """
        Query backend nodes, up to self.concurrency at a time, until one
        yields a usable response.

        Resets the per-attempt result lists, so this is safe to call again
        to resume after a failure.

        :returns: (source, node) for the best response found, or
                  (None, None) if no node gave a good response
        """
        self.statuses = []
        self.reasons = []
        self.bodies = []
        self.source_headers = []
        self.sources = []

        nodes = GreenthreadSafeIterator(self.node_iter)

        node_timeout = self.app.node_timeout
        if self.server_type == 'Object' and not self.newest:
            node_timeout = self.app.recoverable_node_timeout

        pile = GreenAsyncPile(self.concurrency)

        for node in nodes:
            pile.spawn(self._make_node_request, node, node_timeout,
                       self.app.logger.thread_locals)
            # Only wait with a timeout while we still have spare
            # concurrency slots; once the pile is full, block until
            # something finishes.
            _timeout = self.app.concurrency_timeout \
                if pile.inflight < self.concurrency else None
            if pile.waitfirst(_timeout):
                break
        else:
            # ran out of nodes, see if any stragglers will finish
            any(pile)

        if self.sources:
            # Prefer the "best" source (source_key orders candidates).
            self.sources.sort(key=lambda s: source_key(s[0]))
            source, node = self.sources.pop()
            for src, _junk in self.sources:
                close_swift_conn(src)
            self.used_nodes.append(node)
            src_headers = dict(
                (k.lower(), v) for k, v in
                source.getheaders())

            # Save off the source etag so that, if we lose the connection
            # and have to resume from a different node, we can be sure that
            # we have the same object (replication) or a fragment archive
            # from the same object (EC). Otherwise, if the cluster has two
            # versions of the same object, we might end up switching between
            # old and new mid-stream and giving garbage to the client.
            self.used_source_etag = src_headers.get(
                'x-object-sysmeta-ec-etag',
                src_headers.get('etag', '')).strip('"')
            return source, node
        return None, None
class GetOrHeadHandler(ResumingGetter):
    """ResumingGetter that renders the backend response as a swob Response."""

    def _make_app_iter(self, req, node, source):
        """
        Build an iterator over the contents of ``source`` (via its read
        func), suitable for use as a WSGI app_iter. Also arranges cleanup
        so garbage collection works and the source's socket gets closed.

        :param req: incoming request object
        :param source: The httplib.Response object this iterator should read
                       from.
        :param node: The node the source is reading from, for logging
                     purposes.
        """
        raw_ct = source.getheader('Content-Type')
        is_multipart = False
        boundary = "dontcare"
        if raw_ct:
            parsed_ct, ct_attrs = parse_content_type(raw_ct)
            if parsed_ct == 'multipart/byteranges':
                is_multipart = True
                # we need some MIME boundary; fortunately, the object server
                # has furnished one for us, so we'll just re-use it
                boundary = dict(ct_attrs)["boundary"]

        def tag_content_type(response_part):
            # Surface each part's Content-Type where the response-body
            # builder expects it.
            response_part["content_type"] = \
                HeaderKeyDict(response_part["headers"]).get("Content-Type")
            return response_part

        tagged_parts = (
            tag_content_type(pi)
            for pi in self._get_response_parts_iter(req, node, source))

        return document_iters_to_http_response_body(
            tagged_parts, boundary, is_multipart, self.app.logger)

    def get_working_response(self, req):
        """
        Find a good backend source and wrap it in a swob Response, or
        return None when no source was found.
        """
        source, node = self._get_source_and_node()
        if not source:
            return None

        res = Response(request=req)
        res.status = source.status
        update_headers(res, source.getheaders())
        if req.method == 'GET' and source.status in (
                HTTP_OK, HTTP_PARTIAL_CONTENT):
            res.app_iter = self._make_app_iter(req, node, source)
            # See NOTE: swift_conn at top of file about this.
            res.swift_conn = source.swift_conn
        if not res.environ:
            res.environ = {}
        res.environ['swift_x_timestamp'] = source.getheader('x-timestamp')
        res.accept_ranges = 'bytes'
        res.content_length = source.getheader('Content-Length')
        source_ct = source.getheader('Content-Type')
        if source_ct:
            res.charset = None
            res.content_type = source_ct
        return res
class NodeIter(object):
    """
    Yields nodes for a ring partition, skipping nodes that are error
    limited and stopping once the configured number of nodes has been
    produced. If a node that was already yielded subsequently becomes
    error limited, an extra node is yielded in its place.

    Note that if you're going to iterate over this concurrently from
    multiple greenthreads, you'll want to use a
    swift.common.utils.GreenthreadSafeIterator to serialize access.
    Otherwise, you may get ValueErrors from concurrent access. (You also
    may not, depending on how logging is configured, the vagaries of
    socket IO and eventlet, and the phase of the moon.)

    :param app: a proxy app
    :param ring: ring to get yield nodes from
    :param partition: ring partition to yield nodes for
    :param node_iter: optional iterable of nodes to try. Useful if you
        want to filter or reorder the nodes.
    """

    def __init__(self, app, ring, partition, node_iter=None):
        self.app = app
        self.ring = ring
        self.partition = partition

        primaries = ring.get_part_nodes(partition)
        if node_iter is None:
            node_iter = itertools.chain(
                primaries, ring.get_more_nodes(partition))
        num_primary_nodes = len(primaries)
        self.nodes_left = self.app.request_node_count(num_primary_nodes)
        self.expected_handoffs = self.nodes_left - num_primary_nodes

        # Use of list() here forcibly yanks the first N nodes (the primary
        # nodes) from node_iter, so the rest of its values are handoffs.
        self.primary_nodes = self.app.sort_nodes(
            list(itertools.islice(node_iter, num_primary_nodes)))
        self.handoff_iter = node_iter

    def __iter__(self):
        self._node_iter = self._node_gen()
        return self

    def log_handoffs(self, handoffs):
        """
        Log handoff requests if handoff logging is enabled and the
        handoff was not expected.

        We only log handoffs when we've pushed the handoff count further
        than we would normally have expected under normal circumstances,
        that is (request_node_count - num_primaries), when handoffs goes
        higher than that it means one of the primaries must have been
        skipped because of error limiting before we consumed all of our
        nodes_left.
        """
        if not self.app.log_handoffs:
            return
        extra = handoffs - self.expected_handoffs
        if extra <= 0:
            return
        self.app.logger.increment('handoff_count')
        self.app.logger.warning(
            'Handoff requested (%d)' % handoffs)
        if extra == len(self.primary_nodes):
            # all the primaries were skipped, and handoffs didn't help
            self.app.logger.increment('handoff_all_count')

    def _node_gen(self):
        for candidate in self.primary_nodes:
            if not self.app.error_limited(candidate):
                yield candidate
                # Re-check after the yield: a node that became error
                # limited while the caller held it doesn't count against
                # nodes_left, so a replacement will be produced.
                if not self.app.error_limited(candidate):
                    self.nodes_left -= 1
                    if self.nodes_left <= 0:
                        return
        handoffs = 0
        for candidate in self.handoff_iter:
            if not self.app.error_limited(candidate):
                handoffs += 1
                self.log_handoffs(handoffs)
                yield candidate
                if not self.app.error_limited(candidate):
                    self.nodes_left -= 1
                    if self.nodes_left <= 0:
                        return

    def next(self):
        return next(self._node_iter)

    def __next__(self):
        return self.next()
class Controller(object):
    """Base WSGI controller class for the proxy"""
    # Logical server type handled by this controller; subclasses override
    # it (used in logging and header-name construction below).
    server_type = 'Base'

    # Ensure these are all lowercase
    pass_through_headers = []

    def __init__(self, app):
        """
        Creates a controller attached to an application instance

        :param app: the application instance
        """
        # Filled in later from the request path; None until then.
        self.account_name = None
        self.app = app
        # Transaction id placed on backend requests ('x-trans-id'); '-'
        # until a real one is assigned.
        self.trans_id = '-'
        # Lazily-computed cache backing the allowed_methods property.
        self._allowed_methods = None
@property
def allowed_methods(self):
if self._allowed_methods is None:
self._allowed_methods = set()
all_methods = inspect.getmembers(self, predicate=inspect.ismethod)
for name, m in all_methods:
if getattr(m, 'publicly_accessible', False):
self._allowed_methods.add(name)
return self._allowed_methods
def _x_remove_headers(self):
"""
Returns a list of headers that must not be sent to the backend
:returns: a list of header
"""
return []
def transfer_headers(self, src_headers, dst_headers):
"""
Transfer legal headers from an original client request to dictionary
that will be used as headers by the backend request
:param src_headers: A dictionary of the original client request headers
:param dst_headers: A dictionary of the backend request headers
"""
st = self.server_type.lower()
x_remove = 'x-remove-%s-meta-' % st
dst_headers.update((k.lower().replace('-remove', '', 1), '')
for k in src_headers
if k.lower().startswith(x_remove) or
k.lower() in self._x_remove_headers())
dst_headers.update((k.lower(), v)
for k, v in src_headers.items()
if k.lower() in self.pass_through_headers or
is_sys_or_user_meta(st, k))
def generate_request_headers(self, orig_req=None, additional=None,
transfer=False):
"""
Create a list of headers to be used in backend requests
:param orig_req: the original request sent by the client to the proxy
:param additional: additional headers to send to the backend
:param transfer: If True, transfer headers from original client request
:returns: a dictionary of headers
"""
# Use the additional headers first so they don't overwrite the headers
# we require.
headers = HeaderKeyDict(additional) if additional else HeaderKeyDict()
if transfer:
self.transfer_headers(orig_req.headers, headers)
headers.setdefault('x-timestamp', Timestamp(time.time()).internal)
if orig_req:
referer = orig_req.as_referer()
else:
referer = ''
headers['x-trans-id'] = self.trans_id
headers['connection'] = 'close'
headers['user-agent'] = 'proxy-server %s' % os.getpid()
headers['referer'] = referer
return headers
def account_info(self, account, req=None):
"""
Get account information, and also verify that the account exists.
:param account: name of the account to get the info for
:param req: caller's HTTP request context object (optional)
:returns: tuple of (account partition, account nodes, container_count)
or (None, None, None) if it does not exist
"""
partition, nodes = self.app.account_ring.get_nodes(account)
if req:
env = getattr(req, 'environ', {})
else:
env = {}
env.setdefault('swift.infocache', {})
path_env = env.copy()
path_env['PATH_INFO'] = "/v1/%s" % (account,)
info = get_account_info(path_env, self.app)
if (not info
or not is_success(info['status'])
or not info.get('account_really_exists', True)):
return None, None, None
if info.get('container_count') is None:
container_count = 0
else:
container_count = int(info['container_count'])
return partition, nodes, container_count
def container_info(self, account, container, req=None):
"""
Get container information and thusly verify container existence.
This will also verify account existence.
:param account: account name for the container
:param container: container name to look up
:param req: caller's HTTP request context object (optional)
:returns: dict containing at least container partition ('partition'),
container nodes ('containers'), container read
acl ('read_acl'), container write acl ('write_acl'),
and container sync key ('sync_key').
Values are set to None if the container does not exist.
"""
part, nodes = self.app.container_ring.get_nodes(account, container)
if req:
env = getattr(req, 'environ', {})
else:
env = {}
env.setdefault('swift.infocache', {})
path_env = env.copy()
path_env['PATH_INFO'] = "/v1/%s/%s" % (account, container)
info = get_container_info(path_env, self.app)
if not info or not is_success(info.get('status')):
info = headers_to_container_info({}, 0)
info['partition'] = None
info['nodes'] = None
else:
info['partition'] = part
info['nodes'] = nodes
if info.get('storage_policy') is None:
info['storage_policy'] = 0
return info
    def _make_request(self, nodes, part, method, path, headers, query,
                      logger_thread_locals):
        """
        Iterates over the given node iterator, sending an HTTP request to one
        node at a time.  The first non-informational, non-server-error
        response is returned.  If no non-informational, non-server-error
        response is received from any of the nodes, returns None.

        :param nodes: an iterator of the backend server and handoff servers
        :param part: the partition number
        :param method: the method to send to the backend
        :param path: the path to send to the backend
                     (full path ends up being /<$device>/<$part>/<$path>)
        :param headers: dictionary of headers
        :param query: query string to send to the backend.
        :param logger_thread_locals: The thread local values to be set on the
                                     self.app.logger to retain transaction
                                     logging information.
        :returns: a 4-tuple of (status, reason, headers, body) from the
                  first acceptable response, or None if every node failed
        """
        self.app.logger.thread_locals = logger_thread_locals
        for node in nodes:
            try:
                start_node_timing = time.time()
                with ConnectionTimeout(self.app.conn_timeout):
                    conn = http_connect(node['ip'], node['port'],
                                        node['device'], part, method, path,
                                        headers=headers, query_string=query)
                    conn.node = node
                self.app.set_node_timing(node, time.time() - start_node_timing)
                with Timeout(self.app.node_timeout):
                    resp = conn.getresponse()
                    if not is_informational(resp.status) and \
                            not is_server_error(resp.status):
                        return resp.status, resp.reason, resp.getheaders(), \
                            resp.read()
                    elif resp.status == HTTP_INSUFFICIENT_STORAGE:
                        # Node is out of disk; error-limit it so it gets
                        # skipped for a while.
                        self.app.error_limit(node,
                                             _('ERROR Insufficient Storage'))
                    elif is_server_error(resp.status):
                        self.app.error_occurred(
                            node, _('ERROR %(status)d '
                                    'Trying to %(method)s %(path)s'
                                    ' From %(type)s Server') % {
                                        'status': resp.status,
                                        'method': method,
                                        'path': path,
                                        'type': self.server_type})
            except (Exception, Timeout):
                # Connection/read failure: log it and move on to the next
                # node in the iterator.
                self.app.exception_occurred(
                    node, self.server_type,
                    _('Trying to %(method)s %(path)s') %
                    {'method': method, 'path': path})
    def make_requests(self, req, ring, part, method, path, headers,
                      query_string='', overrides=None):
        """
        Sends an HTTP request to multiple nodes and aggregates the results.
        It attempts the primary nodes concurrently, then iterates over the
        handoff nodes as needed.

        :param req: a request sent by the client
        :param ring: the ring used for finding backend servers
        :param part: the partition number
        :param method: the method to send to the backend
        :param path: the path to send to the backend
                     (full path ends up being  /<$device>/<$part>/<$path>)
        :param headers: a list of dicts, where each dict represents one
                        backend request that should be made.
        :param query_string: optional query string to send to the backend
        :param overrides: optional return status override map used to
                          override the returned status of a request.
        :returns: a swob.Response object
        """
        start_nodes = ring.get_part_nodes(part)
        nodes = GreenthreadSafeIterator(self.app.iter_nodes(ring, part))
        # One greenthread per backend request; each one walks the shared
        # node iterator until it gets an acceptable response.
        pile = GreenAsyncPile(len(start_nodes))
        for head in headers:
            pile.spawn(self._make_request, nodes, part, method, path,
                       head, query_string, self.app.logger.thread_locals)
        response = []
        statuses = []
        for resp in pile:
            if not resp:
                continue
            response.append(resp)
            statuses.append(resp[0])
            if self.have_quorum(statuses, len(start_nodes)):
                # Enough matching statuses to decide the outcome already.
                break
        # give any pending requests *some* chance to finish
        finished_quickly = pile.waitall(self.app.post_quorum_timeout)
        for resp in finished_quickly:
            if not resp:
                continue
            response.append(resp)
            statuses.append(resp[0])
        # Pad out missing responses as 503s so zip(*response) always has
        # one entry per primary node.
        while len(response) < len(start_nodes):
            response.append((HTTP_SERVICE_UNAVAILABLE, '', '', ''))
        statuses, reasons, resp_headers, bodies = zip(*response)
        return self.best_response(req, statuses, reasons, bodies,
                                  '%s %s' % (self.server_type, req.method),
                                  overrides=overrides, headers=resp_headers)
    def _quorum_size(self, n):
        """
        Number of successful backend responses needed for the proxy to
        consider the client request successful.

        :param n: total number of backend responses/nodes being considered
        """
        return quorum_size(n)
def have_quorum(self, statuses, node_count, quorum=None):
"""
Given a list of statuses from several requests, determine if
a quorum response can already be decided.
:param statuses: list of statuses returned
:param node_count: number of nodes being queried (basically ring count)
:param quorum: number of statuses required for quorum
:returns: True or False, depending on if quorum is established
"""
if quorum is None:
quorum = self._quorum_size(node_count)
if len(statuses) >= quorum:
for hundred in (HTTP_CONTINUE, HTTP_OK, HTTP_MULTIPLE_CHOICES,
HTTP_BAD_REQUEST):
if sum(1 for s in statuses
if hundred <= s < hundred + 100) >= quorum:
return True
return False
    def best_response(self, req, statuses, reasons, bodies, server_type,
                      etag=None, headers=None, overrides=None,
                      quorum_size=None):
        """
        Given a list of responses from several servers, choose the best to
        return to the API.

        :param req: swob.Request object
        :param statuses: list of statuses returned
        :param reasons: list of reasons for each status
        :param bodies: bodies of each response
        :param server_type: type of server the responses came from
        :param etag: etag
        :param headers: headers of each response
        :param overrides: overrides to apply when lacking quorum
        :param quorum_size: quorum size to use
        :returns: swob.Response object with the correct status, body, etc.
                  set
        """
        if quorum_size is None:
            quorum_size = self._quorum_size(len(statuses))

        resp = self._compute_quorum_response(
            req, statuses, reasons, bodies, etag, headers,
            quorum_size=quorum_size)
        if overrides and not resp:
            # No quorum from the raw statuses: rewrite overridden statuses
            # (blank reason/headers/body) and try once more, remembering
            # which entries were faked so they can't be picked as the
            # representative response.
            faked_up_status_indices = set()
            transformed = []
            for (i, (status, reason, hdrs, body)) in enumerate(zip(
                    statuses, reasons, headers, bodies)):
                if status in overrides:
                    faked_up_status_indices.add(i)
                    transformed.append((overrides[status], '', '', ''))
                else:
                    transformed.append((status, reason, hdrs, body))
            statuses, reasons, headers, bodies = zip(*transformed)
            resp = self._compute_quorum_response(
                req, statuses, reasons, bodies, etag, headers,
                indices_to_avoid=faked_up_status_indices,
                quorum_size=quorum_size)

        if not resp:
            # Still no quorum: give up with a 503.
            resp = HTTPServiceUnavailable(request=req)
            self.app.logger.error(_('%(type)s returning 503 for %(statuses)s'),
                                  {'type': server_type, 'statuses': statuses})

        return resp
    def _compute_quorum_response(self, req, statuses, reasons, bodies, etag,
                                 headers, quorum_size, indices_to_avoid=()):
        """
        Build a response from the first status class (2xx, then 3xx, then
        4xx) that reaches quorum, using the highest status in that class
        whose index is not in indices_to_avoid.

        :returns: a swob response object, or None if no class has quorum
        """
        if not statuses:
            return None
        for hundred in (HTTP_OK, HTTP_MULTIPLE_CHOICES, HTTP_BAD_REQUEST):
            hstatuses = \
                [(i, s) for i, s in enumerate(statuses)
                 if hundred <= s < hundred + 100]
            if len(hstatuses) >= quorum_size:
                try:
                    # Pick the highest status in the class (ties go to the
                    # later index) as the representative response.
                    status_index, status = max(
                        ((i, stat) for i, stat in hstatuses
                         if i not in indices_to_avoid),
                        key=operator.itemgetter(1))
                except ValueError:
                    # All statuses were indices to avoid
                    continue
                resp = status_map[status](request=req)
                resp.status = '%s %s' % (status, reasons[status_index])
                resp.body = bodies[status_index]
                if headers:
                    update_headers(resp, headers[status_index])
                if etag:
                    resp.headers['etag'] = etag.strip('"')
                return resp
        return None
    @public
    def GET(self, req):
        """
        Handler for HTTP GET requests.

        :param req: The client request
        :returns: the response to the client
        """
        # Shared GET/HEAD handling lives in GETorHEAD.
        return self.GETorHEAD(req)
    @public
    def HEAD(self, req):
        """
        Handler for HTTP HEAD requests.

        :param req: The client request
        :returns: the response to the client
        """
        # HEAD is handled by the same code path as GET.
        return self.GETorHEAD(req)
def autocreate_account(self, req, account):
"""
Autocreate an account
:param req: request leading to this autocreate
:param account: the unquoted account name
"""
partition, nodes = self.app.account_ring.get_nodes(account)
path = '/%s' % account
headers = {'X-Timestamp': Timestamp(time.time()).internal,
'X-Trans-Id': self.trans_id,
'Connection': 'close'}
# transfer any x-account-sysmeta headers from original request
# to the autocreate PUT
headers.update((k, v)
for k, v in req.headers.items()
if is_sys_meta('account', k))
resp = self.make_requests(Request.blank('/v1' + path),
self.app.account_ring, partition, 'PUT',
path, [headers] * len(nodes))
if is_success(resp.status_int):
self.app.logger.info(_('autocreate account %r'), path)
clear_info_cache(self.app, req.environ, account)
else:
self.app.logger.warning(_('Could not autocreate account %r'),
path)
def GETorHEAD_base(self, req, server_type, node_iter, partition, path,
concurrency=1, client_chunk_size=None):
"""
Base handler for HTTP GET or HEAD requests.
:param req: swob.Request object
:param server_type: server type used in logging
:param node_iter: an iterator to obtain nodes from
:param partition: partition
:param path: path for the request
:param concurrency: number of requests to run concurrently
:param client_chunk_size: chunk size for response body iterator
:returns: swob.Response object
"""
backend_headers = self.generate_request_headers(
req, additional=req.headers)
handler = GetOrHeadHandler(self.app, req, self.server_type, node_iter,
partition, path, backend_headers,
concurrency,
client_chunk_size=client_chunk_size)
res = handler.get_working_response(req)
if not res:
res = self.best_response(
req, handler.statuses, handler.reasons, handler.bodies,
'%s %s' % (server_type, req.method),
headers=handler.source_headers)
# if a backend policy index is present in resp headers, translate it
# here with the friendly policy name
if 'X-Backend-Storage-Policy-Index' in res.headers and \
is_success(res.status_int):
policy = \
POLICIES.get_by_index(
res.headers['X-Backend-Storage-Policy-Index'])
if policy:
res.headers['X-Storage-Policy'] = policy.name
else:
self.app.logger.error(
'Could not translate %s (%r) from %r to policy',
'X-Backend-Storage-Policy-Index',
res.headers['X-Backend-Storage-Policy-Index'], path)
return res
def is_origin_allowed(self, cors_info, origin):
"""
Is the given Origin allowed to make requests to this resource
:param cors_info: the resource's CORS related metadata headers
:param origin: the origin making the request
:return: True or False
"""
allowed_origins = set()
if cors_info.get('allow_origin'):
allowed_origins.update(
[a.strip()
for a in cors_info['allow_origin'].split(' ')
if a.strip()])
if self.app.cors_allow_origin:
allowed_origins.update(self.app.cors_allow_origin)
return origin in allowed_origins or '*' in allowed_origins
    @public
    def OPTIONS(self, req):
        """
        Base handler for OPTIONS requests

        :param req: swob.Request object
        :returns: swob.Response object
        """
        # Prepare the default response
        headers = {'Allow': ', '.join(self.allowed_methods)}
        resp = Response(status=200, request=req, headers=headers)

        # If this isn't a CORS pre-flight request then return now
        req_origin_value = req.headers.get('Origin', None)
        if not req_origin_value:
            return resp

        # This is a CORS preflight request so check it's allowed
        try:
            container_info = \
                self.container_info(self.account_name,
                                    self.container_name, req)
        except AttributeError:
            # This should only happen for requests to the Account. A future
            # change could allow CORS requests to the Account level as well.
            return resp

        cors = container_info.get('cors', {})

        # If the CORS origin isn't allowed return a 401
        # (also 401 when the requested method isn't one we expose).
        if not self.is_origin_allowed(cors, req_origin_value) or (
                req.headers.get('Access-Control-Request-Method') not in
                self.allowed_methods):
            resp.status = HTTP_UNAUTHORIZED
            return resp

        # Allow all headers requested in the request. The CORS
        # specification does leave the door open for this, as mentioned in
        # http://www.w3.org/TR/cors/#resource-preflight-requests
        # Note: Since the list of headers can be unbounded
        # simply returning headers can be enough.
        allow_headers = set()
        if req.headers.get('Access-Control-Request-Headers'):
            allow_headers.update(
                list_from_csv(req.headers['Access-Control-Request-Headers']))

        # Populate the response with the CORS preflight headers
        if cors.get('allow_origin') and \
                cors.get('allow_origin').strip() == '*':
            headers['access-control-allow-origin'] = '*'
        else:
            # Echo back the specific requesting origin.
            headers['access-control-allow-origin'] = req_origin_value
        if cors.get('max_age') is not None:
            headers['access-control-max-age'] = cors.get('max_age')
        headers['access-control-allow-methods'] = \
            ', '.join(self.allowed_methods)
        if allow_headers:
            headers['access-control-allow-headers'] = ', '.join(allow_headers)
        resp.headers = headers

        return resp
| 41.188729 | 79 | 0.585652 |
ace3846545f18e31c091c297eff450e5a9b03c27 | 417 | py | Python | gems/migrations/0014_gemsmeta_background.py | 0xOddrey/DroppingGemsNFT | 536c0c3f0f53ac9437d88625709609037306848e | [
"MIT"
] | null | null | null | gems/migrations/0014_gemsmeta_background.py | 0xOddrey/DroppingGemsNFT | 536c0c3f0f53ac9437d88625709609037306848e | [
"MIT"
] | null | null | null | gems/migrations/0014_gemsmeta_background.py | 0xOddrey/DroppingGemsNFT | 536c0c3f0f53ac9437d88625709609037306848e | [
"MIT"
] | null | null | null | # Generated by Django 3.2.10 on 2022-02-08 02:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional integer column ``background`` to the ``GemsMeta`` model."""
    dependencies = [
        ('gems', '0013_twitterconnection_user_id'),
    ]
    operations = [
        migrations.AddField(
            model_name='gemsmeta',
            name='background',
            # Nullable with default 0 so existing rows need no backfill.
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
    ]
| 21.947368 | 72 | 0.613909 |
ace3846e08d19325f7042ffceb4422918881421c | 2,041 | py | Python | gfootball/play_game.py | mahi97/football | 18d51cb110034ce7080b2ccce14a2539d2a04af3 | [
"Apache-2.0"
] | 3 | 2019-06-20T05:47:02.000Z | 2019-08-25T05:04:11.000Z | gfootball/play_game.py | AzharMithani/football | 0f09bcb8b3d48ac31987e13739e21a58ef0ca405 | [
"Apache-2.0"
] | 1 | 2019-06-09T10:06:36.000Z | 2019-06-09T10:06:36.000Z | gfootball/play_game.py | nczempin/gfootball | 617e9cb6d48b4ac7187b9b3de68bd4ab44ea528e | [
"Apache-2.0"
] | 1 | 2022-03-10T14:48:07.000Z | 2022-03-10T14:48:07.000Z | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script allowing to play the game by multiple players."""
from gfootball.env import football_env
from gfootball.env import config
from absl import app
from absl import flags
FLAGS = flags.FLAGS
# For Impala tfhub modules, use tfhub_impala=[module_dir], like this:
# --home_players=tfhub_impala=/usr/local/google/home/michaz/model_ine
flags.DEFINE_string(
'home_players', 'keyboard',
'Comma separated list of home players, single keyboard player by default')
flags.DEFINE_string('away_players', '', 'List of away players')
flags.DEFINE_string('level', '', 'Level to play')
flags.DEFINE_enum('action_set', 'full', ['default', 'full'], 'Action set')
flags.DEFINE_bool('real_time', True,
'If true, environment will slow down so humans can play.')
def main(_):
  """Play the configured level until the user interrupts with Ctrl+C."""
  # Translate the command line flags into an environment configuration.
  settings = {
      'action_set': FLAGS.action_set,
      'away_players':
          FLAGS.away_players.split(',') if FLAGS.away_players else '',
      'dump_full_episodes': True,
      'home_players':
          FLAGS.home_players.split(',') if FLAGS.home_players else '',
      'real_time': FLAGS.real_time,
      'render': True
  }
  cfg = config.Config(settings)
  if FLAGS.level:
    cfg['level'] = FLAGS.level
  env = football_env.FootballEnv(cfg)
  env.reset(cfg)
  try:
    # Run episodes back to back, resetting whenever one finishes.
    while True:
      _observation, _reward, done, _info = env.step(None)
      if done:
        env.reset(cfg)
  except KeyboardInterrupt:
    # Persist a trace of the interrupted session before terminating.
    env.write_dump('shutdown')
    exit(1)


if __name__ == '__main__':
  app.run(main)
| 31.4 | 78 | 0.700147 |
ace3867b069f016c1e7f2c2690f60b95bc13435c | 830 | py | Python | paths.py | BeastImran/personal-website | 2376f9c01b0af62455a9dd608889f75a3e42d9b6 | [
"CC-BY-3.0"
] | 4 | 2021-07-21T19:30:22.000Z | 2021-12-26T14:50:51.000Z | paths.py | BeastImran/personal-website | 2376f9c01b0af62455a9dd608889f75a3e42d9b6 | [
"CC-BY-3.0"
] | null | null | null | paths.py | BeastImran/personal-website | 2376f9c01b0af62455a9dd608889f75a3e42d9b6 | [
"CC-BY-3.0"
] | null | null | null | minify = '/min/'
only_domain = '192.168.73.227:8000'
only_www_domain = 'www.' + only_domain
domain = 'http://' + only_domain
www_domain = "http://" + only_www_domain
paths = {
"html": {
"index": domain + '/templates' + minify + 'index.html',
"resume": domain + '/templates' + minify + 'resume.html',
"activities": domain + '/templates' + minify + 'activities.html',
"contact": domain + '/templates' + minify + 'contact.html',
},
"css": {
"main_css": domain + '/static/css' + minify + 'personal_site.css',
},
"js": {
"main_js": domain + '/static/js' + minify + 'personal_site.js',
},
"videos": domain + '/static/videos/',
"images": domain + '/static/images/',
"documents": domain + '/static/documents/',
"sitemap": domain + '/sitemap.xml',
}
| 33.2 | 74 | 0.568675 |
ace38746a001de2c29c73a9ac7b40c737c45272e | 14,664 | py | Python | raymon/profiling/profiles.py | pbonte/raymon | 83912d7a5ff22d61289688828169a7178fa34a2d | [
"MIT"
] | null | null | null | raymon/profiling/profiles.py | pbonte/raymon | 83912d7a5ff22d61289688828169a7178fa34a2d | [
"MIT"
] | null | null | null | raymon/profiling/profiles.py | pbonte/raymon | 83912d7a5ff22d61289688828169a7178fa34a2d | [
"MIT"
] | null | null | null | import json
import html
import json
import tempfile
import shutil
import webbrowser
import numbers
from pydoc import locate
from pathlib import Path
import pkg_resources
import raymon
from raymon.globals import Buildable, ProfileStateException, Serializable
from raymon.profiling.components import Component, InputComponent, OutputComponent, ActualComponent, EvalComponent
from raymon.profiling.reducers import Reducer
from raymon.out import NoOutput, nullcontext
class ModelProfile(Serializable, Buildable):
_attrs = ["name", "version", "components"]
def __init__(
self,
name="default",
version="0.0.0",
components={},
reducers={},
):
self._name = None
self._version = None
self._components = {}
self._reducers = {}
self.name = str(name)
self.version = str(version)
self.components = components
self.reducers = reducers
"""Serializable interface"""
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if not isinstance(value, str):
raise ValueError(f"Profile name should be a string")
if "@" in value:
raise ValueError(f"Profile name should not include '@'")
self._name = value.lower()
@property
def version(self):
return self._version
@version.setter
def version(self, value):
if not isinstance(value, str):
raise ValueError(f"Profile version should be a string")
self._version = value
@property
def components(self):
return self._components
@components.setter
def components(self, value):
if isinstance(value, list) and all(isinstance(component, Component) for component in value):
# Convert to dict
self._components = {c.name: c for c in value}
elif isinstance(value, dict) and all(isinstance(component, Component) for component in value.values()):
self._components = value
else:
raise ValueError(f"components must be a list[Component] or dict[str, Component]")
@property
def reducers(self):
return self._reducers
@reducers.setter
def reducers(self, value):
if isinstance(value, list) and all(isinstance(reducers, Reducer) for reducers in value):
# Convert to dict
self._reducers = {c.name: c for c in value}
elif isinstance(value, dict) and all(isinstance(reducers, Reducer) for reducers in value.values()):
self._reducers = value
else:
raise ValueError(f"components must be a list[Reducer] or dict[str, Reducer]")
@property
def group_idfr(self):
return f"{self.name}@{self.version}".lower()
def to_jcr(self):
jcr = {
"name": self.name,
"version": self.version,
}
ser_comps = {}
for component in self.components.values():
ser_comps[component.name] = component.to_jcr()
jcr["components"] = ser_comps
# likewise for reducers
ser_reducers = {}
for reducer in self.reducers.values():
ser_reducers[reducer.name] = reducer.to_jcr()
jcr["reducers"] = ser_reducers
return jcr
@classmethod
def from_jcr(cls, jcr, mock_extractors=False):
name = jcr["name"]
version = jcr["version"]
components = {}
for comp_dict in jcr["components"].values():
component = Component.from_jcr(comp_dict, mock_extractor=mock_extractors)
components[component.name] = component
# TODO: likewise for reducers
reducers = {}
for reducer_dict in jcr["reducers"].values():
reducer = Reducer.from_jcr(reducer_dict)
reducers[reducer.name] = reducer
return cls(name=name, version=version, components=components, reducers=reducers)
def save(self, dir):
dir = Path(dir)
fpath = dir / f"{self.name}@{self.version}.json"
with open(fpath, "w") as f:
json.dump(self.to_jcr(), f, indent=4)
@classmethod
def load(cls, fpath):
with open(fpath, "r") as f:
jcr = json.load(f)
return cls.from_jcr(jcr)
"""Buildable Interface"""
def build(self, input=None, output=None, actual=None, domains={}, silent=True):
if silent:
ctx_mgr = NoOutput()
else:
ctx_mgr = nullcontext()
# Build the schema
with ctx_mgr:
component_values = {}
for component in self.components.values():
print(component.name)
comp_domain = domains.get(component.name, None)
if isinstance(component, InputComponent):
values = component.build(data=input, domain=comp_domain)
elif isinstance(component, OutputComponent):
values = component.build(data=output, domain=comp_domain)
elif isinstance(component, ActualComponent):
values = component.build(data=actual, domain=comp_domain)
elif isinstance(component, EvalComponent):
values = component.build(data=[output, actual])
else:
raise ProfileStateException("Unknown Component type: ", type(component))
component_values[component.name] = values
for reducer in self.reducers.values():
reducer.build(data=component_values)
def is_built(self):
return all(component.is_built() for component in self.components.values())
"""Other Methods"""
def __str__(self):
return f'ModelProfile(name="{self.name}", version="{self.version}"'
def set_group(self, tags):
for component_tag in tags:
component_tag.group = self.group_idfr
def drop_component(self, name, comp_type="input_component"):
new_comps = [c for c in getattr(self, comp_type).values() if c.name != name]
setattr(self, comp_type, new_comps)
def flatten_tags(self, tags):
tags_dict = {}
for tag in tags:
tags_dict[tag["name"]] = tag["value"]
return tags_dict
def _validate_simple(self, data, components, convert_json=True):
tags = []
if self.is_built():
for component in components:
component_tags = component.validate(data=data)
self.set_group(component_tags)
tags.extend(component_tags)
else:
raise ProfileStateException(
f"Cannot check data on an unbuilt profile. Check whether all components are built."
)
if convert_json:
tags = [t.to_jcr() for t in tags]
return tags
def validate_input(self, input, convert_json=True):
components = [c for c in self.components.values() if isinstance(c, InputComponent)]
return self._validate_simple(data=input, components=components, convert_json=convert_json)
def validate_output(self, output, convert_json=True):
components = [c for c in self.components.values() if isinstance(c, OutputComponent)]
return self._validate_simple(data=output, components=components, convert_json=convert_json)
def validate_actual(self, actual, convert_json=True):
components = [c for c in self.components.values() if isinstance(c, ActualComponent)]
return self._validate_simple(data=actual, components=components, convert_json=convert_json)
def validate_eval(self, output, actual, convert_json=True):
tags = []
components = [c for c in self.components.values() if isinstance(c, EvalComponent)]
if self.is_built():
for component in components:
component_tags = component.validate(data=(output, actual))
print(component_tags)
self.set_group(component_tags)
tags.extend(component_tags)
else:
raise ProfileStateException(
f"Cannot check data on an unbuilt profile. Check whether all components are built."
)
if convert_json:
tags = [t.to_jcr() for t in tags]
return tags
def contrast(self, other, thresholds={}):
if not self.is_built():
raise ProfileStateException("Profile 'self' is not built.")
if not other.is_built():
raise ProfileStateException("Profile 'other' is not built.")
component_thresholds = thresholds.get("components", {})
reducer_thresholds = thresholds.get("reducers", {})
report = {}
for component in self.components.values():
comp_thresholds = component_thresholds.get(component.name, {})
comp_report = component.contrast(
other.components[component.name],
thresholds=comp_thresholds,
)
report[component.name] = comp_report
reducer_reports = {}
for reducer in self.reducers.values():
red_threshold = reducer_thresholds.get(reducer.name, {})
red_report = reducer.contrast(
other.reducers[reducer.name], components=self.components, thresholds=red_threshold
)
reducer_reports[reducer.name] = red_report
jcr = {}
jcr["reference"] = self.to_jcr()
jcr["alternativeA"] = other.to_jcr()
jcr["health_reports"] = report
jcr["reducer_reports"] = reducer_reports
return jcr
def contrast_alternatives(self, alternativeA, alternativeB, thresholds={}):
if not self.is_built():
raise ProfileStateException("Profile 'self' is not built.")
if not alternativeA.is_built():
raise ProfileStateException("Profile 'alternativeA' is not built.")
if not alternativeB.is_built():
raise ProfileStateException("Profile 'alternativeB' is not built.")
component_thresholds = thresholds.get("components", {})
reducer_thresholds = thresholds.get("reducers", {})
report = {}
for component in self.components.values():
print(component.name)
comp_thresholds = component_thresholds.get(component.name, {})
comp_report = alternativeA.components[component.name].contrast(
alternativeB.components[component.name],
thresholds=comp_thresholds,
)
report[component.name] = comp_report
reducer_reports = {}
for reducer in self.reducers.values():
red_threshold = reducer_thresholds.get(reducer.name, {})
red_report = alternativeA.reducers[reducer.name].contrast(
alternativeB.reducers[reducer.name], thresholds=red_threshold
)
reducer_reports[reducer.name] = red_report
jcr = {}
jcr["reference"] = self.to_jcr()
jcr["alternativeA"] = alternativeA.to_jcr()
jcr["alternativeB"] = alternativeB.to_jcr()
jcr["health_reports"] = report
jcr["reducer_reports"] = reducer_reports
return jcr
def view(self, poi=None, mode="iframe", outdir=None, silent=True):
if silent:
ctx_mgr = NoOutput()
else:
ctx_mgr = nullcontext()
# Build the schema
with ctx_mgr:
if poi is not None:
poi_dict = self.flatten_tags(self.validate_input(poi))
else:
poi_dict = {}
jsonescaped = html.escape(json.dumps(self.to_jcr()))
poiescaped = html.escape(json.dumps(poi_dict))
htmlstr = f"""
<meta charset="utf-8">
<title>Raymon view</title>
<script src="./raymon.min.js"></script>
<link href="https://unpkg.com/@primer/css@17.0.1/dist/primer.css" rel="stylesheet" />
<body>
<raymon-view-schema-str profile="{jsonescaped}" poi="{poiescaped}"></raymon-view-schema-str>
</body>
"""
return self._build_page(htmlstr=htmlstr, mode=mode, outdir=outdir)
def view_contrast(self, other, mode="external", thresholds={}, outdir=None, silent=True):
if silent:
ctx_mgr = NoOutput()
else:
ctx_mgr = nullcontext()
# Build the schema
with ctx_mgr:
jcr = self.contrast(other, thresholds=thresholds)
jsonescaped = html.escape(json.dumps(jcr))
htmlstr = f"""
<meta charset="utf-8">
<title>Raymon contrast</title>
<script src="./raymon.min.js"></script>
<link href="https://unpkg.com/@primer/css@17.0.1/dist/primer.css" rel="stylesheet" />
<body>
<raymon-compare-schema-str comparison="{jsonescaped}"></raymon-compare-schema-str>
</body>
"""
return self._build_page(htmlstr=htmlstr, mode=mode, outdir=outdir)
def view_contrast_alternatives(
self, alternativeA, alternativeB, mode="external", thresholds={}, outdir=None, silent=True
):
if silent:
ctx_mgr = NoOutput()
else:
ctx_mgr = nullcontext()
# Build the schema
with ctx_mgr:
jcr = self.contrast_alternatives(alternativeA, alternativeB, thresholds=thresholds)
jsonescaped = html.escape(json.dumps(jcr))
htmlstr = f"""
<meta charset="utf-8">
<title>Raymon contrast</title>
<script src="./raymon.min.js"></script>
<link href="https://unpkg.com/@primer/css@17.0.1/dist/primer.css" rel="stylesheet" />
<body>
<raymon-compare-schema-str comparison="{jsonescaped}"></raymon-compare-schema-str>
</body>
"""
return self._build_page(htmlstr=htmlstr, mode=mode, outdir=outdir)
def _build_page(self, htmlstr, mode="iframe", outdir=None):
tmp_dir = Path(tempfile.mkdtemp(dir=outdir, prefix=".tmp"))
shutil.copy(src=pkg_resources.resource_filename("raymon", "frontend/raymon.min.js"), dst=tmp_dir)
shutil.copy(src=pkg_resources.resource_filename("raymon", "frontend/raymon.min.js.map"), dst=tmp_dir)
html_file = tmp_dir / "schema.html"
with open(html_file, "w") as f:
f.write(htmlstr)
if mode == "external":
webbrowser.open_new_tab("file://" + str(html_file))
return html_file
| 38.589474 | 114 | 0.602769 |
ace38820c3b802d937d1f10dbfc5eaf1b1715fd6 | 9,217 | py | Python | src/_pytask/collect.py | pytask-dev/pytask | b6769b48abda44c6261b9a7b58865f8844423c13 | [
"MIT"
] | 41 | 2020-07-24T15:19:19.000Z | 2022-03-17T17:40:57.000Z | src/_pytask/collect.py | pytask-dev/pytask | b6769b48abda44c6261b9a7b58865f8844423c13 | [
"MIT"
] | 240 | 2020-06-26T21:37:49.000Z | 2022-03-31T08:56:56.000Z | src/_pytask/collect.py | pytask-dev/pytask | b6769b48abda44c6261b9a7b58865f8844423c13 | [
"MIT"
] | null | null | null | """Implement functionality to collect tasks."""
import importlib
import inspect
import os
import sys
import time
from pathlib import Path
from typing import Generator
from typing import List
from _pytask.config import hookimpl
from _pytask.config import IS_FILE_SYSTEM_CASE_SENSITIVE
from _pytask.console import console
from _pytask.enums import ColorCode
from _pytask.exceptions import CollectionError
from _pytask.mark_utils import has_marker
from _pytask.nodes import create_task_name
from _pytask.nodes import FilePathNode
from _pytask.nodes import PythonFunctionTask
from _pytask.path import find_case_sensitive_path
from _pytask.report import CollectionReport
from _pytask.shared import reduce_node_name
from _pytask.traceback import render_exc_info
@hookimpl
def pytask_collect(session):
    """Collect tasks.

    Orchestrates the whole collection phase: gather tasks from the configured
    paths, let plugins modify the task list, then log the result.
    """
    session.collection_start = time.time()
    _collect_from_paths(session)
    try:
        session.hook.pytask_collect_modify_tasks(session=session, tasks=session.tasks)
    except Exception:
        # A failing modify-hook becomes a collection report instead of aborting.
        report = CollectionReport.from_exception(exc_info=sys.exc_info())
        session.collection_reports.append(report)
    session.hook.pytask_collect_log(
        session=session, reports=session.collection_reports, tasks=session.tasks
    )
    return True
"""Collect tasks from paths.
Go through all paths, check if the path is ignored, and collect the file if not.
"""
for path in _not_ignored_paths(session.config["paths"], session):
reports = session.hook.pytask_collect_file_protocol(
session=session, path=path, reports=session.collection_reports
)
if reports is not None:
session.collection_reports.extend(reports)
session.tasks.extend(i.node for i in reports if i.successful)
@hookimpl
def pytask_ignore_collect(path, config):
"""Ignore a path during the collection."""
is_ignored = any(path.match(pattern) for pattern in config["ignore"])
return is_ignored
@hookimpl
def pytask_collect_file_protocol(session, path, reports):
try:
reports = session.hook.pytask_collect_file(
session=session, path=path, reports=reports
)
except Exception:
node = FilePathNode.from_path(path)
reports = [CollectionReport.from_exception(node=node, exc_info=sys.exc_info())]
session.hook.pytask_collect_file_log(session=session, reports=reports)
return reports
@hookimpl
def pytask_collect_file(session, path, reports):
"""Collect a file."""
if any(path.match(pattern) for pattern in session.config["task_files"]):
spec = importlib.util.spec_from_file_location(path.stem, str(path))
if spec is None:
raise ImportError(f"Can't find module '{path.stem}' at location {path}.")
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
collected_reports = []
for name, obj in inspect.getmembers(mod):
if has_marker(obj, "parametrize"):
names_and_objects = session.hook.pytask_parametrize_task(
session=session, name=name, obj=obj
)
else:
names_and_objects = [(name, obj)]
for name_, obj_ in names_and_objects:
report = session.hook.pytask_collect_task_protocol(
session=session, reports=reports, path=path, name=name_, obj=obj_
)
if report is not None:
collected_reports.append(report)
return collected_reports
@hookimpl
def pytask_collect_task_protocol(session, path, name, obj):
"""Start protocol for collecting a task."""
try:
session.hook.pytask_collect_task_setup(
session=session, path=path, name=name, obj=obj
)
task = session.hook.pytask_collect_task(
session=session, path=path, name=name, obj=obj
)
if task is not None:
session.hook.pytask_collect_task_teardown(session=session, task=task)
return CollectionReport.from_node(task)
except Exception:
task = PythonFunctionTask(name, create_task_name(path, name), path, None)
return CollectionReport.from_exception(exc_info=sys.exc_info(), node=task)
@hookimpl(trylast=True)
def pytask_collect_task(session, path, name, obj):
"""Collect a task which is a function.
There is some discussion on how to detect functions in this `thread
<https://stackoverflow.com/q/624926/7523785>`_. :class:`types.FunctionType` does not
detect built-ins which is not possible anyway.
"""
if name.startswith("task_") and callable(obj):
return PythonFunctionTask.from_path_name_function_session(
path, name, obj, session
)
_TEMPLATE_ERROR = (
"The provided path of the dependency/product in the marker is {}, but the path of "
"the file on disk is {}. Case-sensitive file systems would raise an error.\n\n"
"Please, align the names to ensure reproducibility on case-sensitive file systems "
"(often Linux or macOS) or disable this error with 'check_casing_of_paths = false'."
)
@hookimpl(trylast=True)
def pytask_collect_node(session, path, node):
"""Collect a node of a task as a :class:`pytask.nodes.FilePathNode`.
Strings are assumed to be paths. This might be a strict assumption, but since this
hook is executed at last and possible errors will be shown, it seems reasonable and
unproblematic.
``trylast=True`` might be necessary if other plugins try to parse strings themselves
like a plugin for downloading files which depends on URLs given as strings.
Parameters
----------
session : _pytask.session.Session
The session.
path : Union[str, pathlib.Path]
The path to file where the task and node are specified.
node : Union[str, pathlib.Path]
The value of the node which can be a str, a path or anything which cannot be
handled by this function.
"""
if isinstance(node, str):
node = Path(node)
if isinstance(node, Path):
if not node.is_absolute():
node = path.parent.joinpath(node)
# ``normpath`` removes ``../`` from the path which is necessary for the casing
# check which will fail since ``.resolves()`` also normalizes a path.
node = Path(os.path.normpath(node))
if (
not IS_FILE_SYSTEM_CASE_SENSITIVE
and session.config["check_casing_of_paths"]
and sys.platform == "win32"
):
case_sensitive_path = find_case_sensitive_path(node, "win32")
if str(node) != str(case_sensitive_path):
raise ValueError(_TEMPLATE_ERROR.format(node, case_sensitive_path))
return FilePathNode.from_path(node)
def _not_ignored_paths(paths: List[Path], session) -> Generator[Path, None, None]:
"""Traverse paths and yield not ignored paths.
The paths passed by the user can either point to files or directories. For
directories, all subsequent files and folders are considered, but one level after
another, so that files of ignored folders are not checked.
Parameters
----------
paths : List[pathlib.Path]
List of paths from which tasks are collected.
session : _pytask.session.Session
The session.
Yields
------
path : pathlib.Path
A path which is not ignored.
"""
for path in paths:
if not session.hook.pytask_ignore_collect(path=path, config=session.config):
if path.is_dir():
files_in_dir = path.iterdir()
yield from _not_ignored_paths(files_in_dir, session)
else:
yield path
@hookimpl
def pytask_collect_log(session, reports, tasks):
"""Log collection."""
session.collection_end = time.time()
console.print(f"Collected {len(tasks)} task{'' if len(tasks) == 1 else 's'}.")
failed_reports = [i for i in reports if not i.successful]
if failed_reports:
console.print()
console.rule(
f"[{ColorCode.FAILED}]Failures during collection", style=ColorCode.FAILED
)
for report in failed_reports:
if report.node is None:
header = "Error"
else:
short_name = reduce_node_name(report.node, session.config["paths"])
header = f"Could not collect {short_name}"
console.rule(f"[{ColorCode.FAILED}]{header}", style=ColorCode.FAILED)
console.print()
console.print(
render_exc_info(*report.exc_info, session.config["show_locals"])
)
console.print()
session.hook.pytask_log_session_footer(
session=session,
infos=[
(len(tasks), "collected", ColorCode.SUCCESS),
(len(failed_reports), "failed", ColorCode.FAILED),
],
duration=round(session.collection_end - session.collection_start, 2),
color=ColorCode.FAILED if len(failed_reports) else ColorCode.SUCCESS,
)
raise CollectionError
| 34.01107 | 88 | 0.668005 |
ace3887468878ec5b60fa1c866b78108d9843fb3 | 4,819 | py | Python | generator/lib/components.py | tedteng/ops-toolbelt | 6b84e8425ccb8a7d2be19f813b34cbd58c565a65 | [
"Apache-2.0"
] | null | null | null | generator/lib/components.py | tedteng/ops-toolbelt | 6b84e8425ccb8a7d2be19f813b34cbd58c565a65 | [
"Apache-2.0"
] | null | null | null | generator/lib/components.py | tedteng/ops-toolbelt | 6b84e8425ccb8a7d2be19f813b34cbd58c565a65 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import shlex
from lib import validation
class BaseComponentConfig:
    """Common carrier for a component's name and human-readable description."""

    def __init__(self, name, info):
        self.name = name
        self.info = info

    def get_info(self):
        """Return the description text of this component."""
        return self.info

    def get_name(self):
        """Return the component name."""
        return self.name

    def get_provided_apps(self):
        """By default a component provides exactly the app it is named after."""
        return self.name
class StringComponentConfig(BaseComponentConfig):
    """Config given as a bare string; the string serves as both name and info."""
    def __init__(self, config):
        # Raises TypeError for non-string configs (ComponentConfigParser relies
        # on that to try the next registered config class).
        validation.ConfigValidator.validate_str(__class__, config)
        super().__init__(config, config)
class DictComponentConfig(BaseComponentConfig):
    """Config given as a mapping with a mandatory name and optional info/provides."""

    required_keys = [
        {"key": "name", "types":(str)},
    ]

    optional_keys = [
        {"key": "info", "types":(str, type(None))},
        {"key": "provides", "types":(str, list, type(None))}
    ]

    def __init__(self, config):
        validation.ConfigValidator.validate_dict(__class__, config)
        # Fall back to the name when no dedicated info text is configured.
        info = config.get("info") if "info" in config else config["name"]
        super().__init__(config["name"], info)
        self.provides = config.get("provides")

    def get_provided_apps(self):
        """Return the configured 'provides' entry (str, list or None)."""
        return self.provides
class ToolConfig(DictComponentConfig):
    """Config for a tool fetched from a source location, optionally versioned."""

    required_keys = [
        {"key": "from", "types": (str)}
    ]

    optional_keys = [
        {"key": "to", "types": (str)},
        {"key": "command", "types": (dict)},
        {"key": "version", "types": (str)},
    ]

    def __init__(self, config):
        super().__init__(config)
        # 'from' is a Python keyword, hence the leading underscore.
        self._from = config["from"]
        self.to = config.get("to")
        self.command = config.get("command")
        self.version = config.get("version")

    def get_to(self):
        """Destination path with ``{version}`` expanded; None when not configured."""
        if self.to is None:
            return None
        if self.version is None:
            return self.to
        return self.to.format(version=self.get_version())

    def get_from(self):
        """Source location with ``{version}`` expanded when a version is set."""
        if self.version is None:
            return self._from
        return self._from.format(version=self.get_version())

    def get_command(self):
        """Return the optional command mapping for this tool."""
        return self.command

    def get_version(self):
        """Return the configured version string (or None)."""
        return self.version
class BashCommandConfig(DictComponentConfig):
    """Config describing an arbitrary bash command to execute."""

    required_keys = [
        {"key": "command", "types": (str)}
    ]

    def __init__(self, config):
        super().__init__(config)
        self.command = config["command"]

    def get_command(self):
        """Return the bash command string."""
        return self.command
class AptRepoConfig(DictComponentConfig):
    """Config for an additional apt repository plus its signing key."""

    required_keys = [
        {"key": "url", "types": (str)},
        {"key": "key-url", "types": (str)}
    ]

    optional_keys = [
        {"key": "release-prefix", "types": (str)}
    ]

    def __init__(self, config):
        super().__init__(config)
        self.url = config["url"]
        self.key_url = config["key-url"]
        # Empty prefix by default so callers can concatenate unconditionally.
        self.release_prefix = config.get("release-prefix", "")

    def get_release_prefix(self):
        """Return the release prefix ('' when not configured)."""
        return self.release_prefix

    def get_repo_url(self):
        """Return the repository URL."""
        return self.url

    def get_key_url(self):
        """Return the URL of the repository signing key."""
        return self.key_url
class ComponentConfigParser:
    """Turns raw config entries into component config objects.

    The parser is constructed with a subset of the registered config classes;
    each raw entry is tried against every class in the given order.
    """
    # All config classes a parser instance may be constructed with.
    registered_classes = [StringComponentConfig, DictComponentConfig, BashCommandConfig, ToolConfig, AptRepoConfig]
    def __init__(self, *argv):
        for component_class in argv:
            if component_class not in ComponentConfigParser.registered_classes:
                raise TypeError("Unsupported class for components: {}.".format(component_class))
        self.component_classes = argv
    def parse_components(self, component_configs):
        """Parse each raw config into an instance of a matching config class.

        Every class in ``self.component_classes`` is tried in order and there
        is no early break, so the LAST class that accepts the config wins
        (pass more specific classes later). If no class accepts the config,
        the most recent TypeError is re-raised.
        """
        components = []
        for config in component_configs:
            lastErr = None
            component = None
            for clazz in self.component_classes:
                try:
                    component = clazz(config)
                except TypeError as err:
                    lastErr = err
                    continue
            if component is None and lastErr is not None:
                raise lastErr
            components.append(component)
        return components
| 30.694268 | 201 | 0.625441 |
ace3894b5e53e3ed5ebbf83f8524e59a68fef968 | 14,629 | py | Python | DLplatform/coordinator.py | fraunhofer-iais/dlplatform | 7d127f8a17869f69d259e41822460f2962124f7b | [
"Apache-2.0"
] | 5 | 2020-05-05T08:54:26.000Z | 2021-02-20T07:36:28.000Z | DLplatform/coordinator.py | fraunhofer-iais/dlplatform | 7d127f8a17869f69d259e41822460f2962124f7b | [
"Apache-2.0"
] | 1 | 2020-11-16T14:15:53.000Z | 2020-11-16T14:15:53.000Z | DLplatform/coordinator.py | fraunhofer-iais/dlplatform | 7d127f8a17869f69d259e41822460f2962124f7b | [
"Apache-2.0"
] | 4 | 2020-05-05T08:56:57.000Z | 2020-07-22T11:28:52.000Z | from DLplatform.baseClass import baseClass
from DLplatform.parameters import Parameters
from DLplatform.communicating import Communicator
from DLplatform.synchronizing import Synchronizer
from pickle import loads
from multiprocessing import Queue
import sys, time
'''
The InitializationHandler defines, how the coordinator handles model parameters when new learners register.
In the base case (InitializationHandler), it will leave the models untouched (standard setting). Each learner
is initialized individually in the way it was defined in the learner factory.
UseFirstInitHandler uses the first model it receives (i.e., the from the first learner that registers at the coordinator)
as initial parameters for all other learners. This ensures that all learners start with the exact same parameters.
NoisyInitHandler does the same as UseFirstInitHandler, but adds noise to the parameters. Thus, all learners are initialized
around the same common parameters.
'''
class InitializationHandler:
    '''
    Default initialization policy: returns the learner's parameters unchanged,
    so every learner keeps its own individually initialized model.
    '''
    def __call__(self, params : Parameters):
        return params
class UseFirstInitHandler(InitializationHandler):
    '''
    Hands the parameters of the first registering learner to every later one,
    so that all learners start from exactly the same model.
    '''
    def __init__(self):
        self._initParams = None

    def __call__(self, params : Parameters):
        # After the first call, always return the captured parameters.
        if self._initParams is not None:
            return self._initParams
        self._initParams = params
        return params
## %TODO finish implementation of noisy initialization
class NoisyInitHandler(InitializationHandler):
    '''
    Intended behavior (unfinished): like UseFirstInitHandler, but each learner
    receives the common initial parameters perturbed by noise.
    '''
    def __init__(self, noiseParams):
        # noiseParams is expected to carry at least a 'type' and 'range' entry.
        self._noiseParams = noiseParams
        self._initParams = None
    def __call__(self, params : Parameters):
        if self._initParams is None:
            self._initParams = params
        eps = self.getNoise()
        # NOTE(review): getNoise currently always returns None, so this add()
        # call will most likely fail - to be completed (see TODO above).
        return self._initParams.add(eps)
    def getNoise(self):
        # Unfinished stub: the uniform branch reads the range but never builds
        # a noise object; None is returned in all cases.
        if self._noiseParams['type'] == "uniform":
            range = self._noiseParams['range']
        return None
class Coordinator(baseClass):
'''
Provides the functionality of the central coordinator which handles model
synchronization and information exchange between workers
'''
    def __init__(self, minStartNodes = 0, minStopNodes = 0):
        '''
        Initializes a 'Coordinator' object.

        Parameters
        ----------
        minStartNodes : int - number of workers that must register before the
            process starts (0 means start immediately)
        minStopNodes : int - if larger than 0, the process stops once fewer
            than this many workers are still active; remaining workers are
            asked to exit
        '''
        super().__init__(name = "Coordinator")
        # Collaborators for messaging and model synchronization; set later
        # via setCommunicator / setSynchronizer.
        self._communicator = None
        self._synchronizer = None
        # Bookkeeping containers for the coordination protocol.
        self._violations = []
        self._nodesInViolation = []
        self._balancingSet = {}
        self._activeNodes = []
        # Default policy: leave each learner's initial parameters untouched.
        self._initHandler = InitializationHandler()
        self._learningLogger = None
        # if this parameter is set, then the coordinator will wait till all the nodes are registered
        self._minStartNodes = minStartNodes
        self._waitingNodes = {}
        # if this parameter is larger than 0, then when less than this amount of workers is active,
        # process stops - all the other still active workers are asked to exit
        self._minStopNodes = minStopNodes
        # initializing queue for communication with communicator process
        self._communicatorConnection = Queue()
def setLearningLogger(self, logger):
self._learningLogger = logger
def setCommunicator(self, comm : Communicator):
'''
Links a 'Communicator' object to the 'Coordinator' object.
Parameters
----------
comm: object - 'Communicator' object that handles message passing for the coordinator
Returns
-------
Exception
--------
ValueError
in case that identifier is not a Communicator
'''
if not isinstance(comm,Communicator):
error_text = "The attribute comm is of type " + str(type(comm)) + " and not of type" + str(Communicator)
self.error(error_text)
raise ValueError(error_text)
self._communicator = comm
def getCommunicator(self) -> Communicator:
'''
Get 'Communicator' object of the coordinator.
Returns
-------
_communicator: object - 'Communicator' object that handles message passing for the coordinator
'''
return self._communicator
def setInitHandler(self, initHandler : InitializationHandler):
self._initHandler = initHandler
def setSynchronizer(self, synOp : Synchronizer):
'''
Parameters
----------
synOp : Synchronizer
Returns
-------
Exception
--------
ValueError
in case that synOp is not a Synchronizer
'''
if not isinstance(synOp, Synchronizer):
error_text = "The attribute synOp is of type " + str(type(synOp)) + " and not of type" + str(Synchronizer)
self.error(error_text)
raise ValueError(error_text)
self._synchronizer = synOp
def getSynchronizer(self) -> Synchronizer:
'''
Returns
-------
Synchronizer
'''
return self._synchronizer
def checkInterProcessCommunication(self):
'''
Checks queue for new incoming messages and acts in case if a message has arrived
Exceptions
----------
ValueError
in case that the received message doesn't fit with the expected type
'''
if not self._communicatorConnection.empty():
recvObj = self._communicatorConnection.get()
if not isinstance(recvObj,tuple):
raise ValueError("coordinator received recvObj that is not a tuple")
elif not len(recvObj) == 3:
raise ValueError("coordinator received recvObj which has length different from 3")
routing_key, exchange, body = recvObj
self.onMessageReceived(routing_key, exchange, body)
def _setConnectionsToComponents(self):
'''
Gives communicator access to the queue such that an inter process communication can take place.
Exceptions
----------
AttributeError
in case if no communicator is set
'''
if self._communicator is None:
self.error("Communicator not set!")
raise AttributeError("Communicator not set!")
self._communicator.setConnection(consumerConnection = self._communicatorConnection)
def onMessageReceived(self, routing_key, exchange, body):
message = loads(body)
message_size = sys.getsizeof(body)
if routing_key == 'violation':
self.info("Coordinator received a violation")
self._communicator.learningLogger.logViolationMessage(exchange, routing_key, message['id'], message_size, 'receive')
self._violations.append(body)
if routing_key == 'balancing':
self.info("Coordinator received a balancing model")
self._communicator.learningLogger.logBalancingMessage(exchange, routing_key, message['id'], message_size, 'receive')
# append it to violations - thus we enter the balancing process again
# model can send the answer to balancing request only once - then it will be waiting
# for a new model to come and will not react to requests anymore
# so it cannot be that the model answers several times and thus initiates new
# balancing when not needed
self._violations.append(body)
if routing_key == 'registration':
self.info("Coordinator received a registration")
self._communicator.learningLogger.logRegistrationMessage(exchange, routing_key, message['id'], message_size, 'receive')
nodeId = message['id']
self._learningLogger.logModel(filename = "initialization_node" + str(message['id']), params = message['param'])
newParams = self._initHandler(message['param'])
self._learningLogger.logModel(filename = "startState_node" + str(message['id']), params = message['param'])
self._activeNodes.append(nodeId)
self._waitingNodes[nodeId] = newParams
# we send around the initial parameters only when all the expected nodes are there
# in case when parameter is not set, it is equal to 0 - so every new node will satisfy the condition
if len(self._waitingNodes) >= self._minStartNodes:
for worker_id in self._waitingNodes:
time.sleep(0.1)#without the sleep rabbitMQ gets congested and messages do not get delivered to nodes (occurred with 21 nodes and BatchLearners)
self._communicator.sendAggregatedModel(identifiers = [worker_id], param = self._waitingNodes[worker_id], flags = {"setReference":True})
self._waitingNodes.clear()
# we want to allow to wait for 10 nodes, but then others to join dynamically
self._minStartNodes = 1
#TODO: maybe we have to check the balancing set here again.
#If a node registered, while we are doing a full sync, or a balancing operation,
#we might need to check. But then, maybe it's all ok like this.
#will spoil full sync for dynamic case and will spoil periodic case - they will have to wait
# for this new node to make needed amount of updates
# can check if balancing_set is not empty then just add this node to balancing set right away
# and set its ability to train to false
if routing_key == 'deregistration':
self.info("Coordinator received a deregistration")
self._communicator.learningLogger.logDeregistrationMessage(exchange, routing_key, message['id'], message_size, 'receive')
self._learningLogger.logModel(filename = "finalState_node" + str(message['id']), params = message['param'])
self._activeNodes.remove(message['id'])
if not self._balancingSet.get(message['id']) is None:
self._balancingSet.pop(message['id'])
# send exit messages if we have less than needed active nodes
# if the parameter is not set and equal 0 this condition will not work
if len(self._activeNodes) < self._minStopNodes:
self.info("Not enough active workers left, exiting.")
for nodeId in self._activeNodes:
self._communicator.sendExitRequest(nodeId)
# we do not want to send exit messages again
self._minStopNodes = 0
# when all the nodes deregistered we stop the coordinator process
elif len(self._activeNodes) == 0:
self.info("Training finished, exiting.")
sys.exit()
def run(self):
if self._communicator is None:
self.error("Communicator is not set!")
raise AttributeError("Communicator is not set!")
if self._synchronizer is None:
self.error("Synchronizing operator is not set!")
raise AttributeError("Synchronizing operator is not set!")
self._communicator.initiate(exchange = self._communicator._exchangeCoordinator,
topics = ['registration', 'deregistration', 'violation', 'balancing'])
self._communicator.daemon = True
self._setConnectionsToComponents()
self._communicator.start()
if (self._communicatorConnection == None):
raise AttributeError("communicatorConnection was not set properly at the worker!")
while True:
self.checkInterProcessCommunication()
# since the deregistration may happen during the balancing evaluation, we have to check if there are not active nodes
nonActiveBalancingSet = set(self._balancingSet.keys()).difference(set(self._activeNodes))
for nodeId in nonActiveBalancingSet:
self._balancingSet.pop(nodeId)
# we have to enter this in two cases:
# - we got a violation
# - we got all the balancing models
if len(self._violations) > 0 or (len(self._balancingSet.keys()) != 0 and not None in set(self._balancingSet.values())):
if len(self._violations) > 0:
message = loads(self._violations[0])
nodeId = message['id']
param = message['param']
self._nodesInViolation.append(nodeId)
self._balancingSet[nodeId] = param
# @NOTE always deleting the current violation leads to potential extension of a dynamic small balancing to
# a full_sync - might be a case that blocking everything, balancing one violation and then considering the next one
# is a better idea from the point of view of effectiveness
del self._violations[0]
nodes, params, flags = self._synchronizer.evaluate(self._balancingSet, self._activeNodes)
# fill balancing set with None for new nodes in balancing set
for newNode in nodes:
if not newNode in self._balancingSet.keys() and newNode in self._activeNodes:
self._balancingSet[newNode] = None
if params is None and None in self._balancingSet.values():
# request for models from balancing set nodes
for newNode in nodes:
# balancingRequest can be sent only when it is dynamic averaging
if self._balancingSet[newNode] is None and newNode in self._activeNodes:
self._communicator.sendBalancingRequest(newNode)
elif not params is None:
# we do not want to update the nodes that are already inactive
nodesToSendAvg = list(set(nodes) & set(self._activeNodes))
self._communicator.sendAggregatedModel(nodesToSendAvg, params, flags)
self._learningLogger.logBalancing(flags, self._nodesInViolation, list(self._balancingSet.keys()))
self._learningLogger.logAveragedModel(nodes, params, flags)
self._balancingSet.clear()
self._nodesInViolation = []
self._communicator.join()
| 43.799401 | 163 | 0.626974 |
ace38aba0ef48713a1a4dba7c37045b251461ca3 | 2,718 | py | Python | core/migrations/0001_initial.py | XSonecaX/api-receita | 9a293426771bc7bea075c4b6b37c87725720634b | [
"MIT"
] | null | null | null | core/migrations/0001_initial.py | XSonecaX/api-receita | 9a293426771bc7bea075c4b6b37c87725720634b | [
"MIT"
] | null | null | null | core/migrations/0001_initial.py | XSonecaX/api-receita | 9a293426771bc7bea075c4b6b37c87725720634b | [
"MIT"
] | 1 | 2021-11-27T05:46:45.000Z | 2021-11-27T05:46:45.000Z | # Generated by Django 3.0.1 on 2019-12-23 23:59
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 3.0.1): creates the `Cpf`
    # table — apparently one record per person of a CPF (Brazilian taxpayer
    # ID) registry, with NUM_CPF as the primary key and every other column
    # stored as nullable text.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Cpf',
            fields=[
                ('NUM_CPF', models.CharField(max_length=11, primary_key=True, serialize=False)),
                ('NOME', models.CharField(blank=True, max_length=500, null=True)),
                ('DATA_NASCIMENTO', models.CharField(blank=True, max_length=12, null=True)),
                ('IND_SEXO', models.CharField(blank=True, max_length=2, null=True)),
                ('NOME_MAE', models.CharField(blank=True, max_length=500, null=True)),
                ('NUM_TITULO_ELEITOR', models.CharField(blank=True, max_length=12, null=True)),
                ('TIPO_LOGRADOURO', models.CharField(blank=True, max_length=50, null=True)),
                ('DESCR_LOGRADOURO', models.CharField(blank=True, max_length=100, null=True)),
                ('NUM_LOGRADOURO', models.CharField(blank=True, max_length=50, null=True)),
                ('DESCR_COMPLEMENTO_LOGRADOURO', models.CharField(blank=True, max_length=800, null=True)),
                ('NOME_BAIRRO', models.CharField(blank=True, max_length=500, null=True)),
                ('NUM_CEP', models.CharField(blank=True, max_length=50, null=True)),
                ('NOME_MUNICIPIO', models.CharField(blank=True, max_length=200, null=True)),
                ('SIGLA_UF', models.CharField(blank=True, max_length=50, null=True)),
                ('NUM_DDD', models.CharField(blank=True, max_length=50, null=True)),
                ('NUM_TELEFONE', models.CharField(blank=True, max_length=50, null=True)),
                ('NUM_FAX', models.CharField(blank=True, max_length=50, null=True)),
                ('SE_ESTRANGEIRO', models.CharField(blank=True, max_length=2, null=True)),
                ('NOME_PAIS_NACIONALIDADE', models.CharField(blank=True, max_length=500, null=True)),
                ('COD_SITUACAO_CADASTRAL', models.CharField(blank=True, max_length=2, null=True)),
                ('DESCR_SITUACAO_CADASTRAL', models.CharField(blank=True, max_length=50, null=True)),
                ('DATA_SITUACAO_CADASTRAL', models.CharField(blank=True, max_length=12, null=True)),
                ('DATA_INSCRICAO', models.CharField(blank=True, max_length=12, null=True)),
                ('ANO_OBITO', models.CharField(blank=True, max_length=10, null=True)),
                ('ANO_ULTIMA_ENTREGA_DECLARACAO', models.CharField(blank=True, max_length=10, null=True)),
                ('DATA_BASE', models.CharField(blank=True, max_length=12, null=True)),
            ],
        ),
    ]
| 59.086957 | 106 | 0.623252 |
ace38c35fa06bc4a36b9c972872870f135a9dfcc | 966 | py | Python | Evaulation_Regression_Model_Performance_w_r-Square/evaluation_regression_models_RSquare.py | malidrsn/Machine-Learning-Prediction-Algorithms-w-Examples-V1 | cc2eb5f0e8e8640bb6d5651f2eae961d7dcac702 | [
"MIT"
] | null | null | null | Evaulation_Regression_Model_Performance_w_r-Square/evaluation_regression_models_RSquare.py | malidrsn/Machine-Learning-Prediction-Algorithms-w-Examples-V1 | cc2eb5f0e8e8640bb6d5651f2eae961d7dcac702 | [
"MIT"
] | null | null | null | Evaulation_Regression_Model_Performance_w_r-Square/evaluation_regression_models_RSquare.py | malidrsn/Machine-Learning-Prediction-Algorithms-w-Examples-V1 | cc2eb5f0e8e8640bb6d5651f2eae961d7dcac702 | [
"MIT"
] | null | null | null | # residual = y-y_head
# Evaluating a regression model with R-squared (comments translated to English):
# squared residual = residual^2
# sum of squared residuals = sum((y - y_head)^2) = SSR
# suppose y_avg = 12000
# total sum of squares = sum((y - y_avg)^2) = SST
# R^2 (R-square) is the evaluation metric used here.
# R^2 = 1 - (SSR / SST); the closer R^2 is to 1, the better the fit.
# ********** applying this to a random forest **********
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv("random_forest.csv", sep=";", header=None)
x = df.iloc[:, 0].values.reshape(-1, 1)
y = df.iloc[:, 1].values.reshape(-1, 1)
from sklearn.ensemble import RandomForestRegressor
# random_state pins the random choices so repeated runs split/sample
# the same way (reproducible results)
rf = RandomForestRegressor(n_estimators=100, random_state=42) # n_estimators = how many trees the forest uses
rf.fit(x, y)
y_head = rf.predict(x)
# R Square
from sklearn.metrics import r2_score
print("R_Score", r2_score(y, y_head))
| 32.2 | 116 | 0.71118 |
ace38c44b1a4a8d3e0a881a7f06f90e6e29b5d68 | 2,919 | py | Python | utils.py | khakhulin/Text2Img | acb002904122e1f2c0abed3fff69daccfff88c12 | [
"MIT"
] | 3 | 2020-04-30T15:16:54.000Z | 2020-06-16T01:00:15.000Z | utils.py | khakhulin/Text2Img | acb002904122e1f2c0abed3fff69daccfff88c12 | [
"MIT"
] | null | null | null | utils.py | khakhulin/Text2Img | acb002904122e1f2c0abed3fff69daccfff88c12 | [
"MIT"
] | null | null | null | import os
from copy import deepcopy
import torch
import torch.nn as nn
from PIL import Image
from nn_utils import LeakyConv
def save(path, model, optimizer, loss, epoch):
    """Serialize the model and optimizer state plus epoch/loss to `path`."""
    state = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'epoch': epoch,
        'loss': loss,
    }
    torch.save(state, path)
def load(path):
    """Read a checkpoint written by save().

    Returns (model_weights, optimizer_state, epoch, loss).
    """
    checkpoint = torch.load(path)
    return (checkpoint['model'],
            checkpoint['optimizer'],
            checkpoint['epoch'],
            checkpoint['loss'])
def freeze_model(model):
    """Put `model` in eval mode and stop gradient tracking for all parameters."""
    model.eval()
    for p in model.parameters():
        p.requires_grad = False
def unfreeze_model(model):
    """Put `model` back in train mode and re-enable gradients for all parameters."""
    model.train()
    for p in model.parameters():
        p.requires_grad = True
def init_weight(model_layer, gain=1.0, sigma=0.02):
    """Initialize one layer in place, following the original authors' scheme:

    - Conv* layers (except LeakyConv): orthogonal weights scaled by `gain`
    - BatchNorm* layers: scale ~ N(1, sigma), bias = 0
    - Linear layers: orthogonal weights (gain 1.0), bias = 0 when present
    """
    layer_kind = model_layer.__class__.__name__
    is_conv = 'Conv' in layer_kind and 'LeakyConv' not in layer_kind
    if is_conv:
        nn.init.orthogonal_(model_layer.weight.data, gain=gain)
    elif 'BatchNorm' in layer_kind:
        model_layer.weight.data.normal_(1.0, sigma)
        model_layer.bias.data.fill_(0)
    elif 'Linear' in layer_kind:
        nn.init.orthogonal_(model_layer.weight.data, 1.0)
        if model_layer.bias is not None:
            model_layer.bias.data.fill_(0.0)
def save_images(images, filenames, save_dir, iter, size):
    """Write every image of the batch (values in [-1, 1]) as a JPEG under
    save_dir/images/iter<iter>/<size>/ and return the batch rescaled to [0, 1].

    When `filenames` is None the images are numbered 0..N-1.
    """
    batch = images.size(0)
    target_dir = os.path.join(save_dir, 'images', 'iter' + str(iter), str(size))
    os.makedirs(target_dir, exist_ok=True)
    if filenames is None:
        filenames = [str(i) for i in range(batch)]
    # [-1, 1] --> [0, 1] for the returned tensor
    img_tensor = images.add(1).div(2).detach().cpu()
    for i in range(batch):
        sample = images[i].detach().cpu()
        fullpath = os.path.join(target_dir, filenames[i] + '.jpg')
        # [-1, 1] --> [0, 255] bytes for writing to disk
        byte_img = sample.add(1).div(2).mul(255).clamp(0, 255).byte()
        ndarr = byte_img.permute(1, 2, 0).data.cpu().numpy()
        Image.fromarray(ndarr).save(fullpath)
    return img_tensor
def copy_params(net):
    """Return a deep copy of `net`'s parameter tensors (their .data), in
    parameter order.

    Fix: the original bound its result to a local named `copy_params`,
    shadowing the function itself — renamed for clarity.
    """
    params_snapshot = deepcopy([p.data for p in net.parameters()])
    return params_snapshot
def load_params(net, new_param):
    """Copy each tensor of `new_param` into `net`'s parameters, in order."""
    for dst, src in zip(net.parameters(), new_param):
        dst.data.copy_(src)
def set_requires_grad_value(models_list, require_grad):
    """Set requires_grad on every parameter of every model in `models_list`.

    Fix: replaced the `for i in range(len(...))` index loop with direct
    iteration (idiomatic, and works for any iterable of models).
    """
    for model in models_list:
        for p in model.parameters():
            p.requires_grad = require_grad
def get_top_bottom_mean_grad(params):
    """Return (mean gradient of the first parameter, mean gradient of the last).

    Fixes over the original:
    - accepts any iterable, not only an iterator (wraps with iter());
    - with exactly one parameter the original left `p` unbound and raised
      UnboundLocalError; now the single parameter serves as both first and last.

    Raises StopIteration (via next) if `params` is empty, and AttributeError
    if a parameter has no .grad — same as before.
    """
    iterator = iter(params)
    first = next(iterator)
    mean_grad_first = first.grad.mean().item()
    # Exhaust the iterator to reach the last parameter.
    last = first
    for last in iterator:
        pass
    mean_grad_last = last.grad.mean().item()
    return mean_grad_first, mean_grad_last
ace38c5dcc5fb0a23f200f622235724792815735 | 5,482 | py | Python | src/japronto/request/__init__.py | imuxin/japronto | 97034603638929f93da22d791db2bd400de58ec0 | [
"MIT"
] | null | null | null | src/japronto/request/__init__.py | imuxin/japronto | 97034603638929f93da22d791db2bd400de58ec0 | [
"MIT"
] | null | null | null | src/japronto/request/__init__.py | imuxin/japronto | 97034603638929f93da22d791db2bd400de58ec0 | [
"MIT"
] | null | null | null | import cgi
import collections
import encodings.idna
from functools import wraps
from http.cookies import _unquote as unquote_cookie
from json import loads as json_loads
import urllib.parse
class HttpRequest(object):
    """Lightweight container for one parsed HTTP request."""
    __slots__ = ('path', 'method', 'version', 'headers', 'body')

    def __init__(self, method, path, version, headers):
        self.method = method
        self.path = path
        self.version = version
        self.headers = headers
        self.body = None  # filled in later once the body has been read

    def dump_headers(self):
        """Print the request line fields followed by every header."""
        print('path', self.path)
        print('method', self.method)
        print('version', self.version)
        for name, value in self.headers.items():
            print(name, value)

    def __repr__(self):
        return '<HttpRequest {0.method} {0.path} {0.version}, {1} headers>' \
            .format(self, len(self.headers))
def memoize(func):
    """Cache `func`'s result per request inside request.extra['_japronto'],
    keyed by the function name, so each accessor is computed at most once."""
    @wraps(func)
    def wrapper(request):
        cache = request.extra.setdefault('_japronto', {})
        key = func.__name__
        if key in cache:
            return cache[key]
        value = func(request)
        cache[key] = value
        return value
    return wrapper
@memoize
def text(request):
    """Request body decoded as text (charset from the request, utf-8 default)."""
    raw = request.body
    if raw is None:
        return None
    return raw.decode(request.encoding or 'utf-8')
@memoize
def json(request):
    """Request body parsed as JSON, or None when there is no body."""
    if request.body is not None:
        return json_loads(request.text)
    return None
@memoize
def query(request):
    """Query-string parameters as a dict (the last value wins on duplicates)."""
    qs = request.query_string
    return dict(urllib.parse.parse_qsl(qs)) if qs else {}
def remote_addr(request):
    """IP address of the connected peer, taken from the transport info."""
    peername = request.transport.get_extra_info('peername')
    return peername[0]
@memoize
def parsed_content_type(request):
    """(media_type, params) parsed from Content-Type; (None, {}) when absent."""
    header = request.headers.get('Content-Type')
    if header:
        return cgi.parse_header(header)
    return None, {}
def mime_type(request):
    """Just the media-type part of the Content-Type header (or None)."""
    media_type, _params = parsed_content_type(request)
    return media_type
def encoding(request):
    """The charset parameter of the Content-Type header, if any."""
    _media_type, params = parsed_content_type(request)
    return params.get('charset')
@memoize
def parsed_form_and_files(request):
    """(form_fields, files) for urlencoded or multipart bodies; (None, None)
    for any other content type."""
    media_type = request.mime_type
    if media_type == 'application/x-www-form-urlencoded':
        return dict(urllib.parse.parse_qsl(request.text)), None
    if media_type == 'multipart/form-data':
        boundary = parsed_content_type(request)[1]['boundary'].encode('utf-8')
        return parse_multipart_form(request.body, boundary)
    return None, None
def form(request):
    """Form fields parsed from the request body (None for other content types)."""
    fields, _files = parsed_form_and_files(request)
    return fields
def files(request):
    """Uploaded files parsed from the request body (None for other content types)."""
    _fields, uploads = parsed_form_and_files(request)
    return uploads
@memoize
def hostname_and_port(request):
    """(hostname, port) parsed from the Host header.

    Returns (None, None) when the header is missing, and (hostname, None)
    when no port is given. The hostname is IDNA-decoded to Unicode.

    Fix: the original always called int(port), raising TypeError for the
    common case of a Host header without an explicit port (e.g. "example.com").
    """
    host = request.headers.get('Host')
    if not host:
        return None, None
    hostname, *rest = host.split(':', 1)
    port = rest[0] if rest else None
    return encodings.idna.ToUnicode(hostname), int(port) if port else None
def port(request):
    """TCP port from the Host header (None when not specified)."""
    _host, host_port = hostname_and_port(request)
    return host_port
def hostname(request):
    """Hostname from the Host header (None when the header is missing)."""
    host, _port = hostname_and_port(request)
    return host
def parse_cookie(cookie):
    """Parse a ``Cookie`` HTTP header into a dict of name/value pairs.

    This function attempts to mimic browser cookie parsing behavior;
    it specifically does not follow any of the cookie-related RFCs
    (because browsers don't either).
    The algorithm used is identical to that used by Django version 1.9.10.
    """
    result = {}
    for chunk in cookie.split(';'):
        name, sep, value = chunk.partition('=')
        if not sep:
            # Assume an empty name per
            # https://bugzilla.mozilla.org/show_bug.cgi?id=169091
            name, value = '', chunk
        name, value = name.strip(), value.strip()
        if name or value:
            # unquote using Python's algorithm.
            result[name] = unquote_cookie(value)
    return result
@memoize
def cookies(request):
    """Request cookies as a dict; {} when absent or unparseable."""
    headers = request.headers
    if 'Cookie' not in headers:
        return {}
    try:
        parsed = parse_cookie(headers['Cookie'])
    except Exception:
        return {}
    return {name: urllib.parse.unquote(value) for name, value in parsed.items()}
File = collections.namedtuple('File', ['type', 'body', 'name'])


def parse_multipart_form(body, boundary):
    """Split a multipart/form-data body into (fields, files).

    `fields` maps field name -> decoded text value; `files` maps field
    name -> File(type, body, name) for parts that carry a filename or an
    explicit Content-Type.
    """
    uploads = {}
    values = {}
    parts = body.split(boundary)
    # parts[0] is the leading '--', parts[-1] is the closing '--\r\n'.
    for part in parts[1:-1]:
        upload_name = None
        content_type = None
        field = None
        pos = 2  # skip the CRLF that follows the boundary
        eol = 0
        while not eol == -1:
            eol = part.find(b'\r\n', pos)
            header_line = part[pos:eol].decode('utf-8')
            pos = eol + 2
            if not header_line:
                break  # blank line separates headers from the payload
            sep = header_line.index(':')
            header_name = header_line[0:sep]
            header_value, header_params = cgi.parse_header(
                header_line[sep + 2:])
            if header_name == 'Content-Disposition':
                if 'filename' in header_params:
                    upload_name = header_params['filename']
                field = header_params.get('name')
            elif header_name == 'Content-Type':
                content_type = header_value
        payload = part[pos:-4]  # strip the trailing '\r\n--'
        if upload_name or content_type:
            uploads[field] = File(type=content_type, name=upload_name, body=payload)
        else:
            values[field] = payload.decode('utf-8')
    return values, uploads
| 25.981043 | 78 | 0.631157 |
ace38c61c6c694c385e1aba9d74aa3d333c7f347 | 1,141 | py | Python | tests/testapp/testapp/urls.py | HOYINWUN/django-map-widgets | 2dd9bbd6833a781ea1683de9fa63d964c5936371 | [
"MIT"
] | 425 | 2016-02-21T21:02:20.000Z | 2022-03-17T02:11:17.000Z | tests/testapp/testapp/urls.py | HOYINWUN/django-map-widgets | 2dd9bbd6833a781ea1683de9fa63d964c5936371 | [
"MIT"
] | 112 | 2016-08-24T02:24:06.000Z | 2022-01-27T10:38:36.000Z | tests/testapp/testapp/urls.py | HOYINWUN/django-map-widgets | 2dd9bbd6833a781ea1683de9fa63d964c5936371 | [
"MIT"
] | 132 | 2016-06-01T09:56:00.000Z | 2022-02-16T14:42:17.000Z | """testapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from django.urls import reverse_lazy
from django.views.generic import RedirectView
urlpatterns = [
url(r'^$', RedirectView.as_view(url=reverse_lazy('widgets:list'))),
url(r'^admin/', admin.site.urls),
url(r'^widgets/', include('widgets.urls', namespace='widgets')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 38.033333 | 79 | 0.728309 |
ace38c7abc4d9b1b9c2016cba738abec78906739 | 1,043 | py | Python | hypercane/hfilter/highest_rank_per_cluster.py | ato/hypercane | 290ef402006ee8f8d98090e31da52819e26145a0 | [
"MIT"
] | 2 | 2020-06-11T18:42:02.000Z | 2020-10-06T21:17:15.000Z | hypercane/hfilter/highest_rank_per_cluster.py | ato/hypercane | 290ef402006ee8f8d98090e31da52819e26145a0 | [
"MIT"
] | 55 | 2020-06-01T00:23:00.000Z | 2022-02-21T20:52:29.000Z | hypercane/hfilter/highest_rank_per_cluster.py | ato/hypercane | 290ef402006ee8f8d98090e31da52819e26145a0 | [
"MIT"
] | 3 | 2021-02-07T05:36:24.000Z | 2021-12-17T05:45:14.000Z | import logging
module_logger = logging.getLogger('hypercane.hfilter.highest_rank_per_cluster')


def return_highest_ranking_memento_per_cluster(urimdata, rankkey):
    """For every cluster in `urimdata`, keep only the URI-M whose `rankkey`
    score is highest (ties broken by the URI-M string itself)."""
    by_cluster = {}
    winners = []
    for urim in urimdata:
        by_cluster.setdefault(urimdata[urim]['Cluster'], []).append(urim)
    module_logger.debug("cluster_assignments: {}".format(by_cluster))
    module_logger.debug("urimdata: {}".format(urimdata))
    module_logger.info("Applying score key {} to finding highest scoring memento per cluster for {} URI-Ms".format(
        rankkey, len(urimdata)
    ))
    for members in by_cluster.values():
        scored = []
        for urim in members:
            module_logger.info("examining URI-M {} with record {}".format(urim, urimdata[urim]))
            scored.append((urimdata[urim][rankkey], urim))
        winners.append(max(scored)[1])
    return winners
| 33.645161 | 115 | 0.710451 |
ace38db8e741757f728e80ad075188d273700ec1 | 6,963 | py | Python | spectrofit/core/QTImportFile.py | fulmen27/SpectroFit | 75c170b9ac1c92fb592fc6ce1826bf4dd4409c93 | [
"MIT"
] | 1 | 2020-04-22T08:51:37.000Z | 2020-04-22T08:51:37.000Z | spectrofit/core/QTImportFile.py | fulmen27/SpectroFit | 75c170b9ac1c92fb592fc6ce1826bf4dd4409c93 | [
"MIT"
] | null | null | null | spectrofit/core/QTImportFile.py | fulmen27/SpectroFit | 75c170b9ac1c92fb592fc6ce1826bf4dd4409c93 | [
"MIT"
] | 1 | 2021-01-13T13:43:03.000Z | 2021-01-13T13:43:03.000Z | import csv
import json
from astropy.io import fits
import numpy as np
import pandas as pd
from PySide2.QtWidgets import QFileDialog, QMainWindow
import os
class ImportFile(QMainWindow):
    """Dialog-driven loader for spectrum files and their companion metadata.

    Depending on `type` ('s', 'csv' or 'fits'), opens a file chooser, loads
    the spectrum into self.data / self.fits_data, and reads the matching
    auxiliary files (order delimiters for s/csv, line identification for fits).

    Fixes over the original:
    - __str__ printed and implicitly returned None, so str(obj) raised
      TypeError; it now returns the string representation.
    - the bare `except:` in _open_s is narrowed to (ValueError, TypeError)
      so only float-conversion failures are skipped.
    """
    def __init__(self, type, master):
        """Open the chooser matching `type` and load the data.

        Raises ValueError when the user cancels or the chosen path is invalid.
        """
        super().__init__(master)
        self.master = master
        self.my_csv = dict()
        self.delim = dict()
        self.type = ""
        self.data = {"lambda": [], "yspectre": [], "3rd_col": []}
        self.fits_data = {}
        self.lineident = {"1st_col": [], "2nd_col": [], "3rd_col": [], "lambda": [], "element_name": []}
        self.delim["filename"] = "delim_ordre_TBL.json"
        # Check each type of file :
        if type == "s":
            self._open_s()
            self._open_delim()
        elif type == "csv":
            self._open_csv()
            self._open_delim()
        elif type == "fits":
            self._open_fits()
            self._open_lineident()
            self._lineident_json_to_dict()

    def __str__(self):
        # Fix: must return a string (the original printed and returned None,
        # which made str(obj) raise TypeError).
        return str(self.my_csv)

    def _open_csv(self):
        """Open a Narval CSV file (lambda; intensity; error per row)."""
        filename = QFileDialog.getOpenFileName(self, 'Open file', os.getcwd(), "*.csv;;*.*")
        if filename != "" and filename is not None and os.path.exists(filename[0]):
            self.my_csv["filename"] = filename[0]
            self.type = "csv"
            with open(self.my_csv["filename"], 'r') as my_file:
                self.my_csv["csv_file"] = csv.reader(my_file, delimiter=";")
                for row in self.my_csv["csv_file"]:
                    self.data["lambda"].append(float(row[0]))
                    self.data["yspectre"].append(float(row[1]))
                    self.data["3rd_col"].append(float(row[2]))
        else:
            raise ValueError("Filename empty or filename not find in path")

    def _open_s(self):
        """
        Open .s file with Narval data:
        first column : lambda
        second column : intensity of spectrum
        third column : error
        """
        filename = QFileDialog.getOpenFileName(self, "S files", os.getcwd(), "*.s;;*.*")
        if filename != "" and filename is not None and os.path.exists(filename[0]):
            self.my_csv["filename"] = filename[0]
            self.type = "s"
            with open(self.my_csv["filename"]) as my_file:  # open file
                # read all lines, convert to float and store it in the right list
                for row in my_file:
                    r = row.rstrip().split(" ")
                    while '' in r:
                        r.remove('')
                    if len(r) == 3:
                        try:
                            self.data["lambda"].append(float(r[0]))
                            self.data["yspectre"].append(float(r[1]))
                            self.data["3rd_col"].append(float(r[2]))
                        except (ValueError, TypeError):
                            # narrowed from a bare `except:` so unrelated
                            # errors (KeyboardInterrupt, bugs) still surface
                            print("can't convert value go to next line")
        else:
            raise ValueError("Filename empty or filename not find in path")

    def _open_fits(self):
        """Open a NeoNarval .fits file and sort its tables into self.fits_data
        ('Wav', 'vel' or 'order' depending on the columns found)."""
        filename = QFileDialog.getOpenFileName(self, "S files", os.getcwd(), "*.fits;;*.*")
        if filename != "" and filename is not None and os.path.exists(filename[0]):
            self.my_csv["filename"] = filename[0]
            f = fits.open(self.my_csv["filename"])  # open file
            self.type = "fits"
            error = True
            for i in range(len(f)):  # iterate through all tables in the FITS HDU list
                try:
                    data = np.asarray(f[i].data)
                    pd_table = pd.DataFrame(data)
                    print(pd_table)
                    # check each table if it has wanted data
                    # if yes, store it in a pandas dataframe
                    if "Wavelength1" in pd_table.columns and "Intensity" in pd_table.columns:
                        # Angstrom -> nm, and keep wavelengths ascending
                        pd_table["Wavelength1"] = pd_table["Wavelength1"].divide(10.0)
                        if pd_table["Wavelength1"].iloc[0] > pd_table["Wavelength1"].iloc[-1]:
                            pd_table = pd_table.iloc[::-1]
                        self.fits_data["Wav"] = pd_table
                    elif "Velocity" in pd_table.columns and "Intensity" in pd_table.columns:
                        self.fits_data["vel"] = pd_table
                    elif "Orderlimit" in pd_table.columns:
                        self.fits_data["order"] = pd_table
                    else:
                        raise ValueError("Couldn't convert the table to a known format")
                    error = False
                except ValueError:
                    if len(f) == 1:
                        raise ValueError("Couldn't convert data")
                    else:
                        if i == len(f) - 1 and error:
                            raise ValueError("Couldn't convert any of the table")
                        else:
                            print("Error when converting one table : going to next")
        else:
            raise ValueError("Filename empty or filename not find in path")

    def _open_delim(self):
        """
        Open delim file to get the limit of each order
        Store limits in a list
        """
        self.delim["filename"] = "./spectrofit/core/delim_ordre_TBL.json"
        if os.path.exists(self.delim["filename"]):
            with open(self.delim["filename"]) as f:
                self.delim["delim_file"] = f
                self.delim["delim_data"] = json.load(f)
        else:
            raise ValueError("No delim order TBL (JSON file) found in path")

    def _open_lineident(self):
        """
        Open lineident file. It contains all information on the lambda of each
        known element — useful to plot the fundamental ray for each element.
        """
        self.lineident["filename"] = 'lineident.csv'
        if os.path.exists('lineident.csv'):
            with open(self.lineident["filename"], 'r') as f:
                self.lineident["lineident_file"] = csv.reader(f, delimiter=";")
                for row in self.lineident["lineident_file"]:
                    self.lineident["1st_col"].append(float(row[0]))
                    self.lineident["2nd_col"].append(float(row[1]))
                    self.lineident["3rd_col"].append(float(row[2]))
                    self.lineident["lambda"].append(
                        float(row[3]) / 10)  # divide by ten because in file lambda in Angstrom
                    self.lineident["element_name"].append(row[4])
        else:
            raise ValueError("No lineident found in path")

    def _lineident_json_to_dict(self):
        # Load the JSON variant of the line identification table.
        with open("./spectrofit/core/lineident.json", 'r') as f:
            self.lineident_json = json.load(f)
| 39.338983 | 104 | 0.520178 |
ace38de630639628c1135dd53c78bda63af5a041 | 32,885 | py | Python | Other_separators/separator.py | hamedsteiner/Unsupervised_SIRR | acdd728d4ff32080fed04e7cd8ae9fb6b7187232 | [
"MIT"
] | null | null | null | Other_separators/separator.py | hamedsteiner/Unsupervised_SIRR | acdd728d4ff32080fed04e7cd8ae9fb6b7187232 | [
"MIT"
] | null | null | null | Other_separators/separator.py | hamedsteiner/Unsupervised_SIRR | acdd728d4ff32080fed04e7cd8ae9fb6b7187232 | [
"MIT"
] | null | null | null | '''
Date: Feb, 2020
Author: Suhong Kim
'''
from collections import namedtuple
import time
import numpy as np
from skimage import exposure
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.autograd import Variable
# local
from net.percep_model import *
from net.losses import *
from net.noise import get_noise
from utils.image_io import *
class Separator:
def __init__(self, image_file,
# image info
image_gt_b_file=None, image_gt_r_file=None,
image_size=224,
# structure info
architecture='percep2', pretrained='places365',
input_type='image', alpha_gt=0.4,
enable_cross=False, enable_feedback=False,
enable_augment=False, enable_alpha=False,
# Loss Params
recon_loss_weight=0.0, gray_loss_weight=0.0,
cross_loss_weight=0.0, excld_loss_weight=0.0,
smooth_loss_weight=0.0, ceil_loss_weight=0.0,
tvex_loss_weight=0.0,
# training info
num_iter=5000, learning_rate=0.0001,
show_every=100, plot_every=-1,
outdir='./output', outdir_name=None,
model_init_dir=None,
):
# -- Arguments--
self.image_file = image_file
self.image_gt_b_file = image_gt_b_file
self.image_gt_r_file = image_gt_r_file
self.alpha_gt = alpha_gt
self.image_size = image_size
self.architecture = architecture
self.pretrained = pretrained
self.input_type = input_type
self.enable_cross = enable_cross
self.enable_feedback = enable_feedback
self.enable_augment = enable_augment
self.enable_alpha = enable_alpha
self.num_iter = num_iter
self.learning_rate = learning_rate
self.plot_every = plot_every
self.show_every = show_every
self.model_init_dir = model_init_dir
# init for Losses
self.recon_loss_weight = recon_loss_weight #"default" or "chwise"
self.gray_loss_weight = gray_loss_weight # gray-scale recon loss
self.cross_loss_weight = cross_loss_weight
self.excld_loss_weight = excld_loss_weight # exclusion loss (from DoubleDIP)
self.smooth_loss_weight = smooth_loss_weight
self.ceil_loss_weight = ceil_loss_weight # image reflection prior loss
self.tvex_loss_weight = tvex_loss_weight
# --Environment--
if not os.path.exists(outdir):
os.makedirs(outdir)
if outdir_name == None:
outdir_name = "{}_{}_{}".format(architecture,
input_type,
time.strftime("%Hh%Mmin",time.localtime()))
self.output_path = os.path.join(outdir, outdir_name)
if not os.path.exists(self.output_path):
os.makedirs(self.output_path)
print(">>>>>>>>>>>>> The output will be saved in", self.output_path)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(">>>>>>>>>>>>> This model will be trained on", self.device)
# --Type definition--
self.LOSSES = namedtuple('LOSSES', ['loss', 'recon', 'gray', 'cross', 'excld','smooth', 'ceil', 'tvex'])
self.PSNRS = namedtuple('PSNRS', ['psnr', 'psnr_pair1', 'psnr_pair2'])
self.SSIMS = namedtuple('SSIMS', ['ssim', 'ssim_pair1', 'ssim_pair2'])
self.RESULT = namedtuple('RESULT', ['reflection', 'background', 'reconstruction', 'losses', 'psnrs', 'ssims'])
def run(self, num_run, in_batch=False, show=True):
    """Execute `num_run` full optimization rounds and aggregate the best results.

    A round whose loss diverges to NaN is retried; after more than
    `num_run` NaN failures the whole run is aborted.
    """
    collected_best = []
    run_idx = 0
    nan_failures = 0
    while run_idx < num_run:
        print(">>>>>>>>>>>>> running ({:d} / {:d}) on ....".format(run_idx + 1, num_run))
        self.initialize()
        if not self.optimize():
            nan_failures += 1
            if nan_failures > num_run:
                print("************* Stop running due to NaN loss *************")
                break
            print("************* Loss reached to NaN, restarted *************")
            run_idx = max(run_idx - 1, 0)
            continue
        self.finalize(run_idx, show)
        collected_best.append(self.best_result)
        run_idx += 1
    self.finalize_all(collected_best)
def initialize(self, num_batch=1):
    """Reset per-run bookkeeping, then (re)build images, inputs, nets and losses."""
    self.loss_list, self.psnr_list, self.ssim_list = [], [], []
    self.best_result = None
    self._init_image()
    self._init_inputs(num_batch)
    self._init_nets()
    self._init_losses()
def _init_image(self):
    """Load the mixed image (and ground-truth layers when given) as
    numpy arrays and torch tensors on ``self.device``."""
    # get image name
    self.image_name = os.path.basename(self.image_file).split('.')[0]
    # open images as numpy array
    self.image_np = prepare_image(self.image_file, imsize=self.image_size)
    self.image_gt_b = prepare_image(self.image_gt_b_file, imsize=self.image_size) if self.image_gt_b_file is not None else None
    self.image_gt_r = prepare_image(self.image_gt_r_file, imsize=self.image_size) if self.image_gt_r_file is not None else None
    # derive the reflection ground truth as the residual when only the
    # background ground truth is provided
    if self.image_gt_r is None and self.image_gt_b is not None:
        self.image_gt_r = self.image_np - self.image_gt_b
    # convert image into tensor
    self.image_t = np_to_torch(self.image_np).to(self.device)
    # NOTE(review): np_to_torch(None) will raise when no ground truth was
    # supplied at all — confirm callers always pass image_gt_b_file
    self.image_gt_b_t = np_to_torch(self.image_gt_b).to(self.device)
    self.image_gt_r_t = np_to_torch(self.image_gt_r).to(self.device)
def _init_inputs(self, num_batch=1):
    """Build the inputs fed to the two nets: generated noise/meshgrid,
    or the mixed image itself, optionally tiled ``num_batch`` times
    along the last dimension."""
    if self.input_type == 'noise' or self.input_type == 'meshgrid':
        self.dipnet_b_input = get_noise(self.image_np.shape[0], self.input_type, (self.image_np.shape[1], self.image_np.shape[2])).to(self.device)
        self.dipnet_r_input = get_noise(self.image_np.shape[0], self.input_type, (self.image_np.shape[1], self.image_np.shape[2])).to(self.device)
    else:
        # if the input type is an image
        self.dipnet_b_input = self.image_t
        self.dipnet_r_input = self.image_t
    # # if num_batch is more than 1
    self.dipnet_b_input = torch.cat(tuple([self.dipnet_b_input]*num_batch),
                                    dim=len(self.dipnet_b_input.shape)-1)
    self.dipnet_r_input = torch.cat(tuple([self.dipnet_r_input]*num_batch),
                                    dim=len(self.dipnet_r_input.shape)-1)
def _init_nets(self):
    """Instantiate the background/reflection PercepNets (plus the
    trainable alpha scalar when enabled), optionally restore weights
    from ``model_init_dir``, and collect the trainable parameters."""
    num_ch_in_b = self.dipnet_b_input.shape[1]
    num_ch_in_r = self.dipnet_r_input.shape[1]
    num_ch_out = self.image_np.shape[0]
    # select the type of net structure
    dipnet_b = PercepNet(num_ch_in_b, num_ch_out, self.image_t.clone(), self.pretrained, enable_attn=False)
    dipnet_r = PercepNet(num_ch_in_r, num_ch_out, self.image_t.clone(), self.pretrained, enable_attn=False)
    # assign nets to the system
    self.dipnet_b = dipnet_b.to(self.device)
    self.dipnet_r = dipnet_r.to(self.device)
    if self.enable_alpha:
        # alphanet = AlphaNet(num_ch_in_b, num_ch_out, self.image_t.clone(), self.pretrained)
        # self.alphanet = alphanet.to(self.device)
        # the mixing coefficient is a single trainable scalar seeded at alpha_gt
        self.alphanet = Variable(self.alpha_gt*torch.ones(1,1).to(self.device), requires_grad=True)
    # initialize
    if self.model_init_dir is not None:
        try:
            checkpoint = torch.load(self.model_init_dir)
            self.dipnet_b.load_state_dict(checkpoint['dipnet_b'])
            self.dipnet_r.load_state_dict(checkpoint['dipnet_r'])
            print("loadded")
        except:
            # best-effort: fall back to fresh weights if the checkpoint is unusable
            print("fail to load init model")
    # assign parameters for the optimizer
    self.parameters = [p for p in self.dipnet_b.parameters() if p.requires_grad] + \
                      [p for p in self.dipnet_r.parameters() if p.requires_grad]
    if self.enable_alpha:
        #self.parameters += [p for p in self.alphanet.parameters() if p.requires_grad]
        self.parameters += [self.alphanet]
    # Compute number of parameters
    s = sum(np.prod(list(p.size())) for p in self.parameters)
    print ('------------> Number of params: {:,}'.format(s))
def _init_losses(self):
    """Instantiate the loss modules consumed by _compute_losses."""
    self.compute_l1_loss = nn.L1Loss().to(self.device)
    self.compute_smooth_l1_loss = nn.SmoothL1Loss().to(self.device)
    self.compute_mse_loss = nn.MSELoss().to(self.device)
    self.compute_ssim_loss = SSIMLoss().to(self.device)
    self.compute_exclusion_loss = ExclusionLoss().to(self.device)
    self.compute_mask_l1_loss = ExtendedL1Loss().to(self.device)
    self.compute_gradient_loss = GradientLoss().to(self.device)
def optimize(self):
    """Run the main optimization loop for ``num_iter`` steps.

    Returns False when the loss diverged to NaN (the caller restarts
    the run) and True otherwise; stops early once PSNR exceeds 40 dB.
    """
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    optimizer = torch.optim.AdamW(self.parameters, lr=self.learning_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3000, gamma=0.5)
    # optimizer = torch.optim.RMSprop(self.parameters, lr=self.learning_rate, momentum=0.9)
    for itr in range(self.num_iter):
        # optimization
        itr_begin = time.time()
        optimizer.zero_grad()
        self._optimize_on_iter(itr)
        optimizer.step()
        scheduler.step()
        itr_end = time.time()
        self._harvest_results(itr)
        # post-processing
        # display results
        self._print_status(itr, (itr_end-itr_begin))
        self._plot_results(itr)
        # check return status
        if np.isnan(self.current_result.losses.loss):
            return False
        if self.current_result.psnrs.psnr > 40.0:
            print("-----------Early Stopping")
            return True
    return True
def _optimize_on_iter(self, step):
    """Run one forward/backward pass.

    Chooses the net inputs (optionally feeding back / crossing the
    previous outputs, optionally color-jitter augmented), runs both
    nets (optionally alpha-blended), derives the cross estimates,
    accumulates the losses and backpropagates, then caches the
    detached outputs for the next iteration.

    Fix: removed three leftover debug print() calls that dumped tensor
    shapes to stdout on every iteration of the alpha-enabled path.
    """
    # choose what to feed the two nets this step
    if self.enable_feedback and step > 0:
        b_input = self.background_out_prev
        r_input = self.reflection_out_prev
        if self.enable_cross:
            b_input = self.background_cross
            r_input = self.reflection_cross
    else:
        b_input = self.dipnet_b_input
        r_input = self.dipnet_r_input
    if self.enable_augment and self.input_type != 'image_feat':
        # light color jitter keeps the nets from locking onto the exact input
        transform = transforms.Compose([
            transforms.ColorJitter(
                brightness=0.05, contrast=0.05, saturation=0.05, hue=0.05),
            transforms.ToTensor()
        ])
        b_input = transform(np_to_pil(torch_to_np(b_input))).unsqueeze(0).to(self.device)
        r_input = transform(np_to_pil(torch_to_np(r_input))).unsqueeze(0).to(self.device)
    if self.enable_alpha:
        # blend the two "pure" layer estimates with the learned scalar alpha
        self.alpha_out = self.alphanet
        self.background_pure = self.dipnet_b(b_input)
        self.reflection_pure = self.dipnet_r(r_input)
        self.background_out = (1-self.alpha_out)*self.background_pure
        self.reflection_out = self.alpha_out*self.reflection_pure
        self.recon_out = self.background_out + self.reflection_out
        # cross estimates: what each layer "should" be given the other one
        self.background_cross = self.image_t.clone().detach() - self.reflection_out.clone().detach()
        self.reflection_cross = self.image_t.clone().detach() - self.background_out.clone().detach()
    else:
        self.background_out = self.dipnet_b(b_input)
        self.reflection_out = self.dipnet_r(r_input)
        self.recon_out = self.background_out + self.reflection_out
        self.background_cross = self.image_t.clone().detach() - self.reflection_out.clone().detach()
        self.reflection_cross = self.image_t.clone().detach() - self.background_out.clone().detach()
        # shift the per-channel residue mean between the two cross estimates
        # NOTE(review): repeat(224, 224) hard-codes image_size == 224 — confirm
        residue_mean = torch.mean(torch.mean(self.reflection_cross.squeeze(0), dim=2),dim=1)
        residue_mean = torch.cat([residue_mean[ch].repeat(224, 224).unsqueeze(0) for ch in range(len(residue_mean))], dim=0)
        residue_mean = residue_mean.unsqueeze(0)
        self.background_cross = torch.clamp(self.background_cross + residue_mean, 0., 1.)
        self.reflection_cross = torch.clamp(self.reflection_cross - residue_mean, 0., 1.)
    # when the net has latent
    try:
        self.latent_b_out = self.dipnet_b.latent
        self.latent_r_out = self.dipnet_r.latent
    except:
        self.latent_b_out = None
        self.latent_r_out = None
    # compute loss
    total_loss = self._compute_losses(step)
    # backpropagate
    total_loss.backward()
    # update prev
    self.background_out_prev = self.background_out.clone().detach().requires_grad_(True)
    self.reflection_out_prev = self.reflection_out.clone().detach().requires_grad_(True)
def _compute_losses(self, step):
    """Accumulate every enabled loss term for this step, cache the
    detached scalar values in ``self.losses``, and return the total
    loss tensor for backpropagation."""
    # initilize loss buffer
    # (all keys start from the same zero tensor; each update rebinds the key)
    loss_out = dict.fromkeys(self.LOSSES._fields,torch.zeros((1),
                             device=self.device, requires_grad=True))
    # ---reconstruction loss
    loss_out['recon'] = loss_out['recon'] + self.recon_loss_weight \
        * self.compute_mse_loss(self.recon_out, self.image_t)
    if step < 1000:
        # early steps also match the image gradients of the reconstruction
        gt_gx, gt_gy = self.compute_gradient_loss(self.image_t)
        gx, gy = self.compute_gradient_loss(self.recon_out)
        loss_out['recon'] = loss_out['recon'] \
            + 0.07*self.recon_loss_weight * self.compute_l1_loss(gx, gt_gx) \
            + 0.07*self.recon_loss_weight * self.compute_l1_loss(gy, gt_gy)
    if step > 0:
        # self-recon loss
        loss_out['recon'] = loss_out['recon'] + 0.1*self.recon_loss_weight \
            * self.compute_ssim_loss(self.background_out,
                                     self.background_out_prev)
        loss_out['recon'] = loss_out['recon'] + 0.1*self.recon_loss_weight \
            * self.compute_ssim_loss(self.reflection_out,
                                     self.reflection_out_prev)
    # ---gray-scale loss
    loss_out['gray'] = loss_out['gray'] + self.gray_loss_weight \
        * self.compute_mse_loss(rgb_to_gray(self.recon_out),
                                rgb_to_gray(self.image_t))
    # ---exclusion loss
    loss_out['excld'] = loss_out['excld'] + self.excld_loss_weight \
        * self.compute_exclusion_loss(self.background_out, self.reflection_out)
    # ---Total Variance Balance loss (total variance should be balanced)
    loss_out['tvex'] = loss_out['tvex'] + self.tvex_loss_weight \
        * torch.abs(self.compute_gradient_loss(self.background_out, mean=True)
                    - self.compute_gradient_loss(self.reflection_out, mean=True))
    # ---smooth loss(Total Variance Loss)
    loss_out['smooth'] = loss_out['smooth'] + 0.5 * self.smooth_loss_weight \
        * self.compute_gradient_loss(self.background_out, mean=True)
    loss_out['smooth'] = loss_out['smooth'] + 0.5 * self.smooth_loss_weight \
        * self.compute_gradient_loss(self.reflection_out, mean=True)
    # ---ceil loss
    loss_out['ceil'] = loss_out['ceil'] + 0.5 * self.ceil_loss_weight \
        * self.compute_mask_l1_loss(self.background_out, self.image_t,
                                    (self.background_out > self.image_t))
    loss_out['ceil'] = loss_out['ceil'] + 0.5 * self.ceil_loss_weight \
        * self.compute_mask_l1_loss(self.reflection_out, self.image_t,
                                    (self.reflection_out > self.image_t))
    # ---cross-guiding loss
    if self.cross_loss_weight > 0 and step > 0:
        loss_out['cross'] = loss_out['cross'] + 0.5 * self.cross_loss_weight \
            * self.compute_mse_loss(self.background_out,
                                    self.background_cross)
        loss_out['cross'] = loss_out['cross'] + 0.5 * self.cross_loss_weight \
            * self.compute_mse_loss(self.reflection_out,
                                    self.reflection_cross)
    # compute total
    t_loss = sum(loss_out.values())
    # save loss_out
    self.losses = self.LOSSES(loss=t_loss.item(), recon=loss_out['recon'].item(),
                              gray=loss_out['gray'].item(), cross=loss_out['cross'].item(),
                              excld=loss_out['excld'].item(),smooth=loss_out['smooth'].item(),
                              ceil=loss_out['ceil'].item(), tvex=loss_out['tvex'].item())
    # return
    return t_loss
def _harvest_results(self, step):
    """
    All the results here should be separated from the graph
    """
    # network results
    background_out_np = np.clip(torch_to_np(self.background_out.data), 0, 1)
    reflection_out_np = np.clip(torch_to_np(self.reflection_out.data), 0, 1)
    recon_out_np = np.clip(torch_to_np(self.recon_out.data), 0, 1)
    self.loss_list.append(self.losses.loss)
    # evaluation results
    # PSNR
    psnr = compute_psnr(self.image_np, recon_out_np)
    self.psnr_list.append(psnr)
    # for speed, cacluate other psnrs every show_every
    # (the cached self.psnr_pair1/2 from the last evaluation are reused otherwise)
    if step == 0 or step % self.show_every == self.show_every - 1 :
        psnr_b = compute_psnr(self.image_gt_b, background_out_np)
        psnr_r = compute_psnr(self.image_gt_r, reflection_out_np)
        self.psnr_pair1 = (psnr_b, psnr_r)
        # pair2 compares the outputs against the swapped ground truths
        psnr_b = compute_psnr(self.image_gt_r, background_out_np)
        psnr_r = compute_psnr(self.image_gt_b, reflection_out_np)
        self.psnr_pair2 = (psnr_b, psnr_r)
    # save panrs
    psnrs = self.PSNRS(psnr, self.psnr_pair1, self.psnr_pair2)
    # SSIM
    ssim = compute_ssim(self.image_np, recon_out_np)
    self.ssim_list.append(ssim)
    # for speed, cacluate other ssims every show_every
    if step == 0 or step % self.show_every == self.show_every - 1 :
        ssim_b = compute_ssim(self.image_gt_b, background_out_np)
        ssim_r = compute_ssim(self.image_gt_r, reflection_out_np)
        self.ssim_pair1 = (ssim_b, ssim_r)
        ssim_b = compute_ssim(self.image_gt_r, background_out_np)
        ssim_r = compute_ssim(self.image_gt_b, reflection_out_np)
        self.ssim_pair2 = (ssim_b, ssim_r)
    # save ssims
    ssims = self.SSIMS(ssim, self.ssim_pair1, self.ssim_pair2)
    # update the current result
    self.current_result = self.RESULT(background=background_out_np,
                                      reflection=reflection_out_np,
                                      reconstruction=recon_out_np,
                                      losses=self.losses,
                                      psnrs=psnrs, ssims=ssims)
    # update the best result
    if self.best_result is None or self.best_result.psnrs.psnr < self.current_result.psnrs.psnr:
        self.best_result = self.current_result
        # save model
        # torch.save({"architecture": self.architecture,
        #             "connection": self.connection,
        #             "dipnet_b": self.dipnet_b.state_dict(),
        #             "dipnet_r": self.dipnet_r.state_dict()}, os.path.join(self.output_path, "checkpoint"))
def _print_status(self, step, duration):
    """Every ``show_every`` steps, print a single status line (losses,
    PSNR, SSIM, alpha when enabled, and step duration), overwritten in
    place via a carriage return."""
    if step % self.show_every == self.show_every - 1 :
        loss_str = "Loss:"
        # only show the loss terms that are actually active (non-zero)
        for name, value in zip(self.LOSSES._fields, self.current_result.losses):
            if value == 0.0:
                continue
            if name == 'loss':
                loss_str += "{:f} (".format(value)
            else:
                loss_str += " {}: {:f} ".format(name, value)
        loss_str += ")"
        psnr_str = "PSNR: {:3.3f}".format(self.current_result.psnrs.psnr)
        if self.current_result.psnrs.psnr_pair1[0] > 0:
            psnr_str += "({:3.3f}, {:3.3f})".format(self.current_result.psnrs.psnr_pair1[0], self.current_result.psnrs.psnr_pair1[1])
            psnr_str += "({:3.3f}, {:3.3f})".format(self.current_result.psnrs.psnr_pair2[0], self.current_result.psnrs.psnr_pair2[1])
        ssim_str = "SSIM: {:1.3f}".format(self.current_result.ssims.ssim)
        if self.current_result.ssims.ssim_pair1[0] > 0:
            ssim_str += "({:1.3f}, {:1.3f})".format(self.current_result.ssims.ssim_pair1[0], self.current_result.ssims.ssim_pair1[1])
            ssim_str += "({:1.3f}, {:1.3f})".format(self.current_result.ssims.ssim_pair2[0], self.current_result.ssims.ssim_pair2[1])
        if self.enable_alpha:
            alpha_str = "alpha:{:1.3f}".format(self.alpha_out.item())
        else:
            alpha_str = ""
        ### print all
        print('Iteration:{:6d} {} {} {} {} Duration: {:.4f}'.format(step, alpha_str, loss_str, psnr_str, ssim_str, duration), '\r', end='')
def _plot_results(self, step):
    """Every ``plot_every`` steps (when plot_every > 0), save the
    intermediate separation: raw grid, per-net "pure" outputs when
    alpha is enabled, renormalized grid, and the cross estimates."""
    if self.plot_every > 0 and (step % self.plot_every == self.plot_every - 1) :
        plot_image_grid("{}_results_iter{}".format(self.image_name, step),
                        [self.current_result.background, self.current_result.reflection, self.current_result.reconstruction],
                        output_path=self.output_path)
        if self.enable_alpha:
            plot_image_grid("{}_results_pure_iter{}".format(self.image_name, step),
                            [torch_to_np(self.background_pure),
                             torch_to_np(self.reflection_pure),
                             torch_to_np(self.background_pure + self.reflection_pure)],
                            output_path=self.output_path)
        plot_image_grid("{}_results_renorm_iter{}".format(self.image_name, step),
                        [renormalize(self.current_result.background),
                         renormalize(self.current_result.reflection),
                         renormalize(self.current_result.reconstruction)],
                        output_path=self.output_path)
        save_image("{}_background_{:d}".format(self.image_name, step), self.current_result.background, output_path=self.output_path, show=False)
        save_image("{}_relfection_{:d}".format(self.image_name, step), self.current_result.reflection, output_path=self.output_path, show=False)
        save_image("{}_background_cross_{:d}".format(self.image_name, step), np.clip(torch_to_np(self.background_cross),0,1), output_path=self.output_path, show=False)
        save_image("{}_relfection_cross_{:d}".format(self.image_name, step), np.clip(torch_to_np(self.reflection_cross),0,1), output_path=self.output_path, show=False)
def finalize(self, num_run, show):
    """Persist the best result of one run: image grids (raw and
    renormalized, with PSNR/SSIM/alpha encoded in the filename),
    loss/PSNR/SSIM curves, and the individual output layers."""
    alpha = self.alpha_out.item() if self.enable_alpha else -1
    plot_image_grid("{}_results_best_{:d}_a_{:1.3f}_p_{:3.2f}_{:3.2f}_s_{:1.4f}_{:1.4f}".format(
        self.image_name, num_run, alpha, self.best_result.psnrs.psnr_pair1[0], self.best_result.psnrs.psnr_pair1[1],
        self.best_result.ssims.ssim_pair1[0], self.best_result.ssims.ssim_pair1[1]),
        [self.best_result.background, self.best_result.reflection, self.best_result.reconstruction],
        output_path=self.output_path, show=show)
    plot_image_grid("{}_results_best_renorm_{:d}_a_{:1.3f}_p_{:3.2f}_{:3.2f}_s_{:1.4f}_{:1.4f}".format(
        self.image_name, num_run, alpha, self.best_result.psnrs.psnr_pair2[0], self.best_result.psnrs.psnr_pair2[1],
        self.best_result.ssims.ssim_pair2[0], self.best_result.ssims.ssim_pair2[1]),
        [renormalize(self.best_result.background),
         renormalize(self.best_result.reflection),
         renormalize(self.best_result.reconstruction)],
        output_path=self.output_path, show=show)
    save_graph("{}_loss_{:d}".format(self.image_name, num_run), self.loss_list, output_path=self.output_path, show=False)
    save_graph("{}_psnr_{:d}".format(self.image_name, num_run), self.psnr_list, output_path=self.output_path, show=False)
    save_graph("{}_ssim_{:d}".format(self.image_name, num_run), self.ssim_list, output_path=self.output_path, show=False)
    save_image("{}_background_{:d}".format(self.image_name, num_run), self.best_result.background, output_path=self.output_path, show=False)
    save_image("{}_relfection_{:d}".format(self.image_name, num_run), self.best_result.reflection, output_path=self.output_path, show=False)
    save_image("{}_background_cross_{:d}".format(self.image_name, num_run), self.image_np - self.best_result.reflection, output_path=self.output_path, show=False)
    save_image("{}_relfection_cross_{:d}".format(self.image_name, num_run), self.image_np - self.best_result.background, output_path=self.output_path, show=False)
def finalize_all(self, b_list):
    """Summarize all runs: plot the ground truth (when available) and
    the lowest-loss run's best images in raw, contrast-rescaled and
    renormalized form."""
    print(">>>>>>>>>>>>> Final Results >>>>>>>>>>>>>")
    best_t_list = [b.background for b in b_list]
    best_r_list = [b.reflection for b in b_list]
    best_m_list = [b.reconstruction for b in b_list]
    best_l_list = [b.losses.loss for b in b_list]
    # plot gt inputs if exist
    if self.image_gt_b is not None and self.image_gt_r is not None:
        plot_image_grid("{}_ground_truth".format(self.image_name), [self.image_gt_b, self.image_gt_r, self.image_np],
                        output_path=self.output_path)
    # print the Final model with the lowest loss
    fidx = np.argmin(best_l_list)
    plot_image_grid("{}_results_best".format(self.image_name),
                    [best_t_list[fidx], best_r_list[fidx], best_m_list[fidx]],
                    output_path=self.output_path)
    # print the adaptive model with the lowest loss
    # (2nd/98th percentile contrast stretch per image)
    fidx = np.argmin(best_l_list)
    p2, p98 = np.percentile(best_t_list[fidx].transpose(1,2,0), (2, 98))
    equl_t = exposure.rescale_intensity(best_t_list[fidx].transpose(1,2,0), in_range=(p2, p98)).transpose(2,0,1)
    p2, p98 = np.percentile(best_r_list[fidx].transpose(1,2,0), (2, 98))
    equl_r = exposure.rescale_intensity(best_r_list[fidx].transpose(1,2,0),in_range=(p2, p98)).transpose(2,0,1)
    p2, p98 = np.percentile(best_m_list[fidx].transpose(1,2,0), (2, 98))
    equl_m = exposure.rescale_intensity(best_m_list[fidx].transpose(1,2,0), in_range=(p2, p98)).transpose(2,0,1)
    plot_image_grid("{}_results_rescaled".format(self.image_name),
                    [equl_t, equl_r, equl_m], output_path=self.output_path)
    # print the min model
    plot_image_grid("{}_results_renorm".format(self.image_name),
                    [renormalize(best_t_list[fidx]), renormalize(best_r_list[fidx]), renormalize(best_m_list[fidx])],
                    output_path=self.output_path)
def get_filelist(dir, Filelist=None):
    """Recursively collect every file path under `dir`.

    Args:
        dir: A file path (appended directly) or a directory to walk.
        Filelist: Optional list to append results into; a fresh list is
            created when omitted.

    Returns:
        The list of discovered file paths.

    Fix: the original used a mutable default argument (``Filelist=[]``),
    so repeated calls without an explicit list kept accumulating results
    across calls.
    """
    if Filelist is None:
        Filelist = []
    if os.path.isfile(dir):
        Filelist.append(dir)
    elif os.path.isdir(dir):
        for s in os.listdir(dir):
            get_filelist(os.path.join(dir, s), Filelist)
    return Filelist
# functions to be added
# 1) compare the histogram of the image -> compare with the output
# -> adjusting the value based on flickering removal
# 2) resize the image as its original
if __name__ == "__main__":
    # Driver: run the separator over every mixed image in ./Kaggle_image/I.
    # mahsa = get_filelist("./images_analyze")
    # mahsa = get_filelist("./Kaggle_image/I")
    mahsa = os.listdir("./Kaggle_image/I")  # file names of the mixed input images
    # print(len(mahsa))
    # exit()
    # fixed seeds for reproducibility
    torch.cuda.manual_seed_all(1943)
    np.random.seed(1943)
    # ----- Samples -------#
    image_dir = 'images'
    #names = ['withgt_87', 'wild', 'Real20_9']
    names = [ 'toy1', 'toy3', 'toy2', 'toy', 'sample1']
    names = [ 'input1', 'input2']
    image_dir = './Kaggle_image'
    get_image_file = lambda n : '{}/I/{}'.format(image_dir, n)
    get_image_gt_b_file = lambda n :'{}/B/{}'.format(image_dir, n)
    # get_image_file = lambda n : '{}/I/{}_m.jpg'.format(image_dir, n)
    # get_image_gt_b_file = lambda n :'{}/B/{}_g.jpg'.format(image_dir, n)
    # get_image_gt_r_file = lambda n :'{}/R/{}_r.jpg'.format(image_dir, n)
    # -------- SIR ----------#
    # image_dir = 'images_new'
    # names_m = get_filelist(os.path.join(image_dir, 'I'), [])
    # names = sorted([os.path.basename(f)[:-6] for f in names_m])
    # if True:
    #     best = ['9','87','105']#,'9', '87, '2', '61', '56', '37']
    #     names = best
    # get_image_file = lambda n : '{}/I/{}_m.jpg'.format(image_dir, n)
    # get_image_gt_b_file = lambda n :'{}/B/{}_g.jpg'.format(image_dir, n)
    # get_image_gt_r_file = lambda n :'{}/R/{}_r.jpg'.format(image_dir, n)
    # # -------Berkely -----#
    # image_dir = 'images_zhang'
    # names_m = get_filelist(os.path.join(image_dir, 'blended'), [])
    # names = sorted([os.path.basename(f)[:-4] for f in names_m])
    # names = ['15'] # 25, 29, 93, 107
    # get_image_file = lambda n : '{}/blended/{}.jpg'.format(image_dir, n)
    # get_image_gt_b_file = lambda n :'{}/transmission_layer/{}.jpg'.format(image_dir, n)
    # get_image_gt_r_file= lambda n : None
    alpha_vals = [0.15]
    for alpha_val in alpha_vals:
        # for idx, name in enumerate(names):
        for idx, name in enumerate(mahsa):
            # image_file=get_image_file(name)
            # image_file=get_image_file(name)
            # print(image_file)
            # exit()
            print("*********************************")
            print("running on {}_m.jpg ({:d}/{:d})".format(name,idx+1, len(mahsa)))
            print("*********************************")
            model3 = Separator(
                image_file=get_image_file(name),
                # image_gt_b_file=name,
                image_gt_b_file=get_image_gt_b_file(name),
                image_gt_r_file=None,
                # image_gt_b_file=None,
                # image_gt_r_file=None,
                # exp1 parameters -----------
                image_size=224,
                pretrained = 'places365',
                input_type = 'image',
                enable_cross = True, #True,
                enable_feedback = True, # True,
                enable_alpha = True, #True,
                enable_augment = True, # helping to stabilize the graph?
                alpha_gt = alpha_val, # 0.1
                # exp2 parameters -----------
                recon_loss_weight = 1.0,
                gray_loss_weight = 0.1,
                cross_loss_weight = 0.1, #0.1,
                excld_loss_weight = 0.1,
                ceil_loss_weight = 1.0, # 1
                smooth_loss_weight= 0.005,
                tvex_loss_weight = 0.001,
                # training parameters -------
                learning_rate=0.0001,#0.0001
                show_every=2000, plot_every=1000,
                outdir = './HybridTest/',
                outdir_name = 'kaggle_images{}'.format(alpha_val))
            model3.run(num_run=4)
            # free GPU/CPU memory before the next image
            del model3
| 49.750378 | 171 | 0.577984 |
ace38e509828a6cac792481b922c6c79e27d5f57 | 395 | py | Python | spiderman/asgi.py | nickobrad/Friendly-Neighborhood-Spiderman | af6c7488845153240fc37e271b9d1465b56f93d1 | [
"MIT"
] | null | null | null | spiderman/asgi.py | nickobrad/Friendly-Neighborhood-Spiderman | af6c7488845153240fc37e271b9d1465b56f93d1 | [
"MIT"
] | null | null | null | spiderman/asgi.py | nickobrad/Friendly-Neighborhood-Spiderman | af6c7488845153240fc37e271b9d1465b56f93d1 | [
"MIT"
] | null | null | null | """
ASGI config for spiderman project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'spiderman.settings')
application = get_asgi_application()
| 23.235294 | 78 | 0.787342 |
ace38ee86f651b73aa4420f03aab20e134fedd0c | 13,403 | py | Python | ticTacToeGui.py | battler82004/ap-csp-create-task | cae6af14b705f56be9b30f5420466533a251ef8e | [
"MIT"
] | null | null | null | ticTacToeGui.py | battler82004/ap-csp-create-task | cae6af14b705f56be9b30f5420466533a251ef8e | [
"MIT"
] | null | null | null | ticTacToeGui.py | battler82004/ap-csp-create-task | cae6af14b705f56be9b30f5420466533a251ef8e | [
"MIT"
] | null | null | null | # Tic Tac Toe GUI with guizero
# James Taddei
# 5/10/21
""" Need:
Finish functionality
Test
Refactor, comment, and f-strings
Upload to github
"""
# The way that this works is that you need to set the button's
# text to 'X' or 'O' (buttonName.text = "X"). Since these buttons
# all have the same name though and are stored in the list, we use
# currBoard[pos].text = "X" instead.
import guizero as gui
from math import floor
from random import randint
from time import sleep
def num_to_grid(gridNum):
    """Convert a square index (0-8) into (column, row) grid coordinates."""
    row, col = divmod(gridNum, 3)
    return (col, row)
def valid_location(currBoard, location):
    """Return True when the square at `location` is empty and therefore playable."""
    return currBoard[location].text == " "
def win_checker(currBoard):
    """Scan the 8 possible winning lines (3 columns, 3 rows, 2 diagonals).

    Returns a tuple: (True, winning symbol) when a player has three in a
    row, otherwise (False, "none").
    """
    winning_lines = [[0, 3, 6], [1, 4, 7], [2, 5, 8],
                     [0, 1, 2], [3, 4, 5], [6, 7, 8],
                     [0, 4, 8], [2, 4, 6]]
    for a, b, c in winning_lines:
        first = currBoard[a].text
        if first != " " and first == currBoard[b].text == currBoard[c].text:
            return (True, first)
    return (False, "none")
def tie_checker(currBoard):
    """Return True when all nine squares are occupied (the board is full)."""
    occupied = sum(1 for square in currBoard if square.text != " ")
    return occupied == 9
def flip_symbol(symbol):
    """Hand the turn over: set the global currSymbol to the opposite of `symbol`."""
    global currSymbol
    currSymbol = "O" if symbol == "X" else "X"
def is_game_over():
    """Return the final-result output when the game has ended, else 0.

    Delegates the end-of-game reporting to final_printer.
    """
    someone_won, _ = win_checker(currBoard)
    if someone_won or tie_checker(currBoard):
        return final_printer(currBoard, 2)
    return 0
def places_to_win(currBoard, checkingFor):
    """Find every square where the player using `checkingFor` could win
    next turn (two of their marks on a line plus one empty square).

    Returns the list of those board indices (empty when there are none).
    """
    def _winnable_square(triple):
        """Return the empty index completing `triple` for `checkingFor`,
        or 10 as a no-win placeholder."""
        for blank_slot, (a, b) in ((2, (0, 1)), (0, (1, 2)), (1, (0, 2))):
            if (currBoard[triple[a]].text == checkingFor
                    and currBoard[triple[b]].text == checkingFor
                    and currBoard[triple[blank_slot]].text == " "):
                return triple[blank_slot]
        return 10

    # All eight ways to win: 3 columns, 3 rows, 2 diagonals
    winning_lines = [[0, 3, 6], [1, 4, 7], [2, 5, 8],
                     [0, 1, 2], [3, 4, 5], [6, 7, 8],
                     [0, 4, 8], [2, 4, 6]]
    # Keep only real squares, dropping the 10 placeholders
    return [spot for spot in (_winnable_square(line) for line in winning_lines)
            if spot != 10]
def random_location(currBoard):
    """Return a randomly chosen board index (0-8) whose square is still empty."""
    while True:
        candidate = randint(0, 8)
        if valid_location(currBoard, candidate):
            return candidate
def bot_turn(opponentSymbol, friendlySymbol):
    """Have the bot place `friendlySymbol` on the shared board.

    When the opponent can win next turn, the bot blocks that square
    with probability 2/3; otherwise it plays a random open square.
    The move is colored by the current turn symbol, the turn is handed
    over, and the end-of-game check is run.
    """
    someone_won, _ = win_checker(currBoard)
    if someone_won or tie_checker(currBoard):
        return
    threats = places_to_win(currBoard, opponentSymbol)
    # short-circuit keeps the RNG draw identical to the original logic
    if threats and randint(0, 2) != 0:
        chosen = threats[0]  # block the opponent's winning square
    else:
        chosen = random_location(currBoard)
    currBoard[chosen].text = friendlySymbol
    move_colors = {"X": "#3ffc0a", "O": "#f505e5"}
    if currSymbol in move_colors:
        currBoard[chosen].text_color = move_colors[currSymbol]
    flip_symbol(currSymbol)
    is_game_over()
def reset_symbol():
    # Reset the global turn marker so a fresh game always starts with 'X'
    global currSymbol
    currSymbol = "X"
def who_is_first(whoIsFirst):
    """
    Callback for the one-player order menu.
    whoIsFirst == 1 means the human plays first (as 'X'); any other
    value means the bot plays first and the human is 'O'.
    """
    orderSelector.visible = False
    global botNeedsTurn
    if (whoIsFirst == 1):
        creatorInfo = ("X", "player_first")
        botNeedsTurn = False
    else:
        creatorInfo = ("O", "bot_first")
        botNeedsTurn = True
    newInstructionalMessage.visible = False
    # creatorInfo = (starting symbol, button handler mode)
    game_creator(creatorInfo[0], creatorInfo[1])
def num_players_selector(x):
    """
    Callback for the player-count menu: 1 = human vs bot (shows a
    follow-up first/second menu), 2 = two humans, anything else =
    bot vs bot.
    """
    numOfPlayersBoard.visible = False
    instructionMessage.visible = False
    if (x == 1):
        # Instructional text for choosing turn order
        global newInstructionalMessage
        newInstructionalMessage = gui.Text(app, color = "white", text= "\nClick '1' to go first or '2' to go second")
        global orderSelector
        orderSelector = gui.Box(app, layout="grid")
        # Buttons '1' and '2' forward to who_is_first
        for x in range(1, 3):
            button = gui.PushButton(orderSelector, command=who_is_first, args=[x], text=str(x), grid=[x, 0], width=5, height=3)
            button.text_color = "green"
    elif (x == 2):
        game_creator("X", "two_players")
    else: # bot-vs-bot mode
        game_creator("X", "two_bots")
def main():
    """
    Builds the top-level window and the player-count menu, then enters
    the GUI event loop (app.display() blocks until the window closes).
    """
    # Window creation
    global app
    app = gui.App(title="Tic Tace Toe")
    app.bg = "black"
    # Instructional text
    global instructionMessage
    instructionMessage = gui.Text(app, color = "white", text="\nSelect the number of players")
    # Board Creation
    global numOfPlayersBoard
    numOfPlayersBoard = gui.Box(app, layout="grid")
    # Buttons 0/1/2 forward the chosen player count to num_players_selector
    for x in range(3):
        button = gui.PushButton(numOfPlayersBoard, command=num_players_selector, args=[x], text=str(x), grid=[x, 0], width=5, height=3)
        button.text_color = "green"
    # Displaying the app (blocking)
    app.display()
def two_bots_button(pos, currBoard):
    """
    Click handler for bot-vs-bot mode: any click runs the two bots to
    completion; a click after the game ended resets the board instead.
    """
    if (not(win_checker(currBoard)[0]) and not(tie_checker(currBoard))):
        # Alternate bot moves until the game ends
        while True:
            if (currSymbol == "X"):
                bot_turn("X", "O")
            else:
                bot_turn("O", "X")
            isGameOver = is_game_over()
            if (isGameOver != 0):
                print(isGameOver)
                break
            sleep(0.5)  # pacing so moves are visible in the UI
    else:
        # Game reset
        for i in range(9):
            currBoard[i].text = " "
        reset_symbol()
def one_player_button(pos, currBoard, botIsFirst):
    """
    Click handler for human-vs-bot mode: places the player's symbol at
    ``pos``, then lets the bot respond.  A click after the game ended
    resets the board (and replays the bot's opening move if the bot
    goes first).
    """
    if (not(win_checker(currBoard)[0]) and not(tie_checker(currBoard))):
        if (valid_location(currBoard, pos)):
            currBoard[pos].text = currSymbol
            errorMessage.value = ""
        else:
            errorMessage.value = "Error, please click on a square that isn't taken already"
            return
        # Setting text color
        if (currSymbol == "X"):
            currBoard[pos].text_color = "#3ffc0a"
        elif (currSymbol == "O"):
            currBoard[pos].text_color = "#f505e5"
        isGameOver = is_game_over()
        if (isGameOver != 0):
            print(isGameOver)
            return
        flip_symbol(currSymbol)
        # After the flip it is the bot's ('O' owner's) turn
        if (currSymbol == "O"):
            bot_turn("X", "O")
            isGameOver = is_game_over()
            if (isGameOver != 0):
                print(isGameOver)
    else:
        # Game reset
        for i in range(9):
            currBoard[i].text = " "
        reset_symbol()
        if (botIsFirst):
            sleep(0.5)
            bot_turn("X", "O")
            # Recolor the bot's opening 'O' after the board wipe
            for i in range(9):
                if (currBoard[i].text == "O"):
                    currBoard[i].text_color = "#f505e5"
            flip_symbol(currSymbol)
def two_players_button(pos, currBoard):
    """
    Click handler for two-human mode: places the current player's
    symbol at ``pos`` and flips the turn.  A click after the game
    ended resets the board.
    """
    if (not(win_checker(currBoard)[0]) and not(tie_checker(currBoard))):
        if (valid_location(currBoard, pos)):
            currBoard[pos].text = currSymbol
            errorMessage.value = ""
        else:
            errorMessage.value = "Error, please click on a square that isn't taken already"
            return
        # Setting text color
        if (currSymbol == "X"):
            currBoard[pos].text_color = "#3ffc0a"
        elif (currSymbol == "O"):
            currBoard[pos].text_color = "#f505e5"
        isGameOver = is_game_over()
        if (isGameOver != 0):
            print(isGameOver)
            return
        flip_symbol(currSymbol)
    else:
        # Game reset
        for i in range(9):
            currBoard[i].text = " "
        reset_symbol()
def button_press(pos, currBoard, buttonPressFunc):
    """Dispatch a board-button click to the handler for the current game mode."""
    handlers = {
        "two_bots": lambda: two_bots_button(pos, currBoard),
        "player_first": lambda: one_player_button(pos, currBoard, False),
        "bot_first": lambda: one_player_button(pos, currBoard, True),
    }
    # Any unrecognized mode falls back to the two-human handler
    handlers.get(buttonPressFunc, lambda: two_players_button(pos, currBoard))()
def game_creator(startSymbol, buttonPressFunc):
    """
    This function will create the board and set up the screen
    for the actual game.
    startSymbol is the symbol that moves first; buttonPressFunc selects
    which click handler button_press will dispatch to.
    """
    global currBoard, currSymbol
    currBoard = [" ", " ", " ", " ", " ", " ", " ", " ", " "]
    currSymbol = startSymbol
    # Board Creation
    gameBoard = gui.Box(app, layout="grid")
    # Button creation - each list slot is replaced by its PushButton widget,
    # so the rest of the code reads/writes squares via .text on the widgets
    for pos in range(9):
        buttonText = currBoard[pos]
        x, y = num_to_grid(pos)
        button = gui.PushButton(gameBoard, command=button_press, args=[pos, currBoard, buttonPressFunc], text=buttonText, grid=[x, y], width=3)
        currBoard[pos] = button
    # Instructional text
    global userPrompt, errorMessage
    userPrompt = gui.Text(app, color="white", text="Click on an open square to place your symbol there")
    errorMessage = gui.Text(app, color = "red", text="")
    if (buttonPressFunc == "bot_first"):
        bot_turn("X", "O")
def final_printer(currBoard, numOfPlayers):
    """
    Return the end-of-game message for the given board and player count.
    """
    # Normalize the mode: 1 = human vs bot, 2 = human vs human, 3 = bot vs bot
    mode = numOfPlayers if numOfPlayers in (1, 2) else 3
    anyoneWon, winner = win_checker(currBoard)
    if anyoneWon:
        win_messages = {
            ("X", 1): "\n\nYou have triumphed over the bot! Congratulations on the victory!",
            ("X", 2): "\n\nCongratulations player 1 on your victory against your opponent!",
            ("X", 3): "\n\nBot number 1 has won the epic battle against the other bot!",
            ("O", 1): "\n\nThe bot has bested you! Better luck next time.",
            ("O", 2): "\n\nCongratulations player 2 on your victory against your opponent!",
            ("O", 3): "\n\nBot number 2 rules the day after defeating bot 1!",
        }
        # Unknown winner symbol yields None, matching the original fall-through
        return win_messages.get((winner, mode))
    draw_messages = {
        1: "\n\nYou have tied with the bot. Kind of anticlimactic. Hopefully you'll win next time.",
        2: "\n\nYou have tied with each other. Kind of anticlimactic. Hopefully someone will win next time.",
        3: "\n\nThe bots have tied with each other. Kind of anticlimactic. Hopefully one will win next time.",
    }
    return draw_messages[mode]
main()  # entry point: builds the menu GUI and blocks in the event loop
| 31.836105 | 169 | 0.657316 |
ace38ee9d5cccb9cc335c104792fb088dfedd79d | 2,499 | py | Python | chainer/functions/activation/crelu.py | tkerola/chainer | 572f6eef2c3f1470911ac08332c2b5c3440edf44 | [
"MIT"
] | 1 | 2021-02-26T10:27:25.000Z | 2021-02-26T10:27:25.000Z | chainer/functions/activation/crelu.py | nolfwin/chainer | 8d776fcc1e848cb9d3800a6aab356eb91ae9d088 | [
"MIT"
] | 2 | 2019-05-14T15:45:01.000Z | 2019-05-15T07:12:49.000Z | chainer/functions/activation/crelu.py | nolfwin/chainer | 8d776fcc1e848cb9d3800a6aab356eb91ae9d088 | [
"MIT"
] | 2 | 2019-07-16T00:24:47.000Z | 2021-02-26T10:27:27.000Z | import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class CReLU(function_node.FunctionNode):
    """Concatenated Rectified Linear Unit.

    Produces (max(0, x), max(0, -x)) concatenated along ``axis``, so the
    output doubles the size of that axis.
    """
    def __init__(self, axis=1):
        # Axis along which the two ReLU halves are concatenated
        if not isinstance(axis, int):
            raise TypeError('axis must be an integer value')
        self.axis = axis
    def check_type_forward(self, in_types):
        # Input must be floating point, with the concat axis in range
        # for both positive and negative axis values
        type_check._argname(in_types, ('x',))
        type_check.expect(
            in_types[0].dtype.kind == 'f',
            in_types[0].ndim > self.axis,
            in_types[0].ndim >= -self.axis
        )
    def get_output_shape(self, input_shape):
        # Output shape equals the input shape with the concat axis doubled
        output_shape = list(input_shape)
        output_shape[self.axis] *= 2
        return tuple(output_shape)
    def forward(self, inputs):
        # Allocate the doubled output once, then write max(0, x) into its
        # first half and max(0, -x) into its second half in place
        x, = inputs
        xp = backend.get_array_module(x)
        y = xp.empty(self.get_output_shape(x.shape), dtype=x.dtype)
        y_former, y_latter = xp.split(y, 2, axis=self.axis)
        zero = x.dtype.type(0)
        xp.maximum(zero, x, out=y_former)
        xp.maximum(zero, -x, out=y_latter)
        self.retain_inputs((0,))
        return y,
    def backward(self, indexes, grad_outputs):
        # d/dx max(0, x) is 1 where x > 0; d/dx max(0, -x) is -1 where x < 0
        x, = self.get_retained_inputs()
        gy, = grad_outputs
        gy_former, gy_latter = chainer.functions.split_axis(
            gy, 2, axis=self.axis)
        return gy_former * (x.data > 0) - gy_latter * (x.data < 0),
def crelu(x, axis=1):
    """Concatenated Rectified Linear Unit function.

    This function is expressed as follows

    .. math:: f(x) = (\\max(0, x), \\max(0, -x)).

    Here, two output values are concatenated along an axis.

    See: https://arxiv.org/abs/1603.05201

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
        axis (int): Axis that the output values are concatenated along.
            Default is 1.

    Returns:
        ~chainer.Variable: Output variable of concatenated array.
        If the axis is 1, A :math:`(s_1, s_2 \\times 2, ..., s_N)`-shaped float
        array.

    .. admonition:: Example

        >>> x = np.array([[-1, 0], [2, -3]], np.float32)
        >>> x
        array([[-1.,  0.],
               [ 2., -3.]], dtype=float32)
        >>> y = F.crelu(x, axis=1)
        >>> y.array
        array([[0., 0., 1., 0.],
               [2., 0., 0., 3.]], dtype=float32)

    """
    func = CReLU(axis=axis)
    y, = func.apply((x,))
    return y
| 30.108434 | 79 | 0.57543 |
ace38f478a86ceb19a256d46aa3c4c1bdf8604cc | 406 | py | Python | resippy/image_objects/earth_overhead/digital_globe/view_ready_stereo/view_ready_stereo_image_factory.py | BeamIO-Inc/resippy | 37f6b8e865f4836696a9db0a4b17eae2426cdd96 | [
"BSD-3-Clause"
] | 11 | 2019-03-30T02:32:13.000Z | 2021-11-02T23:15:17.000Z | resippy/image_objects/earth_overhead/digital_globe/view_ready_stereo/view_ready_stereo_image_factory.py | BeamIO-Inc/resippy | 37f6b8e865f4836696a9db0a4b17eae2426cdd96 | [
"BSD-3-Clause"
] | 10 | 2019-02-28T21:24:01.000Z | 2019-12-31T15:02:03.000Z | resippy/image_objects/earth_overhead/digital_globe/view_ready_stereo/view_ready_stereo_image_factory.py | BeamIO-Inc/resippy | 37f6b8e865f4836696a9db0a4b17eae2426cdd96 | [
"BSD-3-Clause"
] | 4 | 2019-08-16T22:20:23.000Z | 2021-04-27T08:23:01.000Z | from __future__ import division
from resippy.image_objects.earth_overhead.digital_globe.view_ready_stereo.view_ready_stereo_image \
import ViewReadyStereoImage
class ViewReadyStereoImageFactory:
    """Factory wrapper that builds ViewReadyStereoImage objects from files."""
    @staticmethod
    def from_file(fname  # type: str
                  ):     # type: (...) -> ViewReadyStereoImage
        """Load a ViewReadyStereoImage from the image file at ``fname``."""
        return ViewReadyStereoImage.init_from_file(fname)
| 31.230769 | 99 | 0.738916 |
ace38f88912269cbbea56eafb1563e7fd76cb7e1 | 1,540 | py | Python | setup.py | Frencil/eclipsescraper | 9d0b4e37d5064d9cb6f748ddf258c722dcb0ee8e | [
"Apache-2.0"
] | 5 | 2015-03-08T14:49:28.000Z | 2018-06-13T16:56:25.000Z | setup.py | Frencil/eclipsescraper | 9d0b4e37d5064d9cb6f748ddf258c722dcb0ee8e | [
"Apache-2.0"
] | 3 | 2015-03-19T17:25:31.000Z | 2016-04-09T07:58:05.000Z | setup.py | Frencil/eclipsescraper | 9d0b4e37d5064d9cb6f748ddf258c722dcb0ee8e | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
import sys
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """setuptools `test` command that delegates to pytest."""
    def finalize_options(self):
        # Clear the default suite/args so pytest discovers tests itself
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        #import here, cause outside the eggs aren't loaded
        import pytest
        errno = pytest.main(self.test_args)
        sys.exit(errno)
# Package metadata and build configuration for the eclipsescraper distribution
setup(name='eclipsescraper',
      version=0.4,
      description="Python module for scraping NASA's eclipse site into usable CZML documents",
      long_description=open('README.md').read(),
      classifiers=[
        'Topic :: Scientific/Engineering :: GIS',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.3',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
      ],
      keywords='GIS JSON CZML Cesium Globe Eclipse NASA',
      author="Christopher Clark (Frencil)",
      author_email='frencils@gmail.com',
      url='https://github.com/Frencil/eclipsescraper',
      license='Apache 2.0',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      zip_safe=False,
      tests_require=['pytest'],
      # run tests through the custom pytest runner defined above
      cmdclass = {'test': PyTest},
      install_requires=[
          # -*- Extra requirements: -*-
          "czml>=0.3.2",
          "geopy>=1.9.1",
          "lxml",
      ],
      )
| 33.478261 | 94 | 0.622078 |
ace38fd9efcb3975f95756d38a7672575591fcf3 | 482 | py | Python | myflair/wsgi.py | eleweek/myflair | eb23cf71ad64abfa59af13117292d8a11e4db171 | [
"MIT"
] | 2 | 2015-07-19T08:09:55.000Z | 2017-08-10T05:06:16.000Z | myflair/wsgi.py | eleweek/flayr | eb23cf71ad64abfa59af13117292d8a11e4db171 | [
"MIT"
] | 1 | 2016-07-09T06:29:06.000Z | 2016-07-09T06:29:06.000Z | myflair/wsgi.py | eleweek/flayr | eb23cf71ad64abfa59af13117292d8a11e4db171 | [
"MIT"
] | 1 | 2015-06-05T02:42:11.000Z | 2015-06-05T02:42:11.000Z | """
WSGI config for myflair project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myflair.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| 25.368421 | 78 | 0.802905 |
ace390747717617d8b4b4c97986b6a0e01d8728e | 1,454 | py | Python | python/kfserving/test/test_v1alpha2_xg_boost_spec.py | rakelkar/kfserving | 534bce5d8720a05c9badf58d35bbb5bcb4bcc5af | [
"Apache-2.0"
] | 1 | 2020-09-24T07:44:07.000Z | 2020-09-24T07:44:07.000Z | python/kfserving/test/test_v1alpha2_xg_boost_spec.py | rakelkar/kfserving | 534bce5d8720a05c9badf58d35bbb5bcb4bcc5af | [
"Apache-2.0"
] | null | null | null | python/kfserving/test/test_v1alpha2_xg_boost_spec.py | rakelkar/kfserving | 534bce5d8720a05c9badf58d35bbb5bcb4bcc5af | [
"Apache-2.0"
] | 1 | 2020-06-20T07:01:09.000Z | 2020-06-20T07:01:09.000Z | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KFServing
Python SDK for KFServing # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kfserving
from kfserving.models.v1alpha2_xg_boost_spec import V1alpha2XGBoostSpec # noqa: E501
from kfserving.rest import ApiException
class TestV1alpha2XGBoostSpec(unittest.TestCase):
    """V1alpha2XGBoostSpec unit test stubs (swagger-codegen generated)."""
    def setUp(self):
        # No fixtures required for the generated stubs
        pass
    def tearDown(self):
        pass
    def testV1alpha2XGBoostSpec(self):
        """Test V1alpha2XGBoostSpec"""
        # FIXME: construct object with mandatory attributes with example values
        # model = kfserving.models.v1alpha2_xg_boost_spec.V1alpha2XGBoostSpec()  # noqa: E501
        pass
if __name__ == '__main__':
    # Run the generated test stubs when this module is executed directly
    unittest.main()
| 26.436364 | 93 | 0.729711 |
ace390ab0f82d056e13c24b2c5006d678f25e0f8 | 6,109 | py | Python | Python/klampt/plan/cspaceutils.py | hpbader42/Klampt | 89faaef942c0c6fca579a3770314c6610e2ac772 | [
"BSD-3-Clause"
] | null | null | null | Python/klampt/plan/cspaceutils.py | hpbader42/Klampt | 89faaef942c0c6fca579a3770314c6610e2ac772 | [
"BSD-3-Clause"
] | null | null | null | Python/klampt/plan/cspaceutils.py | hpbader42/Klampt | 89faaef942c0c6fca579a3770314c6610e2ac772 | [
"BSD-3-Clause"
] | 1 | 2019-07-01T08:48:32.000Z | 2019-07-01T08:48:32.000Z | import math
import time
from cspace import CSpace
def default_sampleneighborhood(c,r):
    """Return a configuration sampled uniformly from the axis-aligned box
    of half-width r centered at configuration c."""
    # BUGFIX: this module never imports random at the top level, so the
    # original body raised NameError on first call.
    import random
    return [ci + random.uniform(-r,r) for ci in c]
def default_visible(a,b):
    # No generic visibility test exists for an arbitrary space; concrete
    # spaces must override this callback.
    raise RuntimeError("Can't check visibility")
def default_distance(a,b):
    """Euclidean (L2) distance between configurations a and b."""
    # BUGFIX: the original passed a generator expression directly to
    # math.sqrt (TypeError); the squared terms must be summed first.
    return math.sqrt(sum((ai - bi) ** 2 for (ai, bi) in zip(a, b)))
def default_interpolate(a,b,u):
    """Linearly interpolate between configurations a and b at parameter u."""
    result = []
    for ai, bi in zip(a, b):
        result.append(ai + u * (bi - ai))
    return result
def makedefault(space):
    """Helper: makes a space's callbacks perform the default Cartesian space
    operations."""
    # Installed as plain function attributes (not bound methods), so each
    # callback is later invoked as space.f(args) with no implicit self.
    space.sampleneighborhood = default_sampleneighborhood
    space.visible = default_visible
    space.distance = default_distance
    space.interpolate = default_interpolate
class CompositeCSpace(CSpace):
    """A cartesian product of multiple spaces, given as a list upon
    construction.  The feasible method can be overloaded to include
    interaction tests."""
    def __init__(self,spaces):
        CSpace.__init__(self)
        self.spaces = spaces

        # Delegating versions of the optional callbacks: split the composite
        # configuration, apply each subspace's callback, and recombine.
        def sampleneighborhood(c,r):
            return self.join(s.sampleneighborhood(cs,r) for (s,cs) in zip(self.spaces,self.split(c)))
        def visible(a,b):
            return all(s.visible(ai,bi) for (s,ai,bi) in zip(self.spaces,self.split(a),self.split(b)))
        def distance(a,b):
            return sum(s.distance(ai,bi) for (s,ai,bi) in zip(self.spaces,self.split(a),self.split(b)))
        def interpolate(a,b,u):
            return self.join(s.interpolate(ai,bi,u) for (s,ai,bi) in zip(self.spaces,self.split(a),self.split(b)))

        # If any subspace customizes an optional callback, fill in the
        # module-level default for the subspaces that don't, then install
        # the delegating version on this composite space.
        # BUGFIX: these previously referenced undefined names
        # (defaultsampleneighborhood, defaultvisible, defaultdistance,
        # defaultinterpolate); the module-level helpers use underscores.
        if any(hasattr(s,'sampleneighborhood') for s in spaces):
            for s in self.spaces:
                if not hasattr(s,'sampleneighborhood'):
                    s.sampleneighborhood = default_sampleneighborhood
            self.sampleneighborhood = sampleneighborhood
        if any(hasattr(s,'visible') for s in spaces):
            for s in self.spaces:
                if not hasattr(s,'visible'):
                    s.visible = default_visible
            self.visible = visible
        if any(hasattr(s,'distance') for s in spaces):
            for s in self.spaces:
                if not hasattr(s,'distance'):
                    s.distance = default_distance
            self.distance = distance
        if any(hasattr(s,'interpolate') for s in spaces):
            for s in self.spaces:
                if not hasattr(s,'interpolate'):
                    s.interpolate = default_interpolate
            self.interpolate = interpolate
        #TODO: should add feasibility tests for subspaces -- this will allow the planning module to optimize
        #constraint testing order.

    def subDims(self):
        """Return the dimensionality of each subspace (probed via sample())."""
        return [len(s.sample()) for s in self.spaces]

    def split(self,x):
        """Split a composite configuration into per-subspace configurations."""
        d = self.subDims()
        res = []
        pos = 0
        for di in d:
            res.append(x[pos:pos+di])
            pos += di
        return res

    def join(self,xs):
        """Concatenate per-subspace configurations into one composite list."""
        res = []
        for x in xs:
            res += x
        return res

    def feasible(self,x):
        """A composite configuration is feasible iff every part is feasible
        in its subspace."""
        for (xi,si) in zip(self.split(x),self.spaces):
            if not si.feasible(xi):
                return False
        return True

    def sample(self):
        """Sample each subspace independently and join the results."""
        return self.join(s.sample() for s in self.spaces)
class EmbeddedCSpace(CSpace):
    """A subspace of an ambient space, with the active DOFs given by a list
    of DOF indices of that ambient space.

    Attributes:
        - ambientspace: the ambient configuration space
        - mapping: the list of active indices into the ambient configuration
          space
        - xinit: the initial configuration in the ambient space (by default, 0)
    """
    def __init__(self,ambientspace,subset,xinit=None):
        CSpace.__init__(self)
        self.ambientspace = ambientspace
        n = len(ambientspace.sample())
        self.mapping = subset
        #start at the zero config if no initial configuration is given
        if xinit is None:
            self.xinit = [0.0]*n
        else:
            self.xinit = xinit

        # Delegating versions of the optional callbacks: lift into the
        # ambient space, delegate, and project back down where needed.
        def sampleneighborhood(c,r):
            return self.project(self.ambientspace.sampleneighborhood(self.lift(c),r))
        def visible(a,b):
            return self.ambientspace.visible(self.lift(a),self.lift(b))
        def distance(a,b):
            return self.ambientspace.distance(self.lift(a),self.lift(b))
        def interpolate(a,b,u):
            # BUGFIX: the original called the ambient interpolate with only
            # one endpoint, passed the other endpoint to project(), and
            # dropped u entirely.
            return self.project(self.ambientspace.interpolate(self.lift(a),self.lift(b),u))
        if hasattr(ambientspace,'sampleneighborhood'):
            self.sampleneighborhood = sampleneighborhood
        if hasattr(ambientspace,'visible'):
            self.visible = visible
        if hasattr(ambientspace,'distance'):
            self.distance = distance
        if hasattr(ambientspace,'interpolate'):
            self.interpolate = interpolate
        self.eps = self.ambientspace.eps
        self.bound = [self.ambientspace.bound[i] for i in self.mapping]
        self.properties = self.ambientspace.properties
        if self.ambientspace.feasibilityTests is not None:
            # BUGFIX: bind f per-lambda via a default argument; the original
            # closed over the loop variable, so every test used the last f.
            self.feasibilityTests = [(lambda x,f=f: f(self.lift(x))) for f in self.ambientspace.feasibilityTests]
            self.feasibilityTestNames = self.ambientspace.feasibilityTestNames[:]
            self.feasibilityTestDependencies = self.ambientspace.feasibilityTestDependencies[:]

    def project(self,xamb):
        """Ambient space -> embedded space"""
        # (removed stray debug print left in the original)
        return [xamb[i] for i in self.mapping]

    def lift(self,xemb):
        """Embedded space -> ambient space"""
        xamb = self.xinit[:]
        for (i,j) in enumerate(self.mapping):
            xamb[j] = xemb[i]
        return xamb

    def liftPath(self,path):
        """Given a CSpace path path, lifts this to the full ambient space configuration"""
        return [self.lift(q) for q in path]

    def feasible(self,x):
        """Feasibility is delegated to the ambient space on the lifted config."""
        return self.ambientspace.feasible(self.lift(x))

    def sample(self):
        """Sample the ambient space and project onto the active DOFs."""
        return self.project(self.ambientspace.sample())
ace390fcd508840569383bb07f74cd09a213e7a1 | 9,404 | py | Python | pyaws/tags/copy-tags-all-instances.py | mwozniczak/pyaws | af8f6d64ff47fd2ef2eb9fef25680e4656523fa3 | [
"MIT"
] | null | null | null | pyaws/tags/copy-tags-all-instances.py | mwozniczak/pyaws | af8f6d64ff47fd2ef2eb9fef25680e4656523fa3 | [
"MIT"
] | null | null | null | pyaws/tags/copy-tags-all-instances.py | mwozniczak/pyaws | af8f6d64ff47fd2ef2eb9fef25680e4656523fa3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Summary:
Script to copy tags on all EC2 instances in a region to their respective
EBS Volumes while eliminating or retaining certain tags as specified
Args:
profiles (list): awscli profile roles. Denotes accounts in which to run
regions (list): AWS region codes
DEBUGMODE (bool): don't change tags, but print out tags to be copied
SUMMARY_REPORT (bool): gen summary report only
logger (logging object): logger
Author:
Blake Huber, copyright 2017
(although it's hard to think someone would be desperate enough to rip
off such hastily written crap code. At some pt I'll clean it up, meanwhile
Use at your own risk)
"""
import sys
import loggers
import inspect
import datetime
from time import sleep
import json
from pygments import highlight, lexers, formatters
import boto3
from botocore.exceptions import ClientError
logger = loggers.getLogger('1.0')
DEBUGMODE = True # will not retag any resources
SUMMARY_REPORT = False # print summary report only
regions = ['ap-southeast-1', 'eu-west-1', 'us-east-1']
profiles = [
'gcreds-phht-gen-ra1-pr',
'gcreds-phht-gen-ra2-pr',
'gcreds-phht-gen-ra3-pr',
'gcreds-phht-gen-ra4-pr',
'gcreds-phht-gen-ra5-pr',
]
profiles = ['gcreds-phht-gen-ra3-pr']
# tags - to remove
TAGKEY_BACKUP = 'MPC-AWS-BACKUP'
TAGKEY_CPM = 'cpm backup'
TAGKEY_SNOW_CPM = 'MPC-SN-BACKUP'
NETWORKER = 'networker backup'
NAME = 'Name'
# tags we should not copy from the ec2 instance to the ebs volume
NO_COPY_LIST = [TAGKEY_BACKUP, TAGKEY_CPM, TAGKEY_SNOW_CPM, NETWORKER, NAME]
# tags on ebs volumes to preserve and ensure we do not overwrite or rm
PRESERVE_TAGS = ['Name']
# -- declarations -------------------------------------------------------------
def filter_tags(tag_list, *args):
    """
    - Filters a tag set by exclusion
    - variable tag keys given as parameters; tags whose 'Key' matches any
      parameter are excluded.  The input list is left untouched.
    RETURNS
        TYPE: list
    """
    # Rebuilding the list avoids the original remove-while-scanning
    # approach, which raised ValueError when a filter key was repeated.
    excluded = set(args)
    return [tag for tag in tag_list if tag['Key'] not in excluded]
def valid_tags(tag_list):
    """Return True when no tag key contains ':', else False.

    BUGFIX: the original fell off the end and returned None (falsy) for
    valid tag sets, so callers using `if not valid_tags(...)` treated
    every instance as having invalid tags.
    """
    for tag in tag_list:
        if ':' in tag['Key']:
            return False
    return True
def pretty_print_tags(tag_list):
    """ prints json tags with syntax highlighting """
    # Serialize with stable key order, then colorize via pygments for the terminal
    json_str = json.dumps(tag_list, indent=4, sort_keys=True)
    print(highlight(
        json_str, lexers.JsonLexer(), formatters.TerminalFormatter()
    ))
    print('\n')
    return
def select_tags(tag_list, key_list):
    """
    Return selected tags from a list of many tags given a tag key
    """
    # Keep every (tag, key) match, preserving the original pairing order
    matches = [tag for tag in tag_list for key in key_list if key == tag['Key']]
    # Strip any extra fields so only tag-appropriate k,v pairs remain
    return [{'Key': m['Key'], 'Value': m['Value']} for m in matches]
def get_instances(profile, rgn):
    """ returns all EC2 instance Ids in a region """
    vm_ids = []
    session = boto3.Session(profile_name=profile, region_name=rgn)
    client = session.client('ec2')
    # describe_instances groups instances under reservations; flatten both levels
    # NOTE(review): no pagination here -- accounts with many instances may be
    # truncated to the first response page; confirm against account size.
    r = client.describe_instances()
    for detail in [x['Instances'] for x in r['Reservations']]:
        for instance in detail:
            vm_ids.append(instance['InstanceId'])
    return vm_ids
def calc_runtime(start, end):
    """ Calculates job runtime given start, end datetime stamps
    Args:
        - start (datetime object): job start timestamp
        - end (datetime object): job end timestamp
    Returns:
        tuple: (duration magnitude, unit string); unit is 'seconds' for
        durations under one minute, otherwise 'minutes'
    """
    # BUGFIX: timedelta.seconds drops whole days from the duration;
    # total_seconds() accounts for the full span.
    total = (end - start).total_seconds()
    if total < 60:
        return total, 'seconds'
    return total / 60, 'minutes'
# -- main ---------------------------------------------------------------------
def main():
    """ copies ec2 instance tags to attached resources

    For every configured profile/region pair: list instances, filter out
    prohibited tag keys, then (unless DEBUGMODE) clear and re-apply tags
    on each attached EBS volume and network interface.
    """
    for profile in profiles:
        # derive account alias from profile
        account = '-'.join(profile.split('-')[1:])
        for region in regions:
            #instances = []
            session = boto3.Session(profile_name=profile, region_name=region)
            client = session.client('ec2')
            ec2 = session.resource('ec2')
            instances = get_instances(profile, region)
            if SUMMARY_REPORT:
                # report-only mode: count instances, change nothing
                print('\nFor AWS Account %s, region %s, Found %d Instances\n' % (account, region, len(instances)))
                continue
            # copy tags
            if instances:
                try:
                    base = ec2.instances.filter(InstanceIds=instances)
                    ct = 0
                    for instance in base:
                        ids, after_tags = [], []
                        ct += 1
                        if instance.tags:
                            # filter out tags to prohibited from copy
                            filtered_tags = filter_tags(instance.tags, *NO_COPY_LIST)
                        else:
                            # no tags on instance to copy
                            continue
                        if not valid_tags(filtered_tags):
                            print('\nWARNING:')
                            logger.warning('Skipping instance ID %s, Invalid Tags\n' % instance.id)
                            continue
                        # collect attached resource ids to be tagged
                        for vol in instance.volumes.all():
                            ids.append(vol.id)
                        for eni in instance.network_interfaces:
                            ids.append(eni.id)
                        logger.info('InstanceID %s, instance %d of %d:' % (instance.id, ct, len(instances)))
                        logger.info('Resource Ids to tag is:')
                        logger.info(str(ids) + '\n')
                        if DEBUGMODE:
                            # dry run: print before/after tag sets, apply nothing
                            # BEFORE tag copy
                            logger.info('BEFORE list of %d tags is:' % (len(instance.tags)))
                            pretty_print_tags(instance.tags)
                            # AFTER tag copy | put Name tag back into apply tags, ie, after_tags
                            retain_tags = select_tags(instance.tags, PRESERVE_TAGS)
                            for tag in (*retain_tags, *filtered_tags):
                                after_tags.append(tag)
                            logger.info('For InstanceID %s, the AFTER FILTERING list of %d tags is:' % (instance.id, len(after_tags)))
                            logger.info('Tags to apply are:')
                            pretty_print_tags(after_tags)
                        else:
                            logger.info('InstanceID %s, instance %d of %d:' % (instance.id, ct, len(instances)))
                            if filtered_tags:  # we must have something to apply
                                # apply tags
                                for resourceId in ids:
                                    # retain a copy of tags to preserve if is a volume
                                    if resourceId.startswith('vol-'):
                                        r = client.describe_tags(
                                            Filters=[{
                                                'Name': 'resource-id',
                                                'Values': [resourceId],
                                            },
                                            ]
                                        )
                                        retain_tags = select_tags(r['Tags'], PRESERVE_TAGS)
                                        # add retained tags before appling to volume
                                        if retain_tags:
                                            for tag in retain_tags:
                                                filtered_tags.append(tag)
                                    # clear tags
                                    print('\n')
                                    logger.info('Clearing tags on resource: %s' % str(resourceId))
                                    client.delete_tags(Resources=[resourceId], Tags=[])
                                    # create new tags
                                    logger.info('Applying tags to resource %s\n' % resourceId)
                                    ec2.create_tags(Resources=[resourceId], Tags=filtered_tags)
                                    # delay to throttle API requests
                                    sleep(1)
                except ClientError as e:
                    # log AWS-side failures with the calling function name, then abort
                    logger.exception(
                        "%s: Problem (Code: %s Message: %s)" %
                        (inspect.stack()[0][3], e.response['Error']['Code'],
                         e.response['Error']['Message'])
                    )
                    raise
if __name__ == '__main__':
    # Time the full run and log start/end timestamps plus duration on exit
    start_time = datetime.datetime.now()
    main()
    end_time = datetime.datetime.now()
    duration, unit = calc_runtime(start_time, end_time)
    logger.info('Job Start: %s' % start_time.isoformat())
    logger.info('Job End: %s' % end_time.isoformat())
    logger.info('Job Completed. Duration: %d %s' % (duration, unit))
    sys.exit(0)
| 38.383673 | 134 | 0.509783 |
ace3911e5da732a4ab18450b75a51ce29e5997d9 | 202 | py | Python | fastai/text/__init__.py | shafiul/fastai | 08d6de8a9a89a77569bfbccca278fc5522772100 | [
"Apache-2.0"
] | 2 | 2019-02-08T04:59:27.000Z | 2020-05-15T21:17:23.000Z | fastai/text/__init__.py | shafiul/fastai | 08d6de8a9a89a77569bfbccca278fc5522772100 | [
"Apache-2.0"
] | 3 | 2021-05-20T19:59:09.000Z | 2022-02-26T09:11:29.000Z | fastai/text/__init__.py | shafiul/fastai | 08d6de8a9a89a77569bfbccca278fc5522772100 | [
"Apache-2.0"
] | 1 | 2020-01-09T15:44:46.000Z | 2020-01-09T15:44:46.000Z | from .learner import *
from .data import *
from .transform import *
from .models import *
from .. import text
__all__ = [*learner.__all__, *data.__all__, *transform.__all__, *models.__all__, 'text']
| 22.444444 | 89 | 0.717822 |
ace3919004d45c3d3a5c58aa7a646aabdd557a91 | 57 | py | Python | djangobible/migrations/__init__.py | avendesora/django-scripture-index | 9877e74f9864d3c7d300409e8b1be9a1c3cabcf4 | [
"MIT"
] | 1 | 2020-10-10T18:24:08.000Z | 2020-10-10T18:24:08.000Z | djangobible/migrations/__init__.py | avendesora/django-scripture-index | 9877e74f9864d3c7d300409e8b1be9a1c3cabcf4 | [
"MIT"
] | 1 | 2021-02-23T11:45:23.000Z | 2021-02-24T10:20:41.000Z | djangobible/migrations/__init__.py | avendesora/django-scripture-index | 9877e74f9864d3c7d300409e8b1be9a1c3cabcf4 | [
"MIT"
] | 1 | 2020-10-27T18:02:37.000Z | 2020-10-27T18:02:37.000Z | """Database migration(s) for the djangobible library."""
| 28.5 | 56 | 0.736842 |
ace3925b882cb30abe911eaced709e862951fecd | 11,063 | py | Python | cogs/Ordsky.py | LBlend/MornBot | 5a89bdabaf85ea5411b64bddb1cf974c2b9ac808 | [
"MIT"
] | 2 | 2018-12-30T18:53:01.000Z | 2020-01-20T05:14:04.000Z | cogs/Ordsky.py | LBlend/MornBot | 5a89bdabaf85ea5411b64bddb1cf974c2b9ac808 | [
"MIT"
] | 55 | 2018-12-30T21:55:32.000Z | 2020-01-29T00:01:10.000Z | cogs/Ordsky.py | LBlend/MornBot | 5a89bdabaf85ea5411b64bddb1cf974c2b9ac808 | [
"MIT"
] | 8 | 2019-01-24T22:39:38.000Z | 2020-01-17T23:29:19.000Z | import discord
from discord.ext import commands
from codecs import open
import json
from PIL import Image
from numpy import array
from wordcloud import WordCloud
from re import sub
from os import remove
import functools
from io import BytesIO
from cogs.utils import Defaults
class Ordsky(commands.Cog):
    def __init__(self, bot):
        # Keep a reference to the bot for Discord API and database access
        self.bot = bot
    @staticmethod
    def generate(text, mask, filtered_words):
        """Generate wordcloud

        Builds a word cloud image from ``text``, shaped by ``mask`` and
        excluding ``filtered_words``, and returns it as an in-memory PNG
        buffer (BytesIO rewound to position 0).
        """
        wc = WordCloud(max_words=4000, mask=mask, repeat=False, stopwords=filtered_words)
        wc.process_text(text)
        wc.generate(text)
        img = wc.to_image()
        # Serialize to PNG in memory so the caller can upload it directly
        b = BytesIO()
        img.save(b, 'png')
        b.seek(0)
        return b
    @commands.bot_has_permissions(embed_links=True)
    @commands.group()
    async def ordsky(self, ctx):
        """Generer en ordsky basert på meldingene dine"""
        # Bare `ordsky` with no subcommand behaves like `ordsky generer`
        if ctx.invoked_subcommand is None:
            await self.generer.invoke(ctx)
    @commands.cooldown(1, 2, commands.BucketType.user)
    @ordsky.command(aliases=['consent'])
    async def samtykke(self, ctx):
        """Gi samtykke til å samle meldingsdataen din"""
        # Record the user's consent flag in the 'ordsky' collection
        database_find = {'_id': ctx.author.id}
        try:
            database_user = self.bot.database['ordsky'].find_one(database_find)
        except:
            return await Defaults.error_fatal_send(ctx, text='Jeg har ikke tilkobling til databasen\n\n' +
                                                             'Be båtteier om å fikse dette')
        if database_user is None:
            # First contact: create the record, then flip consent to True
            self.bot.database['ordsky'].insert_one({'_id': ctx.author.id, 'ordsky_consent': False})
            self.bot.database['ordsky'].find_one(database_find)
            self.bot.database['ordsky'].update_one(database_find, {'$set': {'ordsky_consent': True}})
        else:
            self.bot.database['ordsky'].update_one(database_find, {'$set': {'ordsky_consent': True}})
        embed = discord.Embed(color=ctx.me.color, description='✅ Samtykke registrert!')
        await Defaults.set_footer(ctx, embed)
        await ctx.send(embed=embed)
    @commands.cooldown(1, 2, commands.BucketType.user)
    @ordsky.command(aliases=['ingensamtykke', 'noconsent', 'slettdata'])
    async def tabort(self, ctx):
        """Fjern samtykke og slett meldingsdata"""
        database_find = {'_id': ctx.author.id}
        try:
            database_user = self.bot.database['ordsky'].find_one(database_find)
        except:
            return await Defaults.error_fatal_send(ctx, text='Jeg har ikke tilkobling til databasen\n\n' +
                                                             'Be båtteier om å fikse dette')
        # NOTE(review): find_one may return None when the user has no record;
        # delete_one(None) would then raise -- confirm intended behaviour.
        self.bot.database['ordsky'].delete_one(database_user)
        embed = discord.Embed(color=ctx.me.color, description='✅ Meldingsdata er slettet!')
        await Defaults.set_footer(ctx, embed)
        await ctx.send(embed=embed)
    @commands.cooldown(1, 60, commands.BucketType.user)
    @ordsky.command(aliases=['mydata'])
    async def data(self, ctx):
        """Få tilsendt dine data"""
        # Fetch the user's stored record, dump it to a temp JSON file,
        # DM it to them, then clean up the temp file.
        database_find = {'_id': ctx.author.id}
        try:
            database_user = self.bot.database['ordsky'].find_one(database_find)
        except:
            return await Defaults.error_fatal_send(ctx, text='Jeg har ikke tilkobling til databasen\n\n' +
                                                             'Be båtteier om å fikse dette')
        if database_user is None:
            # No record yet: create a default (no-consent) one and bail out
            self.bot.database['ordsky'].insert_one({'_id': ctx.author.id, 'ordsky_consent': False})
            return await Defaults.error_warning_send(ctx, text='Jeg har ingen data om deg å sende eller ' +
                                                               'så kan jeg ikke sende meldinger til deg!')
        try:
            database_user['ordsky_data']
        except KeyError:
            return await Defaults.error_warning_send(ctx, text='Jeg har ingen data om deg å sende eller ' +
                                                               'så kan jeg ikke sende meldinger til deg!')
        # Concatenate all non-empty per-guild message blobs
        raw_data = ''
        for value in database_user['ordsky_data'].values():
            if value is None:
                continue
            else:
                raw_data += value
        if raw_data == '':
            return await Defaults.error_warning_send(ctx, text='Jeg har ingen data om deg å sende eller ' +
                                                               'så kan jeg ikke sende meldinger til deg!')
        # Write the full record to a temp file for the DM attachment
        with open(f'./assets/temp/{ctx.author.id}_ordsky.json', 'w') as f:
            json.dump(database_user, f, indent=4)
        try:
            await ctx.author.send(file=discord.File(f'./assets/temp/{ctx.author.id}_ordsky.json'))
            embed = discord.Embed(color=ctx.me.color, description='✅ Meldingsdata har blitt sendt i DM!')
            await Defaults.set_footer(ctx, embed)
            await ctx.send(embed=embed)
        except:
            # DM can fail if the user has blocked the bot / disabled DMs
            await Defaults.error_fatal_send(ctx, text='Sending av data feilet! Sjekk om du har blokkert meg')
        try:
            remove(f'./assets/temp/{ctx.author.id}_ordsky.json')
        except:
            pass
@commands.bot_has_permissions(embed_links=True, read_message_history=True, attach_files=True)
@commands.cooldown(1, 150, commands.BucketType.user)
@ordsky.command(aliases=['generate', 'create', 'lag'])
async def generer(self, ctx):
    """Generer en ordsky"""
    # (Docstring is user-facing Discord help text in Norwegian:
    # "Generate a word cloud".)
    # Flow: verify consent -> harvest the user's messages on this guild
    # (shallow pass if cached data exists, deep pass otherwise) -> cache the
    # text in MongoDB -> render a word-cloud image off the event loop ->
    # post it back to the channel.
    database_find = {'_id': ctx.author.id}
    try:
        database_user = self.bot.database['ordsky'].find_one(database_find)
    except:
        return await Defaults.error_fatal_send(ctx, text='Jeg har ikke tilkobling til databasen\n\n' +
                                               'Be båtteier om å fikse dette')
    if database_user is None:
        # First contact with this user: create a consent=False stub record.
        self.bot.database['ordsky'].insert_one({'_id': ctx.author.id, 'ordsky_consent': False})
        self.bot.database['ordsky'].find_one(database_find)
        database_user = self.bot.database['ordsky'].find_one(database_find)
    if database_user['ordsky_consent'] is False:
        # No consent yet: tell the user how to grant it and refund the
        # 150-second cooldown so they can retry immediately afterwards.
        await Defaults.error_warning_send(ctx, text='Du må gi meg tillatelse til å samle og beholde ' +
                                          'meldingsdataene dine.\n\n' +
                                          f'Skriv `{self.bot.prefix}ordsky samtykke` for å gjøre dette',
                                          mention=True)
        self.bot.get_command(f'{ctx.command}').reset_cooldown(ctx)
        return
    embed = discord.Embed(description='**Henter meldinger:** ⌛\n**Generer ordsky:** -')
    await Defaults.set_footer(ctx, embed)
    status_msg = await ctx.send(embed=embed)
    # Messages whose first characters contain any of these are assumed to be
    # bot commands and are excluded from the cloud.
    command_prefixes = ['§', '!', '.', '-', '€', '|', '$', '=', '?', '<', ':', '#', ',']
    message_data = ''
    try:
        # Probe: does cached data for this guild already exist?
        database_user['ordsky_data'][f'{ctx.guild.id}']
        # Cached data exists: shallow top-up pass (300 messages/channel).
        for channel in ctx.guild.text_channels:
            # Only harvest channels the invoking user can post in.
            if not channel.permissions_for(ctx.author).send_messages:
                continue
            try:
                async for message in channel.history(limit=300):
                    has_prefixes = False
                    if message.author.id == ctx.author.id:
                        for prefixes in command_prefixes:
                            if prefixes in message.clean_content[:3]:
                                has_prefixes = True
                        if has_prefixes is False:
                            message_data += f'[{str(message.created_at)[0:19]}] ' +\
                                            f'({message.channel.id}-{message.id}) ' +\
                                            f'{message.clean_content} '
            except:
                # Channel history unreadable (e.g. missing permission): skip.
                continue
    except KeyError:
        # No cached data for this guild: deep pass (2000 messages/channel).
        for channel in ctx.guild.text_channels:
            if not channel.permissions_for(ctx.author).send_messages:
                continue
            try:
                async for message in channel.history(limit=2000):
                    has_prefixes = False
                    if message.author.id == ctx.author.id:
                        for prefixes in command_prefixes:
                            if prefixes in message.clean_content[:3]:
                                has_prefixes = True
                        if has_prefixes is False:
                            message_data += f'[{str(message.created_at)[0:19]}] ' +\
                                            f'({message.channel.id}-{message.id}) ' +\
                                            f'{message.clean_content} '
            except:
                continue
    if message_data != '':
        # Persist the harvested text per guild (upsert creates the field).
        self.bot.database['ordsky'].update_one(database_find,
                                               {'$set': {f'ordsky_data.{ctx.guild.id}': message_data}}, upsert=True)
    database_user = self.bot.database['ordsky'].find_one(database_find)
    try:
        message_data = database_user['ordsky_data'][f'{ctx.guild.id}']
    except KeyError:
        return await Defaults.error_fatal_edit(ctx, status_msg,
                                               text='Har ikke nok meldingsdata for å generere ordsky')
    database_message_data = message_data
    embed = discord.Embed(description='**Henter meldinger:** ✅\n**Generer ordsky:** ⌛')
    await Defaults.set_footer(ctx, embed)
    await status_msg.edit(embed=embed)
    # Strip URLs, emoji shortcodes, channel- and user-mentions before
    # feeding the text to the word-cloud generator.
    text = sub(r'http\S+', '', database_message_data)
    text = sub(r':\S+', '', text)
    text = sub(r'#\S+', '', text)
    text = sub(r'@\S+', '', text)
    with open('./assets/ordsky/ordliste.txt', 'r', encoding='utf-8') as f:
        filtered_words = [line.split(',') for line in f.readlines()]
        filtered_words = filtered_words[0]
    mask = array(Image.open('./assets/ordsky/mask/skyform.png'))
    # Run the CPU-heavy rendering in an executor to keep the bot responsive.
    task = functools.partial(Ordsky.generate, text, mask, filtered_words)
    b = await self.bot.loop.run_in_executor(None, task)
    # Use the author's role colour for the embed unless it is pure black
    # (Discord's "no colour" value) -- then fall back to the default grey.
    if str(ctx.author.color) != '#000000':
        color = ctx.author.color
    else:
        color = discord.Colour(0x99AAB5)
    final_image = discord.File(b, filename=f'{ctx.author.id}_{ctx.guild.id}.png')
    embed = discord.Embed(color=color, description='☁️ Her er ordskyen din! ☁️')
    embed.set_image(url=f'attachment://{ctx.author.id}_{ctx.guild.id}.png')
    await Defaults.set_footer(ctx, embed)
    await ctx.send(file=final_image, content=ctx.author.mention, embed=embed)
    await status_msg.delete()
def setup(bot):
    """discord.py extension entry point: register the Ordsky cog on *bot*."""
    bot.add_cog(Ordsky(bot))
| 44.075697 | 119 | 0.543704 |
ace3927aa79c25a66dbaa7e212ef3ad3a4c2cb1f | 30 | py | Python | rules/__init__.py | pvantonov/mini-readability | 189ea6f41828019caf2a1b1f9230b5401ac19168 | [
"MIT"
] | null | null | null | rules/__init__.py | pvantonov/mini-readability | 189ea6f41828019caf2a1b1f9230b5401ac19168 | [
"MIT"
] | null | null | null | rules/__init__.py | pvantonov/mini-readability | 189ea6f41828019caf2a1b1f9230b5401ac19168 | [
"MIT"
] | null | null | null | __author__ = 'Blackwanderer'
| 15 | 29 | 0.766667 |
ace392c2e7978458d69391b19f4af8d5071ec7cc | 3,667 | py | Python | molliesms/api.py | nederhoed/mollie | 53755acbabccf6f729b76341bb4dd2a981770292 | [
"BSD-3-Clause"
] | null | null | null | molliesms/api.py | nederhoed/mollie | 53755acbabccf6f729b76341bb4dd2a981770292 | [
"BSD-3-Clause"
] | null | null | null | molliesms/api.py | nederhoed/mollie | 53755acbabccf6f729b76341bb4dd2a981770292 | [
"BSD-3-Clause"
] | null | null | null | """\
Python implementation of the Mollie SMS Abstract Programming Interface
"""
from xml.dom.minidom import parseString
import types
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import molliesms.exceptions
class MollieSMS(object):
    """Client for the Mollie/Messagebird SMS gateway.

    An instance stores the credentials and gateway configuration once so
    several messages can be sent with the same settings via :meth:`send`.
    The :meth:`sendsms` classmethod is a one-shot alternative where every
    argument is passed explicitly.

    As per August 2014 the old Mollie endpoints no longer work: all SMS
    services moved to Messagebird, and only secure calls can be made.
    """

    DEFAULT_MOLLIEGW = "https://api.messagebird.com/xml/sms"
    SECURE_MOLLIEGW = "https://api.messagebird.com/xml/sms"
    DUTCH_GW = 1
    FOREIGN_GW = 2
    NORMAL_SMS = "normal"
    WAPPUSH_SMS = "wappush"
    VCARD_SMS = "vcard"

    def __init__(self, username, password, originator=None,
                 molliegw=None, gateway=None):
        """Store the configuration reused by every :meth:`send` call.

        username / password -- gateway authentication credentials.
        originator          -- default SMS sender number, e.g. +31612345678.
        molliegw            -- full gateway URL; defaults to
                               ``DEFAULT_MOLLIEGW`` when omitted.
        gateway             -- gateway selector; defaults to ``FOREIGN_GW``.
        """
        self.username = username
        self.password = password
        self.originator = originator
        self.molliegw = molliegw or MollieSMS.DEFAULT_MOLLIEGW
        self.gateway = gateway or MollieSMS.FOREIGN_GW

    def send(self, recipients, message, originator=None, deliverydate=None,
             smstype=None, dryrun=False):
        """Send a single SMS using this instance's stored configuration."""
        sender = originator or self.originator
        return self.sendsms(self.username, self.password, sender,
                            recipients, message, self.molliegw, self.gateway,
                            deliverydate, smstype, dryrun)

    @classmethod
    def sendsms(cls, username, password, originator, recipients,
                message, molliegw=None, gateway=None, deliverydate=None,
                smstype=None, dryrun=False):
        """Build the gateway request, execute it, and return the number of
        recipients the gateway accepted.

        With ``dryrun=True`` the request URL is printed instead of sent and
        0 is returned.  NOTE: ``gateway``, ``deliverydate`` and ``smstype``
        are accepted for interface compatibility but are not forwarded to
        the gateway.  Raises a gateway-specific exception (looked up in
        ``molliesms.exceptions.by_code``) when the gateway reports failure.
        """
        if type(recipients) not in (tuple, list):
            recipients = [recipients]
        payload = {
            'username': username,
            'password': password,
            'originator': originator,
            'recipients': ",".join(recipients),
            'message': message,
        }
        gateway_url = molliegw or MollieSMS.DEFAULT_MOLLIEGW
        url = "{0}?{1}".format(gateway_url, urllib.parse.urlencode(payload))
        if dryrun:
            print(url)
            return 0
        reply = urllib.request.urlopen(url, timeout=30)
        dom = parseString(reply.read())

        def _first_text(tag):
            # The gateway answer is a small XML document with one element
            # per field; pull the text of the first matching element.
            return dom.getElementsByTagName(tag)[0].childNodes[0].data

        accepted = int(_first_text("recipients"))
        success = _first_text("success")
        resultcode = int(_first_text("resultcode"))
        resultmessage = _first_text("resultmessage")
        if success != "true":
            raise molliesms.exceptions.by_code[resultcode](resultmessage)
        return accepted
| 34.92381 | 87 | 0.638124 |
ace3931cefd8c60a898cc72386240a85e16c5a38 | 365 | py | Python | stockp/logger.py | costajob/stock_prices | be3797ddf54522dc5e04a6e704b5a911be785631 | [
"MIT"
] | 3 | 2018-12-17T14:47:26.000Z | 2019-05-03T13:18:02.000Z | stockp/logger.py | costajob/stock_prices | be3797ddf54522dc5e04a6e704b5a911be785631 | [
"MIT"
] | null | null | null | stockp/logger.py | costajob/stock_prices | be3797ddf54522dc5e04a6e704b5a911be785631 | [
"MIT"
] | 1 | 2021-06-07T09:30:15.000Z | 2021-06-07T09:30:15.000Z | """
Synopsis
--------
The logger singleton for the main program, tracing to ERROR level by default.
"""
import logging
from os import path
# Log file location, resolved relative to the current working directory.
FILENAME = path.abspath('./log/stockp.log')
FORMAT = '[%(asctime)s #%(process)d] -- %(levelname)s : %(message)s'
# Configure the root logger once at import time; tracing at ERROR by default.
logging.basicConfig(filename=FILENAME, format=FORMAT, level=logging.ERROR)
# Shared module logger exported for the rest of the package.
BASE = logging.getLogger(__name__)
| 24.333333 | 77 | 0.717808 |
ace3932890167516e3484827d8fefef32c447369 | 5,412 | py | Python | estudy/views.py | jaiswalshubhamm/estudycorner | 1d8162175580d8b13085b287d4c8ffd5eab876cf | [
"MIT"
] | null | null | null | estudy/views.py | jaiswalshubhamm/estudycorner | 1d8162175580d8b13085b287d4c8ffd5eab876cf | [
"MIT"
] | 1 | 2022-02-10T13:26:46.000Z | 2022-02-10T13:26:46.000Z | estudy/views.py | jaiswalshubhamm/estudycorner | 1d8162175580d8b13085b287d4c8ffd5eab876cf | [
"MIT"
] | null | null | null | from django.shortcuts import render
from estudy.cryptography import Encryption
from .models import Register, Login, Enquiry
from admZone.models import SMS
import random
import json
import requests
from django.contrib import auth
from admZone.models import Notification
import urllib
import datetime
# Create your views here.
def home(request):
    """Render the landing page with all current notifications."""
    data = Notification.objects.all()
    return render(request, 'home.html', {'notification': data})
def about(request):
    """Render the static About page."""
    return render(request, 'about.html')

def downloads(request):
    """Render the downloads/resources page."""
    return render(request, 'download.html')

def contact(request):
    """Render the contact form page."""
    return render(request, 'contact.html')

def registration(request):
    """Render the student registration form."""
    return render(request, 'registration.html')
def registerStudent(request):
    """Handle the student registration form submission.

    Verifies the Google reCAPTCHA response, stores the uploaded profile
    picture under ``Media/userpictures/``, creates a ``Register`` row and a
    ``Login`` row (password encrypted via ``Encryption``), then sends the
    user to the login page.  Non-POST requests just re-render the form.
    """
    if request.method == 'POST':
        sitekey = request.POST['g-recaptcha-response']
        # SECURITY(review): the reCAPTCHA secret key is hardcoded in source;
        # it should live in settings / environment configuration instead.
        secretkey = '6LcGpK0ZAAAAAIcRyKsgxiMMvGElQ0cuNrI_CjTb'
        captchadata = {
            'secret': secretkey,
            'response': sitekey
        }
        r = requests.post('https://www.google.com/recaptcha/api/siteverify', data=captchadata)
        response = json.loads(r.text)
        status = response['success']
        if status:
            uname = request.POST['name']
            gender = request.POST['gender']
            dob = request.POST['dob']
            mobno = request.POST['mobno']
            email = request.POST['email']
            course = request.POST['course']
            # FIX: the original read request.POST['pwd'] twice; once is enough.
            pwd = request.POST['pwd']
            address = request.POST['address']
            file = request.FILES['img']
            # SECURITY FIX: the client-controlled upload filename was used on
            # disk verbatim, allowing path traversal (e.g. "../../x.png").
            # Strip any path separators before building the target path.
            safe_name = file.name.replace('/', '_').replace('\\', '_')
            r = random.randrange(9999999)
            fname = 'Media/userpictures/' + str(r) + safe_name
            with open(fname, 'wb+') as myfile:
                for i in file.chunks():
                    myfile.write(i)
            reg = Register(name=uname, gender=gender, dob=dob, email=email,
                           mobno=mobno, course=course, picture=fname, address=address)
            reg.save()
            c = Encryption()
            l = Login(email=email, pwd=c.encryptMyData(pwd))
            l.save()
            return render(request, 'login.html', {'msg': 'Regstration Success, Please login'})
        else:
            return render(request, 'registration.html', {'msg': 'Invalid Captcha, Please try again'})
    else:
        return render(request, 'registration.html')
def login(request):
    """Render the student login form."""
    return render(request, 'login.html')

def stdLogout(request):
    """Terminate the Django auth session and show the login form again."""
    auth.logout(request)
    return render(request, 'login.html')
def validateStudentLogin(request):
    """Authenticate a student against the Login table.

    On exactly one credential match the user's email is stored in the
    session and the dashboard is shown; otherwise the login form is
    re-rendered with an error message.
    """
    if request.method != 'POST':
        return render(request, 'login.html', {'msg': 'Please Input UserId And Password'})
    user_id = request.POST['email']
    password = request.POST['pwd']
    cipher = Encryption()
    # Passwords are stored encrypted, so compare against the encrypted form.
    matches = Login.objects.all().filter(email=user_id, pwd=cipher.encryptMyData(password))
    match_count = sum(1 for _ in matches)
    if match_count == 1:
        request.session['id'] = user_id
        return render(request, 'stdDashboard.html')
    return render(request, 'login.html', {'msg': 'Invalid ID/Password'})
def saveEnquiry(request):
    """Persist a contact-form enquiry and return to the home page."""
    if request.method != 'POST':
        return render(request, 'home.html', {'msg': 'Please Input Your Query'})
    enquiry = Enquiry(
        name=request.POST['name'],
        email=request.POST['email'],
        mobno=request.POST['phonenumber'],
        topic=request.POST['topic'],
        msg=request.POST['msg'],
    )
    enquiry.save()
    return render(request, 'home.html', {'msg': 'Record submitted successfully.'})
def forgetPassword(request):
    """Render the first step of the password-reset flow (email/mobile form)."""
    return render(request, 'forgetPassword.html')
def forgetOTP(request):
    """Password-reset step 2: verify email+mobile and SMS a one-time code.

    On exactly one matching registration, a random OTP is texted to the
    user and stored in the session for forgetVarify() to check.
    """
    email = request.POST['email']
    mobno = request.POST['mobno']
    r = Register.objects.all().filter(email=email, mobno=mobno)
    # Count matching registrations; exactly one is required to proceed.
    count = 0
    for i in r:
        count = count + 1
    if count == 1:
        try:
            # OTP in [0, 9999]; formatted into the SMS body below.
            num = random.randrange(10000)
            msg = 'Your One Time Password (OTP) for E-Study Corner is {}.'
            otp = msg.format(num)
            nmsg = urllib.request.quote(otp)
            # SMS Code
            # SECURITY(review): the SMS-gateway username and API key are
            # hardcoded in this URL; they should be moved to configuration.
            api = "http://t.160smsalert.com/submitsms.jsp?user=kkmishra&key=5a78c161adXX&mobile={}&message={}&senderid=TXTSMS&accusage=1"
            napi = api.format(mobno, nmsg)
            res = urllib.request.urlopen(napi)
            # Remember the expected OTP so forgetVarify() can compare it.
            request.session['otp'] = num
            return render(request, 'forgetOTP.html', {'email': email})
        except:
            return render(request, 'forgetOTP.html', {'msg': 'Unable To send OTP, Please Try Again.'})
    else:
        return render(request, 'forgetPassword.html', {'msg': 'Invalid Mobile Number/Email'})
def forgetVarify(request):
    """Password-reset step 3: compare the submitted OTP with the session OTP."""
    submitted = request.POST['uotp']
    email = request.POST['email']
    expected = request.session['otp']
    # The stored OTP is an int and the submitted one a string: compare both
    # as strings.
    if str(submitted) == str(expected):
        return render(request, 'newPass.html', {'email': email})
    return render(request, 'forgetOTP.html', {'msg': 'Invalid OTP, Please try again.'})
def newPass(request):
try:
email=request.POST['email']
pwd=request.POST['newpwd']
c=Encryption()
l=Login.objects.filter(email=email)
l.update(pwd=c.encryptMyData(pwd))
return render(request, 'login.html', {'msg': 'Password Changed Succesfully.'})
except:
return render(request, 'newpass.html', {'msg': 'Unable to update password plase try again.','email':email}) | 35.84106 | 135 | 0.62306 |
ace3944eefabee08efa33136722a76bd0f2d94ff | 3,629 | py | Python | phonenumbers/data/region_IT.py | igushev/fase_lib | 182c626193193b196041b18b9974b5b2cbf15c67 | [
"MIT"
] | 7 | 2019-05-20T09:57:02.000Z | 2020-01-10T05:30:48.000Z | phonenumbers/data/region_IT.py | igushev/fase_lib | 182c626193193b196041b18b9974b5b2cbf15c67 | [
"MIT"
] | null | null | null | phonenumbers/data/region_IT.py | igushev/fase_lib | 182c626193193b196041b18b9974b5b2cbf15c67 | [
"MIT"
] | null | null | null | """Auto-generated file, do not edit by hand. IT metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_IT = PhoneMetadata(id='IT', country_code=39, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[01589]\\d{5,10}|3(?:[12457-9]\\d{8}|[36]\\d{7,9})', possible_number_pattern='\\d{6,11}', possible_length=(6, 7, 8, 9, 10, 11)),
fixed_line=PhoneNumberDesc(national_number_pattern='0(?:[26]\\d{4,9}|(?:1(?:[0159]\\d|[27][1-5]|31|4[1-4]|6[1356]|8[2-57])|3(?:[0159]\\d|2[1-4]|3[12]|[48][1-6]|6[2-59]|7[1-7])|4(?:[0159]\\d|[23][1-9]|4[245]|6[1-5]|7[1-4]|81)|5(?:[0159]\\d|2[1-5]|3[2-6]|4[1-79]|6[4-6]|7[1-578]|8[3-8])|7(?:[0159]\\d|2[12]|3[1-7]|4[2346]|6[13569]|7[13-6]|8[1-59])|8(?:[0159]\\d|2[34578]|3[1-356]|[6-8][1-5])|9(?:[0159]\\d|[238][1-5]|4[12]|6[1-8]|7[1-6]))\\d{2,7})', example_number='0212345678', possible_length=(6, 7, 8, 9, 10, 11)),
mobile=PhoneNumberDesc(national_number_pattern='3(?:[12457-9]\\d{8}|6\\d{7,8}|3\\d{7,9})', possible_number_pattern='\\d{9,11}', example_number='3123456789', possible_length=(9, 10, 11)),
toll_free=PhoneNumberDesc(national_number_pattern='80(?:0\\d{6}|3\\d{3})', possible_number_pattern='\\d{6,9}', example_number='800123456', possible_length=(6, 9)),
premium_rate=PhoneNumberDesc(national_number_pattern='0878\\d{5}|1(?:44|6[346])\\d{6}|89(?:2\\d{3}|4(?:[0-4]\\d{2}|[5-9]\\d{4})|5(?:[0-4]\\d{2}|[5-9]\\d{6})|9\\d{6})', possible_number_pattern='\\d{6,10}', example_number='899123456', possible_length=(6, 8, 9, 10)),
shared_cost=PhoneNumberDesc(national_number_pattern='84(?:[08]\\d{6}|[17]\\d{3})', possible_number_pattern='\\d{6,9}', example_number='848123456', possible_length=(6, 9)),
personal_number=PhoneNumberDesc(national_number_pattern='1(?:78\\d|99)\\d{6}', possible_number_pattern='\\d{9,10}', example_number='1781234567', possible_length=(9, 10)),
voip=PhoneNumberDesc(national_number_pattern='55\\d{8}', possible_number_pattern='\\d{10}', example_number='5512345678', possible_length=(10,)),
no_international_dialling=PhoneNumberDesc(national_number_pattern='848\\d{6}', possible_number_pattern='\\d{9}', example_number='848123456', possible_length=(9,)),
number_format=[NumberFormat(pattern='(\\d{2})(\\d{3,4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['0[26]|55']),
NumberFormat(pattern='(0[26])(\\d{4})(\\d{5})', format='\\1 \\2 \\3', leading_digits_pattern=['0[26]']),
NumberFormat(pattern='(0[26])(\\d{4,6})', format='\\1 \\2', leading_digits_pattern=['0[26]']),
NumberFormat(pattern='(0\\d{2})(\\d{3,4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['0[13-57-9][0159]']),
NumberFormat(pattern='(\\d{3})(\\d{3,6})', format='\\1 \\2', leading_digits_pattern=['0[13-57-9][0159]|8(?:03|4[17]|9[245])', '0[13-57-9][0159]|8(?:03|4[17]|9(?:2|[45][0-4]))']),
NumberFormat(pattern='(0\\d{3})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['0[13-57-9][2-46-8]']),
NumberFormat(pattern='(0\\d{3})(\\d{2,6})', format='\\1 \\2', leading_digits_pattern=['0[13-57-9][2-46-8]']),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['[13]|8(?:00|4[08]|9[59])', '[13]|8(?:00|4[08]|9(?:5[5-9]|9))']),
NumberFormat(pattern='(\\d{4})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['894', '894[5-9]']),
NumberFormat(pattern='(\\d{3})(\\d{4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['3'])],
main_country_for_code=True,
leading_zero_possible=True,
mobile_number_portable_region=True)
| 134.407407 | 519 | 0.621383 |
ace395b5e94c06e6ffc902994ace4c5292d6432f | 7,812 | py | Python | product_tagger.py | jayapalreddy540/product_tagger | 6cd80fc18586b619d2a383bf430ae9b7a913c54a | [
"MIT"
] | null | null | null | product_tagger.py | jayapalreddy540/product_tagger | 6cd80fc18586b619d2a383bf430ae9b7a913c54a | [
"MIT"
] | null | null | null | product_tagger.py | jayapalreddy540/product_tagger | 6cd80fc18586b619d2a383bf430ae9b7a913c54a | [
"MIT"
] | null | null | null | # ! /usr/local/bin python
# -*- coding:utf-8 -*-
'''
Main file of the project.
The project aims at findng core terms, brand, and descriptions
from a product title from e-commerce websites.
The e-commerce product title lacks grammatical structure and can
be hardly processed using context information. One characteristic
of e-commerce product titles is that they are usually short. Therefore,
I come up with the Look up tagger approach to ahieve the objective.
The steps are as follows:
- Construct a brand name library and a product name library.
Please refer to `library_builder.py'.
- Build a tag table using the product name library and brand
name library. Please refer to `tagTable_builder.py'.
- Build a lookup tagger using `nltk.UnigramTagger' with the
built tag table.
- After processing each product title, generate unit-, bi-,
and tri-grams. A lot of product and brand names contain ngrams.
- Look up trigrams first, and then bigrams, and at last unigrams.
Trigrams are taken with highest priority and unigrams the lowest.
- If no product name is found in the product title after all the
lookup procedure, extract the last alpha noun in the title as
the product name. If no brand name is found, leave it empty.
- As the product title itself is very short, we assume all the information
in it is useful. Then all the other words are descriptive information.
Usage:
>> tag_product(title)
return (core_term, brand, descriptions)
Example:
>> tag_product('Makibes Unisex Red LED Digital Band Wrist Watch')
(u'watch', u'makibes',u'unisex red led digital band wrist')
@author = Yong Zhang
@email = yzhang067@e.ntu.edu.sg
'''
import nltk
from nltk.util import ngrams
from utils import remove_punct
import _pickle as pickle
import logging
import xlrd
import os
import pandas as pd
import sys
import importlib
importlib.reload(sys)
this_dir = os.getcwd()
Table_PATH = os.path.join(this_dir, "resources", "tagTable.pkl")
def extract_words(all_words):
    """Drop preposition tokens from a tokenized product title.

    Words following a preposition usually indicate components, materials or
    intended usage, so brand/product lookup should skip prepositions --
    with one exception: when the very first token is itself a preposition,
    the title is returned untouched.
    """
    stop_preps = ('for', 'with', 'from', 'of', 'by', 'on')
    if all_words[0] in stop_preps:
        return all_words
    return [token for token in all_words if token not in stop_preps]
def extract_attributes(grams, core_term, brand, color, tagger):
    """Scan tagged n-grams and collect product attributes.

    Parameters
    ----------
    grams : list of str
        Candidate n-grams extracted from the product title.
    core_term : list
        Core terms found by an earlier pass; when non-empty, no new 'P'
        tokens are collected.
    brand : str or None
        Brand found earlier; when falsy, the first 'B' token becomes the brand.
    color : list or None
        Colors found earlier; when falsy, 'C' tokens are collected.
    tagger
        Object exposing ``tag(grams) -> [(token, tag), ...]`` (an nltk tagger).

    Returns
    -------
    tuple
        ``(core_terms, brand, colors)`` where ``core_terms``/``colors`` are
        the lists collected in *this* pass (possibly empty -- matching the
        original behaviour, incoming values are not echoed back for these
        two), and ``brand`` is the first 'B' token found, falling back to
        the incoming ``brand`` when none was.
    """
    core_terms, brands, colors = [], [], []
    for token, tag in tagger.tag(grams):
        if tag == 'C':
            if not color:
                colors.append(token)
        elif tag == 'P':
            if not core_term:
                core_terms.append(token)
        elif tag == 'B':
            if not brand:
                brands.append(token)
    # FIX: the original wrapped all three results in try/except blocks, but
    # only the brands[0] lookup can actually fail (IndexError on an empty
    # list); the other two handlers were dead code.
    return core_terms, brands[0] if brands else brand, colors
def tag_product(product_title):
    """Tag *product_title* and return its core terms, brand, colors and
    description.

    Input:
        string: product_title
    Return:
        list:        core_term (deduplicated)
        string:      brand ('' when none found)
        list:        color (may be empty)
        string:      desc (the remaining descriptive words)
    """
    ## build a lookup tagger from the pickled tag table
    with open(Table_PATH, 'rb') as f:
        tag_table = pickle.load(f)
    unigram_tagger = nltk.UnigramTagger(model=tag_table, backoff=nltk.DefaultTagger('D'))
    bigram_tagger = nltk.BigramTagger(model=tag_table)
    trigram_tagger = nltk.TrigramTagger(model=tag_table)
    ## remove punctuations from the title and convert plurals to singulars
    product_title_tmp = remove_punct(product_title)
    wnl = nltk.WordNetLemmatizer()
    product_words = [wnl.lemmatize(s) for s in product_title_tmp.split()]
    clean_title = ' '.join(product_words)
    ## build unigrams, bigrams and trigrams from which attributes are
    ## extracted; trigrams take highest priority, unigrams lowest
    unigrams = extract_words(product_words)
    bigrams = [' '.join(item) for item in ngrams(unigrams, 2)]
    trigrams = [' '.join(item) for item in ngrams(unigrams, 3)]
    core_term, brand, color = [], None, None
    core_term, brand, color = extract_attributes(trigrams, core_term, brand, color, trigram_tagger)
    if not core_term or not brand:
        core_term, brand, color = extract_attributes(bigrams, core_term, brand, color, bigram_tagger)
    if not core_term or not brand:
        core_term, brand, color = extract_attributes(unigrams, core_term, brand, color, unigram_tagger)
    if not core_term:
        # Fall back to the LAST alphabetic noun in the title.
        pos_words = nltk.pos_tag(unigrams)
        for word, tag in pos_words[::-1]:
            if tag == 'NN' and word.isalpha():
                core_term.append(word)
                # BUG FIX: the original kept looping and collected *every*
                # noun, contradicting the documented "last alpha noun".
                break
    if not brand:
        brand = ''
    ## The title is short, so everything that is not a core term, brand or
    ## color is treated as descriptive information.
    try:
        # BUG FIX: the original compared `type(core_term) == 'list'` (a type
        # against a string -- always False) and then called str.replace with
        # a *list* argument, which raised TypeError and silently made `desc`
        # fall back to the raw title on every call.
        desc = clean_title
        for term in core_term:
            desc = desc.replace(term, '')
        if brand:
            desc = desc.replace(brand, '')
        for c in (color or []):
            desc = desc.replace(c, '')
        desc = ' '.join(w for w in desc.split())
    except Exception:
        print('Cannot find core terms from the product title')
        desc = product_title
    return list(set(core_term)), brand, color, desc
def extract_file(excel_sheet, filename_save):
    """Extract product attributes from every title in *excel_sheet* (an xlrd
    sheet; titles are in column 1) and write one CSV row per product to
    *filename_save*, overwriting any previous file.
    """
    if os.path.exists(filename_save):
        os.remove(filename_save)
    with open(filename_save, 'a') as f:
        # FIX: header now matches the five fields written per row (the
        # original listed four column names but wrote five values).
        f.write('Product name, Core terms, Brands, Colors, Discriptions\n')
        for row_idx in range(1, excel_sheet.nrows):
            product_title = excel_sheet.cell(row_idx, 1).value
            core_term, brand, color, desc = tag_product(product_title)
            # BUG FIX: tag_product returns `core_term` and `color` as lists;
            # the original concatenated them to str with '+', raising
            # TypeError.  Join them into space-separated strings instead.
            core_str = ' '.join(core_term)
            color_str = ' '.join(color) if color else ''
            f.write(product_title.replace(',', '') + ',' + core_str + ',' +
                    brand + ',' + color_str + ',' + desc + '\n')
def project_reuslt():
    """
    Testing the tagger on project data. Uncomment `project_reuslt()' and
    comment other lines and run.
    """
    # NOTE(review): the function name has a typo ("reuslt") but is kept
    # because the commented-out call in __main__ references it by this name.
    file_dir = 'Product Data.xlsx'
    workbook = xlrd.open_workbook(file_dir)
    sheet_sample = workbook.sheet_by_index(0)  # sheet 0: sample products
    sheet_test = workbook.sheet_by_index(1)    # sheet 1: test products
    extract_file(sheet_sample, 'results/sample_results.csv')
    extract_file(sheet_test, 'results/test_results.csv')
if __name__ == '__main__':
    # Demo driver: tag the first 10 descriptions from the test CSV and print
    # the extracted attributes.
    # NOTE(review): the read of brand1.txt below is dead code -- `titles` is
    # immediately overwritten by the CSV column two lines later.
    with open('./resources/brand1.txt', 'r') as read_file:
        titles = read_file.readlines()
    df = pd.read_csv('resources/test/description.csv')
    titles = list(df['Description'])
    titles = titles[0:10]
    for title in titles:
        core_term, brand, color, desc = tag_product(title)
        print('===========')
        print('Core term: ', core_term)
        print('Brand: ', brand)
        print('Color: ', color)
        print('Descriptions: ', desc)
    # project_reuslt()
| 33.672414 | 103 | 0.65681 |
ace397a0318b10b0cabc3a90e56e238a60d6dfe9 | 5,888 | py | Python | signal2html/html.py | Otto-AA/signal2html | 50f0cd37517c351c003f165a9dafdf1fb70444c7 | [
"MIT"
] | null | null | null | signal2html/html.py | Otto-AA/signal2html | 50f0cd37517c351c003f165a9dafdf1fb70444c7 | [
"MIT"
] | null | null | null | signal2html/html.py | Otto-AA/signal2html | 50f0cd37517c351c003f165a9dafdf1fb70444c7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Code for writing out the HTML
Author: Gertjan van den Burg
"""
import os
import datetime as dt
from emoji import emoji_lis as emoji_list
from jinja2 import Environment, PackageLoader, select_autoescape
from .models import MMSMessageRecord
from .types import (
get_named_message_type,
is_inbox_type,
is_incoming_call,
is_joined_type,
is_missed_call,
is_outgoing_call,
)
from .html_colors import COLORMAP
def is_all_emoji(body):
    """ Check if a message is non-empty and only contains emoji """
    # Spaces and U+FE0F variation selectors are ignored; after stripping
    # them, the message counts as "all emoji" when the `emoji` package
    # reports as many emoji as there are remaining characters.
    # NOTE(review): this assumes one reported emoji per character position;
    # multi-codepoint emoji (flags, ZWJ sequences) may not satisfy that --
    # confirm against the pinned `emoji` package version.
    body = body.replace(" ", "").replace("\ufe0f", "")
    return len(emoji_list(body)) == len(body) and len(body) > 0
def format_emoji(body, is_quote=False):
    """Wrap each emoji character in *body* in a <span class='msg-emoji'>
    element so it can be styled in the generated HTML.

    ``is_quote`` is accepted for interface compatibility but does not affect
    the output.
    """
    # Map character position -> emoji, as reported by the `emoji` package.
    positions = {hit["location"]: hit["emoji"] for hit in emoji_list(body)}
    pieces = []
    for idx, ch in enumerate(body):
        if idx in positions:
            pieces.append("<span class='msg-emoji'>" + positions[idx] + "</span>")
        else:
            pieces.append(ch)
    return "".join(pieces)
def dump_thread(thread, output_dir):
    """Write a Thread instance to a HTML page in the output directory.

    Renders the thread's combined SMS/MMS history through the packaged
    Jinja2 `thread.html` template and saves it as
    `<output_dir>/<thread name>.html`.
    """
    # Combine and sort the messages chronologically by sent timestamp
    messages = thread.mms + thread.sms
    messages.sort(key=lambda mr: mr.dateSent)
    # Find the template
    env = Environment(
        loader=PackageLoader("signal2html", "templates"),
        autoescape=select_autoescape(["html", "xml"]),
    )
    template = env.get_template("thread.html")
    is_group = thread.recipient.isgroup
    # Create the message color CSS (depends on individuals): one
    # `.msg-sender-<idx>` rule per distinct sender.
    group_color_css = ""
    msg_css = ".msg-sender-%i { background: %s; }\n"
    if is_group:
        group_recipients = set(m.addressRecipient for m in messages)
        sender_idx = {r: k for k, r in enumerate(group_recipients)}
        colors_used = []
        group_colors = set(ar.color for ar in sender_idx)
        for ar, idx in sender_idx.items():
            # Skip the synthetic group recipient itself.
            if ar.recipientId._id.startswith("__textsecure_group__"):
                continue
            # ensure colors are unique, even if they're not in Signal
            ar_color = ar.color
            if ar_color in colors_used:
                color = next(
                    (c for c in COLORMAP if not c in group_colors),
                    None,
                )
                ar_color = ar.color if color is None else color
            group_color_css += msg_css % (idx, COLORMAP[ar_color])
            colors_used.append(ar.color)
    else:
        # 1:1 thread: use the color of the first inbound message's sender,
        # defaulting to teal when there is none (or no color set).
        firstInbox = next(
            (m for m in messages if is_inbox_type(m._type)), None
        )
        clr = firstInbox.addressRecipient.color if firstInbox else "teal"
        clr = "teal" if clr is None else clr
        group_color_css += msg_css % (0, COLORMAP[clr])
    # Create a simplified dict for each message for the template
    prev_date = None
    simple_messages = []
    for msg in messages:
        # "X joined the group" events are not rendered.
        if is_joined_type(msg._type):
            continue
        # Add a "date change" message when to mark the date
        # (dateSent is in milliseconds since the epoch).
        date_sent = dt.datetime.fromtimestamp(msg.dateSent // 1000)
        date_sent = date_sent.replace(microsecond=(msg.dateSent % 1000) * 1000)
        if prev_date is None or date_sent.date() != prev_date:
            prev_date = date_sent.date()
            out = {
                "date_msg": True,
                "body": date_sent.strftime("%a, %b %d, %Y"),
            }
            simple_messages.append(out)
        # Handle calls: replace the body with a call description.
        is_call = False
        if is_incoming_call(msg._type):
            is_call = True
            msg.body = f"{thread.name} called you"
        elif is_outgoing_call(msg._type):
            is_call = True
            msg.body = "You called"
        elif is_missed_call(msg._type):
            is_call = True
            msg.body = "Missed call"
        # Deal with quoted messages (MMS only)
        quote = {}
        if isinstance(msg, MMSMessageRecord) and msg.quote:
            quote_author_id = msg.quote.author.recipientId._id
            quote_author_name = msg.quote.author.name
            # When the recipient id equals the display name the quote was
            # authored by the local user.
            if quote_author_id == quote_author_name:
                name = "You"
            else:
                name = quote_author_name
            quote = {
                "name": name,
                "body": format_emoji(msg.quote.text),
                "attachments": [],
            }
        # Clean up message body; emoji-only messages get special styling.
        body = "" if msg.body is None else msg.body
        if isinstance(msg, MMSMessageRecord):
            all_emoji = not msg.quote and is_all_emoji(body)
        else:
            all_emoji = is_all_emoji(body)
        body = format_emoji(body)
        aR = msg.addressRecipient
        out = {
            "isAllEmoji": all_emoji,
            "isGroup": is_group,
            "isCall": is_call,
            "type": get_named_message_type(msg._type),
            "body": body,
            "date": date_sent,
            "attachments": [],
            "id": msg._id,
            "name": aR.name[0],
            "sender_idx": sender_idx[aR] if is_group else "0",
            "quote": quote,
        }
        # Add attachments, routing quote attachments into the quote dict.
        if isinstance(msg, MMSMessageRecord):
            for a in msg.attachments:
                if a.quote:
                    out["quote"]["attachments"].append(a)
                else:
                    out["attachments"].append(a)
        simple_messages.append(out)
    # Nothing to render (e.g. only "joined" events): write no file.
    if not simple_messages:
        return
    html = template.render(
        thread_name=thread.name,
        messages=simple_messages,
        group_color_css=group_color_css,
    )
    os.makedirs(output_dir, exist_ok=True)
    filename = os.path.join(
        output_dir, thread.name.replace(" ", "_") + ".html"
    )
    with open(filename, "w", encoding="utf-8") as fp:
        fp.write(html)
| 31.655914 | 79 | 0.578465 |
ace397e9c47ef99814856640bede208111f2ad29 | 4,177 | py | Python | app/grandchallenge/pages/migrations/0003_historicalpage.py | kaczmarj/grand-challenge.org | 8dc8a2170e51072354f7e94f2a22578805a67b94 | [
"Apache-2.0"
] | 7 | 2016-11-05T07:16:30.000Z | 2017-11-23T03:38:03.000Z | app/grandchallenge/pages/migrations/0003_historicalpage.py | kaczmarj/grand-challenge.org | 8dc8a2170e51072354f7e94f2a22578805a67b94 | [
"Apache-2.0"
] | 113 | 2015-05-26T09:27:59.000Z | 2018-03-21T10:45:56.000Z | app/grandchallenge/pages/migrations/0003_historicalpage.py | kaczmarj/grand-challenge.org | 8dc8a2170e51072354f7e94f2a22578805a67b94 | [
"Apache-2.0"
] | 7 | 2015-07-16T20:11:22.000Z | 2017-06-06T02:41:24.000Z | # Generated by Django 3.1.11 on 2021-06-01 10:00
import django.db.models.deletion
import simple_history.models
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration adding the ``HistoricalPage`` model.

    ``HistoricalPage`` is the audit/history table that django-simple-history
    keeps for ``Page`` (note the ``HistoricalChanges`` base class below):
    every field of ``Page`` is mirrored, plus the ``history_*`` bookkeeping
    columns. Do not edit field definitions by hand; regenerate instead.
    """

    dependencies = [
        ("challenges", "0006_auto_20210601_0802"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("pages", "0002_auto_20210601_0802"),
    ]

    operations = [
        migrations.CreateModel(
            name="HistoricalPage",
            fields=[
                # Mirrors the original Page primary key; NOT the primary key
                # here (history_id below is), hence plain IntegerField.
                (
                    "id",
                    models.IntegerField(
                        auto_created=True,
                        blank=True,
                        db_index=True,
                        verbose_name="ID",
                    ),
                ),
                ("title", models.SlugField(max_length=64)),
                (
                    "permission_level",
                    models.CharField(
                        choices=[
                            ("ALL", "All"),
                            ("REG", "Participants only"),
                            ("ADM", "Administrators only"),
                        ],
                        default="ALL",
                        max_length=3,
                    ),
                ),
                (
                    "order",
                    models.IntegerField(
                        default=1,
                        editable=False,
                        help_text="Determines order in which page appear in site menu",
                    ),
                ),
                (
                    "display_title",
                    models.CharField(
                        blank=True,
                        default="",
                        help_text="On pages and in menu items, use this text. Spaces and special chars allowed here. Optional field. If emtpy, title is used",
                        max_length=255,
                    ),
                ),
                (
                    "hidden",
                    models.BooleanField(
                        default=False,
                        help_text="Do not display this page in site menu",
                    ),
                ),
                ("html", models.TextField(blank=True, default="")),
                # simple-history bookkeeping columns below.
                (
                    "history_id",
                    models.AutoField(primary_key=True, serialize=False),
                ),
                ("history_date", models.DateTimeField()),
                (
                    "history_change_reason",
                    models.CharField(max_length=100, null=True),
                ),
                (
                    "history_type",
                    models.CharField(
                        choices=[
                            ("+", "Created"),
                            ("~", "Changed"),
                            ("-", "Deleted"),
                        ],
                        max_length=1,
                    ),
                ),
                # DO_NOTHING + db_constraint=False: history rows must survive
                # deletion of the referenced challenge.
                (
                    "challenge",
                    models.ForeignKey(
                        blank=True,
                        db_constraint=False,
                        help_text="Which challenge does this page belong to?",
                        null=True,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        related_name="+",
                        to="challenges.challenge",
                    ),
                ),
                (
                    "history_user",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="+",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "verbose_name": "historical page",
                "ordering": ("-history_date", "-history_id"),
                "get_latest_by": "history_date",
            },
            bases=(simple_history.models.HistoricalChanges, models.Model),
        )
    ]
| 35.398305 | 158 | 0.377544 |
ace397efb48828ed5d32f96a0b6d2be8b8519116 | 1,085 | py | Python | cryptos/coins/bitcoin.py | cfd2000/pybitcointool | 96e7843226d76cfe73b2ab0660cc89add490d76a | [
"MIT"
] | 230 | 2017-12-07T07:49:39.000Z | 2022-03-27T10:08:16.000Z | cryptos/coins/bitcoin.py | cfd2000/pybitcointool | 96e7843226d76cfe73b2ab0660cc89add490d76a | [
"MIT"
] | 26 | 2017-12-19T15:09:57.000Z | 2022-02-25T14:13:56.000Z | cryptos/coins/bitcoin.py | cfd2000/pybitcointool | 96e7843226d76cfe73b2ab0660cc89add490d76a | [
"MIT"
] | 145 | 2017-12-20T11:45:55.000Z | 2022-03-05T08:07:32.000Z | from ..explorers import blockchain
from .base import BaseCoin
class Bitcoin(BaseCoin):
coin_symbol = "BTC"
display_name = "Bitcoin"
segwit_supported = True
explorer = blockchain
magicbyte = 0
script_magicbyte = 5
segwit_hrp = "bc"
client_kwargs = {
'server_file': 'bitcoin.json',
}
testnet_overrides = {
'display_name': "Bitcoin Testnet",
'coin_symbol': "BTCTEST",
'magicbyte': 111,
'script_magicbyte': 196,
'segwit_hrp': 'tb',
'hd_path': 1,
'wif_prefix': 0xef,
'client_kwargs': {
'server_file': 'bitcoin_testnet.json',
},
'xprv_headers': {
'p2pkh': 0x04358394,
'p2wpkh-p2sh': 0x044a4e28,
'p2wsh-p2sh': 0x295b005,
'p2wpkh': 0x04358394,
'p2wsh': 0x2aa7a99
},
'xpub_headers': {
'p2pkh': 0x043587cf,
'p2wpkh-p2sh': 0x044a5262,
'p2wsh-p2sh': 0x295b43f,
'p2wpkh': 0x043587cf,
'p2wsh': 0x2aa7ed3
},
} | 25.833333 | 50 | 0.529954 |
ace3985c454f5fd488dc7a43d2d148a2c420df19 | 9,062 | py | Python | opensfm/rig.py | oscarlorentzon/OpenSfM | 8946faf0e1e0637ad5fdf5e056ae4dc55104e8c0 | [
"BSD-2-Clause"
] | 1 | 2019-05-31T13:50:41.000Z | 2019-05-31T13:50:41.000Z | opensfm/rig.py | Pandinosaurus/OpenSfM | b892ba9fd5e7fd6c7a9e3c81edddca80f71c1cd5 | [
"BSD-2-Clause"
] | null | null | null | opensfm/rig.py | Pandinosaurus/OpenSfM | b892ba9fd5e7fd6c7a9e3c81edddca80f71c1cd5 | [
"BSD-2-Clause"
] | 2 | 2017-03-31T16:54:34.000Z | 2018-07-10T11:32:22.000Z | """Tool for handling rigs"""
import logging
import re
from collections import defaultdict
from itertools import combinations
from typing import Dict, Tuple, List, Optional, Set
import networkx as nx
import numpy as np
from opensfm import actions, pygeometry, pymap, types
from opensfm.dataset import DataSet, DataSetBase
logger = logging.getLogger(__name__)
TRigPatterns = Dict[str, str]
TRigCameraGroup = Set[str]
TRigImage = Tuple[str, str]
TRigInstance = List[TRigImage]
def find_image_rig(
    image: str, rig_patterns: TRigPatterns
) -> Tuple[Optional[str], Optional[str]]:
    """Match *image* against the candidate rig patterns.

    Returns the (RigCamera ID, instance member ID) pair for the first
    pattern that strips a non-empty part of the image name, or
    (None, None) when the image belongs to no rig.
    """
    for camera_id, pattern in rig_patterns.items():
        member_id = re.sub(pattern, "", image)
        # The whole name was consumed: no usable instance member ID.
        if member_id == "":
            continue
        # The pattern removed something: this image belongs to `camera_id`.
        if member_id != image:
            return (camera_id, member_id)
    return None, None
def create_instances_with_patterns(
    images: List[str], rig_patterns: TRigPatterns
) -> Dict[str, TRigInstance]:
    """Using the provided patterns, group images that should belong to the same rig instances.

    Images matching none of the patterns are skipped. It also checks that a
    RigCamera belongs to exactly one group of RigCameras.

    Returns :
        A dict (by instance ID) of list of tuple of (image, rig camera)
    """
    per_instance_id: Dict[str, TRigInstance] = {}
    for image in images:
        rig_camera_id, instance_member_id = find_image_rig(image, rig_patterns)
        # `find_image_rig` returns (None, None) for images that belong to no
        # rig: skip them BEFORE asserting, otherwise the first non-rig image
        # aborts the whole grouping (the assert used to run first).
        if not rig_camera_id:
            continue
        assert instance_member_id
        if instance_member_id not in per_instance_id:
            per_instance_id[instance_member_id] = []
        per_instance_id[instance_member_id].append((image, rig_camera_id))

    # Sanity check: a rig camera must always appear with the same set of
    # companion cameras. Only report when the group actually differs —
    # repeated instances of the same rig are the normal case.
    groups_per_camera: Dict[str, TRigCameraGroup] = {}
    for cameras in per_instance_id.values():
        cameras_group = {c for _, c in cameras}
        for _, c in cameras:
            if c in groups_per_camera and groups_per_camera[c] != cameras_group:
                logger.error(
                    f"Rig camera {c} already belongs to the rig camera group {groups_per_camera[c]}"
                )
            groups_per_camera[c] = cameras_group
    return per_instance_id
def group_instances(
    rig_instances: Dict[str, TRigInstance]
) -> Dict[str, List[TRigInstance]]:
    """Bucket rig instances by the set of rig cameras they contain.

    The bucket key is the comma-joined set of rig camera IDs of an instance.
    """
    grouped: Dict[str, List[TRigInstance]] = {}
    for instance in rig_instances.values():
        group_key = ", ".join({camera_id for _, camera_id in instance})
        grouped.setdefault(group_key, []).append(instance)
    return grouped
def create_subset_dataset_from_instances(
    data: DataSet, rig_instances: Dict[str, TRigInstance], name: str
) -> DataSet:
    """Pick a subset of rig instances and build a named sub-dataset from them.

    For each rig camera group, instances are ordered by capture time and the
    `rig_calibration_subset_size` instances around the middle of the sequence
    are kept.

    Returns :
        A DataSet containing a subset of images containing enough rig instances
    """
    subset_images = []
    for instances in group_instances(rig_instances).values():
        by_time = sorted(
            instances, key=lambda inst: data.load_exif(inst[0][0])["capture_time"]
        )
        half_size = int(data.config["rig_calibration_subset_size"] / 2)
        middle = len(by_time) // 2
        begin = max(0, middle - half_size)
        end = min(middle + half_size, len(by_time) - 1)
        for instance in by_time[begin:end]:
            subset_images.extend(image for image, _ in instance)
    return data.subset(name, subset_images)
def compute_relative_pose(
    pose_instances: List[List[Tuple[pymap.Shot, str]]],
) -> Dict[str, pymap.RigCamera]:
    """Compute rig camera relative poses, given shot poses grouped by rig instance.

    Each inner list is one rig instance: (shot, rig camera ID) pairs captured
    together. Returns one averaged RigCamera pose per rig camera ID.
    """
    # Put all poses instances into some canonical frame taken as the mean of their R|t
    centered_pose_instances = []
    for instance in pose_instances:
        origin_center = np.zeros(3)
        rotation_center = np.zeros(3)
        for shot, _ in instance:
            rotation_center += shot.pose.rotation
            origin_center += shot.pose.get_origin()
        origin_center /= len(instance)
        rotation_center /= len(instance)

        # Express each shot pose relative to the instance's mean pose.
        # NOTE(review): rotations are averaged component-wise as angle-axis
        # vectors, which is only a good approximation for nearby rotations —
        # confirm this is acceptable for the expected rigs.
        centered_pose_instance = []
        for shot, rig_camera_id in instance:
            instance_pose = pygeometry.Pose(rotation_center)
            instance_pose.set_origin(origin_center)
            instance_pose_camera = shot.pose.relative_to(instance_pose)
            centered_pose_instance.append(
                (
                    instance_pose_camera,
                    rig_camera_id,
                    shot.camera.id,
                )
            )
        centered_pose_instances.append(centered_pose_instance)

    # Average canonical poses per RigCamera ID
    average_origin, average_rotation, count_poses, camera_ids = {}, {}, {}, {}
    for centered_pose_instance in centered_pose_instances:
        for pose, rig_camera_id, camera_id in centered_pose_instance:
            if rig_camera_id not in average_origin:
                average_origin[rig_camera_id] = np.zeros(3)
                average_rotation[rig_camera_id] = np.zeros(3)
                count_poses[rig_camera_id] = 0
            average_origin[rig_camera_id] += pose.get_origin()
            average_rotation[rig_camera_id] += pose.rotation
            camera_ids[rig_camera_id] = camera_id
            count_poses[rig_camera_id] += 1

    # Construct final RigCamera results
    rig_cameras: Dict[str, pymap.RigCamera] = {}
    for rig_camera_id, count in count_poses.items():
        o = average_origin[rig_camera_id] / count
        r = average_rotation[rig_camera_id] / count
        pose = pygeometry.Pose(r)
        pose.set_origin(o)
        rig_cameras[rig_camera_id] = pymap.RigCamera(pose, rig_camera_id)
    return rig_cameras
def create_rig_cameras_from_reconstruction(
    reconstruction: types.Reconstruction, rig_instances: Dict[str, TRigInstance]
) -> Dict[str, pymap.RigCamera]:
    """Compute rig cameras poses, given a reconstruction and rig instances's shots.

    Instances with any shot missing from the reconstruction are skipped.
    """
    rig_cameras: Dict[str, pymap.RigCamera] = {}
    reconstructions_shots = set(reconstruction.shots)

    per_rig_camera_group = group_instances(rig_instances)
    for instances in per_rig_camera_group.values():
        pose_groups = []
        for instance in instances:
            # Only fully-reconstructed instances contribute poses.
            # (Was `any(True if cond else False for ...)` — the conditional
            # expression was redundant.)
            if any(
                shot_id not in reconstructions_shots for shot_id, _ in instance
            ):
                continue
            pose_groups.append(
                [
                    (reconstruction.shots[shot_id], rig_camera_id)
                    for shot_id, rig_camera_id in instance
                ]
            )
        rig_cameras.update(compute_relative_pose(pose_groups))
    return rig_cameras
def create_rigs_with_pattern(data: DataSet, patterns: TRigPatterns) -> None:
    """Create rig data (`rig_cameras.json` and `rig_assignments.json`) by performing
    pattern matching to group images belonging to the same instances, followed
    by a bit of ad-hoc SfM to find some initial relative poses.
    """
    # Construct instances assignments for each rig
    instances_per_rig = create_instances_with_patterns(data.images(), patterns)
    for rig_id, instances in instances_per_rig.items():
        # NOTE(review): `instances_per_rig` is keyed by instance member ID and
        # `instances` is one instance's image list — the log wording below
        # ("rig instances for rig") looks off; confirm the intent.
        logger.info(
            f"Found {len(instances)} rig instances for rig {rig_id} using pattern matching."
        )

    # Create some subset DataSet with enough images from each rig
    subset_data = create_subset_dataset_from_instances(
        data, instances_per_rig, "rig_calibration"
    )

    # Run a bit of SfM without any rig: full pipeline on the subset only.
    logger.info(f"Running SfM on a subset of {len(subset_data.images())} images.")
    actions.extract_metadata.run_dataset(subset_data)
    actions.detect_features.run_dataset(subset_data)
    actions.match_features.run_dataset(subset_data)
    actions.create_tracks.run_dataset(subset_data)
    actions.reconstruct.run_dataset(subset_data)

    # Compute some relative poses from the first reconstructed model.
    rig_cameras = create_rig_cameras_from_reconstruction(
        subset_data.load_reconstruction()[0], instances_per_rig
    )

    data.save_rig_cameras(rig_cameras)
    data.save_rig_assignments(list(instances_per_rig.values()))
def same_rig_shot(meta1, meta2):
    """True if shots taken at the same time on a rig.

    Two EXIF metadata dicts match when both carry GPS data with identical
    latitude/longitude AND their capture times are equal.
    """
    have_gps = (
        "gps" in meta1
        and "gps" in meta2
        and "latitude" in meta1["gps"]
        and "latitude" in meta2["gps"]
    )
    if have_gps:
        gps1, gps2 = meta1["gps"], meta2["gps"]
        same_gps = (
            gps1["latitude"] == gps2["latitude"]
            and gps1["longitude"] == gps2["longitude"]
        )
    else:
        same_gps = False
    same_time = meta1["capture_time"] == meta2["capture_time"]
    return same_gps and same_time
ace39a4cdf3dcd22ac9db032fec42ef3ede6078b | 41,337 | py | Python | release/scripts/startup/bl_ui/space_toolsystem_common.py | livada/blender | c1c2c639c0772dfcd37d38dce4158e9e34a8a6a3 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | release/scripts/startup/bl_ui/space_toolsystem_common.py | livada/blender | c1c2c639c0772dfcd37d38dce4158e9e34a8a6a3 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | release/scripts/startup/bl_ui/space_toolsystem_common.py | livada/blender | c1c2c639c0772dfcd37d38dce4158e9e34a8a6a3 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import (
Menu,
)
from bpy.app.translations import pgettext_tip as tip_
# Public API of this module.
__all__ = (
    "ToolDef",
    "ToolSelectPanelHelper",
    "activate_by_id",
    "activate_by_id_or_cycle",
    "description_from_id",
    "keymap_from_id",
)

# Support reloading icons: when this module is re-executed (script reload),
# free the icons created by the previous run before rebuilding the cache.
if "_icon_cache" in locals():
    release = bpy.app.icons.release
    for icon_value in _icon_cache.values():
        if icon_value != 0:
            release(icon_value)
    del release

# (filename -> icon_value) map
_icon_cache = {}
def _keymap_fn_from_seq(keymap_data):
def keymap_fn(km):
if keymap_fn.keymap_data:
from bl_keymap_utils.io import keymap_init_from_data
keymap_init_from_data(km, keymap_fn.keymap_data)
keymap_fn.keymap_data = keymap_data
return keymap_fn
def _item_is_fn(item):
    """True when *item* is a tool-generator callable rather than a ToolDef."""
    if type(item) is ToolDef:
        return False
    return callable(item)
from collections import namedtuple

# ToolDef is an immutable description of a single tool shown in the toolbar.
ToolDef = namedtuple(
    "ToolDef",
    (
        # Unique tool name (within space & mode context).
        "idname",
        # The name to display in the interface.
        "label",
        # Description (for tool-tip), when not set, use the description of 'operator',
        # may be a string or a 'function(context, item, key-map) -> string'.
        "description",
        # The name of the icon to use (found in ``release/datafiles/icons``) or None for no icon.
        "icon",
        # An optional cursor to use when this tool is active.
        "cursor",
        # An optional gizmo group to activate when the tool is set or None for no gizmo.
        "widget",
        # Optional key-map for tool, possible values are:
        #
        # - ``None`` when the tool doesn't have a key-map.
        #   Also the default value when no key-map value is defined.
        #
        # - A string literal for the key-map name, the key-map items are located in the default key-map.
        #
        # - ``()`` an empty tuple for a default name.
        #   This is convenience functionality for generating a key-map name.
        #   So if a tool name is "Bone Size", in "Edit Armature" mode for the "3D View",
        #   All of these values are combined into an id, e.g:
        #     "3D View Tool: Edit Armature, Bone Envelope"
        #
        #   Typically searching for a string ending with the tool name
        #   in the default key-map will lead you to the key-map for a tool.
        #
        # - A function that populates a key-maps passed in as an argument.
        #
        # - A tuple filled with triple's of:
        #   ``(operator_id, operator_properties, keymap_item_args)``.
        #
        #   Use this to define the key-map in-line.
        #
        #   Note that this isn't used for Blender's built in tools which use the built-in key-map.
        #   Keep this functionality since it's likely useful for add-on key-maps.
        #
        # Warning: currently in 'from_dict' this is a list of one item,
        # so internally we can swap the key-map function for the key-map itself.
        # This isn't very nice and may change, tool definitions shouldn't care about this.
        "keymap",
        # Optional data-block associated with this tool.
        # (Typically brush name, usage depends on mode, we could use for non-brush ID's in other modes).
        "data_block",
        # Optional primary operator (for introspection only).
        "operator",
        # Optional draw settings (operator options, tool_settings).
        "draw_settings",
        # Optional draw cursor.
        "draw_cursor",
    )
)
del namedtuple
def from_dict(kw_args):
    """
    Build a ToolDef from a dict so each tool can omit optional members.

    The key-map value is normalized here: a tuple of key-map item data is
    wrapped into a one-element list holding a setup callback (the list lets
    registration later replace the callback with the key-map name).
    """
    kw_full = {
        "description": None,
        "icon": None,
        "cursor": None,
        "widget": None,
        "keymap": None,
        "data_block": None,
        "operator": None,
        "draw_settings": None,
        "draw_cursor": None,
    }
    kw_full.update(kw_args)

    keymap = kw_full["keymap"]
    if keymap is None:
        pass
    elif type(keymap) is tuple:
        keymap = [_keymap_fn_from_seq(keymap)]
    else:
        keymap = [keymap]
    kw_full["keymap"] = keymap
    return ToolDef(**kw_full)


def from_fn(fn):
    """
    Decorator: build a ToolDef from a function returning its keyword dict.
    """
    return ToolDef.from_dict(fn())


def with_args(**kw):
    """Variant of ``from_fn`` forwarding keyword arguments to the function."""
    def from_fn(fn):
        return ToolDef.from_dict(fn(**kw))
    return from_fn


from_fn.with_args = with_args
ToolDef.from_dict = from_dict
ToolDef.from_fn = from_fn
del from_dict, from_fn, with_args
class ToolActivePanelHelper:
    """Mix-in for a panel that shows the active tool's header and settings."""
    # Sub-class must define.
    # bl_space_type = 'VIEW_3D'
    # bl_region_type = 'UI'
    bl_label = "Active Tool"
    # bl_category = "Tool"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        # Delegate to the shared implementation, keyed by this panel's space type.
        ToolSelectPanelHelper.draw_active_tool_header(
            context,
            layout.column(),
            show_tool_name=True,
            tool_key=ToolSelectPanelHelper._tool_key_from_context(context, space_type=self.bl_space_type),
        )
class ToolSelectPanelHelper:
"""
Generic Class, can be used for any toolbar.
- keymap_prefix:
The text prefix for each key-map for this spaces tools.
- tools_all():
Returns (context_mode, tools) tuple pair for all tools defined.
- tools_from_context(context, mode=None):
Returns tools available in this context.
Each tool is a 'ToolDef' or None for a separator in the toolbar, use ``None``.
"""
@staticmethod
def _tool_class_from_space_type(space_type):
    """Return the ToolSelectPanelHelper subclass for *space_type*, or None."""
    for subclass in ToolSelectPanelHelper.__subclasses__():
        if subclass.bl_space_type == space_type:
            return subclass
    return None
@staticmethod
def _icon_value_from_icon_handle(icon_name):
    """Load (and cache) the toolbar icon named *icon_name*, returning its value.

    Icons are loaded from ``release/datafiles/icons`` as triangle geometry;
    failures fall back to the "none" icon (or 0), so layout never breaks.
    """
    import os
    if icon_name is not None:
        assert(type(icon_name) is str)
        icon_value = _icon_cache.get(icon_name)
        if icon_value is None:
            dirname = bpy.utils.system_resource('DATAFILES', "icons")
            filename = os.path.join(dirname, icon_name + ".dat")
            try:
                icon_value = bpy.app.icons.new_triangles_from_file(filename)
            except Exception as ex:
                # Distinguish a missing file from a corrupt one in the report.
                if not os.path.exists(filename):
                    print("Missing icons:", filename, ex)
                else:
                    print("Corrupt icon:", filename, ex)
                # Use none as a fallback (avoids layout issues).
                if icon_name != "none":
                    icon_value = ToolSelectPanelHelper._icon_value_from_icon_handle("none")
                else:
                    icon_value = 0
            # Cache even the fallback so failures aren't retried every redraw.
            _icon_cache[icon_name] = icon_value
        return icon_value
    else:
        return 0
# Tool flattening.
#
# Usually 'tools' is already expanded into ToolDef, but when registering a
# tool an entry can still be a function
# (_tools_flatten is usually called with cls.tools_from_context(context),
# which already yields from such functions).
# If an item is still a function (e.g. _defs_XXX.generate_from_brushes),
# it cannot be expanded here because no context is available yet.
# Yielding None in that case risks a duplicate tool bl_idname in
# register_tool(), but that is still better than raising an error.
@staticmethod
def _tools_flatten(tools):
    """Flatten tool groups, yielding each tool (None for separators and placeholders)."""
    for entry in tools:
        # A bare None entry is a separator in the toolbar. Note it also
        # falls through the loop below and yields a second None, which
        # downstream consumers tolerate (matches historical behavior).
        if entry is None:
            yield None
        for member in entry if (type(entry) is tuple) else (entry,):
            if member is None or _item_is_fn(member):
                yield None
            else:
                yield member
@staticmethod
def _tools_flatten_with_tool_index(tools):
    """Flatten tool groups, yielding (tool, index-within-group) pairs.

    Separators and generator functions yield (None, -1); the index only
    advances for real tools.
    """
    for entry in tools:
        if entry is None:
            yield None, -1
        position = 0
        for member in entry if (type(entry) is tuple) else (entry,):
            if member is None or _item_is_fn(member):
                yield None, -1
            else:
                yield member, position
                position += 1
# Special internal function, gives us only the tool items that contain keymaps.
@staticmethod
def _tools_flatten_with_keymap(tools):
    """Yield only the tools that define their own key-map."""
    for entry in tools:
        if entry is None:
            continue
        for member in entry if (type(entry) is tuple) else (entry,):
            # Skip separators and generator functions.
            if member is None or _item_is_fn(member):
                continue
            if member.keymap is not None:
                yield member
@classmethod
def _tool_get_active(cls, context, space_type, mode, with_icon=False):
    """
    Return the active Python tool definition, its ToolRef and its icon value.

    The icon is only resolved when *with_icon* is set (0 otherwise).
    Returns (None, None, 0) when no tool matches.
    """
    active = ToolSelectPanelHelper._tool_active_from_context(context, space_type, mode)
    active_id = getattr(active, "idname", None)
    for tool_def in ToolSelectPanelHelper._tools_flatten(cls.tools_from_context(context, mode)):
        if tool_def is None or tool_def.idname != active_id:
            continue
        if with_icon:
            icon_value = ToolSelectPanelHelper._icon_value_from_icon_handle(tool_def.icon)
        else:
            icon_value = 0
        return (tool_def, active, icon_value)
    return None, None, 0
@classmethod
def _tool_get_by_id(cls, context, idname):
    """
    Return the tool matching *idname* and its index within a group (-1 when ungrouped).
    """
    flattened = ToolSelectPanelHelper._tools_flatten_with_tool_index(
        cls.tools_from_context(context))
    for tool_def, group_index in flattened:
        if tool_def is not None and tool_def.idname == idname:
            return (tool_def, group_index)
    return None, -1
@classmethod
def _tool_get_by_id_active(cls, context, idname):
    """
    Return the active tool for *idname* and its index (-1 when not a group).

    For a tool group (matched by its first member's idname) the member the
    user last activated is returned.
    """
    for entry in cls.tools_from_context(context):
        if entry is None:
            continue
        if type(entry) is tuple:
            if entry[0].idname != idname:
                continue
            active_index = cls._tool_group_active.get(entry[0].idname, 0)
            return (entry[active_index], active_index)
        if entry.idname == idname:
            return (entry, -1)
    return None, -1
@classmethod
def _tool_get_by_id_active_with_group(cls, context, idname):
    """
    As ``_tool_get_by_id_active`` but also return the containing group (or None).
    """
    for entry in cls.tools_from_context(context):
        if entry is None:
            continue
        if type(entry) is tuple:
            if entry[0].idname != idname:
                continue
            active_index = cls._tool_group_active.get(entry[0].idname, 0)
            return (entry[active_index], active_index, entry)
        if entry.idname == idname:
            return (entry, -1, None)
    return None, -1, None
@classmethod
def _tool_get_group_by_id(cls, context, idname, *, coerce=False):
    """
    Return the tool group (tuple) that contains *idname*, or None.

    A match on an ungrouped tool returns a one-item tuple when *coerce*
    is set, otherwise None (ungrouped tools have no group).
    """
    for entry in cls.tools_from_context(context):
        if entry is None:
            continue
        if type(entry) is tuple:
            for member in entry:
                if member.idname == idname:
                    return entry
        elif entry.idname == idname:
            if coerce:
                return (entry,)
            return None
    return None
@classmethod
def _tool_get_by_flat_index(cls, context, tool_index):
    """
    Return (tool, index-within-group) for the *tool_index*'th tool of the
    fully expanded list, or (None, -1) when out of range.
    """
    flat_position = 0
    for tool_def, group_index in ToolSelectPanelHelper._tools_flatten_with_tool_index(
            cls.tools_from_context(context)):
        if tool_def is None:
            continue
        if flat_position == tool_index:
            return (tool_def, group_index)
        flat_position += 1
    return None, -1
@classmethod
def _tool_get_active_by_index(cls, context, tool_index):
    """
    Return (tool, index-within-group) for the *tool_index*'th entry of the
    un-expanded list; a group resolves to its remembered active member.
    """
    position = 0
    for entry in cls.tools_from_context(context):
        if entry is None:
            continue
        if position == tool_index:
            if type(entry) is tuple:
                group_index = cls._tool_group_active.get(entry[0].idname, 0)
                return (entry[group_index], group_index)
            return (entry, -1)
        position += 1
    return None, -1
@classmethod
def _tool_group_active_set_by_id(cls, context, idname_group, idname):
    """Remember *idname* as the active member of the group containing *idname_group*.

    Return True on success, False when the group or member wasn't found.
    """
    group = cls._tool_get_group_by_id(context, idname_group, coerce=True)
    if group:
        for member_index, member in enumerate(group):
            if member and member.idname == idname:
                cls._tool_group_active[group[0].idname] = member_index
                return True
    return False
@staticmethod
def _tool_active_from_context(context, space_type, mode=None, create=False):
    """Return the workspace's active tool reference for *space_type*/*mode*.

    *mode* defaults to the context's current mode for the space; *create*
    requests a new tool reference when none exists. Returns None for
    unsupported space types or when no tool is found.
    """
    if space_type in {'VIEW_3D', 'PROPERTIES'}:
        if mode is None:
            mode = context.mode
        tool = context.workspace.tools.from_space_view3d_mode(mode, create=create)
        if tool is not None:
            tool.refresh_from_context()
            return tool
    elif space_type == 'IMAGE_EDITOR':
        space_data = context.space_data
        if mode is None:
            # No space data (e.g. called outside the editor): fall back to 'VIEW'.
            if space_data is None:
                mode = 'VIEW'
            else:
                mode = space_data.mode
        tool = context.workspace.tools.from_space_image_mode(mode, create=create)
        if tool is not None:
            tool.refresh_from_context()
            return tool
    elif space_type == 'NODE_EDITOR':
        # `space_data` kept for symmetry with other branches; unused here.
        space_data = context.space_data
        tool = context.workspace.tools.from_space_node(create=create)
        if tool is not None:
            tool.refresh_from_context()
            return tool
    elif space_type == 'SEQUENCE_EDITOR':
        space_data = context.space_data
        if mode is None:
            mode = space_data.view_type
        tool = context.workspace.tools.from_space_sequencer(mode, create=create)
        if tool is not None:
            tool.refresh_from_context()
            return tool
    return None
@staticmethod
def _tool_identifier_from_button(context):
    # The operator name stored on the pressed button is the tool identifier.
    return context.button_operator.name
@classmethod
def _km_action_simple(cls, kc_default, kc, context_descr, label, keymap_fn):
    """Create (or look up) the tool key-map named from *context_descr*/*label*.

    On first creation the key-map is populated via ``keymap_fn[0]``; in all
    cases ``keymap_fn[0]`` is replaced with the resolved key-map name so the
    tool definition can reference it later.
    """
    km_idname = f"{cls.keymap_prefix:s} {context_descr:s}, {label:s}"
    km = kc.keymaps.get(km_idname)
    km_kwargs = dict(space_type=cls.bl_space_type, region_type='WINDOW', tool=True)
    if km is None:
        km = kc.keymaps.new(km_idname, **km_kwargs)
        keymap_fn[0](km)
        # (Removed leftover debug `print()` calls that dumped the key-map
        # items to the console on every key-map creation.)
    keymap_fn[0] = km.name

    # Ensure we have a default key map, so the add-ons keymap is properly overlayed.
    if kc_default is not kc:
        kc_default.keymaps.new(km_idname, **km_kwargs)
@classmethod
def register(cls):
    """Register the tool key-maps of this panel class into the default key-config."""
    wm = bpy.context.window_manager
    # Write into defaults, users may modify in preferences.
    kc_default = wm.keyconfigs.default

    # Track which tool-group was last used for non-active groups.
    # Blender stores the active tool-group index.
    #
    # {tool_name_first: index_in_group, ...}
    cls._tool_group_active = {}

    # ignore in background mode
    if kc_default is None:
        return

    for context_mode, tools in cls.tools_all():
        if context_mode is None:
            context_descr = "All"
        else:
            context_descr = context_mode.replace("_", " ").title()
        for item in cls._tools_flatten_with_keymap(tools):
            keymap_data = item.keymap
            # Only inline key-map definitions (callbacks) need creating here;
            # string names refer to key-maps that already exist.
            if callable(keymap_data[0]):
                cls._km_action_simple(kc_default, kc_default, context_descr, item.label, keymap_data)
@classmethod
def keymap_ui_hierarchy(cls, context_mode):
    """Yield (keymap-name, space-type, region-type, children) for *context_mode*.

    See: bpy_extras.keyconfig_utils.
    Key-maps may be shared between tools; each is reported only once.
    """
    seen_keymaps = set()
    for mode, tools in cls.tools_all():
        if mode != context_mode:
            continue
        for tool_def in cls._tools_flatten_with_keymap(tools):
            keymap_name = tool_def.keymap[0]
            if keymap_name in seen_keymaps:
                continue
            seen_keymaps.add(keymap_name)
            yield (keymap_name, cls.bl_space_type, 'WINDOW', [])
# -------------------------------------------------------------------------
# Layout Generators
#
# Meaning of received values:
# - Bool: True for a separator, otherwise False for regular tools.
# - None: Signal to finish (complete any final operations, e.g. add padding).
@staticmethod
def _layout_generator_single_column(layout, scale_y):
    """Coroutine yielding a UI column for each tool button, one column per run.

    Protocol (see the section comment above): send True for a separator
    (starts a new column), False for a regular tool, None to finish.
    """
    col = layout.column(align=True)
    col.scale_y = scale_y
    is_sep = False
    while True:
        if is_sep is True:
            # Separator requested: start a fresh aligned column.
            col = layout.column(align=True)
            col.scale_y = scale_y
        elif is_sep is None:
            yield None
            return
        is_sep = yield col
@staticmethod
def _layout_generator_multi_columns(layout, column_count, scale_y):
    """Coroutine yielding UI rows for a grid of *column_count* buttons per row.

    Same send protocol as ``_layout_generator_single_column``; padding
    labels keep partially-filled rows aligned.
    """
    scale_x = scale_y * 1.1
    column_last = column_count - 1

    col = layout.column(align=True)

    row = col.row(align=True)

    row.scale_x = scale_x
    row.scale_y = scale_y

    is_sep = False
    column_index = 0
    while True:
        if is_sep is True:
            # Separator: pad the unfinished row, then start a new column block.
            if column_index != column_last:
                row.label(text="")
            col = layout.column(align=True)
            row = col.row(align=True)
            row.scale_x = scale_x
            row.scale_y = scale_y
            column_index = 0

        is_sep = yield row
        if is_sep is None:
            # Finishing: pad the last row if needed, then stop.
            if column_index == column_last:
                row.label(text="")
            yield None
            return

        if column_index == column_count:
            # Row is full: wrap to a new row.
            column_index = 0
            row = col.row(align=True)
            row.scale_x = scale_x
            row.scale_y = scale_y
        column_index += 1
@staticmethod
def _layout_generator_detect_from_region(layout, region, scale_y):
    """
    Choose an appropriate layout for the toolbar.

    Returns (layout-coroutine, show_text): wide regions get a single
    labeled column, narrower ones an icon-only grid.
    """
    # Currently this just checks the width,
    # we could have different layouts as preferences too.
    system = bpy.context.preferences.system
    view2d = region.view2d
    # Width of one region pixel in view space, normalized by the UI scale.
    view2d_scale = (
        view2d.region_to_view(1.0, 0.0)[0] -
        view2d.region_to_view(0.0, 0.0)[0]
    )
    width_scale = region.width * view2d_scale / system.ui_scale

    if width_scale > 120.0:
        show_text = True
        column_count = 1
    else:
        show_text = False
        # Without text labels, allow two columns on medium widths.
        if width_scale > 80.0:
            column_count = 2
        else:
            column_count = 1

    if column_count == 1:
        ui_gen = ToolSelectPanelHelper._layout_generator_single_column(
            layout, scale_y=scale_y,
        )
    else:
        ui_gen = ToolSelectPanelHelper._layout_generator_multi_columns(
            layout, column_count=column_count, scale_y=scale_y,
        )

    return ui_gen, show_text
@classmethod
def draw_cls(cls, layout, context, detect_layout=True, scale_y=1.75):
    """Draw the full toolbar: one button per tool, grouped tools as hold-menus."""
    # Use a classmethod so it can be called outside of a panel context.

    # XXX, this UI isn't very nice.
    # We might need to create new button types for this.
    # Since we probably want:
    # - tool-tips that include multiple key shortcuts.
    # - ability to click and hold to expose sub-tools.

    space_type = context.space_data.type
    tool_active_id = getattr(
        ToolSelectPanelHelper._tool_active_from_context(context, space_type),
        "idname", None,
    )

    if detect_layout:
        ui_gen, show_text = cls._layout_generator_detect_from_region(layout, context.region, scale_y)
    else:
        ui_gen = ToolSelectPanelHelper._layout_generator_single_column(layout, scale_y)
        show_text = True

    # Start iteration
    ui_gen.send(None)

    for item in cls.tools_from_context(context):
        if item is None:
            # Separator entry.
            ui_gen.send(True)
            continue

        if type(item) is tuple:
            # Tool group: resolve which member to display.
            is_active = False
            i = 0
            for i, sub_item in enumerate(item):
                if sub_item is None:
                    continue
                is_active = (sub_item.idname == tool_active_id)
                if is_active:
                    index = i
                    break
            del i, sub_item

            if is_active:
                # not ideal, write this every time :S
                cls._tool_group_active[item[0].idname] = index
            else:
                index = cls._tool_group_active.get(item[0].idname, 0)

            item = item[index]

            use_menu = True
        else:
            index = -1
            use_menu = False

        is_active = (item.idname == tool_active_id)
        icon_value = ToolSelectPanelHelper._icon_value_from_icon_handle(item.icon)

        sub = ui_gen.send(False)

        # Grouped tools use a click-and-hold menu button; plain tools a button.
        if use_menu:
            sub.operator_menu_hold(
                "wm.tool_set_by_id",
                text=item.label if show_text else "",
                depress=is_active,
                menu="WM_MT_toolsystem_submenu",
                icon_value=icon_value,
            ).name = item.idname
        else:
            sub.operator(
                "wm.tool_set_by_id",
                text=item.label if show_text else "",
                depress=is_active,
                icon_value=icon_value,
            ).name = item.idname
    # Signal to finish any remaining layout edits.
    ui_gen.send(None)
def draw(self, context):
    # Panel entry point: delegate to the shared class-method implementation.
    self.draw_cls(self.layout, context)
@staticmethod
def _tool_key_from_context(context, *, space_type=None):
    """Return a (space_type, mode) key identifying the tool context, or (None, None)."""
    space_data = None
    if space_type is None:
        space_data = context.space_data
        space_type = space_data.type

    if space_type == 'VIEW_3D':
        return space_type, context.mode
    if space_type == 'IMAGE_EDITOR':
        if space_data is None:
            space_data = context.space_data
        return space_type, space_data.mode
    if space_type == 'NODE_EDITOR':
        # The node editor uses a single tool context.
        return space_type, None
    if space_type == 'SEQUENCE_EDITOR':
        return space_type, context.space_data.view_type
    return None, None
@staticmethod
def tool_active_from_context(context):
    """Return the active tool reference for the space of *context* (may be None)."""
    return ToolSelectPanelHelper._tool_active_from_context(
        context, context.space_data.type,
    )
@staticmethod
def draw_active_tool_fallback(
        context, layout, tool,
        *,
        is_horizontal_layout=False,
):
    """Draw the settings of the fallback (drag) tool assigned to *tool*."""
    panel_cls = ToolSelectPanelHelper._tool_class_from_space_type(tool.space_type)
    fallback_item, _index = panel_cls._tool_get_by_id(context, tool.idname_fallback)
    if fallback_item is None:
        return
    draw_settings = fallback_item.draw_settings
    if draw_settings is None:
        return
    # Vertical layouts get a separator before the extra settings.
    if not is_horizontal_layout:
        layout.separator()
    draw_settings(context, layout, tool)
@staticmethod
def draw_active_tool_header(
        context, layout,
        *,
        show_tool_name=False,
        tool_key=None,
):
    """Draw the active tool's icon (optionally its name) and its settings.

    Returns the tool reference, or None when no tool context/tool is found.
    """
    if tool_key is None:
        space_type, mode = ToolSelectPanelHelper._tool_key_from_context(context)
    else:
        space_type, mode = tool_key

    if space_type is None:
        return None

    cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
    item, tool, icon_value = cls._tool_get_active(context, space_type, mode, with_icon=True)
    if item is None:
        return None
    # Note: we could show 'item.text' here but it makes the layout jitter when switching tools.
    # Add some spacing since the icon is currently assuming regular small icon size.
    layout.label(text=" " + item.label if show_tool_name else " ", icon_value=icon_value)
    if show_tool_name:
        layout.separator()

    draw_settings = item.draw_settings
    if draw_settings is not None:
        draw_settings(context, layout, tool)

    # When a distinct fallback (drag) tool is assigned, expose a selector.
    idname_fallback = tool.idname_fallback
    if idname_fallback and idname_fallback != item.idname:
        tool_settings = context.tool_settings

        # Show popover which looks like an enum but isn't one.
        if tool_settings.workspace_tool_type == 'FALLBACK':
            tool_fallback_id = cls.tool_fallback_id
            item, _select_index = cls._tool_get_by_id_active(context, tool_fallback_id)
            label = item.label
        else:
            label = "Active Tool"

        split = layout.split(factor=0.33)
        row = split.row()
        row.alignment = 'RIGHT'
        row.label(text="Drag:")
        row = split.row()
        row.context_pointer_set("tool", tool)
        row.popover(panel="TOPBAR_PT_tool_fallback", text=label)

    return tool
# Show a list of tools in the popover.
@staticmethod
def draw_fallback_tool_items(layout, context):
    """Draw the fallback-tool selection list used by the drag popover."""
    space_type = context.space_data.type
    # The properties editor re-uses the 3D viewport's tools.
    if space_type == 'PROPERTIES':
        space_type = 'VIEW_3D'

    cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
    tool_fallback_id = cls.tool_fallback_id

    _item, _select_index, item_group = cls._tool_get_by_id_active_with_group(context, tool_fallback_id)
    if item_group is None:
        # Could print comprehensive message - listing available items.
        raise Exception("Fallback tool doesn't exist")

    col = layout.column(align=True)
    tool_settings = context.tool_settings
    col.prop_enum(
        tool_settings,
        "workspace_tool_type",
        value='DEFAULT',
        text="Active Tool",
    )
    is_active_tool = (tool_settings.workspace_tool_type == 'DEFAULT')

    col = layout.column(align=True)
    # -1 means no group member is highlighted (the active tool is in use).
    if is_active_tool:
        index_current = -1
    else:
        index_current = cls._tool_group_active.get(item_group[0].idname, 0)
    for i, sub_item in enumerate(item_group):
        is_active = (i == index_current)
        props = col.operator(
            "wm.tool_set_by_id",
            text=sub_item.label,
            depress=is_active,
        )
        props.name = sub_item.idname
        props.as_fallback = True
        props.space_type = space_type
@staticmethod
def draw_fallback_tool_items_for_pie_menu(layout, context):
    """Draw the fallback-tool choices as a pie menu."""
    space_type = context.space_data.type
    # The properties editor re-uses the 3D viewport's tools.
    if space_type == 'PROPERTIES':
        space_type = 'VIEW_3D'

    cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
    tool_fallback_id = cls.tool_fallback_id

    _item, _select_index, item_group = cls._tool_get_by_id_active_with_group(context, tool_fallback_id)
    if item_group is None:
        # Could print comprehensive message - listing available items.
        raise Exception("Fallback tool doesn't exist")

    # Allow changing the active tool,
    # even though this isn't the purpose of the pie menu
    # it's confusing from a user perspective if we don't allow it.
    is_fallback_group_active = getattr(
        ToolSelectPanelHelper._tool_active_from_context(context, space_type),
        "idname", None,
    ) in (item.idname for item in item_group)

    pie = layout.menu_pie()
    tool_settings = context.tool_settings
    pie.prop_enum(
        tool_settings,
        "workspace_tool_type",
        value='DEFAULT',
        text="Active Tool",
        icon='TOOL_SETTINGS',  # Could use a less generic icon.
    )
    is_active_tool = (tool_settings.workspace_tool_type == 'DEFAULT')
    # -1 means no group member is highlighted (the active tool is in use).
    if is_active_tool:
        index_current = -1
    else:
        index_current = cls._tool_group_active.get(item_group[0].idname, 0)
    for i, sub_item in enumerate(item_group):
        is_active = (i == index_current)
        props = pie.operator(
            "wm.tool_set_by_id",
            text=sub_item.label,
            depress=is_active,
            icon_value=ToolSelectPanelHelper._icon_value_from_icon_handle(sub_item.icon),
        )
        props.name = sub_item.idname
        props.space_type = space_type
        if not is_fallback_group_active:
            props.as_fallback = True
# Generic popup used when a single toolbar button groups several
# alternative tools: it lists the members of that group for selection.
class WM_MT_toolsystem_submenu(Menu):
    bl_label = ""

    @staticmethod
    def _tool_group_from_button(context):
        # Lookup the tool definitions based on the space-type.
        panel_cls = ToolSelectPanelHelper._tool_class_from_space_type(context.space_data.type)
        if panel_cls is None:
            return None, None
        button_identifier = ToolSelectPanelHelper._tool_identifier_from_button(context)
        for group in panel_cls.tools_from_context(context):
            if type(group) is not tuple:
                continue
            for member in group:
                if member is not None and member.idname == button_identifier:
                    return panel_cls, group
        return None, None

    def draw(self, context):
        layout = self.layout
        layout.scale_y = 2.0
        _cls, group = self._tool_group_from_button(context)
        if group is None:
            # Should never happen, just in case
            layout.label(text="Unable to find toolbar group")
            return
        for member in group:
            if member is None:
                layout.separator()
                continue
            props = layout.operator(
                "wm.tool_set_by_id",
                text=member.label,
                icon_value=ToolSelectPanelHelper._icon_value_from_icon_handle(member.icon),
            )
            props.name = member.idname
def _activate_by_item(context, space_type, item, index, *, as_fallback=False):
    """Make *item* the active tool for *space_type*.

    When *as_fallback* is true the item is installed as the fallback tool of
    its group instead, and the current primary tool is re-applied.
    Also (re)installs the tool's cursor draw handler, if it defines one.
    """
    cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
    tool = ToolSelectPanelHelper._tool_active_from_context(context, space_type, create=True)
    tool_fallback_id = cls.tool_fallback_id

    if as_fallback:
        # To avoid complicating logic too much, isolate all fallback logic to this block.
        # This will set the tool again, using the item for the fallback instead of the primary tool.
        #
        # If this ends up needing to be more complicated,
        # it would be better to split it into a separate function.
        _item, _select_index, item_group = cls._tool_get_by_id_active_with_group(context, tool_fallback_id)
        if item_group is None:
            # Could print comprehensive message - listing available items.
            raise Exception("Fallback tool doesn't exist")
        index_new = -1
        for i, sub_item in enumerate(item_group):
            if sub_item.idname == item.idname:
                index_new = i
                break
        if index_new == -1:
            raise Exception("Fallback tool not found in group")

        cls._tool_group_active[tool_fallback_id] = index_new

        # Done, now get the current tool to replace the item & index.
        tool_active = ToolSelectPanelHelper._tool_active_from_context(context, space_type)
        item, index = cls._tool_get_by_id(context, getattr(tool_active, "idname", None))
    else:
        # Ensure the active fallback tool is read from saved state (even if the fallback tool is not in use).
        stored_idname_fallback = tool.idname_fallback
        if stored_idname_fallback:
            cls._tool_group_active_set_by_id(context, tool_fallback_id, stored_idname_fallback)
        del stored_idname_fallback

    # Find fallback keymap.
    item_fallback = None
    _item, select_index = cls._tool_get_by_id(context, tool_fallback_id)
    if select_index != -1:
        item_fallback, _index = cls._tool_get_active_by_index(context, select_index)
    # End calculating fallback.

    tool.setup(
        idname=item.idname,
        keymap=item.keymap[0] if item.keymap is not None else "",
        cursor=item.cursor or 'DEFAULT',
        gizmo_group=item.widget or "",
        data_block=item.data_block or "",
        operator=item.operator or "",
        index=index,
        idname_fallback=(item_fallback and item_fallback.idname) or "",
        keymap_fallback=(item_fallback and item_fallback.keymap and item_fallback.keymap[0]) or "",
    )

    WindowManager = bpy.types.WindowManager

    # Swap out any previous cursor draw handler for this space type.
    handle_map = _activate_by_item._cursor_draw_handle
    handle = handle_map.pop(space_type, None)
    if handle is not None:
        WindowManager.draw_cursor_remove(handle)
    if item.draw_cursor is not None:
        def handle_fn(context, item, tool, xy):
            item.draw_cursor(context, tool, xy)
        handle = WindowManager.draw_cursor_add(handle_fn, (context, item, tool), space_type)
        handle_map[space_type] = handle


# Per-space-type cursor draw handles, stored as a function attribute.
_activate_by_item._cursor_draw_handle = {}
def activate_by_id(context, space_type, idname, *, as_fallback=False):
    """Activate the tool named *idname*; return True on success."""
    panel_cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
    if panel_cls is None:
        return False
    tool_item, tool_index = panel_cls._tool_get_by_id(context, idname)
    if tool_item is None:
        return False
    _activate_by_item(context, space_type, tool_item, tool_index, as_fallback=as_fallback)
    return True
def activate_by_id_or_cycle(context, space_type, idname, *, offset=1, as_fallback=False):
    """Activate tool *idname*; when its group's current tool is already
    active, cycle *offset* steps within the group instead.

    Returns True on success, False when the tool is unknown.
    NOTE(review): *as_fallback* is accepted but never forwarded here —
    confirm whether the internal `activate_by_id`/`_activate_by_item` calls
    should pass it through.
    """
    # Only cycle when the active tool is activated again.
    cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
    item, _index = cls._tool_get_by_id(context, idname)
    if item is None:
        return False

    tool_active = ToolSelectPanelHelper._tool_active_from_context(context, space_type)
    id_active = getattr(tool_active, "idname", None)

    # Find the group containing *idname* and the id of its current member.
    id_current = ""
    for item_group in cls.tools_from_context(context):
        if type(item_group) is tuple:
            index_current = cls._tool_group_active.get(item_group[0].idname, 0)
            for sub_item in item_group:
                if sub_item.idname == idname:
                    id_current = item_group[index_current].idname
                    break
            if id_current:
                break
    if id_current == "":
        # Not a grouped tool: plain activation.
        return activate_by_id(context, space_type, idname)
    if id_active != id_current:
        return activate_by_id(context, space_type, id_current)

    # Already active: cycle within the group (item_group is still bound to
    # the matched group from the loop above).
    index_found = (tool_active.index + offset) % len(item_group)
    cls._tool_group_active[item_group[0].idname] = index_found
    item_found = item_group[index_found]
    _activate_by_item(context, space_type, item_found, index_found)
    return True
def description_from_id(context, space_type, idname, *, use_operator=True):
    """Return the tooltip description for a tool.

    Order of precedence: the item's own description (a string or a callable),
    then — when *use_operator* is true — the description of the tool's
    operator (explicit, or the first active keymap item's operator).
    Returns False when the tool is unknown, "" when no description is found.
    """
    # Used directly for tooltips.
    cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
    item, _index = cls._tool_get_by_id(context, idname)
    if item is None:
        return False

    # Custom description.
    description = item.description
    if description is not None:
        if callable(description):
            km = _keymap_from_item(context, item)
            return description(context, item, km)
        return tip_(description)

    # Extract from the operator.
    if use_operator:
        operator = item.operator
        if operator is None:
            # Fall back to the first active operator bound in the keymap.
            if item.keymap is not None:
                km = _keymap_from_item(context, item)
                if km is not None:
                    for kmi in km.keymap_items:
                        if kmi.active:
                            operator = kmi.idname
                            break

        if operator is not None:
            import _bpy
            return tip_(_bpy.ops.get_rna_type(operator).description)
    return ""
def item_from_id(context, space_type, idname):
    """Look up a tool definition by identifier (used directly for tooltips)."""
    panel_cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
    if panel_cls is None:
        return None
    tool_item, _ = panel_cls._tool_get_by_id(context, idname)
    return tool_item
def item_from_id_active(context, space_type, idname):
    """Look up a tool definition, resolving groups to their active member
    (used directly for tooltips)."""
    panel_cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
    if panel_cls is None:
        return None
    tool_item, _ = panel_cls._tool_get_by_id_active(context, idname)
    return tool_item
def item_from_id_active_with_group(context, space_type, idname):
    """Return the active-in-group tool definition for *idname*,
    or None when the space type has no tool panel class.
    """
    cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
    if cls is None:
        return None
    # Bug fix: `_tool_get_by_id_active_with_group` returns
    # (item, index, item_group) — see the sibling call sites in
    # `_activate_by_item` and `draw_fallback_tool_items` — so the previous
    # unpacking `cls, item, _index = ...` both clobbered `cls` and returned
    # the *index* instead of the item.
    item, _index, _item_group = cls._tool_get_by_id_active_with_group(context, idname)
    return item
def item_group_from_id(context, space_type, idname, *, coerce=False):
    """Return the tool group containing *idname*, or None when the
    space type has no tool panel class."""
    panel_cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
    if panel_cls is None:
        return None
    return panel_cls._tool_get_group_by_id(context, idname, coerce=coerce)
def item_from_flat_index(context, space_type, index):
    """Return the tool definition at a flat (ungrouped) index, or None
    when the space type has no tool panel class."""
    panel_cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
    if panel_cls is None:
        return None
    tool_item, _ = panel_cls._tool_get_by_flat_index(context, index)
    return tool_item
def item_from_index_active(context, space_type, index):
    """Return the active tool definition at a group index, or None when
    the space type has no tool panel class."""
    panel_cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
    if panel_cls is None:
        return None
    tool_item, _ = panel_cls._tool_get_active_by_index(context, index)
    return tool_item
def keymap_from_id(context, space_type, idname):
    """Return the keymap name for a tool (used directly for tooltips).

    Returns None for an unknown space type, False for an unknown tool,
    "" when the tool has no keymap.
    """
    panel_cls = ToolSelectPanelHelper._tool_class_from_space_type(space_type)
    if panel_cls is None:
        return None
    tool_item, _ = panel_cls._tool_get_by_id(context, idname)
    if tool_item is None:
        return False
    # `keymap` is a single-element list container when present.
    keymap_container = tool_item.keymap
    return keymap_container[0] if keymap_container else ""
def _keymap_from_item(context, item):
    """Resolve a tool item's keymap name to the actual key-map in the
    active key-configuration, or None when the item has no keymap."""
    if item.keymap is None:
        return None
    active_keyconf = context.window_manager.keyconfigs.active
    return active_keyconf.keymaps.get(item.keymap[0])
# Classes registered by this module.
classes = (
    WM_MT_toolsystem_submenu,
)

if __name__ == "__main__":  # only for live edit.
    from bpy.utils import register_class
    for cls in classes:
        register_class(cls)
| 35.913988 | 113 | 0.603382 |
ace39aca7f75999e4fe962ad54fa7336e285053d | 1,529 | py | Python | utilities/pickle_manager.py | marcellogoccia/deep-value-investing | 4d45cc92c157246485b638d2052596a76975ec8a | [
"MIT"
] | null | null | null | utilities/pickle_manager.py | marcellogoccia/deep-value-investing | 4d45cc92c157246485b638d2052596a76975ec8a | [
"MIT"
] | null | null | null | utilities/pickle_manager.py | marcellogoccia/deep-value-investing | 4d45cc92c157246485b638d2052596a76975ec8a | [
"MIT"
] | null | null | null | import pickle
import os
from utilities.json_manager import JsonManager as jSon
# File suffix used for all pickle files written by this module.
extension = 'pkl'

# To read a pickle file from the command line try the following:
# python -mpickle pickle_file.pkl
class PickleManager:
    """Save/load Python objects as pickle files, with a JSON side-car
    written on save for easy offline inspection."""

    def __init__(self):
        pass

    @staticmethod
    def save(data, name_file='', path=''):
        """
        Save the dictionary passed as input, data, into a pickle file.
        After saving the data into a pickle file, it stores it into a json file for offline easy reading.
        @param data the dictionary to store
        @param name_file the name of the file where the pickle data will be stored.
        @param path the path where to store the pickle file
        @return The path and filename of the stored pickle file
        """
        if not name_file:
            name_file = f"data.{extension}"
        # Bug fix: the original `extension not in name_file` test wrongly
        # accepted any name merely *containing* 'pkl' (e.g. 'pkl_data'),
        # leaving it without the suffix; check the actual suffix instead.
        if not name_file.endswith(f'.{extension}'):
            name_file = f'{name_file}.{extension}'
        path_filename = os.path.abspath(os.path.join(path, name_file))
        with open(path_filename, 'wb') as f:
            pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
        # JSON side-car for human-readable inspection.
        jSon.save(data, path_filename)
        return path_filename

    @staticmethod
    def load(path_filename):
        """
        Load the dictionary from the pickle file passed as input
        @param path_filename the path and the name of the file where the pickle file was stored
        @return The dictionary loaded
        """
        with open(path_filename, 'rb') as f:
            return pickle.load(f)
| 30.58 | 105 | 0.649444 |
ace39b8cf2d64821a38a8db59d2cde96bd1726f4 | 2,611 | py | Python | scripts/extra/intersect-gt-and-dr.py | k5iogura/mAPe | 92e87b0d8f58fdfe14e1a574040f0c384ca832de | [
"Apache-2.0"
] | null | null | null | scripts/extra/intersect-gt-and-dr.py | k5iogura/mAPe | 92e87b0d8f58fdfe14e1a574040f0c384ca832de | [
"Apache-2.0"
] | null | null | null | scripts/extra/intersect-gt-and-dr.py | k5iogura/mAPe | 92e87b0d8f58fdfe14e1a574040f0c384ca832de | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sys
import os
import glob
import argparse
## This script ensures same number of files in ground-truth and detection-results folder.
## When you encounter file not found error, it's usually because you have
## mismatched numbers of ground-truth and detection-results files.
## You can use this script to move ground-truth and detection-results files that are
## not in the intersection into a backup folder (backup_no_matches_found).
## This will retain only files that have the same name in both folders.
def check(path):
    """argparse type-callable: return *path* as a string when it exists
    on disk, otherwise None."""
    return str(path) if os.path.exists(path) else None
# CLI: -db/--input_dir names the dataset folder holding both the
# 'ground-truth' and 'detection-results' sub-folders.
parser = argparse.ArgumentParser()
parser.add_argument('-db', '--input_dir', type=check, nargs=1, required=True)
args = parser.parse_args()
target=args.input_dir[0]

# make sure that the cwd() in the beginning is the location of the python script (so that every path makes sense)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Walk two levels up (this script lives two directories below the root).
parent_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
parent_path = os.path.abspath(os.path.join(parent_path, os.pardir))
GT_PATH = os.path.join(parent_path, target,'ground-truth')
DR_PATH = os.path.join(parent_path, target,'detection-results')

backup_folder = 'backup_no_matches_found' # must end without slash

# Collect the .txt annotation files from both folders; abort when empty.
os.chdir(GT_PATH)
gt_files = glob.glob('*.txt')
if len(gt_files) == 0:
    print("Error: no .txt files found in", GT_PATH)
    sys.exit()
os.chdir(DR_PATH)
dr_files = glob.glob('*.txt')
if len(dr_files) == 0:
    print("Error: no .txt files found in", DR_PATH)
    sys.exit()

gt_files = set(gt_files)
dr_files = set(dr_files)

print('total ground-truth files:', len(gt_files))
print('total detection-results files:', len(dr_files))
print()

# Files present in only one of the two folders will be moved aside.
gt_backup = gt_files - dr_files
dr_backup = dr_files - gt_files
def backup(src_folder, backup_files, backup_folder):
    """Move non-intersection files into a backup sub-folder.

    src_folder    -- folder containing the files (becomes the cwd)
    backup_files  -- iterable of file names (txt) to move
    backup_folder -- name of the backup sub-folder, created on demand
    """
    # non-intersection files (txt format) will be moved to a backup folder
    if not backup_files:
        print('No backup required for', src_folder)
        return
    os.chdir(src_folder)
    # exist_ok avoids the check-then-create race of the original
    # `os.path.exists` + `os.makedirs` pair.
    os.makedirs(backup_folder, exist_ok=True)
    for file_name in backup_files:
        # os.path.join is portable, unlike a hard-coded '/' separator.
        os.rename(file_name, os.path.join(backup_folder, file_name))
# Move mismatched files aside so both folders contain the same names.
backup(GT_PATH, gt_backup, backup_folder)
backup(DR_PATH, dr_backup, backup_folder)
if gt_backup:
    print('total ground-truth backup files:', len(gt_backup))
if dr_backup:
    print('total detection-results backup files:', len(dr_backup))

intersection = gt_files & dr_files
print('total intersected files:', len(intersection))
print("Intersection completed!")
| 35.283784 | 113 | 0.736499 |
ace39e5b1ad1450eedcc35000699c975197573d2 | 51,119 | py | Python | dallinger/experiment_server/experiment_server.py | stefanuddenberg/Dallinger | 7132441cc2017c381813bf014f9795c882f6be3a | [
"MIT"
] | null | null | null | dallinger/experiment_server/experiment_server.py | stefanuddenberg/Dallinger | 7132441cc2017c381813bf014f9795c882f6be3a | [
"MIT"
] | null | null | null | dallinger/experiment_server/experiment_server.py | stefanuddenberg/Dallinger | 7132441cc2017c381813bf014f9795c882f6be3a | [
"MIT"
] | null | null | null | """ This module provides the backend Flask server that serves an experiment. """
from datetime import datetime
import gevent
from json import dumps
from json import loads
import os
import re
from flask import abort, Flask, render_template, request, Response, send_from_directory
from jinja2 import TemplateNotFound
from rq import Queue
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy.sql.expression import true
from psycopg2.extensions import TransactionRollbackError
from dallinger import db
from dallinger import experiment
from dallinger import models
from dallinger.config import get_config
from dallinger import recruiters
from dallinger.notifications import get_messenger
from dallinger.notifications import MessengerError
from .replay import ReplayBackend
from .worker_events import worker_function
from .utils import (
crossdomain,
nocache,
ValidatesBrowser,
error_page,
error_response,
success_response,
ExperimentError,
)
# Initialize the Dallinger database.
session = db.session
redis_conn = db.redis_conn

# Connect to the Redis queue for notifications.
q = Queue(connection=redis_conn)

# Pub/sub channel used for waiting-room quorum updates (see /summary).
WAITING_ROOM_CHANNEL = "quorum"

app = Flask("Experiment_Server")
@app.before_first_request
def _config():
    """Return the Dallinger configuration, loading it on first use."""
    configuration = get_config()
    if not configuration.ready:
        configuration.load()
    return configuration
def Experiment(args):
    """Instantiate the experiment class exported by the experiment package."""
    experiment_class = experiment.load()
    return experiment_class(args)
# Load the experiment's extra routes, if any.
try:
    from dallinger_experiment.experiment import extra_routes
except ImportError:
    # The experiment package defines no extra routes; that's fine.
    pass
else:
    app.register_blueprint(extra_routes)

# Ideally, we'd only load recruiter routes if the recruiter is active, but
# it turns out this is complicated, so for now we always register our
# primary recruiter's route:
app.register_blueprint(recruiters.mturk_routes)
"""Basic routes."""
@app.route("/")
def index():
    """Index route: render the current configuration as a simple HTML page."""
    config = _config()
    entries = [
        '<dt style="font-weight:bold;margin-top:15px;">{}</dt><dd>{}</dd>'.format(*item)
        for item in sorted(config.as_dict().items())
    ]
    return (
        "<html><head></head><body><h1>Dallinger Experiment in progress</h1><dl>"
        + "".join(entries)
        + "</dl></body></html>"
    )
@app.route("/robots.txt")
def static_robots_txt():
    """Serve robots.txt from static file."""
    return send_from_directory("static", "robots.txt")
@app.route("/favicon.ico")
def static_favicon():
    """Serve the favicon from the static folder."""
    return send_from_directory("static", "favicon.ico", mimetype="image/x-icon")
@app.errorhandler(ExperimentError)
def handle_exp_error(exception):
    """Handle errors by sending an error page."""
    # Log the error value, code and the request args for diagnosis.
    app.logger.error(
        "%s (%s) %s", exception.value, exception.errornum, str(dict(request.args))
    )
    return error_page(error_type=exception.value)
"""Define functions for handling requests."""
@app.teardown_request
def shutdown_session(_=None):
    """Rollback and close session at end of a request."""
    # scoped_session.remove() rolls back any open transaction and returns
    # the connection to the pool.
    session.remove()
    db.logger.debug("Closing Dallinger DB session at flask request end")
@app.context_processor
def inject_experiment():
    """Inject experiment and environment variables into the template context."""
    exp = Experiment(session)
    return dict(experiment=exp, env=os.environ)
@app.route("/error-page", methods=["POST", "GET"])
def render_error():
    """Render the generic error page, resolving the participant when an
    id was included in the posted form."""
    participant_id = request.form.get("participant_id")
    participant = (
        models.Participant.query.get(participant_id) if participant_id else None
    )
    return error_page(
        participant=participant,
        request_data=request.form.get("request_data"),
    )
hit_error_template = """Dear experimenter,
This is an automated email from Dallinger. You are receiving this email because
a recruited participant has been unable to complete the experiment due to
a bug.
The application id is: {app_id}
The information about the failed HIT is recorded in the database in the
Notification table, with assignment_id {assignment_id}.
To see the logs, use the command "dallinger logs --app {app_id}"
To pause the app, use the command "dallinger hibernate --app {app_id}"
To destroy the app, use the command "dallinger destroy --app {app_id}"
The Dallinger dev. team.
"""
@app.route("/handle-error", methods=["POST"])
def handle_error():
    """Record an experiment error reported by the client.

    Recovers the participant/worker/assignment/HIT ids from whatever
    combination was posted (top-level form fields, or nested inside the
    JSON `request_data`), marks the participant complete when identified,
    stores a Notification row, emails the experimenter, and renders a
    completion page.
    """
    request_data = request.form.get("request_data")
    error_feedback = request.form.get("error_feedback")
    error_type = request.form.get("error_type")
    error_text = request.form.get("error_text")
    worker_id = request.form.get("worker_id")
    assignment_id = request.form.get("assignment_id")
    participant_id = request.form.get("participant_id")
    hit_id = request.form.get("hit_id")
    participant = None
    completed = False
    details = {"request_data": {}}

    if request_data:
        try:
            request_data = loads(request_data)
        except ValueError:
            # Unparseable payload: keep an empty dict so processing continues.
            request_data = {}
        details["request_data"] = request_data

        # The payload may itself carry a JSON-encoded "data" field; fall
        # back to the outer dict when it is absent or unparseable.
        try:
            data = loads(request_data.get("data", "null")) or request_data
        except ValueError:
            data = request_data

        # Fill in any ids that were not posted as top-level form fields.
        if not participant_id and "participant_id" in data:
            participant_id = data["participant_id"]
        if not worker_id and "worker_id" in data:
            worker_id = data["worker_id"]
        if not assignment_id and "assignment_id" in data:
            assignment_id = data["assignment_id"]
        if not hit_id and "hit_id" in data:
            hit_id = data["hit_id"]

    if participant_id:
        try:
            participant_id = int(participant_id)
        except (ValueError, TypeError):
            participant_id = None

    details["feedback"] = error_feedback
    details["error_type"] = error_type
    details["error_text"] = error_text

    # Try to resolve the participant from the worker id.
    if participant_id is None and worker_id:
        participants = (
            session.query(models.Participant).filter_by(worker_id=worker_id).all()
        )
        if participants:
            participant = participants[0]
            if not assignment_id:
                assignment_id = participant.assignment_id

    # NOTE(review): this lookup filters worker_id == assignment_id, matching
    # the original code; presumably for recruiters that reuse one value for
    # both — confirm.
    if participant_id is None and assignment_id:
        participants = (
            session.query(models.Participant).filter_by(worker_id=assignment_id).all()
        )
        if participants:
            participant = participants[0]
            participant_id = participant.id
            if not worker_id:
                worker_id = participant.worker_id

    if participant_id is not None:
        _worker_complete(participant_id)
        completed = True

    details["request_data"].update(
        {"worker_id": worker_id, "hit_id": hit_id, "participant_id": participant_id}
    )

    notif = models.Notification(
        assignment_id=assignment_id or "unknown",
        event_type="ExperimentError",
        details=details,
    )
    session.add(notif)
    session.commit()

    config = _config()
    message = {
        "subject": "Error during HIT.",
        "body": hit_error_template.format(
            app_id=config.get("id", "unknown"), assignment_id=assignment_id or "unknown"
        ),
    }
    db.logger.debug("Reporting HIT error...")
    messenger = get_messenger(config)
    try:
        messenger.send(message)
    except MessengerError as ex:
        # Best effort: a failed email must not break the error page.
        db.logger.exception(ex)

    return render_template(
        "error-complete.html",
        completed=completed,
        contact_address=config.get("contact_email_on_error"),
        hit_id=hit_id,
    )
"""Define routes for managing an experiment and the participants."""
@app.route("/launch", methods=["POST"])
def launch():
    """Launch the experiment.

    Opens recruitment, spawns the experiment's background tasks (plus the
    replay backend and chat subscription when configured), and returns the
    recruitment details.  Each step reports a 500 with a descriptive
    message on failure.
    """
    try:
        exp = Experiment(db.init_db(drop_all=False))
    except Exception as ex:
        return error_response(
            error_text="Failed to load experiment in /launch: {}".format(str(ex)),
            status=500,
            simple=True,
        )
    try:
        exp.log("Launching experiment...", "-----")
    except IOError as ex:
        return error_response(
            error_text="IOError writing to experiment log: {}".format(str(ex)),
            status=500,
            simple=True,
        )

    try:
        recruitment_details = exp.recruiter.open_recruitment(
            n=exp.initial_recruitment_size
        )
        session.commit()
    except Exception as e:
        return error_response(
            error_text="Failed to open recruitment, check experiment server log "
            "for details: {}".format(str(e)),
            status=500,
            simple=True,
        )

    for task in exp.background_tasks:
        try:
            gevent.spawn(task)
        except Exception:
            return error_response(
                error_text="Failed to spawn task on launch: {}, ".format(task)
                + "check experiment server log for details",
                status=500,
                simple=True,
            )

    # Optionally replay a previously-recorded experiment.
    if _config().get("replay", False):
        try:
            task = ReplayBackend(exp)
            gevent.spawn(task)
        except Exception:
            return error_response(
                error_text="Failed to launch replay task for experiment."
                "check experiment server log for details",
                status=500,
                simple=True,
            )

    # If the experiment defines a channel, subscribe the experiment to the
    # redis communication channel:
    if exp.channel is not None:
        try:
            from dallinger.experiment_server.sockets import chat_backend

            chat_backend.subscribe(exp, exp.channel)
        except Exception:
            return error_response(
                error_text="Failed to subscribe to chat for channel on launch "
                + "{}".format(exp.channel)
                + ", check experiment server log for details",
                status=500,
                simple=True,
            )

    message = "\n".join(
        (
            "Initial recruitment list:\n{}".format(
                "\n".join(recruitment_details["items"])
            ),
            "Additional details:\n{}".format(recruitment_details["message"]),
        )
    )

    return success_response(recruitment_msg=message)
def should_show_thanks_page_to(participant):
    """In the context of the /ad route, should the participant be shown
    the thanks.html page instead of ad.html?

    True when the assignment is already submitted/approved, or when a
    working/overrecruited participant has an end_time set (done, but
    still needs to submit an external recruiter form).
    """
    if participant is None:
        return False
    if participant.status in ("submitted", "approved"):
        return True
    return (
        participant.status in ("overrecruited", "working")
        and participant.end_time is not None
    )
@app.route("/ad", methods=["GET"])
@nocache
def advertisement():
    """
    This is the url we give for the ad for our 'external question'. The ad has
    to display two different things: This page will be called from within
    mechanical turk, with url arguments hitId, assignmentId, and workerId.
    If the worker has not yet accepted the hit:
        These arguments will have null values, we should just show an ad for
        the experiment.
    If the worker has accepted the hit:
        These arguments will have appropriate values and we should enter the
        person in the database and provide a link to the experiment popup.
    """
    if not ("hitId" in request.args and "assignmentId" in request.args):
        raise ExperimentError("hit_assign_worker_id_not_set_in_mturk")
    config = _config()

    # Browser rule validation, if configured:
    browser = ValidatesBrowser(config)
    if not browser.is_supported(request.user_agent.string):
        raise ExperimentError("browser_type_not_allowed")

    hit_id = request.args["hitId"]
    assignment_id = request.args["assignmentId"]
    app_id = config.get("id", "unknown")
    mode = config.get("mode")
    debug_mode = mode == "debug"
    worker_id = request.args.get("workerId")
    participant = None

    if worker_id is not None:
        # First check if this workerId has completed the task before
        # under a different assignment (v1):
        already_participated = bool(
            models.Participant.query.filter(
                models.Participant.assignment_id != assignment_id
            )
            .filter(models.Participant.worker_id == worker_id)
            .count()
        )

        if already_participated and not debug_mode:
            raise ExperimentError("already_did_exp_hit")

        # Next, check for participants already associated with this very
        # assignment, and retain their status, if found:
        try:
            participant = (
                models.Participant.query.filter(models.Participant.hit_id == hit_id)
                .filter(models.Participant.assignment_id == assignment_id)
                .filter(models.Participant.worker_id == worker_id)
                .one()
            )
        except exc.SQLAlchemyError:
            # No (or multiple) matches; treat as no existing participant.
            pass

    # Resolve the recruiter: from the query string when given, otherwise
    # from the configuration.
    recruiter_name = request.args.get("recruiter")
    if recruiter_name:
        recruiter = recruiters.by_name(recruiter_name)
    else:
        recruiter = recruiters.from_config(config)
        recruiter_name = recruiter.nickname

    if should_show_thanks_page_to(participant):
        # They've either done, or they're from a recruiter that requires
        # submission of an external form to complete their participation.
        return render_template(
            "thanks.html",
            hitid=hit_id,
            assignmentid=assignment_id,
            workerid=worker_id,
            external_submit_url=recruiter.external_submission_url,
            mode=config.get("mode"),
            app_id=app_id,
        )
    if participant and participant.status == "working":
        # Once participants have finished the instructions, we do not allow
        # them to start the task again.
        raise ExperimentError("already_started_exp_mturk")

    # Participant has not yet agreed to the consent. They might not
    # even have accepted the HIT.
    return render_template(
        "ad.html",
        recruiter=recruiter_name,
        hitid=hit_id,
        assignmentid=assignment_id,
        workerid=worker_id,
        mode=config.get("mode"),
        app_id=app_id,
    )
@app.route("/summary", methods=["GET"])
def summary():
    """Summarize the participants' status codes.

    Also reports network-fill progress and re-publishes the waiting-room
    quorum message, to counter messages missed at the end of the waiting
    room.
    """
    exp = Experiment(session)
    state = {
        "status": "success",
        "summary": exp.log_summary(),
        "completed": exp.is_complete(),
    }
    unfilled_nets = (
        models.Network.query.filter(models.Network.full != true())
        .with_entities(models.Network.id, models.Network.max_size)
        .all()
    )
    working = (
        models.Participant.query.filter_by(status="working")
        .with_entities(func.count(models.Participant.id))
        .scalar()
    )
    state["unfilled_networks"] = len(unfilled_nets)
    nodes_remaining = 0
    required_nodes = 0
    if state["unfilled_networks"] == 0:
        # All networks full and nobody working: experiment is done unless
        # the experiment class already answered `is_complete()`.
        if working == 0 and state["completed"] is None:
            state["completed"] = True
    else:
        for net in unfilled_nets:
            node_count = (
                models.Node.query.filter_by(network_id=net.id, failed=False)
                .with_entities(func.count(models.Node.id))
                .scalar()
            )
            net_size = net.max_size
            required_nodes += net_size
            nodes_remaining += net_size - node_count
    state["nodes_remaining"] = nodes_remaining
    state["required_nodes"] = required_nodes

    if state["completed"] is None:
        state["completed"] = False

    # Regenerate a waiting room message when checking status
    # to counter missed messages at the end of the waiting room
    nonfailed_count = models.Participant.query.filter(
        (models.Participant.status == "working")
        | (models.Participant.status == "overrecruited")
        | (models.Participant.status == "submitted")
        | (models.Participant.status == "approved")
    ).count()
    exp = Experiment(session)
    overrecruited = exp.is_overrecruited(nonfailed_count)
    if exp.quorum:
        quorum = {"q": exp.quorum, "n": nonfailed_count, "overrecruited": overrecruited}
        db.queue_message(WAITING_ROOM_CHANNEL, dumps(quorum))

    return Response(dumps(state), status=200, mimetype="application/json")
@app.route("/experiment_property/<prop>", methods=["GET"])
@app.route("/experiment/<prop>", methods=["GET"])
def experiment_property(prop):
    """Return one public experiment property as JSON; 404 when unknown."""
    exp = Experiment(session)
    properties = exp.public_properties
    if prop not in properties:
        abort(404)
    return success_response(**{prop: properties[prop]})
@app.route("/<page>", methods=["GET"])
def get_page(page):
    """Return the requested page."""
    try:
        return render_template(page + ".html")
    except TemplateNotFound:
        # Unknown template name: respond 404 rather than 500.
        abort(404)
@app.route("/<directory>/<page>", methods=["GET"])
def get_page_from_directory(directory, page):
    """Render a template that lives in a subdirectory of the template dir."""
    template_path = "{}/{}.html".format(directory, page)
    return render_template(template_path)
@app.route("/consent")
def consent():
    """Return the consent form. Here for backwards-compatibility with 2.x."""
    config = _config()
    args = request.args
    # hit_id / assignment_id / worker_id are required query parameters;
    # a missing one raises KeyError, which Flask turns into a 400.
    return render_template(
        "consent.html",
        hit_id=args["hit_id"],
        assignment_id=args["assignment_id"],
        worker_id=args["worker_id"],
        mode=config.get("mode"),
    )
"""Routes for reading and writing to the database."""
def request_parameter(parameter, parameter_type=None, default=None, optional=False):
    """Get a parameter from the current request, with optional validation.

    parameter is the name of the parameter you are looking for.
    parameter_type is the type the parameter should have
    ("int", "bool" or "known_class"; None means return the raw string).
    default is the value the parameter takes if it has not been passed.

    If the parameter is not found and no default is specified,
    or if the parameter is found but is of the wrong type,
    then an error Response object is returned instead of the value.
    """
    exp = Experiment(session)

    # Look up the parameter; fall back to the default / None / an error.
    if parameter not in request.values:
        if default is not None:
            return default
        if optional:
            return None
        msg = "{} {} request, {} not specified".format(
            request.url, request.method, parameter
        )
        return error_response(error_type=msg)
    value = request.values[parameter]

    # No type requested: hand back the raw string.
    if parameter_type is None:
        return value

    if parameter_type == "int":
        try:
            return int(value)
        except ValueError:
            msg = "{} {} request, non-numeric {}: {}".format(
                request.url, request.method, parameter, value
            )
            return error_response(error_type=msg)

    if parameter_type == "known_class":
        # Resolve a class name against the experiment's registry.
        try:
            return exp.known_classes[value]
        except KeyError:
            msg = "{} {} request, unknown_class: {} for parameter {}".format(
                request.url, request.method, value, parameter
            )
            return error_response(error_type=msg)

    if parameter_type == "bool":
        # Only the exact strings "True"/"False" are accepted.
        if value in ["True", "False"]:
            return value == "True"
        msg = "{} {} request, non-boolean {}: {}".format(
            request.url, request.method, parameter, value
        )
        return error_response(error_type=msg)

    # The caller asked for a type this helper does not understand.
    msg = "/{} {} request, unknown parameter type: {} for parameter {}".format(
        request.url, request.method, parameter_type, parameter
    )
    return error_response(error_type=msg)
def assign_properties(thing):
    """Assign request-supplied properties to a model object.

    When creating something via a post request (e.g. a node), you can pass the
    properties of the object in the request. This function gets those values
    from the request and fills in the relevant columns of the table, then
    commits the session.
    """
    details = request_parameter(parameter="details", optional=True)
    if details:
        setattr(thing, "details", loads(details))

    # The models expose five generic columns named property1..property5.
    # (Renamed the loop variable: the original used ``property``, which
    # shadows the builtin of the same name.)
    for index in range(1, 6):
        property_name = "property" + str(index)
        property_value = request_parameter(parameter=property_name, optional=True)
        if property_value:
            setattr(thing, property_name, property_value)

    session.commit()
@app.route("/participant/<worker_id>/<hit_id>/<assignment_id>/<mode>", methods=["POST"])
@db.serialized
def create_participant(worker_id, hit_id, assignment_id, mode):
    """Create a participant.

    This route is hit early on. Any nodes the participant creates will be
    defined in reference to the participant object. You must specify the
    worker_id, hit_id, assignment_id, and mode in the url.
    """
    # Lock the table, triggering multiple simultaneous accesses to fail
    try:
        session.connection().execute("LOCK TABLE participant IN EXCLUSIVE MODE NOWAIT")
    except exc.OperationalError as e:
        # Re-raise as a serialization failure so @db.serialized retries it.
        e.orig = TransactionRollbackError()
        raise e

    # The JS front end sends the literal string "undefined" for missing values.
    missing = [p for p in (worker_id, hit_id, assignment_id) if p == "undefined"]
    if missing:
        msg = "/participant POST: required values were 'undefined'"
        return error_response(error_type=msg, status=403)

    # Reject browsers we have already seen, but only enforce in live mode.
    fingerprint_hash = request.args.get("fingerprint_hash")
    try:
        fingerprint_found = models.Participant.query.filter_by(
            fingerprint_hash=fingerprint_hash
        ).one_or_none()
    except MultipleResultsFound:
        fingerprint_found = True
    if fingerprint_hash and fingerprint_found:
        db.logger.warning("Same browser fingerprint detected.")
        if mode == "live":
            # Typo fix: the message previously read "dectected".
            return error_response(
                error_type="/participant POST: Same participant detected.", status=403
            )

    already_participated = models.Participant.query.filter_by(
        worker_id=worker_id
    ).one_or_none()
    if already_participated:
        db.logger.warning("Worker has already participated.")
        return error_response(
            error_type="/participant POST: worker has already participated.", status=403
        )

    # MTurk can recycle an assignment_id; fail the older working participant.
    duplicate = models.Participant.query.filter_by(
        assignment_id=assignment_id, status="working"
    ).one_or_none()
    if duplicate:
        msg = """
        AWS has reused assignment_id while existing participant is
        working. Replacing older participant {}.
        """
        app.logger.warning(msg.format(duplicate.id))
        q.enqueue(worker_function, "AssignmentReassigned", None, duplicate.id)

    # Count working or beyond participants, including the one being created.
    nonfailed_count = (
        models.Participant.query.filter(
            (models.Participant.status == "working")
            | (models.Participant.status == "overrecruited")
            | (models.Participant.status == "submitted")
            | (models.Participant.status == "approved")
        ).count()
        + 1
    )

    recruiter_name = request.args.get("recruiter", "undefined")
    if not recruiter_name or recruiter_name == "undefined":
        recruiter = recruiters.from_config(_config())
        if recruiter:
            recruiter_name = recruiter.nickname

    # Create the new participant.
    participant = models.Participant(
        recruiter_id=recruiter_name,
        worker_id=worker_id,
        assignment_id=assignment_id,
        hit_id=hit_id,
        mode=mode,
        fingerprint_hash=fingerprint_hash,
    )

    exp = Experiment(session)
    overrecruited = exp.is_overrecruited(nonfailed_count)
    if overrecruited:
        participant.status = "overrecruited"

    session.add(participant)
    session.flush()  # Make sure we know the id for the new row

    result = {"participant": participant.__json__()}

    # Queue notification to others in waiting room
    if exp.quorum:
        quorum = {"q": exp.quorum, "n": nonfailed_count, "overrecruited": overrecruited}
        db.queue_message(WAITING_ROOM_CHANNEL, dumps(quorum))
        result["quorum"] = quorum

    # return the data
    return success_response(**result)
@app.route("/participant/<participant_id>", methods=["GET"])
def get_participant(participant_id):
    """Get the participant with the given id."""
    ppt = models.Participant.query.filter_by(id=participant_id).one_or_none()
    if ppt is None:
        return error_response(
            error_type="/participant GET: no participant found", status=403
        )
    return success_response(participant=ppt.__json__())
@app.route("/network/<network_id>", methods=["GET"])
def get_network(network_id):
    """Get the network with the given id."""
    net = models.Network.query.filter_by(id=network_id).one_or_none()
    if net is None:
        return error_response(error_type="/network GET: no network found", status=403)
    return success_response(network=net.__json__())
@app.route("/question/<participant_id>", methods=["POST"])
def create_question(participant_id):
    """Send a POST request to the question table.

    Questions store information at the participant level, not the node
    level.
    You should pass the question (string) number (int) and response
    (string) as arguments.
    """
    # The participant must exist.
    ppt = models.Participant.query.filter_by(id=participant_id).one_or_none()
    if ppt is None:
        return error_response(
            error_type="/question POST no participant found", status=403
        )

    # Each helper returns either the parsed value or an error Response.
    question = request_parameter(parameter="question")
    response = request_parameter(parameter="response")
    number = request_parameter(parameter="number", parameter_type="int")
    for value in (question, response, number):
        if isinstance(value, Response):
            return value

    # Consult the recruiter regarding whether to accept a questionnaire
    # from the participant:
    rejection = ppt.recruiter.rejects_questionnaire_from(ppt)
    if rejection:
        return error_response(
            error_type="/question POST, status = {}, reason: {}".format(
                ppt.status, rejection
            ),
            participant=ppt,
        )

    try:
        # Creating the Question attaches it to the session via its relationship.
        models.Question(
            participant=ppt, question=question, response=response, number=number
        )
        session.commit()
    except Exception:
        return error_response(error_type="/question POST server error", status=403)

    return success_response()
@app.route("/node/<int:node_id>/neighbors", methods=["GET"])
def node_neighbors(node_id):
    """Send a GET request to the node table.

    This calls the neighbours method of the node
    making the request and returns a list of descriptions of
    the nodes (even if there is only one).
    Required arguments: participant_id, node_id
    Optional arguments: type, connection

    After getting the neighbours it also calls
    exp.node_get_request()
    """
    exp = Experiment(session)

    # get the parameters
    node_type = request_parameter(
        parameter="node_type", parameter_type="known_class", default=models.Node
    )
    connection = request_parameter(parameter="connection", default="to")
    failed = request_parameter(parameter="failed", parameter_type="bool", optional=True)
    # request_parameter returns an error Response on bad input; forward it.
    # ("failed" is deliberately not checked here — see below.)
    for x in [node_type, connection]:
        if type(x) == Response:
            return x

    # make sure the node exists
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(
            error_type="/node/neighbors, node does not exist",
            error_text="/node/{0}/neighbors, node {0} does not exist".format(node_id),
        )

    # get its neighbors
    if failed is not None:
        # This will always raise because "failed" is not a supported parameter.
        # We just want to pass the exception message back in the response:
        try:
            node.neighbors(type=node_type, direction=connection, failed=failed)
        except Exception as e:
            return error_response(error_type="node.neighbors", error_text=str(e))
    else:
        nodes = node.neighbors(type=node_type, direction=connection)
        try:
            # ping the experiment
            exp.node_get_request(node=node, nodes=nodes)
            session.commit()
        except Exception:
            return error_response(error_type="exp.node_get_request")
        return success_response(nodes=[n.__json__() for n in nodes])
@app.route("/node/<participant_id>", methods=["POST"])
@db.serialized
def create_node(participant_id):
    """Send a POST request to the node table.

    This makes a new node for the participant, it calls:
        1. exp.get_network_for_participant
        2. exp.create_node
        3. exp.add_node_to_network
        4. exp.node_post_request
    """
    exp = Experiment(session)

    # The participant must exist ...
    participant = models.Participant.query.filter_by(id=participant_id).one_or_none()
    if participant is None:
        return error_response(error_type="/node POST no participant found", status=403)

    # ... and must still be working.
    if participant.status != "working":
        error_type = "/node POST, status = {}".format(participant.status)
        return error_response(error_type=error_type, participant=participant)

    # No available network means the participant is done.
    network = exp.get_network_for_participant(participant=participant)
    if network is None:
        return Response(dumps({"status": "error"}), status=403)

    node = exp.create_node(participant=participant, network=network)
    assign_properties(node)
    exp.add_node_to_network(node=node, network=network)

    # ping the experiment
    exp.node_post_request(participant=participant, node=node)

    return success_response(node=node.__json__())
@app.route("/node/<int:node_id>/vectors", methods=["GET"])
def node_vectors(node_id):
    """Get the vectors of a node.

    You must specify the node id in the url.
    You can pass direction (incoming/outgoing/all) and failed
    (True/False/all).
    """
    exp = Experiment(session)

    # Parse the query parameters; each may be an error Response.
    direction = request_parameter(parameter="direction", default="all")
    failed = request_parameter(parameter="failed", parameter_type="bool", default=False)
    for value in (direction, failed):
        if isinstance(value, Response):
            return value

    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/node/vectors, node does not exist")

    try:
        vectors = node.vectors(direction=direction, failed=failed)
        # ping the experiment
        exp.vector_get_request(node=node, vectors=vectors)
        session.commit()
    except Exception:
        return error_response(
            error_type="/node/vectors GET server error",
            status=403,
            participant=node.participant,
        )

    return success_response(vectors=[v.__json__() for v in vectors])
@app.route("/node/<int:node_id>/connect/<int:other_node_id>", methods=["POST"])
def connect(node_id, other_node_id):
    """Connect to another node.

    The ids of both nodes must be specified in the url.
    You can also pass direction (to/from/both) as an argument.
    """
    exp = Experiment(session)

    # get the parameters
    direction = request_parameter(parameter="direction", default="to")
    # Bug fix: this previously read ``if type(direction == Response):``.
    # ``type(direction == Response)`` is the *class* ``bool``, which is
    # always truthy, so the route always returned the raw parameter
    # immediately. Test whether the value itself is an error Response.
    if isinstance(direction, Response):
        return direction

    # check the nodes exist
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/node/connect, node does not exist")

    other_node = models.Node.query.get(other_node_id)
    if other_node is None:
        return error_response(
            error_type="/node/connect, other node does not exist",
            participant=node.participant,
        )

    # execute the request
    try:
        vectors = node.connect(whom=other_node, direction=direction)
        for v in vectors:
            assign_properties(v)

        # ping the experiment
        exp.vector_post_request(node=node, vectors=vectors)

        session.commit()
    except Exception:
        return error_response(
            error_type="/vector POST server error",
            status=403,
            participant=node.participant,
        )

    return success_response(vectors=[v.__json__() for v in vectors])
@app.route("/info/<int:node_id>/<int:info_id>", methods=["GET"])
def get_info(node_id, info_id):
    """Get a specific info.

    Both the node and info id must be specified in the url.
    """
    exp = Experiment(session)

    # The requesting node must exist.
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/info, node does not exist")

    info = models.Info.query.get(info_id)
    if info is None:
        return error_response(
            error_type="/info GET, info does not exist", participant=node.participant
        )

    # A node may only see infos it originated or has already received.
    if info.origin_id != node.id:
        received_ids = [
            t.info_id
            for t in node.transmissions(direction="incoming", status="received")
        ]
        if info.id not in received_ids:
            return error_response(
                error_type="/info GET, forbidden info",
                status=403,
                participant=node.participant,
            )

    try:
        # ping the experiment
        exp.info_get_request(node=node, infos=info)
        session.commit()
    except Exception:
        return error_response(
            error_type="/info GET server error",
            status=403,
            participant=node.participant,
        )

    return success_response(info=info.__json__())
@app.route("/node/<int:node_id>/infos", methods=["GET"])
def node_infos(node_id):
    """Get all the infos of a node.

    The node id must be specified in the url.
    You can also pass info_type.
    """
    exp = Experiment(session)

    # Resolve the optional info_type argument against known classes.
    info_type = request_parameter(
        parameter="info_type", parameter_type="known_class", default=models.Info
    )
    if isinstance(info_type, Response):
        return info_type

    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/node/infos, node does not exist")

    try:
        infos = node.infos(type=info_type)
        # ping the experiment
        exp.info_get_request(node=node, infos=infos)
        session.commit()
    except Exception:
        return error_response(
            error_type="/node/infos GET server error",
            status=403,
            participant=node.participant,
        )

    return success_response(infos=[i.__json__() for i in infos])
@app.route("/node/<int:node_id>/received_infos", methods=["GET"])
def node_received_infos(node_id):
    """Get all the infos a node has been sent and has received.

    You must specify the node id in the url.
    You can also pass the info type.
    """
    exp = Experiment(session)

    # get the parameters
    info_type = request_parameter(
        parameter="info_type", parameter_type="known_class", default=models.Info
    )
    if type(info_type) == Response:
        return info_type

    # check the node exists
    node = models.Node.query.get(node_id)
    if node is None:
        # Fixed the error message: it previously reported "/node/infos",
        # which is a different route.
        return error_response(
            error_type="/node/received_infos, node {} does not exist".format(node_id)
        )

    # execute the request:
    infos = node.received_infos(type=info_type)
    try:
        # ping the experiment
        exp.info_get_request(node=node, infos=infos)
        session.commit()
    except Exception:
        return error_response(
            error_type="info_get_request error",
            status=403,
            participant=node.participant,
        )

    return success_response(infos=[i.__json__() for i in infos])
@app.route("/tracking_event/<int:node_id>", methods=["POST"])
@crossdomain(origin="*")
def tracking_event_post(node_id):
    """Enqueue a TrackingEvent worker for the specified Node."""
    raw_details = request_parameter(parameter="details", optional=True)
    # Absent/empty details are passed through unchanged; otherwise parse JSON.
    details = loads(raw_details) if raw_details else raw_details

    # The node must exist before work is queued against it.
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/info POST, node does not exist")

    db.logger.debug(
        "rq: Queueing %s with for node: %s for worker_function",
        "TrackingEvent",
        node_id,
    )
    q.enqueue(
        worker_function, "TrackingEvent", None, None, node_id=node_id, details=details
    )

    return success_response(details=details)
@app.route("/info/<int:node_id>", methods=["POST"])
@crossdomain(origin="*")
def info_post(node_id):
    """Create an info.

    The node id must be specified in the url.

    You must pass contents as an argument.
    info_type is an additional optional argument.
    If info_type is a custom subclass of Info it must be
    added to the known_classes of the experiment class.
    """
    # Validate the parameters; either may come back as an error Response.
    contents = request_parameter(parameter="contents")
    info_type = request_parameter(
        parameter="info_type", parameter_type="known_class", default=models.Info
    )
    for value in (contents, info_type):
        if isinstance(value, Response):
            return value

    # The origin node must exist.
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/info POST, node does not exist")

    exp = Experiment(session)
    try:
        info = info_type(origin=node, contents=contents)
        assign_properties(info)

        # ping the experiment
        exp.info_post_request(node=node, info=info)

        session.commit()
    except Exception:
        return error_response(
            error_type="/info POST server error",
            status=403,
            participant=node.participant,
        )

    return success_response(info=info.__json__())
@app.route("/node/<int:node_id>/transmissions", methods=["GET"])
def node_transmissions(node_id):
    """Get all the transmissions of a node.

    The node id must be specified in the url.
    You can also pass direction (to/from/all) or status (all/pending/received)
    as arguments.
    """
    exp = Experiment(session)

    # get the parameters
    direction = request_parameter(parameter="direction", default="incoming")
    status = request_parameter(parameter="status", default="all")
    for x in [direction, status]:
        if type(x) == Response:
            return x

    # check the node exists
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/node/transmissions, node does not exist")

    # execute the request
    # NOTE: the transmission list is fetched *before* receive() below marks
    # pending transmissions as received, so the returned statuses reflect
    # the pre-receive state — presumably intentional; confirm before changing.
    transmissions = node.transmissions(direction=direction, status=status)
    try:
        if direction in ["incoming", "all"] and status in ["pending", "all"]:
            node.receive()
            session.commit()
        # ping the experiment
        exp.transmission_get_request(node=node, transmissions=transmissions)
        session.commit()
    except Exception:
        return error_response(
            error_type="/node/transmissions GET server error",
            status=403,
            participant=node.participant,
        )

    # return the data
    return success_response(transmissions=[t.__json__() for t in transmissions])
@app.route("/node/<int:node_id>/transmit", methods=["POST"])
def node_transmit(node_id):
    """Transmit to another node.

    The sender's node id must be specified in the url.

    As with node.transmit() the key parameters are what and to_whom. However,
    the values these accept are more limited than for the back end due to the
    necessity of serialization.

    If what and to_whom are not specified they will default to None.
    Alternatively you can pass an int (e.g. '5') or a class name (e.g. 'Info' or
    'Agent'). Passing an int will get that info/node, passing a class name will
    pass the class. Note that if the class you are specifying is a custom class
    it will need to be added to the dictionary of known_classes in your
    experiment code.

    You may also pass the values property1, property2, property3, property4,
    property5 and details. If passed this will fill in the relevant values of
    the transmissions created with the values you specified.

    For example, to transmit all infos of type Meme to the node with id 10:
    dallinger.post(
        "/node/" + my_node_id + "/transmit",
        {what: "Meme",
         to_whom: 10}
    );
    """
    exp = Experiment(session)
    what = request_parameter(parameter="what", optional=True)
    to_whom = request_parameter(parameter="to_whom", optional=True)

    # check the node exists
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/node/transmit, node does not exist")

    # create what
    if what is not None:
        try:
            # A numeric value identifies a specific Info by id ...
            what = int(what)
            what = models.Info.query.get(what)
            if what is None:
                return error_response(
                    error_type="/node/transmit POST, info does not exist",
                    participant=node.participant,
                )
        except Exception:
            # ... anything non-numeric must name a class in known_classes.
            try:
                what = exp.known_classes[what]
            except KeyError:
                msg = "/node/transmit POST, {} not in experiment.known_classes"
                return error_response(
                    error_type=msg.format(what), participant=node.participant
                )

    # create to_whom
    if to_whom is not None:
        try:
            # Same coercion scheme: int id first, then class-name fallback.
            to_whom = int(to_whom)
            to_whom = models.Node.query.get(to_whom)
            if to_whom is None:
                return error_response(
                    error_type="/node/transmit POST, recipient Node does not exist",
                    participant=node.participant,
                )
        except Exception:
            try:
                to_whom = exp.known_classes[to_whom]
            except KeyError:
                msg = "/node/transmit POST, {} not in experiment.known_classes"
                return error_response(
                    error_type=msg.format(to_whom), participant=node.participant
                )

    # execute the request
    try:
        transmissions = node.transmit(what=what, to_whom=to_whom)
        for t in transmissions:
            assign_properties(t)
        session.commit()
        # ping the experiment
        exp.transmission_post_request(node=node, transmissions=transmissions)
        session.commit()
    except Exception:
        return error_response(
            error_type="/node/transmit POST, server error", participant=node.participant
        )

    # return the data
    return success_response(transmissions=[t.__json__() for t in transmissions])
@app.route("/node/<int:node_id>/transformations", methods=["GET"])
def transformation_get(node_id):
    """Get all the transformations of a node.

    The node id must be specified in the url.
    You can also pass transformation_type.
    """
    exp = Experiment(session)

    # Resolve the optional transformation_type argument.
    transformation_type = request_parameter(
        parameter="transformation_type",
        parameter_type="known_class",
        default=models.Transformation,
    )
    if isinstance(transformation_type, Response):
        return transformation_type

    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(
            error_type="/node/transformations, "
            "node {} does not exist".format(node_id)
        )

    transformations = node.transformations(type=transformation_type)
    try:
        # ping the experiment
        exp.transformation_get_request(node=node, transformations=transformations)
        session.commit()
    except Exception:
        return error_response(
            error_type="/node/transformations GET failed", participant=node.participant
        )

    return success_response(transformations=[t.__json__() for t in transformations])
@app.route(
    "/transformation/<int:node_id>/<int:info_in_id>/<int:info_out_id>", methods=["POST"]
)
def transformation_post(node_id, info_in_id, info_out_id):
    """Transform an info.

    The ids of the node, info in and info out must all be in the url.
    You can also pass transformation_type.
    """
    exp = Experiment(session)

    # Resolve the optional transformation_type argument.
    transformation_type = request_parameter(
        parameter="transformation_type",
        parameter_type="known_class",
        default=models.Transformation,
    )
    if isinstance(transformation_type, Response):
        return transformation_type

    # All three referenced records must exist.
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(
            error_type="/transformation POST, " "node {} does not exist".format(node_id)
        )

    info_in = models.Info.query.get(info_in_id)
    if info_in is None:
        return error_response(
            error_type="/transformation POST, info_in {} does not exist".format(
                info_in_id
            ),
            participant=node.participant,
        )

    info_out = models.Info.query.get(info_out_id)
    if info_out is None:
        return error_response(
            error_type="/transformation POST, info_out {} does not exist".format(
                info_out_id
            ),
            participant=node.participant,
        )

    try:
        transformation = transformation_type(info_in=info_in, info_out=info_out)
        assign_properties(transformation)
        session.commit()

        # ping the experiment
        exp.transformation_post_request(node=node, transformation=transformation)
        session.commit()
    except Exception:
        return error_response(
            error_type="/transformation POST failed", participant=node.participant
        )

    return success_response(transformation=transformation.__json__())
@app.route("/notifications", methods=["POST", "GET"])
@crossdomain(origin="*")
def api_notifications():
    """Receive MTurk REST notifications."""
    values = request.values
    event_type = values["Event.1.EventType"]
    assignment_id = values.get("Event.1.AssignmentId")
    participant_id = values.get("participant_id")

    # Add the notification to the queue.
    db.logger.debug(
        "rq: Queueing %s with id: %s for worker_function", event_type, assignment_id
    )
    q.enqueue(worker_function, event_type, assignment_id, participant_id)
    db.logger.debug("rq: Submitted Queue Length: %d (%s)", len(q), ", ".join(q.job_ids))

    return success_response()
def check_for_duplicate_assignments(participant):
    """Check that the assignment_id of the participant is unique.

    If it isnt the older participants will be failed.
    """
    same_assignment = models.Participant.query.filter_by(
        assignment_id=participant.assignment_id
    ).all()
    for other in same_assignment:
        # Only other, still-working participants count as duplicates.
        if other.id != participant.id and other.status == "working":
            q.enqueue(worker_function, "AssignmentAbandoned", None, other.id)
@app.route("/worker_complete", methods=["GET"])
@db.scoped_session_decorator
def worker_complete():
    """Complete worker."""
    participant_id = request.args.get("participant_id")
    if participant_id:
        try:
            _worker_complete(participant_id)
        except KeyError:
            return error_response(
                error_type="ParticipantId not found: {}".format(participant_id)
            )
        return success_response(status="success")
    return error_response(
        error_type="bad request", error_text="participantId parameter is required"
    )
def _worker_complete(participant_id):
    """Record a participant's end time and fire the recruiter's submit event.

    Raises KeyError if no participant with that id exists.
    """
    participant = models.Participant.query.filter_by(id=participant_id).first()
    if participant is None:
        raise KeyError()

    participant.end_time = datetime.now()
    session.add(participant)
    session.commit()

    # Notify recruiter for possible qualification assignment, etc.
    participant.recruiter.notify_completed(participant)

    event_type = participant.recruiter.submitted_event()
    if event_type is not None:
        worker_function(
            event_type=event_type,
            assignment_id=participant.assignment_id,
            participant_id=participant_id,
        )
@app.route("/worker_failed", methods=["GET"])
@db.scoped_session_decorator
def worker_failed():
    """Fail worker. Used by bots only for now."""
    participant_id = request.args.get("participant_id")
    if participant_id:
        try:
            _worker_failed(participant_id)
        except KeyError:
            return error_response(
                error_type="ParticipantId not found: {}".format(participant_id)
            )
        return success_response(
            field="status", data="success", request_type="worker failed"
        )
    return error_response(
        error_type="bad request", error_text="participantId parameter is required"
    )
def _worker_failed(participant_id):
    """Record a participant's end time; reject the assignment for bots.

    Raises KeyError if no participant with that id exists.
    """
    participant = models.Participant.query.filter_by(id=participant_id).first()
    if participant is None:
        raise KeyError()

    participant.end_time = datetime.now()
    session.add(participant)
    session.commit()

    # TODO: Recruiter.rejected_event/failed_event (replace conditional w/ polymorphism)
    recruiter_id = participant.recruiter_id
    if recruiter_id == "bots" or recruiter_id.startswith("bots:"):
        worker_function(
            assignment_id=participant.assignment_id,
            participant_id=participant.id,
            event_type="BotAssignmentRejected",
        )
# Insert "mode" into pages so it's carried from page to page done server-side
# to avoid breaking backwards compatibility with old templates.
def insert_mode(page_html, mode):
    """Insert the experiment mode into a page's worker-ID query string.

    Appends ``&mode=<mode>`` immediately after the *last* occurrence of the
    literal placeholder ``workerId={{ workerid }}`` in ``page_html``.

    Raises ExperimentError("insert_mode_failed") if the placeholder is absent.
    """
    token = "workerId={{ workerid }}"
    # str.rfind locates the last occurrence directly; the original scanned
    # every regex match just to keep the final one.
    position = page_html.rfind(token)
    if position == -1:
        raise ExperimentError("insert_mode_failed")
    insert_at = position + len(token)
    return page_html[:insert_at] + "&mode=" + mode + page_html[insert_at:]
| 32.435914 | 90 | 0.652047 |
ace39f080f7f3037e745ddbff1ba1aec1453b0ca | 2,372 | py | Python | mykit/templates/pv_fixlayer_conv.py | minyez/mykit | 911413120c081be2cfcaef06d62dc40b2abd2747 | [
"MIT"
] | 4 | 2019-01-02T09:17:54.000Z | 2019-12-26T07:15:59.000Z | mykit/templates/pv_fixlayer_conv.py | minyez/mykit | 911413120c081be2cfcaef06d62dc40b2abd2747 | [
"MIT"
] | 6 | 2019-03-06T03:16:12.000Z | 2019-03-14T14:36:01.000Z | mykit/templates/pv_fixlayer_conv.py | minyez/mykit | 911413120c081be2cfcaef06d62dc40b2abd2747 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from pv_calc_utils import *
from pv_classes import vasp_read_poscar
from shutil import copy2
from argparse import ArgumentParser
import subprocess as sp
import os
def Main(ArgList):
# =================== Parser ==========================
description = '''
Test the convergence of slab calculation w.r.t fixed layers
Fix from the bottom side or symmetric fixing. Combined with pv_fix_slab.py'''
parser = ArgumentParser(description=description)
parser.add_argument("-n",dest='nproc',type=int,default=1,help="Number of processors ")
parser.add_argument("-s",dest='sym',help="Flag for symmetric fixing",action="store_true")
parser.add_argument("-v",dest='vasp_path',default="vasp",help="Path of vasp executive")
parser.add_argument("-D",dest='debug',help="Debug mode",action="store_true")
opts = parser.parse_args()
case_pos = vasp_read_poscar()
natoms = case_pos.natoms
all_list = []
calc_list = []
rev_list = reversed(calc_list)
vasp_path, vasp_cmd = vasp_vaspcmd_zmy(opts.nproc,vasp_path=opts.vasp_path)
if opts.debug:
print "Total number of atoms: %d" % natoms
for i in rev_list:
os.chdir(str(i)+'fixed')
if not i == all_list[-1]:
index_in_all = all_list.index(i)
try:
copy2('../'+str(all_list[index_in_all+1])+'fixed/WAVECAR','WAVECAR')
except IOError:
print " WAVECAR is not found. Start from scratch."
try:
copy2('../'+str(all_list[index_in_all+1])+'fixed/CONTCAR','CONTCAR')
except IOError:
print " CONTCAR from last fixing is not found. Exit."
sys.exit(1)
opt_num = int(sp.check_output("grep -c 'T T T' POSCAR_new",shell=True))
fix_num = natoms - opt_num
if opts.debug:
print optnum,fix_num,type(fix_num)
if not opts.sym:
sp.call("pv_fix_slab.py -f CONTCAR -o POSCAR -n %d" % fix_num,shell=True)
else:
surf_num = opt_num / 2
sp.call("pv_fix_slab.py -f CONTCAR -o POSCAR -n %d -s" % surf_num,shell=True)
vasp_vasprun_zmy(vasp_cmd,'out','error')
os.chdir('..')
# ====================================================
if __name__ == "__main__":
Main(sys.argv)
| 36.492308 | 93 | 0.593592 |
ace3a3285de8a6db330791ec1c3d7c8d6dc4cd23 | 176 | py | Python | doj/__init__.py | beachmachine/django-jython | 35aaabe31c5dce0ce0c7752e6a98228c3ed6c987 | [
"BSD-3-Clause"
] | 23 | 2015-02-13T07:58:23.000Z | 2020-04-03T03:36:45.000Z | doj/__init__.py | beachmachine/django-jython | 35aaabe31c5dce0ce0c7752e6a98228c3ed6c987 | [
"BSD-3-Clause"
] | 15 | 2015-02-13T07:59:48.000Z | 2021-07-16T01:16:21.000Z | doj/__init__.py | beachmachine/django-jython | 35aaabe31c5dce0ce0c7752e6a98228c3ed6c987 | [
"BSD-3-Clause"
] | 13 | 2015-02-13T08:05:14.000Z | 2022-03-21T20:52:47.000Z | # -*- coding: utf-8 -*-
# Version components: (major, minor, micro, release level, serial).
__VERSION = (1, 8, 0, 'b', 4)


def get_version():
    """Return the library version.

    :return: the version as a tuple, e.g. ``(1, 8, 0, 'b', 4)``
    """
    version = tuple(__VERSION)
    return version
ace3a6997fb85cd0e7b605db4dbfd1b3c9858d6d | 43 | py | Python | tests/components/poolsense/__init__.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/poolsense/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | tests/components/poolsense/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Tests for the PoolSense integration."""
| 21.5 | 42 | 0.72093 |
ace3a69ab84407792dc1ad8acec50ce0498a93f6 | 690 | py | Python | model.py | utkusenel/SDP | 57198f2fced398dcdd8d362d0a9f918e77163c1b | [
"MIT"
] | null | null | null | model.py | utkusenel/SDP | 57198f2fced398dcdd8d362d0a9f918e77163c1b | [
"MIT"
] | null | null | null | model.py | utkusenel/SDP | 57198f2fced398dcdd8d362d0a9f918e77163c1b | [
"MIT"
] | null | null | null | import pandas as pd
from gurobipy import *
data = pd.read_csv("/Users/utkusenel/Documents/SDP/Parameters.csv", delimiter = ";")
print(data.columns)
sdp_model = Model("sdp_model")
for j in range(0,10):
for t in range(0,12):
I[j,t]= sdp_model.addVars(vtype=GRB.INTEGER,name="I".format(j,t)) ## Decision variables defined.
O[j,t] = sdp_model.addVars(vtype=GRB.BINARY,name="O".format(j,t)) ## j is the number of items and t is the number of periods that are forecasted
OS[j,t] = sdp_model.addVars(vtype=GRB.INTEGER,name="OS".format(j,t))
### PARAMETERS
SS = data.iloc[j,23] ## safety stock parameter
ROP = data.iloc[j,22]
##DDL = data.
| 32.857143 | 153 | 0.656522 |
ace3a6ac63cb0ea0c18ea6f776a6baf33582dcfc | 1,049 | py | Python | creten/indicators/BB.py | nardew/Creten | 15ddb0b52e6f2afec2c79b3c731fccb34a2c63d6 | [
"MIT"
] | 9 | 2019-12-17T10:42:40.000Z | 2021-12-02T23:07:05.000Z | creten/indicators/BB.py | nardew/Creten | 15ddb0b52e6f2afec2c79b3c731fccb34a2c63d6 | [
"MIT"
] | null | null | null | creten/indicators/BB.py | nardew/Creten | 15ddb0b52e6f2afec2c79b3c731fccb34a2c63d6 | [
"MIT"
] | 6 | 2019-03-04T15:01:10.000Z | 2022-01-12T23:22:55.000Z | from indicators.Indicator import Indicator
from indicators.SMA import SMA
from indicators.StdDev import StdDev
class BB(Indicator):
def __init__(self, period, stdDevMult, timeSeries = None):
super(BB, self).__init__()
self.period = period
self.stdDevMult = stdDevMult
self.stdDev = StdDev(self.period)
self.cb = SMA(self.period)
self.lb = []
self.ub = []
self.addSubIndicator(self.cb)
self.addSubIndicator(self.stdDev)
self.initialize(timeSeries)
def _calculate(self):
if len(self.timeSeries) < self.period:
return
stdDev = self.stdDev[-1]
self.lb.append(self.cb[-1] - self.stdDevMult * stdDev)
self.ub.append(self.cb[-1] + self.stdDevMult * stdDev)
def removeValue(self):
super(BB, self).removeValue()
if len(self.ub) > 0:
self.ub.pop(-1)
if len(self.lb) > 0:
self.lb.pop(-1)
def removeAll(self):
super(BB, self).removeAll()
self.ub = []
self.lb = []
def getCentralBand(self):
return self.cb
def getUpperBand(self):
return self.ub
def getLowerBand(self):
return self.lb | 19.425926 | 59 | 0.691134 |
ace3a6fb1dd97199afcd831f3c11f0f08d20f30f | 3,267 | py | Python | Lib/unittest/__init__.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | [
"CNRI-Python-GPL-Compatible"
] | 6,660 | 2018-01-13T12:16:53.000Z | 2022-03-31T15:15:28.000Z | Lib/unittest/__init__.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | [
"CNRI-Python-GPL-Compatible"
] | 238 | 2020-10-21T04:54:00.000Z | 2022-03-31T21:49:03.000Z | Lib/unittest/__init__.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | [
"CNRI-Python-GPL-Compatible"
] | 1,933 | 2018-01-15T13:08:40.000Z | 2022-03-31T11:28:59.000Z | """
Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's
Smalltalk testing framework (used with permission).
This module contains the core framework classes that form the basis of
specific test cases and suites (TestCase, TestSuite etc.), and also a
text-based utility class for running the tests and reporting the results
(TextTestRunner).
Simple usage:
import unittest
class IntegerArithmeticTestCase(unittest.TestCase):
def testAdd(self): # test method names begin with 'test'
self.assertEqual((1 + 2), 3)
self.assertEqual(0 + 1, 1)
def testMultiply(self):
self.assertEqual((0 * 10), 0)
self.assertEqual((5 * 8), 40)
if __name__ == '__main__':
unittest.main()
Further information is available in the bundled documentation, and from
http://docs.python.org/library/unittest.html
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
# Public API of the package.
__all__ = [
    'TestResult', 'TestCase', 'IsolatedAsyncioTestCase', 'TestSuite',
    'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
    'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
    'expectedFailure', 'TextTestResult', 'installHandler',
    'registerResult', 'removeResult', 'removeHandler',
    'addModuleCleanup',
]

# Obsolete helpers kept exported for backwards compatibility.
__all__ += ['getTestCaseNames', 'makeSuite', 'findTestCases']

# Marker consumed by unittest's traceback-trimming machinery.
__unittest = True
from .result import TestResult
from .async_case import IsolatedAsyncioTestCase
from .case import (addModuleCleanup, TestCase, FunctionTestCase, SkipTest, skip,
skipIf, skipUnless, expectedFailure)
from .suite import BaseTestSuite, TestSuite
from .loader import (TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,
findTestCases)
from .main import TestProgram, main
from .runner import TextTestRunner, TextTestResult
from .signals import installHandler, registerResult, removeResult, removeHandler
# deprecated
_TextTestResult = TextTestResult
# There are no tests here, so don't try to run anything discovered from
# introspecting the symbols (e.g. FunctionTestCase). Instead, all our
# tests come from within unittest.test.
def load_tests(loader, tests, pattern):
    """Discover this package's tests relative to the package directory."""
    import os.path

    # The loader caches the top-level directory on itself between calls.
    package_dir = os.path.dirname(__file__)
    return loader.discover(start_dir=package_dir, pattern=pattern)
| 40.333333 | 80 | 0.74472 |
ace3a719cccae39b9d41d77a38632b4135286660 | 12,367 | py | Python | lib/python3.6/site-packages/trello/card.py | HaiBinh/slack_for_trello | 4a854ab43208498ce5972f60f13bf143ac5ad29c | [
"MIT"
] | null | null | null | lib/python3.6/site-packages/trello/card.py | HaiBinh/slack_for_trello | 4a854ab43208498ce5972f60f13bf143ac5ad29c | [
"MIT"
] | 1 | 2021-02-08T20:37:31.000Z | 2021-02-08T20:37:31.000Z | lib/python3.6/site-packages/trello/card.py | HaiBinh/slack_for_trello | 4a854ab43208498ce5972f60f13bf143ac5ad29c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import with_statement, print_function, absolute_import
from dateutil import parser as dateparser
from trello.checklist import Checklist
from trello.label import Label
class Card(object):
"""
Class representing a Trello card. Card attributes are stored on
the object
"""
    # -- snake_case accessors over the camelCase fields mirrored from the API --

    @property
    def member_id(self):
        return self.idMembers

    @property
    def short_id(self):
        return self.idShort

    @property
    def list_id(self):
        return self.idList

    @property
    def board_id(self):
        return self.idBoard

    @property
    def description(self):
        return self.desc

    @property
    def date_last_activity(self):
        return self.dateLastActivity

    @description.setter
    def description(self, value):
        self.desc = value

    @property
    def idLabels(self):
        # Backed by `label_ids`; `fetch()`/`from_json()` assign through the setter.
        return self.label_ids

    @idLabels.setter
    def idLabels(self, values):
        self.label_ids = values

    @property
    def list_labels(self):
        # Returns the Label objects, or None when the list is empty/unset.
        if self.labels:
            return self.labels
        return None

    @property
    def comments(self):
        """
        Cached comments for this card.

        NOTE(review): if `fetch()` was never called, `self._comments` does not
        exist, the AttributeError path stores None, and None is returned
        without contacting the server -- lazy loading only happens after a
        `fetch(eager=False)`. TODO confirm this is intended.
        """
        try:
            if self._comments is None:
                self._comments = self.fetch_comments()
        except AttributeError:
            self._comments = None
        return self._comments

    @property
    def checklists(self):
        """
        Cached checklists for this card.

        NOTE(review): same caveat as `comments` -- without a prior `fetch()`
        this returns None instead of lazily loading. TODO confirm.
        """
        try:
            if self._checklists is None:
                self._checklists = self.fetch_checklists()
        except AttributeError:
            self._checklists = None
        return self._checklists
    def __init__(self, parent, card_id, name=''):
        """
        :param parent: the parent List (or any board-like object exposing
            ``client``) this card belongs to
        :param card_id: ID for this card
        :param name: optional display name for the card
        """
        if isinstance(parent, List):
            self.trello_list = parent
            self.board = parent.board
        else:
            # Assumes any non-List parent is a Board-like object -- TODO confirm.
            self.board = parent

        self.client = parent.client
        self.id = card_id
        self.name = name
    @classmethod
    def from_json(cls, parent, json_obj):
        """
        Deserialize a card JSON object to a Card object.

        :param parent: the list (or board) object that the card belongs to
        :param json_obj: the card payload as returned by the Trello API
        :raises Exception: if ``json_obj`` has no ``id`` key
        """
        if 'id' not in json_obj:
            raise Exception("key 'id' is not in json_obj")
        card = cls(parent,
                   json_obj['id'],
                   # NOTE(review): .encode('utf-8') yields bytes on Python 3;
                   # Python-2-era idiom -- confirm before changing.
                   name=json_obj['name'].encode('utf-8'))
        card.desc = json_obj.get('desc', '')
        card.closed = json_obj['closed']
        card.url = json_obj['url']
        card.member_ids = json_obj['idMembers']
        card.idLabels = json_obj['idLabels']
        card.idList = json_obj['idList']
        card.labels = Label.from_json_list(card.board, json_obj['labels'])
        return card
def __repr__(self):
return '<Card %s>' % self.name
    def fetch(self, eager=True):
        """
        Fetch all attributes for this card from the API and mirror them locally.

        :param eager: if true, comments and checklists are fetched immediately;
            otherwise they are loaded on demand via the matching properties
        """
        json_obj = self.client.fetch_json(
            '/cards/' + self.id,
            query_params={'badges': False})
        self.id = json_obj['id']
        # NOTE(review): .encode('utf-8') yields bytes on Python 3 -- Python-2-era
        # idiom kept as-is; confirm before changing.
        self.name = json_obj['name'].encode('utf-8')
        self.desc = json_obj.get('desc', '')
        self.closed = json_obj['closed']
        self.url = json_obj['url']
        self.idMembers = json_obj['idMembers']
        self.idShort = json_obj['idShort']
        self.idList = json_obj['idList']
        self.idBoard = json_obj['idBoard']
        self.idLabels = json_obj['idLabels']
        self.labels = Label.from_json_list(self.board, json_obj['labels'])
        self.badges = json_obj['badges']
        self.pos = json_obj['pos']
        # Normalize a missing/empty due date to the empty string.
        if json_obj.get('due', ''):
            self.due = json_obj.get('due', '')
        else:
            self.due = ''
        self.checked = json_obj['checkItemStates']
        self.dateLastActivity = dateparser.parse(json_obj['dateLastActivity'])
        # Prime (or reset) the caches behind the `checklists`/`comments` properties.
        self._checklists = self.fetch_checklists() if eager else None
        self._comments = self.fetch_comments() if eager else None
def fetch_comments(self, force=False):
comments = []
if (force is True) or (self.badges['comments'] > 0):
comments = self.client.fetch_json(
'/cards/' + self.id + '/actions',
query_params={'filter': 'commentCard'})
return sorted(comments, key=lambda comment: comment['date'])
return comments
def get_list(self):
obj = self.client.fetch_json('/lists/' + self.idList)
return List.from_json(board=self, json_obj=obj)
def get_comments(self):
"""Alias for fetch_comments for backward compatibility. Always contact server"""
return self.fetch_comments(force=True)
def fetch_checklists(self):
checklists = []
json_obj = self.client.fetch_json(
'/cards/' + self.id + '/checklists', )
# Thanks https://github.com/HuffAndPuff for noticing checklist were not sorted
json_obj = sorted(json_obj, key=lambda checklist: checklist['pos'])
for cl in json_obj:
checklists.append(Checklist(self.client, self.checked, cl,
trello_card=self.id))
return checklists
    def fetch_actions(self, action_filter='createCard'):
        """
        Fetch actions for this card and store them on ``self.actions``.

        ``action_filter`` may contain several action types separated by ','.
        The API response is a list.
        """
        json_obj = self.client.fetch_json(
            '/cards/' + self.id + '/actions',
            query_params={'filter': action_filter})
        self.actions = json_obj

    def attriExp(self, multiple):
        """
        Explore what comes back from Trello for a given action filter.

        :param multiple: one of the attributes accepted by
            GET /1/cards/[card id or shortlink]/actions
        """
        self.fetch_actions(multiple)
        return self.actions
    def listCardMove_date(self):
        """
        Return the history of transitions of this card from one list to another.

        The lower the index, the more recent the historical item. Returns a
        list of triplets: [starting list name, ending list name, transition date].
        """
        self.fetch_actions('updateCard:idList')
        res = []
        for idx in self.actions:
            date_str = idx['date']
            dateDate = dateparser.parse(date_str)
            strLst = idx['data']['listBefore']['name']
            endLst = idx['data']['listAfter']['name']
            res.append([strLst, endLst, dateDate])
        return res

    @property
    def latestCardMove_date(self):
        """
        Return the date of the last card transition.
        """
        self.fetch_actions('updateCard:idList')
        # Assumes actions[0] is the most recent action -- TODO confirm API ordering.
        date_str = self.actions[0]['date']
        return dateparser.parse(date_str)

    @property
    def create_date(self):
        """Return the creation date of the card.

        WARNING: fails if the card was created by converting a checklist item.
        attriExp('convertToCardFromCheckItem') allows testing for that condition.
        """
        self.fetch_actions()
        date_str = self.actions[0]['date']
        return dateparser.parse(date_str)
@property
def due_date(self):
return dateparser.parse(self.due) if self.due else ''
    def set_name(self, new_name):
        """
        Update the name on the card to :new_name:
        """
        self._set_remote_attribute('name', new_name)
        self.name = new_name

    def set_description(self, description):
        # Push the new description to the server, then mirror it locally.
        self._set_remote_attribute('desc', description)
        self.desc = description

    def set_due(self, due):
        """Set the due time for the card.

        :due: a datetime object
        """
        datestr = due.strftime('%Y-%m-%dT%H:%M:%S')
        self._set_remote_attribute('due', datestr)
        self.due = datestr

    def set_pos(self, pos):
        """
        Update card position in list.

        :pos: 'top', 'bottom' or int
        """
        self._set_remote_attribute('pos', pos)
        self.pos = pos

    def set_closed(self, closed):
        # Archive (True) or un-archive (False) the card.
        self._set_remote_attribute('closed', closed)
        self.closed = closed

    def delete(self):
        # Delete this card permanently
        self.client.fetch_json(
            '/cards/' + self.id,
            http_method='DELETE')

    def assign(self, member_id):
        # Add the given member to the card.
        self.client.fetch_json(
            '/cards/' + self.id + '/members',
            http_method='POST',
            post_args={'value': member_id})

    def unassign(self, member_id):
        # Remove the given member from the card.
        self.client.fetch_json(
            '/cards/' + self.id + '/idMembers/' + member_id,
            http_method='DELETE')

    def subscribe(self):
        # PUT subscribed=true for this card.
        self.client.fetch_json(
            '/cards/' + self.id + '/subscribed',
            http_method='PUT',
            post_args={'value': True})

    def comment(self, comment_text):
        """Add a comment to a card."""
        self.client.fetch_json(
            '/cards/' + self.id + '/actions/comments',
            http_method='POST',
            post_args={'text': comment_text})

    def add_label(self, label):
        # Attach an existing board Label object to the card.
        self.client.fetch_json(
            '/cards/' + self.id + '/idLabels',
            http_method='POST',
            post_args={'value': label.id})
    def attach(self, name=None, mimeType=None, file=None, url=None):
        """
        Add an attachment to the card. The attachment can be either a
        file or a url. Setting the name and/or mime type is optional.

        :param name: The name of the attachment
        :param mimeType: mime type for the attachment
        :param file: a file-like, binary object that supports read()
        :param url: a URL pointing to the resource to be attached
        :raises Exception: if both or neither of ``file`` and ``url`` are given
        """
        # Exactly one of `file` / `url` must be supplied (exclusive or).
        if (file and url) or (not file and not url):
            raise Exception('Please provide either a file or url, and not both!')

        kwargs = {}
        if file:
            # (name, fileobj, mime) triple -- presumably consumed by a
            # requests-style multipart upload in the client; confirm there.
            kwargs['files'] = dict(file=(name, file, mimeType))
        else:
            kwargs['name'] = name
            kwargs['mimeType'] = mimeType
            kwargs['url'] = url

        self._post_remote_data(
            'attachments', **kwargs
        )
def change_list(self, list_id):
self.client.fetch_json(
'/cards/' + self.id + '/idList',
http_method='PUT',
post_args={'value': list_id})
def change_board(self, board_id, list_id=None):
args = {'value': board_id}
if list_id is not None:
args['idList'] = list_id
self.client.fetch_json(
'/cards/' + self.id + '/idBoard',
http_method='PUT',
post_args=args)
def add_checklist(self, title, items, itemstates=None):
"""Add a checklist to this card
:title: title of the checklist
:items: a list of the item names
:itemstates: a list of the state (True/False) of each item
:return: the checklist
"""
if itemstates is None:
itemstates = []
json_obj = self.client.fetch_json(
'/cards/' + self.id + '/checklists',
http_method='POST',
post_args={'name': title}, )
cl = Checklist(self.client, [], json_obj, trello_card=self.id)
for i, name in enumerate(items):
try:
checked = itemstates[i]
except IndexError:
checked = False
cl.add_checklist_item(name, checked)
self.fetch()
return cl
    def _set_remote_attribute(self, attribute, value):
        # PUT a single card attribute to the API.
        self.client.fetch_json(
            '/cards/' + self.id + '/' + attribute,
            http_method='PUT',
            post_args={'value': value}, )

    def _post_remote_data(self, attribute, files=None, **kwargs):
        # POST to a card sub-resource; `kwargs` become form fields and
        # `files` an upload payload.
        self.client.fetch_json(
            '/cards/' + self.id + '/' + attribute,
            http_method='POST',
            files=files,
            post_args=kwargs)
from trello.trellolist import List
| 31.468193 | 111 | 0.576453 |
ace3a71b60bb0c928472760b44f5d86d21c8eea0 | 26,074 | py | Python | mmdet/models/roi_heads/htc_roi_head.py | HenryOsborne/xViewDetection | 28bbeb263b05c4c892eae87713e34c62cbe606d5 | [
"Apache-2.0"
] | null | null | null | mmdet/models/roi_heads/htc_roi_head.py | HenryOsborne/xViewDetection | 28bbeb263b05c4c892eae87713e34c62cbe606d5 | [
"Apache-2.0"
] | null | null | null | mmdet/models/roi_heads/htc_roi_head.py | HenryOsborne/xViewDetection | 28bbeb263b05c4c892eae87713e34c62cbe606d5 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn.functional as F
from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
merge_aug_masks, multiclass_nms)
from ..builder import HEADS, build_head, build_roi_extractor
from .cascade_roi_head import CascadeRoIHead
@HEADS.register_module()
class HybridTaskCascadeRoIHead(CascadeRoIHead):
"""Hybrid task cascade roi head including one bbox head and one mask head.
https://arxiv.org/abs/1901.07518
"""
    def __init__(self,
                 num_stages,
                 stage_loss_weights,
                 semantic_roi_extractor=None,
                 semantic_head=None,
                 semantic_fusion=('bbox', 'mask'),
                 interleaved=True,
                 mask_info_flow=True,
                 **kwargs):
        """Set up the cascade stages plus the optional semantic branch."""
        super(HybridTaskCascadeRoIHead,
              self).__init__(num_stages, stage_loss_weights, **kwargs)
        # NOTE(review): upstream HTC asserts `self.with_bbox and self.with_mask`;
        # this fork deliberately relaxed the check to bbox-only.
        assert self.with_bbox
        assert not self.with_shared_head  # shared head is not supported
        if semantic_head is not None:
            self.semantic_roi_extractor = build_roi_extractor(
                semantic_roi_extractor)
            self.semantic_head = build_head(semantic_head)

        self.semantic_fusion = semantic_fusion
        self.interleaved = interleaved
        self.mask_info_flow = mask_info_flow
    def init_weights(self, pretrained):
        """Initialize the weights in head.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        super(HybridTaskCascadeRoIHead, self).init_weights(pretrained)
        # The semantic head is owned by this subclass, so the cascade base
        # class does not initialize it; do so explicitly here.
        if self.with_semantic:
            self.semantic_head.init_weights()
@property
def with_semantic(self):
"""bool: whether the head has semantic head"""
if hasattr(self, 'semantic_head') and self.semantic_head is not None:
return True
else:
return False
def forward_dummy(self, x, proposals):
"""Dummy forward function."""
outs = ()
# semantic head
if self.with_semantic:
_, semantic_feat = self.semantic_head(x)
else:
semantic_feat = None
# bbox heads
rois = bbox2roi([proposals])
for i in range(self.num_stages):
bbox_results = self._bbox_forward(
i, x, rois, semantic_feat=semantic_feat)
outs = outs + (bbox_results['cls_score'],
bbox_results['bbox_pred'])
# mask heads
if self.with_mask:
mask_rois = rois[:100]
mask_roi_extractor = self.mask_roi_extractor[-1]
mask_feats = mask_roi_extractor(
x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_semantic and 'mask' in self.semantic_fusion:
mask_semantic_feat = self.semantic_roi_extractor(
[semantic_feat], mask_rois)
mask_feats += mask_semantic_feat
last_feat = None
for i in range(self.num_stages):
mask_head = self.mask_head[i]
if self.mask_info_flow:
mask_pred, last_feat = mask_head(mask_feats, last_feat)
else:
mask_pred = mask_head(mask_feats)
outs = outs + (mask_pred, )
return outs
def _bbox_forward_train(self,
stage,
x,
sampling_results,
gt_bboxes,
gt_labels,
rcnn_train_cfg,
semantic_feat=None):
"""Run forward function and calculate loss for box head in training."""
bbox_head = self.bbox_head[stage]
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_results = self._bbox_forward(
stage, x, rois, semantic_feat=semantic_feat)
bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes,
gt_labels, rcnn_train_cfg)
loss_bbox = bbox_head.loss(bbox_results['cls_score'],
bbox_results['bbox_pred'], rois,
*bbox_targets)
bbox_results.update(
loss_bbox=loss_bbox,
rois=rois,
bbox_targets=bbox_targets,
)
return bbox_results
def _mask_forward_train(self,
stage,
x,
sampling_results,
gt_masks,
rcnn_train_cfg,
semantic_feat=None):
"""Run forward function and calculate loss for mask head in
training."""
mask_roi_extractor = self.mask_roi_extractor[stage]
mask_head = self.mask_head[stage]
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
pos_rois)
# semantic feature fusion
# element-wise sum for original features and pooled semantic features
if self.with_semantic and 'mask' in self.semantic_fusion:
mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
pos_rois)
if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
mask_semantic_feat = F.adaptive_avg_pool2d(
mask_semantic_feat, mask_feats.shape[-2:])
mask_feats += mask_semantic_feat
# mask information flow
# forward all previous mask heads to obtain last_feat, and fuse it
# with the normal mask feature
if self.mask_info_flow:
last_feat = None
for i in range(stage):
last_feat = self.mask_head[i](
mask_feats, last_feat, return_logits=False)
mask_pred = mask_head(mask_feats, last_feat, return_feat=False)
else:
mask_pred = mask_head(mask_feats, return_feat=False)
mask_targets = mask_head.get_targets(sampling_results, gt_masks,
rcnn_train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels)
mask_results = dict(loss_mask=loss_mask)
return mask_results
    def _bbox_forward(self, stage, x, rois, semantic_feat=None):
        """Box head forward function used in both training and testing.

        Args:
            stage (int): cascade stage index selecting the extractor/head pair.
            x (list[Tensor]): multi-level image features.
            rois (Tensor): RoIs to pool features for.
            semantic_feat (Tensor, optional): semantic branch feature, fused
                into the pooled features when 'bbox' is in
                ``self.semantic_fusion``.

        Returns:
            dict: ``cls_score`` and ``bbox_pred`` for the given rois.
        """
        bbox_roi_extractor = self.bbox_roi_extractor[stage]
        bbox_head = self.bbox_head[stage]
        bbox_feats = bbox_roi_extractor(
            x[:len(bbox_roi_extractor.featmap_strides)], rois)
        if self.with_semantic and 'bbox' in self.semantic_fusion:
            bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
                                                             rois)
            # Pool sizes of the two branches can differ; resize before fusing.
            if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
                bbox_semantic_feat = F.adaptive_avg_pool2d(
                    bbox_semantic_feat, bbox_feats.shape[-2:])
            bbox_feats += bbox_semantic_feat
        cls_score, bbox_pred = bbox_head(bbox_feats)

        bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred)
        return bbox_results
def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None):
"""Mask head forward function for testing."""
mask_roi_extractor = self.mask_roi_extractor[stage]
mask_head = self.mask_head[stage]
mask_rois = bbox2roi([bboxes])
mask_feats = mask_roi_extractor(
x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_semantic and 'mask' in self.semantic_fusion:
mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
mask_rois)
if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
mask_semantic_feat = F.adaptive_avg_pool2d(
mask_semantic_feat, mask_feats.shape[-2:])
mask_feats += mask_semantic_feat
if self.mask_info_flow:
last_feat = None
last_pred = None
for i in range(stage):
mask_pred, last_feat = self.mask_head[i](mask_feats, last_feat)
if last_pred is not None:
mask_pred = mask_pred + last_pred
last_pred = mask_pred
mask_pred = mask_head(mask_feats, last_feat, return_feat=False)
if last_pred is not None:
mask_pred = mask_pred + last_pred
else:
mask_pred = mask_head(mask_feats)
return mask_pred
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
gt_semantic_seg=None):
"""
Args:
x (list[Tensor]): list of multi-level img features.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
proposal_list (list[Tensors]): list of region proposals.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None, list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None, Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
gt_semantic_seg (None, list[Tensor]): semantic segmentation masks
used if the architecture supports semantic segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# semantic segmentation part
# 2 outputs: segmentation prediction and embedded features
losses = dict()
if self.with_semantic:
semantic_pred, semantic_feat = self.semantic_head(x)
loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg)
losses['loss_semantic_seg'] = loss_seg
else:
semantic_feat = None
for i in range(self.num_stages):
self.current_stage = i
rcnn_train_cfg = self.train_cfg[i]
lw = self.stage_loss_weights[i]
# assign gts and sample proposals
sampling_results = []
bbox_assigner = self.bbox_assigner[i]
bbox_sampler = self.bbox_sampler[i]
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
for j in range(num_imgs):
assign_result = bbox_assigner.assign(proposal_list[j],
gt_bboxes[j],
gt_bboxes_ignore[j],
gt_labels[j])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[j],
gt_bboxes[j],
gt_labels[j],
feats=[lvl_feat[j][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
bbox_results = \
self._bbox_forward_train(
i, x, sampling_results, gt_bboxes, gt_labels,
rcnn_train_cfg, semantic_feat)
roi_labels = bbox_results['bbox_targets'][0]
for name, value in bbox_results['loss_bbox'].items():
losses[f's{i}.{name}'] = (
value * lw if 'loss' in name else value)
# mask head forward and loss
if self.with_mask:
# interleaved execution: use regressed bboxes by the box branch
# to train the mask branch
if self.interleaved:
pos_is_gts = [res.pos_is_gt for res in sampling_results]
with torch.no_grad():
proposal_list = self.bbox_head[i].refine_bboxes(
bbox_results['rois'], roi_labels,
bbox_results['bbox_pred'], pos_is_gts, img_metas)
# re-assign and sample 512 RoIs from 512 RoIs
sampling_results = []
for j in range(num_imgs):
assign_result = bbox_assigner.assign(
proposal_list[j], gt_bboxes[j],
gt_bboxes_ignore[j], gt_labels[j])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[j],
gt_bboxes[j],
gt_labels[j],
feats=[lvl_feat[j][None] for lvl_feat in x])
sampling_results.append(sampling_result)
mask_results = self._mask_forward_train(
i, x, sampling_results, gt_masks, rcnn_train_cfg,
semantic_feat)
for name, value in mask_results['loss_mask'].items():
losses[f's{i}.{name}'] = (
value * lw if 'loss' in name else value)
# refine bboxes (same as Cascade R-CNN)
if i < self.num_stages - 1 and not self.interleaved:
pos_is_gts = [res.pos_is_gt for res in sampling_results]
with torch.no_grad():
proposal_list = self.bbox_head[i].refine_bboxes(
bbox_results['rois'], roi_labels,
bbox_results['bbox_pred'], pos_is_gts, img_metas)
return losses
def simple_test(self, x, proposal_list, img_metas, rescale=False):
"""Test without augmentation."""
if self.with_semantic:
_, semantic_feat = self.semantic_head(x)
else:
semantic_feat = None
num_imgs = len(proposal_list)
img_shapes = tuple(meta['img_shape'] for meta in img_metas)
ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
# "ms" in variable names means multi-stage
ms_bbox_result = {}
ms_segm_result = {}
ms_scores = []
rcnn_test_cfg = self.test_cfg
rois = bbox2roi(proposal_list)
for i in range(self.num_stages):
bbox_head = self.bbox_head[i]
bbox_results = self._bbox_forward(
i, x, rois, semantic_feat=semantic_feat)
# split batch bbox prediction back to each image
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
num_proposals_per_img = tuple(len(p) for p in proposal_list)
rois = rois.split(num_proposals_per_img, 0)
cls_score = cls_score.split(num_proposals_per_img, 0)
bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
ms_scores.append(cls_score)
if i < self.num_stages - 1:
bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score]
rois = torch.cat([
bbox_head.regress_by_class(rois[i], bbox_label[i],
bbox_pred[i], img_metas[i])
for i in range(num_imgs)
])
# average scores of each image by stages
cls_score = [
sum([score[i] for score in ms_scores]) / float(len(ms_scores))
for i in range(num_imgs)
]
# apply bbox post-processing to each image individually
det_bboxes = []
det_labels = []
for i in range(num_imgs):
det_bbox, det_label = self.bbox_head[-1].get_bboxes(
rois[i],
cls_score[i],
bbox_pred[i],
img_shapes[i],
scale_factors[i],
rescale=rescale,
cfg=rcnn_test_cfg)
det_bboxes.append(det_bbox)
det_labels.append(det_label)
bbox_result = [
bbox2result(det_bboxes[i], det_labels[i],
self.bbox_head[-1].num_classes)
for i in range(num_imgs)
]
ms_bbox_result['ensemble'] = bbox_result
if self.with_mask:
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
mask_classes = self.mask_head[-1].num_classes
segm_results = [[[] for _ in range(mask_classes)]
for _ in range(num_imgs)]
else:
if rescale and not isinstance(scale_factors[0], float):
scale_factors = [
torch.from_numpy(scale_factor).to(det_bboxes[0].device)
for scale_factor in scale_factors
]
_bboxes = [
det_bboxes[i][:, :4] *
scale_factors[i] if rescale else det_bboxes[i]
for i in range(num_imgs)
]
mask_rois = bbox2roi(_bboxes)
aug_masks = []
mask_roi_extractor = self.mask_roi_extractor[-1]
mask_feats = mask_roi_extractor(
x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_semantic and 'mask' in self.semantic_fusion:
mask_semantic_feat = self.semantic_roi_extractor(
[semantic_feat], mask_rois)
mask_feats += mask_semantic_feat
last_feat = None
num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes)
for i in range(self.num_stages):
mask_head = self.mask_head[i]
if self.mask_info_flow:
mask_pred, last_feat = mask_head(mask_feats, last_feat)
else:
mask_pred = mask_head(mask_feats)
# split batch mask prediction back to each image
mask_pred = mask_pred.split(num_bbox_per_img, 0)
aug_masks.append(
[mask.sigmoid().cpu().numpy() for mask in mask_pred])
# apply mask post-processing to each image individually
segm_results = []
for i in range(num_imgs):
if det_bboxes[i].shape[0] == 0:
segm_results.append(
[[]
for _ in range(self.mask_head[-1].num_classes)])
else:
aug_mask = [mask[i] for mask in aug_masks]
merged_mask = merge_aug_masks(
aug_mask, [[img_metas[i]]] * self.num_stages,
rcnn_test_cfg)
segm_result = self.mask_head[-1].get_seg_masks(
merged_mask, _bboxes[i], det_labels[i],
rcnn_test_cfg, ori_shapes[i], scale_factors[i],
rescale)
segm_results.append(segm_result)
ms_segm_result['ensemble'] = segm_results
if self.with_mask:
results = list(
zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))
else:
results = ms_bbox_result['ensemble']
return results
    def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):
        """Test with augmentations.
        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        Args:
            img_feats: backbone/neck feature maps, one entry per augmented
                view of the (single) input image.
            proposal_list: region proposals for the image.
            img_metas: per-augmentation meta info; each entry is a list
                holding one dict, since there is one image per batch.
            rescale (bool): kept for interface compatibility; boxes are
                rescaled to the original image size by the merge step below.
        Returns:
            list: ``[bbox_result]`` or ``[(bbox_result, segm_result)]``
            depending on ``self.with_mask``.
        """
        # Run the semantic head (if enabled) once per augmented view; index
        # [1] of its output is the semantic feature map used for fusion.
        if self.with_semantic:
            semantic_feats = [
                self.semantic_head(feat)[1] for feat in img_feats
            ]
        else:
            semantic_feats = [None] * len(img_metas)
        rcnn_test_cfg = self.test_cfg
        aug_bboxes = []
        aug_scores = []
        for x, img_meta, semantic in zip(img_feats, img_metas, semantic_feats):
            # only one image in the batch
            img_shape = img_meta[0]['img_shape']
            scale_factor = img_meta[0]['scale_factor']
            flip = img_meta[0]['flip']
            flip_direction = img_meta[0]['flip_direction']
            # Map the proposals into this augmented view's coordinate frame.
            proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
                                     scale_factor, flip, flip_direction)
            # "ms" in variable names means multi-stage
            ms_scores = []
            rois = bbox2roi([proposals])
            # Cascade refinement: each stage regresses the rois that feed
            # the next stage.
            for i in range(self.num_stages):
                bbox_head = self.bbox_head[i]
                bbox_results = self._bbox_forward(
                    i, x, rois, semantic_feat=semantic)
                ms_scores.append(bbox_results['cls_score'])
                if i < self.num_stages - 1:
                    bbox_label = bbox_results['cls_score'].argmax(dim=1)
                    rois = bbox_head.regress_by_class(
                        rois, bbox_label, bbox_results['bbox_pred'],
                        img_meta[0])
            # Average the classification scores over all cascade stages.
            cls_score = sum(ms_scores) / float(len(ms_scores))
            bboxes, scores = self.bbox_head[-1].get_bboxes(
                rois,
                cls_score,
                bbox_results['bbox_pred'],
                img_shape,
                scale_factor,
                rescale=False,
                cfg=None)
            aug_bboxes.append(bboxes)
            aug_scores.append(scores)
        # after merging, bboxes will be rescaled to the original image size
        merged_bboxes, merged_scores = merge_aug_bboxes(
            aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
        det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
                                                rcnn_test_cfg.score_thr,
                                                rcnn_test_cfg.nms,
                                                rcnn_test_cfg.max_per_img)
        bbox_result = bbox2result(det_bboxes, det_labels,
                                  self.bbox_head[-1].num_classes)
        if self.with_mask:
            if det_bboxes.shape[0] == 0:
                # No detections: return one empty mask list per class.
                segm_result = [[[]
                                for _ in range(self.mask_head[-1].num_classes)]
                               ]
            else:
                aug_masks = []
                aug_img_metas = []
                for x, img_meta, semantic in zip(img_feats, img_metas,
                                                 semantic_feats):
                    img_shape = img_meta[0]['img_shape']
                    scale_factor = img_meta[0]['scale_factor']
                    flip = img_meta[0]['flip']
                    flip_direction = img_meta[0]['flip_direction']
                    # Project the merged detections into this view before
                    # extracting mask features.
                    _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
                                           scale_factor, flip, flip_direction)
                    mask_rois = bbox2roi([_bboxes])
                    mask_feats = self.mask_roi_extractor[-1](
                        x[:len(self.mask_roi_extractor[-1].featmap_strides)],
                        mask_rois)
                    if self.with_semantic:
                        semantic_feat = semantic
                        mask_semantic_feat = self.semantic_roi_extractor(
                            [semantic_feat], mask_rois)
                        # Pool the semantic feature to the mask feature size
                        # when the two extractors disagree on output size.
                        if mask_semantic_feat.shape[-2:] != mask_feats.shape[
                                -2:]:
                            mask_semantic_feat = F.adaptive_avg_pool2d(
                                mask_semantic_feat, mask_feats.shape[-2:])
                        mask_feats += mask_semantic_feat
                    last_feat = None
                    # Each stage's mask prediction is kept and merged below;
                    # with mask_info_flow, stage i also feeds stage i+1.
                    for i in range(self.num_stages):
                        mask_head = self.mask_head[i]
                        if self.mask_info_flow:
                            mask_pred, last_feat = mask_head(
                                mask_feats, last_feat)
                        else:
                            mask_pred = mask_head(mask_feats)
                        aug_masks.append(mask_pred.sigmoid().cpu().numpy())
                        aug_img_metas.append(img_meta)
                merged_masks = merge_aug_masks(aug_masks, aug_img_metas,
                                               self.test_cfg)
                ori_shape = img_metas[0][0]['ori_shape']
                segm_result = self.mask_head[-1].get_seg_masks(
                    merged_masks,
                    det_bboxes,
                    det_labels,
                    rcnn_test_cfg,
                    ori_shape,
                    scale_factor=1.0,
                    rescale=False)
            return [(bbox_result, segm_result)]
        else:
            return [bbox_result]
| 43.969646 | 79 | 0.528189 |
ace3a843c68d07d29432e42170a5a1cd740bc4bf | 13,713 | py | Python | flow.py | llgoncalves/of_core | ace7d1088686f0ce6473fc9da6ae60a5bccf2cd1 | [
"MIT"
] | null | null | null | flow.py | llgoncalves/of_core | ace7d1088686f0ce6473fc9da6ae60a5bccf2cd1 | [
"MIT"
] | null | null | null | flow.py | llgoncalves/of_core | ace7d1088686f0ce6473fc9da6ae60a5bccf2cd1 | [
"MIT"
] | null | null | null | """High-level abstraction for Flows of multiple OpenFlow versions.
Use common fields of FlowStats/FlowMod of supported OF versions. ``match`` and
``actions`` fields are different, so Flow, Action and Match related classes are
inherited in v0x01 and v0x04 modules.
"""
import json
from abc import ABC, abstractmethod
from hashlib import md5
# Note: FlowModCommand is the same in both v0x01 and v0x04
from pyof.v0x04.controller2switch.flow_mod import FlowModCommand
import napps.kytos.of_core.v0x01 as v0x01
import napps.kytos.of_core.v0x04 as v0x04
class FlowFactory(ABC):  # pylint: disable=too-few-public-methods
    """Pick the Flow implementation matching a switch's OpenFlow version."""
    @classmethod
    def from_of_flow_stats(cls, of_flow_stats, switch):
        """Build a version-appropriate Flow from a pyof FlowStats."""
        return cls.get_class(switch).from_of_flow_stats(of_flow_stats, switch)
    @staticmethod
    def get_class(switch):
        """Return the Flow class for the switch's OpenFlow version.
        Raises:
            NotImplementedError: for any version other than 0x01/0x04.
        """
        version = switch.connection.protocol.version
        if version == 0x01:
            return v0x01.flow.Flow
        if version == 0x04:
            return v0x04.flow.Flow
        raise NotImplementedError(f'Unsupported OpenFlow version {version}')
class FlowBase(ABC):  # pylint: disable=too-many-instance-attributes
    """Class to abstract a Flow to switches.
    This class represents a Flow installed or to be installed inside the
    switch. A flow, in this case is represented by a Match object and a set of
    actions that should occur in case any match happen.
    """
    # of_version number: 0x01, 0x04
    of_version = None
    # Subclasses must set their version-specific classes
    _action_factory = None
    _flow_mod_class = None
    _match_class = None
    def __init__(self, switch, table_id=0xff, match=None, priority=0,
                 idle_timeout=0, hard_timeout=0, cookie=0, actions=None,
                 stats=None):
        """Assign parameters to attributes.
        Args:
            switch (kytos.core.switch.Switch): Switch ID is used to uniquely
                identify a flow.
            table_id (int): The index of a single table or 0xff for all tables.
            match (|match|): Match object.
            priority (int): Priority level of flow entry.
            idle_timeout (int): Idle time before discarding, in seconds.
            hard_timeout (int): Max time before discarding, in seconds.
            cookie (int): Opaque controller-issued identifier.
            actions (|list_of_actions|): List of actions to apply.
            stats (Stats): Latest flow statistics.
        """
        # pylint: disable=too-many-arguments,too-many-locals
        self.switch = switch
        self.table_id = table_id
        # Disable not-callable error as subclasses set a class
        self.match = match or self._match_class()  # pylint: disable=E1102
        self.priority = priority
        self.idle_timeout = idle_timeout
        self.hard_timeout = hard_timeout
        self.cookie = cookie
        self.actions = actions or []
        self.stats = stats or FlowStats()  # pylint: disable=E1102
    @property
    def id(self):  # pylint: disable=invalid-name
        """Return this flow unique identifier.
        Calculate an md5 hash based on this object's modified json string. The
        json for ID calculation excludes ``stats`` attribute that changes over
        time.
        Returns:
            str: Flow unique identifier (md5sum).
        """
        flow_str = self.as_json(sort_keys=True, include_id=False)
        md5sum = md5()
        md5sum.update(flow_str.encode('utf-8'))
        return md5sum.hexdigest()
    def as_dict(self, include_id=True):
        """Return the Flow as a serializable Python dictionary.
        Args:
            include_id (bool): Default is ``True``. Internally, it is set to
                ``False`` when calculating the flow ID that is based in this
                dictionary's JSON string.
        Returns:
            dict: Serializable dictionary.
        """
        flow_dict = {
            'switch': self.switch.id,
            'table_id': self.table_id,
            'match': self.match.as_dict(),
            'priority': self.priority,
            'idle_timeout': self.idle_timeout,
            'hard_timeout': self.hard_timeout,
            'cookie': self.cookie,
            'actions': [action.as_dict() for action in self.actions]}
        # Both 'id' and 'stats' are only present in the caller-facing dict;
        # they are excluded while the ID itself is being computed.
        if include_id:
            # Avoid infinite recursion
            flow_dict['id'] = self.id
            # Remove statistics that change over time
            flow_dict['stats'] = self.stats.as_dict()
        return flow_dict
    @classmethod
    def from_dict(cls, flow_dict, switch):
        """Return an instance with values from ``flow_dict``."""
        flow = cls(switch)
        # Set attributes found in ``flow_dict``
        # (only names that already exist on the instance are assigned)
        for attr_name, attr_value in flow_dict.items():
            if attr_name in vars(flow):
                setattr(flow, attr_name, attr_value)
        flow.switch = switch
        if 'stats' in flow_dict:
            flow.stats = FlowStats.from_dict(flow_dict['stats'])
        # Version-specific attributes
        if 'match' in flow_dict:
            flow.match = cls._match_class.from_dict(flow_dict['match'])
        if 'actions' in flow_dict:
            flow.actions = []
            for action_dict in flow_dict['actions']:
                action = cls._action_factory.from_dict(action_dict)
                if action:
                    flow.actions.append(action)
        return flow
    def as_json(self, sort_keys=False, include_id=True):
        """Return the representation of a flow in JSON format.
        Args:
            sort_keys (bool): ``False`` by default (Python's default). Sorting
                is used, for example, to calculate the flow ID.
            include_id (bool): ``True`` by default. Internally, the ID is not
                included while calculating it.
        Returns:
            string: Flow JSON string representation.
        """
        return json.dumps(self.as_dict(include_id), sort_keys=sort_keys)
    def as_of_add_flow_mod(self):
        """Return an OpenFlow add FlowMod."""
        return self._as_of_flow_mod(FlowModCommand.OFPFC_ADD)
    def as_of_delete_flow_mod(self):
        """Return an OpenFlow delete FlowMod."""
        return self._as_of_flow_mod(FlowModCommand.OFPFC_DELETE)
    @abstractmethod
    def _as_of_flow_mod(self, command):
        """Return a pyof FlowMod with given ``command``."""
        # Subclasses call this common part and then fill version-specific
        # fields (actions/instructions) on the returned FlowMod.
        # Disable not-callable error as subclasses will set a class
        flow_mod = self._flow_mod_class()  # pylint: disable=E1102
        flow_mod.match = self.match.as_of_match()
        flow_mod.cookie = self.cookie
        flow_mod.command = command
        flow_mod.idle_timeout = self.idle_timeout
        flow_mod.hard_timeout = self.hard_timeout
        flow_mod.priority = self.priority
        return flow_mod
    @staticmethod
    @abstractmethod
    def _get_of_actions(of_flow_stats):
        """Return pyof actions from pyof FlowStats."""
        pass
    @classmethod
    def from_of_flow_stats(cls, of_flow_stats, switch):
        """Create a flow with latest stats based on pyof FlowStats."""
        of_actions = cls._get_of_actions(of_flow_stats)
        actions = (cls._action_factory.from_of_action(of_action)
                   for of_action in of_actions)
        # Drop actions the factory does not recognize (it returns None).
        non_none_actions = [action for action in actions if action]
        return cls(switch,
                   table_id=of_flow_stats.table_id.value,
                   match=cls._match_class.from_of_match(of_flow_stats.match),
                   priority=of_flow_stats.priority.value,
                   idle_timeout=of_flow_stats.idle_timeout.value,
                   hard_timeout=of_flow_stats.hard_timeout.value,
                   cookie=of_flow_stats.cookie.value,
                   actions=non_none_actions,
                   stats=FlowStats.from_of_flow_stats(of_flow_stats))
class ActionBase(ABC):
    """Common behaviour shared by every flow action implementation."""
    def as_dict(self):
        """Expose this action's attributes as a JSON-serializable dict.
        Note: this returns the live instance ``__dict__`` (original API).
        """
        return vars(self)
    @classmethod
    def from_dict(cls, action_dict):
        """Build an action from a dictionary of attribute values.
        Keys that do not match an existing attribute are ignored.
        """
        instance = cls(None)
        for name, value in action_dict.items():
            if hasattr(instance, name):
                setattr(instance, name, value)
        return instance
    @abstractmethod
    def as_of_action(self):
        """Convert this action to its pyof counterpart for a FlowMod."""
    @classmethod
    @abstractmethod
    def from_of_action(cls, of_action):
        """Build an action from the equivalent pyof action."""
class ActionFactoryBase(ABC):
    """Dispatch to the right ActionBase implementation."""
    # key: action_type or pyof class, value: ActionBase child
    _action_class = {
        'output': None,
        'set_vlan': None,
        # pyof class: ActionBase child
    }
    @classmethod
    def from_dict(cls, action_dict):
        """Build the proper Action from a dictionary.
        Args:
            action_dict (dict): Action attributes.
        Raises:
            KeyError: when ``action_type`` is not a registered key
                (preserved from the original lookup semantics).
        """
        kind = action_dict.get('action_type')
        implementation = cls._action_class[kind]
        return implementation.from_dict(action_dict) if implementation else None
    @classmethod
    def from_of_action(cls, of_action):
        """Build the proper Action from a pyof action.
        Args:
            of_action (pyof action): Action from python-openflow.
        """
        implementation = cls._action_class.get(type(of_action))
        return implementation.from_of_action(of_action) if implementation \
            else None
class MatchBase:  # pylint: disable=too-many-instance-attributes
    """Base class with the high-level match fields common to all versions."""
    def __init__(self, in_port=None, dl_src=None, dl_dst=None, dl_vlan=None,
                 dl_vlan_pcp=None, dl_type=None, nw_proto=None, nw_src=None,
                 nw_dst=None, tp_src=None, tp_dst=None):
        """Store every supported match field; ``None`` means "unset"."""
        # pylint: disable=too-many-arguments
        fields = (('in_port', in_port), ('dl_src', dl_src),
                  ('dl_dst', dl_dst), ('dl_vlan', dl_vlan),
                  ('dl_vlan_pcp', dl_vlan_pcp), ('dl_type', dl_type),
                  ('nw_proto', nw_proto), ('nw_src', nw_src),
                  ('nw_dst', nw_dst), ('tp_src', tp_src),
                  ('tp_dst', tp_dst))
        for name, value in fields:
            setattr(self, name, value)
    def as_dict(self):
        """Return only the fields that are actually set (non-``None``)."""
        return {name: value
                for name, value in vars(self).items()
                if value is not None}
    @classmethod
    def from_dict(cls, match_dict):
        """Build a Match from a dictionary, ignoring unknown keys."""
        match = cls()
        for name, value in match_dict.items():
            if name in vars(match):
                setattr(match, name, value)
        return match
    @classmethod
    @abstractmethod
    def from_of_match(cls, of_match):
        """Build a Match from a pyof Match."""
    @abstractmethod
    def as_of_match(self):
        """Return the python-openflow Match equivalent."""
class Stats:
    """Store statistics as plain attributes with dict round-tripping."""
    def as_dict(self):
        """Return a dict of the attributes whose value is not ``None``."""
        return {name: value
                for name, value in vars(self).items()
                if value is not None}
    @classmethod
    def from_dict(cls, stats_dict):
        """Build a statistics object from a plain dictionary."""
        instance = cls()
        cls._update(instance, stats_dict.items())
        return instance
    @classmethod
    def from_of_flow_stats(cls, of_stats):
        """Build an instance populated from a pyof FlowStats."""
        instance = cls()
        instance.update(of_stats)
        return instance
    def update(self, of_stats):
        """Refresh attribute values in place from a pyof stats object.
        Avoids new object creation; pyof values are GenericType instances
        whose native value lives in ``.value``.
        """
        pairs = ((name, generic.value)
                 for name, generic in vars(of_stats).items()
                 if name in vars(self))
        self._update(self, pairs)
    @staticmethod
    def _update(obj, iterable):
        """Set each (name, value) pair on ``obj`` if the attribute exists."""
        for name, value in iterable:
            if hasattr(obj, name):
                setattr(obj, name, value)
class FlowStats(Stats):
    """Common fields for 1.0 and 1.3 FlowStats."""
    def __init__(self):
        """Start every statistic as ``None`` (meaning "not reported")."""
        for field in ('byte_count', 'duration_sec', 'duration_nsec',
                      'packet_count'):
            setattr(self, field, None)
class PortStats(Stats):  # pylint: disable=too-many-instance-attributes
    """Common fields for 1.0 and 1.3 PortStats."""
    def __init__(self):
        """Start every counter as ``None`` (meaning "not reported")."""
        for field in ('rx_packets', 'tx_packets', 'rx_bytes', 'tx_bytes',
                      'rx_dropped', 'tx_dropped', 'rx_errors', 'tx_errors',
                      'rx_frame_err', 'rx_over_err', 'rx_crc_err',
                      'collisions'):
            setattr(self, field, None)
| 34.804569 | 79 | 0.625538 |
ace3a857cfc97d96565ef49ce5b7d31e088a2833 | 3,341 | py | Python | collaborative_filtering/cf_baseline.py | ujjwal-raizada/recommendation-systems | 3c9f6508f98853be55cdb8ff9b7b35e9b0e6d37b | [
"MIT"
] | null | null | null | collaborative_filtering/cf_baseline.py | ujjwal-raizada/recommendation-systems | 3c9f6508f98853be55cdb8ff9b7b35e9b0e6d37b | [
"MIT"
] | null | null | null | collaborative_filtering/cf_baseline.py | ujjwal-raizada/recommendation-systems | 3c9f6508f98853be55cdb8ff9b7b35e9b0e6d37b | [
"MIT"
] | null | null | null | import numpy as np
from time import time
from collections import Counter
import pickle
def loadFile(filename):
    """Load a pickled object saved after running preprocess.py.

    Args:
        filename: path of the pickle file.

    Returns:
        The unpickled Python object.
    """
    # Use a context manager so the file handle is always closed
    # (the previous version leaked it and shadowed ``filename``).
    with open(filename, 'rb') as file:
        return pickle.load(file)
def meanRating(matrix):
    """Return the mean of the non-zero entries of each row of ``matrix``.

    Rows with no non-zero entry get a mean of 0.

    Args:
        matrix: 2-D numpy array (e.g. users x movies utility matrix).

    Returns:
        1-D float numpy array with one mean per row.
    """
    # Cast to float: for integer rating matrices, assigning the quotient
    # back into an int array silently truncated the mean (e.g. 3/2 -> 1).
    mean_rating = matrix.sum(axis=1).astype(float)
    # counts[i] = number of non-zero entries in row i
    counts = Counter(matrix.nonzero()[0])
    n_users = matrix.shape[0]
    for i in range(n_users):
        if i in counts.keys():
            mean_rating[i] = mean_rating[i] / counts[i]
        else:
            mean_rating[i] = 0
    return mean_rating
def baseLineFilter(umat, sim, mmap, umap, ratings, mur, mmr, test, mew):
    """Predict ratings for a sample of the test set using the item-item
    baseline collaborative-filtering estimate b_ui = mu + b_u + b_i.

    Args:
        umat: movie x user utility matrix (transpose of users x movies).
        sim: movie x movie similarity matrix.
        mmap: movie id (str) -> row index in ``sim``/``umat``.
        umap: user id (str) -> column index in ``umat``.
        ratings: unused; kept for interface compatibility with main().
        mur: per-user mean rating.
        mmr: per-movie mean rating.
        test: DataFrame with columns (user_id, movie_id, rating).
        mew: global mean rating.

    Returns:
        (prediction, rating): predicted and actual integer ratings for the
        first 1% of the test rows (sampled to keep runtime manageable).
    """
    rating = []
    prediction = []
    # Only the first 1% of the test rows are evaluated.
    for i in range(int(len(test["movie_id"]) / 100)):
        user = test.iloc[i, 0]
        movie = test.iloc[i, 1]
        stars = int(test.iloc[i, 2])
        movie = mmap[str(movie)]
        user = umap[str(user)]
        rating.append(stars)
        movie_sim = sim[movie]
        user_ratings = umat[:, user]
        # Baseline estimate: global mean + user bias + item bias.
        b = mmr[movie] + mur[user] - mew
        num, den = 0, 0
        for j in range(sim.shape[0]):
            if user_ratings[j] != 0:
                # Deviation of the user's rating from movie j's baseline,
                # weighted by the similarity between movie j and the target.
                bi = mur[user] + mmr[j] - mew
                num += movie_sim[j] * (user_ratings[j] - bi)
                den += movie_sim[j]
        predicted_rating = b
        if den > 0:
            predicted_rating += num / den
        # Clamp to the valid star range and round to an integer rating.
        if predicted_rating > 5:
            predicted_rating = 5
        elif predicted_rating < 0:
            predicted_rating = 0
        predicted_rating = int(round(predicted_rating))
        prediction.append(predicted_rating)
    # (Removed leftover debug print of the whole prediction list.)
    return prediction, rating
def computeError(actual_rating, prediction):
    """Compute root mean square error and mean absolute error.

    Args:
        actual_rating: sequence of true ratings.
        prediction: sequence of predicted ratings (same length).

    Returns:
        rmse -- root mean square error (float)
        mae -- mean absolute error (float)
    """
    n = len(prediction)
    actual_rating = np.array(actual_rating)
    prediction = np.array(prediction)
    # The previous version returned the mean SQUARED error while calling it
    # "rmse"; take the square root so the name matches the value.
    rmse = np.sqrt(np.sum(np.square(prediction - actual_rating)) / n)
    mae = np.sum(np.abs(prediction - actual_rating)) / n
    return rmse, mae
def topKRecommendation(k, movie_map, similarity, movie_id):
    """Generate the ``k`` movies most similar to ``movie_id``.

    Args:
        k: number of recommendations to return.
        movie_map: movie id (str) -> row index in ``similarity``.
        similarity: movie x movie similarity matrix.
        movie_id: id (str) of the reference movie.

    Returns:
        list of (similarity, movie_row_index) tuples, highest first.
    """
    row_no = movie_map[movie_id]
    # Pair every other movie with its similarity to the reference movie.
    candidates = [(similarity[row_no][other], other)
                  for other in range(len(movie_map))
                  if other != row_no]
    candidates.sort(reverse=True)
    return candidates[:k]
def main():
    """Load the preprocessed artifacts, score a sample of the test set and
    print error metrics plus a few example recommendations."""
    # NOTE(review): "utility" is loaded twice -- ``ratings`` is the same
    # matrix as ``utility_matrix`` and is unused by baseLineFilter.
    utility_matrix = loadFile("utility")
    ratings = loadFile("utility")
    test = loadFile("test")
    umap = loadFile("users_map")
    mmap = loadFile("movie_map")
    sim = loadFile("similarity")
    # Movie x user view of the users x movies utility matrix.
    umat = np.transpose(utility_matrix)
    mur = meanRating(utility_matrix)
    mmr = meanRating(umat)
    # Global mean over the non-zero (actually rated) entries.
    mew = sum(sum(utility_matrix)) / np.count_nonzero(utility_matrix)
    prediction, actual = baseLineFilter(
        umat, sim, mmap, umap, ratings, mur, mmr, test, mew)
    rmse, mae = computeError(actual, prediction)
    print(rmse)
    print(mae)
    recommendations = topKRecommendation(4, mmap, sim, "102")
    print("recommendations for the user ", recommendations)
if __name__ == "__main__":
    main()
| 28.555556 | 72 | 0.614786 |
ace3a8a8e58e08bd82a0f57be99cce65d588cb7d | 4,442 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_facts.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_facts.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_facts.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | # (c) 2020 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from units.modules.utils import set_module_args
from .os6_module import TestDellos6Module, load_fixture
from ansible_collections.dellemc.os6.plugins.modules import os6_facts
class TestDellos6Facts(TestDellos6Module):
    """Unit tests for os6_facts gather_subset behaviour."""

    module = os6_facts

    def setUp(self):
        """Patch run_commands so no real device connection is made."""
        super(TestDellos6Facts, self).setUp()
        self.mock_run_command = patch(
            'ansible.modules.network.os6.os6_facts.run_commands')
        self.run_command = self.mock_run_command.start()

    def tearDown(self):
        """Stop the run_commands patch."""
        super(TestDellos6Facts, self).tearDown()
        self.mock_run_command.stop()

    def load_fixtures(self, commands=None):
        """Return canned fixture output for every command the module runs."""
        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()
            for item in commands:
                try:
                    obj = json.loads(item)
                    command = obj['command']
                except ValueError:
                    command = item
                if '|' in command:
                    command = str(command).replace('|', '')
                # Fixture files are named after the command with characters
                # unsafe for filenames replaced (' ' -> '_', '/' -> '7').
                filename = str(command).replace(' ', '_')
                filename = filename.replace('/', '7')
                output.append(load_fixture(filename))
            return output

        self.run_command.side_effect = load_from_file

    def test_os6_facts_gather_subset_default(self):
        """The default subset includes hardware, default and interfaces."""
        set_module_args(dict())
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual throughout.
        self.assertEqual('"os6_sw1"', ansible_facts['ansible_net_hostname'])
        self.assertIn('Te1/0/1', ansible_facts['ansible_net_interfaces'].keys())
        self.assertEqual(1682, ansible_facts['ansible_net_memtotal_mb'])
        self.assertEqual(623, ansible_facts['ansible_net_memfree_mb'])

    def test_os6_facts_gather_subset_config(self):
        """The 'config' subset exposes the running configuration."""
        set_module_args({'gather_subset': 'config'})
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('config', ansible_facts['ansible_net_gather_subset'])
        self.assertEqual('"os6_sw1"', ansible_facts['ansible_net_hostname'])
        self.assertIn('ansible_net_config', ansible_facts)

    def test_os6_facts_gather_subset_hardware(self):
        """The 'hardware' subset exposes memory statistics."""
        set_module_args({'gather_subset': 'hardware'})
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
        self.assertEqual(1682, ansible_facts['ansible_net_memtotal_mb'])
        self.assertEqual(623, ansible_facts['ansible_net_memfree_mb'])

    def test_os6_facts_gather_subset_interfaces(self):
        """The 'interfaces' subset exposes interfaces and LLDP neighbors."""
        set_module_args({'gather_subset': 'interfaces'})
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('Te1/0/1', ansible_facts['ansible_net_interfaces'].keys())
        # Comparing a list against dict.keys() always fails on Python 3;
        # compare against a sorted list of the keys instead.
        self.assertEqual(['Te1/0/5', 'Te1/0/6'],
                         sorted(ansible_facts['ansible_net_neighbors']))
        self.assertIn('ansible_net_interfaces', ansible_facts)
| 41.90566 | 96 | 0.69023 |
ace3a9dace26155387c57aac9c53bc0bf6db7c94 | 4,181 | py | Python | sangita_data/hindi/sentences/loadsent.py | djokester/sangita_data | c6a2b5038985bf6fff60edebe6edfd2cf8b1b987 | [
"Apache-2.0"
] | 4 | 2018-06-25T06:34:54.000Z | 2018-09-16T04:35:13.000Z | sangita_data/hindi/sentences/loadsent.py | djokester/sangita_data | c6a2b5038985bf6fff60edebe6edfd2cf8b1b987 | [
"Apache-2.0"
] | 4 | 2018-06-23T22:02:54.000Z | 2018-07-06T22:54:48.000Z | sangita_data/hindi/sentences/loadsent.py | djokester/sangita_data | c6a2b5038985bf6fff60edebe6edfd2cf8b1b987 | [
"Apache-2.0"
] | 7 | 2018-06-23T21:44:13.000Z | 2018-07-05T21:51:11.000Z | import sangita_data.hindi.sentences.sent0 as sent0
import sangita_data.hindi.sentences.sent1 as sent1
import sangita_data.hindi.sentences.sent2 as sent2
import sangita_data.hindi.sentences.sent3 as sent3
import sangita_data.hindi.sentences.sent4 as sent4
import sangita_data.hindi.sentences.sent5 as sent5
import sangita_data.hindi.sentences.sent6 as sent6
import sangita_data.hindi.sentences.sent7 as sent7
import sangita_data.hindi.sentences.sent8 as sent8
import sangita_data.hindi.sentences.sent9 as sent9
import sangita_data.hindi.sentences.sent10 as sent10
import sangita_data.hindi.sentences.sent11 as sent11
import sangita_data.hindi.sentences.sent12 as sent12
import sangita_data.hindi.sentences.sent13 as sent13
import sangita_data.hindi.sentences.sent14 as sent14
import sangita_data.hindi.sentences.sent15 as sent15
import sangita_data.hindi.sentences.sent16 as sent16
import sangita_data.hindi.sentences.sent17 as sent17
import sangita_data.hindi.sentences.sent18 as sent18
import sangita_data.hindi.sentences.sent19 as sent19
import sangita_data.hindi.sentences.sent20 as sent20
import sangita_data.hindi.sentences.sent21 as sent21
import sangita_data.hindi.sentences.sent22 as sent22
import sangita_data.hindi.sentences.sent23 as sent23
import sangita_data.hindi.sentences.sent24 as sent24
import sangita_data.hindi.sentences.sent25 as sent25
import sangita_data.hindi.sentences.sent26 as sent26
import sangita_data.hindi.sentences.sent27 as sent27
import sangita_data.hindi.sentences.sent28 as sent28
import sangita_data.hindi.sentences.sent29 as sent29
import sangita_data.hindi.sentences.sent30 as sent30
import sangita_data.hindi.sentences.sent31 as sent31
import sangita_data.hindi.sentences.sent32 as sent32
import sangita_data.hindi.sentences.sent33 as sent33
import sangita_data.hindi.sentences.sent34 as sent34
import sangita_data.hindi.sentences.sent35 as sent35
import sangita_data.hindi.sentences.sent36 as sent36
import sangita_data.hindi.sentences.sent37 as sent37
import sangita_data.hindi.sentences.sent38 as sent38
import sangita_data.hindi.sentences.sent39 as sent39
import sangita_data.hindi.sentences.sent40 as sent40
import sangita_data.hindi.sentences.sent41 as sent41
import sangita_data.hindi.sentences.sent42 as sent42
import sangita_data.hindi.sentences.sent43 as sent43
import sangita_data.hindi.sentences.sent44 as sent44
import sangita_data.hindi.sentences.sent45 as sent45
import sangita_data.hindi.sentences.sent46 as sent46
def drawlist():
    """Return every Hindi sentence from the sent0..sent46 sub-modules.

    Returns:
        list: concatenation of each sub-module's ``drawlist()`` output.
    """
    modules = (
        sent0, sent1, sent2, sent3, sent4, sent5, sent6, sent7, sent8,
        sent9, sent10, sent11, sent12, sent13, sent14, sent15, sent16,
        sent17, sent18, sent19, sent20, sent21, sent22, sent23, sent24,
        sent25, sent26, sent27, sent28, sent29, sent30, sent31, sent32,
        sent33, sent34, sent35, sent36, sent37, sent38, sent39, sent40,
        sent41, sent42, sent43, sent44, sent45, sent46,
    )
    sent = []
    for module in modules:
        # extend() appends in place; the previous ``sent = sent + ...``
        # rebuilt the whole list on every step (quadratic time).
        sent.extend(module.drawlist())
    return sent
| 41.81 | 52 | 0.761301 |
ace3ab476628c5254fe89dc7298ced6e7960b6f5 | 5,891 | py | Python | blueking/component/client.py | ZhuoZhuoCrayon/bk-nodeman | 76cb71fcc971c2a0c2be161fcbd6b019d4a7a8ab | [
"MIT"
] | 31 | 2021-07-28T13:06:11.000Z | 2022-03-10T12:16:44.000Z | blueking/component/client.py | ZhuoZhuoCrayon/bk-nodeman | 76cb71fcc971c2a0c2be161fcbd6b019d4a7a8ab | [
"MIT"
] | 483 | 2021-07-29T03:17:44.000Z | 2022-03-31T13:03:04.000Z | blueking/component/client.py | ZhuoZhuoCrayon/bk-nodeman | 76cb71fcc971c2a0c2be161fcbd6b019d4a7a8ab | [
"MIT"
] | 29 | 2021-07-28T13:06:21.000Z | 2022-03-25T06:18:18.000Z | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
"""Component API Client
"""
import json
import logging
import random
import time
import requests
from . import collections, conf
from .compat import urlparse
from .utils import get_signature
# shutdown urllib3's warning
try:
requests.packages.urllib3.disable_warnings()
except Exception:
pass
logger = logging.getLogger("component")
class BaseComponentClient(object):
"""Base client class for component"""
@classmethod
def setup_components(cls, components):
cls.available_collections = components
def __init__(
self,
app_code=None,
app_secret=None,
common_args=None,
use_test_env=False,
language=None,
bk_app_code=None,
bk_app_secret=None,
):
"""
:param str app_code: App code to use
:param str app_secret: App secret to use
:param dict common_args: Args that will apply to every request
:param bool use_test_env: whether use test version of components
"""
self.app_code = bk_app_code or app_code or conf.APP_CODE
self.app_secret = bk_app_secret or app_secret or conf.SECRET_KEY
self.bk_api_ver = conf.DEFAULT_BK_API_VER
self.common_args = common_args or {}
self._cached_collections = {}
self.use_test_env = use_test_env
self.language = language or self.get_cur_language()
def set_use_test_env(self, use_test_env):
"""Change the value of use_test_env
:param bool use_test_env: whether use test version of components
"""
self.use_test_env = use_test_env
def set_language(self, language):
self.language = language
def get_cur_language(self):
try:
from django.utils import translation
return translation.get_language()
except Exception:
return None
def set_bk_api_ver(self, bk_api_ver):
self.bk_api_ver = bk_api_ver
def get_bk_api_ver(self):
return self.bk_api_ver
def merge_params_data_with_common_args(self, method, params, data, enable_app_secret=False):
"""get common args when request"""
common_args = dict(bk_app_code=self.app_code, **self.common_args)
if enable_app_secret:
common_args["bk_app_secret"] = self.app_secret
if method == "GET":
_params = common_args.copy()
_params.update(params or {})
params = _params
elif method == "POST":
_data = common_args.copy()
_data.update(data or {})
data = json.dumps(_data)
return params, data
def request(self, method, url, params=None, data=None, **kwargs):
"""Send request"""
# determine whether access test environment of third-party system
headers = kwargs.pop("headers", {})
if self.use_test_env:
headers["x-use-test-env"] = "1"
if self.language:
headers["blueking-language"] = self.language
params, data = self.merge_params_data_with_common_args(method, params, data, enable_app_secret=True)
logger.debug("Calling %s %s with params=%s, data=%s, headers=%s", method, url, params, data, headers)
return requests.request(method, url, params=params, data=data, verify=False, headers=headers, **kwargs)
def __getattr__(self, key):
if key not in self.available_collections:
return getattr(super(BaseComponentClient, self), key)
if key not in self._cached_collections:
collection = self.available_collections[key]
self._cached_collections[key] = collection(self)
return self._cached_collections[key]
class ComponentClientWithSignature(BaseComponentClient):
    """Client class for component with signature"""
    def request(self, method, url, params=None, data=None, **kwargs):
        """Send request, will add "signature" parameter."""
        # Route to the test environment of the third-party system if enabled.
        headers = kwargs.pop("headers", {})
        if self.use_test_env:
            headers["x-use-test-env"] = "1"
        if self.language:
            headers["blueking-language"] = self.language
        # enable_app_secret=False: the secret is not sent in the payload —
        # authentication is proven by the computed bk_signature instead.
        params, data = self.merge_params_data_with_common_args(method, params, data, enable_app_secret=False)
        if method == "POST":
            # NOTE(review): caller-supplied query params are discarded for
            # POST; the query string then carries only the signature fields.
            params = {}
        url_path = urlparse(url).path
        # signature always in GET params
        params.update(
            {
                "bk_timestamp": int(time.time()),
                "bk_nonce": random.randint(1, 2147483647),
            }
        )
        # bk_signature must be computed last, over the already-complete
        # params (including timestamp and nonce) and the final body.
        params["bk_signature"] = get_signature(method, url_path, self.app_secret, params=params, data=data)
        logger.debug("Calling %s %s with params=%s, data=%s", method, url, params, data)
        # NOTE(review): verify=False disables TLS certificate verification;
        # presumably deliberate for internal endpoints — confirm.
        return requests.request(method, url, params=params, data=data, verify=False, headers=headers, **kwargs)
# Pick the Client implementation according to whether request signing is
# enabled (translated from the original Chinese comment).
if conf.CLIENT_ENABLE_SIGNATURE:
    ComponentClient = ComponentClientWithSignature
else:
    ComponentClient = BaseComponentClient
# Install the available API collections on the chosen client class.
ComponentClient.setup_components(collections.AVAILABLE_COLLECTIONS)
| 35.920732 | 115 | 0.670854 |
ace3ab7e4de6a10395bad49e070d581ec6623524 | 231 | py | Python | Utils/string_utils.py | nprockaya/python_training | fe7643f59cc899ebd806d8b1747df0775e9fcf24 | [
"Apache-2.0"
] | 1 | 2021-02-10T12:48:58.000Z | 2021-02-10T12:48:58.000Z | Utils/string_utils.py | nprockaya/python_training | fe7643f59cc899ebd806d8b1747df0775e9fcf24 | [
"Apache-2.0"
] | null | null | null | Utils/string_utils.py | nprockaya/python_training | fe7643f59cc899ebd806d8b1747df0775e9fcf24 | [
"Apache-2.0"
] | null | null | null | import re
def clear_spaces(string):
    """Return *string* with every whitespace character removed."""
    return re.sub(r"\s", "", string)
def clear_hyphens(string):
    """Return *string* with every '(', ')' and '-' character removed."""
    return string.translate(str.maketrans("", "", "()-"))
def clear_spaces_and_hyphens(string):
    """Return *string* with whitespace, parentheses and hyphens removed.

    Equivalent to clear_hyphens(clear_spaces(string)), done in one pass.
    """
    return re.sub(r"[\s()-]", "", string)
| 16.5 | 46 | 0.679654 |
ace3ac4994fcb6b68c381517b0aaf3f0f9adda29 | 136 | py | Python | Ved/W2_Q6.py | asumit499/Python-BootCamp | 0b99f9cb862189d13ad291eac12a8be6c46357f5 | [
"MIT"
] | 4 | 2022-03-20T10:59:53.000Z | 2022-03-25T18:28:04.000Z | Ved/W2_Q6.py | asumit499/Python-BootCamp | 0b99f9cb862189d13ad291eac12a8be6c46357f5 | [
"MIT"
] | null | null | null | Ved/W2_Q6.py | asumit499/Python-BootCamp | 0b99f9cb862189d13ad291eac12a8be6c46357f5 | [
"MIT"
] | 15 | 2022-03-12T11:49:10.000Z | 2022-03-15T06:22:55.000Z | num=int(input("Enter any number:"))
# num is read from stdin on the preceding line; N is its last decimal digit.
N = num % 10
print(N)
# A last digit of 0 or 5 means num is divisible by 5.
print("Hello" if N in (0, 5) else "Bye")
| 13.6 | 35 | 0.580882 |
ace3ac86c45d75c418191497845644a64e462354 | 2,231 | py | Python | day12.py | davidfpc/AoC2021 | b526e606dbf1cc59de4951a321aa9b98d04fde4c | [
"MIT"
] | null | null | null | day12.py | davidfpc/AoC2021 | b526e606dbf1cc59de4951a321aa9b98d04fde4c | [
"MIT"
] | null | null | null | day12.py | davidfpc/AoC2021 | b526e606dbf1cc59de4951a321aa9b98d04fde4c | [
"MIT"
] | null | null | null | # program to compute the time
# of execution of any python code
import copy
import time
def read_input(file_name: str) -> {str: {str}}:
    """Read an edge list from inputFiles/<file_name> into an adjacency map.

    Each line has the form ``a-b``. The cave graph is undirected, so every
    edge is recorded in both directions.
    """
    with open("inputFiles/" + file_name, "r") as handle:
        edges = handle.read().splitlines()
    graph = {}
    for edge in edges:
        left, right = edge.split('-')
        # Record the edge in both directions to build the undirected graph.
        graph.setdefault(left, set()).add(right)
        graph.setdefault(right, set()).add(left)
    return graph
def part1(input_value: {str: {str}}, current_path: [str]):
    """Count distinct paths from the tail of *current_path* to 'end'.

    Big caves (all-uppercase names) may be revisited freely; small caves at
    most once per path. Start the search with current_path=['start'].
    """
    node = current_path[-1]
    if node == 'end':
        return 1
    if node not in input_value:
        return 0
    # Recurse into each neighbour that is either unvisited or a big cave.
    return sum(
        part1(input_value, current_path + [neighbour])
        for neighbour in input_value[node]
        if neighbour not in current_path or all(ch.isupper() for ch in neighbour)
    )
def part2(input_value: {str: {str}}, current_path: [str], visited_small: bool = False):
    """Count paths to 'end', allowing one small cave to be visited twice.

    Big caves (all-uppercase names) may always be revisited and 'start' may
    never be re-entered. *visited_small* records whether the single allowed
    small-cave revisit has already been spent on this path.
    """
    node = current_path[-1]
    if node == 'end':
        return 1
    if node not in input_value:
        return 0
    total = 0
    for neighbour in input_value[node]:
        revisit = neighbour in current_path and not all(ch.isupper() for ch in neighbour)
        if not revisit:
            total += part2(input_value, current_path + [neighbour], visited_small)
        elif neighbour != 'start' and not visited_small:
            # Spend the single permitted small-cave revisit on this branch.
            total += part2(input_value, current_path + [neighbour], True)
    return total
if __name__ == "__main__":
    graph = read_input("day12.txt")
    # Time both parts; each part gets its own deep copy of the parsed graph.
    started = time.time()
    print(f"Part 1: {part1(copy.deepcopy(graph), ['start'])}")
    print(f"Part 2: {part2(copy.deepcopy(graph), ['start'])}")
    finished = time.time()
    print(f"Took {round(finished - started, 5)} to process the puzzle")
| 31.422535 | 87 | 0.580457 |
ace3acdb4ef2a21ed68a331a3b2bd89a8d5bd1cf | 6,871 | py | Python | tflu-kws-cortex-m/Training/test.py | AlexanderEfremovArm/ML-examples | d2bb38d438fe55a54bb04595a5b29c0e8ba4716d | [
"Apache-2.0"
] | 309 | 2018-01-11T20:31:59.000Z | 2022-03-26T11:37:31.000Z | tflu-kws-cortex-m/Training/test.py | AlexanderEfremovArm/ML-examples | d2bb38d438fe55a54bb04595a5b29c0e8ba4716d | [
"Apache-2.0"
] | 60 | 2018-04-10T00:09:37.000Z | 2022-02-10T04:02:11.000Z | tflu-kws-cortex-m/Training/test.py | AlexanderEfremovArm/ML-examples | d2bb38d438fe55a54bb04595a5b29c0e8ba4716d | [
"Apache-2.0"
] | 172 | 2018-01-11T20:32:01.000Z | 2022-03-16T07:24:08.000Z | # Copyright © 2021 Arm Ltd. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for testing trained keyword spotting models from checkpoint files."""
import argparse
import numpy as np
import tensorflow as tf
import data
import models
def test():
    """Calculate accuracy and confusion matrices on validation and test sets.

    Model is created and weights loaded from supplied command line arguments.
    """
    model_settings = models.prepare_model_settings(len(data.prepare_words_list(FLAGS.wanted_words.split(','))),
                                                   FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
                                                   FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)
    model = models.create_model(model_settings, FLAGS.model_architecture, FLAGS.model_size_info, False)

    audio_processor = data.AudioProcessor(data_url=FLAGS.data_url,
                                          data_dir=FLAGS.data_dir,
                                          silence_percentage=FLAGS.silence_percentage,
                                          unknown_percentage=FLAGS.unknown_percentage,
                                          wanted_words=FLAGS.wanted_words.split(','),
                                          validation_percentage=FLAGS.validation_percentage,
                                          testing_percentage=FLAGS.testing_percentage,
                                          model_settings=model_settings)

    model.load_weights(FLAGS.checkpoint).expect_partial()

    # Validation and test evaluation differ only in the data split, so the
    # shared logic lives in _evaluate_split.
    _evaluate_split(model, audio_processor, model_settings,
                    audio_processor.Modes.VALIDATION, 'Validation')
    _evaluate_split(model, audio_processor, model_settings,
                    audio_processor.Modes.TESTING, 'Test')


def _evaluate_split(model, audio_processor, model_settings, mode, split_name):
    """Evaluate the model on one data split; print confusion matrix and accuracy.

    Args:
        model: Keras model with weights already loaded.
        audio_processor: data.AudioProcessor providing the datasets.
        model_settings: Dict of model settings (provides 'label_count').
        mode: audio_processor.Modes member selecting the split.
        split_name: Human-readable split name used in the printed output.
    """
    print(f"Running testing on {split_name.lower()} set...")
    dataset = audio_processor.get_data(mode).batch(FLAGS.batch_size)
    expected_indices = np.concatenate([y for x, y in dataset])

    predictions = model.predict(dataset)
    predicted_indices = tf.argmax(predictions, axis=1)
    accuracy = calculate_accuracy(predicted_indices, expected_indices)

    confusion_matrix = tf.math.confusion_matrix(expected_indices, predicted_indices,
                                                num_classes=model_settings['label_count'])
    print(confusion_matrix.numpy())
    # Fix: the original concatenated two f-strings without a separating
    # space, printing e.g. "accuracy = 90.00%(N=...)".
    print(f'{split_name} accuracy = {accuracy * 100:.2f}% '
          f'(N={audio_processor.set_size(mode)})')
def calculate_accuracy(predicted_indices, expected_indices):
    """Calculates and returns accuracy.

    Args:
        predicted_indices: List of predicted integer indices.
        expected_indices: List of expected integer indices.

    Returns:
        Accuracy value between 0 and 1.
    """
    # Mean of the element-wise match indicator is exactly the accuracy.
    hits = tf.cast(tf.equal(predicted_indices, expected_indices), tf.float32)
    return tf.reduce_mean(hits)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Dataset download and split options.
    parser.add_argument(
        '--data_url',
        type=str,
        default='http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz',
        help='Location of speech training data archive on the web.')
    parser.add_argument(
        '--data_dir',
        type=str,
        default='/tmp/speech_dataset/',
        help="""\
      Where to download the speech training data to.
      """)
    parser.add_argument(
        '--silence_percentage',
        type=float,
        default=10.0,
        help="""\
      How much of the training data should be silence.
      """)
    parser.add_argument(
        '--unknown_percentage',
        type=float,
        default=10.0,
        help="""\
      How much of the training data should be unknown words.
      """)
    parser.add_argument(
        '--testing_percentage',
        type=int,
        default=10,
        help='What percentage of wavs to use as a test set.')
    parser.add_argument(
        '--validation_percentage',
        type=int,
        default=10,
        help='What percentage of wavs to use as a validation set.')
    # Audio / feature-extraction options — these must match the values the
    # checkpoint was trained with, or the model input shape will not line up.
    parser.add_argument(
        '--sample_rate',
        type=int,
        default=16000,
        help='Expected sample rate of the wavs',)
    parser.add_argument(
        '--clip_duration_ms',
        type=int,
        default=1000,
        help='Expected duration in milliseconds of the wavs',)
    parser.add_argument(
        '--window_size_ms',
        type=float,
        default=30.0,
        help='How long each spectrogram timeslice is',)
    parser.add_argument(
        '--window_stride_ms',
        type=float,
        default=10.0,
        help='How long each spectrogram timeslice is',)
    parser.add_argument(
        '--dct_coefficient_count',
        type=int,
        default=40,
        help='How many bins to use for the MFCC fingerprint',)
    parser.add_argument(
        '--batch_size',
        type=int,
        default=100,
        help='How many items to train with at once',)
    parser.add_argument(
        '--wanted_words',
        type=str,
        default='yes,no,up,down,left,right,on,off,stop,go',
        help='Words to use (others will be added to an unknown label)',)
    # Model selection / checkpoint options.
    parser.add_argument(
        '--checkpoint',
        type=str,
        help='Checkpoint to load the weights from.')
    parser.add_argument(
        '--model_architecture',
        type=str,
        default='dnn',
        help='What model architecture to use')
    parser.add_argument(
        '--model_size_info',
        type=int,
        nargs="+",
        default=[128, 128, 128],
        help='Model dimensions - different for various models')
    # parse_known_args ignores unrecognized flags so shared launch scripts
    # can pass extra options without failing here.
    FLAGS, _ = parser.parse_known_args()
    test()
| 37.546448 | 115 | 0.636734 |
ace3ada899f739ae9ad6e47ad6aaa4b65548f67d | 21,670 | py | Python | pgsqltoolsservice/language/language_service.py | sergb213/pgtoolsservice | 6296a207e7443fe4ebd5c91d837c033ee7886cab | [
"MIT"
] | null | null | null | pgsqltoolsservice/language/language_service.py | sergb213/pgtoolsservice | 6296a207e7443fe4ebd5c91d837c033ee7886cab | [
"MIT"
] | null | null | null | pgsqltoolsservice/language/language_service.py | sergb213/pgtoolsservice | 6296a207e7443fe4ebd5c91d837c033ee7886cab | [
"MIT"
] | 1 | 2020-07-30T11:46:44.000Z | 2020-07-30T11:46:44.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
Language Service Implementation
"""
import functools
from logging import Logger # noqa
import threading
from typing import Any, Dict, Set, List # noqa
from prompt_toolkit.completion import Completion # noqa
from prompt_toolkit.document import Document # noqa
import sqlparse
from pgsqltoolsservice.hosting import JSONRPCServer, NotificationContext, RequestContext, ServiceProvider # noqa
from pgsqltoolsservice.connection import ConnectionService, ConnectionInfo
from pgsqltoolsservice.connection.contracts import ConnectionType
from pgsqltoolsservice.workspace.contracts import Position, TextDocumentPosition, Range, Location
from pgsqltoolsservice.workspace import WorkspaceService # noqa
from pgsqltoolsservice.workspace.script_file import ScriptFile # noqa
from pgsqltoolsservice.language.contracts import (
COMPLETION_REQUEST, CompletionItem, CompletionItemKind,
COMPLETION_RESOLVE_REQUEST, DEFINITION_REQUEST,
LANGUAGE_FLAVOR_CHANGE_NOTIFICATION, LanguageFlavorChangeParams,
INTELLISENSE_READY_NOTIFICATION, IntelliSenseReadyParams,
DOCUMENT_FORMATTING_REQUEST, DocumentFormattingParams,
DOCUMENT_RANGE_FORMATTING_REQUEST, DocumentRangeFormattingParams,
TextEdit, FormattingOptions, StatusChangeParams, STATUS_CHANGE_NOTIFICATION
)
from pgsqltoolsservice.language.completion import PGCompleter # noqa
from pgsqltoolsservice.language.operations_queue import ConnectionContext, OperationsQueue, QueuedOperation
from pgsqltoolsservice.language.keywords import DefaultCompletionHelper
from pgsqltoolsservice.language.script_parse_info import ScriptParseInfo
from pgsqltoolsservice.language.text import TextUtilities
from pgsqltoolsservice.language.peek_definition_result import DefinitionResult
from pgsqltoolsservice.scripting.contracts import ScriptOperation
import pgsqltoolsservice.utils as utils
from pgsqltoolsservice.metadata.contracts import ObjectMetadata
import pgsqltoolsservice.scripting.scripter as scripter
import tempfile
# Map of meta or display_meta values to completion items. Based on SqlToolsService definitions
DISPLAY_META_MAP: Dict[str, CompletionItemKind] = {
'column': CompletionItemKind.Field,
'columns': CompletionItemKind.Field,
'database': CompletionItemKind.Method,
'datatype': CompletionItemKind.Unit, # TODO review this
'fk join': CompletionItemKind.Reference, # TODO review this. As it's an FK join, that's like a reference?
'function': CompletionItemKind.Function,
'join': CompletionItemKind.Snippet, # TODO review this. Join suggest is kind of like a snippet?
'keyword': CompletionItemKind.Keyword,
'name join': CompletionItemKind.Snippet, # TODO review this. Join suggest is kind of like a snippet?
'schema': CompletionItemKind.Module,
'table': CompletionItemKind.File,
'table alias': CompletionItemKind.File,
'view': CompletionItemKind.File
}
class LanguageService:
"""
Class for handling requests/events that deal with Language requests such as auto-complete
"""
def __init__(self):
self._service_provider: ServiceProvider = None
self._server: JSONRPCServer = None
self._logger: [Logger, None] = None
self._non_pgsql_uris: Set[str] = set()
self._completion_helper = DefaultCompletionHelper()
self._script_map: Dict[str, 'ScriptParseInfo'] = {}
self._script_map_lock: threading.Lock = threading.Lock()
self._binding_queue_map: Dict[str, 'ScriptParseInfo'] = {}
self.operations_queue: OperationsQueue = None
def register(self, service_provider: ServiceProvider) -> None:
"""
Called by the ServiceProvider to allow init and registration of service handler methods
"""
self._service_provider = service_provider
self._logger = service_provider.logger
self._server = service_provider.server
self.operations_queue = OperationsQueue(service_provider)
self.operations_queue.start()
# Register request handlers
self._server.set_request_handler(COMPLETION_REQUEST, self.handle_completion_request)
self._server.set_request_handler(DEFINITION_REQUEST, self.handle_definition_request)
self._server.set_request_handler(COMPLETION_RESOLVE_REQUEST, self.handle_completion_resolve_request)
self._server.set_request_handler(DOCUMENT_FORMATTING_REQUEST, self.handle_doc_format_request)
self._server.set_request_handler(DOCUMENT_RANGE_FORMATTING_REQUEST, self.handle_doc_range_format_request)
self._server.set_notification_handler(LANGUAGE_FLAVOR_CHANGE_NOTIFICATION, self.handle_flavor_change)
# Register internal service notification handlers
self._connection_service.register_on_connect_callback(self.on_connect)
self._service_provider.server.add_shutdown_handler(self._handle_shutdown)
# REQUEST HANDLERS #####################################################
def handle_definition_request(self, request_context: RequestContext, text_document_position: TextDocumentPosition) -> None:
request_context.send_notification(STATUS_CHANGE_NOTIFICATION, StatusChangeParams(owner_uri=text_document_position.text_document.uri,
status="DefinitionRequested"))
def do_send_default_empty_response():
request_context.send_response([])
if self.should_skip_intellisense(text_document_position.text_document.uri):
do_send_default_empty_response()
return
script_file: ScriptFile = self._workspace_service.workspace.get_file(text_document_position.text_document.uri)
if script_file is None:
do_send_default_empty_response()
return
script_parse_info: ScriptParseInfo = self.get_script_parse_info(text_document_position.text_document.uri, create_if_not_exists=False)
if not script_parse_info or not script_parse_info.can_queue():
do_send_default_empty_response()
return
cursor_position: int = len(script_file.get_text_in_range(Range.from_data(0, 0, text_document_position.position.line,
text_document_position.position.character)))
text: str = script_file.get_all_text()
script_parse_info.document = Document(text, cursor_position)
operation = QueuedOperation(script_parse_info.connection_key,
functools.partial(self.send_definition_using_connected_completions, request_context, script_parse_info,
text_document_position),
functools.partial(do_send_default_empty_response))
self.operations_queue.add_operation(operation)
request_context.send_notification(STATUS_CHANGE_NOTIFICATION, StatusChangeParams(owner_uri=text_document_position.text_document.uri,
status="DefinitionRequestCompleted"))
def handle_completion_request(self, request_context: RequestContext, params: TextDocumentPosition) -> None:
"""
Lookup available completions when valid completion suggestions are requested.
Sends an array of CompletionItem objects over the wire
"""
response = []
def do_send_default_empty_response():
request_context.send_response(response)
script_file: ScriptFile = self._workspace_service.workspace.get_file(params.text_document.uri)
if script_file is None:
do_send_default_empty_response()
return
if self.should_skip_intellisense(script_file.file_uri):
do_send_default_empty_response()
return
script_parse_info: ScriptParseInfo = self.get_script_parse_info(params.text_document.uri, create_if_not_exists=False)
if not script_parse_info or not script_parse_info.can_queue():
self._send_default_completions(request_context, script_file, params)
else:
cursor_position: int = len(script_file.get_text_in_range(Range.from_data(0, 0, params.position.line, params.position.character)))
text: str = script_file.get_all_text()
script_parse_info.document = Document(text, cursor_position)
operation = QueuedOperation(script_parse_info.connection_key,
functools.partial(self.send_connected_completions, request_context, script_parse_info, params),
functools.partial(self._send_default_completions, request_context, script_file, params))
self.operations_queue.add_operation(operation)
def handle_completion_resolve_request(self, request_context: RequestContext, params: CompletionItem) -> None:
"""Fill in additional details for a CompletionItem. Returns the same CompletionItem over the wire"""
request_context.send_response(params)
def handle_flavor_change(self,
context: NotificationContext,
params: LanguageFlavorChangeParams) -> None:
"""
Processes a language flavor change notification, adding non-PGSQL files to a tracking set
so they can be excluded from intellisense processing
"""
if params is not None and params.uri is not None:
if params.language.lower() == 'sql' and params.flavor.lower() != 'pgsql':
self._non_pgsql_uris.add(params.uri)
else:
self._non_pgsql_uris.discard(params.uri)
def handle_doc_format_request(self, request_context: RequestContext, params: DocumentFormattingParams) -> None:
"""
Processes a formatting request by sending the entire documents text to sqlparse and returning a formatted document as a
single TextEdit
"""
response: List[TextEdit] = []
def do_send_default_empty_response():
request_context.send_response(response)
if self.should_skip_formatting(params.text_document.uri):
do_send_default_empty_response()
return
file: ScriptFile = self._workspace_service.workspace.get_file(params.text_document.uri)
if file is None:
do_send_default_empty_response()
return
sql: str = file.get_all_text()
if sql is None or sql.strip() == '':
do_send_default_empty_response()
return
edit: TextEdit = self._prepare_edit(file)
self._format_and_add_response(response, edit, sql, params)
do_send_default_empty_response()
def handle_doc_range_format_request(self, request_context: RequestContext, params: DocumentRangeFormattingParams) -> None:
"""
Processes a formatting request by sending the entire documents text to sqlparse and returning a formatted document as a
single TextEdit
"""
# Validate inputs and set up response
response: List[TextEdit] = []
def do_send_default_empty_response():
request_context.send_response(response)
if self.should_skip_formatting(params.text_document.uri):
do_send_default_empty_response()
return
file: ScriptFile = self._workspace_service.workspace.get_file(params.text_document.uri)
if file is None:
do_send_default_empty_response()
return
# Process the text range and respond with the edit
text_range = params.range
sql: str = file.get_text_in_range(text_range)
if sql is None or sql.strip() == '':
do_send_default_empty_response()
return
edit: TextEdit = TextEdit.from_data(text_range, None)
self._format_and_add_response(response, edit, sql, params)
do_send_default_empty_response()
# SERVICE NOTIFICATION HANDLERS #####################################################
def on_connect(self, conn_info: ConnectionInfo) -> threading.Thread:
"""Set up intellisense cache on connection to a new database"""
return utils.thread.run_as_thread(self._build_intellisense_cache_thread, conn_info)
# PROPERTIES ###########################################################
@property
def _workspace_service(self) -> WorkspaceService:
return self._service_provider[utils.constants.WORKSPACE_SERVICE_NAME]
@property
def _connection_service(self) -> ConnectionService:
return self._service_provider[utils.constants.CONNECTION_SERVICE_NAME]
@property
def should_lowercase(self) -> bool:
"""Looks up enable_lowercase_suggestions from the workspace config"""
return self._workspace_service.configuration.sql.intellisense.enable_lowercase_suggestions
# METHODS ##############################################################
def _handle_shutdown(self) -> None:
"""Stop the operations queue on shutdown"""
if self.operations_queue is not None:
self.operations_queue.stop()
def should_skip_intellisense(self, uri: str) -> bool:
return not self._workspace_service.configuration.sql.intellisense.enable_intellisense or not self.is_pgsql_uri(uri)
def should_skip_formatting(self, uri: str) -> bool:
return not self.is_pgsql_uri(uri)
def is_pgsql_uri(self, uri: str) -> bool:
"""
Checks if this URI can be treated as a PGSQL candidate for processing or should be skipped
"""
return uri not in self._non_pgsql_uris
def _build_intellisense_cache_thread(self, conn_info: ConnectionInfo) -> None:
# TODO build the cache. For now, sending intellisense ready as a test
scriptparseinfo: ScriptParseInfo = self.get_script_parse_info(conn_info.owner_uri, create_if_not_exists=True)
if scriptparseinfo is not None:
# This is a connection for an actual script in the workspace. Build the intellisense cache for it
connection_context: ConnectionContext = self.operations_queue.add_connection_context(conn_info, False)
# Wait until the intellisense is completed before sending back the message and caching the key
connection_context.intellisense_complete.wait()
scriptparseinfo.connection_key = connection_context.key
response = IntelliSenseReadyParams.from_data(conn_info.owner_uri)
self._server.send_notification(INTELLISENSE_READY_NOTIFICATION, response)
# TODO Ideally would support connected diagnostics for missing references
def _get_sqlparse_options(self, options: FormattingOptions) -> Dict[str, Any]:
sqlparse_options = {}
sqlparse_options['indent_tabs'] = not options.insert_spaces
if options.tab_size and options.tab_size > 0:
sqlparse_options['indent_width'] = options.tab_size
try:
# Look up workspace config in a try block in case it's not defined / set
format_options = self._workspace_service.configuration.pgsql.format
if format_options:
sqlparse_options = {**sqlparse_options, **format_options.__dict__}
except AttributeError:
# Indicates the config isn't defined. We are OK with this as it's not required
pass
return sqlparse_options
def _prepare_edit(self, file: ScriptFile) -> TextEdit:
file_line_count: int = len(file.file_lines)
last_char = len(file.file_lines[file_line_count - 1])
text_range = Range.from_data(0, 0, file_line_count - 1, last_char)
return TextEdit.from_data(text_range, None)
def _format_and_add_response(self, response: List[TextEdit], edit: TextEdit, text: str, params: DocumentFormattingParams) -> None:
options = self._get_sqlparse_options(params.options)
edit.new_text = sqlparse.format(text, **options)
response.append(edit)
def get_script_parse_info(self, owner_uri, create_if_not_exists=False) -> ScriptParseInfo:
with self._script_map_lock:
if owner_uri in self._script_map:
return self._script_map[owner_uri]
if create_if_not_exists:
script_parse_info = ScriptParseInfo()
self._script_map[owner_uri] = script_parse_info
return script_parse_info
return None
def _send_default_completions(self, request_context: RequestContext, script_file: ScriptFile, params: TextDocumentPosition) -> bool:
response = []
line: str = script_file.get_line(params.position.line)
(token_text, text_range) = TextUtilities.get_text_and_range(params.position, line)
if token_text:
completions = self._completion_helper.get_matches(token_text, text_range, self.should_lowercase)
response = completions
request_context.send_response(response)
return True
def send_connected_completions(self, request_context: RequestContext, scriptparseinfo: ScriptParseInfo,
params: TextDocumentPosition, context: ConnectionContext) -> bool:
if not context or not context.is_connected:
return False
# Else use the completer to query for completions
completer: PGCompleter = context.pgcompleter
completions: List[Completion] = completer.get_completions(scriptparseinfo.document, None)
if completions:
response = [LanguageService.to_completion_item(completion, params) for completion in completions]
request_context.send_response(response)
return True
# Else return false so the timeout task can be sent instead
return False
def send_definition_using_connected_completions(self, request_context: RequestContext, scriptparseinfo: ScriptParseInfo,
params: TextDocumentPosition, context: ConnectionContext) -> bool:
if not context or not context.is_connected:
return False
definition_result: DefinitionResult = None
completer: PGCompleter = context.pgcompleter
completions: List[Completion] = completer.get_completions(scriptparseinfo.document, None)
if completions:
word_under_cursor = scriptparseinfo.document.get_word_under_cursor()
matching_completion = next(completion for completion in completions if completion.display == word_under_cursor)
if matching_completion:
connection = self._connection_service.get_connection(params.text_document.uri,
ConnectionType.QUERY)
scripter_instance = scripter.Scripter(connection)
object_metadata = ObjectMetadata(None, None, matching_completion.display_meta,
matching_completion.display,
matching_completion.schema)
create_script = scripter_instance.script(ScriptOperation.CREATE, object_metadata)
if create_script:
with tempfile.NamedTemporaryFile(mode='wt', delete=False, encoding='utf-8', suffix='.sql', newline=None) as namedfile:
namedfile.write(create_script)
if namedfile.name:
file_uri = "file:///" + namedfile.name.strip('/')
location_in_script = Location(file_uri, Range(Position(0, 1), Position(1, 1)))
definition_result = DefinitionResult(False, None, [location_in_script, ])
request_context.send_response(definition_result.locations)
return True
if definition_result is None:
request_context.send_response(DefinitionResult(True, '', []))
return False
@classmethod
def to_completion_item(cls, completion: Completion, params: TextDocumentPosition) -> CompletionItem:
key = completion.text
start_position = LanguageService._get_start_position(params.position, completion.start_position)
text_range = Range(start=start_position, end=params.position)
kind = DISPLAY_META_MAP.get(completion.display_meta, CompletionItemKind.Unit)
completion_item = CompletionItem()
completion_item.label = key
completion_item.detail = completion.display
completion_item.insert_text = key
completion_item.kind = kind
completion_item.text_edit = TextEdit.from_data(text_range, key)
# Add a sort text to put keywords after all other items
completion_item.sort_text = f'~{key}' if completion_item.kind == CompletionItemKind.Keyword else key
return completion_item
@classmethod
def _get_start_position(cls, end: Position, start_index: int) -> Position:
start_col = end.character + start_index
if start_col < 0:
# Should not happen - for now, just set to 0 and assume it's a mistake
start_col = 0
return Position(end.line, start_col)
| 53.112745 | 141 | 0.685695 |
ace3aec8a64ed4836bb3b4ebe7fcb96e2bb539cd | 745 | py | Python | six_or_bust.py | JakeMcgough/p01.1 | 8b9905e57a475ab9ef3496c1de8bfad6b79c273f | [
"MIT"
] | null | null | null | six_or_bust.py | JakeMcgough/p01.1 | 8b9905e57a475ab9ef3496c1de8bfad6b79c273f | [
"MIT"
] | null | null | null | six_or_bust.py | JakeMcgough/p01.1 | 8b9905e57a475ab9ef3496c1de8bfad6b79c273f | [
"MIT"
] | null | null | null | """
Problem:
A desperate gambler has bet all of his money on a final last throw of the die.
If he rolls a six, he survives to fight another day.
Anything else, and he gets thrown to the thugs waiting for him in the back alley.
The gone_bust function takes an integer n, and should print either "Pheww!" or
"HELP! HELP!" depending on whether he survives or not.
Tests:
>>> gone_bust(2)
HELP! HELP!
>>> gone_bust(6)
Pheww!
>>> gone_bust(10)
HELP! HELP!
"""
# This code tests your solution. Don't edit it.
import doctest
def run_tests():
doctest.testmod(verbose=True)
# Edit this function:
def gone_bust(n):
if n == (6):
print("Pheww!")
else:
print("HELP! HELP!")
| 19.605263 | 85 | 0.640268 |
ace3af84a7c520a4b164fa8e070774f548bfdd3c | 943 | py | Python | api/api/views/application_list_view.py | arunrapolu4491/representation-grant-app | e9e70cb095be68211539a533708023c8842cded4 | [
"Apache-2.0"
] | null | null | null | api/api/views/application_list_view.py | arunrapolu4491/representation-grant-app | e9e70cb095be68211539a533708023c8842cded4 | [
"Apache-2.0"
] | null | null | null | api/api/views/application_list_view.py | arunrapolu4491/representation-grant-app | e9e70cb095be68211539a533708023c8842cded4 | [
"Apache-2.0"
] | null | null | null | from django.http import HttpResponseForbidden, Http404
from rest_framework import permissions
from rest_framework import generics
from rest_framework.response import Response
from api.models.application import Application
from api.serializers import ApplicationListSerializer
class ApplicationListView(generics.ListAPIView):
    """List all applications belonging to the authenticated user."""

    permission_classes = (permissions.IsAuthenticated,)

    def get_app_list(self, id):
        # Queryset of applications owned by ``id``; a missing-application
        # error is translated into an HTTP 404.
        try:
            return Application.objects.filter(user_id=id)
        except Application.DoesNotExist:
            raise Http404

    def get(self, request, format=None):
        """Return the serialized applications of the requesting user."""
        requester_id = request.user.id
        if not requester_id:
            return HttpResponseForbidden("User id not provided")
        queryset = self.get_app_list(requester_id)
        return Response(ApplicationListSerializer(queryset, many=True).data)
| 32.517241 | 71 | 0.730647 |
ace3b00f68aeb8ad05772b35666b5835a47edbe9 | 2,865 | py | Python | girder_annotation/girder_large_image_annotation/__init__.py | naglepuff/large_image | 4e928166f228fe894c38e4b01af5370e72f7229c | [
"Apache-2.0"
] | 85 | 2017-03-10T09:48:17.000Z | 2022-03-31T18:55:58.000Z | girder_annotation/girder_large_image_annotation/__init__.py | naglepuff/large_image | 4e928166f228fe894c38e4b01af5370e72f7229c | [
"Apache-2.0"
] | 248 | 2017-01-27T16:11:13.000Z | 2022-03-31T14:05:18.000Z | girder_annotation/girder_large_image_annotation/__init__.py | naglepuff/large_image | 4e928166f228fe894c38e4b01af5370e72f7229c | [
"Apache-2.0"
] | 33 | 2017-03-10T14:06:35.000Z | 2022-03-19T08:32:06.000Z | #############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
from girder.exceptions import ValidationException
from girder.plugin import GirderPlugin, getPlugin
from girder.settings import SettingDefault
from girder.utility import setting_utilities
from girder.utility.model_importer import ModelImporter
from . import constants
from .models.annotation import Annotation
from .rest.annotation import AnnotationResource
# Validators
@setting_utilities.validator({
    constants.PluginSettings.LARGE_IMAGE_ANNOTATION_HISTORY,
})
def validateBoolean(doc):
    """Coerce the setting's value to a bool; reject anything non-boolean."""
    lowered = str(doc['value']).lower()
    if lowered not in ('false', 'true', ''):
        raise ValidationException('%s must be a boolean.' % doc['key'], 'value')
    # '' and 'true' both count as True; only an explicit 'false' is False.
    doc['value'] = lowered != 'false'
# Defaults
# Defaults that have fixed values can just be added to the system defaults
# dictionary.
# Register this plugin's fixed default setting values with Girder.
SettingDefault.defaults.update({
    constants.PluginSettings.LARGE_IMAGE_ANNOTATION_HISTORY: True,
})
class LargeImageAnnotationPlugin(GirderPlugin):
    """Girder plugin entry point that adds annotation support on top of large_image."""

    DISPLAY_NAME = 'Large Image Annotation'
    CLIENT_SOURCE_PATH = 'web_client'

    def load(self, info):
        """Load the plugin: register the Annotation model and REST resource,
        migrate the annotation database once, and extend the copy endpoints.

        :param info: Girder's plugin load context (contains ``apiRoot``).
        """
        # The large_image plugin must be loaded first; this plugin builds on it.
        getPlugin('large_image').load(info)

        ModelImporter.registerModel('annotation', Annotation, 'large_image')
        info['apiRoot'].annotation = AnnotationResource()

        # Ask for some models to make sure their singletons are initialized.
        # Also migrate the database as a one-time action.
        Annotation()._migrateDatabase()

        # add copyAnnotations option to POST resource/copy, POST item/{id}/copy
        # and POST folder/{id}/copy
        info['apiRoot'].resource.copyResources.description.param(
            'copyAnnotations', 'Copy annotations when copying resources (default true)',
            required=False, dataType='boolean')
        info['apiRoot'].item.copyItem.description.param(
            'copyAnnotations', 'Copy annotations when copying item (default true)',
            required=False, dataType='boolean')
        info['apiRoot'].folder.copyFolder.description.param(
            'copyAnnotations', 'Copy annotations when copying folder (default true)',
            required=False, dataType='boolean')
| 39.246575 | 88 | 0.683421 |
ace3b089830c9a2eb491dbb910982466a65a01a2 | 2,397 | py | Python | test/py2sql/test_db_hierarchy_ops.py | gevorgyana/lab3 | bbd5dfb055f6028d69c22ed1779ab71190f08b3c | [
"MIT"
] | null | null | null | test/py2sql/test_db_hierarchy_ops.py | gevorgyana/lab3 | bbd5dfb055f6028d69c22ed1779ab71190f08b3c | [
"MIT"
] | null | null | null | test/py2sql/test_db_hierarchy_ops.py | gevorgyana/lab3 | bbd5dfb055f6028d69c22ed1779ab71190f08b3c | [
"MIT"
] | null | null | null | import unittest
from dataclasses import dataclass
import sys
import os
sys.path.insert(0, os.getcwd())
from py2sql import py2sql
class TestSaveDeleteObject(unittest.TestCase):
    """Integration tests for Py2SQL.save_hierarchy / delete_hierarchy.

    Requires a reachable local PostgreSQL instance; each test opens its own
    connection and cleans up the tables it creates.
    """

    db_name = "test"
    db_config = py2sql.DBConnectionInfo(db_name, "localhost", "adminadminadmin", "postgres")

    # Class hierarchy under test: Foo is the root; Miss is deliberately
    # unrelated and must NOT be saved when the Foo hierarchy is persisted.
    @dataclass
    class Foo:
        foo_1: str = "s1"
        foo_2: int = 2
    @dataclass
    class Miss:
        miss: str = "-1"
    @dataclass
    class Foo_der1(Foo):
        der_1: int = 3
    @dataclass
    class Foo_der11(Foo_der1):
        der_11: str = "dd"
    @dataclass
    class Foo_der2(Foo):
        der_2: bool = True

    def test_create_hierarchy(self):
        """Saving the Foo hierarchy creates a table per class, with inherited columns."""
        db_con_info = py2sql.DBConnectionInfo("test", "localhost", "adminadminadmin", "postgres")
        py2sql.Py2SQL.db_connect(db_con_info)
        py2sql.Py2SQL.save_hierarchy(TestSaveDeleteObject.Foo)
        tables = py2sql.Py2SQL.db_tables()
        self.assertTrue("foo" in tables and "foo_der1" in tables and
                        "foo_der11" in tables and "foo_der2" in tables
                        and "miss" not in tables)
        # Derived tables must contain their own fields plus every inherited one.
        foo_der11_columns = [j for i in py2sql.Py2SQL._select_from_table('foo_der11') for j in i]
        self.assertTrue('id' in foo_der11_columns and 'foo_1' in foo_der11_columns and
                        'foo_2' in foo_der11_columns and 'der_1' in foo_der11_columns and
                        'der_11' in foo_der11_columns)
        foo_der1_columns = [j for i in py2sql.Py2SQL._select_from_table('foo_der1') for j in i]
        self.assertTrue('id' in foo_der1_columns and 'foo_1' in foo_der1_columns and
                        'foo_2' in foo_der1_columns and 'der_1' in foo_der1_columns)
        py2sql.Py2SQL.delete_hierarchy(TestSaveDeleteObject.Foo)
        py2sql.Py2SQL.db_disconnect()

    def test_delete_hierarchy(self):
        """Deleting the hierarchy drops every table that save_hierarchy created."""
        db_con_info = py2sql.DBConnectionInfo("test", "localhost", "adminadminadmin", "postgres")
        py2sql.Py2SQL.db_connect(db_con_info)
        py2sql.Py2SQL.save_hierarchy(TestSaveDeleteObject.Foo)
        py2sql.Py2SQL.delete_hierarchy(TestSaveDeleteObject.Foo)
        tables = py2sql.Py2SQL.db_tables()
        self.assertFalse("foo" in tables or "foo_der1" in tables or
                         "foo_der11" in tables or "foo_der2" in tables
                         or "miss" in tables)
        py2sql.Py2SQL.db_disconnect()
| 33.760563 | 97 | 0.649562 |
ace3b1150461551b08eda38848985adbf2659c04 | 5,909 | py | Python | src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2022_01_02_preview/operations/_private_link_resources_operations.py | zjpjack/azure-cli-extensions | 17dd637317633ea7984e168900bdf4e62c265096 | [
"MIT"
] | 1 | 2022-03-09T08:59:13.000Z | 2022-03-09T08:59:13.000Z | src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2022_01_02_preview/operations/_private_link_resources_operations.py | zjpjack/azure-cli-extensions | 17dd637317633ea7984e168900bdf4e62c265096 | [
"MIT"
] | null | null | null | src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2022_01_02_preview/operations/_private_link_resources_operations.py | zjpjack/azure-cli-extensions | 17dd637317633ea7984e168900bdf4e62c265096 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Signature of the optional `cls` response-transform callback accepted by operations.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Shared msrest serializer for URL/query/header parameter encoding.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for listing a managed cluster's private link resources."""
    api_version = "2022-01-02-preview"
    accept = "application/json"

    # Resolve the URL template, substituting the validated path arguments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources')
    url = _format_url_section(url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    })

    # Query string and headers.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class PrivateLinkResourcesOperations(object):
    """PrivateLinkResourcesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.containerservice.v2022_01_02_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> "_models.PrivateLinkResourcesListResult":
        """Gets a list of private link resources in the specified managed cluster.

        To learn more about private clusters, see:
        https://docs.microsoft.com/azure/aks/private-clusters.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateLinkResourcesListResult, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2022_01_02_preview.models.PrivateLinkResourcesListResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkResourcesListResult"]
        # Map well-known auth/404/conflict statuses to typed exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Build and send the GET request through the client pipeline.
        request = build_list_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            template_url=self.list.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PrivateLinkResourcesListResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources'}  # type: ignore
ace3b15b586dd397db3c54a773d3d2cce316e816 | 1,246 | py | Python | core/models/user.py | daichi-yoshikawa/django-boilerplate | bcae204d28ed83d50b2300d97e7329f54ed9f7da | [
"MIT"
] | 4 | 2021-02-17T01:53:06.000Z | 2021-09-30T13:16:26.000Z | core/models/user.py | daichi-yoshikawa/django-boilerplate | bcae204d28ed83d50b2300d97e7329f54ed9f7da | [
"MIT"
] | null | null | null | core/models/user.py | daichi-yoshikawa/django-boilerplate | bcae204d28ed83d50b2300d97e7329f54ed9f7da | [
"MIT"
] | 1 | 2022-03-18T08:39:29.000Z | 2022-03-18T08:39:29.000Z | import os
from django.db import models
from api.common.utils import generate_random_letters
from core.models.base_models import BaseUserModel
def get_user_image_path(instance, filename):
    """Build the storage path for a user's uploaded image (FileField upload_to).

    Keeps the original file extension but replaces the basename with a random
    24-letter token so uploads cannot collide or leak the uploader's file name.

    :param instance: the ``User`` model instance the image belongs to.
    :param filename: the name of the file as uploaded by the client.
    :return: path of the form ``images/user/<user id>/<random token><ext>``.
    """
    _, ext = os.path.splitext(filename)
    filename = generate_random_letters(length=24) + ext
    # Bug fix: the randomized filename was computed but then discarded,
    # returning a constant path for every upload. Use it in the result.
    return f'images/user/{instance.id}/{filename}'
class User(BaseUserModel):
    """Application user account with contact, profile-image and locale fields."""

    class Meta(BaseUserModel.Meta):
        db_table = 'users'

    # Unique login/contact address.
    email = models.EmailField(
        max_length=200, unique=True, null=False, blank=False)
    first_name = models.CharField(
        max_length=200, unique=False, null=False, blank=False)
    last_name = models.CharField(
        max_length=200, unique=False, null=False, blank=False)
    # Optional profile image; stored under a per-user randomized path.
    image = models.ImageField(
        verbose_name='user image', null=True, blank=True,
        upload_to=get_user_image_path)
    # Integer status code; 0 is the default initial state.
    status = models.IntegerField(
        unique=False, null=False, blank=False, default=0)
    # Locale preferences (IETF language code and IANA timezone name).
    language_code = models.CharField(
        max_length=20, null=False, blank=False, default='en')
    timezone_code = models.CharField(
        max_length=200, null=False, blank=False, default='America/Los_Angeles')

    def __str__(self):
        """Human-readable identification: id, names and email."""
        return (f'({self.id}){self.first_name}, '
                f'{self.last_name}, '
                f'{self.email}')
| 31.948718 | 77 | 0.714286 |
ace3b165a865c910fded918f231c9729e7022d3c | 665 | py | Python | python/examples/example1.py | JacekPierzchlewski/cosniwa | d7b86dd452ab3df06f42ac205c2d1bcfbae2f288 | [
"BSD-2-Clause"
] | 2 | 2017-03-18T12:54:13.000Z | 2017-05-02T09:51:17.000Z | python/examples/example1.py | JacekPierzchlewski/cosniwa | d7b86dd452ab3df06f42ac205c2d1bcfbae2f288 | [
"BSD-2-Clause"
] | null | null | null | python/examples/example1.py | JacekPierzchlewski/cosniwa | d7b86dd452ab3df06f42ac205c2d1bcfbae2f288 | [
"BSD-2-Clause"
] | null | null | null | #
# example1.py [version 1.0]
# CoSniWa: COde SNIppet stopWAtch [Python port] - example 1
#
# Example1: The simplest example
#
# read more on: www.speedupcode.com
#
# (c) Jacek Pierzchlewski, 2017 jacek@pierzchlewski.com
# license: BSD-2-Clause.
#
try:
import cCosniwa as csw
except ImportError:
print("\nERROR: cCosniwa was not found! \n")
def add(iA, iB):
    """
    Add iA + iB in a deliberately slow way (one increment per loop pass).

    The loop exists to burn a measurable amount of time for the stopwatch
    example.

    :param iA: first addend.
    :param iB: second addend; must be a non-negative integer (it is the
        number of increments performed).
    :return: the sum iA + iB. (Bug fix: the result was previously computed
        and then discarded; returning it keeps callers backward-compatible
        while making the helper actually usable.)
    """
    for _ in range(iB):
        iA = iA + 1
    return iA
def main():
    """Time one call to add() with Cosniwa (registered as snippet 1) and print the report."""
    csw.call_start(1)   # Start code snippet
    add(1, 100000)
    csw.call_stop(1)    # Stop code snippet
    # Print the results
    csw.resultc()
if __name__ == '__main__':
main()
| 17.5 | 60 | 0.62406 |
ace3b1b782cdaf367b08c75a11e90e6925eda24d | 38,417 | py | Python | eval.py | zllrunning/yolact | 0d9f1dd7ee8ee7d9a92d4d0089915c7fa8d9edd0 | [
"MIT"
] | 1 | 2020-10-04T19:47:58.000Z | 2020-10-04T19:47:58.000Z | eval.py | zllrunning/yolact | 0d9f1dd7ee8ee7d9a92d4d0089915c7fa8d9edd0 | [
"MIT"
] | 1 | 2021-07-05T14:17:21.000Z | 2021-07-05T14:17:21.000Z | eval.py | zllrunning/yolact | 0d9f1dd7ee8ee7d9a92d4d0089915c7fa8d9edd0 | [
"MIT"
] | 2 | 2020-03-31T07:39:25.000Z | 2021-03-04T01:34:33.000Z | from data import COCODetection, MEANS, COLORS, COCO_CLASSES
from yolact import Yolact
from utils.augmentations import BaseTransform, FastBaseTransform, Resize
from utils.functions import MovingAverage, ProgressBar
from layers.box_utils import jaccard, center_size
from utils import timer
from utils.functions import SavePath
from layers.output_utils import postprocess, undo_image_transformation
import pycocotools
from data import cfg, set_cfg, set_dataset
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import argparse
import time
import random
import cProfile
import pickle
import json
import os
from pathlib import Path
from collections import OrderedDict
from PIL import Image
import matplotlib.pyplot as plt
import cv2
def str2bool(v):
    """Parse a human-friendly boolean string (case-insensitive).

    Accepts yes/true/t/y/1 as True and no/false/f/n/0 as False; raises
    argparse.ArgumentTypeError for anything else, so it can be used as an
    argparse ``type=`` callable.
    """
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def parse_args(argv=None):
    """Parse command-line options for YOLACT evaluation into the module-global ``args``.

    :param argv: optional list of argument strings; None means sys.argv[1:].
    Side effects: sets the global ``args`` namespace; forces output_coco_json
    on when output_web_json is requested; seeds ``random`` when --seed is given.
    """
    parser = argparse.ArgumentParser(
        description='YOLACT COCO Evaluation')
    parser.add_argument('--trained_model',
                        default='weights/ssd300_mAP_77.43_v2.pth', type=str,
                        help='Trained state_dict file path to open. If "interrupt", this will open the interrupt file.')
    parser.add_argument('--top_k', default=5, type=int,
                        help='Further restrict the number of predictions to parse')
    parser.add_argument('--cuda', default=True, type=str2bool,
                        help='Use cuda to evaulate model')
    parser.add_argument('--cross_class_nms', default=True, type=str2bool,
                        help='Whether to use cross-class nms (faster) or do nms per class')
    parser.add_argument('--fast_nms', default=True, type=str2bool,
                        help='Whether to use a faster, but not entirely correct version of NMS.')
    parser.add_argument('--display_masks', default=True, type=str2bool,
                        help='Whether or not to display masks over bounding boxes')
    parser.add_argument('--display_bboxes', default=True, type=str2bool,
                        help='Whether or not to display bboxes around masks')
    parser.add_argument('--display_text', default=True, type=str2bool,
                        help='Whether or not to display text (class [score])')
    parser.add_argument('--display_scores', default=True, type=str2bool,
                        help='Whether or not to display scores in addition to classes')
    parser.add_argument('--display', dest='display', action='store_true',
                        help='Display qualitative results instead of quantitative ones.')
    parser.add_argument('--shuffle', dest='shuffle', action='store_true',
                        help='Shuffles the images when displaying them. Doesn\'t have much of an effect when display is off though.')
    parser.add_argument('--ap_data_file', default='results/ap_data.pkl', type=str,
                        help='In quantitative mode, the file to save detections before calculating mAP.')
    parser.add_argument('--resume', dest='resume', action='store_true',
                        help='If display not set, this resumes mAP calculations from the ap_data_file.')
    parser.add_argument('--max_images', default=-1, type=int,
                        help='The maximum number of images from the dataset to consider. Use -1 for all.')
    parser.add_argument('--output_coco_json', dest='output_coco_json', action='store_true',
                        help='If display is not set, instead of processing IoU values, this just dumps detections into the coco json file.')
    parser.add_argument('--bbox_det_file', default='results/bbox_detections.json', type=str,
                        help='The output file for coco bbox results if --coco_results is set.')
    parser.add_argument('--mask_det_file', default='results/mask_detections.json', type=str,
                        help='The output file for coco mask results if --coco_results is set.')
    parser.add_argument('--config', default=None,
                        help='The config object to use.')
    parser.add_argument('--output_web_json', dest='output_web_json', action='store_true',
                        help='If display is not set, instead of processing IoU values, this dumps detections for usage with the detections viewer web thingy.')
    parser.add_argument('--web_det_path', default='web/dets/', type=str,
                        help='If output_web_json is set, this is the path to dump detections into.')
    parser.add_argument('--no_bar', dest='no_bar', action='store_true',
                        help='Do not output the status bar. This is useful for when piping to a file.')
    parser.add_argument('--display_lincomb', default=False, type=str2bool,
                        help='If the config uses lincomb masks, output a visualization of how those masks are created.')
    parser.add_argument('--benchmark', default=False, dest='benchmark', action='store_true',
                        help='Equivalent to running display mode but without displaying an image.')
    parser.add_argument('--no_sort', default=False, dest='no_sort', action='store_true',
                        help='Do not sort images by hashed image ID.')
    parser.add_argument('--seed', default=None, type=int,
                        help='The seed to pass into random.seed. Note: this is only really for the shuffle and does not (I think) affect cuda stuff.')
    parser.add_argument('--mask_proto_debug', default=False, dest='mask_proto_debug', action='store_true',
                        help='Outputs stuff for scripts/compute_mask.py.')
    parser.add_argument('--no_crop', default=False, dest='crop', action='store_false',
                        help='Do not crop output masks with the predicted bounding box.')
    parser.add_argument('--image', default=None, type=str,
                        help='A path to an image to use for display.')
    parser.add_argument('--images', default=None, type=str,
                        help='An input folder of images and output folder to save detected images. Should be in the format input->output.')
    parser.add_argument('--video', default=None, type=str,
                        help='A path to a video to evaluate on.')
    parser.add_argument('--video_multiframe', default=1, type=int,
                        help='The number of frames to evaluate in parallel to make videos play at higher fps.')
    parser.add_argument('--score_threshold', default=0, type=float,
                        help='Detections with a score under this threshold will not be considered. This currently only works in display mode.')
    parser.add_argument('--dataset', default=None, type=str,
                        help='If specified, override the dataset specified in the config with this one (example: coco2017_dataset).')
    parser.add_argument('--detect', default=False, dest='detect', action='store_true',
                        help='Don\'t evauluate the mask branch at all and only do object detection. This only works for --display and --benchmark.')
    parser.set_defaults(no_bar=False, display=False, resume=False, output_coco_json=False, output_web_json=False, shuffle=False,
                        benchmark=False, no_sort=False, no_hash=False, mask_proto_debug=False, crop=True, detect=False)
    global args
    args = parser.parse_args(argv)
    # Web JSON output is layered on top of the COCO JSON output path.
    if args.output_web_json:
        args.output_coco_json = True
    if args.seed is not None:
        random.seed(args.seed)
# COCO-style IoU thresholds: 0.50, 0.55, ..., 0.95.
iou_thresholds = [x / 100 for x in range(50, 100, 5)]
coco_cats = [] # Call prep_coco_cats to fill this
coco_cats_inv = {}  # Inverse of coco_cats: COCO category id -> transformed index.
def prep_display(dets_out, img, gt, gt_masks, h, w, undo_transform=True, class_color=False):
    """
    Draw the top-k detections (masks, boxes, class/score text) onto the image
    and return the annotated frame as a uint8 numpy array.

    Note: If undo_transform=False then im_h and im_w are allowed to be None.
    gt and gt_masks are also allowed to be none (until I reimplement that functionality).
    """
    if undo_transform:
        img_numpy = undo_image_transformation(img, w, h)
        img_gpu = torch.Tensor(img_numpy).cuda()
    else:
        img_gpu = img / 255.0
        h, w, _ = img.shape
    with timer.env('Postprocess'):
        t = postprocess(dets_out, w, h, visualize_lincomb=args.display_lincomb, crop_masks=args.crop, score_threshold=args.score_threshold)
        torch.cuda.synchronize()
    with timer.env('Copy'):
        if cfg.eval_mask_branch:
            masks = t[3][:args.top_k] # We'll need this later
        classes, scores, boxes = [x[:args.top_k].cpu().numpy() for x in t[:3]]
    if classes.shape[0] == 0:
        # No detections: return the (possibly de-transformed) image unchanged.
        return (img_gpu * 255).byte().cpu().numpy()
    def get_color(j):
        # Color per detection index (or per class when class_color is set);
        # channels are swapped to BGR when the image was not de-transformed.
        color = COLORS[(classes[j] * 5 if class_color else j * 5) % len(COLORS)]
        if not undo_transform:
            color = (color[2], color[1], color[0])
        return color
    # Draw masks first on the gpu
    if args.display_masks and cfg.eval_mask_branch:
        for j in reversed(range(min(args.top_k, classes.shape[0]))):
            if scores[j] >= args.score_threshold:
                color = get_color(j)
                mask = masks[j, :, :, None]
                mask_color = mask @ (torch.Tensor(color).view(1, 3) / 255.0)
                mask_alpha = 0.45
                # Alpha only the region of the image that contains the mask
                img_gpu = img_gpu * (1 - mask) \
                    + img_gpu * mask * (1-mask_alpha) + mask_color * mask_alpha
    # Then draw the stuff that needs to be done on the cpu
    # Note, make sure this is a uint8 tensor or opencv will not anti alias text for whatever reason
    img_numpy = (img_gpu * 255).byte().cpu().numpy()
    if args.display_text or args.display_bboxes:
        for j in reversed(range(min(args.top_k, classes.shape[0]))):
            score = scores[j]
            if scores[j] >= args.score_threshold:
                x1, y1, x2, y2 = boxes[j, :]
                color = get_color(j)
                if args.display_bboxes:
                    cv2.rectangle(img_numpy, (x1, y1), (x2, y2), color, 1)
                if args.display_text:
                    _class = COCO_CLASSES[classes[j]]
                    text_str = '%s: %.2f' % (_class, score) if args.display_scores else _class
                    font_face = cv2.FONT_HERSHEY_DUPLEX
                    font_scale = 0.6
                    font_thickness = 1
                    text_w, text_h = cv2.getTextSize(text_str, font_face, font_scale, font_thickness)[0]
                    text_pt = (x1, y1 - 3)
                    text_color = [255, 255, 255]
                    # Filled background rectangle behind the label text.
                    cv2.rectangle(img_numpy, (x1, y1), (x1 + text_w, y1 - text_h - 4), color, -1)
                    cv2.putText(img_numpy, text_str, text_pt, font_face, font_scale, text_color, font_thickness, cv2.LINE_AA)
    return img_numpy
def prep_benchmark(dets_out, h, w):
    """Run postprocessing and device-to-host copies purely for timing; results are discarded."""
    with timer.env('Postprocess'):
        t = postprocess(dets_out, w, h, crop_masks=args.crop, score_threshold=args.score_threshold)
    with timer.env('Copy'):
        classes, scores, boxes, masks = [x[:args.top_k].cpu().numpy() for x in t]
    with timer.env('Sync'):
        # Just in case
        torch.cuda.synchronize()
def prep_coco_cats(cats):
    """ Prepare inverted table for category id lookup given a coco cats object. """
    # NOTE(review): this appends into the module-level coco_cats / coco_cats_inv;
    # calling it more than once per process would append duplicate entries —
    # confirm it is only invoked a single time.
    name_lookup = {}
    for _id, cat_obj in cats.items():
        name_lookup[cat_obj['name']] = _id
    # Bit of a roundabout way to do this but whatever
    for i in range(len(COCO_CLASSES)):
        coco_cats.append(name_lookup[COCO_CLASSES[i]])
        coco_cats_inv[coco_cats[-1]] = i
def get_coco_cat(transformed_cat_id):
    """ Map a transformed class index in [0, 80) (an index into COCO_CLASSES) to its COCO category id. """
    return coco_cats[transformed_cat_id]
def get_transformed_cat(coco_cat_id):
    """ Map a COCO category id back to its transformed index in [0, 80) (an index into COCO_CLASSES). """
    return coco_cats_inv[coco_cat_id]
class Detections:
    """Accumulates detections in COCO result format for later JSON export."""

    def __init__(self):
        # Parallel lists of COCO-format bbox and mask result dicts.
        self.bbox_data = []
        self.mask_data = []

    def add_bbox(self, image_id:int, category_id:int, bbox:list, score:float):
        """ Note that bbox should be a list or tuple of (x1, y1, x2, y2) """
        # Convert to COCO's (x, y, width, height) convention.
        bbox = [bbox[0], bbox[1], bbox[2]-bbox[0], bbox[3]-bbox[1]]

        # Round to the nearest 10th to avoid huge file sizes, as COCO suggests
        bbox = [round(float(x)*10)/10 for x in bbox]

        self.bbox_data.append({
            'image_id': int(image_id),
            'category_id': get_coco_cat(int(category_id)),
            'bbox': bbox,
            'score': float(score)
        })

    def add_mask(self, image_id:int, category_id:int, segmentation:np.ndarray, score:float):
        """ The segmentation should be the full mask, the size of the image and with size [h, w]. """
        rle = pycocotools.mask.encode(np.asfortranarray(segmentation.astype(np.uint8)))
        rle['counts'] = rle['counts'].decode('ascii') # json.dump doesn't like bytes strings

        self.mask_data.append({
            'image_id': int(image_id),
            'category_id': get_coco_cat(int(category_id)),
            'segmentation': rle,
            'score': float(score)
        })

    def dump(self):
        """Write the collected bbox and mask detections to the JSON files named in args."""
        dump_arguments = [
            (self.bbox_data, args.bbox_det_file),
            (self.mask_data, args.mask_det_file)
        ]

        for data, path in dump_arguments:
            with open(path, 'w') as f:
                json.dump(data, f)

    def dump_web(self):
        """ Dumps it in the format for my web app. Warning: bad code ahead! """
        config_outs = ['preserve_aspect_ratio', 'use_prediction_module',
                       'use_yolo_regressors', 'use_prediction_matching',
                       'train_masks']

        output = {
            'info' : {
                'Config': {key: getattr(cfg, key) for key in config_outs},
            }
        }

        image_ids = list(set([x['image_id'] for x in self.bbox_data]))
        image_ids.sort()
        image_lookup = {_id: idx for idx, _id in enumerate(image_ids)}

        output['images'] = [{'image_id': image_id, 'dets': []} for image_id in image_ids]

        # These should already be sorted by score with the way prep_metrics works.
        for bbox, mask in zip(self.bbox_data, self.mask_data):
            image_obj = output['images'][image_lookup[bbox['image_id']]]
            image_obj['dets'].append({
                'score': bbox['score'],
                'bbox': bbox['bbox'],
                'category': COCO_CLASSES[get_transformed_cat(bbox['category_id'])],
                'mask': mask['segmentation'],
            })

        with open(os.path.join(args.web_det_path, '%s.json' % cfg.name), 'w') as f:
            json.dump(output, f)
def mask_iou(mask1, mask2, iscrowd=False):
    """
    Compute pairwise mask IoU between two sets of flattened binary masks.

    Inputs are matricies of size _ x N (one mask per row, N = h*w pixels).
    Output is size _1 x _2, returned on the CPU.
    Note: if iscrowd is True, then mask2 should be the crowd, and the result
    is intersection / area(mask1) per COCO's crowd-region convention.
    """
    # Use the timer's context-manager form (as bbox_iou and the rest of this
    # file do) so the 'Mask IoU' timer is stopped even if an exception is
    # raised mid-computation; the bare start/stop pair could leak otherwise.
    with timer.env('Mask IoU'):
        intersection = torch.matmul(mask1, mask2.t())
        area1 = torch.sum(mask1, dim=1).view(1, -1)
        area2 = torch.sum(mask2, dim=1).view(1, -1)
        union = (area1.t() + area2) - intersection

        if iscrowd:
            # Make sure to brodcast to the right dimension
            ret = intersection / area1.t()
        else:
            ret = intersection / union
    return ret.cpu()
def bbox_iou(bbox1, bbox2, iscrowd=False):
    """Pairwise box IoU via layers.box_utils.jaccard; the result is moved to the CPU.

    If iscrowd is True, bbox2 is treated as crowd regions (see jaccard).
    """
    with timer.env('BBox IoU'):
        ret = jaccard(bbox1, bbox2, iscrowd)
    return ret.cpu()
def prep_metrics(ap_data, dets, img, gt, gt_masks, h, w, num_crowd, image_id, detections:Detections=None):
    """Accumulate this image's matches into ap_data (one APDataObject per
    (iou_type, iou_threshold, class) cell), or — when --output_coco_json is
    set — dump the detections into the given Detections object instead.

    Crowd annotations are assumed to be the last num_crowd entries of gt /
    gt_masks; a detection matching only a crowd region is ignored rather
    than counted as a false positive, mirroring COCOEval.
    """
    if not args.output_coco_json:
        with timer.env('Prepare gt'):
            gt_boxes = torch.Tensor(gt[:, :4])
            # Ground-truth boxes arrive normalized; scale to pixel coordinates.
            gt_boxes[:, [0, 2]] *= w
            gt_boxes[:, [1, 3]] *= h
            gt_classes = list(gt[:, 4].astype(int))
            gt_masks = torch.Tensor(gt_masks).view(-1, h*w)
            if num_crowd > 0:
                # Crowd annotations are the trailing num_crowd rows.
                split = lambda x: (x[-num_crowd:], x[:-num_crowd])
                crowd_boxes , gt_boxes = split(gt_boxes)
                crowd_masks , gt_masks = split(gt_masks)
                crowd_classes, gt_classes = split(gt_classes)
    with timer.env('Postprocess'):
        classes, scores, boxes, masks = postprocess(dets, w, h, crop_masks=args.crop, score_threshold=args.score_threshold)
        if classes.size(0) == 0:
            return
        classes = list(classes.cpu().numpy().astype(int))
        scores = list(scores.cpu().numpy().astype(float))
        masks = masks.view(-1, h*w).cuda()
        boxes = boxes.cuda()
    if args.output_coco_json:
        with timer.env('JSON Output'):
            boxes = boxes.cpu().numpy()
            masks = masks.view(-1, h, w).cpu().numpy()
            for i in range(masks.shape[0]):
                # Make sure that the bounding box actually makes sense and a mask was produced
                if (boxes[i, 3] - boxes[i, 1]) * (boxes[i, 2] - boxes[i, 0]) > 0:
                    detections.add_bbox(image_id, classes[i], boxes[i,:], scores[i])
                    detections.add_mask(image_id, classes[i], masks[i,:,:], scores[i])
            return
    with timer.env('Eval Setup'):
        num_pred = len(classes)
        num_gt = len(gt_classes)
        # Precompute all prediction-vs-GT (and vs-crowd) IoUs once.
        mask_iou_cache = mask_iou(masks, gt_masks)
        bbox_iou_cache = bbox_iou(boxes.float(), gt_boxes.float())
        if num_crowd > 0:
            crowd_mask_iou_cache = mask_iou(masks, crowd_masks, iscrowd=True)
            crowd_bbox_iou_cache = bbox_iou(boxes.float(), crowd_boxes.float(), iscrowd=True)
        else:
            crowd_mask_iou_cache = None
            crowd_bbox_iou_cache = None
        iou_types = [
            ('box', lambda i,j: bbox_iou_cache[i, j].item(), lambda i,j: crowd_bbox_iou_cache[i,j].item()),
            ('mask', lambda i,j: mask_iou_cache[i, j].item(), lambda i,j: crowd_mask_iou_cache[i,j].item())
        ]
    timer.start('Main loop')
    for _class in set(classes + gt_classes):
        ap_per_iou = []
        num_gt_for_class = sum([1 for x in gt_classes if x == _class])
        for iouIdx in range(len(iou_thresholds)):
            iou_threshold = iou_thresholds[iouIdx]
            for iou_type, iou_func, crowd_func in iou_types:
                gt_used = [False] * len(gt_classes)
                ap_obj = ap_data[iou_type][iouIdx][_class]
                ap_obj.add_gt_positives(num_gt_for_class)
                for i in range(num_pred):
                    if classes[i] != _class:
                        continue
                    # Greedy matching: each GT may be claimed at most once,
                    # by the highest-IoU unclaimed match above the threshold.
                    max_iou_found = iou_threshold
                    max_match_idx = -1
                    for j in range(num_gt):
                        if gt_used[j] or gt_classes[j] != _class:
                            continue
                        iou = iou_func(i, j)
                        if iou > max_iou_found:
                            max_iou_found = iou
                            max_match_idx = j
                    if max_match_idx >= 0:
                        gt_used[max_match_idx] = True
                        ap_obj.push(scores[i], True)
                    else:
                        # If the detection matches a crowd, we can just ignore it
                        matched_crowd = False
                        if num_crowd > 0:
                            for j in range(len(crowd_classes)):
                                if crowd_classes[j] != _class:
                                    continue
                                iou = crowd_func(i, j)
                                if iou > iou_threshold:
                                    matched_crowd = True
                                    break
                        # All this crowd code so that we can make sure that our eval code gives the
                        # same result as COCOEval. There aren't even that many crowd annotations to
                        # begin with, but accuracy is of the utmost importance.
                        if not matched_crowd:
                            ap_obj.push(scores[i], False)
    timer.stop('Main loop')
class APDataObject:
    """
    Stores all the information necessary to calculate the AP for one IoU and one class.
    Note: I type annotated this because why not.
    """

    def __init__(self):
        # (score, is_true_positive) tuples for every detection recorded so far
        self.data_points = []
        # Total ground-truth positives accumulated across all images
        self.num_gt_positives = 0

    def push(self, score: float, is_true: bool):
        """ Record one detection with its confidence and whether it matched a GT. """
        self.data_points.append((score, is_true))

    def add_gt_positives(self, num_positives: int):
        """ Call this once per image. """
        self.num_gt_positives += num_positives

    def is_empty(self) -> bool:
        """ True iff no detections and no ground-truth positives were ever recorded. """
        return not self.data_points and self.num_gt_positives == 0

    def get_ap(self) -> float:
        """ Warning: result not cached. """
        if self.num_gt_positives == 0:
            return 0

        # Highest-confidence detections first (sorts the stored list in place).
        self.data_points.sort(key=lambda pt: -pt[0])

        precisions = []
        recalls = []
        true_count = 0
        false_count = 0

        # Build the precision-recall curve one detection at a time
        # (x axis: recall, y axis: precision).
        for _, is_true in self.data_points:
            if is_true:
                true_count += 1
            else:
                false_count += 1

            precisions.append(true_count / (true_count + false_count))
            recalls.append(true_count / self.num_gt_positives)

        # Make precision monotonically non-increasing from the right, i.e.
        # remove temporary dips from the curve — mirrors COCOEval's smoothing.
        for idx in range(len(precisions) - 1, 0, -1):
            if precisions[idx] > precisions[idx - 1]:
                precisions[idx - 1] = precisions[idx]

        # Approximate the integral of precision(recall) over recall in [0, 1]
        # with a fixed-width Riemann sum of 101 bars, like COCOEval does.
        num_bars = 101  # idx 0 -> recall 0.00, idx 100 -> recall 1.00
        y_range = [0] * num_bars
        x_range = np.array([i / 100 for i in range(num_bars)])
        recall_arr = np.array(recalls)

        # Nearest-neighbor lookup: take the precision at the closest recall >= x.
        indices = np.searchsorted(recall_arr, x_range, side='left')
        for bar_idx, prec_idx in enumerate(indices):
            if prec_idx < len(precisions):
                y_range[bar_idx] = precisions[prec_idx]

        # The Riemann sum reduces to an average over the sampled bars.
        return sum(y_range) / len(y_range)
def badhash(x):
    """
    Just a quick and dirty hash function for doing a deterministic shuffle based on image_id.

    Source:
    https://stackoverflow.com/questions/664014/what-integer-hash-function-are-good-that-accepts-an-integer-hash-key
    """
    mask32 = 0xFFFFFFFF

    # Two rounds of xorshift-multiply, each truncated to 32 bits.
    for _ in range(2):
        x = ((x >> 16) ^ x) * 0x45d9f3b & mask32

    # Final mixing xorshift, again truncated to 32 bits.
    return ((x >> 16) ^ x) & mask32
def evalimage(net: Yolact, path: str, save_path: str = None):
    """ Run the net on a single image and either display the result or save it to save_path. """
    frame = torch.Tensor(cv2.imread(path)).cuda().float()
    batch = FastBaseTransform()(frame.unsqueeze(0))
    preds = net(batch)

    img_numpy = prep_display(preds, frame, None, None, None, None, undo_transform=False)

    if save_path is None:
        # Matplotlib expects RGB, but OpenCV loaded the image as BGR.
        img_numpy = img_numpy[:, :, (2, 1, 0)]
        plt.imshow(img_numpy)
        plt.title(path)
        plt.show()
    else:
        cv2.imwrite(save_path, img_numpy)
def evalimages(net: Yolact, input_folder: str, output_folder: str):
    """ Run evalimage on every file in input_folder, writing .png results into output_folder. """
    if not os.path.exists(output_folder):
        os.mkdir(output_folder)

    print()
    for entry in Path(input_folder).glob('*'):
        in_path = str(entry)
        base_name = os.path.basename(in_path)
        # Swap whatever extension the input had for .png
        out_name = '.'.join(base_name.split('.')[:-1]) + '.png'
        out_path = os.path.join(output_folder, out_name)

        evalimage(net, in_path, out_path)
        print(in_path + ' -> ' + out_path)
    print('Done.')
from multiprocessing.pool import ThreadPool
def evalvideo(net: Yolact, path: str):
    """
    Play back a video through the network and display annotated frames live.

    Frames move through a pipeline of stages (disk read -> tensor transform ->
    network eval -> display prep), each dispatched to a shared ThreadPool so
    stages overlap across consecutive frame batches. Press Escape to quit.
    Runs until the capture is closed; never returns normally.
    """
    vid = cv2.VideoCapture(path)
    transform = FastBaseTransform()
    frame_times = MovingAverage()
    fps = 0
    # Seconds per source frame; used below to pace playback at the native rate.
    frame_time_target = 1 / vid.get(cv2.CAP_PROP_FPS)

    def cleanup_and_exit():
        # Tear down the pool, capture, and UI, then exit the process.
        print()
        pool.terminate()
        vid.release()
        cv2.destroyAllWindows()
        exit()

    def get_next_frame(vid):
        # Read a batch of args.video_multiframe raw frames from the capture.
        return [vid.read()[1] for _ in range(args.video_multiframe)]

    def transform_frame(frames):
        # Move the raw frames to the GPU and apply the network's input transform.
        with torch.no_grad():
            frames = [torch.Tensor(frame).float().cuda() for frame in frames]
            return frames, transform(torch.stack(frames, 0))

    def eval_network(inp):
        # Run the batch through the network; keep the original frames alongside.
        with torch.no_grad():
            frames, imgs = inp
            return frames, net(imgs)

    def prep_frame(inp):
        # Render one frame's predictions into a displayable image.
        with torch.no_grad():
            frame, preds = inp
            return prep_display(preds, frame, None, None, None, None, undo_transform=False, class_color=True)

    # Pull frame i (and its single prediction) out of a batched pipeline value.
    extract_frame = lambda x, i: (x[0][i], [x[1][i]])

    # Prime the network on the first frame because I do some thread unsafe things otherwise
    print('Initializing model... ', end='')
    eval_network(transform_frame(get_next_frame(vid)))
    print('Done.')

    # For each frame the sequence of functions it needs to go through to be processed (in reversed order)
    sequence = [prep_frame, eval_network, transform_frame]
    # One worker per pipeline stage plus one per frame being display-prepped.
    pool = ThreadPool(processes=len(sequence) + args.video_multiframe)

    # Each entry: {'value': stage output (or pending AsyncResult), 'idx': next stage index}
    active_frames = []

    print()
    while vid.isOpened():
        start_time = time.time()

        # Start loading the next frames from the disk
        next_frames = pool.apply_async(get_next_frame, args=(vid,))

        # For each frame in our active processing queue, dispatch a job
        # for that frame using the current function in the sequence
        for frame in active_frames:
            frame['value'] = pool.apply_async(sequence[frame['idx']], args=(frame['value'],))

        # For each frame whose job was the last in the sequence (i.e. for all final outputs)
        for frame in active_frames:
            if frame['idx'] == 0:
                # Wait here so that the frame has time to process and so that the video plays at the proper speed
                time.sleep(frame_time_target)

                cv2.imshow(path, frame['value'].get())
                if cv2.waitKey(1) == 27:  # Press Escape to close
                    cleanup_and_exit()

        # Remove the finished frames from the processing queue
        active_frames = [x for x in active_frames if x['idx'] > 0]

        # Finish evaluating every frame in the processing queue and advanced their position in the sequence
        for frame in list(reversed(active_frames)):
            frame['value'] = frame['value'].get()
            frame['idx'] -= 1

            if frame['idx'] == 0:
                # Split this up into individual threads for prep_frame since it doesn't support batch size
                active_frames += [{'value': extract_frame(frame['value'], i), 'idx': 0} for i in range(1, args.video_multiframe)]
                frame['value'] = extract_frame(frame['value'], 0)

        # Finish loading in the next frames and add them to the processing queue
        active_frames.append({'value': next_frames.get(), 'idx': len(sequence) - 1})

        # Compute FPS
        frame_times.add(time.time() - start_time)
        fps = args.video_multiframe / frame_times.get_avg()

        print('\rAvg FPS: %.2f     ' % fps, end='')

    cleanup_and_exit()
def savevideo(net: Yolact, in_path: str, out_path: str):
    """
    Run the network over every frame of the video at in_path and write an
    annotated mp4 to out_path, printing a progress bar with an FPS estimate.
    KeyboardInterrupt stops early but still finalizes the output file.
    """
    vid = cv2.VideoCapture(in_path)

    # Mirror the source video's fps, resolution, and length for the output.
    target_fps = round(vid.get(cv2.CAP_PROP_FPS))
    frame_width = round(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = round(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    num_frames = round(vid.get(cv2.CAP_PROP_FRAME_COUNT))

    out = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"), target_fps, (frame_width, frame_height))
    transform = FastBaseTransform()
    frame_times = MovingAverage()
    progress_bar = ProgressBar(30, num_frames)

    try:
        for i in range(num_frames):
            timer.reset()
            with timer.env('Video'):
                frame = torch.Tensor(vid.read()[1]).float().cuda()
                batch = transform(frame.unsqueeze(0))
                preds = net(batch)
                processed = prep_display(preds, frame, None, None, None, None, undo_transform=False, class_color=True)

                out.write(processed)

            # Skip the first couple of frames in the FPS estimate — they include
            # one-time initialization cost.
            if i > 1:
                frame_times.add(timer.total_time())
                fps = 1 / frame_times.get_avg()
                progress = (i + 1) / num_frames * 100
                progress_bar.set_val(i + 1)

                print('\rProcessing Frames  %s %6d / %6d (%5.2f%%)    %5.2f fps    '
                      % (repr(progress_bar), i + 1, num_frames, progress, fps), end='')
    except KeyboardInterrupt:
        print('Stopping early.')

    vid.release()
    out.release()
    print()
def evaluate(net: Yolact, dataset, train_mode=False):
    """
    Top-level evaluation dispatcher.

    Depending on the parsed command-line args, this either handles a single
    image / image folder / video (and returns early), or runs the main
    dataset loop in one of three modes: display (show each image),
    benchmark (timing only), or metrics (accumulate AP data and return the
    mAP table from calc_map). train_mode=True skips persisting ap_data.
    """
    net.detect.cross_class_nms = args.cross_class_nms
    net.detect.use_fast_nms = args.fast_nms
    cfg.mask_proto_debug = args.mask_proto_debug

    # One-off modes: 'input:output' syntax selects save-to-file variants.
    if args.image is not None:
        if ':' in args.image:
            inp, out = args.image.split(':')
            evalimage(net, inp, out)
        else:
            evalimage(net, args.image)
        return
    elif args.images is not None:
        inp, out = args.images.split(':')
        evalimages(net, inp, out)
        return
    elif args.video is not None:
        if ':' in args.video:
            inp, out = args.video.split(':')
            savevideo(net, inp, out)
        else:
            evalvideo(net, args.video)
        return

    frame_times = MovingAverage()
    dataset_size = len(dataset) if args.max_images < 0 else min(args.max_images, len(dataset))
    progress_bar = ProgressBar(30, dataset_size)

    print()

    if not args.display and not args.benchmark:
        # For each class and iou, stores tuples (score, isPositive)
        # Index ap_data[type][iouIdx][classIdx]
        ap_data = {
            'box': [[APDataObject() for _ in COCO_CLASSES] for _ in iou_thresholds],
            'mask': [[APDataObject() for _ in COCO_CLASSES] for _ in iou_thresholds]
        }
        detections = Detections()
    else:
        timer.disable('Load Data')

    dataset_indices = list(range(len(dataset)))

    if args.shuffle:
        random.shuffle(dataset_indices)
    elif not args.no_sort:
        # Do a deterministic shuffle based on the image ids
        #
        # I do this because on python 3.5 dictionary key order is *random*, while in 3.6 it's
        # the order of insertion. That means on python 3.6, the images come in the order they are in
        # in the annotations file. For some reason, the first images in the annotations file are
        # the hardest. To combat this, I use a hard-coded hash function based on the image ids
        # to shuffle the indices we use. That way, no matter what python version or how pycocotools
        # handles the data, we get the same result every time.
        hashed = [badhash(x) for x in dataset.ids]
        dataset_indices.sort(key=lambda x: hashed[x])

    dataset_indices = dataset_indices[:dataset_size]

    try:
        # Main eval loop
        for it, image_idx in enumerate(dataset_indices):
            timer.reset()

            with timer.env('Load Data'):
                img, gt, gt_masks, h, w, num_crowd = dataset.pull_item(image_idx)

                # Test flag, do not upvote
                if cfg.mask_proto_debug:
                    with open('scripts/info.txt', 'w') as f:
                        f.write(str(dataset.ids[image_idx]))
                    np.save('scripts/gt.npy', gt_masks)

                batch = Variable(img.unsqueeze(0))
                if args.cuda:
                    batch = batch.cuda()

            with timer.env('Network Extra'):
                preds = net(batch)

            # Perform the meat of the operation here depending on our mode.
            if args.display:
                img_numpy = prep_display(preds, img, gt, gt_masks, h, w)
            elif args.benchmark:
                prep_benchmark(preds, h, w)
            else:
                prep_metrics(ap_data, preds, img, gt, gt_masks, h, w, num_crowd, dataset.ids[image_idx], detections)

            # First couple of images take longer because we're constructing the graph.
            # Since that's technically initialization, don't include those in the FPS calculations.
            if it > 1:
                frame_times.add(timer.total_time())

            if args.display:
                if it > 1:
                    print('Avg FPS: %.4f' % (1 / frame_times.get_avg()))
                plt.imshow(img_numpy)
                plt.title(str(dataset.ids[image_idx]))
                plt.show()
            elif not args.no_bar:
                if it > 1: fps = 1 / frame_times.get_avg()
                else: fps = 0
                progress = (it + 1) / dataset_size * 100
                progress_bar.set_val(it + 1)
                print('\rProcessing Images  %s %6d / %6d (%5.2f%%)    %5.2f fps    '
                      % (repr(progress_bar), it + 1, dataset_size, progress, fps), end='')

        if not args.display and not args.benchmark:
            print()
            if args.output_coco_json:
                print('Dumping detections...')
                if args.output_web_json:
                    detections.dump_web()
                else:
                    detections.dump()
            else:
                if not train_mode:
                    print('Saving data...')
                    with open(args.ap_data_file, 'wb') as f:
                        pickle.dump(ap_data, f)

                return calc_map(ap_data)
        elif args.benchmark:
            print()
            print()
            print('Stats for the last frame:')
            timer.print_stats()
            avg_seconds = frame_times.get_avg()
            print('Average: %5.2f fps, %5.2f ms' % (1 / frame_times.get_avg(), 1000 * avg_seconds))

    except KeyboardInterrupt:
        print('Stopping...')
def calc_map(ap_data):
    """
    Aggregate the per-class APDataObjects into per-IoU-threshold mAP values
    (plus an 'all' average), print the table, and return it.
    """
    print('Calculating mAP...')
    aps = [{'box': [], 'mask': []} for _ in iou_thresholds]

    # Collect the AP of every non-empty (class, threshold, type) cell.
    for class_idx in range(len(COCO_CLASSES)):
        for iou_idx in range(len(iou_thresholds)):
            for iou_type in ('box', 'mask'):
                ap_obj = ap_data[iou_type][iou_idx][class_idx]

                if not ap_obj.is_empty():
                    aps[iou_idx][iou_type].append(ap_obj.get_ap())

    all_maps = {'box': OrderedDict(), 'mask': OrderedDict()}

    # Looking back at it, this code is really hard to read :/
    for iou_type in ('box', 'mask'):
        all_maps[iou_type]['all'] = 0  # Make this first in the ordereddict
        for i, threshold in enumerate(iou_thresholds):
            class_aps = aps[i][iou_type]
            mAP = (sum(class_aps) / len(class_aps) * 100) if len(class_aps) > 0 else 0
            all_maps[iou_type][int(threshold * 100)] = mAP
        # Average the per-threshold entries; 'all' itself is still 0 here,
        # hence the division by count - 1.
        all_maps[iou_type]['all'] = (sum(all_maps[iou_type].values()) / (len(all_maps[iou_type].values()) - 1))

    print_maps(all_maps)
    return all_maps
def print_maps(all_maps):
    """ Pretty-print the mAP table produced by calc_map. (Warning: hacky) """

    def fmt_row(vals):
        # One ' value |' cell per entry.
        return (' %5s |' * len(vals)) % tuple(vals)

    def fmt_sep(n):
        return '-------+' * n

    print()
    # Header: blank corner cell, then '.50 ' style labels for int thresholds.
    header = [''] + [('.%d ' % x if isinstance(x, int) else x + ' ') for x in all_maps['box'].keys()]
    print(fmt_row(header))
    print(fmt_sep(len(all_maps['box']) + 1))
    for iou_type in ('box', 'mask'):
        print(fmt_row([iou_type] + ['%.2f' % x for x in all_maps[iou_type].values()]))
    print(fmt_sep(len(all_maps['box']) + 1))
    print()
if __name__ == '__main__':
    # Script entry point: parse CLI args, resolve config/weights, then evaluate.
    parse_args()

    if args.config is not None:
        set_cfg(args.config)

    # 'interrupt'/'latest' are symbolic names resolved against the weights dir.
    if args.trained_model == 'interrupt':
        args.trained_model = SavePath.get_interrupt('weights/')
    elif args.trained_model == 'latest':
        args.trained_model = SavePath.get_latest('weights/', cfg.name)

    if args.config is None:
        model_path = SavePath.from_str(args.trained_model)
        # TODO: Bad practice? Probably want to do a name lookup instead.
        args.config = model_path.model_name + '_config'
        print('Config not specified. Parsed %s from the file name.\n' % args.config)
        set_cfg(args.config)

    if args.detect:
        cfg.eval_mask_branch = False

    if args.dataset is not None:
        set_dataset(args.dataset)

    # Everything below is inference-only; no gradients needed.
    with torch.no_grad():
        if not os.path.exists('results'):
            os.makedirs('results')

        if args.cuda:
            cudnn.benchmark = True
            cudnn.fastest = True
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
        else:
            torch.set_default_tensor_type('torch.FloatTensor')

        # --resume: recompute mAP from previously pickled AP data and exit.
        if args.resume and not args.display:
            with open(args.ap_data_file, 'rb') as f:
                ap_data = pickle.load(f)
            calc_map(ap_data)
            exit()

        # Only load the validation dataset when running the dataset eval loop.
        if args.image is None and args.video is None and args.images is None:
            dataset = COCODetection(cfg.dataset.valid_images, cfg.dataset.valid_info, transform=BaseTransform())
            prep_coco_cats(dataset.coco.cats)
        else:
            dataset = None

        print('Loading model...', end='')
        net = Yolact()
        net.load_weights(args.trained_model)
        net.eval()
        print(' Done.')

        if args.cuda:
            net = net.cuda()

        evaluate(net, dataset)
| 40.95629 | 159 | 0.592863 |
ace3b40019aecdc60911e421d1c7fe29b7cd3b5c | 35,199 | py | Python | extrahop/getmessages_extrahop.py | VuAnhIT/InsightAgent-100 | 5146f3df28b6b6873e1af2ac410219f5946168b9 | [
"Apache-2.0"
] | null | null | null | extrahop/getmessages_extrahop.py | VuAnhIT/InsightAgent-100 | 5146f3df28b6b6873e1af2ac410219f5946168b9 | [
"Apache-2.0"
] | null | null | null | extrahop/getmessages_extrahop.py | VuAnhIT/InsightAgent-100 | 5146f3df28b6b6873e1af2ac410219f5946168b9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import configparser
import json
import logging
import os
import regex
import socket
import sys
import time
import pytz
import arrow
import urllib.parse
import http.client
import requests
import shlex
import traceback
import sqlite3
from sys import getsizeof
from itertools import chain
from optparse import OptionParser
from multiprocessing.pool import ThreadPool
"""
This script gathers data to send to Insightfinder
"""
def start_data_processing():
    """
    Main collection pass: resolve ExtraHop device ids, then fetch metrics
    either over a historical time range (replay) or for the most recent
    sampling interval (streaming), parsing results into the metric buffer.

    Reads agent_config_vars / if_config_vars globals; fans work out over a
    ThreadPool sized by the 'thread_pool' setting.
    """
    logger.info('Started......')
    # build ThreadPool
    try:
        # NOTE(review): if ThreadPool() itself raises, the finally block hits a
        # NameError on pool_map — consider constructing it before the try.
        pool_map = ThreadPool(agent_config_vars['thread_pool'])

        # build request headers
        api_key = agent_config_vars['api_key']
        headers = {
            "Accept": "application/json",
            "Authorization": "ExtraHop apikey=" + api_key
        }

        metric_query_params = agent_config_vars['metric_query_params']
        device_ip_list = agent_config_vars['device_ip_list'] or []
        # merge all device ip list (global list plus each query's own list)
        for param in metric_query_params:
            ips = param.get('device_ip_list') or []
            device_ip_list.extend(ips)
        device_ip_list = list(set(device_ip_list))

        # get devices list and id maps
        devices_ids = []
        devices_ids_map = {}
        devices_ips_map = {}
        url = urllib.parse.urljoin(agent_config_vars['host'], '/api/v1/devices')
        result_list = []
        if device_ip_list:
            # One /devices lookup per configured ip, run in parallel.
            def query_devices(args):
                ip, params = args
                logger.debug('Starting query device ip: {}'.format(ip))
                data = []
                try:
                    # execute sql string
                    # (send_request appears to return -1 on failure — confirm)
                    response = send_request(url, headers=headers, params=params, verify=False,
                                            proxies=agent_config_vars['proxies'])
                    if response != -1:
                        result = response.json()
                        data = result or []
                except Exception as e:
                    logger.error(e)
                    logger.error('Query device error: ' + ip)
                return data

            params_list = [(ip, {
                "search_type": 'ip address',
                "value": ip
            }) for ip in device_ip_list]
            results = pool_map.map(query_devices, params_list)
            result_list = list(chain(*results))
        else:
            # No ip filter configured: fetch every device in one call.
            params = {
                "search_type": 'any',
            }
            try:
                # execute sql string
                response = send_request(url, headers=headers, params=params, verify=False,
                                        proxies=agent_config_vars['proxies'])
                if response != -1:
                    result = response.json()
                    result_list = result or []
            except Exception as e:
                logger.error(e)
                logger.error('Query device list error')

        # parse device list into id<->ipv4 lookup maps
        for device in result_list:
            device_id = device['id']
            devices_ids.append(device_id)
            devices_ids_map[device_id] = device['ipaddr4']
            devices_ips_map[device['ipaddr4']] = device_id

        # filter devices ids
        if len(devices_ids) == 0:
            logger.error('Devices list is empty')
            sys.exit(1)

        # parse sql string by params
        logger.debug('history range config: {}'.format(agent_config_vars['his_time_range']))
        if agent_config_vars['his_time_range']:
            # Replay mode: walk the configured [start, end) range one sampling
            # interval at a time.
            logger.debug('Using time range for replay data')
            for timestamp in range(agent_config_vars['his_time_range'][0],
                                   agent_config_vars['his_time_range'][1],
                                   if_config_vars['sampling_interval']):
                start_time = timestamp
                end_time = timestamp + if_config_vars['sampling_interval']
                params = build_query_params(headers, devices_ips_map, devices_ids, metric_query_params, start_time,
                                            end_time)
                results = pool_map.map(query_messages_extrahop, params)
                result_list = list(chain(*results))
                parse_messages_extrahop(result_list, devices_ids_map)

                # clear metric buffer when piece of time range end
                clear_metric_buffer()
        else:
            # Streaming mode: query only the most recent sampling interval.
            logger.debug('Using current time for streaming data')
            time_now = int(arrow.utcnow().float_timestamp)
            start_time = time_now - if_config_vars['sampling_interval']
            end_time = time_now
            params = build_query_params(headers, devices_ips_map, devices_ids, metric_query_params, start_time, end_time)
            results = pool_map.map(query_messages_extrahop, params)
            result_list = list(chain(*results))
            parse_messages_extrahop(result_list, devices_ids_map)

        logger.info('Closed......')
    finally:
        pool_map.close()
def build_query_params(headers, devices_ips_map, devices_ids, metric_query_params, start_time, end_time):
    """
    Expand the configured queries into one (metric_name, headers, body) tuple
    per metric spec, ready to be POSTed to /api/v1/metrics.

    start_time/end_time are epoch seconds; the API body wants milliseconds.
    """
    query_list = []
    for query in metric_query_params:
        ip_filter = query['device_ip_list']

        if ip_filter and len(ip_filter) > 0:
            # Restrict to the ids of the ips this query names; unknown ips are dropped.
            object_ids = [devices_ips_map.get(ip) for ip in ip_filter if devices_ips_map.get(ip)]
        else:
            object_ids = devices_ids

        for spec in query['metric_specs']:
            body = {
                "from": start_time * 1000,
                "until": end_time * 1000,
                "metric_category": query["metric_category"],
                "metric_specs": [spec],
                "object_type": agent_config_vars['object_type'],
                "object_ids": object_ids,
                "cycle": query['cycle'] or 'auto',
            }
            query_list.append((spec['name'], headers, body))

    return query_list
def query_messages_extrahop(args):
    """
    POST one metrics query to the ExtraHop /api/v1/metrics endpoint.

    args is a (metric_name, request_headers, request_body) tuple as produced
    by build_query_params. Returns the list of stat dicts from the response,
    each tagged with 'metric_name'; returns [] on any failure.
    """
    metric, headers, params = args
    logger.info('Starting query metrics with params: {}'.format(str(json.dumps(params))))

    data = []
    try:
        url = urllib.parse.urljoin(agent_config_vars['host'], '/api/v1/metrics')
        # send_request appears to return -1 on failure — confirm against its definition.
        response = send_request(url, mode='POST', headers=headers, data=json.dumps(params), verify=False,
                                proxies=agent_config_vars['proxies'])
        if response == -1:
            logger.error('Query metrics error')
        else:
            result = response.json()
            # Check the result is Dict, and has field stats.
            # Fix: the original indexed result["stats"] directly, so a non-dict
            # payload or a missing 'stats' key crashed the worker thread.
            if isinstance(result, dict):
                data = result.get("stats") or []
            else:
                logger.error('Unexpected metrics response payload: {}'.format(result))
    except Exception as e:
        logger.error(e)

    # add metric name in the value
    data = [{**item, 'metric_name': metric, } for item in data]
    return data
def parse_messages_extrahop(result, devices_ids_map):
    """
    Fold raw /api/v1/metrics stat entries into the global metric_buffer.

    result: list of stat dicts, each tagged with 'metric_name' by
    query_messages_extrahop. devices_ids_map maps device id -> ipv4, used to
    turn the stat's object id into a readable instance name. Increments the
    global track['entry_count'] per parsed message.
    """
    count = 0
    logger.info('Reading {} messages'.format(len(result)))

    for message in result:
        try:
            logger.debug(message)
            date_field = message.get('metric_name')
            # Instance comes from the configured field, defaulting to the
            # ExtraHop object id ('oid'), then mapped back to its ip if known.
            instance = message.get(
                agent_config_vars['instance_field'][0] if agent_config_vars['instance_field'] and len(
                    agent_config_vars['instance_field']) > 0 else 'oid')
            instance = devices_ids_map.get(instance, instance)

            # filter by instance whitelist
            if agent_config_vars['instance_whitelist_regex'] \
                    and not agent_config_vars['instance_whitelist_regex'].match(instance):
                continue

            # timestamp should be misc unit
            timestamp = message.get(
                agent_config_vars['timestamp_field'][0] if agent_config_vars['timestamp_field'] else 'time')
            # set offset for timestamp (configured timezone offset, in ms)
            timestamp += agent_config_vars['target_timestamp_timezone'] * 1000
            timestamp = str(timestamp)

            # get values with different format
            values = message.get('values')
            if len(values) == 0:
                continue
            value_val = values[0]
            if isinstance(value_val, list):
                # Keyed stat: a list of {'key': {...}, 'value': ...} entries.
                for value_item in value_val:
                    data_value = value_item['value']
                    key_meta_data = value_item['key'] or {}

                    # add device info if has (first non-empty configured field wins)
                    device = None
                    device_field = agent_config_vars['device_field']
                    if device_field and len(device_field) > 0:
                        devices = [key_meta_data.get(d) for d in device_field]
                        devices = [d for d in devices if d]
                        device = devices[0] if len(devices) > 0 else None
                    full_instance = make_safe_instance_string(instance, device)

                    # get component, and build component instance map info
                    component_map = None
                    if agent_config_vars['component_field']:
                        component = key_meta_data.get(agent_config_vars['component_field'])
                        if component:
                            component_map = {"instanceName": full_instance, "componentName": component}

                    # Buffer rows are keyed by (timestamp, instance).
                    key = '{}-{}'.format(timestamp, full_instance)
                    if key not in metric_buffer['buffer_dict']:
                        metric_buffer['buffer_dict'][key] = {"timestamp": timestamp, "component_map": component_map}

                    metric_key = '{}[{}]'.format(date_field, full_instance)
                    metric_buffer['buffer_dict'][key][metric_key] = str(data_value)
            else:
                # Scalar stat: metadata fields live on the message itself.
                data_value = value_val

                # add device info if has
                device = None
                device_field = agent_config_vars['device_field']
                if device_field and len(device_field) > 0:
                    devices = [message.get(d) for d in device_field]
                    devices = [d for d in devices if d]
                    device = devices[0] if len(devices) > 0 else None
                full_instance = make_safe_instance_string(instance, device)

                # get component, and build component instance map info
                component_map = None
                if agent_config_vars['component_field']:
                    component = message.get(agent_config_vars['component_field'])
                    if component:
                        component_map = {"instanceName": full_instance, "componentName": component}

                key = '{}-{}'.format(timestamp, full_instance)
                if key not in metric_buffer['buffer_dict']:
                    metric_buffer['buffer_dict'][key] = {"timestamp": timestamp, "component_map": component_map}

                metric_key = '{}[{}]'.format(date_field, full_instance)
                metric_buffer['buffer_dict'][key][metric_key] = str(data_value)
        except Exception as e:
            # Skip malformed messages; logger.warn is deprecated in favor of
            # logger.warning, but left as-is here.
            logger.warn('Error when parsing message')
            logger.warn(e)
            logger.debug(traceback.format_exc())
            continue
        track['entry_count'] += 1
        count += 1
        if count % 1000 == 0:
            logger.info('Parse {0} messages'.format(count))
    logger.info('Parse {0} messages'.format(count))
def get_agent_config_vars():
    """
    Read and parse the [extrahop] section of config.ini.

    Returns a dict of validated settings; any missing/invalid required
    setting calls config_error(), which exits the process. A missing config
    file exits via config_error_no_config().
    """
    config_ini = config_ini_path()
    if os.path.exists(config_ini):
        config_parser = configparser.ConfigParser()
        config_parser.read(config_ini)

        # Defaults for values that may be validated after the try block.
        extrahop_kwargs = {}
        host = None
        api_key = None
        metric_query_params = None
        device_ip_list = None
        object_type = None
        his_time_range = None
        instance_whitelist_regex = None

        try:
            # extrahop settings
            extrahop_config = {}
            # only keep settings with values
            extrahop_kwargs = {k: v for (k, v) in list(extrahop_config.items()) if v}

            host = config_parser.get('extrahop', 'host')
            api_key = config_parser.get('extrahop', 'api_key')
            object_type = config_parser.get('extrahop', 'object_type')
            device_ip_list = config_parser.get('extrahop', 'device_ip_list')
            metric_query_params = config_parser.get('extrahop', 'metric_query_params')

            # time range
            his_time_range = config_parser.get('extrahop', 'his_time_range')

            # proxies
            agent_http_proxy = config_parser.get('extrahop', 'agent_http_proxy')
            agent_https_proxy = config_parser.get('extrahop', 'agent_https_proxy')

            # message parsing
            data_format = config_parser.get('extrahop', 'data_format').upper()
            component_field = config_parser.get('extrahop', 'component_field', raw=True)
            instance_field = config_parser.get('extrahop', 'instance_field', raw=True)
            instance_whitelist = config_parser.get('extrahop', 'instance_whitelist')
            device_field = config_parser.get('extrahop', 'device_field', raw=True)
            timestamp_field = config_parser.get('extrahop', 'timestamp_field', raw=True) or 'timestamp'
            target_timestamp_timezone = config_parser.get('extrahop', 'target_timestamp_timezone', raw=True) or 'UTC'
            timestamp_format = config_parser.get('extrahop', 'timestamp_format', raw=True)
            timezone = config_parser.get('extrahop', 'timezone') or 'UTC'
            thread_pool = config_parser.get('extrahop', 'thread_pool', raw=True)
        except configparser.NoOptionError as cp_noe:
            logger.error(cp_noe)
            config_error()

        # handle boolean setting

        # handle required arrays
        if not host:
            config_error('host')
        if not api_key:
            config_error('api_key')
        if not object_type:
            config_error('object_type')

        if device_ip_list:
            device_ip_list = [ip.strip() for ip in device_ip_list.split(',') if ip.strip()]

        if metric_query_params:
            try:
                # NOTE(review): eval() executes arbitrary code from config.ini;
                # ast.literal_eval would be safer — confirm the config file is trusted.
                metric_query_params = eval(metric_query_params)
            except Exception as e:
                logger.error(e)
                config_error('metric_query_params')
        else:
            config_error('metric_query_params')
        if not isinstance(metric_query_params, list):
            config_error('metric_query_params')
        for param in metric_query_params:
            if param.get('device_ip_list') and not isinstance(param['device_ip_list'], list):
                config_error('metric_query_params->device_ip_list')

        if len(instance_whitelist) != 0:
            try:
                instance_whitelist_regex = regex.compile(instance_whitelist)
            except Exception:
                config_error('instance_whitelist')

        if len(his_time_range) != 0:
            # Two comma-separated datetimes parsed into epoch seconds.
            his_time_range = [x.strip() for x in his_time_range.split(',') if x.strip()]
            his_time_range = [int(arrow.get(x).float_timestamp) for x in his_time_range]

        if len(target_timestamp_timezone) != 0:
            # Stored as the zone's UTC offset in seconds.
            target_timestamp_timezone = int(arrow.now(target_timestamp_timezone).utcoffset().total_seconds())
        else:
            config_error('target_timestamp_timezone')

        if timezone:
            if timezone not in pytz.all_timezones:
                config_error('timezone')
            else:
                timezone = pytz.timezone(timezone)

        # data format
        if data_format in {'JSON',
                           'JSONTAIL',
                           'AVRO',
                           'XML'}:
            pass
        else:
            config_error('data_format')

        # proxies
        agent_proxies = dict()
        if len(agent_http_proxy) > 0:
            agent_proxies['http'] = agent_http_proxy
        if len(agent_https_proxy) > 0:
            agent_proxies['https'] = agent_https_proxy

        # fields (comma-separated lists in the config)
        instance_fields = [x.strip() for x in instance_field.split(',') if x.strip()]
        device_fields = [x.strip() for x in device_field.split(',') if x.strip()]
        timestamp_fields = timestamp_field.split(',')

        if len(thread_pool) != 0:
            thread_pool = int(thread_pool)
        else:
            thread_pool = 20

        # add parsed variables to a global
        config_vars = {
            'extrahop_kwargs': extrahop_kwargs,
            'host': host,
            'api_key': api_key,
            'object_type': object_type,
            'device_ip_list': device_ip_list,
            'metric_query_params': metric_query_params,
            'his_time_range': his_time_range,
            'proxies': agent_proxies,
            'data_format': data_format,
            'component_field': component_field,
            'instance_field': instance_fields,
            "instance_whitelist_regex": instance_whitelist_regex,
            'device_field': device_fields,
            'timestamp_field': timestamp_fields,
            'target_timestamp_timezone': target_timestamp_timezone,
            'timezone': timezone,
            'timestamp_format': timestamp_format,
            'thread_pool': thread_pool,
        }

        return config_vars
    else:
        config_error_no_config()
#########################
# START_BOILERPLATE #
#########################
def get_if_config_vars():
    """
    Read and parse the [insightfinder] section of config.ini.

    Returns a dict of validated settings (intervals normalized to seconds,
    chunk size to bytes). Missing/invalid required settings exit via
    config_error(); a missing config file exits via config_error_no_config().
    """
    config_ini = config_ini_path()
    if os.path.exists(config_ini):
        config_parser = configparser.ConfigParser()
        config_parser.read(config_ini)
        try:
            user_name = config_parser.get('insightfinder', 'user_name')
            license_key = config_parser.get('insightfinder', 'license_key')
            token = config_parser.get('insightfinder', 'token')
            project_name = config_parser.get('insightfinder', 'project_name')
            project_type = config_parser.get('insightfinder', 'project_type').upper()
            sampling_interval = config_parser.get('insightfinder', 'sampling_interval')
            run_interval = config_parser.get('insightfinder', 'run_interval')
            chunk_size_kb = config_parser.get('insightfinder', 'chunk_size_kb')
            if_url = config_parser.get('insightfinder', 'if_url')
            if_http_proxy = config_parser.get('insightfinder', 'if_http_proxy')
            if_https_proxy = config_parser.get('insightfinder', 'if_https_proxy')
        except configparser.NoOptionError as cp_noe:
            logger.error(cp_noe)
            config_error()

        # check required variables
        if len(user_name) == 0:
            config_error('user_name')
        if len(license_key) == 0:
            config_error('license_key')
        if len(project_name) == 0:
            config_error('project_name')
        if len(project_type) == 0:
            config_error('project_type')

        if project_type not in {
            'METRIC',
            'METRICREPLAY',
            'LOG',
            'LOGREPLAY',
            'INCIDENT',
            'INCIDENTREPLAY',
            'ALERT',
            'ALERTREPLAY',
            'DEPLOYMENT',
            'DEPLOYMENTREPLAY'
        }:
            config_error('project_type')

        is_replay = 'REPLAY' in project_type

        if len(sampling_interval) == 0:
            if 'METRIC' in project_type:
                config_error('sampling_interval')
            else:
                # set default for non-metric
                sampling_interval = 10

        # Trailing 's' means the value is already in seconds; otherwise minutes.
        if sampling_interval.endswith('s'):
            sampling_interval = int(sampling_interval[:-1])
        else:
            sampling_interval = int(sampling_interval) * 60

        if len(run_interval) == 0:
            config_error('run_interval')

        if run_interval.endswith('s'):
            run_interval = int(run_interval[:-1])
        else:
            run_interval = int(run_interval) * 60

        # defaults
        if len(chunk_size_kb) == 0:
            chunk_size_kb = 2048  # 2MB chunks by default
        if len(if_url) == 0:
            if_url = 'https://app.insightfinder.com'

        # set IF proxies
        if_proxies = dict()
        if len(if_http_proxy) > 0:
            if_proxies['http'] = if_http_proxy
        if len(if_https_proxy) > 0:
            if_proxies['https'] = if_https_proxy

        config_vars = {
            'user_name': user_name,
            'license_key': license_key,
            'token': token,
            'project_name': project_name,
            'project_type': project_type,
            'sampling_interval': int(sampling_interval),  # as seconds
            'run_interval': int(run_interval),  # as seconds
            'chunk_size': int(chunk_size_kb) * 1024,  # as bytes
            'if_url': if_url,
            'if_proxies': if_proxies,
            'is_replay': is_replay
        }

        return config_vars
    else:
        config_error_no_config()
def config_ini_path():
    """ Absolute path of the config file selected on the command line. """
    return abs_path_from_cur(cli_config_vars['config'])
def abs_path_from_cur(filename=''):
    """ Resolve filename relative to the directory containing this script. """
    script_dir = os.path.join(__file__, os.pardir)
    return os.path.abspath(os.path.join(script_dir, filename))
def get_cli_config_vars():
    """
    Parse CLI options (use of these options should be rare).

    Returns a dict with 'config' (validated path), 'threads' (fixed at 1),
    'testing' (don't send data), and 'log_level'.
    """
    usage = 'Usage: %prog [options]'
    parser = OptionParser(usage=usage)
    """
    ## not ready.
    parser.add_option('--threads', default=1, action='store', dest='threads',
                      help='Number of threads to run')
    """
    parser.add_option('-c', '--config', action='store', dest='config', default=abs_path_from_cur('config.ini'),
                      help='Path to the config file to use. Defaults to {}'.format(abs_path_from_cur('config.ini')))
    parser.add_option('-q', '--quiet', action='store_true', dest='quiet', default=False,
                      help='Only display warning and error log messages')
    parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False,
                      help='Enable verbose logging')
    parser.add_option('-t', '--testing', action='store_true', dest='testing', default=False,
                      help='Set to testing mode (do not send data).' +
                           ' Automatically turns on verbose logging')
    (options, args) = parser.parse_args()

    config_vars = {
        # Fall back to the default config path if the given one doesn't exist.
        'config': options.config if os.path.isfile(options.config) else abs_path_from_cur('config.ini'),
        'threads': 1,
        'testing': False,
        'log_level': logging.INFO
    }

    if options.testing:
        config_vars['testing'] = True

    # Fix: '-t' documents that it "Automatically turns on verbose logging",
    # but previously only an explicit '-v' enabled DEBUG output.
    if options.verbose or options.testing:
        config_vars['log_level'] = logging.DEBUG
    elif options.quiet:
        config_vars['log_level'] = logging.WARNING

    return config_vars
def config_error(setting=''):
    """Log a fatal configuration error (optionally naming *setting*) and exit."""
    suffix = ' ({})'.format(setting) if setting else ''
    logger.error('Agent not correctly configured{}. Check config file.'.format(
        suffix))
    sys.exit(1)
def config_error_no_config():
    """Log the absence of a config file, then exit with failure status."""
    logger.error('No config file found. Exiting...')
    sys.exit(1)
def get_json_size_bytes(json_data):
    """Return the in-memory size (bytes) of *json_data* serialized as JSON."""
    serialized = json.dumps(json_data)
    return getsizeof(serialized)
def make_safe_instance_string(instance, device=''):
    """Sanitize an instance name, prefixing the (sanitized) device if given."""
    # underscores become periods, then colons become dashes
    safe = COLONS.sub('-', UNDERSCORE.sub('.', str(instance)))
    if device:
        # sanitize the device recursively and join with an underscore
        safe = '{}_{}'.format(make_safe_instance_string(device), safe)
    return safe
def make_safe_metric_key(metric):
    """Sanitize a metric key: brackets become parens, periods become slashes.

    make_safe_string already handles the rest.
    """
    for pattern, replacement in ((LEFT_BRACE, '('), (RIGHT_BRACE, ')'), (PERIOD, '/')):
        metric = pattern.sub(replacement, metric)
    return metric
def make_safe_string(string):
    """Return *string* with spaces, slashes, underscores and any remaining
    non-alphanumeric characters subbed out (in that order)."""
    string = NON_ALNUM.sub(
        '', UNDERSCORE.sub('.', SLASHES.sub('.', SPACES.sub('-', string))))
    return string
def format_command(cmd):
    """Return *cmd* as an argv list, splitting a string shell-style.

    Accepts a string, list, or tuple (no sets, as order matters).
    """
    if isinstance(cmd, (list, tuple)):
        return list(cmd)
    return list(shlex.split(cmd))
def set_logger_config(level):
    """Configure and return this module's logger at *level*.

    INFO/DEBUG records go to stdout; WARNING and above also go to stderr.
    """
    logger_obj = logging.getLogger(__name__)
    # the root level defaults to WARNING, so it must be set explicitly
    logger_obj.setLevel(level)
    # same layout the original .format() call produced, written out directly
    fmt = ('%(asctime)s [pid %(process)d] %(levelname)-8s '
           '%(module)s.%(funcName)s():%(lineno)d %(message)s')
    formatter = logging.Formatter(fmt, ISO8601[0])
    # route INFO and DEBUG logging to stdout
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.DEBUG)
    stdout_handler.setFormatter(formatter)
    logger_obj.addHandler(stdout_handler)
    # warnings and errors additionally go to stderr (unformatted, as before)
    stderr_handler = logging.StreamHandler(sys.stderr)
    stderr_handler.setLevel(logging.WARNING)
    logger_obj.addHandler(stderr_handler)
    return logger_obj
def print_summary_info():
    """Log the IF, agent, and CLI configuration dicts at DEBUG level."""
    for label, settings in (('IF', if_config_vars),
                            ('Agent', agent_config_vars),
                            ('CLI', cli_config_vars)):
        block = '\n{} settings:'.format(label)
        for key, value in sorted(settings.items()):
            block += '\n\t{}: {}'.format(key, value)
        logger.debug(block)
def initialize_data_gathering():
    """Run one full collection pass: reset state, gather, then flush buffers."""
    reset_metric_buffer()
    reset_track()
    track['chunk_count'] = 0
    track['entry_count'] = 0
    start_data_processing()
    # flush whatever remains in the metric buffer once processing ends
    clear_metric_buffer()
    # summary of the pass
    logger.info('Total chunks created: ' + str(track['chunk_count']))
    logger.info('Total {} entries: {}'.format(
        if_config_vars['project_type'].lower(), track['entry_count']))
def clear_metric_buffer():
    """Move every buffered row into the current chunk and send it all."""
    for count, row in enumerate(list(metric_buffer['buffer_dict'].values()), start=1):
        # detach the component mapping and track it separately
        component_map = row.pop('component_map')
        if component_map:
            track['component_map_list'].append(component_map)
        track['current_row'].append(row)
        # ship a chunk every 100 rows, or sooner once it exceeds the size cap
        if count % 100 == 0 or get_json_size_bytes(track['current_row']) >= if_config_vars['chunk_size']:
            logger.debug('Sending buffer chunk')
            send_data_wrapper()
    # whatever is left forms the final chunk
    if len(track['current_row']) > 0:
        logger.debug('Sending last chunk')
        send_data_wrapper()
    reset_metric_buffer()
def reset_metric_buffer():
    """Empty all metric-buffer collections in place."""
    metric_buffer.update(
        buffer_key_list=[],
        buffer_ts_list=[],
        buffer_dict={},
        buffer_collected_list=[],
        buffer_collected_dict={},
    )
def reset_track():
    """Reset the chunk-tracking globals for the next chunk.

    chunk_count/entry_count are deliberately left alone (per-run totals).
    """
    track.update(
        start_time=time.time(),
        line_count=0,
        current_row=[],
        component_map_list=[],
    )
################################
# Functions to send data to IF #
################################
def send_data_wrapper():
    """Send the current chunk to InsightFinder, then reset the tracker."""
    elapsed = round(time.time() - track['start_time'], 2)
    logger.debug('--- Chunk creation time: {} seconds ---'.format(elapsed))
    send_data_to_if(track['current_row'])
    track['chunk_count'] += 1
    reset_track()
def send_data_to_if(chunk_metric_data):
    """POST one chunk of collected rows to the InsightFinder API.

    chunk_metric_data: list of row dicts (typically track['current_row']).
    Honors cli_config_vars['testing'] (prepare and log but do not send).
    """
    send_data_time = time.time()
    # prepare data for metric streaming agent
    data_to_post = initialize_api_post_data()
    if 'DEPLOYMENT' in if_config_vars['project_type'] or 'INCIDENT' in if_config_vars['project_type']:
        # these project types expect each row's payload pre-serialized
        for chunk in chunk_metric_data:
            chunk['data'] = json.dumps(chunk['data'])
    data_to_post[get_data_field_from_project_type()] = json.dumps(chunk_metric_data)
    # add component mapping to the post data, de-duplicated by instanceName
    track['component_map_list'] = list({v['instanceName']: v for v in track['component_map_list']}.values())
    data_to_post['instanceMetaData'] = json.dumps(track['component_map_list'] or [])
    # FIX: guard the debug peek — an empty chunk list used to raise IndexError here
    if chunk_metric_data:
        logger.debug('First:\n' + str(chunk_metric_data[0]))
        logger.debug('Last:\n' + str(chunk_metric_data[-1]))
    logger.info('Total Data (bytes): ' + str(get_json_size_bytes(data_to_post)))
    logger.info('Total Lines: ' + str(track['line_count']))
    # do not send if only testing
    if cli_config_vars['testing']:
        return
    # send the data
    post_url = urllib.parse.urljoin(if_config_vars['if_url'], get_api_from_project_type())
    send_request(post_url, 'POST', 'Could not send request to IF',
                 str(get_json_size_bytes(data_to_post)) + ' bytes of data are reported.',
                 data=data_to_post, verify=False, proxies=if_config_vars['if_proxies'])
    logger.info('--- Send data time: %s seconds ---' % round(time.time() - send_data_time, 2))
def send_request(url, mode='GET', failure_message='Failure!', success_message='Success!', **request_passthrough):
    """Send an HTTP request to *url*, retrying up to ATTEMPTS times.

    mode: 'GET' (default) or 'POST'. Extra keyword arguments are passed
    straight through to requests. Returns the Response on HTTP 200, or -1
    after exhausting all attempts / hitting a fatal request error.
    """
    requests.packages.urllib3.disable_warnings()
    # determine if post or get (default)
    req = requests.post if mode.upper() == 'POST' else requests.get
    req_num = 0
    for req_num in range(ATTEMPTS):
        try:
            response = req(url, **request_passthrough)
            if response.status_code == http.client.OK:
                return response
            # non-200: log and fall through to the next attempt
            # FIX: logger.warn is a deprecated alias of logger.warning
            logger.warning(failure_message)
            logger.info('Response Code: {}\nTEXT: {}'.format(
                response.status_code, response.text))
        # handle various exceptions
        except requests.exceptions.Timeout:
            logger.exception('Timed out. Reattempting...')
            continue
        except requests.exceptions.TooManyRedirects:
            logger.exception('Too many redirects.')
            break
        except requests.exceptions.RequestException as e:
            logger.exception('Exception ' + str(e))
            break
    logger.error('Failed! Gave up after {} attempts.'.format(req_num + 1))
    return -1
def get_data_type_from_project_type():
    """Map the configured project type to its IF data-type label, or exit."""
    project_type = if_config_vars['project_type']
    for token, label in (('METRIC', 'Metric'), ('LOG', 'Log'), ('ALERT', 'Alert'),
                         ('INCIDENT', 'Incident'), ('DEPLOYMENT', 'Deployment')):
        if token in project_type:
            return label
    logger.warning('Project Type not correctly configured')
    sys.exit(1)
def get_insight_agent_type_from_project_type():
    """Map config to the insight agent type (container / replay / custom)."""
    if agent_config_vars.get('containerize'):
        return 'containerReplay' if if_config_vars['is_replay'] else 'containerStreaming'
    if if_config_vars['is_replay']:
        return 'MetricFile' if 'METRIC' in if_config_vars['project_type'] else 'LogFile'
    return 'Custom'
def get_agent_type_from_project_type():
    """Use project type to determine agent type.

    INCIDENT and DEPLOYMENT projects don't use this.
    """
    replaying = if_config_vars['is_replay']
    if 'METRIC' in if_config_vars['project_type']:
        return 'MetricFileReplay' if replaying else 'CUSTOM'
    return 'LogFileReplay' if replaying else 'LogStreaming'
def get_data_field_from_project_type():
    """Pick the POST field name carrying the data, based on project type."""
    # INCIDENT and DEPLOYMENT use dedicated fields; METRIC, LOG and ALERT share one
    if 'INCIDENT' in if_config_vars['project_type']:
        return 'incidentData'
    if 'DEPLOYMENT' in if_config_vars['project_type']:
        return 'deploymentData'
    return 'metricData'
def get_api_from_project_type():
    """Pick the IF API endpoint to post to, based on project type."""
    # INCIDENT and DEPLOYMENT use dedicated endpoints; METRIC, LOG and ALERT share one
    if 'INCIDENT' in if_config_vars['project_type']:
        return 'incidentdatareceive'
    if 'DEPLOYMENT' in if_config_vars['project_type']:
        return 'deploymentEventReceive'
    return 'customprojectrawdata'
def initialize_api_post_data():
    """Build the unchanging portion of the API POST payload."""
    payload = {
        'userName': if_config_vars['user_name'],
        'licenseKey': if_config_vars['license_key'],
        'projectName': if_config_vars['project_name'],
        'instanceName': HOSTNAME,
        'agentType': get_agent_type_from_project_type(),
    }
    if 'METRIC' in if_config_vars['project_type'] and 'sampling_interval' in if_config_vars:
        payload['samplingInterval'] = str(if_config_vars['sampling_interval'])
    logger.debug(payload)
    return payload
if __name__ == "__main__":
    # declare a few vars
    # regexes used by the make_safe_* helpers to sanitize names sent to IF
    TRUE = regex.compile(r"T(RUE)?", regex.IGNORECASE)
    FALSE = regex.compile(r"F(ALSE)?", regex.IGNORECASE)
    SPACES = regex.compile(r"\s+")
    SLASHES = regex.compile(r"\/+")
    UNDERSCORE = regex.compile(r"\_+")
    COLONS = regex.compile(r"\:+")
    LEFT_BRACE = regex.compile(r"\[")
    RIGHT_BRACE = regex.compile(r"\]")
    PERIOD = regex.compile(r"\.")
    COMMA = regex.compile(r"\,")
    NON_ALNUM = regex.compile(r"[^a-zA-Z0-9]")
    FORMAT_STR = regex.compile(r"{(.*?)}")
    # short hostname (domain part stripped)
    HOSTNAME = socket.gethostname().partition('.')[0]
    # accepted timestamp formats; index 0 doubles as the logging date format
    ISO8601 = ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S', '%Y%m%dT%H%M%SZ', 'epoch']
    JSON_LEVEL_DELIM = '.'
    CSV_DELIM = r",|\t"
    ATTEMPTS = 3
    CACHE_NAME = 'cache.db'
    # global mutable state shared by the collection/sending helpers
    track = dict()
    metric_buffer = dict()
    # get config, set up logging, then run one collection pass
    cli_config_vars = get_cli_config_vars()
    logger = set_logger_config(cli_config_vars['log_level'])
    logger.debug(cli_config_vars)
    if_config_vars = get_if_config_vars()
    agent_config_vars = get_agent_config_vars()
    print_summary_info()
    initialize_data_gathering()
| 36.896226 | 121 | 0.604477 |
ace3b433f72e0b4a515cb7f42b6d3d34af3e3568 | 442 | py | Python | shell_sort.py | prkhrv/Algorithms | 6f7c9bb907a6ee48f4a0267e8e948bcb537ad2c6 | [
"MIT"
] | null | null | null | shell_sort.py | prkhrv/Algorithms | 6f7c9bb907a6ee48f4a0267e8e948bcb537ad2c6 | [
"MIT"
] | null | null | null | shell_sort.py | prkhrv/Algorithms | 6f7c9bb907a6ee48f4a0267e8e948bcb537ad2c6 | [
"MIT"
def shellSort(arr):
    """Sort *arr* in place using Shell sort (gap sequence n/2, n/4, ..., 1).

    FIX: dataset metadata was fused onto the def line, leaving the chunk
    syntactically invalid; the definition is restored as clean Python.
    """
    n = len(arr)
    gap = n // 2
    while gap > 0:
        # gapped insertion sort: elements gap apart form sorted subsequences
        for i in range(gap, n):
            temp = arr[i]
            j = i
            while j >= gap and arr[j - gap] > temp:
                arr[j] = arr[j - gap]
                j = j - gap
            arr[j] = temp
        gap //= 2
# simple driver: read five integers from stdin, sort them, and print the result
arr = []
for i in range(0,5):
    b = int(input())
    arr.append(b)
shellSort(arr)
print(arr)
| 17 | 51 | 0.39819 |
ace3b4ca5e798d819356f106129de8a031dcdd71 | 5,413 | py | Python | app/tests/test_livestorm.py | MTES-MCT/mobilic-api | b3754de2282262fd60a27dc90e40777df9c1e230 | [
"MIT"
] | null | null | null | app/tests/test_livestorm.py | MTES-MCT/mobilic-api | b3754de2282262fd60a27dc90e40777df9c1e230 | [
"MIT"
] | 8 | 2021-04-19T17:47:55.000Z | 2022-02-16T17:40:18.000Z | app/tests/test_livestorm.py | MTES-MCT/mobilic-api | b3754de2282262fd60a27dc90e40777df9c1e230 | [
"MIT"
] | null | null | null | import json
from unittest.mock import patch
from app import app
from app.helpers.livestorm import livestorm
from app.tests import BaseTest
def generate_livestorm_response_payload(
    number_of_pages, event_title="Webinaire Mobilic"
):
    """Build a fake Livestorm /sessions API response as parsed JSON.

    Each page carries two sessions; *number_of_pages* drives the pagination
    meta fields (record_count = 2 * pages) and *event_title* is injected into
    the included event object.
    """
    # canned JSON captured from the Livestorm API, with title and pagination
    # counts spliced in
    raw_json = (
        '{"data":[{"id":"487","type":"sessions","attributes":{"event_id":"3b0","status":"upcoming","timezone":"Europe/Paris","room_link":"https://app.livestorm.co","attendees_count":0,"duration":null,"estimated_started_at":1638880200,"started_at":0,"ended_at":0,"canceled_at":0,"created_at":1625046453,"updated_at":1631002598,"registrants_count":54},"relationships":{"event":{"data":{"type":"events","id":"3b0"}}}},{"id":"4d9","type":"sessions","attributes":{"event_id":"3b0","status":"upcoming","timezone":"Europe/Paris","room_link":"https://app.livestorm.co","attendees_count":0,"duration":null,"estimated_started_at":1635856200,"started_at":0,"ended_at":0,"canceled_at":0,"created_at":1625046453,"updated_at":1630996348,"registrants_count":52},"relationships":{"event":{"data":{"type":"events","id":"3b0"}}}}],"included":[{"id":"3b0","type":"events","attributes":{"title":"'
        + event_title
        + '","slug":"bla-bla-bla-3","registration_link":"https://app.livestorm.co","estimated_duration":30,"registration_page_enabled":true,"everyone_can_speak":false,"description":null,"status":"published","light_registration_page_enabled":true,"recording_enabled":true,"recording_public":null,"show_in_company_page":false,"chat_enabled":true,"polls_enabled":true,"questions_enabled":true,"language":"fr","published_at":1624092934,"created_at":1624092921,"updated_at":1631524373,"owner":{"id":"2fb","type":"people","attributes":{"role":"team_member","created_at":1611935738,"updated_at":1631524373,"timezone":"Europe/Paris","first_name":"Equipe","last_name":"BLA BLA","email":"blabla","avatar_link":null}},"sessions_count":5,"fields":[{"id":"email","type":"text","order":0,"required":true},{"id":"first_name","type":"text","order":1,"required":true},{"id":"last_name","type":"text","order":2,"required":true},{"id":"avatar","type":"file","order":3,"required":false}]}}],"meta":{"record_count":'
        + str(2 * number_of_pages)
        + ',"page_count":'
        + str(number_of_pages)
        + ',"items_per_page":2}}'
    )
    return json.loads(raw_json)
LIVESTORM_ENDPOINT = "/sessions?filter[status]=upcoming&include=event"
MOBILIC_WEBINARS_ENDPOINT = "/next-webinars"
class TestLiveStormWebinars(BaseTest):
    """Tests for the Livestorm webinar listing (client pagination, filtering,
    HTTP endpoint, and caching). The Livestorm HTTP layer is mocked out."""
    @patch(
        "app.helpers.livestorm.LivestormAPIClient._request_page_and_get_results_and_page_count"
    )
    def test_livestorm_pagination(self, mock):
        # the client must request every page exactly once and aggregate results
        for number_of_pages in [1, 2, 3, 5, 10]:
            mock.reset_mock()
            mock.side_effect = (
                lambda *args, **kwargs: generate_livestorm_response_payload(
                    number_of_pages
                )
            )
            webinars = livestorm.get_next_webinars()
            self.assertEqual(mock.call_count, number_of_pages)
            for page_number in range(0, number_of_pages):
                mock.assert_any_call(LIVESTORM_ENDPOINT, number=page_number)
            # two sessions per page (see generate_livestorm_response_payload)
            self.assertEqual(len(webinars), 2 * number_of_pages)
    @patch(
        "app.helpers.livestorm.LivestormAPIClient._request_page_and_get_results_and_page_count"
    )
    def test_livestorm_request_only_returns_mobilic_events(self, mock):
        # events whose title does not mention Mobilic must be filtered out
        mock.side_effect = (
            lambda *args, **kwargs: generate_livestorm_response_payload(
                2, "Webinaire beta.gouv"
            )
        )
        webinars = livestorm.get_next_webinars()
        self.assertEqual(len(webinars), 0)
        mock.reset_mock()
        mock.side_effect = (
            lambda *args, **kwargs: generate_livestorm_response_payload(
                2, "Présentation Mobilic"
            )
        )
        webinars = livestorm.get_next_webinars()
        self.assertEqual(len(webinars), 4)
    @patch(
        "app.helpers.livestorm.LivestormAPIClient._request_page_and_get_results_and_page_count"
    )
    def test_webinars_endpoint(self, mock):
        # the Flask endpoint serves the webinar list as JSON
        app.config["LIVESTORM_API_TOKEN"] = "abc"
        with app.test_client() as c:
            mock.side_effect = (
                lambda *args, **kwargs: generate_livestorm_response_payload(
                    2, "Présentation Mobilic"
                )
            )
            webinars_response = c.get(MOBILIC_WEBINARS_ENDPOINT)
            self.assertEqual(webinars_response.status_code, 200)
            self.assertEqual(len(webinars_response.json), 4)
    @patch(
        "app.helpers.livestorm.LivestormAPIClient._request_page_and_get_results_and_page_count"
    )
    def test_webinars_endpoint_caches_livestorm_requests(self, mock):
        # a second request must be served from cache: Livestorm is not re-queried
        app.config["LIVESTORM_API_TOKEN"] = "abc"
        with app.test_client() as c:
            mock.side_effect = (
                lambda *args, **kwargs: generate_livestorm_response_payload(
                    2, "Présentation Mobilic"
                )
            )
            webinars_response = c.get(MOBILIC_WEBINARS_ENDPOINT)
            self.assertEqual(webinars_response.status_code, 200)
            self.assertEqual(len(webinars_response.json), 4)
            mock.reset_mock()
            webinars_response = c.get(MOBILIC_WEBINARS_ENDPOINT)
            mock.assert_not_called()
            self.assertEqual(len(webinars_response.json), 4)
| 53.594059 | 995 | 0.663957 |
ace3b5b063a5295bb1e6f50ba3b7ec46ed7c5fa7 | 3,034 | py | Python | ucsc/test_api.py | Eyadhamza/UCSC-Genomic-REST-Api-Wrapper | e367b2a97c7de96393c2275a271b8ec8a2908f63 | [
"MIT"
] | 1 | 2021-05-28T16:31:20.000Z | 2021-05-28T16:31:20.000Z | ucsc/test_api.py | Eyadhamza/UCSC-Genomic-REST-Api-Wrapper | e367b2a97c7de96393c2275a271b8ec8a2908f63 | [
"MIT"
] | null | null | null | ucsc/test_api.py | Eyadhamza/UCSC-Genomic-REST-Api-Wrapper | e367b2a97c7de96393c2275a271b8ec8a2908f63 | [
"MIT"
] | null | null | null | import unittest
from ucsc.api import Hub, NotFoundException, Genome, Track, TrackSchema, Chromosome
class MyTestCase(unittest.TestCase):
def test_it_returns_a_hub_object_if_it_exists(self):
hub = Hub.find('ALFA Hub')
self.assertIsInstance(hub, Hub)
with self.assertRaises(NotFoundException):
Hub.find('Hello there')
def test_it_returns_a_hub_object_by_attribute_if_it_exists(self):
hub = Hub.findBy('name','ALFA Hub')
self.assertIsInstance(hub, Hub)
with self.assertRaises(NotFoundException):
Hub.findBy('name','Hello there')
def test_it_returns_all_hubs_as_lists(self):
hubs = Hub.get()
self.assertIsInstance(hubs, list)
def test_it_returns_all_genomes_in_a_hub_as_lists(self):
hub = Hub.findBy('name', 'ALFA Hub')
genomes = hub.genomes
self.assertIsInstance(genomes, list)
def test_it_can_find_a_genome_by_name_if_it_exists(self):
self.assertTrue(Genome.exists('hg38','https://ftp.ncbi.nlm.nih.gov/snp/population_frequency/TrackHub/20200227123210/hub.txt'))
genome = Genome.find('hg38')
self.assertIsInstance(genome, Genome)
with self.assertRaises(NotFoundException):
Genome.find('hsssg38')
def test_it_can_find_a_genome_by_attribute_if_it_exists(self):
genome = Genome.findBy('name','hg38')
self.assertIsInstance(genome, Genome)
with self.assertRaises(NotFoundException):
Genome.findBy('name','hsssg38')
def test_it_can_get_tracks_of_genome(self):
genome = Genome.findBy('name', 'hg38')
tracks = genome.tracks
self.assertIsInstance(tracks, list)
def test_it_can_get_a_track_of_genome(self):
genome = Genome.findBy('name', 'hg38')
self.assertTrue(genome.isTrackExists('gold'))
track = genome.findTrack('gold')
self.assertIsInstance(track, Track)
track2 = genome.findTrackBy('name','gold')
self.assertIsInstance(track2, Track)
def test_it_can_get_track_data_of_a_chromosome(self):
chromosome = Chromosome.find('chr1', genome='hg38', track='gold')
def test_it_can_get_schema_of_track(self):
track = Track.find('gold','hg38')
self.assertIsInstance(track.schema,list)
def test_it_returns_a_list_of_all_chromosomes(self):
chromosomes = Chromosome.get(genome='hg38',track='gold')
self.assertIsInstance(chromosomes,list)
def test_it_checks_if_chromosome_exists(self):
self.assertTrue(Chromosome.exists('chr1',genome='hg38',track='gold'))
self.assertFalse(Chromosome.exists('cr1',genome='hg38',track='gold'))
def test_it_returns_a_chromosome_by_name(self):
chromosomes = Chromosome.find('chr1',genome='hg38',track='gold')
self.assertIsInstance(chromosomes,Chromosome)
def test_practical_example(self):
assembly = Genome.find('wuhCor1')
print(assembly.__dict__)
# standard unittest entry point
if __name__ == '__main__':
    unittest.main()
| 35.27907 | 134 | 0.692485 |
ace3b7640be4f175e781b245c0a906027b83087c | 19,000 | py | Python | keylime/ca_util.py | elfosardo/keylime | 110c4caa560786e0589a206990ec137b7b37cb57 | [
"BSD-2-Clause"
] | null | null | null | keylime/ca_util.py | elfosardo/keylime | 110c4caa560786e0589a206990ec137b7b37cb57 | [
"BSD-2-Clause"
] | null | null | null | keylime/ca_util.py | elfosardo/keylime | 110c4caa560786e0589a206990ec137b7b37cb57 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python3
'''
DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
This material is based upon work supported by the Assistant Secretary of Defense for
Research and Engineering under Air Force Contract No. FA8721-05-C-0002 and/or
FA8702-15-D-0001. Any opinions, findings, conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views of the
Assistant Secretary of Defense for Research and Engineering.
Copyright 2015 Massachusetts Institute of Technology.
The software/firmware is provided to you on an As-Is basis
Delivered to the US Government with Unlimited Rights, as defined in DFARS Part
252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government
rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed
above. Use of this work other than as specifically authorized by the U.S. Government may
violate any copyrights that exist in this work.
'''
import sys
import os
import base64
import argparse
import configparser
import datetime
import getpass
import zipfile
import io
import socket
from keylime import revocation_notifier
import threading
import http.server
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import functools
import signal
import time
import yaml
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader as SafeLoader, SafeDumper as SafeDumper
try:
import simplejson as json
except ImportError:
raise("Simplejson is mandatory, please install")
from keylime import crypto
from keylime import cmd_exec
from keylime import common
from keylime import keylime_logging
logger = keylime_logging.init_logging('ca-util')
if common.CA_IMPL=='cfssl':
from keylime import ca_impl_cfssl as ca_impl
elif common.CA_IMPL=='openssl':
from keylime import ca_impl_openssl as ca_impl
else:
raise Exception("Unknown CA implementation: %s"%common.CA_IMPL)
from M2Crypto import X509, EVP, BIO
config = configparser.ConfigParser()
config.read(common.CONFIG_FILE)
"""
Tools for creating a CA cert and signed server certs.
Divined from http://svn.osafoundation.org/m2crypto/trunk/tests/test_x509.py
The mk_temporary_xxx calls return a NamedTemporaryFile with certs.
Usage ;
# Create a temporary CA cert and it's private key
cacert, cakey = mk_temporary_cacert()
# Create a temporary server cert+key, signed by the CA
server_cert = mk_temporary_cert(cacert.name, cakey.name, '*.server.co.uk')
"""
# protips
# openssl verify -CAfile cacert.crt cacert.crt cert.crt
# openssl x509 -in cert.crt -noout -text
# openssl x509 -in cacert.crt -noout -text
global_password=None
def globalcb(*args):
    """M2Crypto passphrase callback: return the cached password as bytes.

    Extra positional arguments from the caller are accepted and ignored.
    """
    return global_password.encode()
def setpassword(pw):
    """Validate *pw* and cache it as the module-wide keystore password.

    Raises Exception when *pw* is empty.
    """
    global global_password
    if not pw:
        raise Exception("You must specify a password!")
    global_password = pw
def cmd_mkcert(workingdir,name):
    """Create a certificate + keypair for *name*, signed by the CA in *workingdir*.

    Writes <name>-cert.crt, <name>-private.pem (password-protected) and
    <name>-public.pem, stores the raw private key in the encrypted keystore,
    and bumps the CA serial counter.
    """
    cwd = os.getcwd()
    try:
        common.ch_dir(workingdir,logger)
        priv = read_private()
        cacert = X509.load_cert('cacert.crt')
        ca_pk = EVP.load_key_string(priv[0]['ca'])
        cert,pk = ca_impl.mk_signed_cert(cacert,ca_pk,name,priv[0]['lastserial']+1)
        with open('%s-cert.crt'%name, 'wb') as f:
            f.write(cert.as_pem())
        # keep the unencrypted private key only inside the encrypted keystore
        f = BIO.MemoryBuffer()
        pk.save_key_bio(f,None)
        priv[0][name]=f.getvalue()
        f.close()
        #increment serial number after successful creation
        priv[0]['lastserial']+=1
        write_private(priv)
        # write out the private key with password (0600 so only the owner can read it)
        with os.fdopen(os.open("%s-private.pem"%name,os.O_WRONLY | os.O_CREAT,0o600), 'wb') as f:
            biofile = BIO.File(f)
            pk.save_key_bio(biofile, 'aes_256_cbc', globalcb)
            biofile.close()
        pk.get_rsa().save_pub_key('%s-public.pem'%name)
        # sanity check: the new cert must validate against the CA
        cc = X509.load_cert('%s-cert.crt'%name)
        if cc.verify(cacert.get_pubkey()):
            logger.info("Created certificate for name %s successfully in %s"%(name,workingdir))
        else:
            logger.error("ERROR: Cert does not validate against CA")
    finally:
        # always restore the caller's working directory
        os.chdir(cwd)
def cmd_init(workingdir):
    """(Re)initialize a CA in *workingdir*.

    Wipes any existing material, then creates the CA cert/key, the encrypted
    keystore (private.yml), and an empty CRL (cacrl.der / cacrl.pem).
    """
    cwd = os.getcwd()
    try:
        common.ch_dir(workingdir,logger)
        # start from a clean slate
        rmfiles("*.pem")
        rmfiles("*.crt")
        rmfiles("*.zip")
        rmfiles("*.der")
        rmfiles("private.yml")
        # the two CA backends return slightly different tuples
        if common.CA_IMPL=='cfssl':
            pk_str, cacert, ca_pk, _ = ca_impl.mk_cacert()
        elif common.CA_IMPL=='openssl':
            cacert, ca_pk, _ = ca_impl.mk_cacert()
        else:
            raise Exception("Unknown CA implementation: %s"%common.CA_IMPL)
        priv=read_private()
        # write out keys
        with open('cacert.crt', 'wb') as f:
            f.write(cacert.as_pem())
        # CA private key lives only inside the encrypted keystore
        f = BIO.MemoryBuffer()
        ca_pk.save_key_bio(f,None)
        priv[0]['ca']=f.getvalue()
        f.close()
        # store the last serial number created.
        # the CA is always serial # 1
        priv[0]['lastserial'] = 1
        write_private(priv)
        ca_pk.get_rsa().save_pub_key('ca-public.pem')
        # generate an empty crl
        if common.CA_IMPL=='cfssl':
            crl = ca_impl.gencrl([],cacert.as_pem(), pk_str)
        elif common.CA_IMPL=='openssl':
            crl = ca_impl.gencrl([],cacert.as_pem(),str(priv[0]['ca']))
        else:
            raise Exception("Unknown CA implementation: %s"%common.CA_IMPL)
        if isinstance(crl, str):
            crl = crl.encode('utf-8')
        with open('cacrl.der','wb') as f:
            f.write(crl)
        convert_crl_to_pem("cacrl.der","cacrl.pem")
        # Sanity checks...
        cac = X509.load_cert('cacert.crt')
        if cac.verify():
            logger.info("CA certificate created successfully in %s"%workingdir)
        else:
            logger.error("ERROR: Cert does not self validate")
    finally:
        # always restore the caller's working directory
        os.chdir(cwd)
def cmd_certpkg(workingdir,name,insecure=False):
    """Bundle *name*'s cert, keys, CA cert and CRL into <name>-pkg.zip.

    Returns (zip_bytes, cert_serial, cert_subject). The returned in-memory
    zip contains the unencrypted private key; the zip written to disk holds
    the password-protected key unless *insecure* is set.
    """
    cwd = os.getcwd()
    try:
        common.ch_dir(workingdir,logger)
        # zip up the crt, private key, and public key
        with open('cacert.crt','r') as f:
            cacert = f.read()
        with open("%s-public.pem"%name,'r') as f:
            pub = f.read()
        with open("%s-cert.crt"%name,'r') as f:
            cert = f.read()
        with open('cacrl.der','rb') as f:
            crl = f.read()
        with open('cacrl.pem','r') as f:
            crlpem = f.read()
        cert_obj = X509.load_cert_string(cert)
        serial = cert_obj.get_serial_number()
        subject = str(cert_obj.get_subject())
        # unencrypted private key comes from the keystore,
        # the password-protected one from disk
        priv = read_private()
        private = priv[0][name]
        with open("%s-private.pem"%name,'r') as f:
            prot_priv = f.read()
        #code to create a pem formatted protected private key using the keystore password
        # pk = EVP.load_key_string(str(priv[0][name]))
        # f = BIO.MemoryBuffer()
        # # globalcb will return the global password provided by the user
        # pk.save_key_bio(f, 'aes_256_cbc', globalcb)
        # prot_priv = f.getvalue()
        # f.close()
        # no compression to avoid extraction errors in tmpfs
        sf = io.BytesIO()
        with zipfile.ZipFile(sf,'w',compression=zipfile.ZIP_STORED) as f:
            f.writestr('%s-public.pem'%name,pub)
            f.writestr('%s-cert.crt'%name,cert)
            f.writestr('%s-private.pem'%name,private)
            f.writestr('cacert.crt',cacert)
            f.writestr('cacrl.der',crl)
            f.writestr('cacrl.pem',crlpem)
        pkg = sf.getvalue()
        if insecure:
            # FIX: logger.warn is deprecated -> logger.warning
            logger.warning("Unprotected private keys in cert package being written to disk")
            # FIX: pkg is bytes; writing it to a text-mode file raised TypeError
            with open('%s-pkg.zip'%name,'wb') as f:
                f.write(pkg)
        else:
            # actually output the package to disk with a protected private key
            with zipfile.ZipFile('%s-pkg.zip'%name,'w',compression=zipfile.ZIP_STORED) as f:
                f.writestr('%s-public.pem'%name,pub)
                f.writestr('%s-cert.crt'%name,cert)
                f.writestr('%s-private.pem'%name,prot_priv)
                f.writestr('cacert.crt',cacert)
                f.writestr('cacrl.der',crl)
                f.writestr('cacrl.pem',crlpem)
        logger.info("Creating cert package for %s in %s-pkg.zip"%(name,name))
        return pkg,serial,subject
    finally:
        os.chdir(cwd)
def convert_crl_to_pem(derfile, pemfile):
    """Convert a DER-encoded CRL to PEM via the openssl CLI.

    For the 'openssl' CA implementation an empty PEM file is written instead.
    """
    if config.get('general', 'ca_implementation') == 'openssl':
        # just create an empty placeholder PEM file
        with open(pemfile, 'w') as out:
            out.write("")
    else:
        cmd_exec.run("openssl crl -in %s -inform der -out %s" % (derfile, pemfile), lock=False)
def get_crl_distpoint(cert_path):
    """Extract the first CRL distribution-point URI from a certificate file.

    Returns '' when the certificate declares no distribution point.
    """
    cert_text = X509.load_cert(cert_path).as_text()
    in_crl_section = False
    for raw_line in cert_text.split('\n'):
        line = raw_line.strip()
        if line.startswith("X509v3 CRL Distribution Points:"):
            in_crl_section = True
        # the URI line follows the section header in openssl's text dump
        if in_crl_section and line.startswith("URI:"):
            return line[4:]
    return ""
# to check: openssl crl -inform DER -text -noout -in cacrl.der
def cmd_revoke(workingdir,name=None,serial=None):
    """Revoke a certificate (by *name* or by *serial*) and regenerate the CRL.

    Exactly one of name/serial must be given. Returns the new CRL bytes and
    writes it to cacrl.der / cacrl.pem in *workingdir*.
    """
    cwd = os.getcwd()
    try:
        common.ch_dir(workingdir,logger)
        priv = read_private()
        if name is not None and serial is not None:
            raise Exception("You may not specify a cert and a serial at the same time")
        if name is None and serial is None:
            raise Exception("You must specify a cert or a serial to revoke")
        if name is not None:
            # load up the cert
            cert = X509.load_cert("%s-cert.crt"%name)
            serial = cert.get_serial_number()
        #convert serial to string
        serial = str(serial)
        # get the ca key cert and keys as strings
        with open('cacert.crt','r') as f:
            cacert = f.read()
        ca_pk = priv[0]['ca'].decode('utf-8')
        # record the serial in the keystore's revocation list (idempotent)
        if serial not in priv[0]['revoked_keys']:
            priv[0]['revoked_keys'].append(serial)
        crl = ca_impl.gencrl(priv[0]['revoked_keys'],cacert,ca_pk)
        write_private(priv)
        # write out the CRL to the disk
        with open('cacrl.der','wb') as f:
            f.write(crl)
        convert_crl_to_pem("cacrl.der","cacrl.pem")
    finally:
        os.chdir(cwd)
    return crl
# regenerate the crl without revoking anything
def cmd_regencrl(workingdir):
    """Rebuild the CRL from the current revocation list (e.g. before expiry).

    Returns the new CRL bytes and writes cacrl.der / cacrl.pem.
    """
    cwd = os.getcwd()
    try:
        common.ch_dir(workingdir,logger)
        priv = read_private()
        # get the ca key cert and keys as strings
        with open('cacert.crt','r') as f:
            cacert = f.read()
        ca_pk = str(priv[0]['ca'])
        crl = ca_impl.gencrl(priv[0]['revoked_keys'],cacert,ca_pk)
        write_private(priv)
        # write out the CRL to the disk
        with open('cacrl.der','wb') as f:
            f.write(crl)
        convert_crl_to_pem("cacrl.der","cacrl.pem")
    finally:
        os.chdir(cwd)
    return crl
def cmd_listen(workingdir,cert_path):
    """Serve the CRL over HTTP and revoke certs on revocation notifications.

    Starts a threaded HTTP server on common.CRL_PORT, a daemon thread that
    re-issues the CRL shortly before it expires, then blocks waiting for
    revocation events from the revocation notifier (using *cert_path* to
    verify them).
    """
    cwd = os.getcwd()
    try:
        common.ch_dir(workingdir,logger)
        #just load up the password for later
        read_private(True)
        serveraddr = ('', common.CRL_PORT)
        server = ThreadedCRLServer(serveraddr,CRLHandler)
        # publish an existing CRL right away, if we have one
        if os.path.exists('cacrl.der'):
            logger.info("Loading existing crl: %s"%os.path.abspath("cacrl.der"))
            with open('cacrl.der','rb') as f:
                server.setcrl(f.read())
        t = threading.Thread(target=server.serve_forever)
        logger.info("Hosting CRL on %s:%d"%(socket.getfqdn(),common.CRL_PORT))
        t.start()
        def check_expiration():
            # background loop: re-issue the CRL when it is within 6 hours of expiry
            logger.info("checking CRL for expiration every hour")
            while True:
                try:
                    if os.path.exists('cacrl.der'):
                        # parse the "Next Update:" field out of openssl's text dump
                        retout = cmd_exec.run("openssl crl -inform der -in cacrl.der -text -noout",lock=False)['retout']
                        for line in retout:
                            line = line.strip()
                            if line.startswith(b"Next Update:"):
                                expire = datetime.datetime.strptime(line[13:].decode('utf-8'),"%b %d %H:%M:%S %Y %Z")
                                # check expiration within 6 hours
                                in1hour = datetime.datetime.utcnow()+datetime.timedelta(hours=6)
                                if expire<=in1hour:
                                    logger.info("Certificate to expire soon %s, re-issuing"%expire)
                                    cmd_regencrl(workingdir)
                    # check a little less than every hour
                    time.sleep(3540)
                except KeyboardInterrupt:
                    logger.info("TERM Signal received, shutting down...")
                    #server.shutdown()
                    break
        t2 = threading.Thread(target=check_expiration)
        t2.setDaemon(True)
        t2.start()
        def revoke_callback(revocation):
            # a revocation message carries the serial of the cert to revoke
            json_meta = json.loads(revocation['meta_data'])
            serial = json_meta['cert_serial']
            if revocation.get('type',None) != 'revocation' or serial is None:
                logger.error("Unsupported revocation message: %s"%revocation)
                return
            logger.info("Revoking certificate: %s"%serial)
            server.setcrl(cmd_revoke(workingdir, None, serial))
        try:
            # keep listening for revocation events, retrying on broker outages
            while True:
                try:
                    revocation_notifier.await_notifications(revoke_callback,revocation_cert_path=cert_path)
                except Exception as e:
                    logger.exception(e)
                    logger.warning("No connection to revocation server, retrying in 10s...")
                    time.sleep(10)
        except KeyboardInterrupt:
            logger.info("TERM Signal received, shutting down...")
            server.shutdown()
            sys.exit()
    finally:
        os.chdir(cwd)
class ThreadedCRLServer(ThreadingMixIn, HTTPServer):
    """HTTP server that handles each request in its own thread and holds the
    most recently published CRL for CRLHandler to serve."""

    # latest CRL bytes; None until one has been published via setcrl()
    published_crl = None

    def setcrl(self, crl):
        """Publish *crl* so subsequent GET requests serve it."""
        self.published_crl = crl
class CRLHandler(BaseHTTPRequestHandler):
    """Request handler that serves the CRL published on ThreadedCRLServer."""
    def do_GET(self):
        """Reply with the current CRL bytes, or 404 if none is published yet."""
        logger.info('GET invoked from ' + str(self.client_address) + ' with uri:' + self.path)
        if self.server.published_crl is None:
            # nothing published yet
            self.send_response(404)
            self.end_headers()
        else:
            # send back the CRL
            self.send_response(200)
            self.end_headers()
            self.wfile.write(self.server.published_crl)
def rmfiles(path):
    """Delete every file matching the glob pattern *path*."""
    import glob
    for match in glob.glob(path):
        os.remove(match)
def write_private(inp):
    """Encrypt and persist the private keystore data to private.yml.

    *inp* is a (priv, salt) pair.  The YAML-serialized priv structure is
    encrypted with a key derived from the module-level global_password and
    the given salt.
    """
    global global_password
    priv, salt = inp
    serialized = yaml.dump(priv, Dumper=SafeDumper)
    derived_key = crypto.kdf(global_password, salt)
    ciphertext = crypto.encrypt(serialized, derived_key)
    payload = {'salt': salt, 'priv': ciphertext}
    # 0o600: the keystore must only be readable by its owner.
    with os.fdopen(os.open('private.yml', os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
        yaml.dump(payload, f, Dumper=SafeDumper)
def read_private(warn=False):
    """Load and decrypt private.yml, prompting for the keystore password.

    Returns a (data, salt) tuple.  When private.yml does not exist yet, an
    empty revoked-keys structure plus a freshly generated salt is returned
    (logging a warning when *warn* is set).
    Raises Exception when the stored blob cannot be decrypted with the
    supplied password.
    """
    global global_password
    if global_password is None:
        setpassword(getpass.getpass("Please enter the password to decrypt your keystore: "))

    if not os.path.exists('private.yml'):
        if warn:
            # file doesn't exist, just invent a salt
            logger.warning("Private certificate data %s does not exist yet." % os.path.abspath("private.yml"))
            logger.warning("Keylime will attempt to load private certificate data again when it is needed.")
        return {'revoked_keys': []}, base64.b64encode(crypto.generate_random_key()).decode()

    with open('private.yml', 'r') as f:
        stored = yaml.load(f, Loader=SafeLoader)
    derived_key = crypto.kdf(global_password, stored['salt'])
    try:
        plain = crypto.decrypt(stored['priv'], derived_key)
    except ValueError:
        raise Exception("Invalid password for keystore")
    return yaml.load(plain, Loader=SafeLoader), stored['salt']
def main(argv=sys.argv):
    """Command-line entry point for the keylime certificate-authority tool.

    Supported commands (-c): init, create, pkg, revoke, listen.
    The create/pkg/revoke commands additionally require a certificate
    name via -n/--name.
    """
    parser = argparse.ArgumentParser(argv[0])
    parser.add_argument('-c', '--command', action='store', dest='command', required=True,
                        help="valid commands are init,create,pkg,revoke,listen")
    parser.add_argument('-n', '--name', action='store',
                        help='the common name of the certificate to create')
    parser.add_argument('-d', '--dir', action='store',
                        help='use a custom directory to store certificates and keys')
    parser.add_argument('-i', '--insecure', action='store_true', default=False,
                        help='create cert packages with unprotected private keys and write them to disk. USE WITH CAUTION!')

    if common.DEVELOP_IN_ECLIPSE and len(argv) == 1:
        # Development-only canned invocations; only the last assignment is
        # effective, the earlier ones are quick toggles for manual testing.
        argv = ['-c', 'init']
        # argv=['-c','create','-n',socket.getfqdn()]
        argv = ['-c', 'create', '-n', 'client']
        # argv=['-c','pkg','-n','client']
        argv = ['-c', 'revoke', '-n', 'client']
        argv = ['-c', 'listen', '-d', 'ca']
    else:
        argv = argv[1:]

    # never prompt for passwords in development mode
    if common.DEVELOP_IN_ECLIPSE:
        setpassword('default')

    args = parser.parse_args(argv)

    # FIX: None comparison should use identity (`is None`), not `== None`.
    if args.dir is None:
        if os.getuid() != 0 and common.REQUIRE_ROOT:
            logger.error("If you don't specify a working directory, this process must be run as root to access %s" % common.WORK_DIR)
            sys.exit(-1)
        workingdir = common.CA_WORK_DIR
    else:
        workingdir = args.dir

    # create/pkg/revoke all operate on a named certificate; validate once
    # instead of repeating the same check in three branches.
    if args.command in ('create', 'pkg', 'revoke') and args.name is None:
        logger.error("you must pass in a name for the certificate using -n (or --name)")
        parser.print_help()
        sys.exit(-1)

    if args.command == 'init':
        cmd_init(workingdir)
    elif args.command == 'create':
        cmd_mkcert(workingdir, args.name)
    elif args.command == 'pkg':
        cmd_certpkg(workingdir, args.name, args.insecure)
    elif args.command == 'revoke':
        cmd_revoke(workingdir, args.name)
    elif args.command == 'listen':
        if args.name is None:
            args.name = "%s/RevocationNotifier-cert.crt" % workingdir
            logger.warning("using default name for revocation cert %s" % args.name)
        cmd_listen(workingdir, args.name)
    else:
        logger.error("Invalid command: %s" % args.command)
        parser.print_help()
        sys.exit(-1)
if __name__ == "__main__":
    # Log any unhandled exception through the configured logger instead of
    # letting a bare traceback hit stderr.
    try:
        main()
    except Exception as err:
        logger.exception(err)
| 34.608379 | 177 | 0.614105 |
ace3b7af30fe602f9efddd9d46d50d4de8eb8d96 | 4,134 | py | Python | azure-mgmt-storage/azure/mgmt/storage/v2017_06_01/operations/skus_operations.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-mgmt-storage/azure/mgmt/storage/v2017_06_01/operations/skus_operations.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | null | null | null | azure-mgmt-storage/azure/mgmt/storage/v2017_06_01/operations/skus_operations.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class SkusOperations(object):
    """SkusOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2017-06-01".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2017-06-01"
        self.config = config

    def list(self, custom_headers=None, raw=False, **operation_config):
        """Lists the available SKUs supported by Microsoft.Storage for given
        subscription.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Sku
        :rtype:
         ~azure.mgmt.storage.v2017_06_01.models.SkuPaged[~azure.mgmt.storage.v2017_06_01.models.Sku]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            if next_link:
                # Continuation request: the service returns a complete URL.
                url = next_link
                query_parameters = {}
            else:
                # First page: build the URL from the operation metadata.
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {
                    'api-version': self._serialize.query("self.api_version", self.api_version, 'str')
                }

            # Construct headers
            header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response into a lazily-paged collection.
        deserialized = models.SkuPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            return models.SkuPaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/skus'}
| 39.371429 | 144 | 0.626754 |
ace3b837a9c226f1224a41fc07ce47fe78bc729d | 1,038 | py | Python | leapp/compat.py | vinzenz/prototype | 6375992ab373aa9fa5f19de06a3ad7a0895b8816 | [
"Apache-2.0"
] | null | null | null | leapp/compat.py | vinzenz/prototype | 6375992ab373aa9fa5f19de06a3ad7a0895b8816 | [
"Apache-2.0"
] | null | null | null | leapp/compat.py | vinzenz/prototype | 6375992ab373aa9fa5f19de06a3ad7a0895b8816 | [
"Apache-2.0"
import sys

__all__ = ('string_types', 'IS_PYTHON2', 'IS_PYTHON3', 'httplib', 'unicode_type', 'raise_with_traceback')

# Interpreter-version flags used to select the compatibility shims below.
IS_PYTHON2 = sys.version_info < (3,)
IS_PYTHON3 = not IS_PYTHON2

if IS_PYTHON2:
    # Python 2 code
    import httplib
    string_types = (str, globals()['__builtins__']['unicode'])
    unicode_type = string_types[1]
    from leapp.compatpy2only import raise_with_traceback
else:
    # Python 3 code
    import http.client as httplib
    string_types = (str,)
    unicode_type = str

    def raise_with_traceback(exc, tb):
        """
        Raise *exc* with the traceback *tb* attached.

        This helper works around the raise-syntax differences between
        Python 2 and Python 3: Python 3 removed the three-argument raise
        statement and added Exception.with_traceback instead.

        :param exc: Exception to raise
        :param tb: Traceback to use
        :return: Nothing
        """
        raise exc.with_traceback(tb)
| 25.95 | 106 | 0.683044 |
ace3b8cd54ca53bc683002cbdaef4ad9b4b0fe1d | 533 | py | Python | day02/t02/models.py | SunShuoJia/pyproject | 71f3cada463fd90243b2cdac8c982fb622f9ef9c | [
"Apache-2.0"
] | null | null | null | day02/t02/models.py | SunShuoJia/pyproject | 71f3cada463fd90243b2cdac8c982fb622f9ef9c | [
"Apache-2.0"
] | null | null | null | day02/t02/models.py | SunShuoJia/pyproject | 71f3cada463fd90243b2cdac8c982fb622f9ef9c | [
"Apache-2.0"
] | null | null | null | from django.db import models
# Create your models here.
class People(models.Model):
    """Basic personal-information record (name, age, sex, birthday,
    marital status)."""

    name = models.CharField(
        max_length=30,
        verbose_name='名字'
    )
    age = models.IntegerField()
    sexy = models.CharField(
        max_length=10,
        verbose_name='性别'
    )
    birthday = models.DateField(
        verbose_name="出生日期"
    )
    is_married = models.BooleanField(
        default=False,
        verbose_name="是否已婚"
    )

    class Meta:
        # BUG FIX: the original had a trailing comma after this assignment,
        # which silently turned verbose_name into the tuple ("个人信息",)
        # instead of a string.
        verbose_name = "个人信息"
        # NOTE(review): "imformation" is a typo, but renaming the table
        # would require a database migration, so it is intentionally kept.
        db_table = "person_imformation"
ace3b9ef536b0725e2e0e9edddc6c3ab11104529 | 11,066 | py | Python | airflow/contrib/operators/jenkins_job_trigger_operator.py | shuva10v/airflow | a6daeb544e815fe350a96d24ae3bb14aee4079a7 | [
"Apache-2.0"
] | 2 | 2020-09-30T01:06:15.000Z | 2021-08-07T09:16:21.000Z | airflow/contrib/operators/jenkins_job_trigger_operator.py | shuva10v/airflow | a6daeb544e815fe350a96d24ae3bb14aee4079a7 | [
"Apache-2.0"
] | 3 | 2019-02-14T09:27:44.000Z | 2019-04-04T18:55:10.000Z | airflow/contrib/operators/jenkins_job_trigger_operator.py | shuva10v/airflow | a6daeb544e815fe350a96d24ae3bb14aee4079a7 | [
"Apache-2.0"
] | 2 | 2019-07-04T02:46:30.000Z | 2019-07-15T00:56:09.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import socket
import json
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.contrib.hooks.jenkins_hook import JenkinsHook
import jenkins
from jenkins import JenkinsException
from requests import Request
import six
from six.moves.urllib.error import HTTPError, URLError
def jenkins_request_with_headers(jenkins_server, req):
    """
    We need to get the headers in addition to the body answer
    to get the location from them.
    This function uses the jenkins_request method from the python-jenkins
    library with just the return call changed.

    :param jenkins_server: The server to query
    :param req: The request to execute
    :return: Dict containing the response body (key body)
        and the headers coming along (headers)
    """
    try:
        response = jenkins_server.jenkins_request(req)
        body = response.content
        headers = response.headers
        if body is None:
            raise jenkins.EmptyResponseException(
                "Error communicating with server[%s]: "
                "empty response" % jenkins_server.server)
        return {'body': body.decode('utf-8'), 'headers': headers}
    except HTTPError as http_err:
        # Jenkins's funky authentication means its nigh impossible to
        # distinguish errors; map status codes to library exceptions.
        if http_err.code in [401, 403, 500]:
            # HTTPError.msg carries the same info as 'reason' and exists on
            # every supported Python version (2.6 lacked 'reason').
            raise JenkinsException(
                'Error in request. ' +
                'Possibly authentication failed [%s]: %s' % (
                    http_err.code, http_err.msg)
            )
        if http_err.code == 404:
            raise jenkins.NotFoundException('Requested item could not be found')
        raise
    except socket.timeout as timeout_err:
        raise jenkins.TimeoutException('Error in request: %s' % timeout_err)
    except URLError as url_err:
        # python 2.6 compatibility to ensure same exception raised
        # since URLError wraps a socket timeout on python 2.6.
        if str(url_err.reason) == "timed out":
            raise jenkins.TimeoutException('Error in request: %s' % url_err.reason)
        raise JenkinsException('Error in request: %s' % url_err.reason)
class JenkinsJobTriggerOperator(BaseOperator):
    """
    Trigger a Jenkins Job and monitor its execution.
    This operator depends on the python-jenkins library,
    version >= 0.4.15, to communicate with the jenkins server.
    You'll also need to configure a Jenkins connection in the connections screen.

    :param jenkins_connection_id: The jenkins connection to use for this job
    :type jenkins_connection_id: str
    :param job_name: The name of the job to trigger
    :type job_name: str
    :param parameters: The parameters block to provide to jenkins. (templated)
    :type parameters: str
    :param sleep_time: How long will the operator sleep between each status
        request for the job (min 1, default 10)
    :type sleep_time: int
    :param max_try_before_job_appears: The maximum number of requests to make
        while waiting for the job to appears on jenkins server (default 10)
    :type max_try_before_job_appears: int
    """
    template_fields = ('parameters',)
    template_ext = ('.json',)
    ui_color = '#f9ec86'

    @apply_defaults
    def __init__(self,
                 jenkins_connection_id,
                 job_name,
                 parameters="",
                 sleep_time=10,
                 max_try_before_job_appears=10,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.job_name = job_name
        self.parameters = parameters
        # Enforce a minimum polling interval of one second.
        self.sleep_time = max(sleep_time, 1)
        self.jenkins_connection_id = jenkins_connection_id
        self.max_try_before_job_appears = max_try_before_job_appears

    def build_job(self, jenkins_server):
        """
        This function makes an API call to Jenkins to trigger a build for 'job_name'.
        It returns a dict with 2 keys: body and headers.
        headers contains also a dict-like object which can be queried to get
        the location to poll in the queue.

        :param jenkins_server: The jenkins server where the job should be triggered
        :return: Dict containing the response body (key body)
            and the headers coming along (headers)
        """
        # Warning if the parameter is too long, the URL can be longer than
        # the maximum allowed size
        if self.parameters and isinstance(self.parameters, six.string_types):
            import ast
            # Templated parameters arrive as a string literal; safely
            # evaluate it back into the original Python structure.
            self.parameters = ast.literal_eval(self.parameters)

        if not self.parameters:
            # We need a None to call the non parametrized jenkins api end point
            self.parameters = None

        request = Request(jenkins_server.build_job_url(self.job_name,
                                                       self.parameters, None))
        return jenkins_request_with_headers(jenkins_server, request)

    def poll_job_in_queue(self, location, jenkins_server):
        """
        This method polls the jenkins queue until the job is executed.
        When we trigger a job through an API call,
        the job is first put in the queue without having a build number assigned.
        Thus we have to wait for the job to exit the queue to know its build number.
        To do so, we have to add /api/json (or /api/xml) to the location
        returned by the build_job call and poll this file.
        When an 'executable' block appears in the json, it means the job execution
        started and the field 'number' then contains the build number.

        :param location: Location to poll, returned in the header of the build_job call
        :param jenkins_server: The jenkins server to poll
        :return: The build_number corresponding to the triggered job
        :raises AirflowException: if the job never leaves the queue
        """
        try_count = 0
        location = location + '/api/json'
        # TODO Use get_queue_info instead
        # once it will be available in python-jenkins (v > 0.4.15)
        self.log.info('Polling jenkins queue at the url %s', location)
        while try_count < self.max_try_before_job_appears:
            location_answer = jenkins_request_with_headers(jenkins_server,
                                                           Request(location))
            if location_answer is not None:
                json_response = json.loads(location_answer['body'])
                if 'executable' in json_response:
                    build_number = json_response['executable']['number']
                    self.log.info('Job executed on Jenkins side with the build number %s',
                                  build_number)
                    return build_number
            try_count += 1
            time.sleep(self.sleep_time)
        # BUG FIX: the poll count was previously passed as a stray second
        # constructor argument, so the %d placeholder was never interpolated.
        raise AirflowException("The job hasn't been executed"
                               " after polling the queue %d times"
                               % self.max_try_before_job_appears)

    def get_hook(self):
        """Return the JenkinsHook for the configured connection id."""
        return JenkinsHook(self.jenkins_connection_id)

    def execute(self, context):
        """Trigger the Jenkins job and block until it finishes.

        Returns the build url (when available) for later use, e.g. to
        retrieve an artifact. Raises AirflowException on failure.
        """
        if not self.jenkins_connection_id:
            self.log.error(
                'Please specify the jenkins connection id to use.'
                'You must create a Jenkins connection before'
                ' being able to use this operator')
            raise AirflowException('The jenkins_connection_id parameter is missing,'
                                   'impossible to trigger the job')

        if not self.job_name:
            self.log.error("Please specify the job name to use in the job_name parameter")
            raise AirflowException('The job_name parameter is missing,'
                                   'impossible to trigger the job')

        self.log.info(
            'Triggering the job %s on the jenkins : %s with the parameters : %s',
            self.job_name, self.jenkins_connection_id, self.parameters)
        jenkins_server = self.get_hook().get_jenkins_server()
        jenkins_response = self.build_job(jenkins_server)
        build_number = self.poll_job_in_queue(
            jenkins_response['headers']['Location'], jenkins_server)

        time.sleep(self.sleep_time)
        keep_polling_job = True
        build_info = None
        while keep_polling_job:
            try:
                build_info = jenkins_server.get_build_info(name=self.job_name,
                                                           number=build_number)
                if build_info['result'] is not None:
                    keep_polling_job = False
                    # Check if job had errors.
                    if build_info['result'] != 'SUCCESS':
                        raise AirflowException(
                            'Jenkins job failed, final state : %s.'
                            'Find more information on job url : %s'
                            % (build_info['result'], build_info['url']))
                else:
                    self.log.info('Waiting for job to complete : %s , build %s',
                                  self.job_name, build_number)
                    time.sleep(self.sleep_time)
            except jenkins.NotFoundException as err:
                # BUG FIX: NotFoundException has no 'resp' attribute, so the
                # original 'err.resp.status' raised AttributeError instead of
                # producing the intended error message.
                raise AirflowException(
                    'Jenkins job status check failed. Final error was: %s' % err)
            except jenkins.JenkinsException as err:
                # BUG FIX: str(err) was previously passed as a stray second
                # constructor argument, so the %s placeholder was never filled.
                raise AirflowException(
                    'Jenkins call failed with error : %s, if you have parameters '
                    'double check them, jenkins sends back '
                    'this exception for unknown parameters'
                    'You can also check logs for more details on this exception '
                    '(jenkins_url/log/rss)' % err)
        if build_info:
            # If we can we return the url of the job
            # for later use (like retrieving an artifact)
            return build_info['url']
ace3ba18b3c9a46c36304badf4475320f04bd289 | 1,459 | py | Python | src/papaprice/papaprice.py | chinchillaLiao/papaprice | 98f8f726d3b938ed2857c4b3bc290ed883cf7b92 | [
"MIT"
] | 2 | 2021-04-29T09:54:00.000Z | 2021-05-03T02:06:02.000Z | src/papaprice/papaprice.py | chinchillaLiao/papaprice | 98f8f726d3b938ed2857c4b3bc290ed883cf7b92 | [
"MIT"
] | null | null | null | src/papaprice/papaprice.py | chinchillaLiao/papaprice | 98f8f726d3b938ed2857c4b3bc290ed883cf7b92 | [
"MIT"
] | null | null | null | import re
import requests
import json
import js2py
from bs4 import BeautifulSoup as bs
class PapaPrice():
    """Minimal price-scraper skeleton: fetches a product page by item code
    and parses out its (name, price) -- the parser is not implemented yet."""

    def __init__(self, proxies=None):
        # Desktop Chrome UA so the target site serves the normal page.
        self.user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'
        self.headers = {'user-agent': self.user_agent}
        # Placeholder endpoint -- the real URL template still needs to be found.
        self.url_template = 'https://need.to.find/out'
        self.proxies = proxies

    def _url(self, i_code):
        """Build the product-page URL for the given item code."""
        return self.url_template.format(str(i_code))

    def _headers(self, params=None):
        """Return a copy of the default headers merged with *params*.

        :raises TypeError: if *params* is not a dict.
        """
        # FIX: avoid a mutable default argument, and raise a real error
        # instead of an assert (asserts are stripped under `python -O`).
        if params is None:
            params = {}
        if not isinstance(params, dict):
            raise TypeError('params must be a dict')
        headers = self.headers.copy()
        headers.update(params)
        return headers

    def _proxies(self):
        """Return the proxies mapping supplied at construction time."""
        return self.proxies

    def _request(self, i_code):
        """GET the product page for *i_code* and return the response."""
        url = self._url(i_code)
        headers = self._headers()
        proxies = self._proxies()
        return requests.get(url, headers=headers, proxies=proxies, timeout=10)

    def _parse(self, response):
        """Extract (name, price) from *response*.

        Not implemented yet -- always returns (None, None).
        """
        name = None
        price = None
        '''
        Need to be done
        '''
        return name, price

    def query(self, i_code):
        """Look up an item; returns (name, price) or (None, None) on failure.

        FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        narrowed to Exception so network/parse errors still map to the
        (None, None) "not found" result callers expect.
        """
        try:
            response = self._request(i_code)
            return self._parse(response)
        except Exception:
            return None, None
| 27.018519 | 142 | 0.570254 |
ace3ba450f022c5f386912e3bf640632f6f2eacf | 158 | py | Python | tests/test/src/finish.py | flywheel-apps/bids-app-template-test | 0accb81738359f6a7f0974d726a4cbef2b84838d | [
"MIT"
] | null | null | null | tests/test/src/finish.py | flywheel-apps/bids-app-template-test | 0accb81738359f6a7f0974d726a4cbef2b84838d | [
"MIT"
] | null | null | null | tests/test/src/finish.py | flywheel-apps/bids-app-template-test | 0accb81738359f6a7f0974d726a4cbef2b84838d | [
"MIT"
] | null | null | null |
# editme: add test initialization stuff here e.g. copy files from test_files/ to input/
# vi:set autoindent ts=4 sw=4 expandtab : See Vim, :help 'modeline'
| 31.6 | 87 | 0.734177 |
ace3ba5c8349dec77642c6d5ea424d46115939c9 | 123 | py | Python | ubermagutil/tests/test_init.py | joommf/joommfutils | 2b0d232b9f71869269494b47a73b05b79c68083c | [
"BSD-3-Clause"
] | null | null | null | ubermagutil/tests/test_init.py | joommf/joommfutils | 2b0d232b9f71869269494b47a73b05b79c68083c | [
"BSD-3-Clause"
] | null | null | null | ubermagutil/tests/test_init.py | joommf/joommfutils | 2b0d232b9f71869269494b47a73b05b79c68083c | [
"BSD-3-Clause"
] | null | null | null | import ubermagutil as uu
def test_version():
    """The package must expose a dotted version string."""
    version = uu.__version__
    assert isinstance(version, str)
    assert "." in version
| 17.571429 | 42 | 0.723577 |
ace3bb7d5feaac38c74ac99eb57743107f252e04 | 701 | py | Python | examples/mnist_rbm/show_results.py | magic2du/dlnn | 9a6fdd1e9e84826c402b564853673dd1c9cd12b9 | [
"Apache-2.0"
] | null | null | null | examples/mnist_rbm/show_results.py | magic2du/dlnn | 9a6fdd1e9e84826c402b564853673dd1c9cd12b9 | [
"Apache-2.0"
] | null | null | null | examples/mnist_rbm/show_results.py | magic2du/dlnn | 9a6fdd1e9e84826c402b564853673dd1c9cd12b9 | [
"Apache-2.0"
] | null | null | null |
import cPickle, gzip
import numpy
import os
import sys
pred_file = sys.argv[1]
if '.gz' in pred_file:
pred_mat = cPickle.load(gzip.open(pred_file, 'rb'))
else:
pred_mat = cPickle.load(open(pred_file, 'rb'))
# load the testing set to get the labels
test_data, test_labels = cPickle.load(gzip.open('test.pickle.gz', 'rb'))
test_labels = test_labels.astype(numpy.int32)
correct_number = 0.0
for i in xrange(pred_mat.shape[0]):
p = pred_mat[i, :]
p_sorted = (-p).argsort()
if p_sorted[0] == test_labels[i]:
correct_number += 1
# output the final error rate
error_rate = 100 * (1.0 - correct_number / pred_mat.shape[0])
print 'Error rate is ' + str(error_rate) + ' (%)'
| 23.366667 | 72 | 0.67903 |
ace3bbca0748be68c487ac82754d11afe8472ee6 | 3,819 | py | Python | Python3/savecsv.py | AixMoon/LearnigRepo | ee98fb352735e2b4f97304847b6c0311bc30195e | [
"MIT"
] | 11 | 2020-05-02T20:06:07.000Z | 2021-06-24T10:01:29.000Z | Python3/savecsv.py | AixMoon/LearnigRepo | ee98fb352735e2b4f97304847b6c0311bc30195e | [
"MIT"
] | null | null | null | Python3/savecsv.py | AixMoon/LearnigRepo | ee98fb352735e2b4f97304847b6c0311bc30195e | [
"MIT"
] | 6 | 2020-06-04T04:29:28.000Z | 2020-11-15T08:15:01.000Z | # -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from lxml import etree
import csv
import pickle
import time
timestart = time.time()
with open('/Users/billchen/OneDrive/Workspace/LearningRepo/Python3/专利检索爬虫/output/20160101_20181231_C02F_PAGE_306.pickle', 'rb') as f:
doc_2 = pickle.load(f)
def save_csv(out_txt):
    """Parse one crawled patent-search result page (up to 100 records) out
    of *out_txt* with XPath and append the records to result.csv.

    NOTE(review): structure reconstructed from a whitespace-mangled source;
    the loop counter is assumed to advance every iteration.
    """
    global timestart
    open_csv = open('/Users/billchen/OneDrive/Workspace/LearningRepo/Python3/专利检索爬虫/result.csv', 'a+', newline='')
    out_str = str(out_txt)
    xcontent = etree.HTML(out_str)

    # One column list per CSV field; zipped together row-wise at the end.
    a, b, c, d, e, f, g, h, i, j, k, l, m, n = ([] for _ in range(14))
    head = ['分类号', '申请人', '发明人', '申请号', '申请日', '发明名称', '申请国家', '邮编',
            '联系地址', '国家/省市', '专利公开号', '主分类号', '公开/公告日', '法律状态']

    for num1 in range(1, 101):
        strnum = str(num1)
        prefix = '//*[@id="tab"]/tbody/tr[' + strnum + ']/td/table/tbody/tr['

        def cell(row_expr):
            # Shorthand for one XPath lookup within this record's sub-table.
            return xcontent.xpath(prefix + row_expr)

        tempk = cell('1]/td[2]/text()')
        tempa = cell('10]/td[2]/text()')
        tempb = cell('3]/td[2]/text()')
        tempc = cell('4]/td[2]/text()')
        tempd = cell('1]/td[2]/a/text()')
        tempe = cell('7]/td[2]/text()')
        tempf = cell('2]/td[2]/text()')
        tempg = cell('8]/td[2]/text()')
        temph = cell('12]/td[1]/text()')
        temphh = cell('12]/td[2]/text()')

        if len(tempk) != 0:
            if temph[0][0] == "邮":
                # Row 12 is a postcode line; the address then sits on row 13.
                h.append(temphh[0][0:-1])
                n.append(cell('13]/td[2]/text()')[0])
            else:
                h.append(' ')
                n.append(temphh[0])

        tempi = cell('5]/td[2]/text()')
        tempj = cell('11]/td[2]/text()')
        templ = cell('9]/td[2]/text()')
        tempm = cell('6]/td[2]/text()')

        if len(tempk) != 0:
            m.append(tempm[0][0:-1])
            l.append(templ[0][0:-1])
            j.append(tempj[0][0:-1])
            i.append(tempi[0][0:-1])
            g.append(tempg[0][0:-1])
            f.append(tempf[0][0:-1])
            e.append(tempe[0][0:-1])
            d.append(tempd[0][0:-1])
            c.append(tempc[0][0:-1])
            b.append(tempb[0][0:-1])
            a.append(tempa[0][0:-1])
            k.append(list(filter(lambda x: x, tempk[0].split(' ')))[1])

    label = zip(a, b, c, d, e, f, g, h, i, j, k, l, m, n)
    csv_write = csv.writer(open_csv, dialect='excel')
    csv_write.writerow(head)
    w = 0
    for v in label:
        csv_write.writerow(v)
        w = w + 1
        print(w)
    open_csv.close()
timeend = time.time()
print("%.2fs" % (timeend - timestart))
save_csv(doc_2)
| 33.79646 | 133 | 0.489395 |
ace3bbcc305db8dd36a5bf648d6f2e3bfba49343 | 4,934 | py | Python | main/schema/media_type.py | kristianmk/tator | 0eb75ee9333316b06f773de2b75e8e797a98ffdb | [
"MIT"
] | 50 | 2019-09-18T14:32:18.000Z | 2022-03-31T16:26:07.000Z | main/schema/media_type.py | kristianmk/tator | 0eb75ee9333316b06f773de2b75e8e797a98ffdb | [
"MIT"
] | 566 | 2019-09-18T16:33:40.000Z | 2022-03-31T20:01:38.000Z | main/schema/media_type.py | kristianmk/tator | 0eb75ee9333316b06f773de2b75e8e797a98ffdb | [
"MIT"
] | 19 | 2019-09-21T20:08:12.000Z | 2022-03-17T14:53:11.000Z | from textwrap import dedent
from rest_framework.schemas.openapi import AutoSchema
from ._errors import error_responses
from ._message import message_schema
from ._message import message_with_id_schema
from ._attribute_type import attribute_type_example
from ._entity_type_mixins import entity_type_filter_parameters_schema
boilerplate = dedent("""\
A media type is the metadata definition object for media. It includes file format,
name, description, and may have any number of user defined attribute
types associated with it.
""")
class MediaTypeListSchema(AutoSchema):
    """OpenAPI schema generator for the media type list endpoint
    (GET list / POST create)."""

    def get_operation(self, path, method):
        operation = super().get_operation(path, method)
        if method == 'POST':
            operation['operationId'] = 'CreateMediaType'
        elif method == 'GET':
            operation['operationId'] = 'GetMediaTypeList'
        operation['tags'] = ['Tator']
        return operation

    def get_description(self, path, method):
        if method == 'GET':
            short_desc = 'Get media type list.'
        elif method == 'POST':
            short_desc = 'Create media type.'
        return f"{short_desc}\n\n{boilerplate}"

    def _get_path_parameters(self, path, method):
        return [{
            'name': 'project',
            'in': 'path',
            'required': True,
            'description': 'A unique integer identifying a project.',
            'schema': {'type': 'integer'},
        }]

    def _get_filter_parameters(self, path, method):
        # FIX: previously returned {}; parameter collections are lists
        # everywhere else in this schema module (see MediaTypeDetailSchema),
        # and the empty dict only worked because both are falsy/iterable.
        return []

    def _get_request_body(self, path, method):
        body = {}
        if method == 'POST':
            body = {
                'required': True,
                'content': {'application/json': {
                    'schema': {'$ref': '#/components/schemas/MediaTypeSpec'},
                    'example': {
                        'name': 'My media type',
                        'dtype': 'video',
                        'attribute_types': attribute_type_example,
                    },
                }}}
        return body

    def _get_responses(self, path, method):
        responses = error_responses()
        if method == 'GET':
            responses['200'] = {
                'description': 'Successful retrieval of media type list.',
                'content': {'application/json': {'schema': {
                    'type': 'array',
                    'items': {'$ref': '#/components/schemas/MediaType'},
                }}},
            }
        elif method == 'POST':
            responses['201'] = message_with_id_schema('media type')
        return responses
class MediaTypeDetailSchema(AutoSchema):
    """OpenAPI schema generator for single media type operations
    (retrieve / update / delete)."""

    def get_operation(self, path, method):
        operation = super().get_operation(path, method)
        operation_ids = {
            'GET': 'GetMediaType',
            'PATCH': 'UpdateMediaType',
            'DELETE': 'DeleteMediaType',
        }
        if method in operation_ids:
            operation['operationId'] = operation_ids[method]
        operation['tags'] = ['Tator']
        return operation

    def get_description(self, path, method):
        long_desc = ''
        if method == 'GET':
            short_desc = 'Get media type.'
        elif method == 'PATCH':
            short_desc = 'Update media type.'
        elif method == 'DELETE':
            short_desc = 'Delete media type.'
            long_desc = dedent("""\
            Note that this will also delete any media associated with the media type.
            """)
        return f"{short_desc}\n\n{boilerplate}\n\n{long_desc}"

    def _get_path_parameters(self, path, method):
        return [{
            'name': 'id',
            'in': 'path',
            'required': True,
            'description': 'A unique integer identifying an media type.',
            'schema': {'type': 'integer'},
        }]

    def _get_filter_parameters(self, path, method):
        return []

    def _get_request_body(self, path, method):
        body = {}
        if method == 'PATCH':
            body = {
                'required': True,
                'content': {'application/json': {
                    'schema': {'$ref': '#/components/schemas/MediaTypeUpdate'},
                    'example': {
                        'name': 'New name',
                        'description': 'New description',
                    }
                }}}
        return body

    def _get_responses(self, path, method):
        responses = error_responses()
        if method == 'GET':
            responses['200'] = {
                'description': 'Successful retrieval of media type.',
                'content': {'application/json': {'schema': {
                    '$ref': '#/components/schemas/MediaType',
                }}},
            }
        elif method == 'PATCH':
            responses['200'] = message_schema('update', 'media type')
        elif method == 'DELETE':
            responses['200'] = message_schema('deletion', 'media type')
        return responses
| 35.242857 | 85 | 0.543778 |
ace3bc658ea7aa8cef2148cd287261627085baaa | 1,158 | py | Python | liminal/runners/airflow/tasks/spark.py | michaelloewenstein/incubator-liminal | b8439cfac6cd892ed08283ae75b5ab6e01e13b2c | [
"Apache-2.0"
] | null | null | null | liminal/runners/airflow/tasks/spark.py | michaelloewenstein/incubator-liminal | b8439cfac6cd892ed08283ae75b5ab6e01e13b2c | [
"Apache-2.0"
] | null | null | null | liminal/runners/airflow/tasks/spark.py | michaelloewenstein/incubator-liminal | b8439cfac6cd892ed08283ae75b5ab6e01e13b2c | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from liminal.runners.airflow.model import task
class SparkTask(task.Task):
    """Task type that runs a Spark application.

    Currently a stub: construction delegates entirely to the base
    ``task.Task`` and ``apply_task_to_dag`` performs no work yet.
    """

    def __init__(self, dag, liminal_config, pipeline_config, task_config,
                 parent, trigger_rule):
        # No Spark-specific state yet; forward everything to the base class.
        super().__init__(
            dag, liminal_config, pipeline_config, task_config, parent, trigger_rule
        )

    def apply_task_to_dag(self):
        """Attach this task to the Airflow DAG (not implemented yet)."""
        pass
| 36.1875 | 97 | 0.753022 |
ace3bd52249ea1272cb31480c1cbabd7aebe9eb1 | 76 | py | Python | run.py | dmtdenisa/SimpleUserManagement | 45663c0f782a2df583056bdcf13caaf8b10a1959 | [
"MIT"
] | null | null | null | run.py | dmtdenisa/SimpleUserManagement | 45663c0f782a2df583056bdcf13caaf8b10a1959 | [
"MIT"
] | null | null | null | run.py | dmtdenisa/SimpleUserManagement | 45663c0f782a2df583056bdcf13caaf8b10a1959 | [
"MIT"
] | null | null | null | from userMan import app
if __name__=='__main__':
app.run(debug=True) | 19 | 25 | 0.697368 |
ace3be0941d3b20c22adbbb6deab08b7c919bc80 | 973 | py | Python | zerver/migrations/0256_userprofile_stream_set_recipient_column_values.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 17,004 | 2015-09-25T18:27:24.000Z | 2022-03-31T22:02:32.000Z | zerver/migrations/0256_userprofile_stream_set_recipient_column_values.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 20,344 | 2015-09-25T19:02:42.000Z | 2022-03-31T23:54:40.000Z | zerver/migrations/0256_userprofile_stream_set_recipient_column_values.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 7,271 | 2015-09-25T18:48:39.000Z | 2022-03-31T21:06:11.000Z | from django.db import migrations
class Migration(migrations.Migration):
    """Backfill the ``recipient_id`` columns added by migration 0255.

    Each UserProfile and Stream row is pointed at its matching Recipient
    row; the join conditions below show that Recipient rows with
    ``type = 1`` map to user profiles and ``type = 2`` map to streams.
    The reverse operations simply clear the columns again.
    """
    dependencies = [
        ("zerver", "0255_userprofile_stream_add_recipient_column"),
    ]
    operations = [
        # Link every user profile to its Recipient row (type = 1).
        migrations.RunSQL(
            """
            UPDATE zerver_userprofile
            SET recipient_id = zerver_recipient.id
            FROM zerver_recipient
            WHERE zerver_recipient.type_id = zerver_userprofile.id AND zerver_recipient.type = 1;
            """,
            reverse_sql="UPDATE zerver_userprofile SET recipient_id = NULL",
            elidable=True,
        ),
        # Link every stream to its Recipient row (type = 2).
        migrations.RunSQL(
            """
            UPDATE zerver_stream
            SET recipient_id = zerver_recipient.id
            FROM zerver_recipient
            WHERE zerver_recipient.type_id = zerver_stream.id AND zerver_recipient.type = 2;
            """,
            reverse_sql="UPDATE zerver_stream SET recipient_id = NULL",
            elidable=True,
        ),
    ]
| 30.40625 | 97 | 0.596095 |
ace3be8670f1e538eae8561b80698edc11232329 | 4,744 | py | Python | test/functional/wallet_reorgsrestore.py | Cminor-pools/bitcoinvg | d47a3cf13e06f4fe03d965826f5309e6d5706470 | [
"MIT"
] | null | null | null | test/functional/wallet_reorgsrestore.py | Cminor-pools/bitcoinvg | d47a3cf13e06f4fe03d965826f5309e6d5706470 | [
"MIT"
] | null | null | null | test/functional/wallet_reorgsrestore.py | Cminor-pools/bitcoinvg | d47a3cf13e06f4fe03d965826f5309e6d5706470 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test tx status in case of reorgs while wallet being shutdown.
Wallet txn status rely on block connection/disconnection for its
accuracy. In case of reorgs happening while wallet being shutdown
block updates are not going to be received. At wallet loading, we
check against chain if confirmed txn are still in chain and change
their status if block in which they have been included has been
disconnected.
"""
from decimal import Decimal
import os
import shutil
from test_framework.test_framework import BitcoinVGTestFramework
from test_framework.util import (
assert_equal,
)
class ReorgsRestoreTest(BitcoinVGTestFramework):
    """Functional test: wallet tx status survives a reorg that happens
    while the wallet's node is shut down (see module docstring)."""
    def set_test_params(self):
        """Use three nodes so two competing chains can be built."""
        self.num_nodes = 3
    def skip_test_if_missing_module(self):
        """This test requires wallet support to be compiled in."""
        self.skip_if_no_wallet()
    def run_test(self):
        """Drive the reorg-while-shutdown scenario end to end."""
        # Send a tx from which to conflict outputs later
        txid_conflict_from = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        self.nodes[0].generate(1)
        self.sync_blocks()
        # Disconnect node1 from others to reorg its chain later
        self.disconnect_nodes(0, 1)
        self.disconnect_nodes(1, 2)
        self.connect_nodes(0, 2)
        # Send a tx to be unconfirmed later
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        tx = self.nodes[0].gettransaction(txid)
        self.nodes[0].generate(4)
        tx_before_reorg = self.nodes[0].gettransaction(txid)
        assert_equal(tx_before_reorg["confirmations"], 4)
        # Disconnect node0 from node2 to broadcast a conflict on their respective chains
        self.disconnect_nodes(0, 2)
        # Find the 10-coin output of the funding tx to double-spend below.
        nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txid_conflict_from)["details"] if tx_out["amount"] == Decimal("10"))
        inputs = []
        inputs.append({"txid": txid_conflict_from, "vout": nA})
        outputs_1 = {}
        outputs_2 = {}
        # Create a conflicted tx broadcast on node0 chain and conflicting tx broadcast on node1 chain. Both spend from txid_conflict_from
        outputs_1[self.nodes[0].getnewaddress()] = Decimal("9.99998")
        outputs_2[self.nodes[0].getnewaddress()] = Decimal("9.99998")
        conflicted = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs_1))
        conflicting = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs_2))
        conflicted_txid = self.nodes[0].sendrawtransaction(conflicted["hex"])
        self.nodes[0].generate(1)
        conflicting_txid = self.nodes[2].sendrawtransaction(conflicting["hex"])
        # node2 mines a longer chain, so its version wins after reconnect.
        self.nodes[2].generate(9)
        # Reconnect node0 and node2 and check that conflicted_txid is effectively conflicted
        self.connect_nodes(0, 2)
        self.sync_blocks([self.nodes[0], self.nodes[2]])
        conflicted = self.nodes[0].gettransaction(conflicted_txid)
        conflicting = self.nodes[0].gettransaction(conflicting_txid)
        # Negative confirmations = depth of the block that conflicts with it.
        assert_equal(conflicted["confirmations"], -9)
        assert_equal(conflicted["walletconflicts"][0], conflicting["txid"])
        # Node0 wallet is shutdown
        self.restart_node(0)
        # The block chain re-orgs and the tx is included in a different block
        self.nodes[1].generate(9)
        self.nodes[1].sendrawtransaction(tx["hex"])
        self.nodes[1].generate(1)
        self.nodes[1].sendrawtransaction(conflicted["hex"])
        self.nodes[1].generate(1)
        # Node0 wallet file is loaded on longest sync'ed node1
        self.stop_node(1)
        self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
        shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, self.chain, self.default_wallet_name, self.wallet_data_filename))
        self.start_node(1)
        tx_after_reorg = self.nodes[1].gettransaction(txid)
        # Check that normal confirmed tx is confirmed again but with different blockhash
        assert_equal(tx_after_reorg["confirmations"], 2)
        assert(tx_before_reorg["blockhash"] != tx_after_reorg["blockhash"])
        conflicted_after_reorg = self.nodes[1].gettransaction(conflicted_txid)
        # Check that conflicted tx is confirmed again with blockhash different than previously conflicting tx
        assert_equal(conflicted_after_reorg["confirmations"], 1)
        assert(conflicting["blockhash"] != conflicted_after_reorg["blockhash"])
if __name__ == '__main__':
    # Instantiate and run the functional test when invoked directly.
    test = ReorgsRestoreTest()
    test.main()
| 46.058252 | 176 | 0.70468 |
ace3befde4f195d649ef29349826bf91e4d3941c | 4,830 | py | Python | distance_tracker/settings.py | tkettu/rokego | 95c89c45f69604bb407927a1f684f90cc51dd878 | [
"MIT"
] | null | null | null | distance_tracker/settings.py | tkettu/rokego | 95c89c45f69604bb407927a1f684f90cc51dd878 | [
"MIT"
] | null | null | null | distance_tracker/settings.py | tkettu/rokego | 95c89c45f69604bb407927a1f684f90cc51dd878 | [
"MIT"
] | null | null | null | """
Django settings for distance_tracker project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Read from the environment; os.getenv returns None when SECRET_KEY is unset.
SECRET_KEY = os.getenv('SECRET_KEY')
#SECRET_KEY = os.environ["SECRET_KEY"]
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# For local 404, 500 test etc.
#DEBUG = False
#ALLOWED_HOSTS = ['localhost']
# End for local
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # 3rd party apps
    'bootstrap3',
    'django_tables2',
    'django_filters',
    'crispy_forms',
    # My apps
    'distances',
    'users',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'distance_tracker.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'distance_tracker/templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            # NOTE(review): 'django.template.context_processors.request' is
            # listed twice below; the duplicate is harmless but redundant.
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.request',
            ],
        },
    },
]
WSGI_APPLICATION = 'distance_tracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
# Accepted formats for date form input: day-first and ISO.
DATE_INPUT_FORMATS = ('%d-%m-%Y','%Y-%m-%d')
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
#EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # During development only
# Outgoing mail via Gmail SMTP; the password comes from the environment.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'rokegoteam@gmail.com'
EMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD')
EMAIL_PORT = 587
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
    os.path.join(PROJECT_ROOT, 'static'),
)
# My settings
LOGIN_URL = '/users/login/'
# Settings for bootstrap
BOOTSTRAP3 = {
    'include_jquery': True,
}
# Heroku settings
# Heuristic Heroku detection: dynos run the app from /app.
# NOTE(review): fragile — an env var (e.g. DYNO) would be more robust.
if os.getcwd() == '/app':
    import dj_database_url
    DATABASES = {
        'default': dj_database_url.config(default='postgres://localhost')
    }
    # Honor the 'X-Forwarded-Proto' header for request.is_secure().
    SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
    # Allow all host headers.
    ALLOWED_HOSTS = ['rokego.herokuapp.com']
    DEBUG = False
    # Static asset configuration
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    #STATIC_ROOT = 'staticfiles'
    STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
    STATICFILES_DIRS = (
        os.path.join(BASE_DIR, 'static'),
    )
# Optional machine-local overrides; silently skipped when absent.
try:
    from .local_settings import *
except ImportError:
    pass
| 25.555556 | 92 | 0.70207 |
ace3bf79e9e91ec2d0773c74a2883aafa31be29b | 956 | py | Python | samples/post_request.py | tng016/amazon-kinesis-client-python | e9b733dcd36cf43a91094a10475e678640e9c056 | [
"Apache-2.0"
] | null | null | null | samples/post_request.py | tng016/amazon-kinesis-client-python | e9b733dcd36cf43a91094a10475e678640e9c056 | [
"Apache-2.0"
] | null | null | null | samples/post_request.py | tng016/amazon-kinesis-client-python | e9b733dcd36cf43a91094a10475e678640e9c056 | [
"Apache-2.0"
] | null | null | null | # importing the requests library
import requests
import json
def post_to_app(data):
    """POST *data* to the application's /populate endpoint as JSON.

    The payload is wrapped as ``{'surge': data}`` before sending.

    Returns:
        int: the HTTP status code on success, or
        str: an error description if the request failed.
    """
    # API endpoint of the deployed application.
    API_ENDPOINT = "http://grabassignment-env-1.ea33qnuunt.us-west-1.elasticbeanstalk.com/populate"
    processed_data = {'surge': data}
    # sending post request and saving response as response object
    try:
        headers = {'content-type': 'application/json'}
        # Bounded timeout so a hung server cannot block the caller forever;
        # a timeout raises a RequestException handled below.
        r = requests.post(url=API_ENDPOINT, data=json.dumps(processed_data),
                          headers=headers, timeout=10)
        return r.status_code
    except requests.exceptions.RequestException as e:
        # Bug fix: the original did `"..." + e`, concatenating str with an
        # exception object — that raises TypeError and masks the real error.
        return "Post to application failed. Exception was: {}".format(e)
| 34.142857 | 104 | 0.682008 |
ace3c0a236e7c1fcb141d7672578cf563451dcab | 817 | py | Python | mysite/urls.py | DarkPotatoKing/InternalHackathon | 2170e2508580fe734765a5fa1d4b8afa5ecdbc05 | [
"MIT"
] | null | null | null | mysite/urls.py | DarkPotatoKing/InternalHackathon | 2170e2508580fe734765a5fa1d4b8afa5ecdbc05 | [
"MIT"
] | null | null | null | mysite/urls.py | DarkPotatoKing/InternalHackathon | 2170e2508580fe734765a5fa1d4b8afa5ecdbc05 | [
"MIT"
] | null | null | null | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# URL routing table: requests under /scraper/ are delegated to the scraper
# app's URLconf; /admin/ serves the Django admin site.
urlpatterns = [
    url(r'^scraper/', include('scraper.urls')),
    url(r'^admin/', admin.site.urls),
]
| 35.521739 | 79 | 0.701346 |
ace3c333e41b893456b2af41f51c2610da78bd9c | 650 | py | Python | federatedml/ftl/test/__init__.py | chenj133/FATE | 7065fc73ab83f83e699efec69ff8efb499159ef4 | [
"Apache-2.0"
] | 32 | 2020-06-12T08:39:58.000Z | 2022-03-20T06:57:08.000Z | federatedml/ftl/test/__init__.py | ErikSun2020/FATE | bdda535c7d8a974fc2c43102837964b7da199730 | [
"Apache-2.0"
] | 14 | 2019-11-13T11:25:36.000Z | 2021-12-14T21:31:59.000Z | federatedml/ftl/test/__init__.py | ErikSun2020/FATE | bdda535c7d8a974fc2c43102837964b7da199730 | [
"Apache-2.0"
] | 16 | 2020-06-12T06:51:46.000Z | 2022-03-29T10:23:42.000Z | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# from .util import assert_matrix
| 38.235294 | 75 | 0.749231 |
ace3c362bebd65ef1741f086274328e456212db3 | 263 | py | Python | part-data/test-hex.py | wuljchange/interesting_python | 3fdf9f7f17f7b361be030bb4eadf7aab889b15fe | [
"MIT"
] | 1 | 2019-03-29T14:09:43.000Z | 2019-03-29T14:09:43.000Z | part-data/test-hex.py | wuljchange/interesting_python | 3fdf9f7f17f7b361be030bb4eadf7aab889b15fe | [
"MIT"
] | null | null | null | part-data/test-hex.py | wuljchange/interesting_python | 3fdf9f7f17f7b361be030bb4eadf7aab889b15fe | [
"MIT"
] | null | null | null | import base64
import binascii
if __name__ == "__main__":
s = b'hello world!'
# 2进制转换成16进制
h = binascii.b2a_hex(s)
print(h)
# 16进制转换成2进制
print(binascii.a2b_hex(h))
h1 = base64.b16encode(s)
print(h1)
print(base64.b16decode(h1)) | 18.785714 | 31 | 0.634981 |
ace3c3d4c94945355b62dc4c6fe4d3693c9cd0df | 1,968 | py | Python | setup.py | mjziebarth/geovoronoi | 40c65aefa1c754975c41bda57279e289c6b04222 | [
"Apache-2.0"
] | null | null | null | setup.py | mjziebarth/geovoronoi | 40c65aefa1c754975c41bda57279e289c6b04222 | [
"Apache-2.0"
] | null | null | null | setup.py | mjziebarth/geovoronoi | 40c65aefa1c754975c41bda57279e289c6b04222 | [
"Apache-2.0"
] | null | null | null | """
geovoronoi setuptools based setup module
"""
import os
from setuptools import setup
import geovoronoi
GITHUB_URL = 'https://github.com/WZBSocialScienceCenter/geovoronoi'

here = os.path.abspath(os.path.dirname(__file__))

# Get the long description from the README file
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name=geovoronoi.__title__,
    version=geovoronoi.__version__,
    description='a package to create and plot Voronoi regions in geographic areas',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url=GITHUB_URL,
    project_urls={
        'Source': GITHUB_URL,
        # Consistency fix: reuse the GITHUB_URL constant instead of repeating
        # the literal (resulting string is identical).
        'Tracker': GITHUB_URL + '/issues',
    },
    author='Markus Konrad',
    author_email='markus.konrad@wzb.eu',
    license='Apache 2.0',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
    ],
    keywords='voronoi tesselation gis geographic area visualization plotting',
    packages=['geovoronoi'],
    # include_package_data=True,
    python_requires='>=3.4',
    install_requires=['numpy>=1.11.0', 'scipy>=0.12.0', 'shapely>=1.6.0'],
    extras_require={
        'plotting': ['matplotlib>=2.1.0', 'geopandas>=0.5.0', 'descartes>=1.1.0'],
    }
)
| 30.75 | 86 | 0.653455 |
ace3c592f0bbdde6d21378016e21f26229c5a8d0 | 821 | py | Python | frontend/couchdocs/map_reduce.py | COMP90024CloudComputing/Submit_Cloud_Computing | 8338e5c703a30985deeff95dccacb3b06f36e4ef | [
"Apache-2.0"
] | 1 | 2017-12-17T21:18:46.000Z | 2017-12-17T21:18:46.000Z | frontend/couchdocs/map_reduce.py | COMP90024CloudComputing/Submit_Cloud_Computing | 8338e5c703a30985deeff95dccacb3b06f36e4ef | [
"Apache-2.0"
] | null | null | null | frontend/couchdocs/map_reduce.py | COMP90024CloudComputing/Submit_Cloud_Computing | 8338e5c703a30985deeff95dccacb3b06f36e4ef | [
"Apache-2.0"
] | null | null | null | import couchdb
import re
# Month-name lookup tables (unused in this script; presumably shared with
# related scripts — TODO confirm before removing).
MONTH = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
MONTH_NUM = ['01','02','03','04','05','06','07','08','09','10','11','12']
# NOTE(review): admin credentials are embedded in the connection URL —
# consider moving them to configuration.
couch = couchdb.Server('http://admin:123@localhost:15984/')
db1 = couch['suburb_data']
db2 = couch['geotwitter']
post_income = {}
post_senti = {}
# for each in db1:
#     postcode = db1[each].get('postcode')
#     income = db1[each].get('average_income')
#     post_income[postcode] = income
# First pass: per-key sentiment counts from the "post_senti_count" view.
for each in db2.view("for_frontend/post_senti_count",group = True):
    # print each.key+':'+str(each.value)
    post_senti[str(each.key)] = each.value
# Second pass: divide the summed sentiment by the stored count to get an
# average per key.
# NOTE(review): the write uses str(each.key) but the read uses each.key —
# this only works if the view keys are already strings; otherwise it raises
# KeyError. Verify the view's key type.
for each in db2.view("for_frontend/post_senti", group = True):
    # print each.key+':'+str(each.value)
    post_senti[str(each.key)] = float(each.value) / post_senti[each.key]
# Python 2 print statement: this script targets Python 2.
print post_senti
| 35.695652 | 81 | 0.649208 |
ace3c5a855473ad9a67a49d9043134a8395156f6 | 1,318 | py | Python | reviewsapp/core/tests/test_model.py | xN03Lx/reviewsapp | 65b79de0b6f30694de60c50e501d1edbdc5e125f | [
"MIT"
] | null | null | null | reviewsapp/core/tests/test_model.py | xN03Lx/reviewsapp | 65b79de0b6f30694de60c50e501d1edbdc5e125f | [
"MIT"
] | null | null | null | reviewsapp/core/tests/test_model.py | xN03Lx/reviewsapp | 65b79de0b6f30694de60c50e501d1edbdc5e125f | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTest(TestCase):
    """Unit tests for the custom user model's manager."""

    def test_create_user_with_email_successful(self):
        """A user created with an email and password stores both correctly."""
        address = 'test@gmail.com'
        secret = 'Test123456'
        created = get_user_model().objects.create_user(
            email=address,
            password=secret,
        )
        self.assertEqual(created.email, address)
        self.assertTrue(created.check_password(secret))

    def test_new_user_email_normalized(self):
        """The email address of a new user is lower-cased (normalized)."""
        mixed_case = 'test@GMAIL.COM'
        created = get_user_model().objects.create_user(mixed_case, 'test123')
        self.assertEqual(created.email, mixed_case.lower())

    def test_new_user_invalid_email(self):
        """Creating a user without an email raises ValueError."""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test1234')

    def test_create_new_superuser(self):
        """A superuser gets the superuser, staff and critic flags set."""
        admin = get_user_model().objects.create_superuser(
            'test@gmail.com',
            'Test123456',
        )
        self.assertTrue(admin.is_superuser)
        self.assertTrue(admin.is_staff)
        self.assertTrue(admin.is_critic)
ace3c638a29827a95b22bbd32bfde122d3cf6133 | 419 | py | Python | pythonforandroid/recipes/dateutil/__init__.py | strubbi77/python-for-android | 230fb66449f18217efa440b942ab6659f3f62edc | [
"MIT"
] | 2 | 2019-01-07T12:13:25.000Z | 2019-10-19T09:53:50.000Z | pythonforandroid/recipes/dateutil/__init__.py | strubbi77/python-for-android | 230fb66449f18217efa440b942ab6659f3f62edc | [
"MIT"
] | null | null | null | pythonforandroid/recipes/dateutil/__init__.py | strubbi77/python-for-android | 230fb66449f18217efa440b942ab6659f3f62edc | [
"MIT"
] | 3 | 2018-12-13T09:57:33.000Z | 2019-01-09T15:36:46.000Z | from pythonforandroid.recipe import PythonRecipe
class DateutilRecipe(PythonRecipe):
    """python-for-android recipe that builds the python-dateutil package."""
    # Package name and pinned source tarball for version 2.6.0.
    name = 'dateutil'
    version = '2.6.0'
    url = 'https://pypi.python.org/packages/51/fc/39a3fbde6864942e8bb24c93663734b74e281b984d1b8c4f95d64b0c21f6/python-dateutil-2.6.0.tar.gz'
    # Build dependencies: a Python 2 target plus setuptools.
    depends = ['python2', "setuptools"]
    # Build-flow flags — presumably: run setup with the host Python directly
    # and install into the host Python. TODO confirm against PythonRecipe docs.
    call_hostpython_via_targetpython = False
    install_in_hostpython = True
# Module-level instance picked up by the python-for-android build system.
recipe = DateutilRecipe()
| 27.933333 | 140 | 0.75895 |
ace3c642363a8164b954013a3886c59daef22bfb | 8,284 | py | Python | parlai/mturk/tasks/wizard_of_wikipedia/task_config.py | markr-fu-berlin/ParlAI | 23f014c38ee502091fdd8623f5c8a6f2c3216e92 | [
"BSD-3-Clause"
] | 2 | 2020-03-22T10:18:09.000Z | 2020-05-06T21:48:47.000Z | parlai/mturk/tasks/wizard_of_wikipedia/task_config.py | urvishdesai/dialogue-encoding-tasks-parlai | 29743cc7b47c413c2181f68c0b7ef40a6f06a40f | [
"BSD-3-Clause"
] | null | null | null | parlai/mturk/tasks/wizard_of_wikipedia/task_config.py | urvishdesai/dialogue-encoding-tasks-parlai | 29743cc7b47c413c2181f68c0b7ef40a6f06a40f | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# Mapping consumed by the MTurk task runner; populated below.
task_config = {}
end_info = """
<h4><span style="color:blue"><b>Reward/Bonus</b></span></h4>
If you complete the task, you will receive $1.62.
<br>
<b>We will reward engaging and knowledgeable chats with a bonus.</b>
<br>
<br>
<h4><span style="color:blue"><b>Close Window/Timeout/Return HIT</b></span></h4>
Once the conversation has started, close window/timeout or return HIT during the
chat will result in
<b>HIT EXPIRED</b> to you and NO reward paid.
<br>
<br>
<h4><span style="color:blue"><b>Important Notice</b></span></h4>
1. <b>Be aware the conversations you have will be made public, so act as you
would e.g. on a public social network like Twitter.</b>
<br>
2. Please do not send long messages: messages cannot exceed 30 words.
<br>
3. Please do not reference the task or MTurk itself during the conversation,
but speak naturally to the other person.
<br>
4. Please do not send any message that could make others uncomfortable,
including any level of discrimination, racism, sexism and offensive
religious/politics comments, otherwise the submission will be rejected.
<br>
<br>
<br>
"""
"""A short and descriptive title about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT title appears in search results,
and everywhere the HIT is mentioned.
"""
task_config['hit_title'] = 'Chat with a Real Person'
"""A description includes detailed information about the kind of task the HIT contains.
On the Amazon Mechanical Turk web site, the HIT description appears in the expanded
view of search results, and in the HIT and assignment screens.
"""
task_config['hit_description'] = 'You will chat with another person, either freely '
'or in the context of information provided for each response.'
"""One or more words or phrases that describe the HIT, separated by commas.
On MTurk website, these words are used in searches to find HITs.
"""
task_config['hit_keywords'] = 'chat,dialog'
"""A detailed task description that will be shown on the HIT task preview page
and on the left side of the chat page. Supports HTML formatting.
"""
task_config['task_description'] = \
'''
<h2><b>Description</b></h2>
In this task, you will have a conversation with another person. The goal of
this task is to go into depth about something that interests you or the other
player, while keeping the conversation engaging and fun.
<br>
<br>
<h4><span style='color:blue'><b>Sample Conversation</b></span></h4>
<b>Person 1</b>: Hi! I really like board games.
<br>
<b>Person 2</b>: Oo, what type of board games?
<br>
<b>Person 1</b>: I like strategy games, especially ones that are sci-fi
<br>
<b>Person 2</b>: I love Risk, but it takes place on earth, so not sci-fi,
and it takes forever
<br>
<b>Person 1</b>: Right? How do you feel about cards against humanity?
<br>
<b>Person 2</b>: Cards against humanity is fun but a little too risque for me
<br>
<br>
{}
If you are ready, please click "Accept HIT" to start this task.
'''.format(end_info)
task_config['wizard_onboarding'] = \
'''
<h2>You have just met the other person, who seems quite curious, and you are
eager to discuss a topic with them!</h2>
<br>
You will try to inform your conversation partner about a topic that one of you
will choose. After a topic is chosen, you will receive information about that
topic that will be visible throughout the chat.
<br>
Additionally, after any message is sent in the chat, you will have access
to a set of relevant information.
<br>
Try to use one of these sentences to answer your partner's questions, and in
general have an engaging conversation.
<br>
<br>
<b><span>Please indicate which sentence you used by checking the box next to it.
If you do not use any sentence, check the "No Sentence Used" box.</span></b>
If you use a sentence even slightly, please check it.
<b>If you do not use enough external
information throughout the conversation, you may be prevented from
doing more of these HITs</b>.
<br>
<br>
Some Guidelines:
<br><br>
<ol>
<li>Please do not simply copy and paste the provided checked sentence
- there's no fun in that!</li>
<li>Do not use “know-it-all” phrases such as "did you know" in your responses
- e.g., the response "did you know that the Berlin Wall was demolished in 1989"
will not be accepted — be fun and engaging!</li>
<li><b>Important Note</b>: if you do not use enough external information in
your responses throughout the conversation, you may be prevented from
completing this task in the future.</li>
</ol>
<br>
After a minimum number of turns, you will be able to click the
DONE button to finish the chat.
To guarantee an efficient conversation, there is a time limit for sending a
message to another person (3 mins).
<b>Note: we will reward engaging and knowledgeable chats with BONUSES.</b>
<br>
<br>
<h4><span style="color:blue"><b>Conversation Outline</b></span></h4>
Thus, a conversation will proceed as follows (from your perspective):<br>
<ol>
<li>You or your partner will pick an initial conversation topic from the list
provided, at which point you will receive information about the topic, and
then the conversation will begin.</li>
<li>When your partner sends you a message, you will look at the relevant
information, and check a sentence you will use to construct a response.
If you do not use any sentences, you must check the "No Sentence Used" option</li>
<li>You will respond to your partner, basing your response on the chosen sentence.</li>
</ol>
<br>
And the conversation repeats until the minimum number of turns is reached,
at which point your partner will evaluate your level of engagingness.
<br>
<br>
<h4><span style="color:blue"><b>Sample Conversation</b></span></h4>
<b>Them</b>: Hi! I really like board games.
<br>
<b>You</b>: Oo, what type of board games?
<br>
<b>Them</b>: I like strategy games, especially ones that are sci-fi
<br>
<b>You</b>: I love Risk, but it takes place on earth, so not sci-fi, and it
takes forever
<br>
<b>Them</b>: Right? How do you feel about cards against humanity?
<br>
<b>You</b>: Cards against humanity is fun but a little too risque for me
<br>
<br>
{}
<br>
<br>
'''.format(end_info)
task_config['apprentice_onboarding'] = \
'''
<h2> You have just met the other person, who seems quite knowledgable, and you are
curious to discuss a topic with them!</h2>
<br>
You will try to learn from your conversation partner about a topic that one of you will
choose. Feel free to dive deep on specifics - your partner will have access to
external information that will help them craft their response.
<br>
<br>
After a minimum number of turns, you will be able to click the DONE button
to finish the chat.
To guarantee an efficient conversation, there is a time limit for sending a message
to another person (2 mins).
<br>
<br>
<h4><span style="color:blue"><b>Conversation Outline</b></span></h4>
Thus, a conversation will proceed as follows: (from your perspective)<br>
<ol>
<li>You or your partner will pick an initial conversation topic from the
list provided, and open the conversation.</li>
<li>You will try to learn about the chosen topic</li>
<li>After you send a message, your partner will respond, continuing the
conversation and hopefully providing more information about the topic.</li>
</ol>
<br>
And the conversation continues until the minimum number of chat turns is
reached, at which point you will evaluate the quality of your conversation
partner, based on how relevant, engaging, and <b>knowledgeable</b> they were.
<br>
<br>
<h4><span style="color:blue"><b>Sample Conversation</b></span></h4>
<br>
<br>
<b>You</b>: Hi! I really like board games.
<br>
<b>Them</b>: Oo, what type of board games?
<br>
<b>You</b>: I like strategy games, especially ones that are sci-fi
<br>
<b>Them</b>: I love Risk, but it takes place on earth, so not sci-fi,
and it takes forever
<br>
<b>You</b>: Right? How do you feel about cards against humanity?
<br>
<b>Them</b>: Cards against humanity is fun but a little too risque for me
<br>
<br>
{}
'''.format(end_info)
| 37.826484 | 87 | 0.742878 |
ace3c66186b97cea85828bdbc740c8bff6023fde | 4,190 | py | Python | openGaussBase/testcase/TOOLS/INTERNAL_TOOLS/gs_probackup/Opengauss_Function_Tools_Gs_Probackup_Case0037.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/TOOLS/INTERNAL_TOOLS/gs_probackup/Opengauss_Function_Tools_Gs_Probackup_Case0037.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/TOOLS/INTERNAL_TOOLS/gs_probackup/Opengauss_Function_Tools_Gs_Probackup_Case0037.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 系统内部使用工具
Case Name : 指定--format=json,使用show命令显示备份目录的内容,
显示是否为json格式
Description :
1.新建目录
2.进行初始化
3.在备份路径内初始化一个新的备份实例
4.执行全量备份
5.显示备份目录的内容
6.删除新建目录
Expect :
1.新建目录成功
2.进行初始化成功
3.在备份路径内初始化一个新的备份实例成功
4.执行全量备份
5.显示备份目录的内容成功,为纯文本格式
6.删除新建目录成功
History :
"""
import unittest
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro
LOG = Logger()
class SystemInternalTools(unittest.TestCase):
    """gs_probackup: `show --format=json -i <backup_id>` prints backup info as JSON.

    Steps:
        1. Create a fresh backup directory next to the cluster path.
        2. Initialise it with `gs_probackup init`.
        3. Add a new backup instance ("pro1") inside the backup path.
        4. Take a full backup of the primary instance.
        5. Show the backup catalog with --format=json and check the output
           contains the JSON key/value `"backup-mode": "FULL"`.
        6. Remove the backup directory (tearDown).
    """

    def setUp(self):
        LOG.info('-------------------this is setup--------------------')
        LOG.info('-Opengauss_Function_Tools_Gs_Probackup_Case0037开始执行-')
        self.constant = Constant()
        self.PrimaryNode = Node('PrimaryDbUser')

    def test_system_internal_tools(self):
        LOG.info('---------step1 新建备份目录--------------')
        instance_path = f'{macro.DB_INSTANCE_PATH}'
        LOG.info('实例路径为:' + instance_path)
        # Parent directory of the instance path: slice from the first '/'
        # up to (but excluding) the last path component.
        index1 = instance_path.find('/')
        index2 = instance_path.rfind('/')
        self.cluster_path = instance_path[index1:index2]
        LOG.info(self.cluster_path)
        init_cmd = f"mkdir {self.cluster_path}/testdir;"
        LOG.info(init_cmd)
        init_msg = self.PrimaryNode.sh(init_cmd).result()
        LOG.info(init_msg)
        self.assertNotIn(self.constant.SQL_WRONG_MSG[1], init_msg)

        LOG.info('----------step2 进行初始化------------------')
        init_cmd = f"source {macro.DB_ENV_PATH};gs_probackup init -B " \
                   f"{self.cluster_path}/testdir;"
        LOG.info(init_cmd)
        init_msg = self.PrimaryNode.sh(init_cmd).result()
        LOG.info(init_msg)
        self.assertIn(self.constant.init_success, init_msg)

        LOG.info('-----step3 在备份路径内初始化一个新的备份实例---')
        init_cmd = f"source {macro.DB_ENV_PATH};" \
                   f"gs_probackup add-instance -B {self.cluster_path}/testdir " \
                   f"-D {macro.DB_INSTANCE_PATH} --instance=pro1;"
        LOG.info(init_cmd)
        init_msg = self.PrimaryNode.sh(init_cmd).result()
        LOG.info(init_msg)
        self.assertIn("'pro1' " + self.constant.init_success, init_msg)

        LOG.info('-------------step4 执行全量备份---------------')
        back_cmd = f"source {macro.DB_ENV_PATH};" \
                   f"gs_probackup backup -B {self.cluster_path}/testdir " \
                   f" --instance=pro1 -b Full -d {self.PrimaryNode.db_name} -p " \
                   f"{self.PrimaryNode.db_port} ; "
        LOG.info(back_cmd)
        back_msg = self.PrimaryNode.sh(back_cmd).result()
        LOG.info(back_msg)
        self.assertIn('completed', back_msg)
        # The last output line has the form
        # "INFO: Backup <ID> completed"; token index 2 is the backup ID.
        self.backupmsg = back_msg.splitlines()[-1]
        LOG.info(self.backupmsg)
        self.backupid = self.backupmsg.split()[2]
        LOG.info('备份ID为:' + self.backupid)

        LOG.info('-------------step5 显示备份目录的内容------------')
        cat_cmd = f"source {macro.DB_ENV_PATH};" \
                  f"gs_probackup show -B " \
                  f"{self.cluster_path}/testdir --instance=pro1 " \
                  f"--format=json -i {self.backupid}"
        LOG.info(cat_cmd)
        cat_msg = self.PrimaryNode.sh(cat_cmd).result()
        LOG.info(cat_msg)
        self.assertIn('"backup-mode": "FULL"', cat_msg)

    def tearDown(self):
        LOG.info('------------------this is tearDown--------------------')
        LOG.info('---------------step6 删除新建目录-----------------')
        # self.cluster_path is only assigned inside the test body; if setUp
        # or an early test step failed, it does not exist, and referencing it
        # directly would raise AttributeError and mask the real failure.
        cluster_path = getattr(self, 'cluster_path', None)
        if cluster_path is not None:
            clear_cmd = f"rm -rf {cluster_path}/testdir;"
            LOG.info(clear_cmd)
            clear_msg = self.PrimaryNode.sh(clear_cmd).result()
            LOG.info(clear_msg)
        LOG.info('-Opengauss_Function_Tools_Gs_Probackup_Case0037执行完成-')
| 36.12069 | 84 | 0.613604 |
ace3c77a863ead716361feff3bab4aa4d3cbc770 | 2,876 | py | Python | tests/test_reader.py | EmperorArthur/par2py | 043c8e9bff49cab2cba88b94f499f5d5ffca5c19 | [
"MIT"
] | null | null | null | tests/test_reader.py | EmperorArthur/par2py | 043c8e9bff49cab2cba88b94f499f5d5ffca5c19 | [
"MIT"
] | null | null | null | tests/test_reader.py | EmperorArthur/par2py | 043c8e9bff49cab2cba88b94f499f5d5ffca5c19 | [
"MIT"
] | null | null | null | import mmap
from pathlib import Path
from par2.packets import Packet
from par2.reader import Par2FileReader, DDPacketPointer
from .conftest import in_sample_dir, SAMPLES_PATH, factory_packet_header
def test_reader_empty_bytes():
    """A reader built over an empty bytes buffer should expose zero packets."""
    empty_reader = Par2FileReader(b'')
    assert len(empty_reader) == 0
def test_reader_par2_file_str(in_sample_dir):
    """A str path is memory-mapped, then parsed lazily on the first len() call."""
    parser = Par2FileReader("testfile.txt.par2")
    # Before anything forces a parse, only the mmap should exist.
    assert isinstance(parser._read_buffer, mmap.mmap), "File should be memmapped"
    assert parser._packet_offsets == [], "File should not be parsed immediately"
    assert parser._readable_and_seekable is True, "File should support regular file operations"
    # len() triggers the scan for packet offsets.
    assert len(parser) == 4, "Parser should have found the packets. Found offsets: {}".format(parser._packet_offsets)
    assert parser._packet_offsets == [0, 92, 224, 724], "Offsets should always be here"
def test_reader_par2_file_str_mmap_closed(in_sample_dir):
    """Parsing must still succeed after the underlying mmap has been closed."""
    parser = Par2FileReader("testfile.txt.par2")
    assert isinstance(parser._read_buffer, mmap.mmap), "File should be memmapped"
    # Simulate the mmap being closed out from under the reader.
    parser._read_buffer.close()
    assert len(parser) == 4, "Parser should have found the packets. Found offsets: {}".format(parser._packet_offsets)
    assert parser._packet_offsets == [0, 92, 224, 724], "Offsets should always be here"
def test_reader_par2_file_path(in_sample_dir):
    """Same as the str-path test, but constructed from a pathlib.Path."""
    sample = SAMPLES_PATH.joinpath("testfile.txt.par2")
    parser = Par2FileReader(sample)
    assert len(parser) == 4, "Parser should have found the packets. Found offsets: {}".format(parser._packet_offsets)
def test_reader_par2_open_file(in_sample_dir):
    """ Similar to above, but just checking input type """
    # Use a context manager so the handle is closed even if the assertion
    # fails; the original left the file object open (ResourceWarning leak).
    with Path("testfile.txt.par2").open('rb') as file:
        reader = Par2FileReader(file)
        assert len(reader) == 4, "Parser should have found the packets. Found offsets: {}".format(reader._packet_offsets)
def test_pointer_set():
    """ Make sure two identical pointers properly de-duplicate """
    header = factory_packet_header()
    # Distinct readers and distinct offsets: equality must depend on the
    # header alone for de-duplication to work.
    first = DDPacketPointer(header, Par2FileReader(b''), 0)
    second = DDPacketPointer(header, Par2FileReader(b''), 1)
    assert first == second
    assert hash(first) == hash(second)
    assert len({first, second}) == 1
def test_get_pointers(in_sample_dir):
    """ Test getting packet pointers, and that they work """
    pointers = Par2FileReader("testfile.txt.par2").get_pointers()
    assert isinstance(pointers, set)
    assert len(pointers) == 4
    # Every pointer belongs to the same recovery set and dereferences to a Packet.
    for ptr in pointers:
        assert ptr.header.set_id.hex() == "be22b3624317366207908eb8aed92827"
        assert isinstance(ptr.get(), Packet)
| 41.681159 | 118 | 0.727399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.