Dataset columns:

| column | type | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
Row 1: bayesian/utility/v2/stack_analyses.py (mbharatk/fabric8-analytics-server)

| field | value |
|---|---|
| hexsha | aef9a269a8feefc95b76496302147f489fe59c84 |
| size | 6,886 |
| ext | py |
| lang | Python |
| max_stars/max_issues/max_forks_repo_path | bayesian/utility/v2/stack_analyses.py |
| max_stars/max_issues/max_forks_repo_name | mbharatk/fabric8-analytics-server |
| max_stars/max_issues/max_forks_repo_head_hexsha | 5b168e89d3465682e241748da1f6e426e4f3679c |
| max_stars/max_issues/max_forks_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 18 |
| max_stars_repo_stars_event_min/max_datetime | 2017-05-02T22:04:32.000Z / 2021-11-08T10:30:03.000Z |
| max_issues_count | 699 |
| max_issues_repo_issues_event_min/max_datetime | 2017-05-03T14:55:17.000Z / 2022-03-17T05:51:58.000Z |
| max_forks_count | 64 |
| max_forks_repo_forks_event_min/max_datetime | 2017-05-02T05:45:01.000Z / 2021-11-20T12:20:46.000Z |

content:
# Copyright © 2020 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Dharmendra G Patel <dhpatel@redhat.com>
#
"""Stack analyses API v2 class, implementing functionality to server POST and GET requests."""
import datetime
import uuid
import json
import logging
from flask import g
from flask import request
from flask import has_request_context
from bayesian.dependency_finder import DependencyFinder
from bayesian.utility.db_gateway import RdbAnalyses
from bayesian.utility.v2.backbone_server import BackboneServer
logger = logging.getLogger(__name__)
class StackAnalyses():
"""Implements stack analysis API.
Implements methods to support stack analyses POST and GET REST API calls.
"""
def __init__(self, params):
"""Initialize params to be used for ."""
self.params = params
def post_request(self):
"""Make stack analyses POST request."""
logger.info('SA Post request with ecosystem: %s manifest: %s path: %s '
'show_transitive: %s', self.params.ecosystem,
self.params.manifest.filename, self.params.file_path,
self.params.show_transitive)
# Build manifest file info.
self._manifest_file_info = {
'filename': self.params.manifest.filename,
'filepath': self.params.file_path,
'content': self.params.manifest.read().decode('utf-8')
}
logger.debug('manifest_file_info: %s', self._manifest_file_info)
# Generate unique request id using UUID, also record timestamp in readable form
self._new_request_id = str(uuid.uuid4().hex)
date_str = str(datetime.datetime.now())
# Fetch uuid from header
if has_request_context():
uuid_data = request.headers.get('uuid', None)
else:
uuid_data = None
# Make backbone request
deps = self._make_backbone_request()
# Finally save results in RDS and upon success return request id.
rdbAnalyses = RdbAnalyses(self._new_request_id)
rdbAnalyses.save_post_request(date_str, uuid_data, deps, self._manifest_file_info)
data = {
'status': 'success',
'submitted_at': date_str,
'id': self._new_request_id
}
logger.info('%s response: %s', self._new_request_id, data)
return data
def _read_deps_and_packages(self):
"""Read dependencies and packages information from manifest file content."""
packages = []
try:
deps, resolved = self._get_flat_dependency_tree()
# Build package details.
if resolved is not None:
for p in resolved:
packages.append({
'name': p.get('package', ''),
'version': p.get('version', ''),
'dependencies': [{'name': pkg['package'], 'version': pkg['version']}
for pkg in p.get('deps', [])]
})
return {'deps': deps, 'packages': packages}
except (ValueError, json.JSONDecodeError) as e:
logger.exception('%s Invalid dependencies encountered. %s',
self._new_request_id, str(e))
raise SAInvalidInputException('Error while parsing dependencies information') from e
except Exception as e:
logger.exception('%s Unknown exception encountered while parsing deps. %s',
self._new_request_id, str(e))
raise SAInvalidInputException('Unknown error while parsing dependencies '
'information') from e
def _get_flat_dependency_tree(self):
"""Get Flat dependency tree.
:returns:
save_in_rds: content to be saved in DB.
packages: Flat Package list to be pushed to Backbone.
"""
if self.params.ecosystem == 'golang':
# List flattening is done at Golang frontend client.
deps = json.loads(self._manifest_file_info.get('content', []))
packages = deps.get('packages', None)
save_in_rds = {'result': [{'details': [{
'ecosystem': 'golang',
"manifest_file_path": self.params.file_path,
"manifest_file": self.params.manifest.filename,
"_resolved": packages
}]}]}
return save_in_rds, packages
# Dependency finder
d = DependencyFinder()
save_in_rds = d.scan_and_find_dependencies(
self.params.ecosystem,
[self._manifest_file_info],
json.dumps(self.params.show_transitive))
packages = save_in_rds.get(
'result', [{}])[0].get('details', [{}])[0].get('_resolved', None)
return save_in_rds, packages
def _make_backbone_request(self):
"""Perform backbone request for stack_aggregator and recommender."""
# Read deps and packages from manifest
data = self._read_deps_and_packages()
logger.info('%s deps and packages data: %s', self._new_request_id, data)
# Set backbone API request body and params.
request_body = {
'registration_status': g.user_status.name,
'uuid': g.uuid,
'external_request_id': self._new_request_id,
'ecosystem': self.params.ecosystem,
'packages': data['packages'],
'manifest_name': self._manifest_file_info['filename'],
'manifest_file_path': self._manifest_file_info['filepath'],
'show_transitive': self.params.show_transitive
}
request_params = {
'persist': 'true',
'check_license': 'false'
}
logger.info('%s request_body: %s request_params: %s',
self._new_request_id, request_body, request_params)
# Post Backbone stack_aggregator call.
BackboneServer.post_aggregate_request(request_body, request_params)
BackboneServer.post_recommendations_request(request_body, request_params)
return data['deps']
class SAInvalidInputException(Exception):
"""Exception raised when given input data is not valid.
This exception is raised specifically when parsing dependency information from the manifest file.
"""
pass
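For orientation, a minimal sketch of how the class above might be driven; the params object, its attribute names, and the fake manifest below are assumptions inferred from the attributes the code reads, not part of the original file.

```python
from io import BytesIO
from types import SimpleNamespace

# Hypothetical stand-in for the uploaded manifest; only .filename and .read()
# are used by StackAnalyses.post_request().
manifest = SimpleNamespace(filename='requirements.txt',
                           read=BytesIO(b'flask==2.0.1\n').read)

params = SimpleNamespace(ecosystem='pypi',
                         manifest=manifest,
                         file_path='/tmp/bin',
                         show_transitive=True)

sa = StackAnalyses(params)
# sa.post_request() would flatten the dependency tree, call the backbone
# stack_aggregator and recommender services, persist the request in RDS and
# return {'status': 'success', 'submitted_at': ..., 'id': ...}.
```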
avg_line_length: 39.803468 | max_line_length: 96 | alphanum_fraction: 0.623729
Row 2: src/commands.py (Athanasius/miggy-py-ircbot)

| field | value |
|---|---|
| hexsha | 0582bf7b469e79445abea5d3dc520323116a27e1 |
| size | 39,310 |
| ext | py |
| lang | Python |
| max_stars/max_issues/max_forks_repo_path | src/commands.py |
| max_stars/max_issues/max_forks_repo_name | Athanasius/miggy-py-ircbot |
| max_stars/max_issues/max_forks_repo_head_hexsha | cac44d65e70e56e11b3fd48cf6eca373d5fcb9a3 |
| max_stars/max_issues/max_forks_repo_licenses | ["BSD-3-Clause"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min/max_datetime | null / null |
| max_issues_count | 4 |
| max_issues_repo_issues_event_min/max_datetime | 2017-10-23T15:16:40.000Z / 2018-05-27T10:19:52.000Z |
| max_forks_count | null |
| max_forks_repo_forks_event_min/max_datetime | null / null |

content:
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009-2010,2015, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Includes wrappers for commands.
"""
import time
import getopt
import inspect
import threading
import multiprocessing #python2.6 or later!
try:
import resource
except ImportError: # Windows!
resource = None
from . import callbacks, conf, ircdb, ircmsgs, ircutils, log, \
utils, world
from .utils import minisix
from .i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization()
###
# Non-arg wrappers -- these just change the behavior of a command without
# changing the arguments given to it.
###
# Thread has to be a non-arg wrapper because by the time we're parsing and
# validating arguments, we're inside the function we'd want to thread.
def thread(f):
"""Makes sure a command spawns a thread when called."""
def newf(self, irc, msg, args, *L, **kwargs):
if world.isMainThread():
targetArgs = (self.callingCommand, irc, msg, args) + tuple(L)
t = callbacks.CommandThread(target=self._callCommand,
args=targetArgs, kwargs=kwargs)
t.start()
else:
f(self, irc, msg, args, *L, **kwargs)
return utils.python.changeFunctionName(newf, f.__name__, f.__doc__)
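A hedged sketch of how a slow plugin command is typically pushed off the main thread with this decorator; the plugin and command names are invented.

```python
class Fetcher(callbacks.Plugin):
    """Toy plugin illustrating the thread decorator."""
    @thread
    def fetch(self, irc, msg, args):
        """takes no arguments

        Runs a slow operation in a worker thread so the bot's main loop
        keeps processing other messages.
        """
        irc.reply('running in a worker thread')
```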
class ProcessTimeoutError(Exception):
"""Gets raised when a process is killed due to timeout."""
pass
def process(f, *args, **kwargs):
"""Runs a function <f> in a subprocess.
Several extra keyword arguments can be supplied.
<pn>, the pluginname, and <cn>, the command name, are strings used to
create the process name, for identification purposes.
<timeout>, if supplied, limits the length of execution of target
function to <timeout> seconds.
<heap_size>, if supplied, limits the memory used by the target
function."""
timeout = kwargs.pop('timeout', None)
heap_size = kwargs.pop('heap_size', None)
if resource and heap_size is None:
heap_size = resource.RLIM_INFINITY
if world.disableMultiprocessing:
pn = kwargs.pop('pn', 'Unknown')
cn = kwargs.pop('cn', 'unknown')
try:
return f(*args, **kwargs)
except Exception as e:
raise e
try:
q = multiprocessing.Queue()
except OSError:
log.error('Using multiprocessing.Queue raised an OSError.\n'
'This is probably caused by your system denying semaphore\n'
'usage. You should run these two commands:\n'
'\tsudo rmdir /dev/shm\n'
'\tsudo ln -Tsf /{run,dev}/shm\n'
'(See https://github.com/travis-ci/travis-core/issues/187\n'
'for more information about this bug.)\n')
raise
def newf(f, q, *args, **kwargs):
if resource:
rsrc = resource.RLIMIT_DATA
resource.setrlimit(rsrc, (heap_size, heap_size))
try:
r = f(*args, **kwargs)
q.put(r)
except Exception as e:
q.put(e)
targetArgs = (f, q,) + args
p = callbacks.CommandProcess(target=newf,
args=targetArgs, kwargs=kwargs)
p.start()
p.join(timeout)
if p.is_alive():
p.terminate()
q.close()
raise ProcessTimeoutError("%s aborted due to timeout." % (p.name,))
try:
v = q.get(block=False)
except minisix.queue.Empty:
return None
finally:
q.close()
if isinstance(v, Exception):
raise v
else:
return v
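A small sketch of calling process() directly, mirroring the call pattern used by regexp_wrapper below; the worker function and the pn/cn labels are made up.

```python
def count_to(n):
    """CPU-bound worker to run outside the main process."""
    total = 0
    for i in range(n):
        total += i
    return total

# Run count_to(10**6) in a subprocess, giving up after 5 seconds;
# ProcessTimeoutError is raised if the child is still alive at that point.
result = process(count_to, 10**6, timeout=5, pn='Demo', cn='countto')
```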
def regexp_wrapper(s, reobj, timeout, plugin_name, fcn_name):
'''A convenient wrapper to stuff regexp search queries through a subprocess.
This is used because specially-crafted regexps can use exponential time
and hang the bot.'''
def re_bool(s, reobj):
"""Since we can't enqueue match objects into the multiprocessing queue,
we'll just wrap the function to return bools."""
if reobj.search(s) is not None:
return True
else:
return False
try:
v = process(re_bool, s, reobj, timeout=timeout, pn=plugin_name, cn=fcn_name)
return v
except ProcessTimeoutError:
return False
class UrlSnarfThread(world.SupyThread):
def __init__(self, *args, **kwargs):
assert 'url' in kwargs
kwargs['name'] = 'Thread #%s (for snarfing %s)' % \
(world.threadsSpawned, kwargs.pop('url'))
super(UrlSnarfThread, self).__init__(*args, **kwargs)
self.setDaemon(True)
def run(self):
try:
super(UrlSnarfThread, self).run()
except utils.web.Error as e:
log.debug('Exception in urlSnarfer: %s', utils.exnToString(e))
class SnarfQueue(ircutils.FloodQueue):
timeout = conf.supybot.snarfThrottle
def key(self, channel):
return channel
_snarfed = SnarfQueue()
class SnarfIrc(object):
def __init__(self, irc, channel, url):
self.irc = irc
self.url = url
self.channel = channel
def __getattr__(self, attr):
return getattr(self.irc, attr)
def reply(self, *args, **kwargs):
_snarfed.enqueue(self.channel, self.url)
return self.irc.reply(*args, **kwargs)
# This lock is used to serialize the calls to snarfers, so
# earlier snarfers are guaranteed to beat out later snarfers.
_snarfLock = threading.Lock()
def urlSnarfer(f):
"""Protects the snarfer from loops (with other bots) and whatnot."""
def newf(self, irc, msg, match, *L, **kwargs):
url = match.group(0)
channel = msg.args[0]
if not irc.isChannel(channel) or (ircmsgs.isCtcp(msg) and not
ircmsgs.isAction(msg)):
return
if ircdb.channels.getChannel(channel).lobotomized:
self.log.debug('Not snarfing in %s: lobotomized.', channel)
return
if _snarfed.has(channel, url):
self.log.info('Throttling snarf of %s in %s.', url, channel)
return
irc = SnarfIrc(irc, channel, url)
def doSnarf():
_snarfLock.acquire()
try:
# This has to be *after* we've acquired the lock so we can be
# sure that all previous urlSnarfers have already run to
# completion.
if msg.repliedTo:
self.log.debug('Not snarfing, msg is already repliedTo.')
return
f(self, irc, msg, match, *L, **kwargs)
finally:
_snarfLock.release()
if threading.currentThread() is not world.mainThread:
doSnarf()
else:
L = list(L)
t = UrlSnarfThread(target=doSnarf, url=url)
t.start()
newf = utils.python.changeFunctionName(newf, f.__name__, f.__doc__)
return newf
###
# Converters, which take irc, msg, args, and a state object, and build up the
# validated and converted args for the method in state.args.
###
# This is just so we can centralize this, since it may change.
def _int(s):
base = 10
if s.startswith('0x'):
base = 16
s = s[2:]
elif s.startswith('0b'):
base = 2
s = s[2:]
elif s.startswith('0') and len(s) > 1:
base = 8
s = s[1:]
try:
return int(s, base)
except ValueError:
if base == 10 and '.' not in s:
try:
return int(float(s))
except OverflowError:
raise ValueError('I don\'t understand numbers that large.')
else:
raise
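Worked examples of the prefix handling above; each value follows directly from the code.

```python
assert _int('42') == 42      # plain base-10
assert _int('0x1f') == 31    # '0x' prefix selects base 16
assert _int('0b101') == 5    # '0b' prefix selects base 2
assert _int('017') == 15     # a leading zero selects base 8
assert _int('1e3') == 1000   # base-10 fallback through int(float(s))
```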
def getInt(irc, msg, args, state, type=_('integer'), p=None):
try:
i = _int(args[0])
if p is not None:
if not p(i):
state.errorInvalid(type, args[0])
state.args.append(i)
del args[0]
except ValueError:
state.errorInvalid(type, args[0])
def getNonInt(irc, msg, args, state, type=_('non-integer value')):
try:
_int(args[0])
state.errorInvalid(type, args[0])
except ValueError:
state.args.append(args.pop(0))
def getLong(irc, msg, args, state, type='long'):
getInt(irc, msg, args, state, type)
state.args[-1] = minisix.long(state.args[-1])
def getFloat(irc, msg, args, state, type=_('floating point number')):
try:
state.args.append(float(args[0]))
del args[0]
except ValueError:
state.errorInvalid(type, args[0])
def getPositiveInt(irc, msg, args, state, *L):
getInt(irc, msg, args, state,
p=lambda i: i>0, type=_('positive integer'), *L)
def getNonNegativeInt(irc, msg, args, state, *L):
getInt(irc, msg, args, state,
p=lambda i: i>=0, type=_('non-negative integer'), *L)
def getIndex(irc, msg, args, state):
getInt(irc, msg, args, state, type=_('index'))
if state.args[-1] > 0:
state.args[-1] -= 1
def getId(irc, msg, args, state, kind=None):
type = 'id'
if kind is not None and not kind.endswith('id'):
type = kind + ' id'
original = args[0]
try:
args[0] = args[0].lstrip('#')
getInt(irc, msg, args, state, type=type)
except Exception:
args[0] = original
raise
def getExpiry(irc, msg, args, state):
now = int(time.time())
try:
expires = _int(args[0])
if expires:
expires += now
state.args.append(expires)
del args[0]
except ValueError:
state.errorInvalid(_('number of seconds'), args[0])
def getBoolean(irc, msg, args, state):
try:
state.args.append(utils.str.toBool(args[0]))
del args[0]
except ValueError:
state.errorInvalid(_('boolean'), args[0])
def getNetworkIrc(irc, msg, args, state, errorIfNoMatch=False):
if args:
for otherIrc in world.ircs:
if otherIrc.network.lower() == args[0].lower():
state.args.append(otherIrc)
del args[0]
return
if errorIfNoMatch:
raise callbacks.ArgumentError
else:
state.args.append(irc)
def getHaveVoice(irc, msg, args, state, action=_('do that')):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error(_('I\'m not even in %s.') % state.channel, Raise=True)
if not irc.state.channels[state.channel].isVoice(irc.nick):
state.error(_('I need to be voiced to %s.') % action, Raise=True)
def getHaveVoicePlus(irc, msg, args, state, action=_('do that')):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error(_('I\'m not even in %s.') % state.channel, Raise=True)
if not irc.state.channels[state.channel].isVoicePlus(irc.nick):
# isOp includes owners and protected users
state.error(_('I need to be at least voiced to %s.') % action,
Raise=True)
def getHaveHalfop(irc, msg, args, state, action=_('do that')):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error(_('I\'m not even in %s.') % state.channel, Raise=True)
if not irc.state.channels[state.channel].isHalfop(irc.nick):
state.error(_('I need to be halfopped to %s.') % action, Raise=True)
def getHaveHalfopPlus(irc, msg, args, state, action=_('do that')):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error(_('I\'m not even in %s.') % state.channel, Raise=True)
if not irc.state.channels[state.channel].isHalfopPlus(irc.nick):
# isOp includes owners and protected users
state.error(_('I need to be at least halfopped to %s.') % action,
Raise=True)
def getHaveOp(irc, msg, args, state, action=_('do that')):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error(_('I\'m not even in %s.') % state.channel, Raise=True)
if not irc.state.channels[state.channel].isOp(irc.nick):
state.error(_('I need to be opped to %s.') % action, Raise=True)
def validChannel(irc, msg, args, state):
if irc.isChannel(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid(_('channel'), args[0])
def getHostmask(irc, msg, args, state):
if ircutils.isUserHostmask(args[0]) or \
(not conf.supybot.protocols.irc.strictRfc() and
args[0].startswith('$')):
state.args.append(args.pop(0))
else:
try:
hostmask = irc.state.nickToHostmask(args[0])
state.args.append(hostmask)
del args[0]
except KeyError:
state.errorInvalid(_('nick or hostmask'), args[0])
def getBanmask(irc, msg, args, state):
getHostmask(irc, msg, args, state)
getChannel(irc, msg, args, state)
banmaskstyle = conf.supybot.protocols.irc.banmask
state.args[-1] = banmaskstyle.makeBanmask(state.args[-1],
channel=state.channel)
def getUser(irc, msg, args, state):
try:
state.args.append(ircdb.users.getUser(msg.prefix))
except KeyError:
state.errorNotRegistered(Raise=True)
def getOtherUser(irc, msg, args, state):
# Although ircdb.users.getUser could accept a hostmask, we're explicitly
# excluding that from our interface with this check
if ircutils.isUserHostmask(args[0]):
state.errorNoUser(args[0])
try:
state.args.append(ircdb.users.getUser(args[0]))
del args[0]
except KeyError:
try:
getHostmask(irc, msg, [args[0]], state)
hostmask = state.args.pop()
state.args.append(ircdb.users.getUser(hostmask))
del args[0]
except (KeyError, callbacks.Error):
state.errorNoUser(name=args[0])
def _getRe(f):
def get(irc, msg, args, state, convert=True):
original = args[:]
s = args.pop(0)
def isRe(s):
try:
f(s)
return True
except ValueError:
return False
try:
while len(s) < 512 and not isRe(s):
s += ' ' + args.pop(0)
if len(s) < 512:
if convert:
state.args.append(f(s))
else:
state.args.append(s)
else:
raise ValueError
except (ValueError, IndexError):
args[:] = original
state.errorInvalid(_('regular expression'), s)
return get
getMatcher = _getRe(utils.str.perlReToPythonRe)
getMatcherMany = _getRe(utils.str.perlReToFindall)
getReplacer = _getRe(utils.str.perlReToReplacer)
def getNick(irc, msg, args, state):
if ircutils.isNick(args[0], conf.supybot.protocols.irc.strictRfc()):
if 'nicklen' in irc.state.supported:
if len(args[0]) > irc.state.supported['nicklen']:
state.errorInvalid(_('nick'), args[0],
_('That nick is too long for this server.'))
state.args.append(args.pop(0))
else:
state.errorInvalid(_('nick'), args[0])
def getSeenNick(irc, msg, args, state, errmsg=None):
try:
irc.state.nickToHostmask(args[0])
state.args.append(args.pop(0))
except KeyError:
if errmsg is None:
errmsg = _('I haven\'t seen %s.') % args[0]
state.error(errmsg, Raise=True)
def getChannel(irc, msg, args, state):
if state.channel:
return
if args and irc.isChannel(args[0]):
channel = args.pop(0)
elif irc.isChannel(msg.args[0]):
channel = msg.args[0]
else:
state.log.debug('Raising ArgumentError because there is no channel.')
raise callbacks.ArgumentError
state.channel = channel
state.args.append(channel)
def getChannels(irc, msg, args, state):
if args and all(map(irc.isChannel, args[0].split(','))):
channels = args.pop(0).split(',')
elif irc.isChannel(msg.args[0]):
channels = [msg.args[0]]
else:
state.log.debug('Raising ArgumentError because there is no channel.')
raise callbacks.ArgumentError
state.args.append(channels)
def getChannelDb(irc, msg, args, state, **kwargs):
channelSpecific = conf.supybot.databases.plugins.channelSpecific
try:
getChannel(irc, msg, args, state, **kwargs)
channel = channelSpecific.getChannelLink(state.channel)
state.channel = channel
state.args[-1] = channel
except (callbacks.ArgumentError, IndexError):
if channelSpecific():
raise
channel = channelSpecific.link()
if not conf.get(channelSpecific.link.allow, channel):
log.warning('channelSpecific.link is globally set to %s, but '
'%s disallowed linking to its db.', channel, channel)
raise
else:
channel = channelSpecific.getChannelLink(channel)
state.channel = channel
state.args.append(channel)
def inChannel(irc, msg, args, state):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error(_('I\'m not in %s.') % state.channel, Raise=True)
def onlyInChannel(irc, msg, args, state):
if not (irc.isChannel(msg.args[0]) and msg.args[0] in irc.state.channels):
state.error(_('This command may only be given in a channel that I am '
'in.'), Raise=True)
else:
state.channel = msg.args[0]
state.args.append(state.channel)
def callerInGivenChannel(irc, msg, args, state):
channel = args[0]
if irc.isChannel(channel):
if channel in irc.state.channels:
if msg.nick in irc.state.channels[channel].users:
state.args.append(args.pop(0))
else:
state.error(_('You must be in %s.') % channel, Raise=True)
else:
state.error(_('I\'m not in %s.') % channel, Raise=True)
else:
state.errorInvalid(_('channel'), args[0])
def nickInChannel(irc, msg, args, state):
originalArgs = state.args[:]
inChannel(irc, msg, args, state)
state.args = originalArgs
if args[0] not in irc.state.channels[state.channel].users:
state.error(_('%s is not in %s.') % (args[0], state.channel), Raise=True)
state.args.append(args.pop(0))
def getChannelOrNone(irc, msg, args, state):
try:
getChannel(irc, msg, args, state)
except callbacks.ArgumentError:
state.args.append(None)
def getChannelOrGlobal(irc, msg, args, state):
if args and args[0] == 'global':
channel = args.pop(0)
channel = 'global'
elif args and irc.isChannel(args[0]):
channel = args.pop(0)
state.channel = channel
elif irc.isChannel(msg.args[0]):
channel = msg.args[0]
state.channel = channel
else:
state.log.debug('Raising ArgumentError because there is no channel.')
raise callbacks.ArgumentError
state.args.append(channel)
def checkChannelCapability(irc, msg, args, state, cap):
getChannel(irc, msg, args, state)
cap = ircdb.canonicalCapability(cap)
cap = ircdb.makeChannelCapability(state.channel, cap)
if not ircdb.checkCapability(msg.prefix, cap):
state.errorNoCapability(cap, Raise=True)
def getOp(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'op')
def getHalfop(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'halfop')
def getVoice(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'voice')
def getLowered(irc, msg, args, state):
state.args.append(ircutils.toLower(args.pop(0)))
def getSomething(irc, msg, args, state, errorMsg=None, p=None):
if p is None:
p = lambda _: True
if not args[0] or not p(args[0]):
if errorMsg is None:
errorMsg = _('You must not give the empty string as an argument.')
state.error(errorMsg, Raise=True)
else:
state.args.append(args.pop(0))
def getSomethingNoSpaces(irc, msg, args, state, *L):
def p(s):
return len(s.split(None, 1)) == 1
L = L or [_('You must not give a string containing spaces as an argument.')]
getSomething(irc, msg, args, state, p=p, *L)
def private(irc, msg, args, state):
if irc.isChannel(msg.args[0]):
state.errorRequiresPrivacy(Raise=True)
def public(irc, msg, args, state, errmsg=None):
if not irc.isChannel(msg.args[0]):
if errmsg is None:
errmsg = _('This message must be sent in a channel.')
state.error(errmsg, Raise=True)
def checkCapability(irc, msg, args, state, cap):
cap = ircdb.canonicalCapability(cap)
if not ircdb.checkCapability(msg.prefix, cap):
state.errorNoCapability(cap, Raise=True)
def checkCapabilityButIgnoreOwner(irc, msg, args, state, cap):
cap = ircdb.canonicalCapability(cap)
if not ircdb.checkCapability(msg.prefix, cap, ignoreOwner=True):
state.errorNoCapability(cap, Raise=True)
def owner(irc, msg, args, state):
checkCapability(irc, msg, args, state, 'owner')
def admin(irc, msg, args, state):
checkCapability(irc, msg, args, state, 'admin')
def anything(irc, msg, args, state):
state.args.append(args.pop(0))
def getGlob(irc, msg, args, state):
glob = args.pop(0)
if '*' not in glob and '?' not in glob:
glob = '*%s*' % glob
state.args.append(glob)
def getUrl(irc, msg, args, state):
if utils.web.urlRe.match(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid(_('url'), args[0])
def getEmail(irc, msg, args, state):
if utils.net.emailRe.match(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid(_('email'), args[0])
def getHttpUrl(irc, msg, args, state):
if utils.web.httpUrlRe.match(args[0]):
state.args.append(args.pop(0))
elif utils.web.httpUrlRe.match('http://' + args[0]):
state.args.append('http://' + args.pop(0))
else:
state.errorInvalid(_('http url'), args[0])
def getNow(irc, msg, args, state):
state.args.append(int(time.time()))
def getCommandName(irc, msg, args, state):
if ' ' in args[0]:
state.errorInvalid(_('command name'), args[0])
else:
state.args.append(callbacks.canonicalName(args.pop(0)))
def getIp(irc, msg, args, state):
if utils.net.isIP(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid(_('ip'), args[0])
def getLetter(irc, msg, args, state):
if len(args[0]) == 1:
state.args.append(args.pop(0))
else:
state.errorInvalid(_('letter'), args[0])
def getMatch(irc, msg, args, state, regexp, errmsg):
m = regexp.search(args[0])
if m is not None:
state.args.append(m)
del args[0]
else:
state.error(errmsg, Raise=True)
def getLiteral(irc, msg, args, state, literals, errmsg=None):
# ??? Should we allow abbreviations?
if isinstance(literals, minisix.string_types):
literals = (literals,)
abbrevs = utils.abbrev(literals)
if args[0] in abbrevs:
state.args.append(abbrevs[args.pop(0)])
elif errmsg is not None:
state.error(errmsg, Raise=True)
else:
raise callbacks.ArgumentError
def getTo(irc, msg, args, state):
if args[0].lower() == 'to':
args.pop(0)
def getPlugin(irc, msg, args, state, require=True):
cb = irc.getCallback(args[0])
if cb is not None:
state.args.append(cb)
del args[0]
elif require:
state.errorInvalid(_('plugin'), args[0])
else:
state.args.append(None)
def getIrcColor(irc, msg, args, state):
if args[0] in ircutils.mircColors:
state.args.append(ircutils.mircColors[args.pop(0)])
else:
state.errorInvalid(_('irc color'))
def getText(irc, msg, args, state):
if args:
state.args.append(' '.join(args))
args[:] = []
else:
raise IndexError
wrappers = ircutils.IrcDict({
'admin': admin,
'anything': anything,
'banmask': getBanmask,
'boolean': getBoolean,
'callerInGivenChannel': callerInGivenChannel,
'isGranted': getHaveHalfopPlus, # Backward compatibility
'capability': getSomethingNoSpaces,
'channel': getChannel,
'channels': getChannels,
'channelOrGlobal': getChannelOrGlobal,
'channelDb': getChannelDb,
'checkCapability': checkCapability,
'checkCapabilityButIgnoreOwner': checkCapabilityButIgnoreOwner,
'checkChannelCapability': checkChannelCapability,
'color': getIrcColor,
'commandName': getCommandName,
'email': getEmail,
'expiry': getExpiry,
'filename': getSomething, # XXX Check for validity.
'float': getFloat,
'glob': getGlob,
'halfop': getHalfop,
'haveHalfop': getHaveHalfop,
'haveHalfop+': getHaveHalfopPlus,
'haveOp': getHaveOp,
'haveOp+': getHaveOp, # We don't handle modes greater than op.
'haveVoice': getHaveVoice,
'haveVoice+': getHaveVoicePlus,
'hostmask': getHostmask,
'httpUrl': getHttpUrl,
'id': getId,
'inChannel': inChannel,
'index': getIndex,
'int': getInt,
'ip': getIp,
'letter': getLetter,
'literal': getLiteral,
'long': getLong,
'lowered': getLowered,
'matches': getMatch,
'networkIrc': getNetworkIrc,
'nick': getNick,
'nickInChannel': nickInChannel,
'nonInt': getNonInt,
'nonNegativeInt': getNonNegativeInt,
'now': getNow,
'onlyInChannel': onlyInChannel,
'op': getOp,
'otherUser': getOtherUser,
'owner': owner,
'plugin': getPlugin,
'positiveInt': getPositiveInt,
'private': private,
'public': public,
'regexpMatcher': getMatcher,
'regexpMatcherMany': getMatcherMany,
'regexpReplacer': getReplacer,
'seenNick': getSeenNick,
'something': getSomething,
'somethingWithoutSpaces': getSomethingNoSpaces,
'text': getText,
'to': getTo,
'url': getUrl,
'user': getUser,
'validChannel': validChannel,
'voice': getVoice,
})
def addConverter(name, wrapper):
wrappers[name] = wrapper
class UnknownConverter(KeyError):
pass
def getConverter(name):
try:
return wrappers[name]
except KeyError as e:
raise UnknownConverter(str(e))
def callConverter(name, irc, msg, args, state, *L):
getConverter(name)(irc, msg, args, state, *L)
###
# Contexts. These determine what the nature of conversions is; whether they're
# defaulted, or many of them are allowed, etc. Contexts should be reusable;
# i.e., they should not maintain state between calls.
###
def contextify(spec):
if not isinstance(spec, context):
spec = context(spec)
return spec
def setDefault(state, default):
if callable(default):
state.args.append(default())
else:
state.args.append(default)
class context(object):
def __init__(self, spec):
self.args = ()
self.spec = spec # for repr
if isinstance(spec, tuple):
assert spec, 'tuple spec must not be empty.'
self.args = spec[1:]
self.converter = getConverter(spec[0])
elif spec is None:
self.converter = getConverter('anything')
elif isinstance(spec, minisix.string_types):
self.args = ()
self.converter = getConverter(spec)
else:
assert isinstance(spec, context)
self.converter = spec
def __call__(self, irc, msg, args, state):
log.debug('args before %r: %r', self, args)
self.converter(irc, msg, args, state, *self.args)
log.debug('args after %r: %r', self, args)
def __repr__(self):
return '<%s for %s>' % (self.__class__.__name__, self.spec)
class rest(context):
def __call__(self, irc, msg, args, state):
if args:
original = args[:]
args[:] = [' '.join(args)]
try:
super(rest, self).__call__(irc, msg, args, state)
except Exception:
args[:] = original
else:
raise IndexError
# additional means: Look for this (and make sure it's of this type). If
# there are no arguments for us to check, then use our default.
class additional(context):
def __init__(self, spec, default=None):
self.__parent = super(additional, self)
self.__parent.__init__(spec)
self.default = default
def __call__(self, irc, msg, args, state):
try:
self.__parent.__call__(irc, msg, args, state)
except IndexError:
log.debug('Got IndexError, returning default.')
setDefault(state, self.default)
# optional means: Look for this, but if it's not the type I'm expecting or
# there are no arguments for us to check, then use the default value.
class optional(additional):
def __call__(self, irc, msg, args, state):
try:
super(optional, self).__call__(irc, msg, args, state)
except (callbacks.ArgumentError, callbacks.Error) as e:
log.debug('Got %s, returning default.', utils.exnToString(e))
state.errored = False
setDefault(state, self.default)
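To make the difference concrete, a hedged sketch of both contexts inside a wrap() spec (wrap is defined later in this module); the command and its behaviour are invented.

```python
def delay(self, irc, msg, args, seconds):
    """[<seconds>]

    Waits <seconds> seconds (default 60) before replying.
    """
    irc.reply('waiting %i seconds' % seconds)

# additional: a missing argument falls back to 60; a non-integer argument is an error.
delay = wrap(delay, [additional('int', 60)])
# optional: a missing argument OR one that fails the 'int' converter falls back to 60.
# delay = wrap(delay, [optional('int', 60)])
```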
class any(context):
def __init__(self, spec, continueOnError=False):
self.__parent = super(any, self)
self.__parent.__init__(spec)
self.continueOnError = continueOnError
def __call__(self, irc, msg, args, state):
st = state.essence()
try:
while args:
self.__parent.__call__(irc, msg, args, st)
except IndexError:
pass
except (callbacks.ArgumentError, callbacks.Error) as e:
if not self.continueOnError:
raise
else:
log.debug('Got %s, returning default.', utils.exnToString(e))
pass
state.args.append(st.args)
class many(any):
def __call__(self, irc, msg, args, state):
super(many, self).__call__(irc, msg, args, state)
if not state.args[-1]:
state.args.pop()
raise callbacks.ArgumentError
class first(context):
def __init__(self, *specs, **kw):
if 'default' in kw:
self.default = kw.pop('default')
assert not kw, 'Bad kwargs for first.__init__'
self.spec = specs # for __repr__
self.specs = list(map(contextify, specs))
def __call__(self, irc, msg, args, state):
errored = False
for spec in self.specs:
try:
spec(irc, msg, args, state)
return
except Exception as e:
e2 = e # 'e' is local.
errored = state.errored
state.errored = False
continue
if hasattr(self, 'default'):
state.args.append(self.default)
else:
state.errored = errored
raise e2
class reverse(context):
def __call__(self, irc, msg, args, state):
args[:] = args[::-1]
super(reverse, self).__call__(irc, msg, args, state)
args[:] = args[::-1]
class commalist(context):
def __call__(self, irc, msg, args, state):
original = args[:]
st = state.essence()
trailingComma = True
try:
while trailingComma:
arg = args.pop(0)
if not arg.endswith(','):
trailingComma = False
for part in arg.split(','):
if part: # trailing commas
super(commalist, self).__call__(irc, msg, [part], st)
state.args.append(st.args)
except Exception:
args[:] = original
raise
class getopts(context):
"""The empty string indicates that no argument is taken; None indicates
that there is no converter for the argument."""
def __init__(self, getopts):
self.spec = getopts # for repr
self.getopts = {}
self.getoptL = []
self.getoptLs = ''
for (name, spec) in getopts.items():
if spec == '':
if len(name) == 1:
self.getoptLs += name
self.getopts[name] = None
self.getoptL.append(name)
self.getopts[name] = None
else:
if len(name) == 1:
self.getoptLs += name + ':'
self.getopts[name] = contextify(spec)
self.getoptL.append(name + '=')
self.getopts[name] = contextify(spec)
log.debug('getopts: %r', self.getopts)
log.debug('getoptL: %r', self.getoptL)
def __call__(self, irc, msg, args, state):
log.debug('args before %r: %r', self, args)
(optlist, rest) = getopt.getopt(args, self.getoptLs, self.getoptL)
getopts = []
for (opt, arg) in optlist:
if opt.startswith('--'):
opt = opt[2:] # Strip --
else:
opt = opt[1:]
log.debug('opt: %r, arg: %r', opt, arg)
context = self.getopts[opt]
if context is not None:
st = state.essence()
context(irc, msg, [arg], st)
assert len(st.args) == 1
getopts.append((opt, st.args[0]))
else:
getopts.append((opt, True))
state.args.append(getopts)
args[:] = rest
log.debug('args after %r: %r', self, args)
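A sketch of what a getopts spec looks like and the value it appends; the option names are illustrative.

```python
# '--limit' takes an int argument; '--all' is a bare flag (empty-string spec).
opts = getopts({'limit': 'int', 'all': ''})

# Used as, e.g., wrap(search, [getopts({'limit': 'int', 'all': ''}), 'text']),
# arguments like ['--limit', '5', '--all', 'some', 'terms'] would hand the
# command optlist == [('limit', 5), ('all', True)] plus the text 'some terms'.
```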
###
# This is our state object, passed to converters along with irc, msg, and args.
###
class State(object):
log = log
def __init__(self, types):
self.args = []
self.kwargs = {}
self.types = types
self.channel = None
self.errored = False
def __getattr__(self, attr):
if attr.startswith('error'):
self.errored = True
return getattr(dynamic.irc, attr)
else:
raise AttributeError(attr)
def essence(self):
st = State(self.types)
for (attr, value) in self.__dict__.items():
if attr not in ('args', 'kwargs'):
setattr(st, attr, value)
return st
def __repr__(self):
return '%s(args=%r, kwargs=%r, channel=%r)' % (self.__class__.__name__,
self.args, self.kwargs,
self.channel)
###
# This is a compiled Spec object.
###
class Spec(object):
def _state(self, types, attrs={}):
st = State(types)
st.__dict__.update(attrs)
st.allowExtra = self.allowExtra
return st
def __init__(self, types, allowExtra=False):
self.types = types
self.allowExtra = allowExtra
utils.seq.mapinto(contextify, self.types)
def __call__(self, irc, msg, args, stateAttrs={}):
state = self._state(self.types[:], stateAttrs)
while state.types:
context = state.types.pop(0)
try:
context(irc, msg, args, state)
except IndexError:
raise callbacks.ArgumentError
if args and not state.allowExtra:
log.debug('args and not self.allowExtra: %r', args)
raise callbacks.ArgumentError
return state
def _wrap(f, specList=[], name=None, checkDoc=True, **kw):
name = name or f.__name__
assert (not checkDoc) or (hasattr(f, '__doc__') and f.__doc__), \
'Command %r has no docstring.' % name
spec = Spec(specList, **kw)
def newf(self, irc, msg, args, **kwargs):
state = spec(irc, msg, args, stateAttrs={'cb': self, 'log': self.log})
self.log.debug('State before call: %s', state)
if state.errored:
self.log.debug('Refusing to call %s due to state.errored.', f)
else:
try:
f(self, irc, msg, args, *state.args, **state.kwargs)
except TypeError:
self.log.error('Spec: %s', specList)
self.log.error('Received args: %s', args)
code = f.__code__
funcArgs = inspect.getargs(code)[0][len(self.commandArgs):]
self.log.error('Extra args: %s', funcArgs)
self.log.debug('Make sure you did not wrap a wrapped '
'function ;)')
raise
newf2 = utils.python.changeFunctionName(newf, name, f.__doc__)
newf2.__module__ = f.__module__
return internationalizeDocstring(newf2)
def wrap(f, *args, **kwargs):
if callable(f):
# Old-style call OR decorator syntax with no converter.
# f is the command.
return _wrap(f, *args, **kwargs)
else:
# Call with the Python decorator syntax
assert isinstance(f, list) or isinstance(f, tuple)
specList = f
def decorator(f):
return _wrap(f, specList, *args, **kwargs)
return decorator
wrap.__doc__ = """Useful wrapper for plugin commands.
Valid converters are: %s.
:param f: A command, taking (self, irc, msg, args, ...) as arguments
:param specList: A list of converters and contexts""" % \
', '.join(sorted(wrappers.keys()))
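An illustrative plugin command built with wrap() and two converters from the table above; the plugin, the command, and its behaviour are invented.

```python
class Example(callbacks.Plugin):
    """Toy plugin showing the wrap() calling convention."""
    def repeat(self, irc, msg, args, count, text):
        """<count> <text>

        Replies with <text> repeated <count> times.
        """
        irc.reply(' '.join([text] * count))
    # 'positiveInt' validates/converts the first argument; 'text' joins the rest.
    repeat = wrap(repeat, ['positiveInt', 'text'])
```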
__all__ = [
# Contexts.
'any', 'many',
'optional', 'additional',
'rest', 'getopts',
'first', 'reverse',
'commalist',
# Converter helpers.
'getConverter', 'addConverter', 'callConverter',
# Decorators.
'urlSnarfer', 'thread',
# Functions.
'wrap', 'process', 'regexp_wrapper',
# Stuff for testing.
'Spec',
]
# This doesn't work. Suck.
## if world.testing:
## __all__.append('Spec')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
avg_line_length: 34.212359 | max_line_length: 84 | alphanum_fraction: 0.60234
Row 3: pysnmp-with-texts/ETH-SWITCH-MIB.py (agustinhenze/mibs.snmplabs.com)

| field | value |
|---|---|
| hexsha | c55c0f913a9b984f56a696dac6fcc170262b17ad |
| size | 64,372 |
| ext | py |
| lang | Python |
| max_stars/max_issues/max_forks_repo_path | pysnmp-with-texts/ETH-SWITCH-MIB.py |
| max_stars/max_issues/max_forks_repo_name | agustinhenze/mibs.snmplabs.com |
| max_stars/max_issues/max_forks_repo_head_hexsha | 1fc5c07860542b89212f4c8ab807057d9a9206c7 |
| max_stars/max_issues/max_forks_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 8 |
| max_stars_repo_stars_event_min/max_datetime | 2019-05-09T17:04:00.000Z / 2021-06-09T06:50:51.000Z |
| max_issues_count | 4 |
| max_issues_repo_issues_event_min/max_datetime | 2019-05-31T16:42:59.000Z / 2020-01-31T21:57:17.000Z |
| max_forks_count | 10 |
| max_forks_repo_forks_event_min/max_datetime | 2019-04-30T05:51:36.000Z / 2022-02-16T03:33:41.000Z |

content:
#
# PySNMP MIB module ETH-SWITCH-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ETH-SWITCH-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:06:42 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint")
scanet, = mibBuilder.importSymbols("SCANET-MIB", "scanet")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Gauge32, Integer32, NotificationType, Counter64, MibIdentifier, Unsigned32, ModuleIdentity, ObjectIdentity, Bits, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, NotificationType, TimeTicks, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Integer32", "NotificationType", "Counter64", "MibIdentifier", "Unsigned32", "ModuleIdentity", "ObjectIdentity", "Bits", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "NotificationType", "TimeTicks", "iso")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ethSwitch = MibIdentifier((1, 3, 6, 1, 4, 1, 208, 39))
control = MibIdentifier((1, 3, 6, 1, 4, 1, 208, 39, 1))
module = MibIdentifier((1, 3, 6, 1, 4, 1, 208, 39, 2))
ports = MibIdentifier((1, 3, 6, 1, 4, 1, 208, 39, 3))
statistic = MibIdentifier((1, 3, 6, 1, 4, 1, 208, 39, 4))
adaptiveForwardMode = MibIdentifier((1, 3, 6, 1, 4, 1, 208, 39, 5))
chipSets = MibIdentifier((1, 3, 6, 1, 4, 1, 208, 39, 6))
cards = MibIdentifier((1, 3, 6, 1, 4, 1, 208, 39, 7))
class ProductIds(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 1163072800, 1163072816, 1163073552, 1163073568, 1163073584, 1163073585, 1163073586, 1163073600, 1163073616, 1163073617, 1163076624, 1163076640, 1163076656, 1163076657, 1163067410, 1163067456, 1163067648, 1163067649, 1163067650, 1163067651, 1163067664, 1163067665, 1163067732, 1163067733, 1280323585, 1280323586, 1280323587, 1280323588, 1280323589, 1280323590, 1280323604, 1280323605))
namedValues = NamedValues(("notAvailable", 1), ("es-1520", 1163072800), ("es-1530", 1163072816), ("es-1810", 1163073552), ("es-1820", 1163073568), ("es-1830", 1163073584), ("es-1831", 1163073585), ("es-1832", 1163073586), ("es-1840", 1163073600), ("es-1850", 1163073616), ("es100fx", 1163073617), ("es10t24", 1163076624), ("es10t24plus", 1163076640), ("es10mmt12", 1163076656), ("es10mmfl", 1163076657), ("es-0012", 1163067410), ("es-0040", 1163067456), ("es-0100", 1163067648), ("es-0101", 1163067649), ("es100mmfx", 1163067650), ("es-0103", 1163067651), ("es100mmtx", 1163067664), ("es-0111", 1163067665), ("es-0154", 1163067732), ("es-0155", 1163067733), ("lp-3001", 1280323585), ("lp-3002", 1280323586), ("lp-3003", 1280323587), ("lp-3004", 1280323588), ("lp-3005", 1280323589), ("lp-3006", 1280323590), ("lp-3014", 1280323604), ("lp-3015", 1280323605))
scSegments = MibScalar((1, 3, 6, 1, 4, 1, 208, 39, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("segmentA", 1), ("segmentB", 2), ("segmentAandB", 3), ("segmentC", 4), ("segmentAandC", 5), ("segmentBandC", 6), ("segmentAandBandC", 7), ("none", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: scSegments.setStatus('mandatory')
if mibBuilder.loadTexts: scSegments.setDescription('Object controlling the use of the System Center Backplane. Each bus is assigned a bit value in the object')
defaultSwitchMode = MibScalar((1, 3, 6, 1, 4, 1, 208, 39, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4, 5))).clone(namedValues=NamedValues(("cutThrough", 2), ("fragmentFree", 3), ("storeAndForward", 4), ("adaptive", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: defaultSwitchMode.setStatus('mandatory')
if mibBuilder.loadTexts: defaultSwitchMode.setDescription('Default Switch mode. used by the object portSwitchMode when its value is default(1)')
defaultThrottleBackMode = MibScalar((1, 3, 6, 1, 4, 1, 208, 39, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3))).clone(namedValues=NamedValues(("enable", 2), ("disable", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: defaultThrottleBackMode.setStatus('mandatory')
if mibBuilder.loadTexts: defaultThrottleBackMode.setDescription('Default ThrottleBack mode. used by the object portThrottleBack when its value is default(1)')
networkPort = MibScalar((1, 3, 6, 1, 4, 1, 208, 39, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: networkPort.setStatus('mandatory')
if mibBuilder.loadTexts: networkPort.setDescription('For a Workgroup Switch (only one multi MAC address port), this object determines the network port. The value equals the value of portNumber in the portTable. Notice that this object is valid for Workgroup switches only (ES-2410 and ES-1810). Other types of Switches returns the value 0 and has read-only access.')
expansionModule = MibScalar((1, 3, 6, 1, 4, 1, 208, 39, 2, 1), ProductIds()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expansionModule.setStatus('mandatory')
if mibBuilder.loadTexts: expansionModule.setDescription('An ID that identifies the expansion module')
portLastChange = MibScalar((1, 3, 6, 1, 4, 1, 208, 39, 3, 1), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portLastChange.setStatus('mandatory')
if mibBuilder.loadTexts: portLastChange.setDescription('The value of sysUpTime at the last time any interface has changed state')
portTable = MibTable((1, 3, 6, 1, 4, 1, 208, 39, 3, 2), )
if mibBuilder.loadTexts: portTable.setStatus('mandatory')
if mibBuilder.loadTexts: portTable.setDescription('Port Table.')
portEntry = MibTableRow((1, 3, 6, 1, 4, 1, 208, 39, 3, 2, 1), ).setIndexNames((0, "ETH-SWITCH-MIB", "portNumber"))
if mibBuilder.loadTexts: portEntry.setStatus('mandatory')
if mibBuilder.loadTexts: portEntry.setDescription('A Port entry containing object for a Switch port.')
portNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portNumber.setStatus('mandatory')
if mibBuilder.loadTexts: portNumber.setDescription('A number that uniquely identifies a port')
portInterfaceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portInterfaceIndex.setStatus('mandatory')
if mibBuilder.loadTexts: portInterfaceIndex.setDescription('The value of this object equals the ports interface index value, ex. portIfIndex. If the value of this object is zero, then the port is not active, i.e. does not appear in the portIfTable.')
portLED = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 2, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portLED.setStatus('mandatory')
if mibBuilder.loadTexts: portLED.setDescription('The value of this object represent the status (colour) of the LED(s) attached to this port. Lower 3 bits indicates off/colour (off(0), red(1), green(2), yellow(4)), Higher 5 bits reserved for flash frequency. For value 0-9 the resolution is 0.1 Hz and 10-31 the resolution is 1.0 Hz. Ex. 8 gives 0.8 Hz and 11 gives 2 Hz. For PIM slots with two LEDs that can contain either an expansion card, a one port PIM or a PIM with multiple ports a special case is needed. The two LEDs are mapped into a two LED octets. In case the PIM slot contain a PIM with multiple ports a third LED octet represent the first port on the PIM. In the case there is two LEDs on the front, the first LED octet is the front and the second the back.')
portState = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portState.setStatus('mandatory')
if mibBuilder.loadTexts: portState.setDescription('This is a bit array which represent the state of the port. bit 0: Disabled due to configuration bit 1: Hardware error bit 2: No linkpulse bit 3: Disabled by manager bit 4: Disabled by access control bit 5: Disabled due to shared port')
portCardNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portCardNumber.setStatus('mandatory')
if mibBuilder.loadTexts: portCardNumber.setDescription('The cardNumber in the switch system.')
portPimNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 2, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portPimNumber.setStatus('mandatory')
if mibBuilder.loadTexts: portPimNumber.setDescription('The pimNumber in the switch system.')
portPimPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 2, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portPimPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: portPimPortNumber.setDescription('The relative number of the port on the PIM.')
portIfTable = MibTable((1, 3, 6, 1, 4, 1, 208, 39, 3, 3), )
if mibBuilder.loadTexts: portIfTable.setStatus('mandatory')
if mibBuilder.loadTexts: portIfTable.setDescription('Switch Interface Table.')
portIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 208, 39, 3, 3, 1), ).setIndexNames((0, "ETH-SWITCH-MIB", "portIfIndex"))
if mibBuilder.loadTexts: portIfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: portIfEntry.setDescription('An entry containing object for a Switch interface.')
portIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: portIfIndex.setDescription('An index value that uniquely identifies an interface. The interface identified by a particular value of this index is the same interface as identified by the same value of ifIndex.')
portIfDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 3, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 31))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: portIfDescr.setStatus('mandatory')
if mibBuilder.loadTexts: portIfDescr.setDescription('User configurable description of the port.')
portIfLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 3, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: portIfLocation.setStatus('mandatory')
if mibBuilder.loadTexts: portIfLocation.setDescription('User configurable location of the port.')
portIfSwitchMode = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 99))).clone(namedValues=NamedValues(("default", 1), ("cutThrough", 2), ("fragmentFree", 3), ("storeAndForward", 4), ("adaptive", 5), ("notAvailable", 99)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: portIfSwitchMode.setStatus('mandatory')
if mibBuilder.loadTexts: portIfSwitchMode.setDescription('Port Switch mode. If set to default(1) value from defaultSwitchMode is used.')
portIfDuplex = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 99))).clone(namedValues=NamedValues(("autoDetect", 1), ("half", 2), ("full", 3), ("autoDetectedHalf", 4), ("autoDetectedFull", 5), ("notAvailable", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portIfDuplex.setStatus('mandatory')
if mibBuilder.loadTexts: portIfDuplex.setDescription('Port Duplex mode. For ports in auto mode the mode may change automatically between autoDetect(1), autoDetectedHalf(3) and autoDetectedFull(5).')
portIfThrottleBack = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 99))).clone(namedValues=NamedValues(("default", 1), ("enable", 2), ("disable", 3), ("notAvailable", 99)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: portIfThrottleBack.setStatus('mandatory')
if mibBuilder.loadTexts: portIfThrottleBack.setDescription('Port ThrottleBack mode. If set to default(1) value from defaultThrottleBackMode is used.')
portIfType = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 3, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 14, 15, 51, 54, 100, 101, 102, 110))).clone(namedValues=NamedValues(("absent", 1), ("tenbase2", 2), ("aui", 3), ("tenbasetutp", 4), ("tenbasetstp", 5), ("tenbasefl", 6), ("tenbaseflfullduplex", 7), ("tenbasetxutp", 14), ("tenbasetxstp", 15), ("hundredbasetx", 51), ("hundredbasefx", 54), ("backplane", 100), ("hsb", 101), ("internal", 102), ("layer3link", 110)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portIfType.setStatus('mandatory')
if mibBuilder.loadTexts: portIfType.setDescription('The type of Physical interface at the port.')
portIfDuplexSupported = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 7, 99))).clone(namedValues=NamedValues(("half", 1), ("full", 2), ("halfAndFull", 3), ("auto", 4), ("autoAndHalfAndFull", 7), ("notAvailable", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portIfDuplexSupported.setStatus('mandatory')
if mibBuilder.loadTexts: portIfDuplexSupported.setDescription('The duplex mode supported for a port.')
portIfSpeedSupported = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 3, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 99))).clone(namedValues=NamedValues(("speed10Mbit", 1), ("speed100Mbit", 2), ("speed10And100Mbit", 3), ("speedAutoAnd10And100Mbit", 4), ("speed155Mbit", 5), ("notAvailable", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portIfSpeedSupported.setStatus('mandatory')
if mibBuilder.loadTexts: portIfSpeedSupported.setDescription('The speed supported for a port.')
portIfSpeedAndDuplex = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 3, 3, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(5, 5)).setFixedLength(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: portIfSpeedAndDuplex.setStatus('mandatory')
if mibBuilder.loadTexts: portIfSpeedAndDuplex.setDescription('This object is used to configure the speed and duplex mode for a port. Byte 1: negotiation manual(0) autoDisable(1) autoAlternative(2) Byte 2: speed auto(1) 10(2) 100(3) 155(4) Byte 3: alternativeSpeed unused(0) (read-only) Byte 4: duplexMode auto(1) half(2) full(3) Byte 5: alternativeDuplexMode unused(0) (read-only) half(2) full(3) Explanation of the different negotiation: Manual: The port does not support auto detect of speed and duplex which means that only fixed values are legal. AutoDisable: The port supports auto negotiation and if the connected port does not except the suggested values the port will be disabled. AutoAlternative: The port supports auto negotiation and if the connected port does not except the suggested values the port will be set to some alternative values.')
txStatTable = MibTable((1, 3, 6, 1, 4, 1, 208, 39, 4, 1), )
if mibBuilder.loadTexts: txStatTable.setStatus('mandatory')
if mibBuilder.loadTexts: txStatTable.setDescription('MIA ASIC Transmit statistic Table.')
txStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1), ).setIndexNames((0, "ETH-SWITCH-MIB", "txStatIndex"))
if mibBuilder.loadTexts: txStatEntry.setStatus('mandatory')
if mibBuilder.loadTexts: txStatEntry.setDescription('An entry containing object for MIA ASIC Transmit statistic.')
txStatIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txStatIndex.setStatus('mandatory')
if mibBuilder.loadTexts: txStatIndex.setDescription('An index value that uniquely identifies an interface. The interface identified by a particular value of this index is the same interface as identified by the same value of ifIndex.')
txUCPkts64Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txUCPkts64Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txUCPkts64Octets.setDescription('64 byte unicast packets transmitted')
txUCPkts65To127Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txUCPkts65To127Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txUCPkts65To127Octets.setDescription('65 to 127 byte unicast packets transmitted')
txUCPkts128To255Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txUCPkts128To255Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txUCPkts128To255Octets.setDescription('128 to 255 byte unicast packets transmitted')
txUCPkts256To511Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txUCPkts256To511Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txUCPkts256To511Octets.setDescription('256 to 511 byte unicast packets transmitted')
txUCPkts512To1023Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txUCPkts512To1023Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txUCPkts512To1023Octets.setDescription('512 to 1023 byte unicast packets transmitted')
txUCPkts1024To1518Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txUCPkts1024To1518Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txUCPkts1024To1518Octets.setDescription('1024 to 1518 byte unicast packets transmitted')
txMCPkts64Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txMCPkts64Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txMCPkts64Octets.setDescription('64 byte multicast packets transmitted')
txMCPkts65To127Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txMCPkts65To127Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txMCPkts65To127Octets.setDescription('65 to 127 byte multicast packets transmitted')
txMCPkts128To255Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txMCPkts128To255Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txMCPkts128To255Octets.setDescription('128 to 255 byte multicast packets transmitted')
txMCPkts256To511Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txMCPkts256To511Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txMCPkts256To511Octets.setDescription('256 to 511 byte multicast packets transmitted')
txMCPkts512To1023Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txMCPkts512To1023Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txMCPkts512To1023Octets.setDescription('512 to 1023 byte multicast packets transmitted')
txMCPkts1024To1518Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txMCPkts1024To1518Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txMCPkts1024To1518Octets.setDescription('1024 to 1518 byte multicast packets transmitted')
txBCPkts64Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txBCPkts64Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txBCPkts64Octets.setDescription('64 byte broadcast packets transmitted')
txBCPkts65To127Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txBCPkts65To127Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txBCPkts65To127Octets.setDescription('65 to 127 byte broadcast packets transmitted')
txBCPkts128To255Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txBCPkts128To255Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txBCPkts128To255Octets.setDescription('128 to 255 byte broadcast packets transmitted')
txBCPkts256To511Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txBCPkts256To511Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txBCPkts256To511Octets.setDescription('256 to 511 byte broadcast packets transmitted')
txBCPkts512To1023Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txBCPkts512To1023Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txBCPkts512To1023Octets.setDescription('512 to 1023 byte broadcast packets transmitted')
txBCPkts1024To1518Octets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txBCPkts1024To1518Octets.setStatus('mandatory')
if mibBuilder.loadTexts: txBCPkts1024To1518Octets.setDescription('1024 to 1518 byte broadcast packets transmitted')
txDeffereds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txDeffereds.setStatus('mandatory')
if mibBuilder.loadTexts: txDeffereds.setDescription('A transmission must await a silent net in simplex mode. If the transmission is delayed due to a non-silent net, the transmission is deferred.')
txOctetsHis = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txOctetsHis.setStatus('mandatory')
if mibBuilder.loadTexts: txOctetsHis.setDescription('High part of a 64 bit octet transmitted counter')
txOctetsLos = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txOctetsLos.setStatus('mandatory')
if mibBuilder.loadTexts: txOctetsLos.setDescription('Low part of a 64 bit octet transmitted counter')
txExcessiveDefferalsErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txExcessiveDefferalsErrors.setStatus('mandatory')
if mibBuilder.loadTexts: txExcessiveDefferalsErrors.setDescription('The maximum deferral should be the duration of a maximum-length packet if there are no errors on the net. Excessive deferral is indicated if the deferral duration is more than approximately two maximum-length packets.')
txForwardedRxError = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txForwardedRxError.setStatus('mandatory')
if mibBuilder.loadTexts: txForwardedRxError.setDescription('The transmission is aborted on the transmitting port of the switch if the receiving port of the current packet receives a packet with an error or receives a collision fragment. This should never occur in store-and-forward mode, and collisions should never provoke this event in fragment-free mode, except for late collisions on the receiving net.')
txNiaUnderRunDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txNiaUnderRunDrops.setStatus('mandatory')
if mibBuilder.loadTexts: txNiaUnderRunDrops.setDescription('The NIA TX buffer underruns if the Nimbus is overloaded. This should only happen with a configuration that loads the Nimbus with more than a load equal to 44 simplex 10 Mbit nets. NOTE: This only applies to NIA100 interfaces.')
txLinkDownEvents = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: txLinkDownEvents.setStatus('mandatory')
if mibBuilder.loadTexts: txLinkDownEvents.setDescription('No link pulses are received. Usually this means that the cable has been removed.')
txAllCounterPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 27), OctetString().subtype(subtypeSpec=ValueSizeConstraint(127, 127)).setFixedLength(127)).setMaxAccess("readonly")
if mibBuilder.loadTexts: txAllCounterPackets.setStatus('mandatory')
if mibBuilder.loadTexts: txAllCounterPackets.setDescription('A collection of all counters for byte and packet counters. This object is needed to optimise SNMP traffic. The counters in this string are txStatIndex INTEGER txOctetsHis Counter txOctetsLos Counter txUCPkts64Octets Counter txUCPkts65To127Octets Counter txUCPkts128To255Octets Counter txUCPkts256To511Octets Counter txUCPkts512To1023Octets Counter txUCPkts1024To1518Octets Counter txMCPkts64Octets Counter txMCPkts65To127Octets Counter txMCPkts128To255Octets Counter txMCPkts256To511Octets Counter txMCPkts512To1023Octets Counter txMCPkts1024To1518Octets Counter txBCPkts64Octets Counter txBCPkts65To127Octets Counter txBCPkts128To255Octets Counter txBCPkts256To511Octets Counter txBCPkts512To1023Octets Counter txBCPkts1024To1518Octets Counter The values are represented in the OCTET STRING in the listed order, each value stored as a 32 bits big engine value.')
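# --- Illustrative helper (an assumption, not produced by the MIB compiler): the
# DESCRIPTION of txAllCounterPackets above packs the index plus twenty packet
# counters into one OCTET STRING, each value stored as a 32-bit big-endian
# integer in the listed order. A minimal decoding sketch:
import struct

_TX_ALL_COUNTER_PACKET_NAMES = (
    'txStatIndex', 'txOctetsHis', 'txOctetsLos',
    'txUCPkts64Octets', 'txUCPkts65To127Octets', 'txUCPkts128To255Octets',
    'txUCPkts256To511Octets', 'txUCPkts512To1023Octets', 'txUCPkts1024To1518Octets',
    'txMCPkts64Octets', 'txMCPkts65To127Octets', 'txMCPkts128To255Octets',
    'txMCPkts256To511Octets', 'txMCPkts512To1023Octets', 'txMCPkts1024To1518Octets',
    'txBCPkts64Octets', 'txBCPkts65To127Octets', 'txBCPkts128To255Octets',
    'txBCPkts256To511Octets', 'txBCPkts512To1023Octets', 'txBCPkts1024To1518Octets',
)

def _decode_tx_all_counter_packets(raw):
    """Unpack a txAllCounterPackets value into a name -> counter dict."""
    count = len(_TX_ALL_COUNTER_PACKET_NAMES)
    values = struct.unpack('>%dI' % count, bytes(raw)[:4 * count])
    return dict(zip(_TX_ALL_COUNTER_PACKET_NAMES, values))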
txAllCounterOthers = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 1, 1, 28), OctetString().subtype(subtypeSpec=ValueSizeConstraint(127, 127)).setFixedLength(127)).setMaxAccess("readonly")
if mibBuilder.loadTexts: txAllCounterOthers.setStatus('mandatory')
if mibBuilder.loadTexts: txAllCounterOthers.setDescription('A collection of all counters other than for byte and packet counters. This object is needed to optimise SNMP traffic. The counters in this string are txStatIndex INTEGER txDeffereds Counter txExcessiveDefferalsErrors Counter txForwardedRxError Counter txNiaUnderRunDrops Counter txLinkDownEvents Counter -- Counters from RFC1643 txCSenseErrors Counter txSQEErrors Counter txLateCollisions Counter txExcessiveCollisionErrors Counter txSingleCollisionFrames Counter txMultipleCollisionFrames Counter The values are represented in the OCTET STRING in the listed order, each value stored as a 32 bits big engine value.')
rxStatTable = MibTable((1, 3, 6, 1, 4, 1, 208, 39, 4, 2), )
if mibBuilder.loadTexts: rxStatTable.setStatus('mandatory')
if mibBuilder.loadTexts: rxStatTable.setDescription('MIA ASIC Receive statistic Table.')
rxStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1), ).setIndexNames((0, "ETH-SWITCH-MIB", "rxStatIndex"))
if mibBuilder.loadTexts: rxStatEntry.setStatus('mandatory')
if mibBuilder.loadTexts: rxStatEntry.setDescription('An entry containing object for MIA ASIC Receive statistic.')
rxStatIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxStatIndex.setStatus('mandatory')
if mibBuilder.loadTexts: rxStatIndex.setDescription('An index value that uniquely identifies an interface. The interface identified by a particular value of this index is the same interface as identified by the same value of ifIndex.')
rxUCPkts64OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxUCPkts64OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxUCPkts64OctetsLocals.setDescription('64 byte unicast packets received which has not been forwarded')
rxUCPkts64OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxUCPkts64OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxUCPkts64OctetsForwardeds.setDescription('64 byte unicast packets received which has been forwarded')
rxUCPkts65To127OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxUCPkts65To127OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxUCPkts65To127OctetsLocals.setDescription('65 to 127 byte unicast packets received which has not been forwarded')
rxUCPkts65To127OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxUCPkts65To127OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxUCPkts65To127OctetsForwardeds.setDescription('65 to 127 byte unicast packets received which has been forwarded')
rxUCPkts128To255OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxUCPkts128To255OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxUCPkts128To255OctetsLocals.setDescription('128 to 255 byte unicast packets received which has not been forwarded')
rxUCPkts128To255OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxUCPkts128To255OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxUCPkts128To255OctetsForwardeds.setDescription('128 to 255 byte unicast packets received which has been forwarded')
rxUCPkts256To511OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxUCPkts256To511OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxUCPkts256To511OctetsLocals.setDescription('256 to 511 byte unicast packets received which has not been forwarded')
rxUCPkts256To511OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxUCPkts256To511OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxUCPkts256To511OctetsForwardeds.setDescription('256 to 511 byte unicast packets received which has been forwarded')
rxUCPkts512To1023OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxUCPkts512To1023OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxUCPkts512To1023OctetsLocals.setDescription('512 to 1023 byte unicast packets received which has not been forwarded')
rxUCPkts512To1023OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxUCPkts512To1023OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxUCPkts512To1023OctetsForwardeds.setDescription('512 to 1023 byte unicast packets received which has been forwarded')
rxUCPkts1024To1518OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxUCPkts1024To1518OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxUCPkts1024To1518OctetsLocals.setDescription('1024 to 1518 byte unicast packets received which has not been forwarded')
rxUCPkts1024To1518OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxUCPkts1024To1518OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxUCPkts1024To1518OctetsForwardeds.setDescription('1024 to 1518 byte unicast packets received which has been forwarded')
rxShortErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxShortErrors.setStatus('mandatory')
if mibBuilder.loadTexts: rxShortErrors.setDescription('Received fragments shorter than the minimum ordinary collision fragment.')
rxRuntErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxRuntErrors.setStatus('mandatory')
if mibBuilder.loadTexts: rxRuntErrors.setDescription('Ordinary collision fragments received.')
rxDataRateMMErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxDataRateMMErrors.setStatus('mandatory')
if mibBuilder.loadTexts: rxDataRateMMErrors.setDescription('A Data Rate Mismatch Error occurs if the data rate deviation is larger than allowed. This should only occur if there are problems with the TX oscillator in the adapter transmitting the frame.')
rxMCPkts64OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxMCPkts64OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxMCPkts64OctetsLocals.setDescription('64 byte multicast packets received which has not been forwarded')
rxMCPkts64OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxMCPkts64OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxMCPkts64OctetsForwardeds.setDescription('64 byte multicast packets received which has been forwarded')
rxMCPkts65To127OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxMCPkts65To127OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxMCPkts65To127OctetsLocals.setDescription('65 to 127 byte multicast packets received which has not been forwarded')
rxMCPkts65To127OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxMCPkts65To127OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxMCPkts65To127OctetsForwardeds.setDescription('65 to 127 byte multicast packets received which has been forwarded')
rxMCPkts128To255OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxMCPkts128To255OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxMCPkts128To255OctetsLocals.setDescription('128 to 255 byte multicast packets received which has not been forwarded')
rxMCPkts128To255OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxMCPkts128To255OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxMCPkts128To255OctetsForwardeds.setDescription('128 to 255 byte multicast packets received which has been forwarded')
rxMCPkts256To511OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxMCPkts256To511OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxMCPkts256To511OctetsLocals.setDescription('256 to 511 byte multicast packets received which has not been forwarded')
rxMCPkts256To511OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxMCPkts256To511OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxMCPkts256To511OctetsForwardeds.setDescription('256 to 511 byte multicast packets received which has been forwarded')
rxMCPkts512To1023OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxMCPkts512To1023OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxMCPkts512To1023OctetsLocals.setDescription('512 to 1023 byte multicast packets received which has not been forwarded')
rxMCPkts512To1023OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxMCPkts512To1023OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxMCPkts512To1023OctetsForwardeds.setDescription('512 to 1023 byte multicast packets received which has been forwarded')
rxMCPkts1024To1518OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxMCPkts1024To1518OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxMCPkts1024To1518OctetsLocals.setDescription('1024 to 1518 byte multicast packets received which has not been forwarded')
rxMCPkts1024To1518OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 28), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxMCPkts1024To1518OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxMCPkts1024To1518OctetsForwardeds.setDescription('1024 to 1518 byte multicast packets received which has been forwarded')
rxOctetsLocalHis = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 29), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxOctetsLocalHis.setStatus('mandatory')
if mibBuilder.loadTexts: rxOctetsLocalHis.setDescription('High part of a 64 bit octet received counter which has not been forwarded')
rxOctetsLocalLos = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 30), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxOctetsLocalLos.setStatus('mandatory')
if mibBuilder.loadTexts: rxOctetsLocalLos.setDescription('Low part of a 64 bit octet received counter which has not been forwarded')
rxOctetsForwardedHis = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 31), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxOctetsForwardedHis.setStatus('mandatory')
if mibBuilder.loadTexts: rxOctetsForwardedHis.setDescription('High part of a 64 bit octet received counter which has been forwarded')
rxOctetsForwardedLos = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 32), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxOctetsForwardedLos.setStatus('mandatory')
if mibBuilder.loadTexts: rxOctetsForwardedLos.setDescription('Low part of a 64 bit octet received counter which has been forwarded')
rxBCPkts64OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 33), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxBCPkts64OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxBCPkts64OctetsLocals.setDescription('64 byte broadcast packets received which has not been forwarded')
rxBCPkts64OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 34), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxBCPkts64OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxBCPkts64OctetsForwardeds.setDescription('64 byte broadcast packets received which has been forwarded')
rxBCPkts65To127OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 35), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxBCPkts65To127OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxBCPkts65To127OctetsLocals.setDescription('65 to 127 byte broadcast packets received which has not been forwarded')
rxBCPkts65To127OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 36), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxBCPkts65To127OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxBCPkts65To127OctetsForwardeds.setDescription('65 to 127 byte broadcast packets received which has been forwarded')
rxBCPkts128To255OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 37), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxBCPkts128To255OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxBCPkts128To255OctetsLocals.setDescription('128 to 255 byte broadcast packets received which has not been forwarded')
rxBCPkts128To255OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 38), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxBCPkts128To255OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxBCPkts128To255OctetsForwardeds.setDescription('128 to 255 byte broadcast packets received which has been forwarded')
rxBCPkts256To511OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 39), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxBCPkts256To511OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxBCPkts256To511OctetsLocals.setDescription('256 to 511 byte broadcast packets received which has not been forwarded')
rxBCPkts256To511OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 40), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxBCPkts256To511OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxBCPkts256To511OctetsForwardeds.setDescription('256 to 511 byte broadcast packets received which has been forwarded')
rxBCPkts512To1023OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 41), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxBCPkts512To1023OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxBCPkts512To1023OctetsLocals.setDescription('512 to 1023 byte broadcast packets received which has not been forwarded')
rxBCPkts512To1023OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 42), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxBCPkts512To1023OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxBCPkts512To1023OctetsForwardeds.setDescription('512 to 1023 byte broadcast packets received which has been forwarded')
rxBCPkts1024To1518OctetsLocals = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 43), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxBCPkts1024To1518OctetsLocals.setStatus('mandatory')
if mibBuilder.loadTexts: rxBCPkts1024To1518OctetsLocals.setDescription('1024 to 1518 byte broadcast packets received which has not been forwarded')
rxBCPkts1024To1518OctetsForwardeds = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 44), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxBCPkts1024To1518OctetsForwardeds.setStatus('mandatory')
if mibBuilder.loadTexts: rxBCPkts1024To1518OctetsForwardeds.setDescription('1024 to 1518 byte broadcast packets received which has been forwarded')
rxFilterMACUnexp2ndPortDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 45), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxFilterMACUnexp2ndPortDrops.setStatus('mandatory')
if mibBuilder.loadTexts: rxFilterMACUnexp2ndPortDrops.setDescription('MAC address has been moved from one port to another')
rxFilterIllegalMACDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 46), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxFilterIllegalMACDrops.setStatus('mandatory')
if mibBuilder.loadTexts: rxFilterIllegalMACDrops.setDescription('Indicates more than one MAC address on a port which is not a network port, or in general that the filter discards a packet due to an illegal source MAC address.')
rxFlowCtrlCollCounter = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 47), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxFlowCtrlCollCounter.setStatus('mandatory')
if mibBuilder.loadTexts: rxFlowCtrlCollCounter.setDescription('This counter counts the number of flow control collisions, which are generated due to flow control. Flow control is used when most of the pool memory is in use, to limit the amount of further RX data until memory is available.')
rxVeryLongErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 48), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxVeryLongErrors.setStatus('mandatory')
if mibBuilder.loadTexts: rxVeryLongErrors.setDescription('Occurs only if a frame longer than 6000 bytes is received.')
rxLongErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 49), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxLongErrors.setStatus('mandatory')
if mibBuilder.loadTexts: rxLongErrors.setDescription('Occurs if a frame longer than 1518 bytes and shorter than 6000 bytes is received.')
rxPiaOutOfPoolsDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 50), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxPiaOutOfPoolsDrop.setStatus('mandatory')
if mibBuilder.loadTexts: rxPiaOutOfPoolsDrop.setDescription('Reception is aborted due to lack of Pool memory. The switch is overloaded.')
rxManchesterCodeViolationErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 51), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxManchesterCodeViolationErrors.setStatus('mandatory')
if mibBuilder.loadTexts: rxManchesterCodeViolationErrors.setDescription('An illegal symbol received on a 100Base-X port.')
rxRxJabbers = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 52), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxRxJabbers.setStatus('mandatory')
if mibBuilder.loadTexts: rxRxJabbers.setDescription('Jabber is counted instead of a long error if the frame contains dribble bits or FCS error.')
rxNiaOverRunDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 53), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxNiaOverRunDrops.setStatus('mandatory')
if mibBuilder.loadTexts: rxNiaOverRunDrops.setDescription('The NIA RX buffer overruns if the Nimbus is overloaded. This should only happen with a configuration that loads the Nimbus with more than a load equal to 44 simplex 10 Mbit nets.')
rxAllCounterPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 54), OctetString().subtype(subtypeSpec=ValueSizeConstraint(164, 164)).setFixedLength(164)).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxAllCounterPackets.setStatus('mandatory')
if mibBuilder.loadTexts: rxAllCounterPackets.setDescription('A collection of all counters for byte and packet counters. This object is needed to optimise SNMP traffic. The counters in this string are rxStatIndex INTEGER rxOctetsLocalHis Counter rxOctetsLocalLos Counter rxOctetsForwardedHis Counter rxOctetsForwardedLos Counter rxUCPkts64OctetsLocals Counter rxUCPkts64OctetsForwardeds Counter rxUCPkts65To127OctetsLocals Counter rxUCPkts65To127OctetsForwardeds Counter rxUCPkts128To255OctetsLocals Counter rxUCPkts128To255OctetsForwardeds Counter rxUCPkts256To511OctetsLocals Counter rxUCPkts256To511OctetsForwardeds Counter rxUCPkts512To1023OctetsLocals Counter rxUCPkts512To1023OctetsForwardeds Counter rxUCPkts1024To1518OctetsLocals Counter rxUCPkts1024To1518OctetsForwardeds Counter rxMCPkts64OctetsLocals Counter rxMCPkts64OctetsForwardeds Counter rxMCPkts65To127OctetsLocals Counter rxMCPkts65To127OctetsForwardeds Counter rxMCPkts128To255OctetsLocals Counter rxMCPkts128To255OctetsForwardeds Counter rxMCPkts256To511OctetsLocals Counter rxMCPkts256To511OctetsForwardeds Counter rxMCPkts512To1023OctetsLocals Counter rxMCPkts512To1023OctetsForwardeds Counter rxMCPkts1024To1518OctetsLocals Counter rxMCPkts1024To1518OctetsForwardeds Counter rxBCPkts64OctetsLocals Counter rxBCPkts64OctetsForwardeds Counter rxBCPkts65To127OctetsLocals Counter rxBCPkts65To127OctetsForwardeds Counter rxBCPkts128To255OctetsLocals Counter rxBCPkts128To255OctetsForwardeds Counter rxBCPkts256To511OctetsLocals Counter rxBCPkts256To511OctetsForwardeds Counter rxBCPkts512To1023OctetsLocals Counter rxBCPkts512To1023OctetsForwardeds Counter rxBCPkts1024To1518OctetsLocals Counter rxBCPkts1024To1518OctetsForwardeds Counter The values are represented in the OCTET STRING in the listed order, each value stored as a 32 bits big engine value.')
rxAllCounterOthers = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 4, 2, 1, 55), OctetString().subtype(subtypeSpec=ValueSizeConstraint(60, 60)).setFixedLength(60)).setMaxAccess("readonly")
if mibBuilder.loadTexts: rxAllCounterOthers.setStatus('mandatory')
if mibBuilder.loadTexts: rxAllCounterOthers.setDescription('A collection of all counters other than for byte and packet counters. This object is needed to optimise SNMP traffic. The counters in this string are rxStatIndex INTEGER rxShortErrors Counter rxRuntErrors Counter rxDataRateMMErrors Counter rxFilterMACUnexp2ndPortDrops Counter rxFilterIllegalMACDrops Counter rxFlowCtrlCollCounter Counter rxVeryLongErrors Counter rxLongErrors Counter rxPiaOutOfPoolsDrop Counter rxManchesterCodeViolationErrors Counter rxRxJabbers Counter rxNiaOverRunDrops Counter -- Counters from RFC1643 rxAlignErrors Counter rxFCSErrors Counter The values are represented in the OCTET STRING in the listed order, each value stored as a 32 bits big engine value.')
totalRxTxPackets = MibScalar((1, 3, 6, 1, 4, 1, 208, 39, 4, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: totalRxTxPackets.setStatus('mandatory')
if mibBuilder.loadTexts: totalRxTxPackets.setDescription('This object is used for LED control to display traffic. Values in the string are all packets transmitted and received at each port. The number and order of values equals the ifIndex. Each entry is 4 bytes. The first 4 bytes of the string is the value of portLastChange.')
totalCollisions = MibScalar((1, 3, 6, 1, 4, 1, 208, 39, 4, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: totalCollisions.setStatus('mandatory')
if mibBuilder.loadTexts: totalCollisions.setDescription('This object is used for LED control to display collision. Values in the string are the total collisions at each port. The number and order of values equals the ifIndex. Each entry is 4 bytes. The first 4 bytes of the string is the value of portLastChange.')
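# --- Illustrative helper (an assumption, not produced by the MIB compiler): as
# described above, totalRxTxPackets and totalCollisions carry the value of
# portLastChange in the first four bytes, followed by one 4-byte entry per port
# in ifIndex order. Byte order is assumed big-endian here, matching the counter
# strings above. A minimal decoding sketch:
def _decode_led_counter_string(raw):
    """Return (portLastChange, list of per-port values) from an LED control string."""
    import struct
    data = bytes(raw)
    entries = len(data) // 4
    values = struct.unpack('>%dI' % entries, data[:4 * entries])
    return values[0], list(values[1:])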
adaptiveForwardModeSampleTime = MibScalar((1, 3, 6, 1, 4, 1, 208, 39, 5, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: adaptiveForwardModeSampleTime.setStatus('mandatory')
if mibBuilder.loadTexts: adaptiveForwardModeSampleTime.setDescription('Specifies how long (in seconds) the received packets will be monitored. The resulting forwarding mode will be based on the quality of the packets.')
adaptiveForwardModeRuntsOffset = MibScalar((1, 3, 6, 1, 4, 1, 208, 39, 5, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: adaptiveForwardModeRuntsOffset.setStatus('mandatory')
if mibBuilder.loadTexts: adaptiveForwardModeRuntsOffset.setDescription('Specifies the minimum amount of runts during the sample time which forces the port out of cutThrough(2). The value is specified in per mille * 1000')
adaptiveForwardModeRuntsRange = MibScalar((1, 3, 6, 1, 4, 1, 208, 39, 5, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: adaptiveForwardModeRuntsRange.setStatus('mandatory')
if mibBuilder.loadTexts: adaptiveForwardModeRuntsRange.setDescription('Specifies together with adaptiveForwardModeRuntsOffset the maximum amount of runts allowed during the sample time, which forces the port back to cutThrough(2). The value is specified in per mille * 1000')
adaptiveForwardModeCrcsOffset = MibScalar((1, 3, 6, 1, 4, 1, 208, 39, 5, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: adaptiveForwardModeCrcsOffset.setStatus('mandatory')
if mibBuilder.loadTexts: adaptiveForwardModeCrcsOffset.setDescription('Specifies the maximum amount of crc errors during the sample time which forces the port out of Store and Forward. The value is specified in per mille * 1000')
adaptiveForwardModeCrcsRange = MibScalar((1, 3, 6, 1, 4, 1, 208, 39, 5, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: adaptiveForwardModeCrcsRange.setStatus('mandatory')
if mibBuilder.loadTexts: adaptiveForwardModeCrcsRange.setDescription('Specifies together with adaptiveForwardModeCrcsOffset the maximum amount of crc errors during the sample time which forces the port out of Store and Forward. The value is specified in per mille * 1000')
chipSetNIA10 = MibIdentifier((1, 3, 6, 1, 4, 1, 208, 39, 6, 1))
chipSetNIA100 = MibIdentifier((1, 3, 6, 1, 4, 1, 208, 39, 6, 2))
cardTable = MibTable((1, 3, 6, 1, 4, 1, 208, 39, 7, 1), )
if mibBuilder.loadTexts: cardTable.setStatus('mandatory')
if mibBuilder.loadTexts: cardTable.setDescription('This table describes which cards a switch system consists of and how they are connected.')
cardEntry = MibTableRow((1, 3, 6, 1, 4, 1, 208, 39, 7, 1, 1), ).setIndexNames((0, "ETH-SWITCH-MIB", "cardNumber"))
if mibBuilder.loadTexts: cardEntry.setStatus('mandatory')
if mibBuilder.loadTexts: cardEntry.setDescription('An entry contains either a card or an expansion card and how many PIMs it contains.')
cardNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 7, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cardNumber.setStatus('mandatory')
if mibBuilder.loadTexts: cardNumber.setDescription('The card index number.')
cardId = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 7, 1, 1, 2), ProductIds()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cardId.setStatus('mandatory')
if mibBuilder.loadTexts: cardId.setDescription('The product identification of the card.')
cardFirstPort = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 7, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cardFirstPort.setStatus('mandatory')
if mibBuilder.loadTexts: cardFirstPort.setDescription('The portNumber of the first port on the card.')
cardMaxPims = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 7, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cardMaxPims.setStatus('mandatory')
if mibBuilder.loadTexts: cardMaxPims.setDescription('The maximum number of PIMs this card can contain.')
pimTable = MibTable((1, 3, 6, 1, 4, 1, 208, 39, 7, 2), )
if mibBuilder.loadTexts: pimTable.setStatus('mandatory')
if mibBuilder.loadTexts: pimTable.setDescription('This table describes the PIMs inserted in the switch system.')
pimEntry = MibTableRow((1, 3, 6, 1, 4, 1, 208, 39, 7, 2, 1), ).setIndexNames((0, "ETH-SWITCH-MIB", "pimCardNumber"), (0, "ETH-SWITCH-MIB", "pimNumber"))
if mibBuilder.loadTexts: pimEntry.setStatus('mandatory')
if mibBuilder.loadTexts: pimEntry.setDescription('An entry describes the PIM.')
pimCardNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 7, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pimCardNumber.setStatus('mandatory')
if mibBuilder.loadTexts: pimCardNumber.setDescription('The card number in the cardTable that this PIM is inserted in.')
pimNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 7, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pimNumber.setStatus('mandatory')
if mibBuilder.loadTexts: pimNumber.setDescription('The number of the PIM.')
pimId = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 7, 2, 1, 3), ProductIds()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pimId.setStatus('mandatory')
if mibBuilder.loadTexts: pimId.setDescription('The product identification of the PIM.')
pimFirstPort = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 7, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pimFirstPort.setStatus('mandatory')
if mibBuilder.loadTexts: pimFirstPort.setDescription('The portNumber of the first port on the PIM.')
pimNumberOfPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 208, 39, 7, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pimNumberOfPorts.setStatus('mandatory')
if mibBuilder.loadTexts: pimNumberOfPorts.setDescription('The number of ports on the PIM.')
ethSwitchPermVioEvent = NotificationType((1, 3, 6, 1, 4, 1, 208, 39) + (0,1)).setObjects(("ETH-SWITCH-MIB", "portIfIndex"))
if mibBuilder.loadTexts: ethSwitchPermVioEvent.setDescription("This trap is sent when there has been a permanent entry violation. A MAC address has been detected at another port than the port where it has been configured as a permanent entry. The variable portIfIndex indicates the port where the MAC has incorrectly been detected. The problem can be solved by re-configuring the MAC's permanent entry, or by physically moving the MAC to the port it has been configured for.")
ethSwitchOnlyOneMACEvent = NotificationType((1, 3, 6, 1, 4, 1, 208, 39) + (0,2)).setObjects(("ETH-SWITCH-MIB", "portIfIndex"))
if mibBuilder.loadTexts: ethSwitchOnlyOneMACEvent.setDescription("This trap is sent when a port detects more than one MAC address at a port intended for only one MAC. This trap can only be sent from a Concentrator Switch where only one MAC is allowed, except for the network port. It can NOT be sent from a Backbone Switch, since it allows multiple MACs at all ports. The variable portIfIndex indicates the port where the MAC limit of one has been exceeded. The problem can be solved by physically moving the MAC to a spare Concentrator Switch port.")
ethSwitchMACVioEvent = NotificationType((1, 3, 6, 1, 4, 1, 208, 39) + (0,3)).setObjects(("ETH-SWITCH-MIB", "portIfIndex"))
if mibBuilder.loadTexts: ethSwitchMACVioEvent.setDescription('This trap is sent when a MAC access control violation has been detected. A MAC address has been detected at a port with MAC access restriction, and the MAC was not allowed. The variable portIfIndex indicates the port with the MAC access violation.')
ethSwitchAdaptiveForwEvent = NotificationType((1, 3, 6, 1, 4, 1, 208, 39) + (0,4)).setObjects(("ETH-SWITCH-MIB", "portIfIndex"), ("ETH-SWITCH-MIB", "portIfSwitchMode"), ("ETH-SWITCH-MIB", "portIfSwitchMode"))
if mibBuilder.loadTexts: ethSwitchAdaptiveForwEvent.setDescription('This trap is sent when a port automatically changes forwarding mode. This will only happen when the port has been configured to Adaptive mode. The syntax is (Portnumber, New mode, Old mode). Mode numbers are: 2 = Cut-Through 3 = Fragment Free 4 = Store and Forward')
ethSwitchMACFilterVioEvent = NotificationType((1, 3, 6, 1, 4, 1, 208, 39) + (0,5)).setObjects(("ETH-SWITCH-MIB", "portIfIndex"))
if mibBuilder.loadTexts: ethSwitchMACFilterVioEvent.setDescription('Port/MAC filter MAC violation on port: %portIfIndex% This trap is sent when a MAC address has been detected on a port (portIfIndex) where this MAC is not allowed access through the switch. Access restriction is due to an entry in the Port/MAC filters of the switch.')
mibBuilder.exportSymbols("ETH-SWITCH-MIB", rxFilterMACUnexp2ndPortDrops=rxFilterMACUnexp2ndPortDrops, ethSwitchOnlyOneMACEvent=ethSwitchOnlyOneMACEvent, txUCPkts512To1023Octets=txUCPkts512To1023Octets, pimCardNumber=pimCardNumber, rxBCPkts128To255OctetsForwardeds=rxBCPkts128To255OctetsForwardeds, rxManchesterCodeViolationErrors=rxManchesterCodeViolationErrors, rxStatIndex=rxStatIndex, portTable=portTable, rxUCPkts512To1023OctetsForwardeds=rxUCPkts512To1023OctetsForwardeds, portIfIndex=portIfIndex, rxRxJabbers=rxRxJabbers, txBCPkts128To255Octets=txBCPkts128To255Octets, totalCollisions=totalCollisions, chipSetNIA100=chipSetNIA100, txUCPkts64Octets=txUCPkts64Octets, txMCPkts256To511Octets=txMCPkts256To511Octets, txForwardedRxError=txForwardedRxError, txMCPkts64Octets=txMCPkts64Octets, defaultSwitchMode=defaultSwitchMode, networkPort=networkPort, adaptiveForwardMode=adaptiveForwardMode, rxUCPkts128To255OctetsForwardeds=rxUCPkts128To255OctetsForwardeds, rxAllCounterOthers=rxAllCounterOthers, rxMCPkts64OctetsForwardeds=rxMCPkts64OctetsForwardeds, portNumber=portNumber, adaptiveForwardModeCrcsOffset=adaptiveForwardModeCrcsOffset, adaptiveForwardModeCrcsRange=adaptiveForwardModeCrcsRange, ProductIds=ProductIds, rxStatEntry=rxStatEntry, rxUCPkts512To1023OctetsLocals=rxUCPkts512To1023OctetsLocals, portIfSpeedSupported=portIfSpeedSupported, rxOctetsLocalLos=rxOctetsLocalLos, rxFlowCtrlCollCounter=rxFlowCtrlCollCounter, txStatIndex=txStatIndex, rxBCPkts512To1023OctetsLocals=rxBCPkts512To1023OctetsLocals, rxMCPkts128To255OctetsLocals=rxMCPkts128To255OctetsLocals, rxUCPkts1024To1518OctetsForwardeds=rxUCPkts1024To1518OctetsForwardeds, rxOctetsForwardedHis=rxOctetsForwardedHis, rxMCPkts256To511OctetsForwardeds=rxMCPkts256To511OctetsForwardeds, txBCPkts512To1023Octets=txBCPkts512To1023Octets, adaptiveForwardModeRuntsRange=adaptiveForwardModeRuntsRange, ethSwitch=ethSwitch, cardEntry=cardEntry, portLED=portLED, chipSetNIA10=chipSetNIA10, portIfDescr=portIfDescr, rxMCPkts128To255OctetsForwardeds=rxMCPkts128To255OctetsForwardeds, adaptiveForwardModeSampleTime=adaptiveForwardModeSampleTime, txDeffereds=txDeffereds, rxDataRateMMErrors=rxDataRateMMErrors, rxBCPkts1024To1518OctetsLocals=rxBCPkts1024To1518OctetsLocals, txUCPkts128To255Octets=txUCPkts128To255Octets, rxBCPkts65To127OctetsForwardeds=rxBCPkts65To127OctetsForwardeds, portIfSwitchMode=portIfSwitchMode, cards=cards, rxBCPkts256To511OctetsForwardeds=rxBCPkts256To511OctetsForwardeds, rxRuntErrors=rxRuntErrors, rxUCPkts64OctetsLocals=rxUCPkts64OctetsLocals, cardId=cardId, ethSwitchMACFilterVioEvent=ethSwitchMACFilterVioEvent, txExcessiveDefferalsErrors=txExcessiveDefferalsErrors, rxBCPkts64OctetsForwardeds=rxBCPkts64OctetsForwardeds, txUCPkts1024To1518Octets=txUCPkts1024To1518Octets, pimTable=pimTable, scSegments=scSegments, portState=portState, txOctetsHis=txOctetsHis, pimNumber=pimNumber, ports=ports, rxUCPkts64OctetsForwardeds=rxUCPkts64OctetsForwardeds, rxUCPkts1024To1518OctetsLocals=rxUCPkts1024To1518OctetsLocals, portIfDuplexSupported=portIfDuplexSupported, txBCPkts65To127Octets=txBCPkts65To127Octets, rxMCPkts512To1023OctetsForwardeds=rxMCPkts512To1023OctetsForwardeds, rxMCPkts65To127OctetsForwardeds=rxMCPkts65To127OctetsForwardeds, rxLongErrors=rxLongErrors, portInterfaceIndex=portInterfaceIndex, txStatTable=txStatTable, control=control, txOctetsLos=txOctetsLos, rxFilterIllegalMACDrops=rxFilterIllegalMACDrops, portIfTable=portIfTable, module=module, txMCPkts128To255Octets=txMCPkts128To255Octets, portPimPortNumber=portPimPortNumber, 
rxAllCounterPackets=rxAllCounterPackets, portIfLocation=portIfLocation, txMCPkts1024To1518Octets=txMCPkts1024To1518Octets, rxPiaOutOfPoolsDrop=rxPiaOutOfPoolsDrop, cardTable=cardTable, portIfDuplex=portIfDuplex, txUCPkts256To511Octets=txUCPkts256To511Octets, rxShortErrors=rxShortErrors, txBCPkts1024To1518Octets=txBCPkts1024To1518Octets, rxUCPkts65To127OctetsForwardeds=rxUCPkts65To127OctetsForwardeds, portIfSpeedAndDuplex=portIfSpeedAndDuplex, rxMCPkts1024To1518OctetsLocals=rxMCPkts1024To1518OctetsLocals, txAllCounterOthers=txAllCounterOthers, portEntry=portEntry, rxMCPkts65To127OctetsLocals=rxMCPkts65To127OctetsLocals, txStatEntry=txStatEntry, statistic=statistic, txBCPkts256To511Octets=txBCPkts256To511Octets, rxStatTable=rxStatTable, rxUCPkts128To255OctetsLocals=rxUCPkts128To255OctetsLocals, rxUCPkts256To511OctetsLocals=rxUCPkts256To511OctetsLocals, totalRxTxPackets=totalRxTxPackets, pimNumberOfPorts=pimNumberOfPorts, portIfType=portIfType, expansionModule=expansionModule, rxBCPkts256To511OctetsLocals=rxBCPkts256To511OctetsLocals, ethSwitchAdaptiveForwEvent=ethSwitchAdaptiveForwEvent, txLinkDownEvents=txLinkDownEvents, txMCPkts65To127Octets=txMCPkts65To127Octets, portPimNumber=portPimNumber, rxMCPkts64OctetsLocals=rxMCPkts64OctetsLocals, rxMCPkts512To1023OctetsLocals=rxMCPkts512To1023OctetsLocals, portLastChange=portLastChange, rxBCPkts128To255OctetsLocals=rxBCPkts128To255OctetsLocals, txMCPkts512To1023Octets=txMCPkts512To1023Octets, rxMCPkts256To511OctetsLocals=rxMCPkts256To511OctetsLocals, rxMCPkts1024To1518OctetsForwardeds=rxMCPkts1024To1518OctetsForwardeds, portIfEntry=portIfEntry, portCardNumber=portCardNumber, rxUCPkts256To511OctetsForwardeds=rxUCPkts256To511OctetsForwardeds, rxBCPkts1024To1518OctetsForwardeds=rxBCPkts1024To1518OctetsForwardeds, rxUCPkts65To127OctetsLocals=rxUCPkts65To127OctetsLocals, ethSwitchMACVioEvent=ethSwitchMACVioEvent, cardMaxPims=cardMaxPims, rxBCPkts512To1023OctetsForwardeds=rxBCPkts512To1023OctetsForwardeds, adaptiveForwardModeRuntsOffset=adaptiveForwardModeRuntsOffset, pimEntry=pimEntry, txNiaUnderRunDrops=txNiaUnderRunDrops, txAllCounterPackets=txAllCounterPackets, pimId=pimId, cardNumber=cardNumber, ethSwitchPermVioEvent=ethSwitchPermVioEvent, rxNiaOverRunDrops=rxNiaOverRunDrops, portIfThrottleBack=portIfThrottleBack, rxBCPkts65To127OctetsLocals=rxBCPkts65To127OctetsLocals, txBCPkts64Octets=txBCPkts64Octets, rxOctetsForwardedLos=rxOctetsForwardedLos, rxVeryLongErrors=rxVeryLongErrors, chipSets=chipSets, txUCPkts65To127Octets=txUCPkts65To127Octets, rxBCPkts64OctetsLocals=rxBCPkts64OctetsLocals, pimFirstPort=pimFirstPort, defaultThrottleBackMode=defaultThrottleBackMode, cardFirstPort=cardFirstPort, rxOctetsLocalHis=rxOctetsLocalHis)
avg_line_length: 145.638009 | max_line_length: 6252 | alphanum_fraction: 0.79606

hexsha: 09e6d712df7af7644c79d5b52cb1767932cdbc89 | size: 1303 | ext: py | lang: Python
repo_path: keras_retinanet/layers/convp3d.py | repo_name: toosyou/keras-retinanet | repo_head_hexsha: c2b5c51e79c5783b5451feaa2d4e1cf9304e724b | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
import keras
from .group_norm import GroupNormalization
def ConvP3D(filters, kernel_size=3, stride=1, batch_norm=False, **kwargs):
if isinstance(kernel_size, int):
first_kernel_size = (kernel_size, kernel_size, 1)
second_kernel_size = (1, 1, kernel_size)
else:
first_kernel_size = (kernel_size[0], kernel_size[1], 1)
second_kernel_size = (1, 1, kernel_size[2])
if isinstance(stride, int):
first_stride = (stride, stride, 1)
second_stride = (1, 1, stride)
else:
first_stride = (stride[0], stride[1], 1)
second_stride = (1, 1, stride[2])
def f(x):
x = keras.layers.Conv3D(
filters=filters,
kernel_size=first_kernel_size,
strides=first_stride,
**kwargs
)(x)
if batch_norm:
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Conv3D(
filters=filters,
kernel_size=second_kernel_size,
strides=second_stride,
**kwargs
)(x)
if batch_norm:
x = keras.layers.BatchNormalization()(x)
return x
return f
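A brief usage sketch for the pseudo-3D convolution block above, assuming the standalone Keras functional API with a channels-last 5D input; the tensor shape, filter count, and padding argument are illustrative, not part of the original module:
import keras

# Hypothetical input: a 64x64 spatial grid, 8 frames, 3 channels (channels last).
inputs = keras.layers.Input(shape=(64, 64, 8, 3))
# Factorised 3x3x3 convolution: a 3x3x1 spatial conv followed by a 1x1x3 temporal conv.
outputs = ConvP3D(filters=32, kernel_size=3, stride=1, batch_norm=True, padding='same')(inputs)
model = keras.models.Model(inputs=inputs, outputs=outputs)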
avg_line_length: 34.289474 | max_line_length: 74 | alphanum_fraction: 0.521873

hexsha: 2935bcdd7ac161b12e56cf268d12a96a89ca3bb0 | size: 1207 | ext: py | lang: Python
repo_path: springcloudstream/options.py | repo_name: dturanski/springcloudstream | repo_head_hexsha: 208b542f9eba82e97882d52703af8e965a62a980 | licenses: ["Apache-2.0"]
max_stars_count: 1 (2017-05-01T20:10:40.000Z) | max_issues_count: 1 (2018-03-13T00:08:32.000Z) | max_forks_count: null
__copyright__ = '''
Copyright 2017 the original author or authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__author__ = 'David Turanski'
import optparse
class OptionsParser:
"""
    Encapsulates an optparse.OptionParser to handle command line options.
"""
def __init__(self):
self.parser = optparse.OptionParser()
self.parser.usage = "%prog [options] --help for help"
def add_option(self,*args, **kwargs):
return self.parser.add_option(*args, **kwargs)
def parse(self,args,validate=False):
opts,args = self.parser.parse_args(args)
validate and self.validate(opts)
return opts,args
def validate(self,options):
pass
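A short usage sketch for the wrapper above; the option names and defaults are illustrative and not part of the original module:
if __name__ == '__main__':
    import sys

    parser = OptionsParser()
    parser.add_option('-p', '--port', type='int', dest='port', default=9999,
                      help='TCP port to listen on')
    parser.add_option('-d', '--debug', action='store_true', dest='debug', default=False,
                      help='enable debug output')
    opts, args = parser.parse(sys.argv[1:], validate=True)
    print('port=%s debug=%s' % (opts.port, opts.debug))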
avg_line_length: 31.763158 | max_line_length: 75 | alphanum_fraction: 0.702568

hexsha: 36d0b542582e5f33765b955a002154aa5f0bbf43 | size: 45 | ext: py | lang: Python
repo_path: gamelocker/__init__.py | repo_name: ClarkThyLord/gamelocker-vg-python | repo_head_hexsha: a12a96b507a7a942e4dd7fff9b37b82151bd08d7 | licenses: ["MIT"]
max_stars_count: 1 (2018-08-18T10:15:18.000Z) | max_issues_count: null | max_forks_count: null
"""Code directory"""
from .wrapper import *
avg_line_length: 11.25 | max_line_length: 22 | alphanum_fraction: 0.666667

hexsha: 8a29ef9315661dfdfee6449816d8e570217ae0b4 | size: 5651 | ext: py | lang: Python
repo_path: Chapter4/Singularity/scripts/logfetch/tail.py | repo_name: HussainK72/Mastering-Mesos | repo_head_hexsha: c34d9822015b73172ad1478b21aa809d7c5c913e | licenses: ["MIT"]
max_stars_count: 11 (2016-05-25T15:44:34.000Z to 2021-07-24T19:37:30.000Z) | max_issues_count: 1 (2022-01-21T23:08:28.000Z) | max_forks_count: 6 (2016-05-30T06:41:24.000Z to 2022-02-27T10:57:58.000Z)
import os
import sys
import logfetch_base
import requests
import time
import fnmatch
import threading
from grep import grep_command
from termcolor import colored
from logfetch_base import log, get_json_response
TAIL_LOG_FORMAT = '{0}/sandbox/{1}/read'
READ_INTERVAL = 5
THREAD_TIMEOUT = 100000
BROWSE_FOLDER_FORMAT = '{0}/sandbox/{1}/browse'
def start_tail(args):
if args.requestId:
if not args.silent:
sys.stderr.write('Fetching tasks\n')
tasks = [str(t) for t in logfetch_base.tasks_for_requests(args)]
else:
tasks = [args.taskId]
log(colored('Tailing logs for tasks:\n', 'green'), args, True)
for t in tasks:
log(colored('{0}\n'.format(t), 'yellow'), args, True)
log(colored('ctrl+c to exit\n', 'cyan'), args, False)
try:
threads = []
for task in tasks:
thread = LogStreamer(args, task)
threads.append(thread)
thread.start()
for t in threads:
t.join(THREAD_TIMEOUT) #Need a timeout otherwise can't be killed by ctrl+c
      if not t.isAlive():  # isAlive is a method; the bare attribute is always truthy
break
except KeyboardInterrupt:
log(colored('Stopping tail', 'magenta') + '\n', args, False)
sys.exit(0)
def logs_folder_files(args, task):
uri = BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task)
files_json = get_json_response(uri, args, {'path' : '{0}/logs'.format(task)})
if 'files' in files_json:
files = files_json['files']
return [f['name'] for f in files if valid_logfile(f)]
else:
return [f['path'].rsplit('/')[-1] for f in files_json if valid_logfile(f)]
def base_directory_files(args, task):
uri = BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task)
files_json = get_json_response(uri, args)
if 'files' in files_json:
files = files_json['files']
return [f['name'] for f in files if valid_logfile(f)]
else:
return [f['path'].rsplit('/')[-1] for f in files_json if valid_logfile(f)]
def valid_logfile(fileData):
not_a_directory = not fileData['mode'].startswith('d')
is_a_logfile = fnmatch.fnmatch(fileData['name'], '*.log') or fnmatch.fnmatch(fileData['name'], '*.out') or fnmatch.fnmatch(fileData['name'], '*.err')
return not_a_directory and is_a_logfile
class LogStreamer(threading.Thread):
def __init__(self, args, task):
threading.Thread.__init__(self)
self.daemon = True
self.Args = args
self.Task = task
def run(self):
self.stream_log_for_task(self.Args, self.Task)
def stream_log_for_task(self, args, task):
uri = TAIL_LOG_FORMAT.format(logfetch_base.base_uri(args), task)
path = '{0}/{1}'.format(task, args.logfile)
keep_trying = True
try:
params = {"path" : path}
logfile_response = requests.get(uri, params=params, headers=args.headers)
logfile_response.raise_for_status()
offset = long(logfile_response.json()['offset'])
except ValueError:
sys.stderr.write(colored('Could not get initial offset for log in task {0}, check that the task is still active and that the slave it runs on has not been decommissioned\n'.format(task), 'red'))
keep_trying = False
except:
sys.stderr.write(colored('Could not find log file at path {0} for task {1}, check your -l arg and try again\n'.format(args.logfile, task), 'red'))
self.show_available_files(args, task)
keep_trying = False
while keep_trying:
try:
offset = self.fetch_new_log_data(uri, path, offset, args, task)
time.sleep(5)
except ValueError:
sys.stderr.write(colored('Could not tail logs for task {0}, check that the task is still active and that the slave it runs on has not been decommissioned\n'.format(task), 'red'))
keep_trying = False
def fetch_new_log_data(self, uri, path, offset, args, task):
params = {
"path" : path,
"offset" : offset
}
response = requests.get(uri, params=params, headers=args.headers).json()
prefix = '({0}) =>\n'.format(task) if args.verbose else ''
if len(response['data'].encode('utf-8')) > 0:
if args.grep:
filename = '{0}/.grep{1}'.format(args.dest, self.Task)
self.create_grep_file(args, filename, response['data'])
output = os.popen(grep_command(args, filename)).read()
sys.stdout.write('{0}{1}'.format(colored(prefix, 'cyan'), output))
self.remove_grep_file(filename)
else:
sys.stdout.write('{0}{1}'.format(colored(prefix, 'cyan'), response['data'].encode('utf-8')))
return offset + len(response['data'].encode('utf-8'))
else:
return offset
def create_grep_file(self, args, filename, content):
grep_file = open(filename, 'wb')
grep_file.write(content.encode('utf-8'))
grep_file.close()
def remove_grep_file(self, grep_file):
if os.path.isfile(grep_file):
os.remove(grep_file)
def show_available_files(self, args, task):
sys.stderr.write(colored('Available files (-l arguments):\n', 'cyan'))
try:
for f in base_directory_files(args, task):
sys.stderr.write(f + '\n')
for f in logs_folder_files(args, task):
sys.stderr.write('logs/' + f + '\n')
except:
sys.stderr.write(colored('Could not fetch list of files', 'red'))
| 40.654676 | 206 | 0.615112 |
c123c76347b13fd43ac55c48b0f11aa9e80f2406 | 381 | py | Python | src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P2_Task14_w1.py | MingjunGeng/Code-Knowledge | 5b376f6b3ff9e7fa0ab41c7b57e3a80313fa0daa | ["MIT"] | null | null | null | src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P2_Task14_w1.py | MingjunGeng/Code-Knowledge | 5b376f6b3ff9e7fa0ab41c7b57e3a80313fa0daa | ["MIT"] | null | null | null | src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P2_Task14_w1.py | MingjunGeng/Code-Knowledge | 5b376f6b3ff9e7fa0ab41c7b57e3a80313fa0daa | ["MIT"] | 1 | 2022-03-18T04:52:10.000Z | 2022-03-18T04:52:10.000Z |
#!/usr/bin/python3
# --- 001 > U5W2P1_Task13_w1
def solution( a, b, c ):
input = [a, b, c]
larger = -float('inf')
for x in input:
        if larger < x:
larger = x
return larger
if __name__ == "__main__":
print('----------start------------')
a = -2
b = 20
c = 10
print(solution( a, b, c ))
print('------------end------------')
| 19.05 | 40 | 0.43832 |
02e32f888fdd98a3e3abd1a4202188134b946d8b | 22,752 | py | Python | whatshap/threading.py | erikvdp/whatshap-TLA | 1e6cf0181de57ff5d5e0f3edfef2e7b70a4d91ec | ["MIT"] | null | null | null | whatshap/threading.py | erikvdp/whatshap-TLA | 1e6cf0181de57ff5d5e0f3edfef2e7b70a4d91ec | ["MIT"] | null | null | null | whatshap/threading.py | erikvdp/whatshap-TLA | 1e6cf0181de57ff5d5e0f3edfef2e7b70a4d91ec | ["MIT"] | null | null | null |
import itertools as it
import logging
from collections import defaultdict
from .core import HaploThreader
logger = logging.getLogger(__name__)
def run_threading(readset, clustering, ploidy, genotypes, block_cut_sensitivity):
"""
Main method for the threading stage of the polyploid phasing algorithm. Takes the following input:
readset -- The fragment matrix to phase
clustering -- A list of clusters. Each cluster is a list of read ids, indicating which reads it contains. Every read can
only be present in one cluster.
ploidy -- Number of haplotypes to phase
    block_cut_sensitivity -- Policy for how conservative the block cuts have to be. 0 is one phasing block no matter what, 5
is very short blocks
For every variant, the threading algorithm finds a tuple of clusters through which the haplotypes can be threaded with
minimal cost. Costs arise when the positional coverage of a cluster does not match the number of haplotypes threaded through it
or when haplotypes switch the cluster on two consecutive positions.
"""
# compute auxiliary data
index, rev_index = get_position_map(readset)
num_vars = len(rev_index)
positions = get_cluster_start_end_positions(readset, clustering, index)
coverage = get_coverage(readset, clustering, index)
cov_map = get_pos_to_clusters_map(coverage, ploidy)
consensus = get_local_cluster_consensus(readset, clustering, cov_map, positions)
# compute threading through the clusters
path = compute_threading_path(
readset, clustering, num_vars, coverage, cov_map, consensus, ploidy, genotypes
)
# determine cut positions
num_clusters = len(clustering)
cut_positions = compute_cut_positions(path, block_cut_sensitivity, num_clusters)
# we can look at the sequences again to use the most likely continuation, when two or more clusters switch at the same position
c_to_c_global = compute_cluster_to_cluster_similarity(
readset, clustering, index, consensus, cov_map
)
path = improve_path_on_multiswitches(path, num_clusters, c_to_c_global)
# we can look at the sequences again to use the most likely continuation, when a haplotype leaves a collapsed cluster (currently inactive)
path = improve_path_on_collapsedswitches(path, num_clusters, c_to_c_global)
logger.debug("Cut positions: {}".format(cut_positions))
# compute haplotypes
haplotypes = []
for i in range(ploidy):
hap = ""
alleles_as_strings = []
for pos in range(len(path)):
c_id = path[pos][i]
allele = consensus[pos][c_id] if c_id in consensus[pos] else -1
if allele == -1:
alleles_as_strings.append("n")
else:
alleles_as_strings.append(str(allele))
haplotypes.append(hap.join(alleles_as_strings))
return (cut_positions, path, haplotypes)
def compute_threading_path(
readset,
clustering,
num_vars,
coverage,
cov_map,
consensus,
ploidy,
genotypes,
switch_cost=32.0,
affine_switch_cost=8.0,
):
"""
Runs the threading algorithm for the haplotypes using the given costs for switches. The normal switch cost is the
    cost per haplotype that switches clusters from one position to another (non-matching coverage on a single position
    always costs 1.0). The affine switch cost is an additional offset for every position where a switch occurs.
These additional costs encourage the threading algorithm to summarize multiple switches on consecutive positions
into one big switch.
"""
logger.debug("Computing threading paths ..")
# arrange data
compressed_coverage = (
[]
) # rearrange the content of "coverage" in a position-wise way, such that the i-th coverage refers to the i-th cluster in cov_map
compressed_consensus = (
[]
) # for every position, give a list of consensi, such that the i-th consensus refers to the i-th cluster in cov_map
for pos in range(num_vars):
coverage_list = []
consensus_list = []
for i in range(len(cov_map[pos])):
coverage_list.append(coverage[pos][cov_map[pos][i]])
consensus_list.append(consensus[pos][cov_map[pos][i]])
compressed_coverage.append(coverage_list)
compressed_consensus.append(consensus_list)
# run threader
threader = HaploThreader(
ploidy, switch_cost, affine_switch_cost, True, 16 * 2 ** ploidy if ploidy > 6 else 0
)
path = threader.computePathsBlockwise(
[0], cov_map, compressed_coverage, compressed_consensus, genotypes
)
assert len(path) == num_vars
return path
def compute_cut_positions(path, block_cut_sensitivity, num_clusters):
"""
    Takes a threading as input and computes on which positions a cut should be made according to the cut sensitivity. The levels mean:
0 -- No cuts at all, even if regions are not connected by any reads
1 -- Only cut, when regions are not connected by any reads (is already done in advance, so nothing to do here)
    2 -- Only cut, when regions are not connected by a sufficient number of reads (also already done in advance)
3 -- Cut between two positions, if at least two haplotypes switch their cluster on this transition. In this case it is ambiguous,
         how the haplotypes are continued and we might get switch errors if we choose arbitrarily.
    4 -- In addition to 3, cut every time a haplotype leaves a collapsed region. Collapsed regions are positions, where multiple
         haplotypes go through the same cluster. If one haplotype leaves the cluster (e.g. due to a decline in cluster coverage), we do not
         know which to pick, so we are conservative here. Exception: If a cluster contains a set of multiple haplotypes since the start of
         the current block and one haplotype wants to leave, we do not need a cut, since it does not matter which haplotype leaves. Default option.
5 -- Cut every time a haplotype switches clusters. Most conservative, but also very short blocks.
The list of cut positions contains the first position of every block. Therefore, position 0 is always in the cut list.
"""
cut_positions = [0]
if len(path) == 0:
return cut_positions
ploidy = len(path[0])
dissim_threshold = 1
rise_fall_dissim = 0
if block_cut_sensitivity >= 3:
if block_cut_sensitivity >= 5:
# cut every time a haplotype jumps
dissim_threshold = 1
rise_fall_dissim = ploidy + 1
elif block_cut_sensitivity == 4:
# cut for every multi-switch and for every rise-fall-ploidy change
dissim_threshold = 2
rise_fall_dissim = ploidy + 1
else:
# cut for every multi-jump
dissim_threshold = 2
rise_fall_dissim = 0
if block_cut_sensitivity >= 3:
copynrs = []
for i in range(0, len(path)):
copynr = defaultdict(int)
for j in range(0, ploidy):
if path[i][j] not in copynr:
copynr[path[i][j]] = 0
copynr[path[i][j]] += 1
copynrs.append(copynr)
cpn_rising = [False for c_id in range(num_clusters)]
for i in range(1, len(path)):
dissim = 0
for j in range(0, ploidy):
old_c = path[i - 1][j]
new_c = path[i][j]
if old_c != new_c:
rise_fall = False
# check if previous cluster went down from copy number >= 2 to a smaller one >= 1
if copynrs[i - 1][old_c] > copynrs[i][old_c] >= 1:
if cpn_rising[old_c]:
rise_fall = True
# check if new cluster went up from copy number >= 1 to a greater one >= 2
if copynrs[i][new_c] > copynrs[i - 1][new_c] >= 1:
cpn_rising[new_c] = True
# check if one cluster has been rising and then falling in the current block
if rise_fall:
dissim += rise_fall_dissim
# count general switches
dissim += 1
if dissim >= dissim_threshold:
cpn_rising = [False for c_id in range(num_clusters)]
cut_positions.append(i)
return cut_positions
def compute_cluster_to_cluster_similarity(readset, clustering, index, consensus, cov_map):
"""
For every position p, compute the similarity between present clusters at position p-1 and
clusters at position p. Format is: c_to_c_sim[position][(cluster_p-1, cluster_p)]
"""
num_vars = len(consensus)
num_clusters = len(clustering)
coverage_abs = get_coverage_absolute(readset, clustering, index)
c_to_c_sim = [defaultdict(float) for _ in range(num_vars)]
cluster_zeroes = [dict() for c_id in range(num_clusters)]
cluster_ones = [dict() for c_id in range(num_clusters)]
for pos in range(num_vars):
for c_id in consensus[pos]:
cluster_zeroes[c_id][pos] = coverage_abs[pos][c_id] * (1 - consensus[pos][c_id])
cluster_ones[c_id][pos] = coverage_abs[pos][c_id] * consensus[pos][c_id]
for var in range(1, num_vars):
for i, c1 in enumerate(cov_map[var - 1]):
for j, c2 in enumerate(cov_map[var]):
same = 0
diff = 0
# Use a sliding window of positions as basis for consensus similarity
for pos in range(max(0, var - 10), min(num_vars - 1, var + 9)):
if pos in cluster_zeroes[c1] and pos in cluster_zeroes[c2]:
same += (
cluster_zeroes[c1][pos] * cluster_zeroes[c2][pos]
+ cluster_ones[c1][pos] * cluster_ones[c2][pos]
)
diff += (
cluster_zeroes[c1][pos] * cluster_ones[c2][pos]
+ cluster_ones[c1][pos] * cluster_zeroes[c2][pos]
)
c_to_c_sim[var][(c1, c2)] = same / (same + diff) if same > 0 else 0
return c_to_c_sim
def improve_path_on_multiswitches(path, num_clusters, cluster_sim):
"""
Post processing step after the threading. If two or more haplotypes switch clusters on the same position, we could use
the similarity scores between the clusters to find the most likely continuation of the switching haplotypes. See the
description of the compute_cut_positions method for more details about block cuts.
"""
if len(path) == 0:
return []
corrected_path = []
corrected_path.append(path[0])
ploidy = len(path[0])
current_perm = tuple(range(ploidy))
invers_perm = [i for i in range(ploidy)]
for i in range(1, len(path)):
changed = [] # set of haplotypes, that changed cluster at current position
for j in range(0, ploidy):
old_c = path[i - 1][j]
new_c = path[i][j]
if old_c != new_c:
# count general switches
changed.append(j)
if len(changed) >= 2:
# if at least two threads changed cluster: find optimal permutation of changed clusters
left_c = [path[i - 1][j] for j in changed]
right_c = [path[i][j] for j in changed]
actual_score = sum(
[cluster_sim[i][(left_c[j], right_c[j])] for j in range(len(changed))]
)
best_score = actual_score
best_perm = tuple(range(len(changed)))
for perm in it.permutations(range(len(changed))):
score = 0
for j, left in enumerate(left_c):
score += cluster_sim[i][(left, right_c[perm[j]])]
if score > best_score:
best_score = score
best_perm = perm
# apply local best permutation to current global permutation
current_perm_copy = list(current_perm)
for j in range(len(changed)):
current_perm_copy[changed[j]] = current_perm[changed[best_perm[j]]]
current_perm = tuple(current_perm_copy)
for j in range(ploidy):
invers_perm[current_perm[j]] = j
# apply current optimal permutation to local cluster config and add to corrected path
corrected_path.append([path[i][j] for j in invers_perm])
return corrected_path
def improve_path_on_collapsedswitches(path, num_clusters, cluster_sim):
"""
Post processing step after the threading. If a haplotype leaves a cluster on a collapsed region, we could use the
similarity scores between the clusters to find the most likely continuation of the switching haplotypes. See the
description of the compute_cut_positions method for more details about block cuts.
"""
if len(path) == 0:
return []
corrected_path = []
corrected_path.append(path[0])
ploidy = len(path[0])
current_perm = tuple(range(ploidy))
invers_perm = [i for i in range(ploidy)]
copynrs = []
for i in range(0, len(path)):
copynr = defaultdict(int)
for j in range(0, ploidy):
if path[i][j] not in copynr:
copynr[path[i][j]] = 0
copynr[path[i][j]] += 1
copynrs.append(copynr)
for i in range(1, len(path)):
changed = []
# iterate over present cluster ids
for c_id in copynrs[i]:
if copynrs[i - 1][c_id] >= 2:
# for all collapsed clusters: find haplotypes, which go through and check whether one of them exits
outgoing_c = False
affected = []
for j in range(ploidy):
if path[i - 1][j] == c_id:
affected.append(j)
if path[i][j] != c_id:
outgoing_c = True
# if haplotypes leaves collapsed cluster, all other might be equally suited, so add them
if outgoing_c:
changed.append(affected)
for h_group in changed:
# for every group of haplotypes coming from a collapsed cluster:
collapsed_cid = path[i - 1][h_group[0]]
left_c = []
# find last cluster before collapsed one for every haplotype (or use collapsed one if this does not exist)
for j in h_group:
pos = i - 1
while pos >= 0:
if path[pos][j] != collapsed_cid:
left_c.append(path[pos][j])
break
else:
pos -= 1
if pos == -1:
left_c.append(collapsed_cid)
right_c = [path[i][j] for j in h_group]
# we need to catch the case, where we compare a cluster with itself
ident_sim = 0
for c1 in left_c:
for c2 in right_c:
if c1 != c2:
ident_sim = max(ident_sim, cluster_sim[i][(c1, c2)])
ident_sim = ident_sim * 2 + 1
for j in range(len(h_group)):
actual_score = sum(
[
cluster_sim[i][(left_c[j], right_c[j])]
if left_c[j] != right_c[j]
else ident_sim
for j in range(len(h_group))
]
)
best_score = actual_score
best_perm = tuple(range(len(h_group)))
for perm in it.permutations(range(len(h_group))):
score = 0
for j, left in enumerate(left_c):
score += (
cluster_sim[i][(left, right_c[perm[j]])]
if left != right_c[perm[j]]
else ident_sim
)
if score > best_score:
best_score = score
best_perm = perm
# apply local best permutation to current global permutation
current_perm_copy = list(current_perm)
for j in range(len(h_group)):
current_perm_copy[h_group[j]] = current_perm[h_group[best_perm[j]]]
current_perm = tuple(current_perm_copy)
for j in range(ploidy):
invers_perm[current_perm[j]] = j
# apply current optimal permutation to local cluster config and add to corrected path
corrected_path.append([path[i][j] for j in invers_perm])
return corrected_path
def get_position_map(readset):
"""
Returns a mapping of genome (bp) positions to virtual positions (from 0 to l).
"""
# Map genome positions to [0,l)
index = {}
rev_index = []
num_vars = 0
for position in readset.get_positions():
index[position] = num_vars
rev_index.append(position)
num_vars += 1
return index, rev_index
def get_pos_to_clusters_map(coverage, ploidy):
"""
For every position, computes a list of relevant clusters for the threading
algorithm. Relevant means, that the relative coverage is at least 1/8 of
what a single haplotype is expected to have for the given ploidy. Apart
from that, at least <ploidy> and at most <2*ploidy> many clusters are
selected to avoid exponential blow-up.
"""
cov_map = [[] for _ in range(len(coverage))]
for pos in range(len(coverage)):
sorted_cids = sorted(
[cid for cid in coverage[pos]], key=lambda x: coverage[pos][x], reverse=True
)
cut_off = min(len(sorted_cids), 2 * ploidy)
for i in range(ploidy, min(len(sorted_cids), 2 * ploidy)):
if coverage[pos][sorted_cids[i]] < (1.0 / (8.0 * ploidy)):
cut_off = i
break
cov_map[pos] = sorted_cids[:cut_off]
return cov_map
def get_coverage(readset, clustering, pos_index):
"""
Returns a list, which for every position contains a dictionary, mapping a cluster id to
a relative coverage on this position.
"""
num_vars = len(pos_index)
num_clusters = len(clustering)
coverage = [dict() for pos in range(num_vars)]
coverage_sum = [0 for pos in range(num_vars)]
for c_id in range(num_clusters):
for read in clustering[c_id]:
for pos in [pos_index[var.position] for var in readset[read]]:
if c_id not in coverage[pos]:
coverage[pos][c_id] = 0
coverage[pos][c_id] += 1
coverage_sum[pos] += 1
for pos in range(num_vars):
for c_id in coverage[pos]:
coverage[pos][c_id] = coverage[pos][c_id] / coverage_sum[pos]
return coverage
def get_coverage_absolute(readset, clustering, pos_index):
"""
Returns a list, which for every position contains a dictionary, mapping a cluster id to
an absolute coverage on this position.
"""
num_vars = len(pos_index)
num_clusters = len(clustering)
coverage = [dict() for pos in range(num_vars)]
for c_id in range(num_clusters):
for read in clustering[c_id]:
for pos in [pos_index[var.position] for var in readset[read]]:
if c_id not in coverage[pos]:
coverage[pos][c_id] = 0
coverage[pos][c_id] += 1
return coverage
def get_cluster_start_end_positions(readset, clustering, pos_index):
num_clusters = len(clustering)
positions = {}
for c_id in range(num_clusters):
read = clustering[c_id][0]
start = pos_index[readset[read][0].position]
end = pos_index[readset[read][-1].position]
for read in clustering[c_id]:
readstart = pos_index[readset[read][0].position]
readend = pos_index[readset[read][-1].position]
if readstart < start:
start = readstart
if readend > end:
end = readend
positions[c_id] = (start, end)
assert len(positions) == num_clusters
return positions
def get_local_cluster_consensus(readset, clustering, cov_map, positions):
"""
Returns a list, which for every position contains a dictionary, mapping a cluster id to
its consensus on this position.
"""
return [
{c_id: pos_cons[c_id][0] for c_id in pos_cons}
for pos_cons in get_local_cluster_consensus_withfrac(
readset, clustering, cov_map, positions
)
]
def get_local_cluster_consensus_withfrac(readset, clustering, cov_map, positions):
# Map genome positions to [0,l)
index = {}
rev_index = []
num_vars = 0
for position in readset.get_positions():
index[position] = num_vars
rev_index.append(position)
num_vars += 1
relevant_pos = [[] for i in range(len(clustering))]
for pos in range(num_vars):
for c in cov_map[pos]:
relevant_pos[c].append(pos)
clusterwise_consensus = [
get_single_cluster_consensus_frac(readset, clustering[i], index, relevant_pos[i])
for i in range(len(clustering))
]
whole_consensus = []
for pos in range(num_vars):
newdict = defaultdict()
for c in cov_map[pos]:
newdict[c] = clusterwise_consensus[c][pos]
whole_consensus.append(newdict)
return whole_consensus
def get_single_cluster_consensus_frac(readset, cluster, index, relevant_pos):
# Count zeroes and one for every position
poswise_allelecount = dict()
for read in cluster:
for var in readset[read]:
pos = index[var.position]
if pos not in poswise_allelecount:
poswise_allelecount[pos] = dict()
if var.allele not in poswise_allelecount[pos]:
poswise_allelecount[pos][var.allele] = 0
poswise_allelecount[pos][var.allele] += 1
# Determine majority allele
cluster_consensus = {}
for pos in relevant_pos:
if pos in poswise_allelecount:
max_allele = 0
max_count = 0
sum_count = 0
for allele in sorted(poswise_allelecount[pos]):
cur_count = poswise_allelecount[pos][allele]
sum_count += cur_count
if cur_count > max_count:
max_allele = allele
max_count = cur_count
cluster_consensus[pos] = (max_allele, max_count / sum_count)
else:
cluster_consensus[pos] = (0, 1.0)
return cluster_consensus
| 40.056338 | 142 | 0.608562 |
4cc975fdb6bef8e5cf0f487270a2272da9b90828 | 6,344 | py | Python | src/covid19/lk_vax_schedule/scrape_vax_schedule.py | nuuuwan/covid19 | 6f6a62e52104ff0579a4787e6ea06beca3eb4c76 | ["MIT"] | 5 | 2021-06-11T16:11:58.000Z | 2021-09-02T14:09:41.000Z | src/covid19/lk_vax_schedule/scrape_vax_schedule.py | nuuuwan/covid19 | 6f6a62e52104ff0579a4787e6ea06beca3eb4c76 | ["MIT"] | 2 | 2021-07-06T11:11:50.000Z | 2021-08-23T10:35:31.000Z | src/covid19/lk_vax_schedule/scrape_vax_schedule.py | nuuuwan/covid19 | 6f6a62e52104ff0579a4787e6ea06beca3eb4c76 | ["MIT"] | 2 | 2021-08-16T15:32:44.000Z | 2021-08-23T03:25:08.000Z |
"""
Ministry of health seems to have the type
http://health.gov.lk/moh_final/english/news_read_more.php?id=977
@icta_srilanka
has a link
https://vaccine.covid19.gov.lk/sign-in, but it's not updated.
I remember
@Sri_Lanka_Army
having one, but can't seem to find it.
"""
import os
import re
import pandas
from bs4 import BeautifulSoup
from gig import ents
from utils import timex, tsv, www
from covid19._utils import log
DIR_DATA_LK_VAX_SCHEDULE = '/tmp/covid19/lk_vax_schedule'
TENTATIVE_VAX_SCH_URL = os.path.join(
'http://www.health.gov.lk',
'moh_final/english/public/elfinder/files/feturesArtical/2021',
'Tentative%20vaccination%20schedule%2020.08.2021.xlsx',
)
def make_data_dir():
os.system(f'mkdir -p {DIR_DATA_LK_VAX_SCHEDULE}')
def get_xlsx_file_url():
URL = 'http://health.gov.lk/moh_final/english/news_read_more.php?id=977'
html = www.read(URL)
soup = BeautifulSoup(html, 'html.parser')
for a in soup.find_all('a'):
href = a.get('href')
if 'xlsx' in href:
return href
return None
def scrape_xlsx(date_id):
xlsx_file_url = get_xlsx_file_url()
if not xlsx_file_url:
log.error('Could not find xlsx file URL. Aborting')
return None
schedule_xlsx_file = os.path.join(
DIR_DATA_LK_VAX_SCHEDULE,
f'schedule.{date_id}.xlsx',
)
if os.path.exists(schedule_xlsx_file):
log.warning(f'{schedule_xlsx_file} already exists. Not downloading')
return schedule_xlsx_file
make_data_dir()
www.download_binary(xlsx_file_url, schedule_xlsx_file)
log.info(f'Downloaded {xlsx_file_url} to {schedule_xlsx_file}')
return schedule_xlsx_file
def parse_xlsx(date_id):
schedule_xlsx_file = os.path.join(
DIR_DATA_LK_VAX_SCHEDULE,
f'schedule.{date_id}.xlsx',
)
data_frame = pandas.read_excel(
schedule_xlsx_file,
header=None,
)
data_frame = data_frame.fillna(method='ffill', axis=0)
data_list = []
prev_row = None
prev_gnd = None
table = data_frame.values.tolist()
first_row = table[0]
title_cell = str(
list(
filter(
lambda cell: str(cell) != 'nan',
first_row,
)
)[0]
)
is_valid_doc = False
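    # The sheet title is expected to embed the schedule date as dd.mm.yyyy, e.g.
    # "Tentative vaccination schedule 20.08.2021" -> date_str == '20.08.2021'
    # (example inferred from TENTATIVE_VAX_SCH_URL above, not from the original file).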
results = re.search(r'.*\s(?P<date_str>\d{2}\.\d{2}\.\d{4}).*', title_cell)
doc_date_id = None
if results:
date_str = results.groupdict().get('date_str')
ut = timex.parse_time(date_str, '%d.%m.%Y')
doc_date_id = timex.get_date_id(ut)
if doc_date_id == date_id:
is_valid_doc = True
if not is_valid_doc:
log.warning(f'Invalid doc. doc_date_id = {doc_date_id}. Aborting')
os.system(f'rm {schedule_xlsx_file}')
return None
for row in table:
if str(row[1]) == 'nan' or str(row[1]) == 'Province':
continue
if str(row) == str(prev_row):
continue
__, province, district, moh, gnd, center, vaccine_str = row
if gnd == 'GN area':
gnd = ''
district = district.strip()
province_ents = ents.get_entities_by_name_fuzzy(province, 'province')
province_id = None
if province_ents:
province_ent = province_ents[0]
province_id = province_ent['id']
province = province_ent['name']
if district in ['Colombo RDHS', 'CMC']:
district_id = 'LK-11'
elif district in ['Kalutara NIHS']:
district_id = 'LK-13'
else:
district_ents = ents.get_entities_by_name_fuzzy(
district,
filter_entity_type='district',
filter_parent_id=province_id,
)
district_id = None
if district_ents:
district_ent = district_ents[0]
district_id = district_ent['id']
district = district_ent['name']
moh_ents = ents.get_entities_by_name_fuzzy(
moh, filter_entity_type='moh'
)
moh_id = None
if moh_ents:
moh_ent = moh_ents[0]
moh_id = moh_ent['id']
moh = moh_ent['name']
gnd_ents = ents.get_entities_by_name_fuzzy(
gnd, filter_entity_type='gnd', filter_parent_id=district_id
)
gnd_id = None
if gnd_ents:
gnd_ent = gnd_ents[0]
gnd_id = gnd_ent['id']
gnd = gnd_ent['name']
if gnd == prev_gnd:
gnd = ''
gnd_id = None
else:
prev_gnd = gnd
vaccine = vaccine_str.partition(' ')[0]
dose1 = '1' in vaccine_str
dose2 = '2' in vaccine_str
data = dict(
province=province,
province_id=province_id,
district=district,
district_id=district_id,
moh=moh,
moh_id=moh_id,
gnd=gnd,
gnd_id=gnd_id,
center=center,
vaccine=vaccine,
dose1=dose1,
dose2=dose2,
)
data_list.append(data)
prev_row = row
schedule_tsv_file = os.path.join(
DIR_DATA_LK_VAX_SCHEDULE,
f'schedule.{date_id}.tsv',
)
tsv.write(schedule_tsv_file, data_list)
log.info(f'Wrote {len(data_list)} to {schedule_tsv_file}')
def analyze(date_id):
schedule_tsv_file = os.path.join(
DIR_DATA_LK_VAX_SCHEDULE,
f'schedule.{date_id}.tsv',
)
data_list = tsv.read(schedule_tsv_file)
district_to_vaccine_to_count = {}
for data in data_list:
district = data['district']
vaccine = data['vaccine']
if district not in district_to_vaccine_to_count:
district_to_vaccine_to_count[district] = {}
if vaccine not in district_to_vaccine_to_count[district]:
district_to_vaccine_to_count[district][vaccine] = 0
district_to_vaccine_to_count[district][vaccine] += 1
for district, vaccine_to_count in district_to_vaccine_to_count.items():
print(district)
for vaccine, count in vaccine_to_count.items():
            print('\t', vaccine, ' (', count, 'centers)')
if __name__ == '__main__':
date_id = timex.get_date_id()
scrape_xlsx(date_id)
parse_xlsx(date_id)
analyze(date_id)
| 28.195556 | 79 | 0.604193 |
deeb437c67d43c7fe2ff5a1c76eddddee58abf19 | 140 | py | Python | brigadier/home/urls.py | avoevodin/brigadier | c3869e4b526e0eb043146c53b78fda525ecfe5c4 | ["MIT"] | 1 | 2021-03-30T05:06:39.000Z | 2021-03-30T05:06:39.000Z | brigadier/home/urls.py | avoevodin/brigadier | c3869e4b526e0eb043146c53b78fda525ecfe5c4 | ["MIT"] | 25 | 2021-03-22T12:41:41.000Z | 2021-10-20T10:42:05.000Z | brigadier/home/urls.py | avoevodin/brigadier | c3869e4b526e0eb043146c53b78fda525ecfe5c4 | ["MIT"] | null | null | null |
from django.urls import path
from .views import HomeView
app_name = 'home'
urlpatterns = [
path('', HomeView.as_view(), name='home')
]
| 17.5 | 45 | 0.692857 |
7d90c6046e5ca802e6a1fbd09360439636133990 | 1,237 | py | Python | ppq/executor/op/torch/__init__.py | xiguadong/ppq | 6c71adb3c2a8ca95967f101724b5e4b3e6f761ff | ["Apache-2.0"] | null | null | null | ppq/executor/op/torch/__init__.py | xiguadong/ppq | 6c71adb3c2a8ca95967f101724b5e4b3e6f761ff | ["Apache-2.0"] | null | null | null | ppq/executor/op/torch/__init__.py | xiguadong/ppq | 6c71adb3c2a8ca95967f101724b5e4b3e6f761ff | ["Apache-2.0"] | null | null | null |
from .default import (DEFAULT_BACKEND_TABLE, AveragePool_forward,
BatchNormalization_forward, Cast_forward, Clip_forward,
Concat_forward, Constant_forward,
ConstantOfShape_forward, Conv_forward, Eltwise_forward,
Equal_forward, Expand_forward, Flatten_forward,
Gather_forward, GatherND_forward, Gemm_forward,
Greater_forward, MaxPool2d_forward, _NMS_forward,
NonZero_forward, Range_forward, ReduceL2_forward,
ReduceMax_forward, Reshape_forward, Resize_forward,
ScatterElements_forward, ScatterND_forward,
Shape_forward, Slice_forward, Softmax_forward,
Squeeze_forward, Tile_forward, TopK_forward,
Transpose_forward, UnaryEltwise_forward,
Unsqueeze_forward, Where_forward)
from .dsp import PPL_DSP_BACKEND_TABLE
from .cuda import PPL_GPU_BACKEND_TABLE
from .nxp import NXP_BACKEND_TABLE
from .extension import EXTENSION_BACKEND_TABLE
from .base import TorchBackendContext
from .onnx import ONNX_BACKEND_TABLE
from .academic import ACADEMIC_BACKEND_TABLE
| 58.904762 | 77 | 0.677445 |
fcc26d514edc1961fe2dc2d4785813e87929e966 | 7,443 | py | Python | userbot/modules/anti_spambot.py | ratnasimatary/bot27 | 6a083db06e24614382b68800c10bbbdf47a86dcf | ["Naumen", "Condor-1.1", "MS-PL"] | 7 | 2020-06-27T20:15:34.000Z | 2021-01-21T12:45:48.000Z | userbot/modules/anti_spambot.py | ratnasimatary/bot27 | 6a083db06e24614382b68800c10bbbdf47a86dcf | ["Naumen", "Condor-1.1", "MS-PL"] | 4 | 2020-07-31T10:23:15.000Z | 2020-08-03T04:56:37.000Z | userbot/modules/anti_spambot.py | ratnasimatary/bot27 | 6a083db06e24614382b68800c10bbbdf47a86dcf | ["Naumen", "Condor-1.1", "MS-PL"] | 70 | 2020-04-26T02:47:50.000Z | 2022-01-26T10:13:13.000Z |
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
''' A module for helping ban group join spammers. '''
from asyncio import sleep
from requests import get
from telethon.events import ChatAction
from telethon.tl.types import ChannelParticipantsAdmins, Message
from userbot import BOTLOG, BOTLOG_CHATID, CMD_HELP, ANTI_SPAMBOT, ANTI_SPAMBOT_SHOUT, bot
@bot.on(ChatAction)
async def ANTI_SPAMBOTS(welcm):
try:
''' Ban a recently joined user if it
matches the spammer checking algorithm. '''
if not ANTI_SPAMBOT:
return
if welcm.user_joined or welcm.user_added:
adder = None
ignore = False
users = None
if welcm.user_added:
ignore = False
try:
adder = welcm.action_message.from_id
except AttributeError:
return
async for admin in bot.iter_participants(
welcm.chat_id, filter=ChannelParticipantsAdmins):
if admin.id == adder:
ignore = True
break
if ignore:
return
elif welcm.user_joined:
users_list = hasattr(welcm.action_message.action, "users")
if users_list:
users = welcm.action_message.action.users
else:
users = [welcm.action_message.from_id]
await sleep(5)
spambot = False
if not users:
return
for user_id in users:
async for message in bot.iter_messages(welcm.chat_id,
from_user=user_id):
correct_type = isinstance(message, Message)
if not message or not correct_type:
break
join_time = welcm.action_message.date
message_date = message.date
if message_date < join_time:
continue # The message was sent before the user joined, thus ignore it
check_user = await welcm.client.get_entity(user_id)
# DEBUGGING. LEAVING IT HERE FOR SOME TIME ###
print(
f"User Joined: {check_user.first_name} [ID: {check_user.id}]"
)
print(f"Chat: {welcm.chat.title}")
print(f"Time: {join_time}")
print(
f"Message Sent: {message.text}\n\n[Time: {message_date}]"
)
##############################################
try:
cas_url = f"https://combot.org/api/cas/check?user_id={check_user.id}"
r = get(cas_url, timeout=3)
data = r.json()
except BaseException:
print(
"CAS check failed, falling back to legacy anti_spambot behaviour."
)
data = None
pass
if data and data['ok']:
reason = f"[Banned by Combot Anti Spam](https://combot.org/cas/query?u={check_user.id})"
spambot = True
elif "t.cn/" in message.text:
reason = "Match on `t.cn` URLs"
spambot = True
elif "t.me/joinchat" in message.text:
reason = "Potential Promotion Message"
spambot = True
elif message.fwd_from:
reason = "Forwarded Message"
spambot = True
elif "?start=" in message.text:
reason = "Telegram bot `start` link"
spambot = True
elif "bit.ly/" in message.text:
reason = "Match on `bit.ly` URLs"
spambot = True
else:
if check_user.first_name in ("Bitmex", "Promotion",
"Information", "Dex",
"Announcements", "Info"):
                        if check_user.last_name == "Bot":
reason = "Known spambot"
spambot = True
if spambot:
print(f"Potential Spam Message: {message.text}")
await message.delete()
break
                continue  # Check the next message
if spambot:
chat = await welcm.get_chat()
admin = chat.admin_rights
creator = chat.creator
if not admin and not creator:
if ANTI_SPAMBOT_SHOUT:
await welcm.reply(
"@admins\n"
"`ANTI SPAMBOT DETECTOR!\n"
"THIS USER MATCHES MY ALGORITHMS AS A SPAMBOT!`"
f"REASON: {reason}")
kicked = False
reported = True
else:
try:
await welcm.reply(
"`Potential Spambot Detected !!`\n"
f"`REASON:` {reason}\n"
"Kicking away for now, will log the ID for further purposes.\n"
f"`USER:` [{check_user.first_name}](tg://user?id={check_user.id})"
)
await welcm.client.kick_participant(
welcm.chat_id, check_user.id)
kicked = True
reported = False
except BaseException:
if ANTI_SPAMBOT_SHOUT:
await welcm.reply(
"@admins\n"
"`ANTI SPAMBOT DETECTOR!\n"
"THIS USER MATCHES MY ALGORITHMS AS A SPAMBOT!`"
f"REASON: {reason}")
kicked = False
reported = True
if BOTLOG:
if kicked or reported:
await welcm.client.send_message(
BOTLOG_CHATID, "#ANTI_SPAMBOT REPORT\n"
f"USER: [{users.first_name}](tg://user?id={check_user.id})\n"
f"USER ID: `{check_user.id}`\n"
f"CHAT: {welcm.chat.title}\n"
f"CHAT ID: `{welcm.chat_id}`\n"
f"REASON: {reason}\n"
f"MESSAGE:\n\n{message.text}")
except ValueError:
pass
CMD_HELP.update({
"anti_spambot":
"If enabled in config.env or env var,"
"\nthis module will ban(or inform the admins of the group about) the"
"\nspammer(s) if they match the userbot's anti-spam algorithm."
})
| 39.802139 | 112 | 0.438667 |
35eb0339d03da09716e7b06384a50c429a20c2f2 | 5,382 | py | Python | movingpandas/trajectory_plotter.py | sglvladi/movingpandas | d72141e1a3a22fcaeede77f32bdb7417b5280a45 | ["BSD-3-Clause"] | null | null | null | movingpandas/trajectory_plotter.py | sglvladi/movingpandas | d72141e1a3a22fcaeede77f32bdb7417b5280a45 | ["BSD-3-Clause"] | null | null | null | movingpandas/trajectory_plotter.py | sglvladi/movingpandas | d72141e1a3a22fcaeede77f32bdb7417b5280a45 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import hvplot.pandas # seems to be necessary for the following import to work
from holoviews import opts, dim
class _TrajectoryPlotter:
"""
Utility class for plotting trajectories
Performs necessary data preprocessing steps and hands over plotting arguments to Matplotlib plot or Holoviews hvplot.
"""
def __init__(self, data, *args, **kwargs):
self.data = data
self.args = args
self.kwargs = kwargs
self.width = kwargs.pop('width', 900)
self.height = kwargs.pop('height', 700)
self.figsize = kwargs.pop('figsize', None)
self.column = kwargs.get('column', None)
self.column = kwargs.get('c', self.column)
self.ax = kwargs.pop('ax', None)
self.column_to_color = kwargs.pop('column_to_color', None)
self.min_value = self.kwargs.get('vmin', None)
self.max_value = self.kwargs.get('vmax', None)
self.overlay = None
self.hvplot_is_geo = kwargs.pop('geo', True)
self.hvplot_tiles = kwargs.pop('tiles', 'OSM')
def _make_line_df(self, traj):
temp = traj.copy()
if self.column:
speed_col_name = traj.get_speed_column_name()
if self.column == speed_col_name and speed_col_name not in traj.df.columns:
temp.add_speed(overwrite=True)
line_gdf = temp._to_line_df().drop([temp.get_geom_column_name(), 'prev_pt'], axis=1)
line_gdf = line_gdf.rename(columns={'line': 'geometry'}).set_geometry('geometry')
return line_gdf
def _plot_trajectory(self, traj):
temp_df = self._make_line_df(traj)
if self.column and self.column_to_color:
try:
color = self.column_to_color[traj.df[self.column].max()]
except KeyError:
color = 'grey'
return temp_df.plot(ax=self.ax, color=color, *self.args, **self.kwargs)
else:
self.kwargs.pop('vmin', None)
self.kwargs.pop('vmax', None)
return temp_df.plot(ax=self.ax, vmin=self.min_value, vmax=self.max_value, *self.args, **self.kwargs)
def _hvplot_trajectory(self, traj):
line_gdf = self._make_line_df(traj)
if not traj.is_latlon and traj.crs is not None:
line_gdf = line_gdf.to_crs(epsg=4326)
if self.column and type(self.column) == str:
self.kwargs['c'] = dim(self.column) # fixes https://github.com/anitagraser/movingpandas/issues/71
if self.column and self.column_to_color:
try:
color = self.column_to_color[traj.df[self.column].max()]
except KeyError:
color = 'grey'
return line_gdf.hvplot(color=color, geo=self.hvplot_is_geo, tiles=self.hvplot_tiles, *self.args, **self.kwargs)
else:
return line_gdf.hvplot(geo=self.hvplot_is_geo, tiles=self.hvplot_tiles, *self.args, **self.kwargs)
def plot(self):
if not self.ax:
self.ax = plt.figure(figsize=self.figsize).add_subplot(1, 1, 1)
ax = self._plot_trajectory(self.data)
self.kwargs['legend'] = False # has to be removed after the first iteration, otherwise we get multiple legends!
self.kwargs.pop('column', None) # has to be popped, otherwise there's an error in the following plot call if we don't remove column from kwargs
return ax
def hvplot(self):
opts.defaults(opts.Overlay(width=self.width, height=self.height, active_tools=['wheel_zoom']))
return self._hvplot_trajectory(self.data)
class _TrajectoryCollectionPlotter(_TrajectoryPlotter):
def __init__(self, data, *args, **kwargs):
super().__init__(data, *args, **kwargs)
def get_min_max_values(self):
speed_col_name = self.data.trajectories[0].get_speed_column_name()
if self.column == speed_col_name and speed_col_name not in self.data.trajectories[0].df.columns:
self.data.add_speed(overwrite=True)
self.max_value = self.kwargs.pop('vmax', self.data.get_max(self.column))
self.min_value = self.kwargs.pop('vmin', self.data.get_min(self.column))
def plot(self):
if self.column:
self.get_min_max_values()
self.ax = plt.figure(figsize=self.figsize).add_subplot(1, 1, 1)
for traj in self.data:
self.ax = self._plot_trajectory(traj)
self.kwargs['legend'] = False # has to be removed after the first iteration, otherwise we get multiple legends!
self.kwargs.pop('column', None) # has to be popped, otherwise there's an error in the following plot call if we don't remove column from kwargs
start_locs = self.data.get_start_locations()
ax = start_locs.plot(ax=self.ax, column=self.column, color='white', *self.args, **self.kwargs)
return ax
def hvplot(self):
opts.defaults(opts.Overlay(width=self.width, height=self.height, active_tools=['wheel_zoom']))
for traj in self.data:
overlay = self._hvplot_trajectory(traj)
if self.overlay:
self.overlay = self.overlay * overlay
else:
self.overlay = overlay
self.hvplot_tiles = False # has to be removed after the first iteration, otherwise tiles will cover trajectories!
return self.overlay
| 43.756098 | 152 | 0.642698 |
99dd83b77b264de9770e29d2f67c903eae0722d6 | 235 | py | Python | FrameApp/rainstyApp/py_module/action/rainsty/src/__init__.py | Rainstyd/rainsty | 9a0d5f46c20faf909c4194f315fb9960652cffc6 | ["Apache-2.0"] | 1 | 2020-03-25T01:13:35.000Z | 2020-03-25T01:13:35.000Z | FrameApp/rainstyApp/py_module/action/rainsty/src/__init__.py | Rainstyed/rainsty | f74e0ccaf16d1871c9d1870bd8a7c8a63243fcf5 | ["Apache-2.0"] | 1 | 2022-01-06T23:49:21.000Z | 2022-01-06T23:49:21.000Z | FrameApp/rainstyApp/py_module/action/rainsty/src/__init__.py | rainstyd/rainsty | 9a0d5f46c20faf909c4194f315fb9960652cffc6 | ["Apache-2.0"] | 1 | 2020-03-20T08:48:36.000Z | 2020-03-20T08:48:36.000Z |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author: rainsty
@file: __init__.py
@time: 2020-09-18 17:28:29
@description:
"""
from .do_50000 import do_action as do_action_50000
from .do_50001 import do_action as do_action_50001
| 18.076923 | 50 | 0.706383 |
f1ef03d1b91c542b085307dbbba3931af75745b4 | 52,612 | py | Python | scripts/check_compliance.py | ioannisg/ci-tools | da9a2df574094f52d87a03f6393928bdc7dce17c | ["Apache-2.0"] | 7 | 2019-04-10T08:50:06.000Z | 2021-03-21T11:24:43.000Z | scripts/check_compliance.py | ioannisg/ci-tools | da9a2df574094f52d87a03f6393928bdc7dce17c | ["Apache-2.0"] | 86 | 2018-12-03T03:48:04.000Z | 2021-07-12T13:35:26.000Z | scripts/check_compliance.py | ioannisg/ci-tools | da9a2df574094f52d87a03f6393928bdc7dce17c | ["Apache-2.0"] | 21 | 2018-11-23T17:00:07.000Z | 2021-06-16T10:24:44.000Z |
#!/usr/bin/env python3
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import collections
import sys
import subprocess
import re
import os
from email.utils import parseaddr
import logging
import argparse
from junitparser import TestCase, TestSuite, JUnitXml, Skipped, Error, Failure, Attr
from github import Github
from shutil import copyfile
import json
import tempfile
import traceback
import magic
import shlex
from pathlib import Path
# '*' makes it italic
EDIT_TIP = "\n\n*Tip: The bot edits this comment instead of posting a new " \
"one, so you can check the comment's history to see earlier " \
"messages.*"
logger = None
# This ends up as None when we're not running in a Zephyr tree
ZEPHYR_BASE = os.environ.get('ZEPHYR_BASE')
def git(*args, cwd=None):
# Helper for running a Git command. Returns the rstrip()ed stdout output.
# Called like git("diff"). Exits with SystemError (raised by sys.exit()) on
# errors. 'cwd' is the working directory to use (default: current
# directory).
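    # Example (illustrative): git("rev-parse", "--show-toplevel") returns the
    # repository root as a string, and git("diff", "--name-only", COMMIT_RANGE)
    # returns the changed paths separated by newlines.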
git_cmd = ("git",) + args
try:
git_process = subprocess.Popen(
git_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
except OSError as e:
err(f"failed to run '{cmd2str(git_cmd)}': {e}")
stdout, stderr = git_process.communicate()
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
if git_process.returncode or stderr:
err(f"""\
'{cmd2str(git_cmd)}' exited with status {git_process.returncode} and/or wrote
to stderr.
==stdout==
{stdout}
==stderr==
{stderr}""")
return stdout.rstrip()
def get_shas(refspec):
"""
Returns the list of Git SHAs for 'refspec'.
:param refspec:
:return:
"""
return git('rev-list',
'--max-count={}'.format(-1 if "." in refspec else 1),
refspec).split()
class MyCase(TestCase):
"""
Custom junitparser.TestCase for our tests that adds some extra <testcase>
XML attributes. These will be preserved when tests are saved and loaded.
"""
classname = Attr()
# Remembers informational messages. These can appear on successful tests
# too, where TestCase.result isn't set.
info_msg = Attr()
class ComplianceTest:
"""
Base class for tests. Inheriting classes should have a run() method and set
these class variables:
name:
Test name
doc:
Link to documentation related to what's being tested
path_hint:
The path the test runs itself in. This is just informative and used in
the message that gets printed when running the test.
The magic string "<git-top>" refers to the top-level repository
directory. This avoids running 'git' to find the top-level directory
before main() runs (class variable assignments run when the 'class ...'
statement runs). That avoids swallowing errors, because main() reports
them to GitHub.
"""
def __init__(self):
self.case = MyCase(self.name)
self.case.classname = "Guidelines"
def error(self, msg):
"""
Signals a problem with running the test, with message 'msg'.
Raises an exception internally, so you do not need to put a 'return'
after error().
Any failures generated prior to the error() are included automatically
in the message. Usually, any failures would indicate problems with the
test code.
"""
if self.case.result:
msg += "\n\nFailures before error: " + self.case.result._elem.text
self.case.result = Error(msg, "error")
raise EndTest
def skip(self, msg):
"""
Signals that the test should be skipped, with message 'msg'.
Raises an exception internally, so you do not need to put a 'return'
after skip().
Any failures generated prior to the skip() are included automatically
in the message. Usually, any failures would indicate problems with the
test code.
"""
if self.case.result:
msg += "\n\nFailures before skip: " + self.case.result._elem.text
self.case.result = Skipped(msg, "skipped")
raise EndTest
def add_failure(self, msg):
"""
Signals that the test failed, with message 'msg'. Can be called many
times within the same test to report multiple failures.
"""
if not self.case.result:
# First reported failure
self.case.result = Failure(self.name + " issues", "failure")
self.case.result._elem.text = msg.rstrip()
else:
# If there are multiple Failures, concatenate their messages
self.case.result._elem.text += "\n\n" + msg.rstrip()
def add_info(self, msg):
"""
Adds an informational message without failing the test. The message is
shown on GitHub, and is shown regardless of whether the test passes or
fails. If the test fails, then both the informational message and the
failure message are shown.
Can be called many times within the same test to add multiple messages.
"""
def escape(s):
# Hack to preserve e.g. newlines and tabs in the attribute when
# tests are saved to .xml and reloaded. junitparser doesn't seem to
# handle it correctly, though it does escape stuff like quotes.
# unicode-escape replaces newlines with \n (two characters), etc.
return s.encode("unicode-escape").decode("utf-8")
if not self.case.info_msg:
self.case.info_msg = escape(msg)
else:
self.case.info_msg += r"\n\n" + escape(msg)
class EndTest(Exception):
"""
Raised by ComplianceTest.error()/skip() to end the test.
Tests can raise EndTest themselves to immediately end the test, e.g. from
within a nested function call.
"""
class CheckPatch(ComplianceTest):
"""
Runs checkpatch and reports found issues
"""
name = "checkpatch"
doc = "https://docs.zephyrproject.org/latest/contribute/#coding-style"
path_hint = "<git-top>"
def run(self):
# Default to Zephyr's checkpatch if ZEPHYR_BASE is set
checkpatch = os.path.join(ZEPHYR_BASE or GIT_TOP, 'scripts',
'checkpatch.pl')
if not os.path.exists(checkpatch):
self.skip(checkpatch + " not found")
# git diff's output doesn't depend on the current (sub)directory
diff = subprocess.Popen(('git', 'diff', COMMIT_RANGE),
stdout=subprocess.PIPE)
try:
subprocess.check_output((checkpatch, '--mailback', '--no-tree', '-'),
stdin=diff.stdout,
stderr=subprocess.STDOUT,
shell=True, cwd=GIT_TOP)
except subprocess.CalledProcessError as ex:
output = ex.output.decode("utf-8")
if re.search("[1-9][0-9]* errors,", output):
self.add_failure(output)
else:
# No errors found, but warnings. Show them.
self.add_info(output)
class KconfigCheck(ComplianceTest):
"""
    Checks if we are introducing any new warnings/errors with Kconfig,
    for example using undefined Kconfig variables.
"""
name = "Kconfig"
doc = "https://docs.zephyrproject.org/latest/guides/kconfig/index.html"
path_hint = ZEPHYR_BASE
def run(self):
kconf = self.parse_kconfig()
self.check_top_menu_not_too_long(kconf)
self.check_no_pointless_menuconfigs(kconf)
self.check_no_undef_within_kconfig(kconf)
self.check_no_undef_outside_kconfig(kconf)
def get_modules(self, modules_file):
"""
Get a list of modules and put them in a file that is parsed by
Kconfig
This is needed to complete Kconfig sanity tests.
"""
# Invoke the script directly using the Python executable since this is
# not a module nor a pip-installed Python utility
zephyr_module_path = os.path.join(ZEPHYR_BASE, "scripts",
"zephyr_module.py")
cmd = [sys.executable, zephyr_module_path,
'--kconfig-out', modules_file]
try:
_ = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
self.error(ex.output)
def parse_kconfig(self):
"""
Returns a kconfiglib.Kconfig object for the Kconfig files. We reuse
this object for all tests to avoid having to reparse for each test.
"""
if not ZEPHYR_BASE:
self.skip("Not a Zephyr tree (ZEPHYR_BASE unset)")
# Put the Kconfiglib path first to make sure no local Kconfiglib version is
# used
kconfig_path = os.path.join(ZEPHYR_BASE, "scripts", "kconfig")
if not os.path.exists(kconfig_path):
self.error(kconfig_path + " not found")
sys.path.insert(0, kconfig_path)
# Import globally so that e.g. kconfiglib.Symbol can be referenced in
# tests
global kconfiglib
import kconfiglib
# Look up Kconfig files relative to ZEPHYR_BASE
os.environ["srctree"] = ZEPHYR_BASE
# Parse the entire Kconfig tree, to make sure we see all symbols
os.environ["SOC_DIR"] = "soc/"
os.environ["ARCH_DIR"] = "arch/"
os.environ["BOARD_DIR"] = "boards/*/*"
os.environ["ARCH"] = "*"
os.environ["CMAKE_BINARY_DIR"] = tempfile.gettempdir()
os.environ['DEVICETREE_CONF'] = "dummy"
os.environ['DTS_POST_CPP'] = 'dummy'
# Older name for DEVICETREE_CONF, for compatibility with older Zephyr
# versions that don't have the renaming
os.environ["GENERATED_DTS_BOARD_CONF"] = "dummy"
# For multi repo support
self.get_modules(os.path.join(tempfile.gettempdir(), "Kconfig.modules"))
# Tells Kconfiglib to generate warnings for all references to undefined
# symbols within Kconfig files
os.environ["KCONFIG_WARN_UNDEF"] = "y"
try:
# Note this will both print warnings to stderr _and_ return
# them: so some warnings might get printed
# twice. "warn_to_stderr=False" could unfortunately cause
# some (other) warnings to never be printed.
return kconfiglib.Kconfig()
except kconfiglib.KconfigError as e:
self.add_failure(str(e))
raise EndTest
def check_top_menu_not_too_long(self, kconf):
"""
Checks that there aren't too many items in the top-level menu (which
might be a sign that stuff accidentally got added there)
"""
max_top_items = 50
n_top_items = 0
node = kconf.top_node.list
while node:
# Only count items with prompts. Other items will never be
# shown in the menuconfig (outside show-all mode).
if node.prompt:
n_top_items += 1
node = node.next
if n_top_items > max_top_items:
self.add_failure("""
Expected no more than {} potentially visible items (items with prompts) in the
top-level Kconfig menu, found {} items. If you're deliberately adding new
entries, then bump the 'max_top_items' variable in {}.
""".format(max_top_items, n_top_items, __file__))
def check_no_pointless_menuconfigs(self, kconf):
# Checks that there are no pointless 'menuconfig' symbols without
# children in the Kconfig files
bad_mconfs = []
for node in kconf.node_iter():
# 'kconfiglib' is global
# pylint: disable=undefined-variable
# Avoid flagging empty regular menus and choices, in case people do
# something with 'osource' (could happen for 'menuconfig' symbols
# too, though it's less likely)
if node.is_menuconfig and not node.list and \
isinstance(node.item, kconfiglib.Symbol):
bad_mconfs.append(node)
if bad_mconfs:
self.add_failure("""\
Found pointless 'menuconfig' symbols without children. Use regular 'config'
symbols instead. See
https://docs.zephyrproject.org/latest/guides/kconfig/tips.html#menuconfig-symbols.
""" + "\n".join(f"{node.item.name:35} {node.filename}:{node.linenr}"
for node in bad_mconfs))
def check_no_undef_within_kconfig(self, kconf):
"""
Checks that there are no references to undefined Kconfig symbols within
the Kconfig files
"""
undef_ref_warnings = "\n\n\n".join(warning for warning in kconf.warnings
if "undefined symbol" in warning)
if undef_ref_warnings:
self.add_failure("Undefined Kconfig symbols:\n\n"
+ undef_ref_warnings)
def check_no_undef_outside_kconfig(self, kconf):
"""
Checks that there are no references to undefined Kconfig symbols
outside Kconfig files (any CONFIG_FOO where no FOO symbol exists)
"""
# Grep for symbol references.
#
# Example output line for a reference to CONFIG_FOO at line 17 of
# foo/bar.c:
#
# foo/bar.c<null>17<null>#ifdef CONFIG_FOO
#
# 'git grep --only-matching' would get rid of the surrounding context
# ('#ifdef '), but it was added fairly recently (second half of 2018),
# so we extract the references from each line ourselves instead.
#
# The regex uses word boundaries (\b) to isolate the reference, and
# negative lookahead to automatically whitelist the following:
#
# - ##, for token pasting (CONFIG_FOO_##X)
#
# - $, e.g. for CMake variable expansion (CONFIG_FOO_${VAR})
#
# - @, e.g. for CMakes's configure_file() (CONFIG_FOO_@VAR@)
#
# - {, e.g. for Python scripts ("CONFIG_FOO_{}_BAR".format(...)")
#
# - *, meant for comments like '#endif /* CONFIG_FOO_* */
defined_syms = get_defined_syms(kconf)
# Maps each undefined symbol to a list <filename>:<linenr> strings
undef_to_locs = collections.defaultdict(list)
# Warning: Needs to work with both --perl-regexp and the 're' module
regex = r"\bCONFIG_[A-Z0-9_]+\b(?!\s*##|[$@{*])"
# Skip doc/releases, which often references removed symbols
grep_stdout = git("grep", "--line-number", "-I", "--null",
"--perl-regexp", regex, "--", ":!/doc/releases",
cwd=ZEPHYR_BASE)
# splitlines() supports various line terminators
for grep_line in grep_stdout.splitlines():
path, lineno, line = grep_line.split("\0")
# Extract symbol references (might be more than one) within the
# line
for sym_name in re.findall(regex, line):
sym_name = sym_name[7:] # Strip CONFIG_
if sym_name not in defined_syms and \
sym_name not in UNDEF_KCONFIG_WHITELIST:
undef_to_locs[sym_name].append("{}:{}".format(path, lineno))
if not undef_to_locs:
return
# String that describes all referenced but undefined Kconfig symbols,
# in alphabetical order, along with the locations where they're
# referenced. Example:
#
# CONFIG_ALSO_MISSING arch/xtensa/core/fatal.c:273
# CONFIG_MISSING arch/xtensa/core/fatal.c:264, subsys/fb/cfb.c:20
undef_desc = "\n".join(
"CONFIG_{:35} {}".format(sym_name, ", ".join(locs))
for sym_name, locs in sorted(undef_to_locs.items()))
self.add_failure("""
Found references to undefined Kconfig symbols. If any of these are false
positives, then add them to UNDEF_KCONFIG_WHITELIST in {} in the ci-tools repo.
If the reference is for a comment like /* CONFIG_FOO_* */ (or
/* CONFIG_FOO_*_... */), then please use exactly that form (with the '*'). The
CI check knows not to flag it.
More generally, a reference followed by $, @, {{, *, or ## will never be
flagged.
{}""".format(os.path.basename(__file__), undef_desc))
def get_defined_syms(kconf):
# Returns a set() with the names of all defined Kconfig symbols (with no
# 'CONFIG_' prefix). This is complicated by samples and tests defining
# their own Kconfig trees. For those, just grep for 'config FOO' to find
# definitions. Doing it "properly" with Kconfiglib is still useful for the
# main tree, because some symbols are defined using preprocessor macros.
# Warning: Needs to work with both --perl-regexp and the 're' module.
# (?:...) is a non-capturing group.
regex = r"^\s*(?:menu)?config\s*([A-Z0-9_]+)\s*(?:#|$)"
# Grep samples/ and tests/ for symbol definitions
grep_stdout = git("grep", "-I", "-h", "--perl-regexp", regex, "--",
":samples", ":tests", cwd=ZEPHYR_BASE)
# Symbols from the main Kconfig tree + grepped definitions from samples and
# tests
return set([sym.name for sym in kconf.unique_defined_syms]
+ re.findall(regex, grep_stdout, re.MULTILINE))
# Many of these are symbols used as examples. Note that the list is sorted
# alphabetically, and skips the CONFIG_ prefix.
UNDEF_KCONFIG_WHITELIST = {
"ALSO_MISSING",
"APP_LINK_WITH_",
"CDC_ACM_PORT_NAME_",
"CLOCK_STM32_SYSCLK_SRC_",
"CMU",
"BT_6LOWPAN", # Defined in Linux, mentioned in docs
"COUNTER_RTC_STM32_CLOCK_SRC",
"CRC", # Used in TI CC13x2 / CC26x2 SDK comment
"DEEP_SLEEP", # #defined by RV32M1 in ext/
"DESCRIPTION",
"ERR",
"ESP_DIF_LIBRARY", # Referenced in CMake comment
"EXPERIMENTAL",
"FFT", # Used as an example in cmake/extensions.cmake
"FLAG", # Used as an example
"FOO",
"FOO_LOG_LEVEL",
"FOO_SETTING_1",
"FOO_SETTING_2",
"LIS2DW12_INT_PIN",
"LSM6DSO_INT_PIN",
"MISSING",
"MODULES",
"MYFEATURE",
"MY_DRIVER_0",
"NORMAL_SLEEP", # #defined by RV32M1 in ext/
"OPT",
"OPT_0",
"PEDO_THS_MIN",
"REG1",
"REG2",
"SAMPLE_MODULE_LOG_LEVEL", # Used as an example in samples/subsys/logging
"SEL",
"SHIFT",
"SOC_WATCH", # Issue 13749
"SOME_BOOL",
"SOME_INT",
"SOME_OTHER_BOOL",
"SOME_STRING",
"SRAM2", # Referenced in a comment in samples/application_development
"STACK_SIZE", # Used as an example in the Kconfig docs
"STD_CPP", # Referenced in CMake comment
"TEST1",
"TYPE_BOOLEAN",
"USB_CONSOLE",
"USE_STDC_",
"WHATEVER",
}
class DeviceTreeCheck(ComplianceTest):
"""
Runs the dtlib and edtlib test suites in scripts/dts/.
"""
name = "Device tree"
doc = "https://docs.zephyrproject.org/latest/guides/dts/index.html"
path_hint = ZEPHYR_BASE
def run(self):
if not ZEPHYR_BASE:
self.skip("Not a Zephyr tree (ZEPHYR_BASE unset)")
scripts_path = os.path.join(ZEPHYR_BASE, "scripts", "dts")
sys.path.insert(0, scripts_path)
import testdtlib
import testedtlib
# Hack: The test suites expect to be run from the scripts/dts
# directory, because they compare repr() output that contains relative
# paths against an expected string. Temporarily change the working
# directory to scripts/dts/.
#
# Warning: This is not thread-safe, though the test suites run in a
# fraction of a second.
old_dir = os.getcwd()
os.chdir(scripts_path)
try:
logger.info("cd %s && ./testdtlib.py", scripts_path)
testdtlib.run()
logger.info("cd %s && ./testedtlib.py", scripts_path)
testedtlib.run()
except SystemExit as e:
# The dtlib and edtlib test suites call sys.exit() on failure,
# which raises SystemExit. Let any errors in the test scripts
# themselves trickle through and turn into an internal CI error.
self.add_failure(str(e))
except Exception as e:
# Report other exceptions as an internal test failure
self.error(str(e))
finally:
# Restore working directory
os.chdir(old_dir)
class Codeowners(ComplianceTest):
"""
Check if added files have an owner.
"""
name = "Codeowners"
doc = "https://help.github.com/articles/about-code-owners/"
path_hint = "<git-top>"
def ls_owned_files(self, codeowners):
"""Returns an OrderedDict mapping git patterns from the CODEOWNERS file
to the corresponding list of files found on the filesystem. It
unfortunately does not seem possible to invoke git and re-use
how 'git ignore' and/or 'git attributes' already implement this,
so we must re-invent it here.
"""
# TODO: filter out files not in "git ls-files" (e.g.,
# sanity-out) _if_ the overhead isn't too high for a clean tree.
#
# pathlib.match() doesn't support **, so it looks like we can't
# recursively glob the output of ls-files directly, only real
# files :-(
pattern2files = collections.OrderedDict()
top_path = Path(GIT_TOP)
with open(codeowners, "r") as codeo:
for lineno, line in enumerate(codeo, start=1):
if line.startswith("#") or not line.strip():
continue
match = re.match(r"^([^\s,]+)\s+[^\s]+", line)
if not match:
self.add_failure(
"Invalid CODEOWNERS line %d\n\t%s" %
(lineno, line))
continue
git_patrn = match.group(1)
glob = self.git_pattern_to_glob(git_patrn)
files = []
for abs_path in top_path.glob(glob):
# comparing strings is much faster later
files.append(str(abs_path.relative_to(top_path)))
if not files:
self.add_failure("Path '{}' not found in the tree but is listed in "
"CODEOWNERS".format(git_patrn))
pattern2files[git_patrn] = files
return pattern2files
def git_pattern_to_glob(self, git_pattern):
"""Appends and prepends '**[/*]' when needed. Result has neither a
leading nor a trailing slash.
"""
if git_pattern.startswith("/"):
ret = git_pattern[1:]
else:
ret = "**/" + git_pattern
if git_pattern.endswith("/"):
ret = ret + "**/*"
elif os.path.isdir(os.path.join(GIT_TOP, ret)):
self.add_failure("Expected '/' after directory '{}' "
"in CODEOWNERS".format(ret))
return ret
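# Added note (not in the original script): with the rules above, hypothetical
# CODEOWNERS patterns map to pathlib globs roughly as follows:
#     "/boards/"         -> "boards/**/*"
#     "/CODEOWNERS"      -> "CODEOWNERS"
#     "drivers/serial/"  -> "**/drivers/serial/**/*"
#     "*.rst"            -> "**/*.rst"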
def run(self):
# TODO: testing an old self.commit range that doesn't end
# with HEAD is most likely a mistake. Should warn, see
# https://github.com/zephyrproject-rtos/ci-tools/pull/24
codeowners = os.path.join(GIT_TOP, "CODEOWNERS")
if not os.path.exists(codeowners):
self.skip("CODEOWNERS not available in this repo")
name_changes = git("diff", "--name-only", "--diff-filter=ARCD",
COMMIT_RANGE)
owners_changes = git("diff", "--name-only", COMMIT_RANGE,
"--", codeowners)
if not name_changes and not owners_changes:
# TODO: 1. decouple basic and cheap CODEOWNERS syntax
# validation from the expensive ls_owned_files() scanning of
# the entire tree. 2. run the former always.
return
logging.info("If this takes too long then cleanup and try again")
patrn2files = self.ls_owned_files(codeowners)
# The way git finds Renames and Copies is not an exact science; however, if
# one is missed it will always be reported as an Addition instead.
new_files = git("diff", "--name-only", "--diff-filter=ARC",
COMMIT_RANGE).splitlines()
logging.debug("New files %s", new_files)
# Convert to pathlib.Path string representation (e.g.,
# backslashes 'dir1\dir2\' on Windows) to be consistent
# with self.ls_owned_files()
new_files = [str(Path(f)) for f in new_files]
new_not_owned = []
for newf in new_files:
f_is_owned = False
for git_pat, owned in patrn2files.items():
logging.debug("Scanning %s for %s", git_pat, newf)
if newf in owned:
logging.info("%s matches new file %s", git_pat, newf)
f_is_owned = True
# Unlike github, we don't care about finding any
# more specific owner.
break
if not f_is_owned:
new_not_owned.append(newf)
if new_not_owned:
self.add_failure("New files added that are not covered in "
"CODEOWNERS:\n\n" + "\n".join(new_not_owned) +
"\n\nPlease add one or more entries in the "
"CODEOWNERS file to cover those files")
class Nits(ComplianceTest):
"""
Checks various nits in added/modified files. Doesn't check stuff that's
already covered by e.g. checkpatch.pl and pylint.
"""
name = "Nits"
doc = "https://docs.zephyrproject.org/latest/contribute/#coding-style"
path_hint = "<git-top>"
def run(self):
# Loop through added/modified files
for fname in git("diff", "--name-only", "--diff-filter=d",
COMMIT_RANGE).splitlines():
if "Kconfig" in fname:
self.check_kconfig_header(fname)
self.check_redundant_zephyr_source(fname)
if fname.startswith("dts/bindings/"):
self.check_redundant_document_separator(fname)
if fname.endswith((".c", ".conf", ".cpp", ".dts", ".overlay",
".h", ".ld", ".py", ".rst", ".txt", ".yaml",
".yml")) or \
"Kconfig" in fname or \
"defconfig" in fname or \
fname == "README":
self.check_source_file(fname)
def check_kconfig_header(self, fname):
# Checks for a spammy copy-pasted header format
with open(os.path.join(GIT_TOP, fname), encoding="utf-8") as f:
contents = f.read()
# 'Kconfig - yada yada' has a copy-pasted redundant filename at the
# top. This probably means all of the header was copy-pasted.
if re.match(r"\s*#\s*(K|k)config[\w.-]*\s*-", contents):
self.add_failure("""
Please use this format for the header in '{}' (see
https://docs.zephyrproject.org/latest/guides/kconfig/index.html#header-comments-and-other-nits):
# <Overview of symbols defined in the file, preferably in plain English>
(Blank line)
# Copyright (c) 2019 ...
# SPDX-License-Identifier: <License>
(Blank line)
(Kconfig definitions)
Skip the "Kconfig - " part of the first line, since it's clear that the comment
is about Kconfig from context. The "# Kconfig - " is what triggers this
failure.
""".format(fname))
def check_redundant_zephyr_source(self, fname):
# Checks for 'source "$(ZEPHYR_BASE)/Kconfig[.zephyr]"', which can be
# simplified to 'source "Kconfig[.zephyr]"'
with open(os.path.join(GIT_TOP, fname), encoding="utf-8") as f:
# Look for e.g. rsource as well, for completeness
match = re.search(
r'^\s*(?:o|r|or)?source\s*"\$\(?ZEPHYR_BASE\)?/(Kconfig(?:\.zephyr)?)"',
f.read(), re.MULTILINE)
if match:
self.add_failure("""
Redundant 'source "$(ZEPHYR_BASE)/{0}"' in '{1}'. Just do 'source "{0}"'
instead. The $srctree environment variable already points to the Zephyr root,
and all 'source's are relative to it.""".format(match.group(1), fname))
def check_redundant_document_separator(self, fname):
# Looks for redundant '...' document separators in bindings
with open(os.path.join(GIT_TOP, fname), encoding="utf-8") as f:
if re.search(r"^\.\.\.", f.read(), re.MULTILINE):
self.add_failure(f"""\
Redundant '...' document separator in {fname}. Binding YAML files are never
concatenated together, so no document separators are needed.""")
def check_source_file(self, fname):
# Generic nits related to various source files
with open(os.path.join(GIT_TOP, fname), encoding="utf-8") as f:
contents = f.read()
if not contents.endswith("\n"):
self.add_failure("Missing newline at end of '{}'. Check your text "
"editor settings.".format(fname))
if contents.startswith("\n"):
self.add_failure("Please remove blank lines at start of '{}'"
.format(fname))
if contents.endswith("\n\n"):
self.add_failure("Please remove blank lines at end of '{}'"
.format(fname))
class Documentation(ComplianceTest):
"""
Checks if the documentation build has generated any new warnings.
"""
name = "Documentation"
doc = "https://docs.zephyrproject.org/latest/guides/documentation/index.html"
path_hint = os.getcwd()
DOCS_WARNING_FILE = "doc.warnings"
def run(self):
if os.path.exists(self.DOCS_WARNING_FILE) and os.path.getsize(self.DOCS_WARNING_FILE) > 0:
with open(self.DOCS_WARNING_FILE, "rb") as docs_warning:
self.add_failure(docs_warning.read().decode("utf-8"))
class GitLint(ComplianceTest):
"""
Runs gitlint on the commits and finds issues with style and syntax
"""
name = "Gitlint"
doc = "https://docs.zephyrproject.org/latest/contribute/#commit-guidelines"
path_hint = "<git-top>"
def run(self):
# By default gitlint looks for .gitlint configuration only in
# the current directory
proc = subprocess.Popen('gitlint --commits ' + COMMIT_RANGE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=True, cwd=GIT_TOP)
msg = ""
if proc.wait() != 0:
msg = proc.stdout.read()
if msg != "":
self.add_failure(msg.decode("utf-8"))
class PyLint(ComplianceTest):
"""
Runs pylint on all .py files, with a limited set of checks enabled. The
configuration is in the pylintrc file.
"""
name = "pylint"
doc = "https://www.pylint.org/"
path_hint = "<git-top>"
def run(self):
# Path to pylint configuration file
pylintrc = os.path.abspath(os.path.join(os.path.dirname(__file__),
"pylintrc"))
# List of files added/modified by the commit(s).
files = git(
"diff", "--name-only", "--diff-filter=d", COMMIT_RANGE, "--",
# Skip to work around crash in pylint 2.2.2:
# https://github.com/PyCQA/pylint/issues/2906
":!boards/xtensa/intel_s1000_crb/support/create_board_img.py") \
.splitlines()
# Filter out everything but Python files. Keep filenames
# relative (to GIT_TOP) to stay farther from any command line
# limit.
py_files = filter_py(GIT_TOP, files)
if not py_files:
return
pylintcmd = ["pylint", "--rcfile=" + pylintrc] + py_files
logger.info(cmd2str(pylintcmd))
try:
# Run pylint on added/modified Python files
process = subprocess.Popen(
pylintcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=GIT_TOP)
except OSError as e:
self.error(f"Failed to run {cmd2str(pylintcmd)}: {e}")
stdout, stderr = process.communicate()
if process.returncode or stderr:
# Issues found, or a problem with pylint itself
self.add_failure(stdout.decode("utf-8") + stderr.decode("utf-8"))
def filter_py(root, fnames):
# PyLint check helper. Returns all Python script filenames among the
# filenames in 'fnames', relative to directory 'root'. Uses the
# python-magic library, so that we can detect Python files that
# don't end in .py as well. python-magic is a frontend to libmagic,
# which is also used by 'file'.
return [fname for fname in fnames
if fname.endswith(".py") or
magic.from_file(os.path.join(root, fname),
mime=True) == "text/x-python"]
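# Added note (not in the original script): thanks to python-magic this helper
# also keeps extensionless scripts. For example (hypothetical paths),
#     filter_py(GIT_TOP, ["setup.py", "doc/index.rst", "scripts/west"])
# keeps "setup.py" by extension, drops the .rst file, and keeps
# "scripts/west" only if libmagic classifies it as "text/x-python".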
class License(ComplianceTest):
"""
Checks for licenses in new files added by the Pull request
"""
name = "License"
doc = "https://docs.zephyrproject.org/latest/contribute/#licensing"
# copyfile() below likely requires that getcwd()==GIT_TOP
path_hint = os.getcwd()
def run(self):
scancode = "/opt/scancode-toolkit/scancode"
if not os.path.exists(scancode):
self.skip("scancode-toolkit not installed")
os.makedirs("scancode-files", exist_ok=True)
# git diff's output doesn't depend on the current (sub)directory
new_files = git("diff", "--name-only", "--diff-filter=A",
COMMIT_RANGE).splitlines()
if not new_files:
return
for newf in new_files:
file = str(newf).rstrip()
os.makedirs(os.path.join('scancode-files',
os.path.dirname(file)), exist_ok=True)
copy = os.path.join("scancode-files", file)
copyfile(file, copy)
try:
cmd = [scancode, '--verbose', '--copyright', '--license', '--license-diag', '--info',
'--classify', '--summary', '--html', 'scancode.html', '--json', 'scancode.json', 'scancode-files/']
cmd_str = " ".join(cmd)
logging.info(cmd_str)
subprocess.check_output(cmd_str, stderr=subprocess.STDOUT,
shell=True)
except subprocess.CalledProcessError as ex:
logging.error(ex.output)
self.error("Exception when running scancode: " + str(ex))
report = ""
never_check_ext = ['.yaml', '.html', '.rst', '.conf', '.cfg']
never_check_langs = ['HTML']
check_langs = ['CMake']
with open('scancode.json', 'r') as json_fp:
scancode_results = json.load(json_fp)
for file in scancode_results['files']:
if file['type'] == 'directory':
continue
orig_path = str(file['path']).replace('scancode-files/', '')
licenses = file['licenses']
file_type = file.get("file_type")
kconfig = "Kconfig" in orig_path and file_type in ['ASCII text']
check = False
if file.get("extension") in never_check_ext:
check = False
elif file.get("programming_language") in never_check_langs:
check = False
elif kconfig:
check = True
elif file.get("programming_language") in check_langs:
check = True
elif file.get("is_script"):
check = True
elif file.get("is_source"):
check = True
if check:
if not licenses:
report += ("* {} missing license.\n".format(orig_path))
else:
for lic in licenses:
if lic['key'] != "apache-2.0":
report += ("* {} is not apache-2.0 licensed: {}\n".format(
orig_path, lic['key']))
if lic['category'] != 'Permissive':
report += ("* {} has non-permissive license: {}\n".format(
orig_path, lic['key']))
if lic['key'] == 'unknown-spdx':
report += ("* {} has unknown SPDX: {}\n".format(
orig_path, lic['key']))
if not file['copyrights'] and file.get("programming_language") != 'CMake':
report += ("* {} missing copyright.\n".format(orig_path))
if report != "":
self.add_failure("""
In most cases you do not need to do anything here, especially if the files
reported below are going into ext/ and the license was already approved for
inclusion into ext/. Fix any missing license/copyright issues. The license
exception is just an FYI for the maintainers and can be overridden when
merging the pull request.\n\n""" + report)
class Identity(ComplianceTest):
"""
Checks that the author's email and the Signed-off-by emails are consistent.
"""
name = "Identity/Emails"
doc = "https://docs.zephyrproject.org/latest/contribute/#commit-guidelines"
# git rev-list and git log don't depend on the current (sub)directory
# unless explicitly told to
path_hint = "<git-top>"
def run(self):
for shaidx in get_shas(COMMIT_RANGE):
commit = git("log", "--decorate=short", "-n 1", shaidx)
signed = []
author = ""
sha = ""
parsed_addr = None
for line in commit.split("\n"):
match = re.search(r"^commit\s([^\s]*)", line)
if match:
sha = match.group(1)
match = re.search(r"^Author:\s(.*)", line)
if match:
author = match.group(1)
parsed_addr = parseaddr(author)
match = re.search(r"signed-off-by:\s(.*)", line, re.IGNORECASE)
if match:
signed.append(match.group(1))
error1 = "%s: author email (%s) needs to match one of the signed-off-by entries." % (
sha, author)
error2 = "%s: author email (%s) does not follow the syntax: First Last <email>." % (
sha, author)
error3 = "%s: author email (%s) must be a real email and cannot end in @users.noreply.github.com" % (
sha, author)
failure = None
if author not in signed:
failure = error1
if not parsed_addr or len(parsed_addr[0].split(" ")) < 2:
if not failure:
failure = error2
else:
failure = failure + "\n" + error2
elif parsed_addr[1].endswith("@users.noreply.github.com"):
failure = error3
if failure:
self.add_failure(failure)
def init_logs(cli_arg):
# Initializes logging
# TODO: there may be a shorter version thanks to:
# logging.basicConfig(...)
global logger
level = os.environ.get('LOG_LEVEL', "WARN")
console = logging.StreamHandler()
console.setFormatter(logging.Formatter('%(levelname)-8s: %(message)s'))
logger = logging.getLogger('')
logger.addHandler(console)
logger.setLevel(cli_arg if cli_arg else level)
logging.info("Log init completed, level=%s",
logging.getLevelName(logger.getEffectiveLevel()))
def set_pending(exclude):
# Sets 'pending' status for all tests for the commit given by --sha
for testcase in ComplianceTest.__subclasses__():
if testcase.name in exclude:
print("Skipping " + testcase.name)
continue
print("Creating pending status for " + testcase.name)
github_commit.create_status(
'pending', testcase.doc,
'Run in progress (build no. {})'.format(build_number),
testcase.name)
def report_test_results_to_github(suite):
# Reports test results to Github.
#
# suite: Test suite
print("reporting results to GitHub")
name2doc = {testcase.name: testcase.doc
for testcase in ComplianceTest.__subclasses__()}
n_failures = 0
comment = "" # Comment posted to GitHub
def set_commit_status(status, msg):
# 'case' gets set in the loop.
# pylint: disable=undefined-loop-variable
github_commit.create_status(
status, name2doc[case.name],
"{} (build no. {})".format(msg, build_number),
case.name)
for case in suite:
# This gives us access to the custom 'info_msg' attribute
case = MyCase.fromelem(case)
if not case.result:
print("reporting success on " + case.name)
set_commit_status('success', 'Checks passed')
elif case.result.type == 'skipped':
print("reporting skipped on " + case.name)
set_commit_status('success', 'Checks skipped')
elif case.result.type == 'failure':
print("reporting failure on " + case.name)
n_failures += 1
comment += "## {}\n\n".format(case.result.message)
if case.name in {"Gitlint", "Identity/Emails", "License"}:
comment += case.result._elem.text
else:
comment += "```\n" + case.result._elem.text + "\n```"
comment += "\n\n"
set_commit_status('failure', 'Checks failed')
elif case.result.type == 'error':
print("reporting error on " + case.name)
n_failures += 1
comment += "## {} (internal test error)\n\n```\n{}\n```\n\n" \
.format(case.name, case.result.message)
set_commit_status(
'error', 'Error during verification, please report!')
# Always show informational messages. See ComplianceTest.add_info() regarding
# the escaping applied to the message; we undo it here.
if case.info_msg:
comment += "## {} (informational only, not a failure)\n\n" \
"```\n{}\n```\n\n".format(
case.name,
case.info_msg.encode("utf-8")
.decode("unicode-escape"))
if n_failures > 0:
github_comment(
"**Some checks failed. Please fix and resubmit.**\n\n" + comment)
elif comment or get_bot_comment():
# Post/edit a success comment if there's some message to show, or if
# there's a previous comment from the bot. Skip the success comment if
# everything passes right away and there's no message to show.
github_comment(bot_success_msg(comment) + comment)
return n_failures
def bot_success_msg(comment):
# report_test_results_to_github() helper. Returns the success message that
# makes the most sense given the previous status.
#
# When untangling this, it might help to know that github_comment() is a
# no-op if the new message is identical to the old one.
all_passed_msg = "**All checks passed.**\n\n"
bot_comment = get_bot_comment()
if bot_comment and all_passed_msg not in bot_comment.body:
# Fail -> Success, or Fail -> Success -> Success
return "**All checks are passing now.**\n\n"
# Only successes
return all_passed_msg
def github_comment(msg):
# Posts 'msg' to GitHub, or edits the previous message posted by the bot if
# it has already posted a message
if not github_pr:
# No pull request to post the message in
return
msg += EDIT_TIP
bot_comment = get_bot_comment()
if bot_comment:
if bot_comment.body != msg:
bot_comment.edit(msg)
else:
github_pr.create_issue_comment(msg)
# Cache used by get_bot_comment(). Use 0 instead of None for "no cached value"
# so that None (no comment) can be cached.
cached_bot_comment = 0
def get_bot_comment():
# Returns any previous comment posted by the bot in 'github_pr', or None if
# the bot hasn't posted any comment (or there's no pull request)
global cached_bot_comment
def get_comment():
if not github_pr:
return None
for comment in github_pr.get_issue_comments():
if comment.user.login != os.getenv('GH_USERNAME', 'zephyrbot'):
continue
if EDIT_TIP in comment.body:
return comment
return None
if cached_bot_comment == 0:
cached_bot_comment = get_comment()
return cached_bot_comment
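# Added note (not in the original script): the 0-vs-None sentinel above is a
# small memoization idiom; a generic sketch of the same pattern would be
#     _cached = 0                    # 0 means "not computed yet"
#     def cached_value():
#         global _cached
#         if _cached == 0:
#             _cached = expensive_lookup()   # may legitimately return None
#         return _cached
# where expensive_lookup() is a hypothetical stand-in, not a function here.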
def parse_args():
parser = argparse.ArgumentParser(
description="Check for coding style and documentation warnings.")
parser.add_argument('-c', '--commits', default="HEAD~1..",
help='''Commit range in the form: a..[b], default is
HEAD~1..HEAD''')
parser.add_argument('-g', '--github', action="store_true",
help="Post results as comments in the PR on GitHub")
parser.add_argument('-r', '--repo', default=None,
help="GitHub repository")
parser.add_argument('-p', '--pull-request', default=0, type=int,
help="Pull request number")
parser.add_argument('-s', '--status', action="store_true",
help="Set status on GitHub to pending and exit")
parser.add_argument('-S', '--sha', default=None, help="Commit SHA")
parser.add_argument('-o', '--output', default="compliance.xml",
help='''Name of outfile in JUnit format,
default is ./compliance.xml''')
parser.add_argument('-l', '--list', action="store_true",
help="List all checks and exit")
parser.add_argument("-v", "--loglevel", help="python logging level")
parser.add_argument('-m', '--module', action="append", default=[],
help="Checks to run. All checks by default.")
parser.add_argument('-e', '--exclude-module', action="append", default=[],
help="Do not run the specified checks")
parser.add_argument('-j', '--previous-run', default=None,
help='''Pre-load JUnit results in XML format
from a previous run and combine with new results.''')
return parser.parse_args()
def init_github(args):
# Initializes a GitHub connection
global commit_sha
global github_repo
global github_pr
global github_commit
global build_number
if args.repo is None:
err("--repo <name> must be passed when connecting to GitHub")
if args.sha is None:
err("--sha <SHA> must be passed when connecting to GitHub")
commit_sha = args.sha
if 'GH_TOKEN' not in os.environ:
err("the GH_TOKEN environment variable must be set when connecting "
"to GitHub")
github_repo = Github(os.environ['GH_TOKEN']).get_repo(args.repo)
github_commit = github_repo.get_commit(commit_sha)
if args.pull_request:
github_pr = github_repo.get_pull(args.pull_request)
else:
github_pr = None
# Get the shippable build number, useful to find logs
build_number = os.environ.get("BUILD_NUMBER")
if build_number is None:
err("the BUILD_NUMBER environment variable must be set when "
"connecting to GitHub")
def _main(args):
# The "real" main(), which is wrapped to catch exceptions and report them
# to GitHub. Returns the number of test failures.
# The absolute path of the top-level git directory. Initialize it here so
# that issues running Git can be reported to GitHub.
global GIT_TOP
GIT_TOP = git("rev-parse", "--show-toplevel")
# The commit range passed in --commit, e.g. "HEAD~3"
global COMMIT_RANGE
COMMIT_RANGE = args.commits
init_logs(args.loglevel)
if args.list:
for testcase in ComplianceTest.__subclasses__():
print(testcase.name)
return 0
if args.status:
set_pending(args.exclude_module)
return 0
# Load saved test results from an earlier run, if requested
if args.previous_run:
if not os.path.exists(args.previous_run):
# This probably means that an earlier pass had an internal error
# (the script is currently run multiple times by the ci-pipelines
# repo). Since that earlier pass might've posted an error to
# GitHub, avoid generating a GitHub comment here, by avoiding
# sys.exit() (which gets caught in main()).
print("error: '{}' not found".format(args.previous_run),
file=sys.stderr)
return 1
logging.info("Loading previous results from " + args.previous_run)
for loaded_suite in JUnitXml.fromfile(args.previous_run):
suite = loaded_suite
break
else:
suite = TestSuite("Compliance")
for testcase in ComplianceTest.__subclasses__():
# "Modules" and "testcases" are the same thing. Better flags would have
# been --tests and --exclude-tests or the like, but it's awkward to
# change now.
if args.module and testcase.name not in args.module:
continue
if testcase.name in args.exclude_module:
print("Skipping " + testcase.name)
continue
test = testcase()
try:
print(f"Running {test.name:16} tests in "
f"{GIT_TOP if test.path_hint == '<git-top>' else test.path_hint} ...")
test.run()
except EndTest:
pass
suite.add_testcase(test.case)
xml = JUnitXml()
xml.add_testsuite(suite)
xml.update_statistics()
xml.write(args.output, pretty=True)
failed_cases = []
# TODO maybe: move all the github-related code to a different .py
# file to draw a better line between developer code and
# infrastructure-specific code; in other words, keep this file
# 100% testable and maintainable by non-admin developers.
if args.github:
n_fails = report_test_results_to_github(suite)
else:
for case in suite:
if case.result:
if case.result.type == 'skipped':
logging.warning("Skipped %s, %s", case.name, case.result.message)
else:
failed_cases.append(case)
else:
# Some checks like codeowners can produce no .result
logging.info("No JUnit result for %s", case.name)
n_fails = len(failed_cases)
if n_fails:
print("{} checks failed".format(n_fails))
for case in failed_cases:
# it is unclear why junitxml doesn't expose the most
# important part of its underlying etree.Element
errmsg = case.result._elem.text
logging.error("Test %s failed: %s", case.name,
errmsg.strip() if errmsg else case.result.message)
print("\nComplete results in " + args.output)
return n_fails
def main():
args = parse_args()
# Initialize the GitHub connection early so that any errors from the script
# itself can be reported
if args.github or args.status:
init_github(args)
try:
n_fails = _main(args)
except BaseException:
# Catch BaseException instead of Exception to include stuff like
# SystemExit (raised by sys.exit())
if args.github:
github_comment(
"**Internal CI Error.**\n\nPython exception in `{}`:\n\n"
"```\n{}\n```".format(__file__, traceback.format_exc()))
raise
sys.exit(n_fails)
def cmd2str(cmd):
# Formats the command-line arguments in the iterable 'cmd' into a string,
# for error messages and the like
return " ".join(shlex.quote(word) for word in cmd)
def err(msg):
cmd = sys.argv[0] # Empty if missing
if cmd:
cmd += ": "
sys.exit(cmd + "error: " + msg)
if __name__ == "__main__":
main()
| 36.359364
| 118
| 0.592793
|
f951268fd6a0904af3baafc0bf7c283eda050976
| 3,393
|
py
|
Python
|
Poly.py
|
eidolonFIRE/CarpetBomb
|
52d83a2f1d17b88bfd5cbcac5741437ce630a8b5
|
[
"MIT"
] | 5
|
2018-05-13T05:01:14.000Z
|
2018-05-28T15:08:27.000Z
|
Poly.py
|
eidolonFIRE/CarpetBomb
|
52d83a2f1d17b88bfd5cbcac5741437ce630a8b5
|
[
"MIT"
] | null | null | null |
Poly.py
|
eidolonFIRE/CarpetBomb
|
52d83a2f1d17b88bfd5cbcac5741437ce630a8b5
|
[
"MIT"
] | null | null | null |
import math
def _vertSub(a, b):
return a[0] - b[0], a[1] - b[1]
def _vertAtan2(a, b):
return math.atan2(b[1] - a[1], b[0] - a[0])
def _vertNorm(a, b):
return math.sqrt((b[0] - a[0])**2 + (b[1] - a[1])**2)
class Poly(object):
"""docstring for Poly"""
def __init__(self, bomb_callback=None, display=None, root=None):
super(Poly, self).__init__()
self.d = display
self.root = root
self.started = False
self._reset()
self.zoom = 16
self.bomb = bomb_callback
def _reset(self):
self.left = 1000000
self.right = 0
self.top = 1000000
self.bottom = 0
self.verts = []
def printVerts(self, verts, indent = 0):
i = 0
for v in verts:
print(("%d, " + ","*indent + " %d") % (v[0], v[1]))
i += 1
def _contains(self, x, y):
i = 0
c = 0
for i in range(0, len(self.verts)):
ax, ay = self.verts[i]
px, py = self.verts[i-1]
if (ay > y) != (py > y):
if x < (px - ax) * (y - ay) / (float(py) - ay) + ax:
c += 1
return c % 2 == 1
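# Added note (not in the original module): _contains() is the classic
# ray-casting (even-odd) point-in-polygon test: it counts how many edges a
# horizontal ray from (x, y) crosses; an odd count means "inside". For a
# hypothetical square verts = [(0, 0), (4, 0), (4, 4), (0, 4)], the point
# (2, 2) crosses one edge and is inside, while (5, 2) crosses none and is
# outside.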
def _getScale(self):
return max(int(self.zoom**(2.7) / 90), 5)
def douglasPeuckerReduceVerts(self, epsilon, verbose=True):
prevCount = len(self.verts)
self.verts, iterCounter = self._douglasPeucker(self.verts, epsilon)
postCount = len(self.verts)
if verbose:
print("Vert reduction: %d --> %d in %d steps" % (prevCount, postCount, iterCounter))
def _douglasHeuristic(self, origin, point, end):
return abs(math.sin(abs(_vertAtan2(origin, point) - _vertAtan2(origin, end))) * _vertNorm(origin, point))
def _douglasPeucker(self, verts, epsilon):
# https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm
# Find the point with the maximum distance
iterCounter = 0
dmax = 0
index = 0
for scan in range(1, len(verts) - 1):
iterCounter += 1
d = self._douglasHeuristic(verts[0], verts[scan], verts[-1])
if d > dmax:
index = scan
dmax = d
# If max distance is greater than epsilon, recursively simplify
if dmax > epsilon:
# Recursive call
recResults1, z1 = self._douglasPeucker(verts[:index + 1], epsilon)
recResults2, z2 = self._douglasPeucker(verts[index:], epsilon)
iterCounter += z1 + z2
retval = recResults1[:-1] + recResults2
else:
retval = [verts[0], verts[-1]]
return retval, iterCounter
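# Added note (not in the original module): Ramer-Douglas-Peucker keeps the
# two endpoints, finds the interior vertex farthest from the start-end
# chord, and recurses on both halves while that distance exceeds epsilon.
# Trivial hypothetical example: for the collinear polyline
# [(0, 0), (1, 1), (2, 2), (3, 3)] every interior distance is 0, so any
# epsilon > 0 reduces it to [(0, 0), (3, 3)].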
def addVert(self, x, y):
if self.started:
if len(self.verts) == 0 or (abs(x - self.verts[-1][0]) > 2 or abs(y - self.verts[-1][1]) > 2):
self.left = x if x < self.left else self.left
self.right = x if x > self.right else self.right
self.top = y if y < self.top else self.top
self.bottom = y if y > self.bottom else self.bottom
self.verts.append((x,y))
def polyBomb(self):
scale = self._getScale()
count = 0
offsetRow = True
if self.bomb:
for mx in range(self.left, self.right, scale):
offsetRow = not offsetRow
for my in range(self.top - (scale // 2 * offsetRow), self.bottom, scale):  # integer division keeps range() arguments ints on Python 3
if self._contains(mx, my):
self.bomb(mx, my)
count += 1
if count >= 1000:
print("Failsafe due to >1000 launches...")
return count
return count
def startPoly(self):
self.started = True
def endPoly(self):
self.started = False
if len(self.verts) > 5:
self.douglasPeuckerReduceVerts(epsilon=2.7, verbose=False)
count = self.polyBomb()
print("- %d Artillery launched at %d zoom"%(count, self.zoom))
self._reset()
| 27.811475
| 107
| 0.627763
|
e6e81a903499807c1053b0c6c65f6b608fb50103
| 4,697
|
py
|
Python
|
blogs/goes16/maria/create_image.py
|
laurenzberger/training-data-analyst
|
3e2ef4668c5088ab50ad50a4f29673c88fb1bcd3
|
[
"Apache-2.0"
] | 6,140
|
2016-05-23T16:09:35.000Z
|
2022-03-30T19:00:46.000Z
|
blogs/goes16/maria/create_image.py
|
laurenzberger/training-data-analyst
|
3e2ef4668c5088ab50ad50a4f29673c88fb1bcd3
|
[
"Apache-2.0"
] | 1,384
|
2016-07-08T22:26:41.000Z
|
2022-03-24T16:39:43.000Z
|
blogs/goes16/maria/create_image.py
|
laurenzberger/training-data-analyst
|
3e2ef4668c5088ab50ad50a4f29673c88fb1bcd3
|
[
"Apache-2.0"
] | 5,110
|
2016-05-27T13:45:18.000Z
|
2022-03-31T18:40:42.000Z
|
#!/usr/bin/env python
"""
Copyright Google Inc. 2017
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def create_local_snapshots(outdir, hurricane_file):
import shutil,os
import hurricanes.goes_to_jpeg as g2j
shutil.rmtree(outdir, ignore_errors=True)
os.mkdir(outdir)
with open(hurricane_file, 'r') as ifp:
for line in ifp:
dt, lat, lon = g2j.parse_line(line)
objectId = g2j.get_objectId_at(dt)
outfilename = os.path.join(
outdir,
'ir_{}{:02d}{:02d}{:02d}{:02d}.jpg'.format(
dt.year, dt.month, dt.day, dt.hour, dt.second))
jpgfile = g2j.goes_to_jpeg(objectId, lat, lon, None, outfilename)
break  # remove this break to process all the timestamps ...
def create_query(opts):
query = """
SELECT
name,
latitude,
longitude,
iso_time,
dist2land
FROM
`bigquery-public-data.noaa_hurricanes.hurricanes`
"""
clause = "WHERE season = '{}' ".format(opts.year)
if len(opts.hurricane) > 0:
clause += " AND name LIKE '%{}%' ".format(opts.hurricane.upper())
elif len(opts.basin) > 0:
clause += " AND basin = '{}' ".format(opts.basin.upper())
else:
raise ValueError("Need to specify either a hurricane or a basin")
return query + clause
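# Added note (not in the original script): with hypothetical options such as
# --year 2017 --hurricane maria, create_query() appends
#     WHERE season = '2017'  AND name LIKE '%MARIA%'
# to the base SELECT, and with --year 2017 --basin na it appends
#     WHERE season = '2017'  AND basin = 'NA'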
def create_snapshots_on_cloud(bucket, project, runner, opts):
import datetime, os
import apache_beam as beam
import hurricanes.goes_to_jpeg as g2j
query = create_query(opts)
OUTPUT_DIR = 'gs://{}/hurricane/'.format(bucket)
options = {
'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'),
'temp_location': os.path.join(OUTPUT_DIR, 'tmp'),
'job_name': 'maria-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S'),
'project': project,
'max_num_workers': 12,
'setup_file': './setup.py',
'teardown_policy': 'TEARDOWN_ALWAYS',
'no_save_main_session': True
}
opts = beam.pipeline.PipelineOptions(flags=[], **options)
p = beam.Pipeline(runner, options=opts)
(p
| 'get_tracks' >> beam.io.Read(beam.io.BigQuerySource(query=query, use_standard_sql=True))
| 'loc_at_time' >> beam.Map(lambda rowdict: (
g2j.parse_timestamp(rowdict['iso_time']),
rowdict['name'].lower(),
rowdict['latitude'],
rowdict['longitude']))
| 'to_jpg' >> beam.Map(lambda (dt,name,lat,lon):
g2j.goes_to_jpeg(
g2j.get_objectId_at(dt),
lat, lon,
bucket,
'hurricane/images/{}/ir_{}{:02d}{:02d}{:02d}{:02d}.jpg'.format(
name, dt.year, dt.month, dt.day, dt.hour, dt.second)))
)
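# Added note (not in the original script): the 'to_jpg' step uses a
# tuple-unpacking lambda, lambda (dt, name, lat, lon): ..., which is
# Python 2-only syntax (removed in Python 3 by PEP 3113), so this pipeline
# is expected to run under a Python 2 Beam environment.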
job = p.run()
if runner == 'DirectRunner':
job.wait_until_finish()
if __name__ == '__main__':
import argparse, logging
parser = argparse.ArgumentParser(description='Plot the landfall of Hurricane Maria')
parser.add_argument('--bucket', default='', help='Specify GCS bucket to run on cloud')
parser.add_argument('--project', default='', help='Specify GCP project to bill')
parser.add_argument('--outdir', default='', help='output dir if local')
parser.add_argument('--hurricane', default='', help='name of hurricane; if empty, uses basin')
parser.add_argument('--basin', default='', help='name of basin, e.g NA for North-Atlantic')
parser.add_argument('--year', required=True, help='year of named hurricane, e.g. 2017')
opts = parser.parse_args()
runner = 'DataflowRunner' # run on Cloud
#runner = 'DirectRunner' # run Beam on local machine, but write outputs to cloud
logging.basicConfig(level=getattr(logging, 'INFO', None))
if len(opts.bucket) > 0:
if len(opts.project) == 0:
raise ValueError("Please specify billed project")
logging.info('Running on cloud ...')
create_snapshots_on_cloud(opts.bucket, opts.project, runner, opts)
elif len(opts.outdir) > 0:
create_local_snapshots(opts.outdir, 'MARIA.csv')
else:
raise ValueError("Need to specify either outdir or bucket")
| 38.818182
| 98
| 0.630402
|
157fd55ec2947856d748a12ea0900b1ca9a292f3
| 1,835
|
py
|
Python
|
fdroid.py
|
anatolikalysch/APKCrawler
|
6d2fde6decb4043a5a838871396f67e33657e893
|
[
"MIT"
] | 10
|
2017-12-05T03:49:35.000Z
|
2021-01-10T23:51:47.000Z
|
fdroid.py
|
anatolikalysch/APKCrawler
|
6d2fde6decb4043a5a838871396f67e33657e893
|
[
"MIT"
] | null | null | null |
fdroid.py
|
anatolikalysch/APKCrawler
|
6d2fde6decb4043a5a838871396f67e33657e893
|
[
"MIT"
] | 7
|
2017-11-01T06:17:13.000Z
|
2020-04-03T08:42:31.000Z
|
from crawler import *
class fdroid_crawler(crawler):
def __init__(self):
super().__init__('https://f-droid.org/repository/browse/',
'https://f-droid.org/repository/browse/?fdpage=2')
self.folder_name = 'fdroid/'
if not os.path.exists(self.folder_name):
os.mkdir(self.folder_name)
def mutate_url(self, url, counter):
result = 'https://f-droid.org/repository/browse/?fdpage={}'.format(counter + 1)
return result
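# Added note (not in the original crawler): mutate_url() only rewrites the
# page index, e.g. mutate_url(<any url>, 1) returns
# 'https://f-droid.org/repository/browse/?fdpage=2'.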
def extraction_routine(self, string):
apps = re.findall(r'.*href="(https://f-droid.org/repository/browse/\?fdid=.*?)".*', string)
apps.extend(re.findall(r'.*href="(https://f-droid.org/repository/browse/\?fdid=.*?&fdpage=.*?)".*', string))
for app in apps:
try:
website = requests.get(app, timeout=self.timeout, headers=self.header).text
dl_links = re.findall(r'href="(https://f-droid.org/repo/.*?\.apk)"', website)
for link in dl_links:
try:
apk_name = link.split('/')[-1]
if os.path.exists(self.folder_name + apk_name):
continue
else:
apk_bytes = requests.get(link, allow_redirects=True, stream=True, timeout=self.timeout,
headers=self.header)
with open(self.folder_name + apk_name, 'wb') as f:
for chunk in apk_bytes.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
except:
pass
except:
pass
| 44.756098
| 116
| 0.497548
|
5a4cd238676b961f71fc2cd613ebf246564f948a
| 596
|
py
|
Python
|
problems/435.Non-overlapping_Intervals/li_1line.py
|
subramp-prep/leetcode
|
d125201d9021ab9b1eea5e5393c2db4edd84e740
|
[
"Unlicense"
] | null | null | null |
problems/435.Non-overlapping_Intervals/li_1line.py
|
subramp-prep/leetcode
|
d125201d9021ab9b1eea5e5393c2db4edd84e740
|
[
"Unlicense"
] | null | null | null |
problems/435.Non-overlapping_Intervals/li_1line.py
|
subramp-prep/leetcode
|
d125201d9021ab9b1eea5e5393c2db4edd84e740
|
[
"Unlicense"
] | null | null | null |
# coding=utf-8
# Author: Jianghan LI
# Question: /Users/mac/Work/LeetCode/problems/435.Non-overlapping_Intervals/li_1line.py
# Date: 2017-04-01
# Complexity: O(NlogN)
class Solution(object):
def eraseOverlapIntervals(self, intervals):
return len(intervals) - reduce(lambda res, i: (res[0] + 1, i[1]) if i[0] >= res[1] else res,
sorted(intervals, key=lambda i: i[1]), (0, -float('inf')))[0]
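# Added note (not in the original solution): the one-liner is the standard
# greedy interval-scheduling argument: sort intervals by end point, count how
# many can be kept without overlap (keep an interval when its start >= the
# end of the last kept one), and return len(intervals) minus that count.
# E.g. [[1, 2], [2, 3], [3, 4], [1, 3]] keeps [1, 2], [2, 3], [3, 4], so
# exactly one interval must be erased.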
############ test case ###########
s = Solution()
print s.eraseOverlapIntervals([[1, 2], [2, 3], [3, 4], [1, 3]])
############ comments ############
| 29.8
| 100
| 0.563758
|
9a40c552b551319e2160027626209d71d7c0728f
| 3,190
|
py
|
Python
|
m1k7/views.py
|
octavioturra/aritial
|
499f001d9f308727820736f852faca9f3a3ffed5
|
[
"Apache-2.0"
] | null | null | null |
m1k7/views.py
|
octavioturra/aritial
|
499f001d9f308727820736f852faca9f3a3ffed5
|
[
"Apache-2.0"
] | null | null | null |
m1k7/views.py
|
octavioturra/aritial
|
499f001d9f308727820736f852faca9f3a3ffed5
|
[
"Apache-2.0"
] | null | null | null |
#-*- coding:utf-8 -*-
# Create your views here.
#from django.template import Context, loader
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.api import mail
from django.shortcuts import render_to_response, get_list_or_404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import HttpRequest
from django import forms
from m1k7.models import Cliente
import md5
'''
Aritial
A simple random-conversation system based on users' sentences
and content learned from txt books
PLAN
An intelligent search system that will return your profile and some of the
data you search for, identifying your needs through semantic data and
providing a study of the user's entire psychological profile.
'''
def main(request):
'''
Main
Main page with the Ajax control
'''
user = users.get_current_user()
if user:
sistema = "aritial"
else:
sistema = "sistema"
return render_to_response('index.html', {'sistema':sistema})
def home(request):
'''
Home
Landing page of the site
PLAN
Create an Aritial microblog with dynamic content that will bring
in some interesting sentences from Knowledge
'''
return render_to_response('home.html', {})
def system(request):
'''
System
Entry or login page
PLAN
Identify users who share your profile and list them here
'''
loginUrl = users.create_login_url("/#aritial")
return render_to_response('services.html', {'log':loginUrl})
def contato(request):
'''
Contato
Contact page for the creators
PLAN
Add here the contact details of all the people and tools
used in the development
'''
return render_to_response('contato.html', {})
def aritial(request):
'''
Aritial
Home page of the system and container for the controlling AJAX
PLAN
Integrate with image services, add drag-and-drop upload, and get
more people to interact with this page
'''
cliente = Cliente()
user = users.get_current_user()
dbUser = db.GqlQuery("SELECT * FROM Cliente WHERE user = :1",user)
try:
dbUser[0].user
except:
cliente.user = user
cliente.put()
logofUrl = users.create_logout_url("/#home")
user = user.nickname()
hkey = md5.new()
hkey.update(str(user))
hkey = hkey.hexdigest()
return render_to_response('aritial.html',{'logoff':logofUrl,'key':hkey})
def email(request):
'''
Email
Simple send-mail tool
PLAN
Turn this into a ticket container with contact and interaction
that feeds the user's profile
'''
mail.send_mail(sender='octavio.turra@gmail.com',to="octavio.turra@gmail",subject=request.POST["nome"],body = "Enviado por: " + request.POST["email"] +" Mensagem:" + request.POST["corpo"])
return HttpResponse("Obrigado pela sua mensagem!")
| 25.934959
| 191
| 0.658621
|
60dbdcac5b50be5d7aac8fd426eb612be84284ad
| 26,022
|
py
|
Python
|
src/twisted/test/test_adbapi.py
|
apjanke/twisted
|
22f949f7ce187513f0c218b73186c8a73baa00b4
|
[
"Unlicense",
"MIT"
] | 1
|
2021-01-03T01:54:14.000Z
|
2021-01-03T01:54:14.000Z
|
src/twisted/test/test_adbapi.py
|
zerospam/twisted
|
e23b5e2040a4d643bc6a43785621358569886a0d
|
[
"MIT",
"Unlicense"
] | null | null | null |
src/twisted/test/test_adbapi.py
|
zerospam/twisted
|
e23b5e2040a4d643bc6a43785621358569886a0d
|
[
"MIT",
"Unlicense"
] | null | null | null |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for twisted.enterprise.adbapi.
"""
from twisted.trial import unittest
import os
import stat
from typing import Dict, Optional
from twisted.enterprise.adbapi import ConnectionPool, ConnectionLost
from twisted.enterprise.adbapi import Connection, Transaction
from twisted.internet import reactor, defer, interfaces
from twisted.python.failure import Failure
from twisted.python.reflect import requireModule
simple_table_schema = """
CREATE TABLE simple (
x integer
)
"""
class ADBAPITestBase:
"""
Test the asynchronous DB-API code.
"""
openfun_called = {} # type: Dict[object, bool]
if interfaces.IReactorThreads(reactor, None) is None:
skip = "ADB-API requires threads, no way to test without them"
def extraSetUp(self):
"""
Set up the database and create a connection pool pointing at it.
"""
self.startDB()
self.dbpool = self.makePool(cp_openfun=self.openfun)
self.dbpool.start()
def tearDown(self):
d = self.dbpool.runOperation("DROP TABLE simple")
d.addCallback(lambda res: self.dbpool.close())
d.addCallback(lambda res: self.stopDB())
return d
def openfun(self, conn):
self.openfun_called[conn] = True
def checkOpenfunCalled(self, conn=None):
if not conn:
self.assertTrue(self.openfun_called)
else:
self.assertIn(conn, self.openfun_called)
def test_pool(self):
d = self.dbpool.runOperation(simple_table_schema)
if self.test_failures:
d.addCallback(self._testPool_1_1)
d.addCallback(self._testPool_1_2)
d.addCallback(self._testPool_1_3)
d.addCallback(self._testPool_1_4)
d.addCallback(lambda res: self.flushLoggedErrors())
d.addCallback(self._testPool_2)
d.addCallback(self._testPool_3)
d.addCallback(self._testPool_4)
d.addCallback(self._testPool_5)
d.addCallback(self._testPool_6)
d.addCallback(self._testPool_7)
d.addCallback(self._testPool_8)
d.addCallback(self._testPool_9)
return d
def _testPool_1_1(self, res):
d = defer.maybeDeferred(self.dbpool.runQuery, "select * from NOTABLE")
d.addCallbacks(lambda res: self.fail("no exception"), lambda f: None)
return d
def _testPool_1_2(self, res):
d = defer.maybeDeferred(self.dbpool.runOperation, "deletexxx from NOTABLE")
d.addCallbacks(lambda res: self.fail("no exception"), lambda f: None)
return d
def _testPool_1_3(self, res):
d = defer.maybeDeferred(self.dbpool.runInteraction, self.bad_interaction)
d.addCallbacks(lambda res: self.fail("no exception"), lambda f: None)
return d
def _testPool_1_4(self, res):
d = defer.maybeDeferred(self.dbpool.runWithConnection, self.bad_withConnection)
d.addCallbacks(lambda res: self.fail("no exception"), lambda f: None)
return d
def _testPool_2(self, res):
# verify simple table is empty
sql = "select count(1) from simple"
d = self.dbpool.runQuery(sql)
def _check(row):
self.assertTrue(int(row[0][0]) == 0, "Interaction not rolled back")
self.checkOpenfunCalled()
d.addCallback(_check)
return d
def _testPool_3(self, res):
sql = "select count(1) from simple"
inserts = []
# add some rows to simple table (runOperation)
for i in range(self.num_iterations):
sql = "insert into simple(x) values(%d)" % i
inserts.append(self.dbpool.runOperation(sql))
d = defer.gatherResults(inserts)
def _select(res):
# make sure they were added (runQuery)
sql = "select x from simple order by x"
d = self.dbpool.runQuery(sql)
return d
d.addCallback(_select)
def _check(rows):
self.assertTrue(len(rows) == self.num_iterations, "Wrong number of rows")
for i in range(self.num_iterations):
self.assertTrue(len(rows[i]) == 1, "Wrong size row")
self.assertTrue(rows[i][0] == i, "Values not returned.")
d.addCallback(_check)
return d
def _testPool_4(self, res):
# runInteraction
d = self.dbpool.runInteraction(self.interaction)
d.addCallback(lambda res: self.assertEqual(res, "done"))
return d
def _testPool_5(self, res):
# withConnection
d = self.dbpool.runWithConnection(self.withConnection)
d.addCallback(lambda res: self.assertEqual(res, "done"))
return d
def _testPool_6(self, res):
# Test a withConnection cannot be closed
d = self.dbpool.runWithConnection(self.close_withConnection)
return d
def _testPool_7(self, res):
# give the pool a workout
ds = []
for i in range(self.num_iterations):
sql = "select x from simple where x = %d" % i
ds.append(self.dbpool.runQuery(sql))
dlist = defer.DeferredList(ds, fireOnOneErrback=True)
def _check(result):
for i in range(self.num_iterations):
self.assertTrue(result[i][1][0][0] == i, "Value not returned")
dlist.addCallback(_check)
return dlist
def _testPool_8(self, res):
# now delete everything
ds = []
for i in range(self.num_iterations):
sql = "delete from simple where x = %d" % i
ds.append(self.dbpool.runOperation(sql))
dlist = defer.DeferredList(ds, fireOnOneErrback=True)
return dlist
def _testPool_9(self, res):
# verify simple table is empty
sql = "select count(1) from simple"
d = self.dbpool.runQuery(sql)
def _check(row):
self.assertTrue(
int(row[0][0]) == 0, "Didn't successfully delete table contents"
)
self.checkConnect()
d.addCallback(_check)
return d
def checkConnect(self):
"""Check the connect/disconnect synchronous calls."""
conn = self.dbpool.connect()
self.checkOpenfunCalled(conn)
curs = conn.cursor()
curs.execute("insert into simple(x) values(1)")
curs.execute("select x from simple")
res = curs.fetchall()
self.assertEqual(len(res), 1)
self.assertEqual(len(res[0]), 1)
self.assertEqual(res[0][0], 1)
curs.execute("delete from simple")
curs.execute("select x from simple")
self.assertEqual(len(curs.fetchall()), 0)
curs.close()
self.dbpool.disconnect(conn)
def interaction(self, transaction):
transaction.execute("select x from simple order by x")
for i in range(self.num_iterations):
row = transaction.fetchone()
self.assertTrue(len(row) == 1, "Wrong size row")
self.assertTrue(row[0] == i, "Value not returned.")
self.assertIsNone(transaction.fetchone(), "Too many rows")
return "done"
def bad_interaction(self, transaction):
if self.can_rollback:
transaction.execute("insert into simple(x) values(0)")
transaction.execute("select * from NOTABLE")
def withConnection(self, conn):
curs = conn.cursor()
try:
curs.execute("select x from simple order by x")
for i in range(self.num_iterations):
row = curs.fetchone()
self.assertTrue(len(row) == 1, "Wrong size row")
self.assertTrue(row[0] == i, "Value not returned.")
finally:
curs.close()
return "done"
def close_withConnection(self, conn):
conn.close()
def bad_withConnection(self, conn):
curs = conn.cursor()
try:
curs.execute("select * from NOTABLE")
finally:
curs.close()
class ReconnectTestBase:
"""
Test the asynchronous DB-API code with reconnect.
"""
if interfaces.IReactorThreads(reactor, None) is None:
skip = "ADB-API requires threads, no way to test without them"
def extraSetUp(self):
"""
Skip the test if C{good_sql} is unavailable. Otherwise, set up the
database, create a connection pool pointed at it, and set up a simple
schema in it.
"""
if self.good_sql is None:
raise unittest.SkipTest("no good sql for reconnect test")
self.startDB()
self.dbpool = self.makePool(
cp_max=1, cp_reconnect=True, cp_good_sql=self.good_sql
)
self.dbpool.start()
return self.dbpool.runOperation(simple_table_schema)
def tearDown(self):
d = self.dbpool.runOperation("DROP TABLE simple")
d.addCallback(lambda res: self.dbpool.close())
d.addCallback(lambda res: self.stopDB())
return d
def test_pool(self):
d = defer.succeed(None)
d.addCallback(self._testPool_1)
d.addCallback(self._testPool_2)
if not self.early_reconnect:
d.addCallback(self._testPool_3)
d.addCallback(self._testPool_4)
d.addCallback(self._testPool_5)
return d
def _testPool_1(self, res):
sql = "select count(1) from simple"
d = self.dbpool.runQuery(sql)
def _check(row):
self.assertTrue(int(row[0][0]) == 0, "Table not empty")
d.addCallback(_check)
return d
def _testPool_2(self, res):
# reach in and close the connection manually
list(self.dbpool.connections.values())[0].close()
def _testPool_3(self, res):
sql = "select count(1) from simple"
d = defer.maybeDeferred(self.dbpool.runQuery, sql)
d.addCallbacks(lambda res: self.fail("no exception"), lambda f: None)
return d
def _testPool_4(self, res):
sql = "select count(1) from simple"
d = self.dbpool.runQuery(sql)
def _check(row):
self.assertTrue(int(row[0][0]) == 0, "Table not empty")
d.addCallback(_check)
return d
def _testPool_5(self, res):
self.flushLoggedErrors()
sql = "select * from NOTABLE" # bad sql
d = defer.maybeDeferred(self.dbpool.runQuery, sql)
d.addCallbacks(
lambda res: self.fail("no exception"),
lambda f: self.assertFalse(f.check(ConnectionLost)),
)
return d
class DBTestConnector:
"""
A class which knows how to test for the presence of
and establish a connection to a relational database.
To enable test cases which use a central, system database,
you must create a database named DB_NAME with a user DB_USER
and password DB_PASS with full access rights to database DB_NAME.
"""
# used for creating new test cases
TEST_PREFIX = None # type: Optional[str]
DB_NAME = "twisted_test"
DB_USER = "twisted_test"
DB_PASS = "twisted_test"
DB_DIR = None # directory for database storage
nulls_ok = True # nulls supported
trailing_spaces_ok = True # trailing spaces in strings preserved
can_rollback = True # rollback supported
test_failures = True # test bad sql?
escape_slashes = True # escape \ in sql?
good_sql = ConnectionPool.good_sql # type: Optional[str]
early_reconnect = True # cursor() will fail on closed connection
can_clear = True # can try to clear out tables when starting
# number of iterations for test loop (lower this for slow db's)
num_iterations = 50
def setUp(self):
self.DB_DIR = self.mktemp()
os.mkdir(self.DB_DIR)
if not self.can_connect():
raise unittest.SkipTest("%s: Cannot access db" % self.TEST_PREFIX)
return self.extraSetUp()
def can_connect(self):
"""Return true if this database is present on the system
and can be used in a test."""
raise NotImplementedError()
def startDB(self):
"""Take any steps needed to bring database up."""
pass
def stopDB(self):
"""Bring database down, if needed."""
pass
def makePool(self, **newkw):
"""Create a connection pool with additional keyword arguments."""
args, kw = self.getPoolArgs()
kw = kw.copy()
kw.update(newkw)
return ConnectionPool(*args, **kw)
def getPoolArgs(self):
"""Return a tuple (args, kw) of list and keyword arguments
that need to be passed to ConnectionPool to create a connection
to this database."""
raise NotImplementedError()
class SQLite3Connector(DBTestConnector):
"""
Connector that uses the stdlib SQLite3 database support.
"""
TEST_PREFIX = "SQLite3"
escape_slashes = False
num_iterations = 1 # slow
def can_connect(self):
if requireModule("sqlite3") is None:
return False
else:
return True
def startDB(self):
self.database = os.path.join(self.DB_DIR, self.DB_NAME)
if os.path.exists(self.database):
os.unlink(self.database)
def getPoolArgs(self):
args = ("sqlite3",)
kw = {"database": self.database, "cp_max": 1, "check_same_thread": False}
return args, kw
class PySQLite2Connector(DBTestConnector):
"""
Connector that uses pysqlite's SQLite database support.
"""
TEST_PREFIX = "pysqlite2"
escape_slashes = False
num_iterations = 1 # slow
def can_connect(self):
if requireModule("pysqlite2.dbapi2") is None:
return False
else:
return True
def startDB(self):
self.database = os.path.join(self.DB_DIR, self.DB_NAME)
if os.path.exists(self.database):
os.unlink(self.database)
def getPoolArgs(self):
args = ("pysqlite2.dbapi2",)
kw = {"database": self.database, "cp_max": 1, "check_same_thread": False}
return args, kw
class PyPgSQLConnector(DBTestConnector):
TEST_PREFIX = "PyPgSQL"
def can_connect(self):
try:
from pyPgSQL import PgSQL
except:
return False
try:
conn = PgSQL.connect(
database=self.DB_NAME, user=self.DB_USER, password=self.DB_PASS
)
conn.close()
return True
except:
return False
def getPoolArgs(self):
args = ("pyPgSQL.PgSQL",)
kw = {
"database": self.DB_NAME,
"user": self.DB_USER,
"password": self.DB_PASS,
"cp_min": 0,
}
return args, kw
class PsycopgConnector(DBTestConnector):
TEST_PREFIX = "Psycopg"
def can_connect(self):
try:
import psycopg
except:
return False
try:
conn = psycopg.connect(
database=self.DB_NAME, user=self.DB_USER, password=self.DB_PASS
)
conn.close()
return True
except:
return False
def getPoolArgs(self):
args = ("psycopg",)
kw = {
"database": self.DB_NAME,
"user": self.DB_USER,
"password": self.DB_PASS,
"cp_min": 0,
}
return args, kw
class MySQLConnector(DBTestConnector):
TEST_PREFIX = "MySQL"
trailing_spaces_ok = False
can_rollback = False
early_reconnect = False
def can_connect(self):
try:
import MySQLdb
except:
return False
try:
conn = MySQLdb.connect(
db=self.DB_NAME, user=self.DB_USER, passwd=self.DB_PASS
)
conn.close()
return True
except:
return False
def getPoolArgs(self):
args = ("MySQLdb",)
kw = {"db": self.DB_NAME, "user": self.DB_USER, "passwd": self.DB_PASS}
return args, kw
class FirebirdConnector(DBTestConnector):
TEST_PREFIX = "Firebird"
test_failures = False # failure testing causes problems
escape_slashes = False
good_sql = None # firebird doesn't handle failed sql well
can_clear = False # firebird is not so good
num_iterations = 5 # slow
def can_connect(self):
if requireModule("kinterbasdb") is None:
return False
try:
self.startDB()
self.stopDB()
return True
except:
return False
def startDB(self):
import kinterbasdb
self.DB_NAME = os.path.join(self.DB_DIR, DBTestConnector.DB_NAME)
os.chmod(self.DB_DIR, stat.S_IRWXU + stat.S_IRWXG + stat.S_IRWXO)
sql = 'create database "%s" user "%s" password "%s"'
sql %= (self.DB_NAME, self.DB_USER, self.DB_PASS)
conn = kinterbasdb.create_database(sql)
conn.close()
def getPoolArgs(self):
args = ("kinterbasdb",)
kw = {
"database": self.DB_NAME,
"host": "127.0.0.1",
"user": self.DB_USER,
"password": self.DB_PASS,
}
return args, kw
def stopDB(self):
import kinterbasdb
conn = kinterbasdb.connect(
database=self.DB_NAME,
host="127.0.0.1",
user=self.DB_USER,
password=self.DB_PASS,
)
conn.drop_database()
def makeSQLTests(base, suffix, globals):
"""
Make a test case for every db connector which can connect.
@param base: Base class for test case. Additional base classes
will be a DBConnector subclass and unittest.TestCase
@param suffix: A suffix used to create test case names. Prefixes
are defined in the DBConnector subclasses.
"""
connectors = [
PySQLite2Connector,
SQLite3Connector,
PyPgSQLConnector,
PsycopgConnector,
MySQLConnector,
FirebirdConnector,
]
tests = {}
for connclass in connectors:
name = connclass.TEST_PREFIX + suffix
class testcase(connclass, base, unittest.TestCase):
__module__ = connclass.__module__
testcase.__name__ = name
if hasattr(connclass, "__qualname__"):
testcase.__qualname__ = ".".join(
connclass.__qualname__.split()[0:-1] + [name]
)
tests[name] = testcase
globals.update(tests)
# pysqlite2ADBAPITests SQLite3ADBAPITests PyPgSQLADBAPITests
# PsycopgADBAPITests MySQLADBAPITests FirebirdADBAPITests
makeSQLTests(ADBAPITestBase, "ADBAPITests", globals())
# pysqlite2ReconnectTests SQLite3ReconnectTests PyPgSQLReconnectTests
# PsycopgReconnectTests MySQLReconnectTests FirebirdReconnectTests
makeSQLTests(ReconnectTestBase, "ReconnectTests", globals())
class FakePool:
"""
A fake L{ConnectionPool} for tests.
@ivar connectionFactory: factory for making connections returned by the
C{connect} method.
@type connectionFactory: any callable
"""
reconnect = True
noisy = True
def __init__(self, connectionFactory):
self.connectionFactory = connectionFactory
def connect(self):
"""
Return an instance of C{self.connectionFactory}.
"""
return self.connectionFactory()
def disconnect(self, connection):
"""
Do nothing.
"""
class ConnectionTests(unittest.TestCase):
"""
Tests for the L{Connection} class.
"""
def test_rollbackErrorLogged(self):
"""
If an error happens during rollback, L{ConnectionLost} is raised but
the original error is logged.
"""
class ConnectionRollbackRaise:
def rollback(self):
raise RuntimeError("problem!")
pool = FakePool(ConnectionRollbackRaise)
connection = Connection(pool)
self.assertRaises(ConnectionLost, connection.rollback)
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].value.args[0], "problem!")
class TransactionTests(unittest.TestCase):
"""
Tests for the L{Transaction} class.
"""
def test_reopenLogErrorIfReconnect(self):
"""
If the cursor creation raises an error in L{Transaction.reopen}, it
reconnects but logs the error that occurred.
"""
class ConnectionCursorRaise:
count = 0
def reconnect(self):
pass
def cursor(self):
if self.count == 0:
self.count += 1
raise RuntimeError("problem!")
pool = FakePool(None)
transaction = Transaction(pool, ConnectionCursorRaise())
transaction.reopen()
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].value.args[0], "problem!")
class NonThreadPool:
def callInThreadWithCallback(self, onResult, f, *a, **kw):
success = True
try:
result = f(*a, **kw)
except Exception:
success = False
result = Failure()
onResult(success, result)
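# Added note (not in the original tests): NonThreadPool runs the callable
# synchronously in the calling thread and invokes onResult immediately,
# which keeps the ConnectionPool tests below deterministic (no real
# threadpool scheduling is involved).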
class DummyConnectionPool(ConnectionPool):
"""
A testable L{ConnectionPool}.
"""
threadpool = NonThreadPool()
def __init__(self):
"""
Don't forward init call.
"""
self._reactor = reactor
class EventReactor:
"""
Partial L{IReactorCore} implementation with simple event-related
methods.
@ivar _running: A C{bool} indicating whether the reactor is pretending
to have been started already or not.
@ivar triggers: A C{list} of pending system event triggers.
"""
def __init__(self, running):
self._running = running
self.triggers = []
def callWhenRunning(self, function):
if self._running:
function()
else:
return self.addSystemEventTrigger("after", "startup", function)
def addSystemEventTrigger(self, phase, event, trigger):
handle = (phase, event, trigger)
self.triggers.append(handle)
return handle
def removeSystemEventTrigger(self, handle):
self.triggers.remove(handle)
class ConnectionPoolTests(unittest.TestCase):
"""
Unit tests for L{ConnectionPool}.
"""
def test_runWithConnectionRaiseOriginalError(self):
"""
        If rollback fails, L{ConnectionPool.runWithConnection} raises the
        original exception and logs the error from the rollback.
"""
class ConnectionRollbackRaise:
def __init__(self, pool):
pass
def rollback(self):
raise RuntimeError("problem!")
def raisingFunction(connection):
raise ValueError("foo")
pool = DummyConnectionPool()
pool.connectionFactory = ConnectionRollbackRaise
d = pool.runWithConnection(raisingFunction)
d = self.assertFailure(d, ValueError)
def cbFailed(ignored):
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].value.args[0], "problem!")
d.addCallback(cbFailed)
return d
def test_closeLogError(self):
"""
L{ConnectionPool._close} logs exceptions.
"""
class ConnectionCloseRaise:
def close(self):
raise RuntimeError("problem!")
pool = DummyConnectionPool()
pool._close(ConnectionCloseRaise())
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].value.args[0], "problem!")
def test_runWithInteractionRaiseOriginalError(self):
"""
        If rollback fails, L{ConnectionPool.runInteraction} raises the
        original exception and logs the error from the rollback.
"""
class ConnectionRollbackRaise:
def __init__(self, pool):
pass
def rollback(self):
raise RuntimeError("problem!")
class DummyTransaction:
def __init__(self, pool, connection):
pass
def raisingFunction(transaction):
raise ValueError("foo")
pool = DummyConnectionPool()
pool.connectionFactory = ConnectionRollbackRaise
pool.transactionFactory = DummyTransaction
d = pool.runInteraction(raisingFunction)
d = self.assertFailure(d, ValueError)
def cbFailed(ignored):
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].value.args[0], "problem!")
d.addCallback(cbFailed)
return d
def test_unstartedClose(self):
"""
If L{ConnectionPool.close} is called without L{ConnectionPool.start}
having been called, the pool's startup event is cancelled.
"""
reactor = EventReactor(False)
pool = ConnectionPool("twisted.test.test_adbapi", cp_reactor=reactor)
# There should be a startup trigger waiting.
self.assertEqual(reactor.triggers, [("after", "startup", pool._start)])
pool.close()
# But not anymore.
self.assertFalse(reactor.triggers)
def test_startedClose(self):
"""
If L{ConnectionPool.close} is called after it has been started, but
not by its shutdown trigger, the shutdown trigger is cancelled.
"""
reactor = EventReactor(True)
pool = ConnectionPool("twisted.test.test_adbapi", cp_reactor=reactor)
# There should be a shutdown trigger waiting.
self.assertEqual(reactor.triggers, [("during", "shutdown", pool.finalClose)])
pool.close()
# But not anymore.
self.assertFalse(reactor.triggers)
| 30.013841
| 87
| 0.610483
|
35089c1301399779026925afc1ba2cf640c90e1e
| 7,908
|
py
|
Python
|
astroML/density_estimation/histtools.py
|
larsmans/astroML
|
01ee67ea6e1c5a8dedc2498ec7397653d65b2c8d
|
[
"BSD-2-Clause"
] | 1
|
2018-07-03T12:22:22.000Z
|
2018-07-03T12:22:22.000Z
|
astroML/density_estimation/histtools.py
|
larsmans/astroML
|
01ee67ea6e1c5a8dedc2498ec7397653d65b2c8d
|
[
"BSD-2-Clause"
] | null | null | null |
astroML/density_estimation/histtools.py
|
larsmans/astroML
|
01ee67ea6e1c5a8dedc2498ec7397653d65b2c8d
|
[
"BSD-2-Clause"
] | 1
|
2018-07-03T12:22:24.000Z
|
2018-07-03T12:22:24.000Z
|
"""
Tools for working with distributions
"""
import numpy as np
from astroML.density_estimation import bayesian_blocks
from scipy.special import gammaln
from scipy import optimize
def scotts_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using Scott's rule:
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using Scott's rule
bins : ndarray
bin edges: returned if `return_bins` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{3.5\sigma}{n^{1/3}}
where :math:`\sigma` is the standard deviation of the data, and
:math:`n` is the number of data points.
See Also
--------
knuth_bin_width
freedman_bin_width
astroML.plotting.hist
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
sigma = np.std(data)
dx = 3.5 * sigma * 1. / (n ** (1. / 3))
if return_bins:
Nbins = np.ceil((data.max() - data.min()) * 1. / dx)
Nbins = max(1, Nbins)
bins = data.min() + dx * np.arange(Nbins + 1)
return dx, bins
else:
return dx
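# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: it only checks the
# Scott formula dx = 3.5 * sigma / n**(1/3) against scotts_bin_width() on a
# synthetic normal sample. Names and the seed below are illustrative only.
def _demo_scotts_bin_width():
    import numpy as np
    rng = np.random.RandomState(0)
    x = rng.normal(size=1000)
    dx, bins = scotts_bin_width(x, return_bins=True)
    dx_direct = 3.5 * np.std(x) / x.size ** (1. / 3)  # the rule, written out
    assert np.allclose(dx, dx_direct)
    return dx, bins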
def freedman_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using the Freedman-Diaconis rule
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
Returns
-------
width : float
        optimal bin width using the Freedman-Diaconis rule
bins : ndarray
bin edges: returned if `return_bins` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{2(q_{75} - q_{25})}{n^{1/3}}
where :math:`q_{N}` is the :math:`N` percent quartile of the data, and
:math:`n` is the number of data points.
See Also
--------
knuth_bin_width
scotts_bin_width
astroML.plotting.hist
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
if n < 4:
raise ValueError("data should have more than three entries")
dsorted = np.sort(data)
    v25 = dsorted[n // 4 - 1]
    v75 = dsorted[(3 * n) // 4 - 1]
dx = 2 * (v75 - v25) * 1. / (n ** (1. / 3))
if return_bins:
Nbins = np.ceil((dsorted[-1] - dsorted[0]) * 1. / dx)
Nbins = max(1, Nbins)
bins = dsorted[0] + dx * np.arange(Nbins + 1)
return dx, bins
else:
return dx
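# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: compares the
# Freedman-Diaconis width with Scott's rule on one synthetic sample. Both
# rules scale as n**(-1/3) and differ only in the spread estimate
# (interquartile range vs. standard deviation). Seed and names are
# illustrative only.
def _demo_freedman_bin_width():
    import numpy as np
    rng = np.random.RandomState(1)
    x = rng.normal(size=1000)
    dx_freedman, bins = freedman_bin_width(x, return_bins=True)
    dx_scott = scotts_bin_width(x)
    return dx_freedman, dx_scott, bins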
class KnuthF:
r"""Class which implements the function minimized by knuth_bin_width
Parameters
----------
data : array-like, one dimension
data to be histogrammed
Notes
-----
the function F is given by
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`.
See Also
--------
knuth_bin_width
astroML.plotting.hist
"""
def __init__(self, data):
self.data = np.array(data, copy=True)
if self.data.ndim != 1:
raise ValueError("data should be 1-dimensional")
self.data.sort()
self.n = self.data.size
def bins(self, M):
"""Return the bin edges given a width dx"""
return np.linspace(self.data[0], self.data[-1], int(M) + 1)
def __call__(self, M):
return self.eval(M)
def eval(self, M):
"""Evaluate the Knuth function
Parameters
----------
        M : int
            Number of bins
Returns
-------
F : float
evaluation of the negative Knuth likelihood function:
smaller values indicate a better fit.
"""
M = int(M)
bins = self.bins(M)
nk, bins = np.histogram(self.data, bins)
return -(self.n * np.log(M)
+ gammaln(0.5 * M)
- M * gammaln(0.5)
- gammaln(self.n + 0.5 * M)
+ np.sum(gammaln(nk + 0.5)))
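# ---------------------------------------------------------------------------
# Hedged sketch, not part of the original module: a brute-force scan of the
# KnuthF objective over a small grid of bin counts M. Smaller values of the
# returned (negative log-posterior) objective indicate a better fit, so the
# best M on the grid is simply the argmin. The grid bounds are arbitrary.
def _demo_knuthF_grid():
    import numpy as np
    rng = np.random.RandomState(2)
    x = rng.normal(size=500)
    knuthF = KnuthF(x)
    grid = np.arange(2, 51)
    values = [knuthF(M) for M in grid]
    best_M = int(grid[int(np.argmin(values))])
    return best_M, knuthF.bins(best_M)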
def knuth_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using Knuth's rule [1]_
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
Returns
-------
dx : float
optimal bin width. Bins are measured starting at the first data point.
bins : ndarray
bin edges: returned if `return_bins` is True
Notes
-----
The optimal number of bins is the value M which maximizes the function
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`.
References
----------
.. [1] Knuth, K.H. "Optimal Data-Based Binning for Histograms".
arXiv:0605197, 2006
See Also
--------
KnuthF
freedman_bin_width
scotts_bin_width
"""
knuthF = KnuthF(data)
dx0, bins0 = freedman_bin_width(data, True)
M0 = len(bins0) - 1
M = optimize.fmin(knuthF, len(bins0))[0]
bins = knuthF.bins(M)
dx = bins[1] - bins[0]
if return_bins:
return dx, bins
else:
return dx
def histogram(a, bins=10, range=None, **kwargs):
"""Enhanced histogram
This is a histogram function that enables the use of more sophisticated
algorithms for determining bins. Aside from the `bins` argument allowing
    a string specifying how bins are computed, the parameters are the same
as numpy.histogram().
Parameters
----------
a : array_like
array of data to be histogrammed
bins : int or list or str (optional)
If bins is a string, then it must be one of:
'blocks' : use bayesian blocks for dynamic bin widths
'knuth' : use Knuth's rule to determine bins
'scotts' : use Scott's rule to determine bins
        'freedman' : use the Freedman-Diaconis rule to determine bins
range : tuple or None (optional)
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
    other keyword arguments are described in numpy.histogram().
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
numpy.histogram
astroML.plotting.hist
"""
a = np.asarray(a)
# if range is specified, we need to truncate the data for
# the bin-finding routines
if (range is not None and (bins in ['blocks', 'knuth',
'scotts', 'freedman'])):
a = a[(a >= range[0]) & (a <= range[1])]
if bins == 'blocks':
bins = bayesian_blocks(a)
elif bins == 'knuth':
da, bins = knuth_bin_width(a, True)
elif bins == 'scotts':
da, bins = scotts_bin_width(a, True)
elif bins == 'freedman':
da, bins = freedman_bin_width(a, True)
elif isinstance(bins, str):
raise ValueError("unrecognized bin code: '%s'" % bins)
return np.histogram(a, bins, range, **kwargs)
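# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: the enhanced
# histogram() accepts a plain integer bin count as well as the string rules
# implemented above. As with numpy.histogram, len(edges) == len(counts) + 1.
# The 'blocks' rule is omitted here because it needs bayesian_blocks; the
# data and seed are illustrative only.
def _demo_histogram_rules():
    import numpy as np
    rng = np.random.RandomState(3)
    x = rng.normal(size=1000)
    results = {}
    for rule in (10, 'scotts', 'freedman', 'knuth'):
        counts, edges = histogram(x, bins=rule)
        results[rule] = (counts, edges)
    return results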
| 26.80678
| 79
| 0.57562
|
d0769b21ad66ec747f3ae882317ac2621cb2f35c
| 1,456
|
py
|
Python
|
django/src/attic/urls.py
|
SkySchermer/uweclang
|
c4404b550c8c1e6d22eff0a5ddeb8127080b2ad3
|
[
"MIT"
] | null | null | null |
django/src/attic/urls.py
|
SkySchermer/uweclang
|
c4404b550c8c1e6d22eff0a5ddeb8127080b2ad3
|
[
"MIT"
] | null | null | null |
django/src/attic/urls.py
|
SkySchermer/uweclang
|
c4404b550c8c1e6d22eff0a5ddeb8127080b2ad3
|
[
"MIT"
] | null | null | null |
"""attic URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^$', 'attic.views.log_in', name='login'),
url(r'^home/', 'attic.views.home', name='home'),
url(r'^logout/', 'attic.views.log_out', name='logout'),
url(r'^profile/(?P<session_id>.*)$', 'attic.views.profile', name='profile'),
url(r'^reset-session/', 'attic.views.reset_session', name='reset_session'),
url(r'^delete-session/(?P<session_id>.*)$', 'attic.views.delete_session', name='delete_session'),
url(r'^delete-query/(?P<query_id>.*)$', 'attic.views.delete_query', name='delete_query'),
url(r'^delete-query-from-profile/(?P<query_id>.*)$', 'attic.views.delete_query_from_profile', name='delete_query'),
url(r'^modify/', 'attic.views.modify', name='modify'),
]
| 46.967742
| 119
| 0.678571
|
597d38ad10d73b7e60178e6403402deab910dfb5
| 4,891
|
py
|
Python
|
src/harness/cu_pass/dpa_calculator/utilities.py
|
NSF-Swift/Spectrum-Access-System
|
02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf
|
[
"Apache-2.0"
] | null | null | null |
src/harness/cu_pass/dpa_calculator/utilities.py
|
NSF-Swift/Spectrum-Access-System
|
02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf
|
[
"Apache-2.0"
] | null | null | null |
src/harness/cu_pass/dpa_calculator/utilities.py
|
NSF-Swift/Spectrum-Access-System
|
02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
from dataclasses import dataclass
from pathlib import Path
from statistics import stdev
from typing import Callable, List, Tuple
import numpy
from numpy import asarray
from shapely import geometry
from cu_pass.dpa_calculator.constants import DPA_CALCULATOR_LOGGER_NAME, REGION_TYPE_DENSE_URBAN, REGION_TYPE_RURAL, \
REGION_TYPE_SUBURBAN, REGION_TYPE_URBAN
from reference_models.dpa.dpa_mgr import Dpa
from reference_models.geo.drive import nlcd_driver
from reference_models.geo.nlcd import LandCoverCodes
from reference_models.geo.vincenty import GeodesicDistanceBearing, GeodesicPoint
class Point:
def __init__(self, latitude: float, longitude: float):
self.latitude = latitude
self.longitude = longitude
def __eq__(self, other):
return geometry.Point(self.latitude, self.longitude) == geometry.Point(other.latitude, other.longitude)
@classmethod
def from_shapely(cls, point_shapely: geometry.Point) -> 'Point':
return cls(latitude=point_shapely.y, longitude=point_shapely.x)
def to_shapely(self) -> geometry.Point:
return geometry.Point(self.longitude, self.latitude)
def move_distance(bearing: float, kilometers: float, origin: Point) -> Point:
latitude, longitude, _ = GeodesicPoint(lat=origin.latitude, lon=origin.longitude, dist_km=kilometers, bearing=bearing)
return Point(latitude=latitude, longitude=longitude)
def get_distance_between_two_points(point1: Point, point2: Point) -> float:
return _get_geodesic_distance_bearing(point1=point1, point2=point2)[0]
def get_bearing_between_two_points(point1: Point, point2: Point) -> float:
return _get_geodesic_distance_bearing(point1=point1, point2=point2)[1]
def _get_geodesic_distance_bearing(point1: Point, point2: Point) -> Tuple[float, float, float]:
return GeodesicDistanceBearing(lat1=point1.latitude, lon1=point1.longitude, lat2=point2.latitude, lon2=point2.longitude)
def get_dpa_center(dpa: Dpa) -> Point:
return Point.from_shapely(point_shapely=dpa.geometry.centroid)
def get_region_type(coordinates: Point) -> str:
cbsd_region_code = nlcd_driver.GetLandCoverCodes(lat=coordinates.latitude, lon=coordinates.longitude)
if cbsd_region_code == LandCoverCodes.DEVELOPED_LOW:
return REGION_TYPE_SUBURBAN
elif cbsd_region_code == LandCoverCodes.DEVELOPED_MEDIUM:
return REGION_TYPE_URBAN
elif cbsd_region_code == LandCoverCodes.DEVELOPED_HIGH:
return REGION_TYPE_DENSE_URBAN
return REGION_TYPE_RURAL
def region_is_rural(coordinates: Point) -> bool:
return get_region_type(coordinates=coordinates) == REGION_TYPE_RURAL
@dataclass
class SimulationStatistics:
percentile_50: float
percentile_95: float
maximum: float
minimum: float
standard_deviation: float
title: str
def log(self) -> None:
logger = get_dpa_calculator_logger()
logger.info(f'\nResults for {self.title}:')
logger.info(f'\t50th percentile: {self.percentile_50}')
logger.info(f'\t95th percentile: {self.percentile_95}')
logger.info(f'\tStandard Deviation: {self.standard_deviation}')
logger.info(f'\tMinimum: {self.minimum}')
logger.info(f'\tMaximum: {self.maximum}')
def run_monte_carlo_simulation(functions_to_run: List[Callable[[], float]], number_of_iterations: int, percentile: int = 50) -> List[float]:
results = []
for i in range(number_of_iterations):
logger = get_dpa_calculator_logger()
logger.info(f'Monte Carlo iteration {i + 1}')
iteration_results = [function_to_run() for function_to_run in functions_to_run]
results.append(iteration_results)
results_per_function = asarray(results).transpose()
_log_results(results=results_per_function)
return [get_percentile(results=iteration_results, percentile=percentile) for iteration_results in results_per_function]
def _log_results(results: numpy.ndarray) -> None:
simulation_statistics = [SimulationStatistics(
percentile_50=get_percentile(results=iteration_results, percentile=50),
percentile_95=get_percentile(results=iteration_results, percentile=95),
maximum=max(iteration_results),
minimum=min(iteration_results),
standard_deviation=stdev(iteration_results) if len(iteration_results) > 1 else 0,
title='UEs' if index else 'APs'
)
for index, iteration_results in enumerate(results.tolist())]
for statistics in simulation_statistics:
statistics.log()
def get_percentile(results: List[float], percentile: int) -> float:
return numpy.percentile(results, percentile, interpolation='lower')
def get_dpa_calculator_logger() -> logging.Logger:
return logging.getLogger(DPA_CALCULATOR_LOGGER_NAME)
def get_script_directory(file: str) -> Path:
return Path(os.path.dirname(os.path.realpath(file)))
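# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: runs the Monte Carlo
# helper with two trivial callables standing in for the per-iteration AP and
# UE calculations. The callables, seed and iteration count are illustrative
# only; the helper returns one percentile value per callable.
def _demo_run_monte_carlo_simulation():
    import random
    random.seed(0)
    functions_to_run = [lambda: random.random(), lambda: 10 * random.random()]
    return run_monte_carlo_simulation(functions_to_run=functions_to_run,
                                      number_of_iterations=5,
                                      percentile=95)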
| 37.914729
| 140
| 0.757309
|
ec2d4962aa934506378d6b6dbe75c02eb2aec358
| 11,269
|
py
|
Python
|
conans/test/unittests/client/build/msbuild_test.py
|
a4z/conan
|
dec9e0288f81462c53b9222a206002fbc525ea65
|
[
"MIT"
] | 1
|
2021-05-29T16:44:56.000Z
|
2021-05-29T16:44:56.000Z
|
conans/test/unittests/client/build/msbuild_test.py
|
a4z/conan
|
dec9e0288f81462c53b9222a206002fbc525ea65
|
[
"MIT"
] | 1
|
2020-12-21T10:06:30.000Z
|
2020-12-21T10:06:30.000Z
|
conans/test/unittests/client/build/msbuild_test.py
|
a4z/conan
|
dec9e0288f81462c53b9222a206002fbc525ea65
|
[
"MIT"
] | 1
|
2021-08-20T19:47:51.000Z
|
2021-08-20T19:47:51.000Z
|
import os
import re
import unittest
from parameterized import parameterized
from conans.client import tools
from conans.client.build.msbuild import MSBuild
from conans.test.utils.mocks import MockSettings, MockConanfile
class MSBuildTest(unittest.TestCase):
def test_dont_mess_with_build_type(self):
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"arch": "x86_64",
"compiler.runtime": "MDd"})
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
self.assertEqual(msbuild.build_env.flags, [])
template = msbuild._get_props_file_contents()
self.assertNotIn("-Ob0", template)
self.assertNotIn("-Od", template)
msbuild.build_env.flags = ["-Zi"]
template = msbuild._get_props_file_contents()
self.assertNotIn("-Ob0", template)
self.assertNotIn("-Od", template)
self.assertIn("-Zi", template)
self.assertIn("<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>", template)
def test_skip_only_none_definitions(self):
# https://github.com/conan-io/conan/issues/6728
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"arch": "x86_64",
"compiler.runtime": "MDd"})
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
template = msbuild._get_props_file_contents(definitions={"foo": 0, "bar": False})
self.assertIn("<PreprocessorDefinitions>foo=0;bar=False;%(PreprocessorDefinitions)",
template)
def test_without_runtime(self):
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"arch": "x86_64"})
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
template = msbuild._get_props_file_contents()
self.assertNotIn("<RuntimeLibrary>", template)
def test_custom_properties(self):
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"arch": "x86_64"})
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
command = msbuild.get_command("project_file.sln", properties={"MyProp1": "MyValue1",
"MyProp2": "MyValue2"})
self.assertIn('/p:MyProp1="MyValue1"', command)
self.assertIn('/p:MyProp2="MyValue2"', command)
def test_binary_logging_off_explicit(self):
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"compiler.version": "15",
"arch": "x86_64",
"compiler.runtime": "MDd"})
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
command = msbuild.get_command("dummy.sln", output_binary_log=False)
self.assertNotIn("/bl", command)
def test_binary_logging_off_implicit(self):
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"compiler.version": "15",
"arch": "x86_64",
"compiler.runtime": "MDd"})
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
command = msbuild.get_command("dummy.sln")
self.assertNotIn("/bl", command)
def test_error_targets_argument(self):
conanfile = MockConanfile(MockSettings({}))
msbuild = MSBuild(conanfile)
with self.assertRaises(TypeError):
msbuild.get_command("dummy.sln", targets="sometarget")
@parameterized.expand([("16", "v142"),
("15", "v141"),
("14", "v140"),
("12", "v120"),
("11", "v110"),
("10", "v100"),
("9", "v90"),
("8", "v80")])
def test_default_toolset(self, compiler_version, expected_toolset):
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"compiler.version": compiler_version,
"arch": "x86_64"})
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
command = msbuild.get_command("project_should_flags_test_file.sln")
self.assertIn('/p:PlatformToolset="%s"' % expected_toolset, command)
@parameterized.expand([("v142",),
("v141",),
("v140",),
("v120",),
("v110",),
("v100",),
("v90",),
("v80",)])
def test_explicit_toolset(self, expected_toolset):
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"compiler.version": "15",
"arch": "x86_64"})
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
command = msbuild.get_command("project_should_flags_test_file.sln", toolset=expected_toolset)
self.assertIn('/p:PlatformToolset="%s"' % expected_toolset, command)
@parameterized.expand([("16", "v141_xp"),
("15", "v141_xp"),
("14", "v140_xp"),
("12", "v120_xp"),
("11", "v110_xp")])
def test_custom_toolset(self, compiler_version, expected_toolset):
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"compiler.version": compiler_version,
"compiler.toolset": expected_toolset,
"arch": "x86_64"})
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
command = msbuild.get_command("project_should_flags_test_file.sln")
self.assertIn('/p:PlatformToolset="%s"' % expected_toolset, command)
def test_definitions(self):
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"arch": "x86_64",
"compiler.runtime": "MDd"})
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
template = msbuild._get_props_file_contents(definitions={'_WIN32_WINNT': "0x0501"})
self.assertIn("<PreprocessorDefinitions>"
"_WIN32_WINNT=0x0501;"
"%(PreprocessorDefinitions)</PreprocessorDefinitions>", template)
def test_definitions_no_value(self):
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"arch": "x86_64",
"compiler.runtime": "MDd"})
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
template = msbuild._get_props_file_contents(definitions={'_DEBUG': None})
self.assertIn("<PreprocessorDefinitions>"
"_DEBUG;"
"%(PreprocessorDefinitions)</PreprocessorDefinitions>", template)
def test_verbosity_default(self):
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"arch": "x86_64"})
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
command = msbuild.get_command("projecshould_flags_testt_file.sln")
self.assertIn('/verbosity:minimal', command)
def test_verbosity_env(self):
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"arch": "x86_64"})
with tools.environment_append({"CONAN_MSBUILD_VERBOSITY": "detailed"}):
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
command = msbuild.get_command("projecshould_flags_testt_file.sln")
self.assertIn('/verbosity:detailed', command)
def test_verbosity_explicit(self):
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"arch": "x86_64"})
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
command = msbuild.get_command("projecshould_flags_testt_file.sln", verbosity="quiet")
self.assertIn('/verbosity:quiet', command)
def test_properties_injection(self):
# https://github.com/conan-io/conan/issues/4471
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"arch": "x86_64"})
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
command = msbuild.get_command("dummy.sln", props_file_path="conan_build.props")
match = re.search('/p:ForceImportBeforeCppTargets="(.+?)"', command)
self.assertTrue(
match, "Haven't been able to find the ForceImportBeforeCppTargets")
props_file_path = match.group(1)
self.assertTrue(os.path.isabs(props_file_path))
self.assertEqual(os.path.basename(props_file_path), "conan_build.props")
def test_windows_ce(self):
settings = MockSettings({"build_type": "Debug",
"compiler": "Visual Studio",
"compiler.version": "9",
"os": "WindowsCE",
"os.platform": "YOUR PLATFORM SDK (ARMV4)",
"arch": "armv4"})
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
command = msbuild.get_command("test.sln")
self.assertIn('/p:Platform="YOUR PLATFORM SDK (ARMV4)"', command)
def test_intel(self):
settings = MockSettings({"build_type": "Debug",
"compiler": "intel",
"compiler.version": "19.1",
"compiler.base": "Visual Studio",
"compiler.base.version": "15",
"arch": "x86_64"})
expected_toolset = "Intel C++ Compiler 19.1"
conanfile = MockConanfile(settings)
msbuild = MSBuild(conanfile)
command = msbuild.get_command("project_should_flags_test_file.sln")
self.assertIn('/p:PlatformToolset="%s"' % expected_toolset, command)
| 46.566116
| 101
| 0.530127
|
8e2bf82ff96feffee1a5763c2a2125411edcdab8
| 4,527
|
py
|
Python
|
dataio.py
|
entn-at/lcnn
|
797d8847fad6d1179866ac2d7d7402240483123b
|
[
"BSD-3-Clause"
] | 15
|
2018-11-04T18:35:41.000Z
|
2021-01-10T14:18:54.000Z
|
dataio.py
|
entn-at/lcnn
|
797d8847fad6d1179866ac2d7d7402240483123b
|
[
"BSD-3-Clause"
] | 2
|
2018-11-02T20:15:53.000Z
|
2019-04-13T06:55:01.000Z
|
dataio.py
|
entn-at/lcnn
|
797d8847fad6d1179866ac2d7d7402240483123b
|
[
"BSD-3-Clause"
] | 5
|
2018-10-07T12:44:16.000Z
|
2019-07-16T08:29:44.000Z
|
###########################################################
## reading and saving data #
###########################################################
## Copyright (c) 2018, National Institute of Informatics #
## Author: Fuming Fang #
## Affiliation: National Institute of Informatics #
## Email: fang@nii.ac.jp #
###########################################################
# -*- coding: utf-8 -*-
import numpy as np
import os
from sklearn.utils import shuffle
import sys
import struct
class dataio(object):
def __init__(self, train_genuine, train_spoof, dev_genuine, dev_spoof,
test_data=None, batch_size=32):
if train_genuine is not None and train_spoof is not None:
self.training_data, self.label = self.load_data(train_genuine, train_spoof)
self.frames = len(self.training_data)
self.batch_size = min(batch_size, self.frames)
self.max_index = self.frames - self.batch_size
if dev_genuine is not None and dev_spoof is not None:
self.dev_data, self.dev_label = self.load_data(dev_genuine, dev_spoof)
self.dev_frames = len(self.dev_data)
self.current_dev_index = 0
self.dev_batch_size = min(64, self.dev_frames)
self.dev_iterations = (self.dev_frames - 1)//self.dev_batch_size + 1
if test_data is not None:
self.test_data, self.test_names = self.load_test_data(test_data, 400)
self.test_frames = len(self.test_data)
def load_data(self, scp_genuine, scp_spoof):
if scp_genuine is not None:
genuine = self._load_data(scp_genuine, 400)
genuine = np.reshape(genuine, (-1, 864, 400, 1))
genuine_lab = np.zeros((len(genuine), 2), dtype=np.float32)
genuine_lab[:, 0] = 1.0
if scp_spoof is not None:
spoof = self._load_data(scp_spoof, 400)
spoof = np.reshape(spoof, (-1, 864, 400, 1))
spoof_lab = np.zeros((len(spoof), 2), dtype=np.float32)
spoof_lab[:, 1] = 1.0
if scp_genuine is not None and scp_spoof is not None:
x = np.concatenate((genuine, spoof), axis=0)
y = np.concatenate((genuine_lab, spoof_lab), axis=0)
elif scp_genuine is not None and scp_spoof is None:
x = genuine
y = genuine_lab
elif scp_genuine is None and scp_spoof is not None:
x = spoof
y = spoof_lab
else:
raise NotImplementedError
return x, y
def _load_data(self, scp_path, dim):
scp = np.loadtxt(scp_path, dtype=str)
total_frames = 0
for name in scp:
            total_frames += os.path.getsize(name) // 4 // dim
data = np.zeros((total_frames, dim), dtype=np.float32)
idx = 0
for name in scp:
with open(name, 'rb') as f:
v = f.read()
v = np.frombuffer(v, dtype=np.float32)
v = np.reshape(v, (-1, dim))
data[idx:idx+len(v)] = v
idx += len(v)
return data
def load_test_data(self, scp_path, dim):
scp = np.loadtxt(scp_path, dtype=str)
test_data = list()
for name in scp:
with open(name, 'rb') as f:
v = f.read()
v = np.frombuffer(v, dtype=np.float32)
v = np.reshape(v, (-1, 864, dim, 1))
test_data.append(v)
return test_data, scp
def shuffle(self):
self.training_data, self.label = shuffle(self.training_data, self.label)
def batch(self):
rand_v = np.random.randint(self.max_index)
x = self.training_data[rand_v:rand_v+self.batch_size]
y = self.label[rand_v:rand_v+self.batch_size]
return x, y
def dev_batch(self):
s = self.current_dev_index
e = s + self.dev_batch_size
if e > self.dev_frames:
e = self.dev_frames
x = self.dev_data[s:e]
y = self.dev_label[s:e]
if e >= self.dev_frames:
self.current_dev_index = 0
else:
self.current_dev_index = e
return x, y
def save_data(self, name, data):
with open(name,'wb') as f:
f.write(struct.pack('f'*data.size, *data.flat))
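# ---------------------------------------------------------------------------
# Hedged round-trip sketch, not part of the original module: writes two small
# float32 files with save_data, lists them in a temporary .scp file, and reads
# them back with load_test_data. The feature dimension of 4 is illustrative
# only; the pipeline above uses dim=400 with frames of 864 rows.
def _demo_dataio_roundtrip():
    import os
    import tempfile
    import numpy as np
    io = dataio(None, None, None, None)  # all loading branches are skipped
    workdir = tempfile.mkdtemp()
    dim = 4
    paths = []
    for i in range(2):
        arr = np.arange(864 * dim, dtype=np.float32) + i
        path = os.path.join(workdir, 'utt%d.bin' % i)
        io.save_data(path, arr)
        paths.append(path)
    scp_path = os.path.join(workdir, 'test.scp')
    with open(scp_path, 'w') as f:
        f.write('\n'.join(paths) + '\n')
    test_data, names = io.load_test_data(scp_path, dim)
    assert test_data[0].shape == (1, 864, dim, 1)
    return test_data, names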
| 34.557252
| 87
| 0.529048
|
143057a8378260c0a815a635b2c576b94e5c220a
| 35
|
py
|
Python
|
fourmis/__init__.py
|
teodam/openedx-manhill
|
01892cab98cac949c0a98940eda6c6ea672c608b
|
[
"MIT"
] | null | null | null |
fourmis/__init__.py
|
teodam/openedx-manhill
|
01892cab98cac949c0a98940eda6c6ea672c608b
|
[
"MIT"
] | null | null | null |
fourmis/__init__.py
|
teodam/openedx-manhill
|
01892cab98cac949c0a98940eda6c6ea672c608b
|
[
"MIT"
] | null | null | null |
from .fourmis import FourmisXBlock
| 17.5
| 34
| 0.857143
|
1ab0cd75cbaac355b51950e5cbb5e1155d753d9e
| 39,940
|
py
|
Python
|
kats/utils/backtesters.py
|
vishalbelsare/Kats
|
b555e97c84662b8b92b23fd3687c72583e0e9a47
|
[
"MIT"
] | null | null | null |
kats/utils/backtesters.py
|
vishalbelsare/Kats
|
b555e97c84662b8b92b23fd3687c72583e0e9a47
|
[
"MIT"
] | null | null | null |
kats/utils/backtesters.py
|
vishalbelsare/Kats
|
b555e97c84662b8b92b23fd3687c72583e0e9a47
|
[
"MIT"
] | null | null | null |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This file defines the BackTester classes for Kats.
Kats supports multiple types of backtesters, including:
- :class:`BackTesterSimple` (basic train & test backtesting).
- :class:`BackTesterFixedWindow` (discontinuous train & test data).
- :class:`BackTesterExpandingWindow` (increasing train window size over
multiple iterations).
- :class:`BackTesterRollingWindow` (sliding train & test windows over
multiple iterations).
This module also supports :class:`CrossValidation` with both expanding and
rolling windows.
For more information, check out the Kats tutorial notebook on backtesting!
"""
import logging
import multiprocessing as mp
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple, Type, TYPE_CHECKING
import numpy as np
import pandas as pd
from kats.consts import Params, TimeSeriesData
from kats.metrics.metrics import core_metric, CoreMetric
if TYPE_CHECKING:
from kats.models.model import Model
class BackTesterParent(ABC):
"""
This class defines the parent functions for various backtesting methods.
Attributes:
error_methods: List of strings indicating which errors to calculate
(see `kats.metrics` for exhaustive list).
data: :class:`kats.consts.TimeSeriesData` object to perform backtest on.
params: Parameters to train model with.
model_class: Defines the model type to use for backtesting.
multi: Boolean flag to use multiprocessing (if set to True).
offset: Gap between train/test datasets (default 0).
results: List of tuples `(training_data, testing_data, trained_model,
forecast_predictions)` storing forecast results.
errors: Dictionary mapping the error type to value.
size: An integer for the total number of datapoints.
freq: A string representing the (inferred) frequency of the
`pandas.DataFrame`.
Raises:
ValueError: The time series is empty or an invalid error type was passed.
"""
error_methods: List[Tuple[str, CoreMetric]]
data: TimeSeriesData
# pyre-fixme[24]: Generic type `type` expects 1 type parameter, use
# `typing.Type` to avoid runtime subscripting errors.
model_class: Type
params: Params
multi: bool
offset: int
results: List[Tuple[np.ndarray, np.ndarray, "Model[Params]", np.ndarray]]
errors: Dict[str, float]
size: int
freq: Optional[str]
raw_errors: List[np.ndarray]
def __init__(
self,
error_methods: List[str],
data: TimeSeriesData,
params: Params,
# pyre-fixme[24]: Generic type `type` expects 1 type parameter, use
# `typing.Type` to avoid runtime subscripting errors.
model_class: Type,
multi: bool,
offset: int = 0,
**kwargs: Any,
) -> None:
self.size = size = len(data.time)
if not size:
msg = "Passed an empty time series"
logging.error(msg)
raise ValueError(msg)
self.data = data
self.model_class = model_class
self.params = params
self.multi = multi
self.offset = offset
self.results = []
# Handling frequency
if "freq" in kwargs:
self.freq = kwargs["freq"]
else:
logging.info("Inferring frequency")
self.freq = pd.infer_freq(self.data.time)
self.raw_errors = []
methods = []
errors = {}
for error in error_methods:
try:
methods.append((error, core_metric(error)))
errors[error] = 0.0
except ValueError:
msg = f"Unsupported error function {error}"
logging.error(msg)
raise ValueError(msg)
self.errors = errors
self.error_methods = methods
logging.info("Instantiated BackTester")
if kwargs:
logging.info(
"Additional arguments: {0}".format(
(", ".join(["{}={!r}".format(k, v) for k, v in kwargs.items()]))
)
)
logging.info("Model type: {0}".format(self.model_class))
logging.info("Error metrics: {0}".format(error_methods))
super().__init__()
def calc_error(self) -> Optional[float]:
"""
Calculates all errors in `self.error_methods` and stores them in the
errors dict.
Returns:
The error value. None if the error value does not exist.
"""
logging.info("Calculating Errors")
if len(self.results) <= 0:
logging.error("Empty forecast")
raise ValueError("No results from forecast")
# Storing total length of predictions for weighting fold errors
total_fold_length = sum(result[1].size for result in self.results)
for result in self.results:
if len(result) != 4:
logging.error("Invalid result: {0}".format(result))
raise ValueError("Invalid result")
training_inputs, truth, _, predictions = result
# Storing raw errors
self.raw_errors.append(truth - predictions)
if training_inputs.size <= 0:
logging.error("No training data provided ")
raise ValueError("Not enough training data")
if predictions.size <= 0:
logging.error("No predictions provided")
raise ValueError("Not enough predictions")
if truth.size <= 0:
logging.error("No ground truth data provided")
raise ValueError("Not enough ground truth data")
if predictions.size != truth.size:
logging.error("Unequal amount of labels and predictions")
raise ValueError("Incorrect dimensionality of predictions & labels")
for name, method in self.error_methods:
# Weighting the errors by the relative fold length if
# predictions are of different sizes
weight = float(len(truth)) / total_fold_length
self.errors[name] += weight * method(truth, predictions)
def _create_model(
self,
training_data_indices: Tuple[int, int],
testing_data_indices: Tuple[int, int],
) -> Optional[Tuple[np.ndarray, np.ndarray, "Model[Params]", np.ndarray]]:
"""
Trains model, evaluates it, and stores results in results list.
"""
training_data_start, training_data_end = training_data_indices
testing_data_start, testing_data_end = testing_data_indices
logging.info("Creating TimeSeries train test objects for split")
logging.info(
"Train split of {0}, {1}".format(training_data_start, training_data_end)
)
logging.info(
"Test split of {0}, {1}".format(testing_data_start, testing_data_end)
)
if (
training_data_start < 0
or training_data_start > self.size
or training_data_end < 0
or training_data_end > self.size
):
logging.error(
"Train Split of {0}, {1} was invalid".format(
training_data_start, training_data_end
)
)
raise ValueError("Invalid training data indices in split")
if (
testing_data_start < 0
or testing_data_start > self.size
or testing_data_end < 0
or testing_data_end > self.size
):
logging.error(
"Test Split of {0}, {1} was invalid".format(
testing_data_start, testing_data_end
)
)
raise ValueError("Invalid testing data indices in split")
training_data = TimeSeriesData(
pd.DataFrame(
{
"time": self.data.time[training_data_start:training_data_end],
"y": self.data.value[training_data_start:training_data_end],
}
)
)
testing_data = TimeSeriesData(
pd.DataFrame(
{
"time": self.data.time[testing_data_start:testing_data_end],
"y": self.data.value[testing_data_start:testing_data_end],
}
)
)
if training_data.value.size <= 0:
logging.error("No training data provided ")
raise ValueError("Not enough training data")
if testing_data.value.size <= 0:
logging.error("No testing data provided ")
raise ValueError("Not enough testing data")
logging.info("Training model")
train_model = self.model_class(data=training_data, params=self.params)
train_model.fit()
logging.info("Making forecast prediction")
fcst = train_model.predict(
steps=testing_data.value.size + self.offset, freq=self.freq
)
train_data_only = np.array(training_data.value)
truth = np.array(testing_data.value)
predictions = np.array(fcst["fcst"])
if self.offset:
predictions = predictions[self.offset :]
if not self.multi:
self.results.append((train_data_only, truth, train_model, predictions))
else:
return (train_data_only, truth, train_model, predictions)
def _build_and_train_models(
self, splits: Tuple[List[Tuple[int, int]], List[Tuple[int, int]]]
) -> None:
training_splits, testing_splits = splits
num_splits = len(training_splits)
if not self.multi:
for train_split, test_split in zip(training_splits, testing_splits):
self._create_model(train_split, test_split)
else:
pool = mp.Pool(processes=num_splits)
futures = [
pool.apply_async(self._create_model, args=(train_split, test_split))
for train_split, test_split in zip(training_splits, testing_splits)
]
self.results = results = []
for fut in futures:
result = fut.get()
assert result is not None
results.append(result)
pool.close()
def run_backtest(self) -> None:
"""Executes backtest."""
self._build_and_train_models(self._create_train_test_splits())
self.calc_error()
def get_error_value(self, error_name: str) -> float:
"""Gets requested error value.
Args:
error_name: A string of the error whose value should be returned.
Returns:
            A float of the error value.
Raises:
ValueError: The error name is invalid.
"""
if error_name in self.errors:
return self.errors[error_name]
else:
logging.error("Invalid error name: {0}".format(error_name))
raise ValueError("Invalid error name")
@abstractmethod
def _create_train_test_splits(
self,
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, int]]]:
raise NotImplementedError()
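# ---------------------------------------------------------------------------
# Hedged sketch, not part of the original module: the smallest concrete
# subclass one could write. The only hook a backtester must provide is
# _create_train_test_splits(), returning parallel lists of (start, end) index
# pairs; run_backtest() then trains one model per pair and aggregates the
# errors. The fixed 80/20 split below is illustrative only.
class _LastFoldBackTester(BackTesterParent):
    """Single fold: the first 80% of points train, the remainder tests."""
    def __init__(self, error_methods, data, params, model_class, **kwargs):
        super().__init__(error_methods, data, params, model_class, False, **kwargs)
    def _create_train_test_splits(
        self,
    ) -> Tuple[List[Tuple[int, int]], List[Tuple[int, int]]]:
        train_size = int(self.size * 0.8)
        return [(0, train_size)], [(train_size, self.size)]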
class BackTesterSimple(BackTesterParent):
"""Defines the functions to execute a simple train/test backtest.
Attributes:
train_percentage: A float for the percentage of data used for training.
test_percentage: A float for the percentage of data used for testing.
error_methods: List of strings indicating which errors to calculate
(see `kats.metrics` for exhaustive list).
data: :class:`kats.consts.TimeSeriesData` object to perform backtest on.
params: Parameters to train model with.
model_class: Defines the model type to use for backtesting.
results: List of tuples `(training_data, testing_data, trained_model,
forecast_predictions)` storing forecast results.
errors: Dictionary mapping the error type to value.
size: An integer for the total number of datapoints.
error_funcs: Dictionary mapping error name to the
function that calculates it.
freq: A string representing the (inferred) frequency of the
`pandas.DataFrame`.
raw_errors: List storing raw errors (truth - predicted).
Raises:
ValueError: Invalid train and/or test params passed. Or the time series
is empty.
Sample Usage:
>>> df = pd.read_csv("kats/data/air_passengers.csv")
>>> ts = TimeSeriesData(df=df)
>>> params = ARIMAParams(p=1, d=1, q=1)
>>> all_errors = ["mape", "smape", "mae", "mase", "mse", "rmse"]
>>> backtester = BackTesterSimple(
error_methods=all_errors,
data=ts,
params=params,
train_percentage=75,
test_percentage=25,
model_class=ARIMAModel,
)
>>> backtester.run_backtest()
>>> mape = backtester.get_error_value("mape") # Retrieve MAPE error
"""
def __init__(
self,
error_methods: List[str],
data: TimeSeriesData,
params: Params,
train_percentage: float,
test_percentage: float,
# pyre-fixme[24]: Generic type `type` expects 1 type parameter, use
# `typing.Type` to avoid runtime subscripting errors.
model_class: Type,
**kwargs: Any,
) -> None:
logging.info("Initializing train/test percentages")
if train_percentage <= 0:
logging.error("Non positive training percentage")
raise ValueError("Invalid training percentage")
elif train_percentage > 100:
logging.error("Too large training percentage")
raise ValueError("Invalid training percentage")
self.train_percentage = train_percentage
if test_percentage <= 0:
logging.error("Non positive test percentage")
raise ValueError("Invalid test percentage")
elif test_percentage > 100:
logging.error("Too large test percentage")
raise ValueError("Invalid test percentage")
self.test_percentage = test_percentage
logging.info("Calling parent class constructor")
super().__init__(error_methods, data, params, model_class, False, **kwargs)
def _create_train_test_splits(
self,
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, int]]]:
"""Creates train/test folds for the backtest."""
logging.info("Creating train test splits")
train_size = _get_absolute_size(self.size, self.train_percentage)
test_size = _get_absolute_size(self.size, self.test_percentage)
if train_size <= 0 or train_size >= self.size:
logging.error("Invalid training size: {0}".format(train_size))
logging.error("Training Percentage: {0}".format(self.train_percentage))
raise ValueError("Incorrect training size")
if test_size <= 0 or test_size >= self.size:
logging.error("Invalid testing size: {0}".format(test_size))
logging.error("Testing Percentage: {0}".format(self.test_percentage))
raise ValueError("Incorrect testing size")
if train_size + test_size > self.size:
logging.error("Training and Testing sizes too big")
logging.error("Training size: {0}".format(train_size))
logging.error("Training Percentage: {0}".format(self.train_percentage))
logging.error("Testing size: {0}".format(test_size))
logging.error("Testing Percentage: {0}".format(self.test_percentage))
raise ValueError("Incorrect training and testing sizes")
return [(0, train_size)], [(train_size, train_size + test_size)]
class BackTesterRollingOrigin(BackTesterParent):
"""Defines functions to execute an rolling origin backtest.
A rolling forecast origin backtest conducts a backtest over multiple
iterations, wherein each iteration, the "forecasting origin"
(the location separating training and testing datasets) "slides" forward
by a fixed amount.
    By default, the size of the training dataset increases at each iteration,
    while the test dataset "slides" forward to accommodate.
    Alternatively, the size of the training dataset can be held constant, in
    which case the start of the training dataset moves forward at each
    iteration by the same amount as the forecast origin.
    Iterations continue until the complete data set has been used to either
    train or test in the final iteration.
    This procedure is also known in the literature as a rolling origin
    evaluation with a continuously increasing in-sample size (train set) and
    a constant out-of-sample size (test set).
For more information, check out the Kats tutorial notebooks!
Attributes:
start_train_percentage: A float for the initial percentage of data used
for training. (The train percentage at the end will be 100 -
test_percentage)
test_percentage: A float for the percentage of data used for testing.
(The test set is taken at sliding positions from start_train_percentage
up to the end of the dataset - only the last fold is at the very end.)
expanding_steps: An integer for the number of expanding steps (i.e.
number of folds).
error_methods: List of strings indicating which errors to calculate
(see `kats.metrics` for exhaustive list).
data: :class:`kats.consts.TimeSeriesData` object to perform backtest on.
params: Parameters to train model with.
model_class: Defines the model type to use for backtesting.
constant_train_size: A bool defining if the training data size should be
constant instead of expanding it at each iteration (default False).
multi: A boolean flag to toggle multiprocessing (default True).
results: List of tuples `(training_data, testing_data, trained_model,
forecast_predictions)` storing forecast results.
errors: Dictionary mapping the error type to value.
size: An integer for the total number of datapoints.
freq: A string representing the (inferred) frequency of the
`pandas.DataFrame`.
raw_errors: List storing raw errors (truth - predicted).
Raises:
ValueError: One or more of the train, test, or expanding steps params
were invalid. Or the time series is empty.
Sample Usage:
>>> df = pd.read_csv("kats/data/air_passengers.csv")
>>> ts = TimeSeriesData(df=df)
>>> params = ARIMAParams(p=1, d=1, q=1)
>>> all_errors = ["mape", "smape", "mae", "mase", "mse", "rmse"]
>>> backtester = BackTesterRollingOrigin(
error_methods=all_errors,
data=ts,
params=params,
start_train_percentage=50,
test_percentage=25,
expanding_steps=3,
model_class=ARIMAModel,
constant_train_size=False,
)
>>> backtester.run_backtest()
>>> mape = backtester.get_error_value("mape") # Retrieve MAPE error
"""
def __init__(
self,
error_methods: List[str],
data: TimeSeriesData,
params: Params,
start_train_percentage: float,
test_percentage: float,
expanding_steps: int,
# pyre-fixme[24]: Generic type `type` expects 1 type parameter, use
# `typing.Type` to avoid runtime subscripting errors.
model_class: Type,
constant_train_size: bool = False,
multi: bool = True,
**kwargs: Any,
) -> None:
logging.info("Initializing train/test percentages")
if start_train_percentage <= 0:
logging.error("Non positive start training percentage")
raise ValueError("Invalid start training percentage")
elif start_train_percentage > 100:
logging.error("Too large start training percentage")
raise ValueError("Invalid end training percentage")
self.start_train_percentage = start_train_percentage
if test_percentage <= 0:
logging.error("Non positive test percentage")
raise ValueError("Invalid test percentage")
elif test_percentage > 100:
logging.error("Too large test percentage")
raise ValueError("Invalid test percentage")
self.test_percentage = test_percentage
if start_train_percentage + test_percentage > 100:
logging.error("Too large combined train and test percentage")
raise ValueError( # noqa
"Invalid training and testing percentage combination."
)
elif start_train_percentage + test_percentage == 100:
if expanding_steps > 1:
logging.error(
"Too large combined train and test percentage for "
"%s expanding steps.",
expanding_steps,
)
raise ValueError(
"Invalid trraining and testing percentage combination "
f"given for {expanding_steps} expanding steps"
)
if expanding_steps < 0:
logging.error("Non positive expanding steps")
raise ValueError("Invalid expanding steps")
self.expanding_steps = expanding_steps
self.constant_train_size = constant_train_size
logging.info("Calling parent class constructor")
super().__init__(error_methods, data, params, model_class, multi, **kwargs)
def _create_train_test_splits(
self,
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, int]]]:
"""Creates train/test folds for the backtest."""
logging.info("Creating train test splits")
start_train_size = _get_absolute_size(self.size, self.start_train_percentage)
test_size = _get_absolute_size(self.size, self.test_percentage)
if start_train_size <= 0 or start_train_size >= self.size:
logging.error(
"Invalid starting training size: {0}".format(start_train_size)
)
logging.error(
"Start Training Percentage: {0}".format(self.start_train_percentage)
)
raise ValueError("Incorrect starting training size")
if test_size <= 0 or test_size >= self.size:
logging.error("Invalid testing size: {0}".format(test_size))
logging.error("Testing Percentage: {0}".format(self.test_percentage))
raise ValueError("Incorrect testing size")
        if start_train_size + test_size > self.size:
logging.error("Training and Testing sizes too big")
logging.error("Start Training size: {0}".format(start_train_size))
logging.error(
"Start Training Percentage: {0}".format(self.start_train_percentage)
)
logging.error("Testing size: {0}".format(test_size))
logging.error("Testing Percentage: {0}".format(self.test_percentage))
raise ValueError("Incorrect training and testing sizes")
elif start_train_size + test_size == self.size:
if self.expanding_steps > 1:
logging.error(
"Training and Testing sizes too big " "for multiple steps"
)
logging.error("Start Training size: {0}".format(start_train_size))
logging.error(
"Start Training Percentage: {0}".format(self.start_train_percentage)
)
logging.error("Testing size: {0}".format(test_size))
logging.error("Testing Percentage: {0}".format(self.test_percentage))
logging.error("Expanding steps: {}".format(self.expanding_steps))
raise ValueError(
"Incorrect training and testing sizes " "for multiple steps"
)
# Handling edge case where only 1 fold is needed (same as BackTesterSimple)
if self.expanding_steps == 1:
return (
[(0, start_train_size)],
[(start_train_size, start_train_size + test_size)],
)
train_splits = []
test_splits = []
offsets = _return_fold_offsets(
0, self.size - start_train_size - test_size, self.expanding_steps
)
for offset in offsets:
skip_size = 0
if self.constant_train_size:
skip_size = offset
train_splits.append((skip_size, int(start_train_size + offset)))
test_splits.append(
(
int(start_train_size + offset),
int(start_train_size + offset + test_size),
)
)
return train_splits, test_splits
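# ---------------------------------------------------------------------------
# Hedged illustration, not part of the original module, of the kind of fold
# layout built above for a toy series of 100 points with
# start_train_percentage=50, test_percentage=25 and 3 expanding steps. The
# offsets here are spread evenly with plain integer arithmetic purely for
# illustration; the real spacing comes from _return_fold_offsets and
# _get_absolute_size.
def _sketch_rolling_origin_folds(size=100, start_train=50, test=25, steps=3,
                                 constant_train_size=False):
    folds = []
    for step in range(steps):
        offset = 0 if steps == 1 else step * (size - start_train - test) // (steps - 1)
        train_start = offset if constant_train_size else 0
        train_end = start_train + offset
        folds.append(((train_start, train_end), (train_end, train_end + test)))
    return folds
# With the defaults (expanding origin) this yields:
#   [((0, 50), (50, 75)), ((0, 62), (62, 87)), ((0, 75), (75, 100))]
# and with constant_train_size=True the train window slides instead:
#   [((0, 50), (50, 75)), ((12, 62), (62, 87)), ((25, 75), (75, 100))]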
class BackTesterExpandingWindow(BackTesterRollingOrigin):
"""Defines functions to exectute an expanding window backtest.
This class will be deprecated soon. Please use `BackTesterRollingOrigin`
with param `constant_train_size = True`.
"""
def __init__(
self,
error_methods: List[str],
data: TimeSeriesData,
params: Params,
start_train_percentage: float,
end_train_percentage: float,
test_percentage: float,
expanding_steps: int,
# pyre-fixme[24]: Generic type `type` expects 1 type parameter, use
# `typing.Type` to avoid runtime subscripting errors.
model_class: Type,
multi: bool = True,
**kwargs: Any,
) -> None:
logging.info(
"BackTesterExpandingWindow will be deprecated. Please use the "
"updated API found in BackTesterRollingOrigin."
)
super().__init__(
error_methods=error_methods,
data=data,
params=params,
start_train_percentage=start_train_percentage,
test_percentage=test_percentage,
expanding_steps=expanding_steps,
model_class=model_class,
multi=multi,
constant_train_size=False,
**kwargs,
)
class BackTesterRollingWindow(BackTesterRollingOrigin):
"""Defines functions to execute a rolling window backtest.
This class will be deprecated soon. Please use `BackTesterRollingOrigin`
    with param `constant_train_size = True`.
"""
def __init__(
self,
error_methods: List[str],
data: TimeSeriesData,
params: Params,
train_percentage: float,
test_percentage: float,
sliding_steps: int,
# pyre-fixme[24]: Generic type `type` expects 1 type parameter, use
# `typing.Type` to avoid runtime subscripting errors.
model_class: Type,
multi: bool = True,
**kwargs: Any,
) -> None:
logging.info(
"BackTesterRollingWindow will be deprecated. Please use the "
"updated API found in BackTesterRollingOrigin."
)
super().__init__(
error_methods=error_methods,
data=data,
params=params,
start_train_percentage=train_percentage,
test_percentage=test_percentage,
expanding_steps=sliding_steps,
model_class=model_class,
multi=multi,
constant_train_size=True,
**kwargs,
)
class BackTesterFixedWindow(BackTesterParent):
"""Defines functions to execute a fixed window ahead backtest.
A fixed window ahead backtest is similar to a standard (i.e. simple)
backtest, with the caveat that there is a gap between the train and test
    data sets. The purpose of this type of backtest is to focus on the long-range
forecasting ability of the model.
Attributes:
train_percentage: A float for the percentage of data used for training.
test_percentage: A float for the percentage of data used for testing.
window_percentage: A float for the percentage of data used for the
fixed window.
error_methods: List of strings indicating which errors to calculate
(see `kats.metrics` for exhaustive list).
data: :class:`kats.consts.TimeSeriesData` object to perform backtest on.
params: Parameters to train model with.
model_class: Defines the model type to use for backtesting.
results: List of tuples `(training_data, testing_data, trained_model,
forecast_predictions)` storing forecast results.
errors: Dictionary mapping the error type to value.
size: An integer for the total number of datapoints.
freq: A string representing the (inferred) frequency of the
`pandas.DataFrame`.
raw_errors: List storing raw errors (truth - predicted).
Raises:
ValueError: One or more of the train, test, or fixed window params were
invalid. Or the time series is empty.
Sample Usage:
>>> df = pd.read_csv("kats/data/air_passengers.csv")
>>> ts = TimeSeriesData(df=df)
>>> params = ARIMAParams(p=1, d=1, q=1)
>>> all_errors = ["mape", "smape", "mae", "mase", "mse", "rmse"]
>>> backtester = BackTesterFixedWindow(
error_methods=all_errors,
data=ts,
params=params,
train_percentage=50,
test_percentage=25,
window_percentage=25,
model_class=ARIMAModel,
)
>>> backtester.run_backtest()
>>> mape = backtester.get_error_value("mape") # Retrieve MAPE error
"""
def __init__(
self,
error_methods: List[str],
data: TimeSeriesData,
params: Params,
train_percentage: float,
test_percentage: float,
window_percentage: int,
# pyre-fixme[24]: Generic type `type` expects 1 type parameter, use
# `typing.Type` to avoid runtime subscripting errors.
model_class: Type,
**kwargs: Any,
) -> None:
logging.info("Initializing train/test percentages")
if train_percentage <= 0:
logging.error("Non positive training percentage")
raise ValueError("Invalid training percentage")
elif train_percentage > 100:
logging.error("Too large training percentage")
raise ValueError("Invalid training percentage")
self.train_percentage = train_percentage
if test_percentage <= 0:
logging.error("Non positive test percentage")
raise ValueError("Invalid test percentage")
elif test_percentage > 100:
logging.error("Too large test percentage")
raise ValueError("Invalid test percentage")
self.test_percentage = test_percentage
if window_percentage < 0:
logging.error("Non positive window percentage")
raise ValueError("Invalid window percentage")
elif window_percentage > 100:
logging.error("Too large window percentage")
raise ValueError("Invalid window percentage")
self.window_percentage = window_percentage
offset = _get_absolute_size(len(data.time), self.window_percentage)
logging.info("Calling parent class constructor")
super().__init__(
error_methods, data, params, model_class, False, offset, **kwargs
)
def _create_train_test_splits(
self,
) -> Tuple[List[Tuple[int, int]], List[Tuple[int, int]]]:
"""Creates train/test folds for the backtest."""
logging.info("Creating train test splits")
train_size = _get_absolute_size(self.size, self.train_percentage)
test_size = _get_absolute_size(self.size, self.test_percentage)
window_size = _get_absolute_size(self.size, self.window_percentage)
if train_size <= 0 or train_size >= self.size:
logging.error("Invalid training size: {0}".format(train_size))
logging.error("Training Percentage: {0}".format(self.train_percentage))
raise ValueError("Incorrect training size")
if test_size <= 0 or test_size >= self.size:
logging.error("Invalid testing size: {0}".format(test_size))
logging.error("Testing Percentage: {0}".format(self.test_percentage))
raise ValueError("Incorrect testing size")
if train_size + test_size + window_size > self.size:
logging.error("Combo of Training, Testing, & Window sizes too big")
logging.error("Training size: {0}".format(train_size))
logging.error("Training Percentage: {0}".format(self.train_percentage))
logging.error("Testing size: {0}".format(test_size))
logging.error("Testing Percentage: {0}".format(self.test_percentage))
logging.error("Window size: {0}".format(window_size))
logging.error("Window Percentage: {0}".format(self.window_percentage))
raise ValueError("Incorrect training, testing, & window sizes")
train_splits = [(0, int(train_size))]
test_splits = [
(int(train_size + window_size), int(train_size + window_size + test_size))
]
return train_splits, test_splits
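# Hedged sketch (not part of the original module): a standalone illustration of
# the fixed-window split arithmetic above. The helper name and sample sizes are
# made up for illustration; this is not a Kats API.
def _fixed_window_split_sketch(n_points, train_pct, window_pct, test_pct):
    """Return (train_range, test_range) index tuples for one fixed-window fold."""
    train_size = int(n_points * train_pct / 100)
    window_size = int(n_points * window_pct / 100)
    test_size = int(n_points * test_pct / 100)
    train = (0, train_size)
    test = (train_size + window_size, train_size + window_size + test_size)
    return train, test
# With 144 monthly points and 50/25/25 percentages this yields
# train=(0, 72) and test=(108, 144), i.e. a 36-point gap between them.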
class CrossValidation:
"""Defines class to execute time series cross validation.
Cross validation is a useful technique to use multiple folds of the
training and testing data to help optimize the performance of the
model (e.g. hyperparameter tuning). For more info on cross validation, see
https://en.wikipedia.org/wiki/Cross-validation_(statistics)
This procedure is also known in literature as a rolling origin evaluation.
Attributes:
train_percentage: A float for the percentage of data used for training.
test_percentage: A float for the percentage of data used for testing.
num_folds: An integer for the number of folds to use.
error_methods: List of strings indicating which errors to calculate
(see `kats.metrics` for exhaustive list).
data: :class:`kats.consts.TimeSeriesData` object to perform backtest on.
params: Parameters to train model with.
model_class: Defines the model type to use for backtesting.
constant_train_size: A boolean flag to keep the train set size constant,
sliding it forward with each iteration instead of expanding the
train set with each iteration (default False).
multi: A boolean flag to toggle multiprocessing (default True).
results: List of tuples `(training_data, testing_data, trained_model,
forecast_predictions)` storing forecast results.
errors: Dictionary mapping the error type to value.
size: An integer for the total number of datapoints.
raw_errors: List storing raw errors (truth - predicted).
Raises:
ValueError: One or more of the train, test, or num_folds params
were invalid. Or the time series is empty.
Sample Usage:
>>> df = pd.read_csv("kats/data/air_passengers.csv")
>>> ts = TimeSeriesData(df=df)
>>> params = ARIMAParams(p=1, d=1, q=1)
>>> all_errors = ["mape", "smape", "mae", "mase", "mse", "rmse"]
>>> cv = CrossValidation(
error_methods=all_errors,
data=ts,
params=params,
train_percentage=50,
test_percentage=25,
num_folds=3,
model_class=ARIMAModel,
                constant_train_size=True,
)
>>> cv.run_cv()
>>> mape = cv.get_error_value("mape") # Retrieve MAPE error
"""
size: int
results: List[Tuple[np.ndarray, np.ndarray, "Model[Params]", np.ndarray]]
num_folds: int
errors: Dict[str, float]
raw_errors: List[np.ndarray]
_backtester: BackTesterParent
def __init__(
self,
error_methods: List[str],
data: TimeSeriesData,
params: Params,
train_percentage: float,
test_percentage: float,
num_folds: int,
# pyre-fixme[24]: Generic type `type` expects 1 type parameter, use
# `typing.Type` to avoid runtime subscripting errors.
model_class: Type,
constant_train_size: bool = False,
multi: bool = True,
) -> None:
logging.info("Initializing and validating parameter values")
if train_percentage <= 0:
logging.error("Non positive training percentage")
raise ValueError("Invalid training percentage")
elif train_percentage > 100:
logging.error("Too large training percentage")
raise ValueError("Invalid training percentage")
self.train_percentage = train_percentage
if test_percentage <= 0:
logging.error("Non positive test percentage")
raise ValueError("Invalid test percentage")
elif test_percentage > 100:
logging.error("Too large test percentage")
raise ValueError("Invalid test percentage")
self.test_percentage = test_percentage
        if num_folds <= 0:
logging.error("Non positive number of folds")
raise ValueError("Invalid number of folds")
self.num_folds = num_folds
self.size = len(data.time)
if self.size <= 0:
logging.error("self.size <= 0")
logging.error("self.size: {0}".format(self.size))
raise ValueError("Passing an empty time series")
self.results = []
self.errors = {}
self.raw_errors = []
if not constant_train_size:
self._backtester = BackTesterExpandingWindow(
error_methods,
data,
params,
self.train_percentage,
100 - self.test_percentage,
self.test_percentage,
self.num_folds,
model_class,
multi=multi,
)
else:
self._backtester = BackTesterRollingWindow(
error_methods,
data,
params,
self.train_percentage,
self.test_percentage,
self.num_folds,
model_class,
multi=multi,
)
# Run cross validation
def run_cv(self) -> None:
"""Runs the cross validation."""
logging.info("Running training and evaluation")
self._backtester.run_backtest()
self.results = self._backtester.results
self.errors = self._backtester.errors
self.raw_errors = self._backtester.raw_errors
logging.info("Finished")
def get_error_value(self, error_name: str) -> float:
"""Gets requested error value.
Args:
error_name: A string of the error whose value should be returned.
Returns:
            A float of the error value.
Raises:
ValueError: The error name is invalid.
"""
if error_name in self.errors:
return self.errors[error_name]
else:
logging.error("Invalid error name: {0}".format(error_name))
raise ValueError("Invalid error name")
def _get_absolute_size(size: int, percent: float) -> int:
"""
Returns absolute size corresponding to percentage of array of length size.
"""
val = np.floor(size * percent / 100)
return int(val)
def _return_fold_offsets(start: int, end: int, num_folds: int) -> List[int]:
"""
Returns approximately even length fold offsets for a given range.
"""
offsets = [0]
splits = np.array_split(range(end - start), num_folds - 1)
for split in splits:
prev_offset = offsets[-1]
offsets.append(split.size + prev_offset)
return offsets
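# Hedged sketch (not part of the original module): a quick standalone check of
# the fold-offset arithmetic above, re-implemented so it can run without Kats.
# The sizes are arbitrary illustration values.
def _fold_offsets_sketch(start, end, num_folds):
    offsets = [0]
    for split in np.array_split(range(end - start), num_folds - 1):
        offsets.append(split.size + offsets[-1])
    return offsets
# Sharing 100 extra points across 5 expanding-window folds gives
# offsets [0, 25, 50, 75, 100].
assert _fold_offsets_sketch(0, 100, 5) == [0, 25, 50, 75, 100]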
| 39.50544
| 88
| 0.625138
|
9dd88ffae6284eb501aa84770febebb549d17a7a
| 26
|
py
|
Python
|
pyloraserver/__init__.py
|
dtony/pychirp
|
c942b84cf834e0cec3e7f60b3c7b781581478a7b
|
[
"MIT"
] | null | null | null |
pyloraserver/__init__.py
|
dtony/pychirp
|
c942b84cf834e0cec3e7f60b3c7b781581478a7b
|
[
"MIT"
] | null | null | null |
pyloraserver/__init__.py
|
dtony/pychirp
|
c942b84cf834e0cec3e7f60b3c7b781581478a7b
|
[
"MIT"
] | null | null | null |
__name__ = "pyloraserver"
| 13
| 25
| 0.769231
|
0a7815fff831f5879fa38000bc17cc6db9898320
| 190
|
py
|
Python
|
projects/WSL/wsl/modeling/meta_arch/__init__.py
|
shenyunhang/PDSL
|
ade1e0cf4435c9252b4384100a82bb45b026b7c2
|
[
"Apache-2.0"
] | 6
|
2021-11-03T09:07:39.000Z
|
2022-02-06T15:36:56.000Z
|
projects/WSL/wsl/modeling/meta_arch/__init__.py
|
shenyunhang/PDSL
|
ade1e0cf4435c9252b4384100a82bb45b026b7c2
|
[
"Apache-2.0"
] | 2
|
2021-10-17T16:55:59.000Z
|
2022-03-27T02:46:38.000Z
|
projects/WSL/wsl/modeling/meta_arch/__init__.py
|
shenyunhang/PDSL
|
ade1e0cf4435c9252b4384100a82bb45b026b7c2
|
[
"Apache-2.0"
] | 4
|
2021-11-03T09:07:41.000Z
|
2022-02-06T15:37:03.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .rcnn import GeneralizedRCNNWSL, ProposalNetworkWSL
from .mcnn import GeneralizedMCNNWSL
| 31.666667
| 70
| 0.768421
|
5aec69b4a2978d4ad505b1470112bd6ebb5c6a8b
| 559
|
py
|
Python
|
numberlist.py
|
akselsd/VisualSort
|
26b5505219227c20ac1ca834bc9ec89c989af457
|
[
"MIT"
] | null | null | null |
numberlist.py
|
akselsd/VisualSort
|
26b5505219227c20ac1ca834bc9ec89c989af457
|
[
"MIT"
] | null | null | null |
numberlist.py
|
akselsd/VisualSort
|
26b5505219227c20ac1ca834bc9ec89c989af457
|
[
"MIT"
] | null | null | null |
import random
class Numbers:
def __init__(self, amount):
self.numbers = [x for x in range(1, amount+1)]
random.shuffle(self.numbers)
self.sorted_numbers = sorted(self.numbers)
    # A displacement value of 1 means the element is already at its sorted position
def get_displacement_vector(self):
n = len(self.sorted_numbers)
return list(map(lambda x: 1 - abs(self.numbers.index(x) - self.sorted_numbers.index(x)) / (n), self.numbers))
def reshuffle(self):
random.shuffle(self.numbers)
def __bool__(self):
return self.numbers == self.sorted_numbers
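# Hedged sketch (not part of the original file): minimal usage of the Numbers
# helper above; the size 10 is arbitrary and the shuffled order is random.
if __name__ == "__main__":
    nums = Numbers(10)
    print(nums.numbers)                    # shuffled values 1..10
    print(nums.get_displacement_vector())  # 1.0 where an element already sits at its sorted index
    print(bool(nums))                      # True only once the list is fully sorted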
| 27.95
| 117
| 0.651163
|
f09e1acaedc0107a312f33b2e0b9af94452d349d
| 1,799
|
py
|
Python
|
gui/elements/_window.py
|
pep8speaks/napari-gui
|
86a55ec4707f1775217b35ccef5ba5a2a55fff03
|
[
"BSD-3-Clause"
] | null | null | null |
gui/elements/_window.py
|
pep8speaks/napari-gui
|
86a55ec4707f1775217b35ccef5ba5a2a55fff03
|
[
"BSD-3-Clause"
] | null | null | null |
gui/elements/_window.py
|
pep8speaks/napari-gui
|
86a55ec4707f1775217b35ccef5ba5a2a55fff03
|
[
"BSD-3-Clause"
] | null | null | null |
from PyQt5.QtWidgets import QMainWindow, QWidget, QHBoxLayout, QLabel
from PyQt5.QtCore import Qt
from ._viewer import Viewer
class Window:
"""Application window that contains the menu bar and viewers.
Parameters
----------
viewer : Viewer
Contained viewer.
Attributes
----------
viewer : Viewer
Contained viewer.
"""
def __init__(self, viewer, show=True):
self._qt_window = QMainWindow()
self._qt_center = QWidget()
self._qt_window.setCentralWidget(self._qt_center)
self._qt_center.setLayout(QHBoxLayout())
self._statusBar = self._qt_window.statusBar()
self._statusBar.showMessage('Ready')
self._help = QLabel('')
self._statusBar.addPermanentWidget(self._help)
self.viewer = viewer
self._qt_center.layout().addWidget(self.viewer._qt)
self.viewer.statusChanged.connect(self._statusChanged)
self.viewer.helpChanged.connect(self._helpChanged)
if show:
self.show()
def resize(self, width, height):
"""Resize the window.
Parameters
----------
width : int
Width in logical pixels.
height : int
Height in logical pixels.
"""
self._qt_window.resize(width, height)
def show(self):
"""Resize, show, and bring forward the window.
"""
self._qt_window.resize(self._qt_window.layout().sizeHint())
self._qt_window.show()
self._qt_window.raise_()
def _statusChanged(self, message):
"""Update status bar.
"""
self._statusBar.showMessage(message)
def _helpChanged(self, message):
"""Update help message on status bar.
"""
self._help.setText(message)
| 26.455882
| 69
| 0.611451
|
0b1a346534dc65b830cd6d9d6ab70680de82faae
| 36
|
py
|
Python
|
system/__init__.py
|
philgookang/Simulated_InvertedIndex_TFIDF_PageRank
|
612452f50913275b818942f82da098b78956ff3a
|
[
"MIT"
] | null | null | null |
system/__init__.py
|
philgookang/Simulated_InvertedIndex_TFIDF_PageRank
|
612452f50913275b818942f82da098b78956ff3a
|
[
"MIT"
] | null | null | null |
system/__init__.py
|
philgookang/Simulated_InvertedIndex_TFIDF_PageRank
|
612452f50913275b818942f82da098b78956ff3a
|
[
"MIT"
] | null | null | null |
from system.Database import Database
| 36
| 36
| 0.888889
|
d3fbdbd22c9361e9ebcd64d2a3d6248cc1e6ec9c
| 480
|
py
|
Python
|
wristband/apps/serializers.py
|
hmrc/wristband
|
35648a15b91dea4a927e486bfe0ace5e00b44dcc
|
[
"Apache-2.0"
] | 1
|
2015-07-14T14:32:17.000Z
|
2015-07-14T14:32:17.000Z
|
wristband/apps/serializers.py
|
hmrc/wristband
|
35648a15b91dea4a927e486bfe0ace5e00b44dcc
|
[
"Apache-2.0"
] | 4
|
2015-08-03T11:17:37.000Z
|
2015-09-24T10:06:02.000Z
|
wristband/apps/serializers.py
|
hmrc/wristband
|
35648a15b91dea4a927e486bfe0ace5e00b44dcc
|
[
"Apache-2.0"
] | 2
|
2020-05-05T13:56:47.000Z
|
2021-04-10T23:51:52.000Z
|
from rest_framework import serializers
class NestedAppSerializer(serializers.Serializer):
name = serializers.CharField()
version = serializers.CharField()
stage = serializers.CharField()
log_url = serializers.URLField(allow_null=True)
class AppSerializer(serializers.Serializer):
name = serializers.CharField()
stages = serializers.ListField(
child=serializers.DictField(
child=serializers.CharField(allow_null=True)
)
)
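# Hedged sketch (not part of the original file): serializing a plain dict with
# AppSerializer above. Assumes djangorestframework is installed and Django
# settings are configured (e.g. inside a test); the payload values are made up.
#
#     app = {"name": "wristband",
#            "stages": [{"qa": "1.0.0"}, {"prod": None}]}
#     AppSerializer(app).data
#     # -> {'name': 'wristband', 'stages': [{'qa': '1.0.0'}, {'prod': None}]}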
| 28.235294
| 56
| 0.727083
|
a77cdbf2fe3c27bb6721a3052888b68fcb98595b
| 6,348
|
py
|
Python
|
word_language_model/model.py
|
dl-framework-benchmark/pytorch-examples
|
9439d83c137f352347494aacf1fc45e8e2373d6e
|
[
"BSD-3-Clause"
] | 1
|
2020-06-16T14:51:46.000Z
|
2020-06-16T14:51:46.000Z
|
word_language_model/model.py
|
ArnoutDevos/examples
|
4581968193699de14b56527296262dd76ab43557
|
[
"BSD-3-Clause"
] | null | null | null |
word_language_model/model.py
|
ArnoutDevos/examples
|
4581968193699de14b56527296262dd76ab43557
|
[
"BSD-3-Clause"
] | 1
|
2020-07-06T01:37:28.000Z
|
2020-07-06T01:37:28.000Z
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def init_hidden(self, bsz):
weight = next(self.parameters())
if self.rnn_type == 'LSTM':
return (weight.new_zeros(self.nlayers, bsz, self.nhid),
weight.new_zeros(self.nlayers, bsz, self.nhid))
else:
return weight.new_zeros(self.nlayers, bsz, self.nhid)
# Temporarily leave PositionalEncoding module here. Will be moved somewhere else.
class PositionalEncoding(nn.Module):
r"""Inject some information about the relative or absolute position of the tokens
in the sequence. The positional encodings have the same dimension as
the embeddings, so that the two can be summed. Here, we use sine and cosine
functions of different frequencies.
    .. math::
        \text{PosEncoder}(pos, 2i) = \sin(pos/10000^{2i/d_{model}})
        \text{PosEncoder}(pos, 2i+1) = \cos(pos/10000^{2i/d_{model}})
    where :math:`pos` is the word position and :math:`i` is the embed idx.
Args:
d_model: the embed dim (required).
dropout: the dropout value (default=0.1).
max_len: the max. length of the incoming sequence (default=5000).
Examples:
>>> pos_encoder = PositionalEncoding(d_model)
"""
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
r"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
Shape:
x: [sequence length, batch size, embed dim]
output: [sequence length, batch size, embed dim]
Examples:
>>> output = pos_encoder(x)
"""
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
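# Hedged sketch (not part of the original file): a spot check of the buffer
# built above against the closed-form sin() definition from the docstring.
# The d_model, max_len, pos and i values are arbitrary illustration choices.
def _positional_encoding_spot_check():
    pe_module = PositionalEncoding(d_model=8, dropout=0.0, max_len=16)
    pos, i = 3, 2  # position 3, frequency index 2 -> even column 2*i
    expected = math.sin(pos / 10000 ** (2 * i / 8))
    assert torch.isclose(pe_module.pe[pos, 0, 2 * i], torch.tensor(expected))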
class TransformerModel(nn.Module):
"""Container module with an encoder, a recurrent or transformer module, and a decoder."""
def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
super(TransformerModel, self).__init__()
try:
from torch.nn import TransformerEncoder, TransformerEncoderLayer
        except ImportError:
raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or lower.')
self.model_type = 'Transformer'
self.src_mask = None
self.pos_encoder = PositionalEncoding(ninp, dropout)
encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
self.encoder = nn.Embedding(ntoken, ninp)
self.ninp = ninp
self.decoder = nn.Linear(ninp, ntoken)
self.init_weights()
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, src, has_mask=True):
if has_mask:
device = src.device
if self.src_mask is None or self.src_mask.size(0) != len(src):
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
else:
self.src_mask = None
src = self.encoder(src) * math.sqrt(self.ninp)
src = self.pos_encoder(src)
output = self.transformer_encoder(src, self.src_mask)
output = self.decoder(output)
return F.log_softmax(output, dim=-1)
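# Hedged sketch (not part of the original file): one forward pass through
# TransformerModel above. Assumes PyTorch >= 1.2; vocabulary, sequence and
# batch sizes are arbitrary illustration choices.
def _transformer_usage_sketch():
    model = TransformerModel(ntoken=1000, ninp=64, nhead=4, nhid=128, nlayers=2)
    src = torch.randint(0, 1000, (35, 8))  # [sequence length, batch size] of token ids
    log_probs = model(src)                 # [35, 8, 1000], log-softmax over the vocabulary
    return log_probs.shape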
| 42.039735
| 110
| 0.625709
|
79fea646368efa351c54ecefc2c88f7073e6244b
| 322
|
py
|
Python
|
tests/test_dataset_downloader.py
|
fossabot/spampy
|
dea667671175a3c6950413154f42298a6b75c5b2
|
[
"MIT"
] | 1
|
2018-11-26T07:34:26.000Z
|
2018-11-26T07:34:26.000Z
|
tests/test_dataset_downloader.py
|
fossabot/spampy
|
dea667671175a3c6950413154f42298a6b75c5b2
|
[
"MIT"
] | null | null | null |
tests/test_dataset_downloader.py
|
fossabot/spampy
|
dea667671175a3c6950413154f42298a6b75c5b2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
from spampy import dataset_downloader
class DatasetDownloaderTests(unittest.TestCase):
def test_download_enron_dataset(self):
dataset_downloader.download_enron_dataset()
self.assertTrue(os.path.exists('spampy/datasets/enron'))
| 24.769231
| 64
| 0.754658
|
fab52478d4534d697a3092e3adcac526d9769fed
| 1,096
|
py
|
Python
|
tools/add_missing_attribution-confidence.py
|
dekoder/misp-galaxy
|
b9d54b8ad90ca80681d534c40207d015ebff31fd
|
[
"BSD-2-Clause",
"CC0-1.0"
] | 361
|
2016-02-29T22:26:55.000Z
|
2022-03-28T08:31:09.000Z
|
tools/add_missing_attribution-confidence.py
|
dekoder/misp-galaxy
|
b9d54b8ad90ca80681d534c40207d015ebff31fd
|
[
"BSD-2-Clause",
"CC0-1.0"
] | 500
|
2016-02-29T21:25:33.000Z
|
2022-03-31T10:54:54.000Z
|
tools/add_missing_attribution-confidence.py
|
dekoder/misp-galaxy
|
b9d54b8ad90ca80681d534c40207d015ebff31fd
|
[
"BSD-2-Clause",
"CC0-1.0"
] | 246
|
2016-02-29T21:13:01.000Z
|
2022-03-24T10:54:34.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import argparse
import uuid
parser = argparse.ArgumentParser(description='Add missing attribution-confidence in threat-actor clusters')
parser.add_argument("-f", "--filename", required=True, help="name of the cluster")
args = parser.parse_args()
with open(args.filename) as json_file:
data = json.load(json_file)
json_file.close()
for value in data['values']:
if value.get('meta'):
if not value.get('meta').get('attribution-confidence') and (value.get('meta').get('cfr-suspected-state-sponsor') or value.get('meta').get('country')):
            value.get('meta')['attribution-confidence'] = "50"
elif value.get('meta').get('attribution-confidence') and (value.get('meta').get('cfr-suspected-state-sponsor') or value.get('meta').get('country')):
value.get('meta')['attribution-confidence'] = str(value.get('meta').get('attribution-confidence'))
with open(args.filename, 'w') as json_file:
json.dump(data, json_file, indent=2, sort_keys=True, ensure_ascii=False)
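# Hedged sketch (not part of the original script): the same default-filling rule
# applied to one in-memory cluster value, so the behaviour can be checked
# without a galaxy file. The sample dict below is made up.
#
#     value = {"meta": {"country": "CN"}}
#     meta = value["meta"]
#     if not meta.get("attribution-confidence") and (
#             meta.get("cfr-suspected-state-sponsor") or meta.get("country")):
#         meta["attribution-confidence"] = "50"
#     # value -> {"meta": {"country": "CN", "attribution-confidence": "50"}}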
| 40.592593
| 162
| 0.675182
|
d01894f1ea37609ae3ff77eed8725fd0739be4b5
| 6,065
|
py
|
Python
|
tracardi/process_engine/action/v1/ux/consent/plugin.py
|
stefaanneyts/tracardi
|
1e393a78e230c2d5afc16fcf0046d7a634507dba
|
[
"MIT"
] | 1
|
2021-11-17T00:45:29.000Z
|
2021-11-17T00:45:29.000Z
|
tracardi/process_engine/action/v1/ux/consent/plugin.py
|
stefaanneyts/tracardi
|
1e393a78e230c2d5afc16fcf0046d7a634507dba
|
[
"MIT"
] | null | null | null |
tracardi/process_engine/action/v1/ux/consent/plugin.py
|
stefaanneyts/tracardi
|
1e393a78e230c2d5afc16fcf0046d7a634507dba
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel, validator, AnyHttpUrl
from tracardi_plugin_sdk.domain.register import Plugin, Spec, MetaData, Documentation, PortDoc, Form, FormGroup, \
FormField, FormComponent
from tracardi_plugin_sdk.action_runner import ActionRunner
from tracardi_plugin_sdk.domain.result import Result
class Configuration(BaseModel):
endpoint: AnyHttpUrl
event_type: str = "user-consent-pref"
agree_all_event_type: str = "agree-all-event-type"
position: str = "bottom"
expand_height: int = 400
    enabled: bool = True
@validator("agree_all_event_type")
def all_event_type_should_no_be_empty(cls, value):
if len(value) == 0:
raise ValueError("This field should not be empty")
return value
@validator("endpoint")
def endpoint_should_no_be_empty(cls, value):
if len(value) == 0:
raise ValueError("This field should not be empty")
return value
@validator("event_type")
def event_type_should_no_be_empty(cls, value):
if len(value) == 0:
raise ValueError("This field should not be empty")
return value
@validator("position")
def position_enum(cls, value):
if len(value) == 0:
raise ValueError("This field should be either [top] or [bottom]")
return value
@validator("expand_height")
def height_enum(cls, value: str):
if isinstance(value, str) and not value.isnumeric():
raise ValueError("This field must be a number")
return int(value)
def validate(config: dict):
return Configuration(**config)
class ConsentUx(ActionRunner):
def __init__(self, **kwargs):
self.config = validate(kwargs)
async def run(self, payload):
        tracardi_endpoint = self.config.endpoint
        uix_endpoint = self.config.endpoint
        self.ux.append({"tag": "div", "props": {
            "class": "tracardi-uix-consent",
            "data-endpoint": tracardi_endpoint,  # Tracardi endpoint
            "data-event-type": self.config.event_type,
            "data-agree-all-event-type": self.config.agree_all_event_type,
            "data-position": self.config.position,
            "data-expand-height": self.config.expand_height,
            "data-profile": self.profile.id,
            "data-session": self.session.id,
            "data-source": self.event.source.id
        }})
        self.ux.append({"tag": "script", "props": {"src": f"{uix_endpoint}/uix/consent/index.js"}})
return Result(port="payload", value=payload)
def register() -> Plugin:
return Plugin(
start=False,
spec=Spec(
module=__name__,
className='ConsentUx',
inputs=["payload"],
outputs=["payload"],
init={
"endpoint": "http://locahost:8686",
"event_type": "user-consent-pref",
"agree_all_event_type": "agree-all-event-type",
"position": "bottom",
"expand_height": 400,
"enabled": True
},
version='0.6.1',
license="MIT",
author="Risto Kowaczewski",
form=Form(groups=[
FormGroup(
name="Consent Widget Configuration",
fields=[
FormField(
id="endpoint",
name="Tracardi API endpoint URL",
description="Provide URL where the events from this widget will be send.",
component=FormComponent(type="text", props={"label": "URL"})
),
FormField(
id="event_type",
name="Event type",
description="Event type that will be send when user selects consent preferences.",
component=FormComponent(type="text", props={"label": "Event type"})
),
FormField(
id="agree_all_event_type",
name="Event type for Agree To All",
description="Event type that will be send when user selects Agree to All consents.",
component=FormComponent(type="text", props={"label": "Event type"})
),
FormField(
id="position",
name="Widget position",
description="Where would you like to place the widget.",
component=FormComponent(type="select", props={"label": "Position", "items": {
"top": "Top",
"bottom": "Bottom"
}})
),
FormField(
id="expand_height",
name="Widget height",
description="Type height of the expanded widget",
component=FormComponent(type="text", props={"label": "Height"})
),
FormField(
id="enabled",
name="Enable widget",
description="Only enabled widgets are show on the page",
component=FormComponent(type="bool", props={"label": "Enable"})
),
]
),
]),
),
metadata=MetaData(
name='Show consent bar',
desc='Shows consent pop-up on the front end.',
icon='react',
group=["UI Widgets"],
documentation=Documentation(
inputs={
"payload": PortDoc(desc="This port takes payload object.")
},
outputs={"payload": PortDoc(desc="This port returns input payload object.")}
)
)
)
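# Hedged sketch (not part of the original plugin): exercising the pydantic
# validators above. Assumes pydantic v1; the endpoint URL and height are
# illustrative values only.
#
#     cfg = validate({"endpoint": "http://localhost:8686",
#                     "expand_height": "400"})   # numeric string is coerced to int
#     assert cfg.expand_height == 400 and cfg.position == "bottom"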
| 38.878205
| 114
| 0.50305
|
00ec656b0eb1440fc81a26aad5cbc2d621612cb3
| 4,810
|
py
|
Python
|
salt/states/slack.py
|
leifliddy/salt
|
22edef4bdaa20502865a6710696c953260fc3f76
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
salt/states/slack.py
|
leifliddy/salt
|
22edef4bdaa20502865a6710696c953260fc3f76
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
salt/states/slack.py
|
leifliddy/salt
|
22edef4bdaa20502865a6710696c953260fc3f76
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Send a message to Slack
=======================
This state is useful for sending messages to Slack during state runs.
.. versionadded:: 2015.5.0
.. code-block:: yaml
slack-message:
slack.post_message:
- channel: '#general'
- from_name: SuperAdmin
- message: 'This state was executed successfully.'
- api_key: peWcBiMOS9HrZG15peWcBiMOS9HrZG15
The api key can be specified in the master or minion configuration like below:
.. code-block:: yaml
slack:
api_key: peWcBiMOS9HrZG15peWcBiMOS9HrZG15
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
from salt.exceptions import SaltInvocationError
def __virtual__():
"""
Only load if the slack module is available in __salt__
"""
return "slack" if "slack.post_message" in __salt__ else False
def post_message(name, **kwargs):
"""
Send a message to a Slack channel.
.. code-block:: yaml
slack-message:
slack.post_message:
- channel: '#general'
- from_name: SuperAdmin
- message: 'This state was executed successfully.'
- api_key: peWcBiMOS9HrZG15peWcBiMOS9HrZG15
The following parameters are required:
api_key parameters:
name
The unique name for this event.
channel
The channel to send the message to. Can either be the ID or the name.
from_name
The name of that is to be shown in the "from" field.
message
The message that is to be sent to the Slack channel.
The following parameters are optional:
api_key
The api key for Slack to use for authentication,
if not specified in the configuration options of master or minion.
icon
URL to an image to use as the icon for this message
webhook parameters:
name
The unique name for this event.
message
The message that is to be sent to the Slack channel.
color
The color of border of left side
short
An optional flag indicating whether the value is short
enough to be displayed side-by-side with other values.
webhook
The identifier of WebHook.
channel
The channel to use instead of the WebHook default.
username
Username to use instead of WebHook default.
icon_emoji
Icon to use instead of WebHook default.
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if not kwargs.get("api_key") and not kwargs.get("webhook"):
ret["comment"] = "Please specify api_key or webhook."
return ret
if kwargs.get("api_key") and kwargs.get("webhook"):
ret["comment"] = "Please specify only either api_key or webhook."
return ret
if kwargs.get("api_key") and not kwargs.get("channel"):
ret["comment"] = "Slack channel is missing."
return ret
if kwargs.get("api_key") and not kwargs.get("from_name"):
ret["comment"] = "Slack from name is missing."
return ret
if not kwargs.get("message"):
ret["comment"] = "Slack message is missing."
return ret
if __opts__["test"]:
ret["comment"] = "The following message is to be sent to Slack: {0}".format(
kwargs.get("message")
)
ret["result"] = None
return ret
try:
if kwargs.get("api_key"):
result = __salt__["slack.post_message"](
channel=kwargs.get("channel"),
message=kwargs.get("message"),
from_name=kwargs.get("from_name"),
api_key=kwargs.get("api_key"),
icon=kwargs.get("icon"),
)
elif kwargs.get("webhook"):
result = __salt__["slack.call_hook"](
message=kwargs.get("message"),
attachment=kwargs.get("attachment"),
color=kwargs.get("color", "good"),
short=kwargs.get("short"),
identifier=kwargs.get("webhook"),
channel=kwargs.get("channel"),
username=kwargs.get("username"),
icon_emoji=kwargs.get("icon_emoji"),
)
except SaltInvocationError as sie:
ret["comment"] = "Failed to send message ({0}): {1}".format(sie, name)
else:
if isinstance(result, bool) and result:
ret["result"] = True
ret["comment"] = "Sent message: {0}".format(name)
else:
ret["comment"] = "Failed to send message ({0}): {1}".format(
result["message"], name
)
return ret
| 28.975904
| 84
| 0.583368
|
e1d1db5c9ede35e734943f8cc71020b6923a4620
| 135
|
py
|
Python
|
src/ntf/__init__.py
|
Emil-501/ntf
|
106c2a0ecfbdbe1f7a0297e8e86cf668dc291f2e
|
[
"Apache-2.0"
] | 37
|
2016-07-15T21:56:12.000Z
|
2021-11-30T04:16:22.000Z
|
src/ntf/__init__.py
|
Emil-501/ntf
|
106c2a0ecfbdbe1f7a0297e8e86cf668dc291f2e
|
[
"Apache-2.0"
] | 7
|
2017-03-12T06:55:41.000Z
|
2022-03-15T14:42:23.000Z
|
src/ntf/__init__.py
|
Emil-501/ntf
|
106c2a0ecfbdbe1f7a0297e8e86cf668dc291f2e
|
[
"Apache-2.0"
] | 21
|
2016-07-15T21:56:38.000Z
|
2022-03-06T06:21:01.000Z
|
'''Docstring to silence pylint; ignores --ignore option for __init__.py'''
# Global config dictionary
# Populated by ntf.
config = {}
| 22.5
| 74
| 0.725926
|
aa59d870164de7b108b96e6f9d7ce2fc8a49e37c
| 33,499
|
py
|
Python
|
danceschool/stats/stats.py
|
django-danceschool/django-danceschool
|
65ae09ffdcb0821e82df0e1f634fe13c0384a525
|
[
"BSD-3-Clause"
] | 32
|
2017-09-12T04:25:25.000Z
|
2022-03-21T10:48:07.000Z
|
danceschool/stats/stats.py
|
django-danceschool/django-danceschool
|
65ae09ffdcb0821e82df0e1f634fe13c0384a525
|
[
"BSD-3-Clause"
] | 97
|
2017-09-01T02:43:08.000Z
|
2022-01-03T18:20:34.000Z
|
danceschool/stats/stats.py
|
django-danceschool/django-danceschool
|
65ae09ffdcb0821e82df0e1f634fe13c0384a525
|
[
"BSD-3-Clause"
] | 19
|
2017-09-26T13:34:46.000Z
|
2022-03-21T10:48:10.000Z
|
from django.db.models import Count, Avg, Sum, IntegerField, Case, When, Q, Min, FloatField, F
from django.db.models.functions import TruncDate
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse, JsonResponse
from django.utils.translation import gettext as _
from django.utils import timezone
from dateutil.relativedelta import relativedelta
import unicodecsv as csv
from collections import Counter, OrderedDict
from bisect import bisect
from calendar import month_name
from datetime import datetime
from danceschool.core.models import (
Customer, Series, EventOccurrence, Registration, EventRegistration,
DanceTypeLevel, Location, DanceRole, SeriesTeacher, Instructor
)
from danceschool.core.utils.requests import getDateTimeFromGet
from danceschool.core.utils.timezone import ensure_timezone
def getAveragesByClassType(startDate=None, endDate=None):
# If a date filter was passed in GET, then apply it
when_all = {
'classdescription__series__eventregistration__cancelled': False,
'classdescription__series__eventregistration__dropIn': False,
'classdescription__series__eventregistration__registration__final': True,
}
timeFilters = {}
classFilters = {}
roleFilters = Q()
if startDate:
timeFilters['classdescription__series__startTime__gte'] = startDate
classFilters['startTime__gte'] = startDate
roleFilters = roleFilters & (
Q(eventrole__event__startTime__gte=startDate) |
Q(eventregistration__event__startTime__gte=startDate)
)
if endDate:
timeFilters['classdescription__series__startTime__lte'] = endDate
classFilters['startTime__lte'] = endDate
roleFilters = roleFilters & (
Q(eventrole__event__startTime__lte=endDate) |
Q(eventregistration__event__startTime__lte=endDate)
)
when_all.update(timeFilters)
role_list = DanceRole.objects.filter(roleFilters).distinct()
annotations = {
'registrations': Sum(Case(When(Q(**when_all), then=1), output_field=IntegerField()))
}
values_list = ['name', 'danceType__name', 'registrations']
for this_role in role_list:
annotations[this_role.pluralName] = Sum(Case(
When(
Q(
Q(**when_all) &
Q(classdescription__series__eventregistration__role=this_role)
),
then=1
),
output_field=IntegerField()
))
values_list.append(this_role.pluralName)
registration_counts = list(DanceTypeLevel.objects.annotate(**annotations).values_list(*values_list))
class_counter = Counter([
(
x.classDescription.danceTypeLevel.name,
x.classDescription.danceTypeLevel.danceType.name
)
for x in Series.objects.filter(**classFilters).distinct()
])
results = {}
for list_item in registration_counts:
type_name = ' '.join((str(list_item[0]), str(list_item[1])))
results[type_name] = {
str(_('Registrations')): list_item[2],
}
m = 3
for this_role in role_list:
results[type_name][str(_('Total %s' % this_role.pluralName))] = list_item[m]
m += 1
for k, count in class_counter.items():
type_name = ' '.join((str(k[0]), str(k[1])))
results[type_name].update({
str(_('Series')): count
})
for k, v in results.items():
if results[k].get(str(_('Series'))):
results[k].update({
str(_('Average Registrations')): (
(results[k][str(_('Registrations'))] or 0) / float(results[k][str(_('Series'))])
),
})
for this_role in role_list:
results[k][str(_('Average %s' % this_role.pluralName))] = (
(results[k][str(_('Total %s' % this_role.pluralName))] or 0) /
float(results[k][str(_('Series'))])
)
return results
@staff_member_required
def AveragesByClassTypeJSON(request):
startDate = getDateTimeFromGet(request, 'startDate')
endDate = getDateTimeFromGet(request, 'endDate')
results = getAveragesByClassType(startDate, endDate)
# Needs to return a list, not a dict
# Also, filter out types with no series or registrations
# and sort descending
results_list = [
dict({'type': k}, **dict(v)) for k, v in results.items() if
v.get(str(_('Series'))) or v.get(str(_('Registrations')))
]
sorted_list = sorted(results_list, key=lambda k: k[str(_('Series'))], reverse=True)
return JsonResponse(sorted_list, safe=False)
@staff_member_required
def AveragesByClassTypeCSV(request):
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="averagesByClassDescriptionType.csv"'
writer = csv.writer(response)
startDate = getDateTimeFromGet(request, 'startDate')
endDate = getDateTimeFromGet(request, 'endDate')
results = getAveragesByClassType(startDate, endDate)
role_names = [
x.replace(str(_('Average ')), '') for x in results.keys() if
x.startswith(str(_('Average ')))
]
header_list = [
str(_('Class Type')), str(_('Total Classes')),
str(_('Total Students')), str(_('Avg. Students/Class'))
]
for this_role in role_names:
header_list += [str(_('Total %s' % this_role)), str(_('Avg. %s/Class' % this_role))]
# Note: These are not translated because the chart Javascript looks for these keys
writer.writerow(header_list)
for key, value in results.items():
this_row = [
key,
value.get(str(_('Series')), 0),
value.get(str(_('Registrations')), 0),
value.get(str(_('Average Registrations')), None),
]
for this_role in role_names:
this_row += [
value.get(str(_('Total %s' % this_role)), 0),
value.get(str(_('Average %s' % this_role)), 0)
]
writer.writerow(this_row)
return response
def getClassTypeMonthlyData(year=None, series=None, typeLimit=None):
'''
To break out by class type and month simultaneously, get data for each
series and aggregate by class type.
'''
# If no year specified, report current year to date.
if not year:
year = timezone.now().year
role_list = DanceRole.objects.distinct()
# Report data on all students registered unless otherwise specified
if (
series not in ['registrations', 'studenthours'] and
series not in [x.pluralName for x in role_list]
):
series = 'registrations'
when_all = {
'eventregistration__dropIn': False,
'eventregistration__cancelled': False,
'eventregistration__registration__final': True,
}
annotations = {'registrations': Sum(Case(When(Q(**when_all), then=1), output_field=FloatField()))}
for this_role in role_list:
annotations[this_role.pluralName] = Sum(Case(
When(
Q(Q(**when_all) & Q(eventregistration__role=this_role)),
then=1
),
output_field=FloatField()
))
series_counts = Series.objects.filter(year=year).annotate(**annotations).annotate(
studenthours=F('duration') * F('registrations')
).select_related(
'classDescription__danceTypeLevel__danceType', 'classDescription__danceTypeLevel'
)
# If no limit specified on number of types, then do not aggregate dance types.
# Otherwise, report the typeLimit most common types individually, and report all
# others as other. This gets tuples of names and counts
dance_type_counts = [
(dance_type, count) for dance_type, count in
Counter([x.classDescription.danceTypeLevel for x in series_counts]).items()
]
dance_type_counts.sort(key=lambda k: k[1], reverse=True)
if typeLimit:
dance_types = [x[0] for x in dance_type_counts[:typeLimit]]
else:
dance_types = [x[0] for x in dance_type_counts]
results = []
# Month by month, calculate the result data
for month in range(1, 13):
this_month_result = {
'month': month,
'month_name': month_name[month],
}
for dance_type in dance_types:
this_month_result[dance_type.__str__()] = \
series_counts.filter(
classDescription__danceTypeLevel=dance_type, month=month
).aggregate(Sum(series))['%s__sum' % series]
if typeLimit:
this_month_result['Other'] = \
series_counts.filter(month=month).exclude(
classDescription__danceTypeLevel__in=dance_types
).aggregate(Sum(series))['%s__sum' % series]
results.append(this_month_result)
# Now get totals
totals_result = {
'month': 'Totals',
'month_name': 'totals',
}
for dance_type in dance_types:
totals_result[dance_type.__str__()] = \
series_counts.filter(classDescription__danceTypeLevel=dance_type).aggregate(
Sum(series)
)['%s__sum' % series]
if typeLimit:
totals_result['Other'] = \
series_counts.exclude(classDescription__danceTypeLevel__in=dance_types).aggregate(
Sum(series)
)['%s__sum' % series]
results.append(totals_result)
return results
def ClassTypeMonthlyJSON(request):
try:
year = int(request.GET.get('year'))
except (ValueError, TypeError):
year = None
try:
typeLimit = int(request.GET.get('typeLimit'))
except (ValueError, TypeError):
typeLimit = None
series = request.GET.get('series')
results = getClassTypeMonthlyData(year=year, series=series, typeLimit=typeLimit)
return JsonResponse(results, safe=False)
def getClassCountHistogramData(cohortStart=None, cohortEnd=None):
# Note: Bins are inclusive, and 99999 means 'or more'. That should last us awhile.
bins = [
(1, 1),
(2, 2),
(3, 3),
(4, 4),
(5, 5),
(6, 6),
(7, 7),
(8, 8),
(9, 9),
(10, 15),
(16, 20),
(21, 99999)]
when_all = {
'eventregistration__dropIn': False,
'eventregistration__cancelled': False,
'eventregistration__registration__final': True,
}
cohortFilters = {}
roleFilters = {}
if cohortStart:
cohortFilters['eventregistration__event__startTime__min__gte'] = cohortStart
roleFilters['eventregistration__event__startTime__gte'] = cohortStart
if cohortEnd:
cohortFilters['eventregistration__event__startTime__min__lte'] = cohortEnd
roleFilters['eventregistration__event__startTime__lte'] = cohortEnd
role_list = DanceRole.objects.filter(**roleFilters).distinct()
annotations = {
'eventregistration__event__startTime__min': Min('eventregistration__event__startTime'),
'registrations': Sum(Case(When(Q(**when_all), then=1), default=0, output_field=IntegerField())),
}
for this_role in role_list:
annotations[this_role.pluralName] = Sum(Case(
When(
Q(Q(**when_all) & Q(eventregistration__role=this_role)),
then=1
),
default=0,
output_field=IntegerField()
))
customers = Customer.objects.annotate(**annotations).filter(**cohortFilters).distinct()
totalCustomers = customers.filter(registrations__gt=0).count()
totalClasses = [x.registrations for x in customers if x.registrations]
totalClasses.sort()
totalsByRole = {}
for this_role in role_list:
totalsByRole[this_role.pluralName] = {
'customers': customers.filter(**{this_role.pluralName + '__gt': 0}).count(),
'classes': [
getattr(x, this_role.pluralName, None) for x in customers if
getattr(x, this_role.pluralName, None)
],
}
totalsByRole[this_role.pluralName]['classes'].sort()
results = {}
lastAll = 0
lastByRole = {this_role.pluralName: 0 for this_role in role_list}
iByRole = {}
for this_bin in bins:
range_max = this_bin[1]
if this_bin[0] == this_bin[1]:
this_label = '%s' % this_bin[0]
elif this_bin[1] == 99999:
this_label = str(_('%s or more' % this_bin[0]))
else:
this_label = '%s-%s' % this_bin
i_all = bisect(totalClasses, range_max, lastAll)
iByRole = {
this_role.pluralName: bisect(
totalsByRole[this_role.pluralName]['classes'],
range_max, lastByRole[this_role.pluralName]
)
for this_role in role_list
}
# Note: These are not translated because the chart Javascript looks for these keys
results.update({
this_label:
{
str(_('# Students')): (i_all - lastAll),
str(_('Percentage')): 100 * (i_all - lastAll) / (float(totalCustomers) or 1),
'bin': this_bin,
},
})
for this_role in role_list:
results[this_label].update({
'# ' + this_role.pluralName: (
iByRole[this_role.pluralName] - lastByRole[this_role.pluralName]
),
'Percentage ' + this_role.pluralName: 100 * (
iByRole[this_role.pluralName] - lastByRole[this_role.pluralName]
) /
(float(totalsByRole[this_role.pluralName]['customers']) or 1),
})
lastAll = i_all
lastByRole = {this_role.pluralName: iByRole[this_role.pluralName] for this_role in role_list}
return results
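# Hedged sketch (not part of the original module): the bisect-based binning used
# above, applied to a hand-made sorted list of per-customer class counts. All
# numbers are illustrative.
#
#     from bisect import bisect
#     counts = [1, 1, 2, 4, 7, 12, 30]            # sorted totals per customer
#     bins = [(1, 1), (2, 4), (5, 9), (10, 99999)]
#     last = 0
#     for lo, hi in bins:
#         i = bisect(counts, hi, last)            # first index past this bin
#         print(lo, hi, i - last)                 # customers falling in the bin
#         last = i
#     # per-bin counts: 2, 2, 1, 2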
@staff_member_required
def ClassCountHistogramJSON(request):
cohortStart = getDateTimeFromGet(request, 'cohortStart')
cohortEnd = getDateTimeFromGet(request, 'cohortEnd')
results = getClassCountHistogramData(cohortStart=cohortStart, cohortEnd=cohortEnd)
# Needs to return a sorted list, not a dict
results_list = [dict({'bin_label': k}, **dict(v)) for k, v in results.items()]
sorted_list = sorted(results_list, key=lambda k: k['bin'][0])
return JsonResponse(sorted_list, safe=False)
@staff_member_required
def ClassCountHistogramCSV(request):
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="studentHistogramData.csv"'
cohortStart = getDateTimeFromGet(request, 'cohortStart')
cohortEnd = getDateTimeFromGet(request, 'cohortEnd')
results = getClassCountHistogramData(cohortStart=cohortStart, cohortEnd=cohortEnd)
writer = csv.writer(response)
# Note: These are not translated because the chart Javascript looks for these keys
header_row = ['# of Classes']
keys = OrderedDict()
for v in results.values():
keys.update(v)
header_row += [x for x in keys.keys()]
writer.writerow(header_row)
for k, v in results.items():
this_row = [k]
this_row += [v.get(x, None) for x in keys.keys()]
writer.writerow(this_row)
return response
def getMonthlyPerformance():
'''
This function does the work of compiling monthly performance data
that can either be rendered as CSV or as JSON
'''
when_all = {
'eventregistration__dropIn': False,
'eventregistration__cancelled': False,
'eventregistration__registration__final': True,
}
# Get objects at the Series level so that we can calculate StudentHours
series_counts = list(Series.objects.annotate(
eventregistrations=Sum(Case(When(Q(**when_all), then=1), output_field=IntegerField())), )
.values('year', 'month', 'eventregistrations', 'duration'))
for series in series_counts:
series['studenthours'] = (series.get('eventregistrations') or 0) * (series.get('duration') or 0)
all_years = set([x['year'] for x in series_counts])
dataseries_list = ['EventRegistrations', 'Registrations', 'Hours', 'StudentHours', 'AvgStudents']
yearTotals = {}
# Initialize dictionaries
for dataseries in dataseries_list:
yearTotals[dataseries] = {'MonthlyAverage': {}}
for year in all_years:
yearTotals[dataseries][year] = {}
# Fill in by year and month for a cleaner looping process
for year in all_years:
# Monthly Totals
for month in range(1, 13):
# Total EventRegistrations per month is retrieved by the query above.
yearTotals['EventRegistrations'][year][month] = sum([
x['eventregistrations'] or 0 for x in series_counts if
x['month'] == month and x['year'] == year
])
# Total Registrations per month and hours per month require a separate query for each month
yearTotals['Registrations'][year][month] = len(
Registration.objects.filter(
final=True,
eventregistration__dropIn=False,
eventregistration__cancelled=False,
eventregistration__event__year=year,
eventregistration__event__month=month
).distinct()
)
yearTotals['Hours'][year][month] = sum([
x['duration'] or 0 for x in series_counts if x['month'] == month and x['year'] == year
])
yearTotals['StudentHours'][year][month] = sum([
x['studenthours'] or 0 for x in series_counts if x['month'] == month and x['year'] == year
])
if yearTotals['Hours'][year][month] > 0:
yearTotals['AvgStudents'][year][month] = (
yearTotals['StudentHours'][year][month] / float(yearTotals['Hours'][year][month])
)
else:
yearTotals['AvgStudents'][year][month] = 0
# Annual Totals
for sub_series in ['EventRegistrations', 'Registrations', 'Hours', 'StudentHours']:
yearTotals[sub_series][year]['Total'] = sum([x for x in yearTotals[sub_series][year].values()])
# Annual (Monthly) Averages
month_count = len([x for k, x in yearTotals['Hours'][year].items() if k in range(1, 13) and x > 0])
if month_count > 0:
for sub_series in ['EventRegistrations', 'Registrations', 'Hours', 'StudentHours']:
yearTotals[sub_series][year]['Average'] = (
yearTotals[sub_series][year]['Total'] / float(month_count)
)
yearTotals['AvgStudents'][year]['Average'] = (
yearTotals['StudentHours'][year]['Total'] / float(yearTotals['Hours'][year]['Total'])
)
# Monthly Averages
for month in range(1, 13):
yearly_hours_data = [
x[month] for k, x in yearTotals['Hours'].items() if
k in all_years and x[month] > 0
]
yearly_studenthours_data = [
x[month] for k, x in yearTotals['StudentHours'].items() if
k in all_years and x[month] > 0
]
yearly_eventregistrations_data = [
x[month] for k, x in yearTotals['EventRegistrations'].items() if
k in all_years and yearTotals['Hours'][k][month] > 0
]
yearly_registrations_data = [
x[month] for k, x in yearTotals['Registrations'].items() if
k in all_years and yearTotals['Hours'][k][month] > 0
]
year_count = len(yearly_hours_data)
if year_count > 0:
yearTotals['EventRegistrations']['MonthlyAverage'][month] = (
sum([x for x in yearly_eventregistrations_data]) / year_count
)
yearTotals['Registrations']['MonthlyAverage'][month] = (
sum([x for x in yearly_registrations_data]) / year_count
)
yearTotals['Hours']['MonthlyAverage'][month] = (
sum([x for x in yearly_hours_data]) / year_count
)
yearTotals['StudentHours']['MonthlyAverage'][month] = (
sum([x for x in yearly_studenthours_data]) / year_count
)
yearTotals['AvgStudents']['MonthlyAverage'][month] = (
yearTotals['StudentHours']['MonthlyAverage'][month] /
float(yearTotals['Hours']['MonthlyAverage'][month])
)
return yearTotals
@staff_member_required
def MonthlyPerformanceJSON(request):
series = request.GET.get('series')
if series not in ['AvgStudents', 'Registrations', 'EventRegistrations', 'Hours', 'StudentHours']:
series = 'EventRegistrations'
yearTotals = getMonthlyPerformance()[series]
# Return JSON as lists, not as dictionaries, for c3.js
# yearTotals_list = [dict(v, **{'year':k}) for k, v in yearTotals.items()]
# Now make the lists so that there is one row per month, not one row per year,
# to make things easier for working with c3.js.yearTotals
monthTotals_list = []
years = list(set([k for k, v in yearTotals.items()]))
# Only include calendar months for graphing
for month in range(1, 13):
this_month_data = {'month': month, 'month_name': month_name[month]}
for year in years:
this_month_data[year] = yearTotals[year].get(month)
monthTotals_list.append(this_month_data)
monthTotals_list_sorted = sorted(monthTotals_list, key=lambda k: k['month'])
return JsonResponse(monthTotals_list_sorted, safe=False)
@staff_member_required
def MonthlyPerformanceCSV(request):
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="monthlyPerformance.csv"'
writer = csv.writer(response)
yearTotals = getMonthlyPerformance()
all_years = [k for k in yearTotals['Hours'].keys() if k != 'MonthlyAverage']
all_years.sort()
# Write headers first
headers_list = ['Data Series', 'Month', 'All-Time Avg.']
for year in all_years:
headers_list.append(str(year))
writer.writerow(headers_list)
# Note: These are not translated because the chart Javascript looks for these keys
yearTotals_keys = {
'Total Student-Hours': 'StudentHours',
'Avg. Students/Hour': 'AvgStudents',
'Hours of Instruction': 'Hours',
'Unique Registrations': 'Registrations',
'Total Students': 'EventRegistrations',
}
for series, key in yearTotals_keys.items():
for month in range(1, 13):
this_row = [
series,
month_name[month],
yearTotals[key]['MonthlyAverage'][month],
]
for year in all_years:
this_row.append(yearTotals[key][year][month])
writer.writerow(this_row)
return response
def getLocationPerformance(startDate=None, endDate=None):
timeFilters = {}
if startDate:
timeFilters['event__startTime__gte'] = startDate
if endDate:
timeFilters['event__startTime__lte'] = endDate
seriesCounts = list(Location.objects.values_list('name').filter(
**timeFilters
).distinct().annotate(Count('event')))
timeFilters.update({
'event__eventregistration__dropIn': False,
'event__eventregistration__cancelled': False,
'event__eventregistration__registration__final': True,
})
eventRegistrationCounts = list(Location.objects.values_list('name').filter(
**timeFilters
).distinct().annotate(Count('event')))
results = {}
for list_item in seriesCounts:
results[list_item[0]] = {'series': list_item[1]}
for list_item in eventRegistrationCounts:
results[list_item[0]].update({'registrations': list_item[1]})
return results
@staff_member_required
def LocationPerformanceJSON(request):
startDate = getDateTimeFromGet(request, 'startDate')
endDate = getDateTimeFromGet(request, 'endDate')
results = getLocationPerformance(startDate, endDate)
# Needs to return a list, not a dict
results_list = [dict({'name': k}, **dict(v)) for k, v in results.items()]
return JsonResponse(results_list, safe=False)
@staff_member_required
def LocationPerformanceCSV(request):
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="locationPerformance.csv"'
startDate = getDateTimeFromGet(request, 'startDate')
endDate = getDateTimeFromGet(request, 'endDate')
results = getLocationPerformance(startDate, endDate)
writer = csv.writer(response)
# Note: These are not translated because the chart Javascript looks for these keys
writer.writerow(['Location', '# Series', '# Students', 'Avg. Students/Series'])
for location, data in results.items():
writer.writerow([
location, # The location name
data.get('series', 0), # The num. of series taught there
data.get('registrations', 0), # The num. of students taught there
float(data.get('registrations', 0)) / data.get('series', 1)
])
return response
def getRegistrationTypesAveragesByYear():
srs = EventRegistration.objects.filter(registration__final=True)
eligible_years = [x['event__year'] for x in srs.values('event__year').annotate(Count('event__year'))]
eligible_years.sort()
year_averages = []
for year in eligible_years:
this_year_results = srs.filter(event__year=year).annotate(
student=Case(When(registration__student=True, then=100), default=0, output_field=IntegerField()),
door=Case(When(registration__payAtDoor=False, then=100), default=0, output_field=IntegerField()),
droppedIn=Case(When(dropIn=True, then=100), default=0, output_field=IntegerField()),
cancellation=Case(When(cancelled=True, then=100), default=0, output_field=IntegerField()),
).aggregate(
Student=Avg('student'), Door=Avg('door'), DropIn=Avg('droppedIn'),
Cancelled=Avg('cancellation'), year=Min('event__year')
)
year_averages.append(this_year_results)
return year_averages
@staff_member_required
def RegistrationTypeAveragesJSON(request):
results = getRegistrationTypesAveragesByYear()
return JsonResponse(results, safe=False)
def getRegistrationReferralCounts(startDate, endDate):
'''
When a user accesses the class registration page through a
referral URL, the marketing_id gets saved in the extra JSON
data associated with that registration. This just returns
counts associated with how often given referral terms appear
in a specified time window (i.e. how many people signed up
by clicking through a referral button).
'''
timeFilters = {'final': True}
if startDate:
timeFilters['dateTime__gte'] = startDate
if endDate:
timeFilters['dateTime__lt'] = endDate
regs = Registration.objects.filter(**timeFilters)
counter = Counter(
[x.data.get('marketing_id', None) for x in regs if isinstance(x.data, dict)] +
[None for x in regs if not isinstance(x.data, dict)]
)
results = [{'code': k or _('None'), 'count': v} for k, v in counter.items()]
return results
@staff_member_required
def RegistrationReferralCountsJSON(request):
startDate = getDateTimeFromGet(request, 'startDate')
endDate = getDateTimeFromGet(request, 'endDate')
results = getRegistrationReferralCounts(startDate, endDate)
return JsonResponse(results, safe=False)
@staff_member_required
def MultiRegistrationJSON(request):
startDate = getDateTimeFromGet(request, 'startDate')
endDate = getDateTimeFromGet(request, 'endDate')
timeFilters = {'final': True}
if startDate:
timeFilters['dateTime__gte'] = startDate
if endDate:
timeFilters['dateTime__lte'] = endDate
er_counter_sorted = sorted(Counter(
Registration.objects.filter(**timeFilters).annotate(
er_count=Count('eventregistration')).values_list('er_count', flat=True)
).items())
results_list = []
cumulative = 0
total = sum([x[1] for x in er_counter_sorted])
for x in er_counter_sorted:
cumulative += x[1]
results_list.append({
'items': x[0], 'count': x[1], 'cumulative': cumulative,
'pct': 100 * (x[1] / total), 'cumulative_pct': 100 * (cumulative / total)
})
return JsonResponse(results_list, safe=False)
@staff_member_required
def RegistrationHoursJSON(request):
startDate = getDateTimeFromGet(request, 'startDate')
endDate = getDateTimeFromGet(request, 'endDate')
timeFilters = {'final': True}
if startDate:
timeFilters['dateTime__gte'] = startDate
if endDate:
timeFilters['dateTime__lte'] = endDate
hours_counter_sorted = sorted(Counter(
Registration.objects.filter(**timeFilters).annotate(
er_sum=Sum('eventregistration__event__duration')).values_list('er_sum', flat=True)
).items())
results_list = []
cumulative = 0
total = sum([x[1] for x in hours_counter_sorted])
for x in hours_counter_sorted:
cumulative += x[1]
results_list.append({
'hours': x[0], 'count': x[1], 'cumulative': cumulative,
'pct': 100 * (x[1] / total), 'cumulative_pct': 100 * (cumulative / total)
})
return JsonResponse(results_list, safe=False)
@staff_member_required
def AdvanceRegistrationDaysJSON(request):
startDate = getDateTimeFromGet(request, 'startDate')
endDate = getDateTimeFromGet(request, 'endDate')
timeFilters = {'final': True}
if startDate:
timeFilters['dateTime__gte'] = startDate
if endDate:
timeFilters['dateTime__lte'] = endDate
advance_days_sorted = sorted(Counter(
Registration.objects.filter(**timeFilters).annotate(
min_start=Min('eventregistration__event__startTime')
).annotate(
advance=(TruncDate('dateTime') - TruncDate('min_start'))
).values_list(
'advance', flat=True)
).items())
results_list = []
cumulative = 0
total = sum([x[1] for x in advance_days_sorted])
for x in advance_days_sorted:
cumulative += x[1]
results_list.append({
'days': x[0], 'count': x[1], 'cumulative': cumulative,
'pct': 100 * (x[1] / total), 'cumulative_pct': 100 * (cumulative / total)
})
return JsonResponse(results_list, safe=False)
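# Hedged sketch (not part of the original module): the Counter-to-cumulative
# percentage transform shared by the three JSON views above, on made-up data.
#
#     from collections import Counter
#     counts = sorted(Counter([1, 1, 2, 2, 2, 5]).items())  # [(1, 2), (2, 3), (5, 1)]
#     total, cumulative, rows = sum(c for _, c in counts), 0, []
#     for value, count in counts:
#         cumulative += count
#         rows.append({'items': value, 'count': count,
#                      'pct': 100 * count / total,
#                      'cumulative_pct': 100 * cumulative / total})
#     # rows[-1]['cumulative_pct'] == 100.0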
@staff_member_required
def getGeneralStats(request):
# total number of students:
totalStudents = Customer.objects.filter(eventregistration__isnull=False).distinct().count()
numSeries = Series.objects.distinct().count()
totalSeriesRegs = EventRegistration.objects.filter(
dropIn=False, cancelled=False, registration__final=True
).values(
'event', 'customer__user__email'
).distinct().count()
# time studio in existence:
firstClass = EventOccurrence.objects.order_by('startTime').values('startTime').first()
if firstClass:
firstStartTime = firstClass['startTime']
else:
firstStartTime = timezone.now()
timeDiff = relativedelta(timezone.now(), firstStartTime)
totalTime = '%s years, %s months, %s days' % (timeDiff.years, timeDiff.months, timeDiff.days)
return (totalStudents, numSeries, totalSeriesRegs, totalTime)
@staff_member_required
def getBestCustomersJSON(request):
bestCustomersLastTwelveMonths = Customer.objects.values(
'first_name', 'last_name'
).filter(**{
'eventregistration__registration__dateTime__gte': ensure_timezone(
datetime(timezone.now().year - 1, timezone.now().month, timezone.now().day)
),
'eventregistration__dropIn': False, 'eventregistration__cancelled': False,
'eventregistration__registration__final': True
}).annotate(Count('eventregistration')).order_by('-eventregistration__count')[:10]
bestCustomersAllTime = Customer.objects.values(
'first_name', 'last_name'
).filter(**{
'eventregistration__dropIn': False,
'eventregistration__cancelled': False,
'eventregistration__registration__final': True
}).annotate(Count('eventregistration')).order_by('-eventregistration__count')[:10]
mostActiveTeachersThisYear = SeriesTeacher.objects.filter(
event__year=timezone.now().year
).exclude(
staffMember__instructor__status=Instructor.InstructorStatus.guest
).values_list(
'staffMember__firstName', 'staffMember__lastName'
).annotate(Count('staffMember')).order_by('-staffMember__count')
bestCustomerData = {
'bestCustomersLastTwelveMonths': list(bestCustomersLastTwelveMonths),
'bestCustomersAllTime': list(bestCustomersAllTime),
'mostActiveTeachersThisYear': list(mostActiveTeachersThisYear),
}
return JsonResponse(bestCustomerData)
| 35.599362
| 109
| 0.641989
|
45068171cdf2fc665b83f7d263e506d6b5389435
| 4,454
|
py
|
Python
|
test/test_reversi.py
|
iBug/PyReversi
|
a8b469a5d5bf0f1f0bf5ffda1efeb9b55d5808bd
|
[
"MIT"
] | 1
|
2022-02-22T00:38:30.000Z
|
2022-02-22T00:38:30.000Z
|
test/test_reversi.py
|
iBug/PyReversi
|
a8b469a5d5bf0f1f0bf5ffda1efeb9b55d5808bd
|
[
"MIT"
] | 1
|
2021-02-23T00:03:13.000Z
|
2021-02-23T07:49:45.000Z
|
test/test_reversi.py
|
iBug/PyReversi
|
a8b469a5d5bf0f1f0bf5ffda1efeb9b55d5808bd
|
[
"MIT"
] | null | null | null |
import pytest
import reversi
from reversi import Reversi
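# The tests below assume the conventional 8x8 opening position, with only the four centre squares pre-filled (see test_reversi_at and test_reversi_chessCount for the exact layout).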
def test_globals():
assert reversi.BS == 8
assert (reversi.EMPTY, reversi.BLACK, reversi.WHITE) == (0, 1, 2)
def test_reversi_reset():
game = Reversi()
game.reset()
assert game.current == reversi.BLACK
assert len(game.history) == 0
assert len(game.board) == 8
assert all(len(col) == 8 for col in game.board)
# Ignoring board contents; they may change some day
def test_reversi_toggle():
game = Reversi()
assert game.current == reversi.BLACK
assert game.toggle() is None
assert game.current == reversi.WHITE
@pytest.mark.parametrize("x, y, dx, dy, player, expected", [
(2, 4, 1, 0, reversi.BLACK, True),
(2, 4, 1, 0, reversi.WHITE, False),
(3, 2, 0, 1, reversi.BLACK, False),
(3, 2, 0, 1, reversi.WHITE, True),
(2, 4, 1, 0, None, True),
])
def test_reversi_check(x, y, dx, dy, player, expected):
game = Reversi()
game.reset()
assert game.check(x, y, dx, dy, player) == expected
@pytest.mark.parametrize("x, y, player, expected", [
(2, 4, reversi.BLACK, True),
(2, 4, reversi.WHITE, False),
(2, 4, None, True),
])
def test_reversi_canPut(x, y, player, expected):
game = Reversi()
game.reset()
assert game.canPut(x, y, player) == expected
def test_reversi_getAvailables():
game = Reversi()
game.reset()
assert sorted(game.getAvailables()) == sorted([(2, 4), (3, 5), (4, 2), (5, 3)])
game.toggle()
assert sorted(game.getAvailables()) == sorted([(2, 3), (4, 5), (3, 2), (5, 4)])
def test_reversi_any():
game = Reversi()
game.reset()
assert game.any()
assert game.any(reversi.BLACK)
assert game.any(reversi.WHITE)
game.board[3][3] = game.board[3][4] = game.board[4][3] = game.board[4][4] = reversi.EMPTY
assert not game.any(reversi.BLACK)
assert not game.any(reversi.WHITE)
def test_reversi_over():
game = Reversi()
game.reset()
assert not game.over
game.board[3][3] = game.board[3][4] = game.board[4][3] = game.board[4][4] = reversi.EMPTY
assert game.over
@pytest.mark.parametrize("x, y, expected", [
(3, 3, reversi.BLACK),
(3, 4, reversi.WHITE),
(3, 5, reversi.EMPTY),
])
def test_reversi_at(x, y, expected):
game = Reversi()
game.reset()
assert game.at(x, y) == expected
def test_reversi_lastChess():
game = Reversi()
game.reset()
assert game.lastChess is None
game.put(2, 4)
assert game.lastChess == (2, 4)
def test_reversi_chessCount():
game = Reversi()
game.reset()
assert game.chessCount == [60, 2, 2]
def test_reversi_put():
game = Reversi()
game.reset()
assert game.put(2, 4)
assert game.history[-1] == [(3, 4), (2, 4)]
assert not game.put(2, 4)
assert not game.put(0, 0)
assert not game.put((0, 0)) # Tuple unpacking test
assert game.put((2, 3))
def test_reversi_skipPut():
game = Reversi()
game.reset()
assert not game.skipPut()
game.board[3][3] = game.board[3][4] = game.board[4][3] = game.board[4][4] = reversi.EMPTY
assert game.skipPut()
assert game.history[-1] == []
def test_reversi_undo():
game_1, game_2 = Reversi(), Reversi()
game_1.reset()
game_2.reset()
assert game_1.board == game_2.board
assert game_1.history == game_2.history == []
assert game_1.undo() == (False, 0)
game_1.put(2, 4)
assert game_1.board != game_2.board
assert game_1.history != game_2.history
assert game_1.undo() == (True, 2)
assert game_1.board == game_2.board
assert game_1.history == game_2.history
game_1.history.append([])
game_1.toggle()
assert game_1.undo() == (True, 0)
def test_reversi_copy():
game = Reversi()
game.reset()
game.put(2, 4)
other = game.copy()
assert game is not other
assert game.board == other.board
assert all(a is not b for a, b in zip(game.board, other.board))
assert game.current == other.current
assert game.history == other.history
assert all(a is not b for a, b in zip(game.history, other.history))
def test_reversi_hash():
import random
game = Reversi()
s = set()
s.add(game)
assert len(s) == 1
for i in range(12):
game.put(random.choice(game.getAvailables()))
assert game not in s
s.add(game)
def test_reversi_repr():
game = Reversi()
game.reset()
assert str(game)
assert repr(game)
| 25.597701
| 93
| 0.619892
|
a2141c98d8ed6b02ef8a40b1ea423cca53a94198
| 29,439
|
py
|
Python
|
integration-test/1491-brazil-shields.py
|
roman-ianivskyy/vector-datasource
|
3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92
|
[
"MIT"
] | null | null | null |
integration-test/1491-brazil-shields.py
|
roman-ianivskyy/vector-datasource
|
3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92
|
[
"MIT"
] | null | null | null |
integration-test/1491-brazil-shields.py
|
roman-ianivskyy/vector-datasource
|
3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92
|
[
"MIT"
] | null | null | null |
from . import FixtureTest
class BrazilShieldTest(FixtureTest):
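# Each test builds a way (and sometimes route relations) inside Brazil and asserts that its 'ref' tag (e.g. 'BA-263') is split into a 'network' (e.g. 'BR:BA') and a 'shield_text' (e.g. '263') in the output 'roads' layer.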
def test_trans_amazonian(self):
import dsl
z, x, y = (16, 26409, 34070)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/258644490
dsl.way(258644490, dsl.tile_diagonal(z, x, y), {
'maxspeed': '110', 'lanes': '2', 'surface': 'asphalt',
'source': 'openstreetmap.org', 'oneway': 'yes',
'ref': 'BR-101;BR-230', 'highway': 'motorway',
}),
dsl.relation(1, {
'source:name': 'Lei 10.292/01',
'name': u'Rodovia Governador M\xe1rio Covas',
'type': 'route', 'route': 'road', 'wikipedia': 'pt:BR-101',
'note': u'BR-101 Regi\xe3o Nordeste',
'source': 'openstreetmap.org', 'wikidata': 'Q2877408',
'ref': 'BR-101', 'network': 'BR',
}, ways=[258644490]),
dsl.relation(2, {
'name:en': 'Trans-Amazonian highway',
'name': u'Rodovia Transamaz\xf4nica', 'type': 'route',
'route': 'road', 'wikipedia': 'pt:BR-230',
'source': 'openstreetmap.org', 'name:fr': 'Transamazonienne',
'wikidata': 'Q1569903', 'ref': 'BR-230', 'network': 'BR',
}, ways=[258644490]),
)
self.assert_has_feature(
z, x, y, 'roads', {
'id': 258644490,
'shield_text': '230', 'network': 'BR:Trans-Amazonian',
'all_networks': ['BR:Trans-Amazonian', 'BR'],
'all_shield_texts': ['230', '101'],
})
def test_df(self):
import dsl
z, x, y = (16, 24049, 35668)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/12524589
dsl.way(12524589, dsl.tile_diagonal(z, x, y), {
'bridge': 'yes', 'layer': '1', 'maxspeed': '60', 'lanes': '4',
'name': 'Ponte do Bragueto', 'surface': 'paved',
'source': 'openstreetmap.org', 'oneway': 'yes',
'sidewalk': 'right', 'ref': 'DF-002', 'highway': 'motorway',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 12524589, 'shield_text': '002', 'network': 'BR:DF'})
def test_ac(self):
import dsl
z, x, y = (16, 20489, 34659)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/31524493
dsl.way(31524493, dsl.tile_diagonal(z, x, y), {
'source': 'openstreetmap.org', 'ref': 'AC-040',
'highway': 'primary', 'IBGE:CD_ADMINIS': 'estadual',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 31524493, 'shield_text': '040', 'network': 'BR:AC'})
def test_al(self):
import dsl
z, x, y = (16, 26147, 34626)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/31532787
dsl.way(31532787, dsl.tile_diagonal(z, x, y), {
'source': 'openstreetmap.org', 'ref': 'AL-105',
'highway': 'primary', 'oneway': 'no',
}),
)
self.assert_has_feature(
z, x, y, 'roads', {
'id': 31532787,
'shield_text': '105',
'network': 'BR:AL',
})
def test_ap(self):
import dsl
z, x, y = (16, 23450, 32768)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/29111026
dsl.way(29111026, dsl.tile_diagonal(z, x, y), {
'source': 'openstreetmap.org', 'ref': 'AP-010',
'name': 'Rodovia Juscelino Kubitschek', 'highway': 'primary',
'surface': 'paved',
}),
)
self.assert_has_feature(
z, x, y, 'roads', {
'id': 29111026,
'shield_text': '010',
'network': 'BR:AP',
})
def test_am(self):
import dsl
z, x, y = (16, 21854, 33286)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/28958366
dsl.way(28958366, dsl.tile_diagonal(z, x, y), {
'source': 'openstreetmap.org', 'ref': 'AM-010',
'highway': 'secondary',
}),
)
self.assert_has_feature(
z, x, y, 'roads', {
'id': 28958366,
'shield_text': '010',
'network': 'BR:AM',
})
def test_ba(self):
import dsl
z, x, y = (16, 25332, 35512)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/8099519
dsl.way(8099519, dsl.tile_diagonal(z, x, y), {
'lanes': '2', 'name': u'Rodovia Serra do Mar\xe7al',
'surface': 'asphalt', 'source': 'openstreetmap.org',
'ref': 'BA-263', 'highway': 'primary',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 8099519, 'shield_text': '263', 'network': 'BR:BA'})
def test_ce(self):
import dsl
z, x, y = (16, 25662, 33678)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/23328809
dsl.way(23328809, dsl.tile_diagonal(z, x, y), {
'source': 'openstreetmap.org', 'ref': 'CE-060',
'highway': 'secondary', 'oneway': 'yes',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 23328809, 'shield_text': '060', 'network': 'BR:CE'})
def test_es(self):
import dsl
z, x, y = (16, 25390, 36607)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/24602299
dsl.way(24602299, dsl.tile_diagonal(z, x, y), {
'maxspeed': '60', 'name': 'Rodovia Jones dos Santos Neves',
'destination': 'Guarapari', 'surface': 'paved',
'source': 'openstreetmap.org', 'oneway': 'yes',
'ref': 'ES-480', 'highway': 'primary',
}),
dsl.relation(1, {
'name': 'Rodovia Jones dos Santos Neves', 'type': 'route',
'route': 'road', 'source': 'openstreetmap.org',
'ref': 'ES-480', 'network': 'BR:ES',
}, ways=[24602299]),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 24602299, 'shield_text': '480', 'network': 'BR:ES'})
def test_go(self):
import dsl
z, x, y = (16, 23822, 35860)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/24012217
dsl.way(24012217, dsl.tile_diagonal(z, x, y), {
'source': 'openstreetmap.org', 'ref': 'GO-536',
'highway': 'secondary', 'oneway': 'yes',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 24012217, 'shield_text': '536', 'network': 'BR:GO'})
def test_ma(self):
import dsl
z, x, y = (16, 24520, 33173)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/29137050
dsl.way(29137050, dsl.tile_diagonal(z, x, y), {
'bridge': 'yes', 'layer': '1', 'ref': 'MA-106',
'highway': 'primary', 'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 29137050, 'shield_text': '106', 'network': 'BR:MA'})
def test_mt(self):
import dsl
z, x, y = (16, 22466, 35738)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/28996480
dsl.way(28996480, dsl.tile_diagonal(z, x, y), {
'source': 'openstreetmap.org', 'ref': 'MT-451',
'highway': 'secondary',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 28996480, 'shield_text': '451', 'network': 'BR:MT'})
def test_ms(self):
import dsl
z, x, y = (16, 22323, 36340)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/29012329
dsl.way(29012329, dsl.tile_diagonal(z, x, y), {
'bridge': 'yes', 'layer': '1', 'ref': 'MS-228',
'highway': 'secondary', 'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 29012329, 'shield_text': '228', 'network': 'BR:MS'})
def test_mg(self):
import dsl
z, x, y = (16, 24770, 36461)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/4543343
dsl.way(4543343, dsl.tile_diagonal(z, x, y), {
'lanes': '2', 'name': u'Rua Jacu\xed',
'source': 'openstreetmap.org', 'oneway': 'no',
'ref': 'MG-020', 'highway': 'secondary',
}),
dsl.relation(1, {
'type': 'route', 'route': 'road', 'ref': 'MG-020',
'network': 'BR:MG', 'source': 'openstreetmap.org',
}, ways=[4543343]),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 4543343, 'shield_text': '020', 'network': 'BR:MG'})
def test_pa(self):
import dsl
z, x, y = (16, 24274, 32930)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/23983406
dsl.way(23983406, dsl.tile_diagonal(z, x, y), {
'source:highway': 'schema_br2013', 'maxspeed': '80',
'lanes': '1', 'surface': 'paved',
'source': 'openstreetmap.org', 'embankment': 'false',
'ref': 'PA-458', 'highway': 'secondary',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 23983406, 'shield_text': '458', 'network': 'BR:PA'})
def test_pb(self):
import dsl
z, x, y = (16, 25886, 34039)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/31514019
dsl.way(31514019, dsl.tile_diagonal(z, x, y), {
'horse': 'yes', 'maxspeed': '80', 'bicycle': 'yes',
'oneway': 'no', 'surface': 'asphalt', 'cycleway': 'no',
'access': 'yes', 'source': 'openstreetmap.org',
'IBGE:CD_ADMINIS': 'estadual', 'foot': 'yes', 'lanes': '2',
'sidewalk': 'none', 'ref': 'PB-366', 'highway': 'secondary',
}),
dsl.relation(1, {
'source': 'openstreetmap.org', 'route': 'road',
'ref': 'PB-366', 'network': 'BR:PB', 'type': 'route',
}, ways=[31514019]),
dsl.relation(2, {
'type': 'route', 'route': 'road', 'ref': 'BR-426',
'network': 'BR', 'source': 'openstreetmap.org',
}, ways=[31514019]),
)
self.assert_has_feature(
z, x, y, 'roads', {
'id': 31514019,
'shield_text': '426', 'network': 'BR',
'all_networks': ['BR', 'BR:PB'],
'all_shield_texts': ['426', '366'],
})
def test_pr(self):
import dsl
z, x, y = (16, 23900, 37556)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/25460043
dsl.way(25460043, dsl.tile_diagonal(z, x, y), {
'name': 'Rodovia Deputado Miguel Bufara', 'surface': 'asphalt',
'source': 'openstreetmap.org', 'oneway': 'no', 'ref': 'PR-408',
'highway': 'secondary',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 25460043, 'shield_text': '408', 'network': 'BR:PR'})
def test_pe(self):
import dsl
z, x, y = (16, 26383, 34306)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/23392694
dsl.way(23392694, dsl.tile_diagonal(z, x, y), {
'maxspeed': '30', 'surface': 'paved',
'source': 'openstreetmap.org', 'oneway': 'yes',
'ref': 'PE-038', 'highway': 'primary',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 23392694, 'shield_text': '038', 'network': 'BR:PE'})
def test_pi(self):
import dsl
z, x, y = (16, 24979, 33664)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/30844349
dsl.way(30844349, dsl.tile_diagonal(z, x, y), {
'lanes': '2', 'name': 'Rodovia Antonio Medeiros Filho',
'surface': 'asphalt', 'source': 'openstreetmap.org',
'oneway': 'no', 'ref': 'PI-112', 'highway': 'primary',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 30844349, 'shield_text': '112', 'network': 'BR:PI'})
def test_rj(self):
import dsl
z, x, y = (16, 24908, 36979)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/13076835
dsl.way(13076835, dsl.tile_diagonal(z, x, y), {
'source': 'openstreetmap.org', 'ref': 'RJ-107',
'name': u'Rua Lopes Trov\xe3o', 'highway': 'secondary',
}),
dsl.relation(1, {
'name': 'Caminho Novo', 'tourism': 'yes', 'route': 'road',
'source': 'openstreetmap.org', 'historic': 'yes',
'type': 'route',
}, ways=[13076835]),
)
# note: we don't pick up the tourist route
self.assert_has_feature(
z, x, y, 'roads',
{'id': 13076835, 'shield_text': '107', 'network': 'BR:RJ'})
def test_rn(self):
import dsl
z, x, y = (16, 26371, 33904)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/28659532
dsl.way(28659532, dsl.tile_diagonal(z, x, y), {
'shoulder': 'no', 'source:highway': 'schema_br2013',
'lanes': '2', 'name': 'RN-003',
'source:highway_classification': 'survey', 'surface': 'paved',
'source': 'openstreetmap.org', 'oneway': 'no', 'ref': 'RN-003',
'highway': 'secondary',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 28659532, 'shield_text': '003', 'network': 'BR:RN'})
def test_rs(self):
import dsl
z, x, y = (16, 23457, 38464)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/584381094
dsl.way(584381094, dsl.tile_diagonal(z, x, y), {
'name': u'Rodovia M\xe1rio Quintana', 'surface': 'paved',
'source': 'openstreetmap.org', 'oneway': 'yes',
'ref': 'RS-118', 'highway': 'primary_link',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 584381094, 'shield_text': '118', 'network': 'BR:RS'})
def test_ro(self):
import dsl
z, x, y = (16, 21488, 34875)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/31525107
dsl.way(31525107, dsl.tile_diagonal(z, x, y), {
'source': 'openstreetmap.org', 'ref': 'RO-267',
'highway': 'secondary',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 31525107, 'shield_text': '267', 'network': 'BR:RO'})
def test_rr(self):
import dsl
z, x, y = (16, 21616, 32213)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/31481157
dsl.way(31481157, dsl.tile_diagonal(z, x, y), {
'name': 'RR-205', 'IBGE:CD_ADMINIS': 'estadual',
'surface': 'asphalt', 'source': 'openstreetmap.org',
'oneway': 'no', 'ref': 'RR-205', 'highway': 'primary'}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 31481157, 'shield_text': '205', 'network': 'BR:RR'})
def test_sc(self):
import dsl
z, x, y = (16, 23936, 37994)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/10492763
dsl.way(10492763, dsl.tile_diagonal(z, x, y), {
'maxspeed': '50', 'name': 'Rodovia Admar Gonzaga',
'surface': 'asphalt', 'source': 'openstreetmap.org',
'postal_code': '88034-100', 'oneway': 'yes', 'ref': 'SC-404',
'highway': 'primary',
}),
dsl.relation(1, {
'name': 'SC-404', 'ref': 'SC-404', 'route': 'road',
'source': 'openstreetmap.org', 'type': 'route',
'network': 'BR:SC',
}, ways=[10492763]),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 10492763, 'shield_text': '404', 'network': 'BR:SC'})
def test_sp(self):
import dsl
z, x, y = (16, 24262, 37201)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/4273875
dsl.way(4273875, dsl.tile_diagonal(z, x, y), {
'maxspeed': '90', 'lanes': '2',
'name': 'Marginal Pinheiros - Expressa', 'surface': 'asphalt',
'source': 'openstreetmap.org', 'oneway': 'yes',
'ref': 'SP-015', 'highway': 'motorway',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 4273875, 'shield_text': '015', 'network': 'BR:SP'})
def test_se(self):
import dsl
z, x, y = (16, 26010, 34601)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/31529719
dsl.way(31529719, dsl.tile_diagonal(z, x, y), {
'source': 'openstreetmap.org', 'ref': 'SE-200',
'surface': 'asphalt', 'highway': 'primary',
'IBGE:CD_ADMINIS': 'estadual',
}),
dsl.relation(1, {
'type': 'route', 'route': 'road', 'ref': 'SE-200',
'network': 'SE', 'source': 'openstreetmap.org',
}, ways=[31529719]),
)
self.assert_has_feature(
z, x, y, 'roads', {
'id': 31529719,
'shield_text': '200', 'network': 'BR:SE',
'all_shield_texts': ['200'],
'all_networks': ['BR:SE'],
})
def test_to(self):
import dsl
z, x, y = (16, 23986, 34080)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/28782365
dsl.way(28782365, dsl.tile_diagonal(z, x, y), {
'source': 'openstreetmap.org', 'ref': 'TO-222',
'highway': 'primary',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 28782365, 'shield_text': '222', 'network': 'BR:TO'})
def test_br_AMG(self):
import dsl
z, x, y = (16, 24778, 36439)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/11507107
dsl.way(11507107, dsl.tile_diagonal(z, x, y), {
'source': 'openstreetmap.org', 'ref': 'AMG-0130',
'highway': 'secondary_link', 'oneway': 'yes',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 11507107, 'shield_text': '0130', 'network': 'BR:MG'})
def test_br_LMG(self):
import dsl
z, x, y = (16, 24759, 36447)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/4946285
dsl.way(4946285, dsl.tile_diagonal(z, x, y), {
'maxspeed': '60', 'name': 'Rua Padre Pedro Pinto',
'surface': 'asphalt', 'source': 'openstreetmap.org',
'oneway': 'yes', 'ref': 'LMG-806', 'highway': 'primary',
}),
)
self.assert_has_feature(
z, x, y, 'roads',
{'id': 4946285, 'shield_text': '806', 'network': 'BR:MG:local'})
def test_br_MGC(self):
import dsl
z, x, y = (16, 24603, 36244)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/31574746
dsl.way(31574746, dsl.tile_diagonal(z, x, y), {
'network': 'BR', 'IBGE:CD_ADMINIS': 'estadual',
'surface': 'paved', 'source': 'openstreetmap.org',
'incorrect:name': 'MGC-259', 'oneway': 'no',
'ref': 'MGC-259;BR-259', 'highway': 'primary',
}),
dsl.relation(1, {
'type': 'route', 'route': 'road', 'ref': 'BR-259',
'network': 'BR', 'source': 'openstreetmap.org',
}, ways=[31574746]),
)
self.assert_has_feature(
z, x, y, 'roads', {
'id': 31574746,
'all_shield_texts': ['259', '259'],
'all_networks': ['BR', 'BR:MG'],
})
def test_br_CMG(self):
import dsl
z, x, y = (16, 24787, 35853)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/31574975
dsl.way(31574975, dsl.tile_diagonal(z, x, y), {
'name': u'Avenida Governador Magalh\xe3es Pinto',
'IBGE:CD_ADMINIS': 'federal', 'surface': 'paved',
'source': 'openstreetmap.org', 'oneway': 'yes',
'ref': 'CMG-251;BR-251', 'highway': 'primary',
}),
)
self.assert_has_feature(
z, x, y, 'roads', {
'id': 31574975,
'all_shield_texts': ['251', '251'],
'all_networks': ['BR', 'BR:MG'],
})
def test_br_ERS(self):
import dsl
z, x, y = (16, 23101, 38967)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/20906150
dsl.way(20906150, dsl.tile_diagonal(z, x, y), {
'old_ref': 'RS-602', 'maxspeed': '80', 'surface': 'asphalt',
'source': 'openstreetmap.org', 'ref': 'ERS-602',
'highway': 'secondary',
}),
dsl.relation(1, {
'network': 'BR:RS', 'ref': 'ERS-602', 'route': 'road',
'surface': 'asphalt', 'source': 'openstreetmap.org',
'type': 'route',
}, ways=[20906150]),
)
self.assert_has_feature(
z, x, y, 'roads', {
'id': 20906150,
'shield_text': '602', 'network': 'BR:RS',
'all_networks': ['BR:RS'],
'all_shield_texts': ['602'],
})
def test_br_VRS(self):
import dsl
z, x, y = (16, 23312, 38227)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/26757190
dsl.way(26757190, dsl.tile_diagonal(z, x, y), {
'source': 'openstreetmap.org', 'ref': 'VRS-851',
'highway': 'secondary', 'oneway': 'yes',
}),
)
self.assert_has_feature(
z, x, y, 'roads', {
'id': 26757190, 'shield_text': '851',
'network': 'BR:RS',
})
def test_br_RSC(self):
import dsl
z, x, y = (16, 23450, 38314)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/25979338
dsl.way(25979338, dsl.tile_diagonal(z, x, y), {
'name': 'Rota do Sol', 'surface': 'paved',
'source': 'openstreetmap.org', 'oneway': 'no',
'ref': 'RSC-453', 'highway': 'primary',
}),
dsl.relation(1, {
'network': 'BR', 'ref': 'BR-453', 'route': 'road',
'wikipedia': 'pt:BR-453', 'source': 'openstreetmap.org',
'wikidata': 'Q2877442', 'type': 'route',
}, ways=[25979338]),
dsl.relation(2, {
'old_ref': 'RST-453', 'name': 'Rota do Sol', 'route': 'road',
'source:official_name': 'LO 11432/2000', 'type': 'route',
'official_name': 'Rota do Sol Euclides Triches',
'source': 'openstreetmap.org', 'ref': 'RSC-453',
'network': 'BR:RS',
}, ways=[25979338]),
)
self.assert_has_feature(
z, x, y, 'roads', {
'id': 25979338,
'shield_text': '453', 'network': 'BR',
'all_shield_texts': ['453', '453'],
'all_networks': ['BR', 'BR:RS']
})
def test_br_SPA(self):
import dsl
z, x, y = (16, 24194, 37330)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/4964220
dsl.way(4964220, dsl.tile_diagonal(z, x, y), {
'source': 'openstreetmap.org', 'ref': 'SPA-344/055',
'highway': 'primary',
}),
)
self.assert_has_feature(
z, x, y, 'roads', {
'id': 4964220,
'shield_text': '055', 'network': 'BR:SP',
'all_shield_texts': ['055', '344'],
'all_networks': ['BR:SP', 'BR:SP'],
})
def test_br_PRC(self):
import dsl
z, x, y = (16, 23383, 37517)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/31611447
dsl.way(31611447, dsl.tile_diagonal(z, x, y), {
'maxspeed': '110', 'surface': 'paved',
'source': 'openstreetmap.org', 'IBGE:CD_ADMINIS': 'federal',
'nat_ref': 'BR-466', 'ref': 'PRC-466', 'highway': 'primary',
}),
dsl.relation(1, {
'type': 'route', 'route': 'road', 'ref': 'BR-466',
'network': 'BR', 'source': 'openstreetmap.org',
}, ways=[31611447]),
)
self.assert_has_feature(
z, x, y, 'roads', {
'id': 31611447,
'shield_text': '466', 'network': 'BR',
'all_shield_texts': ['466', '466'],
'all_networks': ['BR', 'BR:PR']
})
def test_br_PLN(self):
import dsl
z, x, y = (16, 24178, 37017)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/116514858
dsl.way(116514858, dsl.tile_diagonal(z, x, y), {
'maxspeed': '40', 'name': u'Avenida Jo\xe3o Vieira',
'source': 'openstreetmap.org', 'postal_code': '13145-754',
'oneway': 'yes', 'ref': 'PLN-346', 'highway': 'secondary',
}),
)
self.assert_has_feature(
z, x, y, 'roads', {
'id': 116514858,
'shield_text': '346', 'network': 'BR:SP:PLN',
})
def test_br_SP_many(self):
import dsl
z, x, y = (16, 24051, 36887)
self.generate_fixtures(
dsl.is_in('BR', z, x, y),
# https://www.openstreetmap.org/way/258575188
dsl.way(258575188, dsl.tile_diagonal(z, x, y), {
'lanes': '1', 'name': 'Rodovia Municipal Domingos Innocentini',
'wikipedia': 'pt:Rodovia Municipal Domingos Innocentini',
'surface': 'asphalt', 'source': 'openstreetmap.org',
'oneway': 'yes', 'ref': 'SPA-149/215;SCA-040',
'highway': 'primary',
}),
)
self.assert_has_feature(
z, x, y, 'roads', {
'id': 258575188,
'shield_text': '149',
'network': 'BR:SP',
'all_networks': ['BR:SP', 'BR:SP', 'BR:SP:SCA'],
'all_shield_texts': ['149', '215', '040'],
})
| 33.955017
| 79
| 0.46007
|
f7d27e24e03dfb6b45a7e1e14c843cabdd03e346
| 39,132
|
py
|
Python
|
snakemake/executors/google_lifesciences.py
|
nigiord/snakemake
|
e5171490749378328799b184e0a1dcd0a984b18f
|
[
"MIT"
] | 1,326
|
2019-10-04T15:11:20.000Z
|
2022-03-31T18:39:40.000Z
|
snakemake/executors/google_lifesciences.py
|
nigiord/snakemake
|
e5171490749378328799b184e0a1dcd0a984b18f
|
[
"MIT"
] | 1,496
|
2019-10-04T15:15:12.000Z
|
2022-03-31T23:14:33.000Z
|
snakemake/executors/google_lifesciences.py
|
nigiord/snakemake
|
e5171490749378328799b184e0a1dcd0a984b18f
|
[
"MIT"
] | 375
|
2019-10-08T21:28:51.000Z
|
2022-03-28T18:44:36.000Z
|
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2021, Johannes Köster"
__email__ = "johannes.koester@uni-due.de"
__license__ = "MIT"
import logging
import os
import sys
import time
import shutil
import tarfile
import tempfile
from collections import namedtuple
import uuid
import re
import math
from snakemake.logging import logger
from snakemake.exceptions import print_exception
from snakemake.exceptions import log_verbose_traceback
from snakemake.exceptions import WorkflowError
from snakemake.executors import ClusterExecutor, sleep
from snakemake.common import get_container_image, get_file_hash
from snakemake.resources import DefaultResources
# https://github.com/googleapis/google-api-python-client/issues/299#issuecomment-343255309
logging.getLogger("googleapiclient.discovery_cache").setLevel(logging.ERROR)
GoogleLifeSciencesJob = namedtuple(
"GoogleLifeSciencesJob", "job jobname jobid callback error_callback"
)
class GoogleLifeSciencesExecutor(ClusterExecutor):
"""the GoogleLifeSciences executor uses Google Cloud Storage, and
Compute Engine paired with the Google Life Sciences API.
https://cloud.google.com/life-sciences/docs/quickstart
"""
def __init__(
self,
workflow,
dag,
cores,
jobname="snakejob.{name}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
container_image=None,
regions=None,
location=None,
cache=False,
latency_wait=3,
local_input=None,
restart_times=None,
exec_job=None,
max_status_checks_per_second=1,
preemption_default=None,
preemptible_rules=None,
):
# Attach variables for easy access
self.workflow = workflow
self.quiet = quiet
self.workdir = os.path.dirname(self.workflow.persistence.path)
self._save_storage_cache = cache
# Relative path for running on instance
self._set_snakefile()
# Prepare workflow sources for build package
self._set_workflow_sources()
exec_job = exec_job or (
"snakemake {target} --snakefile %s "
"--force --cores {cores} --keep-target-files --keep-remote "
"--latency-wait {latency_wait} --scheduler {workflow.scheduler_type} "
"--attempt 1 {use_threads} --max-inventory-time 0 "
"{overwrite_config} {rules} --nocolor "
"--notemp --no-hooks --nolock " % self.snakefile
)
# Set preemptible instances
self._set_preemptible_rules(preemption_default, preemptible_rules)
# IMPORTANT: using the Compute Engine API and not k8s == no support for secrets
self.envvars = list(self.workflow.envvars) or []
# Quit early if we can't authenticate
self._get_services()
self._get_bucket()
# Akin to Kubernetes, create a run namespace, default container image
self.run_namespace = str(uuid.uuid4())
self.container_image = container_image or get_container_image()
self.regions = regions or ["us-east1", "us-west1", "us-central1"]
# The project name is required, either from client or environment
self.project = (
os.environ.get("GOOGLE_CLOUD_PROJECT") or self._bucket_service.project
)
# Determine API location based on user preference, and then regions
self._set_location(location)
# Tell the user right away the regions, location, and container
logger.debug("regions=%s" % self.regions)
logger.debug("location=%s" % self.location)
logger.debug("container=%s" % self.container_image)
# Keep track of build packages to clean up at shutdown, and generate the source package
self._build_packages = set()
targz = self._generate_build_source_package()
self._upload_build_source_package(targz)
# we need to add custom
# default resources depending on the instance requested
self.default_resources = None
super().__init__(
workflow,
dag,
None,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
restart_times=restart_times,
exec_job=exec_job,
assume_shared_fs=False,
max_status_checks_per_second=10,
)
def _get_services(self):
"""use the Google Discovery Build to generate API clients
for Life Sciences, and use the google storage python client
for storage.
"""
from googleapiclient.discovery import build as discovery_build
from oauth2client.client import (
GoogleCredentials,
ApplicationDefaultCredentialsError,
)
from google.cloud import storage
# Credentials must be exported to environment
try:
creds = GoogleCredentials.get_application_default()
except ApplicationDefaultCredentialsError as ex:
log_verbose_traceback(ex)
raise ex
# Discovery clients for Google Cloud Storage and Life Sciences API
self._storage_cli = discovery_build(
"storage", "v1", credentials=creds, cache_discovery=False
)
self._compute_cli = discovery_build(
"compute", "v1", credentials=creds, cache_discovery=False
)
self._api = discovery_build(
"lifesciences", "v2beta", credentials=creds, cache_discovery=False
)
self._bucket_service = storage.Client()
def _get_bucket(self):
"""get a connection to the storage bucket (self.bucket) and exit
if the name is taken or otherwise invalid.
Parameters
==========
workflow: the workflow object to derive the prefix from
"""
import google
# Hold path to requested subdirectory and main bucket
bucket_name = self.workflow.default_remote_prefix.split("/")[0]
self.gs_subdir = re.sub(
"^{}/".format(bucket_name), "", self.workflow.default_remote_prefix
)
self.gs_logs = os.path.join(self.gs_subdir, "google-lifesciences-logs")
# Case 1: The bucket already exists
try:
self.bucket = self._bucket_service.get_bucket(bucket_name)
# Case 2: The bucket needs to be created
except google.cloud.exceptions.NotFound:
self.bucket = self._bucket_service.create_bucket(bucket_name)
# Case 3: The bucket name is already taken or otherwise invalid
except Exception as ex:
logger.error(
"Cannot get or create bucket {}: {}".format(bucket_name, ex)
)
log_verbose_traceback(ex)
raise ex
logger.debug("bucket=%s" % self.bucket.name)
logger.debug("subdir=%s" % self.gs_subdir)
logger.debug("logs=%s" % self.gs_logs)
def _set_location(self, location=None):
"""The location is where the Google Life Sciences API is located.
This can be meaningful if the requester has data residency
requirements or multi-zone needs. To determine this value,
we first use the locations API to determine locations available,
and then compare them against:
1. user specified location or prefix
2. regions having the same prefix
3. if neither can be satisfied, we throw an error.
"""
# Derive available locations
# See https://cloud.google.com/life-sciences/docs/concepts/locations
locations = (
self._api.projects()
.locations()
.list(name="projects/{}".format(self.project))
.execute()
)
locations = {x["locationId"]: x["name"] for x in locations.get("locations", [])}
# Alert the user about locations available
logger.debug("locations-available:\n%s" % "\n".join(locations))
# If no locations, there is something wrong
if not locations:
raise WorkflowError("No locations found for Google Life Sciences API.")
# First pass, attempt to match the user-specified location (or prefix)
if location:
if location in locations:
self.location = locations[location]
return
# It could be that a prefix was provided
for contender in locations:
if contender.startswith(location):
self.location = locations[contender]
return
# If we get here and no match, alert user.
raise WorkflowError(
"Location or prefix requested %s is not available." % location
)
# If we get here, we need to select location from regions
for region in self.regions:
if region in locations:
self.location = locations[region]
return
# If we get here, choose based on prefix
prefixes = set([r.split("-")[0] for r in self.regions])
regexp = "^(%s)" % "|".join(prefixes)
for location in locations:
if re.search(regexp, location):
self.location = locations[location]
return
# If we get here, total failure of finding location
raise WorkflowError(
" No locations available for regions!"
" Please specify a location with --google-lifesciences-location "
" or extend --google-lifesciences-regions to find a Life Sciences location."
)
def shutdown(self):
"""shutdown deletes build packages if the user didn't request to clean
up the cache. At this point we've already cancelled running jobs.
"""
from google.api_core import retry
from snakemake.remote.GS import google_cloud_retry_predicate
@retry.Retry(predicate=google_cloud_retry_predicate)
def _shutdown():
# Delete build source packages only if the user requested no cache
if self._save_storage_cache:
logger.debug("Requested to save workflow sources, skipping cleanup.")
else:
for package in self._build_packages:
blob = self.bucket.blob(package)
if blob.exists():
logger.debug("Deleting blob %s" % package)
blob.delete()
# perform additional steps on shutdown if necessary
_shutdown()
super().shutdown()
def cancel(self):
"""cancel execution, usually by way of control+c. Cleanup is done in
shutdown (deleting cached workdirs in Google Cloud Storage).
"""
import googleapiclient
# projects.locations.operations/cancel
operations = self._api.projects().locations().operations()
for job in self.active_jobs:
request = operations.cancel(name=job.jobname)
logger.debug("Cancelling operation {}".format(job.jobid))
try:
self._retry_request(request)
except (Exception, BaseException, googleapiclient.errors.HttpError):
continue
self.shutdown()
def get_available_machine_types(self):
"""Using the regions available at self.regions, use the GCP API
to retrieve a lookup dictionary of all available machine types.
"""
# Regular expression to determine if zone in region
regexp = "^(%s)" % "|".join(self.regions)
# Retrieve zones, filter down to selected regions
zones = self._retry_request(
self._compute_cli.zones().list(project=self.project)
)
zones = [z for z in zones["items"] if re.search(regexp, z["name"])]
# Retrieve machine types available across zones
# https://cloud.google.com/compute/docs/regions-zones/
lookup = {}
for zone in zones:
request = self._compute_cli.machineTypes().list(
project=self.project, zone=zone["name"]
)
lookup[zone["name"]] = self._retry_request(request)["items"]
# Only keep those that are shared, use last zone as a base
machine_types = {mt["name"]: mt for mt in lookup[zone["name"]]}
del lookup[zone["name"]]
# Update final list based on the remaining
to_remove = set()
for zone, types in lookup.items():
names = [x["name"] for x in types]
names = [name for name in names if "micro" not in name]
names = [name for name in names if not re.search("^(e2|m1)", name)]
for machine_type in list(machine_types.keys()):
if machine_type not in names:
to_remove.add(machine_type)
for machine_type in to_remove:
del machine_types[machine_type]
return machine_types
def _add_gpu(self, gpu_count):
"""Add a number of NVIDIA gpus to the current executor. This works
by way of adding nvidia_gpu to the job default resources, and also
changing the default machine type prefix to n1, which is currently
the only instance family that supports GPUs with the Life Sciences API.
"""
if not gpu_count or gpu_count == 0:
return
logger.debug(
"found resource request for {} GPUs. This will limit to n1 "
"instance types.".format(gpu_count)
)
self.default_resources.set_resource("nvidia_gpu", gpu_count)
self._machine_type_prefix = self._machine_type_prefix or ""
if not self._machine_type_prefix.startswith("n1"):
self._machine_type_prefix = "n1"
def _set_preemptible_rules(self, preemption_default=None, preemptible_rules=None):
"""define a lookup dictionary for preemptible instance retries, which
is supported by the Google Life Science API. The user can set a default
for all steps, specify per step, or define a default for all steps
that aren't individually customized.
"""
self.preemptible_rules = {}
# If a default is defined, we apply it to all the rules
if preemption_default is not None:
self.preemptible_rules = {
rule.name: preemption_default for rule in self.workflow.rules
}
# Now update custom set rules
if preemptible_rules is not None:
for rule in preemptible_rules:
rule_name, restart_times = rule.strip().split("=")
self.preemptible_rules[rule_name] = int(restart_times)
# Ensure we set the number of restart times for each rule
for rule_name, restart_times in self.preemptible_rules.items():
rule = self.workflow.get_rule(rule_name)
rule.restart_times = restart_times
def _generate_job_resources(self, job):
"""given a particular job, generate the resources that it needs,
including default regions and the virtual machine configuration
"""
# Right now, do a best effort mapping of resources to instance types
cores = job.resources.get("_cores", 1)
mem_mb = job.resources.get("mem_mb", 15360)
# IOPS performance proportional to disk size
disk_mb = job.resources.get("disk_mb", 512000)
# Convert mb to gb
disk_gb = math.ceil(disk_mb / 1024)
# Look for if the user wants an nvidia gpu
gpu_count = job.resources.get("nvidia_gpu") or job.resources.get("gpu")
gpu_model = job.resources.get("gpu_model")
# If a gpu model is specified without a count, we assume 1
if gpu_model and not gpu_count:
gpu_count = 1
# Update default resources using decided memory and disk
self.default_resources = DefaultResources(
from_other=self.workflow.default_resources
)
self.default_resources.set_resource("mem_mb", mem_mb)
self.default_resources.set_resource("disk_mb", disk_mb)
# Job resource specification can be overridden by gpu preferences
self.machine_type_prefix = job.resources.get("machine_type")
# If gpu wanted, limit to N1 general family, and update arguments
if gpu_count:
self._add_gpu(gpu_count)
machine_types = self.get_available_machine_types()
# Alert the user of machine_types available before filtering
# https://cloud.google.com/compute/docs/machine-types
logger.debug(
"found {} machine types across regions {} before filtering "
"to increase selection, define fewer regions".format(
len(machine_types), self.regions
)
)
# First pass - eliminate anything that is too low in cpu/memory
keepers = dict()
# Also keep track of max cpus and memory, in case none available
max_cpu = 1
max_mem = 15360
for name, machine_type in machine_types.items():
max_cpu = max(max_cpu, machine_type["guestCpus"])
max_mem = max(max_mem, machine_type["memoryMb"])
if machine_type["guestCpus"] < cores or machine_type["memoryMb"] < mem_mb:
continue
keepers[name] = machine_type
# If a prefix is set, filter down to it
if self.machine_type_prefix:
machine_types = keepers
keepers = dict()
for name, machine_type in machine_types.items():
if name.startswith(self.machine_type_prefix):
keepers[name] = machine_type
# If we don't have any contenders, workflow error
if not keepers:
if self.machine_type_prefix:
raise WorkflowError(
"Machine prefix {prefix} is too strict, or the resources cannot "
" be satisfied, so there are no options "
"available.".format(prefix=self.machine_type_prefix)
)
else:
raise WorkflowError(
"You requested {requestMemory} MB memory, {requestCpu} cores. "
"The maximum available are {availableMemory} MB memory and "
"{availableCpu} cores. These resources cannot be satisfied. "
"Please consider reducing the resource requirements of the "
"corresponding rule.".format(
requestMemory=mem_mb,
requestCpu=cores,
availableCpu=max_cpu,
availableMemory=max_mem,
)
)
# Now find (quasi) minimal to satisfy constraints
machine_types = keepers
# Select the first as the "smallest"
smallest = list(machine_types.keys())[0]
min_cores = machine_types[smallest]["guestCpus"]
min_mem = machine_types[smallest]["memoryMb"]
for name, machine_type in machine_types.items():
if (
machine_type["guestCpus"] < min_cores
and machine_type["memoryMb"] < min_mem
):
smallest = name
min_cores = machine_type["guestCpus"]
min_mem = machine_type["memoryMb"]
selected = machine_types[smallest]
logger.debug(
"Selected machine type {}:{}".format(smallest, selected["description"])
)
# We add the size for the image itself (10 GB) to bootDiskSizeGb
virtual_machine = {
"machineType": smallest,
"labels": {"app": "snakemake"},
"bootDiskSizeGb": disk_gb + 10,
"preemptible": job.rule.name in self.preemptible_rules,
}
# If the user wants gpus, add accelerators here
if gpu_count:
accelerator = self._get_accelerator(
gpu_count, zone=selected["zone"], gpu_model=gpu_model
)
virtual_machine["accelerators"] = [
{"type": accelerator["name"], "count": gpu_count}
]
resources = {"regions": self.regions, "virtualMachine": virtual_machine}
return resources
def _get_accelerator(self, gpu_count, zone, gpu_model=None):
"""Get an appropriate accelerator for a GPU given a zone selection.
Currently Google offers NVIDIA Tesla T4 (likely the best),
NVIDIA P100, and the same T4 for a graphical workstation. Since
this isn't a graphical workstation use case, we choose an
accelerator whose maximumCardsPerInstance is >= the requested count.
"""
if not gpu_count or gpu_count == 0:
return
accelerators = self._retry_request(
self._compute_cli.acceleratorTypes().list(project=self.project, zone=zone)
)
# Filter down to those with greater than or equal to needed gpus
keepers = {}
for accelerator in accelerators.get("items", []):
# Eliminate virtual workstations (vws) and models that don't match user preference
if (gpu_model and accelerator["name"] != gpu_model) or accelerator[
"name"
].endswith("vws"):
continue
if accelerator["maximumCardsPerInstance"] >= gpu_count:
keepers[accelerator["name"]] = accelerator
# If no matches available, exit early
if not keepers:
if gpu_model:
raise WorkflowError(
"An accelerator in zone {zone} with model {model} cannot "
" be satisfied, so there are no options "
"available.".format(zone=zone, model=gpu_model)
)
else:
raise WorkflowError(
"An accelerator in zone {zone} cannot be satisifed, so "
"there are no options available.".format(zone=zone)
)
# Find smallest (in future the user might have preference for the type)
smallest = list(keepers.keys())[0]
max_gpu = keepers[smallest]["maximumCardsPerInstance"]
# This should usually return P-100, which would be preference (cheapest)
for name, accelerator in keepers.items():
if accelerator["maximumCardsPerInstance"] < max_gpu:
smallest = name
max_gpu = accelerator["maximumCardsPerInstance"]
return keepers[smallest]
def _set_snakefile(self):
"""The snakefile must be a relative path, which should be derived
from the self.workflow.main_snakefile.
"""
assert os.path.exists(self.workflow.main_snakefile)
self.snakefile = self.workflow.main_snakefile.replace(self.workdir, "").strip(
os.sep
)
def _set_workflow_sources(self):
"""We only add files from the working directory that are config related
(e.g., the Snakefile or a config.yml equivalent), or checked into git.
"""
self.workflow_sources = []
for wfs in self.workflow.get_sources():
if os.path.isdir(wfs):
for (dirpath, dirnames, filenames) in os.walk(wfs):
self.workflow_sources.extend(
[
self.workflow.check_source_sizes(os.path.join(dirpath, f))
for f in filenames
]
)
else:
self.workflow_sources.append(
self.workflow.check_source_sizes(os.path.abspath(wfs))
)
def _generate_build_source_package(self):
"""in order for the instance to access the working directory in storage,
we need to upload it. This file is cleaned up at the end of the run.
We do this, and then obtain from the instance and extract.
"""
# Workflow sources for cloud executor must all be under same workdir root
for filename in self.workflow_sources:
if self.workdir not in filename:
raise WorkflowError(
"All source files must be present in the working directory, "
"{workdir} to be uploaded to a build package that respects "
"relative paths, but {filename} was found outside of this "
"directory. Please set your working directory accordingly, "
"and the path of your Snakefile to be relative to it.".format(
workdir=self.workdir, filename=filename
)
)
# We will generate a tar.gz package, renamed by hash
tmpname = next(tempfile._get_candidate_names())
targz = os.path.join(tempfile.gettempdir(), "snakemake-%s.tar.gz" % tmpname)
tar = tarfile.open(targz, "w:gz")
# Add all workflow_sources files
for filename in self.workflow_sources:
arcname = filename.replace(self.workdir + os.path.sep, "")
tar.add(filename, arcname=arcname)
tar.close()
# Rename based on hash, in case user wants to save cache
sha256 = get_file_hash(targz)
hash_tar = os.path.join(
self.workflow.persistence.aux_path, "workdir-{}.tar.gz".format(sha256)
)
# Only copy if we don't have it yet, clean up if we do
if not os.path.exists(hash_tar):
shutil.move(targz, hash_tar)
else:
os.remove(targz)
# We will clean these all up at shutdown
self._build_packages.add(hash_tar)
return hash_tar
def _upload_build_source_package(self, targz):
"""given a .tar.gz created for a workflow, upload it to source/cache
of Google storage, only if the blob doesn't already exist.
"""
from google.api_core import retry
from snakemake.remote.GS import google_cloud_retry_predicate
@retry.Retry(predicate=google_cloud_retry_predicate)
def _upload():
# Upload to temporary storage, only if doesn't exist
self.pipeline_package = "source/cache/%s" % os.path.basename(targz)
blob = self.bucket.blob(self.pipeline_package)
logger.debug("build-package=%s" % self.pipeline_package)
if not blob.exists():
blob.upload_from_filename(targz, content_type="application/gzip")
_upload()
def _generate_log_action(self, job):
"""generate an action to save the pipeline logs to storage."""
# script should be changed to this when added to version control!
# https://raw.githubusercontent.com/snakemake/snakemake/main/snakemake/executors/google_lifesciences_helper.py
# Save logs from /google/logs/output to source/logs in bucket
commands = [
"/bin/bash",
"-c",
"wget -O /gls.py https://raw.githubusercontent.com/snakemake/snakemake/main/snakemake/executors/google_lifesciences_helper.py && chmod +x /gls.py && source activate snakemake || true && python /gls.py save %s /google/logs %s/%s"
% (self.bucket.name, self.gs_logs, job.name),
]
# Always run the action to generate log output
action = {
"containerName": "snakelog-{}-{}".format(job.name, job.jobid),
"imageUri": self.container_image,
"commands": commands,
"labels": self._generate_pipeline_labels(job),
"alwaysRun": True,
}
return action
def _generate_job_action(self, job):
"""generate a single action to execute the job."""
# Derive the entrypoint command, the same content that might be written by self.get_jobscript(job)
use_threads = "--force-use-threads" if not job.is_group() else ""
exec_job = self.format_job(
self.exec_job, job, _quote_all=True, use_threads=use_threads
)
# Now that we've parsed the job resource requirements, add to exec
exec_job += self.get_default_resources_args(self.default_resources)
# script should be changed to this when added to version control!
# https://raw.githubusercontent.com/snakemake/snakemake/main/snakemake/executors/google_lifesciences_helper.py
# The full command to download the archive, extract, and run
# For snakemake bases, we must activate the conda environment, but
# for custom images we must allow this to fail (hence || true)
commands = [
"/bin/bash",
"-c",
"mkdir -p /workdir && cd /workdir && wget -O /download.py https://raw.githubusercontent.com/snakemake/snakemake/main/snakemake/executors/google_lifesciences_helper.py && chmod +x /download.py && source activate snakemake || true && python /download.py download %s %s /tmp/workdir.tar.gz && tar -xzvf /tmp/workdir.tar.gz && %s"
% (self.bucket.name, self.pipeline_package, exec_job),
]
# We are only generating one action, one job per run
# https://cloud.google.com/life-sciences/docs/reference/rest/v2beta/projects.locations.pipelines/run#Action
action = {
"containerName": "snakejob-{}-{}".format(job.name, job.jobid),
"imageUri": self.container_image,
"commands": commands,
"environment": self._generate_environment(),
"labels": self._generate_pipeline_labels(job),
}
return action
def _get_jobname(self, job):
# Use a dummy job name (human readable and also namespaced)
return "snakejob-%s-%s-%s" % (self.run_namespace, job.name, job.jobid)
def _generate_pipeline_labels(self, job):
"""generate basic labels to identify the job, namespace, and that
snakemake is running the show!
"""
jobname = self._get_jobname(job)
labels = {"name": jobname, "app": "snakemake"}
return labels
def _generate_environment(self):
"""loop through envvars (keys to host environment) and add
any that are requested for the container environment.
"""
envvars = {}
for key in self.envvars:
try:
envvars[key] = os.environ[key]
except KeyError:
continue
# Warn the user that we cannot support secrets
if envvars:
logger.warning("This API does not support environment secrets.")
return envvars
def _generate_pipeline(self, job):
"""based on the job details, generate a google Pipeline object
to pass to pipelines.run. This includes actions, resources,
environment, and timeout.
"""
# Generate actions (one per job step) and log saving action (runs no matter what) and resources
resources = self._generate_job_resources(job)
action = self._generate_job_action(job)
log_action = self._generate_log_action(job)
pipeline = {
# Ordered list of actions to execute
"actions": [action, log_action],
# resources required for execution
"resources": resources,
# Technical question - difference between resource and action environment
# For now we will set them to be the same.
"environment": self._generate_environment(),
}
# "timeout": string in seconds (3.5s) is not included (defaults to 7 days)
return pipeline
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
# https://cloud.google.com/life-sciences/docs/reference/rest/v2beta/projects.locations.pipelines
pipelines = self._api.projects().locations().pipelines()
# pipelines.run
# https://cloud.google.com/life-sciences/docs/reference/rest/v2beta/projects.locations.pipelines/run
labels = self._generate_pipeline_labels(job)
pipeline = self._generate_pipeline(job)
# The body of the request is a Pipeline and labels
body = {"pipeline": pipeline, "labels": labels}
# capabilities - this won't currently work (Singularity in Docker)
# We either need to add CAPS or run in privileged mode (ehh)
if job.needs_singularity and self.workflow.use_singularity:
raise WorkflowError(
"Singularity requires additional capabilities that "
"aren't yet supported for standard Docker runs, and "
"is not supported for the Google Life Sciences executor."
)
# location looks like: "projects/<project>/locations/<location>"
operation = pipelines.run(parent=self.location, body=body)
# 403 will result if no permission to use pipelines or project
result = self._retry_request(operation)
# The jobid is the last number of the full name
jobid = result["name"].split("/")[-1]
# Give some logging for how to get status
logger.info(
"Get status with:\n"
"gcloud config set project {project}\n"
"gcloud beta lifesciences operations describe {location}/operations/{jobid}\n"
"gcloud beta lifesciences operations list\n"
"Logs will be saved to: {bucket}/{logdir}\n".format(
project=self.project,
jobid=jobid,
location=self.location,
bucket=self.bucket.name,
logdir=self.gs_logs,
)
)
self.active_jobs.append(
GoogleLifeSciencesJob(job, result["name"], jobid, callback, error_callback)
)
def _job_was_successful(self, status):
"""based on a status response (a [pipeline].projects.locations.operations.get
debug print the list of events, return True if all return codes 0
and False otherwise (indication of failure). In that a nonzero exit
status is found, we also debug print it for the user.
"""
success = True
# https://cloud.google.com/life-sciences/docs/reference/rest/v2beta/Event
for event in status["metadata"]["events"]:
logger.debug(event["description"])
# Does it always result in fail for other failure reasons?
if "failed" in event:
success = False
action = event.get("failed")
logger.debug("{}: {}".format(action["code"], action["cause"]))
elif "unexpectedExitStatus" in event:
action = event.get("unexpectedExitStatus")
if action["exitStatus"] != 0:
success = False
# Provide reason for the failure (desc includes exit code)
msg = "%s" % event["description"]
if "stderr" in action:
msg += ": %s" % action["stderr"]
logger.debug(msg)
return success
def _retry_request(self, request, timeout=2, attempts=3):
"""The Google Python API client frequently has BrokenPipe errors. This
function takes a request, and executes it up to the given number of attempts,
each time with a 2x increase in the sleep timeout.
Parameters
==========
request: the Google Cloud request that needs to be executed
timeout: time to sleep (in seconds) before trying again
attempts: remaining attempts, throw error when hit 0
"""
import googleapiclient
try:
return request.execute()
except BrokenPipeError as ex:
if attempts > 0:
time.sleep(timeout)
return self._retry_request(request, timeout * 2, attempts - 1)
raise ex
except googleapiclient.errors.HttpError as ex:
if attempts > 0:
time.sleep(timeout)
return self._retry_request(request, timeout * 2, attempts - 1)
log_verbose_traceback(ex)
raise ex
except Exception as ex:
if attempts > 0:
time.sleep(timeout)
return self._retry_request(request, timeout * 2, attempts - 1)
log_verbose_traceback(ex)
raise ex
def _wait_for_jobs(self):
"""wait for jobs to complete. This means requesting their status,
and then marking them as finished when a "done" parameter
shows up. Even for finished jobs, the status should still return
"""
import googleapiclient
while True:
# always use self.lock to avoid race conditions
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
# Loop through active jobs and act on status
for j in active_jobs:
# use self.status_rate_limiter to avoid too many API calls.
with self.status_rate_limiter:
# https://cloud.google.com/life-sciences/docs/reference/rest/v2beta/projects.locations.operations/get
# Get status from projects.locations.operations/get
operations = self._api.projects().locations().operations()
request = operations.get(name=j.jobname)
logger.debug("Checking status for operation {}".format(j.jobid))
try:
status = self._retry_request(request)
except googleapiclient.errors.HttpError as ex:
# Operation name not found, even finished should be found
if ex.status == 404:
j.error_callback(j.job)
continue
# Unpredictable server (500) error
elif ex.status == 500:
                            logger.error(ex.content.decode("utf-8"))
                            j.error_callback(j.job)
                            continue
except WorkflowError as ex:
print_exception(ex, self.workflow.linemaps)
j.error_callback(j.job)
continue
# The operation is done
                    if status.get("done", False):
# Derive success/failure from status codes (prints too)
if self._job_was_successful(status):
j.callback(j.job)
else:
self.print_job_error(j.job, jobid=j.jobid)
j.error_callback(j.job)
# The operation is still running
else:
still_running.append(j)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
| 40.217883
| 338
| 0.603803
|
687fe07e957fa5ac96e11eb2881bb2435c4b5a4a
| 166
|
py
|
Python
|
configs/fanet/fanet_r18_1024x1024_80k_cityscapes.py
|
Jun-jieChen/real-time-segmentation
|
22d0cb1a8a0dfa3b38f25bcd05db15f345be291a
|
[
"Apache-2.0"
] | 1
|
2022-03-13T11:43:44.000Z
|
2022-03-13T11:43:44.000Z
|
configs/fanet/fanet_r18_1024x1024_80k_cityscapes.py
|
Jun-jieChen/real-time-segmentation
|
22d0cb1a8a0dfa3b38f25bcd05db15f345be291a
|
[
"Apache-2.0"
] | null | null | null |
configs/fanet/fanet_r18_1024x1024_80k_cityscapes.py
|
Jun-jieChen/real-time-segmentation
|
22d0cb1a8a0dfa3b38f25bcd05db15f345be291a
|
[
"Apache-2.0"
] | null | null | null |
_base_ = [
'../_base_/models/fanet.py', '../_base_/datasets/cityscapes_1024x1024.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
| 33.2
| 78
| 0.680723
|
7b32bed1f11eb6d25f2d9c222688553c7c442f64
| 9,940
|
py
|
Python
|
sdk/python/pulumi_azure/eventhub/namespace_customer_managed_key.py
|
roderik/pulumi-azure
|
f6d0c058d6f9111a709bc5f1515d1638f9d615f0
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/eventhub/namespace_customer_managed_key.py
|
roderik/pulumi-azure
|
f6d0c058d6f9111a709bc5f1515d1638f9d615f0
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/eventhub/namespace_customer_managed_key.py
|
roderik/pulumi-azure
|
f6d0c058d6f9111a709bc5f1515d1638f9d615f0
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['NamespaceCustomerManagedKeyArgs', 'NamespaceCustomerManagedKey']
@pulumi.input_type
class NamespaceCustomerManagedKeyArgs:
def __init__(__self__, *,
eventhub_namespace_id: pulumi.Input[str],
key_vault_key_ids: pulumi.Input[Sequence[pulumi.Input[str]]]):
"""
The set of arguments for constructing a NamespaceCustomerManagedKey resource.
:param pulumi.Input[str] eventhub_namespace_id: The ID of the EventHub Namespace. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] key_vault_key_ids: The list of keys of Key Vault.
"""
pulumi.set(__self__, "eventhub_namespace_id", eventhub_namespace_id)
pulumi.set(__self__, "key_vault_key_ids", key_vault_key_ids)
@property
@pulumi.getter(name="eventhubNamespaceId")
def eventhub_namespace_id(self) -> pulumi.Input[str]:
"""
The ID of the EventHub Namespace. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "eventhub_namespace_id")
@eventhub_namespace_id.setter
def eventhub_namespace_id(self, value: pulumi.Input[str]):
pulumi.set(self, "eventhub_namespace_id", value)
@property
@pulumi.getter(name="keyVaultKeyIds")
def key_vault_key_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
The list of keys of Key Vault.
"""
return pulumi.get(self, "key_vault_key_ids")
@key_vault_key_ids.setter
def key_vault_key_ids(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "key_vault_key_ids", value)
@pulumi.input_type
class _NamespaceCustomerManagedKeyState:
def __init__(__self__, *,
eventhub_namespace_id: Optional[pulumi.Input[str]] = None,
key_vault_key_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering NamespaceCustomerManagedKey resources.
:param pulumi.Input[str] eventhub_namespace_id: The ID of the EventHub Namespace. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] key_vault_key_ids: The list of keys of Key Vault.
"""
if eventhub_namespace_id is not None:
pulumi.set(__self__, "eventhub_namespace_id", eventhub_namespace_id)
if key_vault_key_ids is not None:
pulumi.set(__self__, "key_vault_key_ids", key_vault_key_ids)
@property
@pulumi.getter(name="eventhubNamespaceId")
def eventhub_namespace_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the EventHub Namespace. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "eventhub_namespace_id")
@eventhub_namespace_id.setter
def eventhub_namespace_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "eventhub_namespace_id", value)
@property
@pulumi.getter(name="keyVaultKeyIds")
def key_vault_key_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The list of keys of Key Vault.
"""
return pulumi.get(self, "key_vault_key_ids")
@key_vault_key_ids.setter
def key_vault_key_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "key_vault_key_ids", value)
class NamespaceCustomerManagedKey(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
eventhub_namespace_id: Optional[pulumi.Input[str]] = None,
key_vault_key_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
        Manages a Customer Managed Key for an EventHub Namespace.
        ## Import
        Customer Managed Keys for an EventHub Namespace can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:eventhub/namespaceCustomerManagedKey:NamespaceCustomerManagedKey example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.EventHub/namespaces/namespace1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] eventhub_namespace_id: The ID of the EventHub Namespace. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] key_vault_key_ids: The list of keys of Key Vault.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: NamespaceCustomerManagedKeyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        Manages a Customer Managed Key for an EventHub Namespace.
        ## Import
        Customer Managed Keys for an EventHub Namespace can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:eventhub/namespaceCustomerManagedKey:NamespaceCustomerManagedKey example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.EventHub/namespaces/namespace1
```
:param str resource_name: The name of the resource.
:param NamespaceCustomerManagedKeyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NamespaceCustomerManagedKeyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
eventhub_namespace_id: Optional[pulumi.Input[str]] = None,
key_vault_key_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NamespaceCustomerManagedKeyArgs.__new__(NamespaceCustomerManagedKeyArgs)
if eventhub_namespace_id is None and not opts.urn:
raise TypeError("Missing required property 'eventhub_namespace_id'")
__props__.__dict__["eventhub_namespace_id"] = eventhub_namespace_id
if key_vault_key_ids is None and not opts.urn:
raise TypeError("Missing required property 'key_vault_key_ids'")
__props__.__dict__["key_vault_key_ids"] = key_vault_key_ids
super(NamespaceCustomerManagedKey, __self__).__init__(
'azure:eventhub/namespaceCustomerManagedKey:NamespaceCustomerManagedKey',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
eventhub_namespace_id: Optional[pulumi.Input[str]] = None,
key_vault_key_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'NamespaceCustomerManagedKey':
"""
Get an existing NamespaceCustomerManagedKey resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] eventhub_namespace_id: The ID of the EventHub Namespace. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] key_vault_key_ids: The list of keys of Key Vault.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _NamespaceCustomerManagedKeyState.__new__(_NamespaceCustomerManagedKeyState)
__props__.__dict__["eventhub_namespace_id"] = eventhub_namespace_id
__props__.__dict__["key_vault_key_ids"] = key_vault_key_ids
return NamespaceCustomerManagedKey(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="eventhubNamespaceId")
def eventhub_namespace_id(self) -> pulumi.Output[str]:
"""
The ID of the EventHub Namespace. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "eventhub_namespace_id")
@property
@pulumi.getter(name="keyVaultKeyIds")
def key_vault_key_ids(self) -> pulumi.Output[Sequence[str]]:
"""
The list of keys of Key Vault.
"""
return pulumi.get(self, "key_vault_key_ids")
| 46.666667
| 228
| 0.685211
|
4fe14a5aee12800d8ac0fe3ce542a7517c3bb8e7
| 1,882
|
py
|
Python
|
counterfactualms/distributions/transforms/affine.py
|
jcreinhold/counterfactualms
|
9be5919c8885354fe1ac91c852d196969cfe16be
|
[
"Apache-2.0"
] | 14
|
2021-03-08T11:51:34.000Z
|
2022-03-21T03:30:21.000Z
|
counterfactualms/distributions/transforms/affine.py
|
jcreinhold/counterfactualms
|
9be5919c8885354fe1ac91c852d196969cfe16be
|
[
"Apache-2.0"
] | 1
|
2021-10-03T15:20:30.000Z
|
2021-10-03T15:20:30.000Z
|
counterfactualms/distributions/transforms/affine.py
|
jcreinhold/counterfactualms
|
9be5919c8885354fe1ac91c852d196969cfe16be
|
[
"Apache-2.0"
] | 4
|
2021-04-03T15:23:24.000Z
|
2021-09-30T04:14:28.000Z
|
from pyro.distributions.conditional import ConditionalTransformModule
from pyro.distributions.torch_transform import TransformModule
from pyro.distributions import transforms as pyro_transforms
import torch
from torch.distributions import transforms
class LearnedAffineTransform(TransformModule, transforms.AffineTransform):
def __init__(self, loc=None, scale=None, **kwargs):
super().__init__(loc=loc, scale=scale, **kwargs)
if loc is None:
self.loc = torch.nn.Parameter(torch.zeros([1, ]))
if scale is None:
self.scale = torch.nn.Parameter(torch.ones([1, ]))
def _broadcast(self, val):
dim_extension = tuple(1 for _ in range(val.dim() - 1))
loc = self.loc.view(-1, *dim_extension)
scale = self.scale.view(-1, *dim_extension)
return loc, scale
def _call(self, x):
loc, scale = self._broadcast(x)
return loc + scale * x
def _inverse(self, y):
loc, scale = self._broadcast(y)
return (y - loc) / scale
class ConditionalAffineTransform(ConditionalTransformModule):
def __init__(self, context_nn, event_dim=0, **kwargs):
super().__init__(**kwargs)
self.event_dim = event_dim
self.context_nn = context_nn
def condition(self, context):
loc, log_scale = self.context_nn(context)
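        # The context network predicts the log of the scale; exponentiating keeps
        # the affine scale strictly positive, as required for an invertible transform.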
scale = torch.exp(log_scale)
ac = transforms.AffineTransform(loc, scale, event_dim=self.event_dim)
return ac
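# The subclass below overrides log_abs_det_jacobian: for a lower-triangular scale
# matrix, the Jacobian determinant is the product of the diagonal of scale_tril, so
# its log-abs value is the sum of the diagonal logs, expanded to the batch shape of x.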
class LowerCholeskyAffine(pyro_transforms.LowerCholeskyAffine):
    def log_abs_det_jacobian(self, x, y):
        """
        Calculates the log absolute determinant of the Jacobian, i.e.
        log(abs(det(dy/dx))), for each batch element.
        """
return torch.ones(x.size()[:-1], dtype=x.dtype, layout=x.layout, device=x.device) * \
self.scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1).sum(-1)
| 36.192308
| 93
| 0.663656
|
7e410bf89c7168d9a1e901f2a5ad3a3d72831cfd
| 1,307
|
py
|
Python
|
ENV/lib/python3.5/site-packages/pyrogram/api/types/notify_users.py
|
block1o1/CryptoPredicted
|
7f660cdc456fb8252b3125028f31fd6f5a3ceea5
|
[
"MIT"
] | 4
|
2021-10-14T21:22:25.000Z
|
2022-03-12T19:58:48.000Z
|
ENV/lib/python3.5/site-packages/pyrogram/api/types/notify_users.py
|
inevolin/CryptoPredicted
|
7f660cdc456fb8252b3125028f31fd6f5a3ceea5
|
[
"MIT"
] | null | null | null |
ENV/lib/python3.5/site-packages/pyrogram/api/types/notify_users.py
|
inevolin/CryptoPredicted
|
7f660cdc456fb8252b3125028f31fd6f5a3ceea5
|
[
"MIT"
] | 1
|
2022-03-15T22:52:53.000Z
|
2022-03-15T22:52:53.000Z
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2018 Dan Tès <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.api.core import *
class NotifyUsers(Object):
"""Attributes:
ID: ``0xb4c83b4c``
No parameters required.
"""
ID = 0xb4c83b4c
def __init__(self):
pass
@staticmethod
def read(b: BytesIO, *args) -> "NotifyUsers":
# No flags
return NotifyUsers()
def write(self) -> bytes:
b = BytesIO()
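        # Serialize only the 4-byte TL constructor ID (0xb4c83b4c); this type carries no other fields.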
b.write(Int(self.ID, False))
# No flags
return b.getvalue()
| 26.673469
| 74
| 0.678653
|
64aa90550ec50dbce6152e69ee40ab9bee708c05
| 8,869
|
py
|
Python
|
tensorflow/python/training/adam_test.py
|
connectthefuture/tensorflow
|
93812423fcd5878aa2c1d0b68dc0496980c8519d
|
[
"Apache-2.0"
] | 101
|
2016-12-03T11:40:52.000Z
|
2017-12-23T02:02:03.000Z
|
tensorflow/python/training/adam_test.py
|
connectthefuture/tensorflow
|
93812423fcd5878aa2c1d0b68dc0496980c8519d
|
[
"Apache-2.0"
] | 9
|
2016-12-14T03:27:46.000Z
|
2017-09-13T02:29:07.000Z
|
tensorflow/python/training/adam_test.py
|
connectthefuture/tensorflow
|
93812423fcd5878aa2c1d0b68dc0496980c8519d
|
[
"Apache-2.0"
] | 47
|
2016-12-04T12:37:24.000Z
|
2018-01-14T18:13:07.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def adam_update_numpy(param, g_t, t, m, v, alpha=0.001, beta1=0.9, beta2=0.999,
epsilon=1e-8):
alpha_t = alpha * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
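# Hand-checked example of the reference update above (one step, default hyperparameters,
# param=1.0, g_t=0.1, m=v=0):
#   m_1 = 0.9*0 + 0.1*0.1 = 0.01
#   v_1 = 0.999*0 + 0.001*0.01 = 1e-5
#   alpha_1 = 0.001*sqrt(1 - 0.999)/(1 - 0.9) ~= 3.162e-4
#   param_1 = 1.0 - alpha_1*m_1/(sqrt(v_1) + 1e-8) ~= 0.999
# i.e. the bias-corrected first step moves the parameter by roughly the learning rate.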
class AdamOptimizerTest(tf.test.TestCase):
def testSparse(self):
for dtype in [tf.half, tf.float32, tf.float64]:
with self.test_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
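        # Wrap the gradients as IndexedSlices (row indices plus dense values) so the
        # optimizer's sparse update path is exercised rather than the dense one.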
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = tf.IndexedSlices(tf.constant(grads0_np),
tf.constant(grads0_np_indices),
tf.constant([2]))
grads1_np_indices = np.array([0, 1], dtype=np.int32)
grads1 = tf.IndexedSlices(tf.constant(grads1_np),
tf.constant(grads1_np_indices),
tf.constant([2]))
opt = tf.train.AdamOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9 ** t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999 ** t, beta2_power.eval())
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testBasic(self):
for dtype in [tf.half, tf.float32, tf.float64]:
with self.test_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
opt = tf.train.AdamOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9 ** t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999 ** t, beta2_power.eval())
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testTensorLearningRate(self):
for dtype in [tf.half, tf.float32, tf.float64]:
with self.test_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
opt = tf.train.AdamOptimizer(tf.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9 ** t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999 ** t, beta2_power.eval())
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSharing(self):
for dtype in [tf.half, tf.float32, tf.float64]:
with self.test_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = tf.Variable(var0_np)
var1 = tf.Variable(var1_np)
grads0 = tf.constant(grads0_np)
grads1 = tf.constant(grads1_np)
opt = tf.train.AdamOptimizer()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.global_variables_initializer().run()
beta1_power, beta2_power = opt._get_beta_accumulators()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9 ** t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999 ** t, beta2_power.eval())
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testTwoSessions(self):
optimizer = tf.train.AdamOptimizer()
g = tf.Graph()
with g.as_default():
with tf.Session():
var0 = tf.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = tf.constant(np.array([0.1, 0.1]))
optimizer.apply_gradients([(grads0, var0)])
gg = tf.Graph()
with gg.as_default():
with tf.Session():
var0 = tf.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = tf.constant(np.array([0.1, 0.1]))
# If the optimizer saves any state not keyed by graph the following line
# fails.
optimizer.apply_gradients([(grads0, var0)])
if __name__ == "__main__":
tf.test.main()
| 40.683486
| 80
| 0.621603
|
3088297c2a2fa7fe9543aae1929c391493cd97c1
| 10,240
|
py
|
Python
|
src/model/pieces/piece.py
|
ameyerow/Chesstrainer
|
51d273cce9081b24e03a63096b1c0f6c6f0bcab6
|
[
"MIT"
] | null | null | null |
src/model/pieces/piece.py
|
ameyerow/Chesstrainer
|
51d273cce9081b24e03a63096b1c0f6c6f0bcab6
|
[
"MIT"
] | 1
|
2022-01-25T18:29:07.000Z
|
2022-01-25T18:29:07.000Z
|
src/model/pieces/piece.py
|
ameyerow/Chesstrainer
|
51d273cce9081b24e03a63096b1c0f6c6f0bcab6
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import abc
from typing import List, Tuple, Type
from ..pos import Pos
from ..player import Player
from ..move import Move
from ..exceptions import PieceNotFoundException
class Piece(metaclass=abc.ABCMeta):
def __init__(self, pos: Pos, player: Player, name: str):
self.pos = pos
self.player = player
self.name = name
def __str__(self) -> str:
return self.name if self.player is Player.WHITE else self.name.lower()
@abc.abstractmethod
def can_pin_orthogonally(self) -> bool:
"""
return:
True if the piece can pin orthogonally, False otherwise
"""
raise NotImplementedError
@abc.abstractmethod
def can_pin_diagonally(self) -> bool:
"""
return:
True if the piece can pin diagonally, False otherwise
"""
raise NotImplementedError
@abc.abstractmethod
def attacks_square_from_position(self, pos: Pos, dest: Pos, board) -> bool:
"""
Determines if the current piece could attack a given location from a tile on the board --
that it could move there legally. The current piece's position is considered to be empty.
param pos:
The position the piece should be imagined existing in
param dest:
The position that is attempting to be attacked
param board:
The current state of the board
return:
True if the current piece could attack dest from pos, False otherwise
"""
raise NotImplementedError
def legal_moves(self, board) -> Tuple[List[Pos], List[Pos]]:
"""
        Returns the positions the piece can legally move to, split into non-capture
        moves and captures.
        return:
            Tuple of (moves, captures) position lists
"""
# TODO: speed this up
moves = []
captures = []
for i in range(8):
for j in range(8):
dest = Pos(i, j)
move: Move = board.is_legal_move(dest, self)
if move.is_capture() and move.is_legal():
captures.append(dest)
elif move.is_legal():
moves.append(dest)
return moves, captures
@abc.abstractmethod
def is_dest_reachable(self, dest: Pos, board) -> bool:
"""
        Given the type of the current piece, is the destination position reachable: is the move the
correct shape and is the path clear? At this point you may assume that you are not attempting
to move to your current position and that the destination square is not occupied by your own
piece.
param dest:
The position the piece is attempting to move
param board:
The current state of the board
return:
True if the destination is reachable, False otherwise
"""
raise NotImplementedError
def maintains_pin(self, dest: Pos, board) -> bool:
"""
        Does the current move maintain every pin, if any exist? At this point you may assume that the
        destination tile constitutes a reachable tile for that piece -- that the move is of the correct shape
        and the path is clear.
param dest:
The position the piece is attempting to move
param board:
The current state of the board
return:
            True if any pin is maintained or no pin exists, False otherwise
"""
king_pos = board.get_king_pos(board.current_player)
# A king can't be pinned.
if king_pos == self.pos:
return True
rank_diff = king_pos.rank - self.pos.rank
file_diff = king_pos.file - self.pos.file
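        # Three geometries can produce a pin: same rank (rank_diff == 0), same file
        # (file_diff == 0), or same diagonal (|rank_diff/file_diff| == 1); each case is
        # checked in turn below, and any other geometry cannot pin this piece.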
# If the piece is on the same rank as the King we must check if any pin along the rank exists,
# and if it does exist it is maintained
if rank_diff == 0:
delta = 1 if file_diff < 0 else -1
edge = -1 if delta < 0 else 8
# Is there an empty path between us and the king?
for i in range(king_pos.file + delta, self.pos.file, delta):
pos = Pos(rank=self.pos.rank, file=i)
piece = board.get(pos)
# If the piece is not empty, return True because there is no pin to maintain
if piece is not None:
return True
# Is there a piece pinning us to the king?
for i in range(self.pos.file + delta, edge, delta):
pos = Pos(rank=self.pos.rank, file=i)
piece = board.get(pos)
# If we encounter a piece
if piece is not None:
# And the piece is not the current player's piece
if piece.player is not board.current_player:
# And the piece can pin orthogonally (it is a rook or queen)
if piece.can_pin_orthogonally():
# And the destination does not stay on this rank
if dest.rank != self.pos.rank:
# Then the pin is not maintained
return False
# Once we have found a piece that does not return False to the above condition
# we no longer need to iterate along the file
break
# If the piece is on the same file as the King we must check if any pin along the file exists,
# and if it does exist it is maintained
if file_diff == 0:
delta = 1 if rank_diff < 0 else -1
edge = -1 if delta < 0 else 8
for i in range(king_pos.rank + delta, self.pos.rank, delta):
pos = Pos(rank=i, file=self.pos.file)
piece = board.get(pos)
if piece is not None:
return True
for i in range(self.pos.rank + delta, edge, delta):
pos = Pos(rank=i, file=self.pos.file)
piece = board.get(pos)
if piece is not None:
if piece.player is not board.current_player:
if piece.can_pin_orthogonally():
if self.pos.file != dest.file:
return False
break
# Avoid division by zero
if rank_diff == 0 or file_diff == 0:
return True
# If the piece is on the same diagonal as the King we must check if any pin along the diagonal
# exists, and if it does exist it is maintained
if abs(rank_diff/file_diff) == 1:
delta_rank = 1 if rank_diff < 0 else -1
delta_file = 1 if file_diff < 0 else -1
# As we search for a pinning piece keep track of the possible moves we could have made that
# would maintain the pin
possible_moves = []
# Is there an empty path between us and the king?
for rank, file in zip(range(king_pos.rank+delta_rank, self.pos.rank, delta_rank),\
range(king_pos.file+delta_file, self.pos.file, delta_file)):
pos = Pos(rank, file)
possible_moves.append(pos)
piece = board.get(pos)
# If the piece is not empty, return True because there is no pin to maintain
if piece is not None:
return True
# How many tiles are between the current piece and the edge of the board along the piece-King
# diagonal?
num_steps = min(7 - self.pos.rank if delta_rank > 0 else self.pos.rank, \
7 - self.pos.file if delta_file > 0 else self.pos.file)
edge_rank = self.pos.rank + delta_rank * num_steps
edge_file = self.pos.file + delta_file * num_steps
for i, j in zip(range(self.pos.rank + delta_rank, edge_rank, delta_rank), \
range(self.pos.file + delta_file, edge_file, delta_file)):
pos = Pos(rank=i, file=j)
possible_moves.append(pos)
piece = board.get(pos)
if piece is not None:
if piece.player is not board.current_player:
if piece.can_pin_diagonally():
if dest not in possible_moves:
return False
break
return True
@abc.abstractstaticmethod
def get_origin(destination_pos: Pos, board, origin_hint: str = None) -> Pos:
"""
Determine the origin position of a piece given its destination.
param destination_pos:
The position the piece is attempting to move
param board:
The current state of the board
param origin_hint:
A string used to give additional information about the location of the piece. For
example in the string Bad4, "a" is a hint describing the location of the bishop on
the a-file.
return:
The origin position
"""
raise NotImplementedError
@staticmethod
def get_hint_origin(origin_hint: str, piece_type: Type[Piece], board) -> Pos:
        """
        Resolve an origin hint to the position of the current player's piece of the
        given type. The hint may be a full square (e.g. "a4"), a rank digit, or a
        file letter. Raises PieceNotFoundException if no matching piece is found.
        """
if len(origin_hint) == 2:
return Pos.index(origin_hint)
elif origin_hint.isnumeric():
rank = int(origin_hint) - 1
for file in range(8):
pos = Pos(rank, file)
if isinstance(board.get(pos), piece_type) and \
board.get(pos).player == board.current_player:
return pos
else:
file = Pos.index_from_file(origin_hint)
for rank in range(8):
pos = Pos(rank, file)
if isinstance(board.get(pos), piece_type) and \
board.get(pos).player == board.current_player:
return pos
raise PieceNotFoundException()
@abc.abstractmethod
def __copy__(self):
raise NotImplementedError
| 38.496241
| 105
| 0.556348
|
9ee75f09747e4d1fc65b2e1c6186abe0bffc5405
| 23,622
|
py
|
Python
|
questions.py
|
elaynelemos/m00dbot
|
f949a18c85a183a07fab8a884e3c7a00d3a322c8
|
[
"MIT"
] | 1
|
2020-03-07T11:23:51.000Z
|
2020-03-07T11:23:51.000Z
|
questions.py
|
Warlockk/m00dbot
|
f949a18c85a183a07fab8a884e3c7a00d3a322c8
|
[
"MIT"
] | null | null | null |
questions.py
|
Warlockk/m00dbot
|
f949a18c85a183a07fab8a884e3c7a00d3a322c8
|
[
"MIT"
] | null | null | null |
HARS_QUESTIONS = {
"ru": {
"answers": ["Отсутствует", "В слабой степени", "В умеренной степени", "В тяжелой степени", "В очень тяжелой степени"],
"questions": [
"Тревожное настроение. (озабоченность, ожидание наихудшего, тревожные опасения, раздражительность)",
"Напряжение. (ощущение напряжения, вздрагивание, легко возникающая плаксивость, дрожь, чувство беспокойства, неспособность расслабиться)",
"Страхи. (темноты, незнакомцев, одиночества, животных, толпы, транспорта)",
"Инсомния. (затрудненное засыпание, прерывистый сон, не приносящий отдыха, чувство разбитости и слабости при пробуждении, кошмарные сны)",
"Интеллектуальные нарушения. (затруднение концентрации внимания, ухудшение памяти)",
"Депрессивное настроение. (утрата привычных интересов, чувства удовольствия от хобби, подавленность, ранние пробуждения, суточные колебания настроения)",
"Соматические мышечные симптомы. (боли, подергивания, напряжение, судороги клонические, скрипение зубами, срывающийся голос, повышенный мышечный тонус)",
"Соматические сенсорные симптомы. (звон в ушах, нечеткость зрения, приливы жара и холода, ощущение слабости, покалывания)",
"Сердечно-сосудистые симптомы. (тахикардия, сердцебиение, боль в груди, пульсация в сосудах, частые вздохи)",
"Респираторные симптомы. (давление и сжатие в груди, удушье, частые вздохи)",
"Гастроинтестинальные симптомы. (затрудненное глотание, метеоризм, боль в животе, изжога, чувство переполненного желудка, тошнота, рвота, урчание в животе, диарея, запоры, снижение веса тела)",
"Мочеполовые симптомы. (учащенное мочеиспускание, сильные позывы на мочеиспускание, аменорея, менорагия, фригидность, преждевременная эякуляция, утрата либидо, импотенция)",
"Вегетативные симптомы. (сухость во рту, покраснение или бледность кожи, потливость, головные боли с чувством напряжения)",
"Поведение при осмотре. (ерзанье на стуле, беспокойная жестикуляция и походка, тремор, нахмуривание лица, напряженное выражение лица, вздохи или учащенное дыхание, частоее сглатывание слюны)"
]
},
"en": {
"answers": ["None", "Mild", "Moderate", "Severe", "Very severe"],
"questions": [
"Anxious. Worries, anticipation of the worst, fearful anticipation, irritability",
"Tension. Feelings of tension, fatigability, startle response, moved to tears easily, trembling, feelings of restlessness, inability to relax",
"Fears. Of dark, of strangers, of being left alone, of animals, of traffic, of crowds",
"Insomnia Difficulty in falling asleep, broken sleep, unsatisfying sleep and fatigue on waking, dreams, nightmares, night-terrors",
"Intellectual. Difficulty in concentration, poor memory",
"Depressed mood. Loss of interest, lack of pleasure in hobbies, depression, early waking, diurnal swing",
"Somatic (muscular). Pains and aches, twitching, stiffness, myoclonic jerks, grinding of teeth, unsteady voice, increased muscular tone",
"Somatic (sensory). Tinnitus, blurring of vision, hot and cold flushes, feelings of weakness, pricking sensation",
"Cardiovascular symptoms. Tachycardia, palpitations, pain in chest, throbbing of vessels, fainting feelings, missing beat",
"Respiratory symptoms. Pressure or constriction in chest, choking feelings, sighing, dyspnea",
"Gastrointestinal symptoms. Difficulty in swallowing, wind, abdominal pain, burning sensations, abdominal fullness, nausea, vomiting, borborygmi, looseness of bowels, loss of weight, constipation",
"Genitourinary symptoms. Frequency of micturition, urgency of micturition, amenorrhea, menorrhagia, development of frigidity, premature ejaculation, loss of libido, impotence",
"Autonomic symptoms. Dry mouth, flushing, pallor, tendency to sweat, giddiness, tension headache, raising of hair",
"Behavior at Interview. Fidgeting, restlessness or pacing, tremor of hands, furrowed brow, strained face, sighing or rapid respiration, facial pallor, swallowing, belching, brisk tendon jerks, dilated pupils, exophthalmos"
]
},
"pt": {
"answers": ["Nenhuma", "Leve", "Moderada", "Severa", "Muito severa"],
"questions": [
"Ansiedade. Preocupações, antecipação do pior, antecipação temerosa, irritabilidade",
"Tensão. Sentimentos de tensão, fatigabilidade, sobressalto, choro repentino, tremor, sentimentos de inquietação, inabilidade para relaxar",
"Medos. Do escuro, de estranhos, de ficar sozinho(a), de animais, do trânsito, de multidões",
"Insônia. Dificuldade de cair no sono, sono intermitente, sono insatisfatório e despertar cansado(a), sonhos, pesadelos, terrores noturnos",
"Intelectual. Dificuldade de concentração, memória fraca",
"Humor depressivo. Perca de interesse, falta de prazer em hobbies, depressão, acorda cedo, variações de humor diurnas",
"Sintomas musculares. Dores , espasmos, rigidez, mioclonia, ranger de dentes, voz trêmula, aumento de tônus muscular",
"Sintomas sensorias. Zumbido, visão turva, rubores quentes e frios, sentimentos de fraquezas, formigamento",
"Sintomas cardiovasculares. Taquicardia, palpitações, dor no peito, pulsação de vasos, desfalecimento, falta de batimento",
"Sintomas respiratórios. Pressão e constrição no peito, asfixia, suspiro, dispineia",
"Sintomas Gastrointestinais. Difilculdade para engolir, fôlego, dor abdominal, queimações, sensação de estômago cheio, náusea, vômitos, burburinho, frouxidão, perda de peso, constipação",
"Sintomas Geniturinários. Frequência de micção, urgência de micção, amenorreia, menorragia, frigidez, ejaculação precoce, perda de libido, impotência",
"Sintomas autonômicos. Boca seca, rubor, palidez, tendência ao suor, vertigem, dor de cabeça tensional, aumento capilar",
"Comportamento na Entrevista. Inquietação, inquietação ou ritmo, tremor das mãos, sobrancelha franzida, rosto tenso, suspiro ou respiração rápida, palidez facial, deglutição, arroto, contrações nos tendões, pupilas dilatadas, exoftalmia"
]
}
}
MADRS_QUESTIONS = {
"ru": [
{
"question": "Объективные (видимые) признаки подавленности. Проявления угнетенности, уныния, отчаяния (более выраженных, чем при обычном временном снижении настроения) в речи, в мимике и позе. Оцениваются в соответствии с глубиной снижения настроения.",
"answers": ["0 = отсутствие", "1 =", "2 = выглядит подавленным, но настроение легко улучшается", "3 =", "4 = выглядит подавленным и несчастным большую часть времени", "5 =", "6 = выглядит крайне подавленным и угнетенным все время"]
},
{
"question": "Субъективные признаки подавленности. Сообщение пациента о депрессивном настроении независимо от того, насколько оно проявляется внешними признаками. Включает упадок духа, угнетенность или чувство беспомощности и безнадежности. Оценивается в соответствии с интенсивностью, продолжительностью и степенью того, насколько, по описанию пациента, сниженное настроение связано с внешними событиями.",
"answers": ["0 = эпизодическая подавленность, связанная с внешними обстоятельствами", "1 =", "2 = печальное или подавленное настроение, легко поддающееся улучшению", "3 =", "4 = глубокое чувство угнетенности или уныния; настроение еще подвержено влиянию внешних событий", "5 =", "6 = постоянное и неизменное чувство подавленности, отчаяния или угнетенности"]
},
{
"question": "Внутреннее напряжение. Чувство болезненного дискомфорта, смятения, раздражения, психического напряжения, доходящего до паники, сильного страха или душевной боли.",
"answers": ["0 = спокойное состояние; только чувство внутреннего напряжения", "1 =", "2 = эпизодическое чувство раздражения или болезненного дискомфорта", "3 =", "4 = постоянное чувство внутреннего напряжения, периодическая паника, преодолеваемая больным с большим трудом", "5 =", "6 = неослабевающий крайне выраженный страх или душевная боль; непреодолимая паника"]
},
{
"question": "Недостаточный сон. Уменьшение продолжительности или глубины сна в сравнении с привычными для пациента характеристиками сна.",
"answers": ["0 = обычный сон", "1 =", "2 = незначительно затрудненное засыпание или несколько укороченный, поверхностный или прерывистый сон", "3 =", "4 = укороченный сон, не менее 2 часов", "5 =", "6 = менее 2-3 часов сна"]
},
{
"question": "Снижение аппетита Утрата аппетита. Оценивается в соответствии со степенью утраты желания поесть или усилий заставить себя принять пищу.",
"answers": ["0 = нормальный или повышенный аппетит", "1 =", "2 = несколько сниженный аппетит", "3 =", "4 = отсутствие аппетита; пища не имеет вкуса", "5 =", "6 = необходимость принуждения для приема пищи"]
},
{
"question": "Нарушение концентрации внимания. Трудности собраться с мыслями вплоть до утраты способности сконцентрироваться. Оценивается в соответствии с интенсивностью, частотой и степенью утраты способности концентрировать внимание.",
"answers": ["0 = нет нарушений концентрации", "1 =", "2 = эпизодически трудно собраться с мыслями", "3 =", "4 = затруднения концентрации и длительного сосредоточения со снижением способности читать или поддерживать разговор", "5 =", "6 = утрата способности читать или участвовать в разговоре без значительных усилий"]
},
{
"question": "Апатия. Затруднения начать какую-либо деятельность или замедленность начала и выполнения повседневной деятельности.",
"answers": ["0 = отсутствие затруднения начать какую-либо деятельность; отсутствие замедленности", "1 =", "2 = затруднения начать какую-либо деятельность", "3 =", "4 = затруднения начать простую повседневную деятельность, выполнение которых требует дополнительных усилий", "5 =", "6 = полная апатия; неспособность выполнить что-либо без посторонней помощи"]
},
{
"question": "Утрата способности чувствовать. Субъективное ощущение снижения интереса к окружающему или деятельности, обычно доставляющим удовольствие. Снижение способности адекватно эмоционально реагировать на внешние события или людей",
"answers": ["0 = нормальный интерес к окружающему и людям", "1 =", "2 = снижение способности получать удовольствие от того, что обычно интересно", "3 =", "4 = утрата интереса к окружающему; утрата чувств к друзьям и знакомым", "5 =", "6 = ощущение эмоционального паралича, утраты способности испытывать гнев, печаль или удовольствие, полной или даже болезненной утраты чувств к близким и друзьям"]
},
{
"question": "Пессимистические мысли. Идеи собственной вины, малоценности, самоуничижения, греховности или раскаяния",
"answers": ["0 = отсутствие пессимистических мыслей", "1 =", "2 = эпизодические идеи неудачливости в жизни, самоуничижения или малоценности", "3 =", "4 = постоянное самообвинение или конкретные, но еще рациональные, идеи виновности или греховности; нарастающая пессимистическая оценка будущего", "5 =", "6 = бредовые идеи полного краха, раскаяния или неискупимого греха; абсурдное и непоколебимое самообвинение"]
},
{
"question": "Суицидальные мысли. Чувство, что жить больше не стоит, что естественная смерть – желаемый исход; суицидальные мысли и приготовления к самоубийству.",
"answers": ["0 = жизнь приносит удовольствие или воспринимается такой, какая она есть", "1 =", "2 = усталость от жизни; эпизодические мысли о самоубийстве", "3 =", "4 = возможно лучше умереть; суицидальные мысли становятся привычными, а самоубийство рассматривается как возможный способ решения проблем при отсутствии конкретных суицидальных планов или намерений", "5 =", "6 = конкретное планирование совершения самоубийства при первой возможности; активные приготовления к самоубийству"]
}
],
"en": [
{
"question": "Apparent Sadness. Representing despondency, gloom and despair, (more than just ordinary transient low spirits) reflected in speech, facial expression, and posture. Rate by depth and inability to brighten up",
"answers": ["0 = No sadness", "1 =", "2 = Looks dispirited but does brighten up without difficulty", "3 =", "4 = Appears sad and unhappy most of the time", "5 =", "6 = Looks miserable all the time. Extremely despondent"]
},
{
"question": "Reported Sadness. Representing reports of depressed mood, regardless of whether it is reflected in appearance or not. Includes low spirits, despondency or the feeling of being beyond help and without hope. Rate according to intensity, duration and the extent to which the mood is reported to be influenced by events",
"answers": ["0 = Occasional sadness in keeping with the circumstances", "1 =", "2 = Sad or low but brightens up without difficulty", "3 =", "4 = Pervasive feelings of sadness or gloominess. The mood is still influenced by external circumstances", "5 =", "6 = Continuous or unvarying sadness, misery or despondency"]
},
{
"question": "Inner Tension. Representing feelings of ill-defined discomfort, edginess, inner turmoil, mental tension mounting to either panic, dread or anguish. Rate according to intensity, frequency, duration and the extent of reassurance called for",
"answers": ["0 = Placid. Only fleeting inner tension", "1 =", "2 = Occasional feelings of edginess and ill-defined discomfort", "3 =", "4 = Continuous feelings of inner tension or intermittent panic which the patient can only master with some difficulty", "5 =", "6 = Unrelenting dread or anguish. Overwhelming panic"]
},
{
"question": "Reduced Sleep. Representing the experience of reduced duration or depth of sleep compared to the subject’s own normal pattern when well",
"answers": ["0 = Sleeps as usual", "1 =", "2 = Slight difficulty dropping off to sleep or slightly reduced, light or fitful sleep", "3 =", "4 = Sleep reduced or broken by at least two hours", "5 =", "6 = Less than two or three hours sleep"]
},
{
"question": "Reduced Appetite. Representing the feeling of a loss of appetite compared with when well. Rate by loss of desire for food or the need to force oneself to eat",
"answers": ["0 = Normal or increased appetite", "1 =", "2 = Slightly reduced appetite", "3 =", "4 = No appetite. Food is tasteless", "5 =", "6 = Needs persuasion to eat at all"]
},
{
"question": "Concentration Difficulties. Representing difficulties in collecting one’s thoughts mounting to incapacitating lack of concentration. Rate according to intensity, frequency, and degree of incapacity produced",
"answers": ["0 = No difficulties in concentrating", "1 =", "2 = Occasional difficulties in collecting one’s thoughts", "3 =", "4 = Difficulties in concentrating and sustaining thought which reduces ability to read or hold a conversation", "5 =", "6 = Unable to read or converse without great difficulty"]
},
{
"question": "Lassitude. Representing a difficulty getting started or slowness initiating and performing everyday activities",
"answers": ["0 = Hardly any difficulties in getting started. No sluggishness", "1 =", "2 = Difficulties in starting activities", "3 =", "4 = Difficulties in starting simple routine activities, which are carried out with effort", "5 =", "6 = Complete lassitude. Unable to do anything without help"]
},
{
"question": "Inability to Feel. Representing the subjective experience of reduced interest in the surroundings, or activities that normally give pleasure.The ability to react with adequate emotion to circumstances or people is reduced",
"answers": ["0 = Normal interest in the surroundings and in other people", "1 = ", "2 = Reduced ability to enjoy usual interests", "3 =", "4 = Loss of interest in the surroundings. Loss of feelings for friends and acquaintances", "5 =", "6 = The experience of being emotionally paralyzed, inability to feel anger, grief or pleasure and a complete or even painful failure to feel for close relatives and friends"]
},
{
"question": "Pessimistic Thoughts. Representing thoughts of guilt, inferiority, self-reproach, sinfulness, remorse and ruin",
"answers": ["0 = No pessimistic thoughts", "1 =", "2 = Fluctuating ideas of failure, self-reproach or self-depreciation", "3 =", "4 = Persistent self-accusations, or definite but still rational ideas of guilt or sin. Increasingly pessimistic about the future", "5 =", "6 = Delusions of ruin, remorse and unredeemable sin. Self-accusations which are absurd and unshakable"]
},
{
"question": "Suicidal Thoughts. Representing the feeling that life is not worth living, that a natural death would be welcome, suicidal thoughts, and preparations for suicide. Suicidal attempts should not in themselves influence the rating",
"answers": ["0 = Enjoys life or takes it as it comes", "1 =", "2 = Weary of life. Only fleeting suicidal thoughts", "3 =", "4 = Probably better off dead. Suicidal thoughts are common, and suicide is considered as a possible solution, but without specific plans or intention", "5 =", "6 = Explicit plans for suicide when there is an opportunity. Active preparations for suicide"]
}
],
"pt": [
{
"question": "Tristeza Aparente. Representando desânimo, melancolia e desespero, (mais que apenas um desânimo transitório) refletido no discurso, expressão facial e postura. Classifique pela profundidade e incapacidade de melhorar",
"answers": ["0 = Não apresenta tristeza", "1 =", "2 = Parece desanimado(a), mas melhora sem dificuldade", "3 =", "4 = Parece triste e infeliz na maior parte do tempo", "5 =", "6 = Parece miserável o tempo todo. Extremamente desanimado(a)"]
},
{
"question": "Tristeza Relatada. Representando relatos de humor depressivo, independentemente de refletir ou não na aparência. Inclue desânimo, abatimento ou sentimento de estar além da ajuda e desesperançoso(a). Classifique de acordo com a intensidade, duração e extensão em que o humor é relatado ser influenciado por eventos",
"answers": ["0 = Tristeza ocasional de acordo com as circunstâncias", "1 =", "2 = Tristeza ou desânimo, mas melhora sem dificuldade", "3 =", "4 = Sentimentos penetrantes de tristeza ou melancolia. O humor ainda é influenciado por fatores externos", "5 =", "6 = Tristeza contínua ou invariável, angústia ou desânimo"]
},
{
"question": "Tensão Interna. Representando sentimentos de desconforto mal definidos, nervosismo, turbulência interior, tensão mental gerando pânico, pavor ou angústia. Classifique de acordo com a intensidade, frequência duração e extensão com que reaparecem",
"answers": ["0 = Tranquilo. Apenas tensão interna transitória", "1 =", "2 = Sentimentos ocasionais de nervosismo e desconforto mal definido", "3 =", "4 = Sentimentos contínuos de tensão interna ou pânico intermitente que o(a) paciente domina apenas com alguma dificuldade", "5 =", "6 = Pavor ou angústia implacáveis. Pânico esmagador"]
},
{
"question": "Sono Reduzido. Representando a experiência de sono com menor duração ou pronfundidade se comparado ao padrão de quando está bem",
"answers": ["0 = Dorme como de costume", "1 =", "2 = Pequena dificuldade para cair no sono ou sono levemente reduzido, leve ou inquieto", "3 =", "4 = Sono reduzido ou quebrado a pelo menos duas horas", "5 =", "6 = Menos que duas ou três horas totais de sono"]
},
{
"question": "Apetite Reduzido. Representando o sentimento de perda do apetite se comparado a quando está bem. Classifique pela perda do desejo de comer ou a necessidade de se forçar a comer",
"answers": ["0 = Apetite normal ou maior que o normal", "1 =", "2 = Apetite levemente reduzido", "3 =", "4 = Sem apetite. A comida é sem sabor", "5 =", "6 = Precisa ser convencido(a) a comer"]
},
{
"question": "Dificuldades de Concentração. Representando dificuldades em ordenar seus pensamentos levando a uma falta de concentração incapacitante. Classifique de acordo com a intensidade, frequência e grau de incapacidade produzidos",
"answers": ["0 = Sem dificuldades de concentração", "1 =", "2 = Dificuldade ocasional em ordenar seus pensamentos", "3 =", "4 = Dificuldades de concentração e de manter seus pensamentos que reduzem sua habilidade de ler e de manter um diálogo", "5 =", "6 = Incapaz de ler ou de conversar sem grande dificuldade"]
},
{
"question": "Lassitude. Representando uma dificuldade de começar ou uma demora em iniciar suas tarefas diárias",
"answers": ["0 = Raramente possue dificuldades em iniciar coisas. Sem lentidão", "1 =", "2 = Dificuldades em iniciar atividades", "3 =", "4 = Dificuldades em iniciar tarefas simples do dia a dia, que são executadas com esforço", "5 =", "6 = Lassitude completa. Incapaz de fazer qualquer coisa sem ajuda"]
},
{
"question": "Inabilidade de Sentir. Representando a experiência subjetiva da redução de interesse no que tem ao redor ou nas atividades que nomalmente davam prazer. A habilidade de reagir com emoção adequada às circunstâncias ou pessoas é reduzida",
"answers": ["0 = Interesse normal no entorno e nos outros", "1 = ", "2 = Habilidade reduzida de aproveitar interesses comuns", "3 =", "4 = Perda de interesse no entorno. Perda de sentimentos por amigos e conhecidos", "5 =", "6 = A experiência de estar emocionalmente paralisado, incapacidade de sentir raiva, luto ou prazer e uma completa ou até dolorosa falha em sentir por parentes e amigos"]
},
{
"question": "Pensamentos Pessimistas. Representando sentimentos de culpa, inferioridade, autodesaprovação, pecaminosidade, remorso e ruína",
"answers": ["0 = Sem pensamentos pessimistas", "1 =", "2 = Ideias flutuantes de falha, autodesaprovação ou autodepreciação", "3 =", "4 = Autoacusações presistentes, ou ideias definitivas, mas ainda racionais, de culpa ou pecado. Cada vez mais pessimista sobre o futuro", "5 =", "6 = Ilusão de ruína, remorso e pecado irrecuperáveis. Autoacusações que são absurdas e inabaláveis"]
},
{
"question": "Pensamentos Suicidas. Representando o sentimento de que a vida não vale a pena, que uma morte natural seria bem vinda, pensamentos suicidas e preparação para o suicídio. Tentativas de suicídio não devem, por si só, influenciar a classificação",
"answers": ["0 = Gosta da vida ou lida com uma coisa de cada vez", "1 =", "2 = Cansado(a) da vida. Apenas pensamentos suicidas efêmeros", "3 =", "4 = Provavelmente seria melhor a morte. Pensamentos suicidas são comuns e suicídio é considerado uma solução possível, mas sem planos específicos ou intenção", "5 =", "6 = Planos explícitos para o suicídio quando há uma oportunidade. Preparações ativas para o suicídio"]
}
]
}
| 124.326316
| 500
| 0.709169
|
b4f15431b4f0ec62ca58e495b45e7c131c93af5e
| 1,494
|
py
|
Python
|
deepmosaic/repeatAnnotation.py
|
Virginiaxu/DeepMosaic
|
fe5bf9b98e36f0b9ef6bb88345d4afaa55054e96
|
[
"MIT"
] | 11
|
2020-09-23T10:38:16.000Z
|
2022-02-16T04:19:56.000Z
|
deepmosaic/repeatAnnotation.py
|
Virginiaxu/DeepMosaic
|
fe5bf9b98e36f0b9ef6bb88345d4afaa55054e96
|
[
"MIT"
] | 2
|
2020-12-30T19:57:26.000Z
|
2021-11-18T08:38:56.000Z
|
deepmosaic/repeatAnnotation.py
|
Virginiaxu/DeepMosaic
|
fe5bf9b98e36f0b9ef6bb88345d4afaa55054e96
|
[
"MIT"
] | null | null | null |
import sys, os
import pandas as pd
import tempfile
import subprocess
import pkg_resources
import re
#all_repeats_path = pkg_resources.resource_filename('deepmosaic', 'resources/all_repeats.b37.bed')
#segdup_path = pkg_resources.resource_filename('deepmosaic', 'resources/segdup.hg19.bed')
HERE = os.path.abspath(os.path.dirname(__file__))
all_repeats_path = os.path.join(HERE, "./resources/all_repeats.b37.bed")
segdup_path = os.path.join(HERE, "./resources/segdup.hg19.bed")
def repeats_annotation(all_variants, output_dir):
rp_fd, rp_path = tempfile.mkstemp()
try:
with os.fdopen(rp_fd, 'w') as tmp:
            # write one 0-based BED interval per variant, keyed so the bedtools
            # annotations can be joined back to each variant afterwards
for variant in all_variants:
sample_name, bam, chrom, pos, ref, alt, depth, sex = variant
key = "_".join([chrom, pos, ref, alt])
tmp.write("\t".join(map(str, [chrom, int(pos)-1, int(pos) + len(ref)-2, ref, alt, key])) + "\n")
command = "bedtools annotate -i " + rp_path +" -files " + all_repeats_path + " " + segdup_path + " > " + \
output_dir + "repeats_annotation.bed"
subprocess.call(command, shell=True)
os.remove(rp_path)
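        # bedtools annotate appends one extra column per "-files" entry giving the fraction
        # of each interval covered by that file, so columns 6 and 7 hold the repeat and
        # segdup overlap fractions for the variant key stored in column 5.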
df = pd.read_csv(output_dir + "repeats_annotation.bed", header=None, sep="\t")
repeats_dict = dict(zip(df[5], zip(df[6], df[7])))
return repeats_dict
except:
        sys.stderr.write("Error with repeat annotation. Check that bedtools is installed and on your PATH (e.g. 'module load bedtools').\n")
sys.exit(2)
| 43.941176
| 115
| 0.649933
|
e6a8211dcc299f47b5df0be338e99dea67c24009
| 3,778
|
py
|
Python
|
examples/01_resources_and_rendering/01_load_and_render_pygame.py
|
inniyah/pytmxloader
|
93a4f8565300663c731b48a7ebe1c9d3b72479b2
|
[
"BSD-3-Clause"
] | 1
|
2020-02-22T03:47:09.000Z
|
2020-02-22T03:47:09.000Z
|
examples/01_resources_and_rendering/01_load_and_render_pygame.py
|
inniyah/pytmxloader
|
93a4f8565300663c731b48a7ebe1c9d3b72479b2
|
[
"BSD-3-Clause"
] | null | null | null |
examples/01_resources_and_rendering/01_load_and_render_pygame.py
|
inniyah/pytmxloader
|
93a4f8565300663c731b48a7ebe1c9d3b72479b2
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This is the pygame minimal example.
"""
__revision__ = "$Rev$"
__version__ = "3.0.0." + __revision__[6:-2]
__author__ = 'DR0ID @ 2009-2011'
import sys
import os
import pygame
try:
import _path
except:
pass
import tiledtmxloader
# -----------------------------------------------------------------------------
def main():
"""
Main method.
"""
args = sys.argv[1:]
if len(args) < 1:
path_to_map = os.path.join(os.pardir, "001-1.tmx")
print(("usage: python %s your_map.tmx\n\nUsing default map '%s'\n" % \
(os.path.basename(__file__), path_to_map)))
else:
path_to_map = args[0]
demo_pygame(path_to_map)
# -----------------------------------------------------------------------------
def demo_pygame(file_name):
"""
Example showing basic loading and rendering of a tmx map.
"""
    # parse the map (it is done here to initialize the
# window the same size as the map if it is small enough)
world_map = tiledtmxloader.tmxreader.TileMapParser().parse_decode(file_name)
# init pygame and set up a screen
pygame.init()
pygame.display.set_caption("tiledtmxloader - " + file_name + " - keys: arrows" )
screen_width = min(1024, world_map.pixel_width)
screen_height = min(768, world_map.pixel_height)
screen = pygame.display.set_mode((screen_width, screen_height))
# load the images using pygame
resources = tiledtmxloader.helperspygame.ResourceLoaderPygame()
resources.load(world_map)
# prepare map rendering
assert world_map.orientation == "orthogonal"
# renderer
renderer = tiledtmxloader.helperspygame.RendererPygame()
# cam_offset is for scrolling
cam_world_pos_x = 0
cam_world_pos_y = 0
# set initial cam position and size
renderer.set_camera_position_and_size(cam_world_pos_x, cam_world_pos_y, \
screen_width, screen_height, "topleft")
# retrieve the layers
sprite_layers = tiledtmxloader.helperspygame.get_layers_from_map(resources)
# variables for the main loop
frames_per_sec = 60.0
clock = pygame.time.Clock()
running = True
# mainloop
while running:
clock.tick(frames_per_sec)
# event handling
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
running = False
elif event.key == pygame.K_UP:
cam_world_pos_y -= world_map.tileheight
elif event.key == pygame.K_DOWN:
cam_world_pos_y += world_map.tileheight
elif event.key == pygame.K_RIGHT:
cam_world_pos_x += world_map.tilewidth
elif event.key == pygame.K_LEFT:
cam_world_pos_x -= world_map.tilewidth
# adjust camera to position according to the keypresses
renderer.set_camera_position(cam_world_pos_x, \
cam_world_pos_y, "topleft")
# clear screen, might be left out if every pixel is redrawn anyway
screen.fill((0, 0, 0))
# render the map
for sprite_layer in sprite_layers:
if sprite_layer.is_object_group:
# we dont draw the object group layers
# you should filter them out if not needed
continue
else:
renderer.render_layer(screen, sprite_layer)
pygame.display.flip()
# -----------------------------------------------------------------------------
if __name__ == '__main__':
main()
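# --- Hedged variation (not part of the original example) ---
# Continuous scrolling via pygame.key.get_pressed() instead of discrete KEYDOWN
# events, reusing only renderer.set_camera_position() as shown above.
def scroll_continuously(renderer, cam_x, cam_y, step=4):
    pressed = pygame.key.get_pressed()
    if pressed[pygame.K_LEFT]:
        cam_x -= step
    if pressed[pygame.K_RIGHT]:
        cam_x += step
    if pressed[pygame.K_UP]:
        cam_y -= step
    if pressed[pygame.K_DOWN]:
        cam_y += step
    renderer.set_camera_position(cam_x, cam_y, "topleft")
    return cam_x, cam_y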
| 28.406015
| 84
| 0.57729
|
3f28bc18d4a608e91e0931e3c3779b91170e7ac8
| 5,367
|
py
|
Python
|
base/lib/pythonbin/urwid/tests/test_widget.py
|
threefoldtech/sandbox_osx
|
e2a5ea812c3789dea40113719dbad6d6ee7cd720
|
[
"Apache-2.0"
] | 4
|
2021-10-14T21:22:25.000Z
|
2022-03-12T19:58:48.000Z
|
base/lib/pythonbin/urwid/tests/test_widget.py
|
threefoldtech/sandbox_osx
|
e2a5ea812c3789dea40113719dbad6d6ee7cd720
|
[
"Apache-2.0"
] | 3
|
2020-06-05T18:53:36.000Z
|
2021-06-10T20:47:05.000Z
|
base/lib/pythonbin/urwid/tests/test_widget.py
|
threefoldtech/sandbox_osx
|
e2a5ea812c3789dea40113719dbad6d6ee7cd720
|
[
"Apache-2.0"
] | 1
|
2022-03-15T22:52:53.000Z
|
2022-03-15T22:52:53.000Z
|
# -*- coding: utf-8 -*-
import unittest
from urwid.compat import B
import urwid
class TextTest(unittest.TestCase):
def setUp(self):
self.t = urwid.Text("I walk the\ncity in the night")
def test1_wrap(self):
expected = [B(t) for t in ("I walk the","city in ","the night ")]
got = self.t.render((10,))._text
assert got == expected, "got: %r expected: %r" % (got, expected)
def test2_left(self):
self.t.set_align_mode('left')
expected = [B(t) for t in ("I walk the ","city in the night ")]
got = self.t.render((18,))._text
assert got == expected, "got: %r expected: %r" % (got, expected)
def test3_right(self):
self.t.set_align_mode('right')
expected = [B(t) for t in (" I walk the"," city in the night")]
got = self.t.render((18,))._text
assert got == expected, "got: %r expected: %r" % (got, expected)
def test4_center(self):
self.t.set_align_mode('center')
expected = [B(t) for t in (" I walk the "," city in the night")]
got = self.t.render((18,))._text
assert got == expected, "got: %r expected: %r" % (got, expected)
def test5_encode_error(self):
urwid.set_encoding("ascii")
expected = [B("? ")]
got = urwid.Text(u'û').render((3,))._text
assert got == expected, "got: %r expected: %r" % (got, expected)
class EditTest(unittest.TestCase):
def setUp(self):
self.t1 = urwid.Edit(B(""),"blah blah")
self.t2 = urwid.Edit(B("stuff:"), "blah blah")
self.t3 = urwid.Edit(B("junk:\n"),"blah blah\n\nbloo",1)
self.t4 = urwid.Edit(u"better:")
def ktest(self, e, key, expected, pos, desc):
got= e.keypress((12,),key)
assert got == expected, "%s. got: %r expected:%r" % (desc, got,
expected)
assert e.edit_pos == pos, "%s. pos: %r expected pos: %r" % (
desc, e.edit_pos, pos)
def test1_left(self):
self.t1.set_edit_pos(0)
self.ktest(self.t1,'left','left',0,"left at left edge")
self.ktest(self.t2,'left',None,8,"left within text")
self.t3.set_edit_pos(10)
self.ktest(self.t3,'left',None,9,"left after newline")
def test2_right(self):
self.ktest(self.t1,'right','right',9,"right at right edge")
self.t2.set_edit_pos(8)
self.ktest(self.t2,'right',None,9,"right at right edge-1")
self.t3.set_edit_pos(0)
self.t3.keypress((12,),'right')
assert self.t3.get_pref_col((12,)) == 1
def test3_up(self):
self.ktest(self.t1,'up','up',9,"up at top")
self.t2.set_edit_pos(2)
self.t2.keypress((12,),"left")
assert self.t2.get_pref_col((12,)) == 7
self.ktest(self.t2,'up','up',1,"up at top again")
assert self.t2.get_pref_col((12,)) == 7
self.t3.set_edit_pos(10)
self.ktest(self.t3,'up',None,0,"up at top+1")
def test4_down(self):
self.ktest(self.t1,'down','down',9,"down single line")
self.t3.set_edit_pos(5)
self.ktest(self.t3,'down',None,10,"down line 1 to 2")
self.ktest(self.t3,'down',None,15,"down line 2 to 3")
self.ktest(self.t3,'down','down',15,"down at bottom")
def test_utf8_input(self):
urwid.set_encoding("utf-8")
self.t1.set_edit_text('')
self.t1.keypress((12,), u'û')
self.assertEqual(self.t1.edit_text, u'û'.encode('utf-8'))
self.t4.keypress((12,), u'û')
self.assertEqual(self.t4.edit_text, u'û')
class EditRenderTest(unittest.TestCase):
def rtest(self, w, expected_text, expected_cursor):
expected_text = [B(t) for t in expected_text]
get_cursor = w.get_cursor_coords((4,))
assert get_cursor == expected_cursor, "got: %r expected: %r" % (
get_cursor, expected_cursor)
r = w.render((4,), focus = 1)
text = [t for a, cs, t in [ln[0] for ln in r.content()]]
assert text == expected_text, "got: %r expected: %r" % (text,
expected_text)
assert r.cursor == expected_cursor, "got: %r expected: %r" % (
r.cursor, expected_cursor)
def test1_SpaceWrap(self):
w = urwid.Edit("","blah blah")
w.set_edit_pos(0)
self.rtest(w,["blah","blah"],(0,0))
w.set_edit_pos(4)
self.rtest(w,["lah ","blah"],(3,0))
w.set_edit_pos(5)
self.rtest(w,["blah","blah"],(0,1))
w.set_edit_pos(9)
self.rtest(w,["blah","lah "],(3,1))
def test2_ClipWrap(self):
w = urwid.Edit("","blah\nblargh",1)
w.set_wrap_mode('clip')
w.set_edit_pos(0)
self.rtest(w,["blah","blar"],(0,0))
w.set_edit_pos(10)
self.rtest(w,["blah","argh"],(3,1))
w.set_align_mode('right')
w.set_edit_pos(6)
self.rtest(w,["blah","larg"],(0,1))
def test3_AnyWrap(self):
w = urwid.Edit("","blah blah")
w.set_wrap_mode('any')
self.rtest(w,["blah"," bla","h "],(1,2))
def test4_CursorNudge(self):
w = urwid.Edit("","hi",align='right')
w.keypress((4,),'end')
self.rtest(w,[" hi "],(3,0))
w.keypress((4,),'left')
self.rtest(w,[" hi"],(3,0))
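# --- Added for convenience (not in the original module) ---
# A standard __main__ guard so the file can also be run directly, e.g.:
#   python -m unittest urwid.tests.test_widget -v
if __name__ == '__main__':
    unittest.main()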
| 34.850649
| 78
| 0.547233
|
5d71711be07f92a470c6342825830074f2011f3f
| 411
|
py
|
Python
|
utils/blender-export-edges/blender-export-edges.py
|
jmptable/spinneret
|
b8b24cdde062c58201329076ca77a35a69837c22
|
[
"MIT"
] | 1
|
2019-03-25T13:22:04.000Z
|
2019-03-25T13:22:04.000Z
|
utils/blender-export-edges/blender-export-edges.py
|
jmptable/spinneret
|
b8b24cdde062c58201329076ca77a35a69837c22
|
[
"MIT"
] | null | null | null |
utils/blender-export-edges/blender-export-edges.py
|
jmptable/spinneret
|
b8b24cdde062c58201329076ca77a35a69837c22
|
[
"MIT"
] | null | null | null |
import bpy
outputFile = '/tmp/mesh.csv'
csv = ''
for edge in bpy.context.object.data.edges:
xIndex = edge.vertices[0]
yIndex = edge.vertices[1]
a = bpy.context.object.data.vertices[xIndex]
b = bpy.context.object.data.vertices[yIndex]
csv += ','.join([str(c) for c in [a.co[0], a.co[1], a.co[2], b.co[0], b.co[1], b.co[2]]]) + '\n'
f = open(outputFile, 'w')
f.writelines(csv)
f.close()
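# --- Hedged companion sketch (not part of the original exporter) ---
# Reading the exported edge list back outside Blender; each CSV row is
# ax,ay,az,bx,by,bz for one edge.
def load_edges(path='/tmp/mesh.csv'):
    edges = []
    with open(path) as csv_file:
        for line in csv_file:
            ax, ay, az, bx, by, bz = map(float, line.strip().split(','))
            edges.append(((ax, ay, az), (bx, by, bz)))
    return edges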
| 21.631579
| 100
| 0.613139
|
a9405032574b968ee90e4071a8e4bec2a0a96f45
| 9,929
|
py
|
Python
|
app/recipe/tests/test_recipe_api.py
|
reeninja/recipe-app-api
|
bc35887db7129e844ea177dd26402c494a629a9b
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_recipe_api.py
|
reeninja/recipe-app-api
|
bc35887db7129e844ea177dd26402c494a629a9b
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_recipe_api.py
|
reeninja/recipe-app-api
|
bc35887db7129e844ea177dd26402c494a629a9b
|
[
"MIT"
] | null | null | null |
import tempfile
import os
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def image_upload_url(recipe_id):
"""Return url for recipe image upload"""
return reverse('recipe:recipe-upload-image', args=[recipe_id])
def detail_url(recipe_id):
"""
Return recipe details URL
"""
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main course'):
"""
Create and return sample tag
"""
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
"""
Create and return sample ingredient
"""
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
"""
Create and return a sample Recipe
"""
defaults = {
'title': 'sample recipe',
'time_minutes': 10,
'price': 5.00
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
class PublicRecipeApiTests(TestCase):
"""
Test unauthenticated recipe API access
"""
def setUp(self):
self.client = APIClient()
def test_auth_required(self):
"""
Test that authorization is required
"""
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
"""
Test authenticated recipe API
"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@test.com',
'test1234'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
"""
Test retrieving recipes
"""
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
    def test_recipes_limited_to_user(self):
        """
        Test that retrieved recipes are limited to the authenticated user
        """
user2 = get_user_model().objects.create_user(
'test2@test.com',
'test1234'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
"""
Test viewing a recipe detail
"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
"""
Test creating recipe
"""
payload = {
'title': 'Chocolate cheesecake',
'time_minutes': 30,
'price': 5.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
"""
Test creating a recipe with tags
"""
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Dessert')
payload = {
'title': 'Avocado lime cheesecake',
'tags': [tag1.id, tag2.id],
'time_minutes': 50,
'price': 20.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredients(self):
"""
Test creating a recipe with ingredients
"""
ingredient1 = sample_ingredient(user=self.user, name='Salt')
ingredient2 = sample_ingredient(user=self.user, name='Onion')
payload = {
'title': 'Onion cheesecake',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 30,
'price': 10.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
    def test_recipe_partial_update(self):
"""
Test updating a recipe partially (patch)
"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user, name='curry')
payload = {'title': 'Chicken tikka', 'tags': [new_tag.id]}
url = detail_url(recipe.id)
self.client.patch(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
    def test_recipe_full_update(self):
        """
        Test updating a recipe fully (put)
        """
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
'title': 'Spaghetti carbonara',
'time_minutes': 25,
'price': 5.00
}
url = detail_url(recipe.id)
self.client.put(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
def test_filter_recipes_by_tag(self):
"""Test returning recipes with specific tags"""
recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry')
recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Vegetarian')
recipe1.tags.add(tag1)
recipe2.tags.add(tag2)
recipe3 = sample_recipe(user=self.user, title='Fish and chips')
res = self.client.get(
RECIPES_URL,
{'tags': f'{tag1.id},{tag2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
def test_filter_recipes_by_ingredients(self):
"""Test returning recipes with specific ingredients"""
recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry')
recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
ingredient1 = sample_ingredient(user=self.user, name='Feta cheese')
ingredient2 = sample_ingredient(user=self.user, name='Chicken')
recipe1.ingredients.add(ingredient1)
recipe2.ingredients.add(ingredient2)
recipe3 = sample_recipe(user=self.user, title='Steak and mushrooms')
res = self.client.get(
RECIPES_URL,
{'ingredients': f'{ingredient1.id},{ingredient2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
class RecipeImageUploadTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'user@test.com',
'test1234'
)
self.client.force_authenticate(self.user)
self.recipe = sample_recipe(user=self.user)
def tearDown(self):
self.recipe.image.delete()
def test_upload_image_to_recipe(self):
"""Test uploading an image to recipe"""
url = image_upload_url(self.recipe.id)
with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
img = Image.new('RGB', (10, 10))
img.save(ntf, format='JPEG')
ntf.seek(0)
res = self.client.post(url, {'image': ntf}, format='multipart')
self.recipe.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIn('image', res.data)
self.assertTrue(os.path.exists(self.recipe.image.path))
def test_upload_image_bad_request(self):
"""Test uploading an invalid image"""
url = image_upload_url(self.recipe.id)
res = self.client.post(url, {'image': 'notimage'}, format='multipart')
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
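# --- Hedged note (not part of the original test module) ---
# In a typical Django project layout this suite would be run with the test
# runner, e.g.:
#   python manage.py test recipe.tests.test_recipe_api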
| 32.029032
| 78
| 0.631786
|
993481da1fc7ac36ce5a231b24b6df7e13f676d9
| 646
|
py
|
Python
|
crawler/data/counter.py
|
manhph2211/SentimentAnalysis
|
dc346203137bed9b1f6892c298d9c6d621533582
|
[
"MIT"
] | 4
|
2021-06-08T07:59:54.000Z
|
2021-12-16T13:16:02.000Z
|
crawler/data/counter.py
|
manhph2211/SentimentAnalysis
|
dc346203137bed9b1f6892c298d9c6d621533582
|
[
"MIT"
] | null | null | null |
crawler/data/counter.py
|
manhph2211/SentimentAnalysis
|
dc346203137bed9b1f6892c298d9c6d621533582
|
[
"MIT"
] | null | null | null |
import pandas as pd
import glob
import json
def converter(point):
star = 0
if point >=8.5:
star = 5
elif 8.5>point>=7.5:
star = 4
elif 7.5>point>=6:
star = 3
elif 6>point>=4:
star = 2
else:
star = 1
return star
def counter():
counter = {}
for i in range(1,6):
counter[i] = 0
csv_files = glob.glob('./*.csv')
for file in csv_files:
df = pd.read_csv(file)
df['star'] = df['star'].apply(lambda x: converter(x))
stars = df['star'].to_numpy()
for star in stars:
counter[star] += 1
df.to_csv(file)
with open('./results.json','w') as f:
json.dump(counter,f,indent=4)
if __name__ == '__main__':
counter()
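# --- Illustrative values for converter (not part of the original script) ---
# converter(9.0) -> 5   (point >= 8.5)
# converter(8.0) -> 4   (7.5 <= point < 8.5)
# converter(7.0) -> 3   (6 <= point < 7.5)
# converter(5.0) -> 2   (4 <= point < 6)
# converter(3.0) -> 1   (point < 4)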
| 15.756098
| 55
| 0.614551
|
b5aaa4ea18c4fd57a2cf1a97450b8a7669f678fe
| 8,292
|
py
|
Python
|
tools/test.py
|
CityU-AIM-Group/HTD
|
0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
[
"MIT"
] | 5
|
2022-02-18T16:26:29.000Z
|
2022-03-07T07:25:20.000Z
|
tools/test.py
|
CityU-AIM-Group/HTD
|
0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
[
"MIT"
] | 1
|
2022-02-24T12:51:19.000Z
|
2022-02-28T06:31:15.000Z
|
tools/test.py
|
CityU-AIM-Group/HTD
|
0be9fd844118c275abc6053b3cbd5ffb589e62ee
|
[
"MIT"
] | null | null | null |
import argparse
import os
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', default='configs_HTD/htd/r101.py', help='train config file path')
parser.add_argument('checkpoint',default='configs_HTD/models/r101/epoch_24.pth', help='checkpoint file')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn; this will slightly increase '
        'the inference speed')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
default='bbox',
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
# '--show-dir',default='../show/polyp/',help='directory where painted images will be saved')
'--show-dir',help='directory where painted images will be saved')
parser.add_argument(
'--show-score-thr',
type=float,
# default=0.05,
default=0.3,
help='score threshold (default: 0.3)')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function (deprecate), '
'change to --eval-options instead.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.eval_options:
raise ValueError(
'--options and --eval-options cannot be both '
'specified, --options is deprecated in favor of --eval-options')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options')
args.eval_options = args.options
return args
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
if cfg.model.get('neck'):
if isinstance(cfg.model.neck, list):
for neck_cfg in cfg.model.neck:
if neck_cfg.get('rfp_backbone'):
if neck_cfg.rfp_backbone.get('pretrained'):
neck_cfg.rfp_backbone.pretrained = None
elif cfg.model.neck.get('rfp_backbone'):
if cfg.model.neck.rfp_backbone.get('pretrained'):
cfg.model.neck.rfp_backbone.pretrained = None
# in case the test dataset is concatenated
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
print(model)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints; this workaround is
    # for backward compatibility
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
args.show_score_thr)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
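# --- Hedged invocation example (not part of the original script) ---
# Uses the config and checkpoint paths that appear as defaults above; the
# actual files depend on the local checkout.
#   python tools/test.py configs_HTD/htd/r101.py configs_HTD/models/r101/epoch_24.pth \
#       --eval bbox --show-score-thr 0.3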
| 38.567442
| 108
| 0.63302
|
4f2f1eba21bf8fae474e132af57feccdc0fecab2
| 35,007
|
py
|
Python
|
sockeye/model.py
|
ogunoz/sockeye
|
26c02b1016b0937714ecd4ab367a6a67761ef2df
|
[
"Apache-2.0"
] | 3
|
2019-06-03T17:29:49.000Z
|
2021-07-11T05:51:53.000Z
|
sockeye/model.py
|
ogunoz/sockeye
|
26c02b1016b0937714ecd4ab367a6a67761ef2df
|
[
"Apache-2.0"
] | 1
|
2020-10-12T19:36:02.000Z
|
2020-10-12T19:36:02.000Z
|
sockeye/model.py
|
DianaLaura/sockeye
|
e01e4317a1ea42ed6d6a686dbc0782b73611a7c2
|
[
"Apache-2.0"
] | 1
|
2021-07-11T05:51:54.000Z
|
2021-07-11T05:51:54.000Z
|
# Copyright 2017--2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import copy
import time
import logging
import os
from typing import cast, Dict, Optional, Tuple, Union, List
from functools import lru_cache
import mxnet as mx
from sockeye import __version__
from sockeye.config import Config
from . import constants as C
from . import data_io
from . import decoder
from . import encoder
from . import layers
from . import quantization
from . import utils
from . import vocab
logger = logging.getLogger(__name__)
class ModelConfig(Config):
"""
ModelConfig defines model parameters defined at training time which are relevant to model inference.
Add new model parameters here. If you want backwards compatibility for models trained with code that did not
contain these parameters, provide a reasonable default under default_values.
:param config_data: Used training data.
:param vocab_source_size: Source vocabulary size.
:param vocab_target_size: Target vocabulary size.
:param config_embed_source: Embedding config for source.
:param config_embed_target: Embedding config for target.
:param config_encoder: Encoder configuration.
:param config_decoder: Decoder configuration.
:param config_length_task: Optional length task configuration.
:param weight_tying_type: Determines which weights get tied.
:param lhuc: LHUC (Vilar 2018) is applied at some part of the model.
:param dtype: Data type of model parameters. Default: float32.
:param intgemm_custom_lib: Path to intgemm custom operator library used for dtype is int8. Default: libintgemm.so
in the same directory as this script.
"""
def __init__(self,
config_data: data_io.DataConfig,
vocab_source_size: int,
vocab_target_size: int,
config_embed_source: encoder.EmbeddingConfig,
config_embed_target: encoder.EmbeddingConfig,
config_encoder: encoder.EncoderConfig,
config_decoder: decoder.DecoderConfig,
                 config_length_task: Optional[layers.LengthRatioConfig] = None,
weight_tying_type: str = C.WEIGHT_TYING_SRC_TRG_SOFTMAX,
lhuc: bool = False,
dtype: str = C.DTYPE_FP32,
intgemm_custom_lib: str = os.path.join(os.path.dirname(__file__), "libintgemm.so")) -> None:
super().__init__()
self.config_data = config_data
self.vocab_source_size = vocab_source_size
self.vocab_target_size = vocab_target_size
self.config_embed_source = config_embed_source
self.config_embed_target = config_embed_target
self.config_encoder = config_encoder
self.config_decoder = config_decoder
self.config_length_task = config_length_task
self.weight_tying_type = weight_tying_type
self.lhuc = lhuc
self.dtype = dtype
self.intgemm_custom_lib = intgemm_custom_lib
class SockeyeModel(mx.gluon.Block):
"""
SockeyeModel shares components needed for both training and inference.
The main components of a Sockeye model are
1) Source embedding
2) Target embedding
3) Encoder
4) Decoder
5) Output Layer
ModelConfig contains parameters and their values that are fixed at training time and must be re-used at inference
time.
:param config: Model configuration.
:param inference_only: Use the model only for inference, enabling optimizations.
:param prefix: Name prefix for all parameters of this model.
"""
def __init__(self,
config: ModelConfig,
inference_only: bool = False,
mc_dropout: bool = False,
forward_pass_cache_size: int = 0,
prefix: str = '',
**kwargs) -> None:
super().__init__(prefix=prefix, **kwargs)
self.config = copy.deepcopy(config)
logger.info("%s", self.config)
self.dtype = config.dtype
self.mc_dropout = mc_dropout
self._output_layer_factor_format_string = 'output_layer_factor%i'
self.forward_pass_cache_size = forward_pass_cache_size
self.embed_and_encode = self._embed_and_encode
if self.forward_pass_cache_size > 0:
self.embed_and_encode = self._cache_wrapper(self._embed_and_encode)
with self.name_scope():
# source & target embeddings
self.source_embed_weight, self.target_embed_weight, self.output_weight = self._get_embedding_weights()
self.embedding_source = encoder.Embedding(config.config_embed_source,
prefix=self.prefix + C.SOURCE_EMBEDDING_PREFIX,
embed_weight=self.source_embed_weight)
self.embedding_target = encoder.Embedding(config.config_embed_target,
prefix=self.prefix + C.TARGET_EMBEDDING_PREFIX,
embed_weight=self.target_embed_weight)
# encoder & decoder first (to know the decoder depth)
self.encoder = encoder.get_encoder(self.config.config_encoder, prefix=self.prefix, dtype=config.dtype)
self.decoder = decoder.get_decoder(self.config.config_decoder, inference_only=inference_only,
prefix=self.prefix, dtype=config.dtype)
self.output_layer = layers.OutputLayer(hidden_size=self.decoder.get_num_hidden(),
vocab_size=self.config.vocab_target_size,
weight=self.output_weight, dtype=config.dtype,
prefix=self.prefix + C.DEFAULT_OUTPUT_LAYER_PREFIX)
# Optional target factor output layers
for i, factor_config in enumerate(self.target_factor_configs, 1):
# Each target stream has its own, independent output layer
# TODO also consider weight tying with target factor input embeddings
output_layer = layers.OutputLayer(hidden_size=self.decoder.get_num_hidden(),
vocab_size=factor_config.vocab_size,
weight=None,
dtype=config.dtype,
prefix=self.prefix + C.TARGET_FACTOR_OUTPUT_LAYER_PREFIX % i)
# Register the layer as child block
setattr(self, self._output_layer_factor_format_string % i, output_layer)
self.length_ratio = None
if self.config.config_length_task is not None:
utils.check_condition(self.config.config_length_task.weight > 0.0,
'Auxiliary length task requested, but its loss weight is zero')
self.length_ratio = layers.LengthRatio(hidden_size=self.encoder.get_num_hidden(),
num_layers=self.config.config_length_task.num_layers,
prefix=self.prefix + C.LENRATIOS_OUTPUT_LAYER_PREFIX)
def cast(self, dtype):
self.dtype = dtype
super().cast(dtype)
def state_structure(self):
return self.decoder.state_structure()
def encode(self, inputs, valid_length=None):
"""Encode the input sequence.
Parameters
----------
inputs : NDArray
valid_length : NDArray or None, default None
Returns
-------
outputs : list
Outputs of the encoder.
"""
source_embed, source_embed_length = self.embedding_source(inputs, valid_length)
source_encoded, source_encoded_length = self.encoder(source_embed, source_embed_length)
return source_encoded, source_encoded_length
def encode_and_initialize(self, inputs, valid_length=None, constant_length_ratio=0.0):
"""
Encodes the input sequence and initializes decoder states (and predicted output lengths if available).
Used for inference/decoding.
Parameters
----------
inputs : NDArray
valid_length : NDArray or None, default None
constant_length_ratio : float
Returns
-------
states : list
Initial states for the decoder.
predicted_output_length : NDArray
Predicted output length of shape (batch_size,), 0 if not available.
"""
if self.mc_dropout:
# Turn on training mode so mxnet knows to add dropout
_ = mx.autograd.set_training(True)
# Encode input. Shape: (batch, length, num_hidden), (batch,)
source_encoded, source_encoded_lengths = self.encode(inputs, valid_length=valid_length)
predicted_output_length = self.predict_output_length(source_encoded,
source_encoded_lengths,
constant_length_ratio)
# Decoder init states
states = self.decoder.init_state_from_encoder(source_encoded, source_encoded_lengths)
return states, predicted_output_length
def _embed_and_encode(self, source, source_length, target, target_length):
"""
Encode the input sequence, embed the target sequence, and initialize the decoder.
Used for training.
:param source: Source input data.
:param source_length: Length of source inputs.
:param target: Target input data.
:param target_length: Length of target inputs.
:return: encoder outputs and lengths, target embeddings, and decoder initial states
"""
source_embed, source_embed_length = self.embedding_source(source, source_length)
target_embed, target_embed_length = self.embedding_target(target, target_length)
source_encoded, source_encoded_length = self.encoder(source_embed, source_embed_length)
states = self.decoder.init_state_from_encoder(source_encoded, source_encoded_length)
return source_encoded, source_encoded_length, target_embed, states
def decode_step(self, step_input, states, vocab_slice_ids=None):
"""
One step decoding of the translation model.
Parameters
----------
step_input : NDArray
Shape (batch_size, num_target_factors)
states : list of NDArrays
vocab_slice_ids : NDArray or None
Returns
-------
step_output : NDArray
Shape (batch_size, C_out)
states : list
target_factor_outputs : list
Optional target factor predictions.
"""
if self.mc_dropout:
# Turn on training mode so mxnet knows to add dropout
_ = mx.autograd.set_training(True)
valid_length = mx.nd.ones(shape=(step_input.shape[0],), ctx=step_input.context)
target_embed, _ = self.embedding_target(step_input.reshape((0, 1, -1)), valid_length=valid_length)
target_embed = target_embed.squeeze(axis=1)
decoder_out, new_states = self.decoder(target_embed, states)
# step_output: (batch_size, target_vocab_size or vocab_slice_ids)
step_output = self.output_layer(decoder_out, vocab_slice_ids)
# Target factor outputs are currently stored in additional outputs.
target_factor_outputs = []
# TODO: consider a dictionary mapping as return value
for factor_output_layer in self.factor_output_layers:
target_factor_outputs.append(factor_output_layer(decoder_out, None))
return step_output, new_states, target_factor_outputs
def forward(self, source, source_length, target, target_length): # pylint: disable=arguments-differ
source_encoded, source_encoded_length, target_embed, states = self.embed_and_encode(source, source_length,
target, target_length)
target = self.decoder.decode_seq(target_embed, states=states)
forward_output = dict()
forward_output[C.LOGITS_NAME] = self.output_layer(target, None)
for i, factor_output_layer in enumerate(self.factor_output_layers, 1):
forward_output[C.FACTOR_LOGITS_NAME % i] = factor_output_layer(target, None)
if self.length_ratio is not None:
# predicted_length_ratios: (batch_size,)
forward_output[C.LENRATIO_NAME] = self.length_ratio(source_encoded, source_encoded_length)
return forward_output
def predict_output_length(self,
source_encoded: mx.nd.NDArray,
source_encoded_length: mx.nd.NDArray,
constant_length_ratio: float = 0.0):
if self.length_ratio is not None:
# predicted_length_ratios: (batch_size,)
predicted_length_ratio = self.length_ratio(source_encoded, source_encoded_length)
predicted_output_length = predicted_length_ratio * source_encoded_length
elif constant_length_ratio > 0.0:
# (batch,)
predicted_output_length = source_encoded_length * constant_length_ratio
else:
# (batch,)
predicted_output_length = mx.nd.zeros_like(source_encoded_length)
return predicted_output_length
def save_config(self, folder: str):
"""
Saves model configuration to <folder>/config
:param folder: Destination folder.
"""
fname = os.path.join(folder, C.CONFIG_NAME)
self.config.save(fname)
logger.info('Saved model config to "%s"', fname)
@staticmethod
def load_config(fname: str) -> ModelConfig:
"""
Loads model configuration.
:param fname: Path to load model configuration from.
:return: Model configuration.
"""
config = ModelConfig.load(fname)
logger.info('Loaded model config from "%s"', fname)
return cast(ModelConfig, config) # type: ignore
def save_parameters(self, fname: str):
"""
Saves model parameters to file.
:param fname: Path to save parameters to.
"""
super().save_parameters(fname, deduplicate=True)
logging.info('Saved params to "%s"', fname)
def load_parameters(self,
filename: str,
ctx: Union[mx.Context, List[mx.Context]] = None,
allow_missing: bool = False,
ignore_extra: bool = False,
cast_dtype: bool = False,
dtype_source: str = 'current'):
"""Load parameters from file previously saved by `save_parameters`.
Parameters
----------
filename : str
Path to parameter file.
ctx : Context or list of Context, default cpu()
Context(s) to initialize loaded parameters on.
allow_missing : bool, default False
            Whether to silently skip loading parameters not represented in the file.
ignore_extra : bool, default False
Whether to silently ignore parameters from the file that are not
present in this Block.
cast_dtype : bool, default False
Cast the data type of the NDArray loaded from the checkpoint to the dtype
provided by the Parameter if any.
dtype_source : str, default 'current'
must be in {'current', 'saved'}
Only valid if cast_dtype=True, specify the source of the dtype for casting
the parameters
References
----------
`Saving and Loading Gluon Models \
<https://mxnet.incubator.apache.org/tutorials/gluon/save_load_params.html>`_
"""
utils.check_condition(os.path.exists(filename), "No model parameter file found under %s. "
"This is either not a model directory or the first training "
"checkpoint has not happened yet." % filename)
super().load_parameters(filename, ctx=ctx, allow_missing=allow_missing, ignore_extra=ignore_extra,
cast_dtype=cast_dtype, dtype_source=dtype_source)
logger.info('Loaded params from "%s" to "%s"', filename, mx.cpu() if ctx is None else ctx)
def set_parameters(self,
new_params: Dict[str, mx.gluon.parameter.Parameter],
allow_missing: bool = True,
ignore_extra: bool = False):
"""
Update model params on all contexts of the model with new values from a dictionary.
:param new_params: Dictionary containing the new parameters.
:param allow_missing: Whether to skip setting parameters not represented in the dictionary.
:param ignore_extra: Whether to ignore parameters from new_params that are not present in this model.
"""
model_params = self.collect_params()
if not allow_missing:
for k in model_params.keys():
assert k in new_params.keys(), "Parameter '%s' is missing in new_params dictionary. " \
"Set allow_missing=True to ignore missing parameters." % k
for k in new_params:
assert new_params[k]._data is not None, "Parameter '%s' is not initialized in new_params dictionary." % k
if not ignore_extra and k not in model_params:
raise ValueError("Parameter '%s' in new_params dictionary is not preset in ParameterDict. "
"Set ignore_extra=True to ignore." % k)
if k in model_params:
assert model_params[k]._data is not None, "Parameter '%s' must be initialized before it can be reset " \
"using set_parameters." % k
assert model_params[k].shape == new_params[k].shape, \
"Parameter '%s' has shape '%s' in the model but shape '%s' in the new_params dictionary." % \
(k, model_params[k].shape, new_params[k].shape)
model_params[k].set_data(new_params[k].data())
@staticmethod
def save_version(folder: str):
"""
Saves version to <folder>/version.
:param folder: Destination folder.
"""
fname = os.path.join(folder, C.VERSION_NAME)
with open(fname, "w") as out:
out.write(__version__)
def _get_embedding_weights(self) -> Tuple[mx.gluon.Parameter, mx.gluon.Parameter, mx.gluon.Parameter]:
"""
Returns embeddings for source, target, and output layer.
When source and target embeddings are shared, they are created here and passed in to each side,
instead of being created in the Embedding constructors.
:return: Tuple of source, target, and output embedding parameters.
"""
share_embed = C.WEIGHT_TYING_SRC in self.config.weight_tying_type and \
C.WEIGHT_TYING_TRG in self.config.weight_tying_type
tie_weights = C.WEIGHT_TYING_SOFTMAX in self.config.weight_tying_type
source_embed_name = C.SOURCE_EMBEDDING_PREFIX + "weight" if not share_embed else C.SHARED_EMBEDDING_PREFIX + "weight"
target_embed_name = C.TARGET_EMBEDDING_PREFIX + "weight" if not share_embed else C.SHARED_EMBEDDING_PREFIX + "weight"
output_embed_name = "target_output_weight" if not tie_weights else target_embed_name
source_grad_stype = 'row_sparse' if self.config.config_embed_source.allow_sparse_grad and not tie_weights else 'default'
source_embed_weight = self.params.get(source_embed_name,
shape=(self.config.config_embed_source.vocab_size,
self.config.config_embed_source.num_embed),
allow_deferred_init=True,
grad_stype=source_grad_stype)
if share_embed:
target_embed_weight = source_embed_weight
else:
target_grad_stype = 'row_sparse' if self.config.config_embed_target.allow_sparse_grad and not tie_weights else 'default'
target_embed_weight = self.params.get(target_embed_name,
shape=(self.config.config_embed_target.vocab_size,
self.config.config_embed_target.num_embed),
allow_deferred_init=True,
grad_stype=target_grad_stype)
if tie_weights:
output_weight = target_embed_weight
else:
output_weight = self.params.get(output_embed_name,
shape=(self.config.config_embed_target.vocab_size,
self.config.config_decoder.model_size),
allow_deferred_init=True)
return source_embed_weight, target_embed_weight, output_weight
@property
def num_source_factors(self) -> int:
""" Returns the number of source factors of this model (at least 1). """
return self.config.config_data.num_source_factors
@property
def num_target_factors(self) -> int:
""" Returns the number of target factors of this model (at least 1). """
return self.config.config_data.num_target_factors
@property
def target_factor_configs(self) -> List[encoder.FactorConfig]:
""" Returns the factor configs for target factors. """
factor_configs = [] # type: List[encoder.FactorConfig]
if self.config.config_embed_target.factor_configs:
factor_configs = self.config.config_embed_target.factor_configs
return factor_configs
@property
def factor_output_layers(self) -> List[layers.OutputLayer]:
""" Returns the list of factor output layers. """
return [getattr(self, self._output_layer_factor_format_string % i) for i, _ in
enumerate(self.target_factor_configs, 1)]
@property
def training_max_observed_len_source(self) -> int:
""" The maximum sequence length on the source side observed during training. This includes the <eos> token. """
return self.config.config_data.data_statistics.max_observed_len_source
@property
def training_max_observed_len_target(self) -> int:
""" The maximum sequence length on the target side observed during training. This includes the <bos> token. """
return self.config.config_data.data_statistics.max_observed_len_target
@property
def max_supported_len_source(self) -> int:
""" The maximum supported source length. This includes the <eos> token. """
return self.config.config_data.max_seq_len_source
@property
def max_supported_len_target(self) -> int:
""" The maximum supported target length. This includes the <bos> token. """
return self.config.config_data.max_seq_len_target
@property
def length_ratio_mean(self) -> float:
return self.config.config_data.data_statistics.length_ratio_mean
@property
def length_ratio_std(self) -> float:
return self.config.config_data.data_statistics.length_ratio_std
@property
def output_layer_vocab_size(self) -> int:
return self.output_layer.vocab_size
def _cache_wrapper(self, class_func):
@lru_cache(maxsize=self.forward_pass_cache_size)
def cache_func(*args):
return class_func(*args)
return cache_func
def load_model(model_folder: str,
context: Union[List[mx.context.Context], mx.context.Context] = mx.cpu(),
dtype: Optional[str] = None,
checkpoint: Optional[int] = None,
hybridize: bool = True,
inference_only: bool = False,
mc_dropout: bool = False,
for_disk_saving: Optional[str] = None,
allow_missing: bool = False,
set_grad_req_null: bool = True,
forward_pass_cache_size: int = 0) -> Tuple[SockeyeModel, List[vocab.Vocab], List[vocab.Vocab]]:
"""
Load a model from model_folder.
:param model_folder: Model folder.
:param context: MXNet context to bind modules to.
:param checkpoint: Checkpoint to use. If none, uses best checkpoint.
:param dtype: Optional data type to use. If None, will be inferred from stored model.
:param hybridize: Whether to hybridize the loaded models. Default: true.
:param inference_only: Use the model only for inference, enabling optimizations.
:param mc_dropout: Turn on dropout during inference.
:param for_disk_saving: For saving quantized models to disk.
None: load as usual and the model will work.
int8: The model loaded into RAM will not work, but is suitable for
writing to disk in quantized format (including scaling factors).
float32: The model loaded into RAM will not work, but is suitable
for writing to disk as float32 with precomputed scaling factors.
:param allow_missing: Allow missing parameters in the loaded model.
:param set_grad_req_null: Set grad_req to null for model parameters.
:param forward_pass_cache_size: If > 0, cache encoder and embedding calculations of forward pass.
    :return: The loaded model, source vocabularies, and target vocabularies.
"""
source_vocabs = vocab.load_source_vocabs(model_folder)
target_vocabs = vocab.load_target_vocabs(model_folder)
model_version = utils.load_version(os.path.join(model_folder, C.VERSION_NAME))
logger.info("Model version: %s", model_version)
utils.check_version(model_version)
model_config = SockeyeModel.load_config(os.path.join(model_folder, C.CONFIG_NAME))
if inference_only and not mc_dropout:
logger.info("Disabling dropout layers for performance reasons")
model_config.disable_dropout()
if mc_dropout:
logger.info("Monte Carlo dropout enabled, inference output will be non-deterministic.")
if checkpoint is None:
params_fname = os.path.join(model_folder, C.PARAMS_BEST_NAME)
else:
params_fname = os.path.join(model_folder, C.PARAMS_NAME % checkpoint)
if (dtype == C.DTYPE_INT8 or
model_config.dtype == C.DTYPE_INT8 or
for_disk_saving is not None) and "intgemm_fully_connected" not in dir(mx.nd.contrib):
# We're going to use int8 but it's not compiled into mxnet.
path = os.path.abspath(model_config.intgemm_custom_lib)
try:
mx.library.load(path)
except mx.base.MXNetError:
raise NotImplementedError("8-bit int inference requested but intgemm was not compiled into MXNet and a "
"custom operator library was not found in `%s`. Compile the custom "
"operator then set the path using intgemm_custom_lib in the config file." % path)
# Are we converting the model to 8-bit?
quantizing = model_config.dtype != C.DTYPE_INT8 and (dtype == C.DTYPE_INT8 or for_disk_saving is not None)
if quantizing:
model_config.dtype = C.DTYPE_INT8 # Ensure the scaling factor parameters are created.
model = SockeyeModel(model_config, inference_only=inference_only, mc_dropout=mc_dropout,
forward_pass_cache_size=forward_pass_cache_size)
model.initialize(ctx=context)
if model_config.dtype != C.DTYPE_INT8:
# If model_config.dtype is int8, then the above model construction
# (which also used model_config) already set everything to the correct
# mix of float32 and int8. Cast would try to make everything int8.
model.cast(model_config.dtype)
if quantizing:
logger.info("Model dtype: quantizing from float32 to int8")
allow_missing = True # The scaling factors are missing
cast_dtype = True
dtype_source = 'saved'
elif dtype is None or dtype == model_config.dtype:
logger.info("Model dtype: %s" % model_config.dtype)
allow_missing = allow_missing
cast_dtype = False
dtype_source = 'saved'
else:
logger.info("Model dtype: overridden to %s" % dtype)
model.cast(dtype)
allow_missing = allow_missing
cast_dtype = True
dtype_source = 'current'
model.load_parameters(filename=params_fname,
ctx=context,
allow_missing=allow_missing,
ignore_extra=True, # Scaling factors may be present in float32 models.
cast_dtype=cast_dtype,
dtype_source=dtype_source)
params = model.collect_params()
if set_grad_req_null:
for param in params.values():
param.grad_req = 'null'
if for_disk_saving is not None:
# Saving scaling factors and possibly int8 values to disk.
if not quantizing:
raise RuntimeError("Model is already quantized and for_disk_saving is set.")
quantization.convert_weights_disk_format(params, for_disk_saving)
model.config.dtype = for_disk_saving
# TODO: check for missing parameters somehow (we allowed scaling to be missing)
if for_disk_saving is None and model_config.dtype == C.DTYPE_INT8:
# Disk format to CPU-dependent format.
quantization.convert_weights_cpu_dependent(params)
if hybridize:
model.hybridize(static_alloc=True)
utils.check_condition(model.num_source_factors == len(source_vocabs),
"Number of loaded source vocabularies (%d) does not match "
"number of source factors for model '%s' (%d)" % (len(source_vocabs), model_folder,
model.num_source_factors))
utils.check_condition(model.num_target_factors == len(target_vocabs),
"Number of loaded target vocabularies (%d) does not match "
"number of target factors for model '%s' (%d)" % (len(target_vocabs), model_folder,
model.num_target_factors))
return model, source_vocabs, target_vocabs
def load_models(context: Union[List[mx.context.Context], mx.context.Context],
model_folders: List[str],
checkpoints: Optional[List[int]] = None,
dtype: Optional[str] = C.DTYPE_FP32,
hybridize: bool = True,
inference_only: bool = False,
mc_dropout: bool = False,
allow_missing: bool = False,
set_grad_req_null: bool = True,
forward_pass_cache_size: int = 0) -> Tuple[List[SockeyeModel], List[vocab.Vocab], List[vocab.Vocab]]:
"""
Loads a list of models for inference.
:param context: MXNet context to bind modules to.
:param model_folders: List of model folders to load models from.
:param checkpoints: List of checkpoints to use for each model in model_folders. Use None to load best checkpoint.
:param dtype: Optional data type to use. If None, will be inferred from stored model.
:param hybridize: Whether to hybridize the loaded models. Default: true.
:param inference_only: Use the model only for inference, enabling optimizations.
:param mc_dropout: Turn on dropout during inference.
:param allow_missing: Allow missing parameters in the loaded models.
:param set_grad_req_null: Set grad_req to null for model parameters.
:param forward_pass_cache_size: If > 0, cache encoder and embedding calculations of forward pass.
    :return: List of models, source vocabularies, and target vocabularies.
"""
logger.info("Loading %d model(s) from %s ...", len(model_folders), model_folders)
load_time_start = time.time()
models = [] # type: List[SockeyeModel]
source_vocabs = [] # type: List[List[vocab.Vocab]]
target_vocabs = [] # type: List[List[vocab.Vocab]]
if checkpoints is None:
checkpoints = [None] * len(model_folders)
else:
utils.check_condition(len(checkpoints) == len(model_folders), "Must provide checkpoints for each model")
for model_folder, checkpoint in zip(model_folders, checkpoints):
model, src_vcbs, trg_vcbs = load_model(model_folder,
context=context,
dtype=dtype,
checkpoint=checkpoint,
hybridize=hybridize,
inference_only=inference_only,
mc_dropout=mc_dropout,
allow_missing=allow_missing,
set_grad_req_null=set_grad_req_null,
forward_pass_cache_size=forward_pass_cache_size)
models.append(model)
source_vocabs.append(src_vcbs)
target_vocabs.append(trg_vcbs)
first_model_vocabs = source_vocabs[0]
for fi in range(len(first_model_vocabs)):
utils.check_condition(vocab.are_identical(*[source_vocabs[i][fi] for i in range(len(source_vocabs))]),
"Source vocabulary ids do not match. Factor %d" % fi)
first_model_vocabs = target_vocabs[0]
for fi in range(len(first_model_vocabs)):
utils.check_condition(vocab.are_identical(*[target_vocabs[i][fi] for i in range(len(target_vocabs))]),
"Target vocabulary ids do not match. Factor %d" % fi)
load_time = time.time() - load_time_start
logger.info("%d model(s) loaded in %.4fs", len(models), load_time)
return models, source_vocabs[0], target_vocabs[0]
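# --- Hedged loading sketch (not part of the original module) ---
# The model folder path below is hypothetical; everything else follows the
# signatures defined above.
if __name__ == "__main__":
    model, src_vocabs, trg_vocabs = load_model("path/to/model_folder",
                                               context=mx.cpu(),
                                               inference_only=True,
                                               hybridize=True)
    logger.info("Loaded model with %d source factor(s) and %d target factor(s)",
                model.num_source_factors, model.num_target_factors)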
| 48.419087
| 132
| 0.632159
|
fd79aa86937eec4d8b3e63d2d9af52c0e3549b16
| 1,316
|
py
|
Python
|
scTenifold/core/_ko.py
|
qwerty239qwe/scTenifoldpy
|
655787d20c6b9d7176c33752c06d778a62487f54
|
[
"MIT"
] | 3
|
2021-11-18T05:49:13.000Z
|
2022-02-12T11:21:52.000Z
|
scTenifold/core/_ko.py
|
qwerty239qwe/scTenifoldpy
|
655787d20c6b9d7176c33752c06d778a62487f54
|
[
"MIT"
] | 4
|
2021-11-16T16:28:59.000Z
|
2022-01-28T16:30:56.000Z
|
scTenifold/core/_ko.py
|
qwerty239qwe/scTenifoldpy
|
655787d20c6b9d7176c33752c06d778a62487f54
|
[
"MIT"
] | 2
|
2021-12-11T07:09:38.000Z
|
2022-02-12T11:21:53.000Z
|
from typing import List
import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix
from scTenifold.core._networks import make_networks
def ko_propagation(B, x, ko_gene_id, degree: int) -> np.ndarray:
adj_mat = B.copy()
np.fill_diagonal(adj_mat, 0)
x_ko = x.copy()
p = np.zeros(shape=x.shape)
p[ko_gene_id, :] = x[ko_gene_id, :]
perturbs = [p]
is_visited = np.array([False for _ in range(x_ko.shape[0])])
for d in range(degree):
if not is_visited.all():
perturbs.append(adj_mat @ perturbs[d])
new_visited = (perturbs[d+1] != 0).any(axis=1)
adj_mat[is_visited, :] = 0
adj_mat[:, is_visited] = 0
is_visited = is_visited | new_visited
for p in perturbs:
x_ko = x_ko - p
return np.where(x_ko >= 0, x_ko, 0)
def reconstruct_pcnets(nets: List[coo_matrix],
X_df,
ko_gene_id,
degree,
**kwargs):
ko_nets = []
for net in nets:
data = ko_propagation(net.toarray(), X_df.values, ko_gene_id, degree)
data = pd.DataFrame(data, index=X_df.index, columns=X_df.columns)
ko_net = make_networks(data, n_nets=1, **kwargs)[0]
ko_nets.append(ko_net)
return ko_nets
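# --- Toy-scale sketch (not part of the original module) ---
# Hypothetical 3-gene x 2-cell matrices, knocking out gene 0 and propagating
# the perturbation through two degrees of the network.
if __name__ == "__main__":
    B_toy = np.array([[0.0, 0.5, 0.0],
                      [0.5, 0.0, 0.2],
                      [0.0, 0.2, 0.0]])
    x_toy = np.array([[4.0, 2.0],
                      [3.0, 1.0],
                      [5.0, 0.0]])
    x_after_ko = ko_propagation(B_toy, x_toy, ko_gene_id=[0], degree=2)
    print(x_after_ko)  # knocked-out expression, clipped at zero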
| 31.333333
| 77
| 0.591185
|
c103902ec5f295334cf1410ec988b651f21ad444
| 3,351
|
py
|
Python
|
doc/src/sphinx/conf.py
|
MBcode/clowder
|
86ba0e36a38aed057bd5ca104495c5f3af67613a
|
[
"NCSA"
] | null | null | null |
doc/src/sphinx/conf.py
|
MBcode/clowder
|
86ba0e36a38aed057bd5ca104495c5f3af67613a
|
[
"NCSA"
] | null | null | null |
doc/src/sphinx/conf.py
|
MBcode/clowder
|
86ba0e36a38aed057bd5ca104495c5f3af67613a
|
[
"NCSA"
] | null | null | null |
# Clowder documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 26 13:24:40 2014.
# Required for readthedocs. See https://github.com/readthedocs/readthedocs.org/issues/2569
master_doc = 'index'
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Clowder'
copyright = '2019, University of Illinois at Urbana-Champaign'
author = 'Luigi Marini'
# The full version, including alpha/beta/rc tags
release = '1.20.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx_rtd_theme", "sphinx_design", "m2r2"
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '_static/logos_ncsa.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "../../../public/images/favicon.png"
# Path of logo for menu on right hand side.
html_logo = "../../../public/images/logo_60.png"
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
# -- Parser configuration -------------------------------------------------
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
def setup(app):
app.add_stylesheet('css/custom.css') # custom css
app.add_config_value('recommonmark_config', {
# 'url_resolver': lambda url: github_doc_root + url,
# 'auto_toc_tree_section': 'Contents',
}, True)
app.add_transform(AutoStructify)
| 35.648936
| 90
| 0.663682
|
dd6e75d61743072c5681af3cef146b8880c661ca
| 484
|
py
|
Python
|
DJANGO/trainingapp/warehouse/routers.py
|
NIXON707/Frameworks-7a-2020B
|
6892f8dd14b4b6f54eaf06ee5365c95006d815db
|
[
"MIT"
] | null | null | null |
DJANGO/trainingapp/warehouse/routers.py
|
NIXON707/Frameworks-7a-2020B
|
6892f8dd14b4b6f54eaf06ee5365c95006d815db
|
[
"MIT"
] | null | null | null |
DJANGO/trainingapp/warehouse/routers.py
|
NIXON707/Frameworks-7a-2020B
|
6892f8dd14b4b6f54eaf06ee5365c95006d815db
|
[
"MIT"
] | null | null | null |
class WareHouseRouter(object):
    """Route models of the 'warehouse' app to the 'warehouse' database."""
    def db_for_read(self, model, **hints):
        if model._meta.app_label == 'warehouse':
            return 'warehouse'
        return None
    def db_for_write(self, model, **hints):
        if model._meta.app_label == 'warehouse':
            return 'warehouse'
        return None
    def allow_migrate(self, db, app_label, model_name=None, **hints):
        if app_label == 'warehouse':
            return db == 'warehouse'
        return None
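For context, a router like this only takes effect once it is registered in the project settings; a minimal sketch, assuming a 'warehouse' database alias and the module path implied by the file path above (both assumptions, not verified):
# settings.py (sketch; engines, file names, and the router path are assumptions)
DATABASES = {
    "default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "default.sqlite3"},
    "warehouse": {"ENGINE": "django.db.backends.sqlite3", "NAME": "warehouse.sqlite3"},
}
DATABASE_ROUTERS = ["warehouse.routers.WareHouseRouter"]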
| 30.25
| 68
| 0.592975
|
16fc1f9994285481d8ccc79e478ce85824979c61
| 4,278
|
py
|
Python
|
python/ray/tests/test_get_locations.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 22
|
2018-05-08T05:52:34.000Z
|
2020-04-01T10:09:55.000Z
|
python/ray/tests/test_get_locations.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 73
|
2021-09-25T07:11:39.000Z
|
2022-03-26T07:10:59.000Z
|
python/ray/tests/test_get_locations.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 10
|
2018-04-27T10:50:59.000Z
|
2020-02-24T02:41:43.000Z
|
import numpy as np
import pytest
import time
import ray
def test_uninitialized():
with pytest.raises(RuntimeError):
ray.experimental.get_object_locations([])
def test_get_locations_empty_list(ray_start_regular):
locations = ray.experimental.get_object_locations([])
assert len(locations) == 0
def test_get_locations_timeout(ray_start_regular):
sizes = [100, 1000]
obj_refs = [ray.put(np.zeros(s, dtype=np.uint8)) for s in sizes]
ray.wait(obj_refs)
timeout_ms = 0
with pytest.raises(ray.exceptions.GetTimeoutError):
ray.experimental.get_object_locations(obj_refs, timeout_ms)
def test_get_locations(ray_start_regular):
node_id = ray.runtime_context.get_runtime_context().get()["node_id"]
sizes = [100, 1000]
obj_refs = [ray.put(np.zeros(s, dtype=np.uint8)) for s in sizes]
ray.wait(obj_refs)
locations = ray.experimental.get_object_locations(obj_refs)
assert len(locations) == 2
for idx, obj_ref in enumerate(obj_refs):
location = locations[obj_ref]
assert location["object_size"] > sizes[idx]
assert location["node_ids"] == [node_id.hex()]
def test_get_locations_inlined(ray_start_regular):
node_id = ray.runtime_context.get_runtime_context().get()["node_id"]
obj_refs = [ray.put("123")]
ray.wait(obj_refs)
locations = ray.experimental.get_object_locations(obj_refs)
for idx, obj_ref in enumerate(obj_refs):
location = locations[obj_ref]
assert location["node_ids"] == [node_id.hex()]
assert location["object_size"] > 0
def test_spilled_locations(ray_start_cluster_enabled):
cluster = ray_start_cluster_enabled
cluster.add_node(num_cpus=1, object_store_memory=75 * 1024 * 1024)
ray.init(cluster.address)
cluster.wait_for_nodes()
node_id = ray.runtime_context.get_runtime_context().get()["node_id"]
@ray.remote
def task():
arr = np.random.rand(5 * 1024 * 1024) # 40 MB
refs = []
refs.extend([ray.put(arr) for _ in range(2)])
ray.get(ray.put(arr))
ray.get(ray.put(arr))
return refs
object_refs = ray.get(task.remote())
ray.wait(object_refs)
locations = ray.experimental.get_object_locations(object_refs)
for obj_ref in object_refs:
location = locations[obj_ref]
assert location["node_ids"] == [node_id.hex()]
assert location["object_size"] > 0
def test_get_locations_multi_nodes(ray_start_cluster_enabled):
cluster = ray_start_cluster_enabled
# head node
cluster.add_node(num_cpus=1, object_store_memory=75 * 1024 * 1024)
ray.init(cluster.address)
# add 1 worker node
cluster.add_node(
num_cpus=0, resources={"custom": 1}, object_store_memory=75 * 1024 * 1024
)
cluster.wait_for_nodes()
all_node_ids = list(map(lambda node: node["NodeID"], ray.nodes()))
driver_node_id = ray.runtime_context.get_runtime_context().get()["node_id"].hex()
all_node_ids.remove(driver_node_id)
worker_node_id = all_node_ids[0]
@ray.remote(num_cpus=0, resources={"custom": 1})
def create_object():
return np.random.rand(1 * 1024 * 1024)
@ray.remote
def task():
return [create_object.remote()]
object_refs = ray.get(task.remote())
ray.wait(object_refs)
locations = ray.experimental.get_object_locations(object_refs)
for obj_ref in object_refs:
location = locations[obj_ref]
assert set(location["node_ids"]) == {driver_node_id, worker_node_id}
assert location["object_size"] > 0
def test_location_pending(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=1, object_store_memory=75 * 1024 * 1024)
ray.init(cluster.address)
cluster.wait_for_nodes()
@ray.remote
def task():
# sleep for 1 hour so the object will be pending
time.sleep(3600)
return 1
object_ref = task.remote()
locations = ray.experimental.get_object_locations([object_ref])
location = locations[object_ref]
assert location["node_ids"] == []
# TODO(chenshen): this is a result of converting int -1 to unsigned int;
    # should be fixed by https://github.com/ray-project/ray/issues/16321
assert location["object_size"] == 2 ** 64 - 1
| 33.162791
| 85
| 0.691211
|
5942408bde7c5235cf35fd246f9540f8ea696f78
| 2,271
|
py
|
Python
|
tools/convert_from_gluoncv.py
|
kbyran/poi
|
95d7a06c0aaf4a97050247b0d079c9dd4b850724
|
[
"Apache-2.0"
] | 6
|
2020-05-28T18:10:47.000Z
|
2021-12-20T02:08:08.000Z
|
tools/convert_from_gluoncv.py
|
kbyran/poi
|
95d7a06c0aaf4a97050247b0d079c9dd4b850724
|
[
"Apache-2.0"
] | 3
|
2021-03-22T08:44:22.000Z
|
2021-11-28T09:10:30.000Z
|
tools/convert_from_gluoncv.py
|
kbyran/poi
|
95d7a06c0aaf4a97050247b0d079c9dd4b850724
|
[
"Apache-2.0"
] | 2
|
2020-10-09T00:25:39.000Z
|
2020-10-09T00:25:40.000Z
|
import os
import mxnet as mx
from gluoncv.model_zoo.model_store import get_model_file
mxnet_gluon_repo = os.environ.get("MXNET_GLUON_REPO", None)
if not mxnet_gluon_repo:
os.environ["MXNET_GLUON_REPO"] = "https://apache-mxnet.s3.cn-north-1.amazonaws.com.cn"
num_layers = 50
version = 1
pretrained = True
root = "./pretrained"
file_path = get_model_file('resnet%d_v%d' % (num_layers, version), tag=pretrained, root=root)
print("model is saved in {}".format(file_path))
gcv_params = mx.nd.load(file_path)
cvt_params = dict()
for k in gcv_params:
if not k.startswith("features"):
continue
k_list = k.split(".")
if k_list[1] == "0":
cvt_k = "arg:conv0_" + k_list[2]
cvt_params[cvt_k] = gcv_params[k]
elif k_list[1] == "1":
if k_list[-1].endswith("running_mean"):
cvt_k = "aux:bn0_moving_mean"
elif k_list[-1].endswith("running_var"):
cvt_k = "aux:bn0_moving_var"
else:
cvt_k = "arg:bn0_" + k_list[2]
else:
stage = "stage{}".format(int(k_list[1]) - 3)
unit = "unit{}".format(int(k_list[2]) + 1)
if k_list[3] == "downsample":
if k_list[4] == "0":
layer = "sc"
elif k_list[4] == "1":
layer = "sc_bn"
elif k_list[3] == "body":
if k_list[4] == "0":
layer = "conv1"
elif k_list[4] == "1":
layer = "bn1"
elif k_list[4] == "3":
layer = "conv2"
elif k_list[4] == "4":
layer = "bn2"
elif k_list[4] == "6":
layer = "conv3"
elif k_list[4] == "7":
layer = "bn3"
if k_list[5].endswith("running_mean"):
prefix = "aux"
postfix = "moving_mean"
elif k_list[5].endswith("running_var"):
prefix = "aux"
postfix = "moving_var"
else:
prefix = "arg"
postfix = k_list[5]
cvt_k = "{}:{}_{}_{}_{}".format(prefix, stage, unit, layer, postfix)
print("{}-->{}".format(k, cvt_k))
cvt_params[cvt_k] = gcv_params[k].copy()
new_file_path = "{}-0000.params".format(file_path.split("-")[0])
mx.nd.save(new_file_path, cvt_params)
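A quick sanity check of the converted checkpoint, assuming the script above ran to completion; the printed keys are only illustrative:
import mxnet as mx
converted = mx.nd.load(new_file_path)      # new_file_path comes from the script above
print(len(converted), "arrays converted")
print(sorted(converted)[:5])               # keys should now use the arg:/aux: checkpoint naming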
| 32.913043
| 93
| 0.534566
|
b560ad180ced689435b794517b00a719e05b1c48
| 18,383
|
py
|
Python
|
yolov5s/utils/plots.py
|
funny000/python_project
|
190289765d0bdd908ce289c78969b3702a2c4292
|
[
"MIT"
] | null | null | null |
yolov5s/utils/plots.py
|
funny000/python_project
|
190289765d0bdd908ce289c78969b3702a2c4292
|
[
"MIT"
] | null | null | null |
yolov5s/utils/plots.py
|
funny000/python_project
|
190289765d0bdd908ce289c78969b3702a2c4292
|
[
"MIT"
] | null | null | null |
# Plotting utils
import glob
import math
import os
import random
from copy import copy
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import yaml
from PIL import Image, ImageDraw, ImageFont
from scipy.signal import butter, filtfilt
from utils.general import xywh2xyxy, xyxy2xywh
from utils.metrics import fitness
# Settings
matplotlib.rc('font', **{'size': 11})
matplotlib.use('Agg') # for writing to files only
def color_list():
# Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
def hex2rgb(h):
return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
return [hex2rgb(h) for h in matplotlib.colors.TABLEAU_COLORS.values()] # or BASE_ (8), CSS4_ (148), XKCD_ (949)
def hist2d(x, y, n=100):
# 2d histogram used in labels.png and evolve.png
xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
return np.log(hist[xidx, yidx])
def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
# https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
def butter_lowpass(cutoff, fs, order):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
return butter(order, normal_cutoff, btype='low', analog=False)
b, a = butter_lowpass(cutoff, fs, order=order)
return filtfilt(b, a, data) # forward-backward filter
def plot_one_box(x, img, color=None, label=None, line_thickness=3):
# Plots one bounding box on image img
tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.imwrite(os.path.join(os.getcwd(), "img1.jpg"), img)
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def plot_one_box_PIL(box, img, color=None, label=None, line_thickness=None):
img = Image.fromarray(img)
draw = ImageDraw.Draw(img)
line_thickness = line_thickness or max(int(min(img.size) / 200), 2)
draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot
if label:
fontsize = max(round(max(img.size) / 40), 12)
font = ImageFont.truetype("Arial.ttf", fontsize)
txt_width, txt_height = font.getsize(label)
draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color))
draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font)
return np.asarray(img)
def plot_wh_methods(): # from utils.plots import *; plot_wh_methods()
# Compares the two methods for width-height anchor multiplication
# https://github.com/ultralytics/yolov3/issues/168
x = np.arange(-4.0, 4.0, .1)
ya = np.exp(x)
yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
fig = plt.figure(figsize=(6, 3), tight_layout=True)
plt.plot(x, ya, '.-', label='YOLOv3')
plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2')
plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6')
plt.xlim(left=-4, right=4)
plt.ylim(bottom=0, top=6)
plt.xlabel('input')
plt.ylabel('output')
plt.grid()
plt.legend()
fig.savefig('comparison.png', dpi=200)
def output_to_target(output):
# Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
targets = []
for i, o in enumerate(output):
for *box, conf, cls in o.cpu().numpy():
targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
return np.array(targets)
def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
# Plot image grid with labels
if isinstance(images, torch.Tensor):
images = images.cpu().float().numpy()
if isinstance(targets, torch.Tensor):
targets = targets.cpu().numpy()
# un-normalise
if np.max(images[0]) <= 1:
images *= 255
tl = 3 # line thickness
tf = max(tl - 1, 1) # font thickness
bs, _, h, w = images.shape # batch size, _, height, width
bs = min(bs, max_subplots) # limit plot images
ns = np.ceil(bs ** 0.5) # number of subplots (square)
# Check if we should resize
scale_factor = max_size / max(h, w)
if scale_factor < 1:
h = math.ceil(scale_factor * h)
w = math.ceil(scale_factor * w)
colors = color_list() # list of colors
mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
for i, img in enumerate(images):
if i == max_subplots: # if last batch has fewer images than we expect
break
block_x = int(w * (i // ns))
block_y = int(h * (i % ns))
img = img.transpose(1, 2, 0)
if scale_factor < 1:
img = cv2.resize(img, (w, h))
mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
if len(targets) > 0:
image_targets = targets[targets[:, 0] == i]
boxes = xywh2xyxy(image_targets[:, 2:6]).T
classes = image_targets[:, 1].astype('int')
labels = image_targets.shape[1] == 6 # labels if no conf column
conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)
if boxes.shape[1]:
if boxes.max() <= 1.01: # if normalized with tolerance 0.01
boxes[[0, 2]] *= w # scale to pixels
boxes[[1, 3]] *= h
elif scale_factor < 1: # absolute coords need scale if image scales
boxes *= scale_factor
boxes[[0, 2]] += block_x
boxes[[1, 3]] += block_y
for j, box in enumerate(boxes.T):
cls = int(classes[j])
color = colors[cls % len(colors)]
cls = names[cls] if names else cls
if labels or conf[j] > 0.25: # 0.25 conf thresh
label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])
plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
# Draw image filename labels
if paths:
label = Path(paths[i]).name[:40] # trim to 40 char
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
lineType=cv2.LINE_AA)
# Image border
cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)
if fname:
r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size
mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)
# cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save
Image.fromarray(mosaic).save(fname) # PIL save
return mosaic
def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
# Plot LR simulating training for full epochs
optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
y = []
for _ in range(epochs):
scheduler.step()
y.append(optimizer.param_groups[0]['lr'])
plt.plot(y, '.-', label='LR')
plt.xlabel('epoch')
plt.ylabel('LR')
plt.grid()
plt.xlim(0, epochs)
plt.ylim(0)
plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
plt.close()
def plot_test_txt(): # from utils.plots import *; plot_test()
# Plot test.txt histograms
x = np.loadtxt('test.txt', dtype=np.float32)
box = xyxy2xywh(x[:, :4])
cx, cy = box[:, 0], box[:, 1]
fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
ax.set_aspect('equal')
plt.savefig('hist2d.png', dpi=300)
fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
ax[0].hist(cx, bins=600)
ax[1].hist(cy, bins=600)
plt.savefig('hist1d.png', dpi=200)
def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
# Plot targets.txt histograms
x = np.loadtxt('targets.txt', dtype=np.float32).T
s = ['x targets', 'y targets', 'width targets', 'height targets']
fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
ax = ax.ravel()
for i in range(4):
ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
ax[i].legend()
ax[i].set_title(s[i])
plt.savefig('targets.jpg', dpi=200)
def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt()
# Plot study.txt generated by test.py
fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
# ax = ax.ravel()
fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
# for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']]:
for f in sorted(Path(path).glob('study*.txt')):
y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
x = np.arange(y.shape[1]) if x is None else np.array(x)
s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
# for i in range(7):
# ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
# ax[i].set_title(s[i])
j = y[3].argmax() + 1
ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
ax2.grid(alpha=0.2)
ax2.set_yticks(np.arange(20, 60, 5))
ax2.set_xlim(0, 30)
ax2.set_ylim(30, 55)
ax2.set_xlabel('GPU Speed (ms/img)')
ax2.set_ylabel('COCO AP val')
ax2.legend(loc='lower right')
plt.savefig(str(Path(path).name) + '.png', dpi=300)
def plot_labels(labels, names=(), save_dir=Path(''), loggers=None):
# plot dataset labels
print('Plotting labels... ')
c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
nc = int(c.max() + 1) # number of classes
colors = color_list()
x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
# seaborn correlogram
sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
plt.close()
# matplotlib labels
matplotlib.use('svg') # faster
ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
ax[0].set_ylabel('instances')
if 0 < len(names) < 30:
ax[0].set_xticks(range(len(names)))
ax[0].set_xticklabels(names, rotation=90, fontsize=10)
else:
ax[0].set_xlabel('classes')
sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)
# rectangles
labels[:, 1:3] = 0.5 # center
labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
for cls, *box in labels[:1000]:
ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot
ax[1].imshow(img)
ax[1].axis('off')
for a in [0, 1, 2, 3]:
for s in ['top', 'right', 'left', 'bottom']:
ax[a].spines[s].set_visible(False)
plt.savefig(save_dir / 'labels.jpg', dpi=200)
matplotlib.use('Agg')
plt.close()
# loggers
for k, v in loggers.items() or {}:
if k == 'wandb' and v:
v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False)
def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution()
# Plot hyperparameter evolution results in evolve.txt
with open(yaml_file) as f:
hyp = yaml.load(f, Loader=yaml.SafeLoader)
x = np.loadtxt('evolve.txt', ndmin=2)
f = fitness(x)
# weights = (f - f.min()) ** 2 # for weighted results
plt.figure(figsize=(10, 12), tight_layout=True)
matplotlib.rc('font', **{'size': 8})
for i, (k, v) in enumerate(hyp.items()):
y = x[:, i + 7]
# mu = (y * weights).sum() / weights.sum() # best weighted result
mu = y[f.argmax()] # best single result
plt.subplot(6, 5, i + 1)
plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
plt.plot(mu, f.max(), 'k+', markersize=15)
plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
if i % 5 != 0:
plt.yticks([])
print('%15s: %.3g' % (k, mu))
plt.savefig('evolve.png', dpi=200)
print('\nPlot saved as evolve.png')
def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
# Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
files = list(Path(save_dir).glob('frames*.txt'))
for fi, f in enumerate(files):
try:
results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows
n = results.shape[1] # number of rows
x = np.arange(start, min(stop, n) if stop else n)
results = results[:, x]
t = (results[0] - results[0].min()) # set t0=0s
results[0] = x
for i, a in enumerate(ax):
if i < len(results):
label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
a.set_title(s[i])
a.set_xlabel('time (s)')
# if fi == len(files) - 1:
# a.set_ylim(bottom=0)
for side in ['top', 'right']:
a.spines[side].set_visible(False)
else:
a.remove()
except Exception as e:
print('Warning: Plotting error for %s; %s' % (f, e))
ax[1].legend()
plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay()
# Plot training 'results*.txt', overlaying train and val losses
s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends
t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles
for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
n = results.shape[1] # number of rows
x = range(start, min(stop, n) if stop else n)
fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
ax = ax.ravel()
for i in range(5):
for j in [i, i + 5]:
y = results[j, x]
ax[i].plot(x, y, marker='.', label=s[j])
# y_smooth = butter_lowpass_filtfilt(y)
# ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])
ax[i].set_title(t[i])
ax[i].legend()
ax[i].set_ylabel(f) if i == 0 else None # add filename
fig.savefig(f.replace('.txt', '.png'), dpi=200)
def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
# Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp')
fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
ax = ax.ravel()
s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',
'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
if bucket:
# files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
files = ['results%g.txt' % x for x in id]
c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)
os.system(c)
else:
files = list(Path(save_dir).glob('results*.txt'))
assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)
for fi, f in enumerate(files):
try:
results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
n = results.shape[1] # number of rows
x = range(start, min(stop, n) if stop else n)
for i in range(10):
y = results[i, x]
if i in [0, 1, 2, 5, 6, 7]:
y[y == 0] = np.nan # don't show zero loss values
# y /= y[0] # normalize
label = labels[fi] if len(labels) else f.stem
ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
ax[i].set_title(s[i])
# if i in [5, 6, 7]: # share train and val loss y axes
# ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
except Exception as e:
print('Warning: Plotting error for %s; %s' % (f, e))
ax[1].legend()
fig.savefig(Path(save_dir) / 'results.png', dpi=200)
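A minimal sketch of plot_one_box on a synthetic image; the box coordinates and label are invented for illustration, and it assumes this module is importable:
import numpy as np
img = np.full((480, 640, 3), 114, dtype=np.uint8)              # grey canvas
box = [100, 120, 300, 360]                                      # made-up xyxy coordinates
plot_one_box(box, img, color=color_list()[0], label='person 0.90')
# img is modified in place; note that this variant of plot_one_box also writes
# "img1.jpg" to the current working directory whenever a label is given.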
| 42.25977
| 120
| 0.576511
|
342d8c38ec77fd319fa9407adef6d61fd03581e4
| 76
|
py
|
Python
|
src/masonite/drivers/session/__init__.py
|
josephmancuso/masonite
|
e9ed31331268bd4966609fbc1e5c55afa5cb0a79
|
[
"MIT"
] | 35
|
2018-01-08T01:20:16.000Z
|
2018-02-06T02:37:14.000Z
|
src/masonite/drivers/session/__init__.py
|
josephmancuso/masonite
|
e9ed31331268bd4966609fbc1e5c55afa5cb0a79
|
[
"MIT"
] | 55
|
2018-01-03T02:42:03.000Z
|
2018-02-06T13:35:54.000Z
|
src/masonite/drivers/session/__init__.py
|
josephmancuso/masonite
|
e9ed31331268bd4966609fbc1e5c55afa5cb0a79
|
[
"MIT"
] | 4
|
2018-01-08T13:13:14.000Z
|
2018-01-12T19:35:32.000Z
|
from .CookieDriver import CookieDriver
from .RedisDriver import RedisDriver
| 25.333333
| 38
| 0.868421
|
5be01b88750999d93577e3db462452641696487c
| 407
|
py
|
Python
|
scripts/fig6/fig6.py
|
julesvidal/wasserstein-pd-barycenter
|
1f62a5e1c40700030357b2bfb9a2f86fe4736861
|
[
"BSD-Source-Code"
] | 1
|
2019-09-10T12:36:52.000Z
|
2019-09-10T12:36:52.000Z
|
scripts/fig6/fig6.py
|
julesvidal/wasserstein-pd-barycenter
|
1f62a5e1c40700030357b2bfb9a2f86fe4736861
|
[
"BSD-Source-Code"
] | null | null | null |
scripts/fig6/fig6.py
|
julesvidal/wasserstein-pd-barycenter
|
1f62a5e1c40700030357b2bfb9a2f86fe4736861
|
[
"BSD-Source-Code"
] | 1
|
2021-04-28T12:36:58.000Z
|
2021-04-28T12:36:58.000Z
|
import matplotlib.pyplot as plt
import csv
x=[]
y=[]
with open('energy.txt','r') as file:
plots = csv.reader(file,delimiter='\t')
next(plots)
for row in plots:
x.append(float(row[1]))
y.append(float(row[0]))
plt.loglog(x,y,color='orange',linewidth=3)
plt.xlabel('time(s)')
plt.ylabel('energy')
plt.title('fig 6, Orange plot')
axes=plt.gca()
axes.set_xlim([1,1000])
plt.show()
| 19.380952
| 43
| 0.641278
|
b104d10100778496972216388b6dfbe70e651aa7
| 420
|
py
|
Python
|
plugins/sentry/girder_sentry/__init__.py
|
RemiCecchinato/girder
|
455d5c60d59112b65b45daf51c2d2ccda2e84a9a
|
[
"Apache-2.0"
] | null | null | null |
plugins/sentry/girder_sentry/__init__.py
|
RemiCecchinato/girder
|
455d5c60d59112b65b45daf51c2d2ccda2e84a9a
|
[
"Apache-2.0"
] | null | null | null |
plugins/sentry/girder_sentry/__init__.py
|
RemiCecchinato/girder
|
455d5c60d59112b65b45daf51c2d2ccda2e84a9a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import sentry_sdk
from girder.plugin import GirderPlugin
from girder.models.setting import Setting
from . import rest
from .settings import PluginSettings
class SentryPlugin(GirderPlugin):
DISPLAY_NAME = 'Sentry'
CLIENT_SOURCE_PATH = 'web_client'
def load(self, info):
info['apiRoot'].sentry = rest.Sentry()
sentry_sdk.init(dsn=Setting().get(PluginSettings.DSN))
| 23.333333
| 62
| 0.721429
|
3c308b5aeafa2ab3fa9a508ee7901e6ab47bf7aa
| 5,255
|
py
|
Python
|
dateparser/data/date_translation_data/ast.py
|
bsekiewicz/dateparser
|
babc677372376d933de5542010af3097c26e49e9
|
[
"BSD-3-Clause"
] | 1,804
|
2015-01-01T23:01:54.000Z
|
2022-03-30T18:36:16.000Z
|
dateparser/data/date_translation_data/ast.py
|
bsekiewicz/dateparser
|
babc677372376d933de5542010af3097c26e49e9
|
[
"BSD-3-Clause"
] | 948
|
2015-01-04T22:18:39.000Z
|
2022-03-31T16:29:41.000Z
|
dateparser/data/date_translation_data/ast.py
|
bsekiewicz/dateparser
|
babc677372376d933de5542010af3097c26e49e9
|
[
"BSD-3-Clause"
] | 463
|
2015-01-10T08:53:39.000Z
|
2022-03-18T12:45:49.000Z
|
info = {
"name": "ast",
"date_order": "DMY",
"january": [
"de xineru",
"xin",
"xineru"
],
"february": [
"de febreru",
"feb",
"febreru"
],
"march": [
"de marzu",
"mar",
"marzu"
],
"april": [
"abr",
"abril",
"d'abril"
],
"may": [
"de mayu",
"may",
"mayu"
],
"june": [
"de xunu",
"xun",
"xunu"
],
"july": [
"de xunetu",
"xnt",
"xunetu"
],
"august": [
"ago",
"agostu",
"d'agostu"
],
"september": [
"de setiembre",
"set",
"setiembre"
],
"october": [
"d'ochobre",
"och",
"ochobre"
],
"november": [
"de payares",
"pay",
"payares"
],
"december": [
"avi",
"avientu",
"d'avientu"
],
"monday": [
"llu",
"llunes"
],
"tuesday": [
"mar",
"martes"
],
"wednesday": [
"mié",
"miércoles"
],
"thursday": [
"xue",
"xueves"
],
"friday": [
"vie",
"vienres"
],
"saturday": [
"sáb",
"sábadu"
],
"sunday": [
"dom",
"domingu"
],
"am": [
"am",
"de la mañana",
"mañana"
],
"pm": [
"de la tarde",
"pm",
"tarde"
],
"year": [
"añu"
],
"month": [
"mes"
],
"week": [
"sel",
"selmana"
],
"day": [
"día"
],
"hour": [
"h",
"hora"
],
"minute": [
"m",
"min",
"minutu"
],
"second": [
"s",
"segundu"
],
"relative-type": {
"0 day ago": [
"güei"
],
"0 hour ago": [
"esta h",
"esta hora"
],
"0 minute ago": [
"esti min",
"esti minutu"
],
"0 month ago": [
"esti mes"
],
"0 second ago": [
"agora"
],
"0 week ago": [
"esta selm",
"esta selmana"
],
"0 year ago": [
"esti añu"
],
"1 day ago": [
"ayeri"
],
"1 month ago": [
"el mes pasáu",
"mes pas"
],
"1 week ago": [
"la selmana pasada",
"selm pas",
"selm pasada"
],
"1 year ago": [
"añu pas",
"l'añu pas",
"l'añu pasáu"
],
"in 1 day": [
"mañ",
"mañana"
],
"in 1 month": [
"el mes viniente",
"mes vin"
],
"in 1 week": [
"la selmana viniente",
"selm vin",
"selm viniente"
],
"in 1 year": [
"añu vin",
"l'añu vin",
"l'añu viniente"
]
},
"relative-type-regex": {
"\\1 day ago": [
"hai (\\d+) d",
"hai (\\d+) día",
"hai (\\d+) díes"
],
"\\1 hour ago": [
"hai (\\d+) h",
"hai (\\d+) hora",
"hai (\\d+) hores"
],
"\\1 minute ago": [
"hai (\\d+) min",
"hai (\\d+) minutos",
"hai (\\d+) minutu"
],
"\\1 month ago": [
"hai (\\d+) m",
"hai (\\d+) mes",
"hai (\\d+) meses"
],
"\\1 second ago": [
"hai (\\d+) s",
"hai (\\d+) seg",
"hai (\\d+) segundos",
"hai (\\d+) segundu"
],
"\\1 week ago": [
"hai (\\d+) se",
"hai (\\d+) selm",
"hai (\\d+) selmana",
"hai (\\d+) selmanes"
],
"\\1 year ago": [
"hai (\\d+) a",
"hai (\\d+) años",
"hai (\\d+) añu"
],
"in \\1 day": [
"en (\\d+) d",
"en (\\d+) día",
"en (\\d+) díes"
],
"in \\1 hour": [
"en (\\d+) h",
"en (\\d+) hora",
"en (\\d+) hores"
],
"in \\1 minute": [
"en (\\d+) min",
"en (\\d+) minutos",
"en (\\d+) minutu"
],
"in \\1 month": [
"en (\\d+) m",
"en (\\d+) mes",
"en (\\d+) meses"
],
"in \\1 second": [
"en (\\d+) s",
"en (\\d+) seg",
"en (\\d+) segundos",
"en (\\d+) segundu"
],
"in \\1 week": [
"en (\\d+) se",
"en (\\d+) selm",
"en (\\d+) selmana",
"en (\\d+) selmanes"
],
"in \\1 year": [
"en (\\d+) a",
"en (\\d+) años",
"en (\\d+) añu"
]
},
"locale_specific": {},
"skip": [
" ",
"'",
",",
"-",
".",
"/",
";",
"@",
"[",
"]",
"|",
","
]
}
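This dictionary backs dateparser's Asturian locale; a small sketch of exercising it (the exact result depends on the current date, so only the call pattern is shown):
import dateparser
# "hai 3 díes" matches the "\1 day ago" patterns defined above
print(dateparser.parse("hai 3 díes", languages=["ast"]))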
| 18.701068
| 34
| 0.26451
|
d14f626f585c749733b2b7df5ec68076f4976328
| 8,626
|
py
|
Python
|
tests/test_model.py
|
tmuntianu/supereeg
|
cd6e3ca1a898f091ef2696281c9ea32d1baf3eea
|
[
"MIT"
] | 27
|
2018-03-30T22:15:18.000Z
|
2022-03-18T02:53:18.000Z
|
tests/test_model.py
|
tmuntianu/supereeg
|
cd6e3ca1a898f091ef2696281c9ea32d1baf3eea
|
[
"MIT"
] | 86
|
2018-03-30T02:58:18.000Z
|
2021-07-07T01:45:31.000Z
|
tests/test_model.py
|
tmuntianu/supereeg
|
cd6e3ca1a898f091ef2696281c9ea32d1baf3eea
|
[
"MIT"
] | 16
|
2018-03-30T03:04:00.000Z
|
2020-03-20T16:51:29.000Z
|
# -*- coding: utf-8 -*-
from __future__ import print_function
#from builtins import range
import supereeg as se
import numpy as np
import scipy
import pytest
# some example locations
locs = np.array([[-61., -77., -3.],
[-41., -77., -23.],
[-21., -97., 17.],
[-21., -37., 77.],
[-21., 63., -3.],
[ -1., -37., 37.],
[ -1., 23., 17.],
[ 19., -57., -23.],
[ 19., 23., -3.],
[ 39., -57., 17.],
[ 39., 3., 37.],
[ 59., -17., 17.]])
# number of timeseries samples
n_samples = 10
# number of subjects
n_subs = 6
# number of electrodes
n_elecs = 5
# simulate correlation matrix
data = [se.simulate_model_bos(n_samples=10, sample_rate=10, locs=locs, sample_locs = n_elecs,
set_random_seed=123, noise=0) for x in range(n_subs)]
# test model to compare
test_model = se.Model(data=data[0:3], locs=locs, rbf_width=20, n_subs=3)
def test_create_model_1bo():
model = se.Model(data=data[0], locs=locs)
assert isinstance(model, se.Model)
def test_create_model_2bo():
model = se.Model(data=data[0:2], locs=locs)
assert isinstance(model, se.Model)
def test_create_model_superuser():
locs = np.random.multivariate_normal(np.zeros(3), np.eye(3), size=10)
numerator = scipy.linalg.toeplitz(np.linspace(0,10,len(locs))[::-1])
denominator = np.random.multivariate_normal(np.zeros(10), np.eye(10), size=10)
model = se.Model(numerator=numerator, denominator=denominator, locs=locs, n_subs=2)
assert isinstance(model, se.Model)
def test_model_predict():
model = se.Model(data=data[0:2], locs=locs)
bo = model.predict(data[0], nearest_neighbor=False)
print(data[0].dur)
assert isinstance(bo, se.Brain)
def test_model_gpu_predict():
cupy = pytest.importorskip("cupy")
cpu_model = se.Model(data=data[0:2], locs=locs)
cpu_bo = cpu_model.predict(data[3], nearest_neighbor=False)
gpu_model = se.Model(data=data[0:2], locs=locs, gpu=True)
gpu_bo = gpu_model.predict(data[3], nearest_neighbor=False)
assert np.allclose(gpu_model.numerator, cpu_model.numerator, equal_nan=True)
assert np.allclose(gpu_model.denominator, cpu_model.denominator, equal_nan=True)
assert isinstance(cpu_bo, se.Brain)
assert isinstance(gpu_bo, se.Brain)
assert np.allclose(cpu_bo.get_data(), gpu_bo.get_data(), rtol=0, atol=2e-5, equal_nan=True)
def test_model_predict_nn():
print(data[0].dur)
model = se.Model(data=data[0:2], locs=locs)
bo = model.predict(data[0], nearest_neighbor=True)
assert isinstance(bo, se.Brain)
def test_model_predict_nn_thresh():
model = se.Model(data=data[0:2], locs=locs)
bo = model.predict(data[0], nearest_neighbor=True, match_threshold=30)
assert isinstance(bo, se.Brain)
def test_model_predict_nn_0():
model = se.Model(data=data[0:2], locs=locs)
bo_1 = model.predict(data[0], nearest_neighbor=True, match_threshold=0)
bo_2 = model.predict(data[0], nearest_neighbor=False)
assert isinstance(bo_1, se.Brain)
assert np.allclose(bo_1.get_data(), bo_2.get_data())
def test_update():
model = se.Model(data=data[1:3], locs=locs)
mo = se.Model([model, data[0]])
assert isinstance(mo, se.Model)
assert np.allclose(mo.numerator.real, test_model.numerator.real, equal_nan=True)
assert np.allclose(mo.numerator.imag, test_model.numerator.imag, equal_nan=True)
assert np.allclose(mo.denominator, test_model.denominator, equal_nan=True)
def test_create_model_str():
model = se.Model('example_data')
assert isinstance(model, se.Model)
def test_create_model_model():
mo = se.Model(data=data[1:3], locs=locs)
model = se.Model(mo)
assert isinstance(model, se.Model)
def test_model_update_inplace():
mo = se.Model(data=data[1:3], locs=locs)
mo = mo.update(data[0])
assert mo is None
def test_model_update_not_inplace():
mo = se.Model(data=data[1:3], locs=locs)
mo = mo.update(data[0], inplace=False)
assert isinstance(mo, se.Model)
def test_model_update_with_model():
mo = se.Model(data=data[1:3], locs=locs)
mo = mo.update(mo, inplace=False)
assert isinstance(mo, se.Model)
def test_model_update_with_model_and_bo():
mo = se.Model(data=data[1:3], locs=locs)
mo = se.Model([mo, data[0]])
assert isinstance(mo, se.Model)
def test_model_update_with_array():
mo = se.Model(data=data[1:3], locs=locs)
d = np.random.rand(*mo.numerator.shape)
mo = se.Model([mo, d], locs=mo.get_locs())
assert isinstance(mo, se.Model)
#This syntax is ambiguous and no longer supported
#def test_model_update_with_smaller_array():
# mo = se.Model(data=data[1:3], locs=locs)
# d = np.random.rand(3,3)
# with pytest.raises(ValueError):
# mo = se.Model([mo, d])
def test_model_get_model():
mo = se.Model(data=data[1:3], locs=locs)
m = mo.get_model()
assert isinstance(m, np.ndarray)
def test_model_get_slice():
mo = se.Model(data=data[1:3], locs=locs)
inds = [0, 1]
s = mo.get_slice(inds)
assert(type(s) == se.Model)
s_model = s.get_model()
assert s_model.shape[0] == s_model.shape[1]
assert s_model.shape[0] == len(inds)
assert s_model.shape[0] == len(inds)
mo.get_slice(inds, inplace=True)
assert(type(mo) == se.Model)
mo_model = mo.get_model()
assert mo_model.shape[0] == mo_model.shape[1]
assert mo_model.shape[0] == len(inds)
assert mo_model.shape[0] == len(inds)
def test_model_add():
mo1 = se.Model(data=data[0:3], locs=locs)
mo2 = se.Model(data=data[3:6], locs=locs)
mo3 = mo1 + mo2
mo1_model = mo1.get_model()
mo2_model = mo2.get_model()
mo3_model = mo3.get_model()
assert np.allclose(mo1_model.shape, mo2_model.shape)
assert np.allclose(mo2_model.shape, mo3_model.shape)
assert mo1_model.shape[0] == mo1_model.shape[1]
assert mo3.n_subs == mo1.n_subs + mo2.n_subs
mo3_alt = se.Model(data=data[0:6], locs=locs)
assert np.allclose(mo3.numerator.real, mo3_alt.numerator.real, equal_nan=True)
assert np.allclose(mo3.numerator.imag, mo3_alt.numerator.imag, equal_nan=True)
assert np.allclose(mo3.denominator, mo3_alt.denominator, equal_nan=True)
def test_model_subtract():
mo1 = se.Model(data=data[0:3], locs=locs)
mo2 = se.Model(data=data[3:6], locs=locs)
mo3 = mo1 + mo2
mo1_model = mo1.get_model()
mo2_model = mo2.get_model()
mo3_model = mo3.get_model()
assert np.allclose(mo1_model.shape, mo2_model.shape)
assert np.allclose(mo2_model.shape, mo3_model.shape)
assert mo1_model.shape[0] == mo1_model.shape[1]
assert mo3.n_subs == mo1.n_subs + mo2.n_subs
mo2_recon = mo3 - mo1
assert np.allclose(mo2.get_model(), mo2_recon.get_model(), equal_nan=True)
assert mo2_recon.n_subs == mo2.n_subs
## test that the new model is now unstable
try:
assert mo2_recon + mo3
except AssertionError:
assert True == True
def test_cpu_vs_gpu_single_subject():
cupy = pytest.importorskip("cupy")
locs = np.random.randn(25, 3)
bo1 = se.simulate_bo(n_samples=512*30, locs=locs, sample_rate=512)
bo2 = se.simulate_bo(n_samples=512*30, locs=locs, sample_rate=512)
mo_cpu = se.Model([bo1, bo2])
mo_gpu = se.Model([bo1, bo2], gpu=True)
assert mo_cpu.locs.equals(mo_gpu.locs)
assert np.allclose(mo_cpu.get_model(), mo_gpu.get_model())
mo_cpu = se.Model(bo1) + se.Model(bo2)
mo_gpu = se.Model(bo1, gpu=True) + se.Model(bo2, gpu=True)
assert mo_cpu.locs.equals(mo_gpu.locs)
assert np.allclose(mo_cpu.get_model(), mo_gpu.get_model())
def test_cpu_vs_gpu_mult_subject():
cupy = pytest.importorskip("cupy")
locs1 = np.random.randn(25, 3)
bo1a = se.simulate_bo(n_samples=512*30, locs=locs1, sample_rate=512)
bo1b = se.simulate_bo(n_samples=512*30, locs=locs1, sample_rate=512)
locs2 = np.random.randn(25, 3)
bo2a = se.simulate_bo(n_samples=512*30, locs=locs2, sample_rate=512)
bo2b = se.simulate_bo(n_samples=512*30, locs=locs2, sample_rate=512)
mo_cpu = se.Model([bo1a, bo1b, bo2a, bo2b])
mo_gpu = se.Model([bo1a, bo1b, bo2a, bo2b], gpu=True)
assert mo_cpu.locs.equals(mo_gpu.locs)
assert np.allclose(mo_cpu.get_model(), mo_gpu.get_model())
mo_cpu = se.Model([bo1a, bo1b]) + se.Model([bo2a, bo2b])
mo_gpu = se.Model([bo1a, bo1b], gpu=True) + se.Model([bo2a, bo2b], gpu=True)
assert mo_cpu.locs.equals(mo_gpu.locs)
assert np.allclose(mo_cpu.get_model(), mo_gpu.get_model())
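The tests above already exercise the public API; as a standalone illustration (simulation sizes are arbitrary and the random locations are hypothetical):
import numpy as np
import supereeg as se
locs = np.random.randn(10, 3)                                  # arbitrary electrode locations
bos = [se.simulate_model_bos(n_samples=10, sample_rate=10, locs=locs, sample_locs=5)
       for _ in range(2)]
mo = se.Model(data=bos, locs=locs)                             # build a correlation model
bo_hat = mo.predict(bos[0], nearest_neighbor=False)            # reconstruct activity; returns an se.Brain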
| 35.792531
| 95
| 0.667633
|
99520d7e42f58e7453927a6f86837ea61248e3e7
| 409
|
py
|
Python
|
diventi/ebooks/migrations/0044_auto_20190603_1348.py
|
flavoi/diven
|
3173ca3ca3fbedc191b8eab3639a6bceb3c442c4
|
[
"Apache-2.0"
] | 2
|
2019-06-27T16:00:17.000Z
|
2020-08-14T07:46:05.000Z
|
diventi/ebooks/migrations/0044_auto_20190603_1348.py
|
flavoi/diven
|
3173ca3ca3fbedc191b8eab3639a6bceb3c442c4
|
[
"Apache-2.0"
] | 26
|
2020-02-15T22:39:35.000Z
|
2022-02-19T21:09:01.000Z
|
diventi/ebooks/migrations/0044_auto_20190603_1348.py
|
flavoi/diven
|
3173ca3ca3fbedc191b8eab3639a6bceb3c442c4
|
[
"Apache-2.0"
] | 1
|
2021-11-12T22:30:15.000Z
|
2021-11-12T22:30:15.000Z
|
# Generated by Django 2.2.1 on 2019-06-03 11:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ebooks', '0043_auto_20190531_0751'),
]
operations = [
migrations.RemoveField(
model_name='section',
name='category',
),
migrations.DeleteModel(
name='SectionCategory',
),
]
| 19.47619
| 47
| 0.577017
|
2c807dcc1e5c8ce2a775f48566b30cf829f6228a
| 911
|
py
|
Python
|
alipay/aop/api/response/AlipayOpenSearchOrderdetailQueryResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/response/AlipayOpenSearchOrderdetailQueryResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/response/AlipayOpenSearchOrderdetailQueryResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.SearchOrderDetailData import SearchOrderDetailData
class AlipayOpenSearchOrderdetailQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenSearchOrderdetailQueryResponse, self).__init__()
self._data = None
@property
def data(self):
return self._data
@data.setter
def data(self, value):
if isinstance(value, SearchOrderDetailData):
self._data = value
else:
self._data = SearchOrderDetailData.from_alipay_dict(value)
def parse_response_content(self, response_content):
response = super(AlipayOpenSearchOrderdetailQueryResponse, self).parse_response_content(response_content)
if 'data' in response:
self.data = response['data']
| 30.366667
| 113
| 0.713502
|
d37b082bccb118e6c782ea983d185d2b961d774d
| 4,802
|
py
|
Python
|
tool/filter.py
|
FREEWING-JP/PiDAS
|
30295238c23273a266a99547e2862425f9cf0fe1
|
[
"Apache-2.0"
] | 1
|
2021-12-06T01:55:19.000Z
|
2021-12-06T01:55:19.000Z
|
tool/filter.py
|
nrck/PiDAS
|
2dfc2982cd497e3c25df988e9ecdc60173c5918e
|
[
"Apache-2.0"
] | null | null | null |
tool/filter.py
|
nrck/PiDAS
|
2dfc2982cd497e3c25df988e9ecdc60173c5918e
|
[
"Apache-2.0"
] | null | null | null |
import math
class Filter():
def __init__(self, sampling_hz):
self.__dt = 1.0 / sampling_hz
self.__f0 = 0.45
self.__f1 = 7.0
self.__f2 = 0.5
self.__f3 = 12.0
self.__f4 = 20.0
self.__f5 = 30.0
self.__h2a = 1.0
self.__h2b = 0.75
self.__h3 = 0.9
self.__h4 = 0.6
self.__h5 = 0.6
self.__g = 1.262
self.__pi = math.pi
def __func_A14(self, hc, fc, input_data):
# A14
omega_c = 2 * self.__pi * fc
a0 = 12 / (self.__dt * self.__dt) + (12 * hc * omega_c) / \
self.__dt + (omega_c * omega_c)
a1 = 10 * (omega_c * omega_c) - 24 / (self.__dt * self.__dt)
a2 = 12 / (self.__dt * self.__dt) - (12 * hc * omega_c) / \
self.__dt + (omega_c * omega_c)
b0 = omega_c * omega_c
b1 = 10 * (omega_c * omega_c)
b2 = omega_c * omega_c
return self.__func_A15(a0, a1, a2, b0, b1, b2, input_data)
    def __func_A15(self, a0, a1, a2, b0, b1, b2, input_data: list[float]):
output_data = []
        # first output sample
k1 = (b0 * input_data[0]) / a0
output_data.append(k1)
        # second output sample
k2 = (-a1 * output_data[0] + b0 *
input_data[1] + b1 * input_data[0]) / a0
output_data.append(k2)
        # third sample onward
for k in range(2, len(input_data)):
value = (-a1 * output_data[k-1] - a2 * output_data[k-2] + b0 *
input_data[k] + b1 * input_data[k-1] + b2 * input_data[k-2]) / a0
output_data.append(value)
return output_data
def __filter01(self, input_data):
fa1 = self.__f0
fa2 = self.__f1
# A11
omega_a1 = 2 * self.__pi * fa1
omega_a2 = 2 * self.__pi * fa2
a0 = 8 / (self.__dt * self.__dt) + (4 * omega_a1 + 2 * omega_a2) / \
self.__dt + omega_a1 * omega_a2
a1 = 2 * omega_a1 * omega_a2 - 16 / (self.__dt * self.__dt)
a2 = 8 / (self.__dt * self.__dt) - (4 * omega_a1 + 2 * omega_a2) / \
self.__dt + omega_a1 * omega_a2
b0 = 4 / (self.__dt * self.__dt) + 2 * omega_a2 / self.__dt
b1 = -8 / (self.__dt * self.__dt)
b2 = 4 / (self.__dt * self.__dt) - 2 * omega_a2 / self.__dt
return self.__func_A15(a0, a1, a2, b0, b1, b2, input_data)
def __filter02(self, input_data):
fa3 = self.__f1
# A12
omega_a3 = 2 * self.__pi * fa3
a0 = 16 / (self.__dt * self.__dt) + 17 * omega_a3 / self.__dt + (omega_a3 * omega_a3)
a1 = 2 * omega_a3 * omega_a3 - 32 / (self.__dt * self.__dt)
a2 = 16 / (self.__dt * self.__dt) - 17 * omega_a3 / self.__dt + (omega_a3 * omega_a3)
b0 = 4 / (self.__dt * self.__dt) + 8.5 * omega_a3 / self.__dt + (omega_a3 * omega_a3)
b1 = 2 * omega_a3 * omega_a3 - 8 / (self.__dt * self.__dt)
b2 = 4 / (self.__dt * self.__dt) - 8.5 * omega_a3 / self.__dt + (omega_a3 * omega_a3)
return self.__func_A15(a0, a1, a2, b0, b1, b2, input_data)
def __filter03(self, input_data):
hb1 = self.__h2a
hb2 = self.__h2b
fb = self.__f2
# A13
omega_b = 2 * self.__pi * fb
a0 = 12 / (self.__dt * self.__dt) + (12 * hb2 * omega_b) / \
self.__dt + (omega_b * omega_b)
a1 = 10 * (omega_b * omega_b) - 24 / (self.__dt * self.__dt)
        a2 = 12 / (self.__dt * self.__dt) - (12 * hb2 * omega_b) / \
self.__dt + (omega_b * omega_b)
b0 = 12 / (self.__dt * self.__dt) + (12 * hb1 * omega_b) / \
self.__dt + (omega_b * omega_b)
b1 = 10 * (omega_b * omega_b) - 24 / (self.__dt * self.__dt)
b2 = 12 / (self.__dt * self.__dt) - (12 * hb1 * omega_b) / \
self.__dt + (omega_b * omega_b)
return self.__func_A15(a0, a1, a2, b0, b1, b2, input_data)
def __filter04(self, input_data):
hc = self.__h3
fc = self.__f3
return self.__func_A14(hc, fc, input_data)
def __filter05(self, input_data):
hc = self.__h4
fc = self.__f4
return self.__func_A14(hc, fc, input_data)
def __filter06(self, input_data):
hc = self.__h5
fc = self.__f5
return self.__func_A14(hc, fc, input_data)
def __filter07(self, input_data):
output_data = []
gd = self.__g
for k in range(0, len(input_data)):
output_data.append(input_data[k] * gd)
return output_data
def exec(self, input_data: list[float]):
return self.__filter07(self.__filter06(self.__filter05(self.__filter04(self.__filter03(self.__filter02(self.__filter01(input_data)))))))
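A minimal sketch of running the whole filter chain over synthetic acceleration samples; the sampling rate and signal are placeholders (the list[float] annotations above imply Python 3.9+):
import math
sampling_hz = 100                                   # hypothetical sample rate
flt = Filter(sampling_hz)
# one second of a 5 Hz sine wave standing in for acceleration samples
samples = [math.sin(2 * math.pi * 5 * n / sampling_hz) for n in range(sampling_hz)]
filtered = flt.exec(samples)
print(len(filtered), filtered[:3])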
| 35.835821
| 145
| 0.520408
|
c2ea6c45305731a55ec84c58f7c0aaae7e643cf6
| 98
|
py
|
Python
|
__getPythonSitePackages.py
|
simdevex/01.Basics
|
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
|
[
"MIT"
] | null | null | null |
__getPythonSitePackages.py
|
simdevex/01.Basics
|
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
|
[
"MIT"
] | null | null | null |
__getPythonSitePackages.py
|
simdevex/01.Basics
|
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
|
[
"MIT"
] | null | null | null |
'''a Python program to locate Python site-packages.'''
import site;
print(site.getsitepackages())
| 32.666667
| 54
| 0.755102
|
0e5e054ccccec636c29789a55521e54f03f08bfe
| 2,004
|
py
|
Python
|
reshapr/cli/commands.py
|
UBC-MOAD/Reshapr
|
652e7e7d1b2556f77421d2e6228cf183dfff7d41
|
[
"Apache-2.0"
] | null | null | null |
reshapr/cli/commands.py
|
UBC-MOAD/Reshapr
|
652e7e7d1b2556f77421d2e6228cf183dfff7d41
|
[
"Apache-2.0"
] | 16
|
2022-02-04T22:26:07.000Z
|
2022-03-23T17:43:32.000Z
|
reshapr/cli/commands.py
|
UBC-MOAD/Reshapr
|
652e7e7d1b2556f77421d2e6228cf183dfff7d41
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 – present, UBC EOAS MOAD Group and The University of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SPDX-License-Identifier: Apache-2.0
"""Command-line interface setup and sub-commands registry.
"""
import logging
import click
import structlog
from reshapr.cli.extract import extract
@click.group(
help="""
Reshape model variable arrays from model products like SalishSeaCast, HRDPS & CANESM2/CGCM4.
"""
)
@click.version_option()
@click.option(
"-v",
"--verbosity",
default="info",
show_default=True,
type=click.Choice(("debug", "info", "warning", "error", "critical")),
help="""
Choose how much information you want to see about the progress of the process;
warning, error, and critical should be silent unless something bad goes wrong.
""",
)
def reshapr(verbosity):
"""Click commands group into which sub-commands must be registered.
:param str verbosity: Verbosity level of logging messages about the progress of the process.
Choices are :kbd:`debug, info, warning, error, critical`.
:kbd:`warning`, :kbd:`error`, and :kbd:`critical` should be silent
unless something bad goes wrong.
Default is :kbd:`info`.
"""
structlog.configure(
wrapper_class=structlog.make_filtering_bound_logger(
getattr(logging, verbosity.upper())
)
)
reshapr.add_command(extract)
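Additional sub-commands follow the same registration pattern as extract; a hypothetical example (the info command below does not exist in this file):
import click
@click.command(help="Show version and configuration details (hypothetical).")
def info():
    click.echo("reshapr info placeholder")
reshapr.add_command(info)   # registered the same way as the real extract sub-command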
| 32.322581
| 96
| 0.683134
|
08e6dbb57d60239cf5dce8b19a78fa8770c40697
| 16,394
|
py
|
Python
|
tensorflow/tensorboard/backend/event_processing/event_multiplexer.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 22
|
2017-06-26T01:27:45.000Z
|
2021-06-23T10:00:31.000Z
|
tensorflow/tensorboard/backend/event_processing/event_multiplexer.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 10
|
2017-07-13T00:24:03.000Z
|
2017-07-17T07:39:03.000Z
|
tensorflow/tensorboard/backend/event_processing/event_multiplexer.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 38
|
2017-04-28T04:15:48.000Z
|
2019-09-28T05:11:46.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an interface for working with multiple event files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import six
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tensorboard.backend.event_processing import directory_watcher
from tensorflow.tensorboard.backend.event_processing import event_accumulator
from tensorflow.tensorboard.backend.event_processing import io_wrapper
class EventMultiplexer(object):
"""An `EventMultiplexer` manages access to multiple `EventAccumulator`s.
Each `EventAccumulator` is associated with a `run`, which is a self-contained
TensorFlow execution. The `EventMultiplexer` provides methods for extracting
information about events from multiple `run`s.
Example usage for loading specific runs from files:
```python
x = EventMultiplexer({'run1': 'path/to/run1', 'run2': 'path/to/run2'})
x.Reload()
```
Example usage for loading a directory where each subdirectory is a run
```python
(eg:) /parent/directory/path/
/parent/directory/path/run1/
/parent/directory/path/run1/events.out.tfevents.1001
/parent/directory/path/run1/events.out.tfevents.1002
/parent/directory/path/run2/
/parent/directory/path/run2/events.out.tfevents.9232
/parent/directory/path/run3/
/parent/directory/path/run3/events.out.tfevents.9232
x = EventMultiplexer().AddRunsFromDirectory('/parent/directory/path')
(which is equivalent to:)
x = EventMultiplexer({'run1': '/parent/directory/path/run1', 'run2':...}
```
If you would like to watch `/parent/directory/path`, wait for it to be created
(if necessary) and then periodically pick up new runs, use
`AutoloadingMultiplexer`
@@Tensors
"""
def __init__(self,
run_path_map=None,
size_guidance=event_accumulator.DEFAULT_SIZE_GUIDANCE,
purge_orphaned_data=True):
"""Constructor for the `EventMultiplexer`.
Args:
run_path_map: Dict `{run: path}` which specifies the
name of a run, and the path to find the associated events. If it is
None, then the EventMultiplexer initializes without any runs.
size_guidance: A dictionary mapping from `tagType` to the number of items
to store for each tag of that type. See
`event_accumulator.EventAccumulator` for details.
purge_orphaned_data: Whether to discard any events that were "orphaned" by
a TensorFlow restart.
"""
logging.info('Event Multiplexer initializing.')
self._accumulators_mutex = threading.Lock()
self._accumulators = {}
self._paths = {}
self._reload_called = False
self._size_guidance = size_guidance
self.purge_orphaned_data = purge_orphaned_data
if run_path_map is not None:
      logging.info('Event Multiplexer doing initialization load for %s',
run_path_map)
for (run, path) in six.iteritems(run_path_map):
self.AddRun(path, run)
logging.info('Event Multiplexer done initializing')
def AddRun(self, path, name=None):
"""Add a run to the multiplexer.
If the name is not specified, it is the same as the path.
If a run by that name exists, and we are already watching the right path,
do nothing. If we are watching a different path, replace the event
accumulator.
If `Reload` has been called, it will `Reload` the newly created
accumulators.
Args:
path: Path to the event files (or event directory) for given run.
name: Name of the run to add. If not provided, is set to path.
Returns:
The `EventMultiplexer`.
"""
    if name is None or name == '':
name = path
accumulator = None
with self._accumulators_mutex:
if name not in self._accumulators or self._paths[name] != path:
if name in self._paths and self._paths[name] != path:
# TODO(danmane) - Make it impossible to overwrite an old path with
# a new path (just give the new path a distinct name)
logging.warning('Conflict for name %s: old path %s, new path %s',
name, self._paths[name], path)
logging.info('Constructing EventAccumulator for %s', path)
accumulator = event_accumulator.EventAccumulator(
path,
size_guidance=self._size_guidance,
purge_orphaned_data=self.purge_orphaned_data)
self._accumulators[name] = accumulator
self._paths[name] = path
if accumulator:
if self._reload_called:
accumulator.Reload()
return self
def AddRunsFromDirectory(self, path, name=None):
"""Load runs from a directory; recursively walks subdirectories.
If path doesn't exist, no-op. This ensures that it is safe to call
`AddRunsFromDirectory` multiple times, even before the directory is made.
If path is a directory, load event files in the directory (if any exist) and
recursively call AddRunsFromDirectory on any subdirectories. This mean you
can call AddRunsFromDirectory at the root of a tree of event logs and
TensorBoard will load them all.
If the `EventMultiplexer` is already loaded this will cause
the newly created accumulators to `Reload()`.
Args:
path: A string path to a directory to load runs from.
name: Optionally, what name to apply to the runs. If name is provided
and the directory contains run subdirectories, the name of each subrun
is the concatenation of the parent name and the subdirectory name. If
name is provided and the directory contains event files, then a run
is added called "name" and with the events from the path.
Raises:
ValueError: If the path exists and isn't a directory.
Returns:
The `EventMultiplexer`.
"""
logging.info('Starting AddRunsFromDirectory: %s', path)
for subdir in GetLogdirSubdirectories(path):
logging.info('Adding events from directory %s', subdir)
rpath = os.path.relpath(subdir, path)
subname = os.path.join(name, rpath) if name else rpath
self.AddRun(subdir, name=subname)
logging.info('Done with AddRunsFromDirectory: %s', path)
return self
def Reload(self):
"""Call `Reload` on every `EventAccumulator`."""
logging.info('Beginning EventMultiplexer.Reload()')
self._reload_called = True
# Build a list so we're safe even if the list of accumulators is modified
# even while we're reloading.
with self._accumulators_mutex:
items = list(self._accumulators.items())
names_to_delete = set()
for name, accumulator in items:
try:
accumulator.Reload()
except (OSError, IOError) as e:
logging.error("Unable to reload accumulator '%s': %s", name, e)
except directory_watcher.DirectoryDeletedError:
names_to_delete.add(name)
with self._accumulators_mutex:
for name in names_to_delete:
logging.warning("Deleting accumulator '%s'", name)
del self._accumulators[name]
logging.info('Finished with EventMultiplexer.Reload()')
return self
def PluginAssets(self, plugin_name):
"""Get index of runs and assets for a given plugin.
Args:
plugin_name: Name of the plugin we are checking for.
Returns:
A dictionary that maps from run_name to a list of plugin
assets for that run.
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run: accum.PluginAssets(plugin_name) for run, accum in items}
def RetrievePluginAsset(self, run, plugin_name, asset_name):
"""Return the contents for a specific plugin asset from a run.
Args:
run: The string name of the run.
plugin_name: The string name of a plugin.
asset_name: The string name of an asset.
Returns:
The string contents of the plugin asset.
Raises:
KeyError: If the asset is not available.
"""
accumulator = self._GetAccumulator(run)
return accumulator.RetrievePluginAsset(plugin_name, asset_name)
def FirstEventTimestamp(self, run):
"""Return the timestamp of the first event of the given run.
This may perform I/O if no events have been loaded yet for the run.
Args:
run: A string name of the run for which the timestamp is retrieved.
Returns:
The wall_time of the first event of the run, which will typically be
seconds since the epoch.
Raises:
KeyError: If the run is not found.
ValueError: If the run has no events loaded and there are no events on
disk to load.
"""
accumulator = self._GetAccumulator(run)
return accumulator.FirstEventTimestamp()
def Scalars(self, run, tag):
"""Retrieve the scalar events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.ScalarEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Scalars(tag)
def HealthPills(self, run, node_name):
"""Retrieve the health pill events associated with a run and node name.
Args:
run: A string name of the run for which health pills are retrieved.
node_name: A string name of the node for which health pills are retrieved.
Raises:
KeyError: If the run is not found, or the node name is not available for
the given run.
Returns:
An array of `event_accumulator.HealthPillEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.HealthPills(node_name)
def GetOpsWithHealthPills(self, run):
"""Determines which ops have at least 1 health pill event for a given run.
Args:
run: The name of the run.
Raises:
KeyError: If the run is not found, or the node name is not available for
the given run.
Returns:
The list of names of ops with health pill events.
"""
return self._GetAccumulator(run).GetOpsWithHealthPills()
def Graph(self, run):
"""Retrieve the graph associated with the provided run.
Args:
run: A string name of a run to load the graph for.
Raises:
KeyError: If the run is not found.
ValueError: If the run does not have an associated graph.
Returns:
The `GraphDef` protobuf data structure.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Graph()
def MetaGraph(self, run):
"""Retrieve the metagraph associated with the provided run.
Args:
run: A string name of a run to load the graph for.
Raises:
KeyError: If the run is not found.
ValueError: If the run does not have an associated graph.
Returns:
The `MetaGraphDef` protobuf data structure.
"""
accumulator = self._GetAccumulator(run)
return accumulator.MetaGraph()
def RunMetadata(self, run, tag):
"""Get the session.run() metadata associated with a TensorFlow run and tag.
Args:
run: A string name of a TensorFlow run.
tag: A string name of the tag associated with a particular session.run().
Raises:
KeyError: If the run is not found, or the tag is not available for the
given run.
Returns:
The metadata in the form of `RunMetadata` protobuf data structure.
"""
accumulator = self._GetAccumulator(run)
return accumulator.RunMetadata(tag)
def Histograms(self, run, tag):
"""Retrieve the histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.HistogramEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Histograms(tag)
def CompressedHistograms(self, run, tag):
"""Retrieve the compressed histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.CompressedHistogramEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.CompressedHistograms(tag)
def Images(self, run, tag):
"""Retrieve the image events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.ImageEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Images(tag)
def Audio(self, run, tag):
"""Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Audio(tag)
def Tensors(self, run, tag):
"""Retrieve the tensor events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.TensorEvent`s.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Tensors(tag)
def Runs(self):
"""Return all the run names in the `EventMultiplexer`.
Returns:
```
{runName: { images: [tag1, tag2, tag3],
scalarValues: [tagA, tagB, tagC],
histograms: [tagX, tagY, tagZ],
compressedHistograms: [tagX, tagY, tagZ],
graph: true, meta_graph: true}}
```
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run_name: accumulator.Tags() for run_name, accumulator in items}
def RunPaths(self):
"""Returns a dict mapping run names to event file paths."""
return self._paths
def _GetAccumulator(self, run):
with self._accumulators_mutex:
return self._accumulators[run]
def GetLogdirSubdirectories(path):
"""Returns subdirectories with event files on path."""
if gfile.Exists(path) and not gfile.IsDirectory(path):
raise ValueError('GetLogdirSubdirectories: path exists and is not a '
'directory, %s' % path)
# ListRecursively just yields nothing if the path doesn't exist.
return (
subdir
for (subdir, files) in io_wrapper.ListRecursively(path)
if list(filter(event_accumulator.IsTensorFlowEventsFile, files))
)
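# Minimal end-to-end sketch (the '/tmp/logs' path is a placeholder, not a
# fixture shipped with this module): discover runs under a parent directory,
# load their events, and list the tags each run exposes.
if __name__ == '__main__':
  demo_multiplexer = EventMultiplexer().AddRunsFromDirectory('/tmp/logs')
  demo_multiplexer.Reload()  # load events for every run found on disk
  for demo_run, demo_tags in demo_multiplexer.Runs().items():
    logging.info('Run %s exposes tags: %s', demo_run, demo_tags)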
| 34.368973
| 80
| 0.685068
|
8b8eb45a222442856a763db264d3838fe2ea53c5
| 21,202
|
py
|
Python
|
tests/components/hue/test_sensor_base.py
|
tizzen33/core
|
2a1884a1f7a07848b8b63afd29f59c81f1ffaf62
|
[
"Apache-2.0"
] | 7
|
2019-08-15T13:36:58.000Z
|
2020-03-18T10:46:29.000Z
|
tests/components/hue/test_sensor_base.py
|
tizzen33/core
|
2a1884a1f7a07848b8b63afd29f59c81f1ffaf62
|
[
"Apache-2.0"
] | 87
|
2020-07-06T22:22:54.000Z
|
2022-03-31T06:01:46.000Z
|
tests/components/hue/test_sensor_base.py
|
tizzen33/core
|
2a1884a1f7a07848b8b63afd29f59c81f1ffaf62
|
[
"Apache-2.0"
] | 7
|
2018-10-04T10:12:45.000Z
|
2021-12-29T20:55:40.000Z
|
"""Philips Hue sensors platform tests."""
import asyncio
from unittest.mock import Mock
import aiohue
import pytest
from homeassistant.components import hue
from homeassistant.components.hue import sensor_base
from homeassistant.components.hue.hue_event import CONF_HUE_EVENT
from homeassistant.const import ENTITY_CATEGORY_DIAGNOSTIC
from homeassistant.helpers.entity_registry import async_get
from homeassistant.util import dt as dt_util
from .conftest import create_mock_bridge, setup_bridge_for_sensors as setup_bridge
from tests.common import (
async_capture_events,
async_fire_time_changed,
mock_device_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
PRESENCE_SENSOR_1_PRESENT = {
"state": {"presence": True, "lastupdated": "2019-01-01T01:00:00"},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"ledindication": False,
"usertest": False,
"sensitivity": 2,
"sensitivitymax": 2,
"pending": [],
},
"name": "Living room sensor",
"type": "ZLLPresence",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue motion sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:77-02-0406",
"capabilities": {"certified": True},
}
LIGHT_LEVEL_SENSOR_1 = {
"state": {
"lightlevel": 1,
"dark": True,
"daylight": True,
"lastupdated": "2019-01-01T01:00:00",
},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"tholddark": 12467,
"tholdoffset": 7000,
"ledindication": False,
"usertest": False,
"pending": [],
},
"name": "Hue ambient light sensor 1",
"type": "ZLLLightLevel",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue ambient light sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:77-02-0400",
"capabilities": {"certified": True},
}
TEMPERATURE_SENSOR_1 = {
"state": {"temperature": 1775, "lastupdated": "2019-01-01T01:00:00"},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T01:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"ledindication": False,
"usertest": False,
"pending": [],
},
"name": "Hue temperature sensor 1",
"type": "ZLLTemperature",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue temperature sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:77-02-0402",
"capabilities": {"certified": True},
}
PRESENCE_SENSOR_2_NOT_PRESENT = {
"state": {"presence": False, "lastupdated": "2019-01-01T00:00:00"},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T01:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"ledindication": False,
"usertest": False,
"sensitivity": 2,
"sensitivitymax": 2,
"pending": [],
},
"name": "Kitchen sensor",
"type": "ZLLPresence",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue motion sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:88-02-0406",
"capabilities": {"certified": True},
}
LIGHT_LEVEL_SENSOR_2 = {
"state": {
"lightlevel": 10001,
"dark": True,
"daylight": True,
"lastupdated": "2019-01-01T01:00:00",
},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"tholddark": 12467,
"tholdoffset": 7000,
"ledindication": False,
"usertest": False,
"pending": [],
},
"name": "Hue ambient light sensor 2",
"type": "ZLLLightLevel",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue ambient light sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:88-02-0400",
"capabilities": {"certified": True},
}
TEMPERATURE_SENSOR_2 = {
"state": {"temperature": 1875, "lastupdated": "2019-01-01T01:00:00"},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T01:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"ledindication": False,
"usertest": False,
"pending": [],
},
"name": "Hue temperature sensor 2",
"type": "ZLLTemperature",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue temperature sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:88-02-0402",
"capabilities": {"certified": True},
}
PRESENCE_SENSOR_3_PRESENT = {
"state": {"presence": True, "lastupdated": "2019-01-01T01:00:00"},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"ledindication": False,
"usertest": False,
"sensitivity": 2,
"sensitivitymax": 2,
"pending": [],
},
"name": "Bedroom sensor",
"type": "ZLLPresence",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue motion sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:99-02-0406",
"capabilities": {"certified": True},
}
LIGHT_LEVEL_SENSOR_3 = {
"state": {
"lightlevel": 1,
"dark": True,
"daylight": True,
"lastupdated": "2019-01-01T01:00:00",
},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"tholddark": 12467,
"tholdoffset": 7000,
"ledindication": False,
"usertest": False,
"pending": [],
},
"name": "Hue ambient light sensor 3",
"type": "ZLLLightLevel",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue ambient light sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:99-02-0400",
"capabilities": {"certified": True},
}
TEMPERATURE_SENSOR_3 = {
"state": {"temperature": 1775, "lastupdated": "2019-01-01T01:00:00"},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T01:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"ledindication": False,
"usertest": False,
"pending": [],
},
"name": "Hue temperature sensor 3",
"type": "ZLLTemperature",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue temperature sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:99-02-0402",
"capabilities": {"certified": True},
}
UNSUPPORTED_SENSOR = {
"state": {"status": 0, "lastupdated": "2019-01-01T01:00:00"},
"config": {"on": True, "reachable": True},
"name": "Unsupported sensor",
"type": "CLIPGenericStatus",
"modelid": "PHWA01",
"manufacturername": "Philips",
"swversion": "1.0",
"uniqueid": "arbitrary",
"recycle": True,
}
HUE_TAP_REMOTE_1 = {
"state": {"buttonevent": 17, "lastupdated": "2019-06-22T14:43:50"},
"swupdate": {"state": "notupdatable", "lastinstall": None},
"config": {"on": True},
"name": "Hue Tap",
"type": "ZGPSwitch",
"modelid": "ZGPSWITCH",
"manufacturername": "Philips",
"productname": "Hue tap switch",
"diversityid": "d8cde5d5-0eef-4b95-b0f0-71ddd2952af4",
"uniqueid": "00:00:00:00:00:44:23:08-f2",
"capabilities": {"certified": True, "primary": True, "inputs": []},
}
HUE_DIMMER_REMOTE_1 = {
"state": {"buttonevent": 4002, "lastupdated": "2019-12-28T21:58:02"},
"swupdate": {"state": "noupdates", "lastinstall": "2019-10-13T13:16:15"},
"config": {"on": True, "battery": 100, "reachable": True, "pending": []},
"name": "Hue dimmer switch 1",
"type": "ZLLSwitch",
"modelid": "RWL021",
"manufacturername": "Philips",
"productname": "Hue dimmer switch",
"diversityid": "73bbabea-3420-499a-9856-46bf437e119b",
"swversion": "6.1.1.28573",
"uniqueid": "00:17:88:01:10:3e:3a:dc-02-fc00",
"capabilities": {"certified": True, "primary": True, "inputs": []},
}
SENSOR_RESPONSE = {
"1": PRESENCE_SENSOR_1_PRESENT,
"2": LIGHT_LEVEL_SENSOR_1,
"3": TEMPERATURE_SENSOR_1,
"4": PRESENCE_SENSOR_2_NOT_PRESENT,
"5": LIGHT_LEVEL_SENSOR_2,
"6": TEMPERATURE_SENSOR_2,
"7": HUE_TAP_REMOTE_1,
"8": HUE_DIMMER_REMOTE_1,
}
async def test_no_sensors(hass, mock_bridge):
"""Test the update_items function when no sensors are found."""
mock_bridge.allow_groups = True
mock_bridge.mock_sensor_responses.append({})
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
assert len(hass.states.async_all()) == 0
async def test_sensors_with_multiple_bridges(hass, mock_bridge):
"""Test the update_items function with some sensors."""
mock_bridge_2 = create_mock_bridge(hass)
mock_bridge_2.mock_sensor_responses.append(
{
"1": PRESENCE_SENSOR_3_PRESENT,
"2": LIGHT_LEVEL_SENSOR_3,
"3": TEMPERATURE_SENSOR_3,
}
)
mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE)
await setup_bridge(hass, mock_bridge)
await setup_bridge(hass, mock_bridge_2, hostname="mock-bridge-2")
assert len(mock_bridge.mock_requests) == 1
assert len(mock_bridge_2.mock_requests) == 1
# 3 "physical" sensors with 3 virtual sensors each + 1 battery sensor
assert len(hass.states.async_all()) == 10
async def test_sensors(hass, mock_bridge):
"""Test the update_items function with some sensors."""
mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
# 2 "physical" sensors with 3 virtual sensors each
assert len(hass.states.async_all()) == 7
presence_sensor_1 = hass.states.get("binary_sensor.living_room_sensor_motion")
light_level_sensor_1 = hass.states.get("sensor.living_room_sensor_light_level")
temperature_sensor_1 = hass.states.get("sensor.living_room_sensor_temperature")
assert presence_sensor_1 is not None
assert presence_sensor_1.state == "on"
assert light_level_sensor_1 is not None
assert light_level_sensor_1.state == "1.0"
assert light_level_sensor_1.name == "Living room sensor light level"
assert temperature_sensor_1 is not None
assert temperature_sensor_1.state == "17.75"
assert temperature_sensor_1.name == "Living room sensor temperature"
presence_sensor_2 = hass.states.get("binary_sensor.kitchen_sensor_motion")
light_level_sensor_2 = hass.states.get("sensor.kitchen_sensor_light_level")
temperature_sensor_2 = hass.states.get("sensor.kitchen_sensor_temperature")
assert presence_sensor_2 is not None
assert presence_sensor_2.state == "off"
assert light_level_sensor_2 is not None
assert light_level_sensor_2.state == "10.0"
assert light_level_sensor_2.name == "Kitchen sensor light level"
assert temperature_sensor_2 is not None
assert temperature_sensor_2.state == "18.75"
assert temperature_sensor_2.name == "Kitchen sensor temperature"
battery_remote_1 = hass.states.get("sensor.hue_dimmer_switch_1_battery_level")
assert battery_remote_1 is not None
assert battery_remote_1.state == "100"
assert battery_remote_1.name == "Hue dimmer switch 1 battery level"
ent_reg = async_get(hass)
assert (
ent_reg.async_get("sensor.hue_dimmer_switch_1_battery_level").entity_category
== ENTITY_CATEGORY_DIAGNOSTIC
)
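# The numeric assertions above follow from how the Hue API encodes readings:
# temperature arrives in hundredths of a degree Celsius, and "lightlevel" is
# roughly 10000 * log10(lux) + 1. The helper below only illustrates those
# conversions; it is not part of the integration under test.
def _expected_sensor_values(raw_temperature, raw_lightlevel):
    """Convert raw Hue readings into the values asserted in test_sensors."""
    temperature_c = raw_temperature / 100  # e.g. 1775 -> 17.75
    lux = round(10 ** ((raw_lightlevel - 1) / 10000), 2)  # e.g. 10001 -> 10.0, 1 -> 1.0
    return temperature_c, lux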
async def test_unsupported_sensors(hass, mock_bridge):
"""Test that unsupported sensors don't get added and don't fail."""
response_with_unsupported = dict(SENSOR_RESPONSE)
response_with_unsupported["7"] = UNSUPPORTED_SENSOR
mock_bridge.mock_sensor_responses.append(response_with_unsupported)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
# 2 "physical" sensors with 3 virtual sensors each + 1 battery sensor
assert len(hass.states.async_all()) == 7
async def test_new_sensor_discovered(hass, mock_bridge):
"""Test if 2nd update has a new sensor."""
mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
assert len(hass.states.async_all()) == 7
new_sensor_response = dict(SENSOR_RESPONSE)
new_sensor_response.update(
{
"9": PRESENCE_SENSOR_3_PRESENT,
"10": LIGHT_LEVEL_SENSOR_3,
"11": TEMPERATURE_SENSOR_3,
}
)
mock_bridge.mock_sensor_responses.append(new_sensor_response)
# Force updates to run again
await mock_bridge.sensor_manager.coordinator.async_refresh()
await hass.async_block_till_done()
assert len(mock_bridge.mock_requests) == 2
assert len(hass.states.async_all()) == 10
presence = hass.states.get("binary_sensor.bedroom_sensor_motion")
assert presence is not None
assert presence.state == "on"
temperature = hass.states.get("sensor.bedroom_sensor_temperature")
assert temperature is not None
assert temperature.state == "17.75"
async def test_sensor_removed(hass, mock_bridge):
"""Test if 2nd update has removed sensor."""
mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
assert len(hass.states.async_all()) == 7
mock_bridge.mock_sensor_responses.clear()
keys = ("1", "2", "3")
mock_bridge.mock_sensor_responses.append({k: SENSOR_RESPONSE[k] for k in keys})
# Force updates to run again
await mock_bridge.sensor_manager.coordinator.async_refresh()
# To flush out the service call to update the group
await hass.async_block_till_done()
assert len(mock_bridge.mock_requests) == 2
assert len(hass.states.async_all()) == 3
sensor = hass.states.get("binary_sensor.living_room_sensor_motion")
assert sensor is not None
removed_sensor = hass.states.get("binary_sensor.kitchen_sensor_motion")
assert removed_sensor is None
async def test_update_timeout(hass, mock_bridge):
"""Test bridge marked as not available if timeout error during update."""
mock_bridge.api.sensors.update = Mock(side_effect=asyncio.TimeoutError)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 0
assert len(hass.states.async_all()) == 0
async def test_update_unauthorized(hass, mock_bridge):
"""Test bridge marked as not authorized if unauthorized during update."""
mock_bridge.api.sensors.update = Mock(side_effect=aiohue.Unauthorized)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 0
assert len(hass.states.async_all()) == 0
assert len(mock_bridge.handle_unauthorized_error.mock_calls) == 1
async def test_hue_events(hass, mock_bridge, device_reg):
"""Test that hue remotes fire events when pressed."""
mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE)
events = async_capture_events(hass, CONF_HUE_EVENT)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
assert len(hass.states.async_all()) == 7
assert len(events) == 0
hue_tap_device = device_reg.async_get_device(
{(hue.DOMAIN, "00:00:00:00:00:44:23:08")}
)
mock_bridge.api.sensors["7"].last_event = {"type": "button"}
mock_bridge.api.sensors["8"].last_event = {"type": "button"}
new_sensor_response = dict(SENSOR_RESPONSE)
new_sensor_response["7"] = dict(new_sensor_response["7"])
new_sensor_response["7"]["state"] = {
"buttonevent": 18,
"lastupdated": "2019-12-28T22:58:03",
}
mock_bridge.mock_sensor_responses.append(new_sensor_response)
# Force updates to run again
async_fire_time_changed(
hass, dt_util.utcnow() + sensor_base.SensorManager.SCAN_INTERVAL
)
await hass.async_block_till_done()
assert len(mock_bridge.mock_requests) == 2
assert len(hass.states.async_all()) == 7
assert len(events) == 1
assert events[-1].data == {
"device_id": hue_tap_device.id,
"id": "hue_tap",
"unique_id": "00:00:00:00:00:44:23:08-f2",
"event": 18,
"last_updated": "2019-12-28T22:58:03",
}
hue_dimmer_device = device_reg.async_get_device(
{(hue.DOMAIN, "00:17:88:01:10:3e:3a:dc")}
)
new_sensor_response = dict(new_sensor_response)
new_sensor_response["8"] = dict(new_sensor_response["8"])
new_sensor_response["8"]["state"] = {
"buttonevent": 3002,
"lastupdated": "2019-12-28T22:58:03",
}
mock_bridge.mock_sensor_responses.append(new_sensor_response)
# Force updates to run again
async_fire_time_changed(
hass, dt_util.utcnow() + sensor_base.SensorManager.SCAN_INTERVAL
)
await hass.async_block_till_done()
assert len(mock_bridge.mock_requests) == 3
assert len(hass.states.async_all()) == 7
assert len(events) == 2
assert events[-1].data == {
"device_id": hue_dimmer_device.id,
"id": "hue_dimmer_switch_1",
"unique_id": "00:17:88:01:10:3e:3a:dc-02-fc00",
"event": 3002,
"last_updated": "2019-12-28T22:58:03",
}
# Fire old event, it should be ignored
new_sensor_response = dict(new_sensor_response)
new_sensor_response["8"] = dict(new_sensor_response["8"])
new_sensor_response["8"]["state"] = {
"buttonevent": 18,
"lastupdated": "2019-12-28T22:58:02",
}
mock_bridge.mock_sensor_responses.append(new_sensor_response)
# Force updates to run again
async_fire_time_changed(
hass, dt_util.utcnow() + sensor_base.SensorManager.SCAN_INTERVAL
)
await hass.async_block_till_done()
assert len(mock_bridge.mock_requests) == 4
assert len(hass.states.async_all()) == 7
assert len(events) == 2
# Add a new remote. In discovery the new event is registered **but not fired**
new_sensor_response = dict(new_sensor_response)
new_sensor_response["21"] = {
"state": {
"rotaryevent": 2,
"expectedrotation": 208,
"expectedeventduration": 400,
"lastupdated": "2020-01-31T15:56:19",
},
"swupdate": {"state": "noupdates", "lastinstall": "2019-11-26T03:35:21"},
"config": {"on": True, "battery": 100, "reachable": True, "pending": []},
"name": "Lutron Aurora 1",
"type": "ZLLRelativeRotary",
"modelid": "Z3-1BRL",
"manufacturername": "Lutron",
"productname": "Lutron Aurora",
"diversityid": "2c3a75ff-55c4-4e4d-8c44-82d330b8eb9b",
"swversion": "3.4",
"uniqueid": "ff:ff:00:0f:e7:fd:bc:b7-01-fc00-0014",
"capabilities": {
"certified": True,
"primary": True,
"inputs": [
{
"repeatintervals": [400],
"events": [
{"rotaryevent": 1, "eventtype": "start"},
{"rotaryevent": 2, "eventtype": "repeat"},
],
}
],
},
}
mock_bridge.mock_sensor_responses.append(new_sensor_response)
# Force updates to run again
async_fire_time_changed(
hass, dt_util.utcnow() + sensor_base.SensorManager.SCAN_INTERVAL
)
await hass.async_block_till_done()
assert len(mock_bridge.mock_requests) == 5
assert len(hass.states.async_all()) == 8
assert len(events) == 2
# A new press fires the event
new_sensor_response["21"]["state"]["lastupdated"] = "2020-01-31T15:57:19"
mock_bridge.mock_sensor_responses.append(new_sensor_response)
# Force updates to run again
async_fire_time_changed(
hass, dt_util.utcnow() + sensor_base.SensorManager.SCAN_INTERVAL
)
await hass.async_block_till_done()
hue_aurora_device = device_reg.async_get_device(
{(hue.DOMAIN, "ff:ff:00:0f:e7:fd:bc:b7")}
)
assert len(mock_bridge.mock_requests) == 6
assert len(hass.states.async_all()) == 8
assert len(events) == 3
assert events[-1].data == {
"device_id": hue_aurora_device.id,
"id": "lutron_aurora_1",
"unique_id": "ff:ff:00:0f:e7:fd:bc:b7-01-fc00-0014",
"event": 2,
"last_updated": "2020-01-31T15:57:19",
}
| 34.252019
| 85
| 0.639468
|
959489cd42ab189d493cf2939fae427e1ddc59ed
| 2,716
|
py
|
Python
|
model/cnn.py
|
dehuachen/mem_cnn_sim
|
3262a9b607a5aafccbe4e5a665e74a7cde926da3
|
[
"MIT"
] | null | null | null |
model/cnn.py
|
dehuachen/mem_cnn_sim
|
3262a9b607a5aafccbe4e5a665e74a7cde926da3
|
[
"MIT"
] | null | null | null |
model/cnn.py
|
dehuachen/mem_cnn_sim
|
3262a9b607a5aafccbe4e5a665e74a7cde926da3
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch
from torch.nn import functional as F
from torch.autograd import Variable as V
class CNN(nn.Module):
def __init__(self, param, embedding=None):
super(CNN, self).__init__()
self.param = param
self.vocab_size = param['cand_vocab_size']
self.embedding_size = param['embedding_size']
self.num_filters = param['num_filters']
if embedding:
self.embedding = embedding
else:
self.embedding = nn.Embedding(self.vocab_size, self.embedding_size)
        # a possible modification is to apply n convolutions
self.cnn1 = nn.Conv2d(1, self.num_filters, (1, self.embedding_size))
self.cnn2 = nn.Conv2d(1, self.num_filters, (2, self.embedding_size))
self.cnn3 = nn.Conv2d(1, self.num_filters, (3, self.embedding_size))
self.l1 = nn.Linear(self.num_filters * 3, self.embedding_size)
self.l2 = nn.Linear(self.embedding_size, self.embedding_size)
self.l3 = nn.Linear(self.embedding_size, self.embedding_size)
self.weights_init()
def forward(self, cand):
# embedding
cand_ = self.embedding(cand) # (num_cand, cand_size, embed_size)
cand_ = cand_.unsqueeze(1) # (num_cand, 1, cand_size, embed_size)
        cand_1 = F.relu(self.cnn1(cand_)).squeeze(3) # (num_cand, num_filters, width)
        cand_1 = F.max_pool1d(cand_1, cand_1.size(2)).squeeze(2) # (num_cand, num_filters)
        cand_2 = F.relu(self.cnn2(cand_)).squeeze(3) # (num_cand, num_filters, width)
        cand_2 = F.max_pool1d(cand_2, cand_2.size(2)).squeeze(2) # (num_cand, num_filters)
        cand_3 = F.relu(self.cnn3(cand_)).squeeze(3) # (num_cand, num_filters, width)
        cand_3 = F.max_pool1d(cand_3, cand_3.size(2)).squeeze(2) # (num_cand, num_filters)
cand_ = torch.cat([cand_1, cand_2, cand_3], 1)
cand_ = F.relu(self.l1(cand_)) # (num_cand, embed_size)
cand_ = F.relu(self.l2(cand_)) # (num_cand, embed_size)
cand_ = F.relu(self.l3(cand_)) # (num_cand, embed_size)
return cand_
def weights_init(self):
# weights initialization
for m in self.modules():
if isinstance(m, nn.Conv2d) \
or isinstance(m, nn.Embedding) \
or isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.05)
if isinstance(m, nn.Embedding):
m.weight.data[0].zero_()
# if __name__ == '__main__':
# param = {'num_filters': 20, "cand_vocab_size": 20, "embedding_size": 20}
# mem = CNN(param)
#
# # print(param)
# utter = torch.ones(20, 10).long()
#
# print(mem(V(utter)))
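# Runnable variant of the commented-out check above; it only verifies the output
# shape of this TextCNN-style encoder (the parameters mirror the block above).
if __name__ == '__main__':
    demo_param = {'num_filters': 20, 'cand_vocab_size': 20, 'embedding_size': 20}
    demo_model = CNN(demo_param)
    demo_cand = torch.ones(20, 10).long()  # 20 candidates of 10 tokens each
    demo_out = demo_model(demo_cand)
    assert demo_out.shape == (20, demo_param['embedding_size'])
    print(demo_out.shape)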
| 36.702703
| 94
| 0.618925
|
99165326b771f2352d0ff5dcf51385da14fedd02
| 5,417
|
py
|
Python
|
lib/model/dense_align/box_3d.py
|
mit-drl/Stereo-RCNN
|
63c6ab98b7a5e36c7bcfdec4529804fc940ee900
|
[
"MIT"
] | 681
|
2019-04-10T08:24:25.000Z
|
2022-03-31T20:15:05.000Z
|
lib/model/dense_align/box_3d.py
|
xinCNnix/Stereo-RCNN
|
63c6ab98b7a5e36c7bcfdec4529804fc940ee900
|
[
"MIT"
] | 83
|
2019-04-12T11:19:08.000Z
|
2022-03-16T07:02:52.000Z
|
lib/model/dense_align/box_3d.py
|
xinCNnix/Stereo-RCNN
|
63c6ab98b7a5e36c7bcfdec4529804fc940ee900
|
[
"MIT"
] | 182
|
2019-04-10T09:06:08.000Z
|
2022-03-15T02:18:52.000Z
|
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.models as models
from torch.autograd import Variable
import numpy as np
from model.utils import kitti_utils
import math as m
class Box3d(nn.Module):
def __init__(self, poses):
super(Box3d, self).__init__()
self.T_c_o = poses[0:3]
self.size = poses[3:6]
self.R_c_o = torch.FloatTensor([[ m.cos(poses[6]), 0 ,m.sin(poses[6])],
[ 0, 1 , 0],
[-m.sin(poses[6]), 0 ,m.cos(poses[6])]]).type_as(self.T_c_o)
self.P_o = poses.new(8,3).zero_()
self.P_o[0,0],self.P_o[0,1], self.P_o[0,2] = -self.size[0]/2, 0, -self.size[2]/2.0
self.P_o[1,0],self.P_o[1,1], self.P_o[1,2] = -self.size[0]/2, 0, self.size[2]/2.0
self.P_o[2,0],self.P_o[2,1], self.P_o[2,2] = self.size[0]/2, 0, self.size[2]/2.0 #max
self.P_o[3,0],self.P_o[3,1], self.P_o[3,2] = self.size[0]/2, 0, -self.size[2]/2.0
self.P_o[4,0],self.P_o[4,1], self.P_o[4,2] = -self.size[0]/2, -self.size[1], -self.size[2]/2.0 # min
self.P_o[5,0],self.P_o[5,1], self.P_o[5,2] = -self.size[0]/2, -self.size[1], self.size[2]/2.0
self.P_o[6,0],self.P_o[6,1], self.P_o[6,2] = self.size[0]/2, -self.size[1], self.size[2]/2.0
self.P_o[7,0],self.P_o[7,1], self.P_o[7,2] = self.size[0]/2, -self.size[1], -self.size[2]/2.0
P_c = poses.new(8,3).zero_()
for i in range(8):
P_c[i] = torch.mm(self.R_c_o, self.P_o[i].unsqueeze(1)).squeeze(1) + self.T_c_o
def creatPlane(p1, p2, p3):
arrow1 = p2 - p1
arrow2 = p3 - p1
normal = torch.cross(arrow1, arrow2)
plane = p1.new((4)).zero_()
plane[0] = normal[0]
plane[1] = normal[1]
plane[2] = normal[2]
plane[3] = -normal[0] * p1[0] - normal[1] * p1[1] - normal[2] * p1[2]
return plane
self.planes_c = poses.new(6,4).zero_()
self.planes_c[0] = creatPlane(P_c[0], P_c[3], P_c[4]) #front 0
self.planes_c[1] = creatPlane(P_c[2], P_c[3], P_c[6]) #right 1
self.planes_c[2] = creatPlane(P_c[1], P_c[2], P_c[5]) #back 2
self.planes_c[3] = creatPlane(P_c[0], P_c[1], P_c[4]) #left 3
        self.planes_c[4] = creatPlane(P_c[0], P_c[1], P_c[2]) #bottom 4
self.planes_c[5] = creatPlane(P_c[4], P_c[5], P_c[6]) #top 5
# compute the nearest vertex
self.nearest_dist = 100000000
for i in range(P_c.size()[0]):
if torch.norm(P_c[i]) < self.nearest_dist:
self.nearest_dist = torch.norm(P_c[i])
                self.nearest_vertex = i # find the vertex nearest to the camera center
def mask_out_box(self, valid_insec, insection_c):
DOUBLE_EPS = 0.01
R_c_o_t = self.R_c_o.permute(1,0)
insection_c = insection_c[:,:,0:3] - self.T_c_o
insection_o = insection_c.new(insection_c.size()).zero_()
insection_o[:,:,0] = R_c_o_t[0,0]*insection_c[:,:,0] + R_c_o_t[0,1]*insection_c[:,:,1] + R_c_o_t[0,2]*insection_c[:,:,2]
insection_o[:,:,1] = R_c_o_t[1,0]*insection_c[:,:,0] + R_c_o_t[1,1]*insection_c[:,:,1] + R_c_o_t[1,2]*insection_c[:,:,2]
insection_o[:,:,2] = R_c_o_t[2,0]*insection_c[:,:,0] + R_c_o_t[2,1]*insection_c[:,:,1] + R_c_o_t[2,2]*insection_c[:,:,2]
mask = ((insection_o[:,:,0] >= self.P_o[4,0] - DOUBLE_EPS) &\
(insection_o[:,:,1] >= self.P_o[4,1] - DOUBLE_EPS) &\
(insection_o[:,:,2] >= self.P_o[4,2] - DOUBLE_EPS) &\
(insection_o[:,:,0] <= self.P_o[2,0] + DOUBLE_EPS) &\
(insection_o[:,:,1] <= self.P_o[2,1] + DOUBLE_EPS) &\
(insection_o[:,:,2] <= self.P_o[2,2] + DOUBLE_EPS)).type_as(insection_o)
#print('valid_insec',valid_insec[valid_insec[:,:,3]==0])
#print('insection_o',insection_o[valid_insec[:,:,3]==0])
valid_insec[:,:,0][valid_insec[:,:,3]==0] = insection_c[:,:,0][valid_insec[:,:,3]==0]
valid_insec[:,:,1][valid_insec[:,:,3]==0] = insection_c[:,:,1][valid_insec[:,:,3]==0]
valid_insec[:,:,2][valid_insec[:,:,3]==0] = insection_c[:,:,2][valid_insec[:,:,3]==0]
valid_insec[:,:,3][valid_insec[:,:,3]==0] = mask[valid_insec[:,:,3]==0]
return valid_insec
def BoxRayInsec(self, pt2):
plane_group = torch.IntTensor([[0, 3, 4],
[2, 3, 4],
[1, 2, 4],
[0, 1, 4],
[0, 3, 5],
[2, 3, 5],
[1, 2, 5],
[0, 1, 5]])
homo_pt3 = torch.cat((pt2, torch.ones_like(pt2[:,:,0])),2)
valid_insec = homo_pt3.new(homo_pt3.size()[0],homo_pt3.size()[1], 4).zero_() # x_o, y_o, z_o, mask
for i in range(3):
plane = self.planes_c[plane_group[self.nearest_vertex,i]]
            # get the intersection with this plane; t is the per-pixel ray scale factor
t = homo_pt3[:,:,0]*plane[0] + homo_pt3[:,:,1]*plane[1] + homo_pt3[:,:,2]*plane[2]
t = -t.reciprocal()*plane[3]
insection_c = homo_pt3 * t.unsqueeze(2)
valid_insec = self.mask_out_box(valid_insec, insection_c)
return valid_insec
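# Sanity-check sketch of the plane/ray convention used above (the numbers are
# illustrative only): planes are stored as (a, b, c, d) with a*x + b*y + c*z + d = 0,
# and BoxRayInsec intersects the camera ray through a homogeneous pixel (u, v, 1)
# with such a plane at t = -d / (n . (u, v, 1)).
if __name__ == '__main__':
    demo_plane = torch.FloatTensor([0., 0., 1., -5.])  # the plane z = 5
    demo_ray = torch.FloatTensor([0.1, -0.2, 1.])  # homogeneous pixel direction
    demo_t = -demo_plane[3] / torch.dot(demo_plane[:3], demo_ray)
    demo_point = demo_t * demo_ray
    assert torch.allclose(demo_point[2], torch.tensor(5.))  # the hit lies on z = 5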
| 51.103774
| 128
| 0.525568
|
b179522632a9cedd65fde08bc2acb3a3e2c5337b
| 3,593
|
py
|
Python
|
iss/test/test_generator.py
|
standage/InSilicoSeq
|
cd73724c65f2227ff90504f4367f9699840e09e9
|
[
"MIT"
] | null | null | null |
iss/test/test_generator.py
|
standage/InSilicoSeq
|
cd73724c65f2227ff90504f4367f9699840e09e9
|
[
"MIT"
] | null | null | null |
iss/test/test_generator.py
|
standage/InSilicoSeq
|
cd73724c65f2227ff90504f4367f9699840e09e9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from iss import generator
from iss.util import cleanup
from iss.error_models import ErrorModel, basic, kde
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from Bio.SeqRecord import SeqRecord
from nose.tools import with_setup, raises
import os
import sys
import random
import numpy as np
# due to inconsistent seeding between python 2 and 3, some of the following
# tests are disabled with python2
def setup_function():
output_file_prefix = 'data/.test'
def teardown_function():
cleanup(['data/.test.iss.tmp.my_genome.0_R1.fastq',
'data/.test.iss.tmp.my_genome.0_R2.fastq'])
@raises(SystemExit)
def test_cleanup_fail():
cleanup('data/does_not_exist')
@with_setup(setup_function, teardown_function)
def test_simulate_and_save():
err_mod = basic.BasicErrorModel()
ref_genome = SeqRecord(
Seq(str('AAAAACCCCC' * 100),
IUPAC.unambiguous_dna
),
id='my_genome',
description='test genome'
)
generator.reads(ref_genome, err_mod, 1000, 0, 'data/.test', 0, True)
@with_setup(setup_function, teardown_function)
def test_simulate_and_save_short():
err_mod = basic.BasicErrorModel()
ref_genome = SeqRecord(
Seq(str('AACCC' * 100),
IUPAC.unambiguous_dna
),
id='my_genome',
description='test genome'
)
generator.reads(ref_genome, err_mod, 1000, 0, 'data/.test', 0, True)
@raises(SystemExit)
def test_small_input():
err_mod = kde.KDErrorModel('data/ecoli.npz')
ref_genome = SeqRecord(
Seq(str('AAAAACCCCC'),
IUPAC.unambiguous_dna
),
id='my_genome',
description='test genome'
)
generator.simulate_read(ref_genome, err_mod, 1)
def test_basic():
if sys.version_info > (3,):
random.seed(42)
np.random.seed(42)
err_mod = basic.BasicErrorModel()
ref_genome = SeqRecord(
Seq(str('AAAAACCCCC' * 100),
IUPAC.unambiguous_dna
),
id='my_genome',
description='test genome'
)
read_tuple = generator.simulate_read(ref_genome, err_mod, 1)
big_read = ''.join(str(read_tuple[0].seq) + str(read_tuple[1].seq))
assert big_read[-15:] == 'TTTTGGGGGTTTTTG'
def test_kde():
if sys.version_info > (3,):
random.seed(42)
np.random.seed(42)
err_mod = kde.KDErrorModel('data/ecoli.npz')
ref_genome = SeqRecord(
Seq(str('CGTTTCAACC' * 400),
IUPAC.unambiguous_dna
),
id='my_genome',
description='test genome'
)
read_tuple = generator.simulate_read(ref_genome, err_mod, 1)
big_read = ''.join(str(read_tuple[0].seq) + str(read_tuple[1].seq))
assert big_read[:15] == 'CCGTTTCAACCCGTT'
def test_kde_short():
if sys.version_info > (3,):
random.seed(42)
np.random.seed(42)
err_mod = kde.KDErrorModel('data/ecoli.npz')
ref_genome = SeqRecord(
Seq(str('AAACC' * 100),
IUPAC.unambiguous_dna
),
id='my_genome',
description='test genome'
)
read_tuple = generator.simulate_read(ref_genome, err_mod, 1)
big_read = ''.join(str(read_tuple[0].seq) + str(read_tuple[1].seq))
assert big_read == 'ACCAAACCAAACCAAACCAAGGTTTGGTTTGGTTTGGTGT'
| 28.515873
| 79
| 0.608127
|
a19fc018eed4af406c5975af7066f06e828aeba5
| 18,451
|
py
|
Python
|
test/functional/test_framework/comptool.py
|
BradVer/Titan
|
69aa5de8e9cdc8b843f11a7fed3a9defe4d9ae22
|
[
"MIT"
] | 7
|
2019-04-04T01:15:13.000Z
|
2021-11-11T08:19:23.000Z
|
test/functional/test_framework/comptool.py
|
BradVer/Titan
|
69aa5de8e9cdc8b843f11a7fed3a9defe4d9ae22
|
[
"MIT"
] | 3
|
2019-10-16T18:02:29.000Z
|
2020-08-18T14:57:45.000Z
|
test/functional/test_framework/comptool.py
|
BradVer/Titan
|
69aa5de8e9cdc8b843f11a7fed3a9defe4d9ae22
|
[
"MIT"
] | 23
|
2019-04-18T05:07:49.000Z
|
2021-10-03T04:51:20.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Titancoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Compare two or more titancoinds to each other.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
TestNode behaves as follows:
Configure with a BlockStore and TxStore
on_inv: log the message but don't request
on_headers: log the chain tip
on_pong: update ping response map (for synchronization)
on_getheaders: provide headers via BlockStore
on_getdata: provide blocks via BlockStore
"""
from .mininode import *
from .blockstore import BlockStore, TxStore
from .util import p2p_port, wait_until
import logging
logger=logging.getLogger("TestFramework.comptool")
global mininode_lock
class RejectResult():
"""Outcome that expects rejection of a transaction or block."""
def __init__(self, code, reason=b''):
self.code = code
self.reason = reason
def match(self, other):
if self.code != other.code:
return False
return other.reason.startswith(self.reason)
def __repr__(self):
return '%i:%s' % (self.code,self.reason or '*')
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
super().__init__()
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
self.block_reject_map = {}
self.tx_reject_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self, conn):
self.closed = True
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def on_reject(self, conn, message):
if message.message == b'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == b'block':
self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
def send_header(self, header):
m = msg_headers()
m.headers.append(header)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
class TestInstance():
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
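# A sketch of how a test generator typically yields these (illustrative only;
# `block` and `tx` stand for objects built elsewhere with the usual blocktools
# helpers):
#
#   yield TestInstance([[block, True]])                    # block must be accepted
#   yield TestInstance([[block, RejectResult(16)]])        # block must be rejected with code 16
#   yield TestInstance([[tx, None]], sync_every_tx=True)   # only compare mempools across nodes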
class TestManager():
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.test_nodes = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
test_node = TestNode(self.block_store, self.tx_store)
self.test_nodes.append(test_node)
self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
test_node.add_connection(self.connections[-1])
def clear_all_connections(self):
self.connections = []
self.test_nodes = []
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.test_nodes)
wait_until(disconnected, timeout=10, lock=mininode_lock)
def wait_for_verack(self):
return all(node.wait_for_verack() for node in self.test_nodes)
def wait_for_pings(self, counter):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.test_nodes)
wait_until(received_pongs, lock=mininode_lock)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.test_nodes
)
# --> error if not requested
wait_until(blocks_requested, attempts=20*num_blocks, lock=mininode_lock)
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Analogous to sync_block (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.test_nodes
)
# --> error if not requested
wait_until(transaction_requested, attempts=20*num_events, lock=mininode_lock)
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
if c.cb.bestblockhash == blockhash:
return False
if blockhash not in c.cb.block_reject_map:
logger.error('Block not in reject map: %064x' % (blockhash))
return False
if not outcome.match(c.cb.block_reject_map[blockhash]):
logger.error('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash))
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
if txhash in c.cb.lastInv:
return False
if txhash not in c.cb.tx_reject_map:
logger.error('Tx not in reject map: %064x' % (txhash))
return False
if not outcome.match(c.cb.tx_reject_map[txhash]):
logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash))
return False
elif ((txhash in c.cb.lastInv) != outcome):
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome, tip ] = [ None, None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for test_obj in test_instance.blocks_and_transactions:
b_or_t = test_obj[0]
outcome = test_obj[1]
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
tip = block.sha256
# each test_obj can have an optional third argument
# to specify the tip we should compare with
# (default is to use the block being tested)
if len(test_obj) >= 3:
tip = test_obj[2]
# Add to shared block_store, set as current block
# If there was an open getdata request for the block
# previously, and we didn't have an entry in the
# block_store, then immediately deliver, because the
# node wouldn't send another getdata request while
# the earlier one is outstanding.
first_block_with_hash = True
if self.block_store.get(block.sha256) is not None:
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
# if we expect success, send inv and sync every block
# if we expect failure, just push the block and see what happens.
if outcome == True:
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
else:
[ c.send_message(msg_block(block)) for c in self.connections ]
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
[ c.cb.send_header(block_header) for c in self.connections ]
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
logger.info("Test %d: PASS" % test_number)
test_number += 1
[ c.disconnect_node() for c in self.connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
| 45.112469
| 149
| 0.601593
|
736462c9e5f9a883c30ca823dfa3f76c51fda70b
| 984
|
py
|
Python
|
EE.py
|
boyuanzheng010/bert_clsutering
|
3d2193542989fa799d8004f2e386b33c79ca3a1a
|
[
"MIT"
] | 6
|
2020-08-19T08:38:28.000Z
|
2022-02-16T16:00:32.000Z
|
EE.py
|
boyuanzheng010/bert_clsutering
|
3d2193542989fa799d8004f2e386b33c79ca3a1a
|
[
"MIT"
] | 2
|
2020-06-27T00:57:26.000Z
|
2021-10-31T15:30:34.000Z
|
EE.py
|
boyuanzheng010/bert_clsutering
|
3d2193542989fa799d8004f2e386b33c79ca3a1a
|
[
"MIT"
] | 4
|
2020-06-15T15:45:09.000Z
|
2022-01-20T03:40:52.000Z
|
# -*- coding: utf-8 -*-
import os
import ResponseHandler
import subprocess
import urllib
import dist_v2
import urllib.parse
singleton = None
try:
from subprocess import DEVNULL # Python 3.
except ImportError:
DEVNULL = open(os.devnull, 'wb')
class EE(ResponseHandler.ResponseHandler):
def handler(self,write_obj = None):
print("In derived class")
global singleton
if singleton is None:
singleton = dist_v2.BertEmbeds(os.getcwd(),0,'vocab.txt','bert_vectors.txt',True,True,'results/labels.txt','results/stats_dict.txt','preserve_1_2_grams.txt','glue_words.txt','bootstrap_entities.txt')
if (write_obj is not None):
param=urllib.parse.unquote(write_obj.path[1:])
print("Arg = ",param)
out = singleton.find_entities(param.split())
out = ' '.join(out)
print(out)
if (len(out) >= 1):
write_obj.wfile.write(str(out).encode())
else:
write_obj.wfile.write("0".encode())
def my_test():
cl = EE()
cl.handler()
#my_test()
| 20.5
| 202
| 0.692073
|
3a414a81176f00e43ddd3ec24ba895d47b628f89
| 8,788
|
py
|
Python
|
qa/rpc-tests/bip9-softforks.py
|
mirzaei-ce/core-bbbit
|
52b1ba0bd0fc847ba324bd39faf64f4eb40bfb72
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/bip9-softforks.py
|
mirzaei-ce/core-bbbit
|
52b1ba0bd0fc847ba324bd39faf64f4eb40bfb72
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/bip9-softforks.py
|
mirzaei-ce/core-bbbit
|
52b1ba0bd0fc847ba324bd39faf64f4eb40bfb72
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_NOP3, OP_DROP
from binascii import hexlify, unhexlify
import cStringIO
import time
import itertools
'''
This test is meant to exercise BIP forks
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 blocks and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not triggered (which triggers ACTIVE)
test that enforcement has triggered
'''
class BIP9SoftForksTest(ComparisonTestFramework):
def __init__(self):
self.num_nodes = 1
def setup_network(self):
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
self.test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = cStringIO.StringIO(unhexlify(rawtx))
tx.deserialize(f)
tx.nVersion = 2
return tx
def sign_transaction(self, node, tx):
signresult = node.signrawtransaction(hexlify(tx.serialize()))
tx = CTransaction()
f = cStringIO.StringIO(unhexlify(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in xrange(number):
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = version
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
return test_blocks
def get_bip9_status(self, key):
info = self.nodes[0].getblockchaininfo()
for row in info['bip9_softforks']:
if row['id'] == key:
return row
raise IndexError ('key:"%s" not found' % key)
def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature):
# generate some coins for later
self.coinbase_blocks = self.nodes[0].generate(2)
self.height = 3 # height of the next block to build
self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
# Test 1
# Advance from DEFINED to STARTED
test_blocks = self.generate_blocks(141, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# Test 2
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 1
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# Test 3
# 108 out of 144 signal bit 1 to achieve LOCKED_IN
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
# Test 4
# 143 more version-4 blocks (waiting period - 1); once LOCKED_IN, signalling no longer matters
test_blocks = self.generate_blocks(143, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
# Test 5
# Check that the new rule is enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = activated_version
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
yield TestInstance([[block, True]])
assert_equal(self.get_bip9_status(bipName)['status'], 'active')
# Test 6
# Check that the new sequence lock rules are enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = 5
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
# Restart all
stop_nodes(self.nodes)
wait_bbbitds()
shutil.rmtree(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.test.clear_all_connections()
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
def get_tests(self):
for test in itertools.chain(
self.test_BIP('csv', 536870913, self.sequence_lock_invalidate, self.donothing),
self.test_BIP('csv', 536870913, self.mtp_invalidate, self.donothing),
self.test_BIP('csv', 536870913, self.donothing, self.csv_invalidate)
):
yield test
def donothing(self, tx):
return
def csv_invalidate(self, tx):
'''Modify the signature in vin 0 of the tx to fail CSV
Prepends -1 CSV DROP in the scriptSig itself.
'''
tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_NOP3, OP_DROP] +
list(CScript(tx.vin[0].scriptSig)))
def sequence_lock_invalidate(self, tx):
'''Modify the nSequence to make it fail once the sequence lock rule is activated (high timespan)
'''
tx.vin[0].nSequence = 0x00FFFFFF
tx.nLockTime = 0
def mtp_invalidate(self, tx):
'''Modify the nLockTime to make it fail once the MTP rule is activated
'''
# Disable Sequence lock, Activate nLockTime
tx.vin[0].nSequence = 0x90FFFFFF
tx.nLockTime = self.last_block_time
if __name__ == '__main__':
BIP9SoftForksTest().main()
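# --- Illustrative sketch, not part of the original test ----------------------
# The magic number 536870913 passed to test_BIP above is the BIP9 version-bits
# encoding: the 0x20000000 top-bits prefix with the deployment's bit set. The
# bit position for the regtest 'csv' deployment is assumed to be bit 0 here.
VERSIONBITS_TOP_BITS = 0x20000000  # required prefix for BIP9 signalling
CSV_BIT = 0  # assumed bit position, for illustration only
signalling_version = VERSIONBITS_TOP_BITS | (1 << CSV_BIT)
assert signalling_version == 536870913 == 0x20000001
# Plain version-4 blocks (generate_blocks(n, 4)) lack the top bits, so they
# never count towards the signalling threshold.
assert (4 & VERSIONBITS_TOP_BITS) == 0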
| 39.945455
| 110
| 0.664087
|
969a2877083c40ecba51c28450913f934e1b140e
| 825
|
py
|
Python
|
pydungeon/utils/config.py
|
bayleaf1130/pydungeon
|
fbfa860fdc089d3774b5fa5056458018cff12fc3
|
[
"MIT"
] | 1
|
2019-04-18T15:12:20.000Z
|
2019-04-18T15:12:20.000Z
|
pydungeon/utils/config.py
|
bayleaf1130/pydungeon
|
fbfa860fdc089d3774b5fa5056458018cff12fc3
|
[
"MIT"
] | null | null | null |
pydungeon/utils/config.py
|
bayleaf1130/pydungeon
|
fbfa860fdc089d3774b5fa5056458018cff12fc3
|
[
"MIT"
] | 2
|
2020-03-03T15:02:02.000Z
|
2020-04-26T02:05:45.000Z
|
''' Configuration file manipulator '''
# Standard Imports
import yaml
# Third Party Imports
# None
# Local Imports
from pydungeon import logger
class Config(object):
def __init__(self, config_name):
self._config_name = config_name
def load(self):
with open(self._config_name, 'r') as stream:
try:
return yaml.load(stream, Loader=yaml.FullLoader)
except yaml.YAMLError as error:
logger.error(f'Could Not Load YAML File {self._config_name}', exc_info=True)
def save(self, data):
with open(self._config_name, 'w') as stream:
try:
return yaml.dump(data, stream)
except yaml.YAMLError as error:
logger.error(f'Could Not Save YAML File {self._config_name}', exc_info=True)
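# --- Usage sketch, not part of the original module ---------------------------
# Hypothetical round trip with the Config class above; the file name and the
# payload are placeholders chosen only for illustration.
def example_usage():
    cfg = Config('settings.yaml')
    cfg.save({'screen': {'width': 80, 'height': 24}})
    data = cfg.load()
    assert data['screen']['width'] == 80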
| 25
| 92
| 0.624242
|
1354915140d7d7e8eb6791bd17be5654d3bf7a9b
| 3,069
|
py
|
Python
|
qa/rpc-tests/invalidateblock.py
|
MCLXI/BKS
|
6653d0b106151045ac6e3ceb24aab55354ac2e83
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/invalidateblock.py
|
MCLXI/BKS
|
6653d0b106151045ac6e3ceb24aab55354ac2e83
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/invalidateblock.py
|
MCLXI/BKS
|
6653d0b106151045ac6e3ceb24aab55354ac2e83
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test InvalidateBlock code
#
from test_framework.test_framework import BKSTestFramework
from test_framework.util import *
class InvalidateTest(BKSTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
def run_test(self):
print("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
print("Mine 4 blocks on Node 0")
self.nodes[0].generate(4)
assert(self.nodes[0].getblockcount() == 4)
besthash = self.nodes[0].getbestblockhash()
print("Mine competing 6 blocks on Node 1")
self.nodes[1].generate(6)
assert(self.nodes[1].getblockcount() == 6)
print("Connect nodes to force a reorg")
connect_nodes_bi(self.nodes,0,1)
sync_blocks(self.nodes[0:2])
assert(self.nodes[0].getblockcount() == 6)
badhash = self.nodes[1].getblockhash(2)
print("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
self.nodes[0].invalidateblock(badhash)
newheight = self.nodes[0].getblockcount()
newhash = self.nodes[0].getbestblockhash()
if (newheight != 4 or newhash != besthash):
raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
print("\nMake sure we won't reorg to a lower work chain:")
connect_nodes_bi(self.nodes,1,2)
print("Sync node 2 to node 1 so both have 6 blocks")
sync_blocks(self.nodes[1:3])
assert(self.nodes[2].getblockcount() == 6)
print("Invalidate block 5 on node 1 so its tip is now at 4")
self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
assert(self.nodes[1].getblockcount() == 4)
print("Invalidate block 3 on node 2, so its tip is now 2")
self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
assert(self.nodes[2].getblockcount() == 2)
print("..and then mine a block")
self.nodes[2].generate(1)
print("Verify all nodes are at the right height")
time.sleep(5)
for i in range(3):
print(i,self.nodes[i].getblockcount())
assert(self.nodes[2].getblockcount() == 3)
assert(self.nodes[0].getblockcount() == 4)
node1height = self.nodes[1].getblockcount()
if node1height < 4:
raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
if __name__ == '__main__':
InvalidateTest().main()
| 39.857143
| 95
| 0.640925
|
12235fcf3088c328b817a6a0534f4530e9479b62
| 2,640
|
py
|
Python
|
ansible/environments/prod/dynamic_inventory.py
|
Otus-DevOps-2021-08/asm-n_infra
|
91785da1f83d74794964e405ab2470c046999bc8
|
[
"MIT"
] | null | null | null |
ansible/environments/prod/dynamic_inventory.py
|
Otus-DevOps-2021-08/asm-n_infra
|
91785da1f83d74794964e405ab2470c046999bc8
|
[
"MIT"
] | 1
|
2021-11-03T19:03:08.000Z
|
2021-11-03T19:03:08.000Z
|
ansible/environments/prod/dynamic_inventory.py
|
Otus-DevOps-2021-08/asm-n_infra
|
91785da1f83d74794964e405ab2470c046999bc8
|
[
"MIT"
] | 1
|
2021-09-22T22:36:01.000Z
|
2021-09-22T22:36:01.000Z
|
#!/usr/bin/python3
import os
import json
import subprocess
TERRAFORM_STATE_DIR = "../terraform/prod/"
DEFAULT_INVENTORY_FILEPATH = "./dynamic_inventory.json"
def ReadTerraformState(terraformDir: str) -> dict:
oldCwd = os.getcwd()
terraformState = dict()
try:
os.chdir(terraformDir)
terraformStateJson = subprocess.run(["terraform", "show", "-json"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).stdout
terraformState = json.loads(terraformStateJson)
except:
pass
finally:
os.chdir(oldCwd)
return terraformState
def IsEmptyTerraformState(terraformState: dict) -> bool:
return "values" not in terraformState
def LoadDefaultInventory() -> dict:
with open(DEFAULT_INVENTORY_FILEPATH, "r") as inv_file:
return json.load(inv_file)
def CreateInventory() -> dict:
inventory = dict()
inventory["_meta"] = dict()
inventory["_meta"]["hostvars"] = dict()
return inventory
def CreateInventoryGroup() -> dict:
inventoryGroup = dict()
inventoryGroup["hosts"] = list()
inventoryGroup["vars"] = dict()
return inventoryGroup
def AddHostToInventory(hostInfo: dict, inventory: dict):
hostname = hostInfo["hostname"]
group = hostInfo["group"]
ipAddress = hostInfo["ipAddress"]
inventory[group]["hosts"].append(hostname)
if hostname not in inventory["_meta"]["hostvars"]:
inventory["_meta"]["hostvars"][hostname] = dict()
inventory["_meta"]["hostvars"][hostname]["ansible_host"] = ipAddress
def main():
terraformState = ReadTerraformState(TERRAFORM_STATE_DIR)
#print(json.dumps(terraformState, indent=2))
inventory = CreateInventory()
if IsEmptyTerraformState(terraformState):
print(json.dumps(LoadDefaultInventory()))
return
for childModule in terraformState["values"]["root_module"]["child_modules"]:
for resource in childModule["resources"]:
if resource["type"] != "yandex_compute_instance":
continue
hostInfo = dict()
hostInfo["hostname"] = resource["values"]["name"]
hostInfo["group"] = resource["values"]["labels"]["group"]
hostInfo["ipAddress"] = resource["values"]["network_interface"][0]["nat_ip_address"]
if hostInfo["group"] not in inventory:
inventory[hostInfo["group"]] = CreateInventoryGroup()
AddHostToInventory(hostInfo, inventory)
inventory["all"] = dict()
inventory["all"]["vars"] = dict()
inventory["all"]["vars"]["db_internal_ip"] = terraformState["values"]["outputs"]["internal_ip_address_db"]["value"]
print(json.dumps(inventory))
if __name__ == "__main__":
main()
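# --- Output shape sketch, not part of the original script --------------------
# Invented example of the structure printed by main(), which is the layout
# Ansible expects from a dynamic inventory; names and addresses are made up.
example_inventory = {
    "_meta": {"hostvars": {"app-1": {"ansible_host": "203.0.113.10"}}},
    "app": {"hosts": ["app-1"], "vars": {}},
    "all": {"vars": {"db_internal_ip": "10.0.0.5"}},
}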
| 27.5
| 117
| 0.681439
|
d327f52987e3003554b412cd241d4f6a805307b0
| 430
|
py
|
Python
|
mysql_connection_pool/default_settings.py
|
maypimentel/mysql_connection_pool
|
a6d193ee62d24fbc9aec449dd3bc9bf00375927d
|
[
"MIT"
] | null | null | null |
mysql_connection_pool/default_settings.py
|
maypimentel/mysql_connection_pool
|
a6d193ee62d24fbc9aec449dd3bc9bf00375927d
|
[
"MIT"
] | null | null | null |
mysql_connection_pool/default_settings.py
|
maypimentel/mysql_connection_pool
|
a6d193ee62d24fbc9aec449dd3bc9bf00375927d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from decouple import config
POOL_NAME = config('POOL_NAME', default='mypool')
MYSQL_HOST = config('MYSQL_HOST', default='mysql')
MYSQL_PORT = config('MYSQL_PORT', default=3306, cast=int)
MYSQL_USER = config('MYSQL_USER', default='user')
MYSQL_PASSWORD = config('MYSQL_PASSWORD', default='password')
MYSQL_DATABASE = config('MYSQL_DATABASE', default='library')
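# --- Usage sketch, not part of the original module ---------------------------
# Hypothetical wiring of the settings above into a connection pool; assumes the
# mysql-connector-python package and a pool size of 5, neither defined here.
def example_pool_connection():
    from mysql.connector import pooling
    pool = pooling.MySQLConnectionPool(
        pool_name=POOL_NAME,
        pool_size=5,  # assumed size, illustration only
        host=MYSQL_HOST,
        port=MYSQL_PORT,
        user=MYSQL_USER,
        password=MYSQL_PASSWORD,
        database=MYSQL_DATABASE,
    )
    return pool.get_connection()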
| 39.090909
| 62
| 0.702326
|
22c56f76cafec045b3f7d48e3025ac499cf16186
| 2,459
|
py
|
Python
|
gcloud/apigw/views/node_callback.py
|
DomineCore/bk-sops
|
1461d7d80d979778cf9dafb62819c3234387fabc
|
[
"Apache-2.0"
] | 881
|
2019-03-25T02:45:42.000Z
|
2022-03-30T09:10:49.000Z
|
gcloud/apigw/views/node_callback.py
|
DomineCore/bk-sops
|
1461d7d80d979778cf9dafb62819c3234387fabc
|
[
"Apache-2.0"
] | 3,303
|
2019-03-25T04:18:03.000Z
|
2022-03-31T11:52:03.000Z
|
gcloud/apigw/views/node_callback.py
|
DomineCore/bk-sops
|
1461d7d80d979778cf9dafb62819c3234387fabc
|
[
"Apache-2.0"
] | 395
|
2019-03-25T02:53:36.000Z
|
2022-03-31T08:37:28.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import ujson as json
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from blueapps.account.decorators import login_exempt
from gcloud import err_code
from gcloud.apigw.decorators import mark_request_whether_is_trust
from gcloud.apigw.decorators import project_inject
from gcloud.taskflow3.models import TaskFlowInstance
from gcloud.apigw.views.utils import logger
from gcloud.iam_auth.intercept import iam_intercept
from gcloud.iam_auth.view_interceptors.apigw import TaskOperateInterceptor
from packages.bkoauth.decorators import apigw_required
@login_exempt
@csrf_exempt
@require_POST
@apigw_required
@mark_request_whether_is_trust
@project_inject
@iam_intercept(TaskOperateInterceptor())
def node_callback(request, task_id, project_id):
try:
params = json.loads(request.body)
except Exception:
return {"result": False, "message": "invalid json format", "code": err_code.REQUEST_PARAM_INVALID.code}
project = request.project
try:
task = TaskFlowInstance.objects.get(id=task_id, project_id=project.id)
except TaskFlowInstance.DoesNotExist:
message = (
"[API] node_callback task[id={task_id}] "
"of project[project_id={project_id}, biz_id{biz_id}] does not exist".format(
task_id=task_id, project_id=project.id, biz_id=project.bk_biz_id
)
)
logger.exception(message)
return {"result": False, "message": message, "code": err_code.CONTENT_NOT_EXIST.code}
node_id = params.get("node_id")
callback_data = params.get("callback_data")
version = params.get("version")
return task.callback(node_id, callback_data, version)
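# --- Request body sketch, not part of the original view ----------------------
# Placeholder example of the JSON body node_callback() parses; every value
# below is hypothetical.
example_body = {
    "node_id": "node_12345",  # task node to call back
    "callback_data": {"status": "ok"},  # payload handed to task.callback()
    "version": "v1",  # engine node version token
}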
| 39.66129
| 115
| 0.763318
|
c24892c6fc23265fedb1d934dfc26bdf471f58e3
| 2,313
|
py
|
Python
|
tests/unit_test/hashing/test_hash_generator.py
|
bayeshack2016/icon-service
|
36cab484d2e41548d7f2f74526f127ee3a4423fc
|
[
"Apache-2.0"
] | 52
|
2018-08-24T02:28:43.000Z
|
2021-07-06T04:44:22.000Z
|
tests/unit_test/hashing/test_hash_generator.py
|
bayeshack2016/icon-service
|
36cab484d2e41548d7f2f74526f127ee3a4423fc
|
[
"Apache-2.0"
] | 62
|
2018-09-17T06:59:16.000Z
|
2021-12-15T06:02:51.000Z
|
tests/unit_test/hashing/test_hash_generator.py
|
bayeshack2016/icon-service
|
36cab484d2e41548d7f2f74526f127ee3a4423fc
|
[
"Apache-2.0"
] | 35
|
2018-09-14T02:42:10.000Z
|
2022-02-05T10:34:46.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2019 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from iconservice.utils.hashing.hash_generator import HashGenerator
tx_data1 = {
"from": "hx930eb8a0e793253aad876503367c344fe8d4e282",
"to": "cx502c47463314f01e84b1b203c315180501eb2481",
"version": "0x3",
"nid": "0x1",
"stepLimit": "0x7a120",
"timestamp": "0x58d2588ab7288",
"nonce": "0x3059",
"dataType": "call",
"data": {
"method": "transfer",
"params": {
"_to": "hx1ada76577eac29b1e60efee22aac66af9f434036",
"_value": "0x2b5e3af16b1880000",
"_data": "20"
}
}
}
tx_hash1 = bytes.fromhex("c64119ddd6b0d5034cdcd8b903dadca34e3d79cfe3e00bb2bca8a9ec48e25978")
tx_data2 = {
"version": "0x3",
"from": "hx226e6e4340136836b36977bd76ca83746b8b071c",
"to": "cxb7ef03fea5fa9b2fe1f00f548d6da7ff2ddfebd5",
"stepLimit": "0x989680",
"timestamp": "0x58d25822f154c",
"nid": "0x1",
"nonce": "0x64",
"dataType": "call",
"data": {
"method": "transaction_RT",
"params": {
"_date": "20190708",
"_time": "0625",
"_div": "GOOGLE",
"_value": "[\"Earthquake\", \"Concacaf Gold Cup\", \"Concacaf Gold Cup\", \"Bella Thorne\", \"New York Knicks\"]"
}
}}
tx_hash2 = bytes.fromhex("77a6109d6be90643e54e4ebfbea86f966937cc7978c7105ffea9e852ef447ae3")
@pytest.mark.parametrize("tx_data,tx_hash", [(tx_data1, tx_hash1), (tx_data2, tx_hash2)])
def test_generate_hash(tx_data, tx_hash):
actual_tx_hash = HashGenerator.generate_hash(tx_data)
assert actual_tx_hash == tx_hash
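# --- Usage sketch, not part of the original tests ----------------------------
# The parametrized test above reduces to this direct comparison.
def example_direct_call():
    assert HashGenerator.generate_hash(tx_data1) == tx_hash1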
| 35.584615
| 129
| 0.629053
|
100ba592ddc854953ab0511026ead31c6f669b75
| 17,732
|
py
|
Python
|
octaviaclient/osc/v2/pool.py
|
bizflycloud/python-octaviaclient
|
bf2ab57bba88f2e0a73c2b76483b8b2249f3773e
|
[
"Apache-2.0"
] | 17
|
2017-03-24T10:16:08.000Z
|
2020-11-04T07:58:34.000Z
|
octaviaclient/osc/v2/pool.py
|
bizflycloud/python-octaviaclient
|
bf2ab57bba88f2e0a73c2b76483b8b2249f3773e
|
[
"Apache-2.0"
] | 2
|
2019-02-06T12:30:31.000Z
|
2021-07-16T06:25:40.000Z
|
octaviaclient/osc/v2/pool.py
|
bizflycloud/python-octaviaclient
|
bf2ab57bba88f2e0a73c2b76483b8b2249f3773e
|
[
"Apache-2.0"
] | 12
|
2017-04-19T00:28:16.000Z
|
2021-07-15T04:58:03.000Z
|
# Copyright 2017 GoDaddy
# Copyright 2019 Red Hat, Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Pool action implementation"""
from cliff import lister
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from osc_lib.utils import tags as _tag
from oslo_utils import uuidutils
from octaviaclient.osc.v2 import constants as const
from octaviaclient.osc.v2 import utils as v2_utils
PROTOCOL_CHOICES = ['TCP', 'HTTP', 'HTTPS', 'TERMINATED_HTTPS', 'PROXY',
'PROXYV2', 'UDP', 'SCTP']
ALGORITHM_CHOICES = ['SOURCE_IP', 'ROUND_ROBIN', 'LEAST_CONNECTIONS',
'SOURCE_IP_PORT']
class CreatePool(command.ShowOne):
"""Create a pool"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'--name',
metavar='<name>',
help="Set pool name."
)
parser.add_argument(
'--description',
metavar='<description>',
help="Set pool description."
)
parser.add_argument(
'--protocol',
metavar='{' + ','.join(PROTOCOL_CHOICES) + '}',
required=True,
choices=PROTOCOL_CHOICES,
type=lambda s: s.upper(), # case insensitive
help="Set the pool protocol."
)
parent_group = parser.add_mutually_exclusive_group(required=True)
parent_group.add_argument(
'--listener',
metavar='<listener>',
help="Listener to add the pool to (name or ID)."
)
parent_group.add_argument(
'--loadbalancer',
metavar='<load_balancer>',
help="Load balancer to add the pool to (name or ID)."
)
parser.add_argument(
'--session-persistence',
metavar='<session persistence>',
help="Set the session persistence for the listener (key=value)."
)
parser.add_argument(
'--lb-algorithm',
metavar='{' + ','.join(ALGORITHM_CHOICES) + '}',
required=True,
choices=ALGORITHM_CHOICES,
type=lambda s: s.upper(), # case insensitive
help="Load balancing algorithm to use."
)
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
'--enable',
action='store_true',
default=True,
help="Enable pool (default)."
)
admin_group.add_argument(
'--disable',
action='store_true',
default=None,
help="Disable pool."
)
parser.add_argument(
'--tls-container-ref',
metavar='<container-ref>',
help="The reference to the key manager service secrets container "
"containing the certificate and key for ``tls_enabled`` "
"pools to re-encrpt the traffic to backend member servers."
)
parser.add_argument(
'--ca-tls-container-ref',
metavar='<ca_tls_container_ref>',
help="The reference to the key manager service secrets container "
"containing the CA certificate for ``tls_enabled`` pools "
"to check the backend member servers certificates."
)
parser.add_argument(
'--crl-container-ref',
metavar='<crl_container_ref>',
help="The reference to the key manager service secrets container "
"containting the CA revocation list file for ``tls_enabled`` "
"pools to validate the backend member servers certificates."
)
tls_enable = parser.add_mutually_exclusive_group()
tls_enable.add_argument(
'--enable-tls',
action='store_true',
default=None,
help="Enable backend member re-encryption."
)
tls_enable.add_argument(
'--disable-tls',
action='store_true',
default=None,
help="Disable backend member re-encryption."
)
parser.add_argument(
'--wait',
action='store_true',
help='Wait for action to complete.',
)
parser.add_argument(
'--tls-ciphers',
metavar='<tls_ciphers>',
help="Set the TLS ciphers to be used by the pool "
"in OpenSSL cipher string format."
)
parser.add_argument(
'--tls-version',
dest='tls_versions',
metavar='<tls_versions>',
nargs='?',
action='append',
help="Set the TLS protocol version to be used "
"by the pool (can be set multiple times)."
)
parser.add_argument(
'--alpn-protocol',
dest='alpn_protocols',
metavar='<alpn_protocols>',
nargs='?',
action='append',
help="Set the ALPN protocol to be used "
"by the pool (can be set multiple times)."
)
_tag.add_tag_option_to_parser_for_create(
parser, 'pool')
return parser
def take_action(self, parsed_args):
rows = const.POOL_ROWS
attrs = v2_utils.get_pool_attrs(self.app.client_manager, parsed_args)
body = {"pool": attrs}
data = self.app.client_manager.load_balancer.pool_create(
json=body)
if parsed_args.wait:
v2_utils.wait_for_active(
status_f=(self.app.client_manager.load_balancer.
load_balancer_show),
res_id=data['pool']['loadbalancers'][0]['id']
)
data = {
'pool': (
self.app.client_manager.load_balancer.pool_show(
data['pool']['id']))
}
formatters = {'loadbalancers': v2_utils.format_list,
'members': v2_utils.format_list,
'listeners': v2_utils.format_list,
'session_persistence': v2_utils.format_hash,
'tags': v2_utils.format_list_flat}
return (rows, (utils.get_dict_properties(
data['pool'], rows, formatters=formatters,
mixed_case_fields=['enable-tls'])))
class DeletePool(command.Command):
"""Delete a pool"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'pool',
metavar="<pool>",
help="Pool to delete (name or ID)."
)
parser.add_argument(
'--wait',
action='store_true',
help='Wait for action to complete.',
)
return parser
def take_action(self, parsed_args):
attrs = v2_utils.get_pool_attrs(self.app.client_manager, parsed_args)
pool_id = attrs.pop('pool_id')
self.app.client_manager.load_balancer.pool_delete(
pool_id=pool_id)
if parsed_args.wait:
v2_utils.wait_for_delete(
status_f=self.app.client_manager.load_balancer.pool_show,
res_id=pool_id
)
class ListPool(lister.Lister):
"""List pools"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'--loadbalancer',
metavar='<loadbalancer>',
help="Filter by load balancer (name or ID).",
)
_tag.add_tag_filtering_option_to_parser(parser, 'pool')
return parser
def take_action(self, parsed_args):
columns = const.POOL_COLUMNS
attrs = v2_utils.get_pool_attrs(self.app.client_manager, parsed_args)
data = self.app.client_manager.load_balancer.pool_list(**attrs)
formatters = {'loadbalancers': v2_utils.format_list,
'members': v2_utils.format_list,
'listeners': v2_utils.format_list}
return (columns,
(utils.get_dict_properties(
s, columns, formatters=formatters) for s in data['pools']))
class ShowPool(command.ShowOne):
"""Show the details of a single pool"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'pool',
metavar='<pool>',
help='Name or UUID of the pool.'
)
return parser
def take_action(self, parsed_args):
rows = const.POOL_ROWS
data = None
if uuidutils.is_uuid_like(parsed_args.pool):
try:
data = self.app.client_manager.load_balancer.pool_show(
pool_id=parsed_args.pool)
except exceptions.NotFound:
pass
if data is None:
attrs = v2_utils.get_pool_attrs(self.app.client_manager,
parsed_args)
pool_id = attrs.pop('pool_id')
data = self.app.client_manager.load_balancer.pool_show(
pool_id=pool_id,
)
formatters = {'loadbalancers': v2_utils.format_list,
'members': v2_utils.format_list,
'listeners': v2_utils.format_list,
'session_persistence': v2_utils.format_hash,
'tags': v2_utils.format_list_flat}
return (rows, (utils.get_dict_properties(
data, rows, formatters=formatters,
mixed_case_fields=['enable-tls'])))
class SetPool(command.Command):
"""Update a pool"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'pool',
metavar="<pool>",
help="Pool to update (name or ID)."
)
parser.add_argument(
'--name',
metavar='<name>',
help="Set the name of the pool."
)
parser.add_argument(
'--description',
metavar='<description>',
help="Set the description of the pool."
)
parser.add_argument(
'--session-persistence',
metavar='<session_persistence>',
help="Set the session persistence for the listener (key=value)."
)
parser.add_argument(
'--lb-algorithm',
metavar='{' + ','.join(ALGORITHM_CHOICES) + '}',
choices=ALGORITHM_CHOICES,
type=lambda s: s.upper(), # case insensitive
help="Set the load balancing algorithm to use."
)
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
'--enable',
action='store_true',
default=None,
help="Enable pool."
)
admin_group.add_argument(
'--disable',
action='store_true',
default=None,
help="Disable pool."
)
parser.add_argument(
'--tls-container-ref',
metavar='<container-ref>',
help="The URI to the key manager service secrets container "
"containing the certificate and key for TERMINATED_TLS "
"pools to re-encrpt the traffic from TERMINATED_TLS "
"listener to backend servers."
)
parser.add_argument(
'--ca-tls-container-ref',
metavar='<ca_tls_container_ref>',
help="The URI to the key manager service secrets container "
"containing the CA certificate for TERMINATED_TLS listeners "
"to check the backend servers certificates in ssl traffic."
)
parser.add_argument(
'--crl-container-ref',
metavar='<crl_container_ref>',
help="The URI to the key manager service secrets container "
"containting the CA revocation list file for TERMINATED_TLS "
"listeners to valid the backend servers certificates in ssl "
"traffic."
)
tls_enable = parser.add_mutually_exclusive_group()
tls_enable.add_argument(
'--enable-tls',
action='store_true',
default=None,
help="Enable backend associated members re-encryption."
)
tls_enable.add_argument(
'--disable-tls',
action='store_true',
default=None,
help="disable backend associated members re-encryption."
)
parser.add_argument(
'--wait',
action='store_true',
help='Wait for action to complete.',
)
parser.add_argument(
'--tls-ciphers',
metavar='<tls_ciphers>',
help="Set the TLS ciphers to be used by the pool "
"in OpenSSL cipher string format."
)
parser.add_argument(
'--tls-version',
dest='tls_versions',
metavar='<tls_versions>',
nargs='?',
action='append',
help="Set the TLS protocol version to be used "
"by the pool (can be set multiple times)."
)
parser.add_argument(
'--alpn-protocol',
dest='alpn_protocols',
metavar='<alpn_protocols>',
nargs='?',
action='append',
help="Set the ALPN protocol to be used "
"by the pool (can be set multiple times)."
)
_tag.add_tag_option_to_parser_for_set(parser, 'pool')
return parser
def take_action(self, parsed_args):
attrs = v2_utils.get_pool_attrs(self.app.client_manager, parsed_args)
pool_id = attrs.pop('pool_id')
v2_utils.set_tags_for_set(
self.app.client_manager.load_balancer.pool_show,
pool_id, attrs, clear_tags=parsed_args.no_tag)
body = {'pool': attrs}
self.app.client_manager.load_balancer.pool_set(
pool_id, json=body)
if parsed_args.wait:
v2_utils.wait_for_active(
status_f=self.app.client_manager.load_balancer.pool_show,
res_id=pool_id
)
class UnsetPool(command.Command):
"""Clear pool settings"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
'pool',
metavar="<pool>",
help="Pool to modify (name or ID)."
)
parser.add_argument(
'--name',
action='store_true',
help="Clear the pool name."
)
parser.add_argument(
'--description',
action='store_true',
help="Clear the description of this pool."
)
parser.add_argument(
'--ca-tls-container-ref',
action='store_true',
help="Clear the certificate authority certificate reference on "
"this pool."
)
parser.add_argument(
'--crl-container-ref',
action='store_true',
help="Clear the certificate revocation list reference on "
"this pool."
)
parser.add_argument(
'--session-persistence',
action='store_true',
help="Disables session persistence on the pool."
)
parser.add_argument(
'--tls-container-ref',
action='store_true',
help="Clear the certificate reference for this pool."
)
parser.add_argument(
'--tls-versions',
action='store_true',
help='Clear all TLS versions from the pool.',
)
parser.add_argument(
'--tls-ciphers',
action='store_true',
help='Clear all TLS ciphers from the pool.',
)
parser.add_argument(
'--wait',
action='store_true',
help='Wait for action to complete.',
)
parser.add_argument(
'--alpn-protocols',
action='store_true',
help="Clear all ALPN protocols from the pool."
)
_tag.add_tag_option_to_parser_for_unset(parser, 'pool')
return parser
def take_action(self, parsed_args):
unset_args = v2_utils.get_unsets(parsed_args)
if not unset_args and not parsed_args.all_tag:
return
pool_id = v2_utils.get_resource_id(
self.app.client_manager.load_balancer.pool_list,
'pools', parsed_args.pool)
v2_utils.set_tags_for_unset(
self.app.client_manager.load_balancer.pool_show,
pool_id, unset_args, clear_tags=parsed_args.all_tag)
body = {'pool': unset_args}
self.app.client_manager.load_balancer.pool_set(
pool_id, json=body)
if parsed_args.wait:
v2_utils.wait_for_active(
status_f=self.app.client_manager.load_balancer.pool_show,
res_id=pool_id
)
| 33.775238
| 79
| 0.556226
|
a1397727634e6e5a7babbaa5feb3d4f3b1178e90
| 5,103
|
py
|
Python
|
scripts/Augmentation/bbox_aug.py
|
GSteinberg/mmdetection
|
2306846b7441860592119a9149e24c7707eaa7a7
|
[
"Apache-2.0"
] | null | null | null |
scripts/Augmentation/bbox_aug.py
|
GSteinberg/mmdetection
|
2306846b7441860592119a9149e24c7707eaa7a7
|
[
"Apache-2.0"
] | null | null | null |
scripts/Augmentation/bbox_aug.py
|
GSteinberg/mmdetection
|
2306846b7441860592119a9149e24c7707eaa7a7
|
[
"Apache-2.0"
] | null | null | null |
import albumentations as A
import cv2
import argparse
import os
import json
from progress.bar import IncrementalBar
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Augment images')
parser.add_argument('--input_dir', dest='input_dir',
help='directory to take input imgs and anns to augment',
type=str)
parser.add_argument('--output_dir', dest='output_dir',
help='directory to save augmented imgs and ann',
type=str)
args = parser.parse_args()
return args
def get_annot_for_img(img_name, annot):
# get image id associated with name
img_id = -1
for img in annot['images']:
if img['file_name'] == img_name:
img_id = img['id']
bboxes = []
cats = []
for ann in annot['annotations']:
if ann['image_id'] == img_id:
# get category name
cat_id = ann['category_id']
cat = annot['categories'][cat_id]
# append entry
bboxes.append(ann['bbox'])
cats.append(cat)
return bboxes, cats
def albument():
bbox_params = A.BboxParams(
format='coco',
min_area=600,
min_visibility=0.4,
label_fields=['class_categories']
)
# describe the transformations
transform_lst = [
# resize 10% smaller
A.Compose([
A.RandomScale(scale_limit=[-0.10, -0.10], p=1)],
bbox_params=bbox_params
),
# resize 15% smaller
A.Compose([
A.RandomScale(scale_limit=[-0.15, -0.15], p=1)],
bbox_params=bbox_params
),
# resize 20% smaller
A.Compose([
A.RandomScale(scale_limit=[-0.20, -0.20], p=1)],
bbox_params=bbox_params
)
]
return transform_lst
def augment(input_dir, output_dir):
# load coco annotations
ann_name = os.path.join(input_dir, "coco_annotation.json")
with open(ann_name) as json_file:
annot = json.load(json_file)
# for new images
new_annot = {'images':[], 'annotations':[], 'categories':[]}
img_id = 0
box_id = 0
# create transform objects
transform_lst = albument()
# for output viz
bar = IncrementalBar("Transforming images in " + input_dir, max=len(os.listdir(input_dir))*len(transform_lst))
# iterate through every image in input_dirs
for image in os.scandir(input_dir):
# only check images with correct extension
if not image.name.endswith(".tif"):
print('\n{} not being parsed - does not have .tif extension'.format(image.name))
bar.next()
continue
# load image
img = cv2.imread(image.path)
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# get corresponding annotation
bboxes, cats = get_annot_for_img(image.name, annot)
# do actual transformations
for tr_idx, tr in enumerate(transform_lst):
transformed = tr(image=img, bboxes=bboxes, class_categories=cats)
transformed_img = transformed['image']
transformed_bboxes = transformed['bboxes']
transformed_cats = transformed['class_categories']
# image output
output_img_name = "Aug{:02d}_{}".format(tr_idx, image.name)
cv2.imwrite(os.path.join(output_dir, output_img_name), transformed_img)
# reconstruct new coco ann
# image entry
img_height, img_width = transformed_img.shape[:2]
new_annot['images'].append({
'id': img_id,
'file_name': output_img_name,
'height': img_height,
'width': img_width
})
# annotation entry
for i, box in enumerate(transformed_bboxes):
box = [int(coord) for coord in box]
x1, y1, w, h = box  # COCO/albumentations boxes are [x_min, y_min, width, height]
x2 = x1 + w
y2 = y1 + h
area = h * w
seg = [[x1,y1 , x2,y1 , x2,y2 , x1,y2]]
new_annot['annotations'].append({
'image_id': img_id,
'id': box_id,
'category_id': transformed_cats[i]['id'],
'bbox': box,
'area': area,
'segmentation': seg,
'iscrowd': 0
})
box_id+=1
# categories entry
for cat in transformed_cats:
if cat not in new_annot['categories']:
new_annot['categories'].append(cat)
img_id+=1
bar.next()
bar.finish()
# annotation output
output_ann_name = os.path.join(output_dir, "coco_annotation.json")
with open(output_ann_name, 'w') as outfile:
json.dump(new_annot, outfile)
if __name__ == '__main__':
args = parse_args()
print("Called with args:")
print(args)
augment(args.input_dir, args.output_dir)
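# --- Usage sketch, not part of the original script ---------------------------
# Hypothetical direct call that bypasses argparse; the paths are placeholders
# and the input directory must hold .tif images plus a coco_annotation.json.
def example_run():
    augment(input_dir="data/train", output_dir="data/train_aug")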
| 29.49711
| 114
| 0.550461
|
556102740e629a611281bc0c94979a88674b991c
| 2,430
|
py
|
Python
|
t/compile3_test.py
|
lmr/Template-Toolkit-Python
|
3b1affc79c2f650b057956b0dbf6e0cb51515999
|
[
"Artistic-2.0"
] | 2
|
2021-05-07T08:50:06.000Z
|
2021-06-12T15:48:23.000Z
|
t/compile3_test.py
|
lmr/Template-Toolkit-Python
|
3b1affc79c2f650b057956b0dbf6e0cb51515999
|
[
"Artistic-2.0"
] | 1
|
2018-11-29T08:49:42.000Z
|
2018-11-29T08:49:42.000Z
|
t/compile3_test.py
|
lmr/Template-Toolkit-Python
|
3b1affc79c2f650b057956b0dbf6e0cb51515999
|
[
"Artistic-2.0"
] | null | null | null |
import os
import shutil
import time
from template import Template, TemplateException
from template.test import TestCase, main
def append_file(path, text):
time.sleep(2) # Ensure file time stamps are different.
fh = open(path, "a")
fh.write(text)
fh.close()
class CompileTest(TestCase):
def testCompile(self):
ttcfg = {"POST_CHOMP": 1,
"INCLUDE_PATH": "test/src",
"COMPILE_EXT": ".ttc"}
# Test process fails when EVAL_PYTHON not set.
try:
Template(ttcfg).process("evalpython", {})
self.fail("did not raise exception")
except TemplateException as e:
self.assertEqual("python", e.type())
self.assertEqual("EVAL_PYTHON not set", e.info())
# Ensure we can run compiled templates without loading parser.
ttcfg["EVAL_PYTHON"] = 1
Template(ttcfg).process("evalpython", {})
# Check that compiled template file exists and grab modification time.
path = "test/src/complex"
self.assertTrue(os.path.exists(path + ".ttc"))
mod = os.stat(path + ".ttc")[9]
# Save copy of the source file because we're going to try to break it.
shutil.copy(path, path + ".org")
# Sleep for a couple of seconds to ensure clock has ticked.
time.sleep(2)
# Append a harmless newline to the end of the source file to change
# its modification time.
append_file(path, "\n")
# Define "bust_it" to append a lone "[% TRY %]" onto the end of the
# source file to cause re-compilation to fail.
replace = {"bust_it": lambda: append_file(path, "[% TRY %]")}
self.Expect(DATA, ttcfg, replace)
self.assertTrue(os.stat(path)[9] > mod)
# Restore original source file.
shutil.copy(path + ".org", path)
DATA = r"""
-- test --
[% META author => 'albert' version => 'emc2' %]
[% INCLUDE complex %]
-- expect --
This is the header, title: Yet Another Template Test
This is a more complex file which includes some BLOCK definitions
This is the footer, author: albert, version: emc2
- 3 - 2 - 1
-- test --
[%# we want to break 'compile' to check that errors get reported -%]
[% CALL bust_it -%]
[% TRY; INCLUDE complex; CATCH; "$error"; END %]
-- expect --
file error - parse error - complex line 18: unexpected end of input
"""
if __name__ == '__main__':
main()
| 31.973684
| 78
| 0.619342
|
a11af4b5de0abfe0c9fdd6b51d84a084c93508ad
| 3,417
|
py
|
Python
|
tests.py
|
UiO-CS/tf-wavelets
|
11da39282acc70f38b16330c3473ac43fdfb4353
|
[
"MIT"
] | 45
|
2018-09-11T10:12:44.000Z
|
2022-01-12T04:21:08.000Z
|
tests.py
|
UiO-CS/tf-wavelets
|
11da39282acc70f38b16330c3473ac43fdfb4353
|
[
"MIT"
] | 7
|
2019-03-02T12:57:07.000Z
|
2021-07-24T04:16:30.000Z
|
tests.py
|
UiO-CS/tf-wavelets
|
11da39282acc70f38b16330c3473ac43fdfb4353
|
[
"MIT"
] | 12
|
2019-03-03T14:59:26.000Z
|
2022-01-14T19:59:08.000Z
|
import tfwavelets as tfw
import numpy as np
def check_orthonormality_1d(wavelet, tol=1e-5, N=8):
matrix = np.zeros((N, N))
for i in range(N):
unit = np.zeros(N)
unit[i] = 1
matrix[:, i] = tfw.wrappers.dwt1d(unit, wavelet)
error1 = np.mean(np.abs(matrix.T @ matrix - np.eye(N)))
error2 = np.mean(np.abs(matrix @ matrix.T - np.eye(N)))
assert error1 < tol, "Mean error: %g" % error1
assert error2 < tol, "Mean error: %g" % error2
def check_linearity_1d(wavelet, tol=1e-5, N=256):
x1 = np.random.random(N)
x2 = np.random.random(N)
c1 = np.random.random(1)
c2 = np.random.random(1)
test1 = tfw.wrappers.dwt1d(c1 * x1 + c2 * x2, wavelet)  # use the wavelet under test, not the default
test2 = c1 * tfw.wrappers.dwt1d(x1, wavelet) + c2 * tfw.wrappers.dwt1d(x2, wavelet)
error = np.mean(np.abs(test1 - test2))
assert error < tol, "Mean error: %g" % error
def check_linearity_2d(wavelet, tol=1e-5, N=256):
x1 = np.random.random((N, N))
x2 = np.random.random((N, N))
c1 = np.random.random(1)
c2 = np.random.random(1)
test1 = tfw.wrappers.dwt2d(c1 * x1 + c2 * x2, wavelet)  # use the wavelet under test, not the default
test2 = c1 * tfw.wrappers.dwt2d(x1, wavelet) + c2 * tfw.wrappers.dwt2d(x2, wavelet)
error = np.mean(np.abs(test1 - test2))
assert error < tol, "Mean error: %g" % error
def check_inverse_1d(wavelet, levels=1, tol=1e-4, N=256):
signal = np.random.random(N)
reconstructed = tfw.wrappers.idwt1d(
tfw.wrappers.dwt1d(signal, wavelet, levels=levels),  # forward transform with the wavelet under test
wavelet,
levels=levels
)
error = np.mean(np.abs(signal - reconstructed))
assert error < tol, "Mean error: %g" % error
def check_inverse_2d(wavelet, levels=1, tol=1e-4, N=256):
signal = np.random.random((N, N))
reconstructed = tfw.wrappers.idwt2d(
tfw.wrappers.dwt2d(signal, wavelet, levels=levels),  # forward transform with the wavelet under test
wavelet,
levels=levels
)
error = np.mean(np.abs(signal - reconstructed))
assert error < tol, "Mean error: %g" % error
def test_ortho_haar():
check_orthonormality_1d("haar")
def test_linear_haar_1d():
check_linearity_1d("haar")
def test_linear_haar_2d():
check_linearity_2d("haar")
def test_inverse_haar_1d():
check_inverse_1d("haar", levels=1)
def test_inverse_haar_1d_level2():
check_inverse_1d("haar", levels=2)
def test_inverse_haar_2d():
check_inverse_2d("haar", levels=2)
def test_ortho_db2():
check_orthonormality_1d("db2")
def test_linear_db2_2d():
check_linearity_2d("db2")
def test_linear_db2_1d():
check_linearity_1d("db2")
def test_inverse_db2_1d():
check_inverse_1d("db2", levels=1)
def test_inverse_db2_1d_level2():
check_inverse_1d("db2", levels=2)
def test_inverse_db2_2d():
check_inverse_2d("db2", levels=2)
def test_ortho_db3():
check_orthonormality_1d("db3")
def test_linear_db3_2d():
check_linearity_2d("db3")
def test_linear_db3_1d():
check_linearity_1d("db3")
def test_inverse_db3_1d():
check_inverse_1d("db3", levels=1)
def test_inverse_db3_1d_level2():
check_inverse_1d("db3", levels=2)
def test_inverse_db3_2d():
check_inverse_2d("db3", levels=2)
def test_ortho_db4():
check_orthonormality_1d("db4")
def test_linear_db4_2d():
check_linearity_2d("db4")
def test_linear_db4_1d():
check_linearity_1d("db4")
def test_inverse_db4_1d():
check_inverse_1d("db4", levels=1)
def test_inverse_db4_1d_level2():
check_inverse_1d("db4", levels=2)
def test_inverse_db4_2d():
check_inverse_2d("db4", levels=2)
| 23.565517
| 69
| 0.671642
|
2237660edc2b315c6d1a8e947bbdd55091f794e0
| 2,765
|
py
|
Python
|
src/ros_vision_interaction/examples/example_interaction.py
|
HaaaO/vision-project
|
72256af07834195cfe52ac344aee5effcd0da978
|
[
"MIT"
] | null | null | null |
src/ros_vision_interaction/examples/example_interaction.py
|
HaaaO/vision-project
|
72256af07834195cfe52ac344aee5effcd0da978
|
[
"MIT"
] | 21
|
2020-09-09T18:55:58.000Z
|
2021-07-26T19:42:46.000Z
|
src/ros_vision_interaction/examples/example_interaction.py
|
HaaaO/vision-project
|
72256af07834195cfe52ac344aee5effcd0da978
|
[
"MIT"
] | 6
|
2020-12-20T17:19:29.000Z
|
2021-08-09T22:33:04.000Z
|
#!/usr/bin/env python
import datetime
import logging
import os
import random
import rospy
import schedule
from interaction_engine.cordial_interface import CordialInterface
from interaction_engine.database import Database
from interaction_engine.int_engine import InteractionEngine
from interaction_engine.message import Message
from interaction_engine.state import State
from interaction_engine.state_collection import StateCollection
from cordial_msgs.msg import AskOnGuiAction, AskOnGuiGoal, MouseEvent
from std_msgs.msg import Bool
logging.basicConfig(level=logging.INFO)
class Keys:
GREETING = "greeting"
HOW_ARE_YOU = "how are you"
TAKE_CARE = "take care"
WHEN_TO_TALK = "when to talk"
greeting = State(
name=Keys.GREETING,
message_type=Message.Type.MULTIPLE_CHOICE_ONE_COLUMN,
content="Hello!",
next_states=[Keys.HOW_ARE_YOU],
transitions={"Hello!": Keys.HOW_ARE_YOU, "Hi!": Keys.HOW_ARE_YOU}
)
how_are_you = State(
name=Keys.HOW_ARE_YOU,
message_type=Message.Type.MULTIPLE_CHOICE_ONE_COLUMN,
content="How are you doing today?",
next_states=[Keys.TAKE_CARE],
transitions={
"Pretty good.": Keys.TAKE_CARE,
"Great!": Keys.TAKE_CARE,
"Not too good.": Keys.TAKE_CARE
}
)
take_care = State(
name=Keys.TAKE_CARE,
message_type=Message.Type.MULTIPLE_CHOICE_ONE_COLUMN,
content="Don't forget to drink enough water and get enough sleep!",
next_states=[Keys.WHEN_TO_TALK],
transitions={"Next": Keys.WHEN_TO_TALK}
)
when_to_talk = State(
name=Keys.WHEN_TO_TALK,
message_type=Message.Type.TIME_ENTRY,
content="When would you like to talk tomorrow?",
next_states=["exit"],
args=["15", "15:15"]
)
state_collection = StateCollection(
name="example interaction",
init_state_name=Keys.WHEN_TO_TALK,
states=[
greeting,
how_are_you,
take_care,
when_to_talk
]
)
cwd = os.getcwd()
database_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"example_interaction_database.json"
)
default_database_keys = [
Keys.GREETING,
Keys.HOW_ARE_YOU,
Keys.TAKE_CARE,
Keys.WHEN_TO_TALK
]
database_manager = Database(
database_file_name=database_file,
default_database_keys=default_database_keys
)
interface = CordialInterface(
action_name="cordial/say_and_ask_on_gui",
seconds_until_timeout=None
)
interaction_engine = InteractionEngine(
state_collection=state_collection,
database_manager=database_manager,
interface=interface
)
if __name__ == "__main__":
while not rospy.is_shutdown():
rospy.logdebug("Scheduled interaction running")
interaction_engine.run()
rospy.sleep(5)
| 24.469027
| 71
| 0.725859
|
ed60f58b3baaeebe8fc9b86b0abd0c6cab8a21fa
| 1,726
|
py
|
Python
|
from_cpython/Lib/test/test_readline.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
from_cpython/Lib/test/test_readline.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
from_cpython/Lib/test/test_readline.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
# expected: fail
"""
Very minimal unittests for parts of the readline module.
These tests were added to check that the libedit emulation on OSX and
the "real" readline have the same interface for history manipulation. That's
why the tests cover only a small subset of the interface.
"""
import unittest
from test.test_support import run_unittest, import_module
# Skip tests if there is no readline module
readline = import_module('readline')
class TestHistoryManipulation (unittest.TestCase):
@unittest.skipIf(not hasattr(readline, 'clear_history'),
"The history update test cannot be run because the "
"clear_history method is not available.")
def testHistoryUpdates(self):
readline.clear_history()
readline.add_history("first line")
readline.add_history("second line")
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "first line")
self.assertEqual(readline.get_history_item(2), "second line")
readline.replace_history_item(0, "replaced line")
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "replaced line")
self.assertEqual(readline.get_history_item(2), "second line")
self.assertEqual(readline.get_current_history_length(), 2)
readline.remove_history_item(0)
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "second line")
self.assertEqual(readline.get_current_history_length(), 1)
def test_main():
run_unittest(TestHistoryManipulation)
if __name__ == "__main__":
test_main()
| 35.22449
| 76
| 0.720742
|
b08e38fa3f85e4cf4a94a1cc281d52f1681920d1
| 1,358
|
py
|
Python
|
airbyte-integrations/bases/base-normalization/normalization/__init__.py
|
golf-canada/airbyte
|
a81b183a6b62d6bb4256347aaf39a3ada061aabe
|
[
"MIT"
] | null | null | null |
airbyte-integrations/bases/base-normalization/normalization/__init__.py
|
golf-canada/airbyte
|
a81b183a6b62d6bb4256347aaf39a3ada061aabe
|
[
"MIT"
] | null | null | null |
airbyte-integrations/bases/base-normalization/normalization/__init__.py
|
golf-canada/airbyte
|
a81b183a6b62d6bb4256347aaf39a3ada061aabe
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2020 Airbyte
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from normalization.destination_type import DestinationType
from normalization.transform_catalog.transform import TransformCatalog
from normalization.transform_config.transform import TransformConfig
__all__ = [
"DestinationType",
"TransformCatalog",
"TransformConfig",
]
| 38.8
| 78
| 0.804124
|
32b972f75aa109d86605a5334fa0fc49808e9cea
| 6,319
|
py
|
Python
|
modules/ui_splash_screen.py
|
dho-IOD/futu_algo
|
f4bdf5edcc261efbd252e9e9c53a89563b0ed68f
|
[
"Apache-2.0"
] | 66
|
2020-12-29T15:03:21.000Z
|
2022-03-29T01:24:59.000Z
|
modules/ui_splash_screen.py
|
dho-IOD/futu_algo
|
f4bdf5edcc261efbd252e9e9c53a89563b0ed68f
|
[
"Apache-2.0"
] | 22
|
2020-12-29T16:57:03.000Z
|
2022-03-01T08:23:37.000Z
|
modules/ui_splash_screen.py
|
dho-IOD/futu_algo
|
f4bdf5edcc261efbd252e9e9c53a89563b0ed68f
|
[
"Apache-2.0"
] | 30
|
2021-01-07T07:33:22.000Z
|
2022-03-17T11:37:02.000Z
|
# -*- coding: utf-8 -*-
# Futu Algo: Algorithmic High-Frequency Trading Framework
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Written by Bill Chan <billpwchan@hotmail.com>, 2021
# Copyright (c) billpwchan - All Rights Reserved
################################################################################
## Form generated from reading UI file 'splash_screenWmiKOh.ui'
##
## Created by: Qt User Interface Compiler version 6.0.3
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import *
from PySide6.QtGui import *
from PySide6.QtWidgets import *
class Ui_SplashScreen(object):
def setupUi(self, SplashScreen):
if not SplashScreen.objectName():
SplashScreen.setObjectName(u"SplashScreen")
SplashScreen.resize(300, 300)
SplashScreen.setMinimumSize(QSize(300, 300))
SplashScreen.setMaximumSize(QSize(300, 300))
self.centralwidget = QWidget(SplashScreen)
self.centralwidget.setObjectName(u"centralwidget")
self.verticalLayout = QVBoxLayout(self.centralwidget)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName(u"verticalLayout")
self.verticalLayout.setContentsMargins(10, 10, 10, 10)
self.container = QFrame(self.centralwidget)
self.container.setObjectName(u"container")
self.container.setFrameShape(QFrame.NoFrame)
self.container.setFrameShadow(QFrame.Raised)
self.verticalLayout_2 = QVBoxLayout(self.container)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName(u"verticalLayout_2")
self.verticalLayout_2.setContentsMargins(20, 20, 20, 20)
self.circle_bg = QFrame(self.container)
self.circle_bg.setObjectName(u"circle_bg")
self.circle_bg.setStyleSheet(u"QFrame {\n"
" background-color: #282a36;\n"
" color: #f8f8f2;\n"
" border-radius: 120px;\n"
" font: 9pt \"Segoe UI\";\n"
"}")
self.circle_bg.setFrameShape(QFrame.NoFrame)
self.circle_bg.setFrameShadow(QFrame.Raised)
self.verticalLayout_3 = QVBoxLayout(self.circle_bg)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setObjectName(u"verticalLayout_3")
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.texts = QFrame(self.circle_bg)
self.texts.setObjectName(u"texts")
self.texts.setMaximumSize(QSize(16777215, 200))
self.texts.setStyleSheet(u"background: none;")
self.texts.setFrameShape(QFrame.NoFrame)
self.texts.setFrameShadow(QFrame.Raised)
self.verticalLayout_4 = QVBoxLayout(self.texts)
self.verticalLayout_4.setObjectName(u"verticalLayout_4")
self.verticalLayout_4.setContentsMargins(-1, 25, -1, -1)
self.gridLayout = QGridLayout()
self.gridLayout.setObjectName(u"gridLayout")
self.frame = QFrame(self.texts)
self.frame.setObjectName(u"frame")
self.frame.setFrameShape(QFrame.NoFrame)
self.frame.setFrameShadow(QFrame.Raised)
self.verticalLayout_5 = QVBoxLayout(self.frame)
self.verticalLayout_5.setSpacing(0)
self.verticalLayout_5.setObjectName(u"verticalLayout_5")
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.version = QLabel(self.frame)
self.version.setObjectName(u"version")
self.version.setMinimumSize(QSize(100, 24))
self.version.setMaximumSize(QSize(100, 24))
self.version.setStyleSheet(u"QLabel{\n"
" color: rgb(151, 159, 200);\n"
" background-color: rgb(68, 71, 90);\n"
" border-radius: 12px;\n"
"}")
self.version.setAlignment(Qt.AlignCenter)
self.verticalLayout_5.addWidget(self.version, 0, Qt.AlignHCenter)
self.gridLayout.addWidget(self.frame, 2, 0, 1, 1)
self.title = QLabel(self.texts)
self.title.setObjectName(u"title")
self.title.setMinimumSize(QSize(0, 30))
self.title.setAlignment(Qt.AlignCenter)
self.gridLayout.addWidget(self.title, 0, 0, 1, 1)
self.loading = QLabel(self.texts)
self.loading.setObjectName(u"loading")
self.loading.setAlignment(Qt.AlignCenter)
self.gridLayout.addWidget(self.loading, 3, 0, 1, 1)
self.empty = QFrame(self.texts)
self.empty.setObjectName(u"empty")
self.empty.setMinimumSize(QSize(0, 90))
self.empty.setFrameShape(QFrame.NoFrame)
self.empty.setFrameShadow(QFrame.Raised)
self.gridLayout.addWidget(self.empty, 1, 0, 1, 1)
self.verticalLayout_4.addLayout(self.gridLayout)
self.verticalLayout_3.addWidget(self.texts)
self.verticalLayout_2.addWidget(self.circle_bg)
self.verticalLayout.addWidget(self.container)
SplashScreen.setCentralWidget(self.centralwidget)
self.retranslateUi(SplashScreen)
QMetaObject.connectSlotsByName(SplashScreen)
# setupUi
def retranslateUi(self, SplashScreen):
SplashScreen.setWindowTitle(QCoreApplication.translate("SplashScreen", u"Loading...", None))
self.version.setText(QCoreApplication.translate("SplashScreen", u"By: Bill Chan", None))
self.title.setText(QCoreApplication.translate("SplashScreen", u"FUTU ALGO - TRADING", None))
self.loading.setText(QCoreApplication.translate("SplashScreen", u"loading...", None))
# retranslateUi
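A minimal usage sketch for a Qt Designer-generated Ui class like the one above. It assumes PySide6 and a generated class named Ui_SplashScreen in a module ui_splash_screen; both names are assumptions, since neither the class nor the module name is visible in this excerpt.

# Sketch only: Ui_SplashScreen and ui_splash_screen are assumed names for the
# generated class and module above, not confirmed by the excerpt.
import sys

from PySide6.QtWidgets import QApplication, QMainWindow

from ui_splash_screen import Ui_SplashScreen


class SplashWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        # The generated setupUi() builds the widget tree defined above onto this window.
        self.ui = Ui_SplashScreen()
        self.ui.setupUi(self)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = SplashWindow()
    window.show()
    sys.exit(app.exec())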
| 43.280822 | 100 | 0.647571 |
e893a13583802296359e7c2d134a28389c736089 | 1,369 | py | Python | python/scrapy/MyScrapers/MyScrapers/spiders/quotes.py | RitamDey/My-Simple-Programs | 147b455a6a40c371ec894ce979e8a61d242e03bd | ["Unlicense"] | 2 | 2016-10-14T16:58:05.000Z | 2017-05-04T04:59:18.000Z | python/scrapy/MyScrapers/MyScrapers/spiders/quotes.py | GreenJoey/My-Simple-Programs | 147b455a6a40c371ec894ce979e8a61d242e03bd | ["Unlicense"] | null | null | null | python/scrapy/MyScrapers/MyScrapers/spiders/quotes.py | GreenJoey/My-Simple-Programs | 147b455a6a40c371ec894ce979e8a61d242e03bd | ["Unlicense"] | null | null | null |
# -*- coding: utf-8 -*-
from scrapy import Spider, Request

from ..items import QuotesItem


class QuotesSpider(Spider):
    """Scrape quotes, authors and tags from quotes.toscrape.com."""

    name = 'quotes'
    allowed_domains = ['quotes.toscrape.com']
    start_urls = ['http://quotes.toscrape.com/']

    def parse(self, response):
        quotes_selector = ('//div[@class="container"]/div[@class="row"]'
                           '/div[@class="col-md-8"]/div[@class="quote"]')
        quote_selector = './span[@class="text"]/text()'
        author_selector = './span[2]/small[@class="author"]/text()'
        # author_url_selector = './span[2]/a/@href'
        tags_selector = './div[@class="tags"]/a[@class="tag"]'

        for quote in response.xpath(quotes_selector):
            # Build a fresh item per quote instead of reusing one instance.
            quote_item = QuotesItem()
            # Slice off the surrounding curly quotation marks around the text.
            quote_item['quote'] = quote.xpath(quote_selector).extract_first()[1:-1]
            quote_item['author'] = quote.xpath(author_selector).extract_first()
            quote_item['tags'] = [
                tag.xpath('./text()').extract_first().capitalize()
                for tag in quote.xpath(tags_selector)
            ]
            yield quote_item

        # Follow the pagination link; the last page has no "next" entry.
        next_link = ('//div[@class="container"]/div[@class="row"]'
                     '/div[@class="col-md-8"]/nav/ul[@class="pager"]'
                     '/li[@class="next"]/a/@href')
        next_page = response.xpath(next_link).extract_first()
        if next_page:
            yield Request(response.urljoin(next_page), callback=self.parse)
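A hedged sketch of the pieces this spider assumes but does not show: the QuotesItem definition (field names inferred from the spider's usage, presumably living in MyScrapers/items.py) and one way to drive the crawl programmatically with Scrapy's CrawlerProcess. From inside the project the spider is normally run with `scrapy crawl quotes -o quotes.json`.

# Sketch only: the field names and the feed filename are inferred, not taken
# from the project's actual items.py or settings.
import scrapy
from scrapy.crawler import CrawlerProcess


class QuotesItem(scrapy.Item):
    quote = scrapy.Field()
    author = scrapy.Field()
    tags = scrapy.Field()


if __name__ == "__main__":
    # Write scraped items to quotes.json via Scrapy's feed exports.
    process = CrawlerProcess(settings={"FEEDS": {"quotes.json": {"format": "json"}}})
    process.crawl(QuotesSpider)  # the spider class defined above
    process.start()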
| 35.102564 | 114 | 0.575603 |
172fa9b9de4016d20009a8277519f349c210dbaa | 1,624 | py | Python | neutron/plugins/embrane/plugins/embrane_ml2_plugin.py | gampel/neutron | 51a6260266dc59c066072ca890ad9c40b1aad6cf | ["Apache-2.0"] | 10 | 2015-09-22T10:22:53.000Z | 2016-02-25T06:12:05.000Z | neutron/plugins/embrane/plugins/embrane_ml2_plugin.py | gampel/neutron | 51a6260266dc59c066072ca890ad9c40b1aad6cf | ["Apache-2.0"] | 12 | 2015-01-08T18:30:45.000Z | 2015-03-13T21:04:15.000Z | neutron/plugins/embrane/plugins/embrane_ml2_plugin.py | gampel/neutron | 51a6260266dc59c066072ca890ad9c40b1aad6cf | ["Apache-2.0"] | 7 | 2015-02-05T10:23:52.000Z | 2019-05-18T17:11:19.000Z |
# Copyright 2014 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.db import extraroute_db
from neutron.db import l3_dvr_db
from neutron.db import l3_gwmode_db
from neutron.plugins.embrane import base_plugin as base
from neutron.plugins.embrane.l2base.ml2 import ml2_support
from neutron.plugins.ml2 import plugin as l2


class EmbraneMl2Plugin(base.EmbranePlugin, l2.Ml2Plugin,
l3_dvr_db.L3_NAT_with_dvr_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
extraroute_db.ExtraRoute_db_mixin):
    '''EmbraneMl2Plugin.

    This plugin uses Modular Layer 2 plugin for providing L2 networks
    and the base EmbranePlugin for L3.
    '''

    _plugin_support = ml2_support.Ml2Support()

    def __init__(self):
'''First run plugin specific initialization, then Embrane's.'''
self._supported_extension_aliases.extend(["router", "extraroute",
"ext-gw-mode"])
l2.Ml2Plugin.__init__(self)
self._run_embrane_config()
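The plugin above composes its behaviour from an L2 plugin plus several L3 mixins, extending a shared _supported_extension_aliases list before delegating to the base __init__. A small framework-free Python sketch of that composition pattern follows; none of the class or method names below are Neutron APIs, they only mirror the shape of the code above.

# Illustrative sketch of the mixin-composition pattern; not Neutron code.
class L2Plugin:
    # Class-level list shared by subclasses, like the ML2 plugin's alias list.
    _supported_extension_aliases = ["provider", "binding"]

    def __init__(self):
        print("L2 plugin initialised with:", self._supported_extension_aliases)


class RouterMixin:
    def add_router(self, name):
        return {"router": name}


class ComposedPlugin(L2Plugin, RouterMixin):
    def __init__(self):
        # Extend the alias list first, then run the base initialisation,
        # mirroring EmbraneMl2Plugin.__init__ above.
        self._supported_extension_aliases.extend(["router", "extraroute"])
        L2Plugin.__init__(self)
        self._run_vendor_config()

    def _run_vendor_config(self):
        print("vendor-specific configuration complete")


if __name__ == "__main__":
    plugin = ComposedPlugin()
    print(plugin.add_router("r1"))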
| 38.666667 | 78 | 0.696429 |