# ==== deploy.py (NASA-PDS/planetarydata.org, Apache-2.0) ====
#!/usr/bin/env python
# encoding: utf-8
# Copyright 2014 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
#
# deploy.py - Deploy the IPDA site into operations
import argparse, sys, logging, os, os.path, re, subprocess, pwd, urllib2, contextlib, tempfile, tarfile, string, random
import shutil
reload(sys)
sys.setdefaultencoding('utf-8')
_bufsiz = 512
_buildoutCache = u'/apps/ipdasite/buildout'
_setupToolsVersion = u'23.0.0'
_virtualEnvVersion = u'15.0.2'
_buildoutVersion = u'2.5.2'
_virtualEnvURL = u'https://pypi.python.org/packages/source/v/virtualenv/virtualenv-{}.tar.gz'.format(_virtualEnvVersion)
_cHeader = '''#ifdef __cplusplus
extern "C"
#endif
'''
class DeploymentError(Exception):
pass
def _setupLogging():
logging.basicConfig(level=logging.DEBUG, format=u'%(asctime)s %(levelname)-8s %(message)s',
filename=u'deploy.log', filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter(u'%(message)s'))
logging.getLogger('').addHandler(console)
logging.debug(u'Logging configured')
def _getArgParser():
p = argparse.ArgumentParser(
description=u'Deploys the IPDA web site and services in this directory. If a previous installation'
u' exists, give its path on the command-line, and its content will be migrated over. Otherwise,'
u' an empty, content-free website will be deployed.',
epilog=u'For more information or help, contact sean.kelly@jpl.nasa.gov.'
)
p.add_argument(u'existing', nargs='?', help=u'Path to existing IPDA website installation for content')
p.add_argument(u'--buildout-cache', metavar=u'PATH', default=_buildoutCache,
help=u'Use cached downloads/eggs/extends in %(metavar)s instead of %(default)s')
p.add_argument(u'--libdir', metavar=u'PATH', action='append',
help=u'Add %(metavar)s to the list of dirs to check for libraries; repeat this option as needed')
g = p.add_argument_group(u'Internet', u'Hostnames and ports.')
g.add_argument(u'--public-hostname', metavar=u'HOSTNAME',
help=u'Override the default hostname "%(default)s" with %(metavar)s', default=u'planetarydata.org')
g.add_argument(u'--http-port', metavar=u'PORTNUM', type=int, default=80,
help=u'Override the default HTTP port %(default)d with %(metavar)s')
g.add_argument(u'--https-port', metavar=u'PORTNUM', type=int, default=443,
help=u'Override the default HTTPS port %(default)d with %(metavar)s')
g = p.add_argument_group(u'Usernames & Passwords', u'Random passwords will be generated unless specified below.')
g.add_argument(u'--supervisor-user', metavar=u'USERNAME', default=u'supervisor-admin',
help=u'Override the Supervisor username "%(default)s" with %(metavar)s')
    g.add_argument(u'--supervisor-password', metavar=u'PASSWORD', help=u'Use %(metavar)s instead of a random password')
g.add_argument(u'--tomcat-user', metavar=u'USERNAME', default=u'tomcat-admin',
help=u'Override the Tomcat username "%(default)s" with %(metavar)s')
g.add_argument(u'--tomcat-password', metavar=u'PASSWORD', help=u'Use %(metavar)s instead of a random password')
g.add_argument(u'--zope-user', metavar=u'USERNAME', default=u'zope-admin',
help=u'Override the Zope app server username "%(default)s" with %(metavar)s')
g.add_argument(u'--zope-password', metavar=u'PASSWORD', help=u'Use %(metavar)s instead of a random password')
g = p.add_argument_group(u'Executables', u'These will be searched on the executable PATH unless overridden.')
g.add_argument(u'--with-java', metavar=u'PATH', help=u'Use the Java language at %(metavar)s')
g.add_argument(u'--with-lynx', metavar=u'PATH', help=u'Use the lynx plain-text browser at %(metavar)s')
g.add_argument(u'--with-nginx', metavar=u'PATH', help=u'Use the nginx web server at %(metavar)s')
g.add_argument(u'--with-pdftohtml', metavar=u'PATH', help=u'Use the pdftohtml converter at %(metavar)s')
g.add_argument(u'--with-varnishd', metavar=u'PATH', help=u'Use the varnishd cache at %(metavar)s')
g.add_argument(u'--with-wvHtml', metavar=u'PATH', help=u'Use the wvHtml Word converter at %(metavar)s')
g.add_argument(u'--with-python', metavar=u'PATH', help=u'Use the Python language at %(metavar)s',
default=sys.executable)
return p
def _findExecutable(name, location=None):
logging.debug(u'Looking for executable "%s"%s', name,
u' (Possibly at {})'.format(location) if location is not None else u'')
if location:
if not os.path.isfile(location):
raise DeploymentError(u'The "{}" at "{}" is not a file'.format(name, location))
if not os.access(location, os.X_OK):
raise DeploymentError(u'The "{}" at "{}" is not executable'.format(name, location))
return location
for d in os.environ['PATH'].split(u':'):
candidate = os.path.join(d, name)
if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
return candidate
raise DeploymentError(u'Executable "{}" not found in PATH'.format(name))
def _checkVarnish(path):
logging.info('Checking varnishd version')
output = subprocess.check_output([path, u'-V'], stderr=subprocess.STDOUT)
if re.match(ur'varnishd \(varnish-3', output) is None:
        raise DeploymentError(u'Varnish at "{}" needs to be version 3'.format(path))
def _findExecutables(namespace):
logging.info('Finding dependent executables')
java = _findExecutable(u'java', namespace.with_java)
logging.info(u'Using java at %s', java)
nginx = _findExecutable(u'nginx', namespace.with_nginx)
logging.info(u'Using nginx at %s', nginx)
lynx = _findExecutable(u'lynx', namespace.with_lynx)
logging.info(u'Using lynx at %s', lynx)
pdftohtml = _findExecutable(u'pdftohtml', namespace.with_pdftohtml)
logging.info(u'Using pdftohtml at %s', pdftohtml)
varnishd = _findExecutable(u'varnishd', namespace.with_varnishd)
logging.info(u'Using varnishd at %s', varnishd)
wvHtml = _findExecutable(u'wvHtml', namespace.with_wvHtml)
logging.info(u'Using wvHtml at %s', wvHtml)
python = _findExecutable(u'python2.7', namespace.with_python)
logging.info(u'Using python2.7 at %s', python)
return dict(java=java, nginx=nginx, lynx=lynx, pdftohtml=pdftohtml, python=python, varnishd=varnishd, wvHtml=wvHtml)
def _checkLibrary(lib, func, libdirs):
logging.debug('Checking for %s in %s (extra libdirs: %r)', func, lib, libdirs)
fd, fn = tempfile.mkstemp(suffix='.c')
out = os.fdopen(fd, 'w')
out.write(_cHeader)
out.write('char %s();\nint main() {\nreturn %s();}\n' % (func, func))
out.close()
args = ['cc', fn, '-l{}'.format(lib)]
args.extend(['-L{}'.format(i) for i in libdirs])
_execAndLog(args)
os.remove('a.out')
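# For example (illustrative), _checkLibrary('xml2', 'xmlNewEntity', []) writes a
# throwaway C source file roughly like
#     #ifdef __cplusplus
#     extern "C"
#     #endif
#     char xmlNewEntity();
#     int main() {
#     return xmlNewEntity();}
# and runs `cc <tmpfile>.c -lxml2`; a link failure means the library or symbol
# is missing, in the style of an autoconf probe.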
def _checkLibraries(namespace):
logging.info('Finding dependent libraries and headers')
extraLibdirs = namespace.libdir
if extraLibdirs is None:
extraLibdirs = []
for lib, func in (
('xml2', 'xmlNewEntity'),
('xslt', 'xsltInit'),
):
logging.info('Checking for %s', lib)
_checkLibrary(lib, func, extraLibdirs)
# If we get here, then _checkLibrary didn't raise any exception and we found our symbols.
# Note: we should also check versions.
return extraLibdirs
def _getUserID():
logging.info(u'Getting user ID')
username = pwd.getpwuid(os.getuid())[0]
logname = os.environ['LOGNAME']
if logname != username:
logging.warning("LOGNAME \"%s\" does not match current user ID's account name \"%s\", preferring latter",
logname, username)
return username
def _installVirtualEnv(python):
logging.info(u'Installing virtualenv %s', _virtualEnvVersion)
    sentinel = os.path.join(u'virtualenv-{}'.format(_virtualEnvVersion), u'virtualenv_support', u'__init__.py')
if not os.path.isfile(sentinel):
logging.debug(u'Downloading from %s', _virtualEnvURL)
with _download(_virtualEnvURL) as f:
tf = tarfile.open(fileobj=f, mode='r:gz')
tf.extractall()
else:
logging.debug(u'Found virtualenv already')
sentinel = os.path.join(u'python2.7', 'bin', 'activate.csh')
if not os.path.isfile(sentinel):
logging.debug(u'Installing virtualenv for python2.7')
ve = os.path.join(u'virtualenv-{}'.format(_virtualEnvVersion), u'virtualenv.py')
subprocess.check_call([python, ve, u'python2.7'])
else:
logging.debug(u'Found virtualenv python already')
# Check for upgraded setuptools?
def _checkCWD():
logging.info(u"Checking what directory we're in")
for name, test in (
('bootstrap.py', os.path.isfile),
('etc', os.path.isdir),
('ops.cfg', os.path.isfile),
('static', os.path.isdir),
('templates', os.path.isdir)
):
if not test(name):
raise DeploymentError(u"File/dir \"{}\" missing; are you running from the right directory?".format(name))
def _download(url):
tf = tempfile.TemporaryFile()
with contextlib.closing(urllib2.urlopen(url)) as con:
while True:
buf = con.read(_bufsiz)
if len(buf) == 0:
break
tf.write(buf)
tf.flush()
tf.seek(0)
return tf
def _installSiteConfig(
executables, extraPaths, libdirs, superUser, superPassword, tomcatUser, tomcatPassword,
zopeUser, zopePassword, hostname, http, https, userID, buildoutCache
):
logging.info(u'Creating site.cfg')
javaHome = os.path.dirname(os.path.dirname(executables['java']))
with open(u'site.cfg', 'w') as f:
f.write(u'[buildout]\n')
f.write(u'extends = ops.cfg\n')
for directive, directory in (
(u'download-cache', u'downloads'),
(u'eggs-directory', u'eggs'),
(u'extends-cache', u'extends')
):
directory = os.path.abspath(os.path.join(buildoutCache, directory))
f.write(u'{} = {}\n'.format(directive, directory))
f.write(u'[hosts]\n')
f.write(u'public-address = {}\n'.format(hostname))
f.write(u'[ports]\n')
f.write(u'nginx = {}\n'.format(http))
f.write(u'nginx-ssl = {}\n'.format(https))
f.write(u'[supervisor]\n')
f.write(u'username = {}\n'.format(superUser))
f.write(u'password = {}\n'.format(superPassword))
f.write(u'[tomcat]\n')
f.write(u'username = {}\n'.format(tomcatUser))
f.write(u'password = {}\n'.format(tomcatPassword))
f.write(u'[zope]\n')
f.write(u'username = {}\n'.format(zopeUser))
f.write(u'password = {}\n'.format(zopePassword))
f.write(u'[paths]\n')
f.write(u'java = {}\n'.format(executables['java']))
f.write(u'java_home = {}\n'.format(javaHome))
f.write(u'nginx = {}\n'.format(executables['nginx']))
f.write(u'varnishd = {}\n'.format(executables['varnishd']))
f.write(u'extra = {}\n'.format(u':'.join(extraPaths)))
if len(libdirs):
f.write(u'libs = {}\n'.format(u':'.join(libdirs)))
f.write(u'[users]\n')
for i in (u'nginx', u'tomcat', u'varnish', u'zeo', u'zope'):
f.write(u'{} = {}\n'.format(i, userID))
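# The generated site.cfg begins roughly like this (illustrative values):
#     [buildout]
#     extends = ops.cfg
#     download-cache = /apps/ipdasite/buildout/downloads
#     eggs-directory = /apps/ipdasite/buildout/eggs
#     extends-cache = /apps/ipdasite/buildout/extends
#     [hosts]
#     public-address = planetarydata.org
#     ...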
def _checkBuildoutCache(directory):
logging.info(u'Checking buildout cache')
def reportError(error):
raise DeploymentError(u'Cannot access "{}" (errno: {})'.format(error.filename, error.strerror))
    logging.debug(u'Traversing all files/dirs under %s for writability', directory)
for root, dirs, files in os.walk(directory, onerror=reportError):
for d in dirs:
d = os.path.abspath(os.path.join(root, d))
if not os.access(d, os.R_OK | os.X_OK | os.W_OK):
raise DeploymentError(u'Cannot read, write, and traverse "{}"'.format(d))
for f in files:
f = os.path.abspath(os.path.join(root, f))
if not os.access(f, os.R_OK | os.W_OK):
raise DeploymentError(u'Cannot read and write "{}"'.format(f))
for d in ('eggs', 'downloads', 'extends'):
d = os.path.abspath(os.path.join(directory, d))
logging.debug(u'Checking if %s is a directory', d)
if not os.path.isdir(d):
logging.debug(u'Creating %s', d)
os.makedirs(d)
def _getCredentials(kind, options):
username = getattr(options, u'{}_user'.format(kind))
passwd = getattr(options, u'{}_password'.format(kind), None)
if passwd is None:
chars = string.letters + string.digits
passwd = ''.join([random.choice(chars) for i in range(20)])
return username, passwd
def _execAndLog(args):
logging.debug(u'>>> %r', args)
sub = subprocess.Popen(args, bufsize=1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
close_fds=True, universal_newlines=True)
output, error = sub.communicate()
sub.wait()
for line in output.split('\n'):
logging.debug(u'... %s', line)
if sub.returncode != 0:
raise DeploymentError(u'Subprocess call failed with return code {} (command was {})'.format(sub.returncode,
repr(args)))
def _bootstrap():
logging.info(u'Bootstrapping the buildout')
args = [
os.path.join(u'python2.7', u'bin', u'python2.7'),
u'bootstrap.py',
u'--buildout-version={}'.format(_buildoutVersion),
u'--setuptools-version={}'.format(_setupToolsVersion),
'-c',
u'site.cfg'
]
_execAndLog(args)
def _buildout():
logging.info(u'Building out; this can take a long time')
args = [os.path.join(u'bin', u'buildout'), u'-c', u'site.cfg']
_execAndLog(args)
def _checkSite(directory):
logging.info(u'Checking old IPDA site at "%s"', directory)
var = os.path.abspath(os.path.join(directory, u'var'))
database = os.path.join(var, u'filestorage', u'Data.fs')
logging.debug(u'Testing if database file %s exists', database)
if not os.path.isfile(database):
raise DeploymentError(u'Existing site at "{}" lacks a database at "{}"'.format(directory, database))
blobs = os.path.join(var, u'blobstorage')
logging.debug(u'Testing if blob directory %s exists', blobs)
if not os.path.isdir(blobs):
raise DeploymentError(u'Existing site at "{}" lacks blobstorage at "{}"'.format(directory, blobs))
def _copyContent(directory):
logging.info(u'Copying content from existing IPDA site at "%s"', directory)
var = os.path.abspath(os.path.join(directory, u'var'))
database = os.path.join(var, u'filestorage', u'Data.fs')
targetDir = os.path.abspath(os.path.join(u'var', u'filestorage'))
if not os.path.isdir(targetDir):
            logging.debug(u'Creating directory %s', targetDir)
os.makedirs(targetDir)
logging.debug(u'Copying %s to %s', database, targetDir)
shutil.copy(database, targetDir)
blobs = os.path.join(var, u'blobstorage')
targetDir = os.path.abspath(os.path.join(u'var', u'blobstorage'))
if os.path.isdir(targetDir):
logging.debug(u'Removing directory %s', targetDir)
shutil.rmtree(targetDir)
logging.debug(u'Copying tree %s to var', blobs)
shutil.copytree(blobs, os.path.abspath(u'var/blobstorage'))
registryDir = os.path.abspath(os.path.join(u'var', u'registry'))
if os.path.isdir(registryDir):
logging.debug(u'Removing barebones registry db at %s', registryDir)
shutil.rmtree(registryDir)
registryDB = os.path.join(var, u'registry')
logging.debug(u'Copying tree %s to var', registryDB)
shutil.copytree(registryDB, os.path.abspath(u'var/registry'))
def _deployEmptySite():
logging.info(u'Deploying IPDA website with minimal content')
args = [os.path.join(u'bin', u'buildout'), u'-c', u'site.cfg', 'install', 'basic-site']
_execAndLog(args)
def _upgradeSite(user, password):
logging.info(u'Setting up new Zope user and upgrading site')
args = [os.path.join(u'bin', u'zope-debug'), u'run', os.path.join(u'support', u'upgrade.py'), user, password]
_execAndLog(args)
def main(argv):
_setupLogging()
_checkCWD()
parser = _getArgParser()
ns = parser.parse_args(argv[1:])
if ns.existing:
_checkSite(ns.existing)
executables = _findExecutables(ns)
_checkVarnish(executables['varnishd'])
libdirs = _checkLibraries(ns)
extraPaths = set()
for path in (executables['lynx'], executables['pdftohtml'], executables['wvHtml']):
directory = os.path.dirname(path)
extraPaths.add(directory)
logging.debug(u'Extra PATH to set: %s', extraPaths)
userID = _getUserID()
logging.info(u'Processes will run with user ID "%s"', userID)
_checkBuildoutCache(ns.buildout_cache)
_installVirtualEnv(executables['python'])
superUser, superPassword = _getCredentials(u'supervisor', ns)
tomcatUser, tomcatPassword = _getCredentials(u'tomcat', ns)
zopeUser, zopePassword = _getCredentials(u'zope', ns)
_installSiteConfig(executables, extraPaths, libdirs, superUser, superPassword, tomcatUser, tomcatPassword,
zopeUser, zopePassword, ns.public_hostname, ns.http_port, ns.https_port, userID, ns.buildout_cache)
_bootstrap()
_buildout()
if ns.existing:
_copyContent(ns.existing)
_upgradeSite(zopeUser, zopePassword)
else:
_deployEmptySite()
return True
if __name__ == '__main__':
sys.exit(0 if main(sys.argv) else -1)

# ==== aoc2020/4/d4_2.py (kewbish/ka-algorithms, MIT) ====
from re import match
with open("input.txt") as x:
lines = x.read().strip().split("\n\n")
lines = [line.replace("\n", " ") for line in lines]
valid = 0
fields = {
'byr': lambda x: 1920 <= int(x) <= 2002,
'iyr': lambda x: 2010 <= int(x) <= 2020,
'eyr': lambda x: 2020 <= int(x) <= 2030,
'hgt': lambda x: (x[-2:] == 'cm' and 150 <= int(x[:-2]) <= 193) or (x[-2:] == 'in' and 59 <= int(x[:-2]) <= 76),
'hcl': lambda x: match(r"^#[a-f0-9]{6}$", x), # match only matches from beginning of string
'ecl': lambda x: x in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'],
'pid': lambda x: x.isnumeric() and len(x) == 9
}
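# Illustrative: a passport entry such as "ecl:gry pid:860033327 byr:1937" is
# split into {'ecl': 'gry', 'pid': '860033327', 'byr': '1937'} below; it counts
# as valid only if every required field is present and passes its validator.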
for passport in lines:
pd = dict(tuple(i.split(":")) for i in passport.split())
if all((field in pd.keys() and fields[field](pd[field])) for field in fields):
valid += 1
print(valid)

# ==== test_inference.py (ilyes64/DenseNet-TF2, MIT) ====
"""Test ImageNet pretrained DenseNet"""
import cv2
import numpy as np
from tensorflow.keras.optimizers import SGD
import tensorflow.keras.backend as K
# We only test DenseNet-121 in this script for demo purpose
from densenet121 import DenseNet
im = cv2.resize(cv2.imread('resources/cat.jpg'), (224, 224)).astype(np.float32)
#im = cv2.resize(cv2.imread('shark.jpg'), (224, 224)).astype(np.float32)
# Subtract mean pixel and multiply by scaling constant
# Reference: https://github.com/shicai/DenseNet-Caffe
im[:,:,0] = (im[:,:,0] - 103.94) * 0.017
im[:,:,1] = (im[:,:,1] - 116.78) * 0.017
im[:,:,2] = (im[:,:,2] - 123.68) * 0.017
print(K.image_data_format())
# Use pre-trained weights for Tensorflow backend
weights_path = 'imagenet_models/densenet121_weights_tf.h5'
# Insert a new dimension for the batch_size
im = np.expand_dims(im, axis=0)
# Test pretrained model
model = DenseNet(reduction=0.5, classes=1000, weights_path=weights_path)
sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
out = model.predict(im)
# Load ImageNet classes file
classes = []
with open('resources/classes.txt', 'r') as list_:
for line in list_:
classes.append(line.rstrip('\n'))
print('Prediction: '+str(classes[np.argmax(out)]))

# ==== global_id/tests/utils/callers/guid_caller.py (ThePokerFaCcCe/messenger, MIT) ====
from django.urls.base import reverse
from rest_framework import status
from global_id.urls import app_name
from core.tests.utils import BaseCaller
from ..creators import create_guid
def guid_detail_url(guid=None):
return reverse(f"{app_name}:guid-detail",
kwargs={'guid': guid or create_guid().guid})
class GUIDViewCaller(BaseCaller):
def retrieve__get(self, access_token, guid: str = None,
allowed_status=status.HTTP_200_OK):
"""Calls guid-detail view with GET method"""
return self.assert_status_code(
allowed_status, self.client.get,
guid_detail_url(guid),
**self.get_auth_header(access_token)
)

# ==== Neural Network/NNToyFx/python/activations.py (stormy-ua/MachineLearning, MIT) ====
from simulation import *
def relu(ctx: SimulationContext, x: Connection):
relu1 = ctx.max(x, ctx.variable(0))
    return relu1

# ==== dev/umm-exploration-has-calculator.py (fangohr/oommf-python, BSD-2-Clause) ====
class MicromagneticModell:
def __init__(self, name, Ms, calc):
self.name = name
self.Ms = Ms
self.field = None
self.calc = calc
def __str__(self):
return "AbstractMicromagneticModell(name={})".format(self.name)
def relax(self):
self.calc.relax(self)
def set_H(self, field):
print("AbstractMicromagneticModell: setting field = {}")
self.field = field
def hysteresis(self, fieldlist):
print("AbstractMicromagneticModell: starting hysteresis")
for field in fieldlist:
self.set_H(field)
self.relax()
class OOMMFC():
def __init__(self):
pass
def __str__(self):
return "OOMMFC()"
def relax(self, mm):
print("Calling OOMMF to run relax() with H={}".format(mm.field))
#a = AbstractMicromagneticModell('simulation-name', 10)
#print(a)
#a.hysteresis([10, 20])
ocalc = OOMMFC()
o = MicromagneticModell(name='test', Ms=42, calc=ocalc)
print(o)
o.relax()
#f = FIDIMAGC(name='fidimag-simulation', Ms=8e6)
#print(o)
#f.relax()
#o.relax()
#o.hysteresis([10, 20, 30])

# ==== Tools/Converters/tetgen2ply.py (dbungert/opensurgsim, Apache-2.0) ====
#!/usr/bin/python
# This file is a part of the OpenSurgSim project.
# Copyright 2012-2015, SimQuest Solutions Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert a TetGen set of files .node/.ele into a PLY file readable by OSS
Typical usage:
TetGen_to_PLY.py input.node input.ele [input.face] [input.fixedNodes] output.ply
"""
import csv
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Convert a TetGen set of filename into a PLY file readable by OSS.")
parser.add_argument('nodes', help='Filename for the nodes input.')
parser.add_argument('elements', help='Filename for the tetrahedrons input.')
parser.add_argument('--faces', help='Filename for the faces input.')
parser.add_argument('--fixedNodes', help='Filename for the fixed node indices.')
parser.add_argument('--massDensity', help='Mass density.', default='2000.0')
parser.add_argument('--youngModulus', help='Young modulus.', default='1e7')
parser.add_argument('--poissonRatio', help='Poisson ratio.', default='0.45')
parser.add_argument('output', help='Filename for the PLY output.')
args = parser.parse_args()
numNodes = 0 # Number of nodes, will be read from the header of the .node file
    numElements = 0    # Number of elements, will be read from the header of the .ele file (supports triangle (3) and tetrahedron (4) elements)
elementSize = 0 # Element size 3 for triangle, 4 for tetrahedron, will be read from the header of the .ele file
numFaces = 0 # Number of triangulated faces (if a .face is provided, number of entries in this file)
numFixedNodes = 0 # Number of fixed nodes (if a .fixedNode is provided, number of entries in this file)
with open(args.output, 'wb') as csvOutputFile:
writer = csv.writer(csvOutputFile, delimiter = ' ', quoting=csv.QUOTE_NONE)
writer.writerow(['ply'])
writer.writerow(['format', 'ascii', '1.0'])
writer.writerow(['comment', 'Created', 'by', 'tetgen2ply'])
with open(args.nodes, 'rb') as csvNodeFile:
reader = csv.reader(csvNodeFile, delimiter = ' ', skipinitialspace = True)
row = reader.next()
numNodes = row[0]
# Enforcing the need for 3 pieces of information per node (x, y, z)
if not row[1] == '3':
                raise Exception('Invalid node information in ' + args.nodes + '. Node dimension (expecting 3) was ' + row[1])
writer.writerow(['element', 'vertex', numNodes])
writer.writerow(['property', 'double', 'x'])
writer.writerow(['property', 'double', 'y'])
writer.writerow(['property', 'double', 'z'])
with open(args.elements, 'rb') as csvElementFile:
reader = csv.reader(csvElementFile, delimiter = ' ', skipinitialspace = True)
row = reader.next()
numElements = row[0]
if row[1] == '4':
writer.writerow(['element', '3d_element', numElements])
elementSize = 4
elif row[1] == '3':
writer.writerow(['element', '2d_element', numElements])
elementSize = 3
else :
                raise Exception('Invalid triangle/tetrahedron information in ' + args.elements + '. Element dimension (expecting 3 or 4) was ' + row[1])
writer.writerow(['property', 'list', 'uint', 'uint', 'vertex_indices'])
if args.faces:
with open(args.faces, 'rb') as csvFaceFile:
reader = csv.reader(csvFaceFile, delimiter = ' ', skipinitialspace = True)
row = reader.next()
numFaces = row[0]
writer.writerow(['element', 'face', numFaces])
writer.writerow(['property', 'list', 'uint', 'uint', 'vertex_indices'])
if args.fixedNodes:
with open(args.fixedNodes, 'rb') as csvFixedNodeFile:
readerFixedNodes = csv.reader(csvFixedNodeFile)
for row in readerFixedNodes:
numFixedNodes = numFixedNodes + 1
writer.writerow(['element', 'boundary_condition', numFixedNodes])
writer.writerow(['property', 'uint', 'vertex_index'])
# Extra parameter (thickness) if the element is a triangle
if elementSize == 3:
writer.writerow(['element', 'thickness', 1])
writer.writerow(['property', 'double', 'value'])
writer.writerow(['element', 'material', 1])
writer.writerow(['property', 'double', 'mass_density'])
writer.writerow(['property', 'double', 'poisson_ratio'])
writer.writerow(['property', 'double', 'young_modulus'])
writer.writerow(['end_header'])
# Parse the .node file to format the nodes (x,y,z)
with open(args.nodes, 'rb') as csvNodeFile:
reader = csv.reader(csvNodeFile, delimiter = ' ', skipinitialspace = True)
rowId = 0
# Write all nodes
for row in reader:
# Skip the commented lines (especially the last line of the .node generated by TetGen)
if row[0][0] == '#':
continue
# Skip the first line (header information), detected by the number of nodes being different than the expected node index
if not int(row[0]) == rowId:
continue
writer.writerow(row[1:])
rowId = rowId + 1
# Parse the .ele file to format the tetrahedrons
with open(args.elements, 'rb') as csvElementFile:
reader = csv.reader(csvElementFile, delimiter = ' ', skipinitialspace = True)
rowId = 0
# Write all tetrahedrons
for row in reader:
# Skip the commented lines (especially the last line of the .ele generated by TetGen)
if row[0][0] == '#':
continue
# Skip the first line (header information), detected by the number of elements being different than the expected element index
if not int(row[0]) == rowId:
continue
row[0] = elementSize
writer.writerow(row)
rowId = rowId + 1
# Parse the .face file to format the triangulated faces (if a file is specified)
if args.faces:
with open(args.faces, 'rb') as csvFaceFile:
reader = csv.reader(csvFaceFile, delimiter = ' ', skipinitialspace = True)
rowId = 0
# Write all faces
for row in reader:
# Skip the commented lines (especially the last line of the .face generated by TetGen)
if row[0][0] == '#':
continue
# Skip the first line (header information), detected by the number of faces being different than the expected face index
if not int(row[0]) == rowId:
continue
row[0] = 3
writer.writerow(row[:4])
rowId = rowId + 1
# Write the fixed nodes (boundary conditions) if any
if args.fixedNodes:
with open(args.fixedNodes, 'rb') as csvFixedNodeFile:
readerFixedNodes = csv.reader(csvFixedNodeFile, delimiter = ' ', skipinitialspace = True)
for row in readerFixedNodes:
writer.writerow(row)
# Write a default thickness if the element is a triangle
# We should have something closer to 1/100th of the mesh size if we wanted to have a more automated tool
if elementSize == 3:
writer.writerow(['0.01'])
# Write the material (default parameters)
writer.writerow([args.massDensity, args.poissonRatio, args.youngModulus])

# ==== movie/App/forms/user.py (caoluyang8/movie, BSD-2-Clause) ====
from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField,BooleanField
from flask_wtf.file import FileAllowed,FileRequired,FileField
from wtforms.validators import DataRequired,Length,EqualTo,Email,ValidationError
from App.models import User
from App.extensions import file
class Register(FlaskForm):
    username = StringField('Username', validators=[DataRequired('Username cannot be empty'), Length(min=6, max=12, message='Username must be 6 to 12 characters')], render_kw={'placeholder': 'Please enter a username', 'minlength': 6, 'maxlength': 12})
    userpass = PasswordField('Password', validators=[DataRequired('Password cannot be empty'), Length(min=6, max=12, message='Password must be 6 to 12 characters')], render_kw={'placeholder': 'Please enter a password', 'minlength': 6, 'maxlength': 12})
    confirm = PasswordField('Confirm password', validators=[DataRequired('Confirm password cannot be empty'), Length(min=6, max=12, message='Password must be 6 to 12 characters'), EqualTo('userpass', message='Confirmation does not match the password')], render_kw={'placeholder': 'Please re-enter the password', 'minlength': 6, 'maxlength': 12})
    email = StringField('Email', validators=[DataRequired('Email cannot be empty'), Email('Please enter a valid email address')], render_kw={'placeholder': 'Please enter a valid email address'})
    icon = FileField('Choose an avatar', validators=[FileAllowed(file, message='This file type is not allowed!'), FileRequired('You have not chosen a file yet')])
    submit = SubmitField('Register')

    def validate_username(self, field):
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('This username already exists, please enter another')

    def validate_email(self, field):
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('This email is already registered, please enter another')


class Login(FlaskForm):
    username = StringField('Username', validators=[DataRequired('Username cannot be empty'), Length(min=6, max=12, message='Username must be 6 to 12 characters')], render_kw={'placeholder': 'Please enter a username', 'minlength': 6, 'maxlength': 12})
    userpass = PasswordField('Password', validators=[DataRequired('Password cannot be empty'), Length(min=6, max=12, message='Password must be 6 to 12 characters')], render_kw={'placeholder': 'Please enter a password', 'minlength': 6, 'maxlength': 12})
    remember = BooleanField('Remember me')
    submit = SubmitField('Log in')

    def validate_username(self, field):
        if not User.query.filter_by(username=field.data).first():
            raise ValidationError('This username does not exist, please enter it again')


class Changeinfo(FlaskForm):
    username = StringField('Username', validators=[DataRequired('Username cannot be empty'), Length(min=6, max=12, message='Username must be 6 to 12 characters')], render_kw={'placeholder': 'Please enter a username', 'minlength': 6, 'maxlength': 12})
    email = StringField('Email', validators=[DataRequired('Email cannot be empty'), Email('Please enter a valid email address')], render_kw={'placeholder': 'Please enter a valid email address'})
    icon = FileField('Choose an avatar', validators=[FileAllowed(file, message='This file type is not allowed!'), FileRequired('You have not chosen a file yet')])
    submit = SubmitField('Update')

    def validate_username(self, field):
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('This username already exists, please enter another')

    def validate_email(self, field):
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('This email is already registered, please enter another')

# ==== doc/source/user/examples/cleanup-servers.py (noironetworks/shade, Apache-2.0) ====
import shade
# Initialize and turn on debug logging
shade.simple_logging(debug=True)
for cloud_name, region_name in [
('my-vexxhost', 'ca-ymq-1'),
('my-citycloud', 'Buf1'),
('my-internap', 'ams01')]:
# Initialize cloud
cloud = shade.openstack_cloud(cloud=cloud_name, region_name=region_name)
for server in cloud.search_servers('my-server'):
cloud.delete_server(server, wait=True, delete_ips=True)

# ==== abacus_tpot/tpot_config.py (workforce-data-initiative/tpot-abacus, Apache-2.0) ====
# eventually we will have a proper config
ANONYMIZATION_THRESHOLD = 10
WAREHOUSE_URI = 'postgres://localhost'
WAGE_RECORD_URI = 'postgres://localhost'

# ==== tileServer/scripts/exportZoomLevels.py (greenhalos/tile-server, Apache-2.0) ====
#!/usr/bin/env python3
import json
import yaml
result = {}
with open('app/static/greenhalos-style.json') as json_file:
data = json.load(json_file)
for layer in data['layers']:
if 'source-layer' in layer:
minzoom = layer.get('minzoom', 0)
maxzoom = layer.get('maxzoom', 24)
sourceLayer = layer['source-layer']
if not sourceLayer in result:
result[sourceLayer] = {
'min': minzoom,
'max': maxzoom,
'enabled': True,
}
if minzoom < result[sourceLayer]['min']:
result[sourceLayer]['min'] = minzoom
if maxzoom > result[sourceLayer]['max']:
result[sourceLayer]['max'] = maxzoom
print(yaml.dump(result, default_flow_style=False))
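# Example output (illustrative layer name and zoom levels):
#   water:
#     enabled: true
#     max: 24
#     min: 0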

# ==== formats/dcc_parser.py (C3RV1/LaytonEditor, Unlicense) ====
# Data and Code Container (DCC) format by Cervi
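#
# A minimal sketch of the syntax this parser accepts, inferred from
# split_by_tokens()/create_structure() below (illustrative, not an official spec):
#
#     # comments run to the end of the line
#     $shared/common.dcc            <- import another DCC file's named groups
#     window: [                     <- named group
#         title: "Hello \"world\""  <- named value; strings may escape quotes
#         size: [
#             =640                  <- unnamed values are prefixed with '='
#             =480
#         ]
#         show(true, 1.5)           <- call with comma-separated parameters
#     ]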
import typing
def is_int(var):
try:
int(var)
return True
except ValueError:
return False
def is_float(var):
try:
float(var)
return True
except ValueError:
return False
def is_hex(var):
try:
int(var, 16)
return True
except ValueError:
return False
class Parser:
def __init__(self):
self.code: typing.Union[str, dict] = ""
self.converted_paths = []
self.imported = {}
def parse(self, code_=None):
if code_:
self.code = code_
self.converted_paths = []
self.split_by_tokens()
self.create_structure()
self.convert_variables()
def serialize(self, code_=None, imported=None):
if imported is None:
imported = []
if code_:
self.code = code_
self.converted_paths = []
self.revert_variables()
self.remove_structure(imported)
self.join_by_tokens()
return self.code
def split_by_tokens(self):
tokens = []
in_string = False
code_index = 0
current_token = ""
while code_index < len(self.code):
if self.code[code_index] == "\n":
if not in_string:
if current_token != "":
tokens.append(current_token)
current_token = ""
code_index += 1
if code_index >= len(self.code):
continue
if self.code[code_index] == "#":
while self.code[code_index] != "\n" and code_index < len(self.code):
code_index += 1
continue
elif self.code[code_index] == "(" and not in_string:
tokens.append(current_token + "(")
current_token = ""
elif self.code[code_index] == ")" and not in_string:
tokens.append(current_token)
current_token = ")"
elif self.code[code_index] == "\"":
in_string = not in_string
current_token += self.code[code_index]
elif self.code[code_index] == "\\" and self.code[code_index + 1] == "\"" and in_string:
code_index += 2
current_token += "\\\""
continue
elif self.code[code_index] == " " and not in_string:
pass
else:
current_token += self.code[code_index]
code_index += 1
tokens.append(current_token)
self.code = list(filter(lambda x: len(x) > 0, tokens))
def join_by_tokens(self, add_string_newline=True):
joined = ""
indent = 0
for token in self.code:
if ":" in token:
token_split = token.split(":")
token = token_split[0] + ": " + token_split[1]
if "\"" in token:
token_parsed = ""
while token[0] != "\"":
token_parsed += token[0]
token = token[1:]
token_parsed += "\""
token = token[1:]
while len(token) > 0:
if token[:2] == "\\n" and add_string_newline:
token_parsed += "\\n\n"
token = token[2:]
continue
token_parsed += token[0]
token = token[1:]
token = token_parsed
if token[-1] == "]" or token == ")":
indent -= 1
if token[-1] == "(":
token = token[:-1] + " ("
joined += " " * (4 * indent) + token + "\n"
if token[-1] == "[" or token[-1] == "(":
indent += 1
self.code = joined
def create_structure(self):
tokens = self.code
def convert_to_group(is_call=False):
nonlocal tokens
current_group = {"unnamed": [], "named": {}, "calls": []}
while len(tokens) > 0:
token = tokens[0]
tokens = tokens[1:]
if token == "]" or token == ")":
break
if "=" == token[0]:
token_value = token[1:]
current_group['unnamed'].append(token_value)
elif is_call: # "," in token and
params = []
token_copy = token
current_token = ""
while len(token_copy) > 0:
if token_copy[0] == "\"":
current_token += "\""
token_copy = token_copy[1:]
while len(token_copy) > 0 and token_copy[0] != "\"":
if token_copy[:2] == '\\"':
current_token += '\\"'
token_copy = token_copy[2:]
continue
current_token += token_copy[0]
token_copy = token_copy[1:]
current_token += "\""
token_copy = token_copy[1:]
elif token_copy[0] == ",":
params.append(current_token)
current_token = ""
token_copy = token_copy[1:]
else:
current_token += token_copy[0]
token_copy = token_copy[1:]
params.append(current_token)
for par_i in range(len(params)):
params[par_i] = params[par_i].strip()
current_group["unnamed"].extend(params)
elif "$" == token[0]:
token_value = token[1:]
if token_value not in self.imported:
new_parser = Parser()
file_to_imp = open(token_value, "r")
new_parser.parse(file_to_imp.read())
file_to_imp.close()
self.imported[token_value] = new_parser
else:
new_parser = self.imported[token_value]
current_group["named"].update(new_parser.code["named"])
self.converted_paths.extend(new_parser.converted_paths[1:])
elif ":" in token:
if is_call:
raise SyntaxError("Call is not allowed to have named")
token_name = token.split(":")[0]
token_value = token.split(":")[1]
if token_value == "[":
token_value = convert_to_group()
current_group['named'][token_name] = token_value
elif "(" == token[-1]:
if is_call:
raise SyntaxError("Call is not allowed to have calls")
current_group['calls'].append({
"func": token[:-1],
"parameters": convert_to_group(is_call=True)
})
if is_call:
return current_group['unnamed']
return current_group
self.code = convert_to_group()
def remove_structure(self, imported):
def revert_group(group):
pre_group = []
for element in group["unnamed"]:
if not isinstance(element, dict):
pre_group.append(f"={element}")
else:
pre_group.append("=[")
pre_group.extend(revert_group(element))
pre_group.append("]")
for element in group["named"].keys():
if not isinstance(group["named"][element], dict):
pre_group.append(f"{element}:{group['named'][element]}")
else:
pre_group.append(f"{element}:[")
pre_group.extend(revert_group(group["named"][element]))
pre_group.append("]")
for element in group["calls"]:
pre_group.append(f"{element['func']}(" + ", ".join(element['parameters']) + ")")
return pre_group
for imported_file in imported:
if imported_file not in self.imported:
new_parser = Parser()
imported_code = open(imported_file, "r")
new_parser.parse(imported_code.read())
imported_code.close()
else:
new_parser = self.imported[imported_file]
for named in new_parser.code["named"].keys():
if named in self.code["named"].keys():
self.code["named"].pop(named)
# TODO: Finish remove imported
self.code = revert_group(self.code)
for imported_file in imported:
self.code.insert(0, f"${imported_file}")
@staticmethod
def convert_variable(value, self=None):
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
value = value.replace("\\n", "\n").replace('\\"', '"')
return value
elif is_int(value):
return int(value)
elif is_hex(value):
return int(value, 16)
elif is_float(value):
return float(value)
elif value == "true":
return True
elif value == "false":
return False
elif value == "null":
return None
elif self is not None:
if self.get_path(value) is not None:
if value not in self.converted_paths:
self.convert_path(".".join(value.split(".")[:-1]))
return self.get_path(value)
raise ValueError(f"{value} is not recognised as a valid value")
def convert_path(self, path):
path_obj = self.get_path(path)
if path in self.converted_paths:
return path_obj
self.converted_paths.append(path)
if isinstance(path_obj, dict):
for i in range(len(path_obj["unnamed"])):
path_obj["unnamed"][i] = self.convert_variable(path_obj["unnamed"][i], self)
for i in path_obj["named"].keys():
if path is not None:
new_path = path + "." + i
else:
new_path = i
path_obj["named"][i] = self.convert_path(new_path)
for i in path_obj["calls"]:
for parameter in range(len(i['parameters'])):
i['parameters'][parameter] = self.convert_variable(i['parameters'][parameter], self)
return path_obj
else:
return self.convert_variable(path_obj, self)
def convert_variables(self):
self.convert_path(None)
@staticmethod
def revert_variable(value):
if isinstance(value, str):
value = value.replace("\n", "\\n").replace('"', '\\"')
return f"\"{value}\""
elif value is True:
return "true"
elif value is False:
return "false"
elif value is None:
return "null"
elif isinstance(value, int):
return str(value)
elif isinstance(value, float):
return str(value)
else:
raise ValueError(f"{value} can't be converted to non-value")
def revert_path(self, path):
path_obj = self.get_path(path)
if isinstance(path_obj, dict):
for i in range(len(path_obj["unnamed"])):
path_obj["unnamed"][i] = self.revert_variable(path_obj["unnamed"][i])
for i in path_obj["named"].keys():
if path is not None:
new_path = path + "." + i
else:
new_path = i
path_obj["named"][i] = self.revert_path(new_path)
for i in path_obj["calls"]:
for parameter in range(len(i['parameters'])):
i['parameters'][parameter] = self.revert_variable(i['parameters'][parameter])
return path_obj
else:
return self.revert_variable(path_obj)
def revert_variables(self):
self.revert_path(None)
def get_path(self, path, create=False, index=0) -> typing.Union[dict, str, None]:
if path is not None:
path = path.split(".")
else:
path = []
if index != 0:
path = path[:index]
current: dict = self.code
while len(path) > 0:
if path[0] not in current['named']:
if create:
current['named'][path[0]] = {"unnamed": [], "named": {}, "calls": []}
else:
return None
current = current['named'][path[0]]
path = path[1:]
return current
def __getitem__(self, item):
if isinstance(item, str):
path = item.split("::")[0]
proper = item.split("::")[1:]
current = self.get_path(path)
while len(proper) > 0:
if is_int(proper[0]):
proper: list
proper[0] = int(proper[0])
current = current[proper[0]]
proper = proper[1:]
return current
else:
raise Exception
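    # Illustrative lookup: parser['window::unnamed::0'] resolves the dotted
    # path before the first '::' with get_path(), then indexes into the
    # resulting group, here returning the first unnamed value of 'window'.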
def reset(self):
self.code = {"unnamed": [], "named": {}, "calls": []}
def set_named(self, path, value):
path_obj = self.get_path(path, index=-1, create=True)
path_obj['named'][path.split(".")[-1]] = value
def exists(self, path):
if self.get_path(path) is not None:
return True
return False
"""if __name__ == "__main__":
file = open("event_example.dcc")
code = file.read()
file.close()
parser = Parser()
parser.parse(code)
# parser.serialize()
# parser.parse()
print(parser.code)"""

# ==== flock/__init__.py (fishface60/python-flock, 0BSD) ====
#!/usr/bin/python
# Copyright (c) 2015, Richard Maw
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''Python Library/cli for providing a higher level interface to flock(2)'''
__version__ = '0.0.0'
__all__ = ('take_lock', 'release_lock', 'lockfile')
from contextlib import contextmanager
from errno import EINTR, EAGAIN, EBADF
from fcntl import flock, LOCK_SH, LOCK_EX, LOCK_NB, LOCK_UN
from multiprocessing import Pipe, Process
import os
from os import strerror
from signal import signal, SIGALRM, setitimer, ITIMER_REAL
from sys import exit
def _set_alarm_and_lock(fd, pipew, timeout, shared):
try:
# TODO: How can you deal with the race where the signal could
# be delivered before you lock, so instead of being woken up
# when the signal is delivered, we block forever.
signal(SIGALRM, lambda *_: None)
setitimer(ITIMER_REAL, timeout)
flock(fd, LOCK_SH if shared else LOCK_EX)
except BaseException as e:
# This loses the traceback, but it's not pickleable anyway
pipew.send(e)
exit(1)
else:
pipew.send(None)
exit(0)
def take_lock(fd, timeout=None, shared=False):
'''Take a lock on a file descriptor
If timeout is 0 the lock is taken without blocking,
if timeout is None we block indefinitely,
if timeout is a positive number we time out in that many seconds.
If shared is True this is a shared lock,
so can lock with other shared locks,
if shared is False this is an exclusive lock.
with open(path, 'r') as lock:
take_lock(lock.fileno(), timeout, shared)
'''
if timeout is None or timeout == 0:
flags = (LOCK_SH if shared else LOCK_EX)
flags |= (LOCK_NB if timeout == 0 else 0)
flock(fd, flags)
return
piper, pipew = Pipe(duplex=False)
p = Process(target=_set_alarm_and_lock,
args=(fd, pipew, timeout, shared))
p.start()
err = piper.recv()
p.join()
if err:
if isinstance(err, IOError) and err.errno == EINTR:
raise IOError(EAGAIN, strerror(EAGAIN))
raise err
def release_lock(fd):
'''Release a lock on a file descriptor
release_lock(lock.fileno())
'''
return flock(fd, LOCK_UN)
class _Lockfile(object):
def __init__(self, fd):
self.fd = fd
def lock(self, *args, **kwargs):
return take_lock(self.fd, *args, **kwargs)
def unlock(self):
return flock(self.fd, LOCK_UN)
@contextmanager
def lockfile(path):
'''Context manager for lock files.
with lockfile(path) as lockfobj:
lockfobj.lock(timeout=0, shared=False)
'''
fd = os.open(path, os.O_RDONLY)
lockfobj = _Lockfile(fd)
try:
yield lockfobj
finally:
# Handle double-close of file descriptor
try:
os.close(fd)
except OSError as e:
if e.errno != EBADF:
raise
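# Illustrative use of the timeout behaviour: take_lock() raises IOError with
# errno EAGAIN when the lock cannot be acquired within the timeout.
#
#     with lockfile('/tmp/example.lock') as lock:
#         try:
#             lock.lock(timeout=5, shared=False)
#         except IOError as e:
#             if e.errno != EAGAIN:
#                 raise
#             print('lock is held elsewhere, giving up')
#         else:
#             do_work()  # hypothetical
#             lock.unlock()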
if __name__ == '__main__':
from argparse import ArgumentParser
from subprocess import call
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version',
version=('%(prog)s ' + __version__))
parser.add_argument('--shared', action='store_true', default=False)
parser.add_argument('--exclusive', dest='shared', action='store_false')
parser.add_argument('--timeout', default=None, type=int)
parser.add_argument('--wait', dest='timeout', action='store_const', const=None)
parser.add_argument('--nonblock', dest='timeout', action='store_const', const=0)
parser.add_argument('file')
parser.add_argument('argv', nargs='*')
opts = parser.parse_args()
if len(opts.argv) == 0:
fd = int(opts.file)
take_lock(fd, opts.timeout, opts.shared)
else:
with lockfile(opts.file) as lock:
lock.lock(timeout=opts.timeout, shared=opts.shared)
exit(call(opts.argv))
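# Illustrative command-line usage (lock path and command are hypothetical):
#   python flock/__init__.py --exclusive --timeout 5 /tmp/example.lock sleep 10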

# ==== uqtie/UqtStylesheet.py (langrind/UQtie, MIT) ====
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Class that manages a UQtie application's stylesheet
There are advantages and disadvantages to Qt stylesheets, Qt settings, and
Qt Style. They aren't mutually exclusive, and they don't all play together
either.
This module attempts to make it possible to use a stylesheet while still
using the QFontDialog to select fonts, and zoom-in/zoom-out shortcuts such
as CTRL-+.
The idea is that we have variables (which come from somewhere, right now a
pickled dictionary, but later could be elsewhere, or multiple elsewheres).
These variables can be used in the stylesheet, so that dynamic changes in
appearance can be made at runtime without editing the stylesheet, and the
changes can also be persisted without changing the stylesheet.
Font, font size and widget sizes (e.g QScrollBar:vertical { width } ) seem like
good candidates to be determined dynamically instead of via hardcoded values
in a stylesheet. That way, when you have high-DPI monitor and less-than-perfect
vision, you can adjust easily.
Part of my motivation for doing this is that PyQt running on Windows, Linux,
MacOS and Cygwin doesn't behave identically, not even close in some ways. If
they all had identical QScreens given identical monitors and graphics cards,
you could just make reasonable choices for your stylesheet and rely on the OS
GUI settings.
Variables you can use in a QSS file are:
$main_font_family
$main_font_weight (not supported yet)
$main_font_size
$scroll_bar_width
Futures:
1) Add more variables
2) Make an inspector widget for stylesheet variables
"""
from __future__ import print_function
from pathlib import Path
import os,re,pickle
from PyQt5.QtCore import QSettings
class StylesheetManager(object):
"""
Class that manages a UQtie application's stylesheet
"""
# If no stylesheet has been provided, use this one. Should this
# really have scroll bar dimensions?
defaultStylesheet = """
QWidget {
font-family: $main_font_family;
font-weight: $main_font_weight;
font-size: $main_font_size;
}
QScrollBar:vertical {
width: $scroll_bar_width;
}
QScrollBar:horizontal {
height: $scroll_bar_width;
}
"""
    def __init__(self, app, settings, appName):
self.stylesheetFileName = None
self.stylesheetVarsFileName = None
self.app = app
self.appName = appName
self.appSettings = settings
self.varsDict = {}
self.defaultVarsDict = {
'main_font_family': 'Arial',
'main_font_weight': 'Regular',
'main_font_size' : '16pt',
'scroll_bar_width': '15px',
}
self.determine_stylesheet_filenames(appName)
def determine_stylesheet_path(self):
"""
Fill in self.appDirPath appropriately
"""
self.appDirPath = None
if os.name == 'nt':
# running on Windows
appDirPath = Path(os.path.expanduser('~')) / 'Application Files'
if not appDirPath.is_dir():
print ( '{p} is not a directory'.format(p=appDirPath))
return
appDirPath /= self.appName
if not appDirPath.is_dir():
try:
appDirPath.mkdir()
except:
                    print ( 'Could not create directory {p}'.format(p=appDirPath))
return
self.appDirPath = appDirPath
else:
# On other OS, we use Settings to determine where stylesheet lives
if self.appSettings:
self.appDirPath = Path(os.path.dirname(self.appSettings.fileName()))
def determine_stylesheet_filenames(self, appName):
"""
Fill in stylesheet filenames appropriately
"""
self.determine_stylesheet_path()
if self.appDirPath:
#print ("self.appDirPath: {}".format(self.appDirPath))
baseName = str(self.appDirPath / appName)
self.stylesheetFileName = baseName + '.qss'
self.stylesheetVarsFileName = baseName + 'Vars.pickle'
def apply(self):
"""
Apply the application window stylesheet, including variable value substitution
"""
# This means:
# 1) Read it from a file
# 2) Replace all '{' and '}' which are QSS syntax (e.g. 'QWidget {') with
# '{{' and '}}'. This protects the QSS syntax during the next steps.
# 3) Replace all $word with {word} thus turning the string into a Python
# string with argument specifiers (e.g. '{main_font_family}').
# 4) Replace all the format-string argument specifiers with variables
# 5) Apply the resulting stylesheet to the App
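        # Illustrative walk-through with the default variables above:
        #   "QWidget { font-size: $main_font_size; }"      (input QSS)
        #   "QWidget {{ font-size: {main_font_size}; }}"   (after steps 2-3)
        #   "QWidget { font-size: 16pt; }"                 (after step 4)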
#print ( "apply: {}".format(self.stylesheetFileName))
stylesheetText = None
try:
with open(self.stylesheetFileName, 'r') as content_file:
stylesheetText = content_file.read()
except:
pass
if not stylesheetText:
print(f'No file {self.stylesheetFileName}')
stylesheetText = self.defaultStylesheet
try:
with open(self.stylesheetFileName, 'w') as content_file:
content_file.write(stylesheetText)
except:
print(f'Could not write default stylesheet file {self.stylesheetFileName}')
# These next two could be done in one pass using the cool multiple_replace() from
# https://stackoverflow.com/questions/15175142/how-can-i-do-multiple-substitutions-using-regex-in-python
# But this is easier to read
stylesheetText = stylesheetText.replace('{', '{{')
stylesheetText = stylesheetText.replace('}', '}}')
# Turn all $word into {word}
stylesheetText = re.sub(r'\$(([a-z]|[A-Z])\w*)', r'{\1}', stylesheetText)
# for k, v in self.varsDict.items():
# print ( f'{k}: {v}' )
# substitute everything from our variables dict
result = stylesheetText.format_map(self.varsDict)
# apply
self.app.setStyleSheet(result)
def save_stylesheet_vars(self ):
"""Write our variables dict out to a file"""
with open(self.stylesheetVarsFileName, 'wb') as h:
pickle.dump(self.varsDict, h)
def set_missing_stylesheet_vars(self ):
"""Set all the missing variables in the variables dict to default values"""
for k in self.defaultVarsDict:
self.varsDict.setdefault(k, self.defaultVarsDict[k])
def read_stylesheet_vars(self ):
"""Read all the variables from saved file into our dict"""
try:
with open(self.stylesheetVarsFileName, 'rb') as h:
self.varsDict = pickle.loads(h.read())
except FileNotFoundError as e:
print(e)
# Maybe some values are missing, fix it up
self.set_missing_stylesheet_vars()
def zoom_in(self):
"""Increase the value of variables that influence the size of the UI"""
# Trim off 'pt' at the end of the string. Maybe a little fragile...
fontSize = int(self.varsDict['main_font_size'][0:-2])
fontSize += 1
self.varsDict['main_font_size'] = f'{fontSize}pt'
# Trim off 'px' at the end of the string. Also a little fragile...
scrollBarWidth = int(self.varsDict['scroll_bar_width'][0:-2])
scrollBarWidth += 1
self.varsDict['scroll_bar_width'] = f'{scrollBarWidth}px'
def zoom_out(self):
"""Decrease the value of variables that influence the size of the UI"""
# Trim off 'pt' at the end of the string. Maybe a little fragile...
fontSize = int(self.varsDict['main_font_size'][0:-2])
if fontSize > 0:
fontSize -= 1
self.varsDict['main_font_size'] = f'{fontSize}pt'
# Trim off 'px' at the end of the string. Also a little fragile...
scrollBarWidth = int(self.varsDict['scroll_bar_width'][0:-2])
if scrollBarWidth > 0:
scrollBarWidth -= 1
self.varsDict['scroll_bar_width'] = f'{scrollBarWidth}px'
# Variable setters / Properties - don't want to keep adding these for every variable
# and the way it is evolving, it seems like we don't have to
def set_main_font_family(self, family):
self.varsDict['main_font_family'] = family
def set_main_font_weight(self, weight):
self.varsDict['main_font_weight'] = weight
def set_main_font_size(self, size):
self.varsDict['main_font_size'] = size
## Test Code
import argparse, sys
from PyQt5.QtWidgets import QApplication, QVBoxLayout, QMainWindow
class TestAppMainWindow(QMainWindow):
def __init__(self, parsedArgs, **kwargs ):
super(TestAppMainWindow, self).__init__()
self.setup_ui()
self.show()
def setup_ui(self):
vbox = QVBoxLayout(self.centralWidget())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--test-write-vars', action='store_const', const=True,
help='Test writing variables to file')
parser.add_argument('-r', '--test-read-vars', action='store_const', const=True,
help='Test reading variables from file')
parsedArgs,unparsedArgs = parser.parse_known_args()
organizationName='Craton'
appName='StyMgrTest'
# Pass unparsed args to Qt, might have some X Windows args, like --display
qtArgs = sys.argv[:1] + unparsedArgs
app = QApplication(qtArgs)
settings = QSettings(organizationName, appName)
styMgr = StylesheetManager(app, settings, appName)
if parsedArgs.test_write_vars:
print('write')
styMgr.save_stylesheet_vars()
sys.exit(0)
if parsedArgs.test_read_vars:
print('read')
styMgr.read_stylesheet_vars()
for k, v in styMgr.varsDict.items():
print(k, v)
sys.exit(0)
mainw = TestAppMainWindow(parsedArgs, app=app, organizationName=organizationName, appName=appName)
sys.exit(app.exec_())
| 36.512367 | 112 | 0.635246 | 7,268 | 0.703378 | 0 | 0 | 0 | 0 | 0 | 0 | 4,991 | 0.483016 |
0e1e160e8d90a3c603ba44d2101e9bfc5828fd10 | 7,104 | py | Python | clickmodel-experiments/scripts/model/ClickModelExperiment.py | nut-hatch/LOVBench | 365ea4ad0d6258b840439506ce97acb3827c39e6 | [
"Apache-2.0"
] | null | null | null | clickmodel-experiments/scripts/model/ClickModelExperiment.py | nut-hatch/LOVBench | 365ea4ad0d6258b840439506ce97acb3827c39e6 | [
"Apache-2.0"
] | 1 | 2021-08-09T20:59:22.000Z | 2021-08-09T20:59:22.000Z | clickmodel-experiments/scripts/model/ClickModelExperiment.py | nut-hatch/LOVBench | 365ea4ad0d6258b840439506ce97acb3827c39e6 | [
"Apache-2.0"
] | null | null | null | __author__ = 'Anonymous'
import time
import csv
import os.path
import pyclick
from pyclick.utils.YandexRelPredChallengeParser import YandexRelPredChallengeParser
from pyclick.utils.Utils import Utils
from pyclick.click_models.Evaluation import LogLikelihood, Perplexity
from pyclick.click_models.UBM import UBM
from pyclick.click_models.DBN import DBN
from pyclick.click_models.SDBN import SDBN
from pyclick.click_models.DCM import DCM
from pyclick.click_models.CCM import CCM
from pyclick.click_models.CTR import DCTR, RCTR, GCTR
from pyclick.click_models.CM import CM
from pyclick.click_models.PBM import PBM
#
# Based on the original PyClick example from Ilya Markov: https://github.com/markovi/PyClick/blob/master/examples/Example.py
#
class ClickModelExperiment:
def __init__(self, output_path, search_log_file, click_model_name, search_sessions_num):
self.output_path = output_path
self.search_log_file = search_log_file
self.click_model_name = click_model_name
self.search_sessions_num = search_sessions_num
self.click_props_filename = "ClickProbability"
self.satisfaction_probs_filename = "SatisfactionProbability"
self.model_performance_filename = "PerformanceResults"
self.model_path = self.output_path + "models/"
try:
os.makedirs(self.model_path)
except OSError:
print ("folder exists")
else:
print ("folder created")
def run_experiment(self):
click_model = globals()[self.click_model_name]()
search_sessions = YandexRelPredChallengeParser().parse(self.search_log_file, self.search_sessions_num)
train_test_split = int(len(search_sessions) * 0.75)
train_sessions = search_sessions[:train_test_split]
train_queries = Utils.get_unique_queries(train_sessions)
test_sessions = Utils.filter_sessions(search_sessions[train_test_split:], train_queries)
test_queries = Utils.get_unique_queries(test_sessions)
print "-------------------------------"
print "Training on %d search sessions (%d unique queries)." % (len(train_sessions), len(train_queries))
print "-------------------------------"
start = time.time()
click_model.train(train_sessions)
end = time.time()
print "\tTrained %s click model in %i secs:\n%r" % (click_model.__class__.__name__, end - start, click_model)
self.evaluate_click_model(click_model, train_sessions, train_queries, test_sessions, test_queries)
model_file = self.model_path + click_model.__class__.__name__ + ".json"
with open(model_file, mode='w') as model_file:
model_file.write(click_model.to_json())
self.get_click_probs(click_model, search_sessions)
self.get_satisfaction_probs(click_model, search_sessions)
def evaluate_click_model(self, click_model, train_sessions, train_queries, test_sessions, test_queries):
print "-------------------------------"
print "Testing on %d search sessions (%d unique queries)." % (len(test_sessions), len(test_queries))
print "-------------------------------"
loglikelihood = LogLikelihood()
perplexity = Perplexity()
start = time.time()
ll_value_train = loglikelihood.evaluate(click_model, train_sessions)
ll_value_test = loglikelihood.evaluate(click_model, test_sessions)
end = time.time()
print "\tlog-likelihood: %f; time: %i secs" % (ll_value_test, end - start)
start = time.time()
perp_value_train = perplexity.evaluate(click_model, train_sessions)[0]
perp_value_test = perplexity.evaluate(click_model, test_sessions)[0]
end = time.time()
print "\tperplexity: %f; time: %i secs" % (perp_value_test, end - start)
model_performance_path = self.output_path + self.model_performance_filename + ".csv"
if not os.path.isfile(model_performance_path):
print "file does not exist"
with open(model_performance_path, mode='w') as model_performance_file:
performance_writer = csv.writer(model_performance_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
performance_writer.writerow(['TimeStamp', 'ClickLogFile', 'ClickModel', 'SearchSessions_Train', 'UniqueQueries_Train', 'SearchSessions_Test', 'UniqueQueries_Test', 'LogLikelihood_Train', 'LogLikelihood_Test', 'Perplexity_Train', 'Perplexity_Test'])
with open(model_performance_path, mode='a') as model_performance_file:
performance_writer = csv.writer(model_performance_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
performance_writer.writerow([time.time(), self.search_log_file, click_model.__class__.__name__, len(train_sessions), len(train_queries), len(test_sessions), len(test_queries), ll_value_train, ll_value_test, perp_value_train, perp_value_test])
def get_click_probs(self, click_model, search_sessions):
click_probabilites = []
query_cache = []
for search_session in search_sessions:
query = search_session.query
if query not in query_cache:
query_cache.append(query)
web_results = search_session.web_results
click_probs = click_model.get_full_click_probs(search_session)
for x in range(len(web_results)):
web_result = web_results[x]
click_prob = str(click_probs[x])
click_probabilites.append("\"" + query + "\",\"" + web_result.id + "\",\"" + click_prob + "\"\n")
# for rank, click_prob in enumerate(click_probs):
# print str(rank) + " " + str(click_prob)
# '../resources/output/VocabRankingClickProbabilities-v2.txt'
click_props_path = self.output_path + click_model.__class__.__name__ + "_" + self.click_props_filename + "_Raw.csv"
with open(click_props_path, 'w') as out:
out.writelines(click_probabilites)
out.close()
def get_satisfaction_probs(self, click_model, search_sessions):
satisfaction_probs = []
query_cache = []
for search_session in search_sessions:
query = search_session.query
if query not in query_cache:
query_cache.append(query)
web_results = search_session.web_results
for x in range(len(web_results)):
web_result = web_results[x]
satisfaction = click_model.predict_relevance(query, web_result.id)
# print query + " - " + web_result.id + " - " + str(relevance)
satisfaction_probs.append("\"" + query + "\",\"" + web_result.id + "\",\"" + str(satisfaction) + "\"\n")
satisfaction_probs_path = self.output_path + click_model.__class__.__name__ + "_" + self.satisfaction_probs_filename + "_Raw.csv"
with open(satisfaction_probs_path, 'w') as out:
out.writelines(satisfaction_probs)
out.close()
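# Minimal usage sketch (the log file name and model choice are assumed, not
# part of the original repository): train UBM on the first 1000 sessions of a
# Yandex-format click log and write all outputs under output/.
if __name__ == "__main__":
    experiment = ClickModelExperiment(
        output_path="output/",           # trailing slash required; "models/" is created inside
        search_log_file="clicklog.tsv",  # Yandex Relevance Prediction Challenge format
        click_model_name="UBM",          # any of the click models imported above
        search_sessions_num=1000)
    experiment.run_experiment()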
| 48.326531 | 265 | 0.6692 | 6,359 | 0.89513 | 0 | 0 | 0 | 0 | 0 | 0 | 1,138 | 0.160191 |
0e1e867c79f973710511eae3645a7ccaa06989d8 | 242 | py | Python | archive/2016/week5/homework/even.py | YAtOff/python0 | b5af5004131d64dd52d42746eddb72b6c43a13c7 | [
"Apache-2.0"
] | 6 | 2017-11-08T14:04:39.000Z | 2019-03-24T22:11:04.000Z | archive/2016/week5/homework/even.py | YAtOff/python0 | b5af5004131d64dd52d42746eddb72b6c43a13c7 | [
"Apache-2.0"
] | null | null | null | archive/2016/week5/homework/even.py | YAtOff/python0 | b5af5004131d64dd52d42746eddb72b6c43a13c7 | [
"Apache-2.0"
] | 7 | 2015-10-27T09:04:58.000Z | 2019-03-03T14:18:26.000Z | """
Define a function `is_even` that takes a number and returns `True` if the
number is even and `False` otherwise.
>>> is_even(4)
True
>>> is_even(5)
False
"""
def is_even(number):
    return number % 2 == 0
| 18.615385 | 79 | 0.652893 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 270 | 0.857143 |
0e1f37ae758638a4fc7ba5ee3a4f94668d053d38 | 2,165 | py | Python | piws/views/actions.py | neurospin/piws | 4ec6f60c6343623a82761c90c74642b4b372ffd1 | [
"CECILL-B"
] | null | null | null | piws/views/actions.py | neurospin/piws | 4ec6f60c6343623a82761c90c74642b4b372ffd1 | [
"CECILL-B"
] | 9 | 2015-05-18T09:04:35.000Z | 2017-03-24T10:47:23.000Z | piws/views/actions.py | neurospin/piws | 4ec6f60c6343623a82761c90c74642b4b372ffd1 | [
"CECILL-B"
] | 17 | 2015-03-16T08:27:47.000Z | 2017-08-04T16:26:29.000Z | ##########################################################################
# NSAp - Copyright (C) CEA, 2013
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# System import
from packaging import version
# Cubicweb import
import cubicweb
cw_version = version.parse(cubicweb.__version__)
if cw_version >= version.parse("3.21.0"):
    from cubicweb import _
else:
    # Older cubicweb releases do not export ``_``; fall back to a plain
    # identity marker so the ``_("...")`` calls below still resolve.
    _ = str
from cubicweb.predicates import is_instance
from cubicweb.predicates import authenticated_user
from cubicweb.web.action import Action
from cubicweb.web.views.wdoc import HelpAction, AboutAction
from cubicweb.web.views.actions import PoweredByAction
from cubicweb.web.views.actions import UserPreferencesAction
from cubicweb.web.views.actions import UserInfoAction
from logilab.common.registry import yes
###############################################################################
# ACTIONS
###############################################################################
class NeurospinAction(Action):
__regid__ = "neurospin"
__select__ = yes()
category = "footer"
order = 1
title = _("NeuroSpin")
def url(self):
return "http://i2bm.cea.fr/drf/i2bm/NeuroSpin"
class LicenseAction(Action):
__regid__ = "license"
__select__ = yes()
category = "footer"
order = 2
title = _("License")
def url(self):
return self._cw.build_url("license")
class PIWSPoweredByAction(Action):
__regid__ = "poweredby"
__select__ = yes()
category = "footer"
order = 3
title = _("Powered by NSAp")
def url(self):
return "https://github.com/neurospin/piws"
def registration_callback(vreg):
# Update the footer
vreg.register_and_replace(PIWSPoweredByAction, PoweredByAction)
vreg.register(NeurospinAction)
vreg.register(LicenseAction)
vreg.unregister(HelpAction)
vreg.unregister(AboutAction)
vreg.unregister(UserPreferencesAction)
vreg.unregister(UserInfoAction)
| 28.116883 | 79 | 0.627714 | 654 | 0.302079 | 0 | 0 | 0 | 0 | 0 | 0 | 779 | 0.359815 |
0e1f4a5b7788dcd9e9439809509e149a2ed438c4 | 505 | py | Python | events/migrations/0043_remove_premium_restrictions.py | alysivji/GetTogether | 403d9945fff019701de41d081ad4452e771e1ce1 | [
"BSD-2-Clause"
] | 446 | 2018-01-21T09:22:41.000Z | 2022-03-25T17:46:12.000Z | events/migrations/0043_remove_premium_restrictions.py | alysivji/GetTogether | 403d9945fff019701de41d081ad4452e771e1ce1 | [
"BSD-2-Clause"
] | 272 | 2018-01-03T16:55:39.000Z | 2022-03-11T23:12:30.000Z | events/migrations/0043_remove_premium_restrictions.py | alysivji/GetTogether | 403d9945fff019701de41d081ad4452e771e1ce1 | [
"BSD-2-Clause"
] | 100 | 2018-01-27T02:04:15.000Z | 2021-09-09T09:02:21.000Z | # Generated by Django 2.0 on 2018-08-25 14:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("events", "0042_allow_team_without_country")]
operations = [
migrations.RemoveField(model_name="team", name="is_premium"),
migrations.RemoveField(model_name="team", name="premium_by"),
migrations.RemoveField(model_name="team", name="premium_expires"),
migrations.RemoveField(model_name="team", name="premium_started"),
]
| 31.5625 | 74 | 0.710891 | 422 | 0.835644 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.332673 |
0e1fd30efaef6619653d82bc4e1ec4b62cc2fe4c | 1,904 | py | Python | examples/Utopia2_planning_single_node_DN/tutorial2.py | AAmedeo/Hypatia | 22e14bc9bbb043c4d7dbf5b881d9dadbce6c0de1 | [
"Apache-2.0"
] | 5 | 2021-12-17T15:16:12.000Z | 2022-03-24T10:42:33.000Z | examples/Utopia2_planning_single_node_DN/tutorial2.py | AAmedeo/Hypatia | 22e14bc9bbb043c4d7dbf5b881d9dadbce6c0de1 | [
"Apache-2.0"
] | 10 | 2022-01-11T09:44:52.000Z | 2022-03-30T12:29:50.000Z | examples/Utopia2_planning_single_node_DN/tutorial2.py | AAmedeo/Hypatia | 22e14bc9bbb043c4d7dbf5b881d9dadbce6c0de1 | [
"Apache-2.0"
] | 3 | 2021-12-17T15:30:12.000Z | 2022-03-23T15:37:42.000Z | from hypatia import Model,Plotter
#%%
utopia = Model(
path = 'sets',
mode = 'Planning'
)
#%%
#utopia.create_data_excels(
# path = r'parameters'
#)
#%%
utopia.read_input_data(
path = r'parameters'
)
#%%
utopia.run(
solver = 'scipy',
verbosity = True,
)
#%%
utopia.to_csv(path='results')
#%%
#utopia.create_config_file(path=r'config.xlsx')
#%%
results = Plotter(utopia,config=r'config.xlsx',hourly_resolution=False)
#%%
# Sketching the new installed capacity of different technologies for a given tech group
results.plot_new_capacity(
path = r'plots/newcapacity.html',
tech_group = 'Power Generation',
cummulative=False,
)
#%%
# Sketching the total installed capacity of different technologies for a given tech group (considering the decommissioned capacities)
results.plot_total_capacity(
path = r'plots/totalcapacity.html',
tech_group = 'Power Generation',
kind="bar",
decom_cap=True,
)
#%%
# Sketching the annual production of each technology
results.plot_prod_by_tech(
tech_group ='Power Generation',
path = r'plots/productionbytech.html',
)
#%%
# Sketching the annual production of each technology in the Refinery group
results.plot_prod_by_tech(
tech_group ='Refinery',
path = r'plots/productionbytech_oil.html',
)
#%%
# Sketching the production and consumption share of each technology, including imports and exports
results.plot_fuel_prod_cons(
path = r'plots/prod_con_share.html',
years = [2030],
fuel_group = 'Electricity',
trade=False,
)
#%%
# Sketching the production and consumption share of each technology, including imports and exports
results.plot_fuel_prod_cons(
path = r'plots/prod_con_share_oil.html',
years = [2030],
fuel_group = 'Fuel',
trade=False,
)
#%%
# Sketching the annual CO2-equivalent emissions
results.plot_emissions(
path = r'plots/emissions.html',
tech_group = 'Resource Extraction',
) | 25.052632 | 131 | 0.719013 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,075 | 0.564601 |
0e224344c163f81deb574207f38b8233548b28f8 | 119 | bzl | Python | test/com/facebook/buck/parser/testdata/disable_implicit_native_rules/skylark/implicit_in_extension_bzl/extension.bzl | Unknoob/buck | 2dfc734354b326f2f66896dde7746a11965d5a13 | [
"Apache-2.0"
] | 8,027 | 2015-01-02T05:31:44.000Z | 2022-03-31T07:08:09.000Z | test/com/facebook/buck/parser/testdata/disable_implicit_native_rules/skylark/implicit_in_extension_bzl/extension.bzl | Unknoob/buck | 2dfc734354b326f2f66896dde7746a11965d5a13 | [
"Apache-2.0"
] | 2,355 | 2015-01-01T15:30:53.000Z | 2022-03-30T20:21:16.000Z | test/com/facebook/buck/parser/testdata/disable_implicit_native_rules/skylark/implicit_in_extension_bzl/extension.bzl | Unknoob/buck | 2dfc734354b326f2f66896dde7746a11965d5a13 | [
"Apache-2.0"
] | 1,280 | 2015-01-09T03:29:04.000Z | 2022-03-30T15:14:14.000Z | """ Example module """
def java_maker(*args, **kwargs):
""" Make you a java """
java_library(*args, **kwargs)
| 19.833333 | 33 | 0.588235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.378151 |
0e22d4557755570980a88c3dd6efa6cf9f1ddd9e | 3,600 | py | Python | backend/config/settings/base.py | r0tii/process-status-viewer | 6c94a7a6f5e37f37f63d6140c806a0b6fc49ae1c | [
"MIT"
] | null | null | null | backend/config/settings/base.py | r0tii/process-status-viewer | 6c94a7a6f5e37f37f63d6140c806a0b6fc49ae1c | [
"MIT"
] | null | null | null | backend/config/settings/base.py | r0tii/process-status-viewer | 6c94a7a6f5e37f37f63d6140c806a0b6fc49ae1c | [
"MIT"
] | null | null | null | """
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ
env = environ.Env()
# GENERAL
# -------------------------------------------------------------------------
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
PROJECT_NAME = "process_status_monitoring"
APPS_DIR = BASE_DIR / PROJECT_NAME
SECRET_KEY = env("SECRET_KEY")
DEBUG = env.bool("DEBUG", default=False)  # env("DEBUG") alone returns a string, which is always truthy
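# Example .env entries these settings read (values are illustrative only):
#   SECRET_KEY=change-me
#   DEBUG=False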
# APPS
# ------------------------------------------------------------------------------
INSTALLED_APPS = [
# django
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
# third-party
"rest_framework",
"corsheaders",
# Local
"process_status_monitoring.processes.apps.ProcessesConfig",
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# DATABASES
# ------------------------------------------------------------------------------
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
# PROJECT CONFIG
# ------------------------------------------------------------------------------
ROOT_URLCONF = "config.urls"
WSGI_APPLICATION = "config.wsgi.application"
APPEND_SLASH = False
# REST FRAMEWORK
# ------------------------------------------------------------------------------
REST_FRAMEWORK = {
"DEFAULT_RENDERER_CLASSES": [
"rest_framework.renderers.JSONRenderer",
],
"DEFAULT_PARSER_CLASSES": [
"rest_framework.parsers.JSONParser",
],
}
# PASSWORD VALIDATION
# ------------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", # noqa
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# INTERNATIONALIZATION / LOCALIZATION
# ------------------------------------------------------------------------------
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# STATIC FILES (CSS, JAVASCRIPT, IMAGES)
# ------------------------------------------------------------------------------
STATIC_URL = "/static/"
STATIC_ROOT = BASE_DIR / "staticfiles"
MEDIA_URL = "/media/"
MEDIA_ROOT = BASE_DIR / "media"
# TEMPLATES
# ------------------------------------------------------------------------------
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
| 30.252101 | 99 | 0.536667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,565 | 0.7125 |
0e24145bbea1ff58122f48a87e0382857109d106 | 2,039 | py | Python | virtual_agent.py | kavimathi26-2001/virtual-agent-built. | df553a16ef4e7e32a93b35a3d55e6891f21ac98d | [
"Apache-2.0"
] | 23 | 2021-10-01T13:10:29.000Z | 2021-10-30T07:57:32.000Z | virtual_agent.py | kavimathi26-2001/virtual-agent-built. | df553a16ef4e7e32a93b35a3d55e6891f21ac98d | [
"Apache-2.0"
] | null | null | null | virtual_agent.py | kavimathi26-2001/virtual-agent-built. | df553a16ef4e7e32a93b35a3d55e6891f21ac98d | [
"Apache-2.0"
] | 28 | 2021-09-13T05:41:56.000Z | 2021-10-30T07:31:46.000Z | #import all the libraries required
import csv, pickle, numpy as np, os
from sentence_transformers import SentenceTransformer, util
#Virtual Agent Model
class VAModel():
def __init__(self):
self.model = SentenceTransformer("stsb-mpnet-base-v2") #load pretrained model
self.qa = dict()
self.emb = list()
#train virtual assistant
def train(self, training_file):
# if model doesn't exist in the location, compute embeddings again and store as a model
if not os.path.exists(r"models/model_va.pickle"):
dict_model = dict()
with open(training_file, "r", encoding="utf-8", errors="ignore") as file:
reader = csv.reader(file)
for qa_pair in reader:
self.qa[qa_pair[0]] = qa_pair[1]
self.emb.append(self.model.encode(qa_pair[0])) #compute embeddings
dict_model["qa"] = self.qa
dict_model["embeddings"] = self.emb
#persist trained model
with open(r"models/model_va.pickle", "wb") as file:
pickle.dump(dict_model, file)
#predict answer to user query
def pred_answer(self, usr_query):
query_embedding = self.model.encode(usr_query) #compute embedding for the user query
if not self.qa and not self.emb: #load trained model if not done already
with open(r"models/model_va.pickle", "rb") as file:
dict_model = pickle.load(file)
self.qa = dict_model["qa"]
self.emb = dict_model["embeddings"]
        sim_scores = util.pytorch_cos_sim(query_embedding, self.emb) #compute similarity scores
matched_query = list(self.qa.keys())[np.argmax(sim_scores)] #identify matched query based on the best score
answer = self.qa.get(matched_query) #get answer to the matched query
        return answer if answer else "Sorry, would you rephrase it?"
def free_up(self):
self.emb = None
self.model = None
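# Minimal usage sketch (the file name "faq.csv" is assumed, not part of the
# original project; train() expects one "question,answer" pair per CSV row):
#
#   agent = VAModel()
#   agent.train("faq.csv")
#   print(agent.pred_answer("How do I reset my password?"))
#   agent.free_up()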
| 49.731707 | 115 | 0.62972 | 1,886 | 0.924963 | 0 | 0 | 0 | 0 | 0 | 0 | 623 | 0.305542 |
0e2735db31e23d6ef2afd7530322a682fcd103d9 | 2,703 | py | Python | hyperloglog/hashfunctions.py | mlkra/various-algorithms | 9cb2d21fe9ec0613c88e70f70e0ff1d471e43079 | [
"MIT"
] | 2 | 2021-12-04T16:12:03.000Z | 2021-12-25T06:57:27.000Z | mincount/hashfunctions.py | mlkra/various-algorithms | 9cb2d21fe9ec0613c88e70f70e0ff1d471e43079 | [
"MIT"
] | null | null | null | mincount/hashfunctions.py | mlkra/various-algorithms | 9cb2d21fe9ec0613c88e70f70e0ff1d471e43079 | [
"MIT"
] | null | null | null | from typing import Callable
import hashlib
import zlib
def __common(n: int, h: Callable, digest_size: int, b=0) -> float:
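    """Hash the integer ``n`` (as 8 big-endian bytes) with ``h`` and map the
    digest onto a float in [0, 1); ``digest_size`` is the digest length in
    bits, and if ``b`` > 0 only the top ``b`` bits of the digest are used."""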
assert b <= digest_size
if b == 0:
return int.from_bytes(h(n.to_bytes(8, "big")).digest(), 'big') / 2**digest_size
else:
return (int.from_bytes(h(n.to_bytes(8, "big")).digest(), 'big') >> (digest_size - b)) / 2**b
def md5(n: int, b=0) -> float:
return __common(n, hashlib.md5, 128, b)
def sha1(n: int, b=0) -> float:
return __common(n, hashlib.sha1, 160, b)
def sha12(n: int) -> int:
return int(hashlib.sha1(n.to_bytes(8, "big")).hexdigest()[:8], 16)
def sha224(n: int, b=0) -> float:
return __common(n, hashlib.sha224, 224, b)
def sha256(n: int, b=0) -> float:
return __common(n, hashlib.sha256, 256, b)
def sha384(n: int, b=0) -> float:
return __common(n, hashlib.sha384, 384, b)
def sha512(n: int, b=0) -> float:
return __common(n, hashlib.sha512, 512, b)
def blake2b(n: int, b=0) -> float:
return __common(n, hashlib.blake2b, 512, b)
def blake2s(n: int, b=0) -> float:
return __common(n, hashlib.blake2s, 256, b) #pylint: disable=no-member
def blake2s2(n: int) -> int:
return int(hashlib.blake2s(n.to_bytes(8, "big")).hexdigest()[:8], 16) #pylint: disable=no-member
def sha3_224(n: int, b=0) -> float:
return __common(n, hashlib.sha3_224, 224, b)
def sha3_256(n: int, b=0) -> float:
return __common(n, hashlib.sha3_256, 256, b)
def sha3_384(n: int, b=0) -> float:
return __common(n, hashlib.sha3_384, 384, b)
def sha3_512(n: int, b=0) -> float:
return __common(n, hashlib.sha3_512, 512, b)
def adler32(n: int, b=0) -> float:
if b == 0:
return zlib.adler32(n.to_bytes(8, "big")) / 2**32
else:
return (zlib.adler32(n.to_bytes(8, "big")) >> (32 - b)) / 2**b
def adler322(n: int) -> int:
return zlib.adler32(n.to_bytes(8, "big"))
def crc32(n: int, b=0) -> float:
if b == 0:
return zlib.crc32(n.to_bytes(8, "big")) / 2**32
else:
return (zlib.crc32(n.to_bytes(8, "big")) >> (32 - b)) / 2**b
def crc322(n: int) -> int:
return zlib.crc32(n.to_bytes(8, "big"))
hash_functions = {
"md5": md5,
"sha1": sha1,
"sha224": sha224,
"sha256": sha256,
"sha384": sha384,
"sha512": sha512,
"blake2b": blake2b,
"blake2s": blake2s,
"sha3_224": sha3_224,
"sha3_256": sha3_256,
"sha3_384": sha3_384,
"sha3_512": sha3_512,
"adler32": adler32,
"crc32": crc32
}
def main():
for name, h in hash_functions.items():
a = [h(n, 16) for n in range(10000)]
print(name)
print(min(a))
print(max(a))
if __name__ == "__main__":
main()
| 22.525 | 101 | 0.594895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 239 | 0.08842 |
0e282455991d9cc519b5a1a793207bc93c3d9068 | 793 | py | Python | data_loaders/data_loader_interface.py | jennis0/pdf2vtt | 4aad37ef3dfce4d83f3a2744856879598cd4446f | [
"MIT"
] | null | null | null | data_loaders/data_loader_interface.py | jennis0/pdf2vtt | 4aad37ef3dfce4d83f3a2744856879598cd4446f | [
"MIT"
] | null | null | null | data_loaders/data_loader_interface.py | jennis0/pdf2vtt | 4aad37ef3dfce4d83f3a2744856879598cd4446f | [
"MIT"
] | null | null | null | import abc
from typing import List
from utils.datatypes import Source
class DataLoaderInterface(object):
@abc.abstractmethod
def get_name() -> str:
'''Returns an internal name for this loader'''
raise NotImplementedError("users must define a name for this loader")
@staticmethod
@abc.abstractmethod
def get_filetypes() -> List[str]:
'''Returns a list of file types supported by this data loader'''
raise NotImplementedError('users must define a list of supported filetypes.')
@abc.abstractmethod
def load_data_from_file(self, filepath: str) -> Source:
'''Reads file and extracts lines of texts. Returns one section per page'''
raise NotImplementedError("userers must define a function to load data from a file.") | 36.045455 | 93 | 0.706179 | 721 | 0.909206 | 0 | 0 | 669 | 0.843632 | 0 | 0 | 334 | 0.421185 |
0e28d4b2f8c651a0637bb6e71017d6d807b4a977 | 247 | py | Python | Learn/30-Days-Of-Code/Day 28/regexdb.py | Adriel-M/HackerRank | 0f361bb7eaa3d2db3dd96de511c4b7fa02efa8c5 | [
"MIT"
] | 1 | 2021-11-24T16:43:10.000Z | 2021-11-24T16:43:10.000Z | Learn/30-Days-Of-Code/Day 28/regexdb.py | Adriel-M/HackerRank | 0f361bb7eaa3d2db3dd96de511c4b7fa02efa8c5 | [
"MIT"
] | null | null | null | Learn/30-Days-Of-Code/Day 28/regexdb.py | Adriel-M/HackerRank | 0f361bb7eaa3d2db3dd96de511c4b7fa02efa8c5 | [
"MIT"
] | null | null | null | N = int(input().strip())
names = []
for _ in range(N):
name,email = input().strip().split(' ')
name,email = [str(name),str(email)]
if email.endswith("@gmail.com"):
names.append(name)
names.sort()
for n in names:
print(n)
| 19 | 43 | 0.578947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.060729 |
0e2930d0e963e50e57e9afabe5382cdd9bed7229 | 4,176 | py | Python | dbglang/dbp.py | thautwarm/dbg-lang | 22b5230bc1658494b93a2741aab3ef63a06fdee3 | [
"MIT"
] | 1 | 2018-04-09T11:43:52.000Z | 2018-04-09T11:43:52.000Z | dbglang/dbp.py | thautwarm/dbg-lang | 22b5230bc1658494b93a2741aab3ef63a06fdee3 | [
"MIT"
] | null | null | null | dbglang/dbp.py | thautwarm/dbg-lang | 22b5230bc1658494b93a2741aab3ef63a06fdee3 | [
"MIT"
] | 1 | 2019-06-21T08:33:24.000Z | 2019-06-21T08:33:24.000Z | from Ruikowa.ObjectRegex.Node import Ref, AstParser, SeqParser, LiteralParser, CharParser, MetaInfo, DependentAstParser
try:
from .etoken import token
except:
from etoken import token
import re
namespace = globals()
recurSearcher = set()
PrimaryDefList = AstParser([Ref('FieldDef'), SeqParser([LiteralParser(',', name='\',\''), Ref('FieldDef')])],
name='PrimaryDefList', toIgnore=[{}, {','}])
FieldDefList = AstParser([SeqParser([Ref('FieldDef'), SeqParser([LiteralParser('\n', name='\'\n\'')])]),
SeqParser([LiteralParser('\n', name='\'\n\'')])], name='FieldDefList', toIgnore=[{}, {'\n'}])
TableDef = AstParser(
[Ref('Symbol'), LiteralParser('(', name='\'(\''), Ref('PrimaryDefList'), LiteralParser(')', name='\')\''),
SeqParser([LiteralParser('\n', name='\'\n\'')]), LiteralParser('{', name='\'{\''),
SeqParser([LiteralParser('\n', name='\'\n\'')]), Ref('FieldDefList'),
SeqParser([Ref('ReprDef'), SeqParser([LiteralParser('\n', name='\'\n\'')])], atmost=1),
LiteralParser('}', name='\'}\'')], name='TableDef', toIgnore=[{}, {'{', '}', '(', ')', '\n'}])
FieldDef = AstParser([Ref('Symbol'), LiteralParser(':', name='\':\''), Ref('Type')], name='FieldDef',
toIgnore=[{}, {':'}])
Type = AstParser([Ref('Symbol'), SeqParser([Ref('Option')]),
SeqParser([LiteralParser('=', name='\'=\''), Ref('Default')], atmost=1)], name='Type',
toIgnore=[{}, {'='}])
Option = AstParser([LiteralParser('?', name='\'?\'')], [LiteralParser('!', name='\'!\'')],
[LiteralParser('~', name='\'~\'')], name='Option')
Default = AstParser([SeqParser([LiteralParser('.+', name='\'.+\'', isRegex=True)], atleast=1)], name='Default')
ReprDef = AstParser([LiteralParser('repr', name='\'repr\''), DependentAstParser(
[LiteralParser('{', name='\'{\''), SeqParser([LiteralParser('\n', name='\'\n\'')]), Ref('SymbolList'),
SeqParser([LiteralParser('\n', name='\'\n\'')]), LiteralParser('}', name='\'}\'')],
[LiteralParser('=', name='\'=\''), LiteralParser('all', name='\'all\'')])], name='ReprDef',
toIgnore=[{}, {'=', '{', '}', 'all', 'repr', '\n'}])
SymbolList = AstParser([Ref('Symbol'), SeqParser([LiteralParser(',', name='\',\''), Ref('Symbol')])], name='SymbolList',
toIgnore=[{}, {','}])
Comment = AstParser([LiteralParser('#', name='\'#\''), Ref('Default')], name='Comment')
Symbol = AstParser([LiteralParser('[a-zA-Z][a-zA-Z_]*', name='\'[a-zA-Z][a-zA-Z_]*\'', isRegex=True)], name='Symbol')
WeightedSymbol = AstParser([Ref('Symbol'), SeqParser([LiteralParser('^', name='\'^\'')])], name='WeightedSymbol')
Relation = AstParser(
[Ref('WeightedSymbol'), Ref('Left'), LiteralParser('-', name='\'-\''), Ref('Right'), Ref('WeightedSymbol'),
SeqParser([LiteralParser('\n', name='\'\n\'')]), LiteralParser('{', name='\'{\''),
SeqParser([LiteralParser('\n', name='\'\n\'')]), SeqParser([Ref('FieldDefList')], atmost=1),
LiteralParser('}', name='\'}\'')], name='Relation', toIgnore=[{}, {'-', '}', '{', '\n'}])
Left = AstParser([SeqParser([LiteralParser('<', name='\'<\'')], atleast=1, atmost=2)], name='Left')
Right = AstParser([SeqParser([LiteralParser('>', name='\'>\'')], atleast=1, atmost=2)], name='Right')
Stmts = AstParser(
[SeqParser([DependentAstParser([LiteralParser('\n', name='\'\n\'')], [Ref('Relation')], [Ref('TableDef')])])],
name='Stmts', toIgnore=[{}, {'\n'}])
PrimaryDefList.compile(namespace, recurSearcher)
FieldDefList.compile(namespace, recurSearcher)
TableDef.compile(namespace, recurSearcher)
FieldDef.compile(namespace, recurSearcher)
Type.compile(namespace, recurSearcher)
Option.compile(namespace, recurSearcher)
Default.compile(namespace, recurSearcher)
ReprDef.compile(namespace, recurSearcher)
SymbolList.compile(namespace, recurSearcher)
Comment.compile(namespace, recurSearcher)
Symbol.compile(namespace, recurSearcher)
WeightedSymbol.compile(namespace, recurSearcher)
Relation.compile(namespace, recurSearcher)
Left.compile(namespace, recurSearcher)
Right.compile(namespace, recurSearcher)
Stmts.compile(namespace, recurSearcher)
| 64.246154 | 120 | 0.608477 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 886 | 0.212165 |
0e2afcb852554bb61271877076847899b292fc37 | 4,128 | py | Python | App/components/combEntrada.py | Alexfm101/automata | a39760a04d384ef96ce49cac2517d7248380bd72 | [
"MIT"
] | null | null | null | App/components/combEntrada.py | Alexfm101/automata | a39760a04d384ef96ce49cac2517d7248380bd72 | [
"MIT"
] | null | null | null | App/components/combEntrada.py | Alexfm101/automata | a39760a04d384ef96ce49cac2517d7248380bd72 | [
"MIT"
] | null | null | null | import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
data_edoSiguiente = [[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],
[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],
[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],
[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4]]
data_entrada = [[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],
[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],
[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],
[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4]]
data_edoActual = [[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],
[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],
[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],
[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4]]
class combEntrada(QDialog):
def __init__(self):
        super(combEntrada, self).__init__()
layout = QGridLayout()
self.setLayout(layout)
self.data_edoSiguiente = data_edoSiguiente
self.data_edoActual = data_edoActual
self.data_entrada = data_entrada
        # tables
entrada = QTableWidget(16, 4)
salida = QTableWidget(16, 4)
estado = QTableWidget(16, 4)
newitem = QTableWidgetItem()
label_entrada = QLabel()
        label_entrada.setText("tabla de entradas")
label_salida = QLabel()
label_salida.setText("Estado siguiente")
label_edo = QLabel()
label_edo.setText("estado actual")
        # buttons
ok = QPushButton('ok')
ok2 = QPushButton('ok2')
ok3 = QPushButton('ok3')
def _matrix():
            for i in range(0, 16):
for j in range(0, 4):
newitem = entrada.item(i, j)
if (newitem == None):
a = "x"
pass
elif (not newitem.text() == "1" and not newitem.text() == "0"):
a = "x"
pass
else:
a = newitem.text()
pass
data_entrada[i][j] = a
def _matrix2():
for i in range(0, 16):
for j in range(0, 4):
newitem = salida.item(i, j)
if (newitem == None):
a = "x"
pass
elif (not newitem.text() == "1" and not newitem.text() == "0"):
a = "x"
pass
else:
a = newitem.text()
pass
data_edoSiguiente[i][j] = a
def _matrix3():
for i in range(0, 16):
for j in range(0, 4):
newitem = estado.item(i, j)
if (newitem == None):
a = "x"
pass
elif (not newitem.text() == "1" and not newitem.text() == "0"):
a = "x"
pass
else:
a = newitem.text()
pass
data_edoActual[i][j] = a
def _print():
print(self.data_entrada)
def _print2():
print(self.data_edoSiguiente)
def _print3():
print(self.data_edoActual)
entrada.cellChanged.connect(_matrix)
salida.cellChanged.connect(_matrix2)
estado.cellChanged.connect(_matrix3)
ok.clicked.connect(_print)
ok2.clicked.connect(_print2)
ok3.clicked.connect(_print3)
        # lay out the widgets
layout.addWidget(entrada, 1, 0)
layout.addWidget(salida, 1, 1)
layout.addWidget(estado,1,2)
layout.addWidget(ok,2,0)
layout.addWidget(ok2, 2, 1)
layout.addWidget(ok3,2,2)
layout.addWidget(label_entrada, 0, 0)
layout.addWidget(label_salida, 0, 1)
layout.addWidget(label_edo,0,2)
| 33.024 | 83 | 0.428052 | 3,230 | 0.782461 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.029554 |
0e2b1e0932a1e36d8ff5f038e7d31ff803df6266 | 1,794 | py | Python | pythonAlgorithm/datastrcture/Kth Smallest Number in Sorted Matrix.py | Sky-zzt/lintcodePractice | d6436b296c5865d85e55c8ad4fcdbb0165d4ebb1 | [
"MIT"
] | 1 | 2020-09-15T07:58:55.000Z | 2020-09-15T07:58:55.000Z | pythonAlgorithm/datastrcture/Kth Smallest Number in Sorted Matrix.py | Sky-zzt/lintcodePractice | d6436b296c5865d85e55c8ad4fcdbb0165d4ebb1 | [
"MIT"
] | null | null | null | pythonAlgorithm/datastrcture/Kth Smallest Number in Sorted Matrix.py | Sky-zzt/lintcodePractice | d6436b296c5865d85e55c8ad4fcdbb0165d4ebb1 | [
"MIT"
] | null | null | null | import heapq
class Solution:
"""
@param matrix: a matrix of integers
@param k: An integer
@return: the kth smallest number in the matrix
    Find the kth smallest integer (in increasing order) in a sorted matrix.
    A sorted matrix is one in which every row and every column is increasing.
Example
    Sample 1:
    Input:
[
[1 ,5 ,7],
[3 ,7 ,8],
[4 ,8 ,9],
]
k = 4
    Output: 5
    Sample 2:
    Input:
[
[1, 2],
[3, 4]
]
k = 3
    Output: 3
Challenge
    Time complexity O(k log n), where n is the max of the matrix's width and height
"""
    # TODO: in Java a TreeSet could be used here; it keeps elements sorted and still removes in O(log n)
def kthSmallest(self, nums, k):
# write your code here
self.minheap, self.maxheap = [], []
medians = []
for i in range(len(nums)):
            self.add(nums, i, k)
medians.append(self.median)
return medians
@property
def median(self):
if len(self.minheap) > len(self.maxheap):
return self.minheap[0]
return -self.maxheap[0]
    def add(self, nums, index, winsize):
        value = nums[index]
        if len(self.maxheap) + len(self.minheap) >= winsize:
            # evict the value that just slid out of the window
            self.remove(nums[index - winsize])
        if self.maxheap and value > -self.maxheap[0]:
            heapq.heappush(self.minheap, value)
        elif self.minheap and value > self.minheap[0]:
            heapq.heappush(self.minheap, value)
        else:
            heapq.heappush(self.maxheap, -value)
        self.modifyTwoHeapsSize()
    def remove(self, value):
        # O(window) removal: drop the value, restore heap order, rebalance
        if value in self.minheap:
            self.minheap.remove(value)
            heapq.heapify(self.minheap)
        else:
            self.maxheap.remove(-value)  # the max-heap stores negated values
            heapq.heapify(self.maxheap)
        self.modifyTwoHeapsSize()
def modifyTwoHeapsSize(self):
if len(self.maxheap) + 2 == len(self.minheap):
heapq.heappush(self.maxheap, -heapq.heappop(self.minheap))
if len(self.minheap) + 2 == len(self.maxheap):
heapq.heappush(self.minheap, -heapq.heappop(self.maxheap))
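    # Illustrative check: Solution().kthSmallest([1, 2, 3, 4], 2) yields the
    # sliding-window (lower) medians [1, 1, 2, 3].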
| 23.92 | 70 | 0.544593 | 1,938 | 0.991812 | 0 | 0 | 148 | 0.075742 | 0 | 0 | 647 | 0.331116 |
0e2c27724ea07da3d95021b9be790ef2004472e6 | 2,158 | py | Python | thriftpy2/contrib/aio/transport/framed.py | JonnoFTW/thriftpy2 | 3b4d28d611a752f8796604ee274f03c517670a43 | [
"MIT"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | thriftpy2/contrib/aio/transport/framed.py | JonnoFTW/thriftpy2 | 3b4d28d611a752f8796604ee274f03c517670a43 | [
"MIT"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | thriftpy2/contrib/aio/transport/framed.py | JonnoFTW/thriftpy2 | 3b4d28d611a752f8796604ee274f03c517670a43 | [
"MIT"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import struct
import asyncio
from io import BytesIO
from .base import TAsyncTransportBase, readall
from .buffered import TAsyncBufferedTransport
class TAsyncFramedTransport(TAsyncTransportBase):
"""Class that wraps another transport and frames its I/O when writing."""
def __init__(self, trans):
self._trans = trans
self._rbuf = BytesIO()
self._wbuf = BytesIO()
def is_open(self):
return self._trans.is_open()
@asyncio.coroutine
def open(self):
return (yield from self._trans.open())
def close(self):
return self._trans.close()
@asyncio.coroutine
def read(self, sz):
# Important: don't attempt to read the next frame if the caller
# doesn't actually need any data.
if sz == 0:
return b''
ret = self._rbuf.read(sz)
if len(ret) != 0:
return ret
yield from self.read_frame()
return self._rbuf.read(sz)
@asyncio.coroutine
def read_frame(self):
buff = yield from readall(self._trans.read, 4)
sz, = struct.unpack('!i', buff)
frame = yield from readall(self._trans.read, sz)
self._rbuf = BytesIO(frame)
def write(self, buf):
self._wbuf.write(buf)
@asyncio.coroutine
def flush(self):
# reset wbuf before write/flush to preserve state on underlying failure
out = self._wbuf.getvalue()
self._wbuf = BytesIO()
# N.B.: Doing this string concatenation is WAY cheaper than making
# two separate calls to the underlying socket object. Socket writes in
# Python turn out to be REALLY expensive, but it seems to do a pretty
# good job of managing string buffer operations without excessive
# copies
self._trans.write(struct.pack("!i", len(out)) + out)
yield from self._trans.flush()
def getvalue(self):
return self._trans.getvalue()
class TAsyncFramedTransportFactory(object):
def get_transport(self, trans):
return TAsyncBufferedTransport(TAsyncFramedTransport(trans))
| 28.773333 | 79 | 0.647822 | 1,941 | 0.899444 | 1,191 | 0.5519 | 1,283 | 0.594532 | 0 | 0 | 552 | 0.255792 |
0e2d0a35bac66187f607550adfc8a5821291ee5b | 3,493 | py | Python | test/test-funders.py | yurivict/habanero | 857897a88811153f7460472219fd78d4e68bdc12 | [
"MIT"
] | null | null | null | test/test-funders.py | yurivict/habanero | 857897a88811153f7460472219fd78d4e68bdc12 | [
"MIT"
] | 1 | 2015-10-07T02:20:24.000Z | 2015-10-07T02:20:24.000Z | test/test-funders.py | yurivict/habanero | 857897a88811153f7460472219fd78d4e68bdc12 | [
"MIT"
] | null | null | null | import pytest
import os
import requests
from habanero import exceptions, Crossref
from requests.exceptions import HTTPError
cr = Crossref()
@pytest.mark.vcr
def test_funders():
"funders - basic test"
res = cr.funders(limit=2)
assert dict == res.__class__
assert dict == res["message"].__class__
assert 2 == res["message"]["items-per-page"]
@pytest.mark.vcr
def test_funders_query():
"funders - param: query"
res = cr.funders(query="NSF", limit=2)
assert dict == res.__class__
assert dict == res["message"].__class__
assert 2 == res["message"]["items-per-page"]
@pytest.mark.vcr
def test_funders_sample_err():
with pytest.raises(exceptions.RequestError):
cr.funders(sample=2)
@pytest.mark.vcr
def test_funders_filter_fails_noidsworks():
with pytest.raises(exceptions.RequestError):
cr.funders(filter={"from_pub_date": "2014-03-03"})
@pytest.mark.vcr
def test_funders_filter_fails_noids():
with pytest.raises(exceptions.RequestError):
cr.funders(works=True, filter={"has_assertion": True})
@pytest.mark.vcr
def test_funders_filter_works():
"funders - filter works when used with id and works=True"
res = cr.funders(
ids="10.13039/100000001", works=True, filter={"has_assertion": True}
)
assert dict == res.__class__
assert 20 == res["message"]["items-per-page"]
@pytest.mark.vcr
def test_funders_fail_limit():
with pytest.raises(KeyError):
cr.funders(limit="things")
@pytest.mark.vcr
def test_funders_fail_offset():
with pytest.raises(KeyError):
cr.funders(offset="things")
@pytest.mark.vcr
def test_funders_fail_sort():
with pytest.raises(exceptions.RequestError):
cr.funders(sort="things")
@pytest.mark.vcr
def test_funders_field_queries():
"funders - param: kwargs - field queries work as expected"
res = cr.funders(
ids="10.13039/100000001",
works=True,
query_container_title="engineering",
filter={"type": "journal-article"},
limit=100,
)
titles = [x.get("title") for x in res["message"]["items"]]
assert dict == res.__class__
assert 5 == len(res["message"])
assert list == titles.__class__
assert 100 == len(titles)
@pytest.mark.vcr
def test_funders_query_filters_not_allowed_with_dois():
with pytest.raises(HTTPError):
cr.funders(ids="10.13039/100000001", query_container_title="engineering")
@pytest.mark.vcr
def test_funders_bad_id_warn():
"funders - param: warn"
with pytest.warns(UserWarning):
out = cr.funders(ids="10.13039/notarealdoi", warn=True)
assert out is None
@pytest.mark.vcr
def test_funders_mixed_ids_warn():
"funders - param: warn"
with pytest.warns(UserWarning):
out = cr.funders(ids=["10.13039/100000001", "10.13039/notarealdoi"], warn=True)
assert len(out) == 2
assert isinstance(out[0], dict)
assert out[1] is None
@pytest.mark.vcr
def test_funders_bad_id_works_warn():
"funders - param: warn"
with pytest.warns(UserWarning):
out = cr.funders(ids="10.13039/notarealdoi", works=True, warn=True)
assert out is None
@pytest.mark.vcr
def test_funders_mixed_ids_works_warn():
"funders - param: warn"
with pytest.warns(UserWarning):
out = cr.funders(
ids=["10.13039/100000001", "10.13039/notarealdoi"], works=True, warn=True
)
assert len(out) == 2
assert isinstance(out[0], dict)
assert out[1] is None
| 26.263158 | 87 | 0.681363 | 0 | 0 | 0 | 0 | 3,307 | 0.946751 | 0 | 0 | 701 | 0.200687 |
0e2d51cb9fe9bb1546fc7bbea2cb77d09472d09a | 812 | py | Python | geomagio/api/ws/algorithms.py | alejandrodelcampillo/geomag-algorithms | 43a734d63a8eb2a696f14237e0054e21d36de7c3 | [
"CC0-1.0"
] | 1 | 2021-02-22T23:45:22.000Z | 2021-02-22T23:45:22.000Z | geomagio/api/ws/algorithms.py | alejandrodelcampillo/geomag-algorithms | 43a734d63a8eb2a696f14237e0054e21d36de7c3 | [
"CC0-1.0"
] | 1 | 2021-09-08T03:42:52.000Z | 2021-09-08T03:42:52.000Z | geomagio/api/ws/algorithms.py | alejandrodelcampillo/geomag-algorithms | 43a734d63a8eb2a696f14237e0054e21d36de7c3 | [
"CC0-1.0"
] | null | null | null | from fastapi import APIRouter, Depends
from starlette.responses import Response
from ... import TimeseriesFactory
from ...algorithm import DbDtAlgorithm
from .DataApiQuery import DataApiQuery
from .data import format_timeseries, get_data_factory, get_data_query, get_timeseries
router = APIRouter()
@router.get("/algorithms/dbdt/")
def get_dbdt(
query: DataApiQuery = Depends(get_data_query),
data_factory: TimeseriesFactory = Depends(get_data_factory),
) -> Response:
dbdt = DbDtAlgorithm()
# read data
raw = get_timeseries(data_factory, query)
# run dbdt
timeseries = dbdt.process(raw)
elements = [f"{element}_DT" for element in query.elements]
# output response
return format_timeseries(
timeseries=timeseries, format=query.format, elements=elements
)
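# Illustrative request shape (the endpoint path is real; the query parameters
# are assumed from DataApiQuery and may differ):
#   GET /algorithms/dbdt/?id=BOU&elements=H,E,Z&format=iaga2002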
| 29 | 85 | 0.748768 | 0 | 0 | 0 | 0 | 507 | 0.624384 | 0 | 0 | 72 | 0.08867 |
0e2d851e83694bcead0aaa245c930030f6827cc1 | 1,478 | py | Python | lesson3/stage3/src/jvm/udacity/storm/resources/urltext.py | haitanle/storm-twitter | b68c90129d31eb11808922ec56ac9ac6535fdab2 | [
"MIT"
] | null | null | null | lesson3/stage3/src/jvm/udacity/storm/resources/urltext.py | haitanle/storm-twitter | b68c90129d31eb11808922ec56ac9ac6535fdab2 | [
"MIT"
] | null | null | null | lesson3/stage3/src/jvm/udacity/storm/resources/urltext.py | haitanle/storm-twitter | b68c90129d31eb11808922ec56ac9ac6535fdab2 | [
"MIT"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# https://github.com/apache/storm/blob/master/examples/storm-starter/multilang/resources/splitsentence.py
import storm
import urllib2
from bs4 import BeautifulSoup
class URLBolt(storm.BasicBolt):
def process(self, tup):
url = tup.values[0]
# python urllib2
try:
html = urllib2.urlopen(url).read()
# using BeautifulSoup, "Making the Soup"
soup = BeautifulSoup(html)
# return title and paragraph tags
urlText = soup.findAll({'title' : True, 'p' : True})
#emit tuple if string exists
if urlText:
[storm.emit([t.string]) for t in urlText]
except:
pass
URLBolt().run()
| 35.190476 | 105 | 0.699594 | 513 | 0.347091 | 0 | 0 | 0 | 0 | 0 | 0 | 997 | 0.67456 |
0e2e642553f3d2eca725be5123dcabd2612e0fef | 598 | py | Python | ncl/property.py | MichaelBittencourt/NCL-Generator-API | 8eecf2ea4948354fae9f64b68da2f24ad0663d60 | [
"MIT"
] | 1 | 2020-06-26T09:59:27.000Z | 2020-06-26T09:59:27.000Z | ncl/property.py | MichaelBittencourt/NCL-Generator-API | 8eecf2ea4948354fae9f64b68da2f24ad0663d60 | [
"MIT"
] | null | null | null | ncl/property.py | MichaelBittencourt/NCL-Generator-API | 8eecf2ea4948354fae9f64b68da2f24ad0663d60 | [
"MIT"
] | 1 | 2020-01-07T23:16:11.000Z | 2020-01-07T23:16:11.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 Michael Bittencourt <mchl.bittencourt@gmail.com>
#
# Distributed under terms of the MIT license.
"""
"""
from ncl.abstractelement import AbstractElement
class Property(AbstractElement):
def __init__(self, name, value=None, externable=None):
super().__init__("property", ["name", "value", "externable"], [])
self.set("name", name)
if value is not None:
self.set("value", value)
if externable is not None:
self.set("externable", externable)
| 23.92 | 73 | 0.633779 | 356 | 0.594324 | 0 | 0 | 0 | 0 | 0 | 0 | 244 | 0.407346 |
0e2f13b1e7a11523bafcf1c9ffaaf118b159dadf | 372 | py | Python | DSTK/Timeseries/recurrence_plots.py | jotterbach/dstk | d371262ca09527fb8279f066b564abf5a1fe28df | [
"MIT"
] | 12 | 2017-01-10T19:50:32.000Z | 2020-03-30T20:28:31.000Z | DSTK/Timeseries/recurrence_plots.py | jotterbach/dstk | d371262ca09527fb8279f066b564abf5a1fe28df | [
"MIT"
] | 7 | 2016-10-12T16:21:58.000Z | 2016-12-01T00:34:54.000Z | DSTK/Timeseries/recurrence_plots.py | jotterbach/dstk | d371262ca09527fb8279f066b564abf5a1fe28df | [
"MIT"
] | 8 | 2016-08-22T11:23:12.000Z | 2020-03-13T23:18:39.000Z | import _recurrence_map
import numpy as np
def poincare_map(ts, ts2=None, threshold=0.1):
rec_dist = poincare_recurrence_dist(ts, ts2)
return (rec_dist < threshold).astype(int)
def poincare_recurrence_dist(ts, ts2=None):
if ts2 is None:
return _recurrence_map.recurrence_map(ts, ts)
else:
return _recurrence_map.recurrence_map(ts, ts2)
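# Usage sketch (illustrative; assumes the compiled _recurrence_map extension
# that ships with DSTK is importable):
#
#   ts = np.sin(np.linspace(0, 10 * np.pi, 500))
#   rp = poincare_map(ts, threshold=0.2)  # binary recurrence matrix of ts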
| 23.25 | 54 | 0.725806 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0e2f2f3cf1a3df42e62c08dda70b278f740b11d2 | 2,755 | py | Python | preprocess.py | costagreg/mnist-handwritten-ml | c3e26eb7ab653afa6f9e9dc91f5a438026b64329 | [
"MIT"
] | null | null | null | preprocess.py | costagreg/mnist-handwritten-ml | c3e26eb7ab653afa6f9e9dc91f5a438026b64329 | [
"MIT"
] | 1 | 2019-06-23T16:41:26.000Z | 2019-08-12T09:54:52.000Z | preprocess.py | costagreg/mnist-handwritten-ml | c3e26eb7ab653afa6f9e9dc91f5a438026b64329 | [
"MIT"
] | 1 | 2019-08-12T06:38:35.000Z | 2019-08-12T06:38:35.000Z | import cv2
import math
import numpy as np
import os
import matplotlib.pyplot as plt
from scipy import ndimage
from utils import ValueInvert
# TO-DO: Refactor this with np.nonzero??
def find_center_image(img):
left = 0
right = img.shape[1] - 1
empty_left = True
empty_right = True
for col in range(int(img.shape[1])):
if empty_left == False and empty_right == False:
break
for row in range(img.shape[0] - 1):
if img[row, col] > 0 and empty_left == True:
empty_left = False
left = col
if img[row, img.shape[1] - col - 1] > 0 and empty_right == True:
empty_right = False
right = img.shape[1] - col
top = 0
bottom = img.shape[0] - 1
empty_top = True
empty_bottom = True
for row in range(int(img.shape[0])):
if empty_top == False and empty_bottom == False:
break
for col in range(img.shape[1] - 1):
if img[row, col] > 0 and empty_top == True:
empty_top = False
top = row
if img[img.shape[0] - row - 1, col] > 0 and empty_bottom == True:
empty_bottom = False
bottom = img.shape[0] - row
return top, right, bottom, left
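# A compact alternative for find_center_image using np.nonzero, as the TO-DO
# above suggests (same top/right/bottom/left convention; assumes img has at
# least one nonzero pixel):
#
#   rows, cols = np.nonzero(img)
#   top, right, bottom, left = rows.min(), cols.max() + 1, rows.max() + 1, cols.min()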
def getBestShift(img):
cy, cx = ndimage.measurements.center_of_mass(img)
rows, cols = img.shape
shiftx = np.round(cols/2.0-cx).astype(int)
shifty = np.round(rows/2.0-cy).astype(int)
return shiftx, shifty
def shift(img, sx, sy):
rows, cols = img.shape
M = np.float32([[1, 0, sx], [0, 1, sy]])
shifted = cv2.warpAffine(img, M, (cols, rows))
return shifted
def process_image(img):
img = ValueInvert(img)
img = cv2.resize(img, (28, 28))
(thresh, gray) = cv2.threshold(img, 128,
255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
top, right, bottom, left = find_center_image(img)
cropped_img = img[top:bottom, left:right]
rows, cols = cropped_img.shape
# resize 20x20 keeping ratio
    if rows > cols:
        factor = 20.0 / rows
        rows = 20
        cols = int(round(cols * factor))
    else:
        factor = 20.0 / cols
        cols = 20
        rows = int(round(rows * factor))
gray = cv2.resize(cropped_img, (cols, rows))
# plt.imshow(gray)
# plt.show()
colsPadding = (int(math.ceil((28-cols)/2.0)),
int(math.floor((28-cols)/2.0)))
rowsPadding = (int(math.ceil((28-rows)/2.0)),
int(math.floor((28-rows)/2.0)))
gray = np.lib.pad(gray, (rowsPadding, colsPadding), 'constant')
shiftx, shifty = getBestShift(gray)
shifted = shift(gray, shiftx, shifty)
gray = shifted
return gray
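# Minimal usage sketch (the file name "digit.png" is assumed; ValueInvert
# expects a grayscale image with a dark digit on a light background):
#
#   img = cv2.imread("digit.png", cv2.IMREAD_GRAYSCALE)
#   mnist_ready = process_image(img)  # 28x28, centered by mass, MNIST-style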
| 26.490385 | 77 | 0.56951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.039201 |
0e3027662b184f06c13870ffba5c59a0f9bdc59f | 1,223 | py | Python | buffer.py | Shahaf-Yamin/CartPole-Policy-Gradients | 98845fa6aa24d1de22644b783ea564a778a5c64f | [
"MIT"
] | null | null | null | buffer.py | Shahaf-Yamin/CartPole-Policy-Gradients | 98845fa6aa24d1de22644b783ea564a778a5c64f | [
"MIT"
] | null | null | null | buffer.py | Shahaf-Yamin/CartPole-Policy-Gradients | 98845fa6aa24d1de22644b783ea564a778a5c64f | [
"MIT"
] | null | null | null | import numpy as np
from collections import namedtuple, deque
import random
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward', 'not_done'))
class ReplayBuffer(object):
def __init__(self, capacity):
self.memory = deque([], maxlen=capacity)
def push(self, *args):
self.memory.append([*args])
def sample(self, batch_size):
batch = random.sample(self.memory, batch_size)
        # Assumes push() is called as push(state, action, reward, next_state,
        # done); regroup the sampled transitions by field and stack each field
        # into a 2-D numpy array.
        states, actions, rewards, next_states, done = map(np.vstack, zip(*batch))
        return states, actions, rewards, next_states, done
def sample_last(self):
batch = self.memory[-1]
return batch
def __len__(self):
return len(self.memory)
| 40.766667 | 293 | 0.63287 | 1,041 | 0.851186 | 0 | 0 | 0 | 0 | 0 | 0 | 332 | 0.271464 |
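A brief usage sketch for the replay buffer above; the transition values are assumed to come from a Gym-style CartPole step loop, and the batch size of 32 is illustrative:
buf = ReplayBuffer(capacity=10000)
buf.push(state, action, next_state, reward, not_done)  # fields in Transition order
if len(buf) >= 32:
    states, actions, rewards, next_states, done = buf.sample(32)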
0e30b1db2b544a801697916acc86f22b8a9e7d0e | 4,109 | py | Python | benchmarks/secure_data_SDK-benchmarks/bootloader/load_firmware.py | ghsecuritylab/BenchIoT | 4919427d35e578a7ff07ef5e0b4710b6455dd0b9 | [
"Apache-2.0"
] | 22 | 2019-05-03T03:39:09.000Z | 2022-02-26T17:14:15.000Z | benchmarks/secure_data_SDK-benchmarks/bootloader/load_firmware.py | ghsecuritylab/BenchIoT | 4919427d35e578a7ff07ef5e0b4710b6455dd0b9 | [
"Apache-2.0"
] | 3 | 2019-07-29T19:48:49.000Z | 2022-01-10T07:24:43.000Z | benchmarks/secure_data_SDK-benchmarks/bootloader/load_firmware.py | ghsecuritylab/BenchIoT | 4919427d35e578a7ff07ef5e0b4710b6455dd0b9 | [
"Apache-2.0"
] | 8 | 2019-05-16T08:02:33.000Z | 2021-08-03T03:41:37.000Z |
from struct import pack, unpack
import binascii
import socket
HOST = '192.168.0.10'
PORT = 1337
BUFF_SIZE = 1024
START_TOKEN = "init"
DONE_TOKEN = "done"
FAIL_TOKEN = "fail"
def create_test_application(load_addr=0x08002000, size=64*1024):
    '''
    Creates a test application that simply returns to the bootloader.
    Builds an ISR table whose entries all point to an infinite loop,
    except the reset vector, which points to two instructions:
        mov sp, r3
        bx lr
    '''
SP_ADDR = 0x20050000 # Address of stack for loaded application
fw_list = []
fw_list.append(pack("<I",SP_ADDR))
fw_list.append(pack("<I",load_addr+1025))
# build rest of ISR
for isr in xrange(2,256):
fw_list.append(pack("<I",load_addr+1029)) # 4 bytes after end of ISR
# Add Code
fw_list.append('\x9d\x46\x70\x47') # mov sp,r3; bx lr
fw_list.append('\xfe\xbf\xff\xf7') # b.w
# Fill rest with garbage
i = 0
    # TODO: once the bootloader checksums the image, switch this filler to random data
while (len(fw_list)< size / 4):
fw_list.append(pack("<I",i))
i += 1
return ''.join(fw_list)
def tx(filename):
with open(filename,'rb') as fw_file:
fw_data = fw_file.read()
tx_data(fw_data)
def tx_data(fw_data):
client = socket.socket( socket.AF_INET, socket.SOCK_STREAM)
client.settimeout(50)
client.connect(( HOST, PORT ))
client.settimeout(None)
print "Sending Start Token:", START_TOKEN
client.send(START_TOKEN)
data = client.recv(len(START_TOKEN))
if data and data == START_TOKEN:
print "Got Start Token:", data
client.send(pack("<I", len(fw_data)))
print "Sent Length: ", len(fw_data)
print "Sending FW: ", len(fw_data)
for i in xrange(0,len(fw_data), 128):
client.send(fw_data[i:i+128])
data = client.recv(len(DONE_TOKEN))
if data and data == DONE_TOKEN:
print "Sent Successfully, Token: ", data
else:
print "Transmission Failed, Token: ", data
def rx():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', PORT))
s.listen(1)
conn, addr = s.accept()
print 'Connection address:', addr
data = conn.recv(5)
print data
if (data and data == START_TOKEN):
conn.send(START_TOKEN)
data = conn.recv(4)
size = unpack('<I', data)[0]
print "Size: ",size
received_count = 0
with open("outfile.bin",'wb') as outfile:
while (received_count < size):
request = size - received_count
if request > BUFF_SIZE:
request = BUFF_SIZE
data = conn.recv(request)
if (data):
received_count += len(data)
print "Received %i: %s..."% (len(data),
binascii.hexlify(data[0:10]))
outfile.write(data)
else:
print ("Failed")
conn.send(FAIL_TOKEN)
conn.close()
return
conn.send(DONE_TOKEN) # echo
print "Done"
else:
conn.send(FAIL_TOKEN)
conn.close()
if __name__ == "__main__":
from argparse import ArgumentParser
arg_parser = ArgumentParser()
arg_parser.add_argument('-f','--filename',metavar="FILE",
help='Firmware file to transmit (use ' + \
'arm-none-eabi-objcopy -O binary <file.elf> <outfile>)')
arg_parser.add_argument('--start_addr', default=0x08020000, type=int,
help='Start Address for generated test firmware')
arg_parser.add_argument('--size', default=16*1024, type=int,
help='Size of generated firmware to transmit')
args = arg_parser.parse_args()
#
if args.filename:
tx(args.filename)
else:
fw_data = create_test_application(args.start_addr, args.size)
with open('gen_fw.bin', 'wb') as outfile:
outfile.write(fw_data)
tx_data(fw_data)
| 29.35 | 80 | 0.578973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,004 | 0.244342 |
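For reference, a standalone sketch of the wire framing used above — a 4-byte little-endian unsigned length prefix built with struct.pack; the size value is illustrative:
from struct import pack, unpack
header = pack("<I", 16 * 1024)             # "<I" packs a little-endian uint32
assert unpack("<I", header)[0] == 16384    # the receiver recovers the firmware size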
0e323cbc9a74dd18dfee4350d0c771fc984cd04d | 1,264 | py | Python | time/humanize_time.py | liudmil-mitev/experiments | 724b4f528e628760772eed289b832aa9fd45af17 | [
"WTFPL"
] | 1 | 2018-05-02T15:45:18.000Z | 2018-05-02T15:45:18.000Z | time/humanize_time.py | liudmil-mitev/experiments | 724b4f528e628760772eed289b832aa9fd45af17 | [
"WTFPL"
] | null | null | null | time/humanize_time.py | liudmil-mitev/experiments | 724b4f528e628760772eed289b832aa9fd45af17 | [
"WTFPL"
] | null | null | null | #!/usr/bin/env python
INTERVALS = [1, 60, 3600, 86400, 604800, 2419200, 29030400]  # a "month" is 4 weeks, a "year" is 12 such months
NAMES = [('second', 'seconds'),
('minute', 'minutes'),
('hour', 'hours'),
('day', 'days'),
('week', 'weeks'),
('month', 'months'),
('year', 'years')]
def humanize_time(amount, units):
'''
Divide `amount` in time periods.
Useful for making time intervals more human readable.
>>> humanize_time(173, "hours")
[(1, 'week'), (5, 'hours')]
>>> humanize_time(17313, "seconds")
[(4, 'hours'), (48, 'minutes'), (33, 'seconds')]
>>> humanize_time(90, "weeks")
[(1, 'year'), (10, 'months'), (2, 'weeks')]
>>> humanize_time(42, "months")
[(3, 'years'), (6, 'months')]
>>> humanize_time(500, "days")
[(1, 'year'), (5, 'months'), (3, 'weeks'), (3, 'days')]
'''
result = []
    # list comprehension keeps this working on Python 3, where map() has no .index()
    unit = [plural for _, plural in NAMES].index(units)
# Convert to seconds
amount = amount * INTERVALS[unit]
    for i in range(len(NAMES)-1, -1, -1):
        a = amount // INTERVALS[i]
        if a > 0:
            # 1 % a is 0 only when a == 1, selecting the singular form
            result.append( (a, NAMES[i][1 % a]) )
            amount -= a * INTERVALS[i]
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28.088889 | 61 | 0.508703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 689 | 0.545095 |
0e333752c015ad9cb399b65e23737e7c8b00cd94 | 27,010 | py | Python | fauxfactory/__init__.py | sthirugn/fauxfactory | b320f46d34124d0fbc0b93bc6c56ff8231c8dbc5 | [
"Apache-2.0"
] | null | null | null | fauxfactory/__init__.py | sthirugn/fauxfactory | b320f46d34124d0fbc0b93bc6c56ff8231c8dbc5 | [
"Apache-2.0"
] | null | null | null | fauxfactory/__init__.py | sthirugn/fauxfactory | b320f46d34124d0fbc0b93bc6c56ff8231c8dbc5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Generate random data for your tests."""
__all__ = (
'gen_alpha',
'gen_alphanumeric',
'gen_boolean',
'gen_choice',
'gen_cjk',
'gen_cyrillic',
'gen_date',
'gen_datetime',
'gen_email',
'gen_html',
'gen_integer',
'gen_ipaddr',
'gen_iplum',
'gen_latin1',
'gen_mac',
'gen_netmask',
'gen_negative_integer',
'gen_numeric_string',
'gen_positive_integer',
'gen_string',
'gen_time',
'gen_url',
'gen_utf8',
'gen_uuid',
)
import datetime
import random
import re
import string
import sys
import unicodedata
import uuid
import warnings
try:  # collections.abc is required on Python 3.3+
    from collections.abc import Iterable
except ImportError:  # Python 2
    from collections import Iterable
from fauxfactory.constants import (
HTML_TAGS, LOREM_IPSUM_TEXT,
MAX_YEARS, MIN_YEARS,
SCHEMES, SUBDOMAINS, TLDS, VALID_NETMASKS
)
from functools import wraps
# Private Functions -----------------------------------------------------------
def _make_unicode(data):
"""Convert ``data`` to a unicode string if running Python 2.
:param str data: A string to be type cast.
:return: ``data``, but as unicode. ``data`` is never modified: if a type
cast is necessary, a copy of ``data`` is returned.
"""
if sys.version_info[0] == 2:
return unicode(data) # flake8:noqa pylint:disable=undefined-variable
return data
def _is_positive_int(length):
"""Check that ``length`` argument is an integer greater than zero.
:param int length: The desired length of the string
:raises: ``ValueError`` if ``length`` is not an ``int`` or is less than 1.
:returns: Nothing.
:rtype: None
"""
if not isinstance(length, int) or length <= 0:
raise ValueError("{0} is an invalid 'length'.".format(length))
def _unicode_letters_generator():
"""Generates unicode characters in the letters category
:return: a generator which will generates all unicode letters available
"""
if sys.version_info[0] == 2:
chr_function = unichr # pylint:disable=undefined-variable
range_function = xrange # pylint:disable=undefined-variable
else:
chr_function = chr
range_function = range
# Use sys.maxunicode instead of 0x10FFFF to avoid the exception below, in a
# narrow Python build (before Python 3.3)
# ValueError: unichr() arg not in range(0x10000) (narrow Python build)
# For more information, read PEP 261.
for i in range_function(sys.maxunicode):
char = chr_function(i)
if unicodedata.category(char).startswith('L'):
yield char
UNICODE_LETTERS = [c for c in _unicode_letters_generator()]
# Public Functions ------------------------------------------------------------
def gen_string(str_type, length=None):
"""A simple wrapper that calls other string generation methods.
:param str str_type: The type of string which should be generated.
:param int length: The length of the generated string. Must be 1 or
greater.
:raises: ``ValueError`` if an invalid ``str_type`` is specified.
:returns: A string.
:rtype: str
Valid values for ``str_type`` are as follows:
* alpha
* alphanumeric
* cjk
* cyrillic
* html
* latin1
* numeric
* utf8
"""
str_types_functions = {
u'alpha': gen_alpha,
u'alphanumeric': gen_alphanumeric,
u'cjk': gen_cjk,
u'cyrillic': gen_cyrillic,
u'html': gen_html,
u'latin1': gen_latin1,
u'numeric': gen_numeric_string,
u'utf8': gen_utf8,
}
str_type_lower = str_type.lower() # do not modify user data
if str_type_lower not in str_types_functions.keys():
raise ValueError(
'{0} is not a supported string type. Valid string types are {1}.'
''.format(str_type_lower, u','.join(str_types_functions.keys()))
)
method = str_types_functions[str_type_lower]
if length is None:
return method()
return method(length)
def gen_alpha(length=10):
"""Returns a random string made up of alpha characters.
:param int length: Length for random data.
:returns: A random string made up of alpha characters.
:rtype: str
"""
# Validate length argument
_is_positive_int(length)
output_string = u''.join(
random.choice(string.ascii_letters) for i in range(length)
)
return _make_unicode(output_string)
def gen_alphanumeric(length=10):
"""Returns a random string made up of alpha and numeric characters.
:param int length: Length for random data.
:returns: A random string made up of alpha and numeric characters.
:rtype: str
"""
# Validate length argument
_is_positive_int(length)
output_string = u''.join(
random.choice(
string.ascii_letters + string.digits
) for i in range(length))
return _make_unicode(output_string)
def gen_boolean():
"""Returns a random Boolean value.
:returns: A random Boolean value.
:rtype: bool
"""
choices = (True, False)
return gen_choice(choices)
def gen_choice(choices):
"""Returns a random choice from the available choices.
:param list choices: List of choices from which select a random value.
:raises: ``ValueError`` if ``choices`` is ``None`` or not ``Iterable`` or
a ``dict``.
:returns: A random element from ``choices``.
"""
# Validation for 'choices'
if choices is None:
raise ValueError("Choices argument cannot be None.")
# We don't want a single dictionary value.
if not isinstance(choices, Iterable) or isinstance(choices, dict):
raise ValueError("Choices argument is not iterable.")
if len(choices) == 0:
raise ValueError("Choices argument cannot be empty.")
# If only 1 item is present, return it right away
if len(choices) == 1:
return choices[0]
return random.choice(choices)
def gen_cjk(length=10):
"""Returns a random string made up of CJK characters.
(Source: Wikipedia - CJK Unified Ideographs)
:param int length: Length for random data.
:returns: A random string made up of CJK characters.
:rtype: str
"""
# Validate length argument
_is_positive_int(length)
# Generate codepoints, then convert the codepoints to a string. The
# valid range of CJK codepoints is 0x4E00 - 0x9FCC, inclusive. Python 2
# and 3 support the `unichr` and `chr` functions, respectively.
codepoints = [random.randint(0x4E00, 0x9FCC) for _ in range(length)]
if sys.version_info[0] == 2:
# pylint:disable=undefined-variable
output = u''.join(unichr(codepoint) for codepoint in codepoints)
else:
output = u''.join(chr(codepoint) for codepoint in codepoints)
return _make_unicode(output)
def gen_cyrillic(length=10):
"""Returns a random string made up of Cyrillic characters.
:param int length: Length for random data.
:returns: A random string made up of Cyrillic characters.
:rtype: str
"""
# Validate length argument
_is_positive_int(length)
# Generate codepoints, then convert the codepoints to a string. The
# valid range of Cyrillic codepoints is 0x410 - 0x4ff, inclusive. Python 2
# and 3 support the `unichr` and `chr` functions, respectively.
codepoints = [random.randint(0x0400, 0x04FF) for _ in range(length)]
try:
# (undefined-variable) pylint:disable=E0602
output = u''.join(unichr(codepoint) for codepoint in codepoints)
except NameError:
output = u''.join(chr(codepoint) for codepoint in codepoints)
return _make_unicode(output)
def gen_date(min_date=None, max_date=None):
"""Returns a random date value
:param min_date: A valid ``datetime.date`` object.
:param max_date: A valid ``datetime.date`` object.
:raises: ``ValueError`` if arguments are not valid ``datetime.date``
objects.
:returns: Random ``datetime.date`` object.
"""
_min_value = (datetime.date.today() -
datetime.timedelta(365 * MIN_YEARS))
_max_value = (datetime.date.today() +
datetime.timedelta(365 * MAX_YEARS))
if min_date is None:
min_date = _min_value
if max_date is None:
max_date = _max_value
# Validation
    if not isinstance(min_date, datetime.date):
        raise ValueError("%s is not a valid datetime.date object" % min_date)
    if not isinstance(max_date, datetime.date):
        raise ValueError("%s is not a valid datetime.date object" % max_date)
# Check that max_date is not before min_date
assert min_date < max_date
# Pick a day between min and max dates
diff = max_date - min_date
days = random.randint(0, diff.days)
date = min_date + datetime.timedelta(days=days)
return date
def gen_datetime(min_date=None, max_date=None):
"""Returns a random datetime value
:param min_date: A valid ``datetime.datetime`` object.
:param max_date: A valid ``datetime.datetime`` object.
:raises: ``ValueError`` if arguments are not valid ``datetime.datetime``
objects.
:returns: Random ``datetime.datetime`` object.
"""
_min_value = (datetime.datetime.now() -
datetime.timedelta(365 * MIN_YEARS))
_max_value = (datetime.datetime.now() +
datetime.timedelta(365 * MAX_YEARS))
if min_date is None:
min_date = _min_value
if max_date is None:
max_date = _max_value
# Validation
    if not isinstance(min_date, datetime.datetime):
        raise ValueError("%s is not a valid datetime.datetime object" % min_date)
    if not isinstance(max_date, datetime.datetime):
        raise ValueError("%s is not a valid datetime.datetime object" % max_date)
# Check that max_date is not before min_date
assert min_date < max_date
# Pick a time between min and max dates
diff = max_date - min_date
seconds = random.randint(0, diff.days * 3600 * 24 + diff.seconds)
return min_date + datetime.timedelta(seconds=seconds)
def gen_email(name=None, domain=None, tlds=None):
"""Generates a random email address.
:param str name: Email name.
:param str domain: Domain name.
:param str tlds: Top Level Domain Server
:returns: An email address.
:rtype: str
"""
# Generate a new name if needed
if name is None:
name = gen_alpha(8)
# Obtain a random domain if needed
if domain is None:
domain = gen_choice(SUBDOMAINS)
# Obtain a random top level domain if needed
if tlds is None:
tlds = gen_choice(TLDS)
email = u"{0}@{1}.{2}".format(name, domain, tlds)
return _make_unicode(email)
def gen_integer(min_value=None, max_value=None):
"""Returns a random integer value based on the current platform.
:param int min_value: The minimum allowed value.
:param int max_value: The maximum allowed value.
:raises: ``ValueError`` if arguments are not integers or if they are
less or greater than the system's allowed range for integers.
:returns: Returns a random integer value.
:rtype: int
"""
# Platform-specific value range for integers
_min_value = - sys.maxsize - 1
_max_value = sys.maxsize
if min_value is None:
min_value = _min_value
if max_value is None:
max_value = _max_value
if sys.version_info[0] < 3:
integer_types = (int, long,) # pylint:disable=undefined-variable
else:
integer_types = (int,)
# Perform some validations
if not isinstance(min_value, integer_types) or min_value < _min_value:
raise ValueError("\'%s\' is not a valid minimum." % min_value)
if not isinstance(max_value, integer_types) or max_value > _max_value:
raise ValueError("\'%s\' is not a valid maximum." % max_value)
value = random.randint(min_value, max_value)
return value
def gen_iplum(words=None, paragraphs=None):
"""Returns a lorem ipsum string. If no arguments are passed, then
return the entire default lorem ipsum string.
:param int words: The number of words to return.
:param int paragraphs: The number of paragraphs to return.
:raises: ``ValueError`` if ``words`` is not a valid positive integer.
:returns: A ``lorem ipsum`` string containing either the number of ``words``
or ``paragraphs``, extending and wrapping around the text as needed to
make sure that it has the specified length.
:rtype: str
"""
# Check parameters
if words is None or words == 0:
words = len(LOREM_IPSUM_TEXT.split())
if paragraphs is None:
paragraphs = 1
if not isinstance(words, int) or words < 0:
raise ValueError(
"Cannot generate a string with negative number of words.")
_is_positive_int(paragraphs)
# Original Lorem Ipsum string
all_words = LOREM_IPSUM_TEXT.split()
# How many words do we need?
total_words_needed = words * paragraphs
quotient = int(total_words_needed / len(all_words))
modulus = total_words_needed % len(all_words)
# Pool of words to use
all_words = all_words * (quotient + modulus)
result = u""
start_pos = 0
for _ in range(0, paragraphs):
sentence = u" ".join(
all_words[start_pos:start_pos + words])
# Remove comma from the end, if it exists
if sentence.endswith(','):
sentence = sentence.rstrip(',')
# Remove period from the end, if it exists
if sentence.endswith('.'):
sentence = sentence.rstrip('.')
# Each sentence should be properly capitalized
cap_sentence = [
frag.capitalize() + u'.' for frag in sentence.split('. ')]
# Add newline at the end
result += " ".join(cap_sentence) + u"\n"
# Increment positional counter
start_pos += words
return _make_unicode(result.rstrip())
def gen_latin1(length=10):
"""Returns a random string made up of UTF-8 characters.
(Font: Wikipedia - Latin-1 Supplement Unicode Block)
:param int length: Length for random data.
:returns: A random string made up of ``Latin1`` characters.
:rtype: str
"""
# Validate length argument
_is_positive_int(length)
    range0 = ['00C0', '00D6']
    range1 = ['00D8', '00F6']
    range2 = ['00F8', '00FF']
output_array = []
for i in range(int(range0[0], 16), int(range0[1], 16)):
output_array.append(i)
for i in range(int(range1[0], 16), int(range1[1], 16)):
output_array.append(i)
for i in range(int(range2[0], 16), int(range2[1], 16)):
output_array.append(i)
if sys.version_info[0] == 2:
output_string = u''.join(
# pylint:disable=E0602
unichr(random.choice(output_array)) for _ in range(length)
)
else:
output_string = u''.join(
chr(random.choice(output_array)) for _ in range(length)
)
return _make_unicode(output_string)
def gen_negative_integer():
"""Returns a random negative integer based on the current platform.
:returns: Returns a random negative integer value.
:rtype: int
"""
max_value = 0
return gen_integer(max_value=max_value)
def gen_ipaddr(ip3=False, ipv6=False, prefix=()):
"""Generates a random IP address.
You can also specify an IP address prefix if you are interested in
local network address generation, etc.
:param bool ip3: Whether to generate a 3 or 4 group IP.
:param bool ipv6: Whether to generate IPv6 or IPv4
:param list prefix: A prefix to be used for an IP (e.g. [10, 0, 1]). It
must be an iterable with strings or integers. Can be left unspecified or
empty.
:returns: An IP address.
:rtype: str
:raises: ``ValueError`` if ``prefix`` would lead to no random fields at all.
This means the length that triggers the ``ValueError`` is 4 for regular
IPv4, 3 for IPv4 with ip3 and 8 for IPv6. It will be raised in any case
the prefix length reaches or exceeds those values.
"""
# Set the lengths of the randomly generated sections
if ipv6:
rng = 8
elif ip3:
rng = 3
else:
rng = 4
prefix = [str(field) for field in prefix]
# Prefix reduces number of random fields generated, so subtract the length
# of it from the rng to keep the IP address have correct number of fields
rng -= len(prefix)
if rng == 0:
raise ValueError(
"Prefix {} would lead to no randomness at all".format(
repr(prefix)))
elif rng < 0:
raise ValueError(
"Prefix {} is too long for this configuration".format(
repr(prefix)))
if ipv6:
# StackOverflow.com questions: generate-random-ipv6-address
random_fields = [
'{0:x}'.format(random.randint(0, 2**16 - 1)) for _ in range(rng)]
ipaddr = u':'.join(prefix + random_fields)
else:
random_fields = [str(random.randrange(0, 255, 1)) for _ in range(rng)]
ipaddr = u".".join(prefix + random_fields)
if ip3:
ipaddr = ipaddr + u".0"
return _make_unicode(ipaddr)
def gen_mac(delimiter=':', multicast=None, locally=None):
"""Generates a random MAC address.
For more information about how unicast or multicast and globally unique and
locally administered MAC addresses are generated check this link
https://en.wikipedia.org/wiki/MAC_address.
    :param str delimiter: Valid MAC delimiter (e.g. ':', '-').
:param bool multicast: Indicates if the generated MAC address should be
unicast or multicast. If no value is provided a random one will be
chosen.
:param bool locally: Indicates if the generated MAC address should be
globally unique or locally administered. If no value is provided a
random one will be chosen.
:returns: A random MAC address.
:rtype: str
"""
if delimiter not in [':', '-']:
raise ValueError('Delimiter is not a valid option: %s' % delimiter)
if multicast is None:
multicast = bool(random.randint(0, 1))
if locally is None:
locally = bool(random.randint(0, 1))
first_octet = random.randint(0, 255)
if multicast:
# Ensure that the first least significant bit is 1
first_octet |= 0b00000001
else:
# Ensure that the first least significant bit is 0
first_octet &= 0b11111110
if locally:
# Ensure that the second least significant bit is 1
first_octet |= 0b00000010
else:
# Ensure that the second least significant bit is 0
first_octet &= 0b11111101
octets = [first_octet]
octets.extend([
random.randint(0, 255) for _ in range(5)
])
mac = delimiter.join(['{0:02x}'.format(octet) for octet in octets])
return _make_unicode(mac)
def gen_netmask(min_cidr=1, max_cidr=31):
"""Generates a random valid netmask.
For more info: http://www.iplocation.net/tools/netmask.php
:param int min_cidr: Inferior CIDR limit
:param int max_cidr: Superior CIDR limit
:returns: The netmask is chosen from
:data:`fauxfactory.constants.VALID_NETMASKS` respecting the CIDR range
:rtype: str
:raises: ``ValueError`` if ``min_cidr`` or ``max_cidr`` have an invalid
value. For example, ``max_cidr`` cannot be 33.
"""
if min_cidr < 0:
raise ValueError(
'min_cidr must be 0 or greater, but is {0}'.format(min_cidr)
)
if max_cidr >= len(VALID_NETMASKS):
raise ValueError(
'max_cidr must be less than {0}, but is {1}'
.format(len(VALID_NETMASKS), max_cidr)
)
return VALID_NETMASKS[random.randint(min_cidr, max_cidr)]
def gen_numeric_string(length=10):
"""Returns a random string made up of numbers.
:param int length: Length for random data.
:returns: A random string made up of numbers.
:rtype: str
"""
# Validate length argument
_is_positive_int(length)
output_string = u''.join(
random.choice(string.digits) for i in range(length)
)
return _make_unicode(output_string)
def gen_positive_integer():
"""Returns a random positive integer based on the current platform.
:returns: A random positive integer value.
:rtype: int
"""
min_value = 0
return gen_integer(min_value=min_value)
def gen_time():
"""Generates a random time.
:returns: A random ``datetime.time`` object.
"""
return datetime.time(
random.randint(0, 23),
random.randint(0, 59),
random.randint(0, 59),
random.randint(0, 999999),
)
def gen_url(scheme=None, subdomain=None, tlds=None):
"""Generates a random URL address
:param str scheme: Either http, https or ftp.
    :param str subdomain: A valid subdomain
:param str tlds: A qualified top level domain name (e.g. 'com', 'net')
:raises: ``ValueError`` if arguments are not valid.
:returns: A random URL address.
:rtype: str
"""
# Regex for subdomain names
subdomainator = re.compile(r"^[a-zA-Z0-9][-\w.~]*$")
# Regex for URL scheme
schemenator = re.compile(r"^(https?|ftp)$")
# Regex for TLDS
tldsnator = re.compile(r"^[a-zA-Z]{1,3}$")
if scheme:
if schemenator.match(scheme) is None:
raise ValueError("Protocol {0} is not valid.".format(scheme))
else:
scheme = gen_choice(SCHEMES)
if subdomain:
if subdomainator.match(subdomain) is None:
raise ValueError("Subdomain {0} is invalid.".format(subdomain))
else:
subdomain = gen_choice(SUBDOMAINS)
if tlds:
if tldsnator.match(tlds) is None:
raise ValueError("TLDS name {0} is invalid.".format(tlds))
else:
tlds = gen_choice(TLDS)
url = u"{0}://{1}.{2}".format(scheme, subdomain, tlds)
return _make_unicode(url)
def gen_utf8(length=10):
"""Returns a random string made up of UTF-8 letters characters, as per
`RFC 3629`_.
:param int length: Length for random data.
:returns: A random string made up of ``UTF-8`` letters characters.
:rtype: str
.. _`RFC 3629`: http://www.rfc-editor.org/rfc/rfc3629.txt
"""
# Validate length argument
_is_positive_int(length)
return u''.join([random.choice(UNICODE_LETTERS) for _ in range(length)])
def gen_uuid():
"""Generates a UUID string (universally unique identifiers).
:returns: Returns a string representation for a UUID.
:rtype: str
"""
output_uuid = _make_unicode(str(uuid.uuid4()))
return output_uuid
def gen_html(length=10):
"""Returns a random string made up of html characters.
:param int length: Length for random data.
:returns: A random string made up of html characters.
:rtype: str
"""
# Validate length argument
_is_positive_int(length)
html_tag = random.choice(HTML_TAGS)
output_string = u'<{0}>{1}</{2}>'.format(
html_tag, gen_string("alpha", length), html_tag)
return _make_unicode(output_string)
# Backward Compatibility ------------------------------------------------------
# Code borrowed from http://code.activestate.com/recipes/391367-deprecated/
def deprecated(func):
"""A decorator used to mark functions as deprecated.
Emit a warning when the decorated function is called.
"""
@wraps(func)
def wrapper(*args, **kwargs):
"""Emit a warning, then call ``func``."""
old_name = func.__name__
if old_name == 'codify':
new_name = '_make_unicode'
else:
new_name = old_name.replace('generate', 'gen')
warnings.warn(
'{0} is deprecated! Please use {1} instead.'
.format(old_name, new_name),
category=Warning
)
return func(*args, **kwargs)
return wrapper
@deprecated
def codify(data):
# pylint:disable=missing-docstring
return _make_unicode(data)
class FauxFactory(object):
# This issue is no longer relevant, as the class has been turned into a set
# of functions.
# pylint:disable=too-many-public-methods
#
# This code is not imported when `from fauxfactory import *` is called, nor
# does this code show up in Sphinx's output. See `__all__`.
# pylint:disable=missing-docstring
@classmethod
@deprecated
def generate_string(cls, str_type, length):
return gen_string(str_type, length)
@classmethod
@deprecated
def generate_alpha(cls, length=10):
return gen_alpha(length)
@classmethod
@deprecated
def generate_alphanumeric(cls, length=10):
return gen_alphanumeric(length)
@classmethod
@deprecated
def generate_boolean(cls):
return gen_boolean()
@classmethod
@deprecated
def generate_choice(cls, choices):
return gen_choice(choices)
@classmethod
@deprecated
def generate_cjk(cls, length=10):
return gen_cjk(length)
@classmethod
@deprecated
def generate_date(cls, min_date=None, max_date=None):
return gen_date(min_date, max_date)
@classmethod
@deprecated
def generate_datetime(cls, min_date=None, max_date=None):
return gen_datetime(min_date, max_date)
@classmethod
@deprecated
def generate_email(cls, name=None, domain=None, tlds=None):
return gen_email(name, domain, tlds)
@classmethod
@deprecated
def generate_integer(cls, min_value=None, max_value=None):
return gen_integer(min_value, max_value)
@classmethod
@deprecated
def generate_iplum(cls, words=None, paragraphs=None):
return gen_iplum(words, paragraphs)
@classmethod
@deprecated
def generate_latin1(cls, length=10):
return gen_latin1(length)
@classmethod
@deprecated
def generate_negative_integer(cls):
return gen_negative_integer()
@classmethod
@deprecated
def generate_ipaddr(cls, ip3=False, ipv6=False):
return gen_ipaddr(ip3, ipv6)
@classmethod
@deprecated
def generate_mac(cls, delimiter=":"):
return gen_mac(delimiter)
@classmethod
@deprecated
def generate_numeric_string(cls, length=10):
return gen_numeric_string(length)
@classmethod
@deprecated
def generate_positive_integer(cls):
return gen_integer()
@classmethod
@deprecated
def generate_time(cls):
return gen_time()
@classmethod
@deprecated
def generate_url(cls, scheme=None, subdomain=None, tlds=None):
return gen_url(scheme, subdomain, tlds)
@classmethod
@deprecated
def generate_utf8(cls, length=10):
return gen_utf8(length)
@classmethod
@deprecated
def generate_uuid(cls):
return gen_uuid()
@classmethod
@deprecated
def generate_html(cls, length=10):
return gen_html(length)
| 28.703507 | 80 | 0.646501 | 2,942 | 0.108923 | 814 | 0.030137 | 3,020 | 0.11181 | 0 | 0 | 12,564 | 0.465161 |
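A short usage sketch against the generators above; the outputs are random, and the argument choices are illustrative:
print(gen_string('alphanumeric', 12))                         # e.g. 'a8K2xQ9mPz3L'
print(gen_mac(delimiter='-', multicast=False, locally=True))  # unicast, locally administered
print(gen_ipaddr(ip3=True, prefix=[10, 0]))                   # e.g. '10.0.17.0'
print(gen_url(scheme='https'))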
0e3363abecde73d1f0885ef85ab14787eb8932f1 | 2,614 | py | Python | eda.py | justinhchae/app_courts | c46d48c4fa02cec91bda6fc3818ab677d6a83281 | [
"MIT"
] | 4 | 2021-01-04T05:46:43.000Z | 2022-01-06T16:33:40.000Z | eda.py | justinhchae/app_courts | c46d48c4fa02cec91bda6fc3818ab677d6a83281 | [
"MIT"
] | null | null | null | eda.py | justinhchae/app_courts | c46d48c4fa02cec91bda6fc3818ab677d6a83281 | [
"MIT"
] | null | null | null | import gc
import pandas as pd
from application.application import Application
from clean_data.maker import Maker
from do_data.config import Columns
from do_data.getter import Reader
from do_data.joiner import Joiner
from do_data.writer import Writer
from analyze_data.utils import Utilities
from analyze_data.metrics import Metrics
from analyze_data.network import Network
reader = Reader()
writer = Writer()
joiner = Joiner()
maker = Maker()
app = Application()
name = Columns()
utils = Utilities()
metrics = Metrics()
tracker = Network()
# tracker.organize()
# tracker.create_membership_table()
# tracker.make_network()
# utils.ov1_initiation()
# utils.ov1_disposition()
# utils.ov1_sentencing()
# utils.dv1_bond()
# utils.misc_tax_data()
# metrics.ov1_regression(chart_type='static')
# metrics.dv1_bond()
# metrics.dv1_bond_timeseries(chart_type='dynamic')
# metrics.dv1_sentencing_network()
def parse_em_data():
cols = ['ir', 'detainee_status', 'detainee_status_date', 'ej_status']
may_em = reader.to_df('EM_513.csv', clean_em=True, preview=False)
may_em = maker.make_status_date(may_em, '2020-05-13')
may_em = maker.make_status(may_em, 'EM')
may_em = may_em[cols].drop_duplicates()
may_jail = reader.to_df('Jail_513.csv', clean_jail=True, preview=False)
    may_jail = maker.make_status_date(may_jail, '2020-05-13')  # zero-padded to match the EM date above
may_jail = maker.make_status(may_jail, 'Jail')
may_jail = may_jail[cols].drop_duplicates()
june_em = reader.to_df('EM_630.csv', clean_em=True, preview=False)
june_em = maker.make_status_date(june_em, '2020-06-30')
june_em = maker.make_status(june_em, 'EM')
june_em = june_em[cols].drop_duplicates()
june_jail = reader.to_df('Jail_630.csv', clean_jail=True, preview=False)
june_jail = maker.make_status_date(june_jail, '2020-06-30')
june_jail = maker.make_status(june_jail, 'Jail')
june_jail = june_jail[cols].drop_duplicates()
df = pd.concat([may_em, may_jail, june_em, june_jail])
    df = df.reset_index(drop=True)  # reset_index returns a new frame; reassign it
df.to_csv('data/em_testing.csv', index=False)
# parse_em_data()
# initiation = reader.to_df('Initiation.zip'
# , clean_initiation=True
# , preview=False
# , classify=True
# )
# initiation = reader.to_df('initiation_modified.bz2'
# , preview=False)
# sentencing = reader.to_df('sentencing_modified.bz2'
# , preview=False)
# gui = show(initiation)
| 29.044444 | 76 | 0.682096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,031 | 0.394415 |
0e3448c59b854530205264fdc986d590b2b2ab8d | 197 | py | Python | retrieval/hybrid/__init__.py | park-sungmoo/odqa_baseline_code | 45954be766e5f987bef18e5b8a2e47f1508742cd | [
"Apache-2.0"
] | 67 | 2021-05-12T15:54:28.000Z | 2022-03-12T15:55:35.000Z | retrieval/hybrid/__init__.py | park-sungmoo/odqa_baseline_code | 45954be766e5f987bef18e5b8a2e47f1508742cd | [
"Apache-2.0"
] | 71 | 2021-05-01T06:07:37.000Z | 2022-01-28T16:54:46.000Z | retrieval/hybrid/__init__.py | park-sungmoo/odqa_baseline_code | 45954be766e5f987bef18e5b8a2e47f1508742cd | [
"Apache-2.0"
] | 14 | 2021-05-24T10:57:27.000Z | 2022-02-18T06:34:11.000Z | from retrieval.hybrid.hybrid_base import HybridRetrieval, HybridLogisticRetrieval
from retrieval.hybrid.hybrid import TfidfDprBert, AtireBm25DprBert, LogisticTfidfDprBert, LogisticAtireBm25DprBert
| 65.666667 | 114 | 0.898477 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0e351d8158536eec299c27c17ae7a1f82b512746 | 455 | py | Python | upper_print.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 117 | 2015-12-18T07:18:27.000Z | 2022-03-28T00:25:54.000Z | upper_print.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 8 | 2018-10-03T09:38:46.000Z | 2021-12-13T19:51:09.000Z | upper_print.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 28 | 2016-08-02T17:43:47.000Z | 2022-03-21T08:31:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
def upper_print(f):
def wrapper(*args, **kwargs):
f(*[i.upper() if hasattr(i, 'upper') else i for i in args], **kwargs)
return wrapper
if __name__ == '__main__':
text = 'hello world!'
print(text) # hello world!
old_print = print
print = upper_print(print)
print(text) # HELLO WORLD!
print = old_print
print(text) # hello world!
| 18.2 | 77 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.281319 |
0e381333bec3bba2900ca4f5a909c7203ac63e80 | 10,182 | py | Python | odxtools/units.py | floroks/odxtools | c3ac0031234188820266d2de35b23bec7f00a0cb | [
"MIT"
] | null | null | null | odxtools/units.py | floroks/odxtools | c3ac0031234188820266d2de35b23bec7f00a0cb | [
"MIT"
] | null | null | null | odxtools/units.py | floroks/odxtools | c3ac0031234188820266d2de35b23bec7f00a0cb | [
"MIT"
] | null | null | null | # SPDX-License-Identifier: MIT
# Copyright (c) 2022 MBition GmbH
from dataclasses import dataclass, field
from typing import List, Literal, Optional
from .nameditemlist import NamedItemList
from .utils import read_description_from_odx
UnitGroupCategory = Literal["COUNTRY", "EQUIV-UNITS"]
@dataclass
class PhysicalDimension:
"""A physical dimension is a formal definition of a unit.
It consists of the exponents for the SI units:
| Symbol | Name | Quantity | Property |
| --- | --- | --- | --- |
| s | second | time | `time_exp` |
| m | metre | length | `length_exp` |
| kg | kilogram | mass | `mass_exp` |
| A | ampere | electric current | `current_exp` |
| K | kelvin | thermodynamic temperature | `temperature_exp` |
| mol | mole | amount of substance | `molar_amount_exp` |
| cd | candela | luminous intensity | `luminous_intensity_exp` |
(The first three columns are from https://en.wikipedia.org/wiki/International_System_of_Units.)
Examples
--------
The unit `m/s` (or `m**1 * s**(-1)`) can be represented as
```
PhysicalDimension(
id="velocity",
short_name="metre_per_second",
length_exp=1,
time_exp=-1
)
```
"""
id: str
short_name: str
oid: Optional[str] = None
long_name: Optional[str] = None
description: Optional[str] = None
length_exp: int = 0
mass_exp: int = 0
time_exp: int = 0
current_exp: int = 0
temperature_exp: int = 0
molar_amount_exp: int = 0
luminous_intensity_exp: int = 0
@dataclass
class Unit:
"""
A unit consists of an ID, short name and a display name.
Additionally, a unit may reference an SI unit (`.physical_dimension`)
and an offset to that unit (`factor_si_to_unit`, `offset_si_to_unit`).
The factor and offset are defined such that the following equation holds true:
UNIT = FACTOR-SI-TO-UNIT * SI-UNIT + OFFSET-SI-TO-UNIT
For example: 1km = 1000 * 1m + 0
Examples
--------
A minimal unit representing kilometres:
```
Unit(
id="kilometre",
short_name="kilometre",
display_name="km"
)
```
A unit that also references a physical dimension:
```
Unit(
id="ID.kilometre",
short_name="Kilometre",
display_name="km",
physical_dimension_ref="ID.metre",
factor_si_to_unit=1000,
offset_si_to_unit=0
)
# where the physical_dimension_ref references, e.g.:
PhysicalDimension(id="ID.metre", short_name="metre", length_exp=1)
```
"""
id: str
short_name: str
display_name: str
oid: Optional[str] = None
long_name: Optional[str] = None
description: Optional[str] = None
factor_si_to_unit: Optional[float] = None
offset_si_to_unit: Optional[float] = None
physical_dimension_ref: Optional[str] = None
def __post_init__(self):
self._physical_dimension = None
        si_fields = (self.factor_si_to_unit,
                     self.offset_si_to_unit,
                     self.physical_dimension_ref)
        if any(field is not None for field in si_fields):
            assert all(field is not None for field in si_fields), (
                f"Error 54: If one of factor_si_to_unit, offset_si_to_unit and physical_dimension_ref is defined,"
                f" all of them must be defined: {self.factor_si_to_unit} and {self.offset_si_to_unit} and {self.physical_dimension_ref}"
            )
    @property
    def physical_dimension(self) -> Optional[PhysicalDimension]:
        return self._physical_dimension
def _resolve_references(self, id_lookup):
if self.physical_dimension_ref:
self._physical_dimension = id_lookup[self.physical_dimension_ref]
assert isinstance(self._physical_dimension, PhysicalDimension), (
f"The physical_dimension_ref must be resolved to a PhysicalDimension."
f" {self.physical_dimension_ref} referenced {self._physical_dimension}"
)
@dataclass
class UnitGroup:
"""A group of units.
There are two categories of groups: COUNTRY and EQUIV-UNITS.
"""
short_name: str
category: UnitGroupCategory
unit_refs: List[str] = field(default_factory=list)
oid: Optional[str] = None
long_name: Optional[str] = None
description: Optional[str] = None
def __post_init__(self):
self._units = NamedItemList[Unit](lambda unit: unit.short_name)
def _resolve_references(self, id_lookup):
self._units = NamedItemList[Unit](
lambda unit: unit.short_name,
[id_lookup[ref] for ref in self.unit_refs]
)
@property
def units(self) -> NamedItemList[Unit]:
return self._units
@dataclass
class UnitSpec:
"""
A unit spec encapsulates three lists:
* unit groups
* units
* physical_dimensions
The following odx elements are not internalized: ADMIN-DATA, SDGS
"""
unit_groups: NamedItemList[UnitGroup] = field(default_factory=list)
units: NamedItemList[Unit] = field(default_factory=list)
physical_dimensions: NamedItemList[PhysicalDimension] = field(
default_factory=list)
def __post_init__(self):
self.unit_groups = NamedItemList(lambda x: x.short_name,
self.unit_groups)
self.units = NamedItemList(lambda x: x.short_name, self.units)
self.physical_dimensions = NamedItemList(lambda x: x.short_name,
self.physical_dimensions)
def _build_id_lookup(self):
id_lookup = {}
id_lookup.update({
unit.id: unit for unit in self.units
})
id_lookup.update({
dim.id: dim for dim in self.physical_dimensions
})
return id_lookup
def _resolve_references(self, id_lookup):
for unit in self.units:
unit._resolve_references(id_lookup)
for group in self.unit_groups:
group._resolve_references(id_lookup)
def read_unit_from_odx(et_element):
id = et_element.get("ID")
oid = et_element.get("OID")
short_name = et_element.find("SHORT-NAME").text
long_name = et_element.findtext("LONG-NAME")
description = read_description_from_odx(et_element.find("DESC"))
display_name = et_element.find("DISPLAY-NAME").text
def read_optional_float(element, name):
if element.findtext(name):
return float(element.findtext(name))
else:
return None
factor_si_to_unit = read_optional_float(et_element, "FACTOR-SI-TO-UNIT")
offset_si_to_unit = read_optional_float(et_element, "OFFSET-SI-TO-UNIT")
ref_element = et_element.find("PHYSICAL-DIMENSION-REF")
if ref_element is not None:
physical_dimension_ref = ref_element.get("ID-REF")
else:
physical_dimension_ref = None
return Unit(
id=id,
short_name=short_name,
display_name=display_name,
oid=oid,
long_name=long_name,
description=description,
factor_si_to_unit=factor_si_to_unit,
offset_si_to_unit=offset_si_to_unit,
physical_dimension_ref=physical_dimension_ref
)
def read_physical_dimension_from_odx(et_element):
id = et_element.get("ID")
oid = et_element.get("OID")
short_name = et_element.find("SHORT-NAME").text
long_name = et_element.findtext("LONG-NAME")
description = read_description_from_odx(et_element.find("DESC"))
def read_optional_int(element, name):
if element.findtext(name):
return int(element.findtext(name))
else:
return 0
length_exp = read_optional_int(et_element, "LENGTH-EXP")
mass_exp = read_optional_int(et_element, "MASS-EXP")
time_exp = read_optional_int(et_element, "TIME-EXP")
current_exp = read_optional_int(et_element, "CURRENT-EXP")
temperature_exp = read_optional_int(et_element, "TEMPERATURE-EXP")
molar_amount_exp = read_optional_int(et_element, "MOLAR-AMOUNT-EXP")
luminous_intensity_exp = read_optional_int(et_element,
"LUMINOUS-INTENSITY-EXP")
return PhysicalDimension(
id=id,
short_name=short_name,
oid=oid,
long_name=long_name,
description=description,
length_exp=length_exp,
mass_exp=mass_exp,
time_exp=time_exp,
current_exp=current_exp,
temperature_exp=temperature_exp,
molar_amount_exp=molar_amount_exp,
luminous_intensity_exp=luminous_intensity_exp
)
def read_unit_group_from_odx(et_element):
oid = et_element.get("OID")
short_name = et_element.find("SHORT-NAME").text
long_name = et_element.findtext("LONG-NAME")
description = read_description_from_odx(et_element.find("DESC"))
category = et_element.findtext("CATEGORY")
assert category in [
"COUNTRY", "EQUIV-UNITS"], f'A UNIT-GROUP-CATEGORY must be "COUNTRY" or "EQUIV-UNITS". It was {category}.'
unit_refs = [el.get("ID-REF")
for el in et_element.iterfind("UNIT-REFS/UNIT-REF")]
return UnitGroup(
short_name=short_name,
category=category,
unit_refs=unit_refs,
oid=oid,
long_name=long_name,
description=description
)
def read_unit_spec_from_odx(et_element):
unit_groups = [read_unit_group_from_odx(el)
for el in et_element.iterfind("UNIT-GROUPS/UNIT-GROUP")]
units = [read_unit_from_odx(el)
for el in et_element.iterfind("UNITS/UNIT")]
physical_dimensions = [read_physical_dimension_from_odx(el)
for el in et_element.iterfind("PHYSICAL-DIMENSIONS/PHYSICAL-DIMENSION")]
return UnitSpec(
unit_groups=unit_groups,
units=units,
physical_dimensions=physical_dimensions
)
| 33.166124 | 139 | 0.641328 | 5,996 | 0.588882 | 0 | 0 | 6,040 | 0.593204 | 0 | 0 | 3,416 | 0.335494 |
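To make the factor/offset convention concrete, a sketch constructing the kilometre example from the Unit docstring (the IDs are illustrative):
metre = PhysicalDimension(id="ID.metre", short_name="metre", length_exp=1)
km = Unit(id="ID.kilometre", short_name="Kilometre", display_name="km",
          physical_dimension_ref="ID.metre",
          factor_si_to_unit=1000, offset_si_to_unit=0)
km._resolve_references({metre.id: metre})  # wires up km.physical_dimension
assert km.physical_dimension is metre      # and 1 km = 1000 * 1 m + 0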
0e38d2aaf004540b815fbb6471e43af110b9c1be | 562 | py | Python | AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_ttk_textonly.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 2,757 | 2018-04-28T21:41:36.000Z | 2022-03-29T06:33:36.000Z | AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_ttk_textonly.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 20 | 2019-07-23T15:29:32.000Z | 2022-01-21T12:53:04.000Z | AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_ttk_textonly.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 449 | 2018-05-09T05:54:05.000Z | 2022-03-30T14:54:18.000Z | import os
from test import test_support
# Skip this test if _tkinter does not exist.
test_support.import_module('_tkinter')
this_dir = os.path.dirname(os.path.abspath(__file__))
lib_tk_test = os.path.abspath(os.path.join(this_dir, '..', 'lib-tk', 'test'))
with test_support.DirsOnSysPath(lib_tk_test):
import runtktests
def test_main():
with test_support.DirsOnSysPath(lib_tk_test):
test_support.run_unittest(
*runtktests.get_tests(gui=False, packages=['test_ttk']))
if __name__ == '__main__':
test_main()
| 28.1 | 78 | 0.704626 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.16548 |
0e3954cbaaaca908b84089a968d649cf3012eb17 | 1,254 | py | Python | app/auth/forms.py | dancan-sandys/Becky_pizza | 5a12669a9def43768f8ef00ff863dbf3c78ed896 | [
"MIT"
] | null | null | null | app/auth/forms.py | dancan-sandys/Becky_pizza | 5a12669a9def43768f8ef00ff863dbf3c78ed896 | [
"MIT"
] | null | null | null | app/auth/forms.py | dancan-sandys/Becky_pizza | 5a12669a9def43768f8ef00ff863dbf3c78ed896 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField,BooleanField,PasswordField,SubmitField
from wtforms.validators import Email,Required,EqualTo
from wtforms import ValidationError
from ..models import User
class LoginForm(FlaskForm):
email = StringField("enter your email adress",validators = [Required(),Email()])
password = PasswordField("enter password", validators = [Required()])
remember = BooleanField("remember me")
login = SubmitField("login ")
class Signup(FlaskForm):
username = StringField("enter your username", validators=[Required()])
email = StringField("enter your email", validators=[Required(),Email()])
password = PasswordField("enter password" ,validators=[Required(),EqualTo("confirm_password", message= "password must be the same")])
confirm_password = PasswordField ("confirm password", validators=[Required()])
submit = SubmitField("signup")
    # def validate_email(self, data_field):
    #     if User.query.filter_by(email=data_field.data).first():
    #         raise ValidationError("email already registered")
    # def validate_username(self, data_field):
    #     if User.query.filter_by(username=data_field.data).first():
    #         raise ValidationError("username already taken") | 36.882353 | 137 | 0.72488 | 1,027 | 0.818979 | 0 | 0 | 0 | 0 | 0 | 0 | 481 | 0.383573 |
0e3bb55563f6de20b1c6744bf517893f39901074 | 992 | py | Python | tests/check_predictions.py | WGierke/informatiCup2018 | a07c21f6e092e516028c2f45594efbb2071d9b79 | [
"MIT"
] | null | null | null | tests/check_predictions.py | WGierke/informatiCup2018 | a07c21f6e092e516028c2f45594efbb2071d9b79 | [
"MIT"
] | 12 | 2018-02-10T14:37:14.000Z | 2021-06-01T21:52:44.000Z | tests/check_predictions.py | WGierke/informatiCup2018 | a07c21f6e092e516028c2f45594efbb2071d9b79 | [
"MIT"
] | 1 | 2018-01-27T11:20:40.000Z | 2018-01-27T11:20:40.000Z | if __name__ == '__main__':
# Check correct price prediction
price_input_path = 'tests/data/Price_Simple.csv'
price_input = open(price_input_path, 'r').read().splitlines()[0].split(';')
price_prediction = open('price_prediction.csv', 'r').read().splitlines()[0].split(';')
assert price_input == price_prediction[:3]
ground_truth = 1309
    assert abs(int(price_prediction[-1]) - ground_truth) <= 15, "Prediction deviation > 15 deci-cents"
# Check correct route prediction
route_input_path = 'tests/data/Route_Bertha_Simple.csv'
route_input = open(route_input_path, 'r').read().splitlines()[1].split(';')
_, gas_station_id = route_input
route_prediction = open('route_prediction.csv', 'r').read().splitlines()[0].split(';')
gas_station_id2, price, liters = route_prediction
assert gas_station_id == gas_station_id2
assert liters == '0'
ground_truth = 1469
    assert abs(int(price) - ground_truth) <= 15, "Prediction deviation > 15 deci-cents" | 52.210526 | 97 | 0.698589 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 286 | 0.288306 |
0e3c572a1d88b93beced8ffe5d8e701ff718fc40 | 4,421 | py | Python | src/ralph_assets/tests/unit/test_rest_asset_info_per_rack.py | quamilek/ralph_assets | 705aa8ffbc347e643115d772ac9b3660376cb5c2 | [
"Apache-2.0"
] | null | null | null | src/ralph_assets/tests/unit/test_rest_asset_info_per_rack.py | quamilek/ralph_assets | 705aa8ffbc347e643115d772ac9b3660376cb5c2 | [
"Apache-2.0"
] | null | null | null | src/ralph_assets/tests/unit/test_rest_asset_info_per_rack.py | quamilek/ralph_assets | 705aa8ffbc347e643115d772ac9b3660376cb5c2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
from django.contrib.auth.models import User
from django.test import TestCase
from rest_framework.test import APIClient
from ralph_assets.models_assets import Orientation
from ralph_assets.rest.serializers.models_dc_asssets import (
TYPE_ACCESSORY,
TYPE_ASSET,
)
from ralph_assets.tests.utils.assets import (
AssetFactory,
RackFactory,
RackAccessoryFactory,
)
class TestRestAssetInfoPerRack(TestCase):
def setUp(self):
User.objects.create_superuser('test', 'test@test.test', 'test')
self.client = APIClient()
self.client.login(username='test', password='test')
self.rack_1 = RackFactory(max_u_height=3)
rack_2 = RackFactory()
self.asset_1 = AssetFactory(
device_info__position=1,
device_info__slot_no='',
)
self.asset_2 = AssetFactory(
device_info__position=2,
device_info__slot_no='',
)
asset_3 = AssetFactory()
self.rack_1.deviceinfo_set.add(self.asset_1.device_info)
self.rack_1.deviceinfo_set.add(self.asset_2.device_info)
rack_2.deviceinfo_set.add(asset_3.device_info)
self.rack1_accessory = RackAccessoryFactory(
rack=self.rack_1,
server_room=self.rack_1.server_room,
data_center=self.rack_1.server_room.data_center,
orientation=Orientation.front,
)
self.rack2_accessory = RackAccessoryFactory(
rack=rack_2,
server_room=rack_2.server_room,
data_center=rack_2.server_room.data_center,
orientation=Orientation.front,
)
def tearDown(self):
self.client.logout()
def test_get(self):
returned_json = json.loads(
self.client.get(
'/assets/api/rack/{0}/'.format(self.rack_1.id)
).content
)
self.maxDiff = None
expected_json = {
'info': {
'id': self.rack_1.id,
'name': self.rack_1.name,
'data_center': self.rack_1.data_center.id,
'server_room': self.rack_1.server_room.id,
'max_u_height': self.rack_1.max_u_height,
'visualization_col': self.rack_1.visualization_col,
'visualization_row': self.rack_1.visualization_row,
'free_u': self.rack_1.get_free_u(),
'description': '{}'.format(self.rack_1.description),
'orientation': '{}'.format(self.rack_1.get_orientation_desc())
},
'front': [
{
'_type': TYPE_ASSET,
'id': self.asset_1.id,
'url': '{}'.format(self.asset_1.url),
'category': '{}'.format(self.asset_1.model.category),
'barcode': self.asset_1.barcode,
'sn': '{}'.format(self.asset_1.sn),
'height': float(self.asset_1.model.height_of_device),
'position': self.asset_1.device_info.position,
'model': self.asset_1.model.name,
'children': [],
'layout': u'',
},
{
'_type': TYPE_ASSET,
'id': self.asset_2.id,
'url': '{}'.format(self.asset_2.url),
'category': '{}'.format(self.asset_2.model.category),
'barcode': self.asset_2.barcode,
'sn': '{}'.format(self.asset_2.sn),
'height': float(self.asset_2.model.height_of_device),
'position': self.asset_2.device_info.position,
'model': self.asset_2.model.name,
'children': [],
'layout': u'',
},
{
'_type': TYPE_ACCESSORY,
'position': self.rack1_accessory.position,
'remarks': self.rack1_accessory.remarks,
'type': self.rack1_accessory.accessory.name,
},
],
'back': [],
'pdus': []
}
self.assertEquals(returned_json, expected_json)
| 35.943089 | 78 | 0.549649 | 3,850 | 0.870844 | 0 | 0 | 0 | 0 | 0 | 0 | 471 | 0.106537 |
0e3c6ee65a6dc7c61f1f0de5840c630be1c19d33 | 57,419 | py | Python | ovs/extensions/db/arakoon/arakoon/ArakoonManagement.py | mflu/openvstorage_centos | 280a98d3e5d212d58297e0ffcecd325dfecef0f8 | [
"Apache-2.0"
] | 1 | 2015-08-29T16:36:40.000Z | 2015-08-29T16:36:40.000Z | ovs/extensions/db/arakoon/arakoon/ArakoonManagement.py | rootfs-analytics/openvstorage | 6184822340faea1d2927643330a7aaa781d92d36 | [
"Apache-2.0"
] | null | null | null | ovs/extensions/db/arakoon/arakoon/ArakoonManagement.py | rootfs-analytics/openvstorage | 6184822340faea1d2927643330a7aaa781d92d36 | [
"Apache-2.0"
] | null | null | null | """
Copyright (2010-2014) INCUBAID BVBA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from Compat import X
import os
import ArakoonRemoteControl
import os.path
import itertools
import subprocess
import time
import types
import signal
import string
import logging
import Arakoon
from ArakoonExceptions import ArakoonNodeNotLocal
def which_arakoon():
path = '/'.join([X.appDir,"arakoon/bin/arakoon"])
if X.fileExists(path):
return path
else:
return "arakoon"
class ArakoonManagement:
def getCluster(self, clusterName):
"""
@type clusterName: string
@return a helper to config that cluster
"""
return ArakoonCluster(clusterName)
def listClusters(self):
"""
Returns a list with the existing clusters.
"""
fn = '/'.join ([X.cfgDir, "arakoonclusters"])
config = X.getConfig(fn)
return config.sections()
def start(self):
"""
Starts all clusters.
"""
[clus.start() for clus in [self.getCluster(cluster) for cluster in self.listClusters()]]
def stop(self):
"""
Stops all clusters.
"""
[clus.stop() for clus in [self.getCluster(cluster) for cluster in self.listClusters()]]
def restart(self):
"""
Restarts all clusters.
"""
self.stop()
self.start()
class ArakoonCluster:
def __init__(self, clusterName):
self.__validateName(clusterName)
"""
There's a difference between the clusterId and the cluster's name.
The name is used to construct the path to find the config file.
the id is what's inside the cfg file and
what you need to provide to a client that want's to talk to the cluster.
"""
self._clusterName = clusterName
self._binary = which_arakoon()
self._arakoonDir = '/'.join([X.cfgDir, "arakoon"])
self._clustersFNH = '/'.join([X.cfgDir, 'arakoonclusters'])
clusterConfig = X.getConfig(self._clustersFNH)
if not clusterConfig.has_section(self._clusterName):
clusterPath = '/'.join([X.cfgDir,"qconfig", "arakoon", clusterName])
clusterConfig.add_section(clusterName)
clusterConfig.set(clusterName, "path", clusterPath)
if not X.fileExists(self._arakoonDir):
X.createDir(self._arakoonDir)
if not X.fileExists(clusterPath):
X.createDir(clusterPath)
X.writeConfig(clusterConfig, self._clustersFNH)
self._clusterPath = clusterConfig.get(clusterName, "path" )
def _servernodes(self):
return '%s_local_nodes' % self._clusterName
def __repr__(self):
return "<ArakoonCluster:%s>" % self._clusterName
def _getConfigFileName(self):
p = X.getConfig(self._clustersFNH)
if not p.has_section(self._clusterName):
raise Exception("%s not present in %s" % (self._clusterName, self._clustersFNH))
cfgDir = p.get( self._clusterName, "path", False)
cfgFile = '/'.join([cfgDir, self._clusterName])
return cfgFile
def _saveConfig(self,config):
fn = self._getConfigFileName()
X.writeConfig(config,fn)
def _getConfigFile(self):
h = self._getConfigFileName()
return X.getConfig(h)
    def _getClusterId(self):
        clusterId = self._clusterName
        # read the config outside the try block so 'config' is always bound
        # when the except clause needs it
        config = self._getConfigFile()
        try:
            clusterId = config.get("global", "cluster_id")
        except:
            logging.info("setting cluster_id to %s", clusterId)
            config.set("global","cluster_id",clusterId)
        return clusterId
def addBatchedTransactionConfig(self,
name,
max_entries = None,
max_size = None):
"""
Add a batched transaction config section to the configuration of the supplied cluster
@param name the name of the batched transaction config section
@param max_entries the maximum amount of entries before the batched store will persist the changes to tokyo cabinet; default is None, which results in 200.
@param max_size the maximum combined size of the entries (in bytes) before the batched store will persist the changes to tokyo cabinet; default is None, which results in 100_000.
"""
config = self._getConfigFile()
config.addSection(name)
if max_entries is not None:
config.set(name, "max_entries", max_entries)
if max_size is not None:
config.set(name, "max_size", max_size)
config.write()
def addLogConfig(self,
name,
client_protocol = None,
paxos = None,
tcp_messaging = None):
"""
Add a log config section to the configuration of the supplied cluster
@param name the name of the log config section
@param client_protocol the log level for the client_protocol log section
@param paxos the log level for the paxos log section
@param tcp_messaging the log level for the tcp_messaging log section
"""
config = self._getConfigFile()
        config.add_section(name)
if client_protocol is not None:
config.set(name, "client_protocol", client_protocol)
if paxos is not None:
config.set(name, "paxos", paxos)
if tcp_messaging is not None:
config.set(name, "tcp_messaging", tcp_messaging)
        self._saveConfig(config)
def addNode(self,
name,
ip = "127.0.0.1",
clientPort = 7080,
messagingPort = 10000,
logLevel = "info",
logDir = None,
home = None,
tlogDir = None,
wrapper = None,
isLearner = False,
targets = None,
isLocal = False,
logConfig = None,
batchedTransactionConfig = None,
tlfDir = None,
headDir = None,
isWitness = False,
collapseSlowdown = None):
"""
Add a node to the configuration of the supplied cluster
@param name : the name of the node, should be unique across the environment
@param ip : the ip(s) this node should be contacted on (string or string list)
@param clientPort : the port the clients should use to contact this node
@param messagingPort : the port the other nodes should use to contact this node
@param logLevel : the loglevel (debug info notice warning error fatal)
@param logDir : the directory used for logging
@param home : the directory used for the nodes data
@param tlogDir : the directory used for tlogs (if none, home will be used)
@param wrapper : wrapper line for the executable (for example 'softlimit -o 8192')
@param isLearner : whether this node is a learner node or not
@param targets : for a learner node the targets (string list) it learns from
@param isLocal : whether this node is a local node and should be added to the local nodes list
@param logConfig : specifies the log config to be used for this node
@param batchedTransactionConfig : specifies the batched transaction config to be used for this node
@param tlfDir : the directory used for tlfs (if none, tlogDir will be used)
@param headDir : the directory used for head.db (if none, tlfDir will be used)
@param isWitness : whether this node is a witness or not
@param collapseSlowdown : the factor with which collapsing should be slowed down
"""
self.__validateName(name)
self.__validateLogLevel(logLevel)
if isinstance(ip, basestring):
ip = [ip]
config = self._getConfigFile()
nodes = self.__getNodes(config)
if name in nodes:
raise Exception("node %s already present" % name )
if not isLearner:
nodes.append(name)
config.add_section(name)
config.set(name, "ip", ', '.join(ip))
self.__validateInt("clientPort", clientPort)
config.set(name, "client_port", clientPort)
self.__validateInt("messagingPort", messagingPort)
config.set(name, "messaging_port", messagingPort)
config.set(name, "log_level", logLevel)
if logConfig is not None:
config.set(name, "log_config", logConfig)
if batchedTransactionConfig is not None:
config.set(name, "batched_transaction_config", batchedTransactionConfig)
if wrapper is not None:
config.set(name, "wrapper", wrapper)
if logDir is None:
logDir = '/'.join([X.logDir, self._clusterName, name])
config.set(name, "log_dir", logDir)
if home is None:
home = '/'.join([X.varDir, "db", self._clusterName, name])
config.set(name, "home", home)
if tlogDir:
config.set(name,"tlog_dir", tlogDir)
if tlfDir:
config.set(name,"tlf_dir", tlfDir)
if headDir:
config.set(name,"head_dir", headDir)
if isLearner:
config.set(name, "learner", "true")
if targets is None:
targets = self.listNodes()
config.set(name, "targets", string.join(targets,","))
if isWitness:
config.set(name, "witness", "true")
if collapseSlowdown:
config.set(name, "collapse_slowdown", collapseSlowdown)
if not config.has_section("global") :
config.add_section("global")
config.set("global", "cluster_id", self._clusterName)
config.set("global","cluster", ",".join(nodes))
self._saveConfig(config)
if isLocal:
self.addLocalNode(name)
def removeNode(self, name):
"""
Remove a node from the configuration of the supplied cluster
@param name the name of the node as configured in the config file
"""
self.__validateName(name)
config = self._getConfigFile()
nodes = self.__getNodes(config)
if name in nodes:
self.removeLocalNode(name)
config.remove_section(name)
nodes.remove(name)
config.set("global","cluster", ",".join(nodes))
self._saveConfig(config)
return
raise Exception("No node with name %s" % name)
def setMasterLease(self, duration=None):
"""
Set the master lease duration in the supplied cluster
@param duration The duration of the master lease in seconds
"""
section = "global"
key = "lease_period"
config = self._getConfigFile()
if not config.has_section( section ):
raise Exception("Section '%s' not found in config" % section )
if duration:
if not isinstance( duration, int ) :
raise AttributeError( "Invalid value for lease duration (expected int, got: '%s')" % duration)
config.set(section, key, duration)
else:
config.remove_option(section, key)
self._saveConfig(config)
def forceMaster(self, name=None, preferred = False):
"""
Force a master in the supplied cluster
@param name the name of the master to force. If None there is no longer a forced master
@param preferred: Set given node to be preferred master
@type preferred: `bool`
"""
config = self._getConfigFile()
g = 'global'
pm = 'preferred_master'
m = 'master'
if name:
nodes = self.__getNodes(config)
self.__validateName(name)
if not name in nodes:
raise Exception("No node with name %s configured in cluster %s" % (name,self._clusterName) )
config.set(g,m,name)
if preferred:
config.set(g,pm,'true')
else:
config.remove_option(g, m)
if config.has_option(g, pm):
config.remove_option(g, pm)
self._saveConfig(config)
def preferredMasters(self, nodes):
'''
Set a list of preferred master nodes
When the given list is empty, the configuration item is unset.
Since this option is incompatible with a fixed master, this method will
- raise an exception if 'master' is set and 'preferred_master' is false
(or not set, which defaults to false)
- unset 'master' and 'preferred_master' if both are set and
'preferred_master' is true
@param nodes: Names of preferred master nodes
@type nodes: `list` of `str`
'''
if isinstance(nodes, basestring):
raise TypeError('Expected list of strings, not string')
config = self._getConfigFile()
if not nodes:
if config.has_option('global', 'preferred_masters'):
config.remove_option('global', 'preferred_masters')
self._saveConfig(config)
return
section = 'global'
master = 'master'
preferred_master = 'preferred_master'
# Check existing master/preferred_master configuration. Bail out if
# incompatible.
if config.has_option(section, master):
preferred_master_setting = \
config.get(section, preferred_master).lower() \
if config.has_option(section, preferred_master) \
else 'false'
if preferred_master_setting != 'true':
raise Exception(
'Can\'t set both \'master\' and \'preferred_masters\'')
# If reached, 'master' was set and 'preferred_master' was true.
# We're free to remove both, since they're replaced by the
# 'preferred_masters' setting.
config.remove_option(section, master)
if config.has_option(section, preferred_master):
config.remove_option(section, preferred_master)
# Set up preferred_masters
preferred_masters = 'preferred_masters'
config.set(section, preferred_masters, ', '.join(nodes))
self._saveConfig(config)
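    # Hedged sketch (comment-only illustration, not part of the original API):
    # forceMaster() and preferredMasters() are the two mutually exclusive ways
    # to steer mastership; the node names below are made up.
    #
    #     cluster.forceMaster("node_0")                   # fix the master
    #     cluster.forceMaster("node_0", preferred=True)   # prefer, don't fix
    #     cluster.forceMaster(None)                       # clear the setting
    #     cluster.preferredMasters(["node_0", "node_1"])  # several preferred nodes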
def setLogConfig(self, logConfig, nodes=None):
if nodes is None:
nodes = self.listNodes()
else:
for n in nodes :
self.__validateName( n )
config = self._getConfigFile()
for n in nodes:
config.set(n, "log_config", logConfig)
        self._saveConfig(config)
def setLogLevel(self, level, nodes=None):
if nodes is None:
nodes = self.listNodes()
else:
for n in nodes :
self.__validateName( n )
self.__validateLogLevel( level )
config = self._getConfigFile()
for n in nodes:
config.set( n, "log_level", level )
self._saveConfig(config)
def setCollapseSlowdown(self, collapseSlowdown, nodes=None):
if nodes is None:
nodes = self.listNodes()
else:
for n in nodes :
self.__validateName( n )
config = self._getConfigFile()
for n in nodes:
if collapseSlowdown:
config.set(n, "collapse_slowdown", collapseSlowdown)
else:
config.remove_option(n, "collapse_slowdown")
self._saveConfig(config)
def _setTlogCompression(self,nodes, compressor):
if nodes is None:
nodes = self.listNodes()
else:
for n in nodes:
self.__validateName(n)
config = self._getConfigFile()
for n in nodes:
config.remove_option(n, "disable_tlog_compression")
config.set(n, "tlog_compression", compressor)
self._saveConfig(config)
def enableTlogCompression(self, nodes=None, compressor='bz2'):
"""
Enables tlog compression for the given nodes (this is enabled by default)
@param nodes List of node names
@param compressor one of 'bz2', 'snappy', 'none'
"""
self._setTlogCompression(nodes,compressor)
def disableTlogCompression(self, nodes=None):
"""
Disables tlog compression for the given nodes
@param nodes List of node names
"""
self._setTlogCompression(nodes,"none")
def _changeFsync(self, nodes, value):
if nodes is None:
nodes = self.listNodes()
else:
for n in nodes:
self.__validateName(n)
config = self._getConfigFile()
for node in nodes:
config.set(node, 'fsync', value)
self._saveConfig(config)
def enableFsync(self, nodes=None):
'''Enable fsync'ing of tlogs after every operation'''
self._changeFsync(nodes, 'true')
def disableFsync(self, nodes=None):
'''Disable fsync'ing of tlogs after every operation'''
self._changeFsync(nodes, 'false')
def setTLSCACertificate(self, ca_cert_path):
'''Configure path to TLS CA certificate
This corresponds to the `tls_ca_cert` entry in the `global` section.
Set to `None` to unset/disable.
The path should point to a valid file, otherwise a `ValueError` will be
raised.
:param ca_cert_path: Path to CA certificate
:type ca_cert_path: `str`
'''
global_ = 'global'
tls_ca_cert = 'tls_ca_cert'
config = self._getConfigFile()
if ca_cert_path is None:
if config.has_option(global_, tls_ca_cert):
config.remove_option(global_, tls_ca_cert)
self._saveConfig(config)
return
if not os.path.isfile(ca_cert_path):
raise ValueError(
'Invalid ca_cert_path \'%s\': no such file' % ca_cert_path)
config.set(global_, tls_ca_cert, ca_cert_path)
self._saveConfig(config)
def enableTLSService(self):
'''Enable TLS on the client service
This corresponds to the `tls_service` entry in the `global` section.
Note `tls_ca_cert` should be configured before calling this method,
otherwise an `Exception` will be raised.
'''
global_ = 'global'
tls_ca_cert = 'tls_ca_cert'
tls_service = 'tls_service'
config = self._getConfigFile()
if not config.has_option(global_, tls_ca_cert):
raise Exception('No tls_ca_cert configured')
if config.has_option(global_, tls_service):
config.remove_option(global_, tls_service)
config.set(global_, tls_service, 'true')
self._saveConfig(config)
def disableTLSService(self):
'''Disable TLS on the client service
This corresponds to the `tls_service` entry in the `global` section.
'''
global_ = 'global'
tls_service = 'tls_service'
config = self._getConfigFile()
if config.has_option(global_, tls_service):
config.remove_option(global_, tls_service)
self._saveConfig(config)
def enableTLSServiceValidatePeer(self):
'''Enable TLS peer verification on the client service
This corresponds to the `tls_service_validate_peer` entry in the
`global` section.
Note `tls_service` should be enabled before calling this method,
otherwise an `Exception` is raised.
'''
global_ = 'global'
tls_service = 'tls_service'
tls_service_validate_peer = 'tls_service_validate_peer'
config = self._getConfigFile()
if (not config.has_option(global_, tls_service)) \
or (config.get(global_, tls_service).lower() != 'true'):
raise Exception('tls_service not enabled')
if config.has_option(global_, tls_service_validate_peer):
config.remove_option(global_, tls_service_validate_peer)
config.set(global_, tls_service_validate_peer, 'true')
self._saveConfig(config)
def disableTLSServiceValidatePeer(self):
'''Disable TLS peer verification on the client service
This corresponds to the `tls_service_validate_peer` entry in the
`global` section.
'''
global_ = 'global'
tls_service_validate_peer = 'tls_service_validate_peer'
config = self._getConfigFile()
if config.has_option(global_, tls_service_validate_peer):
config.remove_option(global_, tls_service_validate_peer)
self._saveConfig(config)
def setTLSCertificate(self, node, cert_path, key_path):
'''Set the TLS certificate & key paths for a node
This corresponds to the `tls_cert` and `tls_key` entries in a node
section.
Set both `cert_path` and `key_path` to `None` to unset the setting and
disable TLS usage.
Both paths should point to valid files, otherwise a `ValueError` is
raised.
`tls_ca_cert` should be configured before calling this method,
otherwise an `Exception` is raised.
:param node: Node name
:type node: `str`
:param cert_path: Path to node certificate file
:type cert_path: `str`
:param key_path: Path to node key file
:type key_path: `str`
'''
self.__validateName(node)
if cert_path is None and key_path is not None:
raise ValueError('cert_path is None but key_path isn\'t')
if cert_path is not None and key_path is None:
raise ValueError('key_path is None but cert_path isn\'t')
global_ = 'global'
tls_ca_cert = 'tls_ca_cert'
tls_cert = 'tls_cert'
tls_key = 'tls_key'
config = self._getConfigFile()
if cert_path is None and key_path is None:
if config.has_option(node, tls_cert):
config.remove_option(node, tls_cert)
if config.has_option(node, tls_key):
config.remove_option(node, tls_key)
self._saveConfig(config)
return
if not config.has_option(global_, tls_ca_cert):
raise Exception('No tls_ca_cert configured')
if not os.path.isfile(cert_path):
raise ValueError(
'Invalid cert_path \'%s\': no such file' % cert_path)
if not os.path.isfile(key_path):
raise ValueError(
'Invalid key_path \'%s\': no such file' % key_path)
if config.has_option(node, tls_cert):
config.remove_option(node, tls_cert)
if config.has_option(node, tls_key):
config.remove_option(node, tls_key)
config.set(node, tls_cert, cert_path)
config.set(node, tls_key, key_path)
self._saveConfig(config)
def setReadOnly(self, flag = True):
config = self._getConfigFile()
        if flag and len(self.listNodes()) != 1:
raise Exception("only for clusters of size 1")
g = "global"
p = "readonly"
if config.has_option(g,p):
config.remove_option(g, p)
if flag :
config.set(g, p, "true")
self._saveConfig(config)
def setQuorum(self, quorum=None):
"""
Set the quorum for the supplied cluster
The quorum dictates on how many nodes need to acknowledge the new value before it becomes accepted.
The default is (nodes/2)+1
@param quorum the forced quorum. If None, the default is used
"""
config = self._getConfigFile()
if quorum:
            try:
                if (int(quorum) != quorum or
                        quorum < 0 or
                        quorum > len(self.listNodes())):
                    raise ValueError(quorum)
            except (TypeError, ValueError):
                raise Exception("Illegal value for quorum %s" % quorum)
config.set("global", "quorum", int(quorum))
else:
            config.remove_option("global", "quorum")
self._saveConfig(config)
def getClientConfig(self):
"""
Get an object that contains all node information in the supplied cluster
@return dict the dict can be used as param for the ArakoonConfig object
"""
config = self._getConfigFile()
clientconfig = dict()
nodes = self.__getNodes(config)
for name in nodes:
ips = config.get(name, "ip")
ip_list = map(lambda x: x.strip(), ips.split(","))
port = int(config.get(name, "client_port"))
clientconfig[name] = (ip_list, port)
return clientconfig
def getClient(self):
config = self.getClientConfig()
id = self._getClusterId()
client = Arakoon.ArakoonClient(Arakoon.ArakoonClientConfig(id, config))
return client
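    # Hedged sketch (comment-only; set/get are assumptions based on the standard
    # Arakoon Python client API, not verified in this file):
    #
    #     client = cluster.getClient()
    #     client.set('some_key', 'some_value')
    #     value = client.get('some_key')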
def listNodes(self):
"""
Get a list of all node names in the supplied cluster
@return list of strings containing the node names
"""
config = self._getConfigFile()
return self.__getNodes(config)
def getNodeConfig(self,name):
"""
Get the parameters of a node section
@param name the name of the node
@return dict keys and values of the nodes parameters
"""
self.__validateName(name)
config = self._getConfigFile()
nodes = self.__getNodes(config)
if config.has_section(name):
d = {}
for option in config.options(name):
d[option] = config.get(name,option,False)
return d
else:
raise Exception("No node with name %s configured" % name)
def createDirs(self, name):
"""
Create the Directories for a local arakoon node in the supplied cluster
@param name the name of the node as configured in the config file
"""
self.__validateName(name)
config = self._getConfigFile()
if config.has_section(name):
home = config.get(name, "home")
X.createDir(home)
if config.has_option(name, "tlog_dir"):
tlogDir = config.get(name, "tlog_dir")
X.createDir(tlogDir)
if config.has_option(name, "tlf_dir"):
tlfDir = config.get(name, "tlf_dir")
X.createDir(tlfDir)
if config.has_option(name, "head_dir"):
headDir = config.get(name, "head_dir")
X.createDir(headDir)
logDir = config.get(name, "log_dir")
X.createDir(logDir)
return
msg = "No node %s configured" % name
raise Exception(msg)
def removeDirs(self, name):
"""
Remove the Directories for a local arakoon node in the supplied cluster
@param name the name of the node as configured in the config file
"""
self.__validateName(name)
config = self._getConfigFile()
nodes = self.__getNodes(config)
if name in nodes:
home = config.get(name, "home")
X.removeDirTree(home)
if config.has_option(name, "tlog_dir"):
tlogDir = config.get(name, "tlog_dir")
X.removeDirTree(tlogDir)
logDir = config.get(name, "log_dir")
X.removeDirTree(logDir)
return
raise Exception("No node %s" % name )
def addLocalNode(self, name):
"""
Add a node to the list of nodes that have to be started locally
from the supplied cluster
@param name the name of the node as configured in the config file
"""
self.__validateName(name)
config = self._getConfigFile()
nodes = self.__getNodes(config)
config_name = self._servernodes()
if config.has_section(name):
config_name_path = '/'.join([self._clusterPath, config_name])
nodesconfig = X.getConfig(config_name_path)
if not nodesconfig.has_section("global"):
nodesconfig.add_section("global")
nodesconfig.set("global","cluster", "")
nodes = self.__getNodes(nodesconfig)
if name in nodes:
raise Exception("node %s already present" % name)
nodes.append(name)
nodesconfig.set("global","cluster", ",".join(nodes))
X.writeConfig(nodesconfig,config_name_path)
return
raise Exception("No node %s" % name)
def removeLocalNode(self, name):
"""
Remove a node from the list of nodes that have to be started locally
from the supplied cluster
@param name the name of the node as configured in the config file
"""
self.__validateName(name)
config_name = self._servernodes()
config_name_path = '/'.join([self._clusterPath, config_name])
config = X.getConfig(config_name_path)
if not config.has_section("global"):
return
node_str = config.get("global", "cluster").strip()
nodes = node_str.split(',')
if name in nodes:
nodes.remove(name)
node_str = ','.join(nodes)
config.set("global","cluster", node_str)
X.writeConfig(config, config_name_path)
def listLocalNodes(self):
"""
Get a list of the local nodes in the supplied cluster
@return list of strings containing the node names
"""
config_name = self._servernodes()
config_name_path = '/'.join([self._clusterPath, config_name])
config = X.getConfig(config_name_path)
return self.__getNodes(config)
def setUp(self, numberOfNodes, basePort = 7080):
"""
Sets up a local environment
@param numberOfNodes the number of nodes in the environment
@return the dict that can be used as a param for the ArakoonConfig object
"""
cid = self._clusterName
clientPort = basePort
messagingPort = basePort + 1
for i in range(0, numberOfNodes):
nodeName = "%s_%i" %(cid, i)
self.addNode(name = nodeName,
clientPort = clientPort,
messagingPort = messagingPort)
self.addLocalNode(nodeName)
self.createDirs(nodeName)
clientPort += 10
messagingPort += 10
if numberOfNodes > 0:
self.forceMaster("%s_0" % cid)
config = self._getConfigFile()
config.set( 'global', 'cluster_id', cid)
self._saveConfig(config)
def tearDown(self, removeDirs=True ):
"""
Tears down a local environment
@param removeDirs remove the log and home dir
@param cluster the name of the arakoon cluster
"""
config = self._getConfigFile()
nodes = self.__getNodes(config)
for node in nodes:
if removeDirs:
self.removeDirs(node)
self.removeNode(node)
if self.__getForcedMaster(config):
self.forceMaster(None)
self.remove()
def remove(self):
clients_fn = "%s/%s" % (X.cfgDir, "arakoonclients")
clientConf = X.getConfig(clients_fn)
clientConf.remove_section(self._clusterName)
X.writeConfig(clientConf,clients_fn)
fn = self._clustersFNH
clusterConf = X.getConfig(fn)
clusterConf.remove_section(self._clusterName)
X.writeConfig(clusterConf, fn)
X.removeDirTree(self._clusterPath)
def __getForcedMaster(self, config):
if not config.has_section("global"):
return []
if config.has_option("global", "master"):
return config.get("global", "master").strip()
else:
return []
def __getNodes(self, config):
if not config.has_section("global"):
return []
nodes = []
try:
if config.has_option("global", "cluster"):
line = config.get("global", "cluster").strip()
# "".split(",") -> ['']
if line == "":
nodes = []
else:
nodes = line.split(",")
nodes = map(lambda x: x.strip(), nodes)
else:
nodes = []
except LookupError:
pass
return nodes
    def __validateInt(self, name, value):
        if type(value) is not int:
            raise Exception("%s=%s (type = %s) but should be an int" % (name, value, type(value)))
    def __validateName(self, name):
        if not isinstance(name, str):
            raise Exception("Name should be of type string")
        if name.strip() == "":
            raise Exception("A name should be passed. An empty name is not an option")
for char in [' ', ',', '#']:
if char in name:
raise Exception("name should not contain %s" % char)
def __validateLogLevel(self, name):
if not name in ["info", "debug", "notice", "warning", "error", "fatal"]:
raise Exception("%s is not a valid log level" % name)
def start(self):
"""
start all nodes in the cluster
"""
rcs = {}
for name in self.listLocalNodes():
rcs[name] = self._startOne(name)
return rcs
def stop(self):
"""
stop all nodes in the supplied cluster
@param cluster the arakoon cluster name
"""
rcs = {}
for name in self.listLocalNodes():
rcs[name] = self._stopOne(name)
return rcs
def restart(self):
"""
Restart all nodes in the supplied cluster
"""
rcs = {}
for name in self.listLocalNodes():
rcs[name] = self._restartOne(name)
return rcs
def getStatus(self):
"""
Get the status the cluster's nodes running on this machine
@return dict node name -> status (AppStatusType)
"""
status = {}
for name in self.listLocalNodes():
status[name] = self._getStatusOne(name)
return status
def _requireLocal(self, nodeName):
if not nodeName in self.listLocalNodes():
raise ArakoonNodeNotLocal( nodeName)
def startOne(self, nodeName):
"""
Start the node with a given name
@param nodeName The name of the node
"""
self._requireLocal(nodeName)
return self._startOne(nodeName)
def catchupOnly(self, nodeName):
"""
make the node catchup, but don't start it.
(This is handy if you want to minimize downtime before you,
go from a 1 node setup to a 2 node setup)
"""
self._requireLocal(nodeName)
cmd = [self._binary,
'-config',
'%s/%s.cfg' % (self._clusterPath, self._clusterName),
'--node',
nodeName,
'-catchup-only']
return subprocess.call(cmd)
def stopOne(self, nodeName):
"""
Stop the node with a given name
@param nodeName The name of the node
"""
self._requireLocal(nodeName)
return self._stopOne(nodeName)
def remoteCollapse(self, nodeName, n):
"""
        Tell the targeted node to collapse all but n tlog files
@type nodeName: string
@type n: int
"""
config = self.getNodeConfig(nodeName)
ip_mess = config['ip']
ip = self._getIp(ip_mess)
port = int(config['client_port'])
clusterId = self._getClusterId()
ArakoonRemoteControl.collapse(ip,port,clusterId, n)
def copyDbToHead(self, nodeName, n):
"""
        Tell the targeted node to take a copy of its db to be used as head, removing all but n tlogs
@type nodeName: string
@type n: int
"""
config = self.getNodeConfig(nodeName)
ip_mess = config['ip']
ip = self._getIp(ip_mess)
port = int(config['client_port'])
clusterId = self._getClusterId()
ArakoonRemoteControl.copyDbToHead(ip,port,clusterId, n)
def optimizeDb(self, nodeName):
"""
Tell a node to optimize its database (only works on slaves)
@param nodeName The name of the node you want to optimize
@return void
"""
config = self.getNodeConfig(nodeName)
ip_mess = config['ip']
ip = self._getIp(ip_mess)
port = int(config['client_port'])
clusterId = self._getClusterId()
ArakoonRemoteControl.optimizeDb(ip,port, clusterId)
def injectAsHead(self, nodeName, newHead, force=False, inPlace=False):
"""
tell the node to use the file as its new head database
@param nodeName The (local) node where you want to inject the database
@param newHead a database file that can serve as head
@param force forces the database to be injected even when the current head is corrupt
@param inPlace Use in-place rename instead of copying `newHead`
@return Return code of inject-as-head call
"""
self._requireLocal(nodeName)
cmd = [self._binary,'--inject-as-head', newHead, nodeName, '-config',
'%s/%s.cfg' % (self._clusterPath, self._clusterName) ]
if force:
cmd.append('--force')
if inPlace:
cmd.append('--inplace')
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
output = p.communicate()[0]
rc = p.returncode
logging.debug("injectAsHead returned [%d] %s", rc, output)
return rc
def defragDb(self, nodeName):
"""
Tell a node to defrag its database (only works on slaves)
@param nodeName The name of the node you want to optimize
@return void
"""
config = self.getNodeConfig(nodeName)
ip_mess = config['ip']
ip = self._getIp(ip_mess)
port = int(config['client_port'])
clusterId = self._getClusterId()
ArakoonRemoteControl.defragDb(ip,port, clusterId)
def dropMaster(self, nodeName):
"""
Request a node to drop its master role
@param nodeName The name of the node you want to drop its master role
@return void
"""
config = self.getNodeConfig(nodeName)
ip_mess = config['ip']
ip = self._getIp(ip_mess)
port = int(config['client_port'])
clusterId = self._getClusterId()
ArakoonRemoteControl.dropMaster(ip,port, clusterId)
def flushStore(self, nodeName):
"""
Request a node to flush its batched store to disk
@param nodeName The name of the node you want to perform the flush of its store
@return void
"""
config = self.getNodeConfig(nodeName)
ip_mess = config['ip']
ip = self._getIp(ip_mess)
port = int(config['client_port'])
clusterId = self._getClusterId()
ArakoonRemoteControl.flushStore(ip,port, clusterId)
def restartOne(self, nodeName):
"""
Restart the node with a given name in the supplied cluster
@param nodeName The name of the node
"""
self._requireLocal( nodeName)
return self._restartOne(nodeName)
def getStatusOne(self, nodeName):
"""
Get the status node with a given name in the supplied cluster
@param nodeName The name of the node
"""
self._requireLocal(nodeName)
return self._getStatusOne(nodeName)
def backupDb(self, nodeName, location):
"""
        Make a backup of the live database to the specified file
@param nodeName The name of the node you want to backup
@param location The path to the file where the backup should be stored
@return void
"""
config = self.getNodeConfig(nodeName)
ip_mess = config['ip']
ip = self._getIp(ip_mess)
port = int(config['client_port'])
clusterId = self._getClusterId()
ArakoonRemoteControl.downloadDb(ip,port,clusterId, location)
def _cmd(self, name):
r = [self._binary,'--node',name,'-config',
'%s/%s.cfg' % (self._clusterPath, self._clusterName),
'-start']
return r
def _cmdLine(self, name):
cmd = self._cmd(name)
cmdLine = string.join(cmd, ' ')
return cmdLine
def _startOne(self, name):
if self._getStatusOne(name) == X.AppStatusType.RUNNING:
return
config = self.getNodeConfig(name)
cmd = []
if 'wrapper' in config :
wrapperLine = config['wrapper']
cmd = wrapperLine.split(' ')
command = self._cmd(name)
cmd.extend(command)
cmd.append('-daemonize')
logging.debug('calling: %s', str(cmd))
return subprocess.call(cmd, close_fds = True)
def _getIp(self,ip_mess):
t_mess = type(ip_mess)
if t_mess == types.StringType:
parts = ip_mess.split(',')
ip = string.strip(parts[0])
return ip
elif t_mess == types.ListType:
return ip_mess[0]
else:
raise Exception("should '%s' be a string or string list")
def _stopOne(self, name):
line = self._cmdLine(name)
cmd = ['pkill', '-f', line]
logging.debug("stopping '%s' with: %s",name, string.join(cmd, ' '))
rc = subprocess.call(cmd, close_fds = True)
logging.debug("%s=>rc=%i" % (cmd,rc))
i = 0
while(self._getStatusOne(name) == X.AppStatusType.RUNNING):
rc = subprocess.call(cmd, close_fds = True)
logging.debug("%s=>rc=%i" % (cmd,rc))
time.sleep(1)
i += 1
logging.debug("'%s' is still running... waiting" % name)
if i == 10:
msg = "Requesting '%s' to dump crash log information" % name
logging.debug(msg)
X.subprocess.call(['pkill', '-%d' % signal.SIGUSR2, '-f', line], close_fds=True)
time.sleep(1)
logging.debug("stopping '%s' with kill -9" % name)
rc = X.subprocess.call(['pkill', '-9', '-f', line], close_fds = True)
if rc == 0:
rc = 9
cnt = 0
while (self._getStatusOne(name) == X.AppStatusType.RUNNING ) :
logging.debug("'%s' is STILL running... waiting" % name)
time.sleep(1)
cnt += 1
if( cnt > 10):
break
break
else:
X.subprocess.call(cmd, close_fds=True)
if rc < 9:
            rc = 0  # we may have looped one time too many
return rc
def _restartOne(self, name):
self._stopOne(name)
return self._startOne(name)
def _getPid(self, name):
if self._getStatusOne(name) == X.AppStatusType.HALTED:
return None
line = self._cmdLine(name)
cmd = ['pgrep', '-o' ,'-f' , line]
try:
stdout = X.subprocess.check_output( cmd )
return int(stdout)
except:
return None
def _getStatusOne(self,name):
line = self._cmdLine(name)
cmd = ['pgrep','-fn', line]
proc = subprocess.Popen(cmd,
close_fds = True,
stdout=subprocess.PIPE)
pids = proc.communicate()[0]
pid_list = pids.split()
lenp = len(pid_list)
result = None
if lenp == 1:
result = X.AppStatusType.RUNNING
elif lenp == 0:
result = X.AppStatusType.HALTED
else:
for pid in pid_list:
try:
f = open('/proc/%s/cmdline' % pid,'r')
startup = f.read()
f.close()
logging.debug("pid=%s; cmdline=%s", pid, startup)
except:
pass
raise Exception("multiple matches", pid_list)
return result
def getStorageUtilization(self, node = None):
"""Calculate and return the disk usage of the supplied arakoon cluster on the system
When no node name is given, the aggregate consumption of all nodes
configured in the supplied cluster on the system is returned.
Return format is a dictionary containing 3 keys: 'db', 'tlog' and
'log', whose values denote the size of database files
        (*.db, *.db.wal), TLog files (*.tlc, *.tlog, *.tlf) and log files (*).
        :param node: Name of the node to check
        :type node: `str`
        :return: Storage utilization of the node(s)
        :rtype: `dict`
        :raise ArakoonNodeNotLocal: No such local node
"""
local_nodes = self.listLocalNodes()
if node is not None and node not in local_nodes:
raise ArakoonNodeNotLocal ( node )
def helper(config):
home = config['home']
log_dir = config['log_dir']
real_tlog_dir = config.get('tlog_dir', home)
tlf_dir = config.get('tlf_dir', real_tlog_dir)
head_dir = config.get('head_dir', real_tlog_dir)
tlog_dirs = set([real_tlog_dir, tlf_dir])
# 'head_dir' might have a place in here, but head.db wasn't counted
# before (in most cases), so...
db_dirs = set([home])
log_dirs = set([log_dir])
files_in_dir = lambda dir_: itertools.ifilter(os.path.isfile,
(os.path.join(dir_, name) for name in os.listdir(dir_)))
files_in_dirs = lambda dirs: itertools.chain(*(files_in_dir(dir_)
for dir_ in dirs))
matching_files = lambda *exts: lambda files: \
(file_ for file_ in files
if any(file_.endswith(ext) for ext in exts))
tlog_files = matching_files('.tlc', '.tlog','.tlf')
db_files = matching_files('.db', '.db.wal')
log_files = matching_files('') # Every string ends with ''
sum_size = lambda files: sum(os.path.getsize(file_)
for file_ in files)
return {
'tlog': sum_size(tlog_files(files_in_dirs(tlog_dirs))),
'db': sum_size(db_files(files_in_dirs(db_dirs))),
'log': sum_size(log_files(files_in_dirs(log_dirs)))
}
nodes = (node, ) if node is not None else local_nodes
stats = (helper(self.getNodeConfig(node)) for node in nodes)
result = {}
for stat in stats:
for key, value in stat.iteritems():
result[key] = result.get(key, 0) + value
return result
def gatherEvidence(self,
destination,
clusterCredentials=None,
includeLogs=True,
includeDB=True,
includeTLogs=True,
includeConfig=True, test = False):
"""
        @param destination : path INCLUDING FILENAME where the evidence archive is saved. Can be a URI, e.g. ftp://..., smb://..., or a local path such as /tmp/...
        @param clusterCredentials : dict of tuples, e.g. {"node1": ('login', 'password'), "node2": ('login', 'password'), "node3": ('login', 'password')}
        @param includeLogs : Boolean value indicating that the logs need to be included in the evidence archive, default is True
        @param includeDB : Boolean value indicating that the Tokyo Cabinet db and db.wal files need to be included in the evidence archive, default is True
@param includeTLogs : Boolean value indicating that the tlogs need to be included in the evidence archive, default is True
@param includeConfig : Boolean value indicating that the arakoon configuration files should be included in the resulting archive
"""
nodes_list = self.listNodes()
diff_list = self.listNodes()
if q.qshellconfig.interactive or test:
if not clusterCredentials:
clusterCredentials = self._getClusterCredentials(nodes_list,diff_list,test)
elif len(clusterCredentials) < len(nodes_list):
nodes_list = [x for x in nodes_list if x not in clusterCredentials]
diff_list = [x for x in nodes_list if x not in clusterCredentials]
sub_clusterCredentials = self._getClusterCredentials(nodes_list, diff_list, test)
clusterCredentials.update(sub_clusterCredentials)
else:
q.gui.dialog.message("All Nodes have Credentials.")
self._transferFiles(destination,
clusterCredentials,
includeLogs,
includeDB,
includeTLogs,
includeConfig)
else:
if not clusterCredentials or len(clusterCredentials) < len(nodes_list):
raise NameError('Error: QShell is Not interactive')
else:
q.gui.dialog.message("All Nodes have Credentials.")
self._transferFiles(destination,
clusterCredentials,
includeLogs,
includeDB,
includeTLogs,
includeConfig)
def _getClusterCredentials(self,
nodes_list,
diff_list, test):
clusterCredentials = dict()
same_credentials_nodes = list()
for nodename in nodes_list:
node_passwd = ''
if not test:
if nodename in diff_list:
node_config = self.getNodeConfig(nodename)
node_ip_mess = node_config['ip']
node_ip = self._getIp(node_ip_mess)
node_login = q.gui.dialog.askString("Please provide login name for %s @ %s default 'root'" % (nodename, node_ip))
if node_login == '':
node_login = 'root'
while node_passwd == '':
node_passwd = q.gui.dialog.askPassword('Please provide password for %s @ %s' % (nodename, node_ip))
if node_passwd == '':
q.gui.dialog.message("Error: Password is Empty.")
clusterCredentials[nodename] = (node_login, node_passwd)
if len(diff_list) > 1:
same_credentials = q.gui.dialog.askYesNo('Do you want to set the same credentials for any other node?')
diff_list.remove(nodename)
if same_credentials:
same_credentials_nodes = q.gui.dialog.askChoiceMultiple("Please choose node(s) that will take same credentials:",diff_list)
for node in same_credentials_nodes:
clusterCredentials[node] = (node_login, node_passwd)
#end for
if len(same_credentials_nodes) == len(diff_list):
break
else:
diff_list = list(set(diff_list).difference(set(same_credentials_nodes)))
if test:
clusterCredentials[nodename] = ('hudson', 'hudson')
#end for
return clusterCredentials
def _transferFiles(self,
destination,
clusterCredentials,
includeLogs=True,
includeDB=True,
includeTLogs=True,
includeConfig=True):
"""
This function copies the logs, db, tlog and config files to a Temp folder on the machine running the script then compresses the Temp
folder and places a copy at the destination provided at the beginning
"""
nodes_list = self.listNodes()
archive_name = self._clusterName + "_cluster_details"
archive_folder = q.system.fs.joinPaths(q.dirs.tmpDir , archive_name)
cfs = q.cloud.system.fs
sfs = q.system.fs
for nodename in nodes_list:
node_folder = sfs.joinPaths( archive_folder, nodename)
sfs.createDir(node_folder)
configDict = self.getNodeConfig(nodename)
source_ip_mess = configDict['ip']
source_ip = self._getIp(source_ip_mess)
userName = clusterCredentials[nodename][0]
password = clusterCredentials[nodename][1]
source_path = 'sftp://' + userName + ':' + password + '@' + source_ip
if includeDB:
db_files = cfs.listDir( source_path + configDict['home'] )
files2copy = filter ( lambda fn : fn.startswith( nodename ), db_files )
for fn in files2copy :
full_db_file = source_path + configDict['home'] + "/" + fn
cfs.copyFile(full_db_file , 'file://' + node_folder)
if includeLogs:
for fname in cfs.listDir(source_path + configDict['log_dir']):
if fname.startswith(nodename):
fileinlog = q.system.fs.joinPaths(configDict['log_dir'] ,fname)
cfs.copyFile(source_path + fileinlog, 'file://' + node_folder)
if includeTLogs:
source_dir = None
if configDict.has_key('tlog_dir'):
source_dir = configDict['tlog_dir']
else:
source_dir = configDict['home']
full_source_dir = source_path + source_dir
for fname in q.cloud.system.fs.listDir( full_source_dir ):
if fname.endswith('.tlog') or fname.endswith('.tlc') or fname.endswith('.tlf'):
tlogfile = q.system.fs.joinPaths(source_dir ,fname)
cfs.copyFile(source_path + tlogfile, 'file://' + node_folder)
clusterId = self._clusterName + '.cfg'
clusterNodes = self._clusterName + '_local_nodes.cfg'
        clusterPath = '/'.join([self._clusterPath, clusterId])
q.cloud.system.fs.copyFile(source_path + clusterPath, 'file://' + node_folder)
clusterNodesPath = q.system.fs.joinPaths(self._clusterPath, clusterNodes)
if q.cloud.system.fs.sourcePathExists('file://' + clusterNodesPath):
q.cloud.system.fs.copyFile(source_path + clusterNodesPath, 'file://' + node_folder)
archive_file = sfs.joinPaths( q.dirs.tmpDir, self._clusterName + '_cluster_evidence.tgz')
q.system.fs.targzCompress( archive_folder, archive_file)
cfs.copyFile('file://' + archive_file , destination)
q.system.fs.removeDirTree( archive_folder )
q.system.fs.unlink( archive_file )
def setNurseryKeeper(self, clusterId):
"""
Updates the cluster configuration file to the correct nursery keeper cluster.
If the keeper needs to be removed from the cluster config, specify None as clusterId
This requires a valid client configuration on the system that can be used to access the keeper cluster.
@param clusterId: The id of the cluster that will function as nursery keeper
@type clusterId: string / None
@return void
"""
config = self._getConfigFile()
        if clusterId is None:
            config.remove_section("nursery")
            self._saveConfig(config)
            return
cliCfg = q.clients.arakoon.getClientConfig(clusterId)
nurseryNodes = cliCfg.getNodes()
if len(nurseryNodes) == 0:
raise RuntimeError("A valid client configuration is required for cluster '%s'" % (clusterId) )
config.add_section("nursery")
config.set("nursery", "cluster_id", clusterId)
config.set("nursery", "cluster", ",".join( nurseryNodes.keys() ))
for (id,(ip,port)) in nurseryNodes.iteritems() :
if isinstance(ip, basestring):
ip = [ip]
config.add_section(id)
config.set(id, "ip", ', '.join(ip))
config.set(id,"client_port",port)
self._saveConfig(config)
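# Hedged end-to-end sketch, added for illustration (not part of the original
# module); the cluster name, node count and base port are arbitrary assumptions.
def _example_local_cluster_lifecycle():
    cluster = ArakoonCluster("demo")
    # setUp() adds the nodes, registers them locally, creates their directories
    # and forces demo_0 as master (see setUp() above).
    cluster.setUp(numberOfNodes=2, basePort=7080)
    try:
        logging.info("start rcs: %s", cluster.start())
        logging.info("status: %s", cluster.getStatus())
        logging.info("disk usage: %s", cluster.getStorageUtilization())
    finally:
        # tearDown() removes nodes and directories but does not stop processes,
        # so stop the local nodes first.
        cluster.stop()
        cluster.tearDown(removeDirs=True)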
| 34.403235 | 186 | 0.579582 | 56,443 | 0.983002 | 0 | 0 | 0 | 0 | 0 | 0 | 18,561 | 0.323255 |
0e3cd70195904ea45cfece716793aff4e55ba9e8 | 647 | py | Python | setup.py | THUIR/click_model_for_mobile_search | e6c02c0b559822fa236e74934805f934da3cd162 | [
"BSD-3-Clause"
] | 10 | 2019-03-14T21:10:43.000Z | 2022-02-02T15:19:07.000Z | setup.py | THUIR/click_model_for_mobile_search | e6c02c0b559822fa236e74934805f934da3cd162 | [
"BSD-3-Clause"
] | null | null | null | setup.py | THUIR/click_model_for_mobile_search | e6c02c0b559822fa236e74934805f934da3cd162 | [
"BSD-3-Clause"
] | 1 | 2019-08-24T13:08:08.000Z | 2019-08-24T13:08:08.000Z | import glob
from setuptools import setup
def read_md(file_name):
try:
from pypandoc import convert
return convert(file_name, 'rest')
    except Exception:
        # fall back to an empty long_description when pypandoc/pandoc is unavailable
return ''
setup(
name='clickmodels',
version='2.0.0',
author='Jiaxin Mao',
packages=['clickmodels'],
scripts=glob.glob('bin/*.py'),
url='https://github.com/defaultstr/clickmodels',
license='LICENSE',
description='Click models for mobile search, forked from the project by Aleksandr Chuklin (https://github.com/varepsilon/clickmodels)',
long_description=read_md('README.md'),
install_requires=[],
)
| 25.88 | 139 | 0.681607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 248 | 0.383308 |
0e3d9b2cb4c223b6f43d3995bfdac7d9d91e409d | 3,227 | py | Python | scripts/extract_user_sudo_privileges.py | worr/sysops-api | e67dcdb7adf2ba82dc5215a73ccf6b592978e93f | [
"Apache-2.0"
] | 40 | 2015-01-24T19:00:05.000Z | 2022-03-03T01:30:20.000Z | scripts/extract_user_sudo_privileges.py | worr/sysops-api | e67dcdb7adf2ba82dc5215a73ccf6b592978e93f | [
"Apache-2.0"
] | null | null | null | scripts/extract_user_sudo_privileges.py | worr/sysops-api | e67dcdb7adf2ba82dc5215a73ccf6b592978e93f | [
"Apache-2.0"
] | 12 | 2015-02-03T17:02:27.000Z | 2020-07-24T03:23:37.000Z | #!/usr/bin/python2.6
# (c) [2013] LinkedIn Corp. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from optparse import OptionParser
import sys
sys.path.append("/usr/local/admin")
import sysopsapi.cache_extractor
##########################################################################
def main():
parser = OptionParser(usage="usage: %prog [options]",
version="%prog 1.0")
parser.add_option("--verbose",
action="store_true",
dest="verbose",
default=False,
help="Enable verbose execution")
parser.add_option("--range-query",
action="store",
dest="range_query",
help="Specify a range cluster of hosts you which to use to use to make queries against.")
parser.add_option("--user",
action="store",
dest="user",
help="Specify a unix user uid or id that you are interested in searching for.")
(options, args) = parser.parse_args()
ina_groups = []
ina_group_file = open(
'/etc/sudo.d/sudoers-USERS_GROUP_WORLD_READABLE', 'r').readlines()
for line in ina_group_file:
if options.user in line:
ina_groups.append(line.split()[1])
sudoers_rules = {}
sudoers_file = open('/etc/sudo.d/sudoers_WORLD_READABLE', 'r').readlines()
for line in sudoers_file:
if " = " in line:
ina_group = line.split()[0].strip()
machine_group = line.split()[1].strip()
privs = line.split('=')[1].strip()
if ina_group in ina_groups:
sudoers_rules[machine_group] = privs
cache_results = sysopsapi.cache_extractor.CacheExtractor(verbose=options.verbose,
scope='global',
contents=True,
range_query=options.range_query,
search_string='sudoers-MACHINE_GROUP')
for key in cache_results._gold.iterkeys():
host = key.split('#')[0]
for line in cache_results._gold[key].splitlines():
if "Host_Alias" in line:
system_machine_group = line.split()[1]
if sudoers_rules.get(system_machine_group):
print ("user: " + options.user).ljust(0) + \
("privs: " + sudoers_rules.get(system_machine_group)).center(40) + \
("host: " + host).center(40) + \
("machine_group: " + system_machine_group).rjust(10)
##########################################################################
if __name__ == '__main__':
main()
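# Hedged invocation sketch (illustration only; the user name and range cluster
# are made up, but the flags match the optparse options defined in main()):
#   ./extract_user_sudo_privileges.py --user jdoe --range-query %all-prod --verbose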
| 43.608108 | 135 | 0.534242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,159 | 0.359157 |
0e3e2d475e4d28c9969630febc816f8bc00f2ddf | 626 | py | Python | lbworkflow/tests/purchase/models.py | wearypossum4770/django-lb-workflow | 8db36c7a8c5cf3aa2492048cad9fbf26d895c8c7 | [
"MIT"
] | 194 | 2017-04-24T15:28:16.000Z | 2021-12-29T03:35:28.000Z | lbworkflow/tests/purchase/models.py | wearypossum4770/django-lb-workflow | 8db36c7a8c5cf3aa2492048cad9fbf26d895c8c7 | [
"MIT"
] | 17 | 2018-05-31T07:45:42.000Z | 2021-12-16T08:55:44.000Z | lbworkflow/tests/purchase/models.py | wearypossum4770/django-lb-workflow | 8db36c7a8c5cf3aa2492048cad9fbf26d895c8c7 | [
"MIT"
] | 67 | 2017-05-18T02:28:28.000Z | 2022-01-20T02:05:10.000Z | from django.db import models
from lbworkflow.models import BaseWFObj
class Purchase(BaseWFObj):
title = models.CharField("Title", max_length=255)
reason = models.CharField("Reason", max_length=255)
def __str__(self):
return self.reason
class Item(models.Model):
purchase = models.ForeignKey(
Purchase,
on_delete=models.CASCADE,
)
name = models.CharField("Name", max_length=255)
qty = models.IntegerField("Qty")
note = models.CharField("Note", max_length=255)
class Meta:
verbose_name = "Purchase Item"
def __str__(self):
return self.name
| 22.357143 | 55 | 0.670927 | 550 | 0.878594 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.07508 |
0e3e3487ba35fcedce9a553cfee80ee9e1454f0d | 900 | py | Python | interface/python/test.py | gaubert/nessDB | 69eed5ec2d4e6bf06853cebf7a506d214eaf7d62 | [
"BSD-3-Clause"
] | 1 | 2020-09-24T02:41:23.000Z | 2020-09-24T02:41:23.000Z | interface/python/test.py | gaubert/nessDB | 69eed5ec2d4e6bf06853cebf7a506d214eaf7d62 | [
"BSD-3-Clause"
] | null | null | null | interface/python/test.py | gaubert/nessDB | 69eed5ec2d4e6bf06853cebf7a506d214eaf7d62 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# author : KDr2
# BohuTANG @2012
#
import sys
import random
import string
import time
import nessdb
def gen_random_str(length):
    return ''.join([random.choice('abcdefghijklmnoprstuvwyxzABCDEFGHIJKLMNOPRSTUVWXYZ') for i in range(length)])
def ness_open(db_name):
return nessdb.NessDB(db_name)
def ness_write(db, c):
s_time = time.time()
for i in range(0, c):
key = gen_random_str(16)
db.db_add(key, "abcd")
if (i % 10000) == 0:
sys.stdout.write("\r\x1b[K ....write finished " + i.__str__())
sys.stdout.flush()
e_time = time.time()
print ""
print "---->count:<%i>,cost time:<%i>, %i/sec\n" %(c, e_time - s_time, c / (e_time - s_time))
if __name__ == '__main__':
if (len(sys.argv) > 2):
if (sys.argv[1] == "write"):
db = ness_open("test")
ness_write(db, int(sys.argv[2]))
db.db_close()
else:
print "test.py write <count>"
| 21.428571 | 106 | 0.645556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 254 | 0.282222 |
0e42a4face2412bee6e21ed42755d4303ac0f51b | 1,588 | py | Python | tests/extensions/openapi/test_external_docs.py | JonarsLi/sanic-ext | 58b2fe7e5e67e5cc2766bfb61be2e8d0f28115f4 | [
"MIT"
] | 14 | 2021-06-18T12:16:02.000Z | 2022-02-10T09:36:50.000Z | tests/extensions/openapi/test_external_docs.py | JonarsLi/sanic-ext | 58b2fe7e5e67e5cc2766bfb61be2e8d0f28115f4 | [
"MIT"
] | 37 | 2021-10-03T10:47:12.000Z | 2022-03-24T10:08:20.000Z | tests/extensions/openapi/test_external_docs.py | JonarsLi/sanic-ext | 58b2fe7e5e67e5cc2766bfb61be2e8d0f28115f4 | [
"MIT"
] | 7 | 2021-10-03T09:43:36.000Z | 2022-03-27T14:29:49.000Z | from sanic import Request, Sanic
from sanic.response import text
from sanic_ext import openapi
from sanic_ext.extensions.openapi.definitions import ExternalDocumentation
from utils import get_spec
def test_external_docs(app: Sanic):
@app.route("/test0")
@openapi.document("http://example.com/more", "Find more info here")
async def handler0(request: Request):
return text("ok")
@app.route("/test1")
@openapi.definition(
document=ExternalDocumentation(
"http://example.com/more", "Find more info here"
)
)
async def handler1(request: Request):
return text("ok")
@app.route("/test2")
@openapi.definition(document="http://example.com/more")
async def handler2(request: Request):
return text("ok")
@app.route("/test3")
async def handler3(request: Request):
"""
openapi:
---
summary: This is a summary.
externalDocs:
description: Find more info here
url: http://example.com/more
"""
return text("ok")
@app.route("/test4")
@openapi.document(
ExternalDocumentation("http://example.com/more", "Find more info here")
)
async def handler4(request: Request):
return text("ok")
spec = get_spec(app)
paths = spec["paths"]
assert len(paths) == 5
for i in range(5):
doc_obj = paths[f"/test{i}"]["get"]["externalDocs"]
assert doc_obj["url"] == "http://example.com/more"
if i != 2:
assert doc_obj["description"] == "Find more info here"
| 28.357143 | 79 | 0.612091 | 0 | 0 | 0 | 0 | 1,016 | 0.639798 | 508 | 0.319899 | 508 | 0.319899 |
0e4381d0af212201e83d5139e926caa71f20f745 | 2,472 | py | Python | cube2/server.py | bobssup/kripken | a19f0aca7b9251c08dd930fa670eaa72db0806f7 | [
"CC-BY-3.0"
] | 892 | 2015-01-06T13:51:18.000Z | 2022-03-28T08:54:27.000Z | cube2/server.py | bobssup/kripken | a19f0aca7b9251c08dd930fa670eaa72db0806f7 | [
"CC-BY-3.0"
] | 37 | 2015-01-24T19:50:38.000Z | 2022-03-11T01:51:17.000Z | cube2/server.py | bobssup/kripken | a19f0aca7b9251c08dd930fa670eaa72db0806f7 | [
"CC-BY-3.0"
] | 264 | 2015-01-31T14:31:47.000Z | 2022-03-30T16:46:54.000Z | #!/usr/bin/env python
'''
Sets up websocket server support to run the server in one HTML page and the client in another HTML page. Each connects to a websocket server, which we relay together, so the two pages think they are connected to each other (see websocket_bi tests in emscripten).
Instructions for websocket networking:
Mode 1: Two clients (one with embedded server)
1. Run this script
2. Run a webserver (e.g. python -m SimpleHTTPServer 8888)
3. Run http://localhost:8888/game.html?low,low,windowed,serve in one browser
4. Run http://localhost:8888/game.html?low,low,windowed in another browser
5. In the second browser, do /connect
'windowed' runs in non-fullscreen mode, useful to run two browsers at once - scroll
all the way down to see the canvas. 'serve' runs the embedded server in that
client.
Mode 2: Server and client
1. Run this script
2. Run a webserver (e.g. python -m SimpleHTTPServer 8888)
3. Run http://localhost:8888/server.html
4. Run http://localhost:8888/game.html?low,low
5. In the client, do /connect
Note that you likely need to run the server and client in different browsers or at least browser windows, since browsers throttle background tabs.
'''
import os, sys, multiprocessing, time
from subprocess import Popen, PIPE, STDOUT
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def path_from_root(*pathelems):
return os.path.join(__rootpath__, *pathelems)
sys.path += [path_from_root('tools/'), path_from_root('tools/websockify')]
import websockify
def websockify_func(wsp):
wsp.start_server()
client = websockify.WebSocketProxy(verbose=True, listen_port=28785, target_host="127.0.0.1", target_port=28786, run_once=True)
client_process = multiprocessing.Process(target=websockify_func, args=(client,))
client_process.start()
print 'client on process', client_process.pid
server = websockify.WebSocketProxy(verbose=True, listen_port=28780, target_host="127.0.0.1", target_port=28781, run_once=True)
server_process = multiprocessing.Process(target=websockify_func, args=(server,))
server_process.start()
print 'server on process', server_process.pid
def relay_server(child):
child.communicate()
relay_child = Popen(['python', path_from_root('tools', 'socket_relay.py'), '28781', '28786'])
relay_process = multiprocessing.Process(target=relay_server, args=(relay_child,))
relay_process.start()
print 'relay on process', relay_process.pid
while 1:
time.sleep(1)
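# Hedged port map, derived from the proxies and relay wired up above:
#   client page -> ws://localhost:28785 -> tcp 127.0.0.1:28786 -+
#                                                               relay (socket_relay.py)
#   server page -> ws://localhost:28780 -> tcp 127.0.0.1:28781 -+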
| 37.454545 | 262 | 0.767395 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,364 | 0.55178 |
0e44e2abfa92ca47f5eabcd2125c3a73c5fbe7bb | 13,106 | py | Python | apps/api/modules/bkdata_aiops.py | qqqqqie/bk-log | 1765f1901aafaa6fb6a57b8db5d35dd32b3cb5c1 | [
"MIT"
] | 75 | 2021-07-14T09:32:36.000Z | 2022-03-31T15:26:53.000Z | apps/api/modules/bkdata_aiops.py | qqqqqie/bk-log | 1765f1901aafaa6fb6a57b8db5d35dd32b3cb5c1 | [
"MIT"
] | 561 | 2021-07-14T07:45:47.000Z | 2022-03-31T11:41:28.000Z | apps/api/modules/bkdata_aiops.py | qqqqqie/bk-log | 1765f1901aafaa6fb6a57b8db5d35dd32b3cb5c1 | [
"MIT"
] | 41 | 2021-07-14T07:39:50.000Z | 2022-03-25T09:22:18.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.utils.translation import ugettext_lazy as _ # noqa
from apps.api.modules.utils import add_esb_info_before_request_for_bkdata_user # noqa
from config.domains import AIOPS_APIGATEWAY_ROOT, AIOPS_MODEL_APIGATEWAY_ROOT # noqa
from apps.api.base import DataAPI, DataApiRetryClass # noqa
class _BkDataAIOPSApi:
    MODULE = _("Data platform AIOps module")
def __init__(self):
self.create_sample_set = DataAPI(
method="POST",
url=AIOPS_APIGATEWAY_ROOT + "sample_set/",
module=self.MODULE,
description=u"创建样本集",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
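        # Hedged usage sketch (comment-only; the payload keys are assumptions,
        # not taken from this file): each DataAPI attribute is a callable
        # endpoint wrapper, invoked with a params dict, e.g.
        #     _BkDataAIOPSApi().create_sample_set({"sample_set_name": "demo"})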
self.add_rt_to_sample_set = DataAPI(
method="POST",
url=AIOPS_APIGATEWAY_ROOT + "sample_set/{sample_set_id}/result_table/",
module=self.MODULE,
url_keys=["sample_set_id"],
description=u"RT提交, 把RT添加到 stage表中",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.collect_configs = DataAPI(
method="POST",
url=AIOPS_APIGATEWAY_ROOT + "sample_set/{sample_set_id}/collect_configs/",
module=self.MODULE,
url_keys=["sample_set_id"],
description=u"创建或更新样本采集配置",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.auto_collect = DataAPI(
method="POST",
url=AIOPS_APIGATEWAY_ROOT
+ "sample_set/{sample_set_id}/result_table/{result_table_id}/extract/auto_collect/",
module=self.MODULE,
url_keys=["sample_set_id", "result_table_id"],
description=u"创建或更新自动修改样本集配置",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.apply_sample_set = DataAPI(
method="POST",
url=AIOPS_APIGATEWAY_ROOT + "sample_set/{sample_set_id}/submit/apply/",
module=self.MODULE,
url_keys=["sample_set_id"],
description=u"执行样本集提交",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.submit_status = DataAPI(
method="GET",
url=AIOPS_APIGATEWAY_ROOT + "sample_set/{sample_set_id}/submit/status/",
module=self.MODULE,
url_keys=["sample_set_id"],
description=u"查询提交后的固化任务执行状态",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.delete_sample_set = DataAPI(
method="DELETE",
url=AIOPS_APIGATEWAY_ROOT + "sample_set/{sample_set_id}/",
module=self.MODULE,
url_keys=["sample_set_id"],
description=u"删除样本集",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.create_model = DataAPI(
method="POST",
url=AIOPS_APIGATEWAY_ROOT + "models/",
module=self.MODULE,
description=u"模型创建",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.create_experiment = DataAPI(
method="POST",
url=AIOPS_APIGATEWAY_ROOT + "models/{model_id}/experiments/",
module=self.MODULE,
url_keys=["model_id"],
description=u"AIOps 创建实验",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.experiments_config = DataAPI(
method="GET",
url=AIOPS_APIGATEWAY_ROOT + "models/{model_id}/experiments/{experiment_id}/config/",
module=self.MODULE,
url_keys=["model_id", "experiment_id"],
description=u"获取实验配置信息",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.retrieve_execute_config = DataAPI(
method="GET",
url=AIOPS_APIGATEWAY_ROOT + "meta_data/retrieve_execute_config/",
module=self.MODULE,
description=u"获取实验执行配置信息",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.update_execute_config = DataAPI(
method="POST",
url=AIOPS_APIGATEWAY_ROOT + "meta_data/update_execute_config/",
module=self.MODULE,
description=u"更新实验执行配置",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.execute_experiments = DataAPI(
method="POST",
url=AIOPS_APIGATEWAY_ROOT + "models/{model_id}/experiments/{experiment_id}/node/execute/",
module=self.MODULE,
url_keys=["model_id", "experiment_id"],
description=u"执行实验配置",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.execute_experiments_node_status = DataAPI(
method="GET",
url=AIOPS_APIGATEWAY_ROOT + "models/{model_id}/experiments/{experiment_id}/node/execute/status/",
module=self.MODULE,
url_keys=["model_id", "experiment_id"],
description=u"实验步骤状态 (当前用于切分状态捕获)",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.basic_models_training_status = DataAPI(
method="POST",
url=AIOPS_APIGATEWAY_ROOT + "models/{model_id}/experiments/{experiment_id}/basic_models/training_status/",
module=self.MODULE,
url_keys=["model_id", "experiment_id"],
description=u"备选模型训练状态列表",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.aiops_get_costum_algorithm = DataAPI(
method="GET",
url=AIOPS_APIGATEWAY_ROOT + "algorithm/{algorithm_name}/",
module=self.MODULE,
url_keys=["algorithm_name"],
description=u"获取单个自定义算法(最新版本)",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.basic_models_evaluation_status = DataAPI(
method="POST",
url=AIOPS_APIGATEWAY_ROOT + "models/{model_id}/experiments/{experiment_id}/basic_models/evaluation_status/",
module=self.MODULE,
url_keys=["model_id", "experiment_id"],
description=u"备选模型评估状态列表",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.basic_model_evaluation_result = DataAPI(
method="GET",
url=AIOPS_MODEL_APIGATEWAY_ROOT
+ "models/{model_id}/experiments/{experiment_id}/basic_models/{basic_model_id}/evaluation_result/",
module=self.MODULE,
url_keys=["model_id", "experiment_id", "basic_model_id"],
description=u"备选模型评估结果",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.pre_commit = DataAPI(
method="POST",
url=AIOPS_APIGATEWAY_ROOT + "models/{model_id}/experiments/{experiment_id}/pre_commit/",
module=self.MODULE,
url_keys=["model_id", "experiment_id"],
description=u"实验提交前查看配置",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.experiment_commit = DataAPI(
method="POST",
url=AIOPS_APIGATEWAY_ROOT + "models/{model_id}/experiments/{experiment_id}/commit/",
module=self.MODULE,
url_keys=["model_id", "experiment_id"],
description=u"实验提交",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.release_config = DataAPI(
method="GET",
url=AIOPS_APIGATEWAY_ROOT + "models/{model_id}/release/{experiment_id}/{basic_model_id}/config/",
module=self.MODULE,
url_keys=["model_id", "experiment_id", "basic_model_id"],
description=u"获取模型发布配置",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.release = DataAPI(
method="POST",
url=AIOPS_APIGATEWAY_ROOT + "models/{model_id}/release/{experiment_id}/{basic_model_id}",
module=self.MODULE,
url_keys=["model_id", "experiment_id", "basic_model_id"],
description=u"模型发布",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.update_model_info = DataAPI(
method="PUT",
url=AIOPS_APIGATEWAY_ROOT + "models/{model_id}/",
module=self.MODULE,
url_keys=["model_id"],
description=u"修改模型",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.aiops_release = DataAPI(
method="GET",
url=AIOPS_APIGATEWAY_ROOT + "models/{model_id}/release/",
module=self.MODULE,
url_keys=["model_id"],
description=u"备选模型列表",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.aiops_release_model_release_id_model_file = DataAPI(
method="GET",
url=AIOPS_APIGATEWAY_ROOT + "models/{model_id}/release/{model_release_id}/model_file/",
module=self.MODULE,
url_keys=["model_id", "model_release_id"],
description=u"获取发布的模型对应的模型文件",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.aiops_experiments_debug = DataAPI(
method="POST",
url=AIOPS_APIGATEWAY_ROOT + "experiments/debug/",
module=self.MODULE,
description=u"训练和预测调试",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
self.serving_data_processing_id_config = DataAPI(
method="GET",
url=AIOPS_APIGATEWAY_ROOT + "serving/{data_processing_id}/config/",
module=self.MODULE,
url_keys=["data_processing_id"],
description=u"AIOps 模型实例信息",
before_request=add_esb_info_before_request_for_bkdata_user,
after_request=None,
default_timeout=300,
)
BkDataAIOPSApi = _BkDataAIOPSApi()
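# Usage sketch (hypothetical payloads; the exact call convention comes from
# apps.api.base.DataAPI, which is assumed here to take the request data and
# to substitute url_keys such as {sample_set_id}):
#
#     resp = BkDataAIOPSApi.create_sample_set({"sample_set_name": "demo"})
#     status = BkDataAIOPSApi.submit_status({"sample_set_id": 123})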
| 43.254125 | 120 | 0.628872 | 11,863 | 0.872023 | 0 | 0 | 0 | 0 | 0 | 0 | 4,161 | 0.305866 |
0e45678b0b57475a51da878ed9c0ce4e52c29673 | 7,106 | py | Python | inverse_test.py | Sangbaek/clas12-nflows | c69ce0d962b87e793a996c6cb263389f30c2bcb5 | [
"MIT"
] | null | null | null | inverse_test.py | Sangbaek/clas12-nflows | c69ce0d962b87e793a996c6cb263389f30c2bcb5 | [
"MIT"
] | null | null | null | inverse_test.py | Sangbaek/clas12-nflows | c69ce0d962b87e793a996c6cb263389f30c2bcb5 | [
"MIT"
] | null | null | null | import pickle
import matplotlib.pyplot as plt
import matplotlib as mpl
#mpl.use('pdf')
import itertools
import numpy as np
from datetime import datetime
import torch
from torch import nn
from torch import optim
import os
import sys
import pandas as pd
from utils.utilities import meter
from utils import make_histos
from utils.utilities import cartesian_converter
from utils.utilities import make_model
from utils import dataXZ
from nflows.transforms.autoregressive import MaskedUMNNAutoregressiveTransform
from nflows.flows.base import Flow
from nflows.distributions.normal import StandardNormal
from nflows.distributions.normal import DiagonalNormal
from nflows.transforms.base import CompositeTransform
from nflows.transforms.autoregressive import MaskedAffineAutoregressiveTransform
from nflows.transforms.permutations import ReversePermutation
dev = "cuda" if torch.cuda.is_available() else "cpu"
print(dev)
device = torch.device(dev)
#reonstruct an nflow model
#model_path = "models/"
model_path = "models/Cond/3features/"
feature_subset = [1,2,3,5,6,7,9,10,11] #All 16 features
print(" reading electron NF model ")
model_name = "TM-Final-UMNN_elec_3_6_80_128_-9.54.pt"
model_name ="TM-Final-UMNN_elec_3_6_80_128_-8.26.pt"
params = model_name.split("_")
num_features = int(params[2])
num_layers = int(params[3])
num_hidden_features = int(params[4])
training_sample_size = int(params[5])
print(num_features,num_layers,num_hidden_features,training_sample_size)
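# The checkpoint name encodes hyperparameters: e.g.
# "TM-Final-UMNN_elec_3_6_80_128_-8.26.pt".split("_") yields
# [..., "elec", "3", "6", "80", "128", "-8.26.pt"], so indices 2-5 give
# num_features=3, num_layers=6, num_hidden_features=80, training_sample_size=128.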
flow_e, optimizer_e = make_model(num_layers,num_features,num_hidden_features,device)
print("number of params: ", sum(p.numel() for p in flow_e.parameters()))
flow_e.load_state_dict(torch.load(model_path+model_name))
flow_e.eval()
print(" reading proton NF model ")
model_name = "TM-Final-UMNN_prot_3_6_80_128_-9.97.pt"
model_name = "TM-Final-UMNN_prot_3_6_80_128_-9.23.pt"
params = model_name.split("_")
num_features = int(params[2])
num_layers = int(params[3])
num_hidden_features = int(params[4])
training_sample_size = int(params[5])
print(num_features,num_layers,num_hidden_features,training_sample_size)
flow_p, optimizer_p = make_model(num_layers,num_features,num_hidden_features,device)
print("number of params: ", sum(p.numel() for p in flow_p.parameters()))
flow_p.load_state_dict(torch.load(model_path+model_name))
flow_p.eval()
print(" reading photon NF model ")
model_name = "TM-Final-UMNN_phot_3_2_80_128_-6.81.pt"
model_name = "TM-Final-UMNN_phot_3_6_80_128_-6.43.pt"
params = model_name.split("_")
num_features = int(params[2])
num_layers = int(params[3])
num_hidden_features = int(params[4])
training_sample_size = int(params[5])
print(num_features,num_layers,num_hidden_features,training_sample_size)
flow_g, optimizer_g = make_model(num_layers,num_features,num_hidden_features,device)
print("number of params: ", sum(p.numel() for p in flow_g.parameters()))
flow_g.load_state_dict(torch.load(model_path+model_name))
flow_g.eval()
print("reading truth data")
train = dataXZ.dataXZ(feature_subset=feature_subset, file = "data/train.pkl", mode = "epg")
truth_entire = train.truth.detach().numpy()
reco_entire = train.reco.detach().numpy()
print("done with reading truth data")
print("reading test data")
test = dataXZ.dataX(feature_subset=feature_subset, file = "data/test.pkl", mode = "epg")
reco_test = test.reco.detach().numpy()
print("done with reading test data")
trials = 10000 # number of candidate truth vectors sampled per event
means = np.mean(reco_entire - truth_entire, axis=0) # per-feature reco-truth offset
stds = np.std(reco_entire - truth_entire, axis=0) # per-feature reco-truth spread
n_sample = 100
n_loop = len(reco_test)//n_sample
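# Inversion strategy, per reconstructed event: draw `trials` candidate truth
# vectors by smearing the reco values with the per-feature reco-truth offsets
# above, score each candidate with the conditional flow's
# log_prob(reco | truth), and keep the argmax candidate as the truth estimate.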
for loop_num in range(n_loop):
print("new loop "+str(loop_num))
truths_guess = []
start = datetime.now()
start_time = start.strftime("%H:%M:%S")
print("Start Time =", start_time)
for i in range(n_sample):
maxtruth_e = []
logprob_e = []
maxtruth_p = []
logprob_p = []
maxtruth_g = []
logprob_g = []
#electron
reco_e = reco_test[i+n_sample*loop_num:i+n_sample*loop_num+1, [0,1,2]]
mean_e = means[[0,1,2]]
std_e = stds[[0,1,2]]
#proton
reco_p = reco_test[i+n_sample*loop_num:i+n_sample*loop_num+1, [3,4,5]]
mean_p = means[[3,4,5]]
std_p = stds[[3,4,5]]
#photon
reco_g = reco_test[i+n_sample*loop_num:i+n_sample*loop_num+1, [6,7,8]]
mean_g = means[[6,7,8]]
std_g = stds[[6,7,8]]
truth_e = reco_e + np.random.normal(mean_e, std_e, (trials, 3))
truth_p = reco_p + np.random.normal(mean_p, std_p, (trials, 3))
truth_g = reco_g + np.random.normal(mean_g, std_g, (trials, 3))
# for reco in reco_e:
reco_useful = np.tile(reco_e, (trials, 1))
reco_useful = torch.tensor(reco_useful, dtype=torch.float32).to(device)
truth_e = torch.tensor(truth_e, dtype=torch.float32).to(device)
logprob = flow_e.log_prob(inputs=reco_useful,context=truth_e)
ind_max = np.argmax(logprob.cpu().detach().numpy())
maxtruth_e.append(truth_e[ind_max:ind_max+1, :])
logprob_e.append(logprob[ind_max])
# for reco in reco_p:
reco_useful = np.tile(reco_p, (trials, 1))
reco_useful = torch.tensor(reco_useful, dtype=torch.float32).to(device)
truth_p = torch.tensor(truth_p, dtype=torch.float32).to(device)
logprob = flow_p.log_prob(inputs=reco_useful,context=truth_p)
ind_max = np.argmax(logprob.cpu().detach().numpy())
maxtruth_p.append(truth_p[ind_max:ind_max+1, :])
logprob_p.append(logprob[ind_max])
# for reco in reco_g:
reco_useful = np.tile(reco_g, (trials, 1))
reco_useful = torch.tensor(reco_useful, dtype=torch.float32).to(device)
truth_g = torch.tensor(truth_g, dtype=torch.float32).to(device)
logprob = flow_g.log_prob(inputs=reco_useful,context=truth_g)
ind_max = np.argmax(logprob.cpu().detach().numpy())
maxtruth_g.append(truth_g[ind_max:ind_max+1, :])
logprob_g.append(logprob[ind_max])
#electron
truth_val_e = maxtruth_e[np.argmax(logprob_e)].cpu().detach().numpy()
E_true_e = np.sqrt(truth_val_e[:, 0]**2 + truth_val_e[:, 1]**2 + truth_val_e[:, 2]**2 + (0.5109989461 * 0.001)**2).reshape((-1, 1))
#proton
truth_val_p = maxtruth_p[np.argmax(logprob_p)].cpu().detach().numpy()
E_true_p = np.sqrt(truth_val_p[:, 0]**2 + truth_val_p[:, 1]**2 + truth_val_p[:, 2]**2 + (0.938272081)**2).reshape((-1, 1))
#photon
truth_val_g = maxtruth_g[np.argmax(logprob_g)].cpu().detach().numpy()
E_true_g = np.sqrt(truth_val_g[:, 0]**2 + truth_val_g[:, 1]**2 + truth_val_g[:, 2]**2).reshape((-1, 1))
NF_true = np.hstack( (E_true_e, truth_val_e, E_true_p, truth_val_p, E_true_g, truth_val_g))
truths_guess.append(NF_true)
now = datetime.now()
elapsedTime = (now - start )
print("Current time is {}".format(now.strftime("%H:%M:%S")))
print("Elapsed time is {}".format(elapsedTime))
# print("Total estimated run time is {}".format(elapsedTime+elapsedTime/i*(max_range+1-i)))
Truths = np.concatenate(truths_guess)
df_Truths = pd.DataFrame(Truths)
df_Truths.to_pickle("gendata/Cond/3features/UMNN/Truths_Test_dvcs_{}.pkl".format(loop_num))#num_features,
# num_layers,num_hidden_features,training_sample_size,loop_num))
#print(truths_guess )
#print(truth_validation[0:n_sample, :])
#print(reco_test[0:n_sample, :])
print("done")
quit()
| 36.071066 | 136 | 0.744582 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,188 | 0.167183 |
0e458fb301ccce1987472231c3b00c1628040969 | 13,195 | py | Python | pytket/pytket/passes/script.py | NewGitter2017/tket | 6ff81af26280770bf2ca80bfb2140e8fa98182aa | [
"Apache-2.0"
] | null | null | null | pytket/pytket/passes/script.py | NewGitter2017/tket | 6ff81af26280770bf2ca80bfb2140e8fa98182aa | [
"Apache-2.0"
] | null | null | null | pytket/pytket/passes/script.py | NewGitter2017/tket | 6ff81af26280770bf2ca80bfb2140e8fa98182aa | [
"Apache-2.0"
] | null | null | null | # Copyright 2019-2021 Cambridge Quantum Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from lark import Lark, Transformer
from pytket.circuit import OpType # type: ignore
from pytket.passes import BasePass, RepeatPass, SequencePass # type: ignore
from pytket.passes import ( # type: ignore
CliffordSimp,
CommuteThroughMultis,
ContextSimp,
DecomposeArbitrarilyControlledGates,
DecomposeBoxes,
DecomposeClassicalExp,
DecomposeMultiQubitsCX,
DecomposeSingleQubitsTK1,
DelayMeasures,
EulerAngleReduction,
FlattenRegisters,
FullPeepholeOptimise,
GuidedPauliSimp,
KAKDecomposition,
OptimisePhaseGadgets,
PauliSimp,
PauliSquash,
PeepholeOptimise2Q,
RebaseCirq,
RebaseHQS,
RebaseProjectQ,
RebasePyZX,
RebaseQuil,
RebaseTket,
RebaseUMD,
RebaseUFR,
RebaseOQC,
RemoveBarriers,
RemoveDiscarded,
RemoveRedundancies,
SimplifyInitial,
SimplifyMeasured,
SynthesiseHQS,
SynthesiseTket,
SynthesiseOQC,
SynthesiseUMD,
SquashHQS,
ThreeQubitSquash,
)
from pytket.transform import CXConfigType, PauliSynthStrat # type: ignore
pass_grammar = """
start: comp_pass
comp_pass: ( basic_pass | seq_pass | repeat_pass )
basic_pass:
| clifford_simp
| clifford_simp_no_swaps
| commute_through_multis
| context_simp
| context_simp_no_classical
| decompose_arbitrarily_controlled_gates
| decompose_boxes
| decompose_classical_exp
| decompose_multi_qubits_cx
| decompose_single_qubits_tk1
| delay_measures
| euler_angle_reduction
| flatten_registers
| full_peephole_optimise
| full_peephole_optimise_no_swaps
| guided_pauli_simp
| guided_pauli_simp_default
| kak_decomposition
| optimise_phase_gadgets
| optimise_phase_gadgets_default
| pauli_simp
| pauli_simp_default
| pauli_squash
| pauli_squash_default
| peephole_optimise_2q
| rebase_cirq
| rebase_hqs
| rebase_oqc
| rebase_projectq
| rebase_pyzx
| rebase_quil
| rebase_tket
| rebase_ufr
| rebase_umd
| remove_barriers
| remove_discarded
| remove_redundancies
| simplify_initial
| simplify_initial_no_classical
| simplify_measured
| squash_hqs
| synthesise_hqs
| synthesise_tket
| synthesise_oqc
| synthesise_umd
| three_qubit_squash
seq_pass: "[" pass_list "]"
pass_list: comp_pass ("," comp_pass)*
repeat_pass: "repeat" "(" comp_pass ")"
clifford_simp: "CliffordSimp"
clifford_simp_no_swaps: "CliffordSimpNoSwaps"
commute_through_multis: "CommuteThroughMultis"
context_simp: "ContextSimp"
context_simp_no_classical: "ContextSimpNoClassical"
decompose_arbitrarily_controlled_gates: "DecomposeArbitrarilyControlledGates"
decompose_boxes: "DecomposeBoxes"
decompose_classical_exp: "DecomposeClassicalExp"
decompose_multi_qubits_cx: "DecomposeMultiQubitsCX"
decompose_single_qubits_tk1: "DecomposeSingleQubitsTK1"
delay_measures: "DelayMeasures"
euler_angle_reduction: "EulerAngleReduction" "(" op_type "," op_type ")"
flatten_registers: "FlattenRegisters"
full_peephole_optimise: "FullPeepholeOptimise"
full_peephole_optimise_no_swaps: "FullPeepholeOptimiseNoSwaps"
guided_pauli_simp: "GuidedPauliSimp" "(" pauli_synth_strat "," cx_config_type ")"
guided_pauli_simp_default: "GuidedPauliSimp"
kak_decomposition: "KAKDecomposition"
optimise_phase_gadgets: "OptimisePhaseGadgets" "(" cx_config_type ")"
optimise_phase_gadgets_default: "OptimisePhaseGadgets"
pauli_simp: "PauliSimp" "(" pauli_synth_strat "," cx_config_type ")"
pauli_simp_default: "PauliSimp"
pauli_squash: "PauliSquash" "(" pauli_synth_strat "," cx_config_type ")"
pauli_squash_default: "PauliSquash"
peephole_optimise_2q: "PeepholeOptimise2Q"
rebase_cirq: "RebaseCirq"
rebase_hqs: "RebaseHQS"
rebase_oqc: "RebaseOQC"
rebase_projectq: "RebaseProjectQ"
rebase_pyzx: "RebasePyZX"
rebase_quil: "RebaseQuil"
rebase_tket: "RebaseTket"
rebase_ufr: "RebaseUFR"
rebase_umd: "RebaseUMD"
remove_barriers: "RemoveBarriers"
remove_discarded: "RemoveDiscarded"
remove_redundancies: "RemoveRedundancies"
simplify_initial: "SimplifyInitial"
simplify_initial_no_classical: "SimplifyInitialNoClassical"
simplify_measured: "SimplifyMeasured"
squash_hqs: "SquashHQS"
synthesise_hqs: "SynthesiseHQS"
synthesise_tket: "SynthesiseTket"
synthesise_oqc: "SynthesiseOQC"
synthesise_umd: "SynthesiseUMD"
three_qubit_squash: "ThreeQubitSquash"
cx_config_type:
| cx_config_type_snake
| cx_config_type_star
| cx_config_type_tree
| cx_config_type_multi_q_gate
cx_config_type_snake: "Snake"
cx_config_type_star: "Star"
cx_config_type_tree: "Tree"
cx_config_type_multi_q_gate: "MultiQGate"
op_type: ( op_type_rx | op_type_ry | op_type_rz )
op_type_rx: "Rx"
op_type_ry: "Ry"
op_type_rz: "Rz"
pauli_synth_strat:
| pauli_synth_strat_individual
| pauli_synth_strat_pairwise
| pauli_synth_strat_sets
pauli_synth_strat_individual: "Individual"
pauli_synth_strat_pairwise: "Pairwise"
pauli_synth_strat_sets: "Sets"
%import common.WS_INLINE -> WS
%import common.CR
%import common.LF
_NEWLINE: CR? LF
%ignore WS
%ignore _NEWLINE
"""
class PassTransformer(Transformer):
def start(self, t: List) -> BasePass:
return t[0]
def comp_pass(self, t: List) -> BasePass:
return t[0]
def basic_pass(self, t: list) -> BasePass:
return t[0]
def seq_pass(self, t: List) -> BasePass:
return t[0]
def pass_list(self, t: List) -> BasePass:
return SequencePass(t)
def repeat_pass(self, t: List) -> BasePass:
return RepeatPass(t[0])
def clifford_simp(self, t: List) -> BasePass:
return CliffordSimp()
def clifford_simp_no_swaps(self, t: List) -> BasePass:
return CliffordSimp(allow_swaps=False)
def commute_through_multis(self, t: List) -> BasePass:
return CommuteThroughMultis()
def context_simp(self, t: List) -> BasePass:
return ContextSimp()
def context_simp_no_classical(self, t: List) -> BasePass:
return ContextSimp(allow_classical=False)
def decompose_arbitrarily_controlled_gates(self, t: List) -> BasePass:
return DecomposeArbitrarilyControlledGates()
def decompose_boxes(self, t: List) -> BasePass:
return DecomposeBoxes()
def decompose_classical_exp(self, t: List) -> BasePass:
return DecomposeClassicalExp()
def decompose_multi_qubits_cx(self, t: List) -> BasePass:
return DecomposeMultiQubitsCX()
def decompose_single_qubits_tk1(self, t: List) -> BasePass:
return DecomposeSingleQubitsTK1()
def delay_measures(self, t: List) -> BasePass:
return DelayMeasures()
def euler_angle_reduction(self, t: List) -> BasePass:
return EulerAngleReduction(t[0], t[1])
def flatten_registers(self, t: List) -> BasePass:
return FlattenRegisters()
def full_peephole_optimise(self, t: List) -> BasePass:
return FullPeepholeOptimise()
def full_peephole_optimise_no_swaps(self, t: List) -> BasePass:
return FullPeepholeOptimise(allow_swaps=False)
def guided_pauli_simp(self, t: List) -> BasePass:
return GuidedPauliSimp(strat=t[0], cx_config=t[1])
def guided_pauli_simp_default(self, t: List) -> BasePass:
return GuidedPauliSimp()
def kak_decomposition(self, t: List) -> BasePass:
return KAKDecomposition()
def optimise_phase_gadgets(self, t: List) -> BasePass:
return OptimisePhaseGadgets(cx_config=t[0])
def optimise_phase_gadgets_default(self, t: List) -> BasePass:
return OptimisePhaseGadgets()
def pauli_simp(self, t: List) -> BasePass:
return PauliSimp(strat=t[0], cx_config=t[1])
def pauli_simp_default(self, t: List) -> BasePass:
return PauliSimp()
def pauli_squash(self, t: List) -> BasePass:
return PauliSquash(strat=t[0], cx_config=t[1])
def pauli_squash_default(self, t: List) -> BasePass:
return PauliSquash()
def peephole_optimise_2q(self, t: List) -> BasePass:
return PeepholeOptimise2Q()
def rebase_cirq(self, t: List) -> BasePass:
return RebaseCirq()
def rebase_hqs(self, t: List) -> BasePass:
return RebaseHQS()
def rebase_oqc(self, t: List) -> BasePass:
return RebaseOQC()
def rebase_projectq(self, t: List) -> BasePass:
return RebaseProjectQ()
def rebase_pyzx(self, t: List) -> BasePass:
return RebasePyZX()
def rebase_quil(self, t: List) -> BasePass:
return RebaseQuil()
def rebase_tket(self, t: List) -> BasePass:
return RebaseTket()
def rebase_ufr(self, t: List) -> BasePass:
return RebaseUFR()
def rebase_umd(self, t: List) -> BasePass:
return RebaseUMD()
def remove_barriers(self, t: List) -> BasePass:
return RemoveBarriers()
def remove_discarded(self, t: List) -> BasePass:
return RemoveDiscarded()
def remove_redundancies(self, t: List) -> BasePass:
return RemoveRedundancies()
def simplify_initial(self, t: List) -> BasePass:
return SimplifyInitial()
def simplify_initial_no_classical(self, t: List) -> BasePass:
return SimplifyInitial(allow_classical=False)
def simplify_measured(self, t: List) -> BasePass:
return SimplifyMeasured()
def squash_hqs(self, t: List) -> BasePass:
return SquashHQS()
def synthesise_hqs(self, t: List) -> BasePass:
return SynthesiseHQS()
def synthesise_tket(self, t: List) -> BasePass:
return SynthesiseTket()
def synthesise_oqc(self, t: List) -> BasePass:
return SynthesiseOQC()
def synthesise_umd(self, t: List) -> BasePass:
return SynthesiseUMD()
def three_qubit_squash(self, t: List) -> BasePass:
return ThreeQubitSquash()
def cx_config_type(self, t: List) -> CXConfigType:
return t[0]
def cx_config_type_snake(self, t: List) -> CXConfigType:
return CXConfigType.Snake
def cx_config_type_star(self, t: List) -> CXConfigType:
return CXConfigType.Star
def cx_config_type_tree(self, t: List) -> CXConfigType:
return CXConfigType.Tree
def cx_config_type_multi_q_gate(self, t: List) -> CXConfigType:
return CXConfigType.MultiQGate
def op_type(self, t: List) -> OpType:
return t[0]
def op_type_rx(self, t: List) -> OpType:
return OpType.Rx
def op_type_ry(self, t: List) -> OpType:
return OpType.Ry
def op_type_rz(self, t: List) -> OpType:
return OpType.Rz
def pauli_synth_strat(self, t: List) -> PauliSynthStrat:
return t[0]
def pauli_synth_strat_individual(self, t: List) -> PauliSynthStrat:
return PauliSynthStrat.Individual
def pauli_synth_strat_pairwise(self, t: List) -> PauliSynthStrat:
return PauliSynthStrat.Pairwise
def pauli_synth_strat_sets(self, t: List) -> PauliSynthStrat:
return PauliSynthStrat.Sets
parser = Lark(pass_grammar)
transformer = PassTransformer()
def compilation_pass_from_script(script: str) -> BasePass:
"""Generate a compilation pass from a specification.
The specification must conform to a simple grammar. For example, the following are
valid specifications:
* "RemoveRedundancies"
* "[RemoveBarriers, RemoveRedundancies]" (a sequence of passes)
* "repeat(FullPeepholeOptimise)" (repeat a pass until it doesn't change the circuit)
Sequences and repeats can be nested arbitrarily. Whitespace is ignored.
Most passes are specified using their Python names. For those that take enums as
parameters, non-default values can be specified using their Python names:
* "PauliSimp" (default parameters)
* "PauliSimp(Pairwise, Tree)"
* "EulerAngleReduction(Ry, Rz)"
For some passes with optional boolean parameters the name can be modified as
follows:
* "CliffordSimp" (default parameters)
* "CliffordSimpNoSwaps"
* "SimplifyInitial" (default parameters)
* "SimplifyInitialNoClassical"
There is currently no support for passes requiring more complex parameters such as
lambdas or circuits.
The full formal grammar can be inspected using :py:meth:`compilation_pass_grammar`.
:param script: specification of pass
"""
tree = parser.parse(script)
return transformer.transform(tree)
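# Usage sketch (a minimal example; `p.apply(circ)` mutating the circuit in
# place follows the usual pytket pass API):
#
#     from pytket import Circuit
#     p = compilation_pass_from_script(
#         "repeat([RemoveBarriers, RemoveRedundancies])")
#     circ = Circuit(2).CX(0, 1).CX(0, 1)
#     p.apply(circ)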
def compilation_pass_grammar() -> str:
"""Formal grammar for specifying compilation passes.
    This is the grammar assumed by :py:meth:`compilation_pass_from_script`.
:return: grammar in extended Backus--Naur form"""
return pass_grammar
| 30.125571 | 88 | 0.721031 | 5,855 | 0.443729 | 0 | 0 | 0 | 0 | 0 | 0 | 5,996 | 0.454415 |
0e465c7c5db67591f279b01b569e5ac410ce6462 | 506 | py | Python | terrascript/data/nomad.py | amlodzianowski/python-terrascript | 1111affe6cd30d9b8b7bc74ae4e27590f7d4dc49 | [
"BSD-2-Clause"
] | null | null | null | terrascript/data/nomad.py | amlodzianowski/python-terrascript | 1111affe6cd30d9b8b7bc74ae4e27590f7d4dc49 | [
"BSD-2-Clause"
] | null | null | null | terrascript/data/nomad.py | amlodzianowski/python-terrascript | 1111affe6cd30d9b8b7bc74ae4e27590f7d4dc49 | [
"BSD-2-Clause"
] | null | null | null | # terrascript/data/nomad.py
import terrascript
class nomad_acl_policy(terrascript.Data):
pass
class nomad_acl_token(terrascript.Data):
pass
class nomad_deployments(terrascript.Data):
pass
class nomad_job(terrascript.Data):
pass
class nomad_namespaces(terrascript.Data):
pass
class nomad_regions(terrascript.Data):
pass
__all__ = [
"nomad_acl_policy",
"nomad_acl_token",
"nomad_deployments",
"nomad_job",
"nomad_namespaces",
"nomad_regions",
]
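# Usage sketch (terrascript's accumulate-and-dump pattern; the data-source
# arguments are hypothetical):
#
#     import terrascript
#     from terrascript.data.nomad import nomad_job
#
#     config = terrascript.Terrascript()
#     config += nomad_job("frontend", job_id="frontend")
#     print(str(config))  # Terraform-compatible JSON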
| 13.315789 | 42 | 0.715415 | 290 | 0.573123 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.247036 |
0e495fc33b825c87732c91ad66b85197faa78b41 | 690 | py | Python | features/environment.py | mfuhrmann/meshping | 01355b89cf28c2b43b81d7c0c5edfde4840139a0 | [
"MIT"
] | 17 | 2019-12-06T16:01:41.000Z | 2022-02-24T11:04:09.000Z | features/environment.py | mfuhrmann/meshping | 01355b89cf28c2b43b81d7c0c5edfde4840139a0 | [
"MIT"
] | 23 | 2019-12-05T19:18:59.000Z | 2021-09-03T13:41:06.000Z | features/environment.py | mfuhrmann/meshping | 01355b89cf28c2b43b81d7c0c5edfde4840139a0 | [
"MIT"
] | 6 | 2019-12-06T02:51:02.000Z | 2021-01-31T08:39:59.000Z | import json
import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
def before_all(context):
context.peer_queue = None
class DummyPeeringHandler(BaseHTTPRequestHandler):
def do_POST(self):
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
self.send_response(200)
self.end_headers()
if context.peer_queue is not None:
context.peer_queue.put(json.loads(body))
httpd = HTTPServer(('0.0.0.0', 31337), DummyPeeringHandler)
context.peerserv = threading.Thread(target=httpd.serve_forever, daemon=True)
context.peerserv.start()
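# A scenario step can then hand the handler a queue and block on it to
# assert on the peering payload (a sketch; the asserted field name is
# hypothetical):
#
#     import queue
#     def step_impl(context):
#         context.peer_queue = queue.Queue()
#         payload = context.peer_queue.get(timeout=30)
#         assert "targets" in payload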
| 32.857143 | 80 | 0.685507 | 364 | 0.527536 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.036232 |
0e49791b45f3761db0f53cd99453bc620539bd37 | 456 | py | Python | Modulo 02/exercicios/d041.py | euyag/python-cursoemvideo | d2f684854d926e38ea193816a6c7d2c48d25aa3d | [
"MIT"
] | 2 | 2021-06-22T00:15:11.000Z | 2021-08-02T11:28:56.000Z | Modulo 02/exercicios/d041.py | euyag/python-cursoemvideo | d2f684854d926e38ea193816a6c7d2c48d25aa3d | [
"MIT"
] | null | null | null | Modulo 02/exercicios/d041.py | euyag/python-cursoemvideo | d2f684854d926e38ea193816a6c7d2c48d25aa3d | [
"MIT"
] | null | null | null | print('===== CHALLENGE 041 =====')
nascimento = int(input('Enter the year you were born: '))
idade = 2021 - nascimento
print(f'you are {idade} years old')
if idade <= 9:
    print('you are a mirim (kids-class) swimmer')
elif idade > 9 and idade <= 14:
    print('you are an infantil (youth-class) swimmer')
elif idade > 14 and idade <= 19:
    print('you are a junior swimmer')
elif idade > 19 and idade <= 20:
    print('you are a senior swimmer')
elif idade > 20:
    print('you are a master swimmer')
| 26.823529 | 53 | 0.631579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.436009 |
0e49ee0d9a2475f1064dcee3fd80cff56b6b1889 | 165 | py | Python | project/settings/prod/cors.py | danielbraga/hcap | a3ca0d6963cff19ed6ec0436cce84e2b41615454 | [
"MIT"
] | null | null | null | project/settings/prod/cors.py | danielbraga/hcap | a3ca0d6963cff19ed6ec0436cce84e2b41615454 | [
"MIT"
] | null | null | null | project/settings/prod/cors.py | danielbraga/hcap | a3ca0d6963cff19ed6ec0436cce84e2b41615454 | [
"MIT"
] | null | null | null | """
django:
https://docs.djangoproject.com/en/3.0/ref/settings/#allowed-hosts
"""
from ..env import env
ALLOWED_HOSTS = env("HCAP__ALLOWED_HOSTS", default=[])
| 18.333333 | 69 | 0.70303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.642424 |
0e4cabb3fb9a2919ca8974672393ca3fb3160fbf | 16,677 | py | Python | nt_m.py | kwj1399/ryu_app | df094994e09788053b47aad1c6164033e6bd4c14 | [
"MIT"
] | null | null | null | nt_m.py | kwj1399/ryu_app | df094994e09788053b47aad1c6164033e6bd4c14 | [
"MIT"
] | null | null | null | nt_m.py | kwj1399/ryu_app | df094994e09788053b47aad1c6164033e6bd4c14 | [
"MIT"
] | null | null | null | '''
FileName: nt_m.py
Author: KWJ(kyson)
UpdateTime: 2016/10/10
Introduction: Ryu app that collects port/flow statistics from OpenFlow
    switches, computes per-port free bandwidth, and publishes it to
    Redis keyed by host IP.
'''
from __future__ import division
import copy
from operator import attrgetter
from ryu.base import app_manager
from ryu.base.app_manager import lookup_service_brick
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib import hub
from ryu.lib.packet import packet
from ryu.controller import ofp_event
from ryu.lib.packet import ethernet
from ryu.lib.packet import arp
from ryu.lib.packet import ether_types
import setting
import redis
class NetworkMonitor(app_manager.RyuApp):
"""
NetworkMonitor is a Ryu app for collecting traffic information.
"""
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(NetworkMonitor, self).__init__(*args, **kwargs)
self.name = 'monitor'
self.datapaths = {}
self.port_stats = {}
self.port_speed = {}
self.flow_stats = {}
self.flow_speed = {}
self.stats = {}
self.port_features = {}
self.free_bandwidth = {}
self.mac_to_port = {}
self.ip_to_port = {}
self.ipfreebw = {}
        # Start a green thread that monitors traffic and calculates
        # the free bandwidth of links.
self.monitor_thread = hub.spawn(self._monitor)
@set_ev_cls(ofp_event.EventOFPStateChange,
[MAIN_DISPATCHER, DEAD_DISPATCHER])
def _state_change_handler(self, ev):
"""
Record datapath's info
"""
datapath = ev.datapath
if ev.state == MAIN_DISPATCHER:
if not datapath.id in self.datapaths:
self.logger.debug('register datapath: %016x', datapath.id)
self.datapaths[datapath.id] = datapath
elif ev.state == DEAD_DISPATCHER:
if datapath.id in self.datapaths:
self.logger.debug('unregister datapath: %016x', datapath.id)
del self.datapaths[datapath.id]
def _monitor(self):
"""
Main entry method of monitoring traffic.
"""
while True:
self.stats['flow'] = {}
self.stats['port'] = {}
self._save_ipfreebw(self.free_bandwidth,self.ip_to_port,self.ipfreebw)
pool = redis.ConnectionPool(host='127.0.0.1',port=6379,db=0)
r = redis.StrictRedis(connection_pool=pool)
for key in self.ipfreebw.keys():
r.set(key,self.ipfreebw[key])
print(self.free_bandwidth)
print (self.ip_to_port)
print (self.ipfreebw)
for dp in self.datapaths.values():
self.port_features.setdefault(dp.id, {})
self._request_stats(dp)
# refresh data.
hub.sleep(setting.MONITOR_PERIOD)
if self.stats['flow'] or self.stats['port']:
self.show_stat('flow')
self.show_stat('port')
hub.sleep(1)
def _request_stats(self, datapath):
"""
Sending request msg to datapath
"""
self.logger.debug('send stats request: %016x', datapath.id)
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
req = parser.OFPPortDescStatsRequest(datapath, 0)
datapath.send_msg(req)
req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
datapath.send_msg(req)
req = parser.OFPFlowStatsRequest(datapath)
datapath.send_msg(req)
def _save_freebandwidth(self, dpid, port_no, speed):
# Calculate free bandwidth of port and save it.
port_state = self.port_features.get(dpid).get(port_no)
if port_state:
capacity = port_state[2]
curr_bw = self._get_free_bw(capacity, speed)
key = (dpid,port_no)
if key not in setting.SW_PORT:
self.free_bandwidth.setdefault(key, None)
self.free_bandwidth[(dpid, port_no)] = curr_bw
else:
self.logger.info("Fail in getting port state")
def _save_stats(self, _dict, key, value, length):
if key not in _dict:
_dict[key] = []
_dict[key].append(value)
if len(_dict[key]) > length:
_dict[key].pop(0)
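    # _save_stats keeps only the most recent `length` samples per key, i.e.
    # a fixed-size history window that the speed calculations below diff
    # against.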
def _get_speed(self, now, pre, period):
if period:
return (now - pre) / (period)
else:
return 0
def _get_free_bw(self, capacity, speed):
# capacity:OFPPortDescStatsReply default is kbit/s
return max(capacity*10**3 - speed * 8, 0)
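    # Worked example for _get_free_bw: with capacity = 10000 (kbit/s, so
    # 10**7 bit/s after the *10**3) and speed = 250000 B/s (2*10**6 bit/s
    # after the *8), the free bandwidth is max(10**7 - 2*10**6, 0)
    # = 8,000,000 bit/s.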
def _get_time(self, sec, nsec):
return sec + nsec / (10 ** 9)
def _get_period(self, n_sec, n_nsec, p_sec, p_nsec):
return self._get_time(n_sec, n_nsec) - self._get_time(p_sec, p_nsec)
def _save_ipfreebw(self,freebw,ip_port,ipfreebw):
        for key in ip_port.keys():
            if key in freebw:  # skip ports whose stats have not yet been collected
                ipfreebw[ip_port[key]] = freebw[key]
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# install table-miss flow entry
#
# We specify NO BUFFER to max_len of the output action due to
# OVS bug. At this moment, if we specify a lesser number, e.g.,
# 128, OVS will send Packet-In with invalid buffer_id and
# truncated packet data. In that case, we cannot output packets
# correctly. The bug has been fixed in OVS v2.1.0.
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
def add_flow(self, datapath, priority, match, actions, buffer_id=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if ev.msg.msg_len < ev.msg.total_len:
self.logger.debug("packet truncated: only %s of %s bytes",
ev.msg.msg_len, ev.msg.total_len)
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
# ignore lldp packet
return
dst = eth.dst
src = eth.src
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
#self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
# learn a mac address to avoid FLOOD next time.
self.mac_to_port[dpid][src] = in_port
        # learn the (dpid, in_port) -> source-IP mapping from ARP packets
for p in pkt.get_protocols(arp.arp):
key = (dpid, in_port)
value = p.src_ip
if key not in setting.SW_PORT:
self.ip_to_port.setdefault(key, value)
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
actions = [parser.OFPActionOutput(out_port)]
# install a flow to avoid packet_in next time
if out_port != ofproto.OFPP_FLOOD:
match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
# verify if we have a valid buffer_id, if yes avoid to send both
# flow_mod & packet_out
if msg.buffer_id != ofproto.OFP_NO_BUFFER:
self.add_flow(datapath, 1, match, actions, msg.buffer_id)
return
else:
self.add_flow(datapath, 1, match, actions)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def _flow_stats_reply_handler(self, ev):
"""
Save flow stats reply info into self.flow_stats.
Calculate flow speed and Save it.
"""
body = ev.msg.body
dpid = ev.msg.datapath.id
self.stats['flow'][dpid] = body
self.flow_stats.setdefault(dpid, {})
self.flow_speed.setdefault(dpid, {})
for stat in sorted([flow for flow in body if flow.priority == 1],
key=lambda flow: (flow.match.get('in_port'),
flow.match.get('eth_dst'))):
key = (stat.match['in_port'], stat.match.get('eth_dst'),
stat.instructions[0].actions[0].port)
value = (stat.packet_count, stat.byte_count,
stat.duration_sec, stat.duration_nsec)
self._save_stats(self.flow_stats[dpid], key, value, 5)
# Get flow's speed.
pre = 0
period = setting.MONITOR_PERIOD
tmp = self.flow_stats[dpid][key]
if len(tmp) > 1:
pre = tmp[-2][1]
period = self._get_period(tmp[-1][2], tmp[-1][3],
tmp[-2][2], tmp[-2][3])
speed = self._get_speed(self.flow_stats[dpid][key][-1][1],
pre, period)
self._save_stats(self.flow_speed[dpid], key, speed, 5)
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def _port_stats_reply_handler(self, ev):
"""
Save port's stats info
Calculate port's speed and save it.
"""
body = ev.msg.body
dpid = ev.msg.datapath.id
self.stats['port'][dpid] = body
for stat in sorted(body, key=attrgetter('port_no')):
port_no = stat.port_no
if port_no != ofproto_v1_3.OFPP_LOCAL:
key = (dpid, port_no)
value = (stat.tx_bytes, stat.rx_bytes, stat.rx_errors,
stat.duration_sec, stat.duration_nsec)
self._save_stats(self.port_stats, key, value, 5)
# Get port speed.
pre = 0
period = setting.MONITOR_PERIOD
tmp = self.port_stats[key]
if len(tmp) > 1:
pre = tmp[-2][0] + tmp[-2][1]
period = self._get_period(tmp[-1][3], tmp[-1][4],
tmp[-2][3], tmp[-2][4])
speed = self._get_speed(
self.port_stats[key][-1][0] + self.port_stats[key][-1][1],
pre, period)
self._save_stats(self.port_speed, key, speed, 5)
self._save_freebandwidth(dpid, port_no, speed)
@set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER)
def port_desc_stats_reply_handler(self, ev):
"""
Save port description info.
"""
msg = ev.msg
dpid = msg.datapath.id
ofproto = msg.datapath.ofproto
config_dict = {ofproto.OFPPC_PORT_DOWN: "Down",
ofproto.OFPPC_NO_RECV: "No Recv",
                       ofproto.OFPPC_NO_FWD: "No Forward",
ofproto.OFPPC_NO_PACKET_IN: "No Packet-in"}
state_dict = {ofproto.OFPPS_LINK_DOWN: "Down",
ofproto.OFPPS_BLOCKED: "Blocked",
ofproto.OFPPS_LIVE: "Live"}
ports = []
for p in ev.msg.body:
ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '
'state=0x%08x curr=0x%08x advertised=0x%08x '
'supported=0x%08x peer=0x%08x curr_speed=%d '
'max_speed=%d' %
(p.port_no, p.hw_addr,
p.name, p.config,
p.state, p.curr, p.advertised,
p.supported, p.peer, p.curr_speed,
p.max_speed))
if p.config in config_dict:
config = config_dict[p.config]
else:
config = "up"
if p.state in state_dict:
state = state_dict[p.state]
else:
state = "up"
port_feature = (config, state, p.curr_speed*100)
self.port_features[dpid][p.port_no] = port_feature
@set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
def _port_status_handler(self, ev):
"""
Handle the port status changed event.
"""
msg = ev.msg
reason = msg.reason
port_no = msg.desc.port_no
dpid = msg.datapath.id
ofproto = msg.datapath.ofproto
reason_dict = {ofproto.OFPPR_ADD: "added",
ofproto.OFPPR_DELETE: "deleted",
ofproto.OFPPR_MODIFY: "modified", }
if reason in reason_dict:
print "switch%d: port %s %s" % (dpid, reason_dict[reason], port_no)
else:
print "switch%d: Illeagal port state %s %s" % (port_no, reason)
def show_stat(self, type):
'''
Show statistics info according to data type.
type: 'port' 'flow'
'''
if setting.TOSHOW is False:
return
bodys = self.stats[type]
if(type == 'flow'):
print('datapath '' in-port ip-dst '
'out-port packets bytes flow-speed(B/s)')
print('---------------- '' -------- ----------------- '
'-------- -------- -------- -----------')
for dpid in bodys.keys():
for stat in sorted(
[flow for flow in bodys[dpid] if flow.priority == 1],
key=lambda flow: (flow.match.get('in_port'),
flow.match.get('eth_dst'))):
print('%016x %8x %17s %8x %8d %8d %8.1f' % (
dpid,
stat.match['in_port'], stat.match['eth_dst'],
stat.instructions[0].actions[0].port,
stat.packet_count, stat.byte_count,
abs(self.flow_speed[dpid][
(stat.match.get('in_port'),
stat.match.get('eth_dst'),
stat.instructions[0].actions[0].port)][-1])))
print '\n'
if(type == 'port'):
print('datapath port ''rx-pkts rx-bytes rx-error '
'tx-pkts tx-bytes tx-error port-speed(B/s)'
' current-capacity(Kbps) '
'port-stat link-stat')
print('---------------- -------- ''-------- -------- -------- '
'-------- -------- -------- '
'---------------- ---------------- '
' ----------- -----------')
format = '%016x %8x %8d %8d %8d %8d %8d %8d %8.1f %16d %16s %16s'
for dpid in bodys.keys():
for stat in sorted(bodys[dpid], key=attrgetter('port_no')):
if stat.port_no != ofproto_v1_3.OFPP_LOCAL:
print(format % (
dpid, stat.port_no,
stat.rx_packets, stat.rx_bytes, stat.rx_errors,
stat.tx_packets, stat.tx_bytes, stat.tx_errors,
abs(self.port_speed[(dpid, stat.port_no)][-1]),
self.port_features[dpid][stat.port_no][2],
self.port_features[dpid][stat.port_no][0],
self.port_features[dpid][stat.port_no][1]))
print '\n'
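# Run sketch (assumes a Python 2 Ryu install and a local Redis on the
# default port, as hard-coded in _monitor):
#
#     $ ryu-manager nt_m.py
#
# Another process can then read the published free bandwidth back, e.g.:
#
#     import redis
#     r = redis.StrictRedis(host='127.0.0.1', port=6379, db=0)
#     print r.get('10.0.0.1')  # free bandwidth (bit/s) for that host, if seen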
| 39.518957 | 86 | 0.538166 | 16,039 | 0.961744 | 0 | 0 | 8,914 | 0.534509 | 0 | 0 | 2,952 | 0.17701 |
0e4ecce9b8ee44d7a2ecb07d04597a919fe2daa5 | 146 | py | Python | euler_01.py | zhou-le/euler | 78033a10bc186fbd2a004760966e15724b0a98f9 | [
"Apache-2.0"
] | 2 | 2018-12-24T15:36:00.000Z | 2021-06-07T09:35:59.000Z | euler_01.py | zhou-le/Euler | 78033a10bc186fbd2a004760966e15724b0a98f9 | [
"Apache-2.0"
] | null | null | null | euler_01.py | zhou-le/Euler | 78033a10bc186fbd2a004760966e15724b0a98f9 | [
"Apache-2.0"
] | null | null | null | #coding: utf-8
#date: 2018/7/30 19:07
#author: zhou_le
# Sum of the multiples of 3 and 5 below 1000
print(sum([i for i in range(1000) if i % 3 == 0 or i % 5 == 0]))
| 20.857143 | 64 | 0.59589 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.560976 |
0e4f69138aecd1313f122259efebb214ee565651 | 19,502 | py | Python | venv/Lib/site-packages/tensorboard/uploader/proto/server_info_pb2.py | masterrey/SmartMachines | e48aff314b1171a13a39c3a41230d900bf090a1f | [
"Apache-2.0"
] | 353 | 2020-12-10T10:47:17.000Z | 2022-03-31T23:08:29.000Z | venv/Lib/site-packages/tensorboard/uploader/proto/server_info_pb2.py | masterrey/SmartMachines | e48aff314b1171a13a39c3a41230d900bf090a1f | [
"Apache-2.0"
] | 80 | 2020-12-10T09:54:22.000Z | 2022-03-30T22:08:45.000Z | venv/Lib/site-packages/tensorboard/uploader/proto/server_info_pb2.py | masterrey/SmartMachines | e48aff314b1171a13a39c3a41230d900bf090a1f | [
"Apache-2.0"
] | 63 | 2020-12-10T17:10:34.000Z | 2022-03-28T16:27:07.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorboard/uploader/proto/server_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorboard/uploader/proto/server_info.proto',
package='tensorboard.service',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n,tensorboard/uploader/proto/server_info.proto\x12\x13tensorboard.service\"l\n\x11ServerInfoRequest\x12\x0f\n\x07version\x18\x01 \x01(\t\x12\x46\n\x14plugin_specification\x18\x02 \x01(\x0b\x32(.tensorboard.service.PluginSpecification\"\xb7\x02\n\x12ServerInfoResponse\x12\x39\n\rcompatibility\x18\x01 \x01(\x0b\x32\".tensorboard.service.Compatibility\x12\x32\n\napi_server\x18\x02 \x01(\x0b\x32\x1e.tensorboard.service.ApiServer\x12<\n\nurl_format\x18\x03 \x01(\x0b\x32(.tensorboard.service.ExperimentUrlFormat\x12:\n\x0eplugin_control\x18\x04 \x01(\x0b\x32\".tensorboard.service.PluginControl\x12\x38\n\rupload_limits\x18\x05 \x01(\x0b\x32!.tensorboard.service.UploadLimits\"\\\n\rCompatibility\x12:\n\x07verdict\x18\x01 \x01(\x0e\x32).tensorboard.service.CompatibilityVerdict\x12\x0f\n\x07\x64\x65tails\x18\x02 \x01(\t\"\x1d\n\tApiServer\x12\x10\n\x08\x65ndpoint\x18\x01 \x01(\t\"?\n\x13\x45xperimentUrlFormat\x12\x10\n\x08template\x18\x01 \x01(\t\x12\x16\n\x0eid_placeholder\x18\x02 \x01(\t\"-\n\x13PluginSpecification\x12\x16\n\x0eupload_plugins\x18\x02 \x03(\t\"(\n\rPluginControl\x12\x17\n\x0f\x61llowed_plugins\x18\x01 \x03(\t\"\x92\x02\n\x0cUploadLimits\x12\x1f\n\x17max_scalar_request_size\x18\x03 \x01(\x03\x12\x1f\n\x17max_tensor_request_size\x18\x04 \x01(\x03\x12\x1d\n\x15max_blob_request_size\x18\x05 \x01(\x03\x12#\n\x1bmin_scalar_request_interval\x18\x06 \x01(\x03\x12#\n\x1bmin_tensor_request_interval\x18\x07 \x01(\x03\x12!\n\x19min_blob_request_interval\x18\x08 \x01(\x03\x12\x15\n\rmax_blob_size\x18\x01 \x01(\x03\x12\x1d\n\x15max_tensor_point_size\x18\x02 \x01(\x03*`\n\x14\x43ompatibilityVerdict\x12\x13\n\x0fVERDICT_UNKNOWN\x10\x00\x12\x0e\n\nVERDICT_OK\x10\x01\x12\x10\n\x0cVERDICT_WARN\x10\x02\x12\x11\n\rVERDICT_ERROR\x10\x03\x62\x06proto3')
)
_COMPATIBILITYVERDICT = _descriptor.EnumDescriptor(
name='CompatibilityVerdict',
full_name='tensorboard.service.CompatibilityVerdict',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='VERDICT_UNKNOWN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VERDICT_OK', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VERDICT_WARN', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VERDICT_ERROR', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1049,
serialized_end=1145,
)
_sym_db.RegisterEnumDescriptor(_COMPATIBILITYVERDICT)
CompatibilityVerdict = enum_type_wrapper.EnumTypeWrapper(_COMPATIBILITYVERDICT)
VERDICT_UNKNOWN = 0
VERDICT_OK = 1
VERDICT_WARN = 2
VERDICT_ERROR = 3
_SERVERINFOREQUEST = _descriptor.Descriptor(
name='ServerInfoRequest',
full_name='tensorboard.service.ServerInfoRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='tensorboard.service.ServerInfoRequest.version', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='plugin_specification', full_name='tensorboard.service.ServerInfoRequest.plugin_specification', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=69,
serialized_end=177,
)
_SERVERINFORESPONSE = _descriptor.Descriptor(
name='ServerInfoResponse',
full_name='tensorboard.service.ServerInfoResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='compatibility', full_name='tensorboard.service.ServerInfoResponse.compatibility', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='api_server', full_name='tensorboard.service.ServerInfoResponse.api_server', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='url_format', full_name='tensorboard.service.ServerInfoResponse.url_format', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='plugin_control', full_name='tensorboard.service.ServerInfoResponse.plugin_control', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='upload_limits', full_name='tensorboard.service.ServerInfoResponse.upload_limits', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=180,
serialized_end=491,
)
_COMPATIBILITY = _descriptor.Descriptor(
name='Compatibility',
full_name='tensorboard.service.Compatibility',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='verdict', full_name='tensorboard.service.Compatibility.verdict', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='details', full_name='tensorboard.service.Compatibility.details', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=493,
serialized_end=585,
)
_APISERVER = _descriptor.Descriptor(
name='ApiServer',
full_name='tensorboard.service.ApiServer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='endpoint', full_name='tensorboard.service.ApiServer.endpoint', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=587,
serialized_end=616,
)
_EXPERIMENTURLFORMAT = _descriptor.Descriptor(
name='ExperimentUrlFormat',
full_name='tensorboard.service.ExperimentUrlFormat',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='template', full_name='tensorboard.service.ExperimentUrlFormat.template', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id_placeholder', full_name='tensorboard.service.ExperimentUrlFormat.id_placeholder', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=618,
serialized_end=681,
)
_PLUGINSPECIFICATION = _descriptor.Descriptor(
name='PluginSpecification',
full_name='tensorboard.service.PluginSpecification',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='upload_plugins', full_name='tensorboard.service.PluginSpecification.upload_plugins', index=0,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=683,
serialized_end=728,
)
_PLUGINCONTROL = _descriptor.Descriptor(
name='PluginControl',
full_name='tensorboard.service.PluginControl',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='allowed_plugins', full_name='tensorboard.service.PluginControl.allowed_plugins', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=730,
serialized_end=770,
)
_UPLOADLIMITS = _descriptor.Descriptor(
name='UploadLimits',
full_name='tensorboard.service.UploadLimits',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='max_scalar_request_size', full_name='tensorboard.service.UploadLimits.max_scalar_request_size', index=0,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_tensor_request_size', full_name='tensorboard.service.UploadLimits.max_tensor_request_size', index=1,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_blob_request_size', full_name='tensorboard.service.UploadLimits.max_blob_request_size', index=2,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_scalar_request_interval', full_name='tensorboard.service.UploadLimits.min_scalar_request_interval', index=3,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_tensor_request_interval', full_name='tensorboard.service.UploadLimits.min_tensor_request_interval', index=4,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_blob_request_interval', full_name='tensorboard.service.UploadLimits.min_blob_request_interval', index=5,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_blob_size', full_name='tensorboard.service.UploadLimits.max_blob_size', index=6,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_tensor_point_size', full_name='tensorboard.service.UploadLimits.max_tensor_point_size', index=7,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=773,
serialized_end=1047,
)
_SERVERINFOREQUEST.fields_by_name['plugin_specification'].message_type = _PLUGINSPECIFICATION
_SERVERINFORESPONSE.fields_by_name['compatibility'].message_type = _COMPATIBILITY
_SERVERINFORESPONSE.fields_by_name['api_server'].message_type = _APISERVER
_SERVERINFORESPONSE.fields_by_name['url_format'].message_type = _EXPERIMENTURLFORMAT
_SERVERINFORESPONSE.fields_by_name['plugin_control'].message_type = _PLUGINCONTROL
_SERVERINFORESPONSE.fields_by_name['upload_limits'].message_type = _UPLOADLIMITS
_COMPATIBILITY.fields_by_name['verdict'].enum_type = _COMPATIBILITYVERDICT
DESCRIPTOR.message_types_by_name['ServerInfoRequest'] = _SERVERINFOREQUEST
DESCRIPTOR.message_types_by_name['ServerInfoResponse'] = _SERVERINFORESPONSE
DESCRIPTOR.message_types_by_name['Compatibility'] = _COMPATIBILITY
DESCRIPTOR.message_types_by_name['ApiServer'] = _APISERVER
DESCRIPTOR.message_types_by_name['ExperimentUrlFormat'] = _EXPERIMENTURLFORMAT
DESCRIPTOR.message_types_by_name['PluginSpecification'] = _PLUGINSPECIFICATION
DESCRIPTOR.message_types_by_name['PluginControl'] = _PLUGINCONTROL
DESCRIPTOR.message_types_by_name['UploadLimits'] = _UPLOADLIMITS
DESCRIPTOR.enum_types_by_name['CompatibilityVerdict'] = _COMPATIBILITYVERDICT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ServerInfoRequest = _reflection.GeneratedProtocolMessageType('ServerInfoRequest', (_message.Message,), {
'DESCRIPTOR' : _SERVERINFOREQUEST,
'__module__' : 'tensorboard.uploader.proto.server_info_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.service.ServerInfoRequest)
})
_sym_db.RegisterMessage(ServerInfoRequest)
ServerInfoResponse = _reflection.GeneratedProtocolMessageType('ServerInfoResponse', (_message.Message,), {
'DESCRIPTOR' : _SERVERINFORESPONSE,
'__module__' : 'tensorboard.uploader.proto.server_info_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.service.ServerInfoResponse)
})
_sym_db.RegisterMessage(ServerInfoResponse)
Compatibility = _reflection.GeneratedProtocolMessageType('Compatibility', (_message.Message,), {
'DESCRIPTOR' : _COMPATIBILITY,
'__module__' : 'tensorboard.uploader.proto.server_info_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.service.Compatibility)
})
_sym_db.RegisterMessage(Compatibility)
ApiServer = _reflection.GeneratedProtocolMessageType('ApiServer', (_message.Message,), {
'DESCRIPTOR' : _APISERVER,
'__module__' : 'tensorboard.uploader.proto.server_info_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.service.ApiServer)
})
_sym_db.RegisterMessage(ApiServer)
ExperimentUrlFormat = _reflection.GeneratedProtocolMessageType('ExperimentUrlFormat', (_message.Message,), {
'DESCRIPTOR' : _EXPERIMENTURLFORMAT,
'__module__' : 'tensorboard.uploader.proto.server_info_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.service.ExperimentUrlFormat)
})
_sym_db.RegisterMessage(ExperimentUrlFormat)
PluginSpecification = _reflection.GeneratedProtocolMessageType('PluginSpecification', (_message.Message,), {
'DESCRIPTOR' : _PLUGINSPECIFICATION,
'__module__' : 'tensorboard.uploader.proto.server_info_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.service.PluginSpecification)
})
_sym_db.RegisterMessage(PluginSpecification)
PluginControl = _reflection.GeneratedProtocolMessageType('PluginControl', (_message.Message,), {
'DESCRIPTOR' : _PLUGINCONTROL,
'__module__' : 'tensorboard.uploader.proto.server_info_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.service.PluginControl)
})
_sym_db.RegisterMessage(PluginControl)
UploadLimits = _reflection.GeneratedProtocolMessageType('UploadLimits', (_message.Message,), {
'DESCRIPTOR' : _UPLOADLIMITS,
'__module__' : 'tensorboard.uploader.proto.server_info_pb2'
# @@protoc_insertion_point(class_scope:tensorboard.service.UploadLimits)
})
_sym_db.RegisterMessage(UploadLimits)
# @@protoc_insertion_point(module_scope)
| 40.127572 | 1,788 | 0.762383 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,818 | 0.298328 |
0e506f5e878b4e3e3f626f39450924970f216a72 | 2,192 | py | Python | pandas/_testing/_hypothesis.py | dycloud-chan/pandas | 39ccb352b53e9b5b694c4f7f044774f9c3677e98 | [
"BSD-3-Clause"
] | 1 | 2021-09-15T07:45:23.000Z | 2021-09-15T07:45:23.000Z | pandas/_testing/_hypothesis.py | dycloud-chan/pandas | 39ccb352b53e9b5b694c4f7f044774f9c3677e98 | [
"BSD-3-Clause"
] | null | null | null | pandas/_testing/_hypothesis.py | dycloud-chan/pandas | 39ccb352b53e9b5b694c4f7f044774f9c3677e98 | [
"BSD-3-Clause"
] | 1 | 2020-01-03T17:02:48.000Z | 2020-01-03T17:02:48.000Z | """
Hypothesis data generator helpers.
"""
from datetime import datetime
from hypothesis import strategies as st
from hypothesis.extra.dateutil import timezones as dateutil_timezones
from hypothesis.extra.pytz import timezones as pytz_timezones
from pandas.compat import is_platform_windows
import pandas as pd
from pandas.tseries.offsets import (
BMonthBegin,
BMonthEnd,
BQuarterBegin,
BQuarterEnd,
BYearBegin,
BYearEnd,
MonthBegin,
MonthEnd,
QuarterBegin,
QuarterEnd,
YearBegin,
YearEnd,
)
OPTIONAL_INTS = st.lists(st.one_of(st.integers(), st.none()), max_size=10, min_size=3)
OPTIONAL_FLOATS = st.lists(st.one_of(st.floats(), st.none()), max_size=10, min_size=3)
OPTIONAL_TEXT = st.lists(st.one_of(st.none(), st.text()), max_size=10, min_size=3)
OPTIONAL_DICTS = st.lists(
st.one_of(st.none(), st.dictionaries(st.text(), st.integers())),
max_size=10,
min_size=3,
)
OPTIONAL_LISTS = st.lists(
st.one_of(st.none(), st.lists(st.text(), max_size=10, min_size=3)),
max_size=10,
min_size=3,
)
if is_platform_windows():
DATETIME_NO_TZ = st.datetimes(min_value=datetime(1900, 1, 1))
else:
DATETIME_NO_TZ = st.datetimes()
# A fixed instant (1900-01-01) drawn together with an optional, varying timezone.
DATETIME_JAN_1_1900_OPTIONAL_TZ = st.datetimes(
min_value=pd.Timestamp(1900, 1, 1).to_pydatetime(),
max_value=pd.Timestamp(1900, 1, 1).to_pydatetime(),
timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
)
DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ = st.datetimes(
min_value=pd.Timestamp.min.to_pydatetime(warn=False),
max_value=pd.Timestamp.max.to_pydatetime(warn=False),
)
INT_NEG_999_TO_POS_999 = st.integers(-999, 999)
# The strategy for each type is registered in conftest.py, as they don't carry
# enough runtime information (e.g. type hints) to infer how to build them.
YQM_OFFSET = st.one_of(
*map(
st.from_type,
[
MonthBegin,
MonthEnd,
BMonthBegin,
BMonthEnd,
QuarterBegin,
QuarterEnd,
BQuarterBegin,
BQuarterEnd,
YearBegin,
YearEnd,
BYearBegin,
BYearEnd,
],
)
)
| 25.488372 | 86 | 0.675639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 194 | 0.088504 |
0e5101dd7fe4f67284e5978fa838a8443cbd53ae | 1,397 | py | Python | export_readiness/migrations/0058_auto_20190912_1326.py | uktrade/directory-cms | 8c8d13ce29ea74ddce7a40f3dd29c8847145d549 | [
"MIT"
] | 6 | 2018-03-20T11:19:07.000Z | 2021-10-05T07:53:11.000Z | export_readiness/migrations/0058_auto_20190912_1326.py | uktrade/directory-cms | 8c8d13ce29ea74ddce7a40f3dd29c8847145d549 | [
"MIT"
] | 802 | 2018-02-05T14:16:13.000Z | 2022-02-10T10:59:21.000Z | export_readiness/migrations/0058_auto_20190912_1326.py | uktrade/directory-cms | 8c8d13ce29ea74ddce7a40f3dd29c8847145d549 | [
"MIT"
] | 6 | 2019-01-22T13:19:37.000Z | 2019-07-01T10:35:26.000Z | # Generated by Django 2.2.4 on 2019-09-12 13:26
from django.db import migrations
INDUSTRY_NAMES = (
'Advanced manufacturing',
'Aerospace',
'Agri-technology',
'Automotive',
'Biotechnology',
'Cleantech',
'Construction ',
'Consumer products',
'Cyber security',
'E-commerce',
'Education',
'Energy',
'Engineering',
'Financial services',
'Fintech',
'Food and drink',
'Healthcare',
'Infrastructure',
'International organisations',
'Life sciences',
'Low carbon',
'Luxury',
'Offshore wind',
'Oil and gas',
'Pharmaceuticals',
'Professional services',
'Renewables',
'Retail',
'Safety',
'Security',
'Smart cities',
'Technology',
'Training',
)
def create_industry_tags(apps, schema_editor):
IndustryTag = apps.get_model('export_readiness', 'IndustryTag')
objs = (IndustryTag(name=name) for name in INDUSTRY_NAMES)
IndustryTag.objects.bulk_create(objs)
def delete_industry_tags(apps, schema_editor):
IndustryTag = apps.get_model('export_readiness', 'IndustryTag')
IndustryTag.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('export_readiness', '0057_auto_20190912_1326'),
]
operations = [
migrations.RunPython(create_industry_tags, reverse_code=delete_industry_tags, elidable=True)
]
| 22.174603 | 100 | 0.65927 | 250 | 0.178955 | 0 | 0 | 0 | 0 | 0 | 0 | 615 | 0.440229 |
0e514b7610b489149791d3b281551aebbd6d433b | 4,656 | py | Python | bakery_cli/utils.py | lowks/fontbakery-cli | 514cc010c1118bbcef078aeb64a87add05322c4a | [
"Apache-2.0"
] | 1 | 2020-06-17T04:44:42.000Z | 2020-06-17T04:44:42.000Z | bakery_cli/utils.py | lowks/fontbakery-cli | 514cc010c1118bbcef078aeb64a87add05322c4a | [
"Apache-2.0"
] | null | null | null | bakery_cli/utils.py | lowks/fontbakery-cli | 514cc010c1118bbcef078aeb64a87add05322c4a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
import lxml.etree
import os
import os.path as op
import re
class RedisFd(object):
""" Redis File Descriptor class, publish writen data to redis channel
in parallel to file """
def __init__(self, name, mode='a', write_pipeline=None):
self.filed = open(name, mode)
self.filed.write("Start: Start of log\n") # end of log
self.write_pipeline = write_pipeline
if write_pipeline and not isinstance(write_pipeline, list):
self.write_pipeline = [write_pipeline]
def write(self, data, prefix=''):
if self.write_pipeline:
for pipeline in self.write_pipeline:
data = pipeline(data)
if not data.endswith('\n'):
data += '\n'
data = re.sub('\n{3,}', '\n\n', data)
if data:
self.filed.write("%s%s" % (prefix, data))
self.filed.flush()
def close(self):
self.filed.write("End: End of log\n") # end of log
self.filed.close()
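# Usage sketch for RedisFd (illustrative only; `publish` is a hypothetical
# pipeline callable, not something this module provides):
#
#   def publish(data):
#       # e.g. also push the chunk to a Redis channel here
#       return data
#
#   log = RedisFd('build.log', mode='w', write_pipeline=publish)
#   log.write('checking prerequisites', prefix='Info: ')
#   log.close()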
class UpstreamDirectory(object):
""" Describes structure of upstream directory
>>> upstream = UpstreamDirectory("tests/fixtures/upstream-example")
>>> upstream.UFO
['Font-Regular.ufo']
>>> upstream.TTX
['Font-Light.ttx']
>>> upstream.BIN
['Font-SemiBold.ttf']
>>> upstream.METADATA
['METADATA.json']
>>> sorted(upstream.LICENSE)
['APACHE.txt', 'LICENSE.txt']
>>> upstream.SFD
['Font-Bold.sfd']
>>> sorted(upstream.TXT)
['APACHE.txt', 'LICENSE.txt']
"""
OFL = ['open font license.markdown', 'ofl.txt', 'ofl.md']
LICENSE = ['license.txt', 'license.md', 'copyright.txt']
APACHE = ['apache.txt', 'apache.md']
UFL = ['ufl.txt', 'ufl.md']
ALL_LICENSES = OFL + LICENSE + APACHE + UFL
def __init__(self, upstream_path):
self.upstream_path = upstream_path
self.UFO = []
self.TTX = []
self.BIN = []
self.LICENSE = []
self.METADATA = []
self.SFD = []
self.TXT = []
self.walk()
def get_fonts(self):
return self.UFO + self.TTX + self.BIN + self.SFD
ALL_FONTS = property(get_fonts)
    def walk(self):
        prefix_len = len(self.upstream_path)
        for root, dirs, files in os.walk(self.upstream_path):
            for f in files:
                fullpath = op.join(root, f)
                relpath = fullpath[prefix_len:].strip('/')
                if f.lower().endswith('.ttx'):
                    try:
                        doc = lxml.etree.parse(fullpath)
                        el = doc.xpath('//ttFont[@sfntVersion]')
                        if not el:
                            continue
                    except Exception:
                        continue
                    self.TTX.append(relpath)
                if op.basename(f).lower() == 'metadata.json':
                    self.METADATA.append(relpath)
                if f.lower().endswith(('.ttf', '.otf')):
                    self.BIN.append(relpath)
                if f.lower().endswith('.sfd'):
                    self.SFD.append(relpath)
                # The original `f[-4:]` slice could never match '.markdown',
                # '.md' or '.LICENSE'; endswith() checks the full suffixes.
                if f.lower().endswith(('.txt', '.markdown', '.md', '.license')):
                    self.TXT.append(relpath)
                if op.basename(f).lower() in UpstreamDirectory.ALL_LICENSES:
                    self.LICENSE.append(relpath)
            for d in dirs:
                fullpath = op.join(root, d)
                if op.splitext(fullpath)[1].lower() == '.ufo':
                    self.UFO.append(fullpath[prefix_len:].strip('/'))
def nameTableRead(font, NameID, fallbackNameID=False):
for record in font['name'].names:
if record.nameID == NameID:
if b'\000' in record.string:
return record.string.decode('utf-16-be').encode('utf-8')
else:
return record.string
if fallbackNameID:
return nameTableRead(font, fallbackNameID)
return ''
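# Usage sketch (illustrative; assumes a fontTools TTFont instance):
#
#   from fontTools.ttLib import TTFont
#   font = TTFont('Font-Regular.ttf')
#   family = nameTableRead(font, 16, fallbackNameID=1)  # preferred family,
#                                                       # else family name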
| 32.110345 | 78 | 0.559708 | 3,499 | 0.751503 | 0 | 0 | 0 | 0 | 0 | 0 | 1,650 | 0.354381 |
0e51b7b58041b179a5ca569376e14c8b5faa8e01 | 100 | py | Python | src/spaczz/regex/__init__.py | brunobg/spaczz | 69c73cad16f65f01be2202ecc0c836dd03b42b61 | [
"MIT"
] | 153 | 2020-07-07T01:26:25.000Z | 2022-03-31T23:47:00.000Z | src/spaczz/regex/__init__.py | brunobg/spaczz | 69c73cad16f65f01be2202ecc0c836dd03b42b61 | [
"MIT"
] | 38 | 2020-07-15T02:29:34.000Z | 2021-08-15T21:32:54.000Z | src/spaczz/regex/__init__.py | brunobg/spaczz | 69c73cad16f65f01be2202ecc0c836dd03b42b61 | [
"MIT"
] | 20 | 2020-07-07T15:41:05.000Z | 2022-02-21T19:28:22.000Z | """Module for regex components."""
from .regexconfig import RegexConfig
__all__ = ["RegexConfig"]
| 16.666667 | 36 | 0.74 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.47 |
0e5337ed20b60dbcf33738cf8d37c01caaa201c9 | 1,968 | py | Python | imagetagger/imagetagger/annotations/migrations/0006_auto_20170826_1431.py | jbargu/imagetagger | 216ac5e73902abadc1880321e285e68c55bdfd3d | [
"MIT"
] | 212 | 2017-07-06T23:01:44.000Z | 2022-03-24T04:44:49.000Z | imagetagger/imagetagger/annotations/migrations/0006_auto_20170826_1431.py | jbargu/imagetagger | 216ac5e73902abadc1880321e285e68c55bdfd3d | [
"MIT"
] | 157 | 2017-07-24T10:03:41.000Z | 2022-03-12T01:03:47.000Z | imagetagger/imagetagger/annotations/migrations/0006_auto_20170826_1431.py | jbargu/imagetagger | 216ac5e73902abadc1880321e285e68c55bdfd3d | [
"MIT"
] | 54 | 2017-11-07T00:40:50.000Z | 2022-02-26T14:22:13.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-26 12:31
from __future__ import unicode_literals
import json
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('annotations', '0005_auto_20170826_1424'),
]
def forward_func(apps, schema_editor):
Annotation = apps.get_model("annotations", "Annotation")
db_alias = schema_editor.connection.alias
# Copy all valid annotations from raw_vector to vector
for annotation in Annotation.objects.using(db_alias).all():
try:
vector = json.loads(annotation.raw_vector)
for key, value in vector.items():
try:
# try to convert all numeric vector values to integer
vector[key] = int(value)
except ValueError:
continue
annotation.vector = vector
annotation.save()
except ValueError:
# Annotation is invalid, delete it
annotation.delete()
def backward_func(apps, schema_editor):
Annotation = apps.get_model("annotations", "Annotation")
db_alias = schema_editor.connection.alias
# Copy all annotations from vector to raw_vector
for annotation in Annotation.objects.using(db_alias).all():
annotation.raw_vector = json.dumps(annotation.vector)
annotation.save()
operations = [
migrations.RenameField(
model_name='annotation',
old_name='vector',
new_name='raw_vector',
),
migrations.AddField(
model_name='annotation',
name='vector',
field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
),
migrations.RunPython(forward_func, backward_func, atomic=True),
]
| 33.355932 | 77 | 0.601626 | 1,761 | 0.894817 | 0 | 0 | 0 | 0 | 0 | 0 | 400 | 0.203252 |
0e55028498c318134fe1dbd586569be4e5fbb449 | 1,778 | py | Python | process_csv.py | Blue9/llvm-pass-skeleton | e6be3d0f1a4d3f73128263ccbb81e587309b869b | [
"MIT"
] | null | null | null | process_csv.py | Blue9/llvm-pass-skeleton | e6be3d0f1a4d3f73128263ccbb81e587309b869b | [
"MIT"
] | null | null | null | process_csv.py | Blue9/llvm-pass-skeleton | e6be3d0f1a4d3f73128263ccbb81e587309b869b | [
"MIT"
] | null | null | null | import os
import statistics
import sys
def get_mean_std(out_csv):
    """Return {test_name: mean optimized runtime} for the given CSV.

    Standard deviations are also computed for reference, but only the
    means are used by the caller below.
    """
    with open(out_csv) as f:
        lines = f.readlines()
    tests = dict()
    for t in lines[1:]:
        t = t.split(",")
        test_name = t[0].strip()
        opt = float(t[2].strip())
        tests[test_name] = tests.get(test_name, list()) + [opt]
    means = {t: sum(v)/len(v) for t, v in tests.items()}
    std = {t: statistics.stdev(v) for t, v in tests.items()}
    return means
def get_tests(out_csv):
tests = set()
orig = dict()
with open(out_csv) as f:
lines = f.readlines()
for l in lines[1:]:
l = l.split(",")
name = l[0].strip()
original_runtime = float(l[1].strip())
tests.add(name)
orig[name] = orig.get(name, list()) + [original_runtime]
tests = sorted(list(tests))
means = {t: sum(v)/len(v) for t, v in orig.items()}
std = {t: statistics.stdev(v) for t, v in orig.items()}
return [[t, str(means[t])] for t in tests]
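# Illustrative input shape (an assumption inferred from the indexing above);
# each out-*.csv is expected to look like:
#
#   test, original, optimized
#   fib, 1.532, 0.981
#   fib, 1.541, 0.990
#
# column 0 is the test name, column 1 the baseline runtime, column 2 the
# optimized runtime, with one row per run of each test.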
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: python3 process_csv.py [output_csv_path]")
sys.exit(1)
num_runs = (1, 2, 3)
thresholds = (10, 100, 1000)
filename = lambda runs, threshold: f'out-{runs}-{threshold}.csv'
output_file = sys.argv[1]
header = ['test', 'original']
tests = get_tests('out-1-10.csv')
for threshold in thresholds:
for run in num_runs:
out_csv = filename(run, threshold)
stat = get_mean_std(out_csv)
header += [f'runs:{run}+thresh:{threshold}']
tests = [row + [str(stat[row[0]])] for row in tests]
with open(output_file, 'w') as f:
f.write(', '.join(header) + '\n')
for row in tests:
f.write(', '.join(row) + '\n')
| 32.925926 | 68 | 0.558493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.098425 |
0e55f8fc713d89522d978644224a64f2679af055 | 12,686 | py | Python | run_cnn.py | tlkh/mini-dlperf | 7d40bbad98ac68d4c496fe4b69d0de80ef0fffc6 | [
"MIT"
] | null | null | null | run_cnn.py | tlkh/mini-dlperf | 7d40bbad98ac68d4c496fe4b69d0de80ef0fffc6 | [
"MIT"
] | 37 | 2020-09-13T08:45:04.000Z | 2022-03-15T01:19:05.000Z | run_cnn.py | tlkh/mini-dlperf | 7d40bbad98ac68d4c496fe4b69d0de80ef0fffc6 | [
"MIT"
] | null | null | null | import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--rn152", action="store_true", default=False,
help="Train a larger ResNet-152 model instead of ResNet-50")
parser.add_argument("--rn50v2", action="store_true", default=False,
help="Train ResNet-50 V2 model instead of ResNet-50 MLPerf")
parser.add_argument("--dn201", action="store_true", default=False,
help="Train a larger DenseNet-201 model instead of ResNet-50")
parser.add_argument("--mobilenet", action="store_true", default=False,
help="Train a smaller MobileNetV2 model instead of ResNet-50")
parser.add_argument("--huge_cnn", action="store_true", default=False,
help="Train a huge toy CNN model instead of ResNet-50")
parser.add_argument("--amp", action="store_true", default=True,
help="Use grappler AMP for mixed precision training")
parser.add_argument("--keras_amp", action="store_true", default=False,
help="Use Keras AMP for mixed precision training")
parser.add_argument("--xla", action="store_true", default=True,
help="Use XLA compiler")
parser.add_argument("--batch_size", default=128, type=int,
help="Batch size to use for training")
parser.add_argument("--img_size", default=224, type=int,
help="Image size to use for training")
parser.add_argument("--lr", default=0.1, type=float,
help="Learning rate")
parser.add_argument("--epochs", default=4, type=int,
help="Number of epochs to train for")
parser.add_argument("--dataset", default="imagenette/320px",
help="TFDS Dataset to train on")
parser.add_argument("--data_dir", default="/workspace/tensorflow_datasets",
help="TFDS Dataset directory")
parser.add_argument("--threads", default=-1, type=int,
help="Number of CPU threads to use")
parser.add_argument("--verbose", default=2, type=int)
parser.add_argument("--steps", type=int, default=200)
parser.add_argument("--no_val", action="store_true", default=True)
parser.add_argument("--img_aug", action="store_true", default=False)
args = parser.parse_args()
import os
import multiprocessing
if args.threads == -1:
n_cores = multiprocessing.cpu_count()
print("Number of logical cores:", n_cores)
worker_threads = int(n_cores*0.9)
else:
worker_threads = args.threads
print("Number of threads used for dataloader:", worker_threads)
os.environ["TF_DISABLE_NVTX_RANGES"] = "1"
os.environ["NCCL_DEBUG"] = "WARN"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_GPU_THREAD_MODE"] = "gpu_private"
os.environ["TF_GPU_THREAD_COUNT"] = str(worker_threads)
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
if args.xla:
if os.environ.get("TF_XLA_FLAGS", None) is not None:
os.environ["TF_XLA_FLAGS"] += " --tf_xla_enable_lazy_compilation false"
else:
os.environ["TF_XLA_FLAGS"] = " --tf_xla_enable_lazy_compilation false"
os.environ["TF_XLA_FLAGS"] += " --tf_xla_async_io_level 1"
import logging
logger = logging.getLogger()
logger.setLevel(logging.WARN)
import time
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from common import dataloaders, cnn_models, ops, callbacks
from nvstatsrecorder.callbacks import NVStats, NVLinkStats
print("Using XLA:", args.xla)
tf.config.optimizer.set_jit(args.xla)
print("Using grappler AMP:", args.amp)
tf.config.optimizer.set_experimental_options({"auto_mixed_precision": args.amp,
"debug_stripper": True})
tf.config.threading.set_inter_op_parallelism_threads(worker_threads)
strategy = tf.distribute.MirroredStrategy()
replicas = strategy.num_replicas_in_sync
BATCH_SIZE = args.batch_size * replicas
IMG_SIZE = args.img_size
L_IMG_SIZE = int(args.img_size*1.1)
EPOCHS = args.epochs
tf_image_dtype = tf.float32
print("Number of devices:", replicas)
print("Global batch size:", BATCH_SIZE)
print("Base learning rate:", args.lr)
print("Loading Dataset")
print("Using TFDS dataset:", args.dataset)
dataset = dataloaders.return_fast_tfds(args.dataset,
data_dir=args.data_dir,
worker_threads=worker_threads,
buffer=BATCH_SIZE*2)
num_class = dataset["num_class"]
# Optionally pad the class count (10 -> 16, 1000 -> 1024) so the final dense
# layer's dimensions stay divisible by 8, which is friendlier to Tensor Cores.
PAD = False
if PAD:
if num_class == 10:
num_class = 16
elif num_class == 1000:
num_class = 1024
print("Padded final layer to", num_class)
num_train = dataset["num_train"]
num_valid = dataset["num_valid"]
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]
means = tf.cast(tf.broadcast_to(CHANNEL_MEANS, (IMG_SIZE, IMG_SIZE, 3)), tf_image_dtype)
if args.img_aug:
@tf.function
def format_train_example(image_path, label):
image = tf.io.decode_jpeg(image_path, channels=3, ratio=2,
fancy_upscaling=False,
dct_method="INTEGER_FAST")
image = ops.resize_preserve_ratio(image, L_IMG_SIZE)
image = ops.augment_image(image, IMG_SIZE)
image = (tf.cast(image, tf_image_dtype) - means)/127.5
return image, label
else:
@tf.function
def format_train_example(image_path, label):
image = tf.io.decode_jpeg(image_path, channels=3, ratio=2,
fancy_upscaling=False,
dct_method="INTEGER_FAST")
image = tf.image.central_crop(image, 0.9)
image = ops.crop_center_and_resize(image, IMG_SIZE)
image = (tf.cast(image, tf_image_dtype) - means)/127.5
return image, label
@tf.function
def format_test_example(image_path, label):
image = tf.io.decode_jpeg(image_path, channels=3, ratio=2,
fancy_upscaling=False,
dct_method="INTEGER_FAST")
image = tf.image.central_crop(image, 0.9)
image = ops.crop_center_and_resize(image, IMG_SIZE)
image = (tf.cast(image, tf_image_dtype) - means)/127.5
return image, label
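# Minimal shape check for the preprocessing above (a sketch, not part of the
# pipeline; it fabricates one JPEG-encoded grey image and assumes
# ops.crop_center_and_resize returns an IMG_SIZE square):
#
#   _dummy = tf.io.encode_jpeg(tf.fill([IMG_SIZE * 2, IMG_SIZE * 2, 3],
#                                      tf.constant(128, tf.uint8)))
#   _img, _lbl = format_test_example(_dummy, tf.constant(0, tf.int64))
#   assert _img.shape == (IMG_SIZE, IMG_SIZE, 3)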
print("Build tf.data input pipeline")
train = dataset["train"]
train = train.map(format_train_example, num_parallel_calls=worker_threads)
train = train.batch(BATCH_SIZE, drop_remainder=True)
train = train.prefetch(16)
valid = dataset["valid"]
valid = valid.map(format_test_example, num_parallel_calls=worker_threads)
if num_valid > 512 :
VAL_BATCH_SIZE = BATCH_SIZE
else:
VAL_BATCH_SIZE = num_valid//2
valid = valid.batch(VAL_BATCH_SIZE, drop_remainder=False)
valid = valid.prefetch(16)
train_steps = int(num_train/BATCH_SIZE)
valid_steps = int(num_valid/VAL_BATCH_SIZE)
print("Running pipelines:")
for batch in train.take(2):
image, label = batch[0].numpy(), batch[1].numpy()
print("* Image shape:", image.shape)
print("* Image size:", len(str(image)))
print("* Label shape:", label.shape)
time.sleep(1)
for batch in valid.take(2):
image, label = batch[0].numpy(), batch[1].numpy()
print("* Image shape:", image.shape)
print("* Image size:", len(str(image)))
print("* Label shape:", label.shape)
time.sleep(1)
print("Build and distribute model")
if args.keras_amp:
print("Using Keras AMP:", args.keras_amp)
tf.keras.mixed_precision.experimental.set_policy("mixed_float16")
with strategy.scope():
if args.rn152:
print("Using ResNet-152 V2 model")
model = cnn_models.rn152((IMG_SIZE,IMG_SIZE), num_class, weights=None, dtype=tf_image_dtype)
model = cnn_models.convert_for_training(model)
elif args.dn201:
print("Using DenseNet-201 model")
model = cnn_models.dn201((IMG_SIZE,IMG_SIZE), num_class, weights=None, dtype=tf_image_dtype)
model = cnn_models.convert_for_training(model)
elif args.mobilenet:
print("Using MobileNetV2 model")
model = cnn_models.mobilenet((IMG_SIZE,IMG_SIZE), num_class, weights=None, dtype=tf_image_dtype)
model = cnn_models.convert_for_training(model)
elif args.rn50v2:
print("Using ResNet-50 V2 model")
model = cnn_models.rn50((IMG_SIZE,IMG_SIZE), num_class, weights=None, dtype=tf_image_dtype)
model = cnn_models.convert_for_training(model)
elif args.huge_cnn:
print("Using Huge CNN model")
model = cnn_models.huge_cnn((IMG_SIZE,IMG_SIZE), num_class, weights=None, dtype=tf_image_dtype)
else:
print("Using ResNet-50 MLPerf model")
model = cnn_models.rn50_mlperf((IMG_SIZE,IMG_SIZE), num_class)
    # Honor the --lr flag (the value printed at startup); it was previously
    # ignored in favor of a hard-coded 0.1.
    opt = tf.keras.optimizers.SGD(learning_rate=args.lr, momentum=0.875, nesterov=True)
if args.amp:
opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(opt, "dynamic")
loss = tf.keras.losses.SparseCategoricalCrossentropy()
model.compile(loss=loss,
optimizer=opt)
model.summary()
print("Train model")
if args.steps:
train_steps = args.steps
if valid_steps > args.steps:
valid_steps = args.steps
verbose = args.verbose
if verbose != 1:
print("Verbose level:", verbose)
print("You will not see progress during training!")
train_img_per_epoch = train_steps * BATCH_SIZE
time_history = callbacks.TimeHistory()
# NOTE: this list shadows the imported `callbacks` module, which is no longer
# needed once TimeHistory has been constructed above.
callbacks = [time_history]
try:
SUDO_PASSWORD = None #os.environ["SUDO_PASSWORD"]
nv_stats = NVStats(gpu_index=0, interval=1, tensor_util=False,)
nvlink_interval = 5
gpus = list(range(replicas))
nvlink_stats = NVLinkStats(SUDO_PASSWORD, gpus=gpus, interval=nvlink_interval)
callbacks.append(nvlink_stats)
RECORD_NVLINK = True
except Exception as e:
print(e)
print("No sudo access, not recording Tensor Core and NVLink utilization")
nv_stats = NVStats(gpu_index=0, interval=1, tensor_util=False)
RECORD_NVLINK = False
callbacks.append(nv_stats)
if train_steps < 20:
validation_freq = 2
else:
validation_freq = 1
print("Start training")
train_start = time.time()
if args.no_val:
with strategy.scope():
model.fit(train, steps_per_epoch=train_steps,
epochs=EPOCHS, callbacks=callbacks, verbose=verbose)
else:
with strategy.scope():
        # Use the validation_freq computed above instead of a hard-coded 1.
        model.fit(train, steps_per_epoch=train_steps, validation_freq=validation_freq,
validation_data=valid, validation_steps=valid_steps,
epochs=EPOCHS, callbacks=callbacks, verbose=verbose)
train_end = time.time()
nv_stats_recorder = nv_stats.recorder
prefix = args.dataset.replace("/", "_")
#nv_stats_recorder.plot_gpu_util(smooth=3, outpath=prefix+"_resnet_gpu_util.jpg")
#nv_stats_recorder.summary()
if RECORD_NVLINK:
nvlink_stats_recorder = nvlink_stats.recorder
#nvlink_stats_recorder.plot_nvlink_traffic(smooth=3, outpath=prefix+"_resnet_nvlink_util.jpg")
# Average epoch time, excluding the first (warm-up) epoch; assumes EPOCHS >= 2.
duration = sum(time_history.epoch_times[1:])/len(time_history.epoch_times[1:])
avg_fps = round(train_steps*BATCH_SIZE/duration, 1)
first_epoch = int(time_history.epoch_times[0])
nv_stats_recorder = nv_stats.recorder
gpu_data = nv_stats_recorder.get_data()
device_data = gpu_data["device_data"]
data_len = len(gpu_data["time_history"][first_epoch:])
avg_sm = int(sum(gpu_data["sm_util_history"][first_epoch:])/data_len)
avg_mem = int(sum(gpu_data["mem_util_history"][first_epoch:])/data_len)
avg_pcie = int(sum(gpu_data["pcie_txrx"][first_epoch:])/data_len)
pcie_gbps = round(sum(gpu_data["pcie_txrx"][first_epoch:])/data_len/100*device_data["max_pcie_bandwidth"]/1e6, 1)
avg_pwr = int(sum(gpu_data["pwr_history"][first_epoch:])/data_len)
pwr_watts = int(sum(gpu_data["pwr_history"][first_epoch:])/data_len/100*device_data["max_power"]/1e3)
avg_temp = int(sum(gpu_data["temp_history"][first_epoch:])/data_len)
max_vram = round(max(gpu_data["mem_occupy_history"])/100*device_data["total_vram"], 1)
if RECORD_NVLINK:
skip_time = int(time_history.epoch_times[0]/nvlink_interval)
nvlink_history = nvlink_stats_recorder.get_data()["nvlink_history"][skip_time:]
print(nvlink_history)
avg_nvlink_list = []
for t in nvlink_history:
avg_nvlink_list.append(
sum([i for i in t.values()])/len(list(t.keys()))
)
avg_nvlink = round(sum(avg_nvlink_list)/len(avg_nvlink_list), 1)
else:
avg_nvlink = 0.0
throttle = []
for t in gpu_data["throttle_reasons"]:
if t[1] > first_epoch:
throttle.append(t[0])
throttle = list(set(throttle))
print("Results:")
result_data = [
"PASS", avg_fps, avg_sm, avg_mem, avg_pcie, pcie_gbps, avg_pwr, pwr_watts, avg_temp, max_vram, avg_nvlink, throttle
]
results = ",".join([str(r) for r in result_data])
print(results)
| 38.210843 | 119 | 0.692023 | 0 | 0 | 0 | 0 | 1,302 | 0.102633 | 0 | 0 | 2,608 | 0.205581 |
0e5a510f773a00ee259b3eb4de0fd3c02d65e9b1 | 5,591 | py | Python | containers/forms.py | timothyjlaurent/shipyard | 400b6455fc249ad096bc849f2f27385cfe2daa6d | [
"Apache-2.0"
] | 1 | 2016-03-24T22:33:09.000Z | 2016-03-24T22:33:09.000Z | containers/forms.py | dragon9783/shipyard | a7cfb15efed01823077825e0b3c3dd6268017177 | [
"Apache-2.0"
] | 1 | 2021-06-10T23:32:33.000Z | 2021-06-10T23:32:33.000Z | containers/forms.py | timothyjlaurent/shipyard | 400b6455fc249ad096bc849f2f27385cfe2daa6d | [
"Apache-2.0"
] | null | null | null | # Copyright Evan Hazlett and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import forms
from hosts.models import Host
from images.models import Image
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, ButtonHolder, Submit, Button
from crispy_forms.bootstrap import FieldWithButtons, StrictButton, FormActions
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
def get_available_hosts():
return Host.objects.filter(enabled=True)
def get_image_choices():
hosts = get_available_hosts()
choices = []
images = Image.objects.filter(host__in=hosts).order_by('repository').values_list(
'repository', flat=True).order_by('repository').distinct()
for i in images:
repo = i
if repo.find('<none>') == -1:
d = (repo, repo)
choices.append(d)
return choices
class CreateContainerForm(forms.Form):
image = forms.ChoiceField(required=True)
name = forms.CharField(required=False, help_text=_('container name (used in links)'))
hostname = forms.CharField(required=False)
description = forms.CharField(required=False)
command = forms.CharField(required=False)
memory = forms.CharField(required=False, max_length=8,
help_text='Memory in MB')
environment = forms.CharField(required=False,
help_text='key=value space separated pairs')
ports = forms.CharField(required=False, help_text=_('space separated (i.e. 8000 8001:8001 127.0.0.1:80:80 )'))
links = forms.CharField(required=False, help_text=_('space separated (i.e. redis:db)'))
volume = forms.CharField(required=False, help_text='container volume (i.e. /mnt/volume)')
volumes_from = forms.CharField(required=False,
help_text='mount volumes from specified container')
hosts = forms.MultipleChoiceField(required=True)
private = forms.BooleanField(required=False)
privileged = forms.BooleanField(required=False)
def __init__(self, *args, **kwargs):
super(CreateContainerForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.layout = Layout(
Fieldset(
None,
'image',
'name',
'hostname',
'command',
'description',
'memory',
'environment',
'ports',
'links',
'volume',
'volumes_from',
'hosts',
'private',
'privileged',
),
FormActions(
Submit('save', _('Create'), css_class="btn btn-lg btn-success"),
)
)
self.helper.form_id = 'form-create-container'
self.helper.form_class = 'form-horizontal'
self.helper.form_action = reverse('containers.views.create_container')
self.helper.help_text_inline = True
self.fields['image'].choices = [('', '----------')] + \
[x for x in get_image_choices()]
self.fields['hosts'].choices = \
[(x.id, x.name) for x in get_available_hosts()]
class ImportRepositoryForm(forms.Form):
repository = forms.CharField(help_text='i.e. ehazlett/logstash')
hosts = forms.MultipleChoiceField()
def __init__(self, *args, **kwargs):
super(ImportRepositoryForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'form-import-repository'
self.helper.form_class = 'form-horizontal'
self.helper.form_action = reverse('containers.views.import_image')
self.helper.help_text_inline = True
self.fields['hosts'].choices = \
[(x.id, x.name) for x in get_available_hosts()]
class ContainerForm(forms.Form):
image = forms.ChoiceField()
command = forms.CharField(required=False)
def __init__(self, *args, **kwargs):
        # Fixed: the original called super(CreateContainerForm, ...), naming the
        # wrong class for this form.
        super(ContainerForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'form-create-container'
self.helper.form_class = 'form-horizontal'
self.helper.form_action = reverse('containers.views.create_container')
self.helper.help_text_inline = True
self.fields['image'].widget.attrs['readonly'] = True
class ImageBuildForm(forms.Form):
dockerfile = forms.FileField(required=False)
url = forms.URLField(help_text='Dockerfile URL', required=False)
tag = forms.CharField(help_text='i.e. app-v1', required=False)
hosts = forms.MultipleChoiceField()
def __init__(self, *args, **kwargs):
super(ImageBuildForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'form-build-image'
self.helper.form_class = 'form-horizontal'
self.helper.form_action = reverse('containers.views.build_image')
self.helper.help_text_inline = True
self.fields['hosts'].choices = \
[(x.id, x.name) for x in get_available_hosts()]
| 41.723881 | 114 | 0.655876 | 4,150 | 0.742264 | 0 | 0 | 0 | 0 | 0 | 0 | 1,430 | 0.255768 |
0e5a54810fbef414231a098e12c3d3a8fbf58189 | 343 | py | Python | 2020/01-1.py | matteodelabre/advent-of-code | a6f51222b52f948cec6aa94cb7c50bcfd4f53dc1 | [
"CC0-1.0"
] | 1 | 2021-12-02T05:16:11.000Z | 2021-12-02T05:16:11.000Z | 2020/01-1.py | matteodelabre/advent-of-code | a6f51222b52f948cec6aa94cb7c50bcfd4f53dc1 | [
"CC0-1.0"
] | null | null | null | 2020/01-1.py | matteodelabre/advent-of-code | a6f51222b52f948cec6aa94cb7c50bcfd4f53dc1 | [
"CC0-1.0"
] | null | null | null | rows = []
try:
while True:
rows.append(int(input()))
except EOFError:
pass
rows.sort()
goal = 2020
l = 0
r = len(rows) - 1
while rows[l] + rows[r] != goal and l < r:
if rows[l] + rows[r] < goal:
l += 1
else:
r -= 1
if rows[l] + rows[r] == goal:
print(rows[l] * rows[r])
else:
print('FAIL')
| 13.72 | 42 | 0.498542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.017493 |
0e5a5ab9eb95edd4197741674bf04156101779a0 | 1,543 | py | Python | castadmin/urls.py | flyinactor91/Rocky-Rollcall | ab2bdbbd5f5920e709a09d1b1182a388955211d9 | [
"MIT"
] | 2 | 2019-09-03T06:08:35.000Z | 2020-07-19T06:57:17.000Z | castadmin/urls.py | flyinactor91/Rocky-Rollcall | ab2bdbbd5f5920e709a09d1b1182a388955211d9 | [
"MIT"
] | null | null | null | castadmin/urls.py | flyinactor91/Rocky-Rollcall | ab2bdbbd5f5920e709a09d1b1182a388955211d9 | [
"MIT"
] | null | null | null | """
Cast admin URL patterns
"""
from django.urls import path
from . import views
# Shared prefix: every cast-admin route is scoped under the cast's slug.
_s = '<slug:slug>/'
urlpatterns = [
path(_s, views.cast_admin, name='cast_admin'),
path(_s+'section/new/', views.section_new, name='cast_section_new'),
path(_s+'section/<int:pk>/edit/', views.section_edit, name='cast_section_edit'),
path(_s+'section/<int:pk>/remove/', views.section_delete, name='cast_section_delete'),
path(_s+'photo/new/', views.photo_new, name='cast_photo_new'),
path(_s+'photo/<int:pk>/edit/', views.photo_edit, name='cast_photo_edit'),
path(_s+'photo/<int:pk>/remove/', views.photo_delete, name='cast_photo_delete'),
path(_s+'edit', views.cast_edit, name='cast_edit'),
path(_s+'delete', views.cast_delete, name='cast_delete'),
path(_s+'users/blocked', views.BlockedUsers.as_view(), name='cast_blocked_users'),
path(_s+'users/blocked/<slug:username>/block', views.block_user, name='cast_block_user'),
path(_s+'users/blocked/<slug:username>/unblock', views.unblock_user, name='cast_unblock_user'),
path(_s+'users/requests', views.MemberRequests.as_view(), name='cast_member_requests'),
path(_s+'users/requests/<slug:username>/approve', views.approve_request, name='cast_member_requests_approve'),
path(_s+'users/requests/<slug:username>/deny', views.deny_request, name='cast_member_requests_deny'),
path(_s+'users/managers', views.managers_edit, name='cast_managers_edit'),
path(_s+'users/managers/delete/<int:pk>', views.managers_delete, name='cast_managers_delete'),
]
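# Example reversal (illustrative; the final URL depends on where the project's
# root urlconf mounts this module):
#
#   reverse('cast_section_edit', args=['rocky-cast', 3])
#   # -> '<mount-prefix>/rocky-cast/section/3/edit/'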
| 53.206897 | 114 | 0.72197 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 736 | 0.476993 |
0e5a60a46aeb18c5fcebbf2deb1ab0ad21f787d1 | 24 | py | Python | asn1tools/version.py | eerimoq/asn1tools | 93e5fc2922c191e4a593507d4bc8847341fc9f40 | [
"MIT"
] | 198 | 2017-08-04T21:49:15.000Z | 2022-03-26T10:11:21.000Z | asn1tools/version.py | eerimoq/asn1tools | 93e5fc2922c191e4a593507d4bc8847341fc9f40 | [
"MIT"
] | 144 | 2017-09-29T12:06:51.000Z | 2022-03-29T13:04:44.000Z | asn1tools/version.py | eerimoq/asn1tools | 93e5fc2922c191e4a593507d4bc8847341fc9f40 | [
"MIT"
] | 73 | 2017-10-09T13:33:28.000Z | 2022-03-11T01:35:22.000Z | __version__ = '0.159.0'
| 12 | 23 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.375 |
0e5bec43e7ad02e792ccbf34c4c1ba79010d99ec | 143 | py | Python | tags/ctx.py | NoUMelon/phen-cogs | f8d4de3a91f428114d66fd36ba38ecbd5ace83a3 | [
"MIT"
] | 1 | 2021-06-01T10:12:09.000Z | 2021-06-01T10:12:09.000Z | tags/ctx.py | NoUMelon/phen-cogs | f8d4de3a91f428114d66fd36ba38ecbd5ace83a3 | [
"MIT"
] | null | null | null | tags/ctx.py | NoUMelon/phen-cogs | f8d4de3a91f428114d66fd36ba38ecbd5ace83a3 | [
"MIT"
] | null | null | null | from redbot.core.commands import Context
class SilentContext(Context):
    """Command context that silently drops every outgoing message."""
async def send(self, content: str = None, **kwargs):
pass
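# Usage sketch (illustrative; uses discord.py's `cls` hook on get_context):
#
#   ctx = await bot.get_context(message, cls=SilentContext)
#   await ctx.send("this reply is swallowed")  # nothing is actually sent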
| 20.428571 | 56 | 0.699301 | 99 | 0.692308 | 0 | 0 | 0 | 0 | 65 | 0.454545 | 0 | 0 |
0e5c6b3710e58e819d07a446f732bb58ff060f7c | 545 | py | Python | Liver_segmentation/find_and_delete_empty_images.py | 6895mahfuzgit/PyTorch-and-Monai-for-AI-Healthcare-Imaging- | 9fbd1e2863599f58ccd5bb033ecf4c4de4017eb1 | [
"Apache-2.0"
] | null | null | null | Liver_segmentation/find_and_delete_empty_images.py | 6895mahfuzgit/PyTorch-and-Monai-for-AI-Healthcare-Imaging- | 9fbd1e2863599f58ccd5bb033ecf4c4de4017eb1 | [
"Apache-2.0"
] | null | null | null | Liver_segmentation/find_and_delete_empty_images.py | 6895mahfuzgit/PyTorch-and-Monai-for-AI-Healthcare-Imaging- | 9fbd1e2863599f58ccd5bb033ecf4c4de4017eb1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 17 21:33:56 2022
@author: mahfuz
"""
from glob import glob
import nibabel as nif
import numpy as np
# Path is kept verbatim ('nifiti'/'lables' are the on-disk folder names).
input_path_labels = r'C:\Users\mahfu\Desktop\Codes\nifiti_files\lables\*'
input_labels = glob(input_path_labels)
for patient in input_labels:
    nifti_file = nif.load(patient)
    f_data = nifti_file.get_fdata()
    np_unique = np.unique(f_data)
    print(np_unique)
    # A single unique intensity means the label volume contains only background.
    if len(np_unique) == 1:
        print('Deleted file length', len(np_unique))
    else:
        print('OK', len(np_unique))
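# The loop above only reports empty label files; to actually delete them, as
# the filename suggests, one could add inside the single-class branch:
#
#   import os
#   os.remove(patient)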
| 21.8 | 73 | 0.684404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.293578 |
0e5ca3a4b73066c37affc89f2d0c6027191e7c14 | 4,873 | py | Python | pax/tasks/tasks/classification.py | epfml/pax | 26844d438d6f86889bbaeebc517199951af065bb | [
"Apache-2.0"
] | 3 | 2022-02-28T14:41:02.000Z | 2022-03-02T12:19:02.000Z | pax/tasks/tasks/classification.py | epfml/pax | 26844d438d6f86889bbaeebc517199951af065bb | [
"Apache-2.0"
] | null | null | null | pax/tasks/tasks/classification.py | epfml/pax | 26844d438d6f86889bbaeebc517199951af065bb | [
"Apache-2.0"
] | null | null | null | from typing import Mapping
import pax.tasks.registry as registry
import regex as re
import torch
from pax.tasks.datasets.api import Batch
from pax.tasks.models.api import Buffers, Model, Params, Tuple
from pax.tasks.tasks.api import Task
DEFAULT_DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class ClassificationTask(Task):
def __init__(self, device=DEFAULT_DEVICE):
self._device = device
def init(self, seed: int = 0) -> Tuple[Params, Buffers]:
return self.model.init(seed)
@property
def name(self) -> str:
return type(self).__name__
def loss(
self,
params: Params,
batch: Batch,
buffers: Buffers = None,
is_training: bool = True,
) -> Tuple[float, Buffers]:
prediction, buffers = self.model.forward(
params, batch.x, buffers=buffers, is_training=is_training
)
loss = torch.nn.functional.cross_entropy(prediction, batch.y)
regularization = 0
for name, param in params.items():
wd = self._weight_decay_for_param(name)
if wd > 0:
regularization += _squared_norm(param) * wd
return loss + regularization, buffers
def evaluate_batch(
self,
params: Params,
batch: Batch,
buffers: Buffers = None,
is_training: bool = False,
) -> Mapping[str, torch.Tensor]:
with torch.no_grad():
output, buffers = self.model.forward(
params, batch.x, buffers=buffers, is_training=is_training
)
loss = torch.nn.functional.cross_entropy(output, batch.y)
predictions = torch.argmax(output, 1)
accuracy = predictions.eq(batch.y).float().mean()
predicted_probs = torch.nn.functional.softmax(output, dim=-1)
soft_accuracy = predicted_probs[torch.arange(len(batch), device=predicted_probs.device), batch.y].mean()
return {"loss": loss, "accuracy": accuracy, "soft_accuracy": soft_accuracy}
def _weight_decay_for_param(self, param_name: str) -> float:
return 0.0
class Cifar10(ClassificationTask):
config = {
"weight_decay": 1e-4,
"eval_batch_size": 1000,
"learning_rate": 0.05,
"momentum": 0.9,
"optimizer": "SGD",
}
def __init__(self, model: str = "resnet20", device=DEFAULT_DEVICE):
data = registry.dataset("torchvision.cifar10")(device=device)
self.train = data.train
self.test = data.test
self.model: Model = registry.model(model)(num_outputs=10, device=device)
super().__init__(device)
def _weight_decay_for_param(self, param_name: str) -> float:
if _parameter_type(param_name) != "batch_norm":
return self.config["weight_decay"]
else:
return 0.0
registry.task.register("cifar10", Cifar10)
class Cifar100(ClassificationTask):
config = {
"weight_decay": 1e-4,
"eval_batch_size": 1000,
"learning_rate": 0.05,
"momentum": 0.9,
"optimizer": "SGD",
}
def __init__(self, model: str = "resnet20", device=DEFAULT_DEVICE):
data = registry.dataset("torchvision.cifar100")(device=device)
self.train = data.train
self.test = data.test
self.model: Model = registry.model(model)(num_outputs=100, device=device)
super().__init__(device)
def _weight_decay_for_param(self, param_name: str) -> float:
if _parameter_type(param_name) != "batch_norm":
return self.config["weight_decay"]
else:
return 0.0
registry.task.register("cifar100", Cifar100)
def _squared_norm(tensor):
return torch.sum(tensor ** 2)
def _parameter_type(parameter_name):
if "conv" in parameter_name and "weight" in parameter_name:
return "convolution"
elif re.match(r""".*\.bn\d+\.(weight|bias)""", parameter_name):
return "batch_norm"
else:
return "other"
class LogisticRegression(ClassificationTask):
config = {"eval_batch_size": 1000}
def __init__(self, dataset: str, weight_decay=0, num_classes=None, device=DEFAULT_DEVICE):
data = registry.dataset(dataset)(device=device)
self.train = data.train
self.test = data.test
self.weight_decay = weight_decay
example_batch = next(iter(self.train.iterator(batch_size=1)))
if num_classes is None:
num_classes = max(data.train.num_classes, data.test.num_classes)
self.model: Model = registry.model("linear")(in_features=example_batch.x.shape[-1], out_features=num_classes, device=device)
super().__init__(device)
def _weight_decay_for_param(self, param_name: str) -> float:
return self.weight_decay
registry.task.register("logistic-regression", LogisticRegression)
| 31.642857 | 132 | 0.640468 | 4,058 | 0.832752 | 0 | 0 | 71 | 0.01457 | 0 | 0 | 443 | 0.090909 |
0e5ea68ff93ca844710804b051686a214341d76f | 278 | py | Python | python/ex103_validacao_dados.py | lucasdiogomartins/curso-em-video | 9da92b6255a11021f719a9e0ce994db639e1ac38 | [
"MIT"
] | null | null | null | python/ex103_validacao_dados.py | lucasdiogomartins/curso-em-video | 9da92b6255a11021f719a9e0ce994db639e1ac38 | [
"MIT"
] | null | null | null | python/ex103_validacao_dados.py | lucasdiogomartins/curso-em-video | 9da92b6255a11021f719a9e0ce994db639e1ac38 | [
"MIT"
] | null | null | null | def mostrar(n='', g=''):
    if n == '':
        n = '<unknown>'
    if not g.isnumeric():
        g = 0
    return f'Player {n} scored {g} goal(s) in the championship'
# Main
nome = input('Player name: ').title()
gols = input('Number of goals: ')
print(mostrar(nome, gols))
| 21.384615 | 56 | 0.55036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.397849 |
0e5ee46bdd0854fb83ad5b03e98e62a0395d3652 | 57,678 | py | Python | tests/integration/boxscore/test_ncaab_boxscore.py | MArtinherz/sportsipy | 24f4c1d5e3bb8ecc56e21568961588491e9cfd2a | [
"MIT"
] | 221 | 2018-05-15T19:48:03.000Z | 2021-01-05T15:36:21.000Z | tests/integration/boxscore/test_ncaab_boxscore.py | MArtinherz/sportsipy | 24f4c1d5e3bb8ecc56e21568961588491e9cfd2a | [
"MIT"
] | 502 | 2018-07-25T03:09:26.000Z | 2021-01-06T16:07:02.000Z | tests/integration/boxscore/test_ncaab_boxscore.py | MArtinherz/sportsipy | 24f4c1d5e3bb8ecc56e21568961588491e9cfd2a | [
"MIT"
] | 72 | 2021-01-21T13:17:00.000Z | 2022-03-31T21:43:25.000Z | import mock
import os
import pandas as pd
from datetime import datetime
from flexmock import flexmock
from sportsipy import utils
from sportsipy.constants import HOME
from sportsipy.ncaab.constants import BOXSCORES_URL, SCHEDULE_URL
from sportsipy.ncaab.boxscore import Boxscore, Boxscores
MONTH = 1
YEAR = 2020
BOXSCORE = '2020-01-22-19-louisville'
def read_file(filename):
    filepath = os.path.join(os.path.dirname(__file__), 'ncaab', filename)
    with open(filepath, 'r', encoding='utf8') as fixture:
        return fixture.read()
def mock_pyquery(url):
    # Stand-in for requests.get: known boxscore-index URLs are routed to their
    # fixture files; any other URL falls through to the single-game fixture.
class MockPQ:
def __init__(self, html_contents):
self.status_code = 200
self.html_contents = html_contents
self.text = html_contents
def __call__(self, div):
return read_file('table.html')
if url == BOXSCORES_URL % (MONTH, 5, YEAR):
return MockPQ(read_file('boxscores-1-5-2020.html'))
if url == BOXSCORES_URL % (MONTH, 6, YEAR):
return MockPQ(read_file('boxscores-1-6-2020.html'))
boxscore = read_file('%s.html' % BOXSCORE)
return MockPQ(boxscore)
class MockDateTime:
def __init__(self, year, month):
self.year = year
self.month = month
class TestNCAABBoxscore:
@mock.patch('requests.get', side_effect=mock_pyquery)
def setup_method(self, *args, **kwargs):
self.results = {
'date': 'January 22, 2020',
'location': 'KFC Yum! Center, Louisville, Kentucky',
'winner': HOME,
'winning_name': 'Louisville',
'winning_abbr': 'LOUISVILLE',
'losing_name': 'Georgia Tech',
'losing_abbr': 'GEORGIA-TECH',
'pace': 66.2,
'away_ranking': None,
'away_win_percentage': .421,
'away_wins': 8,
'away_losses': 11,
'away_minutes_played': 200,
'away_field_goals': 22,
'away_field_goal_attempts': 48,
'away_field_goal_percentage': .458,
'away_two_point_field_goals': 17,
'away_two_point_field_goal_attempts': 31,
'away_two_point_field_goal_percentage': .548,
'away_three_point_field_goals': 5,
'away_three_point_field_goal_attempts': 17,
'away_three_point_field_goal_percentage': .294,
'away_free_throws': 15,
'away_free_throw_attempts': 20,
'away_free_throw_percentage': .750,
'away_offensive_rebounds': 7,
'away_defensive_rebounds': 23,
'away_total_rebounds': 30,
'away_assists': 11,
'away_steals': 4,
'away_blocks': 4,
'away_turnovers': 16,
'away_personal_fouls': 18,
'away_points': 64,
'away_true_shooting_percentage': .557,
'away_effective_field_goal_percentage': .510,
'away_three_point_attempt_rate': .354,
'away_free_throw_attempt_rate': .417,
'away_offensive_rebound_percentage': 28.0,
'away_defensive_rebound_percentage': 63.9,
'away_total_rebound_percentage': 49.2,
'away_assist_percentage': 50.0,
'away_steal_percentage': 6.1,
'away_block_percentage': 10.5,
'away_turnover_percentage': 22.0,
'away_offensive_rating': 97.0,
'away_defensive_rating': 103.0,
'home_ranking': 6,
'home_win_percentage': .842,
'home_wins': 16,
'home_losses': 3,
'home_minutes_played': 200,
'home_field_goals': 24,
'home_field_goal_attempts': 58,
'home_field_goal_percentage': .414,
'home_two_point_field_goals': 18,
'home_two_point_field_goal_attempts': 38,
'home_two_point_field_goal_percentage': .474,
'home_three_point_field_goals': 6,
'home_three_point_field_goal_attempts': 20,
'home_three_point_field_goal_percentage': .300,
'home_free_throws': 14,
'home_free_throw_attempts': 23,
'home_free_throw_percentage': .609,
'home_offensive_rebounds': 13,
'home_defensive_rebounds': 18,
'home_total_rebounds': 31,
'home_assists': 12,
'home_steals': 9,
'home_blocks': 3,
'home_turnovers': 10,
'home_personal_fouls': 17,
'home_points': 68,
'home_true_shooting_percentage': .493,
'home_effective_field_goal_percentage': .466,
'home_three_point_attempt_rate': .345,
'home_free_throw_attempt_rate': .397,
'home_offensive_rebound_percentage': 36.1,
'home_defensive_rebound_percentage': 72.0,
'home_total_rebound_percentage': 50.8,
'home_assist_percentage': 50.0,
'home_steal_percentage': 13.6,
'home_block_percentage': 9.7,
'home_turnover_percentage': 12.8,
'home_offensive_rating': 103.0,
'home_defensive_rating': 97.0
}
flexmock(utils) \
.should_receive('_todays_date') \
.and_return(MockDateTime(YEAR, MONTH))
self.boxscore = Boxscore('2020-01-22-19-louisville')
def test_ncaab_boxscore_returns_requested_boxscore(self):
for attribute, value in self.results.items():
assert getattr(self.boxscore, attribute) == value
assert getattr(self.boxscore, 'summary') == {
# Box score is not parsed correctly
'away': [],
'home': []
}
def test_invalid_url_yields_empty_class(self):
flexmock(Boxscore) \
.should_receive('_retrieve_html_page') \
.and_return(None)
boxscore = Boxscore(BOXSCORE)
for key, value in boxscore.__dict__.items():
if key == '_uri':
continue
assert value is None
def test_ncaab_boxscore_dataframe_returns_dataframe_of_all_values(self):
df = pd.DataFrame([self.results], index=[BOXSCORE])
# Pandas doesn't natively allow comparisons of DataFrames.
# Concatenating the two DataFrames (the one generated during the test
# and the expected one above) and dropping duplicate rows leaves only
# the rows that are unique between the two frames. This allows a quick
# check of the DataFrame to see if it is empty - if so, all rows are
# duplicates, and they are equal.
frames = [df, self.boxscore.dataframe]
df1 = pd.concat(frames).drop_duplicates(keep=False)
assert df1.empty
def test_ncaab_boxscore_players(self):
boxscore = Boxscore(BOXSCORE)
assert len(boxscore.home_players) == 10
assert len(boxscore.away_players) == 7
for player in boxscore.home_players:
assert not player.dataframe.empty
for player in boxscore.away_players:
assert not player.dataframe.empty
def test_ncaab_boxscore_string_representation(self):
expected = ('Boxscore for Georgia Tech '
'at Louisville (January 22, 2020)')
boxscore = Boxscore(BOXSCORE)
assert boxscore.__repr__() == expected
class TestNCAABBoxscores:
def setup_method(self):
self.expected = {
'1-5-2020': [
{'boxscore': '2020-01-05-13-michigan-state',
'away_name': 'Michigan',
'away_abbr': 'michigan',
'away_score': 69,
'away_rank': 12,
'home_name': 'Michigan State',
'home_abbr': 'michigan-state',
'home_score': 87,
'home_rank': 14,
'non_di': False,
'top_25': True,
'winning_name': 'Michigan State',
'winning_abbr': 'michigan-state',
'losing_name': 'Michigan',
'losing_abbr': 'michigan'},
{'boxscore': '2020-01-05-13-saint-josephs',
'away_name': 'Dayton',
'away_abbr': 'dayton',
'away_score': 80,
'away_rank': 20,
'home_name': "St. Joseph's",
'home_abbr': 'saint-josephs',
'home_score': 67,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Dayton',
'winning_abbr': 'dayton',
'losing_name': "St. Joseph's",
'losing_abbr': 'saint-josephs'},
{'boxscore': '2020-01-05-15-american',
'away_name': 'Boston University',
'away_abbr': 'boston-university',
'away_score': 63,
'away_rank': None,
'home_name': 'American',
'home_abbr': 'american',
'home_score': 67,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'American',
'winning_abbr': 'american',
'losing_name': 'Boston University',
'losing_abbr': 'boston-university'},
{'boxscore': '2020-01-05-14-lafayette',
'away_name': 'Bucknell',
'away_abbr': 'bucknell',
'away_score': 78,
'away_rank': None,
'home_name': 'Lafayette',
'home_abbr': 'lafayette',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Bucknell',
'winning_abbr': 'bucknell',
'losing_name': 'Lafayette',
'losing_abbr': 'lafayette'},
{'boxscore': '2020-01-05-14-duquesne',
'away_name': 'Davidson',
'away_abbr': 'davidson',
'away_score': 64,
'away_rank': None,
'home_name': 'Duquesne',
'home_abbr': 'duquesne',
'home_score': 71,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Duquesne',
'winning_abbr': 'duquesne',
'losing_name': 'Davidson',
'losing_abbr': 'davidson'},
{'boxscore': '2020-01-05-16-south-dakota',
'away_name': 'Denver',
'away_abbr': 'denver',
'away_score': 78,
'away_rank': None,
'home_name': 'South Dakota',
'home_abbr': 'south-dakota',
'home_score': 80,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'South Dakota',
'winning_abbr': 'south-dakota',
'losing_name': 'Denver',
'losing_abbr': 'denver'},
{'boxscore': '2020-01-05-14-canisius',
'away_name': 'Fairfield',
'away_abbr': 'fairfield',
'away_score': 46,
'away_rank': None,
'home_name': 'Canisius',
'home_abbr': 'canisius',
'home_score': 42,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Fairfield',
'winning_abbr': 'fairfield',
'losing_name': 'Canisius',
'losing_abbr': 'canisius'},
{'boxscore': '2020-01-05-17-northwestern-state',
'away_name': 'Houston Baptist',
'away_abbr': 'houston-baptist',
'away_score': 79,
'away_rank': None,
'home_name': 'Northwestern State',
'home_abbr': 'northwestern-state',
'home_score': 106,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Northwestern State',
'winning_abbr': 'northwestern-state',
'losing_name': 'Houston Baptist',
'losing_abbr': 'houston-baptist'},
{'boxscore': '2020-01-05-14-milwaukee',
'away_name': 'UIC',
'away_abbr': 'illinois-chicago',
'away_score': 62,
'away_rank': None,
'home_name': 'Milwaukee',
'home_abbr': 'milwaukee',
'home_score': 64,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Milwaukee',
'winning_abbr': 'milwaukee',
'losing_name': 'UIC',
'losing_abbr': 'illinois-chicago'},
{'boxscore': '2020-01-05-14-monmouth',
'away_name': 'Iona',
'away_abbr': 'iona',
'away_score': 61,
'away_rank': None,
'home_name': 'Monmouth',
'home_abbr': 'monmouth',
'home_score': 73,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Monmouth',
'winning_abbr': 'monmouth',
'losing_name': 'Iona',
'losing_abbr': 'iona'},
{'boxscore': '2020-01-05-17-north-dakota',
'away_name': "Purdue-Fort Wayne",
'away_abbr': 'ipfw',
'away_score': 69,
'away_rank': None,
'home_name': 'North Dakota',
'home_abbr': 'north-dakota',
'home_score': 83,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'North Dakota',
'winning_abbr': 'north-dakota',
'losing_name': "Purdue-Fort Wayne",
'losing_abbr': 'ipfw'},
{'boxscore': '2020-01-05-14-green-bay',
'away_name': 'IUPUI',
'away_abbr': 'iupui',
'away_score': 93,
'away_rank': None,
'home_name': 'Green Bay',
'home_abbr': 'green-bay',
'home_score': 78,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'IUPUI',
'winning_abbr': 'iupui',
'losing_name': 'Green Bay',
'losing_abbr': 'green-bay'},
{'boxscore': '2020-01-05-14-fordham',
'away_name': 'La Salle',
'away_abbr': 'la-salle',
'away_score': 66,
'away_rank': None,
'home_name': 'Fordham',
'home_abbr': 'fordham',
'home_score': 60,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'La Salle',
'winning_abbr': 'la-salle',
'losing_name': 'Fordham',
'losing_abbr': 'fordham'},
{'boxscore': '2020-01-05-14-lehigh',
'away_name': 'Loyola (MD)',
'away_abbr': 'loyola-md',
'away_score': 71,
'away_rank': None,
'home_name': 'Lehigh',
'home_abbr': 'lehigh',
'home_score': 78,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Lehigh',
'winning_abbr': 'lehigh',
'losing_name': 'Loyola (MD)',
'losing_abbr': 'loyola-md'},
{'boxscore': '2020-01-05-13-niagara',
'away_name': 'Manhattan',
'away_abbr': 'manhattan',
'away_score': 67,
'away_rank': None,
'home_name': 'Niagara',
'home_abbr': 'niagara',
'home_score': 62,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Manhattan',
'winning_abbr': 'manhattan',
'losing_name': 'Niagara',
'losing_abbr': 'niagara'},
{'boxscore': '2020-01-05-14-saint-peters',
'away_name': 'Marist',
'away_abbr': 'marist',
'away_score': 40,
'away_rank': None,
'home_name': "St. Peter's",
'home_abbr': 'saint-peters',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': "St. Peter's",
'winning_abbr': 'saint-peters',
'losing_name': 'Marist',
'losing_abbr': 'marist'},
{'boxscore': '2020-01-05-16-saint-louis',
'away_name': 'UMass',
'away_abbr': 'massachusetts',
'away_score': 80,
'away_rank': None,
'home_name': 'Saint Louis',
'home_abbr': 'saint-louis',
'home_score': 83,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Saint Louis',
'winning_abbr': 'saint-louis',
'losing_name': 'UMass',
'losing_abbr': 'massachusetts'},
{'boxscore': '2020-01-05-12-holy-cross',
'away_name': 'Navy',
'away_abbr': 'navy',
'away_score': 61,
'away_rank': None,
'home_name': 'Holy Cross',
'home_abbr': 'holy-cross',
'home_score': 63,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Holy Cross',
'winning_abbr': 'holy-cross',
'losing_name': 'Navy',
'losing_abbr': 'navy'},
{'boxscore': '2020-01-05-15-oakland',
'away_name': 'Northern Kentucky',
'away_abbr': 'northern-kentucky',
'away_score': 75,
'away_rank': None,
'home_name': 'Oakland',
'home_abbr': 'oakland',
'home_score': 64,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Northern Kentucky',
'winning_abbr': 'northern-kentucky',
'losing_name': 'Oakland',
'losing_abbr': 'oakland'},
{'boxscore': '2020-01-05-15-north-dakota-state',
'away_name': 'Northland',
'away_abbr': 'Northland',
'away_score': 43,
'away_rank': None,
'home_name': 'North Dakota State',
'home_abbr': 'north-dakota-state',
'home_score': 97,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'North Dakota State',
'winning_abbr': 'north-dakota-state',
'losing_name': 'Northland',
'losing_abbr': 'Northland'},
{'boxscore': '2020-01-05-19-minnesota',
'away_name': 'Northwestern',
'away_abbr': 'northwestern',
'away_score': 68,
'away_rank': None,
'home_name': 'Minnesota',
'home_abbr': 'minnesota',
'home_score': 77,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Minnesota',
'winning_abbr': 'minnesota',
'losing_name': 'Northwestern',
'losing_abbr': 'northwestern'},
{'boxscore': '2020-01-05-18-colorado',
'away_name': 'Oregon State',
'away_abbr': 'oregon-state',
'away_score': 76,
'away_rank': None,
'home_name': 'Colorado',
'home_abbr': 'colorado',
'home_score': 68,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Oregon State',
'winning_abbr': 'oregon-state',
'losing_name': 'Colorado',
'losing_abbr': 'colorado'},
{'boxscore': '2020-01-05-20-illinois',
'away_name': 'Purdue',
'away_abbr': 'purdue',
'away_score': 37,
'away_rank': None,
'home_name': 'Illinois',
'home_abbr': 'illinois',
'home_score': 63,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Illinois',
'winning_abbr': 'illinois',
'losing_name': 'Purdue',
'losing_abbr': 'purdue'},
{'boxscore': '2020-01-05-12-rhode-island',
'away_name': 'Richmond',
'away_abbr': 'richmond',
'away_score': 69,
'away_rank': None,
'home_name': 'Rhode Island',
'home_abbr': 'rhode-island',
'home_score': 61,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Richmond',
'winning_abbr': 'richmond',
'losing_name': 'Rhode Island',
'losing_abbr': 'rhode-island'},
{'boxscore': '2020-01-05-14-rider',
'away_name': 'Siena',
'away_abbr': 'siena',
'away_score': 77,
'away_rank': None,
'home_name': 'Rider',
'home_abbr': 'rider',
'home_score': 85,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Rider',
'winning_abbr': 'rider',
'losing_name': 'Siena',
'losing_abbr': 'siena'},
{'boxscore': '2020-01-05-22-washington',
'away_name': 'USC',
'away_abbr': 'southern-california',
'away_score': 40,
'away_rank': None,
'home_name': 'Washington',
'home_abbr': 'washington',
'home_score': 72,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Washington',
'winning_abbr': 'washington',
'losing_name': 'USC',
'losing_abbr': 'southern-california'},
{'boxscore': '2020-01-05-16-george-washington',
'away_name': 'St. Bonaventure',
'away_abbr': 'st-bonaventure',
'away_score': 71,
'away_rank': None,
'home_name': 'George Washington',
'home_abbr': 'george-washington',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'St. Bonaventure',
'winning_abbr': 'st-bonaventure',
'losing_name': 'George Washington',
'losing_abbr': 'george-washington'},
{'boxscore': '2020-01-05-16-xavier',
'away_name': "St. John's (NY)",
'away_abbr': 'st-johns-ny',
'away_score': 67,
'away_rank': None,
'home_name': 'Xavier',
'home_abbr': 'xavier',
'home_score': 75,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Xavier',
'winning_abbr': 'xavier',
'losing_name': "St. John's (NY)",
'losing_abbr': 'st-johns-ny'},
{'boxscore': '2020-01-05-13-maine',
'away_name': 'Stony Brook',
'away_abbr': 'stony-brook',
'away_score': 73,
'away_rank': None,
'home_name': 'Maine',
'home_abbr': 'maine',
'home_score': 52,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Stony Brook',
'winning_abbr': 'stony-brook',
'losing_name': 'Maine',
'losing_abbr': 'maine'},
{'boxscore': '2020-01-05-12-george-mason',
'away_name': 'VCU',
'away_abbr': 'virginia-commonwealth',
'away_score': 72,
'away_rank': None,
'home_name': 'George Mason',
'home_abbr': 'george-mason',
'home_score': 59,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'VCU',
'winning_abbr': 'virginia-commonwealth',
'losing_name': 'George Mason',
'losing_abbr': 'george-mason'},
{'boxscore': '2020-01-05-13-detroit-mercy',
'away_name': "Wright State",
'away_abbr': "wright-state",
'away_score': 70,
'away_rank': None,
'home_name': 'Detroit',
'home_abbr': 'detroit-mercy',
'home_score': 69,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Wright State',
'winning_abbr': 'wright-state',
'losing_name': "Detroit",
'losing_abbr': "detroit-mercy"}
]
}
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search(self, *args, **kwargs):
result = Boxscores(datetime(2020, 1, 5)).games
assert result == self.expected
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_invalid_end(self, *args, **kwargs):
result = Boxscores(datetime(2020, 1, 5),
datetime(2020, 1, 4)).games
assert result == self.expected
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_multiple_days(self, *args, **kwargs):
expected = {
'1-5-2020': [
{'boxscore': '2020-01-05-13-michigan-state',
'away_name': 'Michigan',
'away_abbr': 'michigan',
'away_score': 69,
'away_rank': 12,
'home_name': 'Michigan State',
'home_abbr': 'michigan-state',
'home_score': 87,
'home_rank': 14,
'non_di': False,
'top_25': True,
'winning_name': 'Michigan State',
'winning_abbr': 'michigan-state',
'losing_name': 'Michigan',
'losing_abbr': 'michigan'},
{'boxscore': '2020-01-05-13-saint-josephs',
'away_name': 'Dayton',
'away_abbr': 'dayton',
'away_score': 80,
'away_rank': 20,
'home_name': "St. Joseph's",
'home_abbr': 'saint-josephs',
'home_score': 67,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Dayton',
'winning_abbr': 'dayton',
'losing_name': "St. Joseph's",
'losing_abbr': 'saint-josephs'},
{'boxscore': '2020-01-05-15-american',
'away_name': 'Boston University',
'away_abbr': 'boston-university',
'away_score': 63,
'away_rank': None,
'home_name': 'American',
'home_abbr': 'american',
'home_score': 67,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'American',
'winning_abbr': 'american',
'losing_name': 'Boston University',
'losing_abbr': 'boston-university'},
{'boxscore': '2020-01-05-14-lafayette',
'away_name': 'Bucknell',
'away_abbr': 'bucknell',
'away_score': 78,
'away_rank': None,
'home_name': 'Lafayette',
'home_abbr': 'lafayette',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Bucknell',
'winning_abbr': 'bucknell',
'losing_name': 'Lafayette',
'losing_abbr': 'lafayette'},
{'boxscore': '2020-01-05-14-duquesne',
'away_name': 'Davidson',
'away_abbr': 'davidson',
'away_score': 64,
'away_rank': None,
'home_name': 'Duquesne',
'home_abbr': 'duquesne',
'home_score': 71,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Duquesne',
'winning_abbr': 'duquesne',
'losing_name': 'Davidson',
'losing_abbr': 'davidson'},
{'boxscore': '2020-01-05-16-south-dakota',
'away_name': 'Denver',
'away_abbr': 'denver',
'away_score': 78,
'away_rank': None,
'home_name': 'South Dakota',
'home_abbr': 'south-dakota',
'home_score': 80,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'South Dakota',
'winning_abbr': 'south-dakota',
'losing_name': 'Denver',
'losing_abbr': 'denver'},
{'boxscore': '2020-01-05-14-canisius',
'away_name': 'Fairfield',
'away_abbr': 'fairfield',
'away_score': 46,
'away_rank': None,
'home_name': 'Canisius',
'home_abbr': 'canisius',
'home_score': 42,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Fairfield',
'winning_abbr': 'fairfield',
'losing_name': 'Canisius',
'losing_abbr': 'canisius'},
{'boxscore': '2020-01-05-17-northwestern-state',
'away_name': 'Houston Baptist',
'away_abbr': 'houston-baptist',
'away_score': 79,
'away_rank': None,
'home_name': 'Northwestern State',
'home_abbr': 'northwestern-state',
'home_score': 106,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Northwestern State',
'winning_abbr': 'northwestern-state',
'losing_name': 'Houston Baptist',
'losing_abbr': 'houston-baptist'},
{'boxscore': '2020-01-05-14-milwaukee',
'away_name': 'UIC',
'away_abbr': 'illinois-chicago',
'away_score': 62,
'away_rank': None,
'home_name': 'Milwaukee',
'home_abbr': 'milwaukee',
'home_score': 64,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Milwaukee',
'winning_abbr': 'milwaukee',
'losing_name': 'UIC',
'losing_abbr': 'illinois-chicago'},
{'boxscore': '2020-01-05-14-monmouth',
'away_name': 'Iona',
'away_abbr': 'iona',
'away_score': 61,
'away_rank': None,
'home_name': 'Monmouth',
'home_abbr': 'monmouth',
'home_score': 73,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Monmouth',
'winning_abbr': 'monmouth',
'losing_name': 'Iona',
'losing_abbr': 'iona'},
{'boxscore': '2020-01-05-17-north-dakota',
'away_name': "Purdue-Fort Wayne",
'away_abbr': 'ipfw',
'away_score': 69,
'away_rank': None,
'home_name': 'North Dakota',
'home_abbr': 'north-dakota',
'home_score': 83,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'North Dakota',
'winning_abbr': 'north-dakota',
'losing_name': "Purdue-Fort Wayne",
'losing_abbr': 'ipfw'},
{'boxscore': '2020-01-05-14-green-bay',
'away_name': 'IUPUI',
'away_abbr': 'iupui',
'away_score': 93,
'away_rank': None,
'home_name': 'Green Bay',
'home_abbr': 'green-bay',
'home_score': 78,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'IUPUI',
'winning_abbr': 'iupui',
'losing_name': 'Green Bay',
'losing_abbr': 'green-bay'},
{'boxscore': '2020-01-05-14-fordham',
'away_name': 'La Salle',
'away_abbr': 'la-salle',
'away_score': 66,
'away_rank': None,
'home_name': 'Fordham',
'home_abbr': 'fordham',
'home_score': 60,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'La Salle',
'winning_abbr': 'la-salle',
'losing_name': 'Fordham',
'losing_abbr': 'fordham'},
{'boxscore': '2020-01-05-14-lehigh',
'away_name': 'Loyola (MD)',
'away_abbr': 'loyola-md',
'away_score': 71,
'away_rank': None,
'home_name': 'Lehigh',
'home_abbr': 'lehigh',
'home_score': 78,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Lehigh',
'winning_abbr': 'lehigh',
'losing_name': 'Loyola (MD)',
'losing_abbr': 'loyola-md'},
{'boxscore': '2020-01-05-13-niagara',
'away_name': 'Manhattan',
'away_abbr': 'manhattan',
'away_score': 67,
'away_rank': None,
'home_name': 'Niagara',
'home_abbr': 'niagara',
'home_score': 62,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Manhattan',
'winning_abbr': 'manhattan',
'losing_name': 'Niagara',
'losing_abbr': 'niagara'},
{'boxscore': '2020-01-05-14-saint-peters',
'away_name': 'Marist',
'away_abbr': 'marist',
'away_score': 40,
'away_rank': None,
'home_name': "St. Peter's",
'home_abbr': 'saint-peters',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': "St. Peter's",
'winning_abbr': 'saint-peters',
'losing_name': 'Marist',
'losing_abbr': 'marist'},
{'boxscore': '2020-01-05-16-saint-louis',
'away_name': 'UMass',
'away_abbr': 'massachusetts',
'away_score': 80,
'away_rank': None,
'home_name': 'Saint Louis',
'home_abbr': 'saint-louis',
'home_score': 83,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Saint Louis',
'winning_abbr': 'saint-louis',
'losing_name': 'UMass',
'losing_abbr': 'massachusetts'},
{'boxscore': '2020-01-05-12-holy-cross',
'away_name': 'Navy',
'away_abbr': 'navy',
'away_score': 61,
'away_rank': None,
'home_name': 'Holy Cross',
'home_abbr': 'holy-cross',
'home_score': 63,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Holy Cross',
'winning_abbr': 'holy-cross',
'losing_name': 'Navy',
'losing_abbr': 'navy'},
{'boxscore': '2020-01-05-15-oakland',
'away_name': 'Northern Kentucky',
'away_abbr': 'northern-kentucky',
'away_score': 75,
'away_rank': None,
'home_name': 'Oakland',
'home_abbr': 'oakland',
'home_score': 64,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Northern Kentucky',
'winning_abbr': 'northern-kentucky',
'losing_name': 'Oakland',
'losing_abbr': 'oakland'},
{'boxscore': '2020-01-05-15-north-dakota-state',
'away_name': 'Northland',
'away_abbr': 'Northland',
'away_score': 43,
'away_rank': None,
'home_name': 'North Dakota State',
'home_abbr': 'north-dakota-state',
'home_score': 97,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'North Dakota State',
'winning_abbr': 'north-dakota-state',
'losing_name': 'Northland',
'losing_abbr': 'Northland'},
{'boxscore': '2020-01-05-19-minnesota',
'away_name': 'Northwestern',
'away_abbr': 'northwestern',
'away_score': 68,
'away_rank': None,
'home_name': 'Minnesota',
'home_abbr': 'minnesota',
'home_score': 77,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Minnesota',
'winning_abbr': 'minnesota',
'losing_name': 'Northwestern',
'losing_abbr': 'northwestern'},
{'boxscore': '2020-01-05-18-colorado',
'away_name': 'Oregon State',
'away_abbr': 'oregon-state',
'away_score': 76,
'away_rank': None,
'home_name': 'Colorado',
'home_abbr': 'colorado',
'home_score': 68,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Oregon State',
'winning_abbr': 'oregon-state',
'losing_name': 'Colorado',
'losing_abbr': 'colorado'},
{'boxscore': '2020-01-05-20-illinois',
'away_name': 'Purdue',
'away_abbr': 'purdue',
'away_score': 37,
'away_rank': None,
'home_name': 'Illinois',
'home_abbr': 'illinois',
'home_score': 63,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Illinois',
'winning_abbr': 'illinois',
'losing_name': 'Purdue',
'losing_abbr': 'purdue'},
{'boxscore': '2020-01-05-12-rhode-island',
'away_name': 'Richmond',
'away_abbr': 'richmond',
'away_score': 69,
'away_rank': None,
'home_name': 'Rhode Island',
'home_abbr': 'rhode-island',
'home_score': 61,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Richmond',
'winning_abbr': 'richmond',
'losing_name': 'Rhode Island',
'losing_abbr': 'rhode-island'},
{'boxscore': '2020-01-05-14-rider',
'away_name': 'Siena',
'away_abbr': 'siena',
'away_score': 77,
'away_rank': None,
'home_name': 'Rider',
'home_abbr': 'rider',
'home_score': 85,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Rider',
'winning_abbr': 'rider',
'losing_name': 'Siena',
'losing_abbr': 'siena'},
{'boxscore': '2020-01-05-22-washington',
'away_name': 'USC',
'away_abbr': 'southern-california',
'away_score': 40,
'away_rank': None,
'home_name': 'Washington',
'home_abbr': 'washington',
'home_score': 72,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Washington',
'winning_abbr': 'washington',
'losing_name': 'USC',
'losing_abbr': 'southern-california'},
{'boxscore': '2020-01-05-16-george-washington',
'away_name': 'St. Bonaventure',
'away_abbr': 'st-bonaventure',
'away_score': 71,
'away_rank': None,
'home_name': 'George Washington',
'home_abbr': 'george-washington',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'St. Bonaventure',
'winning_abbr': 'st-bonaventure',
'losing_name': 'George Washington',
'losing_abbr': 'george-washington'},
{'boxscore': '2020-01-05-16-xavier',
'away_name': "St. John's (NY)",
'away_abbr': 'st-johns-ny',
'away_score': 67,
'away_rank': None,
'home_name': 'Xavier',
'home_abbr': 'xavier',
'home_score': 75,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Xavier',
'winning_abbr': 'xavier',
'losing_name': "St. John's (NY)",
'losing_abbr': 'st-johns-ny'},
{'boxscore': '2020-01-05-13-maine',
'away_name': 'Stony Brook',
'away_abbr': 'stony-brook',
'away_score': 73,
'away_rank': None,
'home_name': 'Maine',
'home_abbr': 'maine',
'home_score': 52,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Stony Brook',
'winning_abbr': 'stony-brook',
'losing_name': 'Maine',
'losing_abbr': 'maine'},
{'boxscore': '2020-01-05-12-george-mason',
'away_name': 'VCU',
'away_abbr': 'virginia-commonwealth',
'away_score': 72,
'away_rank': None,
'home_name': 'George Mason',
'home_abbr': 'george-mason',
'home_score': 59,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'VCU',
'winning_abbr': 'virginia-commonwealth',
'losing_name': 'George Mason',
'losing_abbr': 'george-mason'},
{'boxscore': '2020-01-05-13-detroit-mercy',
'away_name': "Wright State",
'away_abbr': "wright-state",
'away_score': 70,
'away_rank': None,
'home_name': 'Detroit',
'home_abbr': 'detroit-mercy',
'home_score': 69,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Wright State',
'winning_abbr': 'wright-state',
'losing_name': "Detroit",
'losing_abbr': "detroit-mercy"}
],
'1-6-2020': [
{'boxscore': '2020-01-06-21-oklahoma-state',
'away_name': 'West Virginia',
'away_abbr': 'west-virginia',
'away_score': 55,
'away_rank': 17,
'home_name': 'Oklahoma State',
'home_abbr': 'oklahoma-state',
'home_score': 41,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'West Virginia',
'winning_abbr': 'west-virginia',
'losing_name': 'Oklahoma State',
'losing_abbr': 'oklahoma-state'},
{'boxscore': '2020-01-06-20-jackson-state',
'away_name': 'Alabama A&M',
'away_abbr': 'alabama-am',
'away_score': 66,
'away_rank': None,
'home_name': 'Jackson State',
'home_abbr': 'jackson-state',
'home_score': 57,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Alabama A&M',
'winning_abbr': 'alabama-am',
'losing_name': 'Jackson State',
'losing_abbr': 'jackson-state'},
{'boxscore': '2020-01-06-20-grambling',
'away_name': 'Alabama State',
'away_abbr': 'alabama-state',
'away_score': 63,
'away_rank': None,
'home_name': 'Grambling',
'home_abbr': 'grambling',
'home_score': 68,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Grambling',
'winning_abbr': 'grambling',
'losing_name': 'Alabama State',
'losing_abbr': 'alabama-state'},
{'boxscore': '2020-01-06-20-texas-southern',
'away_name': 'Alcorn State',
'away_abbr': 'alcorn-state',
'away_score': 95,
'away_rank': None,
'home_name': 'Texas Southern',
'home_abbr': 'texas-southern',
'home_score': 80,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Alcorn State',
'winning_abbr': 'alcorn-state',
'losing_name': 'Texas Southern',
'losing_abbr': 'texas-southern'},
{'boxscore': '2020-01-06-19-howard',
'away_name': 'Bethune-Cookman',
'away_abbr': 'bethune-cookman',
'away_score': 102,
'away_rank': None,
'home_name': 'Howard',
'home_abbr': 'howard',
'home_score': 73,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Bethune-Cookman',
'winning_abbr': 'bethune-cookman',
'losing_name': 'Howard',
'losing_abbr': 'howard'},
{'boxscore': '2020-01-06-19-army',
'away_name': 'Colgate',
'away_abbr': 'colgate',
'away_score': 70,
'away_rank': None,
'home_name': 'Army',
'home_abbr': 'army',
'home_score': 65,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Colgate',
'winning_abbr': 'colgate',
'losing_name': 'Army',
'losing_abbr': 'army'},
{'boxscore': '2020-01-06-19-north-carolina-at',
'away_name': 'Florida A&M',
'away_abbr': 'florida-am',
'away_score': 90,
'away_rank': None,
'home_name': 'North Carolina A&T',
'home_abbr': 'north-carolina-at',
'home_score': 97,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'North Carolina A&T',
'winning_abbr': 'north-carolina-at',
'losing_name': 'Florida A&M',
'losing_abbr': 'florida-am'},
{'boxscore': '2020-01-06-19-arkansas-little-rock',
'away_name': 'Georgia Southern',
'away_abbr': 'georgia-southern',
'away_score': 73,
'away_rank': None,
'home_name': 'Little Rock',
'home_abbr': 'little-rock',
'home_score': 79,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Little Rock',
'winning_abbr': 'little-rock',
'losing_name': 'Georgia Southern',
'losing_abbr': 'georgia-southern'},
{'boxscore': '2020-01-06-20-arkansas-state',
'away_name': 'Georgia State',
'away_abbr': 'georgia-state',
'away_score': 87,
'away_rank': None,
'home_name': 'Arkansas State',
'home_abbr': 'arkansas-state',
'home_score': 90,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Arkansas State',
'winning_abbr': 'arkansas-state',
'losing_name': 'Georgia State',
'losing_abbr': 'georgia-state'},
{'boxscore': '2020-01-06-19-appalachian-state',
'away_name': 'Louisiana',
'away_abbr': 'louisiana',
'away_score': 81,
'away_rank': None,
'home_name': 'Appalachian State',
'home_abbr': 'appalachian-state',
'home_score': 73,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Louisiana',
'winning_abbr': 'louisiana',
'losing_name': 'Appalachian State',
'losing_abbr': 'appalachian-state'},
{'boxscore': '2020-01-06-19-coastal-carolina',
'away_name': 'Louisiana-Monroe',
'away_abbr': 'louisiana-monroe',
'away_score': 64,
'away_rank': None,
'home_name': 'Coastal Carolina',
'home_abbr': 'coastal-carolina',
'home_score': 93,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Coastal Carolina',
'winning_abbr': 'coastal-carolina',
'losing_name': 'Louisiana-Monroe',
'losing_abbr': 'louisiana-monroe'},
{'boxscore': '2020-01-06-19-coppin-state',
'away_name': 'Norfolk State',
'away_abbr': 'norfolk-state',
'away_score': 82,
'away_rank': None,
'home_name': 'Coppin State',
'home_abbr': 'coppin-state',
'home_score': 59,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Norfolk State',
'winning_abbr': 'norfolk-state',
'losing_name': 'Coppin State',
'losing_abbr': 'coppin-state'},
{'boxscore': '2020-01-06-20-texas-arlington',
'away_name': 'South Alabama',
'away_abbr': 'south-alabama',
'away_score': 66,
'away_rank': None,
'home_name': 'Texas-Arlington',
'home_abbr': 'texas-arlington',
'home_score': 54,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'South Alabama',
'winning_abbr': 'south-alabama',
'losing_name': 'Texas-Arlington',
'losing_abbr': 'texas-arlington'},
{'boxscore': '2020-01-06-19-morgan-state',
'away_name': 'South Carolina State',
'away_abbr': 'south-carolina-state',
'away_score': 63,
'away_rank': None,
'home_name': 'Morgan State',
'home_abbr': 'morgan-state',
'home_score': 77,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Morgan State',
'winning_abbr': 'morgan-state',
'losing_name': 'South Carolina State',
'losing_abbr': 'south-carolina-state'},
{'boxscore': '2020-01-06-21-prairie-view',
'away_name': 'Southern',
'away_abbr': 'southern',
'away_score': 54,
'away_rank': None,
'home_name': 'Prairie View',
'home_abbr': 'prairie-view',
'home_score': 64,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Prairie View',
'winning_abbr': 'prairie-view',
'losing_name': 'Southern',
'losing_abbr': 'southern'},
{'boxscore': '2020-01-06-20-texas-state',
'away_name': 'Troy',
'away_abbr': 'troy',
'away_score': 71,
'away_rank': None,
'home_name': 'Texas State',
'home_abbr': 'texas-state',
'home_score': 63,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Troy',
'winning_abbr': 'troy',
'losing_name': 'Texas State',
'losing_abbr': 'texas-state'}
]
}
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_string_representation(self, *args, **kwargs):
result = Boxscores(datetime(2020, 1, 5))
assert result.__repr__() == 'NCAAB games for 1-5-2020'
| 41.110478 | 78 | 0.437793 | 56,831 | 0.985315 | 0 | 0 | 34,853 | 0.604269 | 0 | 0 | 25,882 | 0.448733 |
0e609bb7fbbaaaa62dd52003ec698921717c72aa | 458 | py | Python | tests/test_elasticsearch.py | ankane/python-timeouts | bf939d5ef3bc8391d9ca8c2750c765fbd0bf63ae | [
"MIT"
] | 6 | 2021-10-01T06:38:22.000Z | 2022-03-23T09:22:06.000Z | tests/test_elasticsearch.py | ankane/python-timeouts | bf939d5ef3bc8391d9ca8c2750c765fbd0bf63ae | [
"MIT"
] | null | null | null | tests/test_elasticsearch.py | ankane/python-timeouts | bf939d5ef3bc8391d9ca8c2750c765fbd0bf63ae | [
"MIT"
] | null | null | null | from .conftest import TestTimeouts
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionError
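# TestTimeouts (from the local conftest) is assumed to provide raises() plus
# connect_url()/read_url() helpers that point at stalling endpoints, so the
# 1-second client timeout is what triggers the ConnectionError below.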
class TestElasticsearch(TestTimeouts):
def test_connect(self):
with self.raises(ConnectionError):
Elasticsearch([self.connect_url()], timeout=1).cluster.health()
def test_read(self):
with self.raises(ConnectionError):
Elasticsearch([self.read_url()], timeout=1).cluster.health()
| 32.714286 | 75 | 0.733624 | 327 | 0.713974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0e60e9c2253d2e72211a2da9715165e839d0f5a9 | 1,179 | py | Python | mediasync/management/commands/syncmedia.py | kennethreitz-archive/django-mediasync | 93baadda6c8edfebf065e92a63aba588b9a0311e | [
"BSD-3-Clause"
] | 2 | 2015-11-05T08:54:40.000Z | 2016-03-01T22:14:29.000Z | mediasync/management/commands/syncmedia.py | kennethreitz-archive/django-mediasync | 93baadda6c8edfebf065e92a63aba588b9a0311e | [
"BSD-3-Clause"
] | null | null | null | mediasync/management/commands/syncmedia.py | kennethreitz-archive/django-mediasync | 93baadda6c8edfebf065e92a63aba588b9a0311e | [
"BSD-3-Clause"
] | null | null | null | from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from mediasync.conf import msettings
import mediasync
import time
class Command(BaseCommand):
help = "Sync local media with remote client"
args = '[options]'
requires_model_validation = False
option_list = BaseCommand.option_list + (
make_option("-F", "--force", dest="force", help="force files to sync", action="store_true"),
make_option("-q", "--quiet", dest="verbose", help="disable output", action="store_false", default=True),
)
def handle(self, *args, **options):
msettings['SERVE_REMOTE'] = True
msettings['VERBOSE'] = options.get('verbose')
force = options.get('force') or False
try:
start_time = time.time()
mediasync.sync(force=force)
end_time = time.time()
secs = (end_time - start_time)
print 'sync finished in %0.3f seconds' % secs
except ValueError, ve:
raise CommandError('%s\nUsage is mediasync %s' % (ve.message, self.args)) | 32.75 | 112 | 0.597116 | 1,013 | 0.859203 | 0 | 0 | 0 | 0 | 0 | 0 | 250 | 0.212044 |
0e6288801a87bd532b89f29032eb854a3c39498f | 751 | py | Python | coptim/functions/rosenbrock.py | cmazzaanthony/Optimization_Algorithms | 8dcfe1fcadbe4b3908b33dbc0f14f6d5c0178ce5 | [
"MIT"
] | 3 | 2019-06-20T17:26:07.000Z | 2019-07-02T22:14:38.000Z | coptim/functions/rosenbrock.py | cmazzaanthony/coptim | 8dcfe1fcadbe4b3908b33dbc0f14f6d5c0178ce5 | [
"MIT"
] | null | null | null | coptim/functions/rosenbrock.py | cmazzaanthony/coptim | 8dcfe1fcadbe4b3908b33dbc0f14f6d5c0178ce5 | [
"MIT"
] | null | null | null | import numpy as np
from coptim.function import Function
class Rosenbrock(Function):
def eval(self, x):
assert len(x) == 2, '2 dimensional input only.'
return 100 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2
def gradient(self, x):
assert len(x) == 2, '2 dimensional input only.'
return np.array([
2 * (-200 * x[0] * x[1] + 200 * np.power(x[0], 3) - 1 + x[0]),
200 * (x[1] - x[0] ** 2)
])
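    # Sanity check (illustrative): at the global minimum x* = (1, 1),
    # eval returns 0 and the gradient above is [0, 0]; the Hessian below
    # evaluates to [[802, -400], [-400, 200]], which is positive definite.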
def hessian(self, x):
assert len(x) == 2, '2 dimensional input only.'
df_dx1 = -400 * x[1] + 1200 * x[0] ** 2 + 2
df_dx1dx2 = -400 * x[0]
df_dx2dx1 = -400 * x[0]
df_dx2 = 200
return np.array([[df_dx1, df_dx1dx2], [df_dx2dx1, df_dx2]])
| 28.884615 | 74 | 0.49534 | 692 | 0.921438 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.107856 |
0e649134de3b3b4647c82c4e0a83af296513503f | 601 | py | Python | http-server.py | itamaro/python-http | 9a5d3700b77117aaa3681dbb6e335431ec6205fc | [
"Apache-2.0"
] | null | null | null | http-server.py | itamaro/python-http | 9a5d3700b77117aaa3681dbb6e335431ec6205fc | [
"Apache-2.0"
] | 3 | 2018-06-17T09:16:31.000Z | 2018-06-17T09:34:02.000Z | http-server.py | itamaro/python-http | 9a5d3700b77117aaa3681dbb6e335431ec6205fc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
from http.server import BaseHTTPRequestHandler, HTTPServer
from os import curdir, sep
PORT_NUMBER = 8080
class myHandler(BaseHTTPRequestHandler):
#Handler for the GET requests
def do_GET(self):
self.send_response(200)
self.send_header('Content-type','image/png')
self.end_headers()
with open(curdir + sep + 'logo.png', 'rb') as f:
self.wfile.write(f.read())
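# Quick manual test (illustrative): with the server running and a logo.png in
# the working directory, `curl -o out.png http://localhost:8080/` downloads
# the image; note that any GET path returns the same file.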
try:
server = HTTPServer(('', PORT_NUMBER), myHandler)
print('Started httpserver on port', PORT_NUMBER)
server.serve_forever()
except KeyboardInterrupt:
server.server_close()
print('Stopping server')
| 23.115385 | 58 | 0.74376 | 266 | 0.442596 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.219634 |
0e6501df6c0fb85542839cee838fc4b4c8a3f879 | 868 | py | Python | lahja/tools/benchmark/typing.py | vaporyproject/lahja | fafc38106863f89ae04a52f6327e6ec5f42b2beb | [
"MIT"
] | null | null | null | lahja/tools/benchmark/typing.py | vaporyproject/lahja | fafc38106863f89ae04a52f6327e6ec5f42b2beb | [
"MIT"
] | null | null | null | lahja/tools/benchmark/typing.py | vaporyproject/lahja | fafc38106863f89ae04a52f6327e6ec5f42b2beb | [
"MIT"
] | null | null | null | from typing import (
NamedTuple,
)
from lahja import (
BaseEvent,
)
class RawMeasureEntry(NamedTuple):
sent_at: float
received_at: float
class CrunchedMeasureEntry(NamedTuple):
sent_at: float
received_at: float
duration: float
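# A minimal sketch (not part of the original module) of how a raw entry is
# presumably crunched into a CrunchedMeasureEntry; duration is assumed to be
# the receive/send delta.
def crunch(raw: RawMeasureEntry) -> CrunchedMeasureEntry:
    return CrunchedMeasureEntry(
        sent_at=raw.sent_at,
        received_at=raw.received_at,
        duration=raw.received_at - raw.sent_at,
    )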
class PerfMeasureEvent(BaseEvent):
def __init__(self, payload: bytes, index: int, sent_at: float) -> None:
self.payload = payload
self.index = index
self.sent_at = sent_at
class ShutdownEvent(BaseEvent):
pass
class Total(NamedTuple):
caption: str
num_total: int
duration_fastest: float
duration_slowest: float
duration_avg: float
total_aggregated_time: float
total_duration: float
first_sent: float
last_received: float
class TotalRecordedEvent(BaseEvent):
def __init__(self, total: Total) -> None:
self.total = total
| 17.714286 | 75 | 0.691244 | 773 | 0.890553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0e65f0696679963eaa062eafd0dbac4752aacf94 | 531 | py | Python | players/urls.py | hleejr/hofAI | d4135a87071eb272e045ec427bd4cf2fedee9307 | [
"FTL"
] | null | null | null | players/urls.py | hleejr/hofAI | d4135a87071eb272e045ec427bd4cf2fedee9307 | [
"FTL"
] | null | null | null | players/urls.py | hleejr/hofAI | d4135a87071eb272e045ec427bd4cf2fedee9307 | [
"FTL"
] | 1 | 2021-08-19T22:26:16.000Z | 2021-08-19T22:26:16.000Z | from django.urls import path
from . import views
urlpatterns = [
path('nba/', views.PlayerListView.as_view(), name='nba-list-page'),
path('nba/<int:id>/', views.PlayerDetailView.as_view(), name='nba-detail-page'),
path('nba/search/', views.PlayerSearch.as_view(), name='nba-search-page'),
path('nfl/', views.NflListView.as_view(), name='nfl-list-page'),
path('nfl/<int:id>/', views.NflDetailView.as_view(), name='nfl-detail-page'),
path('nfl/search/', views.NflSearch.as_view(), name='nfl-search-page'),
] | 44.25 | 84 | 0.676083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.312618 |
0e677bb51b35c500b63970611b9dbe8b1db2dde6 | 292 | py | Python | django2/demo/meeting/views.py | Gozeon/code-collections | 7304e2b9c4c91a809125198d22cf40dcbb45a23b | [
"MIT"
] | null | null | null | django2/demo/meeting/views.py | Gozeon/code-collections | 7304e2b9c4c91a809125198d22cf40dcbb45a23b | [
"MIT"
] | 1 | 2020-07-17T09:25:42.000Z | 2020-07-17T09:25:42.000Z | django2/demo/meeting/views.py | Gozeon/code-collections | 7304e2b9c4c91a809125198d22cf40dcbb45a23b | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def hello(request):
return HttpResponse("Hello world")
def date(request, year, month, day):
return HttpResponse({
year: year,
month: month,
day: day
})
| 17.176471 | 38 | 0.660959 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.130137 |
0e67c3d1189cc9a2fa0ae70c15b2f561c7bd2b47 | 404 | py | Python | Semester2/bottles.py | ConstantineLinardakis/Programming1Portfolio | 9062590de87e495ecf19b759a5d7a132a6982e3b | [
"MIT"
] | 1 | 2020-11-23T19:02:21.000Z | 2020-11-23T19:02:21.000Z | Semester2/bottles.py | ConstantineLinardakis/Programming1Portfolio | 9062590de87e495ecf19b759a5d7a132a6982e3b | [
"MIT"
] | null | null | null | Semester2/bottles.py | ConstantineLinardakis/Programming1Portfolio | 9062590de87e495ecf19b759a5d7a132a6982e3b | [
"MIT"
] | null | null | null | #loop
bottles = 99
while (bottles > 0):
if (bottles > 1):
print bottles, "bottles of root beer on the wall", bottles, "bottles of root beer"
print "Take one down pass it around,", bottles, "bottles of root beer on the wall"
else:
print bottles, "bottles of root beer on the wall", bottles, "bottle of root beer"
bottles = bottles - 1
| 21.263158 | 90 | 0.591584 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.44802 |
0e6a1dd764d9575734fedccbb49686086ffb62aa | 6,574 | py | Python | deepcompton/scripts/CreateDatasetWithUncertainties.py | vuillaut/DeepIntegralCompton | f7f6811401d6b234a615742e4f6d22ac46e91b2f | [
"MIT"
] | 1 | 2021-12-06T16:35:20.000Z | 2021-12-06T16:35:20.000Z | deepcompton/scripts/CreateDatasetWithUncertainties.py | vuillaut/DeepIntegralCompton | f7f6811401d6b234a615742e4f6d22ac46e91b2f | [
"MIT"
] | 1 | 2021-12-07T00:45:31.000Z | 2021-12-08T09:13:45.000Z | deepcompton/scripts/CreateDatasetWithUncertainties.py | vuillaut/DeepIntegralCompton | f7f6811401d6b234a615742e4f6d22ac46e91b2f | [
"MIT"
] | 2 | 2021-12-06T16:43:46.000Z | 2021-12-06T16:49:54.000Z | # Deep learning applied to Compton imaging with data from the INTEGRAL satellite
# AstroInfo 2021 Hackathon
## ___ Imports
import numpy as np
import matplotlib.pyplot as plt
import Utilitaires_Compton as compton
import pickle as pkl
from os import listdir
from os.path import exists
from sys import setrecursionlimit
# ___ Constants
Ee = 511  # electron rest energy (keV)
z_isgri = 0  # ISGRI plane position -> 0
z_picsit = -8.68  # PICsIT plane position -> distance between the two planes
r = 100000000000000.
precision = 5000.
## ___ Functions
def angular_separation(colat1, long1, colat2, long2):
"""
Compute the angular separation in radians
    between two pointing directions given as colatitude-longitude
Parameters
----------
    colat1: 1d `numpy.ndarray`, colatitude of the first pointing direction
    long1: 1d `numpy.ndarray`, longitude of the first pointing direction
    colat2: 1d `numpy.ndarray`, colatitude of the second pointing direction
long2: 1d `numpy.ndarray`, longitude of the second pointing direction
Returns
-------
1d `numpy.ndarray`, angular separation
"""
cosdelta = np.sin(colat1) * np.sin(colat2) * np.cos(
(long1 - long2)) + np.cos(colat1) * np.cos(colat2)
cosdelta[cosdelta > 1] = 1.
cosdelta[cosdelta < -1] = -1.
ang_sep = np.arccos(cosdelta)
return ang_sep
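# Quick check (illustrative): for the pole (colat1 = 0) and any point on the
# equator (colat2 = pi/2), cosdelta = 0 whatever the longitudes, so the
# separation is pi/2, as expected from the spherical law of cosines.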
def AddFunction_DistCone(f1,f2):
"""Somme de deux fonctions produisant une distribution
pour un ou plusieurs cones.
"""
return lambda l,c : f1(l,c) + f2(l,c)
def Cone_param(pos1x,pos1y,energ1,pos2x,pos2y,energ2) :
""" Retourne les parametres theta, phi et cottheta d'un cone
a partir des observations pos1x,pos1y,energ1,pos2x,pos2y,energ2.
retourne None si l'energie limite est atteinte.
-----------
Attention :
indice 1 : ISGRI, indice 2 : PICsIT
"""
x1cur = z_isgri
y1cur = pos1y
z1cur = -pos1x
x2cur = z_picsit
y2cur = pos2y
z2cur = -pos2x
E0 = energ1 + energ2
Ec = E0/(1+2*E0/Ee)
if (energ2 >= Ec) and (energ1 <= E0 - Ec):
cotheta = compton.cottheta(energ1,E0 - energ1)
A = np.array([x1cur,y1cur,z1cur])
B = np.array([x2cur,y2cur,z2cur])
theta = compton.colatitudeaxe(B,A)
phi = compton.longitudeaxe(B,A)
return theta, phi, cotheta
else :
return None
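# Worked example (illustrative): for a summed energy E0 = 511 keV, the Compton
# edge is Ec = 511 / (1 + 2*511/511) = 511/3 ~ 170.3 keV, so the event is kept
# only if the PICsIT deposit energ2 is at least ~170.3 keV.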
def Normal(delta_x,sigma):
"""Loi normale
"""
return 1/(sigma * np.sqrt(np.pi)) * np.exp(-1/2*(delta_x /sigma)**2)
def DistriCone_v3_2D(theta, phi, cottheta, sigma ):
""" Retourne la distribution associee au cone de parametres theta, phi, cottheta.
c'est-a-dire le cone plus une incertitude gaussienne de parametre sigma.
"""
def f_2D(l,c) :
"""l et c 2D
output 2D
"""
theta_list = np.full_like(c, theta)
phi_list = np.full_like(l, phi)
e = angular_separation(theta_list, phi_list,
np.radians(c), np.radians(l))
delta = e - np.arctan(1/cottheta)
return Normal(delta,np.radians(sigma))
return f_2D
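# The returned map peaks on the ring where the angular separation from the
# cone axis (theta, phi) equals the Compton angle arctan(1/cottheta),
# smeared by the Gaussian of width sigma (in degrees).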
def Create_image(data, n_cones, sigma = 2, grid_longit = np.linspace(0, 359, 180), grid_colat = np.linspace(0, 89, 45) ):
""" A partir des data simulees ou observees, creation d'une image sur la grille (grid_longit, grid_colat).
Pour cela on applique sur cette grille la fonction cumulant n_cones distributions produites avec n_cones evenements
de data, tires aleatoirement. Ces distributions gaussiennes ont une incertitude sigma.
"""
# ___ Load data
s = data.shape[0]
# ISGRI -> 1
E1_list = data[:,0]
Y1_list = data[:,6]
Z1_list = data[:,7]
#PICsIT -> 2
E2_list = data[:,1]
Y2_list = data[:,8]
Z2_list = data[:,9]
# ___ Create distribution
f_v3 = lambda l,c : 0
for i in np.random.randint(0,s,size = n_cones) :
L = Cone_param(Y1_list[i],Z1_list[i],E1_list[i],Y2_list[i],Z2_list[i],E2_list[i])
        if L is not None :
theta, phi, cottheta = L
f_new = DistriCone_v3_2D(theta, phi, cottheta, sigma = sigma)
f_v3 = AddFunction_DistCone(f_v3,f_new)
# ___ Application on image
l, c = np.meshgrid(grid_colat, grid_longit)
im = f_v3(c,l)
return im
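# Illustrative use (assuming a .npy event array): Create_image(np.load(f), 500)
# returns a (longitude x colatitude) map summing 500 smeared cone rings.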
def Create_dataset(name_file_list,path, name_pkl, n_file_max = -1, n_image_file = 50, n_cones_min = 100, n_cones_max = 2000, sigma = 2):
""" Creation d'un dataset a partir d'une liste de fichiers path+name_file_list.
A partir de n_file_max fichiers, on tire n_image_file images par fichier
avec un nombre de cones variant aleatoirement entre n_cones_min et n_cones_max.
Le dataset est enregistre dans un pkl nomme name_pkl.
"""
    # The appending logic below loads name_pkl before each dump, so make
    # sure the pickle exists with empty lists on first use.
    if not exists(name_pkl):
        pkl.dump(([], []), open(name_pkl, "wb"))
    y,x=[],[]
i=0
for name_file in name_file_list[:n_file_max] :
i+=1
_,theta,_,phi=name_file.replace(".npy","").split("_")
j=0
for n_cones in np.random.randint(n_cones_min,n_cones_max+1,size = n_image_file) :
data = np.load(path+name_file).astype("float64")
if len(data)>0:
#print(f"\r file n°{i}/{n_file_max}, image n°{j}/{n_image_file} ", end='', flush=True)
data = Create_image(data, n_cones, sigma = sigma)
y.append([float(theta),float(phi),n_cones])
x.append(data)
j+=1
if len(y)%100 == 0:
nx,ny = pkl.load(open(name_pkl, "rb"))
nx+=x
ny+=y
pkl.dump((nx,ny), open(name_pkl, "wb"))
x.clear()
y.clear()
nx,ny = pkl.load(open(name_pkl, "rb"))
nx+=x
ny+=y
pkl.dump((nx,ny), open(name_pkl, "wb"))
return 0
## ___ Script
path = '/Users/lg265853/Documents/AstroInfo/save_Compton'
name_file_list = listdir(path)
n_file_max = -1
n_image_file = 100
n_cones_min = 100
n_cones_max = 2000
sigma = 2
name_pkl = f'dataset_{n_image_file}images_{n_file_max}files_sig{sigma}_ncones{n_cones_min}-{n_cones_max}.pkl'
setrecursionlimit(n_cones_max)
Create_dataset(name_file_list,path+'/',name_pkl,n_file_max, n_image_file, n_cones_min, n_cones_max, sigma)
## ___ Verification
#x,y = pkl.load(open(name_pkl, "rb"))
#print(len(x), len(y))
#
#plt.figure(figsize=(20,20))
#k = 1
#for i in np.random.randint(0,len(x),size = 30):
# plt.subplot(3,10,k)
# plt.imshow(x[i], origin='lower', cmap = 'CMRmap_r')
# plt.scatter(y[i][0]/2,y[i][1]/2)
# plt.title(str(y[i][2]) + ' cones')
# plt.xlabel('colatitude') ; plt.ylabel('longitude')
# k+=1
#
#plt.show()
| 28.960352 | 136 | 0.623973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,740 | 0.416667 |
0e6ba2de9241ced5ccf9acb46a7611acaa103fa7 | 641 | py | Python | insta/migrations/0003_auto_20190522_1122.py | eddyyonnie/instanicer | ae5c9994a5fcf0291a7785921655be0d5293a790 | [
"MIT"
] | null | null | null | insta/migrations/0003_auto_20190522_1122.py | eddyyonnie/instanicer | ae5c9994a5fcf0291a7785921655be0d5293a790 | [
"MIT"
] | 4 | 2021-03-19T00:52:54.000Z | 2021-09-08T01:00:47.000Z | insta/migrations/0003_auto_20190522_1122.py | eddyyonnie/instanicer | ae5c9994a5fcf0291a7785921655be0d5293a790 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.1 on 2019-05-22 08:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('insta', '0002_pictures'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.CharField(max_length=400)),
],
),
migrations.RenameField(
model_name='profile',
old_name='bio',
new_name='about',
),
]
| 24.653846 | 114 | 0.549142 | 548 | 0.854914 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.180967 |
0e6bbfb6f7ffa48afb033c77acd475e0be9aebd8 | 15,134 | py | Python | configs.py | wangchaodong/packaging | 7061a65c72f467a0b301374a7101b0152c168ad1 | [
"MIT"
] | 15 | 2021-09-19T16:01:38.000Z | 2022-03-21T02:02:24.000Z | configs.py | wangchaodong/packaging | 7061a65c72f467a0b301374a7101b0152c168ad1 | [
"MIT"
] | null | null | null | configs.py | wangchaodong/packaging | 7061a65c72f467a0b301374a7101b0152c168ad1 | [
"MIT"
] | 2 | 2021-09-19T21:52:15.000Z | 2021-09-25T14:41:02.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import enum
import json
import os
import plistlib
import subprocess
import time
import tools
import requests
python_script_debug_enable = False  # whether to enable debug mode, used for testing the script
pwd = os.getcwd()  # current working directory
ios_project_path = os.path.abspath(os.path.dirname(
    pwd) + os.path.sep + ".")  # iOS project path; defaults to this file's parent directory. To change it, fill in an absolute project path
system_home_dir = os.path.expanduser('~')  # home directory
build_directory = os.path.join(pwd, 'build')  # output folder for the build
auth_key_dir_name = 'private_keys'
auth_key_copy_dir = os.path.join(pwd, auth_key_dir_name)
auth_key_destination = '~/private_keys/'
pgy_upload_url = 'https://www.pgyer.com/apiv2/app/upload'
testflights_url = 'https://appstoreconnect.apple.com/apps'
qr_code_img_path = os.path.join(build_directory, 'qrCode.jpg')
log_directory = os.path.join(pwd, 'log')  # log folder
packaging_log_path = os.path.join(log_directory, 'packaging.log')
@enum.unique
class DistributionMethodType(enum.Enum):
Development = 'development'
AppStoreConnect = 'app-store'
AdHoc = 'ad-hoc'
class Config(object):
project_name: str
project_scheme_list: list
project_scheme_index: int
apple_account_team_id: str
development_provisioning_profiles: dict
distribution_provisioning_profiles: dict
adhoc_provisioning_profiles: dict
distribution_method: DistributionMethodType
upload_pgy_enable: bool
pgy_api_key: str
upload_app_sotre_enable: bool
    upload_app_store_account_type: int  # 1: use Apple account/password; 2: use apiKey
apple_account_user: str
apple_account_password: str
auth_key_file_name: str
apple_account_apiKey: str
apple_account_apiIssuer: str
send_email_enable: bool
email_host: str
email_port: int
email_sender_user: str
email_sender_psw: str
email_receivers: list
add_build_number_enable: bool
log_enable: bool
app_update_message = ''
github_access_token: str
github_repo_url: str
testflight_external_group_name: str
xcodeproj_path = None
xcworkspace_path = None
is_workspace_project = True
def get_product_scheme():
return Config.project_scheme_list[Config.project_scheme_index]
def get_export_options_plist_path():
plist_path = os.path.join(
build_directory, Config.distribution_method.value+'_ExportOptions.plist')
return plist_path
def get_signing_certificate():
if Config.distribution_method == DistributionMethodType.Development:
return 'Apple Development'
elif Config.distribution_method == DistributionMethodType.AppStoreConnect:
return 'Apple Distribution'
elif Config.distribution_method == DistributionMethodType.AdHoc:
return 'Apple Distribution'
def get_provisioning_profile():
if Config.distribution_method == DistributionMethodType.Development:
return Config.development_provisioning_profiles
elif Config.distribution_method == DistributionMethodType.AppStoreConnect:
return Config.distribution_provisioning_profiles
elif Config.distribution_method == DistributionMethodType.AdHoc:
return Config.adhoc_provisioning_profiles
def get_export_path():
export_path = os.path.join(
build_directory, Config.distribution_method.value)
if export_path in os.listdir(build_directory):
print("%s exists" % (export_path))
else:
print("create dir %s" % (export_path))
subprocess.call('mkdir %s' % (export_path), shell=True)
time.sleep(1)
return export_path
def get_xcode_workspace_path():
if Config.xcworkspace_path is None:
path = search_project_file(
ios_project_path, '%s.xcworkspace' % (Config.project_name))
Config.xcworkspace_path = path
return os.path.join(path)
else:
return os.path.join(Config.xcworkspace_path)
def get_xcode_project_path():
if Config.xcodeproj_path is None:
path = search_project_file(
ios_project_path, '%s.xcodeproj' % (Config.project_name))
Config.xcodeproj_path = path
return os.path.join(path)
else:
return os.path.join(Config.xcodeproj_path)
def get_xcode_project_pbxproj_path():
return os.path.join(get_xcode_project_path(), 'project.pbxproj')
def search_project_file(path, target):
target_path = ''
for root, dirs, fs in os.walk(path):
for d in dirs:
if d == target:
target_path = os.path.join(root, d)
return target_path
for f in fs:
if f == target:
target_path = os.path.join(root, f)
return target_path
if target_path == '':
        tools.fail_print('%s file not found' % (target))
return target_path
def get_target_name():
    return Config.project_name  # by default the target name matches the project name
def get_exported_ipa_path():
ipa_path = os.path.join(
build_directory, '%s/%s.ipa' % (Config.distribution_method.value, Config.project_name))
return ipa_path
def prepare_config():
config_path = os.path.join(pwd, 'config.json')
with open(config_path, 'r') as config_file:
config_json_dic = json.load(config_file)
Config.project_name = config_json_dic['project_name']
Config.project_scheme_list = config_json_dic['project_scheme_list']
Config.project_scheme_index = config_json_dic['project_scheme_index']
Config.apple_account_team_id = config_json_dic['apple_account_team_id']
Config.development_provisioning_profiles = config_json_dic[
'development_provisioning_profiles']
Config.distribution_provisioning_profiles = config_json_dic[
'distribution_provisioning_profiles']
Config.adhoc_provisioning_profiles = config_json_dic['adhoc_provisioning_profiles']
Config.distribution_method = DistributionMethodType(
config_json_dic['distribution_method'])
Config.upload_pgy_enable = config_json_dic['upload_pgy_enable']
Config.pgy_api_key = config_json_dic['pgy_api_key']
Config.upload_app_sotre_enable = config_json_dic['upload_app_sotre_enable']
Config.upload_app_store_account_type = config_json_dic['upload_app_store_account_type']
Config.apple_account_user = config_json_dic['apple_account_user']
Config.apple_account_password = config_json_dic['apple_account_password']
Config.auth_key_file_name = config_json_dic['auth_key_file_name']
Config.apple_account_apiKey = config_json_dic['apple_account_apiKey']
Config.apple_account_apiIssuer = config_json_dic['apple_account_apiIssuer']
Config.send_email_enable = config_json_dic['send_email_enable']
Config.email_host = config_json_dic['email_host']
Config.email_port = config_json_dic['email_port']
Config.email_sender_user = config_json_dic['email_sender_user']
Config.email_sender_psw = config_json_dic['email_sender_psw']
Config.email_receivers = config_json_dic['email_receivers']
Config.add_build_number_enable = config_json_dic['add_build_number_enable']
Config.log_enable = config_json_dic['log_enable']
Config.github_access_token = config_json_dic['github_access_token']
Config.github_repo_url = config_json_dic['github_repo_url']
Config.testflight_external_group_name = config_json_dic['testflight_external_group_name']
if get_xcode_workspace_path() != '':
Config.is_workspace_project = True
else:
Config.is_workspace_project = False
    if get_xcode_project_path() == '':
        tools.fail_print('%s.xcodeproj file not found; place the script directory inside the project directory' % (Config.project_name))
# check project_scheme_list
if len(Config.project_scheme_list) == 0:
tools.warn_print("project_scheme_list未配置,正在获取project的schemes...")
list_project_command_run = subprocess.Popen(
'xcodebuild -list -project %s -json' % (get_xcode_project_path()), shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = list_project_command_run.communicate()
project_info = stdout.decode('utf-8')
project_dict = json.loads(project_info)
        print('project info:\n %s' % (project_dict))
Config.project_scheme_list = project_dict['project']['schemes']
        print('project_scheme_list:\n%s' % (Config.project_scheme_list))
list_project_command_run.wait()
save_packaging_config()
def save_packaging_config():
dic = {
"project_name": Config.project_name,
"project_scheme_list": Config.project_scheme_list,
"project_scheme_index": Config.project_scheme_index,
"apple_account_team_id": Config.apple_account_team_id,
"development_provisioning_profiles": Config.development_provisioning_profiles,
"distribution_provisioning_profiles": Config.distribution_provisioning_profiles,
"distribution_method": Config.distribution_method.value,
"upload_pgy_enable": Config.upload_pgy_enable,
"pgy_api_key": Config.pgy_api_key,
"upload_app_sotre_enable": Config.upload_app_sotre_enable,
"upload_app_store_account_type": Config.upload_app_store_account_type,
"apple_account_user": Config.apple_account_user,
"apple_account_password": Config.apple_account_password,
"auth_key_file_name": Config.auth_key_file_name,
"apple_account_apiKey": Config.apple_account_apiKey,
"apple_account_apiIssuer": Config.apple_account_apiIssuer,
"send_email_enable": Config.send_email_enable,
"email_host": Config.email_host,
"email_port": Config.email_port,
"email_sender_user": Config.email_sender_user,
"email_sender_psw": Config.email_sender_psw,
"email_receivers": Config.email_receivers,
"add_build_number_enable": Config.add_build_number_enable,
"log_enable": Config.log_enable,
"github_access_token": Config.github_access_token,
"github_repo_url": Config.github_repo_url,
"testflight_external_group_name": Config.testflight_external_group_name
}
tools.warn_print('back up configs')
    json_str = json.dumps(dic, ensure_ascii=False, indent=4)  # 4-space indent
config_path = os.path.join(pwd, 'config.json')
with open(config_path, 'w+') as config_file:
config_file.truncate(0)
config_file.write(json_str)
config_file.close()
def create_export_options_plist_file():
plist_value = {
'method': Config.distribution_method.value,
'destination': 'export',
'teamID': Config.apple_account_team_id,
'stripSwiftSymbols': True,
'compileBitcode': True,
'thinning': '<none>',
'signingCertificate': get_signing_certificate(),
'signingStyle': 'manual',
'provisioningProfiles': get_provisioning_profile(),
}
plist_path = get_export_options_plist_path()
print('ExportOptions.plist:\n'+plist_path+'\n')
print(plist_value)
with open(plist_path, 'wb') as fp:
plistlib.dump(plist_value, fp)
return plist_path
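# The generated plist is what xcodebuild consumes when exporting an archive,
# e.g. (illustrative):
#   xcodebuild -exportArchive -archivePath <app>.xcarchive \
#       -exportPath <export dir> -exportOptionsPlist <this plist>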
def prepare_packaging_dir():
tools.notice_print('prepare build dir: ' + build_directory)
subprocess.call(['rm', '-rf', '%s' % (build_directory)])
time.sleep(1)
subprocess.call(['mkdir', '-p', '%s' % (build_directory)])
time.sleep(1)
def prepare_log_dir():
tools.notice_print('prepare log dir: ' + log_directory)
subprocess.call(['rm', '-rf', '%s' % (log_directory)])
time.sleep(1)
subprocess.call(['mkdir', '-p', '%s' % (log_directory)])
time.sleep(1)
def prepare_app_store_upload():
if Config.upload_app_store_account_type == 1:
if len(Config.apple_account_user) == 0 or len(Config.apple_account_password) == 0:
tools.warn_print(
                'Uploading to App Store Connect requires an account/password or an apiKey/apiIssuer; upload_app_store_account_type must be 1 or 2. Please fill in the relevant information in config.json')
tools.end_program(2)
elif Config.upload_app_store_account_type == 2:
if len(Config.apple_account_apiKey) == 0 or len(Config.apple_account_apiIssuer) == 0:
tools.warn_print(
                'Uploading to App Store Connect requires an account/password or an apiKey/apiIssuer; upload_app_store_account_type must be 1 or 2. Please fill in the relevant information in config.json')
tools.end_program(2)
prepare_authkey_dir()
else:
tools.warn_print(
            'Uploading to App Store Connect requires an account/password or an apiKey/apiIssuer; upload_app_store_account_type must be 1 or 2. Please fill in the relevant information in config.json')
tools.end_program(2)
def prepare_authkey_dir():
if Config.auth_key_file_name is None or Config.auth_key_file_name not in os.listdir(auth_key_copy_dir):
tools.warn_print(
            'Uploading to App Store Connect with an apiKey/apiIssuer requires a *.p8 file; copy the *.p8 file into the private_keys directory first. For details see: https://developer.apple.com/documentation/appstoreconnectapi/creating_api_keys_for_app_store_connect_api')
tools.end_program(2)
if auth_key_dir_name in os.listdir(system_home_dir):
print("%s exists" % (auth_key_destination))
else:
print("create dir: %s" % (auth_key_destination))
subprocess.call('cd ~ && mkdir %s' %
(auth_key_destination), shell=True)
time.sleep(1)
key_dir = os.path.expanduser(auth_key_destination)
if Config.auth_key_file_name in os.listdir(key_dir):
print("%s/%s file exists" %
(auth_key_destination, Config.auth_key_file_name))
else:
print("copy file: %s/%s" %
(auth_key_destination, Config.auth_key_file_name))
subprocess.call('cp -r %s %s' %
(auth_key_copy_dir, auth_key_destination), shell=True)
time.sleep(1)
def save_qr_code(qr_code_url):
r = requests.get(qr_code_url)
with open(qr_code_img_path, 'wb') as f:
f.write(r.content)
return qr_code_img_path
def save_packaging_log(start_time='', end_time='', error_message=''):
if Config.log_enable:
prepare_log_dir()
version = tools.get_xcode_project_info(
project_pbxproj_path=get_xcode_project_pbxproj_path(), target_name=get_target_name())
log = {
"strat_time": start_time,
"end_time": end_time,
"erro_message": error_message,
"app_name": Config.project_name,
"scheme": get_product_scheme(),
"update_message": Config.app_update_message,
"version": version[0]+'('+version[1]+')',
"upload_to_pgy": Config.upload_pgy_enable,
"upload_to_app_store": Config.upload_app_sotre_enable,
"auto_add_build": Config.add_build_number_enable,
'signingCertificate': get_signing_certificate(),
"distribution_method": Config.distribution_method.value,
"ipa_path": get_exported_ipa_path(),
"xcodeproj_path": get_xcode_project_path(),
"xcworkspace_path": get_xcode_workspace_path()
}
json_str = json.dumps(log, ensure_ascii=False, indent=4)
with open(packaging_log_path, "w+") as log_file:
log_file.truncate(0)
log_file.write(json_str)
log_file.close()
return json_str
| 37.835 | 208 | 0.703317 | 1,166 | 0.074974 | 0 | 0 | 140 | 0.009002 | 0 | 0 | 3,582 | 0.230324 |
0e6c3388675e9316ea0f6f1944364bc28e508aaa | 798 | py | Python | django_etuovi/utils/testing.py | City-of-Helsinki/django-etuovi | bb8ad9dee0ebfad3bb3e53dd0857dd67eed9a4a5 | [
"MIT"
] | 1 | 2021-03-15T11:28:16.000Z | 2021-03-15T11:28:16.000Z | django_etuovi/utils/testing.py | City-of-Helsinki/django-etuovi | bb8ad9dee0ebfad3bb3e53dd0857dd67eed9a4a5 | [
"MIT"
] | 8 | 2020-09-15T13:18:49.000Z | 2021-06-10T20:36:45.000Z | django_etuovi/utils/testing.py | City-of-Helsinki/django-etuovi | bb8ad9dee0ebfad3bb3e53dd0857dd67eed9a4a5 | [
"MIT"
] | 1 | 2020-12-02T13:18:07.000Z | 2020-12-02T13:18:07.000Z | import typing
def _find_type_origin(type_hint):
actual_type = typing.get_origin(type_hint) or type_hint
if isinstance(actual_type, typing._SpecialForm):
# case of typing.Union[…]
for origins in map(_find_type_origin, typing.get_args(type_hint)):
yield from origins
else:
yield actual_type
def check_dataclass_typing(instance):
for field_name, field_def in instance.__dataclass_fields__.items():
        actual_types = tuple(_find_type_origin(field_def.type))
actual_value = getattr(instance, field_name)
assert isinstance(actual_value, actual_types), (
f"Value {actual_value} of field {field_name}"
f"is not an instance of {actual_types}"
)
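# Minimal usage sketch (illustrative only, not part of the original module):
# the dataclass below is made up to show how check_dataclass_typing() is called.
if __name__ == "__main__":
    import dataclasses

    @dataclasses.dataclass
    class _Listing:
        identifier: str
        price: typing.Union[int, float]

    check_dataclass_typing(_Listing(identifier="A123", price=100))  # passes
    # _Listing(identifier=42, price=100) would raise an AssertionError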
| 33.25 | 76 | 0.680451 | 0 | 0 | 324 | 0.405 | 0 | 0 | 0 | 0 | 111 | 0.13875 |
0e6dcc10a354feb2d47f6cf7f7092dd6f503b59c | 5,128 | py | Python | lauschgeraet/lgiface.py | SySS-Research/Lauschgeraet | b7c14fa3df3227109fbe2a006a4d67ba2645450f | [
"MIT"
] | 25 | 2019-06-12T11:32:48.000Z | 2022-02-25T01:57:38.000Z | lauschgeraet/lgiface.py | SySS-Research/Lauschgeraet | b7c14fa3df3227109fbe2a006a4d67ba2645450f | [
"MIT"
] | 1 | 2019-08-22T12:50:41.000Z | 2019-08-22T12:50:41.000Z | lauschgeraet/lgiface.py | SySS-Research/Lauschgeraet | b7c14fa3df3227109fbe2a006a4d67ba2645450f | [
"MIT"
] | 5 | 2019-07-08T00:08:05.000Z | 2021-09-09T03:55:41.000Z | # -*- coding: utf-8 -*-
from lauschgeraet.args import args, LG_NS_MODE
import subprocess
import os
import sys
import logging
import netns
log = logging.getLogger(__name__)
def get_script_path():
return os.path.dirname(os.path.realpath(sys.argv[0]))
TEST = os.path.exists('testing')
TEST_STATE = {
"lgstate": {
"enabled": True,
"active": True,
"wifi": False,
"status": "active",
}
}
LG_NS = "lg"
# set environment variables for the bash scripts
py_env = {
"LG_ENV_BY_PYTHON": "1",
"ATIF": "lgPeer",
"SWIF": args.SW_IFACE,
"CLIF": args.CL_IFACE,
"GWIF": "lgGateway",
"WIFIIF": "",
"BRIF": 'br0',
"BRIP": '192.0.2.1',
"GWIP": '192.0.2.254', # bogus gateway
"ATNET": '203.0.113.0/24',
"ATIP": '203.0.113.1',
"WIFINET": '198.51.100.0/24',
"RANGE": "61000-62000",
"TMPDIR": os.path.join(
get_script_path(),
"lg-server",
".tmp",
)
}
ns_setup = [
# clean up status file: always start disabled
'rm -f %s/status' % py_env["TMPDIR"],
# create a network namespace
'ip netns add ' + LG_NS,
# Assign the interfaces to the new network namespace
'ip link set netns %s %s' % (LG_NS, py_env["SWIF"]),
'ip link set netns %s %s' % (LG_NS, py_env["CLIF"]),
# create a pair of veth interfaces
'ip link add %s type veth peer name %s' % (py_env["GWIF"], py_env["ATIF"]),
# Assign the AT interface to the network namespace
'ip link set netns %s %s' % (LG_NS, py_env["ATIF"]),
# Assign an address to the gateway interface
'ip addr add %s dev %s' % (py_env["ATNET"].replace('.0/', '.2/'),
py_env["GWIF"]),
'ip link set %s up' % py_env["GWIF"],
# Assign an address to the attacker interface
'ip netns exec %s ip addr add %s/24 dev %s' % (
LG_NS,
py_env["ATIP"],
py_env["ATIF"]),
'ip netns exec %s ip link set %s up' % (LG_NS, py_env["ATIF"]),
# Set loopback up
'ip netns exec %s ip link set lo up' % LG_NS,
# Arrange to masquerade outbound packets from the network
# namespace.
'iptables -t nat -A POSTROUTING -o %s -j MASQUERADE' % py_env["GWIF"],
]
ns_teardown = [
'ip netns del %s' % LG_NS,
'ip link del %s' % py_env["GWIF"],
'iptables -t nat -D POSTROUTING -o %s -j MASQUERADE' % py_env["GWIF"],
]
def run_steps(steps, ignore_errors=False):
    for step in steps:
        try:
            subprocess.check_output(step.split())
        except subprocess.CalledProcessError:
            if ignore_errors:
                # log the failure but keep going, so a partial teardown
                # still removes whatever it can
                log.exception("Exception while managing network namespaces:")
                continue
            else:
                raise
def init_ns():
if LG_NS_MODE:
log.info("Creating network namespace")
run_steps(ns_setup)
def teardown_ns():
if LG_NS_MODE:
log.info("Removing network namespace")
run_steps(ns_teardown, True)
def lg_exec(*args):
cmd = [
os.path.join(
get_script_path(),
"lg-server",
"bin",
args[0],
)
] + list(args[1:])
my_env = {**os.environ.copy(), **py_env}
try:
if LG_NS_MODE:
with netns.NetNS(nsname=LG_NS):
output = subprocess.check_output(cmd,
env=my_env,
stderr=subprocess.STDOUT)
else:
output = subprocess.check_output(cmd,
env=my_env,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
log.exception("Exception while running command: %s" % ' '.join(args))
log.error(e.output.decode())
return b""
return output
def get_lg_status():
if TEST:
return TEST_STATE
output = lg_exec('lg', 'status')
output = output.replace(b'\n', b'').decode()
# the cmd returns one of the following:
# * passive
# * active
# * wifi
# * disabled
# * waiting
# * failed
return {
"lgstate": {
"enabled": not output == 'disabled',
"active": output == 'active',
"wifi": output == 'wifi',
"status": output,
}
}
def set_lg_status(mode):
log.info("Setting Lauschgerät to '%s'" % mode)
if TEST:
global TEST_STATE
output = mode
TEST_STATE = {
"lgstate": {
"enabled": not output == 'disabled',
"active": output == 'active',
"wifi": output == 'wifi',
"status": output,
}
}
return None
try:
out = lg_exec("lg", "set", mode)
except subprocess.CalledProcessError as e:
log.exception("Setting mode failed")
return str(e)
except Exception as e:
log.exception("Setting mode failed")
return str(e)
log.info("Output from 'lg set': %s" % out.decode())
return None
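# Minimal usage sketch (illustrative only, not part of the original module):
# roughly how the calling code is assumed to drive this interface.
if __name__ == "__main__":
    init_ns()                       # no-op unless LG_NS_MODE is set
    print(get_lg_status())          # -> {"lgstate": {"enabled": ..., "status": ...}}
    err = set_lg_status("passive")  # None on success, an error string otherwise
    if err:
        log.error("Could not switch mode: %s" % err)
    teardown_ns()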
| 27.132275 | 79 | 0.530811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,825 | 0.35582 |
0e6e57c6b5f1bcaacca310fcec2b309ab9c13359 | 4,476 | py | Python | 1099parser.py | liyanghuang/1099-parser | 5e1294f7ca74546228d10cb5272c47cec9bd51eb | [
"MIT"
] | 2 | 2021-03-10T01:22:12.000Z | 2021-05-18T01:11:33.000Z | 1099parser.py | liyanghuang/1099-parser | 5e1294f7ca74546228d10cb5272c47cec9bd51eb | [
"MIT"
] | null | null | null | 1099parser.py | liyanghuang/1099-parser | 5e1294f7ca74546228d10cb5272c47cec9bd51eb | [
"MIT"
] | null | null | null | import pdfplumber
import re
import csv
from tqdm import tqdm
print('(When entering file paths on Windows, use \'/\' in the place of \'\\\')')
pdf_path = input('Enter the file path of the 1099 pdf:\n')
# open up the pdf file, keep trying until user enters valid file
valid_pdf = False
while not valid_pdf:
try:
pdf = pdfplumber.open(pdf_path)
valid_pdf = True
except FileNotFoundError:
pdf_path = input('The file entered is not a valid file, please try again:\n')
# open up csv file, use default.csv if nothing is entered
csv_path = input('Enter the file path of the output csv:\n')
if csv_path == '':
print('Nothing was entered, using default file and location: ./default.csv')
csv_path = 'default.csv'
valid_csv = False
while not valid_csv:
try:
csv_file = open(csv_path, 'w', newline='')
csv_writer = csv.writer(csv_file, delimiter=',')
valid_csv = True
except FileNotFoundError:
csv_path = input('The file entered is not a valid file, please try again:\n')
# ask the user if they want to compile multiple transactions into one
# mainly for the 'xxx transactions for xx/xx/xx' lines
compile_multiple = input('Compile multiple transactions? (Y/n):\n').lower()
if compile_multiple == '':
print('Nothing was entered, using default \'n\'')
compile_multiple = 'n'
while not (compile_multiple == 'y' or compile_multiple == 'n'):
compile_multiple = input('Invalid input, try again. Enter \'Y\' or \'n\':\n').lower()
# regex for the symbol line
symbol_re = re.compile(r'.*\/ Symbol:\s*$')
# regex for date line
date_re = re.compile(r'^\d{2}\/\d{2}\/\d{2} .*')
# regex for transaction line
transaction_re = re.compile(r'^\d* transactions for \d{2}\/\d{2}\/\d{2}.*')
# regex for transaction child line
transaction_child_re = re.compile(r'^\d*\.\d* .*')
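# For illustration (hypothetical values): a per-symbol block in the 1099 PDF is
# assumed to look roughly like
#   ACME CORP / Symbol:
#   01/15/21 100.000 1,000.00 01/01/20 900.00 ... 100.00
#   3 transactions for 02/20/21
# which is what the four regexes above are matched against.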
print('Transforming to csv...')
# write csv headers
csv_writer.writerow(['Security', 'Date sold or disposed', 'Quantity', 'Proceeds/Reported gross or net', 'Date acquired', 'Cost or other basis', 'Accrued mkt disc/Wash sale loss disallowed', 'Gain or loss', 'Additional Information'])
for page in tqdm(pdf.pages):
# extract each line from each page
text = page.extract_text()
lines = text.split('\n')
i = 0
while i < len(lines):
symbol_line = lines[i]
# attempt to match symbol line which signals start of a security block
if symbol_re.match(symbol_line):
# once we found symbol look at next lines to figure out what to do next
i += 1
date_line = lines[i]
while i < len(lines) and not symbol_re.match(date_line):
# if we are not compiling multiple transactions, we want to write out each one
if compile_multiple == 'n':
# attemp to match transaction line to see if this security has multiple transactions
if transaction_re.match(date_line):
# if it is, we pull out the number, and the date for all of them
num_transactions = int(date_line.split(' ')[0])
date_dis = date_line.split(' ')[3][:8]
trans_counter = 0
# next we iterate through to capture all the individual transactions
while trans_counter < num_transactions:
i += 1
date_line = lines[i]
if transaction_child_re.match(date_line):
split_date_line = date_line.split(' ')
accrued_disc = '...' if split_date_line[4] == '...' else ' '.join(split_date_line[4:6])
additional_info = ' '.join(split_date_line[6:]) if split_date_line[4] == '...' else ' '.join(split_date_line[7:])
csv_writer.writerow([symbol_line.split(' / ')[0]] + [date_dis] + split_date_line[:4] + [accrued_disc] + [additional_info])
trans_counter += 1
# after we are done, we skip to the next symbol line
while i < len(lines) and not symbol_re.match(date_line):
i += 1
if i < len(lines):
date_line = lines[i]
continue
            # otherwise (compiling enabled, or a plain single-transaction line),
            # write out the lines that match the date-line regex, which includes
            # the compiled-transaction summary lines
if date_re.match(date_line):
split_date_line = date_line.split(' ')
accrued_disc = '...' if split_date_line[5] == '...' else ' '.join(split_date_line[5:7])
additional_info = ' '.join(split_date_line[7:]) if split_date_line[5] == '...' else ' '.join(split_date_line[8:])
csv_writer.writerow([symbol_line.split(' / ')[0]] + split_date_line[:5] + [accrued_disc] +[additional_info] )
            i += 1
if i < len(lines):
date_line = lines[i]
i -= 1
i += 1
# close files
csv_file.close()
pdf.close() | 41.444444 | 232 | 0.678731 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,959 | 0.437668 |
0e6f3670c8eeedc919a111469dde8bf049ce3484 | 365 | py | Python | awards/migrations/0005_rename_avg_rate_rating_average.py | Maryan23/Laurels | cde7332ac9e5a4032a4d2da725bb5b70c66ce9ff | [
"MIT"
] | null | null | null | awards/migrations/0005_rename_avg_rate_rating_average.py | Maryan23/Laurels | cde7332ac9e5a4032a4d2da725bb5b70c66ce9ff | [
"MIT"
] | null | null | null | awards/migrations/0005_rename_avg_rate_rating_average.py | Maryan23/Laurels | cde7332ac9e5a4032a4d2da725bb5b70c66ce9ff | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-12-13 21:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('awards', '0004_auto_20211213_1253'),
]
operations = [
migrations.RenameField(
model_name='rating',
old_name='avg_rate',
new_name='average',
),
]
| 19.210526 | 47 | 0.586301 | 280 | 0.767123 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.293151 |
0e6fd8b9f77fd0e293d000aaeb0b2a9d87bb7355 | 3,955 | py | Python | Updated_Self_supervised_training/vq_vae_decoder.py | nerdk312/AMDIM_Decoder | 7ca7eb869801d5fbe80b6bc3bb9ca4a2ba4b7238 | [
"MIT"
] | null | null | null | Updated_Self_supervised_training/vq_vae_decoder.py | nerdk312/AMDIM_Decoder | 7ca7eb869801d5fbe80b6bc3bb9ca4a2ba4b7238 | [
"MIT"
] | null | null | null | Updated_Self_supervised_training/vq_vae_decoder.py | nerdk312/AMDIM_Decoder | 7ca7eb869801d5fbe80b6bc3bb9ca4a2ba4b7238 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
def _make_residual(channels): # Nawid- Performs a 3x3 convolution followed by a 1x1 convolution - The 3x3 convolution is padded and so the overall shape is the same.
return nn.Sequential(
nn.ReLU(),
nn.Conv2d(channels, channels, 3, padding=1),
nn.ReLU(),
nn.Conv2d(channels, channels, 1),
)
class HalfEncoder(nn.Module):
"""
An encoder that cuts the input size in half in both
dimensions.
"""
def __init__(self, in_channels, out_channels):
super(HalfEncoder,self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels,3, stride=2, padding=1)
self.residual1 = _make_residual(out_channels)
self.residual2 = _make_residual(out_channels)
def forward(self,x):
x = self.conv(x)
x = x + self.residual1(x)
x = x + self.residual2(x)
return x
class HalfQuarterDecoder(nn.Module):
"""
A decoder that takes two inputs. The first one is
upsampled by a factor of two, and then combined with
the second input which is further upsampled by a
factor of four.
"""
def __init__(self,in_channels, out_channels):
super(HalfQuarterDecoder, self).__init__()
self.residual1 = _make_residual(in_channels)
self.residual2 = _make_residual(in_channels)
self.conv1 = nn.ConvTranspose2d(in_channels, in_channels, 4, stride=2, padding=1)
self.conv2 = nn.Conv2d(in_channels * 2, in_channels, 3, padding=1)
self.residual3 = _make_residual(in_channels)
self.residual4 = _make_residual(in_channels)
self.conv3 = nn.ConvTranspose2d(in_channels, in_channels, 4, stride=2, padding=1)
self.conv4 = nn.ConvTranspose2d(in_channels, out_channels, 4, stride=2, padding=1)
def forward(self,inputs):
assert len(inputs) == 2
# Upsample the top input to match the shape of the
# bottom input.
x = inputs[0]
x = x + self.residual1(x)
x = x + self.residual2(x)
x = F.relu(x)
        x = self.conv1(x) # Nawid - This is a convolution transpose which makes the top input match the shape of the bottom input
x = F.relu(x)
# Mix together the bottom and top inputs.
        x = torch.cat([x, inputs[1]], dim=1) # Nawid - Concatenate the upsampled top feature map with the bottom feature map
        x = self.conv2(x) # Nawid - Reduces the channel count back to in_channels (spatial size unchanged)
x = x + self.residual3(x)
x = x + self.residual4(x)
x = F.relu(x)
x = self.conv3(x) # Nawid - Upsamples
x = F.relu(x)
x = self.conv4(x) # Nawid - Upsamples
return x
class VQ_VAE_Decoder(nn.Module):
'''
Performs the encoder to get the second input and then performs the decoder with both of the different inputs
'''
def __init__(self,in_channels,intermediate_channels, out_channels):
super(VQ_VAE_Decoder, self).__init__()
self.conv = nn.Conv2d(in_channels, intermediate_channels,3, stride=1, padding=1)
self.residual1 = _make_residual(intermediate_channels)
        self.encoder = HalfEncoder(intermediate_channels, intermediate_channels)
self.halfquarterdecoder = HalfQuarterDecoder(intermediate_channels, out_channels)
def forward(self,x):
half_quarter_inputs = []
x = x.view(-1, 4, 16, 16)
x = self.conv(x) # Nawid - This gives c x 16 x 16
#print('conv1',x.size())
        x = x + self.residual1(x) # Nawid - This should give c x 16 x 16 (skip connection, matching the residual pattern above)
#print('first residual',x.size())
half_x = self.encoder(x) # Nawid - This should give c x 8 x 8
#print('encoded shape',half_x.size())
half_quarter_inputs.append(half_x) # Nawid - half_x into the network
half_quarter_inputs.append(x) # Nawid - Places x into the inputs
reconstruction = self.halfquarterdecoder(half_quarter_inputs)
return reconstruction
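# Quick shape check (illustrative only; the channel sizes are made up, and
# in_channels must be 4 to match the hard-coded view(-1, 4, 16, 16) above):
if __name__ == "__main__":
    decoder = VQ_VAE_Decoder(in_channels=4, intermediate_channels=32, out_channels=3)
    latents = torch.randn(2, 4 * 16 * 16)
    print(decoder(latents).shape)  # expected: torch.Size([2, 3, 64, 64])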
| 41.197917 | 165 | 0.653603 | 3,550 | 0.897598 | 0 | 0 | 0 | 0 | 0 | 0 | 1,151 | 0.291024 |
0e707c6695fa41c264c8fbc5f51ed3834836d8ab | 863 | py | Python | spider/featurization/audio_featurization.py | Rosna/P4ML-UI | edf0dd830588f03b197e4d6532830a5aedd88424 | [
"Apache-2.0"
] | 1 | 2021-11-05T17:42:47.000Z | 2021-11-05T17:42:47.000Z | spider/featurization/audio_featurization.py | Rosna/P4ML-UI | edf0dd830588f03b197e4d6532830a5aedd88424 | [
"Apache-2.0"
] | null | null | null | spider/featurization/audio_featurization.py | Rosna/P4ML-UI | edf0dd830588f03b197e4d6532830a5aedd88424 | [
"Apache-2.0"
] | 2 | 2019-02-21T18:29:51.000Z | 2019-09-02T21:21:26.000Z | import os
import librosa
import numpy as np
import pandas as pd
from spider.featurization.audio_featurization import AudioFeaturization
# Read the test data csv
csv_file = 'data/testAudioData.csv'
df = pd.read_csv(csv_file)
# Read in the audio data specified by the csv
data = []
for idx, row in df.iterrows():
filename = os.path.join('data/raw_data', row['filename'])
datum, sampling_rate = librosa.load(filename)
data.append(datum)
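# note: librosa.load resamples to its default rate (22050 Hz) unless sr=None is
# passed, so sampling_rate (taken here from the last file loaded) is assumed to
# be the same for every clip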
# Optional audio featurization parameter specification
frame_length = 0.050
overlap = 0.025
# Request feature generation
print("Generating features...")
featurizer = AudioFeaturization(
sampling_rate=sampling_rate,
frame_length=frame_length,
overlap=overlap
)
features = featurizer.produce(data)
# Save features to disk
with open('features.csv', 'w+') as f:
for feature in features:
np.savetxt(f, feature) | 23.972222 | 71 | 0.772885 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 265 | 0.307068 |
0e707d91371c2482346143a84f5ea61d4dc577a1 | 497 | py | Python | Telecom.py | BeatrizRCorreia/health-informatics-project1 | bb3d619ac7b0509758b041db8970b72121a07b3d | [
"MIT"
] | null | null | null | Telecom.py | BeatrizRCorreia/health-informatics-project1 | bb3d619ac7b0509758b041db8970b72121a07b3d | [
"MIT"
] | null | null | null | Telecom.py | BeatrizRCorreia/health-informatics-project1 | bb3d619ac7b0509758b041db8970b72121a07b3d | [
"MIT"
] | null | null | null | class Telecom:
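    """Value object for a contact record (the fields appear to follow the
    FHIR ContactPoint element: system, value, use, rank, period)."""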
def __init__(self, contact_db_id, system, value, use, rank, period):
self.contact_db_id = contact_db_id
self.system = system
self.value = value
self.use = use
self.rank = rank
self.period = period
def get_contact_db_id(self):
return self.contact_db_id
def get_system(self):
return self.system
def get_value(self):
return self.value
def get_use(self):
return self.use
def get_rank(self):
return self.rank
def get_period(self):
return self.period | 18.407407 | 69 | 0.72837 | 497 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0e70ece1a0f0d81315d96404c33e6fd25db67704 | 7,617 | py | Python | anvil/__main__.py | timjr/Openstack-Anvil | 5a8199cfef2a7cd83d1886aaa6aa8e0b24cd589d | [
"Apache-2.0"
] | 1 | 2021-06-29T06:09:58.000Z | 2021-06-29T06:09:58.000Z | anvil/__main__.py | timjr/Openstack-Anvil | 5a8199cfef2a7cd83d1886aaa6aa8e0b24cd589d | [
"Apache-2.0"
] | null | null | null | anvil/__main__.py | timjr/Openstack-Anvil | 5a8199cfef2a7cd83d1886aaa6aa8e0b24cd589d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import time
import traceback as tb
sys.path.insert(0, os.path.abspath(os.pardir))
sys.path.insert(0, os.path.abspath(os.getcwd()))
from anvil import actions
from anvil import colorizer
from anvil import distro
from anvil import env
from anvil import exceptions as excp
from anvil import log as logging
from anvil import opts
from anvil import persona
from anvil import settings
from anvil import shell as sh
from anvil import utils
from anvil.pprint import center_text
LOG = logging.getLogger()
ANVIL_DIR = "/etc/anvil/"
SETTINGS_FN = "/etc/anvil/settings.yaml"
def run(args):
"""
Starts the execution after args have been parsed and logging has been setup.
Arguments: N/A
    Returns: N/A (raises OptionException when the options are invalid)
"""
LOG.debug("CLI arguments are:")
utils.log_object(args, logger=LOG, level=logging.DEBUG, item_max_len=128)
# Keep the old args around so we have the full set to write out
saved_args = dict(args)
action = args.pop("action", '').strip().lower()
if action not in actions.names():
raise excp.OptionException("Invalid action name %r specified!" % (action))
# Determine + setup the root directory...
# If not provided attempt to locate it via the environment control files
args_root_dir = args.pop("dir")
root_dir = env.get_key('INSTALL_ROOT')
if not root_dir:
root_dir = args_root_dir
if not root_dir:
root_dir = sh.joinpths(sh.gethomedir(), 'openstack')
root_dir = sh.abspth(root_dir)
sh.mkdir(root_dir)
persona_fn = args.pop('persona_fn')
if not persona_fn:
raise excp.OptionException("No persona file name specified!")
if not sh.isfile(persona_fn):
raise excp.OptionException("Invalid persona file %r specified!" % (persona_fn))
# !!
# Here on out we should be using the logger (and not print)!!
# !!
# Stash the dryrun value (if any)
if 'dryrun' in args:
env.set("ANVIL_DRYRUN", str(args['dryrun']))
# Ensure the anvil etc dir is there if others are about to use it
ensure_anvil_dir()
# Load the distro
dist = distro.load(settings.DISTRO_DIR)
# Load + verify the person
try:
persona_obj = persona.load(persona_fn)
persona_obj.verify(dist)
except Exception as e:
raise excp.OptionException("Error loading persona file: %s due to %s" % (persona_fn, e))
# Get the object we will be running with...
runner_cls = actions.class_for(action)
runner = runner_cls(distro=dist,
root_dir=root_dir,
name=action,
cli_opts=args)
(repeat_string, line_max_len) = utils.welcome()
print(center_text("Action Runner", repeat_string, line_max_len))
# Now that the settings are known to work, store them for next run
store_current_settings(saved_args)
LOG.info("Starting action %s on %s for distro: %s",
colorizer.quote(action), colorizer.quote(utils.iso8601()),
colorizer.quote(dist.name))
LOG.info("Using persona: %s", colorizer.quote(persona_fn))
LOG.info("In root directory: %s", colorizer.quote(root_dir))
start_time = time.time()
runner.run(persona_obj)
end_time = time.time()
pretty_time = utils.format_time(end_time - start_time)
LOG.info("It took %s seconds or %s minutes to complete action %s.",
colorizer.quote(pretty_time['seconds']), colorizer.quote(pretty_time['minutes']), colorizer.quote(action))
def load_previous_settings():
settings_prev = None
try:
# Don't use sh here so that we always
# read this (even if dry-run)
with open(SETTINGS_FN, 'r') as fh:
settings_prev = utils.load_yaml_text(fh.read())
except Exception:
# Errors could be expected on format problems
# or on the file not being readable....
pass
return settings_prev
def ensure_anvil_dir():
if not sh.isdir(ANVIL_DIR):
with sh.Rooted(True):
os.makedirs(ANVIL_DIR)
(uid, gid) = sh.get_suids()
sh.chown_r(ANVIL_DIR, uid, gid)
def store_current_settings(c_settings):
try:
# Remove certain keys that just shouldn't be saved
to_save = dict(c_settings)
for k in ['action', 'verbose', 'dryrun']:
if k in c_settings:
to_save.pop(k, None)
with sh.Rooted(True):
with open(SETTINGS_FN, 'w') as fh:
fh.write("# Anvil last used settings\n")
fh.write(utils.add_header(SETTINGS_FN, utils.prettify_yaml(to_save)))
fh.flush()
(uid, gid) = sh.get_suids()
sh.chown(SETTINGS_FN, uid, gid)
except Exception as e:
LOG.debug("Failed writing to %s due to %s", SETTINGS_FN, e)
def main():
"""
    Starts execution without injecting variables
    into the global namespace. Ensures that logging
    is set up and that sudo access is available and in-use.
    Arguments: N/A
    Returns: 0 for success, 1 for failure
"""
# Do this first so people can see the help message...
args = opts.parse(load_previous_settings())
# Configure logging levels
log_level = logging.INFO
if args['verbose'] or args['dryrun']:
log_level = logging.DEBUG
logging.setupLogging(log_level)
LOG.debug("Log level is: %s" % (logging.getLevelName(log_level)))
def clean_exc(exc):
msg = str(exc).strip()
if msg.endswith(".") or msg.endswith("!"):
return msg
else:
return msg + "."
    def traceback_fn():
        # See: http://docs.python.org/library/traceback.html
        # When the traceback is not None you get more detailed info
        # about the exception
        (exc_type, exc_value, exc_tb) = sys.exc_info()
        if log_level >= logging.INFO:
            # only show the stack trace when running verbosely (debug)
            exc_tb = None
        tb.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
try:
# Drop to usermode
sh.user_mode(quiet=False)
except excp.AnvilException as e:
print(clean_exc(e))
print("This program should be running via %s, is it not?" % (colorizer.quote('sudo', quote_color='red')))
return 1
try:
run(args)
utils.goodbye(True)
return 0
except excp.OptionException as e:
print(clean_exc(e))
print("Perhaps you should try %s" % (colorizer.quote('--help', quote_color='red')))
return 1
except Exception:
utils.goodbye(False)
traceback_fn()
return 1
if __name__ == "__main__":
rc = main()
# Switch back to root mode for anything
# that needs to run in that mode for cleanups and etc...
try:
sh.root_mode(quiet=False)
except excp.AnvilException:
pass
sys.exit(rc)
| 32.004202 | 119 | 0.645661 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,838 | 0.372588 |
0e713875dd8d9fbfe017a19ab32d110467c9d404 | 524 | py | Python | ht13_Ex28_MaxThree.py | mikksillaste/aima-python | cf7b90bccf10e682f073bb5dbdc2056be6f4eb40 | [
"MIT"
] | null | null | null | ht13_Ex28_MaxThree.py | mikksillaste/aima-python | cf7b90bccf10e682f073bb5dbdc2056be6f4eb40 | [
"MIT"
] | null | null | null | ht13_Ex28_MaxThree.py | mikksillaste/aima-python | cf7b90bccf10e682f073bb5dbdc2056be6f4eb40 | [
"MIT"
] | null | null | null | # the user enters 3 numbers
number1 = int(input("Sisesta esimene arv: "))
number2 = int(input("Sisesta teine arv: "))
number3 = int(input("Sisesta kolmas arv: "))
# function that returns the largest of the three entered numbers
def largest(number1, number2, number3):
    # start from the first number so the function also works when all
    # inputs are non-positive (starting from 0 would be wrong there)
    biggest = number1
    if number2 > biggest:
        biggest = number2
    if number3 > biggest:
        biggest = number3
    return biggest
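# (the built-in max(number1, number2, number3) gives the same result)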
print(largest(number1, number2, number3)) | 26.2 | 59 | 0.685115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.293893 |