Dataset schema (⌀ marks a column that may be null):

| column | dtype | min | max |
|---|---|---|---|
| hexsha | string (length) | 40 | 40 |
| size | int64 | 3 | 1.03M |
| ext | string (10 classes) | | |
| lang | string (1 class) | | |
| max_stars_repo_path | string (length) | 3 | 972 |
| max_stars_repo_name | string (length) | 6 | 130 |
| max_stars_repo_head_hexsha | string (length) | 40 | 78 |
| max_stars_repo_licenses | list (length) | 1 | 10 |
| max_stars_count ⌀ | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string (length) | 24 | 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string (length) | 24 | 24 |
| max_issues_repo_path | string (length) | 3 | 972 |
| max_issues_repo_name | string (length) | 6 | 130 |
| max_issues_repo_head_hexsha | string (length) | 40 | 78 |
| max_issues_repo_licenses | list (length) | 1 | 10 |
| max_issues_count ⌀ | int64 | 1 | 116k |
| max_issues_repo_issues_event_min_datetime ⌀ | string (length) | 24 | 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string (length) | 24 | 24 |
| max_forks_repo_path | string (length) | 3 | 972 |
| max_forks_repo_name | string (length) | 6 | 130 |
| max_forks_repo_head_hexsha | string (length) | 40 | 78 |
| max_forks_repo_licenses | list (length) | 1 | 10 |
| max_forks_count ⌀ | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string (length) | 24 | 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string (length) | 24 | 24 |
| content | string (length) | 3 | 1.03M |
| avg_line_length | float64 | 1.13 | 941k |
| max_line_length | int64 | 2 | 941k |
| alphanum_fraction | float64 | 0 | 1 |
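The rows below follow this schema, one source file per row, with the file body stored in the `content` column and the remaining columns holding repository metadata. As a minimal sketch of how such rows could be consumed, assuming they were exported to a local JSON Lines file (`rows.jsonl` is a hypothetical name, not part of this dataset), the following iterates the records and reads a few of the columns documented above:

```python
import json

# Minimal sketch, assuming the rows above were exported to a local JSON Lines
# file named rows.jsonl (hypothetical); each line is one record of the schema.
with open("rows.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # `content` holds the file body; the other columns are repo metadata.
        print(row["max_stars_repo_name"], row["max_stars_repo_path"],
              row["size"], row["max_stars_count"])
```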
hexsha: 74bdc25410fe2d217d48df4b922d92a0673bcd3c | size: 2,078 | ext: py | lang: Python
max_stars: repo_path=contrib/devtools/check-doc.py, repo_name=martin-braun/Nuwa, head_hexsha=ea9924c953db44a8a2844c6bb63cb3a18f4e658f, licenses=["MIT"], count=1, stars_event 2021-12-30T23:57:59.000Z to 2021-12-30T23:57:59.000Z
max_issues: repo_path=contrib/devtools/check-doc.py, repo_name=martin-braun/Nuwa, head_hexsha=ea9924c953db44a8a2844c6bb63cb3a18f4e658f, licenses=["MIT"], count=null
max_forks: repo_path=contrib/devtools/check-doc.py, repo_name=martin-braun/Nuwa, head_hexsha=ea9924c953db44a8a2844c6bb63cb3a18f4e658f, licenses=["MIT"], count=1, forks_event 2022-01-10T22:20:22.000Z to 2022-01-10T22:20:22.000Z
content:
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
import sys
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/{}'.format(FOLDER_GREP)
CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' {} | grep -v '{}'".format(CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' {}".format(CMD_ROOT_DIR)
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-sendfreetransactions', '-checklevel', '-liquidityprovider', '-anonymizenuwacoinamount'])
def main():
used = check_output(CMD_GREP_ARGS, shell=True, universal_newlines=True)
docd = check_output(CMD_GREP_DOCS, shell=True, universal_newlines=True)
args_used = set(re.findall(re.compile(REGEX_ARG), used))
args_docd = set(re.findall(re.compile(REGEX_DOC), docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print("Args used : {}".format(len(args_used)))
print("Args documented : {}".format(len(args_docd)))
print("Args undocumented: {}".format(len(args_need_doc)))
print(args_need_doc)
print("Args unknown : {}".format(len(args_unknown)))
print(args_unknown)
sys.exit(len(args_need_doc))
if __name__ == "__main__":
main()
avg_line_length: 43.291667 | max_line_length: 303 | alphanum_fraction: 0.688643

hexsha: 1f046d99249b240068aa49f3560b1cabb09ab0b8 | size: 4,468 | ext: py | lang: Python
max_stars: repo_path=webrecorder/webrecorder/standalone/webrecorder_full.py, repo_name=ssteo/webrecorder, head_hexsha=8690608171f35c475842df8fabbe666f5058193d, licenses=["Apache-2.0"], count=1,217, stars_event 2015-10-16T06:37:32.000Z to 2020-06-08T14:02:08.000Z
max_issues: repo_path=webrecorder/webrecorder/standalone/webrecorder_full.py, repo_name=ssteo/webrecorder, head_hexsha=8690608171f35c475842df8fabbe666f5058193d, licenses=["Apache-2.0"], count=476, issues_event 2015-10-15T22:24:07.000Z to 2020-06-11T07:43:29.000Z
max_forks: repo_path=webrecorder/webrecorder/standalone/webrecorder_full.py, repo_name=nla/webrecorder, head_hexsha=29be63e71999de6a4bc1144a47d59137fab34239, licenses=["Apache-2.0"], count=101, forks_event 2015-10-16T01:24:07.000Z to 2020-05-18T00:49:49.000Z
content:
import os
import sys
import base64
import shutil
from webrecorder.standalone.standalone import StandaloneRunner
from webrecorder.models.usermanager import CLIUserManager
from webrecorder.utils import sanitize_title
from webrecorder.standalone.localredisserver import LocalRedisServer
import redis
from six.moves.urllib.parse import urlsplit
# ============================================================================
class WebrecorderRunner(StandaloneRunner):
REDIS_PORT = 7679
def __init__(self, argres):
self.root_dir = argres.root_dir
self.redis_dir = os.path.join(self.root_dir, 'redis')
self.user_manager = None
self.browser_redis = None
self.default_user = argres.default_user
self.browser_id = base64.b32encode(os.urandom(15)).decode('utf-8')
self.dat_share_port = argres.dat_share_port
self.behaviors_tarfile = argres.behaviors_tarfile
super(WebrecorderRunner, self).__init__(argres, rec_port=0)
if not argres.no_browser:
import webbrowser
webbrowser.open_new(os.environ['APP_HOST'] + '/')
def _runner_init(self):
os.environ['WR_USER_CONFIG'] = 'pkg://webrecorder/config/standalone_recorder.yaml'
os.environ['SECRET_KEY'] = base64.b32encode(os.urandom(75)).decode('utf-8')
os.environ['RECORD_ROOT'] = os.path.join(self.root_dir, 'warcs', '')
os.environ['STORAGE_ROOT'] = os.path.join(self.root_dir, 'storage', '')
os.environ['REDIS_BROWSER_URL'] = 'redis://localhost:{0}/0'.format(self.REDIS_PORT)
os.environ['REDIS_SESSION_URL'] = 'redis://localhost:{0}/0'.format(self.REDIS_PORT)
os.environ['REDIS_BASE_URL'] = 'redis://localhost:{0}/1'.format(self.REDIS_PORT)
os.environ['ALLOW_DAT'] = '1'
os.environ['DAT_SHARE_HOST'] = 'localhost'
if self.dat_share_port:
os.environ['DAT_SHARE_PORT'] = self.dat_share_port
os.environ['BEHAVIORS_DIR'] = os.path.join(self.root_dir, 'behaviors')
os.environ['BROWSER_ID'] = self.browser_id
if self.behaviors_tarfile:
os.environ['BEHAVIORS_TARFILE'] = self.behaviors_tarfile
self.redis_server = LocalRedisServer(port=self.REDIS_PORT,
redis_dir=self.redis_dir)
self.browser_redis = self.redis_server.start()
self.user_manager = CLIUserManager()
if not self.default_user:
return
if not self.user_manager.check_user(self.default_user):
if not self.user_manager.is_username_available(self.default_user):
self.default_user = 'user-' + sanitize_title(self.default_user)
res = self.user_manager.create_user(
email='{0}@localhost'.format(self.default_user),
username=self.default_user,
passwd='LocalUser1',
role='admin',
name=self.default_user)
print('DEFAULT_USER=' + self.default_user, flush=True)
# set max_size to available free space, if possible
try:
res = shutil.disk_usage(self.root_dir)
max_size = res[2]
user = self.user_manager.all_users[self.default_user]
user.set_prop('max_size', max_size)
except Exception as e:
print(e)
os.environ['AUTO_LOGIN_USER'] = self.default_user
def close(self):
for key in self.browser_redis.scan_iter('up:{0}:*'.format(self.browser_id)):
print('Delete: ' + key)
self.browser_redis.delete(key)
super(WebrecorderRunner, self).close()
@classmethod
def add_args(cls, parser):
parser.add_argument('-d', '--root-dir',
default='./data/',
help='Root Data Dir')
parser.add_argument('-u', '--default-user',
default=None,
help='Create & Auto-Login as Default User')
parser.add_argument('--dat-share-port',
default=None,
help='Dat Share API server port')
parser.add_argument('--behaviors-tarfile',
default=None,
help='Behaviors Tarfile')
# ============================================================================
webrecorder = WebrecorderRunner.main
if __name__ == "__main__":
webrecorder()
avg_line_length: 33.343284 | max_line_length: 91 | alphanum_fraction: 0.597135

hexsha: 015a9cd0da4d60c54b523018013d2059bbe1d238 | size: 4,392 | ext: py | lang: Python
max_stars: repo_path=Providers/nxOMSAutomationWorker/automationworker/3.x/worker/streamhandler.py, repo_name=assing/PowerShell-DSC-for-Linux-1, head_hexsha=2747634e5e82fbbfcc87fff5e2c1cb0b91187d47, licenses=["MIT"], count=1, stars_event 2020-05-23T11:56:52.000Z to 2020-05-23T11:56:52.000Z
max_issues: repo_path=Providers/nxOMSAutomationWorker/automationworker/3.x/worker/streamhandler.py, repo_name=assing/PowerShell-DSC-for-Linux-1, head_hexsha=2747634e5e82fbbfcc87fff5e2c1cb0b91187d47, licenses=["MIT"], count=null
max_forks: repo_path=Providers/nxOMSAutomationWorker/automationworker/3.x/worker/streamhandler.py, repo_name=assing/PowerShell-DSC-for-Linux-1, head_hexsha=2747634e5e82fbbfcc87fff5e2c1cb0b91187d47, licenses=["MIT"], count=null
content:
#!/usr/bin/env python3
# ====================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# ====================================
"""Stream handler module. Is used to process output from stdout and stderr"""
import codecs
import traceback
from threading import Thread
import jrdsclient
import tracer
PREFIX_DEBUG = "DEBUG:"
PREFIX_ERROR = "ERROR:"
PREFIX_VERBOSE = "VERBOSE:"
PREFIX_WARNING = "WARNING:"
STREAM_TYPE_DEBUG = "Debug"
STREAM_TYPE_ERROR = "Error"
STREAM_TYPE_OUTPUT = "Output"
STREAM_TYPE_VERBOSE = "Verbose"
STREAM_TYPE_WARNING = "Warning"
class StreamHandler(Thread):
"""Stream handler class."""
def __init__(self, job_data, runtime_process, jrds_client):
"""
:type job_data: jrdsclient.JobData
:type runtime_process :
:type jrds_client : jrdsclient.JRDSClient
"""
Thread.__init__(self)
self.daemon = True
self.job_data = job_data
self.runtime_process = runtime_process
self.jrds_client = jrds_client
def run(self):
"""Monitor the job's subprocess for output (which will be uploaded as streams).
Notes:
PowerShell stdout : http://stackoverflow.com/questions/22349139/utf-8-output-from-powershell
IMPORTANT: Do not log streams to cloud.
"""
stream_count = 0
while True:
try:
output = codecs.getwriter('utf8')(self.runtime_process.stdout).readline().decode()
if output == '' and self.runtime_process.poll() is not None:
break
elif output:
if output.startswith(PREFIX_DEBUG.lower()) or \
output.startswith(PREFIX_DEBUG.upper()) or \
output.startswith(PREFIX_DEBUG.capitalize()):
self.process_debug_stream(stream_count, output)
elif output.startswith(PREFIX_ERROR.lower()) or \
output.startswith(PREFIX_ERROR.upper()) or \
output.startswith(PREFIX_ERROR.capitalize()):
self.process_error_stream(stream_count, output)
elif output.startswith(PREFIX_VERBOSE.lower()) or \
output.startswith(PREFIX_VERBOSE.upper()) or \
output.startswith(PREFIX_VERBOSE.capitalize()):
self.process_verbose_stream(stream_count, output)
elif output.startswith(PREFIX_WARNING.lower()) or \
output.startswith(PREFIX_WARNING.upper()) or \
output.startswith(PREFIX_WARNING.capitalize()):
self.process_warning_stream(stream_count, output)
else:
self.process_output_stream(stream_count, output)
stream_count += 1
# leave trace at the end to prevent encoding issue from pushing streams to cloud
# leave this as debug trace to prevent logging customer streams to automation logs
tracer.log_debug_trace("STDOUT : " + str(output.strip()))
except:
tracer.log_sandbox_job_streamhandler_unhandled_exception(self.job_data.job_id, traceback.format_exc())
continue
tracer.log_sandbox_job_streamhandler_processing_complete(self.job_data.job_id)
def process_debug_stream(self, stream_count, output):
self.set_stream(stream_count, STREAM_TYPE_DEBUG, output)
pass
def process_error_stream(self, stream_count, output):
self.set_stream(stream_count, STREAM_TYPE_ERROR, output)
pass
def process_output_stream(self, stream_count, output):
self.set_stream(stream_count, STREAM_TYPE_OUTPUT, output)
pass
def process_verbose_stream(self, stream_count, output):
self.set_stream(stream_count, STREAM_TYPE_VERBOSE, output)
pass
def process_warning_stream(self, stream_count, output):
self.set_stream(stream_count, STREAM_TYPE_WARNING, output)
pass
def set_stream(self, stream_count, stream_type, output):
self.jrds_client.set_stream(self.job_data.job_id, self.job_data.runbook_version_id, output.strip(),
stream_type, stream_count)
avg_line_length: 40.666667 | max_line_length: 118 | alphanum_fraction: 0.617259

hexsha: 6701623e8fd6e8ce499126dbd697e9e2d0646d7f | size: 2,094 | ext: py | lang: Python
max_stars: repo_path=weather_scrape_print.py, repo_name=kmuldrew/Home_Automation, head_hexsha=952427a3c4d189e3998e4269f2b45374364a4d17, licenses=["MIT"], count=null
max_issues: repo_path=weather_scrape_print.py, repo_name=kmuldrew/Home_Automation, head_hexsha=952427a3c4d189e3998e4269f2b45374364a4d17, licenses=["MIT"], count=null
max_forks: repo_path=weather_scrape_print.py, repo_name=kmuldrew/Home_Automation, head_hexsha=952427a3c4d189e3998e4269f2b45374364a4d17, licenses=["MIT"], count=null
content:
#Scrapes weather data and writes to disk
import time
from weather import weather_day
from weather import forecast_day
from apscheduler.schedulers.blocking import BlockingScheduler
#start the scheduler
sched = BlockingScheduler()
sched.daemonic = True
def write_weather_data():
w_data = weather_day()
cur_cond = []
cur_cond.append(w_data[0])
cur_T = float(w_data[1])
cur_data = []
if w_data[-1] == -500:
ind = [2,3,5,6,7,8,9,15,16,19,20,23,24,27,28,1]
else:
ind = [3,4,6,7,8,9,10,16,17,20,21,24,25,28,29,2]
for i in ind:
cur_data.append(int(float(w_data[i])))
day_data = forecast_day()
hour = int(day_data[0][0].text.split(':')[0])
Tempdata = []
for i in range(0,24):
T = int(day_data[1][i].text)
Tempdata.append(T)
filename = "/var/www/FlaskApp/FlaskApp/weather.dat"
file = open(filename, "w")
file.write("%d\n" % len(Tempdata))
for item in Tempdata:
file.write("%d\n" % item)
file.write("%.1f\n" % cur_T)
file.write("%d\n" % len(cur_data))
for item in cur_data:
file.write("%d\n" % item)
file.write(cur_cond[0]+"\n")
file.write("%d\n" % hour)
file.write("%d\n" % int(time.time()))
file.close()
#Tempdata = []
#cur_data = []
#cur_cond = []
#cur_T = 0.0
#with open(filename, "r") as infile:
#filedata = infile.read()
#infile.close()
#file_list = filedata.splitlines()
#Tempdata_len = int(file_list[0])
#for i in range(1,Tempdata_len+1):
#Tempdata.append(int(file_list[i]))
#cur_T = float(file_list[i+1])
#cur_data_len = int(file_list[i+2])
#for i in range(i+3,cur_data_len+i+3):
#cur_data.append(int(file_list[i]))
#cur_cond.append(file_list[-3])
#hour = int(file_list[-2])
#ts = int(file_list[-1])
#write_weather_data()
#Schedules report_energy_use to be run twice each hour, with 50% overlap
sched.add_job(write_weather_data, 'interval', minutes=5)
sched.start()
avg_line_length: 30.347826 | max_line_length: 72 | alphanum_fraction: 0.592646

hexsha: af584f26890c700f652cfdb9b92897a1da357926 | size: 1,973 | ext: py | lang: Python
max_stars: repo_path=orlov/libs/minicap/fixture.py, repo_name=coppelia517/orlov, head_hexsha=d7ed6c061432b99ab2b75e0262db293e444fe6be, licenses=["MIT"], count=null
max_issues: repo_path=orlov/libs/minicap/fixture.py, repo_name=coppelia517/orlov, head_hexsha=d7ed6c061432b99ab2b75e0262db293e444fe6be, licenses=["MIT"], count=null
max_forks: repo_path=orlov/libs/minicap/fixture.py, repo_name=coppelia517/orlov, head_hexsha=d7ed6c061432b99ab2b75e0262db293e444fe6be, licenses=["MIT"], count=null
content:
""" Orlov Module : Minicap Module Fixture. """
import logging
import pytest
from orlov.libs.minicap import MinicapService, MinicapStream, MinicapProc
from orlov.exception import AndroidError
logger = logging.getLogger(__name__)
@pytest.fixture(scope='session')
def m_stream(request) -> MinicapStream:
""" MinicapStream Fixture.
Yields:
stream(MinicapStream): MinicapStream Module Create.
Raises:
AndroidError: 1. Device Not Found.
"""
logger.debug('Setup of MinicapStream Module.')
if request.config.getoption('minicap.ip'):
ip = request.config.getoption('minicap.ip')
else:
raise AndroidError('Could not get IP Address.')
if request.config.getoption('minicap.port'):
port = request.config.getoption('minicap.port')
else:
raise AndroidError('Could not get Port.')
yield MinicapStream.get_builder(ip, port)
@pytest.fixture(scope='session')
def m_service(request) -> MinicapService:
""" MinicapService Fixture.
Yields:
service(MinicapService): MinicapService Module Create.
Raises:
AndroidError: 1. Device Not Found.
"""
logger.debug('Setup of MinicapService Module.')
if request.config.getoption('minicap.service'):
serv = request.config.getoption('minicap.service')
else:
raise AndroidError('Could not get Service Name.')
yield MinicapService(serv)
@pytest.fixture(scope='session')
# pylint: disable=W0621
def minicap(request, m_stream, m_service) -> MinicapProc:
""" MinicapProc Fixture.
Yields:
proc(MinicapProc): MinicapProc Module Create.
"""
logger.debug('Minicap : Setup of Minicap Process.')
debug = False
if request.config.getoption('orlov_debug'):
debug = request.config.getoption('orlov_debug')
proc = MinicapProc(m_stream, m_service, debug=debug)
yield proc
proc.finish()
logger.debug('Minicap : TearDown of Minicap Process.')
avg_line_length: 27.027397 | max_line_length: 73 | alphanum_fraction: 0.686265

hexsha: 850103c86f150fe4825ecb41db03cc85d1659519 | size: 18,377 | ext: py | lang: Python
max_stars: repo_path=test/functional/wallet_multiwallet.py, repo_name=subhashkarthik1998/namecoin-core, head_hexsha=5026155158efb285079f088a3812e090dec5c246, licenses=["MIT"], count=null
max_issues: repo_path=test/functional/wallet_multiwallet.py, repo_name=subhashkarthik1998/namecoin-core, head_hexsha=5026155158efb285079f088a3812e090dec5c246, licenses=["MIT"], count=null
max_forks: repo_path=test/functional/wallet_multiwallet.py, repo_name=subhashkarthik1998/namecoin-core, head_hexsha=5026155158efb285079f088a3812e090dec5c246, licenses=["MIT"], count=null
content:
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a bitcoind node can load multiple wallet files
"""
import os
import shutil
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
FEATURE_LATEST = 169900
class MultiWalletTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.supports_cli = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument(
'--data_wallets_dir',
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
help='Test data with wallet directories (default: %(default)s)',
)
def run_test(self):
node = self.nodes[0]
data_dir = lambda *p: os.path.join(node.datadir, 'regtest', *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
def wallet_file(name):
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
assert_equal(self.nodes[0].listwalletdir(), { 'wallets': [{ 'name': '' }] })
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir('wallet.dat')), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
os.rename(wallet_dir("wallet.dat"), wallet_dir("w8"))
# create another dummy wallet for use in testing backups later
self.start_node(0, [])
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_dir("wallet.dat"), empty_wallet)
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly
# '' - to verify default wallet file is created correctly
wallet_names = ['w1', 'w2', 'w3', 'w', 'sub/w5', os.path.join(self.options.tmpdir, 'extern/w6'), 'w7_symlink', 'w8', '']
extra_args = ['-wallet={}'.format(n) for n in wallet_names]
self.start_node(0, extra_args)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8'])
assert_equal(set(node.listwallets()), set(wallet_names))
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
# should not initialize if wallet path can't be created
exp_stderr = "boost::filesystem::create_directory:"
self.nodes[0].assert_start_raises_init_error(['-wallet=wallet.dat/bad'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
# should not initialize if there are duplicate wallets
self.nodes[0].assert_start_raises_init_error(['-wallet=w1', '-wallet=w1'], 'Error: Error loading wallet w1. Duplicate -wallet filename specified.')
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
exp_stderr = r"BerkeleyBatch: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -zapwallettxes with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-zapwallettxes', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-zapwallettxes=1', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-zapwallettxes=2', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file")
self.log.info("Do not allow -salvagewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-salvagewallet', '-wallet=w1', '-wallet=w2'], "Error: -salvagewallet is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-salvagewallet=1', '-wallet=w1', '-wallet=w2'], "Error: -salvagewallet is only allowed with a single wallet file")
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet', '-wallet=w1', '-wallet=w2'], "Error: -upgradewallet is only allowed with a single wallet file")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet=1', '-wallet=w1', '-wallet=w2'], "Error: -upgradewallet is only allowed with a single wallet file")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0, ['-wallet=w4', '-wallet=w5'])
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
node.generatetoaddress(nblocks=1, address=w5.getnewaddress())
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-wallet=w4', '-wallet=w5', '-walletdir=' + data_dir()])
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], 50)
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-walletdir=' + competing_wallet_dir])
exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0, extra_args)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy'])
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
assert_equal(w1.getbalance(), 100)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], "regtest")
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.settxfee(4.0)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 4.0)
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-32601, "Method not found", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
assert_raises_rpc_error(-18, 'Wallet wallets not found.', self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
assert_raises_rpc_error(-4, 'Wallet file verification failed: Error loading wallet w1. Duplicate -wallet filename specified.', self.nodes[0].loadwallet, wallet_names[0])
# Fail to load duplicate wallets by different ways (directory and filepath)
assert_raises_rpc_error(-4, "Wallet file verification failed: Error loading wallet wallet.dat. Duplicate -wallet filename specified.", self.nodes[0].loadwallet, 'wallet.dat')
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-1, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
assert_raises_rpc_error(-1, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed: Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
assert_raises_rpc_error(-18, "Directory empty_wallet_dir does not contain a wallet.dat file", self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
assert_raises_rpc_error(-4, "Wallet w2 already exists.", self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
assert_raises_rpc_error(-8, "Cannot unload the requested wallet", w1.unloadwallet, "w2"),
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
w2.encryptwallet('test')
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-32601, "Method not found (wallet method is disabled because no wallet is loaded)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), ['', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy', 'w9'])
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
backup = os.path.join(self.options.tmpdir, 'backup.dat')
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
# Test .walletlock file is closed
self.start_node(1)
wallet = os.path.join(self.options.tmpdir, 'my_wallet')
self.nodes[0].createwallet(wallet)
assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
self.nodes[0].unloadwallet(wallet)
self.nodes[1].loadwallet(wallet)
# Fail to load if wallet is downgraded
shutil.copytree(os.path.join(self.options.data_wallets_dir, 'high_minversion'), wallet_dir('high_minversion'))
self.restart_node(0, extra_args=['-upgradewallet={}'.format(FEATURE_LATEST)])
assert {'name': 'high_minversion'} in self.nodes[0].listwalletdir()['wallets']
self.log.info("Fail -upgradewallet that results in downgrade")
assert_raises_rpc_error(
-4,
"Wallet loading failed.",
lambda: self.nodes[0].loadwallet(filename='high_minversion'),
)
self.stop_node(
i=0,
expected_stderr='Error: Error loading {}: Wallet requires newer version of Namecoin Core'.format(
wallet_dir('high_minversion', 'wallet.dat')),
)
if __name__ == '__main__':
MultiWalletTest().main()
avg_line_length: 51.912429 | max_line_length: 195 | alphanum_fraction: 0.670839

hexsha: 11ac5c19d44190b8130c86af33d626d89e4b896b | size: 54,319 | ext: py | lang: Python
max_stars: repo_path=src/writelogs.py, repo_name=onishtar/amazon-serverless-datalake-workshop, head_hexsha=2fe83c408a8f2e316fdea72b30ddc826f129a88d, licenses=["MIT-0"], count=166, stars_event 2018-11-24T21:40:12.000Z to 2022-03-17T23:50:55.000Z
max_issues: repo_path=src/writelogs.py, repo_name=onishtar/amazon-serverless-datalake-workshop, head_hexsha=2fe83c408a8f2e316fdea72b30ddc826f129a88d, licenses=["MIT-0"], count=3, issues_event 2019-11-01T23:27:44.000Z to 2022-01-26T22:47:49.000Z
max_forks: repo_path=src/writelogs.py, repo_name=onishtar/amazon-serverless-datalake-workshop, head_hexsha=2fe83c408a8f2e316fdea72b30ddc826f129a88d, licenses=["MIT-0"], count=88, forks_event 2018-11-19T01:59:57.000Z to 2022-01-29T16:11:23.000Z
content:
import boto3
import random
import string
import uuid
import json
import os
import datetime
import calendar
import time
#105.156.115.196,ontistesns0,17/Jan/2018:10:43:54,"GET /petstore/Cats/Treats",200,314
profiles = [{"ip":"34.209.30.165", "username":"mphillpottslx"},
{"ip":"207.130.250.61", "username":"gmellandnr"},
{"ip":"103.216.75.36", "username":"dwinpennyt"},
{"ip":"147.148.138.187", "username":"tdevil2r"},
{"ip":"142.117.198.234", "username":"mquipp3x"},
{"ip":"254.207.60.76", "username":"lgrent5r"},
{"ip":"206.126.42.113", "username":"eshiel6c"},
{"ip":"186.138.30.219", "username":"ljepp9a"},
{"ip":"22.165.3.106", "username":"nharbererd3"},
{"ip":"97.26.163.162", "username":"gstonehewerda"},
{"ip":"164.206.3.255", "username":"pbrixeyio"},
{"ip":"128.222.151.43", "username":"ahakonssonp1"},
{"ip":"6.167.223.77", "username":"sfaustinqa"},
{"ip":"236.35.38.183", "username":"cdudinqy"},
{"ip":"25.99.239.93", "username":"ccicconetti2s"},
{"ip":"121.41.52.44", "username":"jgibbard3q"},
{"ip":"133.12.214.53", "username":"abernade4h"},
{"ip":"29.37.153.38", "username":"gskittrell64"},
{"ip":"104.13.221.212", "username":"nwintle7z"},
{"ip":"143.41.89.43", "username":"eyggo8x"},
{"ip":"210.84.41.218", "username":"lclutterham9d"},
{"ip":"208.116.109.154", "username":"mbowditcha6"},
{"ip":"133.247.1.191", "username":"eeannettaei"},
{"ip":"186.173.42.67", "username":"hverringfl"},
{"ip":"56.39.70.156", "username":"shadlingtong7"},
{"ip":"67.203.244.204", "username":"apillinghy"},
{"ip":"175.132.68.125", "username":"alippetti0"},
{"ip":"124.171.35.2", "username":"djeanespz"},
{"ip":"193.176.176.65", "username":"ymacririerk"},
{"ip":"173.198.161.211", "username":"blawrensonrp"},
{"ip":"50.173.100.93", "username":"jocullen32"},
{"ip":"35.180.123.4", "username":"wsandyfirth4c"},
{"ip":"111.42.123.176", "username":"elongea4j"},
{"ip":"90.241.104.91", "username":"folyunin79"},
{"ip":"133.23.191.128", "username":"dgoricke9x"},
{"ip":"8.172.119.159", "username":"sgeardeg"},
{"ip":"204.153.38.2", "username":"aimriefb"},
{"ip":"211.226.15.99", "username":"adowniegs"},
{"ip":"192.7.203.215", "username":"rgarletteiu"},
{"ip":"24.211.185.236", "username":"aswafferj8"},
{"ip":"107.72.91.107", "username":"nbaukhamjr"},
{"ip":"245.207.212.134", "username":"etaffarellol7"},
{"ip":"175.70.245.214", "username":"tmackenq6"},
{"ip":"24.81.67.108", "username":"wkuzemka1x"},
{"ip":"146.111.138.200", "username":"akalisz4v"},
{"ip":"98.72.253.123", "username":"whandscombe56"},
{"ip":"117.188.145.201", "username":"ehallums6y"},
{"ip":"124.94.215.199", "username":"aphelps7c"},
{"ip":"108.43.80.33", "username":"lsquibbs8a"},
{"ip":"124.43.207.108", "username":"valtamirano9j"},
{"ip":"160.209.226.27", "username":"nbarberae"},
{"ip":"246.0.16.61", "username":"tleclairear"},
{"ip":"237.188.103.151", "username":"ratwateraz"},
{"ip":"196.168.39.182", "username":"rvaudreba"},
{"ip":"193.184.176.82", "username":"ibrattone3"},
{"ip":"138.172.156.160", "username":"ptilliardej"},
{"ip":"173.222.136.245", "username":"edownib"},
{"ip":"255.3.240.228", "username":"tpieronike"},
{"ip":"117.100.69.229", "username":"dwitnallkz"},
{"ip":"123.170.168.121", "username":"nkentonm9"},
{"ip":"203.18.255.242", "username":"mclutheramoj"},
{"ip":"160.230.110.231", "username":"hpachepq"},
{"ip":"17.119.5.39", "username":"dyaneevn"},
{"ip":"226.72.145.247", "username":"mmacgilly"},
{"ip":"119.205.25.198", "username":"ocottingham3j"},
{"ip":"174.190.27.156", "username":"bbowe4b"},
{"ip":"77.26.50.44", "username":"jfigures57"},
{"ip":"54.201.21.32", "username":"cmallion6p"},
{"ip":"227.186.28.73", "username":"tdeblasiisa3"},
{"ip":"228.69.53.176", "username":"jbardellbb"},
{"ip":"104.216.117.179", "username":"bwoodhallbo"},
{"ip":"190.45.236.142", "username":"ngerardeauxdf"},
{"ip":"217.242.195.160", "username":"aholligangc"},
{"ip":"208.232.190.249", "username":"rlewryjb"},
{"ip":"195.42.143.27", "username":"chamblinkd"},
{"ip":"242.163.21.12", "username":"shauxbykn"},
{"ip":"139.204.164.193", "username":"rjoronmg"},
{"ip":"212.167.136.13", "username":"ddilaweynz"},
{"ip":"176.225.72.34", "username":"mmurkinqc"},
{"ip":"51.190.8.162", "username":"jhabden0"},
{"ip":"175.219.179.146", "username":"cphilbricke"},
{"ip":"144.167.189.73", "username":"ckenward18"},
{"ip":"212.225.181.89", "username":"kmaliphant3c"},
{"ip":"201.147.140.129", "username":"mhuster8n"},
{"ip":"198.248.122.239", "username":"rbisset95"},
{"ip":"34.50.118.27", "username":"lriccardinic9"},
{"ip":"122.80.92.156", "username":"sblythfa"},
{"ip":"182.21.83.120", "username":"wyantsurevh4"},
{"ip":"196.199.117.162", "username":"deglintonha"},
{"ip":"61.209.1.136", "username":"pmcnaughthe"},
{"ip":"25.16.183.221", "username":"valenichicovhi"},
{"ip":"114.219.169.116", "username":"clampm4"},
{"ip":"17.50.111.82", "username":"bseedullm8"},
{"ip":"140.84.108.199", "username":"pmorleyna"},
{"ip":"156.224.199.28", "username":"aavrahamov26"},
{"ip":"13.131.100.17", "username":"pmcilmorie2f"},
{"ip":"167.248.231.76", "username":"gmoakes2p"},
{"ip":"199.44.146.154", "username":"idemiranda2w"},
{"ip":"26.48.231.112", "username":"bbatrick52"},
{"ip":"114.217.40.147", "username":"frosenthal7r"},
{"ip":"152.95.136.119", "username":"nessex9i"},
{"ip":"208.156.93.17", "username":"pmcgaraghanbf"},
{"ip":"199.168.175.245", "username":"kakriggir"},
{"ip":"223.243.239.63", "username":"smetherellja"},
{"ip":"40.113.231.254", "username":"cfifelj"},
{"ip":"92.255.183.83", "username":"asabbenmd"},
{"ip":"80.143.137.83", "username":"rfransinelliol"},
{"ip":"109.81.246.109", "username":"ewhilesoy"},
{"ip":"34.54.55.69", "username":"pgaishpe"},
{"ip":"171.196.134.186", "username":"uhaskeyra"},
{"ip":"206.156.192.224", "username":"dcrollmanq"},
{"ip":"46.0.183.236", "username":"cgull1q"},
{"ip":"181.191.30.81", "username":"amacrannell27"},
{"ip":"23.156.72.241", "username":"ibenoy3h"},
{"ip":"21.100.167.110", "username":"bminnette44"},
{"ip":"8.236.68.58", "username":"dcurtiss5y"},
{"ip":"183.239.228.246", "username":"ncliburn69"},
{"ip":"220.8.73.104", "username":"bizkoviczd7"},
{"ip":"71.159.235.153", "username":"bfrandsendj"},
{"ip":"223.83.28.5", "username":"etrussmanf1"},
{"ip":"222.202.17.248", "username":"sbrimmacombefn"},
{"ip":"251.230.97.115", "username":"ilaurentinohm"},
{"ip":"76.41.182.177", "username":"tgodboldmt"},
{"ip":"152.159.240.199", "username":"mgarfitnt"},
{"ip":"72.123.2.159", "username":"dwindousp5"},
{"ip":"54.87.146.165", "username":"kduesterpk"},
{"ip":"225.207.128.152", "username":"maimerql"},
{"ip":"202.157.79.15", "username":"lfulunr1"},
{"ip":"87.205.138.127", "username":"bworhamo"},
{"ip":"239.110.61.25", "username":"aescudier4g"},
{"ip":"161.247.93.216", "username":"fstolberger87"},
{"ip":"182.236.134.126", "username":"gcuddon9y"},
{"ip":"71.159.229.236", "username":"hclaypoleci"},
{"ip":"170.41.30.47", "username":"gcordobese9"},
{"ip":"132.96.239.83", "username":"bramsellfj"},
{"ip":"48.157.74.221", "username":"atregeargm"},
{"ip":"51.126.173.82", "username":"fbeagleyhd"},
{"ip":"68.116.16.223", "username":"bpietzjx"},
{"ip":"61.27.236.249", "username":"schestlelt"},
{"ip":"105.64.80.219", "username":"tsweetnammv"},
{"ip":"38.49.59.224", "username":"dgedneyqm"},
{"ip":"64.135.144.125", "username":"amawbyrh"},
{"ip":"104.206.195.175", "username":"kcamillettiu"},
{"ip":"22.211.241.99", "username":"sdullaghan1c"},
{"ip":"189.203.139.247", "username":"anovello58"},
{"ip":"134.20.109.168", "username":"rmccrackem5o"},
{"ip":"147.105.85.180", "username":"wfaldo7o"},
{"ip":"116.109.219.177", "username":"anorrie7u"},
{"ip":"50.47.205.124", "username":"cwoakes8s"},
{"ip":"111.127.88.69", "username":"eboddington8u"},
{"ip":"217.52.3.237", "username":"iheyball9e"},
{"ip":"125.112.136.112", "username":"yannott9k"},
{"ip":"243.44.221.46", "username":"wwillattsao"},
{"ip":"98.125.172.244", "username":"krothamc6"},
{"ip":"58.162.24.66", "username":"adarnelldg"},
{"ip":"184.17.154.32", "username":"amuckleg3"},
{"ip":"206.198.211.217", "username":"mrosingk"},
{"ip":"90.60.16.140", "username":"skillelayh6"},
{"ip":"2.41.63.75", "username":"amardleid"},
{"ip":"149.69.17.70", "username":"fprowtingkf"},
{"ip":"49.32.231.140", "username":"hstorykm"},
{"ip":"240.109.36.188", "username":"elomisne"},
{"ip":"120.163.83.76", "username":"mtearneyrj"},
{"ip":"198.81.196.251", "username":"cceillierw"},
{"ip":"128.173.252.136", "username":"cgummary1j"},
{"ip":"54.37.203.220", "username":"fbalffye1n"},
{"ip":"229.115.165.60", "username":"fgruby46"},
{"ip":"61.213.130.68", "username":"opiscopello47"},
{"ip":"160.105.123.70", "username":"edomney49"},
{"ip":"33.114.230.85", "username":"dcardenosa4e"},
{"ip":"4.230.109.38", "username":"hmcturlough5n"},
{"ip":"14.73.66.64", "username":"ldalloway7j"},
{"ip":"238.120.210.228", "username":"lcahn9m"},
{"ip":"217.32.61.69", "username":"rlaxton9r"},
{"ip":"114.140.43.129", "username":"ffussiedz"},
{"ip":"5.189.36.174", "username":"rroskillyih"},
{"ip":"58.155.24.52", "username":"lvictoryj4"},
{"ip":"9.52.106.18", "username":"glindegardle"},
{"ip":"109.168.135.103", "username":"dvenningmy"},
{"ip":"66.188.100.163", "username":"dboultwoodo5"},
{"ip":"233.26.74.38", "username":"aspeendenpt"},
{"ip":"158.186.68.47", "username":"rferroli6k"},
{"ip":"47.120.46.225", "username":"eburburyb2"},
{"ip":"57.138.125.170", "username":"stavernorcl"},
{"ip":"207.208.90.137", "username":"relwoode2"},
{"ip":"41.137.186.230", "username":"esamplesfy"},
{"ip":"81.154.70.70", "username":"cambroseg0"},
{"ip":"227.212.11.56", "username":"csalewayhl"},
{"ip":"136.137.152.209", "username":"hbatecokm0"},
{"ip":"40.213.20.164", "username":"jstewartnc"},
{"ip":"143.104.152.138", "username":"oborsinp"},
{"ip":"22.193.214.174", "username":"lbrookwoodq4"},
{"ip":"204.156.236.209", "username":"jskea19"},
{"ip":"69.103.178.176", "username":"smactrustam5b"},
{"ip":"190.8.113.193", "username":"enorcock5s"},
{"ip":"217.166.180.69", "username":"gandriss6o"},
{"ip":"162.34.80.37", "username":"gagiolfinger7e"},
{"ip":"210.87.79.250", "username":"kcroshawa1"},
{"ip":"168.26.154.196", "username":"dzamboniariaq"},
{"ip":"8.245.205.253", "username":"tzuenellib5"},
{"ip":"34.45.30.163", "username":"wbathersbybq"},
{"ip":"255.144.105.187", "username":"lmacroriecv"},
{"ip":"53.195.92.153", "username":"aleggsev"},
{"ip":"245.50.39.90", "username":"pmcquodek9"},
{"ip":"77.186.60.203", "username":"smattisssenlc"},
{"ip":"77.191.130.83", "username":"avasyunkinmj"},
{"ip":"232.9.122.26", "username":"rbrideauo0"},
{"ip":"143.160.252.157", "username":"fcraiggph"},
{"ip":"204.94.212.82", "username":"eharston15"},
{"ip":"23.168.91.67", "username":"nneary16"},
{"ip":"156.58.207.19", "username":"kkyneton2y"},
{"ip":"243.221.235.161", "username":"fharry39"},
{"ip":"15.82.202.19", "username":"mriceards7d"},
{"ip":"98.204.157.158", "username":"ageane7q"},
{"ip":"192.247.92.43", "username":"hblaskettat"},
{"ip":"31.240.90.17", "username":"hselwynecq"},
{"ip":"52.48.17.154", "username":"clyaldp"},
{"ip":"67.139.189.47", "username":"cforbese7"},
{"ip":"125.81.203.171", "username":"fambrogieo"},
{"ip":"160.186.116.0", "username":"kbaumfordfu"},
{"ip":"138.16.116.226", "username":"krawstornehh"},
{"ip":"143.95.250.2", "username":"tmaffezzolikq"},
{"ip":"54.18.184.36", "username":"mliveingl2"},
{"ip":"92.182.107.82", "username":"rtrippettlq"},
{"ip":"3.201.191.172", "username":"wdoblelu"},
{"ip":"35.220.223.95", "username":"lbethomi"},
{"ip":"16.18.142.193", "username":"lkeesmz"},
{"ip":"221.34.236.239", "username":"vdrennanon"},
{"ip":"15.72.186.233", "username":"gstichallqp"},
{"ip":"95.85.73.233", "username":"rkinnerley3d"},
{"ip":"151.105.19.118", "username":"ejouanny3z"},
{"ip":"37.62.181.205", "username":"aboobier6m"},
{"ip":"71.90.129.24", "username":"lcrooke8c"},
{"ip":"221.132.174.204", "username":"jwethers94"},
{"ip":"97.161.203.175", "username":"bsparka2"},
{"ip":"138.245.236.117", "username":"scheseau"},
{"ip":"144.100.19.118", "username":"rfindlayc5"},
{"ip":"204.80.75.144", "username":"ddawbergw"},
{"ip":"67.149.185.13", "username":"gschruyersgx"},
{"ip":"86.213.40.92", "username":"jcastanoneh7"},
{"ip":"52.53.142.225", "username":"rstedmanju"},
{"ip":"108.51.177.251", "username":"wcousensjw"},
{"ip":"55.3.80.27", "username":"jantonijevicky"},
{"ip":"130.8.201.254", "username":"bskethm7"},
{"ip":"150.4.207.72", "username":"lcolamn0"},
{"ip":"91.177.70.8", "username":"ddewaren5"},
{"ip":"17.71.245.31", "username":"tbaudinsot"},
{"ip":"223.111.66.138", "username":"hbalderyr4"},
{"ip":"109.126.70.240", "username":"wallensonr6"},
{"ip":"118.99.187.79", "username":"isemble2b"},
{"ip":"216.186.250.10", "username":"ascutta4"},
{"ip":"143.22.89.67", "username":"fbroaderc0"},
{"ip":"123.44.131.184", "username":"mpickerincm"},
{"ip":"8.175.70.1", "username":"dbrandesfr"},
{"ip":"74.104.82.42", "username":"lcockadaygg"},
{"ip":"164.209.223.1", "username":"tcuttleshp"},
{"ip":"135.122.117.92", "username":"ecrecynb"},
{"ip":"215.203.41.108", "username":"iolyetl"},
{"ip":"181.21.211.202", "username":"hboig2a"},
{"ip":"171.171.34.186", "username":"jraggles4y"},
{"ip":"225.59.172.190", "username":"kflecknell5p"},
{"ip":"14.253.207.9", "username":"mcauston8i"},
{"ip":"81.151.70.180", "username":"tbertin91"},
{"ip":"66.58.110.247", "username":"bhardeyjg"},
{"ip":"64.251.137.33", "username":"randerll3"},
{"ip":"32.91.248.187", "username":"ehoustonny"},
{"ip":"109.113.28.130", "username":"pjoeoa"},
{"ip":"16.61.209.186", "username":"lwhitemanow"},
{"ip":"171.40.127.133", "username":"bshellidayqn"},
{"ip":"110.90.156.240", "username":"rhearonsd"},
{"ip":"29.13.200.215", "username":"gkidneyi"},
{"ip":"231.63.233.126", "username":"jpallasch1g"},
{"ip":"219.206.142.29", "username":"lcool3p"},
{"ip":"218.168.1.238", "username":"kwalkinshaw4u"},
{"ip":"164.194.84.47", "username":"dcroston7l"},
{"ip":"139.175.157.39", "username":"lshoreman7m"},
{"ip":"5.55.219.66", "username":"fbazoge8o"},
{"ip":"0.229.250.121", "username":"jcrockac"},
{"ip":"76.86.6.32", "username":"tkemsonak"},
{"ip":"89.176.133.125", "username":"eandriolic2"},
{"ip":"186.27.185.172", "username":"mlinkincb"},
{"ip":"79.235.195.73", "username":"kbrangancu"},
{"ip":"232.39.218.242", "username":"swelland1"},
{"ip":"120.218.78.143", "username":"csaizdm"},
{"ip":"85.210.236.250", "username":"mbeddoedx"},
{"ip":"32.241.228.253", "username":"fbalasinig5"},
{"ip":"29.94.85.123", "username":"rcrehanho"},
{"ip":"122.177.185.133", "username":"mburchilli2"},
{"ip":"81.102.44.57", "username":"gridelkb"},
{"ip":"138.151.117.231", "username":"sgladmankl"},
{"ip":"216.75.15.198", "username":"jcullipms"},
{"ip":"95.110.61.216", "username":"fcostellowrn"},
{"ip":"174.166.62.220", "username":"croper1"},
{"ip":"183.176.192.182", "username":"ahartshorne8"},
{"ip":"211.91.210.230", "username":"lgromley4s"},
{"ip":"89.218.124.52", "username":"oturban6l"},
{"ip":"30.232.64.70", "username":"nleyes78"},
{"ip":"216.62.164.88", "username":"soffell7s"},
{"ip":"102.130.110.107", "username":"tlangstaff99"},
{"ip":"65.243.211.242", "username":"mdonnerab"},
{"ip":"51.234.22.54", "username":"sspreullaj"},
{"ip":"212.10.223.103", "username":"nstandbrookebs"},
{"ip":"211.199.160.174", "username":"kmcgawch"},
{"ip":"33.150.94.29", "username":"sclinningcp"},
{"ip":"0.246.121.87", "username":"raucotteh"},
{"ip":"101.37.5.110", "username":"pbraccig2"},
{"ip":"200.147.216.111", "username":"nligginsge"},
{"ip":"97.207.114.67", "username":"ccaudwellhx"},
{"ip":"124.92.205.247", "username":"sosheilsj3"},
{"ip":"71.126.18.103", "username":"kcliffka"},
{"ip":"154.166.208.111", "username":"candreaskp"},
{"ip":"183.20.205.178", "username":"pkubiaknd"},
{"ip":"194.168.78.235", "username":"pocarrollpd"},
{"ip":"194.111.81.30", "username":"dmitroq2"},
{"ip":"232.106.101.46", "username":"sgrillsqr"},
{"ip":"77.77.182.210", "username":"mbamlett2m"},
{"ip":"135.210.28.208", "username":"etawton6i"},
{"ip":"5.101.115.231", "username":"cdavidofd2"},
{"ip":"114.124.24.8", "username":"gbloxsomed8"},
{"ip":"38.186.82.185", "username":"cthroweriq"},
{"ip":"193.39.229.22", "username":"bizkovitchjy"},
{"ip":"81.109.229.147", "username":"lsertinoc"},
{"ip":"82.185.198.232", "username":"jseelyoe"},
{"ip":"85.213.161.81", "username":"ewhysalloo"},
{"ip":"244.98.252.169", "username":"rambersonp3"},
{"ip":"45.185.203.11", "username":"mlibrerosrc"},
{"ip":"107.84.166.162", "username":"challagan51"},
{"ip":"110.215.82.40", "username":"gessame5z"},
{"ip":"116.32.219.175", "username":"uwaldrumas"},
{"ip":"162.220.249.49", "username":"slangfittdh"},
{"ip":"126.153.152.198", "username":"blarwaydl"},
{"ip":"19.0.219.132", "username":"pcastellonegl"},
{"ip":"126.62.161.110", "username":"iscanlinh3"},
{"ip":"120.110.164.218", "username":"amarquessh9"},
{"ip":"11.120.179.212", "username":"wbantickjd"},
{"ip":"232.57.183.41", "username":"cmatousekm1"},
{"ip":"90.151.150.9", "username":"agooderp6"},
{"ip":"23.29.11.33", "username":"ahance2q"},
{"ip":"21.228.85.114", "username":"nofeeney4a"},
{"ip":"11.112.233.147", "username":"vmewett60"},
{"ip":"149.167.180.170", "username":"cburless8z"},
{"ip":"63.146.48.197", "username":"irominovb6"},
{"ip":"188.120.141.38", "username":"blidsterbn"},
{"ip":"59.113.252.228", "username":"ttinne8"},
{"ip":"98.73.124.73", "username":"estaintonskinnfh"},
{"ip":"183.137.200.82", "username":"blorriefm"},
{"ip":"23.151.34.222", "username":"jhuggenik"},
{"ip":"58.0.150.220", "username":"jtrowlerjh"},
{"ip":"163.68.80.132", "username":"pmaccostyla"},
{"ip":"210.132.254.167", "username":"fmarkiemo"},
{"ip":"53.14.201.32", "username":"ndavydenkoo4"},
{"ip":"70.33.129.109", "username":"vspadonir2"},
{"ip":"70.25.76.101", "username":"ofayerbrotherrb"},
{"ip":"249.116.127.250", "username":"adumpletonz"},
{"ip":"51.50.54.251", "username":"ssymon2d"},
{"ip":"148.200.7.191", "username":"nmccumesky3f"},
{"ip":"143.7.20.39", "username":"cfrandsen4w"},
{"ip":"150.34.218.226", "username":"obruyns6d"},
{"ip":"55.47.114.22", "username":"crimington8v"},
{"ip":"133.42.21.119", "username":"emendes93"},
{"ip":"207.97.0.112", "username":"wpickrill9n"},
{"ip":"239.94.111.175", "username":"jfrancoma7"},
{"ip":"112.29.53.120", "username":"etallquistbz"},
{"ip":"161.135.229.59", "username":"pchastneyc4"},
{"ip":"92.35.54.133", "username":"eumancn"},
{"ip":"56.254.126.94", "username":"gdufouree1"},
{"ip":"135.151.55.179", "username":"cconfortkx"},
{"ip":"143.156.24.122", "username":"ppablolw"},
{"ip":"27.186.92.253", "username":"lgoodeepc"},
{"ip":"114.50.43.187", "username":"wwattishamrl"},
{"ip":"175.113.70.238", "username":"otamburi4"},
{"ip":"47.29.254.177", "username":"katyeo3r"},
{"ip":"146.41.169.86", "username":"wludman6g"},
{"ip":"186.43.109.201", "username":"gkirkman9t"},
{"ip":"99.115.229.75", "username":"jmcowenbh"},
{"ip":"118.59.187.72", "username":"lellissd5"},
{"ip":"102.194.5.116", "username":"vmcvittiedd"},
{"ip":"69.161.87.95", "username":"kerskindr"},
{"ip":"33.248.18.20", "username":"mmcateerdy"},
{"ip":"177.37.206.127", "username":"rseysf2"},
{"ip":"177.108.27.123", "username":"mretallackfk"},
{"ip":"48.248.52.200", "username":"caleksankingd"},
{"ip":"52.91.182.57", "username":"jchazellehu"},
{"ip":"151.68.28.250", "username":"ggillorani6"},
{"ip":"12.57.21.78", "username":"dbuckleek1"},
{"ip":"214.6.91.223", "username":"gtommasuzzild"},
{"ip":"181.73.162.123", "username":"gmantramml"},
{"ip":"246.108.219.104", "username":"ddewinl"},
{"ip":"224.208.6.110", "username":"lmappnv"},
{"ip":"1.140.21.231", "username":"oskeggso8"},
{"ip":"10.21.88.9", "username":"lrelfeob"},
{"ip":"57.81.174.162", "username":"esodorpr"},
{"ip":"143.157.39.44", "username":"mholton17"},
{"ip":"252.18.205.79", "username":"sraylton2o"},
{"ip":"93.66.7.22", "username":"qaleksashin2t"},
{"ip":"3.104.121.49", "username":"krickson33"},
{"ip":"81.104.172.57", "username":"mbarnewille3l"},
{"ip":"95.80.92.32", "username":"lnassi5v"},
{"ip":"160.47.22.69", "username":"kboribal66"},
{"ip":"23.176.104.235", "username":"cchalcroft6w"},
{"ip":"15.122.222.59", "username":"cgwilliam7y"},
{"ip":"69.123.95.115", "username":"dmarcq8g"},
{"ip":"225.78.254.15", "username":"cfidock8t"},
{"ip":"128.0.54.238", "username":"agobolosaa"},
{"ip":"24.64.173.10", "username":"edumbrallbl"},
{"ip":"207.87.73.78", "username":"eellingsbm"},
{"ip":"71.87.108.86", "username":"dtymg8"},
{"ip":"143.115.246.6", "username":"abrunonhf"},
{"ip":"216.103.159.168", "username":"eblencowehz"},
{"ip":"234.186.21.108", "username":"rkrzyzanowskij6"},
{"ip":"239.215.103.165", "username":"bmcardelljm"},
{"ip":"15.94.188.199", "username":"hwratekr"},
{"ip":"148.147.200.103", "username":"knuttonkv"},
{"ip":"158.255.135.10", "username":"lbirkenshawl8"},
{"ip":"238.113.248.59", "username":"tlutwidgemw"},
{"ip":"239.204.248.30", "username":"wmahodyov"},
{"ip":"105.23.250.11", "username":"cgarstangpa"},
{"ip":"244.93.163.203", "username":"modoherty77"},
{"ip":"213.104.26.183", "username":"jaylettai"},
{"ip":"251.119.35.67", "username":"fburganco"},
{"ip":"251.237.15.128", "username":"mskrinesee"},
{"ip":"101.78.178.0", "username":"nandratly"},
{"ip":"58.10.214.107", "username":"nrentzqe"},
{"ip":"48.88.224.200", "username":"cmatherson1f"},
{"ip":"57.250.198.180", "username":"jzappel23"},
{"ip":"188.104.130.248", "username":"jmacsherry35"},
{"ip":"125.238.14.7", "username":"tmathew4k"},
{"ip":"112.53.245.82", "username":"alougheid5x"},
{"ip":"192.139.250.0", "username":"stune8r"},
{"ip":"213.103.41.236", "username":"fdaldryah"},
{"ip":"142.144.16.10", "username":"vparnabyds"},
{"ip":"169.242.2.125", "username":"toughff"},
{"ip":"137.117.206.127", "username":"cspohrhs"},
{"ip":"101.161.172.23", "username":"amacmakinj1"},
{"ip":"210.250.186.141", "username":"ckordingji"},
{"ip":"183.58.5.4", "username":"tfitzsimonjn"},
{"ip":"95.244.216.203", "username":"rcrampm6"},
{"ip":"41.23.4.161", "username":"rcockerillmc"},
{"ip":"237.172.68.48", "username":"bslayns"},
{"ip":"21.155.254.248", "username":"vnevino7"},
{"ip":"214.111.5.40", "username":"jbonifaziop4"},
{"ip":"27.8.13.24", "username":"jdigbyps"},
{"ip":"106.100.51.132", "username":"rjorgesenq7"},
{"ip":"153.226.3.106", "username":"hcarmoqb"},
{"ip":"3.169.182.180", "username":"ndancyf"},
{"ip":"102.174.247.102", "username":"bstockall14"},
{"ip":"86.199.60.165", "username":"mcalderhead20"},
{"ip":"218.91.188.143", "username":"rbalham22"},
{"ip":"25.243.225.207", "username":"mollenbuttel2j"},
{"ip":"25.95.39.126", "username":"tmatuszinski2u"},
{"ip":"32.43.226.135", "username":"ccantrell63"},
{"ip":"61.77.162.40", "username":"cburde6h"},
{"ip":"60.94.22.58", "username":"rpapaminas7t"},
{"ip":"64.1.201.235", "username":"apollokb4"},
{"ip":"167.24.169.4", "username":"rdinwoodieh5"},
{"ip":"97.163.34.189", "username":"cpietraszekjc"},
{"ip":"170.120.225.227", "username":"spontonn8"},
{"ip":"246.39.44.197", "username":"aquimbynh"},
{"ip":"112.34.239.96", "username":"nbrotherhedok"},
{"ip":"160.91.37.212", "username":"emccauleyou"},
{"ip":"251.237.189.105", "username":"apersickep7"},
{"ip":"246.130.75.148", "username":"eraynerpy"},
{"ip":"180.7.76.51", "username":"acollyearr3"},
{"ip":"75.57.82.159", "username":"rperringtong"},
{"ip":"152.235.176.211", "username":"lgavrielli31"},
{"ip":"165.142.91.153", "username":"smichelotti6e"},
{"ip":"66.127.78.92", "username":"mbalaison70"},
{"ip":"82.121.72.126", "username":"ghinkins9c"},
{"ip":"150.218.149.255", "username":"hnapolitano9s"},
{"ip":"199.147.52.36", "username":"mbernettial"},
{"ip":"117.101.65.121", "username":"dsmouten"},
{"ip":"146.67.255.79", "username":"soherlihyep"},
{"ip":"165.175.74.95", "username":"cpurchonfe"},
{"ip":"171.108.66.135", "username":"torrygn"},
{"ip":"253.205.73.186", "username":"kdinsehk"},
{"ip":"169.180.165.232", "username":"rmeacheri3"},
{"ip":"117.82.159.19", "username":"cdaini9"},
{"ip":"224.246.186.246", "username":"cheseylp"},
{"ip":"141.15.132.93", "username":"vchelleypv"},
{"ip":"159.131.172.85", "username":"nbeevensqf"},
{"ip":"180.45.228.243", "username":"afaireqt"},
{"ip":"42.127.247.220", "username":"maverillrg"},
{"ip":"84.44.12.102", "username":"bsheera"},
{"ip":"171.254.252.250", "username":"garnaudot1e"},
{"ip":"0.18.209.198", "username":"mgibling24"},
{"ip":"34.234.114.125", "username":"pjancic34"},
{"ip":"197.141.191.169", "username":"dpioch53"},
{"ip":"237.45.47.56", "username":"tpenhalurick5a"},
{"ip":"102.47.119.137", "username":"mfetherstonhaugh6s"},
{"ip":"237.230.175.90", "username":"tnafzger7b"},
{"ip":"36.109.138.186", "username":"icaldecourt7n"},
{"ip":"37.251.246.123", "username":"rwyllcock8h"},
{"ip":"213.131.205.154", "username":"smarkovic97"},
{"ip":"61.235.96.199", "username":"cdonefd"},
{"ip":"13.174.71.75", "username":"egartlandfq"},
{"ip":"170.1.217.73", "username":"cwhetnellg1"},
{"ip":"122.178.198.68", "username":"bdoerskw"},
{"ip":"120.255.51.139", "username":"cdobkinl6"},
{"ip":"59.59.180.183", "username":"dferrottio9"},
{"ip":"57.132.226.71", "username":"sjobkeos"},
{"ip":"67.13.28.47", "username":"wfenderq9"},
{"ip":"129.129.57.133", "username":"tdumingosqh"},
{"ip":"44.240.78.102", "username":"pbuttfieldqv"},
{"ip":"217.155.132.86", "username":"hlardiner1a"},
{"ip":"207.48.135.124", "username":"sshorrock1y"},
{"ip":"162.216.161.193", "username":"hgregoriou28"},
{"ip":"206.68.64.4", "username":"dhigginbottam2e"},
{"ip":"27.83.6.110", "username":"aclohisey75"},
{"ip":"40.19.3.4", "username":"gdoddridge88"},
{"ip":"254.249.255.68", "username":"aebdina8"},
{"ip":"136.242.92.45", "username":"lonslowdv"},
{"ip":"23.116.34.250", "username":"zelderfieldj9"},
{"ip":"188.131.186.3", "username":"vclareku"},
{"ip":"237.129.216.221", "username":"lmckimlh"},
{"ip":"175.114.245.106", "username":"ldoveymh"},
{"ip":"167.131.197.204", "username":"smoncreifpm"},
{"ip":"106.165.240.97", "username":"vpoolyqd"},
{"ip":"116.226.245.123", "username":"abarlowqj"},
{"ip":"121.139.184.76", "username":"sguppyqw"},
{"ip":"38.136.166.25", "username":"cdionsettir7"},
{"ip":"105.24.173.155", "username":"lblennerhassettr8"},
{"ip":"228.240.219.44", "username":"pleynagh11"},
{"ip":"211.7.201.88", "username":"rzmitrovich1u"},
{"ip":"151.180.101.192", "username":"adovidaitis4l"},
{"ip":"202.184.124.160", "username":"kminihane9z"},
{"ip":"50.58.40.184", "username":"ccurriercf"},
{"ip":"209.213.240.182", "username":"cemmsdn"},
{"ip":"213.19.238.231", "username":"aseedullfs"},
{"ip":"92.51.28.181", "username":"cdungeeiy"},
{"ip":"211.216.155.225", "username":"blamkinlz"},
{"ip":"49.64.10.224", "username":"rdresserpg"},
{"ip":"254.208.52.90", "username":"wmunktonqu"},
{"ip":"113.200.76.163", "username":"adandrear5"},
{"ip":"165.106.132.114", "username":"jonions45"},
{"ip":"245.225.74.177", "username":"rjudkins5i"},
{"ip":"161.198.76.211", "username":"fhilling5m"},
{"ip":"48.40.155.252", "username":"lfearnsides83"},
{"ip":"219.13.231.17", "username":"rwetheredbt"},
{"ip":"27.70.221.233", "username":"acollefordeb"},
{"ip":"85.148.84.225", "username":"rfarrajf"},
{"ip":"4.188.115.45", "username":"amckeemano2"},
{"ip":"8.126.247.11", "username":"dpeppinoq"},
{"ip":"117.155.148.67", "username":"rhannanqg"},
{"ip":"8.5.44.227", "username":"etrevorr0"},
{"ip":"155.249.157.57", "username":"shazelgroverq"},
{"ip":"100.47.54.200", "username":"vsinnockx"},
{"ip":"13.87.158.40", "username":"jrebillard3v"},
{"ip":"4.161.113.157", "username":"kmcgarry48"},
{"ip":"214.64.196.247", "username":"cspellacy61"},
{"ip":"252.143.24.204", "username":"aarnaldo7k"},
{"ip":"53.37.83.194", "username":"rdowry8y"},
{"ip":"163.88.99.39", "username":"sslafford96"},
{"ip":"215.106.236.208", "username":"tjackman9q"},
{"ip":"50.100.165.145", "username":"cmolead"},
{"ip":"65.190.189.137", "username":"gmcgarrityan"},
{"ip":"124.102.94.158", "username":"ldaybelld6"},
{"ip":"180.115.209.115", "username":"wleregofi"},
{"ip":"56.91.102.233", "username":"ktregidofw"},
{"ip":"128.250.19.114", "username":"hfrowdeg9"},
{"ip":"157.135.187.123", "username":"eredheadim"},
{"ip":"183.12.7.9", "username":"gscammondenma"},
{"ip":"163.112.220.239", "username":"lstallonn2"},
{"ip":"35.168.126.191", "username":"scluckien3"},
{"ip":"72.61.183.124", "username":"rburnessng"},
{"ip":"90.44.160.45", "username":"llefortnm"},
{"ip":"116.250.240.197", "username":"haylettnq"},
{"ip":"158.226.66.212", "username":"gantuschpo"},
{"ip":"74.155.146.125", "username":"cduffett1z"},
{"ip":"60.105.110.116", "username":"lleggen4q"},
{"ip":"180.122.137.40", "username":"rheck74"},
{"ip":"50.109.155.81", "username":"glaugharned0"},
{"ip":"226.9.233.160", "username":"mwhatedk"},
{"ip":"185.171.35.228", "username":"dkitchenmangq"},
{"ip":"174.197.59.193", "username":"pchessellgr"},
{"ip":"64.133.141.85", "username":"tmcilvennyi1"},
{"ip":"114.153.203.9", "username":"rvankovil"},
{"ip":"85.165.107.40", "username":"swallisll"},
{"ip":"40.75.149.40", "username":"sdunstann4"},
{"ip":"214.46.195.97", "username":"qkelkpl"},
{"ip":"187.110.3.118", "username":"apetzolt3i"},
{"ip":"82.38.156.58", "username":"cnottle3y"},
{"ip":"83.235.131.149", "username":"cgrazier89"},
{"ip":"255.170.239.18", "username":"ngaynesde"},
{"ip":"187.210.48.118", "username":"kmasserelex"},
{"ip":"14.241.161.228", "username":"amaffionej5"},
{"ip":"23.58.84.138", "username":"nleidenjt"},
{"ip":"49.248.133.234", "username":"bsketchleyl5"},
{"ip":"255.112.223.164", "username":"aickowiczn6"},
{"ip":"141.76.151.73", "username":"xcamisp2"},
{"ip":"139.223.140.56", "username":"bfroodp8"},
{"ip":"127.253.189.144", "username":"mnel1v"},
{"ip":"155.127.163.127", "username":"mbarends25"},
{"ip":"126.55.31.95", "username":"ukent2i"},
{"ip":"180.142.172.200", "username":"cbrack2k"},
{"ip":"186.15.39.60", "username":"cavieson4r"},
{"ip":"219.12.142.108", "username":"adabnot6j"},
{"ip":"110.128.178.198", "username":"llammerding6x"},
{"ip":"58.96.101.201", "username":"vcruft85"},
{"ip":"83.88.49.132", "username":"qmcgintyct"},
{"ip":"246.166.227.227", "username":"tameyf5"},
{"ip":"212.0.21.157", "username":"oballega"},
{"ip":"40.116.100.165", "username":"wjimmesi5"},
{"ip":"15.0.112.185", "username":"pjolliffis"},
{"ip":"111.183.164.45", "username":"gsladejo"},
{"ip":"228.210.222.195", "username":"tmeechanjv"},
{"ip":"137.130.79.167", "username":"wlabuschagneko"},
{"ip":"242.98.22.207", "username":"mbunstonm2"},
{"ip":"91.167.62.124", "username":"crigollenj"},
{"ip":"132.159.133.166", "username":"amcgannonoi"},
{"ip":"138.23.158.185", "username":"damerighipp"},
{"ip":"253.37.96.189", "username":"cshrimpling40"},
{"ip":"174.180.111.203", "username":"slideard5j"},
{"ip":"6.81.39.70", "username":"hdover65"},
{"ip":"32.229.91.140", "username":"vtreece9p"},
{"ip":"163.2.133.159", "username":"fcastellabi"},
{"ip":"108.19.73.58", "username":"dtinmouthbk"},
{"ip":"125.106.109.96", "username":"jbrinsdene0"},
{"ip":"149.77.3.246", "username":"epriddinggh"},
{"ip":"243.188.221.97", "username":"anotheria"},
{"ip":"101.101.198.173", "username":"idohmernu"},
{"ip":"218.199.32.182", "username":"wmeadley5k"},
{"ip":"145.130.186.205", "username":"dgallemore62"},
{"ip":"160.178.8.8", "username":"jloudwell6a"},
{"ip":"172.10.1.250", "username":"rhuggett6u"},
{"ip":"199.208.88.19", "username":"mwalsham7a"},
{"ip":"146.246.224.36", "username":"rhaggett98"},
{"ip":"207.204.118.23", "username":"cburghallam"},
{"ip":"163.236.243.17", "username":"bgrinawaydi"},
{"ip":"251.189.203.221", "username":"qtongueet"},
{"ip":"157.175.203.6", "username":"vlambornf6"},
{"ip":"192.94.212.140", "username":"azanussiigt"},
{"ip":"148.192.90.192", "username":"pgooddayhj"},
{"ip":"134.122.14.170", "username":"lschachif"},
{"ip":"25.38.240.87", "username":"rquartonk6"},
{"ip":"236.50.70.22", "username":"hryhorovichl1"},
{"ip":"48.131.247.203", "username":"cjozwiaklf"},
{"ip":"228.14.24.85", "username":"kmaccurtainlk"},
{"ip":"166.233.82.130", "username":"grandaleslo"},
{"ip":"131.14.244.90", "username":"pgergem5"},
{"ip":"177.12.36.139", "username":"srettermk"},
{"ip":"81.80.196.63", "username":"nmacphersonn1"},
{"ip":"240.103.95.156", "username":"wdanseyrr"},
{"ip":"112.193.70.54", "username":"obelfrageh"},
{"ip":"164.37.251.13", "username":"wclemanceau1s"},
{"ip":"157.64.139.202", "username":"cvalder2c"},
{"ip":"19.137.53.173", "username":"cmoxsom3n"},
{"ip":"10.67.202.250", "username":"hhitzmann4z"},
{"ip":"61.75.72.81", "username":"nrelfe5t"},
{"ip":"174.196.128.67", "username":"aclimar6q"},
{"ip":"168.106.222.190", "username":"bhaskew76"},
{"ip":"213.236.202.237", "username":"rbillett7v"},
{"ip":"250.102.216.174", "username":"csetch7x"},
{"ip":"158.199.199.147", "username":"respinazob9"},
{"ip":"110.126.58.29", "username":"esheavillsf3"},
{"ip":"181.9.242.153", "username":"mpriddyfc"},
{"ip":"255.177.41.3", "username":"sdecristofalog4"},
{"ip":"81.87.237.60", "username":"nswanborrowg6"},
{"ip":"246.40.3.42", "username":"fockleshawhg"},
{"ip":"128.115.118.126", "username":"cheselwoodhn"},
{"ip":"39.54.198.200", "username":"tpropperj7"},
{"ip":"171.109.176.10", "username":"atrousdellk4"},
{"ip":"71.88.155.136", "username":"lpieterkk"},
{"ip":"2.8.155.10", "username":"nsassermn"},
{"ip":"220.241.62.91", "username":"sserfatini"},
{"ip":"65.98.176.226", "username":"jpearnop"},
{"ip":"234.39.181.153", "username":"skenrydqs"},
{"ip":"147.202.120.54", "username":"gmatusevich1h"},
{"ip":"153.209.141.62", "username":"nspong1k"},
{"ip":"139.17.167.237", "username":"celgie2h"},
{"ip":"77.199.164.183", "username":"xgoldupaf"},
{"ip":"180.63.37.4", "username":"dplomerb0"},
{"ip":"39.203.235.250", "username":"tjeanesb3"},
{"ip":"118.3.191.92", "username":"pkalvinbv"},
{"ip":"213.177.199.123", "username":"sdunbobbincc"},
{"ip":"6.247.173.14", "username":"ewoliterek"},
{"ip":"30.110.15.93", "username":"spellissierew"},
{"ip":"125.204.173.120", "username":"hjewarez"},
{"ip":"242.245.128.200", "username":"ogurgfp"},
{"ip":"34.12.4.94", "username":"dsanctoi4"},
{"ip":"188.77.17.120", "username":"myuillii"},
{"ip":"211.110.206.46", "username":"adonawayo1"},
{"ip":"217.46.20.38", "username":"tnorreyom"},
{"ip":"1.36.208.89", "username":"bscogingsp0"},
{"ip":"208.17.108.43", "username":"mkubistap9"},
{"ip":"144.222.118.113", "username":"wmarrisq1"},
{"ip":"161.255.156.244", "username":"ezimek7g"},
{"ip":"158.89.77.90", "username":"htwinbourne8f"},
{"ip":"9.241.175.128", "username":"tlanceley8m"},
{"ip":"47.250.11.68", "username":"gscorton9w"},
{"ip":"161.214.163.163", "username":"bfakesap"},
{"ip":"47.191.62.202", "username":"ccremenc1"},
{"ip":"235.169.216.10", "username":"craubheimcg"},
{"ip":"113.67.160.212", "username":"tstanboroughcw"},
{"ip":"78.198.11.254", "username":"bmcdonalddt"},
{"ip":"107.86.121.83", "username":"bmactimpanydu"},
{"ip":"182.77.34.9", "username":"bgooldingeu"},
{"ip":"181.37.118.249", "username":"lhauggf4"},
{"ip":"103.35.163.245", "username":"nkirkmankj"},
{"ip":"250.248.64.129", "username":"bkillickn7"},
{"ip":"153.28.167.204", "username":"aduchanpf"},
{"ip":"172.101.56.219", "username":"dgaishr9"},
{"ip":"82.112.115.2", "username":"wcricket29"},
{"ip":"92.4.124.238", "username":"ncursons2l"},
{"ip":"199.203.149.119", "username":"ffabbro4d"},
{"ip":"69.47.216.76", "username":"sheggie5h"},
{"ip":"98.76.66.236", "username":"sdaltrey6b"},
{"ip":"92.2.245.250", "username":"fmonery6z"},
{"ip":"12.176.250.249", "username":"flillow8w"},
{"ip":"199.244.207.120", "username":"gcaunter9l"},
{"ip":"111.11.105.153", "username":"gvedishchev9o"},
{"ip":"243.188.19.122", "username":"alingerb1"},
{"ip":"218.186.80.154", "username":"edebowbd"},
{"ip":"241.66.87.135", "username":"ebrixeyce"},
{"ip":"132.28.154.35", "username":"anairneef"},
{"ip":"70.188.97.17", "username":"ctrittongf"},
{"ip":"40.216.39.21", "username":"agowlergp"},
{"ip":"11.253.123.60", "username":"csamsonjs"},
{"ip":"39.253.122.77", "username":"sallderidgelb"},
{"ip":"58.135.210.132", "username":"bgiorgik"},
{"ip":"22.166.159.200", "username":"jwelling38"},
{"ip":"50.251.76.65", "username":"csongust5f"},
{"ip":"90.95.40.240", "username":"ituckley8d"},
{"ip":"148.123.131.76", "username":"tthireaues"},
{"ip":"85.138.19.61", "username":"ablackallerf8"},
{"ip":"14.240.154.8", "username":"wodeegangb"},
{"ip":"123.241.175.18", "username":"rmcbrydehq"},
{"ip":"226.128.67.187", "username":"bfrandseniz"},
{"ip":"162.81.191.243", "username":"dfarquharsonjz"},
{"ip":"127.146.71.163", "username":"lbreazeallkh"},
{"ip":"201.101.187.19", "username":"iharradineqi"},
{"ip":"140.6.88.83", "username":"sspillardrd"},
{"ip":"37.168.207.190", "username":"cummfreyrf"},
{"ip":"35.136.215.72", "username":"ibuffeyr"},
{"ip":"206.201.133.251", "username":"tallon1r"},
{"ip":"252.14.60.85", "username":"frobillard3s"},
{"ip":"113.60.1.164", "username":"palner55"},
{"ip":"254.185.32.82", "username":"wembury7p"},
{"ip":"152.24.160.3", "username":"nquinet9u"},
{"ip":"49.237.102.98", "username":"ymousea0"},
{"ip":"67.238.100.191", "username":"gbiernatax"},
{"ip":"229.72.19.25", "username":"dexterbg"},
{"ip":"176.54.94.102", "username":"vmountforte4"},
{"ip":"144.157.203.205", "username":"dguileec"},
{"ip":"167.203.164.124", "username":"rlowersgi"},
{"ip":"115.85.54.121", "username":"agrievejq"},
{"ip":"86.191.190.97", "username":"apostlesk5"},
{"ip":"3.159.246.252", "username":"bcollingridgek8"},
{"ip":"115.71.220.115", "username":"vbrightyl9"},
{"ip":"135.46.69.245", "username":"jomolanlg"},
{"ip":"151.157.114.109", "username":"rlippiattmx"},
{"ip":"173.143.14.158", "username":"fcumberbatchnk"},
{"ip":"26.96.206.26", "username":"jstetsonpw"},
{"ip":"82.151.135.22", "username":"adejuares1i"},
{"ip":"35.14.140.206", "username":"wwarner42"},
{"ip":"211.68.231.48", "username":"fsturmey4o"},
{"ip":"239.101.42.116", "username":"jfreake71"},
{"ip":"203.79.14.11", "username":"wgalbreth92"},
{"ip":"53.113.142.153", "username":"cetteray"},
{"ip":"118.18.134.99", "username":"eyukhninib7"},
{"ip":"79.176.79.26", "username":"emcerlainec7"},
{"ip":"245.31.18.117", "username":"trosbroughcr"},
{"ip":"96.136.64.8", "username":"aferrieshb"},
{"ip":"71.226.74.173", "username":"eackersit"},
{"ip":"140.27.137.142", "username":"zstaggemf"},
{"ip":"193.239.75.77", "username":"nballintyneqx"},
{"ip":"207.52.76.182", "username":"fbrolanqz"},
{"ip":"111.105.140.108", "username":"awartnaby67"},
{"ip":"11.130.249.248", "username":"gelston9f"},
{"ip":"35.255.20.144", "username":"aamburgya5"},
{"ip":"151.155.142.98", "username":"mbueybw"},
{"ip":"176.89.45.213", "username":"mchapmangy"},
{"ip":"84.24.248.211", "username":"tgreggip"},
{"ip":"193.25.232.138", "username":"agenckenx"},
{"ip":"160.25.252.198", "username":"sthravespb"},
{"ip":"35.11.121.153", "username":"fscuphampj"},
{"ip":"238.80.25.9", "username":"fmackerness1l"},
{"ip":"51.126.37.156", "username":"nhoovart36"},
{"ip":"196.151.213.148", "username":"sgarey3u"},
{"ip":"153.113.50.203", "username":"rhowkins5e"},
{"ip":"234.5.226.25", "username":"bsenecaut6t"},
{"ip":"44.119.109.145", "username":"avouls8p"},
{"ip":"147.228.169.229", "username":"sfarge9v"},
{"ip":"2.132.217.0", "username":"llagrangefz"},
{"ip":"54.118.5.81", "username":"amoseygz"},
{"ip":"76.153.99.34", "username":"lhadinghami8"},
{"ip":"189.54.160.156", "username":"mguisekc"},
{"ip":"21.134.85.126", "username":"cgarnsonmm"},
{"ip":"98.151.107.200", "username":"bsummerscaleso3"},
{"ip":"227.225.192.221", "username":"tbenyon7"},
{"ip":"195.16.218.165", "username":"pferrettino9"},
{"ip":"79.208.7.30", "username":"nrosenboim3b"},
{"ip":"178.77.11.133", "username":"ktomankiewicz3e"},
{"ip":"142.48.117.170", "username":"jmattimoebp"},
{"ip":"38.198.198.168", "username":"ctrunchionk3"},
{"ip":"84.7.118.189", "username":"isissonsn9"},
{"ip":"13.6.199.107", "username":"emcateeroh"},
{"ip":"115.116.24.165", "username":"hcomfortor"},
{"ip":"136.0.55.128", "username":"tlaguerrep"},
{"ip":"17.84.71.117", "username":"strippitt1d"},
{"ip":"66.20.72.124", "username":"nhadleigh1t"},
{"ip":"210.60.119.169", "username":"rbiggans1w"},
{"ip":"35.165.87.169", "username":"alefloch2x"},
{"ip":"183.100.182.7", "username":"emarcombe2z"},
{"ip":"7.244.235.221", "username":"bbuckles5q"},
{"ip":"35.248.6.36", "username":"glewington5u"},
{"ip":"52.51.174.89", "username":"seastham68"},
{"ip":"211.132.249.28", "username":"eclynmans6f"},
{"ip":"50.170.20.2", "username":"rburston6n"},
{"ip":"94.32.68.215", "username":"rgoodger72"},
{"ip":"38.35.144.153", "username":"bhedden84"},
{"ip":"70.214.86.39", "username":"spasterfieldcz"},
{"ip":"244.107.187.213", "username":"jmattityahoued"},
{"ip":"109.192.193.244", "username":"wbradnockfx"},
{"ip":"142.14.73.230", "username":"cpignyig"},
{"ip":"175.84.49.111", "username":"njorgesennn"},
{"ip":"116.33.50.20", "username":"oattwellri"},
{"ip":"150.227.156.184", "username":"mparchment2g"},
{"ip":"245.102.156.78", "username":"sgrabbam43"},
{"ip":"64.85.100.158", "username":"aiwanowski4m"},
{"ip":"242.170.55.9", "username":"kharriman54"},
{"ip":"128.124.45.2", "username":"pfuge5d"},
{"ip":"105.114.242.67", "username":"bchildren6r"},
{"ip":"139.95.76.41", "username":"cfinnemore73"},
{"ip":"194.15.21.53", "username":"chargraves7w"},
{"ip":"29.60.247.133", "username":"mtrounsoncx"},
{"ip":"19.141.6.135", "username":"mstainbridgegu"},
{"ip":"108.69.230.38", "username":"ecanarioie"},
{"ip":"210.232.46.75", "username":"kbowij"},
{"ip":"59.219.246.219", "username":"mtomasiv"},
{"ip":"191.64.92.125", "username":"ehancockk7"},
{"ip":"130.249.225.161", "username":"gepslyln"},
{"ip":"251.155.176.70", "username":"kgoodsallm3"},
{"ip":"208.123.105.237", "username":"tmcgrayleme"},
{"ip":"252.32.97.21", "username":"rceresano"},
{"ip":"211.43.94.128", "username":"jcoatsod"},
{"ip":"90.88.243.64", "username":"jpitbladoq8"},
{"ip":"246.176.7.10", "username":"casquezqk"},
{"ip":"145.8.65.225", "username":"agallamorero"},
{"ip":"207.173.0.42", "username":"ncassiuss"},
{"ip":"101.71.32.11", "username":"hshervington3o"},
{"ip":"100.216.14.101", "username":"egiggs3t"},
{"ip":"159.213.26.33", "username":"afranzonetti5w"},
{"ip":"153.177.155.175", "username":"gsteeplescj"},
{"ip":"61.222.244.4", "username":"bconantdb"},
{"ip":"78.201.136.253", "username":"mcuxsoney"},
{"ip":"198.146.125.253", "username":"pwigsellfo"},
{"ip":"191.199.174.117", "username":"aduroin"},
{"ip":"57.53.199.33", "username":"chawyesjk"},
{"ip":"249.163.203.203", "username":"vbarensenli"},
{"ip":"134.69.235.86", "username":"bloucalm"},
{"ip":"166.206.201.251", "username":"ajanaceklr"},
{"ip":"9.246.250.132", "username":"eayliffelv"},
{"ip":"118.43.231.76", "username":"mmacmaykinmb"},
{"ip":"133.122.201.203", "username":"ftillmq"},
{"ip":"31.184.177.240", "username":"abagnellnf"},
{"ip":"105.221.188.97", "username":"jwhitemarsho6"},
{"ip":"224.120.243.203", "username":"ltiemanox"},
{"ip":"135.169.238.64", "username":"vclaidenpi"},
{"ip":"161.16.194.109", "username":"lthompsett3"},
{"ip":"233.110.120.183", "username":"anorthamj"},
{"ip":"232.216.61.227", "username":"glaunchbury4n"},
{"ip":"9.112.120.120", "username":"gyexley4t"},
{"ip":"186.153.114.153", "username":"nprickett7h"},
{"ip":"99.18.101.199", "username":"vjenne8j"},
{"ip":"180.86.150.228", "username":"stizard8l"},
{"ip":"249.62.139.20", "username":"hkielt9g"},
{"ip":"205.20.75.235", "username":"jlinley9h"},
{"ip":"62.123.109.182", "username":"dmateosb8"},
{"ip":"128.229.115.2", "username":"bcowinsc8"},
{"ip":"234.242.58.222", "username":"adommersendo"},
{"ip":"237.177.13.6", "username":"gshevelsiw"},
{"ip":"119.40.115.54", "username":"asteedki"},
{"ip":"77.54.117.95", "username":"gryburnmu"},
{"ip":"90.84.253.93", "username":"smuzziof"},
{"ip":"104.89.242.219", "username":"bferrollipn"},
{"ip":"122.227.235.177", "username":"wshilstone10"},
{"ip":"147.165.250.20", "username":"ykarolczyk1b"},
{"ip":"153.148.119.9", "username":"hdanser4i"},
{"ip":"208.252.107.40", "username":"lpenson50"},
{"ip":"218.121.114.214", "username":"hjakolevitch80"},
{"ip":"167.110.48.224", "username":"kflea8q"},
{"ip":"208.149.149.110", "username":"eskiltona9"},
{"ip":"50.126.84.102", "username":"esabeybr"},
{"ip":"110.85.110.219", "username":"mtroutbeckbx"},
{"ip":"109.143.63.174", "username":"llindemannby"},
{"ip":"157.123.237.0", "username":"lkerrycd"},
{"ip":"192.163.126.123", "username":"ztrimmellcs"},
{"ip":"210.142.204.190", "username":"wberrowea"},
{"ip":"40.146.61.222", "username":"mrudigerf7"},
{"ip":"65.63.93.55", "username":"jgaliah0"},
{"ip":"147.245.174.81", "username":"wambrosioic"},
{"ip":"91.220.204.100", "username":"echatelotix"},
{"ip":"114.68.212.239", "username":"scaponq0"},
{"ip":"151.153.197.119", "username":"vrichardonq5"},
{"ip":"163.54.44.186", "username":"shoutbyc"},
{"ip":"76.170.14.68", "username":"mtweedle1o"},
{"ip":"30.82.75.233", "username":"swaterfield2n"},
{"ip":"36.46.194.129", "username":"ddurning37"},
{"ip":"9.121.216.99", "username":"amacparlan3a"},
{"ip":"123.164.179.33", "username":"aposkitt3g"},
{"ip":"168.0.29.99", "username":"vsnoxill3w"},
{"ip":"20.25.29.7", "username":"dfarlow5l"},
{"ip":"66.186.45.148", "username":"ihallwood6v"},
{"ip":"223.139.241.19", "username":"wwestfrimley7i"},
{"ip":"171.4.91.187", "username":"kyoell82"},
{"ip":"5.219.232.226", "username":"lmilesapbe"},
{"ip":"83.127.19.83", "username":"nbyrkmyrbj"},
{"ip":"243.47.6.54", "username":"lturbanbu"},
{"ip":"213.112.121.159", "username":"cmunseyd4"},
{"ip":"140.100.242.120", "username":"ebaksterdq"},
{"ip":"220.149.118.225", "username":"aluttyeq"},
{"ip":"200.57.131.14", "username":"byolehv"},
{"ip":"89.68.99.163", "username":"dbanaszewskije"},
{"ip":"158.196.117.130", "username":"vfrederickl0"},
{"ip":"114.0.80.142", "username":"gfolkmr"},
{"ip":"38.7.213.80", "username":"ekupisnw"},
{"ip":"34.170.155.23", "username":"akaynepu"},
{"ip":"100.216.47.92", "username":"zziehmsqo"},
{"ip":"229.198.77.218", "username":"palbone13"},
{"ip":"80.131.0.242", "username":"ckelner3k"},
{"ip":"147.63.231.204", "username":"ademanuele4f"},
{"ip":"34.69.145.200", "username":"aimpy59"},
{"ip":"182.171.12.143", "username":"ndjorvic5g"},
{"ip":"143.255.199.206", "username":"mcoile5"},
{"ip":"247.230.67.151", "username":"jableyht"},
{"ip":"156.31.80.148", "username":"chalburtonjj"},
{"ip":"248.152.30.190", "username":"fdebrettjl"},
{"ip":"58.225.171.28", "username":"astackoz"},
{"ip":"45.7.208.36", "username":"acrosslandqq"},
{"ip":"184.223.165.27", "username":"hchamberlin2v"},
{"ip":"28.72.83.216", "username":"bashworth3m"},
{"ip":"138.250.160.157", "username":"tdunnett4x"},
{"ip":"10.35.232.84", "username":"modowd86"},
{"ip":"198.20.65.173", "username":"dpitkaithly8k"},
{"ip":"236.155.44.88", "username":"fmccathay90"},
{"ip":"150.66.31.162", "username":"gdeamaya9b"},
{"ip":"126.196.122.36", "username":"farnotaw"},
{"ip":"19.182.232.111", "username":"balderseybc"},
{"ip":"215.140.72.126", "username":"abidgodc3"},
{"ip":"82.195.52.203", "username":"rmattekdc"},
{"ip":"245.133.100.115", "username":"kmoultdw"},
{"ip":"211.141.182.59", "username":"rlillicoem"},
{"ip":"19.6.165.182", "username":"dketchenf9"},
{"ip":"157.47.255.117", "username":"hambrosigj"},
{"ip":"40.197.197.58", "username":"cgoldsberrygo"},
{"ip":"118.136.73.252", "username":"dwisongv"},
{"ip":"93.87.61.159", "username":"kneilandhc"},
{"ip":"3.119.220.88", "username":"pcouvesjp"},
{"ip":"252.50.126.96", "username":"istiddardkg"},
{"ip":"207.206.162.178", "username":"ttitherkt"},
{"ip":"243.102.33.213", "username":"cfishl4"},
{"ip":"212.158.48.18", "username":"erittmeierrm"},
{"ip":"80.186.154.151", "username":"mtollady2"},
{"ip":"77.204.50.156", "username":"edimelow6"},
{"ip":"100.55.177.161", "username":"gbaalham12"},
{"ip":"99.111.253.180", "username":"gtolhurst5c"},
{"ip":"181.224.123.89", "username":"ssodor7f"},
{"ip":"254.23.127.124", "username":"bovernell8b"},
{"ip":"251.36.247.47", "username":"chaweag"},
{"ip":"195.172.118.196", "username":"ctomczynskiav"},
{"ip":"47.108.130.235", "username":"lhathwoodca"},
{"ip":"171.43.104.120", "username":"bharmse6"},
{"ip":"13.22.214.235", "username":"bhallifaxf0"},
{"ip":"134.215.19.53", "username":"kwilberh1"},
{"ip":"59.63.95.52", "username":"gkliemkeh2"},
{"ip":"116.113.61.228", "username":"djoyeshr"},
{"ip":"132.47.93.211", "username":"pkitchinhw"},
{"ip":"109.191.167.14", "username":"dsorej0"},
{"ip":"193.102.235.34", "username":"ggiacobiliok0"},
{"ip":"173.150.139.7", "username":"bchawkleypx"},
{"ip":"107.201.221.187", "username":"bhasseklre"},
{"ip":"58.52.196.13", "username":"sstandidge5"},
{"ip":"122.4.13.217", "username":"jaronsohnb"},
{"ip":"140.133.209.75", "username":"ocamossom"},
{"ip":"2.250.22.196", "username":"alebelv"},
{"ip":"167.15.69.196", "username":"rhelin21"},
{"ip":"249.72.209.20", "username":"ideverock41"},
{"ip":"150.193.239.185", "username":"ktames4p"},
{"ip":"222.17.110.184", "username":"rmosdill81"},
{"ip":"226.96.9.23", "username":"tcleevely8e"},
{"ip":"244.240.171.227", "username":"ttwinborneel"},
{"ip":"16.71.187.119", "username":"thaseller"},
{"ip":"230.235.55.0", "username":"amccaughanfg"},
{"ip":"90.245.177.97", "username":"tdozdillft"},
{"ip":"227.196.172.49", "username":"ncrosfeldfv"},
{"ip":"129.218.69.247", "username":"spauleauh8"},
{"ip":"211.53.112.242", "username":"ltubbi7"},
{"ip":"144.26.213.22", "username":"drebanksks"},
{"ip":"7.250.213.102", "username":"edebellisls"},
{"ip":"97.153.180.106", "username":"tdeningtonmp"},
{"ip":"118.48.122.20", "username":"anancarrowog"},
{"ip":"167.155.239.99", "username":"dkenneyq3"}]
requestUrls = ["GET /petstore/Fish/Feeders",
"GET /petstore/Cats/DryFood",
"GET /petstore/Bird/Etc",
"GET /petstore/Dogs/DryFood",
"GET /petstore/Dogs/CannedFood",
"GET /petstore/Fish/PlantCare",
"GET /petstore/Fish/Food",
"GET /petstore/Cats/WetFood",
"GET /petstore/Bird/Treats",
"GET /petstore/Bird/Food",
"GET /petstore/Dogs/FoodToppers",
"GET /petstore/Cats/Treats"]
client = boto3.client('logs')
def lambda_handler(event, context):
timestamp = datetime.datetime.now()
nextSeqTokem = ""
logGroupName = os.environ['LOG_GROUP_NAME']
logStreamName = timestamp.strftime('%d-%b-%Y-%H-%M-%S')
response = client.create_log_stream(
logGroupName=logGroupName,
logStreamName=logStreamName
)
    waittime = 100  # initial delay between log events, in milliseconds (randomized below)
while context.get_remaining_time_in_millis() > (waittime+100):
time.sleep(float(waittime)/1000.0)
profile = profiles[random.randint(0,len(profiles)-1)]
requestUrl = requestUrls[random.randint(0,len(requestUrls)-1)]
timestamp = datetime.datetime.now()
        milliseconds = timestamp.microsecond // 1000
        # put_log_events expects the timestamp as an integer number of milliseconds since the epoch
        ts = calendar.timegm(timestamp.timetuple()) * 1000 + milliseconds
logentry = profile["ip"] + "," + profile["username"] + "," + timestamp.strftime('%d/%b/%Y:%H:%M:%S') + ",\"" + requestUrl +"\",200," + str(random.randint(300,1000))
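        # The first put_log_events call on a fresh stream is made without a sequence
        # token; each response returns nextSequenceToken, which is passed on the
        # following call (older CloudWatch Logs APIs require this).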
        if nextSeqToken == "":
response = client.put_log_events(
logGroupName=logGroupName,
logStreamName=logStreamName,
logEvents=[
{
'timestamp': ts,
'message': logentry
}
]
)
            nextSeqToken = response["nextSequenceToken"]
else:
response = client.put_log_events(
logGroupName=logGroupName,
logStreamName=logStreamName,
logEvents=[
{
'timestamp': ts,
'message': logentry
}
],
                sequenceToken=nextSeqToken
)
            nextSeqToken = response["nextSequenceToken"]
waittime = random.randint(1,100)
return context.get_remaining_time_in_millis()
| 50.388683
| 166
| 0.581749
|
0cd8da46f0f4c0d3c942cc0cd6f0e2f269a12a9e
| 1,804
|
py
|
Python
|
setup.py
|
skabbit/djangocms-unitegallery
|
e77c6f1b20e75f25c11674a9fd35f8d0b7cf3ec7
|
[
"MIT"
] | null | null | null |
setup.py
|
skabbit/djangocms-unitegallery
|
e77c6f1b20e75f25c11674a9fd35f8d0b7cf3ec7
|
[
"MIT"
] | null | null | null |
setup.py
|
skabbit/djangocms-unitegallery
|
e77c6f1b20e75f25c11674a9fd35f8d0b7cf3ec7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from setuptools import setup
from djangocms_unitegallery import __version__
INSTALL_REQUIRES = [
'django>=1.7',
'django-cms>=3.0',
# 'sorl-thumbnail>=12.0', # this should be done manually
]
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
]
if sys.version_info >= (3, 0):
enc = {'encoding': 'UTF-8'}
else:
enc = {}
long_desc = r'''
%s
%s
''' % (open('README.rst', **enc).read(), open('CHANGELOG.rst', **enc).read())
setup(
name='djangocms-unitegallery',
version=__version__,
description='unitegallery grid Plugin for django CMS',
author='David Jean Louis',
author_email='izimobil@gmail.com',
url='https://github.com/izimobil/djangocms-unitegallery',
packages=[
'djangocms_unitegallery',
'djangocms_unitegallery.migrations',
],
install_requires=INSTALL_REQUIRES,
license='LICENSE.txt',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
    long_description=long_desc,
include_package_data=True,
zip_safe=False
)
| 26.529412
| 77
| 0.638027
|
a159122d896494c3cf699fb31fabe409a8c6aa50
| 6,972
|
py
|
Python
|
train.py
|
rendchevi/BVAE-TTS
|
235753c13bc528430c19a34e1ca0fa1c1a631993
|
[
"MIT"
] | null | null | null |
train.py
|
rendchevi/BVAE-TTS
|
235753c13bc528430c19a34e1ca0fa1c1a631993
|
[
"MIT"
] | null | null | null |
train.py
|
rendchevi/BVAE-TTS
|
235753c13bc528430c19a34e1ca0fa1c1a631993
|
[
"MIT"
] | null | null | null |
import os, sys, argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.model import Model
import hparams as hp
from text import *
from utils.utils import *
from utils.plot_image import *
from apex import amp
def validate(model, val_loader, iteration, writer):
model.eval()
with torch.no_grad():
n_data, val_recon_loss, val_kl_loss, val_duration_loss, val_align_loss = 0, 0, 0, 0, 0
for i, batch in enumerate(val_loader):
n_data += len(batch[0])
text_padded, text_lengths, mel_padded, mel_lengths = [ x.cuda() for x in batch ]
text_mask, mel_mask, diag_mask = model.prepare_mask(text_lengths, mel_lengths)
##### Text #####
key, value = model.TextEnc(text_padded, text_mask)
##### Bottom_Up #####
query=model.bottom_up(mel_padded, mel_mask)
##### Alignment #####
h, align = model.get_align(query, key, value, text_lengths, mel_lengths, text_mask, mel_mask)
##### Top_Down #####
mel_pred, kl_loss = model.top_down(h, mel_mask)
##### Compute Loss #####
duration_out = model.get_duration(value, text_mask)
recon_loss, duration_loss, align_loss = model.compute_loss(mel_pred,
mel_padded,
duration_out,
align,
mel_lengths,
text_mask,
mel_mask,
diag_mask)
val_recon_loss += recon_loss.item() * len(batch[0])
val_kl_loss += kl_loss.item() * len(batch[0])
val_duration_loss += duration_loss.item() * len(batch[0])
val_align_loss += align_loss.item() * len(batch[0])
val_recon_loss /= n_data
val_kl_loss /= n_data
val_duration_loss /= n_data
val_align_loss /= n_data
writer.add_scalar('val_recon_loss', val_recon_loss, global_step=iteration)
writer.add_scalar('val_kl_loss', val_kl_loss, global_step=iteration)
writer.add_scalar('val_duration_loss', val_duration_loss, global_step=iteration)
writer.add_scalar('val_align_loss', val_align_loss, global_step=iteration)
mel_plots, align_plots = plot_image(mel_padded,
mel_pred,
align,
text_padded,
mel_lengths,
text_lengths)
writer.add_figure('Validation mel_plots', mel_plots, global_step=iteration)
writer.add_figure('Validation align_plots', align_plots, global_step=iteration)
mel_out, durations = model.inference(text_padded[-1:, :text_lengths[-1]])
align = torch.repeat_interleave(torch.eye(len(durations[0].cpu())).to(torch.long),
durations[0].cpu(),
dim=0).unsqueeze(0)
mel_lengths[-1] = mel_out.size(2)
mel_plots, align_plots = plot_image(torch.zeros_like(mel_padded),
mel_out,
align,
text_padded,
mel_lengths,
text_lengths)
writer.add_figure('Validation mel_plots_inference', mel_plots, global_step=iteration)
writer.add_figure('Validation align_plots_inference', align_plots, global_step=iteration)
model.train()
def main(args):
train_loader, val_loader, collate_fn = prepare_dataloaders(hp)
model = Model(hp).cuda()
optimizer = torch.optim.Adamax(model.parameters(), lr=hp.lr)
writer = get_writer(hp.output_directory, args.logdir)
#model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
### Load trained checkpoint ###
if args.checkpoint_path != '':
model.load_state_dict(torch.load(args.checkpoint_path)['state_dict'])
print('#####################')
print('CHECKPOINT LOADED.')
print('#####################')
iteration = 0
model.train()
print(f"Training Start!!! ({args.logdir})")
while iteration < (hp.train_steps):
for i, batch in enumerate(train_loader):
text_padded, text_lengths, mel_padded, mel_lengths = [ x.cuda() for x in batch ]
recon_loss, kl_loss, duration_loss, align_loss = model(text_padded, mel_padded, text_lengths, mel_lengths)
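            # KL warm-up: linearly anneal the KL weight from 0 to 1 over
            # hp.kl_warmup_steps iterations (a common trick against posterior collapse).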
alpha=min(1, iteration/hp.kl_warmup_steps)
scaled_loss = recon_loss + alpha*kl_loss + duration_loss + align_loss
scaled_loss.backward()
#with amp.scale_loss((recon_loss + alpha*kl_loss + duration_loss + align_loss), optimizer) as scaled_loss:
# scaled_loss.backward()
iteration += 1
lr_scheduling(optimizer, iteration)
nn.utils.clip_grad_norm_(model.parameters(), hp.grad_clip_thresh)
optimizer.step()
model.zero_grad()
writer.add_scalar('train_recon_loss', recon_loss, global_step=iteration)
writer.add_scalar('train_kl_loss', kl_loss, global_step=iteration)
writer.add_scalar('train_duration_loss', duration_loss, global_step=iteration)
writer.add_scalar('train_align_loss', align_loss, global_step=iteration)
sys.stdout.write('\r[Iteration] {}/{} [recon_loss] {} [kl_loss] {} [duration_loss] {} [align_loss] {}'.format(iteration,
hp.train_steps,
recon_loss,
alpha*kl_loss,
duration_loss,
align_loss))
if iteration % (hp.iters_per_validation) == 0:
validate(model, val_loader, iteration, writer)
if iteration % (hp.iters_per_checkpoint) == 0:
save_checkpoint(model, optimizer, hp.lr, iteration, filepath=f'{hp.output_directory}/{args.logdir}')
if iteration == (hp.train_steps):
break
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('--checkpoint_path', type = str, default = '')
p.add_argument('--gpu', type=str, default='0')
p.add_argument('--logdir', type=str, required=True)
args = p.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
torch.manual_seed(hp.seed)
torch.cuda.manual_seed(hp.seed)
main(args)
| 45.568627
| 132
| 0.549053
|
6e053fef29fa0b07c1320316de3e733ec6e92049
| 534
|
py
|
Python
|
python/cv/LoadImage/main.py
|
knyazer/lessons
|
2ff0ecc4be53d56d4709f5b0e0de2b5a3cc2d0cc
|
[
"MIT"
] | null | null | null |
python/cv/LoadImage/main.py
|
knyazer/lessons
|
2ff0ecc4be53d56d4709f5b0e0de2b5a3cc2d0cc
|
[
"MIT"
] | null | null | null |
python/cv/LoadImage/main.py
|
knyazer/lessons
|
2ff0ecc4be53d56d4709f5b0e0de2b5a3cc2d0cc
|
[
"MIT"
] | null | null | null |
import cv2 as cv
import numpy as np
### TARGET: read an image and then show it
# Pixels are in format BGR (blue green red)
# And images are stored as height x width x channels arrays
img = cv.imread("data/images/deer.jpg")
print(img.shape) ### (height, width, number of channels)
# You can set a pixel's value:
img[5][60] = (250, 250, 250)
# Or a particular channel of a pixel:
img[100][100][0] = 0
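# Slicing works too: img[:, :, 0] is the whole blue channel as a 2D array,
# and img[100:150, 200:250] is a 50x50 BGR crop.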
cv.imshow("Name of the window", img)
# Wait until any key is pressed (0 means wait indefinitely; a positive number is a timeout in ms)
cv.waitKey(0)
| 21.36
| 91
| 0.702247
|
b2dd65f9fc5115bbe67ed6cc149fd15a99b14d38
| 17,205
|
py
|
Python
|
main.py
|
PeacefulLion/91160
|
57d0cf429fb1808b3141a10839db6cdda8b4de58
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
PeacefulLion/91160
|
57d0cf429fb1808b3141a10839db6cdda8b4de58
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
PeacefulLion/91160
|
57d0cf429fb1808b3141a10839db6cdda8b4de58
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# author: MasterPan
# email: i@hvv.me
import re
import time
import json
import datetime
import logging
import requests.cookies
from bs4 import BeautifulSoup
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5 as Cipher_PKCS1_v1_5
from base64 import b64decode, b64encode
from fake_useragent import UserAgent
import random
from apscheduler.schedulers.background import BackgroundScheduler
# Edit the values below, or leave them empty to be prompted at runtime
configs = {
'username': '',
'password': '',
'city_index': '',
'unit_id': '',
'dep_id': '',
'doc_id': [],
'weeks': [],
'days': [],
'unit_name': '',
'dep_name': '',
'doctor_name': []
}
ua = UserAgent()
PUBLIC_KEY = "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDWuY4Gff8FO3BAKetyvNgGrdZM9CMNoe45SzHMXxAPWw6E2idaEjqe5uJFjVx55JW" \
"+5LUSGO1H5MdTcgGEfh62ink/cNjRGJpR25iVDImJlLi2izNs9zrQukncnpj6NGjZu" \
"/2z7XXfJb4XBwlrmR823hpCumSD1WiMl1FMfbVorQIDAQAB "
session = requests.Session()
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)
# Popular domestic city data (Guangzhou, Changsha, Hong Kong, Shanghai, Wuhan, Chongqing, Beijing, Dongguan, Shenzhen, Overseas, Zhengzhou, Tianjin, Huainan)
cities = [
{
"name": "广州",
"cityId": "2918"
},
{
"name": "长沙",
"cityId": "3274"
},
{
"name": "香港",
"cityId": "3314"
},
{
"name": "上海",
"cityId": "3306"
},
{
"name": "武汉",
"cityId": "3276"
},
{
"name": "重庆",
"cityId": "3316"
},
{
"name": "北京",
"cityId": "2912"
},
{
"name": "东莞",
"cityId": "2920"
},
{
"name": "深圳",
"cityId": "5"
},
{
"name": "海外",
"cityId": "6145"
},
{
"name": "郑州",
"cityId": "3242"
},
{
"name": "天津",
"cityId": "3308"
},
{
"name": "淮南",
"cityId": "3014"
}
]
weeks_list = [
{
"name": "星期一",
"value": "1",
"alias": "一"
},
{
"name": "星期二",
"value": "2",
"alias": "二"
},
{
"name": "星期三",
"value": "3",
"alias": "三"
},
{
"name": "星期四",
"value": "4",
"alias": "四"
},
{
"name": "星期五",
"value": "5",
"alias": "五"
},
{
"name": "星期六",
"value": "6",
"alias": "六"
},
{
"name": "星期天",
"value": "7",
"alias": "日"
}
]
day_list = [
{
"name": "上午",
"value": ["am"]
},
{
"name": "下午",
"value": ["pm"]
},
{
"name": "全天",
"value": ["am", "pm"]
}
]
def get_headers() -> json:
return {
"User-Agent": ua.random,
"Referer": "https://www.91160.com",
"Origin": "https://www.91160.com"
}
def login(username, password) -> bool:
url = "https://user.91160.com/login.html"
rsa_key = RSA.importKey(b64decode(PUBLIC_KEY))
cipher = Cipher_PKCS1_v1_5.new(rsa_key)
username = b64encode(cipher.encrypt(username.encode())).decode()
password = b64encode(cipher.encrypt(password.encode())).decode()
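    # The site expects the username and password to be RSA-encrypted with its
    # published public key (PKCS#1 v1.5) and base64-encoded before being POSTed.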
data = {
"username": username,
"password": password,
"target": "https://www.91160.com",
"error_num": 0,
"token": tokens()
}
r = session.post(url, data=data, headers=get_headers(),
allow_redirects=False)
if r.status_code == 302:
redirect_url = r.headers["location"]
session.get(redirect_url, headers=get_headers())
logging.info("登录成功")
return True
else:
logging.error("登录失败: {}".format(check_user(data)))
return False
def check_user(data) -> json:
url = "https://user.91160.com/checkUser.html"
r = session.post(url, data=data, headers=get_headers())
return json.loads(r.content.decode('utf-8'))
def tokens() -> str:
url = "https://user.91160.com/login.html"
r = session.get(url, headers=get_headers())
r.encoding = r.apparent_encoding
soup = BeautifulSoup(r.text, "html.parser")
return soup.find("input", id="tokens").attrs["value"]
def brush_ticket(unit_id, dep_id, weeks, days) -> list:
now_date = datetime.date.today().strftime("%Y-%m-%d")
url = "https://www.91160.com/dep/getschmast/uid-{}/depid-{}/date-{}/p-0.html".format(
unit_id, dep_id, now_date)
r = session.get(url, headers=get_headers())
json_obj = r.json()
if "week" not in json_obj:
raise RuntimeError("刷票异常: {}".format(json_obj))
week_list: list = json_obj["week"]
week_arr = []
for week in weeks:
week_arr.append(str(week_list.index(week)))
doc_ids = json_obj["doc_ids"].split(",")
result = []
for doc in doc_ids:
_doc = json_obj["sch"][doc]
for day in days:
if day in _doc:
sch = _doc[day]
if isinstance(sch, list) and len(sch) > 0:
for item in sch:
result.append(item)
else:
for index in week_arr:
if index in sch:
result.append(sch[index])
return [element for element in result if element["y_state"] == "1"]
def brush_ticket_new(doc_id, dep_id, weeks, days) -> list:
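    # Fetch this doctor's schedule for the next 6 days and keep only slots whose
    # y_state flag is "1", which the script treats as still bookable.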
now_date = datetime.date.today().strftime("%Y-%m-%d")
url = "https://www.91160.com/doctors/ajaxgetclass.html"
data = {
"docid": doc_id,
"date": now_date,
"days": 6
}
r = session.post(url, headers=get_headers(), data=data)
json_obj = r.json()
if "dates" not in json_obj:
if "status" in json_obj:
logging.info("Token过期,重新登陆")
time.sleep(30)
login(configs['username'], configs['password'])
return []
else:
raise RuntimeError("刷票异常: {}".format(json_obj))
date_list: dict = json_obj["dates"]
week_arr = []
for week in weeks:
val = convert_week(week)
key = list(date_list.keys())[list(date_list.values()).index(val)]
week_arr.append(key)
if len(week_arr) == 0:
raise RuntimeError("刷票异常: {}".format(json_obj))
doc_sch = json_obj["sch"]["{}_{}".format(dep_id, doc_id)]
result = []
for day in days:
key = "{}_{}_{}".format(dep_id, doc_id, day)
if key in doc_sch:
doc_sch_day = doc_sch[key]
for week in week_arr:
if week in doc_sch_day:
result.append(doc_sch_day[week])
return [element for element in result if element["y_state"] == "1"]
def convert_week(w):
for week in weeks_list:
if week["value"] == w:
return week["alias"]
return ""
def get_ticket(ticket, unit_id, dep_id):
schedule_id = ticket["schedule_id"]
url = "https://www.91160.com/guahao/ystep1/uid-{}/depid-{}/schid-{}.html".format(
unit_id, dep_id, schedule_id)
logging.info(url)
r = session.get(url, headers=get_headers())
r.encoding = r.apparent_encoding
soup = BeautifulSoup(r.text, "html.parser")
data = {
"sch_data": soup.find(attrs={"name": "sch_data"}).attrs["value"],
"mid": soup.find(attrs={"name": "mid"}).attrs["value"],
"hisMemId": "",
"disease_input": "",
"order_no": "",
"disease_content": "",
"accept": "1",
"unit_id": soup.find("input", id="unit_id").attrs["value"],
"schedule_id": ticket["schedule_id"],
"dep_id": ticket["dep_id"],
"his_dep_id": "",
"sch_date": "",
"time_type": ticket["time_type"],
"doctor_id": ticket["doctor_id"],
"his_doc_id": "",
"detlid": soup.select('#delts li')[0].attrs["val"],
"detlid_realtime": soup.find("input", id="detlid_realtime").attrs["value"],
"level_code": soup.find("input", id="level_code").attrs["value"],
"is_hot": "",
"addressId": "3317",
"address": "China",
"buyinsurance": 1
}
url = "https://www.91160.com/guahao/ysubmit.html"
logging.error("URL: {}".format(url))
logging.error("PARAM: {}".format(data))
r = session.post(url, data=data, headers=get_headers(),
allow_redirects=False)
if r.status_code == 302:
# redirect_url = r.headers["location"]
# if get_ticket_result(redirect_url):
logging.info("预约成功,请留意短信通知!")
else:
logging.info(r.text)
logging.info("预约失败")
def get_ticket_result(redirect_url) -> bool:
r = session.get(redirect_url, headers=get_headers())
r.encoding = r.apparent_encoding
soup = BeautifulSoup(r.text, "html.parser")
result = soup.find(attrs={"class": "sucess-title"}).text
return result == "预约成功"
def set_user_configs():
while True:
if configs['username'] != '':
print("当前用户名为:%s" % configs['username'])
else:
configs['username'] = input("请输入用户名: ")
if configs['password'] != '':
print("当前密码为:%s" % configs['password'])
else:
configs['password'] = input("请输入密码: ")
if configs['username'] != '' and configs['password'] != '':
print("登录中,请稍等...")
if login(configs['username'], configs['password']):
time.sleep(1)
print("登录成功")
break
else:
configs['username'] = ''
configs['password'] = ''
time.sleep(1)
print("用户名或密码错误,请重新输入!")
else:
configs['username'] = ''
configs['password'] = ''
time.sleep(1)
print("用户名/密码信息不完整,已清空,请重新输入")
def set_city_configs():
if configs['city_index'] == "":
print("=====请选择就医城市=====\n")
for index, city in enumerate(cities):
print("{}{}. {}".format(" " if index <
9 else "", index + 1, city["name"]))
print()
while True:
city_index = input("请输入城市序号: ")
is_number = True if re.match(r'^\d+$', city_index) else False
if is_number and int(city_index) in range(1, len(cities) + 1):
configs['city_index'] = city_index
break
else:
print("输入有误,请重新输入!")
else:
print("当前选择城市为:%s" % cities[int(configs['city_index']) - 1]["name"])
def set_hospital_configs():
url = "https://www.91160.com/ajax/getunitbycity.html"
data = {
"c": cities[int(configs['city_index']) - 1]["cityId"]
}
r = session.post(url, headers=get_headers(), data=data)
hospitals = json.loads(r.content.decode('utf-8'))
if configs['unit_id'] == "":
print("=====请选择医院=====\n")
for index, hospital in enumerate(hospitals):
print("{}{}. {}".format(" " if index < 9 else "",
index + 1, hospital["unit_name"]))
print()
while True:
hospital_index = input("请输入医院序号: ")
is_number = True if re.match(r'^\d+$', hospital_index) else False
if is_number and int(hospital_index) in range(1, len(hospitals) + 1):
configs["unit_id"] = hospitals[int(
hospital_index) - 1]["unit_id"]
configs["unit_name"] = hospitals[int(
hospital_index) - 1]["unit_name"]
break
else:
print("输入有误,请重新输入!")
else:
print("当前选择医院为:%s(%s)" % (configs["unit_name"], configs["unit_id"]))
def set_department_configs():
url = "https://www.91160.com/ajax/getdepbyunit.html"
data = {
"keyValue": configs["unit_id"]
}
r = session.post(url, headers=get_headers(), data=data)
departments = r.json()
if configs['dep_id'] == "":
print("=====请选择科室=====\n")
dep_id_arr = []
dep_name = {}
for department in departments:
print(department["pubcat"])
for child in department["childs"]:
dep_id_arr.append(child["dep_id"])
dep_name[child["dep_id"]] = child["dep_name"]
print(" {}. {}".format(child["dep_id"], child["dep_name"]))
print()
while True:
department_index = input("请输入科室序号: ")
is_number = True if re.match(r'^\d+$', department_index) else False
if is_number and int(department_index) in dep_id_arr:
configs["dep_id"] = department_index
configs["dep_name"] = dep_name[int(department_index)]
break
else:
print("输入有误,请重新输入!")
else:
print("当前选择科室为:%s(%s)" % (configs["dep_name"], configs["dep_id"]))
def set_doctor_configs():
now_date = datetime.date.today().strftime("%Y-%m-%d")
unit_id = configs["unit_id"]
dep_id = configs["dep_id"]
url = "https://www.91160.com/dep/getschmast/uid-{}/depid-{}/date-{}/p-0.html".format(
unit_id, dep_id, now_date)
r = session.get(url, headers=get_headers())
logging.info(r.json())
doctors = r.json()["doc"]
doc_id_arr = []
doc_name = {}
if len(configs["doc_id"]) == 0:
print("=====请选择医生=====\n")
for doctor in doctors:
doc_id_arr.append(doctor["doctor_id"])
doc_name[doctor["doctor_id"]] = doctor["doctor_name"]
print("{}. {}".format(doctor["doctor_id"], doctor["doctor_name"]))
print()
doctor_index = input("请输入医生编号: ")
doctor_index_arr = doctor_index.split(',')
for oneId in doctor_index_arr:
if int(oneId) in doc_id_arr:
configs['doc_id'].append(oneId)
configs['doctor_name'].append(doc_name[int(oneId)])
else:
print("当前选择医生为:%s(%s)" % (configs["doctor_name"], configs["doc_id"]))
def set_week_configs():
if not configs["weeks"]:
print("=====请选择哪天的号=====\n")
for week in weeks_list:
print("{}. {}".format(week["value"], week["name"]))
print()
while True:
week_str = input("请输入需要周几的号[可多选,如(6,7)](默认不限制): ")
week_str = week_str if len(week_str) > 0 else ",".join(
map(lambda x: str(x), list(range(1, 8))))
configs["weeks"] = week_str.split(",")
break
def set_days_configs():
if not configs["days"]:
print("=====请选择时间段=====\n")
for index, day in enumerate(day_list):
print("{}. {}".format(index + 1, day["name"]))
print()
while True:
day_index = input("请输入时间段序号: ")
is_number = True if re.match(r'^\d+$', day_index) else False
if is_number and int(day_index) in range(1, len(day_list) + 1):
configs["days"] = day_list[int(day_index) - 1]["value"]
break
else:
print("输入有误,请重新输入!")
def init_data():
set_user_configs()
set_city_configs()
set_hospital_configs()
set_department_configs()
set_doctor_configs()
set_week_configs()
set_days_configs()
def run():
init_data()
logging.info(configs)
unit_id = configs["unit_id"]
dep_id = configs["dep_id"]
doc_id = configs["doc_id"]
weeks = configs["weeks"]
days = configs["days"]
    # Polling interval; polling too frequently causes the booking API to reject requests
sleep_time = 15
logging.info("刷票开始")
while True:
try:
for oneId in doc_id:
tickets = brush_ticket_new(oneId, dep_id, weeks, days)
if len(tickets) > 0:
logging.info(tickets)
logging.info("刷到票了,开抢了...")
get_ticket(tickets[0], unit_id, dep_id)
break
else:
logging.info("努力刷票中...")
time.sleep(sleep_time)
except Exception as e:
logging.error(e)
break
logging.info("刷票结束")
print("当前配置为:\n\t%s" % configs)
def runOnce():
logging.info(configs)
unit_id = configs["unit_id"]
dep_id = configs["dep_id"]
doc_id = configs["doc_id"]
weeks = configs["weeks"]
days = configs["days"]
logging.info("定时刷票:开始")
try:
for oneId in doc_id:
tickets = brush_ticket_new(oneId, dep_id, weeks, days)
if len(tickets) > 0:
logging.info(tickets)
logging.info("定时刷票:刷到票了,开抢了...")
get_ticket(tickets[0], unit_id, dep_id)
else:
logging.info("努力刷票中...")
sleep_time = random.randrange(1, 2)
time.sleep(sleep_time)
except Exception as e:
logging.error(e)
logging.info("定时刷票:结束")
if __name__ == '__main__':
try:
scheduler = BackgroundScheduler()
scheduler.add_job(runOnce, 'cron', hour=15, minute=0)
scheduler.add_job(runOnce, 'cron', hour=15, minute=0, second=1)
scheduler.add_job(runOnce, 'cron', hour=15, minute=0, second=3)
scheduler.start()
run()
except KeyboardInterrupt:
print("\n=====强制退出=====")
print("当前配置为:\n\t%s" % configs)
exit(0)
| 29.973868
| 120
| 0.529555
|
7b4bbb9adedf8f41f6fd0917c5f697a793a30f02
| 1,351
|
py
|
Python
|
tests/test_morning_star.py
|
laye0619/ofanalysis
|
e59585c65117c6a573fa2ad6d5ee0a311afbd50f
|
[
"MIT"
] | null | null | null |
tests/test_morning_star.py
|
laye0619/ofanalysis
|
e59585c65117c6a573fa2ad6d5ee0a311afbd50f
|
[
"MIT"
] | null | null | null |
tests/test_morning_star.py
|
laye0619/ofanalysis
|
e59585c65117c6a573fa2ad6d5ee0a311afbd50f
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from ofanalysis.morningstar.morning_star import MorningStar
class TestMorningStar(TestCase):
def setUp(self) -> None:
self.morning_star = MorningStar(
assets_path='./tests/assets',
temp_path='./tests/temp_storage.csv',
cookie_str="MS_LocalEmailAddr=laye0619@gmail.com=; ASP.NET_SessionId=jxzqirz0dm2yib45ztdf54u2; Hm_lvt_eca85e284f8b74d1200a42c9faa85464=1651040501,1652689812,1652705120,1652744827; MSCC=vZDDOF8neG4=; user=username=laye0619@gmail.com&nickname=laye0619&status=Free&memberId=458975&password=trWMrjKv97VkUvhSrVRdJw==; authWeb=9CE0E7675007ACF68C1CE367F33956FFF0AC3E338CFA7713581263C99989618D73C6F84E19CEC133929F55AC2842329CDD753435158A20AB98A0FC6F57D5E5F9DDCD4BB191858F9E9A844738563C06C3E526DF3CFF4AE574A610DADB1C99FF00001CA12DFB758CF31DE1696233C049F1B87D790A; Hm_lpvt_eca85e284f8b74d1200a42c9faa85464=1652744844; AWSALB=FHLDJYnxkyqTmZV3rYxNRxBb+gjf1MRzVMSKn9PLj7Th9bs1ipmBzRnyEHVHfhlx/gt0IPFka868zPd+RpC37mukM0aVcCaSLxf9aHZnVKPyTEO//A7JHmJKqdvo; AWSALBCORS=FHLDJYnxkyqTmZV3rYxNRxBb+gjf1MRzVMSKn9PLj7Th9bs1ipmBzRnyEHVHfhlx/gt0IPFka868zPd+RpC37mukM0aVcCaSLxf9aHZnVKPyTEO//A7JHmJKqdvo"
)
def test_get_fund_list(self):
self.morning_star.get_fund_list()
def test_write_to_db(self):
self.morning_star.write_to_db()
pass
| 67.55
| 889
| 0.834937
|
c4ce52ee35c6664ad7e357341c605f176e07b1e6
| 85
|
py
|
Python
|
pyqtstarter/__init__.py
|
jokey2k/PyQtStarter
|
165c14ee954164399d286cecd90cd25bacc328c3
|
[
"Unlicense"
] | null | null | null |
pyqtstarter/__init__.py
|
jokey2k/PyQtStarter
|
165c14ee954164399d286cecd90cd25bacc328c3
|
[
"Unlicense"
] | null | null | null |
pyqtstarter/__init__.py
|
jokey2k/PyQtStarter
|
165c14ee954164399d286cecd90cd25bacc328c3
|
[
"Unlicense"
] | null | null | null |
__version__ = "1.0.0" # as seen on Semantic Versioning 2.0.0, http://www.semver.org
| 42.5
| 84
| 0.694118
|
ea299bf6918f0abf7d0fae2fb06525567746c17f
| 3,583
|
py
|
Python
|
Taxi/LightGBM/lightGBM.py
|
chenwenhang/Short-term-Prediction-of-Demand-for-Ride-hailing-Services
|
d0cc73643314d6daac4d7cd39adf8208b30989a3
|
[
"MIT"
] | 1
|
2020-07-08T13:44:59.000Z
|
2020-07-08T13:44:59.000Z
|
Taxi/LightGBM/lightGBM.py
|
chenwenhang/Short-term-Prediction-of-Demand-for-Ride-hailing-Services
|
d0cc73643314d6daac4d7cd39adf8208b30989a3
|
[
"MIT"
] | null | null | null |
Taxi/LightGBM/lightGBM.py
|
chenwenhang/Short-term-Prediction-of-Demand-for-Ride-hailing-Services
|
d0cc73643314d6daac4d7cd39adf8208b30989a3
|
[
"MIT"
] | 1
|
2020-12-21T15:10:17.000Z
|
2020-12-21T15:10:17.000Z
|
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import PolynomialFeatures
from lightgbm import LGBMClassifier
import joblib
import numpy as np
import pandas as pd
import time
# Make sure the data file and the Python code are in the same folder
start = time.time()
trips = pd.read_csv('../data/NY_Taxi_March_2019.csv')
# Check data information
trips.head()
trips.info()
# Calculate passenger trips per time period per day
trips_period = trips.groupby(['DayofYear', 'TimePeriod', 'DayofWeek'])['passenger_count'].sum().reset_index()
# Construct a template of times
time_interval = 15
day_start, day_end = (60, 91) # March, days starting from January 1
time_start, time_end = (5 * 60, 23 * 60) # 5:00-23:00 in minutes
unique_days = np.arange(day_start, day_end)
unique_time_periods = np.arange(time_start, time_end, time_interval) # time period with interval 15 minutes
list_day, list_time = zip(*[(d, t) for d in unique_days for t in unique_time_periods])
ts_template = pd.DataFrame()
ts_template = ts_template.assign(DayofYear=list_day, TimePeriod=list_time)
# Merge the observations with the template and fill missing values with zero
ts = ts_template.merge(trips_period, on=['DayofYear', 'TimePeriod'], how='left').fillna(0)
# Disrupt the list order
ts = shuffle(ts)
print(ts)
# Get features and results
X = ts[['DayofYear', 'TimePeriod', 'DayofWeek']]
y = ts[['passenger_count']]
# Because there are only a few raw features, the trained model scores low,
# so PolynomialFeatures is used to expand the feature set (see the illustration below)
poly = PolynomialFeatures(degree=4, include_bias=True, interaction_only=False)
X = poly.fit_transform(X)
print(X.shape)
print(y.shape)
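# Illustration (not part of the original script): with 3 raw features and degree=4,
# PolynomialFeatures emits every monomial of total degree <= 4, i.e. C(3+4, 4) = 35
# columns, so X.shape above ends up as (n_samples, 35). A smaller degree-2 example:
#   PolynomialFeatures(degree=2, include_bias=True).fit_transform([[2, 3, 5]])
#   -> [[1, 2, 3, 5, 4, 6, 10, 9, 15, 25]]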
# Split train_set and test_set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.3)
# Build model
lightGBM = LGBMClassifier()
# Generate optional parameter dictionary
num_leaves = [31, 50, 100]
max_depth = [-1]
learning_rate = [0.01]
boosting = ["dart"]
objective = ['regression']
# Use GPU to accelerate training model
# device = ['gpu']
# gpu_platform_id = [1]
# gpu_device_id = [0]
# param_grid = dict(num_leaves=num_leaves, n_estimators=n_estimators, learning_rate=learning_rate,
# device=device, gpu_platform_id=gpu_platform_id, gpu_device_id=gpu_device_id)
param_grid = dict(num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate,
objective=objective, boosting=boosting)
# Use GridSearchCV to tune hyperparameters automatically
# Use all CPU cores, four times cross-validation
grid_search = GridSearchCV(lightGBM, param_grid, n_jobs=-1, cv=4)
grid_result = grid_search.fit(X_train, y_train)
# Print best result
print("Best: %f using %s" % (grid_result.best_score_, grid_search.best_params_))
# Print all results
means = grid_result.cv_results_['mean_test_score']
params = grid_result.cv_results_['params']
for mean, param in zip(means, params):
print("%f with: %r" % (mean, param))
# Save model
model = LGBMClassifier(num_leaves=grid_search.best_params_['num_leaves'],
max_depth=grid_search.best_params_['max_depth'],
learning_rate=grid_search.best_params_['learning_rate'],
boosting=grid_search.best_params_['boosting'],
objective=grid_search.best_params_['objective']).fit(X_train, y_train)
joblib.dump(model, './model/lightGBM.pkl')
# Code running time
end = time.time()
print(end - start)
| 35.83
| 109
| 0.744348
|
51b3c2a7bd215a83be926e8dbb2b01c74b5afa45
| 296
|
py
|
Python
|
captcha/apps.py
|
Andrew-Chen-Wang/django-recaptcha
|
fa0d0f844413d451e0f9f9db293909b046ecd944
|
[
"BSD-3-Clause"
] | 13
|
2022-01-26T21:57:05.000Z
|
2022-03-18T11:39:36.000Z
|
captcha/apps.py
|
Andrew-Chen-Wang/django-recaptcha
|
fa0d0f844413d451e0f9f9db293909b046ecd944
|
[
"BSD-3-Clause"
] | 34
|
2022-01-25T14:11:48.000Z
|
2022-02-08T01:23:07.000Z
|
captcha/apps.py
|
Andrew-Chen-Wang/django-recaptcha
|
fa0d0f844413d451e0f9f9db293909b046ecd944
|
[
"BSD-3-Clause"
] | 4
|
2022-01-29T11:35:17.000Z
|
2022-02-21T09:17:29.000Z
|
from django.apps import AppConfig
from django.core.checks import Tags, register
from captcha.checks import recaptcha_key_check
class CaptchaConfig(AppConfig):
name = "captcha"
verbose_name = "Django reCAPTCHA"
def ready(self):
register(recaptcha_key_check, Tags.security)
| 22.769231
| 52
| 0.760135
|
b192efc0b714b14282924156f2c2417639fd630d
| 6,531
|
py
|
Python
|
scripts/dfa_walkthrough.py
|
rahulvenugopal/ComapreComplexity
|
77039ee02dc579825e7806930a67fdbcdd1dcf4f
|
[
"MIT"
] | null | null | null |
scripts/dfa_walkthrough.py
|
rahulvenugopal/ComapreComplexity
|
77039ee02dc579825e7806930a67fdbcdd1dcf4f
|
[
"MIT"
] | null | null | null |
scripts/dfa_walkthrough.py
|
rahulvenugopal/ComapreComplexity
|
77039ee02dc579825e7806930a67fdbcdd1dcf4f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 30 11:07:55 2021
- DFA tutorial based on nolds implementation
@author: Rahul Venugopal
References
- https://nolds.readthedocs.io/en/latest/nolds.html#detrended-fluctuation-analysis
- https://nolds.readthedocs.io/en/latest/_modules/nolds/measures.html#dfa
- Detrended fluctuation analysis: a scale-free view on neuronal oscillations
https://www.frontiersin.org/articles/10.3389/fphys.2012.00450/full
"""
# Run the custom functions in the following cell block
#%% Helper functions
import warnings
import numpy as np  # used by the helper functions below (also re-imported later in the script)
def poly_fit(x, y, degree, fit="RANSAC"):
# check if we can use RANSAC
if fit == "RANSAC":
try:
# ignore ImportWarnings in sklearn
with warnings.catch_warnings():
warnings.simplefilter("ignore", ImportWarning)
import sklearn.linear_model as sklin
import sklearn.preprocessing as skpre
except ImportError:
warnings.warn(
"fitting mode 'RANSAC' requires the package sklearn, using"
+ " 'poly' instead",
RuntimeWarning)
fit = "poly"
if fit == "poly":
return np.polyfit(x, y, degree)
elif fit == "RANSAC":
model = sklin.RANSACRegressor(sklin.LinearRegression(fit_intercept=False))
xdat = np.asarray(x)
if len(xdat.shape) == 1:
# interpret 1d-array as list of len(x) samples instead of
# one sample of length len(x)
xdat = xdat.reshape(-1, 1)
polydat = skpre.PolynomialFeatures(degree).fit_transform(xdat)
try:
model.fit(polydat, y)
coef = model.estimator_.coef_[::-1]
except ValueError:
warnings.warn(
"RANSAC did not reach consensus, "
+ "using numpy's polyfit",
RuntimeWarning)
coef = np.polyfit(x, y, degree)
return coef
else:
raise ValueError("invalid fitting mode ({})".format(fit))
# Logarithmic
def logarithmic_n(min_n, max_n, factor):
"""
Creates a list of values by successively multiplying a minimum value min_n by
a factor > 1 until a maximum value max_n is reached.
Non-integer results are rounded down.
Args:
min_n (float):
minimum value (must be < max_n)
max_n (float):
maximum value (must be > min_n)
factor (float):
factor used to increase min_n (must be > 1)
Returns:
list of integers:
min_n, min_n * factor, min_n * factor^2, ... min_n * factor^i < max_n
without duplicates
"""
assert max_n > min_n
assert factor > 1
# stop condition: min * f^x = max
# => f^x = max/min
# => x = log(max/min) / log(f)
max_i = int(np.floor(np.log(1.0 * max_n / min_n) / np.log(factor)))
ns = [min_n]
for i in range(max_i + 1):
n = int(np.floor(min_n * (factor ** i)))
if n > ns[-1]:
ns.append(n)
return ns
# plot_reg
def plot_reg(xvals, yvals, poly, x_label="x", y_label="y", data_label="data",
reg_label="regression line", fname=None):
"""
Helper function to plot trend lines for line-fitting approaches. This
function will show a plot through ``plt.show()`` and close it after the window
has been closed by the user.
Args:
xvals (list/array of float):
list of x-values
yvals (list/array of float):
list of y-values
poly (list/array of float):
polynomial parameters as accepted by ``np.polyval``
Kwargs:
x_label (str):
label of the x-axis
y_label (str):
label of the y-axis
data_label (str):
label of the data
reg_label(str):
label of the regression line
fname (str):
file name (if not None, the plot will be saved to disc instead of
      showing it through ``plt.show()``)
"""
# local import to avoid dependency for non-debug use
import matplotlib.pyplot as plt
plt.plot(xvals, yvals, "bo", label=data_label)
if not (poly is None):
plt.plot(xvals, np.polyval(poly, xvals), "r-", label=reg_label)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend(loc="best")
if fname is None:
plt.show()
else:
plt.savefig(fname)
plt.close()
#%% DFA calculation
# Create a sample data
import numpy as np
data = np.random.random(1000)
# Initial parameters
nvals = None
overlap = True
order = 1
fit_trend = "poly"
fit_exp = "RANSAC" # robust to outliers
debug_plot = False
debug_data = False
plot_file = None
# Converting data to an array
data = np.asarray(data)
total_N = len(data)
# Fixing the window lengths for data having more than 100 data points
nvals = logarithmic_n(4, 0.1 * total_N, 1.2)
# Cumulative sum of deviations from mean
walk = np.cumsum(data - np.mean(data))
# initialise
fluctuations = []
# Looping through different window sizes to capture standard deviations
for n in nvals:
# subdivide data into chunks of size n
# step size n/2 instead of n for overlap
d = np.array([walk[i:i + n] for i in range(0, len(walk) - n, n // 2)])
# each row of d is data slice shifted with 50% overlap
# calculate local trends as polynomes
x = np.arange(n)
# fitting a regression line of order 1
tpoly = [poly_fit(x, d[i], order, fit=fit_trend)
for i in range(len(d))]
tpoly = np.array(tpoly)
# tpoly has intercept and slope
# find the trend line
trend = np.array([np.polyval(tpoly[i], x) for i in range(len(d))])
# calculate standard deviation ("fluctuation") of walks in d around trend
flucs = np.sqrt(np.sum((d - trend) ** 2, axis=1) / n)
# calculate mean fluctuation over all subsequences
f_n = np.sum(flucs) / len(flucs)
fluctuations.append(f_n)
fluctuations = np.array(fluctuations)
# filter zeros from fluctuations
# I think this is to avoid the logarithm issues with zero
nonzero = np.where(fluctuations != 0)
nvals = np.array(nvals)[nonzero]
fluctuations = fluctuations[nonzero]
if len(fluctuations) == 0:
# all fluctuations are zero => we cannot fit a line
poly = [np.nan, np.nan]
else:
poly = poly_fit(np.log(nvals), np.log(fluctuations), 1,
fit=fit_exp)
print(poly[0])
#%% Visualising polynomial fit demo
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(12345)
y = np.random.random(500)
x = range(len(y))
trend = np.polyfit(x, y, 1)
plt.plot(x, y, 'o')
trendpoly = np.poly1d(trend)
plt.plot(x, trendpoly(x))
plt.plot(x, y, color='steelblue')
plt.title('Random signal of length 500 data points')
ax = plt.gca()
# ax.set_xlim([xmin, xmax])
ax.set_ylim([-0.5, 2])
#%% Plotting segments
plt.plot(np.transpose(d))
plt.plot(np.transpose(trend))
plt.plot(np.transpose(d-trend))
ax = plt.gca()
# ax.set_xlim([xmin, xmax])
ax.set_ylim([-4, 4])
| 27.791489
| 82
| 0.670035
|
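A quick cross-check of the walkthrough above, assuming the nolds package is installed: nolds.dfa implements the same detrended fluctuation analysis and should give a comparable scaling exponent for the same kind of signal.
import numpy as np
import nolds
np.random.seed(12345)
data = np.random.random(1000)
# White noise has no long-range correlations, so the DFA exponent should be near 0.5
alpha = nolds.dfa(data)
print(alpha)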
cc212862217c3fbea36e487ac0926d33cb8bae69
| 1,177
|
py
|
Python
|
examples/scripts/load_csv_to_neo4j.py
|
kevinschaper/kgx
|
4c129428131047af4506a93fe8dc54fb92a7c702
|
[
"BSD-3-Clause"
] | 32
|
2020-10-21T17:35:27.000Z
|
2022-03-17T02:40:08.000Z
|
examples/scripts/load_csv_to_neo4j.py
|
kevinschaper/kgx
|
4c129428131047af4506a93fe8dc54fb92a7c702
|
[
"BSD-3-Clause"
] | 136
|
2020-10-02T11:06:50.000Z
|
2022-03-29T03:55:53.000Z
|
examples/scripts/load_csv_to_neo4j.py
|
kevinschaper/kgx
|
4c129428131047af4506a93fe8dc54fb92a7c702
|
[
"BSD-3-Clause"
] | 19
|
2018-05-03T17:03:08.000Z
|
2020-07-15T22:12:40.000Z
|
import os
import argparse
from kgx.transformer import Transformer
"""
A loader script that demonstrates how to load edges and nodes into Neo4j.
"""
def usage():
print("""
usage: load_csv_to_neo4j.py --nodes nodes.csv --edges edges.csv
""")
parser = argparse.ArgumentParser(description='Load edges and nodes into Neo4j')
parser.add_argument('--nodes', help='file with nodes in CSV format')
parser.add_argument('--edges', help='file with edges in CSV format')
parser.add_argument('--uri', help='URI/URL for Neo4j (including port)', default='localhost:7474')
parser.add_argument('--username', help='username', default='neo4j')
parser.add_argument('--password', help='password', default='demo')
args = parser.parse_args()
if args.nodes is None and args.edges is None:
usage()
exit()
filename = []
if args.nodes:
filename.append(args.nodes)
if args.edges:
filename.append(args.edges)
input_args = {
'filename': filename,
'format': 'csv'
}
output_args = {
'uri': args.uri,
'username': args.username,
'password': args.password,
'format': 'neo4j'
}
# Initialize Transformer
t = Transformer()
t.transform(input_args, output_args)
| 25.042553
| 97
| 0.704333
|
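For programmatic use without argparse, the same Transformer call shown above can be driven directly from dictionaries; a sketch with placeholder file names and credentials (not part of the original script).
from kgx.transformer import Transformer
input_args = {
    'filename': ['nodes.csv', 'edges.csv'],  # placeholder paths
    'format': 'csv'
}
output_args = {
    'uri': 'localhost:7474',
    'username': 'neo4j',
    'password': 'demo',  # placeholder credentials
    'format': 'neo4j'
}
t = Transformer()
t.transform(input_args, output_args)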
4153dbe662944d3be0d9f192e78c4b2587230458
| 585
|
py
|
Python
|
examples/delivery/trade/get_balance.py
|
AlfonsoAgAr/binance-futures-connector-python
|
f0bd2c7b0576503bf526ce6be329ca2dae90fefe
|
[
"MIT"
] | 58
|
2021-11-22T11:46:27.000Z
|
2022-03-30T06:58:53.000Z
|
examples/delivery/trade/get_balance.py
|
sanjeevan121/binance-futures-connector-python
|
d820b73a15e9f64c80891a13694ca0c5d1693b90
|
[
"MIT"
] | 15
|
2021-12-15T22:40:52.000Z
|
2022-03-29T22:08:31.000Z
|
examples/delivery/trade/get_balance.py
|
sanjeevan121/binance-futures-connector-python
|
d820b73a15e9f64c80891a13694ca0c5d1693b90
|
[
"MIT"
] | 28
|
2021-12-10T03:56:13.000Z
|
2022-03-25T22:23:44.000Z
|
#!/usr/bin/env python
import logging
from binance.delivery import Delivery as Client
from binance.lib.utils import config_logging
from binance.error import ClientError
config_logging(logging, logging.DEBUG)
key = ""
secret = ""
client = Client(key, secret, base_url="https://dapi.binance.com")
try:
response = client.balance(recvWindow=6000)
logging.info(response)
except ClientError as error:
logging.error(
"Found error. status: {}, error code: {}, error message: {}".format(
error.status_code, error.error_code, error.error_message
)
)
| 26.590909
| 76
| 0.711111
|
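Rather than hard-coding an empty key and secret as the example does, credentials can be read from the environment before constructing the client; a sketch using only the constructor shown above, with hypothetical environment variable names.
import os
from binance.delivery import Delivery as Client
# BINANCE_API_KEY / BINANCE_API_SECRET are illustrative variable names
key = os.environ.get("BINANCE_API_KEY", "")
secret = os.environ.get("BINANCE_API_SECRET", "")
client = Client(key, secret, base_url="https://dapi.binance.com")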
c048c46562750090cfd8293a936e73d7a7d69de7
| 6,382
|
py
|
Python
|
pygest/cmdline/command.py
|
mfschmidt/PyGEST
|
3d5e9f5f29ad3d51d3786ea8c39ac89ae792db3b
|
[
"MIT"
] | null | null | null |
pygest/cmdline/command.py
|
mfschmidt/PyGEST
|
3d5e9f5f29ad3d51d3786ea8c39ac89ae792db3b
|
[
"MIT"
] | 1
|
2020-09-20T03:20:14.000Z
|
2020-09-20T03:20:14.000Z
|
pygest/cmdline/command.py
|
mfschmidt/PyGEST
|
3d5e9f5f29ad3d51d3786ea8c39ac89ae792db3b
|
[
"MIT"
] | null | null | null |
import os
import sys
import argparse
import logging
import pkg_resources
import pygest as ge
from datetime import datetime
from pygest.convenience import path_to
""" TODO: Create a file logger with command line, never when instantiating the library altogether.
Library use should never auto-log. """
class Command(object):
""" Each command has overlapping functionality. Each command can inherit the shared functionality from here.
"""
def __init__(self, arguments, command="", description="A PyGEST command", logger=None):
""" Initialize the command basics, but do not parse args or run. """
self._arguments = arguments
self._command = command
self._description = description
self._parser = argparse.ArgumentParser(description=self._description)
self._add_arguments()
self._args = self._parser.parse_args(self._arguments)
self._args.beginning = datetime.now()
self._setup_data() # _setup_data must come before logging, as it ascertains the base path for everything.
self._post_process_arguments()
self._setup_logging(logger) # _setup_logging must come before later ge.Data() so it can pass the logger.
if "data" in self._args and self._args.data is not None:
self.data = ge.Data(self._args.data, self._logger)
if self._args.verbose:
self._report_context()
def _add_arguments(self):
""" Add on some standard arguments used by all commands. """
self._parser.add_argument("-v", "--verbose", action="store_true",
help="increase output verbosity")
self._parser.add_argument("--log", dest="log", default='',
help="Provide a path to a log file to save output.")
def _setup_data(self):
""" If a data path is provided, use it to initiate pygest.data. """
if "data" not in self._args or self._args.data is None or self._args.data == "NONE":
if "PYGEST_DATA" in os.environ:
self._args.data = os.environ['PYGEST_DATA']
else:
print("I don't know where to find data. Try one of the following, with your own path:")
print("")
print(" $ pygest {} --data /home/mike/ge_data".format(" ".join(sys.argv[1:])))
print("")
print("or, better, set it in your environment (use ~/.bashrc as a permanent solution)")
print("")
print(" $ export PYGEST_DATA=/home/mike/ge_data")
print(" $ pygest {}".format(" ".join(sys.argv[1:])))
print("")
sys.exit(1)
if not os.path.isdir(self._args.data):
print("Data directory '{}' does not exist.".format(self._args.data))
sys.exit(1)
def _setup_logging(self, logger=None):
""" Create a logger and handlers. """
if logger is None:
# Set up logging, formatted with datetimes.
log_formatter = logging.Formatter(fmt='%(asctime)s | %(message)s', datefmt='%Y-%m-%d_%H:%M:%S')
self._logger = logging.getLogger('pygest')
self._logger.setLevel(1)
# Set up the console (stdout) log handler
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(log_formatter)
console_handler.setLevel(logging.DEBUG if self._args.verbose else logging.INFO)
self._logger.addHandler(console_handler)
# Set up the file handler, if requested
# The default is log=''
if "log" in self._args and self._args.log not in ['', 'null']:
file_handler = logging.FileHandler(self._args.log, mode='a+')
file_handler.setFormatter(log_formatter)
# file_handler.setLevel(logging.DEBUG if args.verbose else logging.INFO)
# As a design decision, heavy logging to a file is almost always desirable, without clogging stdout
file_handler.setLevel(logging.DEBUG)
self._logger.addHandler(file_handler)
self._logger.debug("logger added filehandler at {}, from cmdline argument.".format(self._args.log))
else:
pass # no file handlers get set up
elif logger == "null":
self._logger.addHandler(logging.NullHandler())
else:
self._logger.addHandler(logger)
def _report_context(self, indent=""):
""" Report our interpretation of command-line arguments. """
def log_indented(s):
""" output to the logger with the provided indentation """
self._logger.info("{}{}".format(indent, s))
log_indented("--------------------------------------------------------------------------------")
log_indented(" Command: {}".format(" ".join(sys.argv[:])))
log_indented(" OPENBLAS_NUM_THREADS = {}".format(os.environ['OPENBLAS_NUM_THREADS']))
try:
log_indented(" PyGEST is running version {}".format(pkg_resources.require("pygest")[0].version))
except pkg_resources.DistributionNotFound:
log_indented(" PyGEST is running uninstalled; no version information is available.")
log_indented(" interpretation of arguments:")
for k in self._args.__dict__:
if self._args.__dict__[k] is not None:
log_indented(" - {} = {}".format(k, self._args.__dict__[k]))
path_type = 'split' if self._command == 'split' else 'result'
if len(self._logger.handlers) > 1:
for h in self._logger.handlers:
if isinstance(h, logging.FileHandler):
log_indented(" path '{}'".format(
path_to(self._command, self._args, path_type=path_type, log_file=True)
))
log_indented(" logging to '{}'".format(h.baseFilename))
log_indented("--------------------------------------------------------------------------------")
def _post_process_arguments(self):
""" Provide the opportunity to interpret and modify command-line arguments before reporting them. """
pass
def run(self):
""" Perform the task this command is created for. """
raise NotImplementedError()
| 48.717557
| 115
| 0.592918
|
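A minimal sketch of how a concrete subcommand might build on the Command base class above, using only the hooks it defines (_add_arguments and run); the subcommand name and its --name argument are hypothetical, and running it still requires PYGEST_DATA (or a --data argument) as enforced by _setup_data.
class HelloCommand(Command):
    """Hypothetical subcommand illustrating the Command hooks."""
    def __init__(self, arguments, logger=None):
        super().__init__(arguments, command="hello",
                         description="Say hello", logger=logger)
    def _add_arguments(self):
        # keep the base -v/--log options and add one of our own
        super()._add_arguments()
        self._parser.add_argument("--name", default="world",
                                  help="who to greet")
    def run(self):
        self._logger.info("hello, {}".format(self._args.name))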
2581faf0bba8b578d45967d3ac4d1eb362d00fb2
| 6,736
|
py
|
Python
|
warp_drive/training/utils/data_loader.py
|
MetaMind/warp-drive
|
ecc17aae6c75a00c46b0e9f3297963f1beb697c0
|
[
"BSD-3-Clause"
] | null | null | null |
warp_drive/training/utils/data_loader.py
|
MetaMind/warp-drive
|
ecc17aae6c75a00c46b0e9f3297963f1beb697c0
|
[
"BSD-3-Clause"
] | null | null | null |
warp_drive/training/utils/data_loader.py
|
MetaMind/warp-drive
|
ecc17aae6c75a00c46b0e9f3297963f1beb697c0
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2021, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from gym.spaces import Discrete, MultiDiscrete
from warp_drive.utils.constants import Constants
from warp_drive.utils.data_feed import DataFeed
_OBSERVATIONS = Constants.OBSERVATIONS
_ACTIONS = Constants.ACTIONS
_REWARDS = Constants.REWARDS
_DONE_FLAGS = Constants.DONE_FLAGS
def all_equal(iterable):
"""
Check all elements of an iterable (e.g., list) are identical
"""
return len(set(iterable)) <= 1
def create_and_push_data_placeholders(
env_wrapper, policy_tag_to_agent_id_map, training_batch_size_per_env
):
"""
Create observations, sampled_actions, rewards and done flags placeholders
and push to the device; this is required for generating environment
roll-outs as well as training.
"""
assert env_wrapper is not None
assert isinstance(policy_tag_to_agent_id_map, dict)
assert len(policy_tag_to_agent_id_map) > 0 # at least one policy
# Reset the environment
obs = env_wrapper.reset_all_envs()
observation_space = {}
action_space = {}
# Define the action spaces for each policy
for pol_mod_tag in policy_tag_to_agent_id_map:
first_agent_id = policy_tag_to_agent_id_map[pol_mod_tag][0]
observation_space[pol_mod_tag] = env_wrapper.env.observation_space[
first_agent_id
]
action_space[pol_mod_tag] = env_wrapper.env.action_space[first_agent_id]
print("-" * 40)
print(f"Observation space: {pol_mod_tag}", observation_space[pol_mod_tag])
print(f"Action space: {pol_mod_tag}", action_space[pol_mod_tag])
print("-" * 40)
# Note: This release version assumes all agents use the same obs/action space!
# If the obs/action spaces for the agents are different, we would just need to
# push obs/action and reward placeholders for each agent separately.
# Assert all action spaces are of the same type
action_spaces = [action_space[key] for key in action_space]
action_types = [type(action_space[key]) for key in action_space]
assert all_equal(action_types)
# Also assert all action spaces are of the same dimension
first_agent_action_space = action_spaces[0]
if isinstance(first_agent_action_space, MultiDiscrete):
action_dims = [tuple(action_space[key].nvec) for key in action_space]
elif isinstance(first_agent_action_space, Discrete):
action_dims = [tuple([action_space[key].n]) for key in action_space]
else:
raise NotImplementedError(
"Action spaces can be of type 'Discrete' or 'MultiDiscrete'"
)
assert all_equal(action_dims)
# Use the first action dimension element's length to determine the number of actions
num_actions = len(action_dims[0])
# Create observations, sampled_actions and rewards placeholders
# Note: We add the "num_envs" dimension to the placehholders since we will
# be running multiple replicas of the environment concurrently.
num_envs = env_wrapper.n_envs
observations_placeholder = np.stack(
[np.array([obs[key] for key in sorted(obs.keys())]) for _ in range(num_envs)],
axis=0,
)
sampled_actions_placeholder = np.zeros(
(num_envs, env_wrapper.env.num_agents, num_actions),
dtype=np.int32,
)
rewards_placeholder = np.zeros((num_envs, env_wrapper.n_agents), dtype=np.float32)
# Use the DataFeed class to add the observations, actions and rewards
# placeholder arrays. These arrays will be written to during the environment step().
# For the observations placeholders set save_copy_and_apply_at_reset=True, so that a
# copy of the initial observation values will be saved and applied at every reset.
tensor_feed = DataFeed()
tensor_feed.add_data(
name=_OBSERVATIONS,
data=observations_placeholder,
save_copy_and_apply_at_reset=True,
)
tensor_feed.add_data(name=_ACTIONS, data=sampled_actions_placeholder)
tensor_feed.add_data(name=_REWARDS, data=rewards_placeholder)
# Also add separate placeholders for each policy model's sampled actions,
# if there are multiple policies or a MultiDiscrete action space.
# This is required since our sampler will be invoked for each policy model and
# action dimension separately.
single_policy_with_discrete_action_space = len(
policy_tag_to_agent_id_map
) == 1 and isinstance(first_agent_action_space, Discrete)
if not single_policy_with_discrete_action_space:
for pol_mod_tag in policy_tag_to_agent_id_map:
if isinstance(first_agent_action_space, Discrete):
tensor_feed.add_data(
name=f"{_ACTIONS}_{pol_mod_tag}_0",
data=sampled_actions_placeholder[
:, policy_tag_to_agent_id_map[pol_mod_tag]
],
)
elif isinstance(first_agent_action_space, MultiDiscrete):
for action_idx in range(num_actions):
tensor_feed.add_data(
name=f"{_ACTIONS}_{pol_mod_tag}_{action_idx}",
data=sampled_actions_placeholder[
:, policy_tag_to_agent_id_map[pol_mod_tag], action_idx
],
)
else:
raise NotImplementedError(
"Action spaces can be of type 'Discrete' or 'MultiDiscrete'"
)
# Additionally, add placeholders for the sampled_actions, rewards and done_flags
# for the roll-out batch.
# Note: The observation batch will be defined by the Trainer after processing.
# The batch of observations, sampled_actions, rewards and done flags are
# needed for training.
tensor_feed.add_data(
name=f"{_ACTIONS}_batch",
data=np.zeros(
(training_batch_size_per_env,) + sampled_actions_placeholder.shape,
dtype=np.int32,
),
)
tensor_feed.add_data(
name=f"{_REWARDS}_batch",
data=np.zeros((training_batch_size_per_env,) + rewards_placeholder.shape),
)
done_flags_placeholder = env_wrapper.cuda_data_manager.pull_data_from_device(
"_done_"
)
tensor_feed.add_data(
name=f"{_DONE_FLAGS}_batch",
data=np.zeros((training_batch_size_per_env,) + done_flags_placeholder.shape),
)
# Push all the placeholders to the device (GPU)
env_wrapper.cuda_data_manager.push_data_to_device(
tensor_feed, torch_accessible=True
)
| 41.073171
| 88
| 0.694774
|
014c5ccce17e9e1d251bea4c58f055a1c4f57f12
| 6,444
|
py
|
Python
|
utils.py
|
yueliu1999/awesome-deep-graph-clustering
|
84af77b466ee05807041a1e3bd6277c713ee3364
|
[
"MIT"
] | 3
|
2021-11-26T16:21:19.000Z
|
2021-11-27T05:40:22.000Z
|
utils.py
|
xihongyang1999/Awesome-Deep-Graph-Clustering
|
34115911ec504b69b2194cee3219b22b7bd50414
|
[
"MIT"
] | null | null | null |
utils.py
|
xihongyang1999/Awesome-Deep-Graph-Clustering
|
34115911ec504b69b2194cee3219b22b7bd50414
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author : Yue Liu
# @Email : yueliu19990731@163.com
# @Time : 2021/11/25 11:11
import torch
import numpy as np
def numpy_to_torch(a, is_sparse=False):
"""
numpy array to torch tensor
:param a: the numpy array
:param is_sparse: is sparse tensor or not
:return: torch tensor
"""
if is_sparse:
a = torch.sparse.Tensor(a)
else:
a = torch.from_numpy(a)
return a
def torch_to_numpy(t):
"""
torch tensor to numpy array
:param t: the torch tensor
:return: numpy array
"""
return t.numpy()
def load_graph_data(dataset_name, show_details=False):
"""
load graph data
:param dataset_name: the name of the dataset
:param show_details: if show the details of dataset
- dataset name
- features' shape
- labels' shape
- adj shape
- edge num
- category num
- category distribution
:return: the features, labels and adj
"""
load_path = "dataset/" + dataset_name + "/" + dataset_name
feat = np.load(load_path+"_feat.npy", allow_pickle=True)
label = np.load(load_path+"_label.npy", allow_pickle=True)
adj = np.load(load_path+"_adj.npy", allow_pickle=True)
if show_details:
print("++++++++++++++++++++++++++++++")
print("---details of graph dataset---")
print("++++++++++++++++++++++++++++++")
print("dataset name: ", dataset_name)
print("feature shape: ", feat.shape)
print("label shape: ", label.shape)
print("adj shape: ", adj.shape)
print("edge num: ", int(adj.sum()/2))
print("category num: ", max(label)-min(label)+1)
print("category distribution: ")
for i in range(max(label)+1):
print("label", i, end=":")
print(len(label[np.where(label == i)]))
print("++++++++++++++++++++++++++++++")
return feat, label, adj
def load_data(dataset_name, show_details=False):
"""
load non-graph data
:param dataset_name: the name of the dataset
:param show_details: if show the details of dataset
- dataset name
- features' shape
- labels' shape
- category num
- category distribution
:return: the features and labels
"""
load_path = "dataset/" + dataset_name + "/" + dataset_name
feat = np.load(load_path+"_feat.npy", allow_pickle=True)
label = np.load(load_path+"_label.npy", allow_pickle=True)
if show_details:
print("++++++++++++++++++++++++++++++")
print("------details of dataset------")
print("++++++++++++++++++++++++++++++")
print("dataset name: ", dataset_name)
print("feature shape: ", feat.shape)
print("label shape: ", label.shape)
print("category num: ", max(label)-min(label)+1)
print("category distribution: ")
for i in range(max(label)+1):
print("label", i, end=":")
print(len(label[np.where(label == i)]))
print("++++++++++++++++++++++++++++++")
return feat, label
def construct_graph(feat, k=5, metric="euclidean"):
"""
construct the knn graph for a non-graph dataset
:param feat: the input feature matrix
:param k: hyper-parameter of knn
:param metric: the metric of distance calculation
- euclidean: euclidean distance
- cosine: cosine distance
- heat: heat kernel
:return: the constructed graph
"""
# euclidean distance, sqrt((x-y)^2)
if metric == "euclidean" or metric == "heat":
xy = np.matmul(feat, feat.transpose())
xx = (feat * feat).sum(1).reshape(-1, 1)
xx_yy = xx + xx.transpose()
euclidean_distance = xx_yy - 2 * xy
euclidean_distance[euclidean_distance < 1e-5] = 0
distance_matrix = np.sqrt(euclidean_distance)
# heat kernel, exp^{- euclidean^2/t}
if metric == "heat":
distance_matrix = - (distance_matrix ** 2) / 2
distance_matrix = np.exp(distance_matrix)
# cosine distance, 1 - cosine similarity
if metric == "cosine":
norm_feat = feat / np.sqrt(np.sum(feat ** 2, axis=1)).reshape(-1, 1)
cosine_distance = 1 - np.matmul(norm_feat, norm_feat.transpose())
cosine_distance[cosine_distance < 1e-5] = 0
distance_matrix = cosine_distance
# top k
distance_matrix = numpy_to_torch(distance_matrix)
top_k, index = torch.topk(distance_matrix, k)
top_k_min = torch.min(top_k, dim=-1).values.unsqueeze(-1).repeat(1, distance_matrix.shape[-1])
ones = torch.ones_like(distance_matrix)
zeros = torch.zeros_like(distance_matrix)
knn_graph = torch.where(torch.ge(distance_matrix, top_k_min), ones, zeros)
return torch_to_numpy(knn_graph)
def normalize_adj(adj, self_loop=True, symmetry=True):
"""
normalize the adj matrix
:param adj: input adj matrix
:param self_loop: if add the self loop or not
:param symmetry: symmetry normalize or not
:return: the normalized adj matrix
"""
# add the self_loop
if self_loop:
adj_tmp = adj + np.eye(adj.shape[0])
else:
adj_tmp = adj
    # calculate the degree matrix and its inverse
d = np.diag(adj_tmp.sum(0))
d_inv = np.linalg.inv(d)
# symmetry normalize: D^{-0.5} A D^{-0.5}
if symmetry:
sqrt_d_inv = np.sqrt(d_inv)
norm_adj = np.matmul(np.matmul(sqrt_d_inv, adj_tmp), sqrt_d_inv)
# non-symmetry normalize: D^{-1} A
else:
norm_adj = np.matmul(d_inv, adj_tmp)
return norm_adj
def diffusion_adj(adj, self_loop=True, mode="ppr", transport_rate=0.2):
"""
graph diffusion
:param adj: input adj matrix
:param self_loop: if add the self loop or not
:param mode: the mode of graph diffusion
:param transport_rate: the transport rate
- personalized page rank
-
:return: the graph diffusion
"""
# add the self_loop
if self_loop:
adj_tmp = adj + np.eye(adj.shape[0])
else:
adj_tmp = adj
    # calculate the degree matrix and its inverse
d = np.diag(adj_tmp.sum(0))
d_inv = np.linalg.inv(d)
sqrt_d_inv = np.sqrt(d_inv)
# calculate norm adj
norm_adj = np.matmul(np.matmul(sqrt_d_inv, adj_tmp), sqrt_d_inv)
# calculate graph diffusion
if mode == "ppr":
diff_adj = transport_rate * np.linalg.inv((np.eye(d.shape[0]) - (1 - transport_rate) * norm_adj))
return diff_adj
| 31.434146
| 105
| 0.602576
|
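A short sketch tying the helpers above together on random data: build a knn graph with construct_graph and normalize it with normalize_adj (assumes this file is importable as utils.py; the toy shapes are arbitrary).
import numpy as np
from utils import construct_graph, normalize_adj
np.random.seed(0)
feat = np.random.rand(20, 8)  # 20 samples with 8 features
adj = construct_graph(feat, k=5, metric="cosine")
norm_adj = normalize_adj(adj, self_loop=True, symmetry=True)
print(adj.shape, norm_adj.shape)  # (20, 20) (20, 20)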
e93e988e93c595ea512999833e8502aa15a2efd2
| 446
|
py
|
Python
|
{{cookiecutter.project_package}}/{{cookiecutter.project_package}}/wsgi.py
|
madelyneriksen/react-django-goodstuff-cookiecutter
|
daa8960a762c59ebbdc940ebb08f3784853e4ccf
|
[
"MIT"
] | 17
|
2019-11-05T20:29:11.000Z
|
2022-03-09T09:25:10.000Z
|
{{cookiecutter.project_package}}/{{cookiecutter.project_package}}/wsgi.py
|
madelyneriksen/react-django-goodstuff-cookiecutter
|
daa8960a762c59ebbdc940ebb08f3784853e4ccf
|
[
"MIT"
] | 49
|
2019-12-02T13:27:23.000Z
|
2021-08-03T13:32:25.000Z
|
{{cookiecutter.project_package}}/{{cookiecutter.project_package}}/wsgi.py
|
madelyneriksen/react-django-goodstuff-cookiecutter
|
daa8960a762c59ebbdc940ebb08f3784853e4ccf
|
[
"MIT"
] | 2
|
2020-10-11T18:38:39.000Z
|
2021-03-16T13:16:00.000Z
|
"""
WSGI config for {{ cookiecutter.project_title }}.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE", "{{ cookiecutter.project_package }}.settings.prod"
)
application = get_wsgi_application()
| 23.473684
| 80
| 0.766816
|
230311e763c4b3ddc0e2202e7ba70af3e6c8285b
| 14,694
|
py
|
Python
|
src/batou/utils.py
|
gocept/batou
|
4d239996f464c406cde82c48155e5b8273a9063d
|
[
"BSD-2-Clause-FreeBSD"
] | 34
|
2019-09-06T05:30:10.000Z
|
2022-03-12T01:25:38.000Z
|
src/batou/utils.py
|
gocept/batou
|
4d239996f464c406cde82c48155e5b8273a9063d
|
[
"BSD-2-Clause-FreeBSD"
] | 204
|
2019-09-05T14:41:12.000Z
|
2022-03-10T12:14:37.000Z
|
src/batou/utils.py
|
gocept/batou
|
4d239996f464c406cde82c48155e5b8273a9063d
|
[
"BSD-2-Clause-FreeBSD"
] | 25
|
2019-10-10T07:13:41.000Z
|
2022-03-24T14:52:25.000Z
|
import contextlib
import copy
import fcntl
import functools
import hashlib
import inspect
import itertools
import os
import socket
import subprocess
import sys
import time
from collections import defaultdict
import pkg_resources
from batou import DeploymentError, IPAddressConfigurationError, output
class BagOfAttributes(dict):
"""Provide a dict-like object that can also
be accessed using attributes.
It's sometimes more convenient to write
a.x instead of a['x']. However, namespaces may
require being able to also use non-Python-identifier
keys.
"""
def __getattr__(self, key):
return self[key]
def self_id():
template = "batou/{version} ({python}, {system})"
system = os.uname()
system = " ".join([system[0], system[2], system[4]])
version = pkg_resources.require("batou")[0].version
python = sys.implementation.name
python += " {0}.{1}.{2}-{3}{4}".format(*sys.version_info)
return template.format(**locals())
class MultiFile(object):
def __init__(self, files):
self.files = files
def write(self, value):
for file in self.files:
file.write(value)
def flush(self):
for file in self.files:
file.flush()
@contextlib.contextmanager
def locked(filename):
# XXX can we make this not leave files around?
with open(filename, "a+") as lockfile:
try:
fcntl.lockf(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
print(
"Could not acquire lock {}".format(filename), file=sys.stderr)
raise RuntimeError(
'cannot create lock "%s": more than one instance running '
"concurrently?" % lockfile,
lockfile,
)
        # publishing the process id comes in handy for debugging
lockfile.seek(0)
lockfile.truncate()
print(os.getpid(), file=lockfile)
lockfile.flush()
yield
lockfile.seek(0)
lockfile.truncate()
def flatten(list_of_lists):
return list(itertools.chain.from_iterable(list_of_lists))
def notify_send(title, description):
subprocess.call(["notify-send", title, description])
def notify_macosx(title, description):
subprocess.call([
"osascript",
"-e",
'display notification "{}" with title "{}"'.format(description, title),
])
def notify_none(title, description):
pass
try:
subprocess.check_output(["which", "osascript"], stderr=subprocess.STDOUT)
notify = notify_macosx
except (subprocess.CalledProcessError, OSError):
try:
subprocess.check_output(["which", "notify-send"],
stderr=subprocess.STDOUT)
notify = notify_send
except (subprocess.CalledProcessError, OSError):
notify = notify_none
resolve_override = {}
resolve_v6_override = {}
def resolve(host, port=0, resolve_override=resolve_override):
if host in resolve_override:
address = resolve_override[host]
output.annotate(
'resolved (v4) `{}` to {} (override)'.format(host, address),
debug=True)
else:
output.annotate('resolving (v4) `{}`'.format(host), debug=True)
responses = socket.getaddrinfo(host, int(port), socket.AF_INET)
output.annotate(
'resolved (v4) `{}` to {}'.format(host, responses), debug=True)
address = responses[0][4][0]
output.annotate(
'selected (v4) {}, {}'.format(host, address), debug=True)
return address
def resolve_v6(host, port=0, resolve_override=resolve_v6_override):
if host in resolve_override:
address = resolve_override[host]
output.annotate(
'resolved (v6) `{}` to {} (override)'.format(host, address),
debug=True)
else:
output.annotate('resolving (v6) `{}`'.format(host), debug=True)
responses = socket.getaddrinfo(host, int(port), socket.AF_INET6)
output.annotate(
'resolved (v6) `{}` to {}'.format(host, responses), debug=True)
address = None
for _, _, _, _, sockaddr in responses:
addr, _, _, _ = sockaddr
if addr.startswith('fe80:'):
continue
address = addr
break
if not address:
raise ValueError('No valid address found for `{}`.'.format(host))
output.annotate(
'selected (v6) {}, {}'.format(host, address), debug=True)
return address
@functools.total_ordering
class Address(object):
"""An internet service address that can be listened and connected to.
The constructor address is expected to be the address that can be
connected to. The listen address will be computed automatically.
.. code-block:: pycon
>>> x = Address('localhost', 80)
>>> str(x.connect)
'localhost:80'
>>> str(x.listen)
'127.0.0.1:80'
"""
#: The connect address as it should be used when configuring clients.
#: This is a :py:class:`batou.utils.NetLoc` object.
connect = None
def __init__(self,
connect_address,
port=None,
require_v4=True,
require_v6=False):
if not require_v4 and not require_v6:
raise ValueError(
"At least one of `require_v4` or `require_v6` is required. "
"None were selected.")
if ":" in connect_address:
connect, port = connect_address.split(":")
else:
connect = connect_address
if port is None:
raise ValueError("Need port for service address.")
self.connect = NetLoc(connect, str(port))
if require_v4:
address = resolve(connect, port)
self.listen = NetLoc(address, str(port))
if require_v6:
address = resolve_v6(connect, port)
self.listen_v6 = NetLoc(address, str(port))
def __lt__(self, other):
if isinstance(other, Address):
return str(self) < str(other)
return NotImplemented
def __eq__(self, other):
if isinstance(other, Address):
return str(self) == str(other)
return NotImplemented
def __str__(self):
return str(self.connect)
@property
def listen(self):
"""The IPv4 listen (or bind) address as it should be used when
configuring servers. This is a :py:class:`batou.utils.NetLoc`
object. It raises an :py:class:`batou.IPAddressConfigurationError`
if used unconfigured.
"""
try:
return self._listen
except AttributeError:
raise IPAddressConfigurationError(self, 4)
@listen.setter
def listen(self, value):
self._listen = value
@property
def listen_v6(self):
"""The IPv6 listen (or bind) address as it should be used when
configuring servers. This is a :py:class:`batou.utils.NetLoc`
object. It raises an :py:class:`batou.IPAddressConfigurationError`
if used unconfigured.
"""
try:
return self._listen_v6
except AttributeError:
raise IPAddressConfigurationError(self, 6)
@listen_v6.setter
def listen_v6(self, value):
self._listen_v6 = value
@functools.total_ordering
class NetLoc(object):
"""A network location specified by host and port.
Network locations can automatically render an appropriate string
representation:
.. code-block:: pycon
>>> x = NetLoc('127.0.0.1')
>>> x.host
'127.0.0.1'
>>> x.port
None
>>> str(x)
'127.0.0.1'
>>> y = NetLoc('127.0.0.1', 80)
>>> str(y)
'127.0.0.1:80'
"""
#: The host part of this network location. Can be a hostname or IP address.
host = None
#: The port of this network location. Can be ``None`` or an integer.
port = None
def __init__(self, host, port=None):
self.host = host
self.port = port
def __str__(self):
if self.port:
if ":" in self.host: # ipv6
fmt = "[{self.host}]:{self.port}"
else:
fmt = "{self.host}:{self.port}"
else:
fmt = "{self.host}"
return fmt.format(self=self)
def __repr__(self):
return "<NetLoc `{}`>".format(self)
# These are not "correct" comparisons from a networking viewpoint.
# However, they are useful to provide a predictable ordering to
# avoid unnecessary config changes. Also, the values do not have
# to be IP addresses but can be hostnames as well.
def __lt__(self, other):
return str(self) < other
def __eq__(self, other):
return str(self) == other
def revert_graph(graph):
graph = ensure_graph_data(graph)
reverse_graph = defaultdict(set)
for node, dependencies in list(graph.items()):
# Ensure all nodes will exist
reverse_graph[node]
for dependency in dependencies:
reverse_graph[dependency].add(node)
return reverse_graph
def ensure_graph_data(graph):
# Ensure that all nodes exist as keys even if they don't have outgoing
# relations.
for node, relations in list(graph.items()):
for relation in relations:
if relation not in graph:
graph[relation] = set()
return graph
class CycleError(ValueError):
def __str__(self):
message = []
components = list(self.args[0].items())
components.sort(key=lambda x: x[0].name)
for component, subs in components:
message.append(component.name + " depends on")
for sub in subs:
message.append(" " + sub.name)
return "\n".join(message)
def remove_nodes_without_outgoing_edges(graph):
for node, dependencies in list(graph.items()):
if not dependencies:
del graph[node]
def topological_sort(graph):
# Take a directed graph and provide a topological sort of all nodes.
#
# The graph is given as
#
# {node: [dependency, dependency], ...}
#
# If the graph has cycles a CycleError will be raised.
graph = ensure_graph_data(graph)
sorted = []
reverse_graph = revert_graph(graph)
roots = [
node for node, incoming in list(reverse_graph.items()) if not incoming]
while roots:
root = roots.pop()
sorted.append(root)
for node in list(graph[root]):
graph[root].remove(node)
reverse_graph[node].remove(root)
if not reverse_graph[node]:
roots.append(node)
if any(graph.values()):
# Simplify the graph a bit to make it easier to spot the cycle.
remove_nodes_without_outgoing_edges(graph)
raise CycleError(dict(graph))
return sorted
class CmdExecutionError(DeploymentError, RuntimeError):
def __init__(self, cmd, returncode, stdout, stderr):
self.cmd = cmd
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
self.args = (cmd, returncode, stdout, stderr)
def report(self):
output.error(self.cmd)
output.tabular("Return code", str(self.returncode), red=True)
output.line("STDOUT", red=True)
output.annotate(self.stdout)
output.line("STDERR", red=True)
output.annotate(self.stderr)
def cmd(cmd,
silent=False,
ignore_returncode=False,
communicate=True,
env=None,
acceptable_returncodes=[0],
encoding="utf-8"):
if not isinstance(cmd, str):
# We use `shell=True`, so the command needs to be a single string and
# we need to pay attention to shell quoting.
quoted_args = []
for arg in cmd:
arg = arg.replace("'", "\\'")
if " " in arg:
arg = "'{}'".format(arg)
quoted_args.append(arg)
cmd = " ".join(quoted_args)
if env is not None:
add_to_env = env
env = os.environ.copy()
env.update(add_to_env)
output.annotate("cmd: {}".format(cmd), debug=True)
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
shell=True,
env=env)
if not communicate:
# XXX See #12550
return process
stdout, stderr = process.communicate()
if encoding is not None:
stdout = stdout.decode(encoding, errors="replace")
stderr = stderr.decode(encoding, errors="replace")
if process.returncode not in acceptable_returncodes:
if not ignore_returncode:
raise CmdExecutionError(cmd, process.returncode, stdout, stderr)
return stdout, stderr
class Timer(object):
def __init__(self, note):
self.duration = 0
self.note = note
def __enter__(self):
self.started = time.time()
def __exit__(self, exc1, exc2, exc3):
self.duration = time.time() - self.started
output.annotate(self.note + " took %fs" % self.duration, debug=True)
def hash(path, function="sha512"):
    # `function` must name a hashlib constructor, e.g. "sha512" or "sha256"
    h = getattr(hashlib, function)()
with open(path, "rb") as f:
chunk = f.read(64 * 1024)
while chunk:
h.update(chunk)
chunk = f.read(64 * 1024)
return h.hexdigest()
def call_with_optional_args(func, **kw):
"""Provide a way to perform backwards-compatible call,
passing only arguments that the function actually expects.
"""
call_kw = {}
verify_args = inspect.signature(func)
for name, parameter in verify_args.parameters.items():
if name in kw:
call_kw[name] = kw[name]
if parameter.kind == inspect.Parameter.VAR_KEYWORD:
call_kw = kw
break
return func(**call_kw)
def dict_merge(a, b):
"""recursively merges dict's. not just simple a['key'] = b['key'], if
both a and b have a key who's value is a dict then dict_merge is called
on both values and the result stored in the returned dictionary.
https://www.xormedia.com/recursively-merge-dictionaries-in-python/
"""
if not isinstance(b, dict):
return b
result = copy.deepcopy(a)
for k, v in b.items():
if k in result and isinstance(result[k], dict):
result[k] = dict_merge(result[k], v)
elif k in result and isinstance(result[k], list):
result[k] = result[k][:]
result[k].extend(v)
else:
result[k] = copy.deepcopy(v)
return result
| 29.865854
| 79
| 0.603716
|
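A small usage sketch for two of the helpers defined above, topological_sort and dict_merge; the toy graph and dictionaries are illustrative (assumes batou is installed so the module imports cleanly).
from batou.utils import dict_merge, topological_sort
# topological_sort takes {node: set_of_dependencies} and raises CycleError on cycles
graph = {"a": {"b"}, "b": {"c"}, "c": set()}
print(topological_sort(graph))  # an ordering of the nodes, e.g. ['a', 'b', 'c']
# dict_merge merges nested dicts recursively and concatenates list values
base = {"db": {"host": "localhost", "ports": [5432]}}
override = {"db": {"ports": [5433], "user": "batou"}}
print(dict_merge(base, override))  # {'db': {'host': 'localhost', 'ports': [5432, 5433], 'user': 'batou'}}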
d9f3830e1898006cb66c7a534b4bc1e5052751ff
| 8,553
|
py
|
Python
|
eeg_modelling/eeg_viewer/data_source.py
|
nilu33032/google-research
|
bb8f5457fd9dba56018b23a038249c539c1b7491
|
[
"Apache-2.0"
] | 1
|
2019-04-18T15:03:39.000Z
|
2019-04-18T15:03:39.000Z
|
eeg_modelling/eeg_viewer/data_source.py
|
nilu33032/google-research
|
bb8f5457fd9dba56018b23a038249c539c1b7491
|
[
"Apache-2.0"
] | null | null | null |
eeg_modelling/eeg_viewer/data_source.py
|
nilu33032/google-research
|
bb8f5457fd9dba56018b23a038249c539c1b7491
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains DataSources that extract channel data from various file types."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import math
import re
from google.protobuf import timestamp_pb2
from eeg_modelling.eeg_viewer import lookup
from eeg_modelling.eeg_viewer import utils
from eeg_modelling.pyprotos import data_pb2
from eeg_modelling.pyprotos import event_pb2
class DataSource(object):
"""Source of waveform data for WaveformsExample to create data responses."""
_CHANNEL_MATCHERS = []
def __init__(self, file_type, key):
self._file_type = file_type
self._key = key
matchers = [matcher for matcher_pair in self._CHANNEL_MATCHERS
for matcher in matcher_pair[0]]
self._lookup = lookup.Lookup(self.GetChannelList(), matchers)
def GetFileKey(self):
"""Returns the key or ID used to access the given file."""
return self._key
def GetFileType(self):
"""Returns the waveform type in the file."""
return self._file_type
@abc.abstractmethod
def GetChannelList(self):
"""Returns the list of features in the file."""
pass
@abc.abstractmethod
def GetLength(self):
"""Returns number of seconds of data in the source."""
pass
@abc.abstractmethod
def GetStartTime(self):
"""Get global start time of the file."""
pass
def GetSamplingFrequency(self, channel_indices):
"""Returns the sampling frequency for a group of channels.
Args:
channel_indices: List of channels indices to get the sampling freq from.
Returns:
Sampling frequency for all the channels (must be the same).
Raises:
ValueError: if the channels don't have the same frequency.
"""
freqs = list(set(
self.GetChannelSamplingFrequency(index)
for index in channel_indices
))
if len(freqs) != 1:
raise ValueError('The requested channels do not have the same frequency')
return freqs[0]
@abc.abstractmethod
def GetChannelSamplingFrequency(self, index):
"""Get the frequency of the data with the given index."""
pass
@abc.abstractmethod
def GetChannelData(self, index_list, start, duration):
"""Returns the feature data associated with the given index.
Args:
index_list: The numerical indices for the requested channels.
start: The start of the requested slice in seconds relative to the
beginning of the data.
duration: The duration of the requested slice in seconds.
"""
pass
def GetChannelName(self, index):
"""Returns the feature name for display that maps to index."""
return self._lookup.GetShorthandFromIndex(index)
def GetChannelIndexFromKey(self, key):
"""Returns the numerical index associated with the channel key.
The key given corresponds to a single key for the given channel data in the
data structure. The index is the assigned numerical index generated for the
data source on construction by the 3-way lookup (e.g. if the TF Example key
is 'eeg_channel/EEG FP1-REF/samples', this function might return 21).
Args:
key: The key used to access the channel in the underlying data structure.
"""
return self._lookup.GetIndexFromKey(key)
def GetChannelIndexFromName(self, name):
"""Returns the numerical index associated with the channel name."""
return self._lookup.GetIndexFromShorthand(name)
@abc.abstractmethod
def GetAnnotations(self):
"""Returns a list of Waveforms Viewer Annotations."""
@abc.abstractmethod
def GetPatientId(self):
"""Returns the patient ID."""
def GetChannelIndexDict(self):
"""Dict of available features to render in the data source.
Each key is an index and its value is the shorthand name for the feature.
Returns:
Dictionary that maps between the feature index and shorthand.
"""
return self._lookup.GetIndexToShorthandDict()
class TfExampleDataSource(DataSource):
"""DataSource that extracts data from a TF Example proto instance."""
# These values are keys that will always be present in a TF Example from the
# Medical Waveforms sandbox
_NUM_SAMPLES_KEY = 'eeg_channel/num_samples'
_FREQ_KEY = 'eeg_channel/sampling_frequency_hz'
_RESAMPLED_NUM_SAMPLES_KEY = 'eeg_channel/resampled_num_samples'
_RESAMPLED_FREQ_KEY = 'eeg_channel/resampled_sampling_frequency_hz'
_START_TIME_KEY = 'start_time'
_PATIENT_ID_KEY = 'segment/patient_id'
_ANNOTATIONS_KEY = 'raw_label_events'
def __init__(self, tf_example, key, file_type):
self._tf_example = tf_example
self._feature = self._tf_example.features.feature
super(TfExampleDataSource, self).__init__(file_type, str(key))
def GetChannelList(self):
return self._feature.keys()
def GetLength(self):
num_samples = self._feature[self._NUM_SAMPLES_KEY].int64_list.value[0]
sample_freq = self._feature[self._FREQ_KEY].float_list.value[0]
return math.ceil(float(num_samples) / sample_freq)
def GetStartTime(self):
start_timestamp = timestamp_pb2.Timestamp.FromString(
self._feature[self._START_TIME_KEY].bytes_list.value[0])
return utils.TimestampPb2ToSeconds(start_timestamp)
def GetChannelSamplingFrequency(self, index):
key = self._lookup.GetKeyFromIndex(index)
for matcher_set, freq_key in self._CHANNEL_MATCHERS:
if any(matcher.match(key) for matcher in matcher_set) and freq_key:
return self._feature[freq_key].float_list.value[0]
return 1
def GetChannelData(self, index_list, start, duration):
freq = self.GetSamplingFrequency(index_list)
chunk_start_index, chunk_end_index = utils.GetSampleRange(freq, duration,
start)
channel_dict = {}
for index in index_list:
key = self._lookup.GetKeyFromIndex(index)
if self._feature[key].HasField('float_list'):
channel_data = self._feature[key].float_list.value
else:
raise ValueError('Channel %s is not a float value.' % key)
channel_dict[str(index)] = channel_data[chunk_start_index:chunk_end_index]
return channel_dict
def GetAnnotations(self):
annotation_strings = self._feature[self._ANNOTATIONS_KEY].bytes_list.value
annotations = []
for annotation_string in annotation_strings:
event = event_pb2.Event.FromString(annotation_string)
annotation = data_pb2.WaveformMetadata.Label()
annotation.label_text = event.label
annotation.start_time = event.start_time_sec
annotations.append(annotation)
return annotations
def GetPatientId(self):
return self._feature[self._PATIENT_ID_KEY].bytes_list.value[0]
class TfExampleEegDataSource(TfExampleDataSource):
"""Data source that extracts EEG data from a TF Example."""
_CHANNEL_MATCHERS = [
([
# EEG channel pattern
re.compile(r'eeg_channel/EEG (\w+)(-\w+)*/samples'),
], 'eeg_channel/sampling_frequency_hz'),
([
# EEG channel pattern for training data
re.compile(r'eeg_channel/EEG (\w+)(-\w+)*/resampled_samples'),
], 'eeg_channel/resampled_sampling_frequency_hz'),
([
# 'seizure bin' used at the shorthand for this key.
re.compile(r'(seizure_bin)ary_per_sec'), # Derived feature pattern.
], None),
]
def __init__(self, tf_example, key):
super(TfExampleEegDataSource, self).__init__(tf_example, key, 'EEG')
class TfExampleEkgDataSource(TfExampleDataSource):
"""Data source that extracts EEG data from a TF Example."""
_CHANNEL_MATCHERS = [
([
# EKG channel pattern
re.compile(r'eeg_channel/POL (EKG\w+)/samples'),
# ECG channel pattern
re.compile(r'eeg_channel/(\w+)/samples')
], 'eeg_channel/sampling_frequency_hz'),
]
def __init__(self, tf_example, key):
super(TfExampleEkgDataSource, self).__init__(tf_example, key, 'EKG')
| 34.768293
| 80
| 0.718578
|
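A tiny check of the channel-key pattern used by TfExampleEegDataSource above, run with the standard re module; the sample key string comes from the GetChannelIndexFromKey docstring and is only illustrative.
import re
eeg_pattern = re.compile(r'eeg_channel/EEG (\w+)(-\w+)*/samples')
key = 'eeg_channel/EEG FP1-REF/samples'
match = eeg_pattern.match(key)
print(match.group(1))  # FP1 -- the captured shorthand-style channel name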
f1f9edd0b0b6b13b4431d1c8eba7750ee6601387
| 66
|
py
|
Python
|
modules/templates/historic/Delphi/__init__.py
|
himansu1997/eden
|
1e2cf2b00f55da46b1ce3e6b7ad44b5345d7a1dc
|
[
"MIT"
] | 205
|
2015-01-20T08:26:09.000Z
|
2022-03-27T19:59:33.000Z
|
modules/templates/historic/Delphi/__init__.py
|
himansu1997/eden
|
1e2cf2b00f55da46b1ce3e6b7ad44b5345d7a1dc
|
[
"MIT"
] | 249
|
2015-02-10T09:56:35.000Z
|
2022-03-23T19:54:36.000Z
|
modules/templates/historic/Delphi/__init__.py
|
himansu1997/eden
|
1e2cf2b00f55da46b1ce3e6b7ad44b5345d7a1dc
|
[
"MIT"
] | 231
|
2015-02-10T09:33:17.000Z
|
2022-02-18T19:56:05.000Z
|
__all__ = ("delphi",
)
import templates.Delphi.delphi
| 13.2
| 30
| 0.606061
|
4f8bd15d1c409bd45f287e39bcb008b8283aad6b
| 4,569
|
py
|
Python
|
pastepoints/pastepoints.py
|
majordookie/paste-cogs
|
b4e9fd919a8c5dde020b6aabac000c27648b835a
|
[
"Apache-2.0"
] | null | null | null |
pastepoints/pastepoints.py
|
majordookie/paste-cogs
|
b4e9fd919a8c5dde020b6aabac000c27648b835a
|
[
"Apache-2.0"
] | null | null | null |
pastepoints/pastepoints.py
|
majordookie/paste-cogs
|
b4e9fd919a8c5dde020b6aabac000c27648b835a
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import io
import re
from collections import namedtuple
import aiohttp
import discord
from redbot.core import Config, checks, commands
from redbot.core.utils.chat_formatting import box, pagify
# Paste Points are the new Bitcoin
BaseCog = getattr(commands, "Cog", object)
upemoji_id = 397064398830829569
downemoji_id = 272737368916754432
channel_id = 331655111644545027
class PastePoints(BaseCog):
"""Paste Points cog settings meme"""
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=974374573)
default_guild = {}
self.config.register_guild(**default_guild)
self.config.register_user(karma=0)
@commands.command()
async def pp(self, ctx: commands.Context, top: int = 10):
"""Prints out the karma leaderboard.
Defaults to top 10. Use negative numbers to reverse the leaderboard.
"""
reverse = True
if top == 0:
top = 10
elif top < 0:
reverse = False
top = -top
members_sorted = sorted(
await self._get_all_members(ctx.bot), key=lambda x: x.karma, reverse=reverse
)
if len(members_sorted) < top:
top = len(members_sorted)
topten = members_sorted[:top]
highscore = ""
place = 1
for member in topten:
highscore += str(place).ljust(len(str(top)) + 1)
highscore += "{} | ".format(member.name).ljust(18 - len(str(member.karma)))
highscore += str(member.karma) + "\n"
place += 1
if highscore != "":
for page in pagify(highscore, shorten_by=12):
await ctx.send(box(page, lang="py"))
else:
await ctx.send("No one has any karma 🙁")
@commands.command()
@checks.is_owner()
async def setpp(self, ctx: commands.Context, user: discord.Member, amount: int):
"""Resets a user's karma."""
await self.config.user(user).karma.set(amount)
await ctx.send("{}'s karma has been set.".format(user.display_name))
async def _get_all_members(self, bot):
"""Get a list of members which have karma.
Returns a list of named tuples with values for `name`, `id`, `karma`.
"""
member_info = namedtuple("Member", "id name karma")
ret = []
for member in bot.get_all_members():
if any(member.id == m.id for m in ret):
continue
karma = await self.config.user(member).karma()
if karma == 0:
continue
ret.append(member_info(
id=member.id, name=str(member), karma=karma))
return ret
@commands.Cog.listener()
async def on_message(self, message):
        if (message.author.id == self.bot.user.id or (message.attachments == [] and message.embeds == [] and re.search(r"http:\/\/|https:\/\/", message.content) is None)):
return
if (message.channel.id == channel_id):
upemoji = self.bot.get_emoji(upemoji_id)
downemoji = self.bot.get_emoji(downemoji_id)
await message.add_reaction(upemoji)
await message.add_reaction(downemoji)
@commands.Cog.listener()
async def on_reaction_add(self, reaction: discord.Reaction, user: discord.User):
"""Fires when the bot sees a reaction being added, and updates karma."""
await self._check_reaction(reaction, user, added=True)
@commands.Cog.listener()
async def on_reaction_remove(self, reaction: discord.Reaction, user: discord.User):
"""Fires when the bot sees a reaction being removed, and updates karma."""
await self._check_reaction(reaction, user, added=False)
async def _check_reaction(self, reaction: discord.Reaction, user: discord.User, *, added: bool):
message = reaction.message
(author, channel, guild) = (message.author, message.channel, message.guild)
if author == user:
return
        if reaction.emoji.id == upemoji_id:
            await self._add_karma(author, 1 if added else -1)
        elif reaction.emoji.id == downemoji_id:
            await self._add_karma(author, -1 if added else 1)
async def _add_karma(self, user: discord.User, amount: int):
settings = self.config.user(user)
karma = await settings.karma()
await settings.karma.set(karma + amount)
| 39.730435
| 172
| 0.602758
|
424622f86c8f06efacc262277b4e5a814bd9daac
| 9,023
|
py
|
Python
|
test/vanilla/version-tolerant/Expected/AcceptanceTests/UrlMultiCollectionFormatVersionTolerant/urlmulticollectionformatversiontolerant/operations/_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
test/vanilla/version-tolerant/Expected/AcceptanceTests/UrlMultiCollectionFormatVersionTolerant/urlmulticollectionformatversiontolerant/operations/_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
test/vanilla/version-tolerant/Expected/AcceptanceTests/UrlMultiCollectionFormatVersionTolerant/urlmulticollectionformatversiontolerant/operations/_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_queries_array_string_multi_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
array_query = kwargs.pop('array_query', None) # type: Optional[List[str]]
accept = "application/json"
# Construct URL
url = '/queries/array/multi/string/null'
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters['arrayQuery'] = [_SERIALIZER.query("array_query", q, 'str') if q is not None else '' for q in array_query]
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_queries_array_string_multi_empty_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
array_query = kwargs.pop('array_query', None) # type: Optional[List[str]]
accept = "application/json"
# Construct URL
url = '/queries/array/multi/string/empty'
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters['arrayQuery'] = [_SERIALIZER.query("array_query", q, 'str') if q is not None else '' for q in array_query]
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_queries_array_string_multi_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
array_query = kwargs.pop('array_query', None) # type: Optional[List[str]]
accept = "application/json"
# Construct URL
url = '/queries/array/multi/string/valid'
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if array_query is not None:
query_parameters['arrayQuery'] = [_SERIALIZER.query("array_query", q, 'str') if q is not None else '' for q in array_query]
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
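# Illustrative sketch (added example, not generated code): with the "multi"
# collection format each element of array_query stays a separate value, so the
# transport emits repeated arrayQuery=... pairs on the wire. The helper name
# below is hypothetical and only builds a request locally for inspection.
def _example_multi_valid_request():
    request = build_queries_array_string_multi_valid_request(
        array_query=["ArrayQuery1", "begin!*'();:@ &=+$,/?#[]end", None, ""]
    )
    # request.url is still relative; the client prepends the host when sending.
    return request.method, request.url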
class QueriesOperations(object):
"""QueriesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def array_string_multi_null(
self, **kwargs # type: Any
):
# type: (...) -> None
"""Get a null array of string using the multi-array format.
:keyword array_query: a null array of string using the multi-array format.
:paramtype array_query: list[str]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
array_query = kwargs.pop("array_query", None) # type: Optional[List[str]]
request = build_queries_array_string_multi_null_request(
array_query=array_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
array_string_multi_null.metadata = {"url": "/queries/array/multi/string/null"} # type: ignore
@distributed_trace
def array_string_multi_empty(
self, **kwargs # type: Any
):
# type: (...) -> None
"""Get an empty array [] of string using the multi-array format.
:keyword array_query: an empty array [] of string using the multi-array format.
:paramtype array_query: list[str]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
array_query = kwargs.pop("array_query", None) # type: Optional[List[str]]
request = build_queries_array_string_multi_empty_request(
array_query=array_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
array_string_multi_empty.metadata = {"url": "/queries/array/multi/string/empty"} # type: ignore
@distributed_trace
def array_string_multi_valid(
self, **kwargs # type: Any
):
# type: (...) -> None
"""Get an array of string ['ArrayQuery1', 'begin!*'();:@ &=+$,/?#[]end' , null, ''] using the
        multi-array format.
        :keyword array_query: an array of string ['ArrayQuery1', 'begin!*'();:@ &=+$,/?#[]end' , null,
         ''] using the multi-array format.
:paramtype array_query: list[str]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
array_query = kwargs.pop("array_query", None) # type: Optional[List[str]]
request = build_queries_array_string_multi_valid_request(
array_query=array_query,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
array_string_multi_valid.metadata = {"url": "/queries/array/multi/string/valid"} # type: ignore
| 36.530364 | 131 | 0.655769 |
01d20866418d5149b9f2b189c563d1210f5e2e49 | 807 | py | Python | manage.py | ksu-svt/svt-event-web-app | 10d6d7ab0829edf45e4d8d503ac57f5b20fecbae | ["MIT"] | null | null | null | manage.py | ksu-svt/svt-event-web-app | 10d6d7ab0829edf45e4d8d503ac57f5b20fecbae | ["MIT"] | 15 | 2017-08-29T03:26:08.000Z | 2018-01-20T21:05:01.000Z | manage.py | ksu-svt/svt-event-web-app | 10d6d7ab0829edf45e4d8d503ac57f5b20fecbae | ["MIT"] | 1 | 2017-09-24T04:56:00.000Z | 2017-09-24T04:56:00.000Z |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "svt_event.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 35.086957 | 77 | 0.643123 |
711ce82ed5e548703444c327276ec5e1b7c5fd16 | 1,429 | py | Python | app/recipe/serializers.py | singhlovepreet1/recipe-app-api | d604dcc3d788691c7ec6ee00e9c10a1f0f9a647d | ["MIT"] | null | null | null | app/recipe/serializers.py | singhlovepreet1/recipe-app-api | d604dcc3d788691c7ec6ee00e9c10a1f0f9a647d | ["MIT"] | null | null | null | app/recipe/serializers.py | singhlovepreet1/recipe-app-api | d604dcc3d788691c7ec6ee00e9c10a1f0f9a647d | ["MIT"] | null | null | null |
from rest_framework import serializers
from core.models import Tag, Ingredient, Recipe
class TagSerializer(serializers.ModelSerializer):
"""Serializer for tag object"""
class Meta:
model = Tag
fields = ('id', 'name')
read_only_fields = ('id',)
class IngredientSerializer(serializers.ModelSerializer):
"""Serializer for an ingredient object"""
class Meta:
model = Ingredient
fields = ('id', 'name')
read_only_fields = ('id',)
class RecipeSerializer(serializers.ModelSerializer):
"""Serialize a recipe"""
ingredients = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Ingredient.objects.all()
)
tags = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Tag.objects.all()
)
class Meta:
model = Recipe
fields = (
'id', 'title', 'ingredients', 'tags', 'time_minutes',
'price', 'link'
)
read_only_fields = ('id',)
class RecipeDetailSerializer(RecipeSerializer):
"""Serialize a recipe detail"""
ingredients = IngredientSerializer(many=True, read_only=True)
tags = TagSerializer(many=True, read_only=True)
class RecipeImageSerializer(serializers.ModelSerializer):
"""Serializer for uploading images to recipes"""
class Meta:
model = Recipe
fields = ('id', 'image')
read_only_fields = ('id',)
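# Illustrative note (added comment; the field values below are made up):
# the list serializer exposes related objects as primary keys, while the
# detail serializer nests the full tag/ingredient payloads, e.g. roughly:
#
#   RecipeSerializer(recipe).data
#   # {'id': 1, 'title': 'Soup', 'ingredients': [3, 7], 'tags': [2],
#   #  'time_minutes': 10, 'price': '5.00', 'link': ''}
#
#   RecipeDetailSerializer(recipe).data
#   # {..., 'ingredients': [{'id': 3, 'name': 'Salt'}, ...],
#   #  'tags': [{'id': 2, 'name': 'Vegan'}, ...]}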
| 25.981818 | 65 | 0.639608 |
b4dd823cdfbd0db425061a9d8a5784ee20ad7e78 | 13,926 | py | Python | tests/test_support.py | sudhanshu8917/babel | 5c1383a4b36809afa196f5813f799036c0ce5b55 | ["BSD-3-Clause"] | null | null | null | tests/test_support.py | sudhanshu8917/babel | 5c1383a4b36809afa196f5813f799036c0ce5b55 | ["BSD-3-Clause"] | null | null | null | tests/test_support.py | sudhanshu8917/babel | 5c1383a4b36809afa196f5813f799036c0ce5b55 | ["BSD-3-Clause"] | 1 | 2019-07-02T07:23:46.000Z | 2019-07-02T07:23:46.000Z |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software, 2013-2019 the Babel team
# All rights reserved.
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
import inspect
import os
import shutil
import tempfile
import unittest
import pytest
from datetime import date, datetime, timedelta
from babel import support
from babel.messages import Catalog
from babel.messages.mofile import write_mo
from babel._compat import BytesIO, PY2
get_arg_spec = (inspect.getargspec if PY2 else inspect.getfullargspec)
@pytest.mark.usefixtures("os_environ")
class TranslationsTestCase(unittest.TestCase):
def setUp(self):
# Use a locale which won't fail to run the tests
os.environ['LANG'] = 'en_US.UTF-8'
messages1 = [
('foo', {'string': 'Voh'}),
('foo', {'string': 'VohCTX', 'context': 'foo'}),
(('foo1', 'foos1'), {'string': ('Voh1', 'Vohs1')}),
(('foo1', 'foos1'), {'string': ('VohCTX1', 'VohsCTX1'), 'context': 'foo'}),
]
messages2 = [
('foo', {'string': 'VohD'}),
('foo', {'string': 'VohCTXD', 'context': 'foo'}),
(('foo1', 'foos1'), {'string': ('VohD1', 'VohsD1')}),
(('foo1', 'foos1'), {'string': ('VohCTXD1', 'VohsCTXD1'), 'context': 'foo'}),
]
catalog1 = Catalog(locale='en_GB', domain='messages')
catalog2 = Catalog(locale='en_GB', domain='messages1')
for ids, kwargs in messages1:
catalog1.add(ids, **kwargs)
for ids, kwargs in messages2:
catalog2.add(ids, **kwargs)
catalog1_fp = BytesIO()
catalog2_fp = BytesIO()
write_mo(catalog1_fp, catalog1)
catalog1_fp.seek(0)
write_mo(catalog2_fp, catalog2)
catalog2_fp.seek(0)
translations1 = support.Translations(catalog1_fp)
translations2 = support.Translations(catalog2_fp, domain='messages1')
self.translations = translations1.add(translations2, merge=False)
def assertEqualTypeToo(self, expected, result):
self.assertEqual(expected, result)
        assert type(expected) == type(result), "instance types do not " + \
"match: %r!=%r" % (type(expected), type(result))
def test_pgettext(self):
self.assertEqualTypeToo('Voh', self.translations.gettext('foo'))
self.assertEqualTypeToo('VohCTX', self.translations.pgettext('foo',
'foo'))
def test_upgettext(self):
self.assertEqualTypeToo(u'Voh', self.translations.ugettext('foo'))
self.assertEqualTypeToo(u'VohCTX', self.translations.upgettext('foo',
'foo'))
def test_lpgettext(self):
self.assertEqualTypeToo(b'Voh', self.translations.lgettext('foo'))
self.assertEqualTypeToo(b'VohCTX', self.translations.lpgettext('foo',
'foo'))
def test_npgettext(self):
self.assertEqualTypeToo('Voh1',
self.translations.ngettext('foo1', 'foos1', 1))
self.assertEqualTypeToo('Vohs1',
self.translations.ngettext('foo1', 'foos1', 2))
self.assertEqualTypeToo('VohCTX1',
self.translations.npgettext('foo', 'foo1',
'foos1', 1))
self.assertEqualTypeToo('VohsCTX1',
self.translations.npgettext('foo', 'foo1',
'foos1', 2))
def test_unpgettext(self):
self.assertEqualTypeToo(u'Voh1',
self.translations.ungettext('foo1', 'foos1', 1))
self.assertEqualTypeToo(u'Vohs1',
self.translations.ungettext('foo1', 'foos1', 2))
self.assertEqualTypeToo(u'VohCTX1',
self.translations.unpgettext('foo', 'foo1',
'foos1', 1))
self.assertEqualTypeToo(u'VohsCTX1',
self.translations.unpgettext('foo', 'foo1',
'foos1', 2))
def test_lnpgettext(self):
self.assertEqualTypeToo(b'Voh1',
self.translations.lngettext('foo1', 'foos1', 1))
self.assertEqualTypeToo(b'Vohs1',
self.translations.lngettext('foo1', 'foos1', 2))
self.assertEqualTypeToo(b'VohCTX1',
self.translations.lnpgettext('foo', 'foo1',
'foos1', 1))
self.assertEqualTypeToo(b'VohsCTX1',
self.translations.lnpgettext('foo', 'foo1',
'foos1', 2))
def test_dpgettext(self):
self.assertEqualTypeToo(
'VohD', self.translations.dgettext('messages1', 'foo'))
self.assertEqualTypeToo(
'VohCTXD', self.translations.dpgettext('messages1', 'foo', 'foo'))
def test_dupgettext(self):
self.assertEqualTypeToo(
u'VohD', self.translations.dugettext('messages1', 'foo'))
self.assertEqualTypeToo(
u'VohCTXD', self.translations.dupgettext('messages1', 'foo', 'foo'))
def test_ldpgettext(self):
self.assertEqualTypeToo(
b'VohD', self.translations.ldgettext('messages1', 'foo'))
self.assertEqualTypeToo(
b'VohCTXD', self.translations.ldpgettext('messages1', 'foo', 'foo'))
def test_dnpgettext(self):
self.assertEqualTypeToo(
'VohD1', self.translations.dngettext('messages1', 'foo1', 'foos1', 1))
self.assertEqualTypeToo(
'VohsD1', self.translations.dngettext('messages1', 'foo1', 'foos1', 2))
self.assertEqualTypeToo(
'VohCTXD1', self.translations.dnpgettext('messages1', 'foo', 'foo1',
'foos1', 1))
self.assertEqualTypeToo(
'VohsCTXD1', self.translations.dnpgettext('messages1', 'foo', 'foo1',
'foos1', 2))
def test_dunpgettext(self):
self.assertEqualTypeToo(
u'VohD1', self.translations.dungettext('messages1', 'foo1', 'foos1', 1))
self.assertEqualTypeToo(
u'VohsD1', self.translations.dungettext('messages1', 'foo1', 'foos1', 2))
self.assertEqualTypeToo(
u'VohCTXD1', self.translations.dunpgettext('messages1', 'foo', 'foo1',
'foos1', 1))
self.assertEqualTypeToo(
u'VohsCTXD1', self.translations.dunpgettext('messages1', 'foo', 'foo1',
'foos1', 2))
def test_ldnpgettext(self):
self.assertEqualTypeToo(
b'VohD1', self.translations.ldngettext('messages1', 'foo1', 'foos1', 1))
self.assertEqualTypeToo(
b'VohsD1', self.translations.ldngettext('messages1', 'foo1', 'foos1', 2))
self.assertEqualTypeToo(
b'VohCTXD1', self.translations.ldnpgettext('messages1', 'foo', 'foo1',
'foos1', 1))
self.assertEqualTypeToo(
b'VohsCTXD1', self.translations.ldnpgettext('messages1', 'foo', 'foo1',
'foos1', 2))
def test_load(self):
tempdir = tempfile.mkdtemp()
try:
messages_dir = os.path.join(tempdir, 'fr', 'LC_MESSAGES')
os.makedirs(messages_dir)
catalog = Catalog(locale='fr', domain='messages')
catalog.add('foo', 'bar')
with open(os.path.join(messages_dir, 'messages.mo'), 'wb') as f:
write_mo(f, catalog)
translations = support.Translations.load(tempdir, locales=('fr',), domain='messages')
self.assertEqual('bar', translations.gettext('foo'))
finally:
shutil.rmtree(tempdir)
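# Note (added comment): Translations.load(dirname, locales, domain) expects the
# standard gettext layout that test_load builds above, i.e.
#     <dirname>/<locale>/LC_MESSAGES/<domain>.mo
# which is why the temporary tree is fr/LC_MESSAGES/messages.mo.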
class NullTranslationsTestCase(unittest.TestCase):
def setUp(self):
fp = BytesIO()
write_mo(fp, Catalog(locale='de'))
fp.seek(0)
self.translations = support.Translations(fp=fp)
self.null_translations = support.NullTranslations(fp=fp)
def method_names(self):
return [name for name in dir(self.translations) if 'gettext' in name]
def test_same_methods(self):
for name in self.method_names():
if not hasattr(self.null_translations, name):
self.fail('NullTranslations does not provide method %r' % name)
def test_method_signature_compatibility(self):
for name in self.method_names():
translations_method = getattr(self.translations, name)
null_method = getattr(self.null_translations, name)
self.assertEqual(
get_arg_spec(translations_method),
get_arg_spec(null_method),
)
def test_same_return_values(self):
data = {
'message': u'foo', 'domain': u'domain', 'context': 'tests',
'singular': u'bar', 'plural': u'baz', 'num': 1,
'msgid1': u'bar', 'msgid2': u'baz', 'n': 1,
}
for name in self.method_names():
method = getattr(self.translations, name)
null_method = getattr(self.null_translations, name)
signature = get_arg_spec(method)
parameter_names = [name for name in signature.args if name != 'self']
values = [data[name] for name in parameter_names]
self.assertEqual(method(*values), null_method(*values))
class LazyProxyTestCase(unittest.TestCase):
def test_proxy_caches_result_of_function_call(self):
self.counter = 0
def add_one():
self.counter += 1
return self.counter
proxy = support.LazyProxy(add_one)
self.assertEqual(1, proxy.value)
self.assertEqual(1, proxy.value)
def test_can_disable_proxy_cache(self):
self.counter = 0
def add_one():
self.counter += 1
return self.counter
proxy = support.LazyProxy(add_one, enable_cache=False)
self.assertEqual(1, proxy.value)
self.assertEqual(2, proxy.value)
def test_can_copy_proxy(self):
from copy import copy
numbers = [1, 2]
def first(xs):
return xs[0]
proxy = support.LazyProxy(first, numbers)
proxy_copy = copy(proxy)
numbers.pop(0)
self.assertEqual(2, proxy.value)
self.assertEqual(2, proxy_copy.value)
def test_can_deepcopy_proxy(self):
from copy import deepcopy
numbers = [1, 2]
def first(xs):
return xs[0]
proxy = support.LazyProxy(first, numbers)
proxy_deepcopy = deepcopy(proxy)
numbers.pop(0)
self.assertEqual(2, proxy.value)
self.assertEqual(1, proxy_deepcopy.value)
def test_format_date():
fmt = support.Format('en_US')
assert fmt.date(date(2007, 4, 1)) == 'Apr 1, 2007'
def test_format_datetime():
from pytz import timezone
fmt = support.Format('en_US', tzinfo=timezone('US/Eastern'))
when = datetime(2007, 4, 1, 15, 30)
assert fmt.datetime(when) == 'Apr 1, 2007, 11:30:00 AM'
def test_format_time():
from pytz import timezone
fmt = support.Format('en_US', tzinfo=timezone('US/Eastern'))
assert fmt.time(datetime(2007, 4, 1, 15, 30)) == '11:30:00 AM'
def test_format_timedelta():
fmt = support.Format('en_US')
assert fmt.timedelta(timedelta(weeks=11)) == '3 months'
def test_format_number():
fmt = support.Format('en_US')
assert fmt.number(1099) == '1,099'
def test_format_decimal():
fmt = support.Format('en_US')
assert fmt.decimal(1.2345) == '1.234'
def test_format_percent():
fmt = support.Format('en_US')
assert fmt.percent(0.34) == '34%'
def test_lazy_proxy():
def greeting(name='world'):
return u'Hello, %s!' % name
lazy_greeting = support.LazyProxy(greeting, name='Joe')
assert str(lazy_greeting) == u"Hello, Joe!"
assert u' ' + lazy_greeting == u' Hello, Joe!'
assert u'(%s)' % lazy_greeting == u'(Hello, Joe!)'
greetings = [
support.LazyProxy(greeting, 'world'),
support.LazyProxy(greeting, 'Joe'),
support.LazyProxy(greeting, 'universe'),
]
greetings.sort()
assert [str(g) for g in greetings] == [
u"Hello, Joe!",
u"Hello, universe!",
u"Hello, world!",
]
def test_catalog_merge_files():
# Refs issues #92, #162
t1 = support.Translations()
assert t1.files == []
t1._catalog["foo"] = "bar"
if PY2:
# Explicitly use the pure-Python `StringIO` class, as we need to
# augment it with the `name` attribute, which we can't do for
# `babel._compat.BytesIO`, which is `cStringIO.StringIO` under
# `PY2`...
from StringIO import StringIO
fp = StringIO()
else:
fp = BytesIO()
write_mo(fp, Catalog())
fp.seek(0)
fp.name = "pro.mo"
t2 = support.Translations(fp)
assert t2.files == ["pro.mo"]
t2._catalog["bar"] = "quux"
t1.merge(t2)
assert t1.files == ["pro.mo"]
assert set(t1._catalog.keys()) == {'', 'foo', 'bar'}
| 38.153425 | 97 | 0.568074 |
5940ac5461e9eedb418a7b88a9a2136104e3610c | 4,047 | py | Python | python/ray/serve/pipeline/tests/test_deployment_node.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | ["Apache-2.0"] | 1 | 2022-03-07T06:40:06.000Z | 2022-03-07T06:40:06.000Z | python/ray/serve/pipeline/tests/test_deployment_node.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | ["Apache-2.0"] | 29 | 2021-11-24T00:50:07.000Z | 2022-03-19T07:11:36.000Z | python/ray/serve/pipeline/tests/test_deployment_node.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | ["Apache-2.0"] | null | null | null |
import pytest
import ray
from ray import serve
from ray.experimental.dag.input_node import InputNode
from ray.serve.pipeline.deployment_node import (
DeploymentNode,
)
from ray.serve.pipeline.constants import USE_SYNC_HANDLE_KEY
@serve.deployment
class ServeActor:
def __init__(self, init_value=0):
self.i = init_value
def inc(self):
self.i += 1
def get(self):
return self.i
class SyncActor:
def __init__(self, init_value=0):
self.i = init_value
def inc(self):
self.i += 1
def get(self):
return self.i
class Actor:
def __init__(self, init_value=0):
self.i = init_value
async def inc(self):
self.i += 1
async def get(self):
return self.i
@pytest.mark.asyncio
async def test_simple_deployment_async(serve_instance):
"""Internal testing only for simple creation and execution.
User should NOT directly create instances of Deployment or DeploymentNode.
"""
node = DeploymentNode(
Actor,
"test",
(10,),
{},
{},
other_args_to_resolve={USE_SYNC_HANDLE_KEY: False},
)
node._deployment.deploy()
handle = node._deployment_handle
assert ray.get(await node.get.execute()) == 10
ray.get(await node.inc.execute())
assert ray.get(await node.get.execute()) == 11
assert ray.get(await node.get.execute()) == ray.get(await handle.get.remote())
def test_simple_deployment_sync(serve_instance):
"""Internal testing only for simple creation and execution.
User should NOT directly create instances of Deployment or DeploymentNode.
"""
node = DeploymentNode(
Actor,
"test",
(10,),
{},
{},
other_args_to_resolve={USE_SYNC_HANDLE_KEY: True},
)
node._deployment.deploy()
handle = node._deployment_handle
assert ray.get(node.get.execute()) == 10
ray.get(node.inc.execute())
assert ray.get(node.get.execute()) == 11
assert ray.get(node.get.execute()) == ray.get(handle.get.remote())
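# Note (added comment): the only difference exercised by the two tests above is
# the handle type selected via USE_SYNC_HANDLE_KEY -- with the async handle each
# .execute() call returns an awaitable that resolves to an ObjectRef (hence the
# `await` in test_simple_deployment_async), while the sync handle returns the
# ObjectRef directly.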
def test_no_input_node_as_init_args():
"""
User should NOT directly create instances of Deployment or DeploymentNode.
"""
with pytest.raises(
ValueError,
match="cannot be used as args, kwargs, or other_args_to_resolve",
):
_ = DeploymentNode(
Actor,
"test",
(InputNode()),
{},
{},
other_args_to_resolve={USE_SYNC_HANDLE_KEY: True},
)
with pytest.raises(
ValueError,
match="cannot be used as args, kwargs, or other_args_to_resolve",
):
_ = DeploymentNode(
Actor,
"test",
(),
{"a": InputNode()},
{},
other_args_to_resolve={USE_SYNC_HANDLE_KEY: True},
)
with pytest.raises(
ValueError,
match="cannot be used as args, kwargs, or other_args_to_resolve",
):
_ = DeploymentNode(
Actor,
"test",
(),
{},
{},
other_args_to_resolve={"arg": {"options_a": InputNode()}},
)
def test_invalid_use_sync_handle():
with pytest.raises(
ValueError,
match=f"{USE_SYNC_HANDLE_KEY} should only be set with a boolean value",
):
_ = DeploymentNode(
Actor,
"test",
[],
{},
{},
other_args_to_resolve={USE_SYNC_HANDLE_KEY: {"options_a": "hii"}},
)
def test_mix_sync_async_handle(serve_instance):
# TODO: (jiaodong) Add complex multi-deployment tests from ray DAG.
pass
def test_deployment_node_as_init_args(serve_instance):
# TODO: (jiaodong) Add complex multi-deployment tests from ray DAG.
pass
def test_multi_deployment_nodes(serve_instance):
# TODO: (jiaodong) Add complex multi-deployment tests from ray DAG.
pass
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| 24.089286 | 82 | 0.604151 |
f092a53625215f4ac23ece0aefdd6ea71b830c3e | 31,847 | py | Python | Compiler/path_oram.py | mkskeller/SPDZ-BMR-ORAM | 78af37ae0316699260199030c28441dbf175d860 | ["BSD-4-Clause-UC"] | 4 | 2019-01-22T23:14:00.000Z | 2021-10-04T02:57:32.000Z | Compiler/path_oram.py | mkskeller/SPDZ-BMR-ORAM | 78af37ae0316699260199030c28441dbf175d860 | ["BSD-4-Clause-UC"] | null | null | null | Compiler/path_oram.py | mkskeller/SPDZ-BMR-ORAM | 78af37ae0316699260199030c28441dbf175d860 | ["BSD-4-Clause-UC"] | 1 | 2021-05-26T19:33:01.000Z | 2021-05-26T19:33:01.000Z |
# (C) 2018 University of Bristol, Bar-Ilan University. See License.txt
if '_Array' not in dir():
from oram import *
import permutation
_Array = Array
import oram
#import pdb
class Counter(object):
def __init__(self, val=0, max_val=None, size=None, value_type=sgf2n):
if value_type is sgf2n:
if isinstance(val, int):
val = 1 << val
if max_val is not None:
self.bit_length = max_val+1
else:
self.bit_length = sgf2n.bit_length
elif value_type is sint:
self.bit_length = log2(max_val+1)
else:
raise CompilerError('Invalid value type for Counter')
self.value = value_type(val)
self.value_type = value_type
if isinstance(val, sgf2n):
self._used = True
else:
self._used = False
def used(self):
return self._used
def increment(self, b):
""" Increment counter by a secret bit """
if self.value_type is sgf2n:
prod = self.value * b
self.value = (2*prod + self.value - prod)
else:
self.value = (self.value + b)
self._used = True
def decrement(self, b):
""" Decrement counter by a secret bit """
if self.value_type is sgf2n:
inv_2 = cgf2n(1) / cgf2n(2)
prod = self.value * b
self.value = (inv_2*prod + self.value - prod)
self._used = True
def reset(self):
if self.value_type is sgf2n:
self.value = self.value_type(1)
else:
self.value = self.value_type(0)
self._used = False
def equal(self, i):
""" Equality with clear int """
if self.value_type is sgf2n:
d = self.value - sgf2n(2**i)
bits = d.bit_decompose(self.bit_length)
return 1 - bits[i]
else:
return self.value.equal(i, self.bit_length)
def equal_range(self, i):
""" Vector of equality bits for 0, 1, ..., i-1 """
return self.value.bit_decompose(self.bit_length)[:i]
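# Illustrative sketch (added example, clear values only, not secret-shared):
# the GF(2^n) Counter stores 2**v instead of v, so a conditional increment by a
# bit b means "double iff b is set". Over the integers the update
# 2*prod + value - prod simplifies to value*(1 + b); the same expression also
# works over GF(2^n), where subtraction is XOR. The toy below mirrors the
# algebra with ordinary integers purely for intuition.
def _toy_counter_increment(val, b):
    prod = val * b
    return 2 * prod + val - prod
assert _toy_counter_increment(1, 0) == 1   # 2**0 stays 2**0 when b == 0
assert _toy_counter_increment(1, 1) == 2   # 2**0 -> 2**1 when b == 1
assert _toy_counter_increment(4, 1) == 8   # 2**2 -> 2**3 when b == 1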
def XOR(a, b):
if isinstance(a, int) and isinstance(b, int):
return a^b
elif isinstance(a, sgf2n) or isinstance(b, sgf2n):
return a + b
else:
try:
return a ^ b
except TypeError:
return a + b - 2*a*b
def pow2_eq(a, i, bit_length=40):
""" Test for equality with 2**i, when a is a power of 2 (gf2n only)"""
d = a - sgf2n(2**i)
bits = d.bit_decompose(bit_length)
return 1 - bits[i]
def empty_entry_sorter(a, b):
""" Sort by entry's empty bit (empty <= not empty) """
return (1 - a.empty()) * b.empty()
def empty_entry_list_sorter(a, b):
""" Sort a list by looking at first element's emptiness """
return (1 - a[0].empty()) * b[0].empty()
def bucket_size_sorter(x, y):
""" Sort buckets by their sizes. Bucket is a list of the form
[entry_0, entry_1, ..., entry_Z, size],
where size is a GF(2^n) element with a single 1 in the position
corresponding to the bucket size """
Z = len(x) - 1
xs = x[-1]
ys = y[-1]
t = 2**Z * xs / ys
    # xs <= ys if bits 0 to Z of t are 0
return 1 - reduce(lambda x,y: x*y, t.bit_decompose(2*Z)[:Z])
def shuffle(x, config=None, value_type=sgf2n, reverse=False):
""" Simulate secure shuffling with Waksman network for 2 players.
Returns the network switching config so it may be re-used later. """
n = len(x)
if n & (n-1) != 0:
raise CompilerError('shuffle requires n a power of 2')
if config is None:
config = permutation.configure_waksman(permutation.random_perm(n))
for i,c in enumerate(config):
config[i] = [value_type(b) for b in c]
permutation.waksman(x, config, reverse=reverse)
permutation.waksman(x, config, reverse=reverse)
return config
def LT(a, b):
a_bits = bit_decompose(a)
b_bits = bit_decompose(b)
u = cgf2n()
BitLTC1(u, a_bits, b_bits, 16)
class PathORAM(TreeORAM):
def __init__(self, size, value_type=sgf2n, value_length=1, entry_size=None, \
bucket_oram=TrivialORAM, tau=3, sigma=5, stash_size=None, \
bucket_size=2, init_rounds=-1):
#if size <= k:
# raise CompilerError('ORAM size too small')
print 'create oram of size', size
self.bucket_oram = bucket_oram
self.bucket_size = bucket_size
self.D = log2(size)
self.logD = log2(self.D) + 1
self.value_type = value_type
if entry_size is not None:
self.value_length = len(tuplify(entry_size))
self.entry_size = tuplify(entry_size)
else:
self.value_length = value_length
self.entry_size = [None] * value_length
self.index_size = log2(size)
self.index_type = value_type.get_type(self.index_size)
self.size = size
self.entry_type = Entry.get_empty(*self.internal_entry_size()).types()
self.buckets = RAM(self.bucket_size * 2**(self.D+1), self.entry_type,
self.get_array)
if init_rounds != -1:
# put memory initialization in different timer
stop_timer()
start_timer(1)
self.buckets.init_mem(self.empty_entry())
if init_rounds != -1:
stop_timer(1)
start_timer()
self.index = self.index_structure(size, self.D, value_type, init_rounds, True)
# deterministic eviction strategy from Gentry et al.
self.deterministic_eviction = True
if stash_size is None:
if self.deterministic_eviction:
if self.bucket_size == 2:
# Z=2 more efficient without sigma/tau limits
tau = 20
sigma = 20
stash_size = 20
elif self.bucket_size == 3:
tau = 20
sigma = 20
stash_size = 2
elif self.bucket_size == 4:
tau = 3
sigma = 5
stash_size = 2
else:
raise CompilerError('Bucket size %d not supported' % self.bucket_size)
else:
tau = 3
sigma = 5
stash_size = 48
self.tau = tau
self.sigma = sigma
self.stash_capacity = stash_size
self.stash = TrivialORAM(stash_size, *self.internal_value_type(), \
index_size=self.index_size)
# temp storage for the path + stash in eviction
self.temp_size = stash_size + self.bucket_size*(self.D+1)
self.temp_storage = RAM(self.temp_size, self.entry_type, self.get_array)
self.temp_levels = [0] * self.temp_size # Array(self.temp_size, 'c')
for i in range(self.temp_size):
self.temp_levels[i] = 0
# these include a read value from the stash
self.read_value = [Array(self.D + 2, self.value_type.get_type(l))
for l in self.entry_size]
self.read_empty = Array(self.D + 2, self.value_type.bit_type)
self.state = MemValue(self.value_type(0))
self.eviction_count = MemValue(cint(0))
# bucket and stash sizes counter
#self.sizes = [Counter(0, max_val=4) for i in range(self.D + 1)]
self.stash_size = Counter(0, max_val=stash_size)
self.read_path = MemValue(value_type.clear_type(0))
@function_block
def evict():
if self.value_type == sgf2n:
self.use_shuffle_evict = True
else:
self.use_shuffle_evict = True
leaf = random_block(self.D, self.value_type).reveal()
if oram.use_insecure_randomness:
leaf = self.value_type(regint.get_random(self.D)).reveal()
if self.deterministic_eviction:
leaf = 0
ec = self.eviction_count.read()
# leaf bits already reversed so just use counter
self.eviction_count.write((ec + 1) % 2**self.D)
leaf = self.value_type.clear_type(ec)
self.state.write(self.value_type(leaf))
print 'eviction leaf =', leaf
# load the path
for i, ram_indices in enumerate(self.bucket_indices_on_path_to(leaf)):
for j, ram_index in enumerate(ram_indices):
self.temp_storage[i*self.bucket_size + j] = self.buckets[ram_index]
self.temp_levels[i*self.bucket_size + j] = i
ies = self.internal_entry_size()
self.buckets[ram_index] = Entry.get_empty(*ies)
# load the stash
for i in range(len(self.stash.ram)):
self.temp_levels[i + self.bucket_size*(self.D+1)] = 0
#for i, entry in enumerate(self.stash.ram):
@for_range(len(self.stash.ram))
def f(i):
entry = self.stash.ram[i]
self.temp_storage[i + self.bucket_size*(self.D+1)] = entry
te = Entry.get_empty(*self.internal_entry_size())
self.stash.ram[i] = te
self.path_regs = [None] * self.bucket_size*(self.D+1)
self.stash_regs = [None] * len(self.stash.ram)
for i, ram_indices in enumerate(self.bucket_indices_on_path_to(leaf)):
for j, ram_index in enumerate(ram_indices):
self.path_regs[j + i*self.bucket_size] = self.buckets[ram_index]
for i in range(len(self.stash.ram)):
self.stash_regs[i] = self.stash.ram[i]
#self.sizes = [Counter(0, max_val=4) for i in range(self.D + 1)]
if self.use_shuffle_evict:
if self.bucket_size == 4:
self.size_bits = [[self.value_type.bit_type(i) for i in (0, 0, 0, 1)] for j in range(self.D+1)]
elif self.bucket_size == 2 or self.bucket_size == 3:
self.size_bits = [[self.value_type.bit_type(i) for i in (0, 0)] for j in range(self.D+1)]
else:
self.size_bits = [[self.value_type.bit_type(0) for i in range(self.bucket_size)] for j in range(self.D+1)]
self.stash_size = Counter(0, max_val=len(self.stash.ram))
leaf = self.state.read().reveal()
if self.use_shuffle_evict:
# more efficient eviction using permutation networks
self.shuffle_evict(leaf)
else:
# naive eviction method
for i,(entry, depth) in enumerate(zip(self.temp_storage, self.temp_levels)):
self.evict_block(entry, depth, leaf)
for i, entry in enumerate(self.stash_regs):
self.stash.ram[i] = entry
for i, ram_indices in enumerate(self.bucket_indices_on_path_to(leaf)):
for j, ram_index in enumerate(ram_indices):
self.buckets[ram_index] = self.path_regs[i*self.bucket_size + j]
self.evict = evict
@method_block
def read_and_remove_levels(self, u):
#print 'reading path to', self.read_path
leaf = self.read_path.read()
for level in range(self.D + 1):
ram_indices = list(self.bucket_indices_on_path_to(leaf))[level]
#print 'level %d, bucket %d' % (level, ram_indices[0]/self.bucket_size)
#for j in range(self.bucket_size):
# #bucket.bucket.ram[j].v.reveal().print_reg('lev%d' % level)
# print str(self.buckets[ram_indices[j]]) + ', ',
#print '\n'
#value, empty = bucket.bucket.read_and_remove(u, 1)
empty_entry = self.empty_entry(False)
skip = 1
found = Array(self.bucket_size, self.value_type.bit_type)
entries = [self.buckets[j] for j in ram_indices]
indices = [e.v for e in entries]
empty_bits = [e.empty() for e in entries]
for j in range(self.bucket_size):
found[j] = indices[j].equal(u, self.index_size) * \
(1 - empty_bits[j])
# at most one 1 in found
empty = 1 - sum(found)
prod_entries = map(operator.mul, found, entries)
read_value = sum((entry.x.skip(skip) for entry in prod_entries), \
empty * empty_entry.x.skip(skip))
for i,(j, entry, prod_entry) in enumerate(zip(ram_indices, entries, prod_entries)):
self.buckets[j] = entry - prod_entry + found[i] * empty_entry
value, empty = [MemValue(v) for v in read_value], MemValue(empty)
for v,w in zip(self.read_value, value):
v[level] = w.read()
self.read_empty[level] = empty.read()
#print 'post-rar from', bucket
#p_bucket.write(bucket.p_children(self.read_path & 1))
#self.read_path.irshift(1)
self.check()
value, empty = self.stash.read_and_remove(u, 1)
for v, w in zip(self.read_value, value):
v[self.D+1] = w
self.read_empty[self.D+1] = empty
def empty_entry(self, apply_type=True):
vtype, entry_size = self.internal_entry_size()
return Entry.get_empty(vtype, entry_size, apply_type, self.index_size)
def shuffle_evict(self, leaf):
""" Evict using oblivious shuffling etc """
evict_debug = False
levels = [None] * len(self.temp_storage)
bucket_sizes = Array(self.D + 2, cint)
for i in range(self.D + 2):
bucket_sizes[i] = regint(0)
Program.prog.curr_tape.start_new_basicblock()
leaf = self.state.read().reveal()
if evict_debug:
print_ln('\tEviction leaf: %s', leaf)
for i,(entry, depth) in enumerate(zip(self.temp_storage, self.temp_levels)):
lca_lev, cbits = self.compute_lca(entry.x[0], leaf, 1 - entry.empty())
level_bits = self.adjust_lca(cbits, depth, 1 - entry.empty())
# last bit indicates stash
levels[i] = [sum(level_bits[j]*j for j in range(self.D+2)), level_bits[-1]]
if evict_debug:
@if_(1 - entry.empty().reveal())
def f():
print_ln('entry (%s, %s) going to level %s', entry.v.reveal(), entry.x[0].reveal(), levels[i][0].reveal())
print_ln('%s ' * len(level_bits), *[b.reveal() for b in level_bits])
if evict_debug:
print_ln("")
# sort entries+levels by emptiness: buckets already sorted so just perform a
# sequence of merges on these and the stash
buckets = [[[self.temp_storage[j]] + levels[j] for j in range(self.bucket_size*i,self.bucket_size*(i+1))] for i in range(self.D+1)]
stash = [None] * (self.stash_capacity)
for i in range(self.stash_capacity):
j = i+self.bucket_size*(self.D+1)
stash[i] = [self.temp_storage[j]] + levels[j]
merged_entries = buckets + [stash]
merged_entries = [m for sl in merged_entries for m in sl]
me_len = len(merged_entries)
while len(merged_entries) & (len(merged_entries)-1) != 0:
merged_entries.append(None)
# sort taking into account stash etc. (GF(2^n) ONLY atm)
permutation.odd_even_merge_sort(merged_entries, lambda a,b: a[0].empty() * (a[-1] - 1 + b[-1]) + 1 - a[-1])
merged_entries = merged_entries[:me_len]
# and sort assigned positions by emptiness (non-empty first)
empty_bits_and_levels = [[0]*self.bucket_size for i in range(self.D+1)]
stash_bits = 0
if evict_debug:
print_str('Size bits: ')
# convert bucket size bits to bits flagging emptiness for each position
for j in range(self.D+1):
s = self.size_bits[j]
#for b in s:
# b.reveal().print_reg('u%d' % j)
if self.bucket_size == 4:
c = s[0]*s[1]
if self.value_type == sgf2n:
empty_bits_and_levels[j][0] = [1 - self.value_type.bit_type(s[0] + s[1] + s[2] + c), self.value_type.clear_type(j)]
empty_bits_and_levels[j][1] = [1 - self.value_type.bit_type(s[1] + s[2]), self.value_type.clear_type(j)]
empty_bits_and_levels[j][2] = [1 - self.value_type.bit_type(c + s[2]), self.value_type.clear_type(j)]
empty_bits_and_levels[j][3] = [1 - self.value_type.bit_type(s[2]), self.value_type.clear_type(j)]
else:
empty_bits_and_levels[j][0] = [1 - self.value_type.bit_type(s[0] + s[1] - c + s[2]), self.value_type.clear_type(j)]
empty_bits_and_levels[j][1] = [1 - self.value_type.bit_type(s[1] + s[2]), self.value_type.clear_type(j)]
empty_bits_and_levels[j][2] = [1 - self.value_type.bit_type(c + s[2]), self.value_type.clear_type(j)]
empty_bits_and_levels[j][3] = [1 - self.value_type.bit_type(s[2]), self.value_type.clear_type(j)]
elif self.bucket_size == 2:
if evict_debug:
print_str('%s,%s,', s[0].reveal(), s[1].reveal())
empty_bits_and_levels[j][0] = [1 - self.value_type.bit_type(s[0] + s[1]), self.value_type.clear_type(j)]
empty_bits_and_levels[j][1] = [1 - self.value_type.bit_type(s[1]), self.value_type.clear_type(j)]
elif self.bucket_size == 3:
c = s[0]*s[1]
empty_bits_and_levels[j][0] = [1 - self.value_type.bit_type(s[0] + s[1] - c), self.value_type.clear_type(j)]
empty_bits_and_levels[j][1] = [1 - self.value_type.bit_type(s[1]), self.value_type.clear_type(j)]
empty_bits_and_levels[j][2] = [1 - self.value_type.bit_type(c), self.value_type.clear_type(j)]
if evict_debug:
print_ln()
empty_bits_and_levels = [x for sl in empty_bits_and_levels for x in sl]
while len(empty_bits_and_levels) & (len(empty_bits_and_levels)-1) != 0:
empty_bits_and_levels.append(None)
permutation.odd_even_merge_sort(empty_bits_and_levels, permutation.bitwise_list_comparator)
empty_bits_and_levels = [e for e in empty_bits_and_levels if e is not None]
# assign levels to empty positions
stash_level = self.value_type.clear_type(self.D + 1)
if evict_debug:
print_ln('Bits and levels: ')
for i, entrylev in enumerate(merged_entries):
entry = entrylev[0]
level = entrylev[1]
if i < len(empty_bits_and_levels):
new_level = (empty_bits_and_levels[i][1] - level) * entry.empty() + level
if evict_debug:
print_ln('\t(empty pos %s, entry %s: empty lev %s, entry %s: new %s)', empty_bits_and_levels[i][0].reveal(), entry.empty().reveal(),
empty_bits_and_levels[i][1].reveal(), level.reveal(), new_level.reveal())
else:
new_level = level + stash_level * entry.empty()
if evict_debug:
print_ln('\t(entry %s: level %s: new %s)', entry.empty().reveal(),
level.reveal(), new_level.reveal())
merged_entries[i] = [entry, new_level]
if evict_debug:
print_ln()
# shuffle entries and levels
while len(merged_entries) & (len(merged_entries)-1) != 0:
merged_entries.append(None) #self.root.bucket.empty_entry(False))
permutation.rec_shuffle(merged_entries, value_type=self.value_type)
merged_entries = [e for e in merged_entries if e is not None]
# need to copy entries/levels to memory for re-positioning
entries_ram = RAM(self.temp_size, self.entry_type, self.get_array)
levels_array = Array(self.temp_size, cint)
for i,entrylev in enumerate(merged_entries):
if entrylev is not None:
entries_ram[i] = entrylev[0]
levels_array[i] = entrylev[1].reveal()
Program.prog.curr_tape.start_new_basicblock()
# reveal shuffled levels
@for_range(self.temp_size)
def f(i):
level = regint(levels_array[i])
sz = regint(bucket_sizes[level])
self.temp_storage[level*self.bucket_size + sz] = entries_ram[i]
bucket_sizes[level] += 1
if evict_debug:
for i in range(self.D+1):
@if_(bucket_sizes[i] != self.bucket_size)
def f():
print_str('Sizes: ')
for i in range(self.D+2):
print_str('%s,', bucket_sizes[i])
print_ln()
runtime_error('Incorrect bucket sizes')
Program.prog.curr_tape.start_new_basicblock()
for i, ram_indices in enumerate(self.bucket_indices_on_path_to(leaf)):
for j, ram_index in enumerate(ram_indices):
self.buckets[ram_index] = self.temp_storage[i*self.bucket_size + j]
for i in range(self.stash_capacity):
self.stash.ram[i] = self.temp_storage[i + (self.D+1)*self.bucket_size]
def evict_block(self, entry, level, leaf):
""" Evict an entry at a given level """
#leaf = self.state.read().reveal()
lca_lev, cbits = self.compute_lca(entry.x[0], leaf, 1 - entry.empty()) #, level + self.sigma)
#new_lca = self.adjust_lca(cbits, level, 1 - entry.empty())
lev, assigned = self.compute_pos(entry, level, lca_lev, leaf)
#print 'evicted to lev', lev.value, assigned
def read_and_remove(self, u):
self.read_path.write(self.read_and_renew_index(u))
self.check()
self.read_and_remove_levels(u)
values = (ValueTuple(x) for x in zip(*self.read_value))
not_empty = [1 - x for x in self.read_empty]
read_empty = 1 - sum(not_empty)
read_value = sum(map(operator.mul, not_empty, values), \
ValueTuple(0 for i in range(self.value_length)))
self.check(u)
Program.prog.curr_tape.\
start_new_basicblock(name='read_and_remove-%d-end' % self.size)
return read_value, read_empty
def buckets_on_path_to(self, leaf):
""" Iterator of buckets on the path to a leaf """
bucket = RefBucket(MemValue(self.root.mem.address), self, True)
yield bucket
for i in range(self.D):
bucket = bucket.ref_children(leaf & 1)
leaf >>= 1
yield bucket
def bucket_indices_on_path_to(self, leaf):
leaf = regint(leaf)
yield range(self.bucket_size)
index = 0
for i in range(self.D):
index = 2*index + 1 + regint(cint(leaf) & 1)
leaf >>= 1
yield [index*self.bucket_size + i for i in range(self.bucket_size)]
def get_bucket_indices(self, i, l):
""" Get RAM indices for the i-th bucket on path to leaf l """
index = 0
for j in range(i):
index = 2*index + 1 + (l & 1)
l >>= 1
index = regint(index)
return [index * self.bucket_size + j for j in range(self.bucket_size)]
def get_bucket(self, i, l):
""" Get the i-th bucket on the path to leaf l """
bucket = RefBucket(MemValue(self.root.mem.address), self, True)
for j in range(i):
bucket = bucket.ref_children(l & 1)
l >>= 1
return bucket
def get_children(self, i, l):
""" Get children of the i-th bucket on level l """
j = 2**l + i - 1
return self.buckets[2*j+1], self.buckets[2*j+2]
def adjust_lca(self, lca_bits, lev, not_empty, prnt=False):
""" Adjust LCA based on bucket capacities (and original clear level, lev) """
found = self.value_type.bit_type(0)
assigned = self.value_type.bit_type(0)
try_add_here = self.value_type.bit_type(0)
new_lca = [self.value_type.bit_type(0)] * (self.D + 1)
upper = min(lev + self.sigma, self.D)
lower = max(lev - self.tau, 0)
for j in range(upper, lower-1, -1):
found += lca_bits[j]
try_add_here += lca_bits[j]
if self.bucket_size == 4:
new_lca[j] = try_add_here * (1 - self.size_bits[j][2]) # (not_empty => lca_bits all 0)
#new_lca[j] = found * (1 - assigned) * (1 - self.size_bits[j][2]) * not_empty
elif self.bucket_size == 2 or self.bucket_size == 3:
new_lca[j] = try_add_here * (1 - self.size_bits[j][1])
if prnt:
new_lca[j].reveal().print_reg('nl%d' % j)
assigned += new_lca[j]
if self.value_type == sgf2n:
try_add_here += new_lca[j]
else:
try_add_here += new_lca[j] - 2*try_add_here*new_lca[j]
if self.bucket_size == 4:
t = new_lca[j] * self.size_bits[j][0]
t2 = t * self.size_bits[j][1]
# s_0 := s_0 \xor b
# s_1 := s_1 \xor (s_0 & b)
# s_2 := s_2 \xor (s_0 & s_1 & b)
if self.value_type == sgf2n:
self.size_bits[j][0] += new_lca[j]
self.size_bits[j][1] += t
self.size_bits[j][2] += t2 #t * self.size_bits[j][1]
else:
self.size_bits[j][0] += new_lca[j] - 2*t
self.size_bits[j][1] += t - 2*t2
self.size_bits[j][2] += t2
# '1 if empty' bit
#self.size_bits[j][3] *= (1 - new_lca[j])
elif self.bucket_size == 2 or self.bucket_size == 3:
t = new_lca[j] * self.size_bits[j][0]
if self.value_type == sgf2n:
self.size_bits[j][0] += new_lca[j]
else:
self.size_bits[j][0] += new_lca[j] - 2*t
self.size_bits[j][1] += t
else:
raise CompilerError('Bucket size %d not supported' % self.bucket_size)
add_to_stash = not_empty - sum(new_lca)
#final_level = sum(new_lca[i]*i for i in range(self.D+1)) + add_to_stash * (self.D+1)
#
#if_then(cint(reveal(not_empty)))
#final_level.reveal().print_reg('lca')
#for j in range(2):
# for k,b in enumerate(self.size_bits[j]):
# b.reveal().print_reg('u%dj%d' % (k,j))
#end_if()
return new_lca + [add_to_stash]
def compute_lca(self, a, b, not_empty, limit=None):
""" Compute depth of the least common ancestor of a and b, upper bounded by limit """
a_bits = bit_decompose(a, self.D)
b_bits = bit_decompose(b, self.D)
found = [None] * self.D
not_found = self.value_type.bit_type(not_empty) #1
if limit is None:
limit = self.D
for i in range(self.D)[:limit]:
# find first position where bits differ (i.e. first 0 in 1 - a XOR b)
t = 1 - XOR(a_bits[i], b_bits[i])
prev_nf = not_found
not_found *= t
found[i] = prev_nf - not_found
if self.use_shuffle_evict:
return None, found + [not_found]
else:
one = self.value_type.clear_type(1)
lca = sum(found[i]*(one << i) for i in range(self.D)[:limit]) + \
(one << limit) * not_found
return Counter(lca, max_val=limit, value_type=self.value_type), found + [not_found]
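    # Illustrative note (added comment, clear-value intuition): with leaf labels
    # read LSB-first along the path, an entry whose own leaf label is a shares
    # the path to eviction leaf b exactly down to the first bit position where
    # a and b disagree. E.g. for D = 4, a = 0b1011 and b = 0b1001 agree on bit 0
    # and differ on bit 1, so the deepest common bucket is at level 1; identical
    # labels give the full depth D.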
def compute_pos(self, entry, lev, levstar, leaf):
""" Clear integer lev, secret gf2n levstar (rep. as power of 2 with Counter object). """
pos = 0
a = 0
b = 0
not_empty = 1 - entry.empty()
upper = min(lev + self.sigma, self.D)
lower = max(lev - self.tau, 0)
levstar_eq = levstar.equal_range(upper+1)
e = 0
b = 0
for j in range(upper, lower - 1, -1):
# e = want to place at this level
e = (1 - b) * ((1 - e)*levstar_eq[j] + e) * not_empty
# b = can place at this level
b = e * (1 - self.size_bits[j][-1])
s = 1 + sgf2n(self.size_bits[j][0])
t = cgf2n(1)
for i in range(1, self.bucket_size):
t <<= 1
s += t * (self.size_bits[j][i-1] + self.size_bits[j][i])
size_eq = (s * b).bit_decompose(self.bucket_size)
a += sum(size_eq)
#self.sizes[j].value.read().reveal().print_reg('sz%d' % j)
#self.sizes[j].equal(self.bucket_size).reveal().print_reg('eq')
#b.reveal().print_reg('b')
#print 'sz%d:' % j, self.sizes[j].value #, levstar.value, b
for i in range(self.bucket_size):
c = size_eq[i]
#t = cint(c.reveal())
#def f():
# entry.x[1].reveal().print_reg('writ')
# t.print_reg('l%di%d' % (j,i))
# entry.x[0].reveal().print_reg('w lf')
#if_statement(t,f)
#if c.reveal() == 1:
# print 'writing block %d at level %d on path to %d' % (i,j,leaf)
# print 'writing', entry*c + bucket.ram[i]*(1 - c)
prev = self.path_regs[i + j*self.bucket_size]
new = c * (entry - prev) + prev
self.path_regs[i + j*self.bucket_size] = new
self.size_bits[j][i] += c
add_to_stash = not_empty - a # (1-a) * not_empty
stash_eq = Counter(self.stash_size.value * add_to_stash, len(self.stash.ram)).equal_range(self.stash.size)
for i,s in enumerate(self.stash_regs):
c = stash_eq[i] #* add_to_stash
te = c * (entry - s) + s # entry*c + s*(1 - c)
self.stash_regs[i] = te
self.stash_size.increment(add_to_stash)
#if add_to_stash.reveal() == 1:
# print 'stash', self.stash_size.value
return levstar, a
def add(self, entry, state=None, evict=True):
if state is None:
state = self.state.read()
l = state
x = tuple(i.read() for i in entry.x)
e = Entry(entry.v.read(), (l,) + x, entry.empty())
#self.temp_storage[self.temp_size-1] = e * 1
#self.temp_levels[self.temp_size-1] = 0
#print 'adding', self.temp_storage[-1][0]
try:
self.stash.add(e)
except Exception:
print self
raise
if evict:
self.evict()
class LocalPathORAM(PathORAM):
""" Debugging only. Path ORAM using index revealing the access
pattern. """
index_structure = LocalPackedIndexStructure
def OptimalORAM(size, *args, **kwargs):
# threshold set from experiments (lower than in SCSL)
threshold = 2**10
if size <= threshold:
return LinearORAM(size,*args,**kwargs)
else:
return RecursivePathORAM(size, *args, **kwargs)
class RecursivePathIndexStructure(PackedIndexStructure):
storage = staticmethod(OptimalORAM)
class RecursivePathORAM(PathORAM):
index_structure = RecursivePathIndexStructure
class AtLeastOneRecursionPackedPathORAM(PackedIndexStructure):
storage = RecursivePathORAM
class AtLeastOneRecursionPackedPathORAMWithEmpty(PackedORAMWithEmpty):
storage = RecursivePathORAM
class OptimalPackedPathORAMWithEmpty(PackedORAMWithEmpty):
storage = staticmethod(OptimalORAM)
| 41.039948 | 152 | 0.558075 |
5fcbc3ba39fc492ab2533c7e35a616bb2e147b48 | 3,178 | py | Python | mtsgi/agents/agents_test.py | srsohn/mtsgi | 31dc14b007758edc0aa340397184f47695705b1c | ["MIT"] | 1 | 2021-03-30T16:43:36.000Z | 2021-03-30T16:43:36.000Z | mtsgi/agents/agents_test.py | srsohn/mtsgi | 31dc14b007758edc0aa340397184f47695705b1c | ["MIT"] | null | null | null | mtsgi/agents/agents_test.py | srsohn/mtsgi | 31dc14b007758edc0aa340397184f47695705b1c | ["MIT"] | null | null | null |
"""
Testing all agents on several environments.
"""
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "-1" # use cpu
import sys
import pytest
from acme import specs
import mockito
from mockito import spy2
from mtsgi import envs
from mtsgi import agents
from mtsgi import environment_loop
from mtsgi.envs import wrappers
from mtsgi.utils import env_utils, snt_utils
def make_envs(env_id: str, graph_param: str, seed: int, num_envs: int):
# Create a single adaptation environment.
adapt_envs = env_utils.create_environment(
env_id=env_id,
graph_param=graph_param,
seed=seed,
batch_size=num_envs,
num_adapt_steps=100,
add_fewshot_wrapper=True,
use_multi_processing=False,
)
# Create test environment.
test_envs = env_utils.create_environment(
env_id=env_id,
graph_param=graph_param,
seed=seed,
batch_size=num_envs,
num_adapt_steps=100,
add_fewshot_wrapper=True,
use_multi_processing=False,
)
return adapt_envs, test_envs
@pytest.mark.parametrize("env_id, graph_param, seed, num_envs", [
('playground', 'D1_eval', 1, 4),
('mining', 'eval', 1, 4),
])
def test_msgi(env_id, graph_param, seed, num_envs):
adapt_envs, test_envs = make_envs(env_id, graph_param, seed, num_envs)
environment_spec = specs.make_environment_spec(adapt_envs)
# Create meta agent.
meta_agent = agents.MSGI(
environment_spec=environment_spec,
num_adapt_steps=20,
num_trial_splits=5,
environment_id=env_id,
branch_neccessary_first=True,
exploration='random',
temp=200,
w_a=3.0,
beta_a=8.0,
ep_or=0.8,
temp_or=2.0
)
# Run meta loop.
meta_loop = environment_loop.EnvironmentMetaLoop(
adapt_environment=adapt_envs,
test_environment=test_envs,
meta_agent=meta_agent,
label='meta_eval'
)
meta_loop.run(
num_trials=3,
num_adapt_steps=20, # should be greater than TimeLimit
num_test_episodes=1,
num_trial_splits=5
)
@pytest.mark.parametrize("env_id, graph_param, seed, num_envs", [
('playground', 'D1_train', 1, 4),
('mining', 'train', 1, 4),
])
def test_rlrl(env_id, graph_param, seed, num_envs):
adapt_envs, test_envs = make_envs(env_id, graph_param, seed, num_envs)
environment_spec = specs.make_environment_spec(adapt_envs)
if env_id in {'playground', 'mining'}:
# spatial observation.
network = snt_utils.CombinedNN(environment_spec.actions)
else:
network = snt_utils.RecurrentNN(environment_spec.actions)
# Create meta agent.
meta_agent = agents.RLRL(
environment_spec=environment_spec,
network=network,
n_step_horizon=10,
minibatch_size=10
)
# Run meta loop.
meta_loop = environment_loop.EnvironmentMetaLoop(
adapt_environment=adapt_envs,
test_environment=test_envs,
meta_agent=meta_agent,
label='meta_train' # XXX meta train
)
meta_loop.run(
num_trials=3,
num_adapt_steps=100, # should be greater than TimeLimit
num_test_episodes=1,
num_trial_splits=1
)
if __name__ == '__main__':
sys.exit(pytest.main(["-s", "-v"] + sys.argv))
| 24.828125 | 72 | 0.698553 |
2c59d91ce2652a5ae333ef12cf77eed78e26c0f6 | 9,702 | py | Python | etc/configure.py | tardyp/scancode-toolkit | cce8cbc64746ed58f2f0a9bf03d6c3500bd7cc1c | ["Apache-2.0", "CC-BY-4.0"] | null | null | null | etc/configure.py | tardyp/scancode-toolkit | cce8cbc64746ed58f2f0a9bf03d6c3500bd7cc1c | ["Apache-2.0", "CC-BY-4.0"] | null | null | null | etc/configure.py | tardyp/scancode-toolkit | cce8cbc64746ed58f2f0a9bf03d6c3500bd7cc1c | ["Apache-2.0", "CC-BY-4.0"] | null | null | null |
#!/usr/bin/python
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
"""
A configuration helper that wraps virtualenv and pip to configure an isolated
Python virtual environment and install requirements there with pip, using
bundled packages from a third-party directory. It does some minimal checks on
supported OSes, architectures and Python versions.
To use, create requirements files and use this way:
* to configure and install::
./configure.py [<pip requirement argument>, ...]
* to cleanup everything::
./configure.py --clean
"""
import os
import shutil
import subprocess
import sys
def unsupported(platform):
print('Unsupported Python, OS, platform or architecture: {platform}'.format(platform=platform))
print('See https://github.com/nexB/scancode-toolkit/ for supported OS/platforms.')
print('Enter a ticket https://github.com/nexB/scancode-toolkit/issues '
'asking for support of your OS/platform combo.')
sys.exit(1)
# Supported platforms and arches
# Supported Python versions
sys_platform = str(sys.platform).lower()
if not(
sys_platform.startswith('linux')
or 'win32' in sys_platform
or 'darwin' in sys_platform
or 'freebsd' in sys_platform
):
unsupported(sys_platform)
if not (sys.maxsize > 2 ** 32):
    unsupported('32-bit Python: use a 64-bit OS and Python instead.')
if sys.version_info < (3, 6):
    unsupported('Only 64-bit Python 3.6 and above is supported')
on_win = 'win32' in sys_platform
if on_win:
BIN_DIR_NAME = 'Scripts'
else:
BIN_DIR_NAME = 'bin'
def call(cmd):
"""
    Run the `cmd` command (as a list of args) with all env vars, using the
    global ROOT_DIR as the current working directory.
"""
cmd = ' '.join(cmd)
try:
subprocess.check_call(
cmd,
shell=True,
env=dict(os.environ),
cwd=ROOT_DIR,
stderr=subprocess.STDOUT,
)
except Exception as e:
raise Exception(f'Failed to run {cmd}') from e
# list of cleanable directory and file paths
cleanable = '''
build
bin
lib
lib64
include
tcl
local
.Python
.eggs
pip-selfcheck.json
src/scancode_toolkit.egg-info
SCANCODE_DEV_MODE
man
Scripts
'''.split()
def find_pycache(root_dir):
"""
Yield __pycache__ directory paths found in root_dir as paths relative to
root_dir.
"""
for top, dirs, _files in os.walk(root_dir):
for d in dirs:
if d != '__pycache__':
continue
dir_path = os.path.join(top, d)
dir_path = dir_path.replace(root_dir, '', 1)
dir_path = dir_path.strip(os.path.sep)
yield dir_path
def clean(root_dir, cleanable=cleanable):
"""
Remove `cleanable` directories and files from `root_dir`.
"""
print('* Cleaning ...')
# also clean __pycache__ if any
cleanable.extend(find_pycache(root_dir))
for d in cleanable:
loc = os.path.join(root_dir, d)
if os.path.exists(loc):
if os.path.isdir(loc):
shutil.rmtree(loc)
else:
os.remove(loc)
def quote(s):
"""
Return a string s enclosed in double quotes.
"""
return '"{}"'.format(s)
def create_virtualenv(root_dir, venv_pyz, quiet=False):
"""
Create a virtualenv in the `root_dir` directory. Use the current Python
    exe. Use the virtualenv.pyz app at `venv_pyz`.
If `quiet` is True, information messages are suppressed.
    Note: we do not use the bundled Python 3 "venv" because its behavior and
    presence are not consistent across Linux distros, and sometimes pip is not
    included by default. The virtualenv.pyz app cures all these issues.
"""
if not quiet:
print('* Configuring Python ...')
if not venv_pyz or not os.path.exists(venv_pyz):
print(f'Configuration Error: Unable to find {venv_pyz}... aborting.')
exit(1)
standard_python = sys.executable
# once we have a pyz, we do not want to download anything else nor ever update
vcmd = [
standard_python, quote(venv_pyz),
'--wheel', 'embed', '--pip', 'embed', '--setuptools', 'embed',
'--seeder', 'pip',
'--never-download',
'--no-periodic-update',
]
if quiet:
vcmd += ['-qq']
# we create the virtualenv in the root_dir
vcmd += [quote(root_dir)]
call(vcmd)
def activate_virtualenv():
"""
Activate the ROOT_DIR virtualenv in the current process.
"""
activate_this = os.path.join(BIN_DIR, 'activate_this.py')
exec(open(activate_this).read(), {'__file__': activate_this})
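# Note (descriptive comment, not from upstream docs): exec-ing virtualenv's
# activate_this.py mutates the *current* interpreter state in place -- it
# prepends the venv's site-packages to sys.path and its bin/Scripts dir to
# PATH -- so later pip calls run against the venv without spawning a new
# Python process.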
def pip_install(req_args, quiet=False):
"""
Install a list of `req_args` command line requirement arguments with
pip, using packages found in THIRDPARTY_DIR_OR_LINKS directory or URL.
"""
if not quiet:
print('* Installing packages ...')
if on_win:
cmd = [CONFIGURED_PYTHON, '-m', 'pip']
else:
cmd = [quote(os.path.join(BIN_DIR, 'pip'))]
# note: --no-build-isolation means that pip/wheel/setuptools will not
# be reinstalled a second time and this speeds up the installation.
# We always have the PEP517 build dependencies installed already.
cmd += [
'install', '--upgrade',
'--no-build-isolation',
'--no-index',
'--find-links', THIRDPARTY_DIR_OR_LINKS,
]
if quiet:
cmd += ['-qq']
cmd += req_args
call(cmd)
def pip_cache_wheels(requirement_files, quiet=False):
"""
Download and cache wheels from a list of `requirement_files` pip requirement
files using packages found in THIRDPARTY_LINKS and save them in the
THIRDPARTY_DIR directory.
"""
if not quiet:
print('* Downloading packages ...')
for req_file in requirement_files:
if on_win:
cmd = [CONFIGURED_PYTHON, '-m', 'pip']
else:
cmd = [quote(os.path.join(BIN_DIR, 'pip'))]
cmd += [
'download', '--no-index',
'--find-links', THIRDPARTY_LINKS,
'--dest', THIRDPARTY_DIR,
]
if quiet:
cmd += ['-qq']
cmd += ['--requirement', req_file]
call(cmd)
def get_pip_req_files(req_args):
rfs = [f for f in req_args if f.startswith('requirements') and f.endswith('.txt')]
return rfs
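# Illustration (hypothetical call, not part of the original script):
#   get_pip_req_files(['--requirement', 'requirements.txt', 'requirements-dev.txt'])
#   -> ['requirements.txt', 'requirements-dev.txt']
# i.e. only bare file names matching requirements*.txt are kept.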
usage = '\nUsage: configure [--clean] [<pip requirement arguments>]\n'
if __name__ == '__main__':
# you must create a CONFIGURE_QUIET env var if you want to run quietly
##################
quiet = 'CONFIGURE_QUIET' in os.environ
# define/setup common directories and locations
##################
current_dir = os.path.abspath(os.path.dirname(__file__))
ROOT_DIR = os.path.dirname(current_dir)
sys.path.insert(0, ROOT_DIR)
BIN_DIR = os.path.join(ROOT_DIR, BIN_DIR_NAME)
if on_win:
CONFIGURED_PYTHON = os.path.join(BIN_DIR, 'python.exe')
else:
CONFIGURED_PYTHON = os.path.join(BIN_DIR, 'python')
# THIRDPARTY_DIR is a cache of wheels
THIRDPARTY_DIR = os.environ.get('THIRDPARTY_DIR', 'thirdparty')
THIRDPARTY_DIR = os.path.join(ROOT_DIR, THIRDPARTY_DIR)
os.makedirs(THIRDPARTY_DIR, exist_ok=True)
THIRDPARTY_LINKS = os.environ.get('THIRDPARTY_LINKS', 'https://thirdparty.aboutcode.org/pypi')
# no_cache = 'CONFIGURE_NO_CACHE' in os.environ
# if no_cache:
# THIRDPARTY_DIR_OR_LINKS = THIRDPARTY_DIR
# else:
# if we have at least one wheel in THIRDPARTY_DIR, we assume we are offline
# otherwise we are online and use our remote links for pip operations
has_wheels = any(w.endswith('.whl') for w in os.listdir(THIRDPARTY_DIR))
THIRDPARTY_DIR_OR_LINKS = THIRDPARTY_DIR if has_wheels else THIRDPARTY_LINKS
# collect args
##################
requirement_args = ['--requirement', 'requirements.txt']
args = sys.argv[1:]
if args:
arg0 = args[0]
if arg0 == '--clean':
clean(ROOT_DIR)
sys.exit(0)
# use provided pip args instead of defaults
requirement_args = args
# Determine where to get dependencies from
#################################
# etc/thirdparty must contain virtualenv.pyz
etc_thirdparty = os.path.join(os.path.dirname(__file__), 'thirdparty')
VIRTUALENV_PYZ_APP_LOC = os.path.join(etc_thirdparty, 'virtualenv.pyz')
if not os.path.exists(VIRTUALENV_PYZ_APP_LOC):
print(
f'* FAILED to configure: virtualenv application {VIRTUALENV_PYZ_APP_LOC} not found. '
'The current version needs to be saved in etc/thirdparty. '
'See https://github.com/pypa/get-virtualenv and '
'https://virtualenv.pypa.io/en/latest/installation.html#via-zipapp')
sys.exit(1)
# Configure proper: create and activate virtualenv
###########################
if not os.path.exists(CONFIGURED_PYTHON):
create_virtualenv(root_dir=ROOT_DIR, venv_pyz=VIRTUALENV_PYZ_APP_LOC, quiet=quiet)
activate_virtualenv()
# cache requirements
req_files = get_pip_req_files(requirement_args)
# pip_cache_wheels(requirement_files=req_files, quiet=quiet)
    # ... and install
pip_install(requirement_args, quiet=quiet)
if not quiet:
print('* Configuration completed.')
print()
| 29.047904
| 99
| 0.642136
|
4474b160d34ca3401008575d2535acd191638a35
| 1,529
|
py
|
Python
|
normatrix/normatrix/plugged/two_space.py
|
TristanMasselot/NorMatrix
|
978b204a7a88dd38629bc286677af155f0a6b7a6
|
[
"MIT"
] | null | null | null |
normatrix/normatrix/plugged/two_space.py
|
TristanMasselot/NorMatrix
|
978b204a7a88dd38629bc286677af155f0a6b7a6
|
[
"MIT"
] | null | null | null |
normatrix/normatrix/plugged/two_space.py
|
TristanMasselot/NorMatrix
|
978b204a7a88dd38629bc286677af155f0a6b7a6
|
[
"MIT"
] | null | null | null |
try:
from normatrix.source.file_parser import CFileParse
except ModuleNotFoundError:
from normatrix.normatrix.source.file_parser import CFileParse
def check_non_comment_line(file: CFileParse, line: str, i: int, IS_IN_COMMENT: bool, nb_error: int, list_error: list):
separator = [chr(k) for k in range(ord('a'), ord('z')+1)] + \
[chr(k) for k in range(ord('A'), ord('Z')+1)] + \
[chr(k) for k in range(ord('0'), ord('9')+1)]
line: str = line.split("//")[0]
line = line.split("/*")
if len(line) == 1:
line: str = line[0]
if line.endswith("\\"):
return (IS_IN_COMMENT, nb_error)
e = 0
while e < len(line) and line[e] not in separator:
e += 1
if " " in line[e:]:
nb_error += 1
list_error.append((i + 1, f"two space alone ({line[e:]})"))
else:
IS_IN_COMMENT = True
return (IS_IN_COMMENT, nb_error)
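# Example (hypothetical input, for illustration only): a line such as
# "int  x;" -- two consecutive spaces after its first alphanumeric
# character -- is reported as "two space alone", while lines ending in "\"
# or lines that open a /* block comment are skipped by the checks above.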
def check(context, file: CFileParse) -> (int, int, list):
IS_IN_COMMENT = False
nb_error = 0
list_error = []
filelines = file.sub_filelines
if file.filepath.endswith("Makefile"):
return (nb_error, 1, list_error)
for i, line in enumerate(filelines):
if IS_IN_COMMENT:
if "*/" in line:
IS_IN_COMMENT = False
else:
IS_IN_COMMENT, nb_error = check_non_comment_line(file, line,
i, IS_IN_COMMENT, nb_error, list_error
)
return (nb_error, 1, list_error)
| 34.75
| 118
| 0.572269
|
a131b195020514ce15713e523855a37a946e453d
| 406
|
py
|
Python
|
CursoEmVideoPython/desafio34.py
|
miguelabreuss/scripts_python
|
cf33934731a9d1b731672d4309aaea0a24ae151a
|
[
"MIT"
] | null | null | null |
CursoEmVideoPython/desafio34.py
|
miguelabreuss/scripts_python
|
cf33934731a9d1b731672d4309aaea0a24ae151a
|
[
"MIT"
] | 1
|
2020-07-04T16:27:25.000Z
|
2020-07-04T16:27:25.000Z
|
CursoEmVideoPython/desafio34.py
|
miguelabreuss/scripts_python
|
cf33934731a9d1b731672d4309aaea0a24ae151a
|
[
"MIT"
] | null | null | null |
print('-='*20)
print('ANALISADOR DE TRIÂNGULOS')
print('-='*20)
c1 = float(input('Digite o comprimento da primeira reta:[cm] '))
c2 = float(input('Digite o comprimento da segunda reta:[cm] '))
c3 = float(input('Digite o comprimento da terceira reta:[cm] '))
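# Triangle inequality: each side must lie strictly between the difference
# and the sum of the other two sides for the three lengths to form a triangle.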
if c2 - c3 < c1 < c2 + c3 and c1 - c3 < c2 < c1 + c3 and c2 - c1 < c3 < c2 + c1:
print('É UM TRIÂNGULO!')
else:
print('NÃO É UM TRIÂNGULO!')
| 40.6
| 80
| 0.633005
|
163698227d7a19c112e97e01a4d8f29d0cdcd241
| 3,468
|
py
|
Python
|
growlee/src/GrowleeConnection.py
|
TwolDE2/enigma2-plugins
|
06685a5ce6a65a8724d3b32c8f7906714650ca2c
|
[
"OLDAP-2.3"
] | 30
|
2015-05-08T22:10:00.000Z
|
2022-03-13T22:09:31.000Z
|
growlee/src/GrowleeConnection.py
|
TwolDE2/enigma2-plugins
|
06685a5ce6a65a8724d3b32c8f7906714650ca2c
|
[
"OLDAP-2.3"
] | 124
|
2015-04-27T21:30:48.000Z
|
2022-03-29T10:21:39.000Z
|
growlee/src/GrowleeConnection.py
|
TwolDE2/enigma2-plugins
|
06685a5ce6a65a8724d3b32c8f7906714650ca2c
|
[
"OLDAP-2.3"
] | 193
|
2015-01-10T09:21:26.000Z
|
2022-03-21T08:19:33.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from Components.config import config
from Tools import Notifications
from Screens.MessageBox import MessageBox
from twisted.internet.defer import Deferred
from twisted.internet import reactor
from . import NOTIFICATIONID
def emergencyDisable(*args, **kwargs):
if args:
try:
args[0].printTraceback()
except Exception:
pass
global growleeConnection
if growleeConnection:
growleeConnection.stop()
if hasattr(Notifications, 'notificationQueue'):
addedList = Notifications.notificationQueue.addedCB
else:
addedList = Notifications.notificationAdded
if gotNotification in addedList:
addedList.remove(gotNotification)
Notifications.AddPopup(
_("Network error.\nDisabling Growlee until next restart!"),
MessageBox.TYPE_ERROR,
10
)
def gotNotification():
if hasattr(Notifications, 'notificationQueue'):
notifications = Notifications.notificationQueue.queue
def handler(note):
return note.fnc, note.screen, note.args, note.kwargs, note.id
else:
notifications = Notifications.notifications
handler = lambda note: note
if notifications:
_, screen, args, kwargs, id = handler(notifications[-1])
if screen is MessageBox and id != NOTIFICATIONID:
# NOTE: priority is in [-2; 2] but type is [0; 3] so map it
# XXX: maybe priority==type-2 would be more appropriate
priority = kwargs.get("type", 0) - 1
timeout = kwargs.get("timeout", -1)
if "text" in kwargs:
description = kwargs["text"]
else:
description = args[0]
description = description
growleeConnection.sendNotification(title="Dreambox", description=description, priority=priority, timeout=timeout, id=id)
class GrowleeConnection:
connections = []
pending = 0
def sendNotification(self, title="Dreambox", description='', priority=-1, timeout=-1, id=""):
for connection, host in self.connections:
try:
level = int(host.level.value)
except ValueError:
level = -1
if connection and id not in host.blacklist.value and not priority < level:
connection.sendNotification(title=title, description=description, priority=priority, timeout=timeout)
def listen(self):
if self.connections:
return
for host in config.plugins.growlee.hosts:
if not (host.enable_outgoing.value or host.enable_incoming.value):
continue
proto = host.protocol.value
if proto == "prowl":
from .Prowl import ProwlAPI
connection = ProwlAPI(host)
elif proto == "growl":
from .GrowlTalk import GrowlTalkAbstraction
connection = GrowlTalkAbstraction(host)
elif proto == "gntp":
from .GNTP import GNTPAbstraction
connection = GNTPAbstraction(host)
elif proto == "snarl":
from .SNP import SnarlNetworkProtocolAbstraction
connection = SnarlNetworkProtocolAbstraction(host)
else: # proto == "syslog":
from .Syslog import SyslogAbstraction
connection = SyslogAbstraction(host)
self.connections.append((connection, host))
def maybeClose(self, resOrFail, defer=None):
self.pending -= 1
if self.pending == 0:
if defer:
defer.callback(True)
def stop(self):
defer = Deferred()
self.pending = 0
for connection, host in self.connections:
d = connection.stop()
if d is not None:
self.pending += 1
d.addBoth(self.maybeClose, defer=defer)
del self.connections[:]
if self.pending == 0:
reactor.callLater(1, defer, True)
return defer
growleeConnection = GrowleeConnection()
| 27.52381
| 123
| 0.729527
|
4abf8a4e5b7ce7da146ea2e7981972fb317190dd
| 3,379
|
py
|
Python
|
setup.py
|
archiif/streamlink
|
5c9fa063e2200aaeb530c59a5e8ead7048334d38
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
archiif/streamlink
|
5c9fa063e2200aaeb530c59a5e8ead7048334d38
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
archiif/streamlink
|
5c9fa063e2200aaeb530c59a5e8ead7048334d38
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
from os import environ, path
from sys import argv, exit, version_info
from textwrap import dedent
from setuptools import setup
import versioneer
def format_msg(text, *args, **kwargs):
return dedent(text).strip(" \n").format(*args, **kwargs)
CURRENT_PYTHON = version_info[:2]
REQUIRED_PYTHON = (3, 6)
# This check and everything above must remain compatible with older Python versions
if CURRENT_PYTHON < REQUIRED_PYTHON:
exit(format_msg("""
========================================================
Unsupported Python version
========================================================
This version of Streamlink requires at least Python {}.{},
but you're trying to install it on Python {}.{}.
This may be because you are using a version of pip that
doesn't understand the python_requires classifier.
Make sure you have pip >= 9.0 and setuptools >= 24.2
""", *(REQUIRED_PYTHON + CURRENT_PYTHON)))
# Explicitly disable running tests via setuptools
if "test" in argv:
exit(format_msg("""
Running `python setup.py test` has been deprecated since setuptools 41.5.0.
Streamlink requires pytest for collecting and running tests, via one of these commands:
`pytest` or `python -m pytest` (see the pytest docs for more infos about this)
"""))
deps = [
"requests>=2.26.0,<3.0",
"isodate",
"lxml>=4.6.4,<5.0",
"websocket-client>=0.58.0",
# Support for SOCKS proxies
"PySocks!=1.5.7,>=1.5.6",
]
# for encrypted streams
if environ.get("STREAMLINK_USE_PYCRYPTO"):
deps.append("pycrypto")
else:
# this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6
deps.append("pycryptodome>=3.4.3,<4")
# for localization
if environ.get("STREAMLINK_USE_PYCOUNTRY"):
deps.append("pycountry")
else:
deps.append("iso-639")
deps.append("iso3166")
def is_wheel_for_windows():
if "bdist_wheel" in argv:
names = ["win32", "win-amd64", "cygwin"]
length = len(argv)
for pos in range(argv.index("bdist_wheel") + 1, length):
if argv[pos] == "--plat-name" and pos + 1 < length:
return argv[pos + 1] in names
elif argv[pos][:12] == "--plat-name=":
return argv[pos][12:] in names
return False
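# Illustration (hypothetical invocations, not part of upstream setup.py):
#   python setup.py bdist_wheel --plat-name win-amd64   -> True
#   python setup.py bdist_wheel --plat-name=manylinux1  -> False
#   python setup.py sdist                               -> False (no bdist_wheel)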
entry_points = {
"console_scripts": ["streamlink=streamlink_cli.main:main"]
}
if is_wheel_for_windows():
entry_points["gui_scripts"] = ["streamlinkw=streamlink_cli.main:main"]
# optional data files
data_files = [
# shell completions
# requires pre-built completion files via shtab (dev-requirements.txt)
# `./script/build-shell-completions.sh`
("share/bash-completion/completions", ["build/shtab/bash/streamlink"]),
("share/zsh/site-functions", ["build/shtab/zsh/_streamlink"]),
# man page
# requires pre-built man page file via sphinx (docs-requirements.txt)
# `make --directory=docs clean man`
("share/man/man1", ["docs/_build/man/streamlink.1"])
]
data_files = [
(destdir, [file for file in srcfiles if path.exists(file)])
for destdir, srcfiles in data_files
]
setup(
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
install_requires=deps,
entry_points=entry_points,
data_files=data_files,
)
| 30.718182
| 96
| 0.64013
|
4282f680786fbcbf6ef87b5b484a19bb3c988e2d
| 39,075
|
py
|
Python
|
var/spack/repos/builtin/packages/openfoam/package.py
|
cjy7117/spack
|
3582115d2af3ebd8156d742883c049e5b864eb2d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2021-07-03T22:53:51.000Z
|
2021-07-03T22:53:51.000Z
|
var/spack/repos/builtin/packages/openfoam/package.py
|
cjy7117/spack
|
3582115d2af3ebd8156d742883c049e5b864eb2d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/openfoam/package.py
|
cjy7117/spack
|
3582115d2af3ebd8156d742883c049e5b864eb2d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2020-01-10T18:54:54.000Z
|
2021-07-03T22:57:16.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
# Author: Mark Olesen <mark.olesen@esi-group.com>
#
# Legal Notice
# ------------
# OPENFOAM is a trademark owned by OpenCFD Ltd
# (producer and distributor of the OpenFOAM software via www.openfoam.com).
# The trademark information must remain visible and unadulterated in this
# file and via the "spack info" and comply with the term set by
# http://openfoam.com/legal/trademark-policy.php
#
# This file is not part of OpenFOAM, nor does it constitute a component of an
# OpenFOAM distribution.
#
##############################################################################
#
# Notes
# - mpi handling: WM_MPLIB=USERMPI and use spack to populate an appropriate
# configuration and generate wmake rules for 'USER' and 'USERMPI'
# mpi implementations.
#
# - Resolution of flex, zlib needs more attention (within OpenFOAM)
# - +paraview:
# depends_on should just be 'paraview+plugins' but that resolves poorly.
# Workaround: use preferred variants "+plugins +qt"
# packages:
# paraview:
# variants: +plugins +qt
# in ~/.spack/packages.yaml
#
# Known issues
# - Combining +zoltan with +int64 has not been tested, but probably won't work.
# - Combining +mgridgen with +int64 or +float32 probably won't work.
#
# The spack 'develop' version of openfoam retains the upstream
# WM_PROJECT_VERSION=com naming internally.
#
##############################################################################
import glob
import re
import os
from spack import *
from spack.util.environment import EnvironmentModifications
import llnl.util.tty as tty
# Not the nice way of doing things, but is a start for refactoring
__all__ = [
'add_extra_files',
'write_environ',
'rewrite_environ_files',
'mplib_content',
'foam_add_path',
'foam_add_lib',
'OpenfoamArch',
]
def add_extra_files(foam_pkg, common, local, **kwargs):
"""Copy additional common and local files into the stage.source_path
from the openfoam/common and the package/assets directories,
respectively
"""
outdir = foam_pkg.stage.source_path
indir = join_path(os.path.dirname(__file__), 'common')
for f in common:
tty.info('Added file {0}'.format(f))
install(join_path(indir, f), join_path(outdir, f))
indir = join_path(foam_pkg.package_dir, 'assets')
for f in local:
tty.info('Added file {0}'.format(f))
install(join_path(indir, f), join_path(outdir, f))
def format_export(key, value):
"""Format key,value pair as 'export' with newline for POSIX shell.
A leading '#' for key adds a comment character to the entire line.
A value of 'None' corresponds to 'unset'.
"""
if key.startswith('#'):
return '## export {0}={1}\n'.format(re.sub(r'^#+\s*', '', key), value)
elif value is None:
return 'unset {0}\n'.format(key)
else:
return 'export {0}={1}\n'.format(key, value)
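# Examples (derived from the formatter above, shown for clarity):
#   format_export('WM_COMPILER', 'Gcc')  -> 'export WM_COMPILER=Gcc\n'
#   format_export('FOO', None)           -> 'unset FOO\n'
#   format_export('#FOO', 'bar')         -> '## export FOO=bar\n'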
def format_setenv(key, value):
"""Format key,value pair as 'setenv' with newline for C-shell.
A leading '#' for key adds a comment character to the entire line.
A value of 'None' corresponds to 'unsetenv'.
"""
if key.startswith('#'):
return '## setenv {0} {1}\n'.format(re.sub(r'^#+\s*', '', key), value)
elif value is None:
return 'unsetenv {0}\n'.format(key)
else:
return 'setenv {0} {1}\n'.format(key, value)
def _write_environ_entries(outfile, environ, formatter):
"""Write environment settings as 'export' or 'setenv'.
If environ is a dict, write in sorted order.
If environ is a list, write pair-wise.
Also descends into sub-dict and sub-list, but drops the key.
"""
if isinstance(environ, dict):
for key in sorted(environ):
entry = environ[key]
if isinstance(entry, dict):
_write_environ_entries(outfile, entry, formatter)
elif isinstance(entry, list):
_write_environ_entries(outfile, entry, formatter)
else:
outfile.write(formatter(key, entry))
elif isinstance(environ, list):
for item in environ:
outfile.write(formatter(item[0], item[1]))
def _write_environ_file(output, environ, formatter):
"""Write environment settings as 'export' or 'setenv'.
If environ is a dict, write in sorted order.
If environ is a list, write pair-wise.
Also descends into sub-dict and sub-list, but drops the key.
"""
with open(output, 'w') as outfile:
outfile.write('# spack generated\n')
_write_environ_entries(outfile, environ, formatter)
outfile.write('# spack\n')
def write_environ(environ, **kwargs):
"""Write environment settings as 'export' or 'setenv'.
If environ is a dict, write in sorted order.
If environ is a list, write pair-wise.
Keyword Options:
posix[=None] If set, the name of the POSIX file to rewrite.
cshell[=None] If set, the name of the C-shell file to rewrite.
"""
rcfile = kwargs.get('posix', None)
if rcfile:
_write_environ_file(rcfile, environ, format_export)
rcfile = kwargs.get('cshell', None)
if rcfile:
_write_environ_file(rcfile, environ, format_setenv)
def rewrite_environ_files(environ, **kwargs):
"""Use filter_file to rewrite (existing) POSIX shell or C-shell files.
Keyword Options:
posix[=None] If set, the name of the POSIX file to rewrite.
cshell[=None] If set, the name of the C-shell file to rewrite.
"""
rcfile = kwargs.get('posix', None)
if rcfile and os.path.isfile(rcfile):
for k, v in environ.items():
regex = r'^(\s*export\s+{0})=.*$'.format(k)
if not v:
replace = r'unset {0} #SPACK: unset'.format(k)
elif v.startswith('#'):
replace = r'unset {0} {1}'.format(k, v)
else:
replace = r'\1={0}'.format(v)
filter_file(regex, replace, rcfile, backup=False)
rcfile = kwargs.get('cshell', None)
if rcfile and os.path.isfile(rcfile):
for k, v in environ.items():
regex = r'^(\s*setenv\s+{0})\s+.*$'.format(k)
if not v:
replace = r'unsetenv {0} #SPACK: unset'.format(k)
elif v.startswith('#'):
replace = r'unsetenv {0} {1}'.format(k, v)
else:
replace = r'\1 {0}'.format(v)
filter_file(regex, replace, rcfile, backup=False)
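# Illustration (hypothetical edit, derived from the regexes above): with
# environ={'WM_COMPILER': 'Clang'}, a line 'export WM_COMPILER=Gcc' in the
# POSIX rc file becomes 'export WM_COMPILER=Clang'; an empty value would
# rewrite the line to 'unset WM_COMPILER #SPACK: unset'.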
def foam_add_path(*args):
"""A string with args prepended to 'PATH'"""
return '"' + ':'.join(args) + ':${PATH}"'
def foam_add_lib(*args):
"""A string with args prepended to 'LD_LIBRARY_PATH'"""
return '"' + ':'.join(args) + ':${LD_LIBRARY_PATH}"'
def pkglib(package, pre=None):
"""Get lib64 or lib from package prefix.
Optional parameter 'pre' to provide alternative prefix
"""
libdir = package.prefix.lib64
if not os.path.isdir(libdir):
libdir = package.prefix.lib
if pre:
return join_path(pre, os.path.basename(libdir))
else:
return libdir
def mplib_content(spec, pre=None):
"""The mpi settings (from spack) for the OpenFOAM wmake includes, which
allows later reuse within OpenFOAM.
Optional parameter 'pre' to provide alternative prefix for
bin and lib directories.
"""
mpi_spec = spec['mpi']
bin = mpi_spec.prefix.bin
inc = mpi_spec.headers.directories[0] # Currently only need first one
lib = pkglib(mpi_spec)
libname = 'mpi'
if 'mpich' in mpi_spec.name:
libname = 'mpich'
if pre:
bin = join_path(pre, os.path.basename(bin))
inc = join_path(pre, os.path.basename(inc))
lib = join_path(pre, os.path.basename(lib))
else:
pre = mpi_spec.prefix
info = {
'name': '{0}-{1}'.format(mpi_spec.name, mpi_spec.version),
'prefix': pre,
'include': inc,
'bindir': bin,
'libdir': lib,
'FLAGS': '-DOMPI_SKIP_MPICXX -DMPICH_SKIP_MPICXX',
'PINC': '-I{0}'.format(inc),
'PLIBS': '-L{0} -l{1}'.format(lib, libname),
}
return info
# -----------------------------------------------------------------------------
class Openfoam(Package):
"""OpenFOAM is a GPL-opensource C++ CFD-toolbox.
This offering is supported by OpenCFD Ltd,
producer and distributor of the OpenFOAM software via www.openfoam.com,
and owner of the OPENFOAM trademark.
OpenCFD Ltd has been developing and releasing OpenFOAM since its debut
in 2004.
"""
maintainers = ['olesenm']
homepage = "http://www.openfoam.com/"
url = "https://sourceforge.net/projects/openfoam/files/v1906/OpenFOAM-v1906.tgz"
git = "https://develop.openfoam.com/Development/openfoam.git"
list_url = "https://sourceforge.net/projects/openfoam/files/"
list_depth = 2
version('develop', branch='develop', submodules='True')
version('master', branch='master', submodules='True')
version('2006_201012', sha256='9afb7eee072bfddcf7f3e58420c93463027db2394997ac4c3b87a8b07c707fb0')
version('2006', sha256='30c6376d6f403985fc2ab381d364522d1420dd58a42cb270d2ad86f8af227edc')
version('1912_200506', sha256='831a39ff56e268e88374d0a3922479fd80260683e141e51980242cc281484121')
version('1912_200403', sha256='1de8f4ddd39722b75f6b01ace9f1ba727b53dd999d1cd2b344a8c677ac2db4c0')
version('1912', sha256='437feadf075419290aa8bf461673b723a60dc39525b23322850fb58cb48548f2')
version('1906_200312', sha256='f75645151ed5d8c5da592d307480979fe580a25627cc0c9718ef370211577594')
version('1906_191103', sha256='631a7fcd926ccbcdef0ab737a9dc55e58d6bedae2f3acaa041ea679db6c9303b')
version('1906', sha256='bee03c4b1da0d2c9f98eb469eeffbce3a8614728ef8e87f664042a7490976537')
version('1812_200312', sha256='925d2877c12740fab177a30fdcaa8899c262c15b90225f9c29d18a2d97532de0')
version('1812_191001', sha256='857a3d476696679313ea9a3f022b33446ddef2bcd417049a9486d504c12038dd')
version('1812_190531', sha256='51f0ef49a199edf3cd94e2ccfc7330e54e93c8e4ddb29ee66fe3e6b443583b34')
version('1812', sha256='d4d23d913419c6a364b1fe91509c1fadb5661bdf2eedb8fe9a8a005924eb2032')
version('1806', sha256='6951aab6405294fe59cec90b0a4e425f5403043191cda02ebaaa890ce1fcc819')
version('1712', sha256='4d22caa25d638d4c59bb93ee4dec51e8f71724f9f507eeb4162f771ebe885d21')
version('1706', sha256='7779048bb53798d9a5bd2b2be0bf302c5fd3dff98e29249d6e0ef7eeb83db79a')
version('1612', sha256='2909c43506a68e1f23efd0ca6186a6948ae0fc8fe1e39c78cc23ef0d69f3569d')
variant('float32', default=False,
description='Use single-precision')
variant('spdp', default=False,
description='Use single/double mixed precision')
variant('int64', default=False,
description='With 64-bit labels')
variant('knl', default=False,
description='Use KNL compiler settings')
variant('kahip', default=False,
description='With kahip decomposition')
variant('metis', default=False,
description='With metis decomposition')
variant('scotch', default=True,
description='With scotch/ptscotch decomposition')
variant('zoltan', default=False,
description='With zoltan renumbering')
variant('mgridgen', default=False, description='With mgridgen support')
variant('paraview', default=False,
description='Build paraview plugins and runtime post-processing')
variant('vtk', default=False,
description='With VTK runTimePostProcessing')
variant('source', default=True,
description='Install library/application sources and tutorials')
depends_on('mpi')
# After 1712, could suggest openmpi+thread_multiple for collated output
# but particular mixes of mpi versions and InfiniBand may not work so well
# conflicts('^openmpi~thread_multiple', when='@1712:')
depends_on('zlib')
depends_on('fftw')
depends_on('boost')
depends_on('cgal')
# The flex restriction is ONLY to deal with a spec resolution clash
# introduced by the restriction within scotch!
depends_on('flex@:2.6.1,2.6.4:')
depends_on('cmake', type='build')
depends_on('m4', type='build')
# Require scotch with ptscotch - corresponds to standard OpenFOAM setup
depends_on('scotch~metis+mpi~int64', when='+scotch~int64')
depends_on('scotch~metis+mpi+int64', when='+scotch+int64')
depends_on('kahip', when='+kahip')
depends_on('metis@5:', when='+metis')
depends_on('metis+int64', when='+metis+int64')
# mgridgen is statically linked
depends_on('parmgridgen', when='+mgridgen', type='build')
depends_on('zoltan', when='+zoltan')
depends_on('vtk', when='+vtk')
depends_on('adios2', when='@1912:')
# For OpenFOAM plugins and run-time post-processing this should just be
# 'paraview+plugins' but that resolves poorly.
# Workaround: use preferred variants "+plugins +qt" in
# ~/.spack/packages.yaml
# 1706 ok with newer paraview but avoid pv-5.2, pv-5.3 readers
depends_on('paraview@5.4:', when='@1706:+paraview')
# 1612 plugins need older paraview
depends_on('paraview@:5.0.1', when='@1612+paraview')
# General patches
common = ['spack-Allwmake', 'README-spack']
assets = []
# Version-specific patches
patch('1612-spack-patches.patch', when='@1612')
# kahip patch (wmake)
patch('https://develop.openfoam.com/Development/openfoam/commit/8831dfc58b0295d0d301a78341dd6f4599073d45.patch',
when='@1806',
sha256='531146be868dd0cda70c1cf12a22110a38a30fd93b5ada6234be3d6c9256c6cf'
)
# Some user config settings
# default: 'compile-option': '-spack',
# default: 'mplib': 'USERMPI', # Use user mpi for spack
config = {
# Add links into bin/, lib/ (eg, for other applications)
'link': False
}
# The openfoam architecture, compiler information etc
_foam_arch = None
# Content for etc/prefs.{csh,sh}
etc_prefs = {}
# Content for etc/config.{csh,sh}/ files
etc_config = {}
phases = ['configure', 'build', 'install']
build_script = './spack-Allwmake' # From patch() method.
#
# - End of definitions / setup -
#
def url_for_version(self, version):
"""Handles locations for patched and unpatched versions.
        Patched versions (eg '1906_191103') are located in the
        corresponding unpatched directories (eg '1906').
Older versions (eg, v1612+) had additional '+' in naming
"""
if version <= Version('1612'):
fmt = 'v{0}+/OpenFOAM-v{1}+.tgz'
else:
fmt = 'v{0}/OpenFOAM-v{1}.tgz'
return self.list_url + fmt.format(version.up_to(1), version)
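        # Examples (derived from the rule above, hypothetical versions):
        #   '1906_191103' -> <list_url>v1906/OpenFOAM-v1906_191103.tgz
        #   '1612'        -> <list_url>v1612+/OpenFOAM-v1612+.tgz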
def setup_minimal_environment(self, env):
"""Sets a minimal openfoam environment.
"""
tty.info('OpenFOAM minimal env {0}'.format(self.prefix))
env.set('FOAM_PROJECT_DIR', self.projectdir)
env.set('WM_PROJECT_DIR', self.projectdir)
for d in ['wmake', self.archbin]: # bin added automatically
env.prepend_path('PATH', join_path(self.projectdir, d))
def setup_build_environment(self, env):
"""Sets the build environment (prior to unpacking the sources).
"""
# Avoid the exception that occurs at runtime
# when building with the Fujitsu compiler.
if self.spec.satisfies('%fj'):
env.set('FOAM_SIGFPE', 'false')
def setup_run_environment(self, env):
"""Sets the run environment (post-installation).
The environment comes from running:
.. code-block:: console
$ . $WM_PROJECT_DIR/etc/bashrc
"""
bashrc = join_path(self.projectdir, 'etc', 'bashrc')
minimal = True
if os.path.isfile(bashrc):
# post-install: source the installed bashrc
try:
mods = EnvironmentModifications.from_sourcing_file(
bashrc,
clean=True, # Remove duplicate entries
blacklist=[ # Blacklist these
# Inadvertent changes
# -------------------
'PS1', # Leave untouched
'MANPATH', # Leave untouched
# Unneeded bits
# -------------
# 'FOAM_SETTINGS', # Do not use with modules
# 'FOAM_INST_DIR', # Old
# 'FOAM_(APP|ETC|SRC|SOLVERS|UTILITIES)',
# 'FOAM_TUTORIALS', # May be useful
# 'WM_OSTYPE', # Purely optional value
# Third-party cruft - only used for orig compilation
# -----------------
'[A-Z].*_ARCH_PATH',
# '(KAHIP|METIS|SCOTCH)_VERSION',
# User-specific
# -------------
'FOAM_RUN',
'(FOAM|WM)_.*USER_.*',
],
whitelist=[ # Whitelist these
'MPI_ARCH_PATH', # Can be required for compilation
])
env.extend(mods)
minimal = False
tty.info('OpenFOAM bashrc env: {0}'.format(bashrc))
except Exception:
minimal = True
if minimal:
# pre-build or minimal environment
self.setup_minimal_environment(env)
def setup_dependent_build_environment(self, env, dependent_spec):
"""Use full OpenFOAM environment when building.
Mirror WM_PROJECT_DIR value as FOAM_PROJECT_DIR to avoid
masking the normal OpenFOAM cleanup of previous versions.
"""
self.setup_run_environment(env)
env.set('FOAM_PROJECT_DIR', self.projectdir)
def setup_dependent_run_environment(self, env, dependent_spec):
"""Use full OpenFOAM environment when running.
Mirror WM_PROJECT_DIR value as FOAM_PROJECT_DIR to avoid
masking the normal OpenFOAM cleanup of previous versions.
"""
self.setup_run_environment(env)
env.set('FOAM_PROJECT_DIR', self.projectdir)
@property
def projectdir(self):
"""Absolute location of project directory: WM_PROJECT_DIR/"""
return self.prefix # <- install directly under prefix
@property
def foam_arch(self):
if not self._foam_arch:
self._foam_arch = OpenfoamArch(self.spec, **self.config)
return self._foam_arch
@property
def archbin(self):
"""Relative location of architecture-specific executables"""
return join_path('platforms', self.foam_arch, 'bin')
@property
def archlib(self):
"""Relative location of architecture-specific libraries"""
return join_path('platforms', self.foam_arch, 'lib')
def patch(self):
"""Adjust OpenFOAM build for spack.
Where needed, apply filter as an alternative to normal patching."""
add_extra_files(self, self.common, self.assets)
@when('@:1806')
def patch(self):
"""Adjust OpenFOAM build for spack.
Where needed, apply filter as an alternative to normal patching."""
add_extra_files(self, self.common, self.assets)
# Prior to 1812, required OpenFOAM-v{VER} directory when sourcing
projdir = "OpenFOAM-v{0}".format(self.version)
if not os.path.exists(join_path(self.stage.path, projdir)):
tty.info('Added directory link {0}'.format(projdir))
os.symlink(
os.path.relpath(
self.stage.source_path,
self.stage.path
),
join_path(self.stage.path, projdir)
)
# Avoid WM_PROJECT_INST_DIR for ThirdParty
# This modification is non-critical
edits = {
'WM_THIRD_PARTY_DIR':
r'$WM_PROJECT_DIR/ThirdParty #SPACK: No separate third-party',
}
rewrite_environ_files( # etc/{bashrc,cshrc}
edits,
posix=join_path('etc', 'bashrc'),
cshell=join_path('etc', 'cshrc'))
# The following filtering is non-critical.
# It simply prevents 'site' dirs at the wrong level
# (likely non-existent anyhow) from being added to
# PATH, LD_LIBRARY_PATH.
for rcdir in ['config.sh', 'config.csh']:
rcfile = join_path('etc', rcdir, 'settings')
if os.path.isfile(rcfile):
filter_file(
'WM_PROJECT_INST_DIR/',
'WM_PROJECT_DIR/',
rcfile,
backup=False)
@when('@1906: %fj')
@run_before('configure')
def make_fujitsu_rules(self):
"""Create Fujitsu rules (clang variant) unless supplied upstream.
Implemented for 1906 and later (older rules are too messy to edit).
Already included after 1912.
"""
general_rules = 'wmake/rules/General'
arch_rules = 'wmake/rules/linuxARM64' # self.arch
src = arch_rules + 'Clang'
dst = arch_rules + 'Fujitsu' # self.compiler
if os.path.exists(dst):
return
# Handle rules/<ARCH><COMP> or rules/<ARCH>/<COMP>
if not os.path.exists(src):
src = join_path(arch_rules, 'Clang')
dst = join_path(arch_rules, 'Fujitsu') # self.compiler
if os.path.exists(dst):
return
tty.info('Add Fujitsu wmake rules')
copy_tree(src, dst)
for cfg in ['c', 'c++', 'general']:
rule = join_path(dst, cfg)
filter_file('Clang', 'Fujitsu', rule, backup=False)
src = join_path(general_rules, 'Clang')
dst = join_path(general_rules, 'Fujitsu') # self.compiler
copy_tree(src, dst)
filter_file('clang', spack_cc, join_path(dst, 'c'),
backup=False, string=True)
filter_file('clang++', spack_cxx, join_path(dst, 'c++'),
backup=False, string=True)
def configure(self, spec, prefix):
"""Make adjustments to the OpenFOAM configuration files in their
various locations: etc/bashrc, etc/config.sh/FEATURE and
customizations that don't properly fit get placed in the etc/prefs.sh
        file (similarly for csh).
"""
# Filtering bashrc, cshrc
edits = {}
edits.update(self.foam_arch.foam_dict())
rewrite_environ_files( # etc/{bashrc,cshrc}
edits,
posix=join_path('etc', 'bashrc'),
cshell=join_path('etc', 'cshrc'))
# Content for etc/prefs.{csh,sh}
self.etc_prefs = {
# TODO
# 'CMAKE_ARCH_PATH': spec['cmake'].prefix,
# 'FLEX_ARCH_PATH': spec['flex'].prefix,
# 'ZLIB_ARCH_PATH': spec['zlib'].prefix,
}
# MPI content, using MPI_ARCH_PATH
user_mpi = mplib_content(spec, '${MPI_ARCH_PATH}')
# Content for etc/config.{csh,sh}/ files
self.etc_config = {
'CGAL': [
('BOOST_ARCH_PATH', spec['boost'].prefix),
('CGAL_ARCH_PATH', spec['cgal'].prefix),
('LD_LIBRARY_PATH',
foam_add_lib(
pkglib(spec['boost'], '${BOOST_ARCH_PATH}'),
pkglib(spec['cgal'], '${CGAL_ARCH_PATH}'))),
],
'FFTW': [
('FFTW_ARCH_PATH', spec['fftw'].prefix), # Absolute
('LD_LIBRARY_PATH',
foam_add_lib(
pkglib(spec['fftw'], '${BOOST_ARCH_PATH}'))),
],
# User-defined MPI
'mpi-user': [
('MPI_ARCH_PATH', spec['mpi'].prefix), # Absolute
('LD_LIBRARY_PATH', foam_add_lib(user_mpi['libdir'])),
('PATH', foam_add_path(user_mpi['bindir'])),
],
'adios2': {},
'scotch': {},
'kahip': {},
'metis': {},
'ensight': {}, # Disable settings
'paraview': [],
'gperftools': [], # Disable settings
'vtk': [],
}
# With adios2 after 1912
if spec.satisfies('@1912:'):
self.etc_config['adios2'] = [
('ADIOS2_ARCH_PATH', spec['adios2'].prefix),
('LD_LIBRARY_PATH',
foam_add_lib(pkglib(spec['adios2'], '${ADIOS2_ARCH_PATH}'))),
('PATH', foam_add_path('${ADIOS2_ARCH_PATH}/bin')),
]
if '+scotch' in spec:
self.etc_config['scotch'] = {
'SCOTCH_ARCH_PATH': spec['scotch'].prefix,
# For src/parallel/decompose/Allwmake
'SCOTCH_VERSION': 'scotch-{0}'.format(spec['scotch'].version),
}
if '+kahip' in spec:
self.etc_config['kahip'] = {
'KAHIP_ARCH_PATH': spec['kahip'].prefix,
}
if '+metis' in spec:
self.etc_config['metis'] = {
'METIS_ARCH_PATH': spec['metis'].prefix,
}
# ParaView_INCLUDE_DIR is not used in 1812, but has no ill-effect
if '+paraview' in spec:
pvmajor = 'paraview-{0}'.format(spec['paraview'].version.up_to(2))
self.etc_config['paraview'] = [
('ParaView_DIR', spec['paraview'].prefix),
('ParaView_INCLUDE_DIR', '${ParaView_DIR}/include/' + pvmajor),
('PV_PLUGIN_PATH', '$FOAM_LIBBIN/' + pvmajor),
('PATH', foam_add_path('${ParaView_DIR}/bin')),
]
if '+vtk' in spec:
self.etc_config['vtk'] = [
('VTK_DIR', spec['vtk'].prefix),
('LD_LIBRARY_PATH',
foam_add_lib(pkglib(spec['vtk'], '${VTK_DIR}'))),
]
# Optional
if '+mgridgen' in spec:
self.etc_config['mgridgen'] = {
'MGRIDGEN_ARCH_PATH': spec['parmgridgen'].prefix
}
# Optional
if '+zoltan' in spec:
self.etc_config['zoltan'] = {
'ZOLTAN_ARCH_PATH': spec['zoltan'].prefix
}
# Write prefs files according to the configuration.
# Only need prefs.sh for building, but install both for end-users
if self.etc_prefs:
write_environ(
self.etc_prefs,
posix=join_path('etc', 'prefs.sh'),
cshell=join_path('etc', 'prefs.csh'))
# Adjust components to use SPACK variants
for component, subdict in self.etc_config.items():
write_environ(
subdict,
posix=join_path('etc', 'config.sh', component),
cshell=join_path('etc', 'config.csh', component))
def build(self, spec, prefix):
"""Build using the OpenFOAM Allwmake script, with a wrapper to source
its environment first.
Only build if the compiler is known to be supported.
"""
self.foam_arch.has_rule(self.stage.source_path)
self.foam_arch.create_rules(self.stage.source_path, self)
args = ['-silent']
if self.parallel: # Build in parallel? - pass as an argument
args.append('-j{0}'.format(make_jobs))
builder = Executable(self.build_script)
builder(*args)
def install_write_location(self):
"""Set the installation location (projectdir) in bashrc,cshrc."""
mkdirp(self.projectdir)
# Filtering: bashrc, cshrc
edits = {
'WM_PROJECT_DIR': self.projectdir,
}
etc_dir = join_path(self.projectdir, 'etc')
rewrite_environ_files( # Adjust etc/bashrc and etc/cshrc
edits,
posix=join_path(etc_dir, 'bashrc'),
cshell=join_path(etc_dir, 'cshrc'))
@when('@:1806')
def install_write_location(self):
"""Set the installation location (projectdir) in bashrc,cshrc.
        In 1806 and earlier, WM_PROJECT_INST_DIR was the prefix
        directory where WM_PROJECT_DIR was installed.
"""
mkdirp(self.projectdir)
projdir = os.path.basename(self.projectdir)
# Filtering: bashrc, cshrc
edits = {
'WM_PROJECT_INST_DIR': os.path.dirname(self.projectdir),
'WM_PROJECT_DIR': join_path('$WM_PROJECT_INST_DIR', projdir),
}
etc_dir = join_path(self.projectdir, 'etc')
rewrite_environ_files( # Adjust etc/bashrc and etc/cshrc
edits,
posix=join_path(etc_dir, 'bashrc'),
cshell=join_path(etc_dir, 'cshrc'))
def install(self, spec, prefix):
"""Install under the projectdir"""
mkdirp(self.projectdir)
# All top-level files, except spack build info and possibly Allwmake
if '+source' in spec:
ignored = re.compile(r'^spack-.*')
else:
ignored = re.compile(r'^(Allwmake|spack-).*')
files = [
f for f in glob.glob("*")
if os.path.isfile(f) and not ignored.search(f)
]
for f in files:
install(f, self.projectdir)
# Having wmake and ~source is actually somewhat pointless...
# Install 'etc' before 'bin' (for symlinks)
# META-INFO for 1812 and later (or backported)
dirs = ['META-INFO', 'etc', 'bin', 'wmake']
if '+source' in spec:
dirs.extend(['applications', 'src', 'tutorials'])
for d in dirs:
if os.path.isdir(d):
install_tree(
d,
join_path(self.projectdir, d),
symlinks=True)
dirs = ['platforms']
if '+source' in spec:
dirs.extend(['doc'])
# Install platforms (and doc) skipping intermediate targets
relative_ignore_paths = ['src', 'applications', 'html', 'Guides']
ignore = lambda p: p in relative_ignore_paths
for d in dirs:
install_tree(
d,
join_path(self.projectdir, d),
ignore=ignore,
symlinks=True)
self.install_write_location()
self.install_links()
def install_links(self):
"""Add symlinks into bin/, lib/ (eg, for other applications)"""
# Make build log visible - it contains OpenFOAM-specific information
with working_dir(self.projectdir):
os.symlink(
join_path(os.path.relpath(self.install_log_path)),
join_path('log.' + str(self.foam_arch)))
if not self.config['link']:
return
# ln -s platforms/linux64GccXXX/lib lib
with working_dir(self.projectdir):
if os.path.isdir(self.archlib):
os.symlink(self.archlib, 'lib')
# (cd bin && ln -s ../platforms/linux64GccXXX/bin/* .)
with working_dir(join_path(self.projectdir, 'bin')):
for f in [
f for f in glob.glob(join_path('..', self.archbin, "*"))
if os.path.isfile(f)
]:
os.symlink(f, os.path.basename(f))
# -----------------------------------------------------------------------------
class OpenfoamArch(object):
"""OpenfoamArch represents architecture/compiler settings for OpenFOAM.
The string representation is WM_OPTIONS.
Keywords
label-size=[True] supports int32/int64
compile-option[=-spack]
mplib[=USERMPI]
"""
#: Map spack compiler names to OpenFOAM compiler names
# By default, simply capitalize the first letter
compiler_mapping = {'intel': 'Icc', 'fj': 'Fujitsu'}
def __init__(self, spec, **kwargs):
# Some user settings, to be adjusted manually or via variants
self.compiler = None # <- %compiler
self.arch_option = '' # Eg, -march=knl
self.label_size = None # <- +int64
self.precision_option = 'DP' # <- +float32 | +spdp
self.compile_option = kwargs.get('compile-option', '-spack')
self.arch = None
self.options = None
self.mplib = kwargs.get('mplib', 'USERMPI')
# WM_LABEL_OPTION, but perhaps not yet for foam-extend
if '+int64' in spec:
self.label_size = '64'
elif kwargs.get('label-size', True):
self.label_size = '32'
# WM_PRECISION_OPTION
if '+spdp' in spec:
self.precision_option = 'SPDP'
elif '+float32' in spec:
self.precision_option = 'SP'
# Processor/architecture-specific optimizations
if '+knl' in spec:
self.arch_option = '-march=knl'
# Capitalize first letter of compiler name to obtain the
# OpenFOAM naming (eg, gcc -> Gcc, clang -> Clang, etc).
# Use compiler_mapping[] for special cases
comp = spec.compiler.name
if comp in self.compiler_mapping:
comp = self.compiler_mapping[comp]
self.compiler = comp.capitalize()
self.update_arch(spec)
self.update_options()
def update_arch(self, spec):
"""Set WM_ARCH string corresponding to spack platform/target
"""
# spec.architecture.platform is like `uname -s`, but lower-case
platform = str(spec.architecture.platform)
# spec.target.family is like `uname -m`
target = str(spec.target.family)
# No spack platform family for ia64 or armv7l
if platform == 'linux':
if target == 'x86_64':
platform += '64'
elif target == 'ia64':
platform += 'IA64'
elif target == 'armv7l':
platform += 'ARM7'
elif target == 'aarch64':
platform += 'Arm64'
elif target == 'ppc64':
platform += 'PPC64'
elif target == 'ppc64le':
platform += 'PPC64le'
elif platform == 'darwin':
if target == 'x86_64':
platform += '64'
# ... and others?
self.arch = platform
def update_options(self):
"""Set WM_OPTIONS string consistent with current settings
"""
# WM_OPTIONS
# ----
# WM_LABEL_OPTION=Int$WM_LABEL_SIZE
# WM_OPTIONS_BASE=$WM_ARCH$WM_COMPILER$WM_PRECISION_OPTION
# WM_OPTIONS=$WM_OPTIONS_BASE$WM_LABEL_OPTION$WM_COMPILE_OPTION
# or
# WM_OPTIONS=$WM_OPTIONS_BASE$WM_COMPILE_OPTION
# ----
self.options = ''.join([
self.arch,
self.compiler,
self.precision_option,
('Int' + self.label_size if self.label_size else ''),
self.compile_option])
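        # Example (hypothetical spec): arch 'linux64', compiler 'Gcc',
        # 'DP' precision, 32-bit labels and the default '-spack' suffix
        # compose to WM_OPTIONS='linux64GccDPInt32-spack'.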
def __str__(self):
return self.options
def __repr__(self):
return str(self)
def foam_dict(self):
"""Returns a dictionary for OpenFOAM prefs, bashrc, cshrc."""
return dict([
('WM_COMPILER', self.compiler),
('WM_LABEL_SIZE', self.label_size),
('WM_PRECISION_OPTION', self.precision_option),
('WM_COMPILE_OPTION', self.compile_option),
('WM_MPLIB', self.mplib),
])
def _rule_directory(self, projdir, general=False):
"""Return the wmake/rules/ General or compiler rules directory.
Supports wmake/rules/<ARCH><COMP> and wmake/rules/<ARCH>/<COMP>.
"""
rules_dir = os.path.join(projdir, 'wmake', 'rules')
if general:
return os.path.join(rules_dir, 'General')
arch_dir = os.path.join(rules_dir, self.arch)
comp_rules = arch_dir + self.compiler
if os.path.isdir(comp_rules):
return comp_rules
else:
return os.path.join(arch_dir, self.compiler)
def has_rule(self, projdir):
"""Verify that a wmake/rules/ compiler rule exists in the project.
"""
# Insist on a wmake rule for this architecture/compiler combination
rule_dir = self._rule_directory(projdir)
if not os.path.isdir(rule_dir):
raise InstallError(
'No wmake rule for {0} {1}'.format(self.arch, self.compiler))
return True
def create_rules(self, projdir, foam_pkg):
""" Create {c,c++}-spack and mplib{USER,USERMPI}
rules in the specified project directory.
The compiler rules are based on the respective {c,c++}Opt rules
but with additional rpath information for the OpenFOAM libraries.
The '-spack' rules channel spack information into OpenFOAM wmake
rules with minimal modification to OpenFOAM.
The rpath is used for the installed libpath (continue to use
LD_LIBRARY_PATH for values during the build).
"""
# Note: the 'c' rules normally don't need rpath, since they are just
# used for some statically linked wmake tools, but left in anyhow.
# rpath for installed OpenFOAM libraries
rpath = '{0}{1}'.format(
foam_pkg.compiler.cxx_rpath_arg,
join_path(foam_pkg.projectdir, foam_pkg.archlib))
user_mpi = mplib_content(foam_pkg.spec)
rule_dir = self._rule_directory(projdir)
with working_dir(rule_dir):
# Compiler: copy existing cOpt,c++Opt and modify '*DBUG' value
for lang in ['c', 'c++']:
src = '{0}Opt'.format(lang)
dst = '{0}{1}'.format(lang, self.compile_option)
with open(src, 'r') as infile:
with open(dst, 'w') as outfile:
for line in infile:
line = line.rstrip()
outfile.write(line)
if re.match(r'^\S+DBUG\s*=', line):
outfile.write(' ')
outfile.write(rpath)
elif re.match(r'^\S+OPT\s*=', line):
if self.arch_option:
outfile.write(' ')
outfile.write(self.arch_option)
outfile.write('\n')
# MPI rules
for mplib in ['mplibUSER', 'mplibUSERMPI']:
with open(mplib, 'w') as out:
out.write("""# Use mpi from spack ({name})\n
PFLAGS = {FLAGS}
PINC = {PINC}
PLIBS = {PLIBS}
""".format(**user_mpi))
# -----------------------------------------------------------------------------
| 37.900097
| 116
| 0.583263
|
7683f2fecda3e0e37c65dc8369a5898cd79135f2
| 523
|
py
|
Python
|
hydroengine_service/error_handler.py
|
schnjaso2/hydro-engine-service
|
7cb7b136b86c7b97fcb21220dcdef45c2b1ec6ae
|
[
"MIT"
] | null | null | null |
hydroengine_service/error_handler.py
|
schnjaso2/hydro-engine-service
|
7cb7b136b86c7b97fcb21220dcdef45c2b1ec6ae
|
[
"MIT"
] | null | null | null |
hydroengine_service/error_handler.py
|
schnjaso2/hydro-engine-service
|
7cb7b136b86c7b97fcb21220dcdef45c2b1ec6ae
|
[
"MIT"
] | null | null | null |
'''Application error handlers.'''
from flask import Blueprint, jsonify
import traceback
error_handler = Blueprint('errors', __name__)
@error_handler.app_errorhandler(Exception)
def handle_unexpected_error(error):
stack = traceback.format_exc()
status_code = 500
success = False
response = {
'success': success,
'error': {
'type': 'UnexpectedException',
'message': str(error),
'stack': stack
}
}
return jsonify(response), status_code
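# Minimal usage sketch (assumes a Flask app object named `app`; not part of
# this module):
#   from flask import Flask
#   from hydroengine_service.error_handler import error_handler
#   app = Flask(__name__)
#   app.register_blueprint(error_handler)
# Any unhandled exception then returns the JSON payload above with HTTP 500.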
| 20.92
| 45
| 0.634799
|
48060e633dd1b4433347ca46e50e30e211d943a8
| 2,079
|
py
|
Python
|
WebSocketServer/megaRPI/megaCliente.py
|
felipelvrd/RPIWebAdmin
|
5f3a4d3b630183a6322ffe4b89df04db17a9dd9d
|
[
"MIT"
] | null | null | null |
WebSocketServer/megaRPI/megaCliente.py
|
felipelvrd/RPIWebAdmin
|
5f3a4d3b630183a6322ffe4b89df04db17a9dd9d
|
[
"MIT"
] | null | null | null |
WebSocketServer/megaRPI/megaCliente.py
|
felipelvrd/RPIWebAdmin
|
5f3a4d3b630183a6322ffe4b89df04db17a9dd9d
|
[
"MIT"
] | null | null | null |
from mega import MegaRequestListener, MegaError
from WebSocketServer.megaRPI.utils import enviar_cliente
from WebSocketServer.megaRPI.megaNodosManager import MegaNodosManager
class MegaCliente(object):
def __init__(self, api, web_socket_handler):
self._api = api
self.web_socket_handler = web_socket_handler
self.loginListener = LoginListener(self.web_socket_handler)
self.mega_nodos_manager = MegaNodosManager(self._api, self.web_socket_handler)
def __del__(self):
self._api.removeRequestListener(self.loginListener)
self._api.removeRequestListener(self.mega_nodos_manager.obtenerNodosListener)
def login(self, j_data):
usuario = str(j_data['email'])
contrasenna = str(j_data['contrasenna'])
self._api.login(usuario, contrasenna, self.loginListener)
def get_email(self):
if not self._api.isLoggedIn():
print('INFO: Not logged in')
else:
            print(self._api.getMyEmail())
# webSocket.write_message(email)
def esta_logueado(self):
if self._api.isLoggedIn():
return True
data = {
'cmd': 'noLogueado'
}
enviar_cliente(self.web_socket_handler, data)
return False
def lista_nodos(self):
if self.esta_logueado():
self.mega_nodos_manager.listar_nodos()
pass
def recargar_nodos(self):
if self.esta_logueado():
self.mega_nodos_manager.CargarNodos()
def cd(self, j_data):
directorio = str(j_data['carpeta'])
self.mega_nodos_manager.CambiarNodo(directorio)
class LoginListener(MegaRequestListener):
def __init__(self, web_socket_handler):
super(LoginListener, self).__init__()
self.webSocket = web_socket_handler
def onRequestFinish(self, api, request, e):
data = {
'cmd': 'login',
'errorCode': e.getErrorCode(),
'errorString': MegaError.getErrorString(e.getErrorCode()),
}
enviar_cliente(self.webSocket, data)
| 32.484375
| 86
| 0.660895
|
d859aab42a7ad09c1428d32a6f9fc2d554586925
| 29,125
|
py
|
Python
|
electrum/lnsweep.py
|
p3ngu19z/electrum
|
427b396c24ec1a3cfdca8e1a70c94537b35ad882
|
[
"MIT"
] | 79
|
2017-11-10T03:00:57.000Z
|
2022-02-27T16:35:04.000Z
|
electrum/lnsweep.py
|
fabrizioschiavi/electrum
|
a0f2dadf11fa1636e46a8c233b4d5651032080e4
|
[
"MIT"
] | 70
|
2017-12-25T05:28:26.000Z
|
2022-03-26T22:31:47.000Z
|
electrum/lnsweep.py
|
fabrizioschiavi/electrum
|
a0f2dadf11fa1636e46a8c233b4d5651032080e4
|
[
"MIT"
] | 64
|
2017-12-19T09:09:23.000Z
|
2022-02-08T10:26:13.000Z
|
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
from typing import Optional, Dict, List, Tuple, TYPE_CHECKING, NamedTuple, Callable
from enum import Enum, auto
from .util import bfh, bh2u
from .bitcoin import redeem_script_to_address, dust_threshold, construct_witness
from . import ecc
from .lnutil import (make_commitment_output_to_remote_address, make_commitment_output_to_local_witness_script,
derive_privkey, derive_pubkey, derive_blinded_pubkey, derive_blinded_privkey,
make_htlc_tx_witness, make_htlc_tx_with_open_channel, UpdateAddHtlc,
LOCAL, REMOTE, make_htlc_output_witness_script,
get_ordered_channel_configs, privkey_to_pubkey, get_per_commitment_secret_from_seed,
RevocationStore, extract_ctn_from_tx_and_chan, UnableToDeriveSecret, SENT, RECEIVED,
map_htlcs_to_ctx_output_idxs, Direction)
from .transaction import (Transaction, TxOutput, PartialTransaction, PartialTxInput,
PartialTxOutput, TxOutpoint)
from .simple_config import SimpleConfig
from .logging import get_logger, Logger
if TYPE_CHECKING:
from .lnchannel import Channel, AbstractChannel
_logger = get_logger(__name__)
class SweepInfo(NamedTuple):
name: str
csv_delay: int
cltv_expiry: int
gen_tx: Callable[[], Optional[Transaction]]
def create_sweeptxs_for_watchtower(chan: 'Channel', ctx: Transaction, per_commitment_secret: bytes,
sweep_address: str) -> List[Transaction]:
"""Presign sweeping transactions using the just received revoked pcs.
These will only be utilised if the remote breaches.
Sweep 'to_local', and all the HTLCs (two cases: directly from ctx, or from HTLC tx).
"""
# prep
ctn = extract_ctn_from_tx_and_chan(ctx, chan)
pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
this_conf, other_conf = get_ordered_channel_configs(chan=chan, for_us=False)
other_revocation_privkey = derive_blinded_privkey(other_conf.revocation_basepoint.privkey,
per_commitment_secret)
to_self_delay = other_conf.to_self_delay
this_delayed_pubkey = derive_pubkey(this_conf.delayed_basepoint.pubkey, pcp)
txs = []
# to_local
revocation_pubkey = ecc.ECPrivkey(other_revocation_privkey).get_public_key_bytes(compressed=True)
witness_script = bh2u(make_commitment_output_to_local_witness_script(
revocation_pubkey, to_self_delay, this_delayed_pubkey))
to_local_address = redeem_script_to_address('p2wsh', witness_script)
output_idxs = ctx.get_output_idxs_from_address(to_local_address)
if output_idxs:
output_idx = output_idxs.pop()
sweep_tx = create_sweeptx_ctx_to_local(
sweep_address=sweep_address,
ctx=ctx,
output_idx=output_idx,
witness_script=witness_script,
privkey=other_revocation_privkey,
is_revocation=True,
config=chan.lnworker.config)
if sweep_tx:
txs.append(sweep_tx)
# HTLCs
def create_sweeptx_for_htlc(*, htlc: 'UpdateAddHtlc', htlc_direction: Direction,
ctx_output_idx: int) -> Optional[Transaction]:
htlc_tx_witness_script, htlc_tx = make_htlc_tx_with_open_channel(chan=chan,
pcp=pcp,
subject=REMOTE,
ctn=ctn,
htlc_direction=htlc_direction,
commit=ctx,
htlc=htlc,
ctx_output_idx=ctx_output_idx)
return create_sweeptx_that_spends_htlctx_that_spends_htlc_in_ctx(
to_self_delay=0,
htlc_tx=htlc_tx,
htlctx_witness_script=htlc_tx_witness_script,
sweep_address=sweep_address,
privkey=other_revocation_privkey,
is_revocation=True,
config=chan.lnworker.config)
htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=chan,
ctx=ctx,
pcp=pcp,
subject=REMOTE,
ctn=ctn)
for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
secondstage_sweep_tx = create_sweeptx_for_htlc(htlc=htlc,
htlc_direction=direction,
ctx_output_idx=ctx_output_idx)
if secondstage_sweep_tx:
txs.append(secondstage_sweep_tx)
return txs
def create_sweeptx_for_their_revoked_ctx(chan: 'Channel', ctx: Transaction, per_commitment_secret: bytes,
sweep_address: str) -> Optional[Callable[[], Optional[Transaction]]]:
# prep
pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
this_conf, other_conf = get_ordered_channel_configs(chan=chan, for_us=False)
other_revocation_privkey = derive_blinded_privkey(other_conf.revocation_basepoint.privkey,
per_commitment_secret)
to_self_delay = other_conf.to_self_delay
this_delayed_pubkey = derive_pubkey(this_conf.delayed_basepoint.pubkey, pcp)
txs = []
# to_local
revocation_pubkey = ecc.ECPrivkey(other_revocation_privkey).get_public_key_bytes(compressed=True)
witness_script = bh2u(make_commitment_output_to_local_witness_script(
revocation_pubkey, to_self_delay, this_delayed_pubkey))
to_local_address = redeem_script_to_address('p2wsh', witness_script)
output_idxs = ctx.get_output_idxs_from_address(to_local_address)
if output_idxs:
output_idx = output_idxs.pop()
sweep_tx = lambda: create_sweeptx_ctx_to_local(
sweep_address=sweep_address,
ctx=ctx,
output_idx=output_idx,
witness_script=witness_script,
privkey=other_revocation_privkey,
is_revocation=True,
config=chan.lnworker.config)
return sweep_tx
return None
def create_sweeptx_for_their_revoked_htlc(chan: 'Channel', ctx: Transaction, htlc_tx: Transaction,
sweep_address: str) -> Optional[SweepInfo]:
x = analyze_ctx(chan, ctx)
if not x:
return
ctn, their_pcp, is_revocation, per_commitment_secret = x
if not is_revocation:
return
# prep
pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
this_conf, other_conf = get_ordered_channel_configs(chan=chan, for_us=False)
other_revocation_privkey = derive_blinded_privkey(other_conf.revocation_basepoint.privkey,
per_commitment_secret)
to_self_delay = other_conf.to_self_delay
this_delayed_pubkey = derive_pubkey(this_conf.delayed_basepoint.pubkey, pcp)
# same witness script as to_local
revocation_pubkey = ecc.ECPrivkey(other_revocation_privkey).get_public_key_bytes(compressed=True)
witness_script = bh2u(make_commitment_output_to_local_witness_script(
revocation_pubkey, to_self_delay, this_delayed_pubkey))
htlc_address = redeem_script_to_address('p2wsh', witness_script)
# check that htlc_tx is a htlc
if htlc_tx.outputs()[0].address != htlc_address:
return
gen_tx = lambda: create_sweeptx_ctx_to_local(
sweep_address=sweep_address,
ctx=htlc_tx,
output_idx=0,
witness_script=witness_script,
privkey=other_revocation_privkey,
is_revocation=True,
config=chan.lnworker.config)
return SweepInfo(name='redeem_htlc2',
csv_delay=0,
cltv_expiry=0,
gen_tx=gen_tx)
def create_sweeptxs_for_our_ctx(*, chan: 'AbstractChannel', ctx: Transaction,
sweep_address: str) -> Optional[Dict[str, SweepInfo]]:
"""Handle the case where we force close unilaterally with our latest ctx.
Construct sweep txns for 'to_local', and for all HTLCs (2 txns each).
'to_local' can be swept even if this is a breach (by us),
but HTLCs cannot (old HTLCs are no longer stored).
"""
ctn = extract_ctn_from_tx_and_chan(ctx, chan)
our_conf, their_conf = get_ordered_channel_configs(chan=chan, for_us=True)
our_per_commitment_secret = get_per_commitment_secret_from_seed(
our_conf.per_commitment_secret_seed, RevocationStore.START_INDEX - ctn)
our_pcp = ecc.ECPrivkey(our_per_commitment_secret).get_public_key_bytes(compressed=True)
our_delayed_bp_privkey = ecc.ECPrivkey(our_conf.delayed_basepoint.privkey)
our_localdelayed_privkey = derive_privkey(our_delayed_bp_privkey.secret_scalar, our_pcp)
our_localdelayed_privkey = ecc.ECPrivkey.from_secret_scalar(our_localdelayed_privkey)
their_revocation_pubkey = derive_blinded_pubkey(their_conf.revocation_basepoint.pubkey, our_pcp)
to_self_delay = their_conf.to_self_delay
our_htlc_privkey = derive_privkey(secret=int.from_bytes(our_conf.htlc_basepoint.privkey, 'big'),
per_commitment_point=our_pcp).to_bytes(32, 'big')
our_localdelayed_pubkey = our_localdelayed_privkey.get_public_key_bytes(compressed=True)
to_local_witness_script = bh2u(make_commitment_output_to_local_witness_script(
their_revocation_pubkey, to_self_delay, our_localdelayed_pubkey))
to_local_address = redeem_script_to_address('p2wsh', to_local_witness_script)
# to remote address
bpk = their_conf.payment_basepoint.pubkey
their_payment_pubkey = bpk if chan.is_static_remotekey_enabled() else derive_pubkey(their_conf.payment_basepoint.pubkey, our_pcp)
to_remote_address = make_commitment_output_to_remote_address(their_payment_pubkey)
# test ctx
_logger.debug(f'testing our ctx: {to_local_address} {to_remote_address}')
if not ctx.get_output_idxs_from_address(to_local_address) \
and not ctx.get_output_idxs_from_address(to_remote_address):
return
# we have to_local, to_remote.
# other outputs are htlcs
# if they are spent, we need to generate the script
# so, second-stage htlc sweep should not be returned here
txs = {} # type: Dict[str, SweepInfo]
# to_local
output_idxs = ctx.get_output_idxs_from_address(to_local_address)
if output_idxs:
output_idx = output_idxs.pop()
sweep_tx = lambda: create_sweeptx_ctx_to_local(
sweep_address=sweep_address,
ctx=ctx,
output_idx=output_idx,
witness_script=to_local_witness_script,
privkey=our_localdelayed_privkey.get_secret_bytes(),
is_revocation=False,
to_self_delay=to_self_delay,
config=chan.lnworker.config)
prevout = ctx.txid() + ':%d'%output_idx
txs[prevout] = SweepInfo(name='our_ctx_to_local',
csv_delay=to_self_delay,
cltv_expiry=0,
gen_tx=sweep_tx)
we_breached = ctn < chan.get_oldest_unrevoked_ctn(LOCAL)
if we_breached:
_logger.info("we breached.")
# return only our_ctx_to_local, because we don't keep htlc_signatures for old states
return txs
# HTLCs
def create_txns_for_htlc(*, htlc: 'UpdateAddHtlc', htlc_direction: Direction,
ctx_output_idx: int, htlc_relative_idx: int):
if htlc_direction == RECEIVED:
preimage = chan.lnworker.get_preimage(htlc.payment_hash)
else:
preimage = None
htlctx_witness_script, htlc_tx = create_htlctx_that_spends_from_our_ctx(
chan=chan,
our_pcp=our_pcp,
ctx=ctx,
htlc=htlc,
local_htlc_privkey=our_htlc_privkey,
preimage=preimage,
htlc_direction=htlc_direction,
ctx_output_idx=ctx_output_idx,
htlc_relative_idx=htlc_relative_idx)
sweep_tx = lambda: create_sweeptx_that_spends_htlctx_that_spends_htlc_in_ctx(
to_self_delay=to_self_delay,
htlc_tx=htlc_tx,
htlctx_witness_script=htlctx_witness_script,
sweep_address=sweep_address,
privkey=our_localdelayed_privkey.get_secret_bytes(),
is_revocation=False,
config=chan.lnworker.config)
# side effect
txs[htlc_tx.inputs()[0].prevout.to_str()] = SweepInfo(name='first-stage-htlc',
csv_delay=0,
cltv_expiry=htlc_tx.locktime,
gen_tx=lambda: htlc_tx)
txs[htlc_tx.txid() + ':0'] = SweepInfo(name='second-stage-htlc',
csv_delay=to_self_delay,
cltv_expiry=0,
gen_tx=sweep_tx)
# offered HTLCs, in our ctx --> "timeout"
# received HTLCs, in our ctx --> "success"
htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=chan,
ctx=ctx,
pcp=our_pcp,
subject=LOCAL,
ctn=ctn)
for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
create_txns_for_htlc(htlc=htlc,
htlc_direction=direction,
ctx_output_idx=ctx_output_idx,
htlc_relative_idx=htlc_relative_idx)
return txs
def analyze_ctx(chan: 'Channel', ctx: Transaction):
# note: the remote sometimes has two valid non-revoked commitment transactions,
# either of which could be broadcast
our_conf, their_conf = get_ordered_channel_configs(chan=chan, for_us=True)
ctn = extract_ctn_from_tx_and_chan(ctx, chan)
per_commitment_secret = None
oldest_unrevoked_remote_ctn = chan.get_oldest_unrevoked_ctn(REMOTE)
if ctn == oldest_unrevoked_remote_ctn:
their_pcp = their_conf.current_per_commitment_point
is_revocation = False
elif ctn == oldest_unrevoked_remote_ctn + 1:
their_pcp = their_conf.next_per_commitment_point
is_revocation = False
elif ctn < oldest_unrevoked_remote_ctn: # breach
try:
per_commitment_secret = chan.revocation_store.retrieve_secret(RevocationStore.START_INDEX - ctn)
except UnableToDeriveSecret:
return
their_pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
is_revocation = True
#_logger.info(f'tx for revoked: {list(txs.keys())}')
elif chan.get_data_loss_protect_remote_pcp(ctn):
their_pcp = chan.get_data_loss_protect_remote_pcp(ctn)
is_revocation = False
else:
return
return ctn, their_pcp, is_revocation, per_commitment_secret
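# analyze_ctx returns the tuple (ctn, their_pcp, is_revocation, per_commitment_secret);
# per_commitment_secret is only non-None when the given ctx is a revoked (breached) commitment.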
def create_sweeptxs_for_their_ctx(*, chan: 'Channel', ctx: Transaction,
sweep_address: str) -> Optional[Dict[str,SweepInfo]]:
"""Handle the case when the remote force-closes with their ctx.
Sweep outputs that do not have a CSV delay ('to_remote' and first-stage HTLCs).
Outputs with CSV delay ('to_local' and second-stage HTLCs) are redeemed by LNWatcher.
"""
txs = {} # type: Dict[str, SweepInfo]
our_conf, their_conf = get_ordered_channel_configs(chan=chan, for_us=True)
x = analyze_ctx(chan, ctx)
if not x:
return
ctn, their_pcp, is_revocation, per_commitment_secret = x
# to_local and to_remote addresses
our_revocation_pubkey = derive_blinded_pubkey(our_conf.revocation_basepoint.pubkey, their_pcp)
their_delayed_pubkey = derive_pubkey(their_conf.delayed_basepoint.pubkey, their_pcp)
witness_script = bh2u(make_commitment_output_to_local_witness_script(
our_revocation_pubkey, our_conf.to_self_delay, their_delayed_pubkey))
to_local_address = redeem_script_to_address('p2wsh', witness_script)
# to remote address
bpk = our_conf.payment_basepoint.pubkey
our_payment_pubkey = bpk if chan.is_static_remotekey_enabled() else derive_pubkey(bpk, their_pcp)
to_remote_address = make_commitment_output_to_remote_address(our_payment_pubkey)
# test if this is their ctx
_logger.debug(f'testing their ctx: {to_local_address} {to_remote_address}')
if not ctx.get_output_idxs_from_address(to_local_address) \
and not ctx.get_output_idxs_from_address(to_remote_address):
return
if is_revocation:
our_revocation_privkey = derive_blinded_privkey(our_conf.revocation_basepoint.privkey, per_commitment_secret)
gen_tx = create_sweeptx_for_their_revoked_ctx(chan, ctx, per_commitment_secret, chan.sweep_address)
if gen_tx:
tx = gen_tx()
txs[tx.inputs()[0].prevout.to_str()] = SweepInfo(name='to_local_for_revoked_ctx',
csv_delay=0,
cltv_expiry=0,
gen_tx=gen_tx)
# prep
our_htlc_privkey = derive_privkey(secret=int.from_bytes(our_conf.htlc_basepoint.privkey, 'big'), per_commitment_point=their_pcp)
our_htlc_privkey = ecc.ECPrivkey.from_secret_scalar(our_htlc_privkey)
their_htlc_pubkey = derive_pubkey(their_conf.htlc_basepoint.pubkey, their_pcp)
# to_local is handled by lnwatcher
# to_remote
if not chan.is_static_remotekey_enabled():
our_payment_bp_privkey = ecc.ECPrivkey(our_conf.payment_basepoint.privkey)
our_payment_privkey = derive_privkey(our_payment_bp_privkey.secret_scalar, their_pcp)
our_payment_privkey = ecc.ECPrivkey.from_secret_scalar(our_payment_privkey)
assert our_payment_pubkey == our_payment_privkey.get_public_key_bytes(compressed=True)
output_idxs = ctx.get_output_idxs_from_address(to_remote_address)
if output_idxs:
output_idx = output_idxs.pop()
prevout = ctx.txid() + ':%d'%output_idx
sweep_tx = lambda: create_sweeptx_their_ctx_to_remote(
sweep_address=sweep_address,
ctx=ctx,
output_idx=output_idx,
our_payment_privkey=our_payment_privkey,
config=chan.lnworker.config)
txs[prevout] = SweepInfo(name='their_ctx_to_remote',
csv_delay=0,
cltv_expiry=0,
gen_tx=sweep_tx)
# HTLCs
def create_sweeptx_for_htlc(htlc: 'UpdateAddHtlc', is_received_htlc: bool,
ctx_output_idx: int) -> None:
if not is_received_htlc and not is_revocation:
preimage = chan.lnworker.get_preimage(htlc.payment_hash)
else:
preimage = None
htlc_output_witness_script = make_htlc_output_witness_script(
is_received_htlc=is_received_htlc,
remote_revocation_pubkey=our_revocation_pubkey,
remote_htlc_pubkey=our_htlc_privkey.get_public_key_bytes(compressed=True),
local_htlc_pubkey=their_htlc_pubkey,
payment_hash=htlc.payment_hash,
cltv_expiry=htlc.cltv_expiry)
cltv_expiry = htlc.cltv_expiry if is_received_htlc and not is_revocation else 0
prevout = ctx.txid() + ':%d'%ctx_output_idx
sweep_tx = lambda: create_sweeptx_their_ctx_htlc(
ctx=ctx,
witness_script=htlc_output_witness_script,
sweep_address=sweep_address,
preimage=preimage,
output_idx=ctx_output_idx,
privkey=our_revocation_privkey if is_revocation else our_htlc_privkey.get_secret_bytes(),
is_revocation=is_revocation,
cltv_expiry=cltv_expiry,
config=chan.lnworker.config)
txs[prevout] = SweepInfo(name=f'their_ctx_htlc_{ctx_output_idx}',
csv_delay=0,
cltv_expiry=cltv_expiry,
gen_tx=sweep_tx)
# received HTLCs, in their ctx --> "timeout"
# offered HTLCs, in their ctx --> "success"
htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=chan,
ctx=ctx,
pcp=their_pcp,
subject=REMOTE,
ctn=ctn)
for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
create_sweeptx_for_htlc(htlc=htlc,
is_received_htlc=direction == RECEIVED,
ctx_output_idx=ctx_output_idx)
return txs
def create_htlctx_that_spends_from_our_ctx(chan: 'Channel', our_pcp: bytes,
ctx: Transaction, htlc: 'UpdateAddHtlc',
local_htlc_privkey: bytes, preimage: Optional[bytes],
htlc_direction: Direction, htlc_relative_idx: int,
ctx_output_idx: int) -> Tuple[bytes, Transaction]:
assert (htlc_direction == RECEIVED) == bool(preimage), 'preimage is required iff htlc is received'
preimage = preimage or b''
ctn = extract_ctn_from_tx_and_chan(ctx, chan)
witness_script, htlc_tx = make_htlc_tx_with_open_channel(chan=chan,
pcp=our_pcp,
subject=LOCAL,
ctn=ctn,
htlc_direction=htlc_direction,
commit=ctx,
htlc=htlc,
ctx_output_idx=ctx_output_idx,
name=f'our_ctx_{ctx_output_idx}_htlc_tx_{bh2u(htlc.payment_hash)}')
remote_htlc_sig = chan.get_remote_htlc_sig_for_htlc(htlc_relative_idx=htlc_relative_idx)
local_htlc_sig = bfh(htlc_tx.sign_txin(0, local_htlc_privkey))
txin = htlc_tx.inputs()[0]
witness_program = bfh(Transaction.get_preimage_script(txin))
txin.witness = make_htlc_tx_witness(remote_htlc_sig, local_htlc_sig, preimage, witness_program)
return witness_script, htlc_tx
def create_sweeptx_their_ctx_htlc(ctx: Transaction, witness_script: bytes, sweep_address: str,
preimage: Optional[bytes], output_idx: int,
privkey: bytes, is_revocation: bool,
cltv_expiry: int, config: SimpleConfig) -> Optional[PartialTransaction]:
assert type(cltv_expiry) is int
preimage = preimage or b'' # preimage is required iff (not is_revocation and htlc is offered)
val = ctx.outputs()[output_idx].value
prevout = TxOutpoint(txid=bfh(ctx.txid()), out_idx=output_idx)
txin = PartialTxInput(prevout=prevout)
txin._trusted_value_sats = val
txin.witness_script = witness_script
txin.script_sig = b''
sweep_inputs = [txin]
tx_size_bytes = 200 # TODO (depends on offered/received and is_revocation)
fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
outvalue = val - fee
if outvalue <= dust_threshold(): return None
sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs, version=2, locktime=cltv_expiry)
sig = bfh(tx.sign_txin(0, privkey))
if not is_revocation:
witness = construct_witness([sig, preimage, witness_script])
else:
revocation_pubkey = privkey_to_pubkey(privkey)
witness = construct_witness([sig, revocation_pubkey, witness_script])
tx.inputs()[0].witness = bfh(witness)
assert tx.is_complete()
return tx
def create_sweeptx_their_ctx_to_remote(sweep_address: str, ctx: Transaction, output_idx: int,
our_payment_privkey: ecc.ECPrivkey,
config: SimpleConfig) -> Optional[PartialTransaction]:
our_payment_pubkey = our_payment_privkey.get_public_key_hex(compressed=True)
val = ctx.outputs()[output_idx].value
prevout = TxOutpoint(txid=bfh(ctx.txid()), out_idx=output_idx)
txin = PartialTxInput(prevout=prevout)
txin._trusted_value_sats = val
txin.script_type = 'p2wpkh'
txin.pubkeys = [bfh(our_payment_pubkey)]
txin.num_sig = 1
sweep_inputs = [txin]
tx_size_bytes = 110 # approx size of p2wpkh->p2wpkh
fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
outvalue = val - fee
if outvalue <= dust_threshold(): return None
sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
sweep_tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs)
sweep_tx.set_rbf(True)
sweep_tx.sign({our_payment_pubkey: (our_payment_privkey.get_secret_bytes(), True)})
if not sweep_tx.is_complete():
raise Exception('channel close sweep tx is not complete')
return sweep_tx
def create_sweeptx_ctx_to_local(*, sweep_address: str, ctx: Transaction, output_idx: int, witness_script: str,
privkey: bytes, is_revocation: bool, config: SimpleConfig,
to_self_delay: int=None) -> Optional[PartialTransaction]:
"""Create a txn that sweeps the 'to_local' output of a commitment
transaction into our wallet.
privkey: either revocation_privkey or localdelayed_privkey
is_revocation: tells us which ^
"""
val = ctx.outputs()[output_idx].value
prevout = TxOutpoint(txid=bfh(ctx.txid()), out_idx=output_idx)
txin = PartialTxInput(prevout=prevout)
txin._trusted_value_sats = val
txin.script_sig = b''
txin.witness_script = bfh(witness_script)
sweep_inputs = [txin]
if not is_revocation:
assert isinstance(to_self_delay, int)
sweep_inputs[0].nsequence = to_self_delay
tx_size_bytes = 121 # approx size of to_local -> p2wpkh
fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
outvalue = val - fee
if outvalue <= dust_threshold():
return None
sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
sweep_tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs, version=2)
sig = sweep_tx.sign_txin(0, privkey)
witness = construct_witness([sig, int(is_revocation), witness_script])
sweep_tx.inputs()[0].witness = bfh(witness)
return sweep_tx
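# The witness assembled above is [sig, 1, witness_script] on the revocation path and
# [sig, 0, witness_script] on the CSV-delayed path: int(is_revocation) selects the branch
# of the to_local script.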
def create_sweeptx_that_spends_htlctx_that_spends_htlc_in_ctx(*,
htlc_tx: Transaction, htlctx_witness_script: bytes, sweep_address: str,
privkey: bytes, is_revocation: bool, to_self_delay: int,
config: SimpleConfig) -> Optional[PartialTransaction]:
val = htlc_tx.outputs()[0].value
prevout = TxOutpoint(txid=bfh(htlc_tx.txid()), out_idx=0)
txin = PartialTxInput(prevout=prevout)
txin._trusted_value_sats = val
txin.script_sig = b''
txin.witness_script = htlctx_witness_script
sweep_inputs = [txin]
if not is_revocation:
assert isinstance(to_self_delay, int)
sweep_inputs[0].nsequence = to_self_delay
tx_size_bytes = 200 # TODO
fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
outvalue = val - fee
if outvalue <= dust_threshold(): return None
sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs, version=2)
sig = bfh(tx.sign_txin(0, privkey))
witness = construct_witness([sig, int(is_revocation), htlctx_witness_script])
tx.inputs()[0].witness = bfh(witness)
assert tx.is_complete()
return tx
| 52.195341
| 133
| 0.646798
|
c9003cc9bc0b94d7697fa9bdcd28a2d02584a6e2
| 11,967
|
py
|
Python
|
app/expansion.py
|
datactivist/fastapi-query-expansion
|
884d05cdf529e60db075904add3c7cdf97950cdd
|
[
"MIT"
] | null | null | null |
app/expansion.py
|
datactivist/fastapi-query-expansion
|
884d05cdf529e60db075904add3c7cdf97950cdd
|
[
"MIT"
] | 1
|
2021-12-17T15:15:23.000Z
|
2021-12-17T15:15:23.000Z
|
app/expansion.py
|
datactivist/fastapi-query-expansion
|
884d05cdf529e60db075904add3c7cdf97950cdd
|
[
"MIT"
] | null | null | null |
import json
import numpy as np
from enum import Enum
from pathlib import Path
import re
import sql_query
import request_lexical_resources
data_path = Path("data")
keyw_path = Path("keywords_vectors")
embeddings_path = Path("embeddings")
keywords_delimitor = r" |,|;|_|\|"
class EmbeddingsType(str, Enum):
word2vec = "word2vec"
wordnet = "wordnet"
fasttext = "fasttext"
bert = "bert"
elmo = "elmo"
class SimilarityType(str, Enum):
synonym = "synonym"
hyponym = "hyponym"
hypernym = "hypernym"
holonym = "holonym"
similar = "similar"
def get_senses_from_keyword(embeddings_type, keyword):
"""
    Get the senses for a keyword:
    if the model is wordnet: a list of synsets
    if the model is not wordnet: a list of size 1 containing only the keyword
    Output: list of senses
"""
return print("TODO") if embeddings_type == EmbeddingsType.wordnet else [keyword]
def compute_feedback_score(original_keyword, proposed_keyword):
"""
Compute the feedback score
Input: original_keyword: keyword at the origin of the proposition
proposed_keyword: keyword proposed to the user
    Output: Feedback score; defaults to 0.4 if no feedback is available
"""
    # This old version took into account when a user doesn't choose a keyword; I changed it so that a keyword that is not chosen doesn't get as much of a penalty
"""
# get feedback for that particular search_id -> result_url sequence (TODO: check for similar search?)
feedbacks = sql_query.get_feedback_for_reranking(user_search, result)
if feedbacks != None and len(feedbacks) > 0:
# Normalize mean of all feedbacks (-1->1 to 0->1)
feedback_score = (np.mean(feedbacks) - (-1)) / (1 - (-1))
else:
# Default value if no feedbacks available
feedback_score = 0
return feedback_score
"""
# get feedback for that particular keyword1 -> keyword2 sequence (TODO: check for similar search?)
feedbacks = sql_query.get_feedback_for_expansion(original_keyword, proposed_keyword)
chosen = 0
ignored = 0
base_score = 0.4
if feedbacks is not None and len(feedbacks) > 0:
for feedback in feedbacks:
if feedback == 1:
chosen += 1
if feedback == -1:
ignored += 1
# remove a point for every 10 users that didn't choose it
chosen -= int(ignored / 10)
feedback_score = base_score + (chosen / len(feedbacks))
else:
feedback_score = base_score
# print(result.title, ":", max(0, min(feedback_score, 1)))
return max(0, min(feedback_score, 1))
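# Worked example with illustrative feedback values [1, 1, -1, -1, -1]: chosen == 2 and
# ignored == 3, so chosen stays 2 (int(3 / 10) == 0) and the score is 0.4 + 2 / 5 == 0.8,
# which the final clamp leaves unchanged.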
def combine_similarity_and_feedback_score(feedback_score, similarity_score, alpha=0.5):
"""
Compute the combination of embedding similarity and feedback score
Input: feedback_score: feedback score computed by compute_feedback_score, if no feedbacks, default to (1 - alpha)
similarity_score: similarity between the two keywords
alpha: higher alpha = higher feedback weight
Output: score combination of similarity and feedback
"""
return (1 - alpha) * similarity_score + alpha * feedback_score
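# For example, with alpha=0.5, similarity_score=0.6 and feedback_score=0.8:
# (1 - 0.5) * 0.6 + 0.5 * 0.8 == 0.7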
def use_feedback(original_keyword, keyword_sim_list, alpha=0.5):
"""
Apply feedback score to the list of similarity using the compute similarity feedback score method
Input: original_keyword: keyword at the origin of the proposed keywords
           keyword_sim_list: list of tuples of type (keyword, similarity)
alpha: higher alpha = higher feedback weight
Output: list of tuple of type (keyword, similarity) with the newly computed scores
"""
new_list = []
for keyword_sim in keyword_sim_list:
feedback_score = compute_feedback_score(original_keyword, keyword_sim[0])
new_sim = combine_similarity_and_feedback_score(
feedback_score, keyword_sim[1], alpha
)
# print(keyword_sim[0], ":", keyword_sim[1], "->", new_sim)
new_list.append((keyword_sim[0], new_sim))
return new_list
def split_user_entry(user_entry):
"""
Split the user entry into keywords
Input: keywords as string
Output: keywords as list
    Currently splits on the delimiters in keywords_delimitor: space, comma, semicolon, underscore and pipe
"""
return re.split(keywords_delimitor, user_entry)
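# Example: split_user_entry("transition_energetique;climat") returns
# ["transition", "energetique", "climat"], since the regex splits on spaces, commas,
# semicolons, underscores and pipes.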
def sort_array_of_tuple_with_second_value(array):
"""
Return an array of tuple sorted by second key values
"""
array.sort(key=lambda x: x[1], reverse=True)
return array
def get_geoloc_parents(ref_name, tag_name):
parent_list = request_lexical_resources.get_most_similar_referentiels(
tag_name, ref_name, "geoloc"
)
parents = [parent["word"] for parent in parent_list]
return {"name": ref_name, "type": "geoloc", "tags": parents}
def get_cluster(
keyword,
referentiel,
embeddings_type,
embeddings_name,
max_width,
max_depth,
only_vocabulary,
current_depth,
):
"""
Recursive function to build the data structure tree
Input: keywords: a string
embeddings_type: type of the embeddings
embeddings_name: name of the embeddings
           referentiel: referentiel to use for the expansion
           max_width: maximum width of the keyword search
           max_depth: maximum depth of the keyword search
           only_vocabulary: whether to only return keywords that are part of the sources' vocabulary
           current_depth: current depth in the recursion
Output: A cluster
"""
cluster = {}
if type(keyword) == str:
pass
elif type(keyword) == dict:
keyword = keyword["word"]
else:
keyword = str(keyword)
cluster["sense"] = keyword
cluster["similar_senses"] = []
if current_depth < max_depth:
# to avoid looping on most similar words
slider = 1 if current_depth > 0 else 0
similar_words = request_lexical_resources.get_most_similar(
keyword,
referentiel.name,
embeddings_type,
embeddings_name,
max_width,
slider,
only_vocabulary,
)
# Process for using feedback
temp_sim = []
for word_sim in similar_words["similar"]:
temp_sim.append((word_sim["word"], word_sim["similarity"]))
temp_sim = use_feedback(keyword, temp_sim, 0.6)
temp_sim = sort_array_of_tuple_with_second_value(temp_sim)
similar_words["similar"] = []
for temp in temp_sim:
similar_words["similar"].append({"word": temp[0], "similarity": temp[1]})
# Process for using feedback
for word in similar_words[SimilarityType.synonym]:
sub_cluster = {}
sub_cluster["sense"] = word
cluster["similar_senses"].append([sub_cluster, SimilarityType.synonym])
for word in similar_words[SimilarityType.similar]:
sub_cluster = get_cluster(
word,
referentiel,
embeddings_type,
embeddings_name,
max_width,
max_depth,
only_vocabulary,
current_depth + 1,
)
cluster["similar_senses"].append([sub_cluster, SimilarityType.similar])
if current_depth + 1 < max_depth:
for sim_type in SimilarityType:
if (
sim_type != SimilarityType.synonym
and sim_type != SimilarityType.similar
):
for sense in similar_words[sim_type]:
sub_cluster = get_cluster(
sense,
referentiel,
embeddings_type,
embeddings_name,
max_width,
max_depth,
only_vocabulary,
current_depth + 1,
)
cluster["similar_senses"].append([sub_cluster, sim_type])
if len(cluster["similar_senses"]) == 0:
cluster["similar_senses"] = None
return cluster
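# Shape of the returned cluster, with illustrative keywords:
# {"sense": "energie",
#  "similar_senses": [[{"sense": "electricite", "similar_senses": None}, SimilarityType.similar],
#                     ...]}
# "similar_senses" is None when nothing was found below this node.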
def build_tree(
keyword,
embeddings_type,
embeddings_name,
max_depth,
max_width,
only_vocabulary,
referentiels,
):
"""
Build the data structure tree for one particular sense list (originating from one keyword)
Input: keywords: a string
embeddings_type: type of the embeddings
embeddings_name: name of the embeddings
max_depth: maximum depth of keywords search
max_width: maximum width of keywords search
           only_vocabulary: whether or not it should only return keywords that are part of the sources' vocabulary
           referentiels: referentiels to use for expansion
Output: Data tree for keyword
"""
search_result = {}
search_result["original_keyword"] = keyword
tree = []
senses = get_senses_from_keyword(embeddings_type, keyword)
for sense in senses:
referentiel_output = []
if referentiels is not None:
for referentiel in referentiels:
if referentiel.type == "tags":
results = request_lexical_resources.get_most_similar_referentiels(
sense,
referentiel.name,
referentiel.type,
embeddings_type,
embeddings_name,
referentiel.width,
0,
)
keyword_sim_list = []
for result in results:
keyword_sim_list.append((result["word"], result["similarity"]))
keyword_sim_list = use_feedback(sense, keyword_sim_list, 0.6)
keyword_sim_list = sort_array_of_tuple_with_second_value(
keyword_sim_list
)
referentiel_output = {
"name": referentiel.name,
"type": referentiel.type,
"tags": [x[0] for x in keyword_sim_list],
}
search_result["referentiel"] = referentiel_output
tree.append(
get_cluster(
sense,
referentiel,
embeddings_type,
embeddings_name,
max_width,
max_depth,
only_vocabulary,
0,
)
)
search_result["tree"] = tree
return search_result
def expand_keywords(
keywords,
embeddings_type,
embeddings_name,
max_depth,
max_width,
only_vocabulary,
referentiels,
):
"""
Return the most similar keywords from the initial keywords
Input: keywords: a string
embeddings_type: type of the embeddings
embeddings_name: name of the embeddings
max_depth: maximum depth of keywords search
max_width: maximum width of keywords search
           only_vocabulary: whether or not it should only return keywords that are part of the sources' vocabulary
           referentiels: list of objects of type main.referentiel
Output: Data structure with most similar keywords found
"""
keywords_list = split_user_entry(keywords)
data = []
for keyword in keywords_list:
if len(keyword) > 3:
keyword = keyword.lower()
data.append(
build_tree(
keyword,
embeddings_type,
embeddings_name,
max_depth,
max_width,
only_vocabulary,
referentiels,
)
)
for referentiel in referentiels:
if referentiel.type == "geoloc":
data.append(
{
"original_keyword": referentiel.tag,
"referentiel": get_geoloc_parents(
referentiel.name, referentiel.tag
),
}
)
return data
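# In short: expand_keywords splits the query with split_user_entry, lowercases every keyword
# longer than 3 characters and builds one expansion tree per keyword via build_tree, then
# appends the geoloc parents for every referentiel of type "geoloc".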
| 29.330882
| 154
| 0.595889
|
db9635bc59260eb9d0378709090496a34abb2c4c
| 3,376
|
py
|
Python
|
BackEnd/cart/models.py
|
parhamzm/Kashan-Holiday-Project---SE
|
ce5205ea3b097287648be54f9d424679c649085e
|
[
"MIT"
] | 3
|
2019-11-24T20:54:17.000Z
|
2021-09-19T18:52:23.000Z
|
BackEnd/cart/models.py
|
parhamzm/Kashan-Holiday-Project---SE
|
ce5205ea3b097287648be54f9d424679c649085e
|
[
"MIT"
] | null | null | null |
BackEnd/cart/models.py
|
parhamzm/Kashan-Holiday-Project---SE
|
ce5205ea3b097287648be54f9d424679c649085e
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.conf import settings
from decimal import Decimal
from tours.models import Tour, TourVariation
from accounts.models import BasicUser
from django.db.models.signals import pre_save, post_save, post_delete
from django.core.urlresolvers import reverse
# Create your models here.
User = settings.AUTH_USER_MODEL
class CartItemHotel(models.Model):
cart = models.ForeignKey("Cart", on_delete=models.PROTECT, verbose_name=u"سبد خرید")
# item = models.ForeignKey("", verbose_name=u"")
quantity = models.PositiveIntegerField(default=1, verbose_name=u"تعداد")
class CartItem(models.Model):
cart = models.ForeignKey("Cart", on_delete=models.PROTECT, verbose_name=u"سبد خرید")
item = models.ForeignKey(TourVariation, on_delete=models.PROTECT, verbose_name=u"مورد")
quantity = models.PositiveIntegerField(default=1, verbose_name=u"تعداد")
line_item_total = models.DecimalField(max_digits=10, decimal_places=2, verbose_name=u"قیمت")
def __str__(self):
return self.item.title
def remove(self):
return self.item.remove_from_cart()
def cart_item_pre_save_receiver(sender, instance, *args, **kwargs):
qty = instance.quantity
if int(qty) >= 1:
price = instance.item.get_price()
line_item_total = Decimal(qty) * Decimal(price)
instance.line_item_total = line_item_total
pre_save.connect(cart_item_pre_save_receiver, sender=CartItem)
def cart_item_post_save_receiver(sender, instance, *args, **kwargs):
instance.cart.update_subtotal()
post_save.connect(cart_item_post_save_receiver, sender=CartItem)
post_delete.connect(cart_item_post_save_receiver, sender=CartItem)
class Cart(models.Model):
user = models.ForeignKey(User, on_delete=models.PROTECT, null=True, blank=True, verbose_name=u"کاربر")
items = models.ManyToManyField(TourVariation, through=CartItem, verbose_name=u"محتویات")
    timestamp = models.DateTimeField(auto_now_add=True, verbose_name=u"زمان ایجاد")
updated = models.DateTimeField(auto_now=True, verbose_name=u"بهروزرسانی شده در")
subtotal = models.DecimalField(max_digits=50, default=100.00, verbose_name=u'مجموع', decimal_places=2)
tax_total = models.DecimalField(max_digits=50, default=100.00, verbose_name=u'مجموع مالیات', decimal_places=2)
tax_percentage = models.DecimalField(max_digits=10, decimal_places=5, default=0.01, verbose_name=u'درصد مالیات')
total = models.DecimalField(max_digits=50, default=100.00, verbose_name=u'جمع کل', decimal_places=2)
# discounts
class Meta:
verbose_name = u'سبد خرید'
verbose_name_plural = u'سبدهای خرید'
def __str__(self):
return str(self.id)
def update_subtotal(self):
print("Updating...")
subtotal = 0
items = self.cartitem_set.all()
for item in items:
subtotal += item.line_item_total
self.subtotal = "%.2f" % (subtotal)
self.save()
def calculate_tax_total_price_receiver(sender, instance, *args, **kwargs):
subtotal = Decimal(instance.subtotal)
    tax_total = round(subtotal * Decimal(instance.tax_percentage), 2)  # ten percent value-added tax (VAT)
total = round(subtotal + Decimal(tax_total), 2)
instance.tax_total = "%.2f" %(tax_total)
instance.total = "%.2f" %(total)
pre_save.connect(calculate_tax_total_price_receiver, sender=Cart)
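# Worked example for the receiver above, using the field defaults as illustrative values:
# with subtotal=100.00 and tax_percentage=0.01, tax_total becomes 1.00 and total 101.00.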
| 38.363636
| 116
| 0.738448
|
e517445ee13933ffbbacdc06f38fb12c2e1dd220
| 1,062
|
py
|
Python
|
examples/servo-test.py
|
MultipedRobotics/qaudruped-AX12
|
e3f077b84980bfda02853d9209e4da98c93622bb
|
[
"MIT"
] | 4
|
2019-05-18T17:18:40.000Z
|
2020-10-09T15:28:14.000Z
|
examples/servo-test.py
|
MultipedRobotics/qaudruped-AX12
|
e3f077b84980bfda02853d9209e4da98c93622bb
|
[
"MIT"
] | null | null | null |
examples/servo-test.py
|
MultipedRobotics/qaudruped-AX12
|
e3f077b84980bfda02853d9209e4da98c93622bb
|
[
"MIT"
] | 1
|
2020-10-06T01:57:33.000Z
|
2020-10-06T01:57:33.000Z
|
# #!/usr/bin/env python
# ##############################################
# # The MIT License (MIT)
# # Copyright (c) 2018 Kevin Walchko
# # see LICENSE for full details
# ##############################################
#
# from __future__ import print_function
# from __future__ import division
# # from quadruped import Engine
# # from quadruped import DiscreteRippleGait
# from quadruped import Servo
# # from quadruped import Leg4
# from pyservos import AX12
# import time
#
#
# class ServoTest(object):
# def __init__(self, ID, offset=150):
# self.servo = Servo(ID, AX12)
# self.servo.setNewSerial('/dev/tty.usbserial-A506BOT5')
# self.servo.setServoLimits(offset, -110, 110)
#
# def array(self):
# for a in [-90, -45, 0, 45, 90, 0]:
# self.servo.angle = a
# time.sleep(2)
#
# def single(self, a=0):
# self.servo.angle = a
# time.sleep(2)
#
#
# def main():
# test = ServoTest(4)
#
# try:
# test.array()
# # test.single()
# except KeyboardInterrupt:
# print('bye ...')
# time.sleep(1)
#
#
# if __name__ == '__main__':
# main()
| 22.595745
| 58
| 0.591337
|
c9f15a3b88447e12c7172c6436259f9e24855497
| 1,337
|
py
|
Python
|
.venv/lib/python3.8/site-packages/yaml_env_tag.py
|
MCMaurer/mkdocs-learning-materials-home
|
7dfba825279415cb3658e53c18783c8074c6fb10
|
[
"Apache-2.0"
] | 4
|
2021-09-23T16:16:02.000Z
|
2022-03-23T21:47:30.000Z
|
.venv/lib/python3.8/site-packages/yaml_env_tag.py
|
MCMaurer/mkdocs-learning-materials-home
|
7dfba825279415cb3658e53c18783c8074c6fb10
|
[
"Apache-2.0"
] | 10
|
2021-06-16T20:48:32.000Z
|
2021-10-04T18:22:02.000Z
|
.venv/lib/python3.8/site-packages/yaml_env_tag.py
|
MCMaurer/mkdocs-learning-materials-home
|
7dfba825279415cb3658e53c18783c8074c6fb10
|
[
"Apache-2.0"
] | 2
|
2022-01-17T16:50:08.000Z
|
2022-01-17T20:59:27.000Z
|
""" A custom YAML tag for referencing environment variables in YAML files. """
__version__ = '0.1'
import os
import yaml
from typing import Any
def construct_env_tag(loader: yaml.Loader, node: yaml.Node) -> Any:
"""Assign value of ENV variable referenced at node."""
default = None
if isinstance(node, yaml.nodes.ScalarNode):
vars = [loader.construct_scalar(node)]
elif isinstance(node, yaml.nodes.SequenceNode):
child_nodes = node.value
if len(child_nodes) > 1:
# default is resolved using YAML's (implicit) types.
default = loader.construct_object(child_nodes[-1])
child_nodes = child_nodes[:-1]
# Env Vars are resolved as string values, ignoring (implicit) types.
vars = [loader.construct_scalar(child) for child in child_nodes]
else:
raise yaml.constructor.ConstructorError(None, None,
f'expected a scalar or sequence node, but found {node.id}',
node.start_mark)
for var in vars:
if var in os.environ:
value = os.environ[var]
# Resolve value to Python type using YAML's implicit resolvers
tag = loader.resolve(yaml.nodes.ScalarNode, value, (True, False))
return loader.construct_object(yaml.nodes.ScalarNode(tag, value))
return default
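# Usage sketch (the tag name is chosen by the caller; '!ENV' is assumed here):
#
#   yaml.SafeLoader.add_constructor('!ENV', construct_env_tag)
#   yaml.safe_load('api_key: !ENV [API_KEY, dummy-key]')
#   # -> {'api_key': <value of $API_KEY, or 'dummy-key' when the variable is unset>}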
| 38.2
| 78
| 0.65819
|
368f026e96589bc94efb5ffccef3474131f6a844
| 435
|
py
|
Python
|
apps/projects/migrations/0004_auto_20180517_1028.py
|
OpenAdaptronik/Rattler
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | 2
|
2018-05-18T08:38:29.000Z
|
2018-05-22T08:26:09.000Z
|
apps/projects/migrations/0004_auto_20180517_1028.py
|
IT-PM-OpenAdaptronik/Webapp
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | 118
|
2017-10-31T13:45:09.000Z
|
2018-02-24T20:51:42.000Z
|
apps/projects/migrations/0004_auto_20180517_1028.py
|
OpenAdaptronik/Rattler
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0 on 2018-05-17 10:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0003_auto_20180427_0823'),
]
operations = [
migrations.AlterField(
model_name='experiment',
name='description',
field=models.TextField(max_length=2500, null=True, verbose_name='description'),
),
]
| 22.894737
| 91
| 0.627586
|
2ceb67433a07eed1474ef9ce664725f586186fda
| 195
|
py
|
Python
|
todo/admin.py
|
markwilson107/Django-Todo
|
3c9ceda7d3ffd2e85c9303b9d0dd0377ed74b0ac
|
[
"PostgreSQL",
"MIT"
] | null | null | null |
todo/admin.py
|
markwilson107/Django-Todo
|
3c9ceda7d3ffd2e85c9303b9d0dd0377ed74b0ac
|
[
"PostgreSQL",
"MIT"
] | null | null | null |
todo/admin.py
|
markwilson107/Django-Todo
|
3c9ceda7d3ffd2e85c9303b9d0dd0377ed74b0ac
|
[
"PostgreSQL",
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Todo
class TodoAdmin(admin.ModelAdmin):
readonly_fields = ('created',)
# Register your models here.
admin.site.register(Todo, TodoAdmin)
| 24.375
| 36
| 0.774359
|
272ce6b7f11295bb2c4a681a2e2f1386b43aca5c
| 136
|
py
|
Python
|
Term 3/2/12-open-a.py
|
theseana/apondaone
|
7cbf3572a86c73220329804fee1f3d03842ae902
|
[
"MIT"
] | null | null | null |
Term 3/2/12-open-a.py
|
theseana/apondaone
|
7cbf3572a86c73220329804fee1f3d03842ae902
|
[
"MIT"
] | null | null | null |
Term 3/2/12-open-a.py
|
theseana/apondaone
|
7cbf3572a86c73220329804fee1f3d03842ae902
|
[
"MIT"
] | null | null | null |
# keep all old data and add new data
file = open('athlete.txt', 'a')
file.write('Nothin Compares\n')
file.write('Lakaka')
file.close()
| 19.428571
| 36
| 0.691176
|
622df440963a9a049bd03ea3fb2e565a7cc48432
| 2,840
|
py
|
Python
|
backend/corpora/lambdas/api/v1/collection_uuid/upload.py
|
BuildJet/single-cell-data-portal
|
080ad03f4745d59ade75c3480149e83bb76cf39b
|
[
"MIT"
] | null | null | null |
backend/corpora/lambdas/api/v1/collection_uuid/upload.py
|
BuildJet/single-cell-data-portal
|
080ad03f4745d59ade75c3480149e83bb76cf39b
|
[
"MIT"
] | null | null | null |
backend/corpora/lambdas/api/v1/collection_uuid/upload.py
|
BuildJet/single-cell-data-portal
|
080ad03f4745d59ade75c3480149e83bb76cf39b
|
[
"MIT"
] | null | null | null |
import requests
from flask import make_response, g
from .....common.corpora_config import CorporaConfig
from .....common.corpora_orm import CollectionVisibility, ProcessingStatus
from .....common import upload_sfn
from .....common.entities import Collection, Dataset
from .....api_server.db import dbconnect
from .....common.utils.dl_sources.url import MissingHeaderException, from_url
from .....common.utils.exceptions import (
ForbiddenHTTPException,
InvalidParametersHTTPException,
TooLargeHTTPException,
MethodNotAllowedException,
NotFoundHTTPException,
)
from .....common.utils.math_utils import GB
def link(collection_uuid: str, body: dict, user: str):
dataset_id = upload_from_link(collection_uuid, user, body["url"])
return make_response({"dataset_uuid": dataset_id}, 202)
def relink(collection_uuid: str, body: dict, user: str):
dataset_id = upload_from_link(collection_uuid, user, body["url"], body["id"])
return make_response({"dataset_uuid": dataset_id}, 202)
@dbconnect
def upload_from_link(collection_uuid: str, user: str, url: str, dataset_id: str = None):
db_session = g.db_session
# Verify Dropbox URL
valid_link = from_url(url)
if not valid_link:
raise InvalidParametersHTTPException("The dropbox shared link is invalid.")
# Get file info
try:
resp = valid_link.file_info()
except requests.HTTPError:
raise InvalidParametersHTTPException("The URL provided causes an error with Dropbox.")
except MissingHeaderException as ex:
raise InvalidParametersHTTPException(ex.detail)
if resp["size"] > CorporaConfig().upload_max_file_size_gb * GB:
raise TooLargeHTTPException()
if resp["name"].rsplit(".")[-1].lower() not in CorporaConfig().upload_file_formats:
raise InvalidParametersHTTPException("The file referred to by the link is not a support file format.")
# Create dataset
collection = Collection.get_collection(db_session, collection_uuid, CollectionVisibility.PRIVATE, owner=user)
if not collection:
raise ForbiddenHTTPException
if dataset_id:
dataset = Dataset.get(db_session, dataset_id)
if collection_uuid == dataset.collection_id and CollectionVisibility.PRIVATE == dataset.collection_visibility:
if dataset.processing_status.processing_status in [ProcessingStatus.SUCCESS, ProcessingStatus.FAILURE]:
dataset.reprocess()
else:
raise MethodNotAllowedException
else:
raise NotFoundHTTPException
else:
dataset = Dataset.create(db_session, collection=collection)
dataset.update(processing_status=dataset.new_processing_status())
# Start processing link
upload_sfn.start_upload_sfn(collection_uuid, dataset.id, valid_link.url)
return dataset.id
| 40.571429
| 118
| 0.734507
|
92e7cab9697705f98ac31dce9e0d74476ed52ea8
| 2,541
|
py
|
Python
|
dgm/models/bayesian_generator.py
|
mlii/variational-continual-learning
|
c5e0a7b56dfd4370127b092eb85213efc42bfcc9
|
[
"Apache-2.0"
] | null | null | null |
dgm/models/bayesian_generator.py
|
mlii/variational-continual-learning
|
c5e0a7b56dfd4370127b092eb85213efc42bfcc9
|
[
"Apache-2.0"
] | null | null | null |
dgm/models/bayesian_generator.py
|
mlii/variational-continual-learning
|
c5e0a7b56dfd4370127b092eb85213efc42bfcc9
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
from mlp import init_weights
"""
A Bayesian MLP generator
"""
def sample_gaussian(mu, log_sig):
return mu + tf.exp(log_sig) * tf.random.normal(mu.get_shape())
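# i.e. a draw from N(mu, exp(log_sig)**2) via the reparameterisation trick:
# mu + sigma * eps with eps ~ N(0, I).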
def bayesian_mlp_layer(d_in, d_out, activation, name):
mu_W = tf.Variable(init_weights(d_in, d_out), name=name + '_mu_W')
mu_b = tf.Variable(tf.zeros([d_out]), name=name + '_mu_b')
log_sig_W = tf.Variable(tf.ones([d_in, d_out]) * -6, name=name + '_log_sig_W')
log_sig_b = tf.Variable(tf.ones([d_out]) * -6, name=name + '_log_sig_b')
def apply_layer(x, sampling=True):
if sampling:
W = sample_gaussian(mu_W, log_sig_W)
b = sample_gaussian(mu_b, log_sig_b)
else:
print('use mean of q(theta)...')
            W = mu_W
            b = mu_b
a = tf.matmul(x, W) + b
if activation == 'relu':
return tf.nn.relu(a)
if activation == 'sigmoid':
return tf.nn.sigmoid(a)
if activation == 'linear':
return a
return apply_layer
def generator_head(dimZ, dimH, n_layers, name):
fc_layer_sizes = [dimZ] + [dimH for i in range(n_layers)]
layers = []
N_layers = len(fc_layer_sizes) - 1
for i in range(N_layers):
        d_in = fc_layer_sizes[i]
        d_out = fc_layer_sizes[i + 1]
name_layer = name + '_head_l%d' % i
layers.append(bayesian_mlp_layer(d_in, d_out, 'relu', name_layer))
print('decoder head MLP of size', fc_layer_sizes)
def apply(x, sampling=True):
for layer in layers:
x = layer(x, sampling)
return x
return apply
def generator_shared(dimX, dimH, n_layers, last_activation, name):
# now construct a decoder
fc_layer_sizes = [dimH for i in range(n_layers)] + [dimX]
layers = []
N_layers = len(fc_layer_sizes) - 1
for i in range(N_layers):
        d_in = fc_layer_sizes[i]
        d_out = fc_layer_sizes[i + 1]
if i < N_layers - 1:
activation = 'relu'
else:
activation = last_activation
name_layer = name + '_shared_l%d' % i
layers.append(bayesian_mlp_layer(d_in, d_out, activation, name_layer))
print('decoder shared MLP of size', fc_layer_sizes)
def apply(x, sampling=True):
for layer in layers:
x = layer(x, sampling)
return x
return apply
def generator(head_net, shared_net):
def apply(x, sampling=True):
x = head_net(x, sampling)
x = shared_net(x, sampling)
return x
return apply
def construct_gen(gen, dimZ, sampling=True):
def gen_data(N):
# start from sample z_0, generate data
z = tf.random.normal(shape=(N, dimZ))
return gen(z, sampling)
return gen_data
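# Minimal usage sketch; the dimensions and the name 'gen' below are assumptions, and
# mlp.init_weights (imported above) must be available:
#
#   head = generator_head(dimZ=50, dimH=500, n_layers=2, name='gen')
#   shared = generator_shared(dimX=784, dimH=500, n_layers=2, last_activation='sigmoid', name='gen')
#   gen_data = construct_gen(generator(head, shared), dimZ=50)
#   x = gen_data(16)  # (16, 784) tensor sampled with weights drawn from q(theta)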
| 25.41
| 80
| 0.658402
|
fa3826a78e2b26c024cff93920f2ee678997bccd
| 239
|
py
|
Python
|
fatproject/fat/admin.py
|
baydinsoftware/FAT
|
b580ef8016ffb9088cff9951ff23d21166f8f2dd
|
[
"MIT"
] | 1
|
2019-02-11T12:52:08.000Z
|
2019-02-11T12:52:08.000Z
|
fatproject/fat/admin.py
|
baydinsoftware/FAT
|
b580ef8016ffb9088cff9951ff23d21166f8f2dd
|
[
"MIT"
] | 1
|
2019-03-11T23:20:34.000Z
|
2019-03-11T23:20:34.000Z
|
fatproject/fat/admin.py
|
baydinsoftware/FAT
|
b580ef8016ffb9088cff9951ff23d21166f8f2dd
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import Position, Stage, Candidate, Feedback
admin.site.register(Position)
admin.site.register(Stage)
admin.site.register(Candidate)
admin.site.register(Feedback)
| 23.9
| 56
| 0.811715
|
4639196dc392ab023a457b941ea4bdbf31d4f3c9
| 5,614
|
py
|
Python
|
netapp/santricity/models/symbol/port_idtlv.py
|
NetApp/santricity-webapi-pythonsdk
|
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
|
[
"BSD-3-Clause-Clear"
] | 5
|
2016-08-23T17:52:22.000Z
|
2019-05-16T08:45:30.000Z
|
netapp/santricity/models/symbol/port_idtlv.py
|
NetApp/santricity-webapi-pythonsdk
|
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
|
[
"BSD-3-Clause-Clear"
] | 2
|
2016-11-10T05:30:21.000Z
|
2019-04-05T15:03:37.000Z
|
netapp/santricity/models/symbol/port_idtlv.py
|
NetApp/santricity-webapi-pythonsdk
|
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
|
[
"BSD-3-Clause-Clear"
] | 7
|
2016-08-25T16:11:44.000Z
|
2021-02-22T05:31:25.000Z
|
# coding: utf-8
"""
PortIDTLV.py
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from pprint import pformat
from six import iteritems
class PortIDTLV(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
PortIDTLV - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'subtype': 'str', # (required parameter)
'port_id': 'str'
}
self.attribute_map = {
'subtype': 'subtype', # (required parameter)
'port_id': 'portID'
}
self._subtype = None
self._port_id = None
@property
def subtype(self):
"""
Gets the subtype of this PortIDTLV.
This field contains the ID for the subtype.
:return: The subtype of this PortIDTLV.
:rtype: str
:required/optional: required
"""
return self._subtype
@subtype.setter
def subtype(self, subtype):
"""
Sets the subtype of this PortIDTLV.
This field contains the ID for the subtype.
:param subtype: The subtype of this PortIDTLV.
:type: str
"""
allowed_values = ["unk", "intfAlias", "portComp", "macAddr", "ntwkAddr", "intfName", "agentCircuit", "local", "__UNDEFINED"]
if subtype not in allowed_values:
raise ValueError(
"Invalid value for `subtype`, must be one of {0}"
.format(allowed_values)
)
self._subtype = subtype
@property
def port_id(self):
"""
Gets the port_id of this PortIDTLV.
This field contains an octet string indicating the specific identifier for the port.
:return: The port_id of this PortIDTLV.
:rtype: str
:required/optional: required
"""
return self._port_id
@port_id.setter
def port_id(self, port_id):
"""
Sets the port_id of this PortIDTLV.
This field contains an octet string indicating the specific identifier for the port.
:param port_id: The port_id of this PortIDTLV.
:type: str
"""
self._port_id = port_id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
if self is None:
return None
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if self is None or other is None:
return None
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 35.531646
| 844
| 0.614535
|
240be404520b255d24af493f5816f5da24d3aadd
| 6,472
|
py
|
Python
|
integration-tests/tests/frame_category_summary_test.py
|
blbarker/atk
|
bcb747d053e801820233a6439c88a457c8cf2438
|
[
"Apache-2.0"
] | 1
|
2016-04-05T21:57:16.000Z
|
2016-04-05T21:57:16.000Z
|
integration-tests/tests/frame_category_summary_test.py
|
blbarker/atk
|
bcb747d053e801820233a6439c88a457c8cf2438
|
[
"Apache-2.0"
] | null | null | null |
integration-tests/tests/frame_category_summary_test.py
|
blbarker/atk
|
bcb747d053e801820233a6439c88a457c8cf2438
|
[
"Apache-2.0"
] | null | null | null |
# vim: set encoding=utf-8
#
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import trustedanalytics as ta
# show full stack traces
ta.errors.show_details = True
ta.loggers.set_api()
# TODO: port setup should move to a super class
if ta.server.port != 19099:
ta.server.port = 19099
ta.connect()
class FrameCategorySummaryTest(unittest.TestCase):
"""
Test category_summary()
This is a build-time test so it needs to be written to be as fast as possible:
    - Only use the absolutely smallest toy data sets, e.g. 20 rows rather than 500 rows
    - Tests are run in parallel
- Tests should be short and isolated.
"""
_multiprocess_can_split_ = True
def setUp(self):
print "define csv file"
schema = [("source",str),("target",str)]
self.csv = ta.CsvFile("/datasets/noun_graph_small.csv", schema)
def test_category_summary_topk(self):
print "create frame"
frame = ta.Frame(self.csv)
print "compute category summary"
cm = frame.categorical_summary(('source', {'top_k' : 2}))
expected_result = {u'categorical_summary': [{u'column': u'source', u'levels': [
{u'percentage': 0.32142857142857145, u'frequency': 9, u'level': u'thing'},
{u'percentage': 0.32142857142857145, u'frequency': 9, u'level': u'abstraction'},
{u'percentage': 0.0, u'frequency': 0, u'level': u'Missing'},
{u'percentage': 0.35714285714285715, u'frequency': 10, u'level': u'Other'}]}]}
self.assertEquals(cm, expected_result, "test_category_summary_topk expected_result %s got %s" % (expected_result, cm))
def test_category_summary_threshold(self):
print "create frame"
frame = ta.Frame(self.csv)
print "compute category summary"
cm = frame.categorical_summary(('source', {'threshold' : 0.5}))
expected_result = {u'categorical_summary': [{u'column': u'source', u'levels': [
{u'percentage': 0.0, u'frequency': 0, u'level': u'Missing'},
{u'percentage': 1.0, u'frequency': 28, u'level': u'Other'}]}]}
self.assertEquals(cm, expected_result, "test_category_summary_threshold expected_result %s got %s" % (expected_result, cm))
def test_category_summary_both(self):
print "create frame"
frame = ta.Frame(self.csv)
print "compute category summary"
cm = frame.categorical_summary(('source', {'top_k' : 2}), ('target', {'threshold' : 0.5}))
expected_result = {u'categorical_summary': [{u'column': u'source', u'levels': [
{u'percentage': 0.32142857142857145, u'frequency': 9, u'level': u'thing'},
{u'percentage': 0.32142857142857145, u'frequency': 9, u'level': u'abstraction'},
{u'percentage': 0.0, u'frequency': 0, u'level': u'Missing'},
{u'percentage': 0.35714285714285715, u'frequency': 10, u'level': u'Other'}]}, {u'column': u'target',
u'levels': [
{u'percentage': 0.0,
u'frequency': 0,
u'level': u'Missing'},
{u'percentage': 1.0,
u'frequency': 28,
u'level': u'Other'}]}]}
self.assertEquals(cm, expected_result, "test_category_summary_both expected %s got %s" % (expected_result, cm))
def test_category_summary_none(self):
print "create frame"
frame = ta.Frame(self.csv)
print "compute category summary"
cm = frame.categorical_summary('source','target')
expected_result = {u'categorical_summary': [{u'column': u'source', u'levels': [
{u'percentage': 0.32142857142857145, u'frequency': 9, u'level': u'thing'},
{u'percentage': 0.32142857142857145, u'frequency': 9, u'level': u'abstraction'},
{u'percentage': 0.25, u'frequency': 7, u'level': u'physical_entity'},
{u'percentage': 0.10714285714285714, u'frequency': 3, u'level': u'entity'},
{u'percentage': 0.0, u'frequency': 0, u'level': u'Missing'},
{u'percentage': 0.0, u'frequency': 0, u'level': u'Other'}]}, {u'column': u'target', u'levels': [
{u'percentage': 0.07142857142857142, u'frequency': 2, u'level': u'thing'},
{u'percentage': 0.07142857142857142, u'frequency': 2, u'level': u'physical_entity'},
{u'percentage': 0.07142857142857142, u'frequency': 2, u'level': u'entity'},
{u'percentage': 0.03571428571428571, u'frequency': 1, u'level': u'variable'},
{u'percentage': 0.03571428571428571, u'frequency': 1, u'level': u'unit'},
{u'percentage': 0.03571428571428571, u'frequency': 1, u'level': u'substance'},
{u'percentage': 0.03571428571428571, u'frequency': 1, u'level': u'subject'},
{u'percentage': 0.03571428571428571, u'frequency': 1, u'level': u'set'},
{u'percentage': 0.03571428571428571, u'frequency': 1, u'level': u'reservoir'},
{u'percentage': 0.03571428571428571, u'frequency': 1, u'level': u'relation'},
{u'percentage': 0.0, u'frequency': 0, u'level': u'Missing'},
{u'percentage': 0.5357142857142857, u'frequency': 15, u'level': u'Other'}]}]}
self.assertEquals(cm, expected_result, "test_category_summary_none expected %s got %s" % (expected_result, cm))
if __name__ == "__main__":
unittest.main()
| 50.96063
| 131
| 0.573857
|
cf81e885c5a0e28b7b1eb50f9434f2fa57096413
| 4,529
|
py
|
Python
|
tests/test_object_filter.py
|
PackeTsar/meraki-cli
|
c9e5bbf475bf367fd56ab559acc721d6031f2f5f
|
[
"MIT"
] | 45
|
2020-06-07T04:16:57.000Z
|
2022-02-14T05:09:16.000Z
|
tests/test_object_filter.py
|
PackeTsar/meraki-cli
|
c9e5bbf475bf367fd56ab559acc721d6031f2f5f
|
[
"MIT"
] | 13
|
2020-05-28T03:08:49.000Z
|
2022-03-17T15:35:57.000Z
|
tests/test_object_filter.py
|
PackeTsar/meraki-cli
|
c9e5bbf475bf367fd56ab559acc721d6031f2f5f
|
[
"MIT"
] | 5
|
2021-03-09T14:36:35.000Z
|
2022-03-07T09:27:11.000Z
|
import unittest
from meraki_cli.__main__ import _object_filter
LISTOFDICTS = [
{'id': '1', 'name': 'THING1'},
{'id': '2', 'name': 'THING2'},
{'id': '100', 'name': 'OTHERTHING'},
{'id': '200', 'name': 'OTHER200THING'},
{'id': '300', 'name': 'ELSE'}
]
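# Filters are 'key:regex' strings matched against each dict: ['name:THING.'] keeps THING1
# and THING2 above, several filters are OR-ed by default, and and_logic=True switches to AND.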
class TestObjectFilter(unittest.TestCase):
def testObjectFilterNotListError(self):
# Should throw a log error since listofdicts input is not a list
with self.assertLogs(level='ERROR'):
_object_filter(LISTOFDICTS[0], ['goingToErorAnyways'])
def testObjectFilterNonFilter(self):
# Should return nothing if filter list is empty
output = _object_filter(LISTOFDICTS, [])
assert output == []
def testObjectFilterSimple(self):
# Should return OTHERTHING, but nothing else
output = _object_filter(LISTOFDICTS, ['name:OTHERTHING'])
assert output == [LISTOFDICTS[2]]
def testObjectFilterSimpleRegex(self):
# Should return THING1 and THING2
output = _object_filter(LISTOFDICTS, ['name:THING.'])
assert output == LISTOFDICTS[0:2]
def testObjectFilterOr(self):
# Should return THING1 and OTHERTHING
output = _object_filter(LISTOFDICTS,
['name:THING1', 'name:OTHERTHING'])
assert output == [LISTOFDICTS[0], LISTOFDICTS[2]]
def testObjectFilterMultiKeyOr(self):
# Should return THING1, THING2, and OTHER200THING
output = _object_filter(LISTOFDICTS,
['name:THING.', 'id:200'])
assert output == LISTOFDICTS[0:2] + [LISTOFDICTS[3]]
def testObjectFilterAnd(self):
# Should return OTHERTHING and OTHER200THING
output = _object_filter(LISTOFDICTS,
['name:THING', 'id:...'], and_logic=True)
assert output == LISTOFDICTS[2:4]
def testObjectFilterEmptyList(self):
output = _object_filter([], ['name:THING'])
assert output == []
def testObjectFilterKeyMissingWarning(self):
# Should throw a warning since this key does not exist in any of
# the data
with self.assertLogs(level='WARNING'):
_object_filter(LISTOFDICTS, ['missing:key'])
# Test with multiple filter keys, one of which is good. Make sure
# the warning still fires.
with self.assertLogs(level='WARNING'):
_object_filter(LISTOFDICTS, ['name:THING.', 'missing:key'])
# Test with AND logic and make sure warning fires
with self.assertLogs(level='WARNING'):
_object_filter(LISTOFDICTS, ['name:THING.', 'missing:key'],
and_logic=True)
def testObjectFilterKeyMissingReturn(self):
# Should return an empty list since this key does not exist in any
# of the data.
output = _object_filter(LISTOFDICTS, ['missing:key'])
assert output == []
# Test with multiple keys. Should return objects (using "OR" logic)
output = _object_filter(LISTOFDICTS, ['name:THING.', 'missing:key'])
assert output == LISTOFDICTS[0:2]
# Test with multiple keys. Should return nothing (using "AND" logic)
output = _object_filter(LISTOFDICTS, ['name:THING.', 'missing:key'],
and_logic=True)
assert output == []
def testObjectFilterKeyInconsistentData(self):
# Create a listofdicts with inconsistent keys
data = [
{'k2': 'v2'},
{'k1': 'v1', 'k2': 'v2'},
{'k3': 'v3', 'k4': 'v4'},
]
# Should return no warnings
assert _object_filter(data, ['k1:v.']) == [data[1]]
assert _object_filter(data, ['k1:v.'], and_logic=True) == []
def testObjectFilterComplexData(self):
# Test filtering complex values (dict). Should be flattened to str
# before filtering happens
data = [
{'k': {'test1': 'test1'}},
{'k': {'test2': 'test2'}},
{'k': {'test3': 'test3'}},
]
assert _object_filter(data, ['k:test2']) == [data[1]]
def testObjectFilterMalformedString(self):
# Test that a malformed filter causes a SystemExit
with self.assertRaises(SystemExit) as cm:
# And throws an ERROR log
with self.assertLogs(level='ERROR'):
_object_filter(LISTOFDICTS, ['badfilter'])
# And the exit code is 1 (error)
self.assertEqual(cm.exception.code, 1)
| 39.72807
| 76
| 0.595496
|
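The test suite above pins down the observable behaviour of meraki_cli's _object_filter: filters are 'key:regex' strings, multiple filters OR together by default and AND together with and_logic=True, a filter key that exists in none of the objects logs a warning, and a filter without a colon logs an error and exits with code 1. The real implementation is not shown in this record; what follows is only a minimal sketch written against those assertions, and every detail beyond the tested behaviour (message wording, exactly when the warning fires) is an assumption.

import logging
import re
import sys

log = logging.getLogger(__name__)


def _object_filter(listofdicts, filters, and_logic=False):
    """Sketch of a 'key:regex' filter compatible with the tests above."""
    # Non-list input is rejected with a logged error.
    if not isinstance(listofdicts, list):
        log.error('Expected a list of dicts, got %s', type(listofdicts).__name__)
        return []
    parsed = []
    for flt in filters:
        # A filter without a colon is malformed: log an error and exit(1).
        if ':' not in flt:
            log.error('Malformed filter %r; expected key:regex', flt)
            sys.exit(1)
        key, _, pattern = flt.partition(':')
        parsed.append((key, re.compile(pattern)))
    # Warn once per filter key that appears in none of the objects.
    for key, _ in parsed:
        if listofdicts and not any(key in obj for obj in listofdicts):
            log.warning('Filter key %r not found in any object', key)
    # The inconsistent-keys test implies AND mode gives up as soon as a
    # filter key is absent from any object in the input.
    if and_logic and any(key not in obj for key, _ in parsed for obj in listofdicts):
        return []
    results = []
    for obj in listofdicts:
        # Values are flattened to str so dict/list values can still be matched.
        hits = [key in obj and bool(regex.search(str(obj[key])))
                for key, regex in parsed]
        if hits and (all(hits) if and_logic else any(hits)):
            results.append(obj)
    return results

This sketch satisfies the assertions listed above, including the inconsistent-keys case, but the real meraki-cli implementation may differ in details the tests do not constrain.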
3707d6175d2b6fb929096f3d148ffca5e5270fdd
| 1,550
|
py
|
Python
|
core/polyaxon/init/file.py
|
rimon-safesitehq/polyaxon
|
c456d5bec00b36d75feabdccffa45b2be9a6346e
|
[
"Apache-2.0"
] | null | null | null |
core/polyaxon/init/file.py
|
rimon-safesitehq/polyaxon
|
c456d5bec00b36d75feabdccffa45b2be9a6346e
|
[
"Apache-2.0"
] | 51
|
2021-04-06T07:59:21.000Z
|
2022-03-29T01:08:22.000Z
|
core/polyaxon/init/file.py
|
rimon-safesitehq/polyaxon
|
c456d5bec00b36d75feabdccffa45b2be9a6346e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict
from polyaxon import settings
from polyaxon.client import RunClient, get_rel_asset_path
from polyaxon.exceptions import PolyaxonClientException, PolyaxonContainerException
from polyaxon.polyboard.artifacts import V1ArtifactKind, V1RunArtifact
from polyaxon.utils.path_utils import get_base_filename
def create_file_lineage(filepath: str, summary: Dict, kind: str):
kind = kind or V1ArtifactKind.FILE
if not filepath:
return
filename = os.path.basename(filepath)
if settings.CLIENT_CONFIG.no_api:
return
try:
run_client = RunClient()
except PolyaxonClientException as e:
raise PolyaxonContainerException(e)
artifact_run = V1RunArtifact(
name=get_base_filename(filename),
kind=kind,
path=get_rel_asset_path(filepath),
summary=summary,
is_input=True,
)
run_client.log_artifact_lineage(artifact_run)
| 31
| 83
| 0.748387
|
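create_file_lineage above is a thin guard around RunClient.log_artifact_lineage: it returns early when no filepath is given or when the client is configured with no_api, and otherwise records the file as an input artifact of the current run. A hypothetical call site is sketched below; the path and the summary payload are invented for illustration and are not taken from Polyaxon's documentation.

from polyaxon.polyboard.artifacts import V1ArtifactKind

# Assumed example values; the caller decides what goes into `summary`.
create_file_lineage(
    filepath="/plx-context/artifacts/config.yaml",
    summary={"path": "config.yaml"},
    kind=V1ArtifactKind.FILE,
)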
d39481361394699a541e42f633f9f1a7b90f83a7
| 24,589
|
py
|
Python
|
Bio/Motif/Motif.py
|
nuin/biopython
|
045d57b08799ef52c64bd4fa807629b8a7e9715a
|
[
"PostgreSQL"
] | 2
|
2016-05-09T04:20:06.000Z
|
2017-03-07T10:25:53.000Z
|
Bio/Motif/Motif.py
|
nmatzke/biopython
|
381fc1d1fd2fe07cec7cefbbe5eb5a93757ab67a
|
[
"PostgreSQL"
] | null | null | null |
Bio/Motif/Motif.py
|
nmatzke/biopython
|
381fc1d1fd2fe07cec7cefbbe5eb5a93757ab67a
|
[
"PostgreSQL"
] | 1
|
2019-08-19T22:05:14.000Z
|
2019-08-19T22:05:14.000Z
|
# Copyright 2003 by Bartek Wilczynski. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Implementation of sequence motifs.
Changes:
10.2007 - BW added matrix (vertical, horizontal) input, jaspar, transfac-like output
26.08.2007 - added a background attribute (Bartek Wilczynski)
26.08.2007 - added a DPQ measure (Bartek Wilczynski)
9.2007 (BW) : added the .to_fasta() and .weblogo() methods, allowing use of the Berkeley weblogo server at http://weblogo.berkeley.edu/
"""
from __future__ import generators
from Bio.Seq import Seq
from Bio.SubsMat import FreqTable
from Bio.Alphabet import IUPAC
import math,random
class Motif(object):
"""
A class representing sequence motifs.
"""
def __init__(self,alphabet=IUPAC.unambiguous_dna):
self.instances = []
self.has_instances=False
self.counts = {}
self.has_counts=False
self.mask = []
self._pwm_is_current = False
self._pwm = []
self._log_odds_is_current = False
self._log_odds = []
self.alphabet=alphabet
self.length=None
self.background=dict(map(lambda n: (n,1.0/len(self.alphabet.letters)), self.alphabet.letters))
self.beta=1.0
self.info=None
self.name=""
def _check_length(self, len):
if self.length==None:
self.length = len
elif self.length != len:
print "len",self.length,self.instances
raise ValueError("You can't change the length of the motif")
def _check_alphabet(self,alphabet):
if self.alphabet==None:
self.alphabet=alphabet
elif self.alphabet != alphabet:
raise ValueError("Wrong Alphabet")
def add_instance(self,instance):
"""
adds new instance to the motif
"""
self._check_alphabet(instance.alphabet)
self._check_length(len(instance))
if self.has_counts:
for i in range(self.length):
let=instance[i]
self.counts[let][i]+=1
if self.has_instances or not self.has_counts:
self.instances.append(instance)
self.has_instances=True
self._pwm_is_current = False
self._log_odds_is_current = False
def set_mask(self,mask):
"""
sets the mask for the motif
The mask should be a string containing asterisks in the position of significant columns and spaces in other columns
"""
self._check_length(len(mask))
self.mask=[]
for char in mask:
if char=="*":
self.mask.append(1)
elif char==" ":
self.mask.append(0)
else:
raise ValueError("Mask should contain only '*' or ' ' and not a '%s'"%char)
def pwm(self,laplace=True):
"""
returns the PWM computed for the set of instances
if laplace=True (default), pseudocounts equal to self.background multiplied by self.beta are added to all positions.
"""
if self._pwm_is_current:
return self._pwm
#we need to compute new pwm
self._pwm = []
for i in xrange(self.length):
dict = {}
#filling the dict with 0's
for letter in self.alphabet.letters:
if laplace:
dict[letter]=self.beta*self.background[letter]
else:
dict[letter]=0.0
if self.has_counts:
#taking the raw counts
for letter in self.alphabet.letters:
dict[letter]+=self.counts[letter][i]
elif self.has_instances:
#counting the occurences of letters in instances
for seq in self.instances:
#dict[seq[i]]=dict[seq[i]]+1
dict[seq[i]]+=1
self._pwm.append(FreqTable.FreqTable(dict,FreqTable.COUNT,self.alphabet))
self._pwm_is_current=1
return self._pwm
def log_odds(self,laplace=True):
"""
returns the log odds matrix computed for the set of instances
"""
if self._log_odds_is_current:
return self._log_odds
#we need to compute new pwm
self._log_odds = []
pwm=self.pwm(laplace)
for i in xrange(self.length):
d = {}
for a in self.alphabet.letters:
d[a]=math.log(pwm[i][a]/self.background[a],2)
self._log_odds.append(d)
self._log_odds_is_current=1
return self._log_odds
def ic(self):
"""Method returning the information content of a motif.
"""
res=0
pwm=self.pwm()
for i in range(self.length):
res+=2
for a in self.alphabet.letters:
if pwm[i][a]!=0:
res+=pwm[i][a]*math.log(pwm[i][a],2)
return res
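# Worked example (added for illustration, not part of the original file): a
# column with probabilities A=0.5, C=0.25, G=0.25, T=0 contributes
# 2 + 0.5*log2(0.5) + 0.25*log2(0.25) + 0.25*log2(0.25) = 2 - 0.5 - 0.5 - 0.5
# = 0.5 bits, while a perfectly conserved column contributes the full 2 bits.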
def exp_score(self,st_dev=False):
"""
Computes expected score of motif's instance and its standard deviation
"""
exs=0.0
var=0.0
pwm=self.pwm()
for i in range(self.length):
ex1=0.0
ex2=0.0
for a in self.alphabet.letters:
if pwm[i][a]!=0:
ex1+=pwm[i][a]*(math.log(pwm[i][a],2)-math.log(self.background[a],2))
ex2+=pwm[i][a]*(math.log(pwm[i][a],2)-math.log(self.background[a],2))**2
exs+=ex1
var+=ex2-ex1**2
if st_dev:
return exs,math.sqrt(var)
else:
return exs
def search_instances(self,sequence):
"""
a generator function, returning found positions of instances of the motif in a given sequence
"""
if not self.has_instances:
raise ValueError ("This motif has no instances")
for pos in xrange(0,len(sequence)-self.length+1):
for instance in self.instances:
if instance.tostring()==sequence[pos:pos+self.length].tostring():
yield(pos,instance)
break # no other instance will fit (we don't want to return multiple hits)
def score_hit(self,sequence,position,normalized=0,masked=0):
"""
give the pwm score for a given position
"""
lo=self.log_odds()
score = 0.0
for pos in xrange(self.length):
a = sequence[position+pos]
if not masked or self.mask[pos]:
try:
score += lo[pos][a]
except:
pass
if normalized:
if not masked:
score/=self.length
else:
score/=len(filter(lambda x: x, self.mask))
return score
def search_pwm(self,sequence,normalized=0,masked=0,threshold=0.0,both=True):
"""
a generator function, returning found hits in a given sequence with the pwm score higher than the threshold
"""
if both:
rc = self.reverse_complement()
sequence=sequence.tostring().upper()
for pos in xrange(0,len(sequence)-self.length+1):
score = self.score_hit(sequence,pos,normalized,masked)
if score > threshold:
yield (pos,score)
if both:
rev_score = rc.score_hit(sequence,pos,normalized,masked)
if rev_score > threshold:
yield (-pos,rev_score)
def dist_pearson(self, motif, masked = 0):
"""
return the similarity score based on the Pearson correlation of the given motif against self.
We use the Pearson correlation of the respective probabilities.
"""
if self.alphabet != motif.alphabet:
raise ValueError("Cannot compare motifs with different alphabets")
max_p=-2
for offset in range(-self.length+1,motif.length):
if offset<0:
p = self.dist_pearson_at(motif,-offset)
else: #offset>=0
p = motif.dist_pearson_at(self,offset)
if max_p<p:
max_p=p
max_o=-offset
return 1-max_p,max_o
def dist_pearson_at(self,motif,offset):
sxx = 0 # \sum x^2
sxy = 0 # \sum x \cdot y
sx = 0 # \sum x
sy = 0 # \sum y
syy = 0 # \sum y^2
norm=self.length
for pos in range(max(self.length,offset+motif.length)):
for l in self.alphabet.letters:
xi = self[pos][l]
yi = motif[pos-offset][l]
sx = sx + xi
sy = sy + yi
sxx = sxx + xi * xi
syy = syy + yi * yi
sxy = sxy + xi * yi
norm *= len(self.alphabet.letters)
s1 = (sxy - sx*sy*1.0/norm)
s2 = (sxx - sx*sx*1.0/norm)*(syy- sy*sy*1.0/norm)
return s1/math.sqrt(s2)
def dist_product(self,other):
"""
A similarity measure taking into account a product probability of generating overlapping instances of two motifs
"""
max_p=0.0
for offset in range(-self.length+1,other.length):
if offset<0:
p = self.dist_product_at(other,-offset)
else: #offset>=0
p = other.dist_product_at(self,offset)
if max_p<p:
max_p=p
max_o=-offset
return 1-max_p/self.dist_product_at(self,0),max_o
def dist_product_at(self,other,offset):
s=0
for i in range(max(self.length,offset+other.length)):
f1=self[i]
f2=other[i-offset]
for n,b in self.background.items():
s+=b*f1[n]*f2[n]
return s/i
def dist_dpq(self,other):
"""
Calculates the dpq distance measure between motifs.
It is calculated as a maximal value of the formula:
\sqrt{\sum_{i=1}^{alignment.len()} \sum_{k=1}^{alphabet.len()}
[ m1[i].freq(alphabet[k]) * log_2(m1[i].freq(alphabet[k]) / m2[i].freq(alphabet[k])) +
m2[i].freq(alphabet[k]) * log_2(m2[i].freq(alphabet[k]) / m1[i].freq(alphabet[k])) ]}
over all possible non-spaced alignments of the two motifs.
reference to the measure:
D. M Endres and J. E Schindelin, "A new metric for probability distributions", IEEE transactions on Information Theory 49, no. 7 (July 2003): 1858-1860.
"""
min_d=float("inf")
min_o=-1
d_s=[]
for offset in range(-self.length+1,other.length):
#print "%2.3d"%offset,
if offset<0:
d = self.dist_dpq_at(other,-offset)
overlap = self.length+offset
else: #offset>=0
d = other.dist_dpq_at(self,offset)
overlap = other.length-offset
overlap = min(self.length,other.length,overlap)
out = self.length+other.length-2*overlap
#print d,1.0*(overlap+out)/overlap,d*(overlap+out)/overlap
#d = d/(2*overlap)
d = (d/(out+overlap))*(2*overlap+out)/(2*overlap)
#print d
d_s.append((offset,d))
if min_d> d:
min_d=d
min_o=-offset
return min_d,min_o#,d_s
def dist_dpq_at(self,other,offset):
"""
calculates the dist_dpq measure with a given offset.
offset should satisfy 0<=offset<=self.length
"""
def dpq (f1,f2,alpha):
s=0
for n in alpha.letters:
avg=(f1[n]+f2[n])/2
s+=f1[n]*math.log(f1[n]/avg,2)+f2[n]*math.log(f2[n]/avg,2)
return math.sqrt(s)
s=0
for i in range(max(self.length,offset+other.length)):
f1=self[i]
f2=other[i-offset]
s+=dpq(f1,f2,self.alphabet)
return s
def read(self,stream):
"""Reads the motif from the stream (in AlignAce format).
The self.alphabet variable must be set beforehand.
If the last line contains asterisks, it is used for setting the mask.
"""
while 1:
ln = stream.readline()
if "*" in ln:
self.set_mask(ln.strip("\n\c"))
break
self.add_instance(Seq(ln.strip(),self.alphabet))
def __str__(self,masked=False):
""" string representation of a motif.
"""
str = ""
for inst in self.instances:
str = str + inst.tostring() + "\n"
if masked:
for i in xrange(self.length):
if self.mask[i]:
str = str + "*"
else:
str = str + " "
str = str + "\n"
return str
def __len__(self):
"""return the length of a motif
"""
if self.length==None:
return 0
else:
return self.length
def write(self,stream):
"""
writes the motif to the stream
"""
stream.write(self.__str__())
def to_fasta(self):
"""
FASTA representation of motif
"""
str = ""
for i,inst in enumerate(self.instances):
str = str + "> instance %d\n"%i + inst.tostring() + "\n"
return str
def reverse_complement(self):
"""
Gives the reverse complement of the motif
"""
res = Motif()
if self.has_instances:
for i in self.instances:
res.add_instance(i.reverse_complement())
else: # has counts
res.has_counts=True
res.counts["A"]=self.counts["T"]
res.counts["T"]=self.counts["A"]
res.counts["G"]=self.counts["C"]
res.counts["C"]=self.counts["G"]
res.counts["A"].reverse()
res.counts["C"].reverse()
res.counts["G"].reverse()
res.counts["T"].reverse()
res.length=self.length
res.mask = self.mask
return res
def from_jaspar_pfm(self,stream,make_instances=False):
"""
reads the motif from Jaspar .pfm file
The instances are fake, but the pwm is accurate.
"""
return self.from_horiz_matrix(stream,letters="ACGT",make_instances=make_instances)
def from_vert_matrix(self,stream,letters=None,make_instances=False):
"""reads a horizontal count matrix from stream and fill in the counts.
"""
self.counts = {}
self.has_counts=True
if letters==None:
letters=self.alphabet.letters
self.length=0
for i in letters:
self.counts[i]=[]
for ln in stream.readlines():
rec=map(float,ln.strip().split())
for k,v in zip(letters,rec):
self.counts[k].append(v)
self.length+=1
self.set_mask("*"*self.length)
if make_instances==True:
self.make_instances_from_counts()
return self
def from_horiz_matrix(self,stream,letters=None,make_instances=False):
"""reads a horizontal count matrix from stream and fill in the counts.
"""
if letters==None:
letters=self.alphabet.letters
self.counts = {}
self.has_counts=True
for i in letters:
ln = stream.readline().strip().split()
#if there is a letter in the beginning, ignore it
if ln[0]==i:
ln=ln[1:]
#print ln
try:
self.counts[i]=map(int,ln)
except ValueError: #not integers
self.counts[i]=map(float,ln) #map(lambda s: int(100*float(s)),ln)
#print counts[i]
s = sum(map(lambda nuc: self.counts[nuc][0],letters))
#print "sum", s
l = len(self.counts[letters[0]])
self.length=l
self.set_mask("*"*l)
if make_instances==True:
self.make_instances_from_counts()
return self
def make_instances_from_counts(self):
"""Creates "fake" instances for a motif created from a count matrix.
In case the sums of counts are different for different columns, the shorter columns are padded with background.
"""
alpha="".join(self.alphabet.letters)
#col[i] is a column taken from aligned motif instances
col=[]
self.has_instances=True
self.instances=[]
s = sum(map(lambda nuc: self.counts[nuc][0],self.alphabet.letters))
for i in range(self.length):
col.append("")
for n in self.alphabet.letters:
col[i] = col[i]+ (n*(self.counts[n][i]))
if len(col[i])<s:
print "WARNING, column too short",len(col[i]),s
col[i]+=(alpha*s)[:(s-len(col[i]))]
#print i,col[i]
#iterate over instances
for i in range(s):
inst="" #start with empty seq
for j in range(self.length): #iterate over positions
inst+=col[j][i]
#print i,inst
inst=Seq(inst,self.alphabet)
self.add_instance(inst)
return self.instances
def make_counts_from_instances(self):
"""Creates the count matrix for a motif with instances.
"""
#make strings for "columns" of motifs
#col[i] is a column taken from aligned motif instances
counts={}
for a in self.alphabet.letters:
counts[a]=[]
self.has_counts=True
s = len(self.instances)
for i in range(self.length):
ci = dict(map(lambda a: (a,0),self.alphabet.letters))
for inst in self.instances:
ci[inst[i]]+=1
for a in self.alphabet.letters:
counts[a].append(ci[a])
self.counts=counts
return counts
def from_jaspar_sites(self,stream):
"""
reads the motif from Jaspar .sites file
The instances and pwm are OK.
"""
while True:
ln = stream.readline()# read the header "$>...."
if ln=="" or ln[0]!=">":
break
ln=stream.readline().strip()#read the actual sequence
i=0
while ln[i]==ln[i].lower():
i+=1
inst=""
while i<len(ln) and ln[i]==ln[i].upper():
inst+=ln[i]
i+=1
inst=Seq(inst,self.alphabet)
self.add_instance(inst)
self.set_mask("*"*len(inst))
return self
def __getitem__(self,index):
"""Returns the probability distribution over symbols at a given position, padding with background.
If the requested index is out of bounds, the returned distribution comes from background.
"""
if index in range(self.length):
return self.pwm()[index]
else:
return self.background
def consensus(self):
"""Returns the consensus sequence of a motif.
"""
res=""
for i in range(self.length):
max_f=0
max_n="X"
for n in self[i].keys():
if self[i][n]>max_f:
max_f=self[i][n]
max_n=n
res+=max_n
return Seq(res,self.alphabet)
def anticonsensus(self):
"""returns the least probable pattern to be generated from this motif.
"""
res=""
for i in range(self.length):
min_f=10.0
min_n="X"
for n in self[i].keys():
if self[i][n]<min_f:
min_f=self[i][n]
min_n=n
res+=min_n
return Seq(res,self.alphabet)
def max_score(self):
"""Maximal possible score for this motif.
returns the score computed for the consensus sequence.
"""
return self.score_hit(self.consensus(),0)
def min_score(self):
"""Minimal possible score for this motif.
returns the score computed for the anticonsensus sequence.
"""
return self.score_hit(self.anticonsensus(),0)
def weblogo(self,fname,format="PNG",**kwds):
"""
uses the Berkeley weblogo service to download and save a weblogo of itself;
requires an internet connection.
The parameters from **kwds are passed directly to the weblogo server.
"""
import urllib
import urllib2
al= self.to_fasta()
url = 'http://weblogo.berkeley.edu/logo.cgi'
values = {'sequence' : al,
'format' : format,
'logowidth' : '18',
'logoheight' : '5',
'logounits' : 'cm',
'kind' : 'AUTO',
'firstnum' : "1",
'command' : 'Create Logo',
'smallsamplecorrection' : "on",
'symbolsperline' : 32,
'res' : '96',
'res_units' : 'ppi',
'antialias' : 'on',
'title' : '',
'barbits' : '',
'xaxis': 'on',
'xaxis_label' : '',
'yaxis': 'on',
'yaxis_label' : '',
'showends' : 'on',
'shrink' : '0.5',
'fineprint' : 'on',
'ticbits' : '1',
'colorscheme' : 'DEFAULT',
'color1' : 'green',
'color2' : 'blue',
'color3' : 'red',
'color4' : 'black',
'color5' : 'purple',
'color6' : 'orange',
'color1' : 'black',
}
for k,v in kwds.items():
values[k]=str(v)
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
f=open(fname,"w")
im=response.read()
f.write(im)
f.close()
def to_transfac(self):
"""Write the representation of a motif in TRANSFAC format
"""
res="XX\nTY Motif\n" #header
try:
res+="ID %s\n"%self.name
except:
pass
res+="BF undef\nP0"
for a in self.alphabet.letters:
res+=" %s"%a
res+="\n"
if not self.has_counts:
self.make_counts_from_instances()
for i in range(self.length):
if i<9:
res+="0%d"%(i+1)
else:
res+="%d"%(i+1)
for a in self.alphabet.letters:
res+=" %d"%self.counts[a][i]
res+="\n"
res+="XX\n"
return res
def to_vertical_matrix(self,letters=None):
"""Return string representation of the motif as a matrix.
"""
if letters==None:
letters=self.alphabet.letters
self._pwm_is_current=False
pwm=self.pwm(laplace=False)
res=""
for i in range(self.length):
res+="\t".join([str(pwm[i][a]) for a in letters])
res+="\n"
return res
def to_horizontal_matrix(self,letters=None,normalized=True):
"""Return string representation of the motif as a matrix.
"""
if letters==None:
letters=self.alphabet.letters
res=""
if normalized: #output PWM
self._pwm_is_current=False
mat=self.pwm(laplace=False)
for a in letters:
res+="\t".join([str(mat[i][a]) for i in range(self.length)])
res+="\n"
else: #output counts
if not self.has_counts:
self.make_counts_from_instances()
mat=self.counts
for a in letters:
res+="\t".join([str(mat[a][i]) for i in range(self.length)])
res+="\n"
return res
def to_jaspar_pfm(self):
"""Returns the pfm representation of the motif
"""
return self.to_horizontal_matrix(normalized=False,letters="ACGT")
| 33.318428
| 160
| 0.519907
|
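The Motif class above implements the standard position-weight-matrix arithmetic: pwm() turns per-column counts into frequencies, log_odds() takes log2 of frequency over background, and ic() sums 2 + p*log2(p) per column. The file itself is Python 2 and tied to the long-removed Bio.Motif API, so the sketch below re-states the same formulas in plain modern Python; the toy alignment and the uniform background are assumptions made only for this illustration.

import math

# Toy alignment of four DNA instances (hypothetical data, not from the source).
instances = ['TACAA', 'TACGC', 'TACAC', 'TACCC']
letters = 'ACGT'
background = {a: 0.25 for a in letters}
length = len(instances[0])

# Column-wise counts turned into probabilities (no pseudocounts, laplace=False).
pwm = []
for i in range(length):
    counts = {a: sum(1 for seq in instances if seq[i] == a) for a in letters}
    pwm.append({a: counts[a] / len(instances) for a in letters})

# Log-odds against the background, as in Motif.log_odds(); zero-probability
# cells are simply skipped here so the demo never calls log(0).
log_odds = [{a: math.log(col[a] / background[a], 2) for a in letters if col[a]}
            for col in pwm]

# Information content, mirroring Motif.ic(): 2 + sum(p * log2(p)) per column.
ic = sum(2 + sum(col[a] * math.log(col[a], 2) for a in letters if col[a])
         for col in pwm)
print(round(ic, 3))  # 7.689 bits for this toy alignment

Fully conserved columns contribute 2 bits each, which is why the three invariant columns of the toy alignment dominate the 7.689-bit total.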
513b84215a1fea5e26d2dcf52334cb313f2d5cef
| 19,042
|
py
|
Python
|
tools/perf/core/results_dashboard.py
|
iridium-browser/iridium-browser
|
907e31cf5ce5ad14d832796e3a7c11e496828959
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 575
|
2015-06-18T23:58:20.000Z
|
2022-03-23T09:32:39.000Z
|
tools/perf/core/results_dashboard.py
|
iridium-browser/iridium-browser
|
907e31cf5ce5ad14d832796e3a7c11e496828959
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113
|
2015-05-04T09:58:14.000Z
|
2022-01-31T19:35:03.000Z
|
tools/perf/core/results_dashboard.py
|
iridium-browser/iridium-browser
|
907e31cf5ce5ad14d832796e3a7c11e496828959
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 52
|
2015-07-14T10:40:50.000Z
|
2022-03-15T01:11:49.000Z
|
#!/usr/bin/env vpython
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions for adding results to perf dashboard."""
# This file was copy-pasted over from:
# //build/scripts/slave/results_dashboard.py
# That file is now deprecated and this one is
# the new source of truth.
import calendar
import datetime
import httplib
import json
import os
import subprocess
import sys
import time
import traceback
import urllib
import urllib2
import zlib
import logging
# TODO(crbug.com/996778): Figure out how to get httplib2 hermetically.
import httplib2 # pylint: disable=import-error
from core import path_util
logging.basicConfig(
level=logging.INFO,
format='(%(levelname)s) %(asctime)s pid=%(process)d'
' %(module)s.%(funcName)s:%(lineno)d %(message)s')
# The paths in the results dashboard URLs for sending results.
SEND_RESULTS_PATH = '/add_point'
SEND_HISTOGRAMS_PATH = '/add_histograms'
class SendResultException(Exception):
pass
class SendResultsRetryException(SendResultException):
pass
class SendResultsFatalException(SendResultException):
pass
def LuciAuthTokenGeneratorCallback():
args = ['luci-auth', 'token']
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.wait() == 0:
return p.stdout.read()
else:
raise RuntimeError(
'Error generating authentication token.\nStdout: %s\nStderr: %s' %
(p.stdout.read(), p.stderr.read()))
def SendResults(data, data_label, url, send_as_histograms=False,
token_generator_callback=LuciAuthTokenGeneratorCallback,
num_retries=4):
"""Sends results to the Chrome Performance Dashboard.
This function tries to send the given data to the dashboard.
Args:
data: The data to try to send. Must be JSON-serializable.
data_label: string name of the data to be uploaded. This is only used for
logging purpose.
url: Performance Dashboard URL (including schema).
send_as_histograms: True if result is to be sent to /add_histograms.
token_generator_callback: a callback for generating the authentication token
used to upload to the perf dashboard.
If |token_generator_callback| is not specified, it defaults to
LuciAuthTokenGeneratorCallback.
num_retries: Number of times to retry uploading to the perf dashboard upon
recoverable error.
"""
start = time.time()
all_data_uploaded = False
data_type = ('histogram' if send_as_histograms else 'chartjson')
dashboard_data_str = json.dumps(data)
# When the perf dashboard is overloaded, it sometimes takes a while to spin
# up a new instance, so sleep before retrying.
# For more details, see crbug.com/867379.
wait_before_next_retry_in_seconds = 15
for i in xrange(1, num_retries + 1):
try:
logging.info(
'Sending %s result of %s to dashboard (attempt %i out of %i).' %
(data_type, data_label, i, num_retries))
if send_as_histograms:
_SendHistogramJson(url, dashboard_data_str, token_generator_callback)
else:
# TODO(eakuefner): Remove this logic once all bots use histograms.
_SendResultsJson(url, dashboard_data_str, token_generator_callback)
all_data_uploaded = True
break
except SendResultsRetryException as e:
logging.error('Error while uploading %s data: %s' % (data_type, str(e)))
time.sleep(wait_before_next_retry_in_seconds)
wait_before_next_retry_in_seconds *= 2
except SendResultsFatalException as e:
logging.error(
'Fatal error while uploading %s data: %s' % (data_type, str(e)))
break
except Exception:
logging.error('Unexpected error while uploading %s data: %s' %
(data_type, traceback.format_exc()))
break
logging.info(
'Time spent sending results to %s: %s' % (url, time.time() - start))
return all_data_uploaded
def MakeHistogramSetWithDiagnostics(histograms_file,
test_name, bot, buildername, buildnumber,
project, buildbucket,
revisions_dict, is_reference_build,
perf_dashboard_machine_group, output_dir,
max_bytes=0):
"""Merges Histograms, adds Diagnostics, and batches the results.
Args:
histograms_file: input filename
output_dir: output directory
max_bytes: If non-zero, tries to produce files no larger than max_bytes.
(May generate a file that is larger than max_bytes if max_bytes is smaller
than a single Histogram.)
"""
add_diagnostics_args = []
add_diagnostics_args.extend([
'--benchmarks', test_name,
'--bots', bot,
'--builds', buildnumber,
'--masters', perf_dashboard_machine_group,
'--is_reference_build', 'true' if is_reference_build else '',
])
if max_bytes:
add_diagnostics_args.extend(['--max_bytes', max_bytes])
stdio_url = _MakeStdioUrl(test_name, buildername, buildnumber)
if stdio_url:
add_diagnostics_args.extend(['--log_urls_k', 'Buildbot stdio'])
add_diagnostics_args.extend(['--log_urls_v', stdio_url])
build_status_url = _MakeBuildStatusUrl(
project, buildbucket, buildername, buildnumber)
if build_status_url:
add_diagnostics_args.extend(['--build_urls_k', 'Build Status'])
add_diagnostics_args.extend(['--build_urls_v', build_status_url])
for k, v in revisions_dict.iteritems():
add_diagnostics_args.extend((k, v))
add_diagnostics_args.append(histograms_file)
# Subprocess only accepts string args
add_diagnostics_args = [str(v) for v in add_diagnostics_args]
add_reserved_diagnostics_path = os.path.join(
path_util.GetChromiumSrcDir(), 'third_party', 'catapult', 'tracing',
'bin', 'add_reserved_diagnostics')
# This script may write multiple files to output_dir.
output_path = os.path.join(output_dir, test_name + '.json')
cmd = ([sys.executable, add_reserved_diagnostics_path] +
add_diagnostics_args + ['--output_path', output_path])
logging.info(cmd)
subprocess.check_call(cmd)
def MakeListOfPoints(charts, bot, test_name, project, buildbucket, buildername,
buildnumber, supplemental_columns,
perf_dashboard_machine_group,
revisions_dict=None):
"""Constructs a list of point dictionaries to send.
The format output by this function is the original format for sending data
to the perf dashboard.
Args:
charts: A dictionary of chart names to chart data, as generated by the
log processor classes (see process_log_utils.GraphingLogProcessor).
bot: A string which comes from perf_id, e.g. linux-release.
test_name: A test suite name, e.g. sunspider.
buildername: Builder name (for stdio links).
buildnumber: Build number (for stdio links).
supplemental_columns: A dictionary of extra data to send with a point.
perf_dashboard_machine_group: Builder's perf machine group.
Returns:
A list of dictionaries in the format accepted by the perf dashboard.
Each dictionary has the keys "master", "bot", "test", "value", "revision".
The full details of this format are described at http://goo.gl/TcJliv.
"""
results = []
for chart_name, chart_data in sorted(charts.items()):
point_id, revision_columns = _RevisionNumberColumns(
revisions_dict if revisions_dict is not None else chart_data, prefix='r_')
for trace_name, trace_values in sorted(chart_data['traces'].items()):
is_important = trace_name in chart_data.get('important', [])
test_path = _TestPath(test_name, chart_name, trace_name)
result = {
'master': perf_dashboard_machine_group,
'bot': bot,
'test': test_path,
'revision': point_id,
'supplemental_columns': {}
}
# Add the supplemental_columns values that were passed in after the
# calculated revision column values so that these can be overwritten.
result['supplemental_columns'].update(revision_columns)
result['supplemental_columns'].update(
_GetStdioUriColumn(test_name, buildername, buildnumber))
result['supplemental_columns'].update(
_GetBuildStatusUriColumn(project, buildbucket, buildername,
buildnumber))
result['supplemental_columns'].update(supplemental_columns)
result['value'] = trace_values[0]
result['error'] = trace_values[1]
# Add other properties to this result dictionary if available.
if chart_data.get('units'):
result['units'] = chart_data['units']
if is_important:
result['important'] = True
results.append(result)
return results
def MakeDashboardJsonV1(chart_json, revision_dict, test_name, bot, project,
buildbucket, buildername,
buildnumber, supplemental_dict, is_ref,
perf_dashboard_machine_group):
"""Generates Dashboard JSON in the new Telemetry format.
See http://goo.gl/mDZHPl for more info on the format.
Args:
chart_json: A dict containing the telemetry output.
revision_dict: Dictionary of revisions to include, include "rev",
which determines the point ID.
test_name: A test suite name, e.g. sunspider.
bot: A string which comes from perf_id, e.g. linux-release.
buildername: Builder name (for stdio links).
buildnumber: Build number (for stdio links).
supplemental_dict: A dictionary of extra data to send with a point;
this includes revisions and annotation data.
is_ref: True if this is a reference build, False otherwise.
perf_dashboard_machine_group: Builder's perf machine group.
Returns:
A dictionary in the format accepted by the perf dashboard.
"""
if not chart_json:
logging.error('Error: No json output from telemetry.')
logging.error('@@@STEP_FAILURE@@@')
point_id, versions = _RevisionNumberColumns(revision_dict, prefix='')
supplemental = {}
for key in supplemental_dict:
if key.startswith('r_'):
versions[key.replace('r_', '', 1)] = supplemental_dict[key]
if key.startswith('a_'):
supplemental[key.replace('a_', '', 1)] = supplemental_dict[key]
supplemental.update(
_GetStdioUriColumn(test_name, buildername, buildnumber))
supplemental.update(
_GetBuildStatusUriColumn(project, buildbucket, buildername, buildnumber))
# TODO(sullivan): The android recipe sends "test_name.reference"
# while the desktop one just sends "test_name" for ref builds. Need
# to figure out why.
# https://github.com/catapult-project/catapult/issues/2046
test_name = test_name.replace('.reference', '')
fields = {
'master': perf_dashboard_machine_group,
'bot': bot,
'test_suite_name': test_name,
'point_id': point_id,
'supplemental': supplemental,
'versions': versions,
'chart_data': chart_json,
'is_ref': is_ref,
}
return fields
def _MakeStdioUrl(test_name, buildername, buildnumber):
"""Returns a string url pointing to buildbot stdio log."""
# TODO(780914): Link to logdog instead of buildbot.
if not buildername or not buildnumber:
return ''
return '%sbuilders/%s/builds/%s/steps/%s/logs/stdio' % (
_GetBuildBotUrl(),
urllib.quote(buildername),
urllib.quote(str(buildnumber)),
urllib.quote(test_name))
def _MakeBuildStatusUrl(project, buildbucket, buildername, buildnumber):
if not (buildername and buildnumber):
return None
if not project:
project = 'chrome'
if not buildbucket:
buildbucket = 'ci'
return 'https://ci.chromium.org/ui/p/%s/builders/%s/%s/%s' % (
urllib.quote(project),
urllib.quote(buildbucket),
urllib.quote(buildername),
urllib.quote(str(buildnumber)))
def _GetStdioUriColumn(test_name, buildername, buildnumber):
"""Gets a supplemental column containing buildbot stdio link."""
url = _MakeStdioUrl(test_name, buildername, buildnumber)
if not url:
return {}
return _CreateLinkColumn('stdio_uri', 'Buildbot stdio', url)
def _GetBuildStatusUriColumn(project, buildbucket, buildername, buildnumber):
"""Gets a supplemental column containing buildbot status link."""
url = _MakeBuildStatusUrl(project, buildbucket, buildername, buildnumber)
if not url:
return {}
return _CreateLinkColumn('build_uri', 'Buildbot status page', url)
def _CreateLinkColumn(name, label, url):
"""Returns a column containing markdown link to show on dashboard."""
return {'a_' + name: '[%s](%s)' % (label, url)}
def _GetBuildBotUrl():
"""Gets the buildbot URL which contains hostname and master name."""
return os.environ.get('BUILDBOT_BUILDBOTURL',
'http://build.chromium.org/p/chromium/')
def _GetTimestamp():
"""Get the Unix timestamp for the current time."""
return int(calendar.timegm(datetime.datetime.utcnow().utctimetuple()))
def _RevisionNumberColumns(data, prefix):
"""Get the point id and revision-related columns from the given data.
Args:
data: A dict of information from one line of the log file.
prefix: Prefix for revision type keys. 'r_' for non-telemetry JSON, '' for
telemetry JSON.
Returns:
A tuple with the point id (which must be an int), and a dict of
revision-related columns.
"""
revision_supplemental_columns = {}
# The dashboard requires points' x-values to be integers, and points are
# ordered by these x-values. If data['rev'] can't be parsed as an int, assume
# that it's a git commit hash and use timestamp as the x-value.
try:
revision = int(data['rev'])
if revision and 300000 < revision < 1000000:
# Assume that revision is the commit position number for the master
# branch in the chromium/src repo.
revision_supplemental_columns[prefix + 'commit_pos'] = revision
except ValueError:
# The dashboard requires ordered integer revision numbers. If the revision
# is not an integer, assume it's a git hash and send a timestamp.
revision = _GetTimestamp()
revision_supplemental_columns[prefix + 'chromium'] = data['rev']
# An explicit data['point_id'] overrides the default behavior.
if 'point_id' in data:
revision = int(data['point_id'])
# For other revision data, add it if it's present and not undefined:
for key in ['webrtc_git', 'v8_rev']:
if key in data and data[key] != 'undefined':
revision_supplemental_columns[prefix + key] = data[key]
# If possible, also send the git hash.
if 'git_revision' in data and data['git_revision'] != 'undefined':
revision_supplemental_columns[prefix + 'chromium'] = data['git_revision']
return revision, revision_supplemental_columns
def _TestPath(test_name, chart_name, trace_name):
"""Get the slash-separated test path to send.
Args:
test_name: Test name. Typically, this will be a top-level 'test suite' name.
chart_name: Name of a chart where multiple trace lines are grouped. If the
chart name is the same as the trace name, that signifies that this is
the main trace for the chart.
trace_name: The "trace name" is the name of an individual line on chart.
Returns:
A slash-separated list of names that corresponds to the hierarchy of test
data in the Chrome Performance Dashboard; doesn't include master or bot
name.
"""
# For tests run on reference builds by builds/scripts/slave/telemetry.py,
# "_ref" is appended to the trace name. On the dashboard, as long as the
# result is on the right chart, it can just be called "ref".
if trace_name == chart_name + '_ref':
trace_name = 'ref'
chart_name = chart_name.replace('_by_url', '')
# No slashes are allowed in the trace name.
trace_name = trace_name.replace('/', '_')
# The results for "test/chart" and "test/chart/*" will all be shown on the
# same chart by the dashboard. The result with path "test/path" is considered
# the main trace for the chart.
test_path = '%s/%s/%s' % (test_name, chart_name, trace_name)
if chart_name == trace_name:
test_path = '%s/%s' % (test_name, chart_name)
return test_path
def _SendResultsJson(url, results_json, token_generator_callback):
"""Make a HTTP POST with the given JSON to the Performance Dashboard.
Args:
url: URL of Performance Dashboard instance, e.g.
"https://chromeperf.appspot.com".
results_json: JSON string that contains the data to be sent.
Returns:
None if successful, or an error string if there were errors.
"""
# When data is provided to urllib2.Request, a POST is sent instead of GET.
# The data must be in the application/x-www-form-urlencoded format.
data = urllib.urlencode({'data': results_json})
req = urllib2.Request(url + SEND_RESULTS_PATH, data)
try:
oauth_token = token_generator_callback()
req.headers['Authorization'] = 'Bearer %s' % oauth_token
urllib2.urlopen(req, timeout=60 * 5)
except (urllib2.HTTPError, urllib2.URLError, httplib.HTTPException):
error = traceback.format_exc()
if 'HTTPError: 400' in error:
# If the remote app rejects the JSON, it's probably malformed,
# so we don't want to retry it.
raise SendResultsFatalException('Discarding JSON, error:\n%s' % error)
raise SendResultsRetryException(error)
def _SendHistogramJson(url, histogramset_json, token_generator_callback):
"""POST a HistogramSet JSON to the Performance Dashboard.
Args:
url: URL of Performance Dashboard instance, e.g.
"https://chromeperf.appspot.com".
histogramset_json: JSON string that contains a serialized HistogramSet.
For |token_generator_callback|, see SendResults's
documentation.
Returns:
None if successful, or an error string if there were errors.
"""
try:
oauth_token = token_generator_callback()
data = zlib.compress(histogramset_json)
headers = {
'Authorization': 'Bearer %s' % oauth_token,
'User-Agent': 'perf-uploader/1.0'
}
http = httplib2.Http()
response, _ = http.request(
url + SEND_HISTOGRAMS_PATH, method='POST', body=data, headers=headers)
# A 500 is returned on an exception on the dashboard side (timeout, etc.).
# The dashboard can also send back 400 and 403; we could recover from 403
# (auth error), but 400 generally means malformed data.
if response.status in (403, 500):
raise SendResultsRetryException('HTTP Response %d: %s' % (
response.status, response.reason))
elif response.status != 200:
raise SendResultsFatalException('HTTP Response %d: %s' % (
response.status, response.reason))
except httplib.ResponseNotReady:
raise SendResultsRetryException(traceback.format_exc())
except httplib2.HttpLib2Error:
raise SendResultsRetryException(traceback.format_exc())
| 36.903101
| 80
| 0.700924
|
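SendResults above wraps each upload in a retry loop that doubles its wait after every recoverable failure and distinguishes retryable from fatal errors. The following is a stripped-down sketch of that pattern, independent of the dashboard-specific plumbing; every name in it is made up for illustration.

import logging
import time


class RetryableUploadError(Exception):
    """Stand-in for a recoverable upload failure (cf. SendResultsRetryException)."""


def upload_with_backoff(send_once, payload, num_retries=4, initial_wait=15):
    """Call send_once(payload); on a retryable error wait, double the wait, retry."""
    wait = initial_wait
    for attempt in range(1, num_retries + 1):
        try:
            send_once(payload)
            return True
        except RetryableUploadError as exc:
            logging.error('Attempt %d of %d failed: %s', attempt, num_retries, exc)
            time.sleep(wait)
            wait *= 2
        except Exception:
            # Anything unexpected is treated as fatal, mirroring SendResults.
            logging.exception('Fatal upload error, giving up')
            return False
    return False

In SendResults the retryable case corresponds to SendResultsRetryException (HTTP 403/500, ResponseNotReady, HttpLib2Error) and the fatal case to SendResultsFatalException (for example HTTP 400).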
e13b9b79935001d5b772a03f4ffb6b20ce88683f
| 7,495
|
py
|
Python
|
train.py
|
agupta-git/Abstractive-text-summarizer
|
ee1268efed505a9e4fc68bf84a994cd8edd3fb9c
|
[
"MIT"
] | null | null | null |
train.py
|
agupta-git/Abstractive-text-summarizer
|
ee1268efed505a9e4fc68bf84a994cd8edd3fb9c
|
[
"MIT"
] | null | null | null |
train.py
|
agupta-git/Abstractive-text-summarizer
|
ee1268efed505a9e4fc68bf84a994cd8edd3fb9c
|
[
"MIT"
] | null | null | null |
import time
import numpy as np
import pandas as pd;
start = time.perf_counter()
import tensorflow as tf
import argparse
import pickle
import os
from model import Model
from utils import build_dict, build_dataset, batch_iter
import io
import warnings
warnings.filterwarnings("ignore")
# The next 2 lines suppress TensorFlow error and info verbosity; comment them out or change the logging levels for more output.
tf.logging.set_verbosity(tf.logging.FATAL)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def add_arguments(parser):
parser.add_argument("--num_hidden", type=int, default=150, help="Network size.")
parser.add_argument("--num_layers", type=int, default=2, help="Network depth.")
parser.add_argument("--beam_width", type=int, default=50, help="Beam width for beam search decoder.")
parser.add_argument("--glove", action="store_true", help="Use glove .")
parser.add_argument("--embedding_size", type=int, default=300, help="Word embedding size.")
parser.add_argument("--learning_rate", type=float, default=1e-2, help="Anit's Learning rate.")
parser.add_argument("--batch_size", type=int, default=32, help="Batch size.")
parser.add_argument("--num_epochs", type=int, default=100, help="Number of epochs.")
parser.add_argument("--keep_prob", type=float, default=0.8, help="Dropout keep prob.")
parser.add_argument("--toy", action="store_true", help="Use only 500 samples of data")
parser.add_argument("--with_model", action="store_true", help="Continue from previously saved model")
saveAccuracy=True;
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
with open("args.pickle", "wb") as f:
pickle.dump(args, f)
if not os.path.exists("saved_model"):
os.mkdir("saved_model")
else:
if args.with_model:
old_model_checkpoint_path = open('saved_model/checkpoint', 'r')
old_model_checkpoint_path = "".join(["saved_model/",old_model_checkpoint_path.read().splitlines()[0].split('"')[1] ])
print("Building dictionary...")
word_dict, reversed_dict, article_max_len, summary_max_len = build_dict("train", args.toy)
print("Loading training dataset...")
train_x, train_y = build_dataset("train", word_dict, article_max_len, summary_max_len, args.toy)
def tensorboard_comp_graph(tf,sess):
if not os.path.exists('summaries'):
os.mkdir('summaries')
if not os.path.exists(os.path.join('summaries','fourth')):
os.mkdir(os.path.join('summaries','fourth'))
tf.summary.FileWriter(os.path.join('summaries','fourth'),sess.graph)
# def tensorboard_scalar_graph(tf,sess):
# if not os.path.exists('GRAPHS'):
# os.mkdir('GRAPHS')
# if not os.path.exists(os.path.join('GRAPHS','fifth')):
# os.mkdir(os.path.join('GRAPHS','fifth'))
#
# writer=tf.summary.FileWriter(os.path.join('GRAPHS','fifth'),sess.graph)
# return writer
#with tf.name_scope('performance'):
# Summaries need to be displayed
# Whenever you need to record the loss, feed the mean loss to this placeholder
#tf_loss_ph=tf.placeholder(tf.float32,shape=None,name='loss_summary')
# Create a scalar summary object for the loss so it can be displayed
#tf_loss_summary=tf.summary.scalar('loss',tf_loss_ph)
# # Whenever you need to record the loss, feed the mean test accuracy to this placeholder
# tf_accuracy_ph=tf.placeholder(tf.float32,shape=None,name='accuracy_summary')
# # Create a scalar summary object for the accuracy so it can be displayed
# tf_accuracy_summary=tf.summary.scalar('accuracy',tf_accuracy_ph)
#performance_summaries=tf.summary.merge([tf_loss_summary])
step=0
loss_dict={}
all_loss=[]
with tf.Session() as sess:
loss_per_epoch=list()
model = Model(reversed_dict, article_max_len, summary_max_len, args)
tensorboard_comp_graph(tf,sess)
print(tf.contrib.slim.model_analyzer.analyze_vars(tf.trainable_variables(),print_info=True))
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables())
if 'old_model_checkpoint_path' in globals():
print("Continuing from previous trained model:" , old_model_checkpoint_path , "...")
saver.restore(sess, old_model_checkpoint_path )
batches = batch_iter(train_x, train_y, args.batch_size, args.num_epochs)
num_batches_per_epoch = (len(train_x) - 1) // args.batch_size + 1
print("\nIteration starts.")
print("Number of batches per epoch :", num_batches_per_epoch)
for batch_x, batch_y in batches:
batch_x_len = list(map(lambda x: len([y for y in x if y != 0]), batch_x))
batch_decoder_input = list(map(lambda x: [word_dict["<s>"]] + list(x), batch_y))
batch_decoder_len = list(map(lambda x: len([y for y in x if y != 0]), batch_decoder_input))
batch_decoder_output = list(map(lambda x: list(x) + [word_dict["</s>"]], batch_y))
batch_decoder_input = list(
map(lambda d: d + (summary_max_len - len(d)) * [word_dict["<padding>"]], batch_decoder_input))
batch_decoder_output = list(
map(lambda d: d + (summary_max_len - len(d)) * [word_dict["<padding>"]], batch_decoder_output))
train_feed_dict = {
model.batch_size: len(batch_x),
model.X: batch_x,
model.X_len: batch_x_len,
model.decoder_input: batch_decoder_input,
model.decoder_len: batch_decoder_len,
model.decoder_target: batch_decoder_output
}
#writer_main=tensorboard_scalar_graph(tf,sess)
#writer_loss=tensorboard_scalar_graph(tf,sess)
#with tf.device('/gpu:0'):
_,step,loss=sess.run([model.update,model.global_step,model.loss],
feed_dict=train_feed_dict)
loss_per_epoch.append(loss)
if step % 1000 == 0:
print("step {0}: loss = {1}".format(step, loss))
if step % num_batches_per_epoch == 0:
hours, rem = divmod(time.perf_counter() - start, 3600)
minutes, seconds = divmod(rem, 60)
saver.save(sess, "./saved_model/model.ckpt", global_step=step)
print(" Epoch {0}: Model is saved.".format(step // num_batches_per_epoch),
"Elapsed: {:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds) , "\n")
avg_loss=np.mean(loss_per_epoch)
loss_dict={"loss":avg_loss, "epoch":(step // num_batches_per_epoch)}
all_loss.append(loss_dict)
# Execute the summaries defined above
#summ=sess.run(performance_summaries,feed_dict={tf_loss_ph:avg_loss})
# Write the obtained summaries to the file, so they can be displayed
#writer_loss.add_summary(summ,(step // num_batches_per_epoch))
#summary=tf.Summary(value=[tf.Summary.Value(tag="Loss",simple_value=loss)])
#writer_main.add_summary(summary,global_step=(step // num_batches_per_epoch))
saver.save(sess,"summaries/fourth//model.ckpt",global_step=step)
weights=sess.run([model.embeddings],feed_dict=train_feed_dict)[0]
out_v=io.open('summaries/fourth/vecs.tsv','w',encoding='utf-8')
out_m=io.open('summaries/fourth/meta.tsv','w',encoding='utf-8')
for word_num in range(1,model.vocabulary_size):
word=reversed_dict[word_num]
embeddings=weights[word_num]
out_m.write(word + '\n')
out_v.write('\t'.join(str(x) for x in embeddings) + '\n')
out_v.close()
out_m.close()
df=pd.DataFrame(all_loss)
df.to_csv("loss.csv")
| 43.323699
| 125
| 0.68459
|
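The inner training loop above prepends <s> to the decoder input, appends </s> to the decoder target, pads both to summary_max_len, and measures decoder length by counting non-zero ids. A small stand-alone illustration of that preprocessing follows; the token ids are invented (the real ones come from build_dict()), and the padding id is assumed to be 0, as the non-zero length count implies.

# Hypothetical vocabulary ids; in the real script these come from build_dict().
word_dict = {'<padding>': 0, '<s>': 1, '</s>': 2, 'cats': 3, 'sleep': 4}
summary_max_len = 6

summary_ids = [3, 4]  # token ids for a two-word summary

decoder_input = [word_dict['<s>']] + summary_ids    # [1, 3, 4]
decoder_output = summary_ids + [word_dict['</s>']]  # [3, 4, 2]

# Length is counted before padding, exactly as in the loop above.
decoder_len = len([t for t in decoder_input if t != 0])  # 3


def pad(seq):
    return seq + [word_dict['<padding>']] * (summary_max_len - len(seq))


decoder_input, decoder_output = pad(decoder_input), pad(decoder_output)
# decoder_input  -> [1, 3, 4, 0, 0, 0]
# decoder_output -> [3, 4, 2, 0, 0, 0]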
9b4dc8105f625fc77ac66b2c70c4be429692248a
| 24
|
py
|
Python
|
RNAPuzzles/rnapuzzles/views/metrics/__init__.py
|
whinyadventure/RNA-Puzzles
|
bbd147e1a0748a77b5e3424a93ad57bb430b5a0e
|
[
"Apache-2.0"
] | null | null | null |
RNAPuzzles/rnapuzzles/views/metrics/__init__.py
|
whinyadventure/RNA-Puzzles
|
bbd147e1a0748a77b5e3424a93ad57bb430b5a0e
|
[
"Apache-2.0"
] | 26
|
2019-10-08T11:11:25.000Z
|
2022-03-12T00:52:30.000Z
|
RNAPuzzles/rnapuzzles/views/metrics/__init__.py
|
whinyadventure/RNA-Puzzles
|
bbd147e1a0748a77b5e3424a93ad57bb430b5a0e
|
[
"Apache-2.0"
] | 1
|
2020-05-11T18:51:04.000Z
|
2020-05-11T18:51:04.000Z
|
from .calculate import *
| 24
| 24
| 0.791667
|
64ceef1aef17c3b54f7222294eff0d7fcaca521d
| 718
|
py
|
Python
|
rest-service/manager_rest/rest/resources_v3_1/aria/__init__.py
|
yeshess/cloudify-manager
|
04dd199ce7df54355b87e9594f9db9fb1582924b
|
[
"Apache-2.0"
] | null | null | null |
rest-service/manager_rest/rest/resources_v3_1/aria/__init__.py
|
yeshess/cloudify-manager
|
04dd199ce7df54355b87e9594f9db9fb1582924b
|
[
"Apache-2.0"
] | null | null | null |
rest-service/manager_rest/rest/resources_v3_1/aria/__init__.py
|
yeshess/cloudify-manager
|
04dd199ce7df54355b87e9594f9db9fb1582924b
|
[
"Apache-2.0"
] | null | null | null |
#########
# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . aria_cli import * # noqa
| 42.235294
| 79
| 0.688022
|
95361de8805fcb8bfdfe565e70909a45eed95087
| 34
|
py
|
Python
|
timerdec/__init__.py
|
desmoteo/timerdec
|
bb5433335a64a21c4dbe206c2d56303c4b843e62
|
[
"MIT"
] | 1
|
2020-11-10T10:19:31.000Z
|
2020-11-10T10:19:31.000Z
|
timerdec/__init__.py
|
desmoteo/timerdec
|
bb5433335a64a21c4dbe206c2d56303c4b843e62
|
[
"MIT"
] | null | null | null |
timerdec/__init__.py
|
desmoteo/timerdec
|
bb5433335a64a21c4dbe206c2d56303c4b843e62
|
[
"MIT"
] | null | null | null |
from .shared_data import *
init()
| 11.333333
| 26
| 0.735294
|
70899eb1e91ea7f96c4290fc05177da43222cdda
| 491
|
py
|
Python
|
bokeh/models/actions.py
|
timelyportfolio/bokeh
|
a976a85535cf137c6238ce9e90b41ab14ae8ce22
|
[
"BSD-3-Clause"
] | 1
|
2021-11-07T18:55:59.000Z
|
2021-11-07T18:55:59.000Z
|
bokeh/models/actions.py
|
timelyportfolio/bokeh
|
a976a85535cf137c6238ce9e90b41ab14ae8ce22
|
[
"BSD-3-Clause"
] | null | null | null |
bokeh/models/actions.py
|
timelyportfolio/bokeh
|
a976a85535cf137c6238ce9e90b41ab14ae8ce22
|
[
"BSD-3-Clause"
] | 1
|
2021-08-01T08:38:53.000Z
|
2021-08-01T08:38:53.000Z
|
""" Client-side interactivity. """
from __future__ import absolute_import
from ..plot_object import PlotObject
from ..properties import String
class Action(PlotObject):
""" Base class for interactive actions. """
class OpenURL(Action):
""" Open a URL in a new tab or window (browser dependent). """
url = String("http://", help="""
The URL to direct the web browser to. This can be a template string,
which will be formatted with data from the data source.
""")
| 27.277778
| 72
| 0.692464
|
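OpenURL takes a template URL that is filled in from the data source of the glyph the user tapped. The actions module above comes from an old Bokeh release built on PlotObject; the snippet below shows the equivalent, commonly documented pattern from later Bokeh versions and is an approximation, not code taken from this file.

from bokeh.models import ColumnDataSource, OpenURL, TapTool
from bokeh.plotting import figure, show

source = ColumnDataSource(data=dict(
    x=[1, 2, 3],
    y=[3, 1, 2],
    url=['http://example.com/a', 'http://example.com/b', 'http://example.com/c'],
))

p = figure(tools='tap', title='Click a point to open its URL')
p.circle('x', 'y', size=20, source=source)

# "@url" is templated per point from the tapped glyph's data source.
p.select(type=TapTool).callback = OpenURL(url='@url')
show(p)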
09dd3209c22aabe351c71bcad64bd948faeca1be
| 8,178
|
py
|
Python
|
app/filter.py
|
thiwup/thiwup-search
|
2eba31032ab44f34bef6bf975a1ab69f49c8b0b4
|
[
"MIT"
] | null | null | null |
app/filter.py
|
thiwup/thiwup-search
|
2eba31032ab44f34bef6bf975a1ab69f49c8b0b4
|
[
"MIT"
] | null | null | null |
app/filter.py
|
thiwup/thiwup-search
|
2eba31032ab44f34bef6bf975a1ab69f49c8b0b4
|
[
"MIT"
] | null | null | null |
from app.request import VALID_PARAMS
from app.utils.filter_utils import *
from bs4.element import ResultSet
from cryptography.fernet import Fernet
import re
import urllib.parse as urlparse
from urllib.parse import parse_qs
class Filter:
def __init__(self, user_keys: dict, mobile=False, config=None):
if config is None:
config = {}
self.near = config['near'] if 'near' in config else ''
self.dark = config['dark'] if 'dark' in config else False
self.nojs = config['nojs'] if 'nojs' in config else False
self.new_tab = config['new_tab'] if 'new_tab' in config else False
self.alt_redirect = config['alts'] if 'alts' in config else False
self.mobile = mobile
self.user_keys = user_keys
self.main_divs = ResultSet('')
self._elements = 0
def __getitem__(self, name):
return getattr(self, name)
@property
def elements(self):
return self._elements
def reskin(self, page):
# Aesthetic only re-skinning
if self.dark:
page = page.replace(
'fff', '000').replace(
'202124', 'ddd').replace(
'1967D2', '3b85ea')
return page
def encrypt_path(self, msg, is_element=False):
# Encrypts path to avoid plaintext results in logs
if is_element:
# Element paths are encrypted separately from text, to allow key
# regeneration once all items have been served to the user
enc_path = Fernet(
self.user_keys['element_key']
).encrypt(msg.encode()).decode()
self._elements += 1
return enc_path
return Fernet(
self.user_keys['text_key']
).encrypt(msg.encode()).decode()
def clean(self, soup):
self.main_divs = soup.find('div', {'id': 'main'})
self.remove_ads()
self.fix_question_section()
self.update_styling(soup)
for img in [_ for _ in soup.find_all('img') if 'src' in _.attrs]:
self.update_element_src(img, 'image/png')
for audio in [_ for _ in soup.find_all('audio') if 'src' in _.attrs]:
self.update_element_src(audio, 'audio/mpeg')
for link in soup.find_all('a', href=True):
self.update_link(link)
input_form = soup.find('form')
if input_form is not None:
input_form['method'] = 'POST'
# Ensure no extra scripts passed through
for script in soup('script'):
script.decompose()
# Update default footer and header
footer = soup.find('footer')
if footer:
# Remove divs that have multiple links beyond just page navigation
[_.decompose() for _ in footer.find_all('div', recursive=False)
if len(_.find_all('a', href=True)) > 3]
header = soup.find('header')
if header:
header.decompose()
return soup
def remove_ads(self):
if not self.main_divs:
return
for div in [_ for _ in self.main_divs.find_all('div', recursive=True)]:
div_ads = [_ for _ in div.find_all('span', recursive=True)
if has_ad_content(_.text)]
_ = div.decompose() if len(div_ads) else None
def fix_question_section(self):
if not self.main_divs:
return
question_divs = [_ for _ in self.main_divs.find_all(
'div', recursive=False
) if len(_.find_all('h2')) > 0]
if len(question_divs) == 0:
return
# Wrap section in details element to allow collapse/expand
details = BeautifulSoup(features='html.parser').new_tag('details')
summary = BeautifulSoup(features='html.parser').new_tag('summary')
summary.string = question_divs[0].find('h2').text
question_divs[0].find('h2').decompose()
details.append(summary)
question_divs[0].wrap(details)
for question_div in question_divs:
questions = [_ for _ in question_div.find_all(
'div', recursive=True
) if _.text.endswith('?')]
for question in questions:
question['style'] = 'padding: 10px; font-style: italic;'
def update_element_src(self, element, mime):
src = element['src']
if src.startswith('//'):
src = 'https:' + src
if src.startswith(LOGO_URL):
# Re-brand with Whoogle logo
element['src'] = 'static/img/logo.png'
element['style'] = 'height:40px;width:162px'
return
elif src.startswith(GOOG_IMG) or GOOG_STATIC in src:
element['src'] = BLANK_B64
return
element['src'] = 'element?url=' + self.encrypt_path(
src,
is_element=True) + '&type=' + urlparse.quote(mime)
def update_styling(self, soup):
# Remove unnecessary button(s)
for button in soup.find_all('button'):
button.decompose()
# Remove svg logos
for svg in soup.find_all('svg'):
svg.decompose()
# Update logo
logo = soup.find('a', {'class': 'l'})
if logo and self.mobile:
logo['style'] = ('display:flex; justify-content:center; '
'align-items:center; color:#685e79; '
'font-size:18px; ')
# Fix search bar length on mobile
try:
search_bar = soup.find('header').find('form').find('div')
search_bar['style'] = 'width: 100%;'
except AttributeError:
pass
def update_link(self, link):
# Replace href with only the intended destination (no "utm" type tags)
href = link['href'].replace('https://www.google.com', '')
if 'advanced_search' in href or 'tbm=shop' in href:
# FIXME: The "Shopping" tab requires further filtering (see #136)
# Temporarily removing all links to that tab for now.
link.decompose()
return
elif self.new_tab:
link['target'] = '_blank'
result_link = urlparse.urlparse(href)
query_link = parse_qs(
result_link.query
)['q'][0] if '?q=' in href else ''
if query_link.startswith('/'):
# Internal google links (i.e. mail, maps, etc) should still
# be forwarded to Google
link['href'] = 'https://google.com' + query_link
elif '/search?q=' in href:
# "li:1" implies the query should be interpreted verbatim,
# which is accomplished by wrapping the query in double quotes
if 'li:1' in href:
query_link = '"' + query_link + '"'
new_search = 'search?q=' + self.encrypt_path(query_link)
query_params = parse_qs(urlparse.urlparse(href).query)
for param in VALID_PARAMS:
if param not in query_params:
continue
param_val = query_params[param][0]
new_search += '&' + param + '=' + param_val
link['href'] = new_search
elif 'url?q=' in href:
# Strip unneeded arguments
link['href'] = filter_link_args(query_link)
# Add no-js option
if self.nojs:
gen_nojs(link)
else:
link['href'] = href
# Replace link location if "alts" config is enabled
if self.alt_redirect:
# Search and replace all link descriptions
# with alternative location
link['href'] = get_site_alt(link['href'])
link_desc = link.find_all(
text=re.compile('|'.join(SITE_ALTS.keys())))
if len(link_desc) == 0:
return
# Replace link destination
link_desc[0].replace_with(get_site_alt(link_desc[0]))
| 36.026432
| 80
| 0.549034
|
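encrypt_path above relies on cryptography's Fernet to keep query text and element URLs out of server logs, and the separate element key exists so it can be rotated once all encrypted element paths have been served. A minimal round-trip sketch of that primitive follows; the keys and the URL are placeholders, not values from the application.

from cryptography.fernet import Fernet, InvalidToken

# One key per concern, as in Filter.__init__'s user_keys dict.
user_keys = {'text_key': Fernet.generate_key(), 'element_key': Fernet.generate_key()}

token = Fernet(user_keys['element_key']).encrypt(b'https://example.com/logo.png').decode()
# The ciphertext is what ends up in the proxied 'element?url=...' links.

plaintext = Fernet(user_keys['element_key']).decrypt(token.encode()).decode()
assert plaintext == 'https://example.com/logo.png'

# Rotating the element key invalidates previously issued element tokens.
user_keys['element_key'] = Fernet.generate_key()
try:
    Fernet(user_keys['element_key']).decrypt(token.encode())
except InvalidToken:
    print('old element links can no longer be decrypted')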
466daf204a1ae86a7f37107342046305ea7249fc
| 15,360
|
py
|
Python
|
tensorflow/contrib/model_pruning/python/layers/layers.py
|
tianyapiaozi/tensorflow
|
fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a
|
[
"Apache-2.0"
] | 71
|
2017-05-25T16:02:15.000Z
|
2021-06-09T16:08:08.000Z
|
tensorflow/contrib/model_pruning/python/layers/layers.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 133
|
2017-04-26T16:49:49.000Z
|
2019-10-15T11:39:26.000Z
|
tensorflow/contrib/model_pruning/python/layers/layers.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 31
|
2018-09-11T02:17:17.000Z
|
2021-12-15T10:33:35.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow layers with added variables for parameter masking.
Branched from tensorflow/contrib/layers/python/layers/layers.py
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.model_pruning.python.layers import core_layers as core
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
def _model_variable_getter(getter,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
rename=None,
use_resource=None,
**_):
"""Getter that uses model_variable for compatibility with core layers."""
short_name = name.split('/')[-1]
if rename and short_name in rename:
name_components = name.split('/')
name_components[-1] = rename[short_name]
name = '/'.join(name_components)
return variables.model_variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
collections=collections,
trainable=trainable,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=getter,
use_resource=use_resource)
def _build_variable_getter(rename=None):
"""Build a model variable getter that respects scope getter and renames."""
# VariableScope will nest the getters
def layer_variable_getter(getter, *args, **kwargs):
kwargs['rename'] = rename
return _model_variable_getter(getter, *args, **kwargs)
return layer_variable_getter
def _add_variable_to_collections(variable, collections_set, collections_name):
"""Adds variable (or all its parts) to all collections with that name."""
collections = utils.get_variable_collections(collections_set,
collections_name) or []
variables_list = [variable]
if isinstance(variable, tf_variables.PartitionedVariable):
variables_list = [v for v in variable]
for collection in collections:
for var in variables_list:
if var not in ops.get_collection(collection):
ops.add_to_collection(collection, var)
@add_arg_scope
def masked_convolution(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds an 2D convolution followed by an optional batch_norm layer.
The layer creates a mask variable on top of the weight variable. The input to
the convolution operation is the elementwise multiplication of the mask
  variable and the weight variable.
It is required that 1 <= N <= 3.
`convolution` creates a variable called `weights`, representing the
convolutional kernel, that is convolved (actually cross-correlated) with the
`inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
provided (such as `batch_norm`), it is then applied. Otherwise, if
`normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
  variable would be created and added to the activations. Finally, if
`activation_fn` is not `None`, it is applied to the activations as well.
Performs atrous convolution with input stride/dilation rate equal to `rate`
if a value > 1 for any dimension of `rate` is specified. In this case
`stride` values != 1 are not supported.
Args:
inputs: A Tensor of rank N+2 of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
num_outputs: Integer, the number of output filters.
kernel_size: A sequence of N positive integers specifying the spatial
      dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
stride: A sequence of N positive integers specifying the stride at which to
compute output. Can be a single integer to specify the same value for all
spatial dimensions. Specifying any `stride` value != 1 is incompatible
with specifying any `rate` value != 1.
padding: One of `"VALID"` or `"SAME"`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
rate: A sequence of N positive integers specifying the dilation rate to use
for atrous convolution. Can be a single integer to specify the same
value for all spatial dimensions. Specifying any `rate` value != 1 is
incompatible with specifying any `stride` value != 1.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Defaults to None for no normalizer function.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If `data_format` is invalid.
ValueError: Both 'rate' and `stride` are not uniformly 1.
"""
if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
raise ValueError('Invalid data_format: %r' % (data_format,))
layer_variable_getter = _build_variable_getter({
'bias': 'biases',
'kernel': 'weights'
})
with variable_scope.variable_scope(
scope, 'Conv', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if input_rank == 3:
raise ValueError('Sparse Convolution not supported for input with rank',
input_rank)
elif input_rank == 4:
layer_class = core.MaskedConv2D
elif input_rank == 5:
raise ValueError('Sparse Convolution not supported for input with rank',
input_rank)
else:
raise ValueError('Sparse Convolution not supported for input with rank',
input_rank)
if data_format is None or data_format == 'NHWC':
df = 'channels_last'
elif data_format == 'NCHW':
df = 'channels_first'
else:
raise ValueError('Unsupported data format', data_format)
layer = layer_class(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
dilation_rate=rate,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.use_bias:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
masked_conv2d = masked_convolution
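# A minimal usage sketch of masked_conv2d above, assuming TF 1.x graph mode; the
# placeholder shape and scope name are arbitrary and not taken from this file.
def _masked_conv2d_example():
  from tensorflow.python.framework import dtypes
  from tensorflow.python.ops import array_ops
  images = array_ops.placeholder(dtypes.float32, [None, 28, 28, 3])
  # Creates `weights`/`biases` plus the pruning mask and threshold variables.
  return masked_conv2d(images, num_outputs=16, kernel_size=3, scope='conv1')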
@add_arg_scope
def masked_fully_connected(
inputs,
num_outputs,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a sparse fully connected layer. The weight matrix is masked.
`fully_connected` creates a variable called `weights`, representing a fully
connected weight matrix, which is multiplied by the `inputs` to produce a
`Tensor` of hidden units. If a `normalizer_fn` is provided (such as
`batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
None and a `biases_initializer` is provided then a `biases` variable would be
  created and added to the hidden units. Finally, if `activation_fn` is not `None`,
it is applied to the hidden units as well.
Note: that if `inputs` have a rank greater than 2, then `inputs` is flattened
prior to the initial matrix multiply by `weights`.
Args:
inputs: A tensor of at least rank 2 and static value for the last dimension;
i.e. `[batch_size, depth]`, `[None, None, None, channels]`.
num_outputs: Integer or long, the number of output units in the layer.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Defaults to None for no normalizer function.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
The tensor variable representing the result of the series of operations.
Raises:
ValueError: If x has rank less than 2 or if its last dimension is not set.
"""
if not isinstance(num_outputs, six.integer_types):
raise ValueError('num_outputs should be int or long, got %s.' %
(num_outputs,))
layer_variable_getter = _build_variable_getter({
'bias': 'biases',
'kernel': 'weights'
})
with variable_scope.variable_scope(
scope,
'fully_connected', [inputs],
reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
layer = core.MaskedFullyConnected(
units=num_outputs,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
# Apply normalizer function / layer.
if normalizer_fn is not None:
if not normalizer_params:
normalizer_params = {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
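# A minimal usage sketch of masked_fully_connected above, assuming TF 1.x graph
# mode; layer sizes and scope names are arbitrary.
def _masked_fully_connected_example():
  from tensorflow.python.framework import dtypes
  from tensorflow.python.ops import array_ops
  x = array_ops.placeholder(dtypes.float32, [None, 784])
  hidden = masked_fully_connected(x, 256, scope='fc1')
  # No activation on the output layer; masks are created for both weight matrices.
  return masked_fully_connected(hidden, 10, activation_fn=None, scope='logits')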
| 42.197802
| 80
| 0.689518
|
a1b7cfef093e1b4f95235b8635f41fa61cd79d9d
| 2,163
|
py
|
Python
|
tensorflow/python/data/experimental/kernel_tests/serialization/zip_dataset_serialization_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 78
|
2020-08-04T12:36:25.000Z
|
2022-03-25T04:23:40.000Z
|
tensorflow/python/data/experimental/kernel_tests/serialization/zip_dataset_serialization_test.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 203
|
2019-06-14T23:53:10.000Z
|
2022-02-10T02:27:23.000Z
|
tensorflow/python/data/experimental/kernel_tests/serialization/zip_dataset_serialization_test.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 66
|
2020-05-15T10:05:12.000Z
|
2022-02-14T07:28:18.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ZipDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.platform import test
class ZipDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase,
parameterized.TestCase):
def _build_dataset(self, arr):
components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array(arr)
]
datasets = [
dataset_ops.Dataset.from_tensor_slices(component)
for component in components
]
return dataset_ops.Dataset.zip((datasets[0], (datasets[1], datasets[2])))
@combinations.generate(test_base.default_test_combinations())
def testCore(self):
# Equal length components
arr = [37.0, 38.0, 39.0, 40.0]
num_outputs = len(arr)
self.run_core_tests(lambda: self._build_dataset(arr), num_outputs)
# Variable length components
diff_size_arr = [1.0, 2.0]
self.run_core_tests(lambda: self._build_dataset(diff_size_arr), 2)
if __name__ == "__main__":
test.main()
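# A minimal illustration of the element structure Dataset.zip produces, as used
# in _build_dataset above (nested tuples). Assumes TF 2.x eager execution.
def _peek_zip_structure():
  ds = dataset_ops.Dataset.zip(
      (dataset_ops.Dataset.range(3), dataset_ops.Dataset.range(10, 13)))
  return [(int(a), int(b)) for a, b in ds]  # [(0, 10), (1, 11), (2, 12)]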
| 36.661017
| 106
| 0.718447
|
8f0e621fd53e2034d643c65d5028f03d35a4dbd1
| 2,590
|
py
|
Python
|
source/mmmtrainerconfig.py
|
AI-Guru/MMM-JSB
|
2cf0faeedc402b4574f292712632855675ae4037
|
[
"Apache-2.0"
] | 72
|
2021-05-10T11:12:24.000Z
|
2022-03-30T17:49:06.000Z
|
source/mmmtrainerconfig.py
|
AI-Guru/MMM-JSB
|
2cf0faeedc402b4574f292712632855675ae4037
|
[
"Apache-2.0"
] | 3
|
2021-06-12T10:10:44.000Z
|
2022-01-20T16:53:37.000Z
|
source/mmmtrainerconfig.py
|
AI-Guru/MMM-JSB
|
2cf0faeedc402b4574f292712632855675ae4037
|
[
"Apache-2.0"
] | 9
|
2021-05-10T12:21:38.000Z
|
2022-03-10T14:37:16.000Z
|
# Copyright 2021 Tristan Behrens.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import os
from source import logging
logger = logging.create_logger("mmmtrainerconfig")
class MMMTrainerBaseConfig:
def __init__(
self,
framework="pytorch",
tokenizer_path="",
dataset_train_files=[],
dataset_validate_files=[],
pad_length=768,
shuffle_buffer_size=10000,
batch_size=8,
epochs=10,
n_head=8,
n_layer=6,
n_embd=512,
n_positions=1024,
n_ctx=1024
):
# Check if the framework is valid.
valid_frameworks = ["pytorch"]
if framework not in valid_frameworks:
error_string = f"Invalid framework {framework}. Expected one of {valid_frameworks}."
logger.error(error_string)
raise Exception(error_string)
# Check if any dataset files are missing.
        missing_dataset_files = [file for file in dataset_train_files + dataset_validate_files if not os.path.exists(file)]
if len(missing_dataset_files) != 0:
error_string = f"Missing dataset files {missing_dataset_files}."
logger.error(error_string)
raise Exception(error_string)
assert pad_length <= n_positions
self.framework = framework
self.tokenizer_path = tokenizer_path
self.dataset_train_files = dataset_train_files
self.dataset_validate_files = dataset_validate_files
self.pad_length = pad_length
self.shuffle_buffer_size = shuffle_buffer_size
self.batch_size = batch_size
self.epochs = epochs
self.n_head = n_head
self.n_layer = n_layer
self.n_embd = n_embd
self.n_positions = n_positions
self.n_ctx = n_ctx
class JSBTrackConfig(MMMTrainerBaseConfig):
def __init__(self, **kwargs):
super().__init__(**kwargs)
class JSBBarConfig(MMMTrainerBaseConfig):
def __init__(self, **kwargs):
super().__init__(**kwargs)
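# A minimal usage sketch, assuming the tokenizer path below is a placeholder
# (tokenizer_path is not existence-checked above) and that empty dataset lists
# are acceptable, since they skip the missing-file check.
def _example_config():
    config = JSBTrackConfig(
        tokenizer_path="tokenizer.json",  # placeholder path
        dataset_train_files=[],
        dataset_validate_files=[],
        pad_length=768,
        epochs=5,
    )
    return config.framework, config.n_layer, config.n_embd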
| 31.585366
| 124
| 0.672587
|
f5a8b5fe0faa2b9e88d94eb5a57b23dd65002fa3
| 1,267
|
py
|
Python
|
cassava/config.py
|
p-s-vishnu/cassava-leaf-disease-classification
|
41f26cb6b87f27f49db9a4d5dadcebb153b250a5
|
[
"MIT"
] | 7
|
2021-04-22T08:12:40.000Z
|
2022-03-14T10:27:43.000Z
|
cassava/config.py
|
p-s-vishnu/cassava-leaf-disease-classification
|
41f26cb6b87f27f49db9a4d5dadcebb153b250a5
|
[
"MIT"
] | 9
|
2021-04-17T16:19:24.000Z
|
2021-06-07T11:30:18.000Z
|
cassava/config.py
|
p-s-vishnu/cassava-leaf-disease-classification
|
41f26cb6b87f27f49db9a4d5dadcebb153b250a5
|
[
"MIT"
] | 3
|
2021-05-10T06:23:38.000Z
|
2022-03-14T10:27:49.000Z
|
"""Project configurations
"""
TRAIN_PATH = ""
TEST_PATH = ""
DEBUG = False
APEX = False
PRINT_FREQ = 100
NUM_WORKERS = 4
MODEL_NAME = "tf_efficientnet_b4_ns"
SIZE = 512
# ['ReduceLROnPlateau', 'CosineAnnealingLR', 'CosineAnnealingWarmRestarts']
SCHEDULER = "CosineAnnealingWarmRestarts"
CRITERION = "BiTemperedLoss" # ['CrossEntropyLoss', LabelSmoothing', 'FocalLoss',
# 'FocalCosineLoss', 'SymmetricCrossEntropyLoss', 'BiTemperedLoss', 'TaylorCrossEntropyLoss']
EPOCHS = 10
# factor=0.2 # ReduceLROnPlateau
# patience=4 # ReduceLROnPlateau
# eps=1e-6 # ReduceLROnPlateau
# T_max=10 # CosineAnnealingLR
T_0 = 10 # CosineAnnealingWarmRestarts
LR = 1e-4
MIN_LR = 1e-6
BATCH_SIZE = 12
WEIGHT_DECAY = 1e-6
GRADIENT_ACCUM_STEPS = 1
MAX_GRAD_NORM = 1000
SEED = 42
TARGET_SIZE = 5 # Num of Classes
LABEL_MAP = {
0: "Cassava Bacterial Blight (CBB)",
1: "Cassava Brown Streak Disease (CBSD)",
2: "Cassava Green Mottle (CGM)",
3: "Cassava Mosaic Disease (CMD)",
4: "Healthy",
}
TARGET_COL = "label"
N_FOLD = 5
TRN_FOLD = [0] # Change Values according to the fold which you are training.
TRAIN = True
INFERENCE = False
SMOOTHING = 0.05
# bi-tempered-loss https://www.kaggle.com/c/cassava-leaf-disease-classification/discussion/202017
T1 = 0.3
T2 = 1.0
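# A minimal sketch of one possible way the SCHEDULER/LR constants above could be
# consumed; the real project may wire them differently, and the optimizer passed
# in is a stand-in.
def build_scheduler(optimizer):
    import torch
    if SCHEDULER == "ReduceLROnPlateau":
        return torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, mode="min", factor=0.2, patience=4, eps=1e-6)
    if SCHEDULER == "CosineAnnealingLR":
        return torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=10, eta_min=MIN_LR)
    if SCHEDULER == "CosineAnnealingWarmRestarts":
        return torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
            optimizer, T_0=T_0, eta_min=MIN_LR)
    raise ValueError(f"Unsupported SCHEDULER: {SCHEDULER}")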
| 27.543478
| 97
| 0.73086
|
e1a99bbbcf9566e3579d8b42e83c8dcd7b11767a
| 14,543
|
py
|
Python
|
tensorflow/python/training/gradient_descent_test.py
|
yage99/tensorflow
|
c7fa71b32a3635eb25596ae80d007b41007769c4
|
[
"Apache-2.0"
] | 74
|
2020-07-06T17:11:39.000Z
|
2022-01-28T06:31:28.000Z
|
tensorflow/python/training/gradient_descent_test.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 1,056
|
2019-12-15T01:20:31.000Z
|
2022-02-10T02:06:28.000Z
|
tensorflow/python/training/gradient_descent_test.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 12
|
2020-07-08T07:27:17.000Z
|
2021-12-27T08:54:27.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for GradientDescent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class GradientDescentOptimizerTest(test.TestCase):
def testBasic(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
optimizer = gradient_descent.GradientDescentOptimizer(3.0)
sgd_op = optimizer.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
self.evaluate(var1))
self.assertEqual(0, len(optimizer.variables()))
def testBasicResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
zip([grads0, grads1], [var0, var1]))
# TODO(apassos) calling initialize_resources on all resources here
# doesn't work because the sessions and graph are reused across unit
# tests and this would mean trying to reinitialize variables. Figure out
# a long-term solution for this.
resources.initialize_resources([var0, var1]).run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
self.evaluate(var1))
def testBasicCallableParams(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
lr = lambda: 3.0
sgd_op = gradient_descent.GradientDescentOptimizer(lr).apply_gradients(
zip([grads0, grads1], [var0, var1]))
# TODO(apassos) calling initialize_resources on all resources here
# doesn't work because the sessions and graph are reused across unit
# tests and this would mean trying to reinitialize variables. Figure out
# a long-term solution for this.
resources.initialize_resources([var0, var1]).run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
self.evaluate(var1))
def testMinimizeResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(var0, x) + var1
loss = pred * pred
sgd_op = gradient_descent.GradientDescentOptimizer(1.0).minimize(loss)
# TODO(apassos) calling initialize_resources on all resources here
# doesn't work because the sessions and graph are reused across unit
# tests and this would mean trying to reinitialize variables. Figure out
# a long-term solution for this.
resources.initialize_resources([var0, var1]).run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
np_grad = 2 * np_pred
self.assertAllCloseAccordingToType(
[[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0 - np_grad], self.evaluate(var1))
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
pred += var1
loss = pred * pred
sgd_op = gradient_descent.GradientDescentOptimizer(1.0).minimize(loss)
# TODO(apassos) calling initialize_resources on all resources here
# doesn't work because the sessions and graph are reused across unit
# tests and this would mean trying to reinitialize variables. Figure out
# a long-term solution for this.
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
np_grad = 2 * np_pred
self.assertAllCloseAccordingToType(
[[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0 - np_grad], self.evaluate(var1))
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
lrate = constant_op.constant(3.0)
sgd_op = gradient_descent.GradientDescentOptimizer(
lrate).apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
self.evaluate(var1))
def testGradWrtRef(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
opt = gradient_descent.GradientDescentOptimizer(3.0)
values = [1.0, 3.0]
vars_ = [variables.Variable([v], dtype=dtype) for v in values]
grads_and_vars = opt.compute_gradients(vars_[0] + vars_[1], vars_)
self.evaluate(variables.global_variables_initializer())
for grad, _ in grads_and_vars:
self.assertAllCloseAccordingToType([1.0], self.evaluate(grad))
def testWithGlobalStep(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
global_step = variables.Variable(0, trainable=False)
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params and global_step
self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
self.evaluate(var1))
self.assertAllCloseAccordingToType(1, self.evaluate(global_step))
def testSparseBasic(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(
[0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]),
constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[0.01], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0], [2.0]], self.evaluate(var0))
self.assertAllCloseAccordingToType([[3.0], [4.0]], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([[1.0 - 3.0 * 0.1], [2.0]],
self.evaluate(var0))
self.assertAllCloseAccordingToType([[3.0], [4.0 - 3.0 * 0.01]],
self.evaluate(var1))
def testCapturingInDefunWhileExecutingEagerly(self):
with context.eager_mode():
optimizer = gradient_descent.GradientDescentOptimizer(1.0)
def step():
self.v = resource_variable_ops.ResourceVariable(1.0)
with backprop.GradientTape() as tape:
loss = self.v ** 2
grad = tape.gradient(loss, self.v)
optimizer.apply_gradients([(grad, self.v)])
return self.v.read_value()
compiled_step = function.defun(step)
self.assertEqual(float(step()), -1.0)
self.assertEqual(float(compiled_step()), -1.0)
# This shouldn't fail; in particular, the learning rate tensor should
# be an EagerTensor once again, not a graph Tensor.
self.assertEqual(float(step()), -1.0)
if __name__ == "__main__":
test.main()
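# A minimal numeric sketch of the update rule the assertions above check,
# var <- var - lr * grad, reproduced with numpy for clarity.
def _sgd_update_sketch():
  import numpy as np
  var0, grads0, lr = np.array([1.0, 2.0]), np.array([0.1, 0.1]), 3.0
  return var0 - lr * grads0  # array([0.7, 1.7]), as asserted in testBasic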
| 51.570922
| 80
| 0.645259
|
b1ac46c150ed1c8e80f6b85844a8497e239684bc
| 1,920
|
py
|
Python
|
tests/engine/_tests.py
|
2kodevs/Search-Engine
|
840001f825d9632c6c7a5fd24151b79ca1a9a06b
|
[
"MIT"
] | null | null | null |
tests/engine/_tests.py
|
2kodevs/Search-Engine
|
840001f825d9632c6c7a5fd24151b79ca1a9a06b
|
[
"MIT"
] | null | null | null |
tests/engine/_tests.py
|
2kodevs/Search-Engine
|
840001f825d9632c6c7a5fd24151b79ca1a9a06b
|
[
"MIT"
] | null | null | null |
import unittest
from src import SearchEngine, Indexer
class SearchEngineTestCase(unittest.TestCase):
def setUp(self):
self.engine = SearchEngine('./tests/mocks/animal_corpus', 'cranfield')
def test_vectorize_query(self):
q = 'zorro nutria'
ans = ['nutria', 'zorro']
self.assertEqual(self.engine.vectorize_query(q), ans)
def test_get_weights(self):
q = 'leon nutria zorro'
q = self.engine.vectorize_query(q)
q = list(filter(lambda term: term in self.engine.index['vocabulary'], q))
self.engine.indexer.N = 1
self.engine.indexer.max_freq = [1]
w, wq = self.engine.get_weights(q, 0.5)
ans_w = [
[0.09691001300805642, 0.09691001300805642, 0.09691001300805642, 0.09691001300805642, 0],
[0, 0, 0.3979400086720376, 0, 0.3979400086720376],
[0, 0.07394958320545211, 0.22184874961635637, 0.22184874961635637, 0],
]
self.assertEqual(w, ans_w)
def test_search(self):
q = 'nutria leon'
ranking = self.engine.search(q, 0)
print(ranking)
def test__full_feedback(self):
q = 'nutria leon zorro'
ranking = self.engine.search(q, 0)
print(ranking)
feedback = [
(ranking[0], True),
(ranking[1], True),
(ranking[2], False),
(ranking[3], True),
(ranking[4], False),
]
ranking = self.engine.give_feedback(feedback, 0)
print(ranking)
def test__pseudo_feedback(self):
q = 'nutria leon zorro'
ranking = self.engine.search(q, 0)
print(ranking)
feedback = [
(ranking[0]),
(ranking[1]),
(ranking[2]),
(ranking[3]),
(ranking[4]),
]
ranking = self.engine.give_feedback(feedback, 0, True, 3)
print(ranking)
| 30.47619
| 100
| 0.564583
|
034b06029f28af7b89e6d197d850c19dd5e368a8
| 627
|
py
|
Python
|
Labs/AI and Machine Learning/Cognitive Toolkit/resources/cntk2images.py
|
varunjha089/computerscience
|
bd90079e4a8701e92c9f88f598bfa86203b6cbb7
|
[
"MIT"
] | 24
|
2017-06-08T01:16:28.000Z
|
2017-08-24T06:49:52.000Z
|
Labs/AI and Machine Learning/Cognitive Toolkit/resources/cntk2images.py
|
varunjha089/computerscience
|
bd90079e4a8701e92c9f88f598bfa86203b6cbb7
|
[
"MIT"
] | null | null | null |
Labs/AI and Machine Learning/Cognitive Toolkit/resources/cntk2images.py
|
varunjha089/computerscience
|
bd90079e4a8701e92c9f88f598bfa86203b6cbb7
|
[
"MIT"
] | 6
|
2017-06-08T05:32:27.000Z
|
2019-03-12T02:47:10.000Z
|
#
from os import walk
from PIL import Image
import numpy as np
inputFile = ''
with open(inputFile) as f:
images = f.readlines()
images = [line.strip() for line in images]
w, h = 28, 28
imgcnt = 0
for imageline in images:
dataparts = imageline.split(" |features ")
imagedatastr = dataparts[1].split(" ")
imagedata = np.zeros((h, w), dtype=np.uint8)
    rowIdx = 0
    colIdx = 0
for i in imagedatastr:
imagedata[colIdx, rowIdx] = 255 - int(i)
rowIdx += 1
if rowIdx % h == 0:
rowIdx = 0
colIdx += 1
imgcnt += 1
im = Image.fromarray(imagedata, 'L')
im.save(str(imgcnt) + ".png")
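# A minimal synthetic input line for the loop above, which assumes CNTK
# text-format records of the form "<label block> |features <w*h pixel values>".
# The exact label block shown here is an assumption, not taken from this script.
def _make_example_line(width=28, height=28):
    pixels = " ".join("0" for _ in range(width * height))
    return "|labels 1 |features " + pixels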
| 13.630435
| 45
| 0.617225
|
117efebece96f9d983cc75b11d16a6091fb1eefe
| 4,517
|
py
|
Python
|
tests/test_changelist.py
|
0xTowel/python-perforce
|
249b73b54ed8f49f6e0be60f53c738212ff55aaa
|
[
"MIT"
] | 43
|
2015-06-03T17:46:34.000Z
|
2022-01-21T14:05:19.000Z
|
tests/test_changelist.py
|
0xTowel/python-perforce
|
249b73b54ed8f49f6e0be60f53c738212ff55aaa
|
[
"MIT"
] | 43
|
2015-06-12T06:10:38.000Z
|
2021-02-11T22:26:00.000Z
|
tests/test_changelist.py
|
0xTowel/python-perforce
|
249b73b54ed8f49f6e0be60f53c738212ff55aaa
|
[
"MIT"
] | 7
|
2016-02-18T08:56:13.000Z
|
2020-08-26T22:28:47.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_python-perforce
----------------------------------
Tests for `python-perforce` module.
"""
import os
import unittest
import datetime
import random
import string
import path
import pytest
import six
from perforce import Connection, Changelist
from perforce import errors
TO_ADD = path.path(r"E:\Users\brett\Perforce\p4_unit_tests\p4_test\to_add1.txt")
CL = 23
P4PORT = 'DESKTOP-M97HMBQ:1666'
P4USER = 'p4test'
P4CLIENT = 'p4_unit_tests'
class ChangelistTests(unittest.TestCase):
def setUp(self):
self._conn = Connection(port=P4PORT, client=P4CLIENT, user=P4USER)
# pytest.deprecated_call(self._conn.findChangelist, CL)
def test_changelist(self):
cl = self._conn.findChangelist(CL)
self.assertEqual(cl.description, 'DO NOT COMMIT')
self.assertEqual(len(cl), 2)
self.assertEqual(CL, int(cl))
self.assertEqual(P4CLIENT, cl.client)
self.assertEqual('pending', cl.status)
self.assertEqual(P4USER, cl.user)
self.assertEqual(datetime.datetime(2017, 7, 3, 21, 4, 32), cl.time)
self.assertEqual(repr(cl), '<Changelist {}>'.format(CL))
assert cl[0].depotFile == '//p4_test/edit.txt'
default = self._conn.findChangelist()
with self.assertRaises(TypeError):
'foo' in cl
for r in cl:
pass
cl.description = 'xxx'
self.assertEqual(cl.description, 'xxx')
with self._conn.findChangelist('testing') as cl:
self.assertEqual(cl.description, 'testing')
rev = self._conn.ls('//p4_test/synced.txt')[0]
cl.append(rev)
try:
cl.append(r'C:/tmp/foo.txt')
except errors.RevisionError:
pass
cl.append(TO_ADD)
self.assertEqual(len(cl), 2)
self.assertTrue(cl.isDirty)
cl = self._conn.findChangelist('testing')
self.assertEqual(len(cl), 2)
rev = self._conn.ls('//p4_test/synced.txt')[0]
rev.revert()
cl.query()
self.assertEqual(len(cl), 1)
cl.revert()
self.assertEqual(len(cl), 0)
cl.delete()
cl = self._conn.findChangelist('submitting')
with cl:
rev = self._conn.ls('//p4_test/submit.txt')[0]
cl.append(rev)
with open(rev.clientFile, 'w+') as fh:
s = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(64))
fh.write(s)
cl.submit()
def test_reopen():
c = Connection(port=P4PORT, client=P4CLIENT, user=P4USER)
rev = c.ls('//p4_test/synced.txt')[0]
default = c.findChangelist()
default.append(rev)
default.save()
assert len(default) == 1
cl = c.findChangelist('testing')
cl.append(rev)
cl.save()
assert len(cl) == 1
cl2 = c.findChangelist('testing2')
cl2.append(rev)
cl2.save()
assert len(cl2) == 1
#assert len(cl) == 0
rev.revert()
assert len(cl2) == 0
cl.delete()
cl2.delete()
def test_descriptions():
c = Connection(port=P4PORT, client=P4CLIENT, user=P4USER)
cl = c.findChangelist('testing')
assert cl.description == 'testing'
cl.delete()
cl = c.findChangelist('this\nis\nmultiline')
assert format(cl).endswith('Client: p4_unit_tests\n\nUser: p4test\n\nStatus: pending\n\nDescription:\n\tthis\n\tis\n\tmultiline\n\t\n\nFiles:\n\n')
cl.delete()
cl = c.findChangelist('this\nis\nmultiline\n\n')
assert format(cl).endswith('Client: p4_unit_tests\n\nUser: p4test\n\nStatus: pending\n\nDescription:\n\tthis\n\tis\n\tmultiline\n\t\n\nFiles:\n\n')
cl.delete()
cl1 = c.findChangelist('this\nis\n\nmultiline')
cl2 = c.findChangelist('this\nis\n\nmultiline')
assert cl1 == cl2
cl1.delete()
# def test_changelist_object():
# c = Connection(port=P4PORT, client=P4CLIENT, user=P4USER)
# cl = Changelist(c, 145)
# assert len(cl) == 1
# assert cl[0].isEdit is False
#
# os.chdir(r'E:\Users\brett\Perforce\p4_unit_tests_alt\p4_test')
# os.environ['P4CONFIG'] = '.p4config'
# cl = Changelist(145)
# assert len(cl) == 1
# assert cl[0].isEdit == False
# os.environ['P4CONFIG'] = ''
#
#
def test_iadd():
c = Connection(port=P4PORT, client=P4CLIENT, user=P4USER)
cl = c.findChangelist('iadd')
files = c.ls('//p4_test/s...', exclude_deleted=True)
cl += files
assert len(cl) == 2
cl.delete()
| 28.23125
| 153
| 0.612796
|
648baab74a081e02e8ef90c3af37a56d373d1fe6
| 5,112
|
py
|
Python
|
SimulationModels/get_sim_data_v2.py
|
kasmith/cbmm-project-christmas
|
2543eaf9ab57f31d68fef8a9f5d629ce0116ca1a
|
[
"MIT"
] | null | null | null |
SimulationModels/get_sim_data_v2.py
|
kasmith/cbmm-project-christmas
|
2543eaf9ab57f31d68fef8a9f5d629ce0116ca1a
|
[
"MIT"
] | null | null | null |
SimulationModels/get_sim_data_v2.py
|
kasmith/cbmm-project-christmas
|
2543eaf9ab57f31d68fef8a9f5d629ce0116ca1a
|
[
"MIT"
] | null | null | null |
from __future__ import division, print_function
from physicsTable import *
from physicsTable.constants import *
from physicsTable.models import PointSimulation
import os, json, glob
KAP_V_NORM = 20 # simulation for conditions with motion (small noise in velocity)
KAP_V_NOMOT = 1e-10 # simulation for no motion condition (very high noise, ie almost uniform distribution)
KAP_B = 25
KAP_M = 50000
P_ERR = 25
TIMEUP = 50.
N_SIMS = 5000
CPUS = 1
WRITE_JSON = True
# Regex used to list trials that are going to be simulated
TRIAL_REGEX_CONT = '*_*_*.json' # containment trials
TRIAL_REGEX_REG = 'regular_*.json' # regular trials
def run_single_sim(table, n_sims, kap_v, kap_b, kap_m, p_err, timeup, cpus):
ps = PointSimulation(table, kap_v, kap_b, kap_m, p_err, nsims=n_sims, cpus=cpus, maxtime=timeup)
ps.runSimulation()
outcomes = ps.getOutcomes()
bounces = ps.getBounces()
times = ps.getTimes()
p_green = outcomes[GREENGOAL]/n_sims
p_red = outcomes[REDGOAL]/n_sims
p_timeup = 1 - p_green - p_red
avg_bounces = sum(bounces) / len(bounces)
avg_time = sum(times) / len(times)
return p_green, p_red, p_timeup, avg_bounces, avg_time
def get_sim_data(n_sims=N_SIMS, kap_v_norm=KAP_V_NORM, kap_v_nomot = KAP_V_NOMOT, kap_b=KAP_B, kap_m=KAP_M, p_err=P_ERR, timeup = TIMEUP, cpus=CPUS):
goal_dict = get_goal_dict()
with open('sim_data_full.csv', 'w') as csv_out:
csv_out.write('Trial,IsContained,Direction,Goal,PGreen,PRed,PTimeUp,AvgBounces,AvgTime\n')
json_dict = {}
os_path_c = os.path.join('..', 'psiturk-rg-cont', 'templates', 'trials', TRIAL_REGEX_CONT)
for f in glob.iglob(os_path_c):
trial_name = f.split(os.path.sep)[-1][:-5]
print('Running simulations for: ' + trial_name)
tr = loadFromJSON(f)
json_dict[trial_name] = {}
for dir in ['forward','reverse','none']:
tab = tr.makeTable()
if dir == 'reverse':
tab.balls.setvel(map(lambda x: -x, tab.balls.getvel()))
if dir == 'none':
kap_v = kap_v_nomot
else:
tab.step(.5)
kap_v = kap_v_norm
p_green, p_red, p_timeup, avg_bounces, avg_time = run_single_sim(tab, n_sims, kap_v, kap_b, kap_m, p_err, timeup, cpus)
goal = goal_dict[trial_name]
csv_line = ','.join(
(trial_name, 'contained',dir, goal, str(p_green), str(p_red), str(p_timeup), str(avg_bounces), str(avg_time))) + '\n'
csv_out.write(csv_line)
if WRITE_JSON:
json_dict[trial_name][dir] = {'goal': goal, 'p_green': p_green, 'p_red': p_red, 'avg_bounces': avg_bounces, 'avg_time': avg_time}
os_path_r = os.path.join('..', 'psiturk-rg-cont', 'templates', 'trials', TRIAL_REGEX_REG)
for f in glob.iglob(os_path_r):
trial_name = f.split(os.path.sep)[-1][:-5]
print('Running simulations for: ' + trial_name)
tr = loadFromJSON(f)
json_dict[trial_name] = {}
for dir in ['forward', 'none']:
tab = tr.makeTable()
if dir == 'none':
kap_v = kap_v_nomot
else:
tab.step(.5)
kap_v = kap_v_norm
p_green, p_red, p_timeup, avg_bounces, avg_time = run_single_sim(tab, n_sims, kap_v, kap_b, kap_m,
p_err, timeup, cpus)
goal = goal_dict[trial_name]
csv_line = ','.join(
(trial_name, 'regular', dir, goal, str(p_green), str(p_red), str(p_timeup), str(avg_bounces),
str(avg_time))) + '\n'
csv_out.write(csv_line)
if WRITE_JSON:
json_dict[trial_name][dir] = {'goal': goal, 'p_green': p_green, 'p_red': p_red,
'avg_bounces': avg_bounces, 'avg_time': avg_time}
if WRITE_JSON:
with open('sim_data_full.json', 'w') as json_out:
json.dump(json_dict, json_out)
def loadFromJSON(jsonfl):
with open(jsonfl,'rU') as jfl:
j = json.load(jfl)
tr = RedGreenTrial(j['Name'], j['Dims'], j['ClosedEnds'])
b = j['Ball']
tr.addBall(b[0],b[1],b[2],b[3],b[4])
for w in j['Walls']:
tr.addWall(w[0],w[1],w[2],w[3])
for o in j['Occluders']:
tr.addOcc(o[0],o[1],o[2])
for g in j['Goals']:
tr.addGoal(g[0],g[1],g[2],g[3])
return tr
def get_goal_dict():
goal_dict = {}
data_path = os.path.join('..', 'ContainmentAnalysis', 'rawdata.csv')
with open(data_path, 'r') as f:
for line in f:
line_split = line.split(',')
trial_name = line_split[2]
if trial_name not in goal_dict:
goal_dict[trial_name] = line_split[-4]
return goal_dict
if __name__ == '__main__':
get_sim_data()
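# A minimal example of the JSON shape loadFromJSON above expects. The key names
# and positional field counts are inferred from that function; every value below
# is a placeholder, not taken from any real trial file.
EXAMPLE_TRIAL_JSON = {
    "Name": "example_trial",
    "Dims": [900, 600],
    "ClosedEnds": [True, True, True, True],
    "Ball": [[100, 100], [50, -30], 20, [0, 0, 255], 1],  # 5 positional fields
    "Walls": [],       # each wall: 4 positional fields
    "Occluders": [],   # each occluder: 3 positional fields
    "Goals": [],       # each goal: 4 positional fields
}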
| 38.727273
| 149
| 0.570227
|
816dbabe204373322f1441b908d4bfe18a928b07
| 14,035
|
py
|
Python
|
pytorch_retinanet/model.py
|
AljoSt/pytorch-retinanet
|
303d6d3ed10d6408696bb49b78a8ec36f680d563
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
pytorch_retinanet/model.py
|
AljoSt/pytorch-retinanet
|
303d6d3ed10d6408696bb49b78a8ec36f680d563
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
pytorch_retinanet/model.py
|
AljoSt/pytorch-retinanet
|
303d6d3ed10d6408696bb49b78a8ec36f680d563
|
[
"Apache-2.0",
"MIT"
] | 1
|
2019-08-16T08:19:20.000Z
|
2019-08-16T08:19:20.000Z
|
import torch.nn as nn
import torch.nn.functional
import torch
import math
import time
import torch.utils.model_zoo as model_zoo
from .utils import BasicBlock, Bottleneck, BBoxTransform, ClipBoxes, non_max_suppression
from .anchors import Anchors
from . import losses
import numpy as np
def nms(boxes, scores, thresh):
"""
    Calculate non-maximum suppression
    :param boxes: pytorch tensor containing boxes
    :param scores: pytorch tensor containing scores
    :param thresh: overlap threshold used for NMS
:return: indices corresponding to the found rectangles
"""
scores = scores.cpu().detach().numpy()
boxes = boxes.cpu().detach().numpy()
return non_max_suppression(boxes, confidences=scores, overlap_thresh=thresh)
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class PyramidFeatures(nn.Module):
def __init__(self, C3_size, C4_size, C5_size, feature_size=256):
super(PyramidFeatures, self).__init__()
# upsample C5 to get P5 from the FPN paper
self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
# add P5 elementwise to C4
self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
# add P4 elementwise to C3
self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
# "P6 is obtained via a 3x3 stride-2 conv on C5"
self.P6 = nn.Conv2d(C5_size, feature_size, kernel_size=3, stride=2, padding=1)
# "P7 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6"
self.P7_1 = nn.ReLU()
self.P7_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)
def forward(self, inputs):
C3, C4, C5 = inputs
P5_x = self.P5_1(C5)
P5_upsampled_x = nn.functional.interpolate(P5_x, scale_factor=2, mode='nearest')
P5_x = self.P5_2(P5_x)
P4_x = self.P4_1(C4)
P4_x = P5_upsampled_x + P4_x
P4_upsampled_x = nn.functional.interpolate(P4_x, scale_factor=2, mode='nearest')
P4_x = self.P4_2(P4_x)
P3_x = self.P3_1(C3)
P3_x = P3_x + P4_upsampled_x
P3_x = self.P3_2(P3_x)
P6_x = self.P6(C5)
P7_x = self.P7_1(P6_x)
P7_x = self.P7_2(P7_x)
return [P3_x, P4_x, P5_x, P6_x, P7_x]
class RegressionModel(nn.Module):
def __init__(self, num_features_in, num_anchors=9, feature_size=256):
super(RegressionModel, self).__init__()
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * 4, kernel_size=3, padding=1)
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.act3(out)
out = self.conv4(out)
out = self.act4(out)
out = self.output(out)
# out is B x C x W x H, with C = 4*num_anchors
out = out.permute(0, 2, 3, 1)
return out.contiguous().view(out.shape[0], -1, 4)
class ClassificationModel(nn.Module):
def __init__(self, num_features_in, num_anchors=9, num_classes=80, prior=0.01, feature_size=256):
super(ClassificationModel, self).__init__()
self.num_classes = num_classes
self.num_anchors = num_anchors
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchors * num_classes, kernel_size=3, padding=1)
self.output_act = nn.Sigmoid()
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.act3(out)
out = self.conv4(out)
out = self.act4(out)
out = self.output(out)
out = self.output_act(out)
        # out is B x C x W x H, with C = num_anchors * num_classes
out1 = out.permute(0, 2, 3, 1)
batch_size, width, height, channels = out1.shape
out2 = out1.view(batch_size, width, height, self.num_anchors, self.num_classes)
return out2.contiguous().view(x.shape[0], -1, self.num_classes)
class ResNet(nn.Module):
def __init__(self, num_classes, block, layers):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
if block == BasicBlock:
fpn_sizes = [self.layer2[layers[1] - 1].conv2.out_channels, self.layer3[layers[2] - 1].conv2.out_channels,
self.layer4[layers[3] - 1].conv2.out_channels]
elif block == Bottleneck:
fpn_sizes = [self.layer2[layers[1] - 1].conv3.out_channels, self.layer3[layers[2] - 1].conv3.out_channels,
self.layer4[layers[3] - 1].conv3.out_channels]
self.fpn = PyramidFeatures(fpn_sizes[0], fpn_sizes[1], fpn_sizes[2])
self.regressionModel = RegressionModel(256)
self.classificationModel = ClassificationModel(256, num_classes=num_classes)
self.anchors = Anchors()
self.regressBoxes = BBoxTransform()
self.clipBoxes = ClipBoxes()
self.focalLoss = losses.FocalLoss()
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
prior = 0.01
self.classificationModel.output.weight.data.fill_(0)
self.classificationModel.output.bias.data.fill_(-math.log((1.0 - prior) / prior))
self.regressionModel.output.weight.data.fill_(0)
self.regressionModel.output.bias.data.fill_(0)
self.freeze_bn()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def freeze_bn(self):
'''Freeze BatchNorm layers.'''
for layer in self.modules():
if isinstance(layer, nn.BatchNorm2d):
layer.eval()
def forward(self, inputs):
if self.training:
img_batch, annotations = inputs
else:
img_batch = inputs
x = self.conv1(img_batch)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x1 = self.layer1(x)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x4 = self.layer4(x3)
features = self.fpn([x2, x3, x4])
regression = torch.cat([self.regressionModel(feature) for feature in features], dim=1)
classification = torch.cat([self.classificationModel(feature) for feature in features], dim=1)
anchors = self.anchors(img_batch)
if self.training:
return self.focalLoss(classification, regression, anchors, annotations)
else:
transformed_anchors = self.regressBoxes(anchors, regression)
transformed_anchors = self.clipBoxes(transformed_anchors, img_batch)
scores = torch.max(classification, dim=2, keepdim=True)[0]
# create tensors for results
batch_size = classification.shape[0]
detection_count = 300 # maximum of 300 detections per image
res_scores = torch.zeros((batch_size, detection_count)).type_as(scores)
res_classifications = torch.zeros((batch_size, detection_count)).type_as(classification)
res_bboxes = torch.zeros((batch_size, detection_count, 4)).type_as(transformed_anchors)
# go through each item in the batch and calculate the final scores, classifications, and bboxes
for i in range(batch_size):
scores_over_thresh = (scores[i] > 0.05)[:, 0]
if scores_over_thresh.sum() == 0:
continue
# filter detections with a low score
c = classification[i, scores_over_thresh, :]
t_a = transformed_anchors[i, scores_over_thresh, :]
s = scores[i, scores_over_thresh, 0]
                # compute non-maximum suppression
anchors_nms_idx = nms(boxes=t_a, scores=s, thresh=0.5)
# get score and class
nms_scores, nms_class = c[anchors_nms_idx, :].max(dim=1)
end_idx = np.min([detection_count, len(anchors_nms_idx)])
# fill the result tensors with results
res_scores[i, :end_idx] = nms_scores
res_classifications[i, :end_idx] = nms_class
res_bboxes[i, :end_idx, :] = t_a[anchors_nms_idx, :]
return res_scores, res_classifications, res_bboxes
def resnet18(num_classes, pretrained=False, device=None, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
device: torch.device, to choose between cpu or gpu
"""
model = ResNet(num_classes, BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='.'), strict=False)
# transfer weights etc. to given device
if device:
model.to(device)
return model
def resnet34(num_classes, pretrained=False, device=None, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
device: torch.device, to choose between cpu or gpu
"""
model = ResNet(num_classes, BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='.'), strict=False)
# transfer weights etc. to given device
if device:
model.to(device)
return model
def resnet50(num_classes, pretrained=False, device=None, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
device: torch.device, to choose between cpu or gpu
"""
model = ResNet(num_classes, Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50'], model_dir='.'), strict=False)
# transfer weights etc. to given device
if device:
model.to(device)
return model
def resnet101(num_classes, pretrained=False, device=None, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
device: torch.device, to choose between cpu or gpu
"""
model = ResNet(num_classes, Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101'], model_dir='.'), strict=False)
# transfer weights etc. to given device
if device:
model.to(device)
return model
def resnet152(num_classes, pretrained=False, device=None, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
device: torch.device, to choose between cpu or gpu
"""
model = ResNet(num_classes, Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152'], model_dir='.'), strict=False)
# transfer weights etc. to given device
if device:
model.to(device)
return model
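# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of driving the constructors above in eval mode.
# It assumes the model's forward accepts a bare image batch when not training
# (as the eval branch above suggests); num_classes=80 and the 512x512 input
# size are placeholders.
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    detector = resnet50(num_classes=80, pretrained=False, device=device)
    detector.eval()
    with torch.no_grad():
        dummy_batch = torch.randn(1, 3, 512, 512, device=device)
        scores, classes, boxes = detector(dummy_batch)
    # scores/classes: (1, 300); boxes: (1, 300, 4)
    print(scores.shape, classes.shape, boxes.shape)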
| 35.352645
| 118
| 0.634129
|
5d80386838435f84f87fb540bd8c71ae501413a2
| 1,489
|
py
|
Python
|
python/paddle/nn/initializer/__init__.py
|
Ray2020BD/Paddle
|
994087188816575d456c2f9c2a6c90aad83b4e71
|
[
"Apache-2.0"
] | 2
|
2020-12-09T16:09:59.000Z
|
2020-12-09T16:10:02.000Z
|
python/paddle/nn/initializer/__init__.py
|
Ray2020BD/Paddle
|
994087188816575d456c2f9c2a6c90aad83b4e71
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/nn/initializer/__init__.py
|
Ray2020BD/Paddle
|
994087188816575d456c2f9c2a6c90aad83b4e71
|
[
"Apache-2.0"
] | 1
|
2021-03-23T00:59:48.000Z
|
2021-03-23T00:59:48.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define the initializers to create a Parameter in neural network
from ...fluid.initializer import Bilinear #DEFINE_ALIAS
from . import constant
from .constant import Constant #DEFINE_ALIAS
from . import kaiming
from .kaiming import KaimingNormal #DEFINE_ALIAS
from .kaiming import KaimingUniform #DEFINE_ALIAS
__all__ = ['Bilinear', ]
__all__ += constant.__all__
__all__ += kaiming.__all__
from . import xavier
from .xavier import XavierNormal #DEFINE_ALIAS
from .xavier import XavierUniform #DEFINE_ALIAS
from . import assign
from .assign import Assign #DEFINE_ALIAS
from . import normal
from .normal import Normal #DEFINE_ALIAS
from .normal import TruncatedNormal #DEFINE_ALIAS
from . import uniform
from .uniform import Uniform #DEFINE_ALIAS
__all__ += xavier.__all__
__all__ += assign.__all__
__all__ += normal.__all__
__all__ += uniform.__all__
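# --- Illustrative usage sketch (not part of the original module) ---
# How one of the re-exported initializers is typically attached to a layer,
# assuming the public paddle.ParamAttr / paddle.nn.Linear API; kept as a
# comment to avoid importing paddle at package-init time.
#
#     import paddle
#     weight_attr = paddle.ParamAttr(
#         initializer=paddle.nn.initializer.KaimingNormal())
#     linear = paddle.nn.Linear(4, 2, weight_attr=weight_attr)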
| 31.020833
| 74
| 0.779046
|
85779e698fe1ba801ad4d3314ec743b07728f588
| 305
|
py
|
Python
|
data/multilingual/Latn.MAD/Serif_12/pdf_to_json_test_Latn.MAD_Serif_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | 1
|
2021-09-19T19:47:35.000Z
|
2021-09-19T19:47:35.000Z
|
data/multilingual/Latn.MAD/Serif_12/pdf_to_json_test_Latn.MAD_Serif_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
data/multilingual/Latn.MAD/Serif_12/pdf_to_json_test_Latn.MAD_Serif_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.MAD/Serif_12/udhr_Latn.MAD_Serif_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.5
| 75
| 0.813115
|
6dff8f97ea7aac998920026f750652da0ff9e39d
| 114
|
py
|
Python
|
polecat/db/helpers.py
|
furious-luke/polecat
|
7be5110f76dc42b15c922c1bb7d49220e916246d
|
[
"MIT"
] | 4
|
2019-08-10T12:56:12.000Z
|
2020-01-21T09:51:20.000Z
|
polecat/db/helpers.py
|
furious-luke/polecat
|
7be5110f76dc42b15c922c1bb7d49220e916246d
|
[
"MIT"
] | 71
|
2019-04-09T05:39:21.000Z
|
2020-05-16T23:09:24.000Z
|
polecat/db/helpers.py
|
furious-luke/polecat
|
7be5110f76dc42b15c922c1bb7d49220e916246d
|
[
"MIT"
] | null | null | null |
def print_table(table):
print(repr(table))
for column in table.columns:
print(f' {repr(column)}')
| 22.8
| 33
| 0.631579
|
8dfd4bf8cd141977eb60dcda0e6dd49976330f9a
| 378
|
py
|
Python
|
corefacility/authorizations/mailru/test/test_account_set.py
|
serik1987/corefacility
|
78d84e19403361e83ef562e738473849f9133bef
|
[
"RSA-MD"
] | null | null | null |
corefacility/authorizations/mailru/test/test_account_set.py
|
serik1987/corefacility
|
78d84e19403361e83ef562e738473849f9133bef
|
[
"RSA-MD"
] | null | null | null |
corefacility/authorizations/mailru/test/test_account_set.py
|
serik1987/corefacility
|
78d84e19403361e83ef562e738473849f9133bef
|
[
"RSA-MD"
] | null | null | null |
from core.test.entity_set.external_accounts import TestExternalAccounts
from .account_set_object import AccountSetObject
class TestAccountSet(TestExternalAccounts):
"""
Tests the set of external accounts
"""
_external_account_set_object_class = AccountSetObject
_no_filter_accounts = ["account0@mail.ru", "account10@mail.ru"]
del TestExternalAccounts
| 22.235294
| 71
| 0.78836
|
a9d5320a1c866820ad8b41b436df7a376cbc1859
| 412
|
py
|
Python
|
modules/discord_bot/command_controller.py
|
dimterex/outlook2tracker
|
bda21cf02f11df206922342706973c657e2612a2
|
[
"Apache-2.0"
] | 1
|
2022-01-24T09:12:55.000Z
|
2022-01-24T09:12:55.000Z
|
modules/discord_bot/command_controller.py
|
dimterex/outlook2tracker
|
bda21cf02f11df206922342706973c657e2612a2
|
[
"Apache-2.0"
] | null | null | null |
modules/discord_bot/command_controller.py
|
dimterex/outlook2tracker
|
bda21cf02f11df206922342706973c657e2612a2
|
[
"Apache-2.0"
] | null | null | null |
NEXT_MEETING_COMMAND = 'next_meeting'
WRITE_LOG_COMMAND = 'write'
class Command_Controller:
def __init__(self):
self.commands = {}
def configure(self, command: str, action):
self.commands[command] = action
    def receive_message(self, command: str, promise_id: int, arguments: list):
        if command in self.commands:
            self.commands[command].execute(promise_id, arguments)
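# --- Illustrative usage sketch (not part of the original module) ---
# EchoAction is a made-up stand-in for a real handler; the only contract the
# controller relies on is an execute(promise_id, arguments) method.
if __name__ == "__main__":
    class EchoAction:
        def execute(self, promise_id, arguments):
            print(f"promise {promise_id}: {arguments}")

    controller = Command_Controller()
    controller.configure(NEXT_MEETING_COMMAND, EchoAction())
    controller.receive_message(NEXT_MEETING_COMMAND, 1, [])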
| 27.466667
| 75
| 0.682039
|
af3ed9bf6562b05f164fe704f0ad6f5a25776c06
| 287
|
py
|
Python
|
awswrangler/__metadata__.py
|
bechbd/aws-data-wrangler
|
8a9048c9d1354eb5a3fb42206e8d76a5ea5ca29d
|
[
"Apache-2.0"
] | null | null | null |
awswrangler/__metadata__.py
|
bechbd/aws-data-wrangler
|
8a9048c9d1354eb5a3fb42206e8d76a5ea5ca29d
|
[
"Apache-2.0"
] | null | null | null |
awswrangler/__metadata__.py
|
bechbd/aws-data-wrangler
|
8a9048c9d1354eb5a3fb42206e8d76a5ea5ca29d
|
[
"Apache-2.0"
] | null | null | null |
"""Metadata Module.
Source repository: https://github.com/awslabs/aws-data-wrangler
Documentation: https://aws-data-wrangler.readthedocs.io/
"""
__title__: str = "awswrangler"
__description__: str = "Pandas on AWS."
__version__: str = "2.15.1"
__license__: str = "Apache License 2.0"
| 23.916667
| 63
| 0.735192
|
9a39ee711faaaf9a9ee4b487bd07243c005b1ae9
| 8,820
|
py
|
Python
|
feature_tree.py
|
luvalenz/time-series-variability-tree
|
57ba881baa5caa6ec8b3dd27526c2d9d3368248b
|
[
"MIT"
] | 1
|
2018-10-19T06:28:55.000Z
|
2018-10-19T06:28:55.000Z
|
feature_tree.py
|
luvalenz/time-series-variability-tree
|
57ba881baa5caa6ec8b3dd27526c2d9d3368248b
|
[
"MIT"
] | null | null | null |
feature_tree.py
|
luvalenz/time-series-variability-tree
|
57ba881baa5caa6ec8b3dd27526c2d9d3368248b
|
[
"MIT"
] | 1
|
2018-10-19T06:28:54.000Z
|
2018-10-19T06:28:54.000Z
|
from collections import Counter
import pydotplus as pydot
import numpy as np
from sklearn.cluster import KMeans
class FeatureTree:
    def __init__(self, max_level, db_features_list, affinities, k):
        # `affinities`: precomputed pairwise affinity matrix over
        # db_features_list, used by the clustering when building the tree.
self.max_level = max_level
self.db_features_dict = {}
self.db_features_ids = []
self.graph = pydot.Dot(graph_type='graph')
self.query_ts = None
self.query_score_chart = None
self.node_shortcuts = None
self.weights = None
self.d_matrix = None
self._original_time_series_ids = None
self._query_vector = None
self.n_nodes = 0
self.k = k
id_ = 0
for ts in db_features_list:
ts._id = id_
self.db_features_ids.append(ts._id)
self.db_features_dict[ts._id] = ts
id_ += 1
self.root = Node(0, self.max_level, self.db_features_ids, affinities, None,
None, self.get_db_features_dict(),
self.get_next_node_id(), self.get_original_time_series_ids(), self.k)
self._build_node_shorcuts()
self._build_weights_vector()
self._build_d_matrix()
@property
def n_features(self):
return len(self.db_features_ids)
@property
def original_time_series_ids(self):
if self._original_time_series_ids is None:
self._original_time_series_ids = list(self.root.inverted_file)
return self._original_time_series_ids
@property
def n_original_time_series(self):
return len(self.original_time_series_ids)
@property
def query_vector(self):
if self._query_vector is None:
q_vector = np.array([node.q for node in self.node_shortcuts])
            q_norm = np.linalg.norm(q_vector)
            self._query_vector = q_vector / q_norm
return self._query_vector
def make_query(self, time_series):
features = time_series.run_sliding_window()
for node in self.node_shortcuts:
node.n_query_features = 0
self._query_vector = None
for subsequence in features:
self.root.add_query_subsequence(subsequence)
def get_db_features_dict(self):
def _get_db_features_dict():
return self.db_features_dict
return _get_db_features_dict
def get_next_node_id(self):
def _get_next_node_id():
n_nodes = self.n_nodes
self.n_nodes += 1
return n_nodes
return _get_next_node_id
def get_original_time_series_ids(self):
def _get_original_time_series_ids():
return self.original_time_series_ids
return _get_original_time_series_ids
def generate_graph(self):
self.root.add_to_graph(None, self.graph)
def _build_node_shorcuts(self):
shortcut_dict = {}
self.root.add_shortcut_to_dict(shortcut_dict)
shortcut_list = [shortcut_dict[i] for i in range(self.n_nodes)]
self.node_shortcuts = shortcut_list
def _build_weights_vector(self):
weights_list = [node.weight for node in self.node_shortcuts]
self.weights = np.array(weights_list)
def _build_d_matrix(self):
d_list = []
for node in self.node_shortcuts:
d_list.append(node.d_vector)
d_matrix = np.column_stack(d_list)
d_norm = np.linalg.norm(d_matrix, axis=1)
d_matrix = (d_matrix.T / d_norm).T
d_matrix[d_matrix == np.inf] = 0
self.d_matrix = np.nan_to_num(d_matrix)
class Node:
def __init__(self, level, max_level, ids, affinities, center_id,
parent, db_features_dict_getter,
next_node_id_getter, original_time_series_ids_getter, k):
self.level = level
self.max_level = max_level
self.features_ids = np.array(ids)
self.center_id = center_id
self.parent = parent
self.get_db_features_dict = db_features_dict_getter
self.get_original_time_series_ids_in_tree = original_time_series_ids_getter
self._id = next_node_id_getter()
self.n_query_features = 0
self.k = k
self.children = None
self._inverted_file = None
if level + 1 == max_level or len(ids) == 1:
self._generate_inverted_files()
else:
self._generate_children(affinities, next_node_id_getter)
def __str__(self):
if self.children is None:
            return str(self.inverted_file)
else:
return super().__str__()
@property
def is_leaf(self):
return self.children is None
@property
def inverted_file(self):
if self.is_leaf:
return self._inverted_file
else:
inverted_file = Counter()
for child in self.children:
inverted_file += child.inverted_file
return inverted_file
@property
def n_original_time_series_in_node(self):
return len(self.inverted_file)
@property
def n_original_time_series_in_tree(self):
return len(self.get_original_time_series_ids_in_tree())
@property
def weight(self):
return np.log(self.n_original_time_series_in_tree/
self.n_original_time_series_in_node)
@property
def m_vector(self):
m = np.zeros(self.n_original_time_series_in_tree)
ids = self.get_original_time_series_ids_in_tree()
for key, value in self.inverted_file.items():
index = ids.index(key)
m[index] = value
return m
@property
def q(self):
if self.n_query_features is None:
return None
return self.n_query_features*self.weight
@property
def d_vector(self):
return self.weight*self.m_vector
@property
def center(self):
features_dict = self.get_db_features_dict()
return features_dict[self.center_id]
def add_shortcut_to_dict(self, shortcut_dict):
shortcut_dict[self._id] = self
if not self.is_leaf:
for child in self.children:
child.add_shortcut_to_dict(shortcut_dict)
def _generate_children(self, affinities, next_node_id_getter):
print("---- GENERATING CHILDREN -----")
print("affinities shape")
print(affinities.shape)
        ap = KMeans(n_clusters=self.k)
        ap.fit(affinities)
        # KMeans (unlike AffinityPropagation) exposes no cluster_centers_indices_,
        # so use, for each centroid, the index of the closest row of `affinities`
        # as the representative feature of that cluster.
        cluster_centers_indices = np.array([
            int(np.argmin(np.linalg.norm(affinities - center, axis=1)))
            for center in ap.cluster_centers_
        ])
        n_clusters = len(cluster_centers_indices)
        print("ids length")
        print(len(self.features_ids))
        print("center indices")
        print(cluster_centers_indices)
        cluster_centers_ids = self.features_ids[cluster_centers_indices]
print("{0} clusters".format(n_clusters))
labels = ap.labels_
children = []
i = 0
for cluster_label, center_id in zip(range(n_clusters),
cluster_centers_ids):
indices = np.where(labels==cluster_label)[0]
child_ids = self.features_ids[indices]
child_affinities = affinities[indices][:, indices]
print("child {0}".format(i))
print("child indices")
print(indices)
print("child affinities")
print(child_affinities)
i += 1
child = Node(self.level+1, self.max_level, child_ids,
child_affinities, center_id,
self, self.get_db_features_dict,
next_node_id_getter, self.get_original_time_series_ids_in_tree, self.k)
children.append(child)
self.children = children
def add_query_subsequence(self, subsequence):
self.n_query_features += 1
if not self.is_leaf:
distances = []
for node in self.children:
                # time_series_twed (a TWED distance between a feature subsequence
                # and a node center) is assumed to be provided elsewhere in the project.
                distance = time_series_twed(subsequence, node.center)
distances.append(distance)
nearest_child = self.children[np.argmin(distances)]
            nearest_child.add_query_subsequence(subsequence)
def _generate_inverted_files(self):
self._inverted_file = Counter()
db_features_dict = self.get_db_features_dict()
for _id in self.features_ids:
subsequence = db_features_dict[_id]
original_time_series_id = subsequence.original_id
counter = Counter({original_time_series_id: 1})
self._inverted_file += counter
def add_to_graph(self, parent_graph_node, graph):
graph_node = pydot.Node(str(self))
graph.add_node(graph_node)
if parent_graph_node is not None:
graph.add_edge(pydot.Edge(parent_graph_node,
graph_node))
if self.children is not None:
for child in self.children:
child.add_to_graph(graph_node, graph)
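# --- Illustrative usage sketch (not part of the original module) ---
# How the tree is expected to score a query, assuming `db_features_list`
# holds feature objects with the interface used above (an `original_id`
# attribute, centers comparable via time_series_twed) and `affinities` is the
# precomputed pairwise affinity matrix; kept as comments because the feature
# and distance types live elsewhere in the project.
#
#     tree = FeatureTree(max_level=3, db_features_list=db_features_list,
#                        affinities=affinities, k=4)
#     tree.make_query(query_time_series)
#     # d_matrix: (n_original_time_series, n_nodes); query_vector: (n_nodes,)
#     similarities = tree.d_matrix @ tree.query_vector
#     top10 = np.argsort(similarities)[::-1][:10]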
| 34.86166
| 96
| 0.628345
|
919c1aa26cb002f3f8f22beb83d2689ab018d749
| 27,754
|
py
|
Python
|
torch/fx/passes/splitter_base.py
|
sigmunjr/pytorch
|
526e12029274cf0257954616d5cd5260b1021f52
|
[
"Intel"
] | 1
|
2021-05-11T11:53:47.000Z
|
2021-05-11T11:53:47.000Z
|
torch/fx/passes/splitter_base.py
|
sigmunjr/pytorch
|
526e12029274cf0257954616d5cd5260b1021f52
|
[
"Intel"
] | 1
|
2021-05-10T01:18:33.000Z
|
2021-05-10T01:18:33.000Z
|
torch/fx/passes/splitter_base.py
|
sigmunjr/pytorch
|
526e12029274cf0257954616d5cd5260b1021f52
|
[
"Intel"
] | null | null | null |
import argparse
from collections import defaultdict
from dataclasses import dataclass
from typing import List, Dict, Optional, Tuple
import torch
from torch.fx.experimental.graph_manipulation import get_size_of_node
from torch.fx.node import map_arg
from .operator_support import (
get_node_target,
OperatorSupport,
)
from .graph_drawer import FxGraphDrawer
from .shape_prop import ShapeProp
from .split_utils import split_by_tags
from .tools_common import (
FxNetAccFusionsFinder,
CALLABLE_NODE_OPS,
Tensors,
NodeList,
NodeSet,
)
class _SplitterSettingBase:
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument(
"--min_acc_module_size",
default=1,
help="Minimum size limit of an accelerator subgraph.",
)
parser.add_argument(
"--skip_fusion",
default=False,
action="store_true",
help="If true then no fusion groups. Fusion group is used to "
"enforce no non-tensor data flow between submodules. If we don't "
"have this constrain, setting this to false is recommended as it "
"can reduce overhead.",
)
parser.add_argument(
"--allow_non_tensor",
default=False,
action="store_true",
help="For some backends non-tensor data flow between cpu and them "
"are not allowed. Therefore, if a node supported by accelerator but "
"it has non-tensor inputs or outputs to a cpu node we would want to "
"consider it as a cpu node during splitting. However, for some backends "
"we might not care about non-tensor data flow and we can set this option "
"to true to disable the functionality that prevent non-tensor data flow.",
)
args, unknown = parser.parse_known_args()
self.min_acc_module_size: int = args.min_acc_module_size
self.skip_fusion: bool = args.skip_fusion
self.allow_non_tensor: bool = args.allow_non_tensor
# TODO: this can probably be optimized
class FxNetAccNodesFinder:
"""
Finds a set of nodes that can be supported on ACC, excluding nodes that have non-tensor
input/output to cpu nodes to prevent non-tensor data flow between backends and cpu.
I.e. if we have a chain:
ACC_NODE_1 -> ACC_NODE_2 -> ACC_NODE_3 -> CPU_NODE_1
where every ACC node produces non-tensor output, then they all should be treated as CPU nodes.
This behavior can be turned off by passing allow_non_tensor=True.
"""
def __init__(
self,
module: torch.fx.GraphModule,
operator_support: OperatorSupport,
allow_non_tensor: bool,
):
self.module = module
self.operator_support = operator_support
self.allow_non_tensor = allow_non_tensor
def reduce_acc_nodes_non_tensor_input_helper(
self, cpu_worklist: NodeList
):
"""
Transitively excludes nodes from ACC supported set.
For every node in the worklist:
- removes its downstream ACC nodes from ACC supported set,
- if any downstream ACC node produces non-tensor output,
then it gets added into the worklist.
"""
while cpu_worklist:
node = cpu_worklist.pop(0)
for user in node.users:
if user in self.acc_nodes:
self.acc_nodes.remove(user)
if "tensor_meta" not in user.meta:
cpu_worklist.append(user)
def reduce_acc_nodes_non_tensor_input(self):
"""
Excludes nodes from ACC supported set that have direct
upstream CPU nodes that produce non-tensor outputs.
"""
non_tensor_cpu_nodes: NodeList = []
for node in self.module.graph.nodes:
if node.op not in CALLABLE_NODE_OPS:
continue
if node in self.acc_nodes:
continue
if "tensor_meta" in node.meta:
continue
non_tensor_cpu_nodes.append(node)
self.reduce_acc_nodes_non_tensor_input_helper(non_tensor_cpu_nodes)
def reduce_acc_nodes_non_tensor_output(self):
"""
Excludes nodes from ACC supported set that produce non-tensor
outputs and have downstream CPU nodes.
"""
while True:
new_cpu_nodes: NodeList = []
for acc_node in self.acc_nodes:
if "tensor_meta" in acc_node.meta:
continue
for user in acc_node.users:
if user not in self.acc_nodes:
new_cpu_nodes.append(acc_node)
break
if not new_cpu_nodes:
break
for new_cpu_node in new_cpu_nodes:
self.acc_nodes.remove(new_cpu_node)
self.reduce_acc_nodes_non_tensor_input_helper(new_cpu_nodes)
def __call__(self) -> NodeSet:
submodules = dict(self.module.named_modules())
self.acc_nodes = {
n
for n in self.module.graph.nodes
if n.op in CALLABLE_NODE_OPS
and self.operator_support.is_node_supported(submodules, n)
}
if not self.allow_non_tensor:
self.reduce_acc_nodes_non_tensor_input()
self.reduce_acc_nodes_non_tensor_output()
return self.acc_nodes
class FxNetSplitterInternalError(Exception):
pass
@dataclass
class Subgraph:
is_acc: bool
nodes: NodeList
class _SplitterBase:
"""
Splits a GraphModule into sub-GraphModules for execution on CPU or the accelerator.
Output is a GraphModule with supported and unsupported operators grouped into as few sub-GraphModules as possible.
Assumes that only "call_module", "call_function" and "call_method" from FX IR can potentially be executed on the accelerator.
Given the following graph:
==> b ==>
// \\
a d
\\ //
==> c ==>
class SimpleModule(torch.nn.Module):
def forward(self, a):
b = torch.sin(a)
c = torch.cos(a)
d = b + c
return d
and providing "operator_support" that indicates that 'b' and 'c' can be executed on the accelerator,
we will get the following split result:
main:
def forward(self, a):
run_on_acc_0_0 = self._run_on_acc_0_0(a)
getitem = run_on_acc_0_0[0]
getitem_1 = run_on_acc_0_0[1]
run_on_cpu_1_1 = self._run_on_cpu_1_1(getitem, getitem_1)
return run_on_cpu_1_1
_run_on_acc_0_0:
def forward(self, a):
sin_1 = torch.sin(a)
cos_1 = torch.cos(a)
return (sin_1, cos_1)
_run_on_cpu_1_1:
def forward(self, sin_1, cos_1):
add_1 = sin_1 + cos_1
return add_1
"""
# PCIe bandwidth for the backend, default to 100 GB/s
PCIe_BW = 100 * 2 ** 30
def __init__(
self,
module: torch.fx.GraphModule,
sample_input: Tensors,
operator_support: OperatorSupport,
settings: _SplitterSettingBase,
):
"""
Preprocesses graph before splitting:
- finds nodes supported by ACC,
- finds fusion groups for ACC nodes having non-tensor IO,
- builds a graph of direct dependencies,
- builds a map of fused nodes to their fusions.
As a result we get self.acc_nodes, self.deps and self.fusions.
"""
assert isinstance(module, torch.fx.GraphModule)
self.module = module
ShapeProp(self.module).propagate(*sample_input)
self.settings = settings
self.operator_support = operator_support
self.sample_input = sample_input
self.acc_nodes = FxNetAccNodesFinder(self.module, self.operator_support, self.settings.allow_non_tensor)()
if self.settings.skip_fusion:
self.fusions = {}
else:
self.fusions = FxNetAccFusionsFinder(module, self.acc_nodes)()
# Modify deps to add more deps for fused nodes
self.deps = self.find_deps()
self.update_deps_for_fusions()
# ===============================================================
# Helpers for ctor and initial state
# ===============================================================
def find_deps(self) -> Dict[torch.fx.Node, NodeSet]:
"""
Builds a graph of node dependencies. Leaf nodes don't have any
dependencies and the "output" node doesn't have nodes depending on it.
Resulting graph has only direct dependencies, i.e. there are no
transitive dependencies.
"""
deps: Dict[torch.fx.Node, NodeSet] = defaultdict(set)
for node in self.module.graph.nodes:
if node.op not in CALLABLE_NODE_OPS:
continue
for user in node.users:
if user.op != "output":
deps[user].add(node)
return deps
def update_deps_for_fusions(self):
"""
Updates graph of dependencies so that:
- nodes from the same fusion depend on the same set of outer nodes,
- outer nodes depending on a fusion depend on all nodes in that fusion.
"""
for node in self.fusions:
fusion = self.fusions[node]
for fused_neighbor in fusion:
self.deps[node].update(self.deps[fused_neighbor] - fusion)
for user in fused_neighbor.users:
if user not in fusion:
self.deps[user].add(node)
# ===============================================================
# Helpers for preview
# ===============================================================
def _lower_model_to_backend(
self, mod: torch.fx.GraphModule, inputs: Tensors
) -> torch.nn.Module:
"""
Lower the model to a backend.
"""
return mod
def _find_culprit(
self, mod: torch.fx.GraphModule, inputs: Tensors
) -> str:
"""
When an error occurs during lowering or running the lowered mod, we use this
        function to find the culprits in `mod` that cause the error.
"""
return "Unable to find a culprit because _find_culprit() function is not implemented."
def _draw_graph_based_on_node_support(
self, mod: torch.fx.GraphModule, supported_nodes: NodeList
):
color_map = {
"default": "AliceBlue",
"supported": "chartreuse1",
"unsupported": "crimson",
}
class CustomDrawer(FxGraphDrawer):
def _get_node_style(self, node):
template = super()._get_node_style(node)
if node in supported_nodes:
template["fillcolor"] = color_map["supported"]
elif node.op in CALLABLE_NODE_OPS:
template["fillcolor"] = color_map["unsupported"]
else:
template["fillcolor"] = color_map["default"]
return template
drawer = CustomDrawer(mod, "node_support", ignore_getattr=True)
dot_graph = drawer.get_main_dot_graph()
dot_graph.write_raw("node_support.dot")
def node_support_preview(self, dump_graph: bool = False):
submodules = dict(self.module.named_modules())
supported_nodes: NodeList = []
supported_node_types = defaultdict(set)
unsupported_node_types = defaultdict(set)
def get_dtype(arg):
tensor_meta = arg.meta.get("tensor_meta")
return tensor_meta.dtype if tensor_meta else None
for node in self.module.graph.nodes:
if node.op not in CALLABLE_NODE_OPS:
continue
target = get_node_target(submodules, node)
# Store dtype of arg in node.args. If arg doesn't have dtype, i.e. not a tensor, we'll store None.
arg_dtypes = [
get_dtype(arg) if isinstance(arg, torch.fx.Node) else None
for arg in node.args
]
# Find last non-None element. If all elements are None, return max_len.
last_index = len(arg_dtypes) - next(
(
i
for i, dtype in enumerate(reversed(arg_dtypes))
if dtype is not None
),
len(arg_dtypes),
)
# Strip None elements at the end.
arg_dtypes_tuple = tuple(arg_dtypes[:last_index])
kwarg_dtypes_tuple = tuple(
(k, get_dtype(arg))
for k, arg in node.kwargs.items()
if isinstance(arg, torch.fx.Node)
)
if self.operator_support.is_node_supported(submodules, node):
supported_nodes.append(node)
supported_node_types[target].add((arg_dtypes_tuple, kwarg_dtypes_tuple))
else:
unsupported_node_types[target].add((arg_dtypes_tuple, kwarg_dtypes_tuple))
if dump_graph:
self._draw_graph_based_on_node_support(self.module, supported_nodes)
reports = "\nSupported node types in the model:\n"
for t, dtypes in supported_node_types.items():
for arg_dtypes_tuple, kwarg_dtypes_tuple in dtypes:
reports += f"{t}: ({arg_dtypes_tuple}, {dict(kwarg_dtypes_tuple)})\n"
reports += "\nUnsupported node types in the model:\n"
for t, dtypes in unsupported_node_types.items():
for arg_dtypes_tuple, kwarg_dtypes_tuple in dtypes:
reports += f"{t}: ({arg_dtypes_tuple}, {dict(kwarg_dtypes_tuple)})\n"
print(reports)
# Return reports for testing purpose
return reports
def split_preview(self, dump_graph: bool = False):
reports = ""
subgraphs = self.put_nodes_into_subgraphs()
acc_subgraphs_num = len([g for g in subgraphs if g.is_acc])
cpu_subgraphs_num = len(subgraphs) - acc_subgraphs_num
reports += f"Before removing small acc subgraphs, total {len(subgraphs)} subgraphs are created:"
reports += f" {acc_subgraphs_num} acc subgraphs and {cpu_subgraphs_num} cpu subgraphs.\n"
subgraphs = self.remove_small_acc_subgraphs(subgraphs)
acc_subgraphs_num = len([g for g in subgraphs if g.is_acc])
cpu_subgraphs_num = len(subgraphs) - acc_subgraphs_num
reports += f"After removing small acc subgraphs, total {len(subgraphs)} subgraphs are created:"
reports += f" {acc_subgraphs_num} acc subgraphs and {cpu_subgraphs_num} cpu subgraphs.\n"
for i, subgraph in enumerate(subgraphs):
reports += f"_run_on_acc_{i}: " if subgraph.is_acc else f"_run_on_cpu_{i}: "
reports += f"{len(subgraph.nodes)} node(s)\n"
self.tag(subgraphs)
split_mod = self.split(remove_tag=True)
split_mod.eval()
if dump_graph:
drawer = FxGraphDrawer(
split_mod, "preview", ignore_getattr=True
)
dot_graphs = drawer.get_all_dot_graphs()
for name, dot_graph in dot_graphs.items():
dot_graph.write_raw(f"{name}.dot")
max_qps: float = self.PCIe_BW
bottleneck_module = ""
for node in split_mod.graph.nodes:
if node.op == "call_module" and "acc" in node.target:
reports += f"\nProcessing acc submodule {node.target}\n"
submod = getattr(split_mod, node.target)
def get_submod_inputs(main_mod, submod, example_inputs):
sub_inputs = None
def get_inputs(self, inputs):
nonlocal sub_inputs
sub_inputs = inputs
handle = submod.register_forward_pre_hook(get_inputs)
main_mod(*example_inputs)
handle.remove()
return sub_inputs
submod_inputs = get_submod_inputs(
split_mod, submod, self.sample_input
)
ShapeProp(submod).propagate(*submod_inputs)
total_input_bytes = 0
total_output_bytes = 0
reports += "Checking inputs...\n"
for n in submod.graph.nodes:
if n.op == "placeholder":
if "tensor_meta" not in n.meta:
reports += f"Input {n.name} is not a tensor, this might cause problems during lowering!\n"
else:
total_input_bytes += get_size_of_node(submod, n)[0]
if n.op == "output":
output_node = n
reports += "Checking outputs...\n"
def get_bytes(node: torch.fx.Node):
nonlocal total_output_bytes
nonlocal reports
if "tensor_meta" not in node.meta:
reports += f"Output {node.name} is not a tensor, this might cause problems during lowering!\n"
else:
total_output_bytes += get_size_of_node(submod, node)[0]
map_arg(output_node.args, get_bytes)
qps = self.PCIe_BW / max(total_input_bytes, total_output_bytes)
reports += f"Total input size in bytes is {total_input_bytes}, total output size in bytes is {total_output_bytes},"
reports += f" theoretical max qps (bounds by PCIe bandwidth) for this submodule is {qps}.\n"
if qps < max_qps:
max_qps = qps
bottleneck_module = node.target
try:
lowered_submod = self._lower_model_to_backend(submod, submod_inputs)
except RuntimeError:
reports += "Run into an error during lowering!\n"
reports += self._find_culprit(submod, submod_inputs)
continue
try:
lowered_submod(*submod_inputs)
except RuntimeError:
reports += "Run into an error during inference!\n"
reports += self._find_culprit(submod, submod_inputs)
else:
reports += "Lowering and running succeed!\n"
reports += f"\nTheoretical max qps (bounds by PCIe bandwidth) for this model is {max_qps},"
reports += f" bottleneck is submodule {bottleneck_module}."
print(reports)
# return the reports for testing purposes
return reports
# ===============================================================
# Helpers for extend_acc_subgraph() method
# ===============================================================
def find_reverse_deps(
self, tag_id: Optional[int] = None
) -> Dict[torch.fx.Node, NodeSet]:
"""
        Builds reversed topological node dependencies. If tag_id is specified,
        nodes that belong to a later subgraph (i.e. have a greater tag_id) are ignored.
"""
result: Dict[torch.fx.Node, NodeSet] = defaultdict(set)
for node in self.module.graph.nodes:
if node.op not in CALLABLE_NODE_OPS:
continue
for user in node.users:
if user.op not in CALLABLE_NODE_OPS:
continue
if tag_id is None or (int(user.tag.split("_")[-1]) < tag_id):
result[node].add(user)
return result
def update_reverse_deps_for_fusions(
self, deps: Dict[torch.fx.Node, NodeSet]
):
processed_node = set()
for node, fusion in self.fusions.items():
if node in processed_node:
continue
new_dep = set()
# Create a new dependency set which include all the
# dependencies of the nodes in the fusion group
for n in fusion:
new_dep.update(deps[n])
# Exclude nodes in the fusion
new_dep.difference_update(fusion)
# Update dependency
for n in fusion:
deps[n] = new_dep
for arg in n.all_input_nodes:
if arg not in fusion:
deps[arg].update(fusion)
processed_node.add(n)
def find_parent_nodes_of_subgraph(self, tag: str) -> NodeSet:
"""
Finds parent nodes of the `tag` subgraph.
        Traverse the inputs of the nodes in the subgraph; if an input doesn't belong to
        the subgraph and is not a placeholder, we consider it a parent node of the subgraph.
"""
parent_nodes = set()
for node in self.module.graph.nodes:
if node.op in CALLABLE_NODE_OPS and node.tag == tag:
for arg in node.all_input_nodes:
if arg.op in CALLABLE_NODE_OPS and arg.tag != tag:
parent_nodes.add(arg)
return parent_nodes
def extend_acc_subgraph(self, tag: str):
"""
        Extend the acc subgraph with `tag`, walking in the reversed topological direction.
"""
# Dict that maps node to its users and ignore users that
# are in the subgraph that has greater tag
deps = self.find_reverse_deps(tag_id=int(tag.split("_")[-1]))
self.update_reverse_deps_for_fusions(deps)
# Parent nodes of the subgraph
parent_nodes = self.find_parent_nodes_of_subgraph(tag)
visited_nodes: NodeSet = set()
while parent_nodes:
node = None
            # Find an acc node that depends on visited nodes only
for n in parent_nodes:
if deps[n] <= visited_nodes and n in self.acc_nodes:
node = n
break
if node is None:
break
# Put the node into `tag` subgraph
node.tag = tag # type: ignore[attr-defined]
parent_nodes.remove(node)
visited_nodes.add(node)
# If node is in a fusion group, add all fusion buddies to parent nodes
if node in self.fusions:
for fusion_node in self.fusions[node]:
if fusion_node not in visited_nodes:
parent_nodes.add(fusion_node)
# Add inputs of the node to parent nodes
for arg in node.all_input_nodes:
if arg.op in CALLABLE_NODE_OPS and arg not in visited_nodes:
parent_nodes.add(arg)
# ===============================================================
# Helpers for split() method
# ===============================================================
def starter_nodes(self) -> Tuple[NodeSet, NodeSet]:
"""
Finds nodes that consume module inputs or getattr nodes.
"""
starter_cpu_nodes: NodeSet = set()
starter_acc_nodes: NodeSet = set()
for node in self.module.graph.nodes:
if node.op not in {"placeholder", "getattr"}:
continue
for user in node.users:
if user in self.acc_nodes:
starter_acc_nodes.add(user)
else:
starter_cpu_nodes.add(user)
return starter_cpu_nodes, starter_acc_nodes
def put_nodes_into_subgraphs(self) -> List[Subgraph]:
# We start graph traversal from leaf nodes
current_cpu_nodes, current_acc_nodes = self.starter_nodes()
visited_nodes: NodeSet = set()
# If there are CPU nodes, start with them
acc_subgraph: bool = not current_cpu_nodes
current_subgraph_nodes: NodeList = []
# Result accumulator
subgraphs: List[Subgraph] = []
while current_cpu_nodes or current_acc_nodes:
# Find the first node that should belong to the current subgraph and has all dependencies resolved
current_nodes = current_acc_nodes if acc_subgraph else current_cpu_nodes
node = next(
(n for n in current_nodes if self.deps[n] <= visited_nodes),
None,
)
# If nothing was found, then it's time to flip the mode and start a new subgraph
if node is None:
if not current_subgraph_nodes:
raise FxNetSplitterInternalError("Subgraph can't be empty")
subgraphs.append(
Subgraph(is_acc=acc_subgraph, nodes=current_subgraph_nodes)
)
acc_subgraph = not acc_subgraph
current_subgraph_nodes = []
continue
current_nodes.remove(node)
visited_nodes.add(node)
current_subgraph_nodes.append(node)
# Add fusion buddies
if node in self.fusions:
current_acc_nodes.update(self.fusions[node] - visited_nodes)
# Put depending nodes into the queue
for user in node.users:
if user.op not in CALLABLE_NODE_OPS:
continue
# Add downstream nodes
if user in self.acc_nodes:
current_acc_nodes.add(user)
else:
current_cpu_nodes.add(user)
# Check if the last subgraph was not created
if current_subgraph_nodes:
subgraphs.append(
Subgraph(is_acc=acc_subgraph, nodes=current_subgraph_nodes)
)
if not subgraphs:
raise FxNetSplitterInternalError("Couldn't create subgraphs")
return subgraphs
def remove_small_acc_subgraphs(self, subgraphs: List[Subgraph]) -> List[Subgraph]:
"""
        This pass finds ACC submodules with fewer nodes than the specified
        minimum size and merges them with adjacent CPU submodules.
"""
result: List[Subgraph] = []
for subgraph in subgraphs:
if subgraph.is_acc:
if len(subgraph.nodes) >= self.settings.min_acc_module_size:
result.append(subgraph)
else:
if result:
result[-1].nodes.extend(subgraph.nodes)
else:
subgraph.is_acc = False
result.append(subgraph)
else:
if result and not result[-1].is_acc:
result[-1].nodes.extend(subgraph.nodes)
else:
result.append(subgraph)
return result
def tag(self, subgraphs: List[Subgraph]):
self.tags: List[str] = []
for subgraph in subgraphs:
template = "_run_on_acc_{}" if subgraph.is_acc else "_run_on_cpu_{}"
tag = template.format(len(self.tags))
self.tags.append(tag)
for node in subgraph.nodes:
if hasattr(node, "tag"):
raise FxNetSplitterInternalError(f"Node {node} was already tagged")
node.tag = tag # type: ignore[attr-defined]
def split(self, remove_tag: bool = False) -> torch.fx.GraphModule:
split_module = split_by_tags(self.module, self.tags)
if remove_tag:
for node in self.module.graph.nodes:
if hasattr(node, "tag"):
del node.tag
return split_module
def __call__(self) -> torch.fx.GraphModule:
subgraphs = self.put_nodes_into_subgraphs()
subgraphs = self.remove_small_acc_subgraphs(subgraphs)
self.tag(subgraphs)
return self.split()
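# --- Illustrative usage sketch (not part of the original module) ---
# A minimal driver mirroring the SimpleModule example in the _SplitterBase
# docstring. SinCosSupport is a made-up OperatorSupport subclass; the only
# interface the splitter relies on above is is_node_supported(submodules, node).
if __name__ == "__main__":
    class SimpleModule(torch.nn.Module):
        def forward(self, a):
            return torch.sin(a) + torch.cos(a)

    class SinCosSupport(OperatorSupport):
        def is_node_supported(self, submodules, node) -> bool:
            return node.op == "call_function" and node.target in (torch.sin, torch.cos)

    traced = torch.fx.symbolic_trace(SimpleModule())
    splitter = _SplitterBase(
        traced, (torch.randn(4),), SinCosSupport(), _SplitterSettingBase()
    )
    split_module = splitter()
    print(split_module)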
| 36.760265
| 131
| 0.576133
|
132b028e2238ca35217a2837dcb4674ef19d175c
| 104
|
py
|
Python
|
src/user/__init__.py
|
hirmeos/altmetrics
|
546726f01e81e3ca2f37d448d01a55625e9be1ea
|
[
"MIT"
] | 2
|
2019-02-28T04:11:06.000Z
|
2020-05-04T08:44:22.000Z
|
src/user/__init__.py
|
hirmeos/altmetrics
|
546726f01e81e3ca2f37d448d01a55625e9be1ea
|
[
"MIT"
] | 23
|
2019-02-08T12:27:40.000Z
|
2020-12-16T20:43:50.000Z
|
src/user/__init__.py
|
hirmeos/altmetrics
|
546726f01e81e3ca2f37d448d01a55625e9be1ea
|
[
"MIT"
] | 1
|
2020-05-04T08:44:25.000Z
|
2020-05-04T08:44:25.000Z
|
""" User module - code relating to users and user authentication will
eventually all be moved here.
"""
| 26
| 69
| 0.75
|
4f179e51c73325f77bae057e7e2bffdb7b066673
| 4,978
|
py
|
Python
|
scanpy/datasets/__init__.py
|
gioelelm/scanpy
|
97391a0e7908b9644b2d6640c8e26d37bdc7811e
|
[
"BSD-3-Clause"
] | null | null | null |
scanpy/datasets/__init__.py
|
gioelelm/scanpy
|
97391a0e7908b9644b2d6640c8e26d37bdc7811e
|
[
"BSD-3-Clause"
] | null | null | null |
scanpy/datasets/__init__.py
|
gioelelm/scanpy
|
97391a0e7908b9644b2d6640c8e26d37bdc7811e
|
[
"BSD-3-Clause"
] | 1
|
2019-02-18T07:39:59.000Z
|
2019-02-18T07:39:59.000Z
|
# Author: F. Alex Wolf (http://falexwolf.de)
"""Init runs, manage examples.
"""
from . import builtin
from .builtin import *
from ..utils import check_adata
def init_run(run_name, suffix='', recompute=True, reread=False,
return_module=False):
"""Read and preprocess data based on a "run file".
Filenames of the form "runs_whatevername.py", "scanpy_whatevername.py" and
"preprocessing_whatevername.py" in the current working directory are
automatically considered as run files.
In addition, there are builtin examples defined in the run file
https://github.com/theislab/scanpy/tree/master/scanpy/examples/builtin.py
Parameters
----------
run_name : str
Key for looking up an example-preprocessing function.
suffix : str, optional (default: '')
Set suffix to be appended to `run_name` in naming output files.
recompute : bool, optional (default: True)
Recompute preprocessing.
reread : bool, optional (default: False)
Reread the original data file (often a text file, much slower) instead
of the hdf5 file.
return_module : bool, optional (default: False)
Return example module.
Returns
-------
adata : AnnData
Annotated data matrix, optionally with metadata such as
adata.add['xroot'] : np.ndarray or int
Expression vector or index of root cell for DPT analysis.
Additionally, if return_module == True:
exmodule : dict, optional
Example module.
"""
import os, sys
from .. import readwrite
from .. import settings as sett
from .. import logging as logg
sett._run_basename = run_name
sett._run_suffix = suffix
sett.run_name = sett._run_basename + sett._run_suffix
adata_file = readwrite.get_filename_from_key(sett.run_name)
adata_file_exists = os.path.exists(adata_file)
# find the runfile with preprocessing functions etc.
loop_over_filenames = [filename for filename in os.listdir('.')
if (filename.startswith('runs')
or filename.startswith('preprocessing')
or filename.startswith('scanpy'))
and filename.endswith('.py')]
if len(loop_over_filenames) == 0:
logg.m('did not find user examples, to provide some,\n'
' generate a file "runs_whatevername.py" in your working directory,\n'
' analogous to https://github.com/theislab/scanpy/blob/master/scanpy/examples/builtin.py',
v='hint')
not_found = True
sys.path.insert(0, '.')
for filename in loop_over_filenames:
exmodule = __import__(filename.replace('.py', ''))
try:
exfunc = getattr(exmodule, run_name)
not_found = False
except AttributeError:
pass
if not_found:
try:
exfunc = getattr(builtin, run_name)
exmodule = builtin
except AttributeError:
import types
sys.exit('Do not know how to run example "{}".\nEither define a function {}() '
'that returns an AnnData object in "./runfile_whatevername.py".\n'
'Or, use one of the builtin examples: {}'
.format(run_name, run_name,
[a for a in dir(builtin)
if isinstance(getattr(builtin, a), types.FunctionType)]))
if not adata_file_exists or recompute or reread:
logg.m('reading and preprocessing data')
# run the function
adata = exfunc()
# add run_name to adata
logg.m('... X has shape n_samples × n_variables = {} × {}'
.format(adata.X.shape[0], adata.X.shape[1]))
# do sanity checks on data dictionary
adata = check_adata(adata, verbosity=1)
# write the prepocessed data
readwrite.write(sett.run_name, adata)
else:
adata = readwrite.read(sett.run_name)
if return_module:
return adata, exmodule
else:
return adata
# -------------------------------------------------------------------------------
# Reading and writing with sett.run_name
# -------------------------------------------------------------------------------
def read_run(run_name=None, suffix=''):
"""Read run and init sett.run_name if provided.
"""
from .. import settings as sett
if run_name is None: run_name = sett.run_name
if suffix == '': suffix = sett._run_suffix
sett._run_basename = run_name
sett._run_suffix = suffix
sett.run_name = sett._run_basename + sett._run_suffix
return init_run(run_name, suffix=suffix, recompute=False)
def write_run(data, ext=None):
"""Write run.
ext : str or None (default: None)
        File extension from which to infer the file format.
"""
from .. import readwrite
from .. import settings as sett
readwrite.write(sett.run_name, data, ext=ext)
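# --- Illustrative sketch (not part of the original module) ---
# What a user-provided run file such as ./runs_myproject.py might look like;
# the function name is the key passed to init_run(), and 'myexample' and the
# csv path are placeholders (this assumes the old scanpy.api interface
# contemporary with this module).
#
#     # runs_myproject.py
#     import scanpy.api as sc
#
#     def myexample():
#         adata = sc.read('data/mydata.csv')
#         adata.add['xroot'] = 0  # root cell index for DPT
#         return adata
#
# Calling init_run('myexample') then reads, preprocesses, and caches the data.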
| 37.712121
| 108
| 0.604661
|
aa398c2b684056aa972b58b8dcaa3faf289b09ac
| 1,453
|
py
|
Python
|
base/logs/log.py
|
wobal/Kissenium
|
cd9c17b8e0ff80ebb66c3d6db16626670b2d2d12
|
[
"Apache-2.0"
] | 4
|
2017-09-15T03:11:57.000Z
|
2018-02-02T16:15:55.000Z
|
base/logs/log.py
|
wobal/Kissenium
|
cd9c17b8e0ff80ebb66c3d6db16626670b2d2d12
|
[
"Apache-2.0"
] | 4
|
2018-01-28T11:24:13.000Z
|
2018-03-15T20:45:55.000Z
|
base/logs/log.py
|
wobal/Kissenium
|
cd9c17b8e0ff80ebb66c3d6db16626670b2d2d12
|
[
"Apache-2.0"
] | 1
|
2020-12-02T12:01:24.000Z
|
2020-12-02T12:01:24.000Z
|
# coding: utf-8
import logging
from logging.handlers import RotatingFileHandler
from base.config.config import Config
from base.tools.sm_tools import SmallTools
class Log4Kissenium:
def __init__(self):
self.config = Config()
def setup(self, name, path):
"""
Every log file will be created in "reports/" folder.
:param name: Filename of the log
:param path: Relative path of the log
:return: logger
"""
final_path = SmallTools.get_reports_folder(path)
logger = logging.getLogger(name)
logger.setLevel(self.get_log_level())
formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
file_handler = RotatingFileHandler(final_path + name + '.log', 'a', 1000000, 1)
file_handler.setLevel(self.get_log_level())
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
def get_log_level(self):
if self.config.get_log_level() == "DEBUG":
return logging.DEBUG
elif self.config.get_log_level() == "INFO":
return logging.INFO
elif self.config.get_log_level() == "WARNING":
return logging.WARNING
elif self.config.get_log_level() == "ERROR":
return logging.ERROR
else:
return logging.DEBUG
@staticmethod
def get_logger(name):
return logging.getLogger(name)
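# --- Illustrative usage sketch (not part of the original module) ---
# Shows the intended setup/get_logger flow; "smoke/" is a placeholder report
# sub-folder, and a valid Kissenium configuration file is assumed to exist so
# that Config() and get_log_level() resolve.
if __name__ == "__main__":
    logger = Log4Kissenium().setup("example", "smoke/")
    logger.info("Logger initialised")
    # Any later lookup by name returns the same configured logger.
    Log4Kissenium.get_logger("example").debug("Retrieved by name")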
| 30.914894
| 87
| 0.636614
|
640787313e91c518884f7b118fd3df897081d3e5
| 69,590
|
py
|
Python
|
muskit/tasks/abs_task.py
|
hertz-pj/Muskits
|
e515fc87efe894a64d96557b440a99561766824a
|
[
"Apache-2.0"
] | null | null | null |
muskit/tasks/abs_task.py
|
hertz-pj/Muskits
|
e515fc87efe894a64d96557b440a99561766824a
|
[
"Apache-2.0"
] | null | null | null |
muskit/tasks/abs_task.py
|
hertz-pj/Muskits
|
e515fc87efe894a64d96557b440a99561766824a
|
[
"Apache-2.0"
] | null | null | null |
# Adopted from ESPNet: https://github.com/espnet/espnet
from abc import ABC
from abc import abstractmethod
import argparse
from dataclasses import dataclass
from distutils.version import LooseVersion
import functools
import logging
import os
from pathlib import Path
import sys
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
import humanfriendly
import numpy as np
import torch
import torch.multiprocessing
import torch.nn
import torch.optim
from torch.utils.data import DataLoader
from typeguard import check_argument_types
from typeguard import check_return_type
import wandb
import yaml
# from muskit import __version__
from muskit.iterators.abs_iter_factory import AbsIterFactory
from muskit.iterators.chunk_iter_factory import ChunkIterFactory
from muskit.iterators.multiple_iter_factory import MultipleIterFactory
from muskit.iterators.sequence_iter_factory import SequenceIterFactory
from muskit.main_funcs.collect_stats import collect_stats
from muskit.optimizers.sgd import SGD
from muskit.samplers.build_batch_sampler import BATCH_TYPES
from muskit.samplers.build_batch_sampler import build_batch_sampler
from muskit.samplers.unsorted_batch_sampler import UnsortedBatchSampler
from muskit.schedulers.noam_lr import NoamLR
from muskit.schedulers.warmup_lr import WarmupLR
from muskit.torch_utils.load_pretrained_model import load_pretrained_model
from muskit.torch_utils.model_summary import model_summary
from muskit.torch_utils.pytorch_version import pytorch_cudnn_version
from muskit.torch_utils.set_all_random_seed import set_all_random_seed
from muskit.train.abs_muskit_model import AbsMuskitModel
from muskit.train.class_choices import ClassChoices
from muskit.train.dataset import AbsDataset
from muskit.train.dataset import DATA_TYPES
from muskit.train.dataset import MuskitDataset
from muskit.train.distributed_utils import DistributedOption
from muskit.train.distributed_utils import free_port
from muskit.train.distributed_utils import get_master_port
from muskit.train.distributed_utils import get_node_rank
from muskit.train.distributed_utils import get_num_nodes
from muskit.train.distributed_utils import resolve_distributed_mode
from muskit.train.iterable_dataset import IterableMuskitDataset
from muskit.train.trainer import Trainer
from muskit.utils.build_dataclass import build_dataclass
from muskit.utils import config_argparse
from muskit.utils.cli_utils import get_commandline_args
from muskit.utils.get_default_kwargs import get_default_kwargs
from muskit.utils.nested_dict_action import NestedDictAction
from muskit.utils.types import humanfriendly_parse_size_or_none
from muskit.utils.types import int_or_none
from muskit.utils.types import str2bool
from muskit.utils.types import str2triple_str
from muskit.utils.types import str_or_int
from muskit.utils.types import str_or_none
from muskit.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump
if LooseVersion(torch.__version__) >= LooseVersion("1.5.0"):
from torch.multiprocessing.spawn import ProcessContext
else:
from torch.multiprocessing.spawn import SpawnContext as ProcessContext
optim_classes = dict(
adam=torch.optim.Adam,
sgd=SGD,
adadelta=torch.optim.Adadelta,
adagrad=torch.optim.Adagrad,
adamax=torch.optim.Adamax,
asgd=torch.optim.ASGD,
lbfgs=torch.optim.LBFGS,
rmsprop=torch.optim.RMSprop,
rprop=torch.optim.Rprop,
)
if LooseVersion(torch.__version__) >= LooseVersion("1.2.0"):
optim_classes["adamw"] = torch.optim.AdamW
try:
import torch_optimizer
optim_classes.update(
accagd=torch_optimizer.AccSGD,
adabound=torch_optimizer.AdaBound,
adamod=torch_optimizer.AdaMod,
diffgrad=torch_optimizer.DiffGrad,
lamb=torch_optimizer.Lamb,
novograd=torch_optimizer.NovoGrad,
pid=torch_optimizer.PID,
# torch_optimizer<=0.0.1a10 doesn't support
# qhadam=torch_optimizer.QHAdam,
qhm=torch_optimizer.QHM,
radam=torch_optimizer.RAdam,
sgdw=torch_optimizer.SGDW,
yogi=torch_optimizer.Yogi,
)
del torch_optimizer
except ImportError:
pass
try:
import apex
optim_classes.update(
fusedadam=apex.optimizers.FusedAdam,
fusedlamb=apex.optimizers.FusedLAMB,
fusednovograd=apex.optimizers.FusedNovoGrad,
fusedsgd=apex.optimizers.FusedSGD,
)
del apex
except ImportError:
pass
try:
import fairscale
except ImportError:
fairscale = None
scheduler_classes = dict(
ReduceLROnPlateau=torch.optim.lr_scheduler.ReduceLROnPlateau,
lambdalr=torch.optim.lr_scheduler.LambdaLR,
steplr=torch.optim.lr_scheduler.StepLR,
multisteplr=torch.optim.lr_scheduler.MultiStepLR,
exponentiallr=torch.optim.lr_scheduler.ExponentialLR,
CosineAnnealingLR=torch.optim.lr_scheduler.CosineAnnealingLR,
)
if LooseVersion(torch.__version__) >= LooseVersion("1.1.0"):
scheduler_classes.update(
noamlr=NoamLR,
warmuplr=WarmupLR,
)
if LooseVersion(torch.__version__) >= LooseVersion("1.3.0"):
CosineAnnealingWarmRestarts = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts
scheduler_classes.update(
cycliclr=torch.optim.lr_scheduler.CyclicLR,
onecyclelr=torch.optim.lr_scheduler.OneCycleLR,
CosineAnnealingWarmRestarts=CosineAnnealingWarmRestarts,
)
# To lower keys
optim_classes = {k.lower(): v for k, v in optim_classes.items()}
scheduler_classes = {k.lower(): v for k, v in scheduler_classes.items()}
@dataclass
class IteratorOptions:
preprocess_fn: callable
collate_fn: callable
data_path_and_name_and_type: list
shape_files: list
batch_size: int
batch_bins: int
batch_type: str
max_cache_size: float
max_cache_fd: int
distributed: bool
num_batches: Optional[int]
num_iters_per_epoch: Optional[int]
train: bool
class AbsTask(ABC):
# Use @staticmethod, or @classmethod,
# instead of instance method to avoid God classes
# If you need more than one optimizers, change this value in inheritance
num_optimizers: int = 1
trainer = Trainer
class_choices_list: List[ClassChoices] = []
def __init__(self):
raise RuntimeError("This class can't be instantiated.")
@classmethod
@abstractmethod
def add_task_arguments(cls, parser: argparse.ArgumentParser):
pass
@classmethod
@abstractmethod
def build_collate_fn(
cls, args: argparse.Namespace, train: bool
) -> Callable[[Sequence[Dict[str, np.ndarray]]], Dict[str, torch.Tensor]]:
"""Return "collate_fn", which is a callable object and given to DataLoader.
>>> from torch.utils.data import DataLoader
>>> loader = DataLoader(collate_fn=cls.build_collate_fn(args, train=True), ...)
In many cases, you can use our common collate_fn.
"""
raise NotImplementedError
@classmethod
@abstractmethod
def build_preprocess_fn(
cls, args: argparse.Namespace, train: bool
) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:
raise NotImplementedError
@classmethod
@abstractmethod
def required_data_names(
cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
"""Define the required names by Task
This function is used by
>>> cls.check_task_requirements()
If your model is defined as following,
>>> from muskit.train.abs_muskit_model import AbsMuskitModel
>>> class Model(AbsMuskitModel):
... def forward(self, input, output, opt=None): pass
then "required_data_names" should be as
>>> required_data_names = ('input', 'output')
"""
raise NotImplementedError
@classmethod
@abstractmethod
def optional_data_names(
cls, train: bool = True, inference: bool = False
) -> Tuple[str, ...]:
"""Define the optional names by Task
This function is used by
>>> cls.check_task_requirements()
If your model is defined as follows,
>>> from muskit.train.abs_muskit_model import AbsMuskitModel
>>> class Model(AbsMuskitModel):
... def forward(self, input, output, opt=None): pass
then "optional_data_names" should be as
>>> optional_data_names = ('opt',)
"""
raise NotImplementedError
@classmethod
@abstractmethod
def build_model(cls, args: argparse.Namespace) -> AbsMuskitModel:
raise NotImplementedError
@classmethod
def get_parser(cls) -> config_argparse.ArgumentParser:
assert check_argument_types()
class ArgumentDefaultsRawTextHelpFormatter(
argparse.RawTextHelpFormatter,
argparse.ArgumentDefaultsHelpFormatter,
):
pass
parser = config_argparse.ArgumentParser(
description="base parser",
formatter_class=ArgumentDefaultsRawTextHelpFormatter,
)
# NOTE(kamo): Use '_' instead of '-' to avoid confusion.
# I think '-' looks really confusing if it's written in yaml.
# NOTE(kamo): add_arguments(..., required=True) can't be used
# to provide --print_config mode. Instead of it, do as
parser.set_defaults(required=["output_dir"])
group = parser.add_argument_group("Common configuration")
group.add_argument(
"--print_config",
action="store_true",
help="Print the config file and exit",
)
group.add_argument(
"--log_level",
type=lambda x: x.upper(),
default="INFO",
choices=("ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
help="The verbose level of logging",
)
group.add_argument(
"--dry_run",
type=str2bool,
default=False,
help="Perform process without training",
)
group.add_argument(
"--iterator_type",
type=str,
choices=["sequence", "chunk", "task", "none"],
default="sequence",
help="Specify iterator type",
)
group.add_argument("--output_dir", type=str_or_none, default=None)
group.add_argument(
"--ngpu",
type=int,
default=0,
help="The number of gpus. 0 indicates CPU mode",
)
group.add_argument(
"--gpu_id",
type=int,
default=0,
help="GPU_id, only works when ngpu=1",
)
group.add_argument(
"--pitch_aug_min",
type=int,
default=0,
help="The lower bound of midi semitone when pitch augmentation",
)
group.add_argument(
"--pitch_aug_max",
type=int,
default=0,
help="The upper bound of midi semitone when pitch augmentation",
)
group.add_argument("--pitch_mean",
type=str,
default="None",
help="The mean midi-value of training split, None means no-adaptive-pitch-augmentation")
group.add_argument(
"--time_aug_min",
type=float,
default=1,
help="The lower bound of time augmentation factor",
)
group.add_argument(
"--time_aug_max",
type=float,
default=1,
help="The upper bound of time augmentation factor",
)
group.add_argument(
"--random_crop",
type=bool,
default=False,
help="Flag to use random crop augmentation during training",
)
group.add_argument(
"--mask_aug",
type=bool,
default=False,
help="Flag to use masking augmentation during training",
)
group.add_argument("--seed", type=int, default=0, help="Random seed")
group.add_argument(
"--num_workers",
type=int,
default=1,
help="The number of workers used for DataLoader",
)
group.add_argument(
"--num_att_plot",
type=int,
default=3,
help="The number images to plot the outputs from attention. "
"This option makes sense only when attention-based model",
)
group = parser.add_argument_group("distributed training related")
group.add_argument(
"--dist_backend",
default="nccl",
type=str,
help="distributed backend",
)
group.add_argument(
"--dist_init_method",
type=str,
default="env://",
help='if init_method="env://", env values of "MASTER_PORT", "MASTER_ADDR", '
'"WORLD_SIZE", and "RANK" are referred.',
)
group.add_argument(
"--dist_world_size",
default=None,
type=int_or_none,
help="number of nodes for distributed training",
)
group.add_argument(
"--dist_rank",
type=int_or_none,
default=None,
help="node rank for distributed training",
)
group.add_argument(
# Not starting with "dist_" for compatibility to launch.py
"--local_rank",
type=int_or_none,
default=None,
help="local rank for distributed training. This option is used if "
"--multiprocessing_distributed=false",
)
group.add_argument(
"--dist_master_addr",
default=None,
type=str_or_none,
help="The master address for distributed training. "
"This value is used when dist_init_method == 'env://'",
)
group.add_argument(
"--dist_master_port",
default=None,
type=int_or_none,
help="The master port for distributed training"
"This value is used when dist_init_method == 'env://'",
)
group.add_argument(
"--dist_launcher",
default=None,
type=str_or_none,
choices=["slurm", "mpi", None],
help="The launcher type for distributed training",
)
group.add_argument(
"--multiprocessing_distributed",
default=False,
type=str2bool,
help="Use multi-processing distributed training to launch "
"N processes per node, which has N GPUs. This is the "
"fastest way to use PyTorch for either single node or "
"multi node data parallel training",
)
group.add_argument(
"--unused_parameters",
type=str2bool,
default=False,
help="Whether to use the find_unused_parameters in "
"torch.nn.parallel.DistributedDataParallel ",
)
group.add_argument(
"--sharded_ddp",
default=False,
type=str2bool,
help="Enable sharded training provided by fairscale",
)
group = parser.add_argument_group("cudnn mode related")
group.add_argument(
"--cudnn_enabled",
type=str2bool,
default=torch.backends.cudnn.enabled,
help="Enable CUDNN",
)
group.add_argument(
"--cudnn_benchmark",
type=str2bool,
default=torch.backends.cudnn.benchmark,
help="Enable cudnn-benchmark mode",
)
group.add_argument(
"--cudnn_deterministic",
type=str2bool,
default=True,
help="Enable cudnn-deterministic mode",
)
group = parser.add_argument_group("collect stats mode related")
group.add_argument(
"--collect_stats",
type=str2bool,
default=False,
help='Perform on "collect stats" mode',
)
group.add_argument(
"--write_collected_feats",
type=str2bool,
default=False,
            help='Write the output features from the model in "collect stats" mode',
)
group = parser.add_argument_group("Trainer related")
group.add_argument(
"--vocoder_checkpoint",
default="",
type=str,
help="checkpoint file to be loaded.",
)
group.add_argument(
"--vocoder_config",
default="",
type=str,
help="yaml format configuration file. if not explicitly provided, "
"it will be searched in the checkpoint directory. (default=None)",
)
group.add_argument(
"--vocoder_normalize_before",
default=False,
action="store_true",
help="whether to perform feature normalization before input to the model. "
"if true, it assumes that the feature is de-normalized. this is useful when "
"text2mel model and vocoder use different feature statistics.",
)
group.add_argument(
"--max_epoch",
type=int,
default=40,
help="The maximum number epoch to train",
)
group.add_argument(
"--patience",
type=int_or_none,
default=None,
help="Number of epochs to wait without improvement "
"before stopping the training",
)
group.add_argument(
"--val_scheduler_criterion",
type=str,
nargs=2,
default=("valid", "loss"),
help="The criterion used for the value given to the lr scheduler. "
'Give a pair referring the phase, "train" or "valid",'
'and the criterion name. The mode specifying "min" or "max" can '
"be changed by --scheduler_conf",
)
group.add_argument(
"--early_stopping_criterion",
type=str,
nargs=3,
default=("valid", "loss", "min"),
help="The criterion used for judging of early stopping. "
'Give a pair referring the phase, "train" or "valid",'
'the criterion name and the mode, "min" or "max", e.g. "acc,max".',
)
group.add_argument(
"--best_model_criterion",
type=str2triple_str,
nargs="+",
default=[
("train", "loss", "min"),
("valid", "loss", "min"),
("train", "acc", "max"),
("valid", "acc", "max"),
],
help="The criterion used for judging of the best model. "
'Give a pair referring the phase, "train" or "valid",'
'the criterion name, and the mode, "min" or "max", e.g. "acc,max".',
)
group.add_argument(
"--keep_nbest_models",
type=int,
nargs="+",
default=[10],
help="Remove previous snapshots excluding the n-best scored epochs",
)
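        # How the two options above interact (a rough reading, not an exhaustive spec):
        # each (phase, criterion, mode) triple in --best_model_criterion defines one
        # ranking of the saved epochs, and --keep_nbest_models keeps the n best
        # checkpoints per ranking, e.g. keeping the 10 epochs with the lowest
        # validation loss under the default ("valid", "loss", "min").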
group.add_argument(
"--grad_clip",
type=float,
default=5.0,
help="Gradient norm threshold to clip",
)
group.add_argument(
"--grad_clip_type",
type=float,
default=2.0,
help="The type of the used p-norm for gradient clip. Can be inf",
)
group.add_argument(
"--grad_noise",
type=str2bool,
default=False,
help="The flag to switch to use noise injection to "
"gradients during training",
)
group.add_argument(
"--accum_grad",
type=int,
default=1,
help="The number of gradient accumulation",
)
group.add_argument(
"--no_forward_run",
type=str2bool,
default=False,
help="Just only iterating data loading without "
"model forwarding and training",
)
group.add_argument(
"--resume",
type=str2bool,
default=False,
help="Enable resuming if checkpoint is existing",
)
group.add_argument(
"--train_dtype",
default="float32",
choices=["float16", "float32", "float64"],
help="Data type for training.",
)
group.add_argument(
"--use_amp",
type=str2bool,
default=False,
help="Enable Automatic Mixed Precision. This feature requires pytorch>=1.6",
)
group.add_argument(
"--log_interval",
type=int_or_none,
default=None,
help="Show the logs every the number iterations in each epochs at the "
"training phase. If None is given, it is decided according the number "
"of training samples automatically .",
)
group.add_argument(
"--use_tensorboard",
type=str2bool,
default=True,
help="Enable tensorboard logging",
)
group.add_argument(
"--use_wandb",
type=str2bool,
default=False,
help="Enable wandb logging",
)
group.add_argument(
"--wandb_project",
type=str,
default=None,
help="Specify wandb project",
)
group.add_argument(
"--wandb_id",
type=str,
default=None,
help="Specify wandb id",
)
group.add_argument(
"--detect_anomaly",
type=str2bool,
default=False,
help="Set torch.autograd.set_detect_anomaly",
)
group = parser.add_argument_group("Pretraining model related")
group.add_argument("--pretrain_path", help="This option is obsoleted")
group.add_argument(
"--init_param",
type=str,
default=[],
nargs="*",
help="Specify the file path used for initialization of parameters. "
"The format is '<file_path>:<src_key>:<dst_key>:<exclude_keys>', "
"where file_path is the model file path, "
"src_key specifies the key of model states to be used in the model file, "
"dst_key specifies the attribute of the model to be initialized, "
"and exclude_keys excludes keys of model states for the initialization."
"e.g.\n"
" # Load all parameters"
" --init_param some/where/model.pth\n"
" # Load only decoder parameters"
" --init_param some/where/model.pth:decoder:decoder\n"
" # Load only decoder parameters excluding decoder.embed"
" --init_param some/where/model.pth:decoder:decoder:decoder.embed\n"
" --init_param some/where/model.pth:decoder:decoder:decoder.embed\n",
)
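        # One more illustrative use of the format documented above (the path is
        # hypothetical): load only the encoder weights of a pretrained model into
        # this model's "encoder" attribute:
        #   --init_param exp/pretrained/model.pth:encoder:encoder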
group.add_argument(
"--freeze_param",
type=str,
default=[],
nargs="*",
help="Freeze parameters",
)
group = parser.add_argument_group("BatchSampler related")
group.add_argument(
"--num_iters_per_epoch",
type=int_or_none,
default=None,
help="Restrict the number of iterations for training per epoch",
)
group.add_argument(
"--batch_size",
type=int,
default=20,
help="The mini-batch size used for training. Used if batch_type='unsorted',"
" 'sorted', or 'folded'.",
)
group.add_argument(
"--valid_batch_size",
type=int_or_none,
default=None,
help="If not given, the value of --batch_size is used",
)
group.add_argument(
"--batch_bins",
type=int,
default=1000000,
help="The number of batch bins. Used if batch_type='length' or 'numel'",
)
group.add_argument(
"--valid_batch_bins",
type=int_or_none,
default=None,
help="If not given, the value of --batch_bins is used",
)
group.add_argument("--train_shape_file", type=str, action="append", default=[])
group.add_argument("--valid_shape_file", type=str, action="append", default=[])
group = parser.add_argument_group("Sequence iterator related")
_batch_type_help = ""
for key, value in BATCH_TYPES.items():
_batch_type_help += f'"{key}":\n{value}\n'
group.add_argument(
"--batch_type",
type=str,
default="folded",
choices=list(BATCH_TYPES),
help=_batch_type_help,
)
group.add_argument(
"--valid_batch_type",
type=str_or_none,
default=None,
choices=list(BATCH_TYPES) + [None],
help="If not given, the value of --batch_type is used",
)
group.add_argument("--fold_length", type=int, action="append", default=[])
group.add_argument(
"--sort_in_batch",
type=str,
default="descending",
choices=["descending", "ascending"],
help="Sort the samples in each mini-batches by the sample "
'lengths. To enable this, "shape_file" must have the length information.',
)
group.add_argument(
"--sort_batch",
type=str,
default="descending",
choices=["descending", "ascending"],
help="Sort mini-batches by the sample lengths",
)
group.add_argument(
"--multiple_iterator",
type=str2bool,
default=False,
help="Use multiple iterator mode",
)
group = parser.add_argument_group("Chunk iterator related")
group.add_argument(
"--chunk_length",
type=str_or_int,
default=500,
help="Specify chunk length. e.g. '300', '300,400,500', or '300-400'."
"If multiple numbers separated by command are given, "
"one of them is selected randomly for each samples. "
"If two numbers are given with '-', it indicates the range of the choices. "
"Note that if the sequence length is shorter than the all chunk_lengths, "
"the sample is discarded. ",
)
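        # Illustrative readings of --chunk_length, following the help text above:
        #   --chunk_length 300          -> every chunk is 300 frames
        #   --chunk_length 300,400,500  -> one of 300/400/500 is picked per sample
        #   --chunk_length 300-400      -> a length in [300, 400] is picked per sample
        # Samples shorter than all configured chunk lengths are discarded.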
group.add_argument(
"--chunk_shift_ratio",
type=float,
default=0.5,
help="Specify the shift width of chunks. If it's less than 1, "
"allows the overlapping and if bigger than 1, there are some gaps "
"between each chunk.",
)
group.add_argument(
"--num_cache_chunks",
type=int,
default=1024,
help="Shuffle in the specified number of chunks and generate mini-batches "
"More larger this value, more randomness can be obtained.",
)
group = parser.add_argument_group("Dataset related")
_data_path_and_name_and_type_help = (
"Give three words splitted by comma. It's used for the training data. "
"e.g. '--train_data_path_and_name_and_type some/path/a.scp,foo,sound'. "
"The first value, some/path/a.scp, indicates the file path, "
"and the second, foo, is the key name used for the mini-batch data, "
"and the last, sound, decides the file type. "
"This option is repeatable, so you can input any number of features "
"for your task. Supported file types are as follows:\n\n"
)
for key, dic in DATA_TYPES.items():
_data_path_and_name_and_type_help += f'"{key}":\n{dic["help"]}\n\n'
group.add_argument(
"--train_data_path_and_name_and_type",
type=str2triple_str,
action="append",
default=[],
help=_data_path_and_name_and_type_help,
)
group.add_argument(
"--valid_data_path_and_name_and_type",
type=str2triple_str,
action="append",
default=[],
)
group.add_argument(
"--allow_variable_data_keys",
type=str2bool,
default=False,
help="Allow the arbitrary keys for mini-batch with ignoring "
"the task requirements",
)
group.add_argument(
"--max_cache_size",
type=humanfriendly.parse_size,
default=0.0,
help="The maximum cache size for data loader. e.g. 10MB, 20GB.",
)
group.add_argument(
"--max_cache_fd",
type=int,
default=32,
help="The maximum number of file descriptors to be kept "
"as opened for ark files. "
"This feature is only valid when data type is 'kaldi_ark'.",
)
group.add_argument(
"--valid_max_cache_size",
type=humanfriendly_parse_size_or_none,
default=None,
help="The maximum cache size for validation data loader. e.g. 10MB, 20GB. "
"If None, the 5 percent size of --max_cache_size",
)
group = parser.add_argument_group("Optimizer related")
for i in range(1, cls.num_optimizers + 1):
suf = "" if i == 1 else str(i)
group.add_argument(
f"--optim{suf}",
type=lambda x: x.lower(),
default="adadelta",
choices=list(optim_classes),
help="The optimizer type",
)
group.add_argument(
f"--optim{suf}_conf",
action=NestedDictAction,
default=dict(),
help="The keyword arguments for optimizer",
)
group.add_argument(
f"--scheduler{suf}",
type=lambda x: str_or_none(x.lower()),
default=None,
choices=list(scheduler_classes) + [None],
help="The lr scheduler type",
)
group.add_argument(
f"--scheduler{suf}_conf",
action=NestedDictAction,
default=dict(),
help="The keyword arguments for lr scheduler",
)
cls.trainer.add_arguments(parser)
cls.add_task_arguments(parser)
assert check_return_type(parser)
return parser
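    # A sketch of a command line built from the options above. The entry-point module
    # and all paths are hypothetical; the actual script depends on the concrete Task
    # subclass:
    #   python -m muskit.bin.svs_train \
    #       --output_dir exp/svs_train \
    #       --train_data_path_and_name_and_type dump/train/wav.scp,singing,sound \
    #       --valid_data_path_and_name_and_type dump/dev/wav.scp,singing,sound \
    #       --train_shape_file exp/stats/train/speech_shape \
    #       --valid_shape_file exp/stats/valid/speech_shape \
    #       --batch_type folded --batch_size 20 --max_epoch 40 --ngpu 1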
@classmethod
def build_optimizers(
cls,
args: argparse.Namespace,
model: torch.nn.Module,
) -> List[torch.optim.Optimizer]:
if cls.num_optimizers != 1:
raise RuntimeError(
"build_optimizers() must be overridden if num_optimizers != 1"
)
optim_class = optim_classes.get(args.optim)
if optim_class is None:
raise ValueError(f"must be one of {list(optim_classes)}: {args.optim}")
if args.sharded_ddp:
if fairscale is None:
raise RuntimeError("Requiring fairscale. Do 'pip install fairscale'")
optim = fairscale.optim.oss.OSS(
params=model.parameters(), optim=optim_class, **args.optim_conf
)
else:
optim = optim_class(model.parameters(), **args.optim_conf)
optimizers = [optim]
return optimizers
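    # A minimal sketch of how a subclass with num_optimizers == 2 might override
    # build_optimizers (hypothetical; it assumes the model exposes "generator" and
    # "discriminator" submodules, e.g. for GAN-style training):
    #
    #   @classmethod
    #   def build_optimizers(cls, args, model):
    #       optim_class = optim_classes[args.optim]
    #       optim_class2 = optim_classes[args.optim2]
    #       return [
    #           optim_class(model.generator.parameters(), **args.optim_conf),
    #           optim_class2(model.discriminator.parameters(), **args.optim2_conf),
    #       ]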
@classmethod
def exclude_opts(cls) -> Tuple[str, ...]:
"""The options not to be shown by --print_config"""
return "required", "print_config", "config", "ngpu"
@classmethod
def get_default_config(cls) -> Dict[str, Any]:
"""Return the configuration as dict.
This method is used by print_config()
"""
def get_class_type(name: str, classes: dict):
_cls = classes.get(name)
if _cls is None:
raise ValueError(f"must be one of {list(classes)}: {name}")
return _cls
# This method is used only for --print_config
assert check_argument_types()
parser = cls.get_parser()
args, _ = parser.parse_known_args()
config = vars(args)
# Excludes the options not to be shown
for k in AbsTask.exclude_opts():
config.pop(k)
for i in range(1, cls.num_optimizers + 1):
suf = "" if i == 1 else str(i)
name = config[f"optim{suf}"]
optim_class = get_class_type(name, optim_classes)
conf = get_default_kwargs(optim_class)
# Overwrite the default by the arguments,
conf.update(config[f"optim{suf}_conf"])
# and set it again
config[f"optim{suf}_conf"] = conf
name = config[f"scheduler{suf}"]
if name is not None:
scheduler_class = get_class_type(name, scheduler_classes)
conf = get_default_kwargs(scheduler_class)
# Overwrite the default by the arguments,
conf.update(config[f"scheduler{suf}_conf"])
# and set it again
config[f"scheduler{suf}_conf"] = conf
for class_choices in cls.class_choices_list:
if getattr(args, class_choices.name) is not None:
class_obj = class_choices.get_class(getattr(args, class_choices.name))
conf = get_default_kwargs(class_obj)
name = class_choices.name
# Overwrite the default by the arguments,
conf.update(config[f"{name}_conf"])
# and set it again
config[f"{name}_conf"] = conf
return config
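    # Typical use of this method via the CLI (script name and paths are hypothetical):
    # dump the resolved defaults to YAML, then edit the file and pass it back with
    # the config option, e.g.
    #   python train.py --print_config --optim adam > conf/train.yaml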
@classmethod
def check_required_command_args(cls, args: argparse.Namespace):
assert check_argument_types()
for k in vars(args):
if "-" in k:
raise RuntimeError(f'Use "_" instead of "-": parser.get_parser("{k}")')
required = ", ".join(
f"--{a}" for a in args.required if getattr(args, a) is None
)
if len(required) != 0:
parser = cls.get_parser()
parser.print_help(file=sys.stderr)
p = Path(sys.argv[0]).name
print(file=sys.stderr)
print(
f"{p}: error: the following arguments are required: " f"{required}",
file=sys.stderr,
)
sys.exit(2)
@classmethod
def check_task_requirements(
cls,
dataset: Union[AbsDataset, IterableMuskitDataset],
allow_variable_data_keys: bool,
train: bool,
inference: bool = False,
) -> None:
"""Check if the dataset satisfy the requirement of current Task"""
assert check_argument_types()
mes = (
f"If you intend to use an additional input, modify "
f'"{cls.__name__}.required_data_names()" or '
f'"{cls.__name__}.optional_data_names()". '
f"Otherwise you need to set --allow_variable_data_keys true "
)
for k in cls.required_data_names(train, inference):
if not dataset.has_name(k):
raise RuntimeError(
f'"{cls.required_data_names(train, inference)}" are required for'
                    f' {cls.__name__}, but "{dataset.names()}" were given.\n{mes}'
)
if not allow_variable_data_keys:
task_keys = cls.required_data_names(
train, inference
) + cls.optional_data_names(train, inference)
for k in dataset.names():
if k not in task_keys:
raise RuntimeError(
f"The data-name must be one of {task_keys} "
f'for {cls.__name__}: "{k}" is not allowed.\n{mes}'
)
@classmethod
def print_config(cls, file=sys.stdout) -> None:
assert check_argument_types()
# Shows the config: e.g. python train.py asr --print_config
config = cls.get_default_config()
file.write(yaml_no_alias_safe_dump(config, indent=4, sort_keys=False))
@classmethod
def main(cls, args: argparse.Namespace = None, cmd: Sequence[str] = None):
assert check_argument_types()
print(get_commandline_args(), file=sys.stderr)
if args is None:
parser = cls.get_parser()
args = parser.parse_args(cmd)
# args.version = __version__
if args.pretrain_path is not None:
raise RuntimeError("--pretrain_path is deprecated. Use --init_param")
if args.print_config:
cls.print_config()
sys.exit(0)
cls.check_required_command_args(args)
# "distributed" is decided using the other command args
resolve_distributed_mode(args)
if not args.distributed or not args.multiprocessing_distributed:
cls.main_worker(args)
else:
assert args.ngpu > 1, args.ngpu
# Multi-processing distributed mode: e.g. 2node-4process-4GPU
# | Host1 | Host2 |
# | Process1 | Process2 | <= Spawn processes
# |Child1|Child2|Child1|Child2|
# |GPU1 |GPU2 |GPU1 |GPU2 |
# See also the following usage of --multiprocessing-distributed:
# https://github.com/pytorch/examples/blob/master/imagenet/main.py
num_nodes = get_num_nodes(args.dist_world_size, args.dist_launcher)
if num_nodes == 1:
args.dist_master_addr = "localhost"
args.dist_rank = 0
# Single node distributed training with multi-GPUs
if (
args.dist_init_method == "env://"
and get_master_port(args.dist_master_port) is None
):
# Get the unused port
args.dist_master_port = free_port()
            # Assume that all nodes use the same number of GPUs
args.dist_world_size = args.ngpu * num_nodes
node_rank = get_node_rank(args.dist_rank, args.dist_launcher)
# The following block is copied from:
# https://github.com/pytorch/pytorch/blob/master/torch/multiprocessing/spawn.py
error_queues = []
processes = []
mp = torch.multiprocessing.get_context("spawn")
for i in range(args.ngpu):
# Copy args
local_args = argparse.Namespace(**vars(args))
local_args.local_rank = i
local_args.dist_rank = args.ngpu * node_rank + i
local_args.ngpu = 1
process = mp.Process(
target=cls.main_worker,
args=(local_args,),
daemon=False,
)
process.start()
processes.append(process)
error_queues.append(mp.SimpleQueue())
# Loop on join until it returns True or raises an exception.
while not ProcessContext(processes, error_queues).join():
pass
@classmethod
def main_worker(cls, args: argparse.Namespace):
assert check_argument_types()
# 0. Init distributed process
distributed_option = build_dataclass(DistributedOption, args)
# Setting distributed_option.dist_rank, etc.
distributed_option.init_options()
# NOTE(kamo): Don't use logging before invoking logging.basicConfig()
if not distributed_option.distributed or distributed_option.dist_rank == 0:
if not distributed_option.distributed:
_rank = ""
else:
_rank = (
f":{distributed_option.dist_rank}/"
f"{distributed_option.dist_world_size}"
)
# NOTE(kamo):
# logging.basicConfig() is invoked in main_worker() instead of main()
# because it can be invoked only once in a process.
# FIXME(kamo): Should we use logging.getLogger()?
logging.basicConfig(
level=args.log_level,
format=f"[{os.uname()[1].split('.')[0]}{_rank}]"
f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
else:
# Suppress logging if RANK != 0
logging.basicConfig(
level="ERROR",
format=f"[{os.uname()[1].split('.')[0]}"
f":{distributed_option.dist_rank}/{distributed_option.dist_world_size}]"
f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
# Invoking torch.distributed.init_process_group
distributed_option.init_torch_distributed()
# 1. Set random-seed
set_all_random_seed(args.seed)
torch.backends.cudnn.enabled = args.cudnn_enabled
torch.backends.cudnn.benchmark = args.cudnn_benchmark
torch.backends.cudnn.deterministic = args.cudnn_deterministic
if args.detect_anomaly:
logging.info("Invoking torch.autograd.set_detect_anomaly(True)")
torch.autograd.set_detect_anomaly(args.detect_anomaly)
# 2. Build model
model = cls.build_model(args=args)
if not isinstance(model, AbsMuskitModel):
raise RuntimeError(
f"model must inherit {AbsMuskitModel.__name__}, but got {type(model)}"
)
if args.ngpu == 1 and torch.cuda.is_available():
torch.cuda.set_device(args.gpu_id)
logging.info(f"GPU {args.gpu_id} is used")
model = model.to(
dtype=getattr(torch, args.train_dtype),
device="cuda" if args.ngpu > 0 else "cpu",
)
for t in args.freeze_param:
for k, p in model.named_parameters():
if k.startswith(t + ".") or k == t:
logging.info(f"Setting {k}.requires_grad = False")
p.requires_grad = False
# 3. Build optimizer
optimizers = cls.build_optimizers(args, model=model)
# 4. Build schedulers
schedulers = []
for i, optim in enumerate(optimizers, 1):
suf = "" if i == 1 else str(i)
name = getattr(args, f"scheduler{suf}")
conf = getattr(args, f"scheduler{suf}_conf")
if name is not None:
cls_ = scheduler_classes.get(name)
if cls_ is None:
raise ValueError(
f"must be one of {list(scheduler_classes)}: {name}"
)
scheduler = cls_(optim, **conf)
else:
scheduler = None
schedulers.append(scheduler)
logging.info(pytorch_cudnn_version())
logging.info(model_summary(model))
for i, (o, s) in enumerate(zip(optimizers, schedulers), 1):
suf = "" if i == 1 else str(i)
logging.info(f"Optimizer{suf}:\n{o}")
logging.info(f"Scheduler{suf}: {s}")
# 5. Dump "args" to config.yaml
# NOTE(kamo): "args" should be saved after object-buildings are done
# because they are allowed to modify "args".
output_dir = Path(args.output_dir)
if not distributed_option.distributed or distributed_option.dist_rank == 0:
output_dir.mkdir(parents=True, exist_ok=True)
with (output_dir / "config.yaml").open("w", encoding="utf-8") as f:
logging.info(
f'Saving the configuration in {output_dir / "config.yaml"}'
)
yaml_no_alias_safe_dump(vars(args), f, indent=4, sort_keys=False)
# 6. Loads pre-trained model
for p in args.init_param:
logging.info(f"Loading pretrained params from {p}")
load_pretrained_model(
model=model,
init_param=p,
# NOTE(kamo): "cuda" for torch.load always indicates cuda:0
# in PyTorch<=1.4
map_location=f"cuda:{torch.cuda.current_device()}"
if args.ngpu > 0
else "cpu",
)
if args.dry_run:
pass
elif args.collect_stats:
            # Perform in collect_stats mode. This mode has two roles:
# - Derive the length and dimension of all input data
# - Accumulate feats, square values, and the length for whitening
logging.info(args)
if args.valid_batch_size is None:
args.valid_batch_size = args.batch_size
if len(args.train_shape_file) != 0:
train_key_file = args.train_shape_file[0]
else:
train_key_file = None
if len(args.valid_shape_file) != 0:
valid_key_file = args.valid_shape_file[0]
else:
valid_key_file = None
collect_stats(
model=model,
train_iter=cls.build_streaming_iterator(
data_path_and_name_and_type=args.train_data_path_and_name_and_type,
key_file=train_key_file,
batch_size=args.batch_size,
dtype=args.train_dtype,
num_workers=args.num_workers,
allow_variable_data_keys=args.allow_variable_data_keys,
ngpu=args.ngpu,
preprocess_fn=cls.build_preprocess_fn(args, train=False),
collate_fn=cls.build_collate_fn(args, train=False),
),
valid_iter=cls.build_streaming_iterator(
data_path_and_name_and_type=args.valid_data_path_and_name_and_type,
key_file=valid_key_file,
batch_size=args.valid_batch_size,
dtype=args.train_dtype,
num_workers=args.num_workers,
allow_variable_data_keys=args.allow_variable_data_keys,
ngpu=args.ngpu,
preprocess_fn=cls.build_preprocess_fn(args, train=False),
collate_fn=cls.build_collate_fn(args, train=False),
),
output_dir=output_dir,
ngpu=args.ngpu,
log_interval=args.log_interval,
write_collected_feats=args.write_collected_feats,
)
else:
# 7. Build iterator factories
if args.multiple_iterator:
train_iter_factory = cls.build_multiple_iter_factory(
args=args,
distributed_option=distributed_option,
mode="train",
)
else:
train_iter_factory = cls.build_iter_factory(
args=args,
distributed_option=distributed_option,
mode="train",
)
valid_iter_factory = cls.build_iter_factory(
args=args,
distributed_option=distributed_option,
mode="valid",
)
if args.num_att_plot != 0:
plot_attention_iter_factory = cls.build_iter_factory(
args=args,
distributed_option=distributed_option,
mode="plot_att",
)
else:
plot_attention_iter_factory = None
# 8. Start training
if args.use_wandb:
if (
not distributed_option.distributed
or distributed_option.dist_rank == 0
):
if args.wandb_project is None:
project = (
"Muskit_"
+ cls.__name__
+ str(Path(".").resolve()).replace("/", "_")
)
else:
project = args.wandb_project
if args.wandb_id is None:
wandb_id = str(output_dir).replace("/", "_")
else:
wandb_id = args.wandb_id
wandb.init(
project=project,
dir=output_dir,
id=wandb_id,
resume="allow",
)
wandb.config.update(args)
else:
# wandb also supports grouping for distributed training,
                    # but we only log aggregated data,
                    # so it's enough to perform it on the rank-0 node.
args.use_wandb = False
# Don't give args to trainer.run() directly!!!
# Instead of it, define "Options" object and build here.
trainer_options = cls.trainer.build_options(args)
cls.trainer.run(
model=model,
optimizers=optimizers,
schedulers=schedulers,
train_iter_factory=train_iter_factory,
valid_iter_factory=valid_iter_factory,
plot_attention_iter_factory=plot_attention_iter_factory,
trainer_options=trainer_options,
distributed_option=distributed_option,
)
@classmethod
def build_iter_options(
cls,
args: argparse.Namespace,
distributed_option: DistributedOption,
mode: str,
):
if mode == "train":
preprocess_fn = cls.build_preprocess_fn(args, train=True)
collate_fn = cls.build_collate_fn(args, train=True)
data_path_and_name_and_type = args.train_data_path_and_name_and_type
shape_files = args.train_shape_file
batch_size = args.batch_size
batch_bins = args.batch_bins
batch_type = args.batch_type
max_cache_size = args.max_cache_size
max_cache_fd = args.max_cache_fd
distributed = distributed_option.distributed
num_batches = None
num_iters_per_epoch = args.num_iters_per_epoch
train = True
elif mode == "valid":
preprocess_fn = cls.build_preprocess_fn(args, train=False)
collate_fn = cls.build_collate_fn(args, train=False)
data_path_and_name_and_type = args.valid_data_path_and_name_and_type
shape_files = args.valid_shape_file
if args.valid_batch_type is None:
batch_type = args.batch_type
else:
batch_type = args.valid_batch_type
if args.valid_batch_size is None:
batch_size = args.batch_size
else:
batch_size = args.valid_batch_size
if args.valid_batch_bins is None:
batch_bins = args.batch_bins
else:
batch_bins = args.valid_batch_bins
if args.valid_max_cache_size is None:
# Cache 5% of maximum size for validation loader
max_cache_size = 0.05 * args.max_cache_size
else:
max_cache_size = args.valid_max_cache_size
max_cache_fd = args.max_cache_fd
distributed = distributed_option.distributed
num_batches = None
num_iters_per_epoch = None
train = False
elif mode == "plot_att":
preprocess_fn = cls.build_preprocess_fn(args, train=False)
collate_fn = cls.build_collate_fn(args, train=False)
data_path_and_name_and_type = args.valid_data_path_and_name_and_type
shape_files = args.valid_shape_file
batch_type = "unsorted"
batch_size = 1
batch_bins = 0
num_batches = args.num_att_plot
max_cache_fd = args.max_cache_fd
            # num_att_plot should be a small number (~3), so cache all data.
max_cache_size = np.inf if args.max_cache_size != 0.0 else 0.0
# always False because plot_attention performs on RANK0
distributed = False
num_iters_per_epoch = None
train = False
else:
raise NotImplementedError(f"mode={mode}")
return IteratorOptions(
preprocess_fn=preprocess_fn,
collate_fn=collate_fn,
data_path_and_name_and_type=data_path_and_name_and_type,
shape_files=shape_files,
batch_type=batch_type,
batch_size=batch_size,
batch_bins=batch_bins,
num_batches=num_batches,
max_cache_size=max_cache_size,
max_cache_fd=max_cache_fd,
distributed=distributed,
num_iters_per_epoch=num_iters_per_epoch,
train=train,
)
@classmethod
def build_iter_factory(
cls,
args: argparse.Namespace,
distributed_option: DistributedOption,
mode: str,
kwargs: dict = None,
) -> AbsIterFactory:
"""Build a factory object of mini-batch iterator.
        This object is invoked at every epoch to build the iterator for each epoch
        as follows:
        >>> iter_factory = cls.build_iter_factory(...)
        >>> for epoch in range(1, max_epoch):
        ...     for keys, batch in iter_factory.build_iter(epoch):
        ...         model(**batch)
        The mini-batches for each epoch are fully controlled by this class.
        Note that the random seed used for shuffling is decided as "seed + epoch" and
        the generated mini-batches can be reproduced when resuming.
Note that the definition of "epoch" doesn't always indicate
to run out of the whole training corpus.
"--num_iters_per_epoch" option restricts the number of iterations for each epoch
and the rest of samples for the originally epoch are left for the next epoch.
e.g. If The number of mini-batches equals to 4, the following two are same:
- 1 epoch without "--num_iters_per_epoch"
- 4 epoch with "--num_iters_per_epoch" == 4
"""
assert check_argument_types()
iter_options = cls.build_iter_options(args, distributed_option, mode)
        # Overwrite iter_options if any kwargs are given
if kwargs is not None:
for k, v in kwargs.items():
setattr(iter_options, k, v)
if args.iterator_type == "sequence":
return cls.build_sequence_iter_factory(
args=args,
iter_options=iter_options,
mode=mode,
)
elif args.iterator_type == "chunk":
return cls.build_chunk_iter_factory(
args=args,
iter_options=iter_options,
mode=mode,
)
elif args.iterator_type == "task":
return cls.build_task_iter_factory(
args=args,
iter_options=iter_options,
mode=mode,
)
else:
raise RuntimeError(f"Not supported: iterator_type={args.iterator_type}")
@classmethod
def build_sequence_iter_factory(
cls, args: argparse.Namespace, iter_options: IteratorOptions, mode: str
) -> AbsIterFactory:
assert check_argument_types()
dataset = MuskitDataset(
iter_options.data_path_and_name_and_type,
float_dtype=args.train_dtype,
preprocess=iter_options.preprocess_fn,
max_cache_size=iter_options.max_cache_size,
max_cache_fd=iter_options.max_cache_fd,
mode=mode,
pitch_aug_min=args.pitch_aug_min,
pitch_aug_max=args.pitch_aug_max,
pitch_mean=args.pitch_mean,
time_aug_min=args.time_aug_min,
time_aug_max=args.time_aug_max,
random_crop=args.random_crop,
mask_aug=args.mask_aug,
)
cls.check_task_requirements(
dataset, args.allow_variable_data_keys, train=iter_options.train
)
if Path(
Path(iter_options.data_path_and_name_and_type[0][0]).parent, "utt2category"
).exists():
utt2category_file = str(
Path(
Path(iter_options.data_path_and_name_and_type[0][0]).parent,
"utt2category",
)
)
else:
utt2category_file = None
batch_sampler = build_batch_sampler(
type=iter_options.batch_type,
shape_files=iter_options.shape_files,
fold_lengths=args.fold_length,
batch_size=iter_options.batch_size,
batch_bins=iter_options.batch_bins,
sort_in_batch=args.sort_in_batch,
sort_batch=args.sort_batch,
drop_last=False,
min_batch_size=torch.distributed.get_world_size()
if iter_options.distributed
else 1,
utt2category_file=utt2category_file,
)
batches = list(batch_sampler)
if iter_options.num_batches is not None:
batches = batches[: iter_options.num_batches]
bs_list = [len(batch) for batch in batches]
logging.info(f"[{mode}] dataset:\n{dataset}")
logging.info(f"[{mode}] Batch sampler: {batch_sampler}")
logging.info(
f"[{mode}] mini-batch sizes summary: N-batch={len(bs_list)}, "
f"mean={np.mean(bs_list):.1f}, min={np.min(bs_list)}, max={np.max(bs_list)}"
)
if iter_options.distributed:
world_size = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
for batch in batches:
if len(batch) < world_size:
raise RuntimeError(
f"The batch-size must be equal or more than world_size: "
f"{len(batch)} < {world_size}"
)
batches = [batch[rank::world_size] for batch in batches]
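            # e.g. with world_size=2, a batch ["utt1", "utt2", "utt3", "utt4"] is split
            # round-robin so that rank 0 gets ["utt1", "utt3"] and rank 1 gets
            # ["utt2", "utt4"] (illustrative keys).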
return SequenceIterFactory(
dataset=dataset,
batches=batches,
seed=args.seed,
num_iters_per_epoch=iter_options.num_iters_per_epoch,
shuffle=iter_options.train,
num_workers=args.num_workers,
collate_fn=iter_options.collate_fn,
pin_memory=args.ngpu > 0,
)
@classmethod
def build_chunk_iter_factory(
cls,
args: argparse.Namespace,
iter_options: IteratorOptions,
mode: str,
) -> AbsIterFactory:
assert check_argument_types()
dataset = MuskitDataset(
iter_options.data_path_and_name_and_type,
float_dtype=args.train_dtype,
preprocess=iter_options.preprocess_fn,
max_cache_size=iter_options.max_cache_size,
max_cache_fd=iter_options.max_cache_fd,
)
cls.check_task_requirements(
dataset, args.allow_variable_data_keys, train=iter_options.train
)
if len(iter_options.shape_files) == 0:
key_file = iter_options.data_path_and_name_and_type[0][0]
else:
key_file = iter_options.shape_files[0]
batch_sampler = UnsortedBatchSampler(batch_size=1, key_file=key_file)
batches = list(batch_sampler)
if iter_options.num_batches is not None:
batches = batches[: iter_options.num_batches]
logging.info(f"[{mode}] dataset:\n{dataset}")
if iter_options.distributed:
world_size = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
if len(batches) < world_size:
raise RuntimeError("Number of samples is smaller than world_size")
if iter_options.batch_size < world_size:
raise RuntimeError("batch_size must be equal or more than world_size")
if rank < iter_options.batch_size % world_size:
batch_size = iter_options.batch_size // world_size + 1
else:
batch_size = iter_options.batch_size // world_size
num_cache_chunks = args.num_cache_chunks // world_size
            # NOTE(kamo): The whole corpus is split by sample counts without considering
            # each sample's length, therefore the numbers of iterations are not
            # always equal to each other and the iterations are limited
            # by the fewest iterations.
            # i.e. the samples beyond that count are discarded.
batches = batches[rank::world_size]
else:
batch_size = iter_options.batch_size
num_cache_chunks = args.num_cache_chunks
return ChunkIterFactory(
dataset=dataset,
batches=batches,
seed=args.seed,
batch_size=batch_size,
# For chunk iterator,
# --num_iters_per_epoch doesn't indicate the number of iterations,
# but indicates the number of samples.
num_samples_per_epoch=iter_options.num_iters_per_epoch,
shuffle=iter_options.train,
num_workers=args.num_workers,
collate_fn=iter_options.collate_fn,
pin_memory=args.ngpu > 0,
chunk_length=args.chunk_length,
chunk_shift_ratio=args.chunk_shift_ratio,
num_cache_chunks=num_cache_chunks,
)
# NOTE(kamo): Not abstract class
@classmethod
def build_task_iter_factory(
cls,
args: argparse.Namespace,
iter_options: IteratorOptions,
mode: str,
) -> AbsIterFactory:
"""Build task specific iterator factory
Example:
>>> class YourTask(AbsTask):
... @classmethod
... def add_task_arguments(cls, parser: argparse.ArgumentParser):
... parser.set_defaults(iterator_type="task")
...
... @classmethod
... def build_task_iter_factory(
... cls,
... args: argparse.Namespace,
... iter_options: IteratorOptions,
... mode: str,
... ):
... return FooIterFactory(...)
...
... @classmethod
... def build_iter_options(
            ...     args: argparse.Namespace,
... distributed_option: DistributedOption,
... mode: str
... ):
... # if you need to customize options object
"""
raise NotImplementedError
@classmethod
def build_multiple_iter_factory(
cls, args: argparse.Namespace, distributed_option: DistributedOption, mode: str
):
assert check_argument_types()
iter_options = cls.build_iter_options(args, distributed_option, mode)
assert len(iter_options.data_path_and_name_and_type) > 0, len(
iter_options.data_path_and_name_and_type
)
# 1. Sanity check
num_splits = None
for path in [
path for path, _, _ in iter_options.data_path_and_name_and_type
] + list(iter_options.shape_files):
if not Path(path).is_dir():
raise RuntimeError(f"{path} is not a directory")
p = Path(path) / "num_splits"
if not p.exists():
raise FileNotFoundError(f"{p} is not found")
with p.open() as f:
_num_splits = int(f.read())
if num_splits is not None and num_splits != _num_splits:
raise RuntimeError(
f"Number of splits are mismathed: "
f"{iter_options.data_path_and_name_and_type[0][0]} and {path}"
)
num_splits = _num_splits
for i in range(num_splits):
p = Path(path) / f"split.{i}"
if not p.exists():
raise FileNotFoundError(f"{p} is not found")
        # 2. Create functions to build an iter factory for each split
data_path_and_name_and_type_list = [
[
(str(Path(p) / f"split.{i}"), n, t)
for p, n, t in iter_options.data_path_and_name_and_type
]
for i in range(num_splits)
]
shape_files_list = [
[str(Path(s) / f"split.{i}") for s in iter_options.shape_files]
for i in range(num_splits)
]
num_iters_per_epoch_list = [
(iter_options.num_iters_per_epoch + i) // num_splits
if iter_options.num_iters_per_epoch is not None
else None
for i in range(num_splits)
]
max_cache_size = iter_options.max_cache_size / num_splits
# Note that iter-factories are built for each epoch at runtime lazily.
build_funcs = [
functools.partial(
cls.build_iter_factory,
args,
distributed_option,
mode,
kwargs=dict(
data_path_and_name_and_type=_data_path_and_name_and_type,
shape_files=_shape_files,
num_iters_per_epoch=_num_iters_per_epoch,
max_cache_size=max_cache_size,
),
)
for (
_data_path_and_name_and_type,
_shape_files,
_num_iters_per_epoch,
) in zip(
data_path_and_name_and_type_list,
shape_files_list,
num_iters_per_epoch_list,
)
]
# 3. Build MultipleIterFactory
return MultipleIterFactory(
build_funcs=build_funcs, shuffle=iter_options.train, seed=args.seed
)
@classmethod
def build_streaming_iterator(
cls,
data_path_and_name_and_type,
preprocess_fn,
collate_fn,
key_file: str = None,
batch_size: int = 1,
dtype: str = np.float32,
num_workers: int = 1,
allow_variable_data_keys: bool = False,
ngpu: int = 0,
inference: bool = False,
) -> DataLoader:
"""Build DataLoader using iterable dataset"""
assert check_argument_types()
# For backward compatibility for pytorch DataLoader
if collate_fn is not None:
kwargs = dict(collate_fn=collate_fn)
else:
kwargs = {}
        # IterableDataset is supported from pytorch>=1.2
if LooseVersion(torch.__version__) >= LooseVersion("1.2"):
dataset = IterableMuskitDataset(
data_path_and_name_and_type,
float_dtype=dtype,
preprocess=preprocess_fn,
key_file=key_file,
)
if dataset.apply_utt2category:
kwargs.update(batch_size=1)
else:
kwargs.update(batch_size=batch_size)
else:
dataset = MuskitDataset(
data_path_and_name_and_type,
float_dtype=dtype,
preprocess=preprocess_fn,
)
if key_file is None:
key_file = data_path_and_name_and_type[0][0]
batch_sampler = UnsortedBatchSampler(
batch_size=batch_size,
key_file=key_file,
drop_last=False,
)
kwargs.update(batch_sampler=batch_sampler)
cls.check_task_requirements(
dataset, allow_variable_data_keys, train=False, inference=inference
)
return DataLoader(
dataset=dataset,
pin_memory=ngpu > 0,
num_workers=num_workers,
**kwargs,
)
# ~~~~~~~~~ The methods below are mainly used for inference ~~~~~~~~~
@classmethod
def build_model_from_file(
cls,
config_file: Union[Path, str],
model_file: Union[Path, str] = None,
device: str = "cpu",
) -> Tuple[AbsMuskitModel, argparse.Namespace]:
"""This method is used for inference or fine-tuning.
Args:
config_file: The yaml file saved when training.
model_file: The model file saved when training.
            device: The device onto which the model is loaded, e.g. "cpu" or "cuda".
"""
assert check_argument_types()
config_file = Path(config_file)
with config_file.open("r", encoding="utf-8") as f:
args = yaml.safe_load(f)
args = argparse.Namespace(**args)
model = cls.build_model(args)
if not isinstance(model, AbsMuskitModel):
raise RuntimeError(
f"model must inherit {AbsMuskitModel.__name__}, but got {type(model)}"
)
model.to(device)
if model_file is not None:
logging.info(f"Load model state dict from: {model_file}")
if device == "cuda":
# NOTE(kamo): "cuda" for torch.load always indicates cuda:0
# in PyTorch<=1.4
device = f"cuda:{torch.cuda.current_device()}"
model.load_state_dict(torch.load(model_file, map_location=device))
return model, args
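    # Illustrative call for inference (the task class and paths are hypothetical):
    #   model, train_args = SomeTask.build_model_from_file(
    #       "exp/svs_train/config.yaml",
    #       "exp/svs_train/valid.loss.best.pth",
    #       device="cuda",
    #   )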
| 37.779587
| 100
| 0.577224
|
81fd6350f24f163b50ea72a3e5838ced811ea745
| 7,102
|
py
|
Python
|
cliente/views/cliente.py
|
gustavopinho/pmocgestao
|
950f39a252c13cda29003c03c99ffcfcf3824e86
|
[
"MIT"
] | null | null | null |
cliente/views/cliente.py
|
gustavopinho/pmocgestao
|
950f39a252c13cda29003c03c99ffcfcf3824e86
|
[
"MIT"
] | 1
|
2019-12-13T12:34:05.000Z
|
2019-12-13T12:34:05.000Z
|
cliente/views/cliente.py
|
gustavopinho/pmocgestao
|
950f39a252c13cda29003c03c99ffcfcf3824e86
|
[
"MIT"
] | null | null | null |
import calendar
from datetime import date
from django.contrib import messages
from django.contrib.auth.mixins import (
LoginRequiredMixin, PermissionRequiredMixin
)
from django.contrib.messages.views import SuccessMessageMixin
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.views.generic import View
from django.views.generic.edit import (
CreateView, UpdateView
)
from django.urls import reverse_lazy
from cliente.models import Cliente
from core.utils import get_empresa
# Dashboard
class ClienteDashboardView(LoginRequiredMixin, PermissionRequiredMixin, SuccessMessageMixin, View):
template_name = 'cliente/dashboard.html'
context = {}
raise_exception = True
permission_required = (
'cliente.view_cliente'
)
def get(self, request, empresa, pk):
empresa = get_empresa(self.request, self.kwargs['empresa'])
self.context['cliente'] = empresa.clientes.get(pk=pk)
return render(request, self.template_name, self.context)
class ClienteRelatoriosView(LoginRequiredMixin, PermissionRequiredMixin, SuccessMessageMixin, View):
raise_exception = True
permission_required = (
'cliente.view_cliente'
)
def get(self, request, empresa, pk):
rel = request.GET.get('rel')
ano = int(request.GET.get('ano'))
empresa = get_empresa(self.request, self.kwargs['empresa'])
cliente = empresa.clientes.get(pk=pk)
        # Monthly maintenance report
if rel == 'R1':
data = {
'labels': [
'Janeiro', 'Fevereiro', 'Março',
'Abril', 'Maio', 'Junho',
                    'Julho', 'Agosto', 'Setembro',
'Outubro', 'Novembro', 'Dezembro'
],
'datasets' :[]
}
agendado = {
'label': 'Agendado',
'backgroundColor': '#E46651',
'data' : []
}
executado = {
'label': 'Executado',
'backgroundColor': '#00D8FF',
'data' : []
}
for m in range(1, 13):
start_date = date(ano, m, 1)
end_date = date(ano, m, calendar.monthrange(ano, m)[1])
agendado['data'].append(cliente.manutencoes.filter(
data_prevista__range=(start_date, end_date),
tipo__nome='PREVENTIVA',
data_execucao__isnull=True
).count())
executado['data'].append(cliente.manutencoes.filter(
data_prevista__range=(start_date, end_date),
tipo__nome='PREVENTIVA',
data_execucao__isnull=False
).count())
data['datasets'].append(executado)
data['datasets'].append(agendado)
return JsonResponse({'results' : data})
        # Annual maintenance report
if rel == 'R2':
start_date = date(ano, 1, 1)
end_date = date(ano, 12, calendar.monthrange(ano, 12)[1])
data = {
'labels': ['Agendado', 'Executado'],
'datasets' :[{
'backgroundColor': [
'#E46651',
'#00D8FF',
],
'data': [
cliente.manutencoes.filter(
data_prevista__range=(start_date, end_date),
tipo__nome='PREVENTIVA',
data_execucao__isnull=True
).count(),
cliente.manutencoes.filter(
data_prevista__range=(start_date, end_date),
tipo__nome='PREVENTIVA',
data_execucao__isnull=False
).count(),
]
}]
}
return JsonResponse({'results' : data})
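        # Both branches above return a payload shaped for a Chart.js-style consumer
        # (an assumption based on the labels/datasets/backgroundColor keys). For
        # rel == 'R1' the response looks roughly like (illustrative numbers only):
        #   {"results": {"labels": ["Janeiro", ..., "Dezembro"],
        #                "datasets": [{"label": "Executado", "data": [3, 1, ...]},
        #                             {"label": "Agendado", "data": [0, 2, ...]}]}}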
# Cliente
class ClienteCreateView(LoginRequiredMixin, PermissionRequiredMixin, SuccessMessageMixin, CreateView):
model = Cliente
fields = [
'nome', 'razao_social', 'cnpj',
'logradouro', 'numero', 'complemento',
'bairro', 'cidade', 'uf', 'cep',
'email', 'fone', 'celular', 'usuarios'
]
success_url = '/'
success_message = 'Cliente cadastrado com sucesso!'
raise_exception = True
permission_required = (
'cliente.add_cliente'
)
def form_valid(self, form):
empresa = get_empresa(self.request, self.kwargs['empresa'])
self.success_url = reverse_lazy('core:dashboard')
        # Check whether the user has permission to access this company
form.instance.empresa = empresa
return super(ClienteCreateView, self).form_valid(form)
def get_context_data(self, **kwargs):
context = super(ClienteCreateView, self).get_context_data(**kwargs)
context['empresa'] = get_empresa(self.request, self.kwargs['empresa'])
return context
def get_form(self, form_class=None):
form = super(ClienteCreateView, self).get_form(form_class)
empresa = get_empresa(self.request, self.kwargs['empresa'])
form.fields["usuarios"].queryset = empresa.usuarios.filter(acesso='PMOCCLIENTE', usuario__is_staff=False)
return form
class ClienteUpdateView(LoginRequiredMixin, PermissionRequiredMixin, SuccessMessageMixin, UpdateView):
model = Cliente
fields = [
'nome', 'razao_social', 'cnpj',
'logradouro', 'numero', 'complemento',
'bairro', 'cidade', 'uf', 'cep',
'email', 'fone', 'celular', 'usuarios'
]
success_url = '/'
success_message = 'Cliente atualizado com sucesso!'
raise_exception = True
permission_required = (
'cliente.change_cliente'
)
def form_valid(self, form):
empresa = get_empresa(self.request, self.kwargs['empresa'])
self.success_url = reverse_lazy('empresa:cliente:cliente_update', kwargs={"empresa" : empresa.pk, "pk": self.kwargs['pk']})
        # Check whether the user has permission to access this company
form.instance.empresa = empresa
return super(ClienteUpdateView, self).form_valid(form)
def get_context_data(self, **kwargs):
context = super(ClienteUpdateView, self).get_context_data(**kwargs)
context['empresa'] = get_empresa(self.request, self.kwargs['empresa'])
return context
def get_form(self, form_class=None):
form = super(ClienteUpdateView, self).get_form(form_class)
empresa = get_empresa(self.request, self.kwargs['empresa'])
form.fields["usuarios"].queryset = empresa.usuarios.filter(acesso='PMOCCLIENTE', usuario__is_staff=False)
return form
# Cliente
cliente_dashboard = ClienteDashboardView.as_view()
cliente_relatorios = ClienteRelatoriosView.as_view()
cliente_create = ClienteCreateView.as_view()
cliente_update = ClienteUpdateView.as_view()
| 35.51
| 131
| 0.591664
|
9e2fa1a400566198853b982daf259405c3c280a7
| 170
|
py
|
Python
|
Chap04/04_06.py
|
elishahyousaf/linkedin-exercise-files
|
d79692fd4594d5b6f70253f78e7c4822e7659a00
|
[
"MIT"
] | null | null | null |
Chap04/04_06.py
|
elishahyousaf/linkedin-exercise-files
|
d79692fd4594d5b6f70253f78e7c4822e7659a00
|
[
"MIT"
] | null | null | null |
Chap04/04_06.py
|
elishahyousaf/linkedin-exercise-files
|
d79692fd4594d5b6f70253f78e7c4822e7659a00
|
[
"MIT"
] | 5
|
2021-01-15T04:13:50.000Z
|
2021-02-06T02:52:42.000Z
|
guess = input("What's my favorite food? ")
if guess == "cookies":
print("Yep! So amazing!")
else:
print("Yuck! That’s not it!")
print("Thanks for playing!")
| 14.166667
| 42
| 0.611765
|
a90f74399dfcf3690df96423bfe2e32764c1c405
| 365
|
py
|
Python
|
taotao-cloud-python/taotao-cloud-oldboy/day51-django-ORM/Django_ORM/app01/models.py
|
shuigedeng/taotao-cloud-paren
|
3d281b919490f7cbee4520211e2eee5da7387564
|
[
"Apache-2.0"
] | 47
|
2021-04-13T10:32:13.000Z
|
2022-03-31T10:30:30.000Z
|
taotao-cloud-python/taotao-cloud-oldboy/day51-django-ORM/Django_ORM/app01/models.py
|
shuigedeng/taotao-cloud-paren
|
3d281b919490f7cbee4520211e2eee5da7387564
|
[
"Apache-2.0"
] | 1
|
2021-11-01T07:41:04.000Z
|
2021-11-01T07:41:10.000Z
|
taotao-cloud-python/taotao-cloud-oldboy/day51-django-ORM/Django_ORM/app01/models.py
|
shuigedeng/taotao-cloud-paren
|
3d281b919490f7cbee4520211e2eee5da7387564
|
[
"Apache-2.0"
] | 21
|
2021-04-13T10:32:17.000Z
|
2022-03-26T07:43:22.000Z
|
from django.db import models
# Create your models here.
class Book(models.Model):
name=models.CharField(max_length=20)
price=models.IntegerField()
pub_date=models.DateField()
author=models.CharField(max_length=32,null=False)
def __str__(self):
return self.name
class Author(models.Model):
name=models.CharField(max_length=32)
| 19.210526
| 53
| 0.723288
|
0f23a71190d77d3353744ea20e188f764fd94b36
| 11,347
|
py
|
Python
|
openpyexcel/drawing/shape.py
|
sciris/openpyexcel
|
1fde667a1adc2f4988279fd73a2ac2660706b5ce
|
[
"MIT"
] | 2
|
2019-07-03T06:37:42.000Z
|
2020-05-15T00:28:13.000Z
|
openpyexcel/drawing/shape.py
|
sciris/openpyexcel
|
1fde667a1adc2f4988279fd73a2ac2660706b5ce
|
[
"MIT"
] | null | null | null |
openpyexcel/drawing/shape.py
|
sciris/openpyexcel
|
1fde667a1adc2f4988279fd73a2ac2660706b5ce
|
[
"MIT"
] | 1
|
2020-01-06T10:01:42.000Z
|
2020-01-06T10:01:42.000Z
|
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyexcel
from openpyexcel.styles.colors import Color, BLACK, WHITE
from openpyexcel.utils.units import (
pixels_to_EMU,
EMU_to_pixels,
short_color,
)
from openpyexcel.compat import deprecated
from openpyexcel.xml.functions import Element, SubElement, tostring
from openpyexcel.xml.constants import (
DRAWING_NS,
SHEET_DRAWING_NS,
CHART_NS,
CHART_DRAWING_NS,
PKG_REL_NS
)
from openpyexcel.compat.strings import safe_string
class Shape(object):
""" a drawing inside a chart
        coordinates are specified by the user in the axis units
"""
MARGIN_LEFT = 6 + 13 + 1
MARGIN_BOTTOM = 17 + 11
FONT_WIDTH = 7
FONT_HEIGHT = 8
ROUND_RECT = 'roundRect'
RECT = 'rect'
# other shapes to define :
'''
"line"
"lineInv"
"triangle"
"rtTriangle"
"diamond"
"parallelogram"
"trapezoid"
"nonIsoscelesTrapezoid"
"pentagon"
"hexagon"
"heptagon"
"octagon"
"decagon"
"dodecagon"
"star4"
"star5"
"star6"
"star7"
"star8"
"star10"
"star12"
"star16"
"star24"
"star32"
"roundRect"
"round1Rect"
"round2SameRect"
"round2DiagRect"
"snipRoundRect"
"snip1Rect"
"snip2SameRect"
"snip2DiagRect"
"plaque"
"ellipse"
"teardrop"
"homePlate"
"chevron"
"pieWedge"
"pie"
"blockArc"
"donut"
"noSmoking"
"rightArrow"
"leftArrow"
"upArrow"
"downArrow"
"stripedRightArrow"
"notchedRightArrow"
"bentUpArrow"
"leftRightArrow"
"upDownArrow"
"leftUpArrow"
"leftRightUpArrow"
"quadArrow"
"leftArrowCallout"
"rightArrowCallout"
"upArrowCallout"
"downArrowCallout"
"leftRightArrowCallout"
"upDownArrowCallout"
"quadArrowCallout"
"bentArrow"
"uturnArrow"
"circularArrow"
"leftCircularArrow"
"leftRightCircularArrow"
"curvedRightArrow"
"curvedLeftArrow"
"curvedUpArrow"
"curvedDownArrow"
"swooshArrow"
"cube"
"can"
"lightningBolt"
"heart"
"sun"
"moon"
"smileyFace"
"irregularSeal1"
"irregularSeal2"
"foldedCorner"
"bevel"
"frame"
"halfFrame"
"corner"
"diagStripe"
"chord"
"arc"
"leftBracket"
"rightBracket"
"leftBrace"
"rightBrace"
"bracketPair"
"bracePair"
"straightConnector1"
"bentConnector2"
"bentConnector3"
"bentConnector4"
"bentConnector5"
"curvedConnector2"
"curvedConnector3"
"curvedConnector4"
"curvedConnector5"
"callout1"
"callout2"
"callout3"
"accentCallout1"
"accentCallout2"
"accentCallout3"
"borderCallout1"
"borderCallout2"
"borderCallout3"
"accentBorderCallout1"
"accentBorderCallout2"
"accentBorderCallout3"
"wedgeRectCallout"
"wedgeRoundRectCallout"
"wedgeEllipseCallout"
"cloudCallout"
"cloud"
"ribbon"
"ribbon2"
"ellipseRibbon"
"ellipseRibbon2"
"leftRightRibbon"
"verticalScroll"
"horizontalScroll"
"wave"
"doubleWave"
"plus"
"flowChartProcess"
"flowChartDecision"
"flowChartInputOutput"
"flowChartPredefinedProcess"
"flowChartInternalStorage"
"flowChartDocument"
"flowChartMultidocument"
"flowChartTerminator"
"flowChartPreparation"
"flowChartManualInput"
"flowChartManualOperation"
"flowChartConnector"
"flowChartPunchedCard"
"flowChartPunchedTape"
"flowChartSummingJunction"
"flowChartOr"
"flowChartCollate"
"flowChartSort"
"flowChartExtract"
"flowChartMerge"
"flowChartOfflineStorage"
"flowChartOnlineStorage"
"flowChartMagneticTape"
"flowChartMagneticDisk"
"flowChartMagneticDrum"
"flowChartDisplay"
"flowChartDelay"
"flowChartAlternateProcess"
"flowChartOffpageConnector"
"actionButtonBlank"
"actionButtonHome"
"actionButtonHelp"
"actionButtonInformation"
"actionButtonForwardNext"
"actionButtonBackPrevious"
"actionButtonEnd"
"actionButtonBeginning"
"actionButtonReturn"
"actionButtonDocument"
"actionButtonSound"
"actionButtonMovie"
"gear6"
"gear9"
"funnel"
"mathPlus"
"mathMinus"
"mathMultiply"
"mathDivide"
"mathEqual"
"mathNotEqual"
"cornerTabs"
"squareTabs"
"plaqueTabs"
"chartX"
"chartStar"
"chartPlus"
'''
@deprecated("Chart Drawings need a complete rewrite")
def __init__(self,
chart,
coordinates=((0, 0), (1, 1)),
text=None,
scheme="accent1"):
self.chart = chart
self.coordinates = coordinates # in axis units
self.text = text
self.scheme = scheme
self.style = Shape.RECT
self.border_width = 0
self.border_color = BLACK # "F3B3C5"
self.color = WHITE
self.text_color = BLACK
@property
def border_color(self):
return self._border_color
@border_color.setter
def border_color(self, color):
self._border_color = short_color(color)
@property
def color(self):
return self._color
@color.setter
def color(self, color):
self._color = short_color(color)
@property
def text_color(self):
return self._text_color
@text_color.setter
def text_color(self, color):
self._text_color = short_color(color)
@property
def border_width(self):
return self._border_width
@border_width.setter
def border_width(self, w):
self._border_width = w
@property
def coordinates(self):
"""Return coordindates in axis units"""
return self._coordinates
@coordinates.setter
def coordinates(self, coords):
""" set shape coordinates in percentages (left, top, right, bottom)
"""
# this needs refactoring to reflect changes in charts
self.axis_coordinates = coords
(x1, y1), (x2, y2) = coords # bottom left, top right
drawing_width = pixels_to_EMU(self.chart.drawing.width)
drawing_height = pixels_to_EMU(self.chart.drawing.height)
plot_width = drawing_width * self.chart.width
plot_height = drawing_height * self.chart.height
margin_left = self.chart._get_margin_left() * drawing_width
xunit = plot_width / self.chart.get_x_units()
margin_top = self.chart._get_margin_top() * drawing_height
yunit = self.chart.get_y_units()
x_start = (margin_left + (float(x1) * xunit)) / drawing_width
y_start = ((margin_top
+ plot_height
- (float(y1) * yunit))
/ drawing_height)
x_end = (margin_left + (float(x2) * xunit)) / drawing_width
y_end = ((margin_top
+ plot_height
- (float(y2) * yunit))
/ drawing_height)
        # allow user to specify y's in whatever order;
        # Excel expects y_end to be lower
if y_end < y_start:
y_end, y_start = y_start, y_end
self._coordinates = (
self._norm_pct(x_start), self._norm_pct(y_start),
self._norm_pct(x_end), self._norm_pct(y_end)
)
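    # Rough shape of the conversion above (descriptive only): coordinates given in
    # axis units are scaled by the per-unit size of the plot area (xunit / yunit),
    # offset by the chart margins, and normalised by the drawing size, so the stored
    # values end up as fractions of the whole drawing, clamped to [0, 1] by
    # _norm_pct() below.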
@staticmethod
def _norm_pct(pct):
""" force shapes to appear by truncating too large sizes """
if pct > 1:
return 1
elif pct < 0:
return 0
return pct
class ShapeWriter(object):
""" one file per shape """
def __init__(self, shapes):
self._shapes = shapes
def write(self, shape_id):
root = Element('{%s}userShapes' % CHART_NS)
for shape in self._shapes:
anchor = SubElement(root, '{%s}relSizeAnchor' % CHART_DRAWING_NS)
xstart, ystart, xend, yend = shape.coordinates
_from = SubElement(anchor, '{%s}from' % CHART_DRAWING_NS)
SubElement(_from, '{%s}x' % CHART_DRAWING_NS).text = str(xstart)
SubElement(_from, '{%s}y' % CHART_DRAWING_NS).text = str(ystart)
_to = SubElement(anchor, '{%s}to' % CHART_DRAWING_NS)
SubElement(_to, '{%s}x' % CHART_DRAWING_NS).text = str(xend)
SubElement(_to, '{%s}y' % CHART_DRAWING_NS).text = str(yend)
sp = SubElement(anchor, '{%s}sp' % CHART_DRAWING_NS, {'macro':'', 'textlink':''})
nvspr = SubElement(sp, '{%s}nvSpPr' % CHART_DRAWING_NS)
SubElement(nvspr, '{%s}cNvPr' % CHART_DRAWING_NS, {'id':str(shape_id), 'name':'shape %s' % shape_id})
SubElement(nvspr, '{%s}cNvSpPr' % CHART_DRAWING_NS)
sppr = SubElement(sp, '{%s}spPr' % CHART_DRAWING_NS)
frm = SubElement(sppr, '{%s}xfrm' % DRAWING_NS,)
# no transformation
SubElement(frm, '{%s}off' % DRAWING_NS, {'x':'0', 'y':'0'})
SubElement(frm, '{%s}ext' % DRAWING_NS, {'cx':'0', 'cy':'0'})
prstgeom = SubElement(sppr, '{%s}prstGeom' % DRAWING_NS, {'prst':str(shape.style)})
SubElement(prstgeom, '{%s}avLst' % DRAWING_NS)
fill = SubElement(sppr, '{%s}solidFill' % DRAWING_NS, )
SubElement(fill, '{%s}srgbClr' % DRAWING_NS, {'val':shape.color})
border = SubElement(sppr, '{%s}ln' % DRAWING_NS, {'w':str(shape._border_width)})
sf = SubElement(border, '{%s}solidFill' % DRAWING_NS)
SubElement(sf, '{%s}srgbClr' % DRAWING_NS, {'val':shape.border_color})
self._write_style(sp)
self._write_text(sp, shape)
shape_id += 1
return tostring(root)
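    # Minimal illustrative use (the chart and shape objects are hypothetical):
    #   writer = ShapeWriter([shape])
    #   xml_payload = writer.write(shape_id=1)  # serialised <userShapes> element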
def _write_text(self, node, shape):
""" write text in the shape """
tx_body = SubElement(node, '{%s}txBody' % CHART_DRAWING_NS)
SubElement(tx_body, '{%s}bodyPr' % DRAWING_NS, {'vertOverflow':'clip'})
SubElement(tx_body, '{%s}lstStyle' % DRAWING_NS)
p = SubElement(tx_body, '{%s}p' % DRAWING_NS)
if shape.text:
r = SubElement(p, '{%s}r' % DRAWING_NS)
rpr = SubElement(r, '{%s}rPr' % DRAWING_NS, {'lang':'en-US'})
fill = SubElement(rpr, '{%s}solidFill' % DRAWING_NS)
SubElement(fill, '{%s}srgbClr' % DRAWING_NS, {'val':shape.text_color})
SubElement(r, '{%s}t' % DRAWING_NS).text = shape.text
else:
SubElement(p, '{%s}endParaRPr' % DRAWING_NS, {'lang':'en-US'})
def _write_style(self, node):
""" write style theme """
style = SubElement(node, '{%s}style' % CHART_DRAWING_NS)
ln_ref = SubElement(style, '{%s}lnRef' % DRAWING_NS, {'idx':'2'})
scheme_clr = SubElement(ln_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'accent1'})
SubElement(scheme_clr, '{%s}shade' % DRAWING_NS, {'val':'50000'})
fill_ref = SubElement(style, '{%s}fillRef' % DRAWING_NS, {'idx':'1'})
SubElement(fill_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'accent1'})
effect_ref = SubElement(style, '{%s}effectRef' % DRAWING_NS, {'idx':'0'})
SubElement(effect_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'accent1'})
font_ref = SubElement(style, '{%s}fontRef' % DRAWING_NS, {'idx':'minor'})
SubElement(font_ref, '{%s}schemeClr' % DRAWING_NS, {'val':'lt1'})
| 27.211031
| 113
| 0.607826
|
8dc653562d5a93d8c150088e73b615941eec385f
| 314
|
py
|
Python
|
tests/management/commands/test_dblog.py
|
grantjenks/django-dblog
|
c064b8018b4221ee7d6260cf6430b7cfa8dccee8
|
[
"Apache-2.0"
] | 2
|
2019-01-16T10:31:30.000Z
|
2019-06-20T03:02:28.000Z
|
tests/management/commands/test_dblog.py
|
grantjenks/django-dblog
|
c064b8018b4221ee7d6260cf6430b7cfa8dccee8
|
[
"Apache-2.0"
] | 3
|
2019-01-29T06:37:36.000Z
|
2019-01-29T06:43:50.000Z
|
tests/management/commands/test_dblog.py
|
grantjenks/django-dblog
|
c064b8018b4221ee7d6260cf6430b7cfa8dccee8
|
[
"Apache-2.0"
] | null | null | null |
import logging
from django.core.management.base import BaseCommand
dblog = logging.getLogger('dblog.' + __name__)
class Command(BaseCommand):
help = 'Test command for dblog'
def handle(self, *args, **options):
dblog.info('Testing dblog in management command.')
self.stdout.write('OK')
| 22.428571
| 58
| 0.697452
|
5c7a648bfe9562639c369b0c90675ba540a3c77d
| 926
|
py
|
Python
|
django/utils/html.py
|
Haakenlid/tassen
|
911a2541c77eca522ba5a723f175786f4f9eb481
|
[
"Apache-2.0"
] | 16
|
2017-03-21T03:53:37.000Z
|
2021-08-14T06:28:02.000Z
|
django/utils/html.py
|
universitas/universitas.no
|
911a2541c77eca522ba5a723f175786f4f9eb481
|
[
"Apache-2.0"
] | 104
|
2017-03-25T00:12:46.000Z
|
2021-03-09T22:40:58.000Z
|
django/utils/html.py
|
Haakenlid/tassen
|
911a2541c77eca522ba5a723f175786f4f9eb481
|
[
"Apache-2.0"
] | 6
|
2017-03-21T03:53:40.000Z
|
2020-06-07T14:15:38.000Z
|
import re
from bs4 import BeautifulSoup as bs
# from lxml import etree, html
def minify(source):
"""Minify HTML"""
stripped = re.sub(r'\s*\n\s*', '', source)
stripped = re.sub(r'\s+', ' ', stripped)
soup = bs(stripped, 'html5lib')
return ''.join(str(c) for c in soup.body.contents).strip()
# def prettify(source):
# """Prettify HTML"""
# minified = minify(source)
# root = html.fromstring(minified)
# return etree.tostring(root, encoding='unicode', pretty_print=True).strip()
data = """
<div>
<h1>
Hello
</h1>
</div>
"""
def test_minify():
assert minify(' <p/> ') == '<p> </p>'
assert minify(' \t<p>A<div>\nB</div> ') == '<p>A</p><div>B</div>'
minified = minify(data)
assert minified == '<div><h1>Hello</h1></div>'
# def test_prettify():
# pretty = prettify(data)
# assert pretty == '<div>\n <h1>Hello</h1>\n</div>'
| 22.585366
| 80
| 0.561555
|
244602a7d5bad420767be01ee04666ecb28d4e6d
| 9,250
|
py
|
Python
|
docs/source/conf.py
|
mgaller/django-crudbuilder
|
f1d7bd509970c76ccca0879ba1ae97ee1121947b
|
[
"Apache-2.0"
] | 186
|
2015-10-28T12:32:31.000Z
|
2022-03-24T18:39:15.000Z
|
docs/source/conf.py
|
mgaller/django-crudbuilder
|
f1d7bd509970c76ccca0879ba1ae97ee1121947b
|
[
"Apache-2.0"
] | 72
|
2016-01-28T09:25:50.000Z
|
2021-11-24T03:58:59.000Z
|
docs/source/conf.py
|
mgaller/django-crudbuilder
|
f1d7bd509970c76ccca0879ba1ae97ee1121947b
|
[
"Apache-2.0"
] | 73
|
2015-11-08T21:46:06.000Z
|
2022-03-24T10:11:50.000Z
|
# -*- coding: utf-8 -*-
#
# django-crudbuilder documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 10 15:47:30 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-crudbuilder'
copyright = u'2015, Asif'
author = u'Asif'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.5'
# The full version, including alpha/beta/rc tags.
release = '0.1.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-crudbuilderdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'django-crudbuilder.tex', u'django-crudbuilder Documentation',
u'Asif', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'django-crudbuilder', u'django-crudbuilder Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'django-crudbuilder', u'django-crudbuilder Documentation',
author, 'django-crudbuilder', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
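# Build sketch (an assumption, not part of the generated conf.py): with the
# layout implied by the path docs/source/conf.py, the HTML docs could be built
# with
#
#   sphinx-build -b html docs/source docs/build/html
#
# and additional Sphinx extensions would only need to be added to the empty
# `extensions` list above, for example:
#
#   extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']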
| 32.45614
| 79
| 0.719784
|
26c7cbc47b593f636a6dbe6cc287ff5a67f19853
| 3,129
|
py
|
Python
|
rf_model/chembl_rf_vs.py
|
xiaotaw/protein_kinase
|
14a6ba490288f03195f13b331768cb7adfe4cf01
|
[
"Apache-2.0"
] | 11
|
2018-04-09T14:15:39.000Z
|
2022-01-11T17:59:13.000Z
|
rf_model/chembl_rf_vs.py
|
xiaotaw/protein_kinase
|
14a6ba490288f03195f13b331768cb7adfe4cf01
|
[
"Apache-2.0"
] | null | null | null |
rf_model/chembl_rf_vs.py
|
xiaotaw/protein_kinase
|
14a6ba490288f03195f13b331768cb7adfe4cf01
|
[
"Apache-2.0"
] | 3
|
2019-12-04T03:45:50.000Z
|
2020-04-14T12:35:26.000Z
|
# Author: xiaotaw@qq.com (Any bug report is welcome)
# Time Created: Dec 2016
# Time Last Updated: Dec 2016
# Addr: Shenzhen, China
# Description:
import os
import sys
import time
import getpass
import numpy as np
from scipy import sparse
from collections import defaultdict
from matplotlib import pyplot as plt
from sklearn.externals import joblib
from sklearn.ensemble import RandomForestClassifier
sys.path.append("/home/%s/Documents/chembl/data_files/" % getpass.getuser())
import chembl_input as ci
# the newly picked out 15 targets, including 9 targets from 5 big groups and 6 targets from others.
target_list = ["CHEMBL279", "CHEMBL203", # Protein Kinases
"CHEMBL217", "CHEMBL253", # GPCRs (Family A)
"CHEMBL235", "CHEMBL206", # Nuclear Hormone Receptors
"CHEMBL240", "CHEMBL4296", # Voltage Gated Ion Channels
"CHEMBL4805", # Ligand Gated Ion Channels
"CHEMBL204", "CHEMBL244", "CHEMBL4822", "CHEMBL340", "CHEMBL205", "CHEMBL4005" # Others
]
target_list = ["CHEMBL206", "CHEMBL217", "CHEMBL235", "CHEMBL240",
"CHEMBL253", "CHEMBL4296",
]
def virtual_screening(target):
# input dataset
d = ci.DatasetVS(target)
# read saved rf clf model
clf = joblib.load("model_files/rf_%s.m" % target)
# pred file
pred_dir = "pred_files/%s" % target
if not os.path.exists(pred_dir):
os.mkdir(pred_dir)
for part_num in range(13):
t0 = time.time()
pred_path = os.path.join(pred_dir, "vs_pubchem_%d.pred" % part_num)
predfile = open(pred_path, "w")
fp_dir = "/raid/xiaotaw/pubchem/fp_files/%d" % part_num
for i in range(part_num * 10000000 + 1, (part_num + 1) * 10000000, 25000):
fp_fn = os.path.join(fp_dir, "Compound_{:0>9}_{:0>9}.apfp".format(i, i + 24999))
if os.path.exists(fp_fn):
d.reset(fp_fn)
features = d.features_dense
pred = clf.predict_proba(features)
for id_, pred_v in zip(d.pubchem_id, pred[:, 1]):
predfile.write("%s\t%f\n" % (id_, pred_v))
#print("%s\t%d\n" % (fp_fn, pred.shape[0]))
t1 = time.time()
print("%s %d: %.3f" %(target, part_num, t1-t0))
def analyse(target):
vs_pred_file = "pred_files/%s/vs_pubchem.pred" % (target)
if not os.path.exists(vs_pred_file):
os.system("cat pred_files/%s/vs_pubchem_*.pred > pred_files/%s/vs_pubchem.pred" % (target, target))
aa = np.genfromtxt(vs_pred_file, delimiter="\t")
a = aa[:, 1]
test_pred_file = "pred_files/test_%s.pred" % (target)
bb = np.genfromtxt(test_pred_file, delimiter="\t", usecols=[1,2])
b = bb[:, 0][bb[:, 1].astype(bool)]
x = []
y = []
for i in range(10):
mark = (i + 1) / 20.0
xi = 1.0 * (b > mark).sum() / b.shape[0]
yi = (a > mark).sum()
x.append(xi)
y.append(yi)
plt.plot(x, y, "*")
    plt.xlabel("pos yield rate")
plt.ylabel("vs pubchem false pos")
plt.savefig("pred_files/%s/analyse.png" % (target))
target = target_list[int(sys.argv[1])]
virtual_screening(target)
analyse(target)
"""
for target in target_list:
virtual_screening(target)
#analyse(target)
"""
| 32.936842
| 103
| 0.644615
|
bfdea6c0da37bf0de2704a8c53c7c703acb0e5ff
| 2,526
|
py
|
Python
|
redis-monitor/plugins/base_monitor.py
|
abael/ScrapyCluster
|
93a0bd069fe005963b120719c0da9636f24cf289
|
[
"MIT"
] | null | null | null |
redis-monitor/plugins/base_monitor.py
|
abael/ScrapyCluster
|
93a0bd069fe005963b120719c0da9636f24cf289
|
[
"MIT"
] | null | null | null |
redis-monitor/plugins/base_monitor.py
|
abael/ScrapyCluster
|
93a0bd069fe005963b120719c0da9636f24cf289
|
[
"MIT"
] | null | null | null |
import time
class BaseMonitor(object):
'''
Base monitor for handling incoming requests seen within redis
These classes have an implied redis_conn variable for manipulating
a redis connection
'''
# override this with your own regex to look for in redis
regex = None
def setup(self, settings):
'''
Setup the handler
@param settings: The loaded settings file
'''
        if self.regex is None:
raise NotImplementedError("Please specify a regex for the plugin")
def handle(self, key, value):
'''
Process a valid incoming tuple and handle any logic associated
with it
        @param key: The key that matched the request
@param value: The value associated with the key
'''
raise NotImplementedError("Please implement handle() for your handler class")
def get_current_time(self):
'''
@return: the current time stamp
'''
return self._get_current_time()
def _get_current_time(self):
'''
Split this way for unit testing
'''
return time.time()
def _set_logger(self, logger):
'''
Set the logger
@param logger: The LogObject
'''
self.logger = logger
def check_precondition(self, key, val):
'''
Precondition plugin processing check. Useful so we don't execute
plugin code over and over again that acts at only certain times
@param key: the key that matched the request
@param val: The value stored at the key
@return: True if the plugin should process the key(s), otherwise False
'''
return True
def get_log_dict(self, action, appid, spiderid=None, uuid=None,
crawlid=None):
'''
Returns a basic dictionary for logging
@param action: the action taken by the redis monitor
@param spiderid: the spider id
@param appid: the application id
@param uuid: a unique id of the request
@param crawlid: a unique crawl id of the request
'''
extras = {}
extras['action'] = action
extras['appid'] = appid
if spiderid is not None:
extras['spiderid'] = spiderid
if uuid is not None:
extras['uuid'] = uuid
if crawlid is not None:
extras['crawlid'] = crawlid
return extras
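# Minimal plugin sketch (illustrative only; the key layout, regex, and log call
# below are assumptions, not part of the original module): a concrete monitor
# just sets `regex` and implements `handle()`.
class ExampleStopMonitor(BaseMonitor):
    '''
    Example plugin reacting to keys of the form "stop:<spiderid>:<appid>"
    '''
    regex = "stop:*:*"

    def handle(self, key, value):
        '''
        Log the stop request instead of acting on it
        @param key: The key that matched the request
        @param value: The value stored at the key
        '''
        _, spiderid, appid = key.split(":")
        extras = self.get_log_dict('stop', appid, spiderid=spiderid)
        self.logger.info("Received stop request", extra=extras)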
| 29.717647
| 85
| 0.602138
|
5981190f85f2ec3be6af3dc96d3a9417ce05b201
| 4,433
|
py
|
Python
|
fastreport_cloud_sdk/models/subscription_invites_vm.py
|
FastReports/FastReport-Cloud-Python
|
4442e19ef4c980222ede6d9e0597f564d6d85b26
|
[
"MIT"
] | null | null | null |
fastreport_cloud_sdk/models/subscription_invites_vm.py
|
FastReports/FastReport-Cloud-Python
|
4442e19ef4c980222ede6d9e0597f564d6d85b26
|
[
"MIT"
] | null | null | null |
fastreport_cloud_sdk/models/subscription_invites_vm.py
|
FastReports/FastReport-Cloud-Python
|
4442e19ef4c980222ede6d9e0597f564d6d85b26
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
FastReport Cloud
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from fastreport_cloud_sdk.configuration import Configuration
class SubscriptionInvitesVM(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'invites': 'list[SubscriptionInviteVM]',
'count': 'int'
}
attribute_map = {
'invites': 'invites',
'count': 'count'
}
def __init__(self, invites=None, count=None, local_vars_configuration=None): # noqa: E501
"""SubscriptionInvitesVM - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._invites = None
self._count = None
self.discriminator = None
self.invites = invites
if count is not None:
self.count = count
@property
def invites(self):
"""Gets the invites of this SubscriptionInvitesVM. # noqa: E501
:return: The invites of this SubscriptionInvitesVM. # noqa: E501
:rtype: list[SubscriptionInviteVM]
"""
return self._invites
@invites.setter
def invites(self, invites):
"""Sets the invites of this SubscriptionInvitesVM.
:param invites: The invites of this SubscriptionInvitesVM. # noqa: E501
:type invites: list[SubscriptionInviteVM]
"""
self._invites = invites
@property
def count(self):
"""Gets the count of this SubscriptionInvitesVM. # noqa: E501
:return: The count of this SubscriptionInvitesVM. # noqa: E501
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this SubscriptionInvitesVM.
:param count: The count of this SubscriptionInvitesVM. # noqa: E501
:type count: int
"""
self._count = count
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SubscriptionInvitesVM):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SubscriptionInvitesVM):
return True
return self.to_dict() != other.to_dict()
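# Usage sketch (illustrative, not part of the generated model): the VM can be
# built directly and serialized with to_dict(); the values below are placeholders.
if __name__ == "__main__":
    vm = SubscriptionInvitesVM(invites=[], count=0)
    print(vm.to_dict())   # -> {'invites': [], 'count': 0}
    print(vm == SubscriptionInvitesVM(invites=[], count=0))   # -> True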
| 28.235669
| 124
| 0.587864
|
907feb0b47d31e8692c1006f4aec35d78f819327
| 16,753
|
py
|
Python
|
django_select2/forms.py
|
darbula/django-select2
|
e7cd2b229b85ba39ffdc19be69bad734ab0ae2c0
|
[
"Apache-2.0"
] | null | null | null |
django_select2/forms.py
|
darbula/django-select2
|
e7cd2b229b85ba39ffdc19be69bad734ab0ae2c0
|
[
"Apache-2.0"
] | null | null | null |
django_select2/forms.py
|
darbula/django-select2
|
e7cd2b229b85ba39ffdc19be69bad734ab0ae2c0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Django-Select2 Widgets.
These components are responsible for rendering
the necessary HTML data markups. Since this whole
package is to render choices using Select2 JavaScript
library, hence these components are meant to be used
with choice fields.
Widgets are generally of two types:
1. **Light** --
They are not meant to be used when there
are too many options, say, in thousands.
This is because all those options would
have to be pre-rendered onto the page
and JavaScript would be used to search
        through them. That said, they are also one of
        the easiest to use. They are a
drop-in-replacement for Django's default
select widgets.
2. **Heavy** --
        They are suited for scenarios where the number of options
        is large and complex queries (possibly against different
        sources) are needed to get the options.
This dynamic fetching of options undoubtedly requires
Ajax communication with the server. Django-Select2 includes
a helper JS file which is included automatically,
so you need not worry about writing any Ajax related JS code.
Although on the server side you do need to create a view
specifically to respond to the queries.
3. **Model** --
        Model-widgets are further specialized versions of Heavies.
These do not require views to serve Ajax requests.
When they are instantiated, they register themselves
with one central view which handles Ajax requests for them.
Heavy widgets have the word 'Heavy' in their name.
Light widgets are normally named, i.e. there is no
'Light' word in their names.
.. inheritance-diagram:: django_select2.forms
:parts: 1
"""
from __future__ import absolute_import, unicode_literals
from functools import reduce
from itertools import chain
from pickle import PicklingError
from django import forms
from django.core import signing
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.forms.models import ModelChoiceIterator
from django.utils.encoding import force_text
from six.moves.cPickle import PicklingError as cPicklingError
from .cache import cache
from .conf import settings
class Select2Mixin(object):
"""
The base mixin of all Select2 widgets.
This mixin is responsible for rendering the necessary
data attributes for select2 as well as adding the static
form media.
"""
def build_attrs(self, extra_attrs=None, **kwargs):
"""Add select2 data attributes."""
attrs = super(Select2Mixin, self).build_attrs(extra_attrs=extra_attrs, **kwargs)
if self.is_required:
attrs.setdefault('data-allow-clear', 'false')
else:
attrs.setdefault('data-allow-clear', 'true')
attrs.setdefault('data-placeholder', '')
attrs.setdefault('data-minimum-input-length', 0)
if 'class' in attrs:
attrs['class'] += ' django-select2'
else:
attrs['class'] = 'django-select2'
return attrs
def render_options(self, *args, **kwargs):
"""Render options including an empty one, if the field is not required."""
output = '<option></option>' if not self.is_required else ''
output += super(Select2Mixin, self).render_options(*args, **kwargs)
return output
def _get_media(self):
"""
Construct Media as a dynamic property.
.. Note:: For more information visit
https://docs.djangoproject.com/en/1.8/topics/forms/media/#media-as-a-dynamic-property
"""
return forms.Media(
js=(settings.SELECT2_JS, 'django_select2/django_select2.js'),
css={'screen': (settings.SELECT2_CSS,)}
)
media = property(_get_media)
class Select2TagMixin(object):
"""Mixin to add select2 tag functionality."""
def build_attrs(self, extra_attrs=None, **kwargs):
"""Add select2's tag attributes."""
self.attrs.setdefault('data-minimum-input-length', 1)
self.attrs.setdefault('data-tags', 'true')
self.attrs.setdefault('data-token-separators', '[",", " "]')
return super(Select2TagMixin, self).build_attrs(extra_attrs, **kwargs)
class Select2Widget(Select2Mixin, forms.Select):
"""
Select2 drop in widget.
Example usage::
class MyModelForm(forms.ModelForm):
class Meta:
model = MyModel
fields = ('my_field', )
widgets = {
'my_field': Select2Widget
}
or::
class MyForm(forms.Form):
my_choice = forms.ChoiceField(widget=Select2Widget)
"""
pass
class Select2MultipleWidget(Select2Mixin, forms.SelectMultiple):
"""
Select2 drop in widget for multiple select.
Works just like :class:`.Select2Widget` but for multi select.
"""
pass
class Select2TagWidget(Select2TagMixin, Select2Mixin, forms.SelectMultiple):
"""
    Select2 drop in widget for tagging.
Example for :class:`.django.contrib.postgres.fields.ArrayField`::
class MyWidget(Select2TagWidget):
def value_from_datadict(self, data, files, name):
                values = super(MyWidget, self).value_from_datadict(data, files, name)
return ",".join(values)
"""
pass
class HeavySelect2Mixin(object):
"""Mixin that adds select2's AJAX options and registers itself on Django's cache."""
def __init__(self, **kwargs):
"""
Return HeavySelect2Mixin.
Args:
data_view (str): URL pattern name
data_url (str): URL
"""
self.data_view = kwargs.pop('data_view', None)
self.data_url = kwargs.pop('data_url', None)
if not (self.data_view or self.data_url):
            raise ValueError('You must either specify "data_view" or "data_url".')
self.userGetValTextFuncName = kwargs.pop('userGetValTextFuncName', 'null')
super(HeavySelect2Mixin, self).__init__(**kwargs)
def get_url(self):
"""Return URL from instance or by reversing :attr:`.data_view`."""
if self.data_url:
return self.data_url
return reverse(self.data_view)
def build_attrs(self, extra_attrs=None, **kwargs):
"""Set select2's AJAX attributes."""
attrs = super(HeavySelect2Mixin, self).build_attrs(extra_attrs=extra_attrs, **kwargs)
# encrypt instance Id
self.widget_id = signing.dumps(id(self))
attrs['data-field_id'] = self.widget_id
attrs.setdefault('data-ajax--url', self.get_url())
attrs.setdefault('data-ajax--cache', "true")
attrs.setdefault('data-ajax--type', "GET")
attrs.setdefault('data-minimum-input-length', 2)
attrs['class'] += ' django-select2-heavy'
return attrs
def render(self, *args, **kwargs):
"""Render widget and register it in Django's cache."""
output = super(HeavySelect2Mixin, self).render(*args, **kwargs)
self.set_to_cache()
return output
def _get_cache_key(self):
return "%s%s" % (settings.SELECT2_CACHE_PREFIX, id(self))
def set_to_cache(self):
"""
Add widget object to Django's cache.
You may need to overwrite this method, to pickle all information
that is required to serve your JSON response view.
"""
try:
cache.set(self._get_cache_key(), {
'widget': self,
'url': self.get_url(),
})
except (PicklingError, cPicklingError, AttributeError):
msg = "You need to overwrite \"set_to_cache\" or ensure that %s is serialisable."
raise NotImplementedError(msg % self.__class__.__name__)
def render_options(self, *args):
"""Render only selected options."""
try:
selected_choices, = args
except ValueError: # Signature contained `choices` prior to Django 1.10
choices, selected_choices = args
choices = chain(self.choices, choices)
else:
choices = self.choices
output = ['<option></option>' if not self.is_required else '']
selected_choices = {force_text(v) for v in selected_choices}
choices = {(k, v) for k, v in choices if force_text(k) in selected_choices}
for option_value, option_label in choices:
output.append(self.render_option(selected_choices, option_value, option_label))
return '\n'.join(output)
class HeavySelect2Widget(HeavySelect2Mixin, Select2Widget):
"""
Select2 widget with AJAX support that registers itself to Django's Cache.
Usage example::
class MyWidget(HeavySelect2Widget):
data_view = 'my_view_name'
or::
class MyForm(forms.Form):
my_field = forms.ChoicesField(
widget=HeavySelect2Widget(
data_url='/url/to/json/response'
)
)
"""
pass
class HeavySelect2MultipleWidget(HeavySelect2Mixin, Select2MultipleWidget):
"""Select2 multi select widget similar to :class:`.HeavySelect2Widget`."""
pass
class HeavySelect2TagWidget(HeavySelect2Mixin, Select2TagWidget):
"""Select2 tag widget."""
pass
# Auto Heavy widgets
class ModelSelect2Mixin(object):
"""Widget mixin that provides attributes and methods for :class:`.AutoResponseView`."""
model = None
queryset = None
search_fields = []
"""
Model lookups that are used to filter the QuerySet.
Example::
search_fields = [
'title__icontains',
]
"""
max_results = 25
"""Maximal results returned by :class:`.AutoResponseView`."""
def __init__(self, *args, **kwargs):
"""
Overwrite class parameters if passed as keyword arguments.
Args:
model (django.db.models.Model): Model to select choices from.
queryset (django.db.models.QuerySet): QuerySet to select choices from.
search_fields (list): List of model lookup strings.
max_results (int): Max. JsonResponse view page size.
"""
self.model = kwargs.pop('model', self.model)
self.queryset = kwargs.pop('queryset', self.queryset)
self.search_fields = kwargs.pop('search_fields', self.search_fields)
self.max_results = kwargs.pop('max_results', self.max_results)
defaults = {'data_view': 'django_select2-json'}
defaults.update(kwargs)
super(ModelSelect2Mixin, self).__init__(*args, **defaults)
def set_to_cache(self):
"""
Add widget's attributes to Django's cache.
Split the QuerySet, to not pickle the result set.
"""
queryset = self.get_queryset()
cache.set(self._get_cache_key(), {
'queryset':
[
queryset.none(),
queryset.query,
],
'cls': self.__class__,
'search_fields': self.search_fields,
'max_results': self.max_results,
'url': self.get_url(),
})
def filter_queryset(self, term, queryset=None):
"""
Return QuerySet filtered by search_fields matching the passed term.
Args:
term (str): Search term
Returns:
QuerySet: Filtered QuerySet
"""
if queryset is None:
queryset = self.get_queryset()
search_fields = self.get_search_fields()
select = Q()
term = term.replace('\t', ' ')
term = term.replace('\n', ' ')
for t in [t for t in term.split(' ') if not t == '']:
select &= reduce(lambda x, y: x | Q(**{y: t}), search_fields,
Q(**{search_fields[0]: t}))
return queryset.filter(select).distinct()
def get_queryset(self):
"""
Return QuerySet based on :attr:`.queryset` or :attr:`.model`.
Returns:
QuerySet: QuerySet of available choices.
"""
if self.queryset is not None:
queryset = self.queryset
elif self.model is not None:
queryset = self.model._default_manager.all()
else:
raise NotImplementedError(
"%(cls)s is missing a QuerySet. Define "
"%(cls)s.model, %(cls)s.queryset, or override "
"%(cls)s.get_queryset()." % {
'cls': self.__class__.__name__
}
)
return queryset
def get_search_fields(self):
"""Return list of lookup names."""
if self.search_fields:
return self.search_fields
raise NotImplementedError('%s, must implement "search_fields".' % self.__class__.__name__)
def render_options(self, *args):
"""Render only selected options and set QuerySet from :class:`ModelChoicesIterator`."""
try:
selected_choices, = args
except ValueError:
choices, selected_choices = args
choices = chain(self.choices, choices)
else:
choices = self.choices
selected_choices = {force_text(v) for v in selected_choices}
output = ['<option></option>' if not self.is_required else '']
if isinstance(self.choices, ModelChoiceIterator):
if not self.queryset:
self.queryset = self.choices.queryset
selected_choices = {c for c in selected_choices
if c not in self.choices.field.empty_values}
choices = {(obj.pk, self.label_from_instance(obj))
for obj in self.choices.queryset.filter(pk__in=selected_choices)}
else:
choices = {(k, v) for k, v in choices if force_text(k) in selected_choices}
for option_value, option_label in choices:
output.append(self.render_option(selected_choices, option_value, option_label))
return '\n'.join(output)
def label_from_instance(self, obj):
"""
Return option label representation from instance.
Can be overridden to change the representation of each choice.
Example usage::
class MyWidget(ModelSelect2Widget):
def label_from_instance(obj):
return force_text(obj.title).upper()
Args:
obj (django.db.models.Model): Instance of Django Model.
Returns:
str: Option label.
"""
return force_text(obj)
class ModelSelect2Widget(ModelSelect2Mixin, HeavySelect2Widget):
"""
Select2 drop in model select widget.
Example usage::
class MyWidget(ModelSelect2Widget):
search_fields = [
'title__icontains',
]
class MyModelForm(forms.ModelForm):
class Meta:
model = MyModel
fields = ('my_field', )
widgets = {
'my_field': MyWidget,
}
or::
class MyForm(forms.Form):
my_choice = forms.ChoiceField(
widget=ModelSelect2Widget(
model=MyOtherModel,
search_fields=['title__icontains']
)
)
.. tip:: The ModelSelect2(Multiple)Widget will try
to get the QuerySet from the fields choices.
Therefore you don't need to define a QuerySet,
if you just drop in the widget for a ForeignKey field.
"""
pass
class ModelSelect2MultipleWidget(ModelSelect2Mixin, HeavySelect2MultipleWidget):
"""
Select2 drop in model multiple select widget.
Works just like :class:`.ModelSelect2Widget` but for multi select.
"""
pass
class ModelSelect2TagWidget(ModelSelect2Mixin, HeavySelect2TagWidget):
"""
Select2 model widget with tag support.
    This is not a simple drop in widget.
    It requires you to implement your own :func:`.value_from_datadict`
    that adds missing tags to your QuerySet.
Example::
class MyModelSelect2TagWidget(ModelSelect2TagWidget):
queryset = MyModel.objects.all()
def value_from_datadict(self, data, files, name):
                values = super().value_from_datadict(data, files, name)
qs = self.queryset.filter(**{'pk__in': list(values)})
                pks = set(force_text(o.pk) for o in qs)
cleaned_values = []
                for val in values:
if force_text(val) not in pks:
                        val = self.queryset.create(title=val).pk
cleaned_values.append(val)
return cleaned_values
"""
pass
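# Wiring sketch (illustrative only; `Author` and its `name` field are assumed
# models, not part of this module): a ModelSelect2Widget is normally attached to
# a ModelChoiceField so :class:`.AutoResponseView` can filter the queryset by the
# search term, e.g.
#
#   class AuthorChoiceForm(forms.Form):
#       author = forms.ModelChoiceField(
#           queryset=Author.objects.all(),
#           widget=ModelSelect2Widget(
#               model=Author,
#               search_fields=['name__icontains'],
#           ),
#       )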
| 31.84981
| 98
| 0.616367
|
efb97c4cb63f005d3b9eb53988e2db7f5075d022
| 2,711
|
py
|
Python
|
tests/test_x3d.py
|
Jordan-Cottle/domonic
|
05d70af0d1564d3ba80c860bb5f5bfe089293b18
|
[
"MIT"
] | 1
|
2022-03-09T03:39:04.000Z
|
2022-03-09T03:39:04.000Z
|
tests/test_x3d.py
|
Jordan-Cottle/domonic
|
05d70af0d1564d3ba80c860bb5f5bfe089293b18
|
[
"MIT"
] | null | null | null |
tests/test_x3d.py
|
Jordan-Cottle/domonic
|
05d70af0d1564d3ba80c860bb5f5bfe089293b18
|
[
"MIT"
] | null | null | null |
"""
test_collada
~~~~~~~~~~~~
"""
import unittest
# import requests
# from mock import patch
from domonic import domonic
from domonic.dom import *
from domonic.html import *
from domonic.x3d import *
from domonic.decorators import silence
class TestCase(unittest.TestCase):
# @silence
def test_domonic_x3dom(self):
x3dom_test = html(
head(
meta(**{"_http-equiv":"X-UA-Compatible"}, _content="IE=edge"),
title("My first X3DOM page"),
script(_type='text/javascript', _src='https://www.x3dom.org/download/x3dom.js'),
script(
"""
$(document).ready(function(){
var screenshotCount = 0;
                    //Every time the user clicks on the 'take screenshot' button
$("#btnTakeScreenshot").on("click", function() {
//Get data url from the runtime
var imgUrl = document.getElementById("canvas").runtime.getScreenshot();
//Create preview image...
var newScreenshotImage = document.createElement('img');
newScreenshotImage.src = imgUrl;
newScreenshotImage.id = "screenshot_" + screenshotCount;
$('#screenshotPreviews').append(newScreenshotImage);
//...and download link
var newScreenshotDownloadLink = document.createElement('a');
newScreenshotDownloadLink.href = imgUrl;
newScreenshotDownloadLink.download = "screenshot_" + screenshotCount + ".png";
                    newScreenshotDownloadLink.innerHTML = "Save";
$('#screenshotPreviews').append(newScreenshotDownloadLink);
screenshotCount++;
$('#screenshotCount').html(screenshotCount);
});
});
"""
),
link(_rel='stylesheet', _type='text/css', _href='https://www.x3dom.org/download/x3dom.css')
),
body(
h1("Animate Objects with X3DOM!"),
p("Learn how to animate objects."),
x3d(_width='500px', _height='400px').append(
scene(
transform(_DEF="ball").append(
shape(
appearance(
material(_diffuseColor='1 0 0')
),
sphere()
)
),
timeSensor(_DEF="time", _cycleInterval="2", _loop="true"),
PositionInterpolator(_DEF="move", _key="0 0.5 1", _keyValue="0 0 0 0 3 0 0 0 0"),
Route(_fromNode="time", _fromField ="fraction_changed", _toNode="move", _toField="set_fraction"),
Route(_fromNode="move", _fromField ="value_changed", _toNode="ball", _toField="translation")
)
)
)
)
render( str(x3dom_test) )#, "sphere_test.html" )
if __name__ == '__main__':
unittest.main()
| 31.894118
| 109
| 0.589082
|
24caa9f77cb3f9c435f6264752ecaf8e6ccb0a05
| 933
|
py
|
Python
|
smartd/view/history.py
|
twd2/SmartD
|
f4687056a69e9bcadf6e98990ad670685e0e4a53
|
[
"MIT"
] | 4
|
2016-04-15T16:55:20.000Z
|
2019-12-31T06:48:03.000Z
|
smartd/view/history.py
|
twd2/SmartD
|
f4687056a69e9bcadf6e98990ad670685e0e4a53
|
[
"MIT"
] | null | null | null |
smartd/view/history.py
|
twd2/SmartD
|
f4687056a69e9bcadf6e98990ad670685e0e4a53
|
[
"MIT"
] | null | null | null |
import json
from smartd import app, util
import smartd.util.datetime
from smartd.model import data
from smartd.util import event
from smartd.view import base
from tornado import web
from tornado import gen
from tornado import options
@app.route('History', '/history/([^/]+)/(.+)')
class HistoryHandler(base.BaseHandler):
CATEGORY_COLOR = {'plant': 'success'}
@gen.coroutine
def get(self, category, type):
to_float = lambda _: float(_) if self.get_query_argument('float', 'true') == 'true' else _
l = yield data.get_list(category, type, int(self.get_query_argument('count', 2880)))
l.reverse()
x = [util.datetime.strtime(doc['_id'].generation_time) for doc in l]
y = [to_float(doc['value']) for doc in l]
self.render('history.html', category=category, type=type,
color=HistoryHandler.CATEGORY_COLOR.get(category, 'default'),
data_x=json.dumps(x), data_y=json.dumps(y))
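# Request sketch (illustrative; the "soil_moisture" type is an assumed sensor
# name): with the route above, the last 100 samples for a plant sensor could be
# charted via a URL such as
#
#   /history/plant/soil_moisture?count=100&float=true
#
# where `count` and `float` map onto the query arguments read in get().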
| 35.884615
| 94
| 0.696677
|
6daed345d322109eacdda25b13a4a1fef1bc58dd
| 5,055
|
py
|
Python
|
experimental/attentive_uncertainty/contextual_bandits/offline_contextual_bandits.py
|
miksu/edward2
|
973acdb23701f320ebaee8a56fc44d4414acfa4e
|
[
"Apache-2.0"
] | null | null | null |
experimental/attentive_uncertainty/contextual_bandits/offline_contextual_bandits.py
|
miksu/edward2
|
973acdb23701f320ebaee8a56fc44d4414acfa4e
|
[
"Apache-2.0"
] | null | null | null |
experimental/attentive_uncertainty/contextual_bandits/offline_contextual_bandits.py
|
miksu/edward2
|
973acdb23701f320ebaee8a56fc44d4414acfa4e
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2019 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Thompson sampling for contextual bandit problems via offline SNPs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from edward2.experimental.attentive_uncertainty import regressor
from edward2.experimental.attentive_uncertainty.contextual_bandits import utils
import numpy as np
import tensorflow as tf
from deep_contextual_bandits import bandit_algorithm # local file import
from deep_contextual_bandits import contextual_dataset # local file import
class OfflineContextualBandits(bandit_algorithm.BanditAlgorithm):
"""Thompson sampling via offline strutured neural processes."""
def __init__(self,
name,
hparams):
self.name = name
self.hparams = hparams
self.verbose = getattr(hparams, 'verbose', True)
self._is_anp = getattr(hparams, 'is_anp', False)
if self._is_anp:
input_dim = hparams.context_dim
output_dim = hparams.num_actions
else:
input_dim = hparams.context_dim + hparams.num_actions
output_dim = 1
self.t = 0
self.data_h = contextual_dataset.ContextualDataset(
hparams.context_dim, hparams.num_actions, intercept=False)
if self.verbose:
print('Initializing model {}.'.format(self.name))
self.snp = regressor.Regressor(
input_dim=input_dim,
output_dim=output_dim,
x_encoder_sizes=hparams.x_encoder_sizes,
x_y_encoder_sizes=hparams.x_y_encoder_sizes,
global_latent_net_sizes=hparams.global_latent_net_sizes,
local_latent_net_sizes=hparams.local_latent_net_sizes,
heteroskedastic_net_sizes=hparams.heteroskedastic_net_sizes,
att_type=hparams.att_type,
att_heads=hparams.att_heads,
uncertainty_type=hparams.uncertainty_type,
mean_att_type=hparams.mean_att_type,
scale_att_type_1=hparams.scale_att_type_1,
scale_att_type_2=hparams.scale_att_type_2,
activation=hparams.activation,
output_activation=hparams.output_activation,
data_uncertainty=hparams.data_uncertainty,
local_variational=hparams.local_variational,
model_path=hparams.model_path)
self._one_hot_vectors = tf.one_hot(
indices=np.arange(hparams.num_actions),
depth=hparams.num_actions)
def action(self, context):
"""Samples rewards from posterior, and chooses best action accordingly.
Args:
context: A d-dimensional np.ndarray with the context.
Returns:
Greedy action based on Thompson sampling.
"""
# Round robin until each action has been selected "initial_pulls" times
if self.t < self.hparams.num_actions * self.hparams.initial_pulls:
return self.t % self.hparams.num_actions
vals = []
context = tf.to_float(context)
if self._is_anp:
contexts, rewards, actions = self.data_h.get_data_with_weights()
historical_x = tf.to_float(tf.expand_dims(contexts, axis=0))
historical_y = tf.to_float(tf.expand_dims(rewards*actions, axis=0))
target_x = tf.expand_dims(tf.reshape(context, [1, -1]), axis=0)
else:
contexts, rewards, actions = utils.get_data_with_masked_rewards(
self.data_h)
context_action_pairs = tf.concat([contexts, actions], axis=-1)
historical_x = tf.to_float(tf.expand_dims(context_action_pairs, axis=0))
historical_y = tf.to_float(rewards.reshape(1, -1, 1))
tiled_context = tf.concat(
[tf.tile(tf.reshape(context, [1, -1]), [self.hparams.num_actions, 1]),
self._one_hot_vectors], axis=-1
)
target_x = tf.expand_dims(tiled_context, axis=0)
target_y = None
predictions = self.snp(historical_x, historical_y, target_x, target_y)
vals = tf.squeeze(predictions.distribution.mean())
return tf.argmax(vals).numpy()
def update(self, context, action, reward):
"""Updates the posterior of the SNP model.
For an offline SNP model, the posterior gets directly updated by
updating the observed dataset. No parameter updates are needed.
Args:
context: A d-dimensional np.ndarray with the context.
action: Integer between 0 and k-1 representing the chosen arm.
reward: Real number representing the reward for the (context, action).
Returns:
None.
"""
self.t += 1
self.data_h.add(context, action, reward)
if self.verbose and self.t % 100 == 0:
print('Number of contexts=', self.t)
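# Interaction sketch (illustrative; `hparams`, `sample_context`, and `env_reward`
# are assumptions, not defined in this module): the bandit follows the usual
# Thompson-sampling loop of action() followed by update() once the reward is seen.
#
#   algo = OfflineContextualBandits('offline_snp', hparams)
#   for _ in range(num_steps):
#     context = sample_context()        # d-dimensional np.ndarray
#     arm = algo.action(context)
#     reward = env_reward(context, arm)
#     algo.update(context, arm, reward)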
| 36.630435
| 80
| 0.719683
|