content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def request_pet_name():
    """Prompt repeatedly until the user enters an alphabetic pet name.

    Returns:
        str: The pet's name, consisting only of letters.
    """
    while True:
        # input() and str.isalpha() never raise ValueError, so the original
        # try/except ValueError around this check was dead code.
        pet_name = input("Enter your pet's name: \n")
        if pet_name.isalpha():
            return pet_name
        print("Must be characters, please enter your pet's name again.")
def butter_bandpass_filter(voltage, lowcut, highcut, fs, order=5):
    """Apply a Butterworth band-pass filter to an ECG voltage trace.

    Args:
        voltage: array of voltage data from an ECG signal
        lowcut: low frequency cutoff
        highcut: high frequency cutoff
        fs: sampling frequency
        order: filter order (power)

    Returns:
        Array of band-pass-filtered voltage data.
    """
    numerator, denominator = butter_bandpass(lowcut, highcut, fs, order=order)
    return lfilter(numerator, denominator, voltage)
def _get_variable_names(expression):
"""Return the list of variable names in the Numexpr `expression`."""
names = []
stack = [expression]
while stack:
node = stack.pop()
if node.astType == 'variable':
names.append(node.value)
elif hasattr(node, 'children'):
stack.extend(node.children)
return list(set(names)) | db75b0066b89bc7a6a022a56b28981910836524c | 28,502 |
from pathlib import Path
import typing
import pathlib
import os
def is_readable(path: Path,
                access_by: typing.Union[ReadBy, str, None] = None) -> bool:
    """
    :return: True if the object at the path `path` is readable
    """
    resolved = pathlib.Path(path).resolve()
    # Owner (or unspecified accessor) is checked via the effective uid.
    owner_requested = isinstance(access_by, str) and access_by == resolved.owner()
    if access_by is None or owner_requested:
        return os.access(resolved, os.R_OK)
    # Any other user name is treated as "other"; otherwise access_by is
    # assumed to already be a ReadBy permission bitmask.
    if isinstance(access_by, str):
        access_by = ReadBy.OTHER.value
    return bool(resolved.stat().st_mode & access_by)
def add(data_path, _):
    """add templates based on arguments and configurations."""
    # The state machine starts on the option prompt and may visit the
    # remaining states in any order it decides.
    states = [
        AskOption(data_path),
        LibraryChosen(),
        Confirmation(),
        AddLibrary(),
        TypeLibraryName(),
    ]
    machine = Machine(initial_state=states[0], possible_states=states)
    try:
        machine.run()
    except BackSignal:
        # User asked to go back to the previous menu.
        return BACK
    except HaltSignal:
        # User aborted; nothing to report.
        return
import numpy as np
from scipy.special import gamma
def GGD(x, d=2, p=1):
    r"""Two parameter generalized gamma distribution (GGD).

    Parameters
    ----------
    x : array_like (positive)
        Points at which to evaluate the density.
    d : float (positive)
        Shape parameter.
    p : float (positive)
        Shape parameter.

    Returns
    -------
    pdf : array_like
        Density values at `x`.

    Notes
    -----
    .. math::
        G(x;d,p) = \frac{p}{\Gamma(d/p)}x^{d-1}\exp{-x^p}

    where Gamma() is the gamma function.
    """
    # The docstring is now a raw string: the LaTeX markup (\f, \G, \e)
    # produced invalid escape sequences in a plain string literal.
    return p / gamma(d / p) * x ** (d - 1) * np.exp(-x ** p)
def update_imported_docs(version_pk):
    """
    Check out or update the given project's repository.

    :param version_pk: primary key of the version to check out/update.
    :returns: dict mapping step names ('checkout', 'venv', 'sphinx',
              'requirements', 'install') to the result of each command.
    :raises ProjectImportError: if the project's repo type is unknown.
    """
    version_data = api.version(version_pk).get()
    version = make_api_version(version_data)
    project = version.project
    # Make Dirs
    if not os.path.exists(project.doc_path):
        os.makedirs(project.doc_path)
    with project.repo_lock(getattr(settings, 'REPO_LOCK_SECONDS', 30)):
        update_docs_output = {}
        if not project.vcs_repo():
            raise ProjectImportError(("Repo type '{0}' unknown"
                                      .format(project.repo_type)))
        # Get the actual code on disk
        if version:
            log.info('Checking out version {slug}: {identifier}'.format(
                slug=version.slug, identifier=version.identifier))
            version_slug = version.slug
            version_repo = project.vcs_repo(version_slug)
            update_docs_output['checkout'] = version_repo.checkout(
                version.identifier
            )
        else:
            # Does this ever get called?
            log.info('Updating to latest revision')
            version_slug = 'latest'
            version_repo = project.vcs_repo(version_slug)
            update_docs_output['checkout'] = version_repo.update()
        # Ensure we have a conf file (an exception is raised if not)
        project.conf_file(version.slug)
        # Do Virtualenv bits:
        if project.use_virtualenv:
            if project.use_system_packages:
                site_packages = '--system-site-packages'
            else:
                site_packages = '--no-site-packages'
            # Here the command has been modified to support different
            # interpreters.
            update_docs_output['venv'] = run(
                '{cmd} {site_packages} {path}'.format(
                    cmd='virtualenv-2.7 -p {interpreter}'.format(
                        interpreter=project.python_interpreter),
                    site_packages=site_packages,
                    path=project.venv_path(version=version_slug)
                )
            )
            # Other code expects sphinx-build to be installed inside the
            # virtualenv. Using the -I option makes sure it gets installed
            # even if it is already installed system-wide (and
            # --system-site-packages is used)
            if project.use_system_packages:
                ignore_option = '-I'
            else:
                ignore_option = ''
            if project.python_interpreter != 'python3':
                # Let's see if this works now.
                sphinx = 'sphinx==1.1.3'
                update_docs_output['sphinx'] = run(
                    ('{cmd} install {ignore_option} {sphinx} '
                     'virtualenv==1.10.1 setuptools==1.1 '
                     'docutils==0.11').format(
                        cmd=project.venv_bin(version=version_slug, bin='pip'),
                        sphinx=sphinx, ignore_option=ignore_option))
            else:
                sphinx = 'sphinx==1.1.3'
                # python 3 specific hax
                update_docs_output['sphinx'] = run(
                    ('{cmd} install {ignore_option} {sphinx} '
                     'virtualenv==1.9.1 docutils==0.11').format(
                        cmd=project.venv_bin(version=version_slug, bin='pip'),
                        sphinx=sphinx, ignore_option=ignore_option))
            if project.requirements_file:
                os.chdir(project.checkout_path(version_slug))
                update_docs_output['requirements'] = run(
                    '{cmd} install --force-reinstall --exists-action=w -r {requirements}'.format(
                        cmd=project.venv_bin(version=version_slug, bin='pip'),
                        requirements=project.requirements_file))
            os.chdir(project.checkout_path(version_slug))
            if os.path.isfile("setup.py"):
                if getattr(settings, 'USE_PIP_INSTALL', False):
                    update_docs_output['install'] = run(
                        '{cmd} install --force-reinstall --ignore-installed .'.format(
                            cmd=project.venv_bin(version=version_slug, bin='pip')))
                else:
                    update_docs_output['install'] = run(
                        '{cmd} setup.py install --force'.format(
                            cmd=project.venv_bin(version=version_slug,
                                                 bin='python')))
            else:
                update_docs_output['install'] = (999, "", "No setup.py, skipping install")
        # Update tags/version
        version_post_data = {'repo': version_repo.repo_url}
        if version_repo.supports_tags:
            version_post_data['tags'] = [
                {'identifier': v.identifier,
                 'verbose_name': v.verbose_name,
                 } for v in version_repo.tags
            ]
        if version_repo.supports_branches:
            version_post_data['branches'] = [
                {'identifier': v.identifier,
                 'verbose_name': v.verbose_name,
                 } for v in version_repo.branches
            ]
        try:
            api.project(project.pk).sync_versions.post(json.dumps(version_post_data))
        except Exception as e:
            # Python 3 fix: "except Exception, e" and the print statement
            # were Python 2-only syntax, and BaseException.message is gone.
            print("Sync Versions Exception: %s" % e)
    return update_docs_output
def t(string):
    """Prefix `string` with one tab per '.' it contains."""
    depth = string.count(".")
    return "\t" * depth + string
def get_boundary_levels(eris):
    """Return the [minimum, maximum] of the keys in `eris`."""
    keys = eris.keys()
    return [min(keys), max(keys)]
import os
import glob
import json
def summary_to_json(d):
    """Compile per-dataset QC json summaries into one tab-separated table.

    ``d`` is a file whose lines are "<dataset_id> <dataset_directory>".
    For each dataset directory, json summaries are looked up under
    ``<dir>/attic/json`` and ``<dir>/json``:
    *map.json
    *fastqc.json
    *dhs.json,
    *frip.json
    *pbc.json
    *meta.json
    *macs2*.json

    The combined rows are written to ``<basename(d)>_compiled_database.xls``.
    """
    f = open(os.path.basename(d) + "_compiled_database.xls", 'w')
    # Python 3 fix: ``print >>f, ...`` is Python 2-only syntax.
    print("DatsetId\tTreatOrControlReplicates\tReadLength\tFastQC\tRawReadsNumber\tUniquelyMappedReadsNumber\tUniquelyMappedRatio\tPBC\tFRiP\tPeaksNumber\tPeaksFoldChange>=10\tPeaksFoldChange>=20\tFragmentSize\tChIPReplicatesPairwiseLabels\tReplicatesOverlapRatio\tReplicatesWigCorrelation\tMergedPeaksNumber\tMergedPeaksFoldChange>=10\tMergedPeaksFoldChange>=20\tMergedPeaksPromoterPercentage\tMergedPeaksExonPercentage\tMergedPeaksIntronPercentage\tMergedPeaksIntergenicPercentage\tMergedPeaksUnionDHSRatio", file=f)
    with open(d) as inf:
        for line in inf:
            line = line.strip().split()

            def path_find(x):
                # Search <dir>/attic/json then <dir>/json for a non-empty match.
                fin = glob.glob(os.path.join(os.path.abspath(line[1]), "attic", "json", x)) + glob.glob(os.path.join(os.path.abspath(line[1]), "json", x))
                if fin and os.path.getsize(fin[0]) > 0:
                    return fin
                return None

            if os.path.exists(line[1]):
                json_dict = {}
                explore = path_find("*map.json")  # try to search prefix by *map.json
                explore2 = path_find("*fastqc.json")  # fallback prefix source
                if explore:
                    prefix = os.path.basename('_'.join(explore[0].split('_')[:-1]))
                else:
                    if explore2:
                        prefix = os.path.basename('_'.join(explore2[0].split('_')[:-1]))
                print(prefix)
                maps = None
                if path_find(prefix + "_map.json"):
                    maps = json_load(path_find(prefix + "_map.json")[0])
                fastqs = None
                if path_find(prefix + "_fastqc.json"):
                    fastqs = json_load(path_find(prefix + "_fastqc.json")[0])
                if maps:
                    samples = maps['stat'].keys()
                if fastqs:
                    samples = fastqs['stat'].keys()
                # get samples labels
                samples, tn, tc = sort_dc(samples)
                if path_find(prefix + "_fastqc.json"):
                    fastqs = json_load(path_find(prefix + "_fastqc.json")[0])['stat']
                else:
                    fastqs = {}
                if path_find(prefix + "_pbc.json"):
                    pbcs = json_load(path_find(prefix + "_pbc.json")[0])['stat']
                else:
                    pbcs = {}
                if path_find(prefix + "_dhs.json"):
                    dhs = json_load(path_find(prefix + "_dhs.json")[0])['stat']
                else:
                    dhs = 0.0
                if path_find(prefix + "_frip.json"):
                    frip = json_load(path_find(prefix + "_frip.json")[0])['stat']
                else:
                    frip = {}
                if path_find(prefix + "_frag.json"):
                    frag = json_load(path_find(prefix + "_frag.json")[0])['stat']
                else:
                    frag = {}
                if path_find(prefix + "_macs2.json"):
                    mergedpeaks = json_load(path_find(prefix + "_macs2.json")[0])
                else:
                    mergedpeaks = {}
                reps = glob.glob(os.path.join(line[1], 'attic', 'json', '*_rep.json')) + glob.glob(os.path.join(line[1], 'json', '*_rep.json'))
                peaks = []
                f10 = []
                f20 = []
                FLAG = True
                pairlabels = ""
                repcors = ""
                repol = ""
                if reps:
                    for r in reps:
                        if 'macs' in r:
                            # Per-replicate peak counts from the macs rep json.
                            peaks = []
                            with open(r) as repf:
                                m = json.load(repf)
                            for i in samples:
                                if not 'control' in i:
                                    try:
                                        peaks.append(m['stat'][i]['totalpeak'])
                                    except Exception:
                                        # Missing sample entry: give up on
                                        # per-replicate peak columns.
                                        FLAG = False
                                        break
                                    f10.append(m['stat'][i]['peaksge10'])
                                    f20.append(m['stat'][i]['peaksge20'])
                        else:
                            # Replicate correlation/overlap rep json.
                            with open(r) as repf:
                                m = json.load(repf)
                            metric = m['stat']
                            meta = m['input']['overlap']
                            mets = []
                            for met in meta:
                                # Python 3 fix: map() is lazy and the pair is
                                # indexed below, so materialize a list.
                                mets.append([str(int(x) + 1) for x in met.replace('.overlap', '').split('_')[-2:]])
                            cor = metric['cor']
                            overlap = metric['overlap']
                            if not FLAG:
                                continue
                            for c, o, m in zip(cor, overlap, mets):
                                pairlabels += ','.join(m) + ';'
                                repcors += str(c) + ';'
                                largePeak = max([peaks[int(m[0]) - 1], peaks[int(m[1]) - 1]])
                                if largePeak > 0:
                                    repol += str(float(o) / largePeak) + ';'
                                else:
                                    repol += 'NA;'
                meta = path_find(prefix + "_meta.json")
                if meta:
                    for i in meta:
                        if "enrich" in i:
                            continue
                        else:
                            meta = json_load(i)
                n = 0
                for sam in samples:
                    content = ''
                    content += line[0] + '\t'
                    content += sam + '\t'
                    if fastqs:
                        content += str(fastqs.get(sam, 0)['sequence_length']) + '\t' + str(fastqs.get(sam, 0)['median']) + '\t'
                    else:
                        content += 'NA\t'
                        continue
                    if maps:
                        content += str(maps['stat'][sam]['total']) + '\t'
                        content += str(maps['stat'][sam]['mapped']) + '\t'
                        content += str(round(float(maps['stat'][sam]["mapped"]) / maps['stat'][sam]["total"], 4)) + '\t'
                    else:
                        content += 'NA\t'
                        content += 'NA\t'
                        content += 'NA\t'
                        continue
                    if pbcs:
                        content += str(pbcs.get(sam, 0)['PBC']) + '\t'
                    else:
                        content += 'NA\t'
                    if frip:
                        content += str(frip.get(sam, 0)['frip']) + '\t'
                    else:
                        content += 'NA\t'
                    if 'control' in sam:
                        content += 'NA\t'
                        content += 'NA\t'
                        content += 'NA\t'
                        content += 'NA\t'
                    else:
                        if tn == 1:
                            # Single treat replicate: report merged peak stats.
                            if mergedpeaks:
                                content += str(mergedpeaks['stat'].get('totalpeak', 0)) + '\t'
                                content += str(mergedpeaks['stat'].get('peaksge10', 0)) + '\t'
                                content += str(mergedpeaks['stat'].get('peaksge20', 0)) + '\t'
                            else:
                                content += 'NA\t'
                                content += 'NA\t'
                                content += 'NA\t'
                        else:
                            if peaks:
                                content += "%s\t" % (peaks[n])
                                content += "%s\t" % f10[n]
                                content += "%s\t" % f20[n]
                            else:
                                content += 'NA\t'
                                content += 'NA\t'
                                content += 'NA\t'
                            n += 1
                        if frag:
                            content += "%s\t" % (frag[sam])
                        else:
                            content += 'NA\t'
                    if reps and peaks:
                        if pairlabels:
                            content += pairlabels.rstrip(';') + '\t'
                            content += repol.rstrip(';') + '\t'
                            content += repcors.rstrip(';') + '\t'
                        else:
                            content += 'NA\t'
                            content += 'NA\t'
                            content += 'NA\t'
                    else:
                        content += 'NA\t'
                        content += 'NA\t'
                        content += 'NA\t'
                    if mergedpeaks:
                        content += str(mergedpeaks['stat'].get('totalpeak', 0)) + '\t'
                        content += str(mergedpeaks['stat'].get('peaksge10', 0)) + '\t'
                        content += str(mergedpeaks['stat'].get('peaksge20', 0)) + '\t'
                    else:
                        content += 'NA\t'
                        content += 'NA\t'
                        content += 'NA\t'
                    if meta:
                        # Python 3 fix: dict.has_key() was removed.
                        if 'stat' in meta:
                            meta = meta['stat']
                        else:
                            meta = meta
                        content += str(meta['promoter']) + '\t'
                        content += str(meta['exon']) + '\t'
                        content += str(meta['intron']) + '\t'
                        content += str(meta['inter']) + '\t'
                    else:
                        content += 'NA\t'
                        content += 'NA\t'
                        content += 'NA\t'
                        content += 'NA\t'
                    if dhs:
                        if hasattr(dhs, 'get'):
                            if dhs.get('number', 0) > 0:
                                content += str(float(dhs.get('overlap', 0)) / dhs.get('number', 0))
                            else:
                                content += 'NA'
                        else:
                            content += 'NA'
                    else:
                        content += 'NA'
                    print(content, file=f)
    f.flush()
    f.close()
import json
def team_changepass():
    """The ``/team/changepass`` endpoint requires authentication and expects
    the ``team_id`` and ``password`` as arguments. The team's password
    will be set to ``password``

    Note that this endpoint requires a POST request.
    It can be reached at ``/team/changepass?secret=<API_SECRET>``.

    The JSON response is::

        {
            "result": "success" or "fail"
        }

    :param int team_id: the team_id whose password to change.
    :param str password: the new plaintext password.
    :return: a JSON dictionary with with status code "success" or "fail"
    """
    team_id = request.form.get("team_id")
    password = request.form.get("password")
    password_encrypted = hash_password(password)
    cursor = mysql.cursor()
    cursor.execute("""UPDATE teams SET password = %s
                      WHERE id = %s""",
                   (password_encrypted, team_id))
    mysql.database.commit()
    # rowcount of 0 means no team matched the given id.
    outcome = "fail" if cursor.rowcount == 0 else "success"
    return json.dumps({"result": outcome})
def load_options(parser=None, argv=None, positional_args=True):
    """ parses sys.argv, possibly exiting if there are mistakes

    If you set parser to a ConfigParser object, then you have control
    over the usage string and you can prepopulate it with options you
    intend to use. But don't set a ``--config`` / ``-c`` option;
    load_options uses that to find a configuration file to load

    If a parser was passed in, we return ``(config, parser, [args])``.
    Otherwise we return ``(config, [args])``. Args is only included
    if ``positional_args`` is True and there are positional arguments

    See :func:`load_config` for details on the ``--config`` option.

    :param parser: optional pre-built OptionParser; a default one is
        created when omitted.
    :param argv: optional explicit argument list; ``sys.argv`` is parsed
        when omitted. (Was a mutable ``[]`` default — an empty list was
        falsy, so ``None`` preserves the old behavior.)
    :param positional_args: whether positional arguments are expected.
    :raises Exception: when positional arguments are present but not
        expected, or expected but missing.
    """
    def is_config_appender(arg):
        # "section.key=value" style arguments are config overrides.
        return "." in arg and "=" in arg and arg.find(".") < arg.find("=")

    parser_passed_in = parser
    if not parser:
        parser = OptionParser()
    parser.add_option("-c", "--config", help="the path to a config file to read options from")
    if argv:
        options, args = parser.parse_args(argv)
    else:
        options, args = parser.parse_args()
    # (Removed leftover Python 2 debug print statements, which were a
    # SyntaxError under Python 3.)
    config = load_config(options.config, [a for a in args if is_config_appender(a)])
    other_args = [a for a in args if not is_config_appender(a)]
    return_list = [config]
    if parser_passed_in:
        return_list.append(options)
    if other_args:
        if positional_args:
            return_list.append(other_args)
        else:
            raise Exception("Arguments %s not understood" % other_args)
    else:
        if positional_args:
            raise Exception("This program expects one or more positional arguments that are missing")
    if len(return_list) == 1:
        return return_list[0]
    else:
        return tuple(return_list)
def read_spans(fname, separator=';'):
    """
    Read in a span file, of the form

    Polynomial;NumberOfComplexPlaces;Root;SpanDimension;VolumeSpan;ManifoldSpan;FitRatio

    Returns a dictionary object (certainly NOT a Dataset) such that the
    keys are polynomials, and the values are dictionaries. These
    dictionaries have keys of roots and the values are [SpanDimension,
    VolumeSpan, ManifoldSpan, FitRatio].
    """
    spans = dict()
    # ``with`` guarantees the handle is closed even on a parse error; the
    # original opened the file and never closed it.
    with open(fname, 'r') as f:
        f.readline()  # skip the header row
        for l in f:
            # whitespace can cause weird problems
            w = l.replace('"', '').replace(' ', '').strip('\n').split(separator)
            for i in [4, 5, 7, 8, 9, 10]:
                try:
                    w[i] = w[i][2:-2].split("','")  # convert string back to list of strings
                except IndexError:
                    break
            spans.setdefault(w[0], dict())[w[2]] = w[4:]
    return spans
import argparse
def compress_image(args: argparse.Namespace) -> None:
    """ Compresses an image by applying SVD decomposition """
    def rescale(mat):
        # Min-max scale values into [0, 1].
        return (mat - mat.min()) / (mat.max() - mat.min())

    img = np.array(Image.open(args.file)) / 255.
    n_components = img.shape[1] if args.k is None else args.k
    if args.svd_method_type == 'custom':
        svd_method = custom_svd
    else:
        svd_method = sklearn_svd
    # Grayscale images are lifted to a single-channel 3D array so the
    # per-channel loop below works uniformly.
    grayscale = len(img.shape) == 2
    if grayscale:
        img = img.reshape(img.shape[0], img.shape[1], 1)
    layers = [
        np.expand_dims(compress_layer(img[:, :, ch], svd_method, n_components), 2)
        for ch in range(img.shape[2])
    ]
    compressed_img = np.concatenate(layers, axis=2)
    compressed_img = (rescale(compressed_img) * 255).astype('uint8')
    if grayscale:
        compressed_img = compressed_img[:, :, 0]
    if args.output is not None:
        Image.fromarray(compressed_img).save(args.output)
from typing import Dict
def merge_hooks(hooks1: Dict[str, list], hooks2: Dict[str, list]) -> Dict[str, list]:
    """
    Overview:
        merge two hooks, which has the same keys, each value is sorted by hook priority with stable method
    Arguments:
        - hooks1 (:obj:`dict`): hooks1 to be merged
        - hooks2 (:obj:`dict`): hooks2 to be merged
    Returns:
        - new_hooks (:obj:`dict`): merged new hooks

    .. note::
        This merge function uses stable sort method without disturbing the same priority hook
    """
    assert set(hooks1.keys()) == set(hooks2.keys())
    return {
        key: sorted(hooks1[key] + hooks2[key], key=lambda hook: hook.priority)
        for key in hooks1.keys()
    }
import os
def frame_extraction(src, annotationPath, short_side):
    """Extract frames for every video listed in an annotation file.

    Args:
        src (str): Root directory containing the videos.
        annotationPath (str): File with one "<video_path> <label>" pair per line.
        short_side (int): Target length of the shorter frame side after rescaling.

    Returns:
        tuple: (list of per-video frame directories,
                (width, height) of the rescaled frames
                    -- NOTE(review): reflects only the last processed video,
                licence labels in file order)
    """
    video_paths = []
    videoLabels = []
    frameHW = None
    # ``with`` closes the annotation file (previously leaked).
    with open(annotationPath, 'r') as videoPaths:
        for line in videoPaths.readlines():
            # Load the video, extract frames into <video_path_without_ext>/
            line = line.split()
            videoLabels.append(line[1])
            videoPath = osp.join(src, line[0])
            target_dir = videoPath.split('.')[0]
            os.makedirs(target_dir, exist_ok=True)
            # Should be able to handle videos up to several hours
            frame_tmpl = osp.join(target_dir, 'img_{:06d}.jpg')
            vid = cv2.VideoCapture(videoPath)
            video_paths.append(target_dir)
            flag, frame = vid.read()
            cnt = 0
            new_h, new_w = None, None
            while flag:
                if new_h is None:
                    # Compute the rescaled size once from the first frame.
                    h, w, _ = frame.shape
                    new_w, new_h = mmcv.rescale_size((w, h), (short_side, np.Inf))
                    frameHW = (new_w, new_h)
                # BUG FIX: the resize was previously inside the ``if`` above,
                # so only the first frame of each video got rescaled.
                frame = mmcv.imresize(frame, (new_w, new_h))
                frame_path = frame_tmpl.format(cnt + 1)
                cv2.imwrite(frame_path, frame)
                cnt += 1
                flag, frame = vid.read()
    return video_paths, frameHW, videoLabels
def get_max_assocs_in_sample_csr(assoc_mat):
    """
    Returns the maximum number of co-associations a sample has and the index of
    that sample.
    """
    row_starts = assoc_mat.indptr
    # Number of stored entries per row = difference of consecutive row offsets.
    row_sizes = row_starts[1:] - row_starts[:-1]
    return row_sizes.max(), row_sizes.argmax()
def command_mood(self, args):
    """
    /mood [<mood> [text]]
    """
    if not args:
        # No arguments: retract the currently published mood.
        return self.xmpp.plugin['xep_0107'].stop()
    mood = args[0]
    if mood not in pep.MOODS:
        return self.information('%s is not a correct value for a mood.'
                                % mood,
                                'Error')
    text = args[1] if len(args) == 2 else None
    self.xmpp.plugin['xep_0107'].publish_mood(mood, text,
                                              callback=dumb_callback)
def putativePrimer(seq,lastShared):
    """
    Generate a mock primer based on desired TM or length
    and end position. This is used to estimate whether an
    exact match restriction site found in the shared region
    of two sequences is likely to be captured by a primer
    (rendering it necessary to modify the site or throw out
    the enzyme) or if it can be safely ignored.

    Input
    seq = string of valid DNA bases
    lastShared = integer indicating last base of primer
    Output
    bestPrimer = string of valid DNA bases
    """
    # type can be 'tm' or 'length' (read from Settings.primerType)
    seq = seq.lower()
    # Generate primer sub-sequences
    # NOTE(review): currentStart begins at 0 and is decremented, so this loop
    # body runs exactly once and primerList holds the single candidate
    # seq[0:lastShared]. If multiple start positions were intended, the loop
    # bounds look wrong -- confirm against the original design.
    currentStart = 0
    primerList = []
    while currentStart >= 0:
        currentPrimer = seq[currentStart:lastShared]
        primerList.append(currentPrimer)
        currentStart -= 1
    if Settings.primerType == 'tm':
        # Estimate a melting temperature for every candidate.
        output = []
        for eachPrimer in primerList:
            output.append(estimateTM(eachPrimer))
        # Filter for negative values
        filterList = [x for x in range(0,len(output)) if output[x] <= 0]
        primerList = [primerList[x] for x in range(0,len(output)) if x not in filterList]
        output = [output[x] for x in range(0,len(output)) if x not in filterList]
        # Find minimum diff
        outputDiff = [abs(x - Settings.TM) for x in output]
        # Choose the best primer sub-sequence based on difference from optimum Tm
        bestPrimer = [primerList[x] for x in range(0,len(outputDiff)) if outputDiff[x] == min(outputDiff)]
        return(bestPrimer)
    elif Settings.primerType == 'length':
        # compare length of primers in list to optimum length
        positionList = list(range(0,len(primerList)))
        filterList = [abs(len(x) - Settings.minLength) for x in primerList]
        bestPrimer = [primerList[x] for x in positionList if filterList[x] == min(filterList)]
        return(bestPrimer)
import argparse
def default_arg_parser(formatter_class=None):
    """
    This function creates an ArgParser to parse command line arguments.

    :param formatter_class: Formatting the arg_parser output into a specific
                            form. For example: In the manpage format.
    """
    if formatter_class is None:
        formatter_class = CustomFormatter
    description = """
    Automated code benchmark solution.
    Empower developers with tools to trace and analyze project performances.
    """
    arg_parser = argparse.ArgumentParser(
        formatter_class=formatter_class,
        description=description)
    # (group title, [(flags, keyword options), ...]) -- kept in the original
    # declaration order so help output is unchanged.
    group_specs = [
        ('Config', [
            (('-C', '--config'),
             dict(type=PathArg, nargs=1,
                  help='configuration file location, defaults to .codebench.yml')),
            (('-I', '--no_config'),
             dict(const=True, action='store_const',
                  help='run without using any config file')),
        ]),
        ('Before', [
            (('-b', '--before_all'),
             dict(type=PathArg, nargs=1,
                  help='script that to be run before all')),
            (('-e', '--before_each'),
             dict(type=PathArg, nargs=1,
                  help='script that to be run before each benchmark')),
        ]),
        ('Script', [
            (('-s', '--script'),
             dict(type=PathArg, nargs=1,
                  help='benchmark script to be run')),
        ]),
        ('After', [
            (('-a', '--after_all'),
             dict(type=PathArg, nargs=1,
                  help='script that to be run after all')),
            (('-f', '--after_each'),
             dict(type=PathArg, nargs=1,
                  help='script that to be run after each benchmark')),
        ]),
        ('Commits', [
            (('-g', '--git_folder'),
             dict(type=PathArg, help='project git directory')),
            (('-l', '--baseline'),
             dict(type=str, help='baseline commit hash')),
            (('-c', '--commits'),
             dict(type=str, nargs='+',
                  help='one or more commits to be benchmarked')),
        ]),
        ('Report', [
            (('-t', '--report_types'),
             dict(type=str, nargs='+', choices=['chart'],
                  help='report types')),
        ]),
    ]
    for title, arguments in group_specs:
        group = arg_parser.add_argument_group(title)
        for flags, options in arguments:
            group.add_argument(*flags, **options)
    return arg_parser
import random
def get_config(runner,
               raw_uri: str,
               root_uri: str,
               target: str = BUILDINGS,
               nochip: bool = True,
               test: bool = False) -> SemanticSegmentationConfig:
    """Generate the pipeline config for this task. This function will be called
    by RV, with arguments from the command line, when this example is run.

    Args:
        runner (Runner): Runner for the pipeline. Will be provided by RV.
        raw_uri (str): Directory where the raw data resides
        root_uri (str): Directory where all the output will be written.
        target (str): "buildings" | "roads". Defaults to "buildings".
        nochip (bool, optional): If True, read directly from the TIFF during
            training instead of from pre-generated chips. The analyze and chip
            commands should not be run, if this is set to True. Defaults to
            True.
        test (bool, optional): If True, does the following simplifications:
            (1) Uses only a small subset of training and validation scenes.
            (2) Enables test mode in the learner, which makes it use the
                test_batch_sz and test_num_epochs, among other things.
            Defaults to False.

    Returns:
        SemanticSegmentationConfig: An pipeline config.
    """
    spacenet_cfg = SpacenetConfig.create(raw_uri, target)
    scene_ids = spacenet_cfg.get_scene_ids()
    if len(scene_ids) == 0:
        raise ValueError(
            'No scenes found. Something is configured incorrectly.')
    # Fixed seed + sort before shuffle keeps the train/val split reproducible.
    random.seed(5678)
    scene_ids = sorted(scene_ids)
    random.shuffle(scene_ids)
    # Workaround to handle scene 1000 missing on S3.
    if '1000' in scene_ids:
        scene_ids.remove('1000')
    # 80/20 train/validation split.
    split_ratio = 0.8
    num_train_ids = round(len(scene_ids) * split_ratio)
    train_ids = scene_ids[:num_train_ids]
    val_ids = scene_ids[num_train_ids:]
    if test:
        # Tiny subsets for a quick smoke-test run.
        train_ids = train_ids[:16]
        val_ids = val_ids[:4]
    channel_order = [0, 1, 2]
    class_config = spacenet_cfg.get_class_config()
    train_scenes = [
        build_scene(spacenet_cfg, id, channel_order) for id in train_ids
    ]
    val_scenes = [
        build_scene(spacenet_cfg, id, channel_order) for id in val_ids
    ]
    scene_dataset = DatasetConfig(
        class_config=class_config,
        train_scenes=train_scenes,
        validation_scenes=val_scenes)
    chip_sz = 325
    img_sz = chip_sz
    chip_options = SemanticSegmentationChipOptions(
        window_method=SemanticSegmentationWindowMethod.sliding, stride=chip_sz)
    if nochip:
        # Read sliding windows straight from the source rasters during training.
        data = SemanticSegmentationGeoDataConfig(
            scene_dataset=scene_dataset,
            window_opts=GeoDataWindowConfig(
                method=GeoDataWindowMethod.sliding,
                size=chip_sz,
                stride=chip_options.stride),
            img_sz=img_sz,
            num_workers=4)
    else:
        # Train from pre-generated chips produced by the chip command.
        data = SemanticSegmentationImageDataConfig(
            img_sz=img_sz, num_workers=4)
    backend = PyTorchSemanticSegmentationConfig(
        data=data,
        model=SemanticSegmentationModelConfig(backbone=Backbone.resnet50),
        solver=SolverConfig(
            lr=1e-4,
            num_epochs=5,
            test_num_epochs=2,
            batch_sz=8,
            one_cycle=True),
        log_tensorboard=True,
        run_tensorboard=False,
        test_mode=test)
    return SemanticSegmentationConfig(
        root_uri=root_uri,
        dataset=scene_dataset,
        backend=backend,
        train_chip_sz=chip_sz,
        predict_chip_sz=chip_sz,
        img_format='npy',
        chip_options=chip_options)
def readfmt(s, fmt=DEFAULT_INPUTFMT):
    """Parse whitespace-separated numbers in `s` into a list of floats.

    Args:
        s: string of whitespace-separated numeric tokens.
        fmt: input format specifier -- NOTE(review): currently unused by this
            implementation; kept for interface compatibility.

    Returns:
        list[float]: one float per token.
    """
    # Python 3 fix: ``map`` returns a lazy iterator; materialize it so callers
    # really get the documented "array of floats".
    return [float(tok) for tok in s.split()]
def is_image_sharable(context, image, **kwargs):
    """Return True if the image can be shared to others in this context.

    Args:
        context: request context exposing ``is_admin`` and ``owner``.
        image: mapping with an 'owner' key (and 'id' when a membership
            lookup is required).
        **kwargs: optional ``membership`` mapping (with a 'can_share' key)
            to skip the database lookup; None means "not shared".

    Returns:
        bool: whether sharing is permitted.
    """
    # Is admin == image sharable
    if context.is_admin:
        return True
    # Only allow sharing if we have an owner
    if context.owner is None:
        return False
    # If we own the image, we can share it
    if context.owner == image['owner']:
        return True
    # Let's get the membership association
    if 'membership' in kwargs:
        # BUG FIX: a caller-supplied non-None membership was never bound to
        # `member`, raising NameError at the return below.
        member = kwargs['membership']
        if member is None:
            # Not shared with us anyway
            return False
    else:
        members = image_member_find(context,
                                    image_id=image['id'],
                                    member=context.owner)
        if members:
            member = members[0]
        else:
            # Not shared with us anyway
            return False
    # It's the can_share attribute we're now interested in
    return member['can_share']
import sys
from io import StringIO
def run_fct_get_stdout(fct: callable, *args) -> str:
    """Runs a function and collects stdout

    :param fct: function to be run
    :param args: arguments for the function
    :return: collected stdout
    :raises: whatever `fct` raises; sys.stdout is restored either way
    """
    # Local import keeps this fix self-contained for this snippet.
    from contextlib import redirect_stdout
    captured = StringIO()
    # redirect_stdout restores sys.stdout even when fct raises; the previous
    # manual swap leaked the redirection on exceptions.
    with redirect_stdout(captured):
        fct(*args)
    return captured.getvalue()
def ksz_radial_function(z, ombh2, Yp, gasfrac=0.9, xe=1, tau=0, params=None):
    """
    K(z) = - T_CMB sigma_T n_e0 x_e(z) exp(-tau(z)) (1+z)^2
    Eq 4 of 1810.13423
    """
    if params is None:
        params = default_params
    t_cmb_muk = params['T_CMB']  # muK
    sigma_thompson = constants['thompson_SI']
    meter_to_mpc = constants['meter_to_megaparsec']
    ne0 = ne0_shaw(ombh2, Yp)
    # Operation order preserved from the original to keep float results identical.
    return t_cmb_muk * sigma_thompson * ne0 * (1. + z) ** 2. / meter_to_mpc * xe * np.exp(-tau)
def find_commits(repo, ref='HEAD', grep=None):
    """
    Find git commits.

    :returns: List of matching commits' SHA1.
    :param ref: Git reference passed to ``git log``
    :type ref: str
    :param grep: Passed to ``git log --grep``
    :type grep: str or None
    """
    extra = ['--grep', grep] if grep else []
    output = git(repo, 'log', '--format=%H', *extra, ref, '--')
    return output.splitlines()
def stft(y, n_fft=2048, hop_length=None, win_length=None, window='hann',
         center=True, dtype=np.complex64, pad_mode='reflect'):
    """Short-time Fourier transform (STFT)

    Returns a complex-valued matrix D such that
    `np.abs(D[f, t])` is the magnitude of frequency bin `f`
    at frame `t`

    `np.angle(D[f, t])` is the phase of frequency bin `f`
    at frame `t`

    Parameters
    ----------
    y : np.ndarray [shape=(n,)], real-valued
        the input signal (audio time series)

    n_fft : int > 0 [scalar]
        FFT window size

    hop_length : int > 0 [scalar]
        number audio of frames between STFT columns.
        If unspecified, defaults `win_length / 4`.

    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()`.
        The window will be of length `win_length` and then padded
        with zeros to match `n_fft`.
        If unspecified, defaults to ``win_length = n_fft``.

    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        - a window specification (string, tuple, or number);
          see `scipy.signal.get_window`
        - a window function, such as `scipy.signal.hanning`
        - a vector or array of length `n_fft`
        .. see also:: `filters.get_window`

    center : boolean
        - If `True`, the signal `y` is padded so that frame
          `D[:, t]` is centered at `y[t * hop_length]`.
        - If `False`, then `D[:, t]` begins at `y[t * hop_length]`

    dtype : numeric type
        Complex numeric type for `D`. Default is 64-bit complex.

    pad_mode : string
        If `center=True`, the padding mode to use at the edges of the signal.
        By default, STFT uses reflection padding.

    Returns
    -------
    D : np.ndarray [shape=(1 + n_fft/2, t), dtype=dtype]
        STFT matrix

    See Also
    --------
    istft : Inverse STFT
    ifgram : Instantaneous frequency spectrogram
    np.pad : array padding

    Notes
    -----
    This function caches at level 20.

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> D = np.abs(librosa.stft(y))
    >>> D
    array([[2.58028018e-03, 4.32422794e-02, 6.61255598e-01, ...,
            6.82710262e-04, 2.51654536e-04, 7.23036574e-05],
           [2.49403086e-03, 5.15930466e-02, 6.00107312e-01, ...,
            3.48026224e-04, 2.35853557e-04, 7.54836728e-05],
           [7.82410789e-04, 1.05394892e-01, 4.37517226e-01, ...,
            6.29352580e-04, 3.38571583e-04, 8.38094638e-05],
           ...,
           [9.48568513e-08, 4.74725084e-07, 1.50052492e-05, ...,
            1.85637656e-08, 2.89708542e-08, 5.74304337e-09],
           [1.25165826e-07, 8.58259284e-07, 1.11157215e-05, ...,
            3.49099771e-08, 3.11740926e-08, 5.29926236e-09],
           [1.70630571e-07, 8.92518756e-07, 1.23656537e-05, ...,
            5.33256745e-08, 3.33264900e-08, 5.13272980e-09]], dtype=float32)

    Use left-aligned frames, instead of centered frames

    >>> D_left = np.abs(librosa.stft(y, center=False))

    Use a shorter hop length

    >>> D_short = np.abs(librosa.stft(y, hop_length=64))

    Display a spectrogram

    >>> import matplotlib.pyplot as plt
    >>> librosa.display.specshow(librosa.amplitude_to_db(D,
    ...                                                  ref=np.max),
    ...                          y_axis='log', x_axis='time')
    >>> plt.title('Power spectrogram')
    >>> plt.colorbar(format='%+2.0f dB')
    >>> plt.tight_layout()
    """
    # By default, use the entire frame
    if win_length is None:
        win_length = n_fft
    # Set the default hop, if it's not already specified
    if hop_length is None:
        hop_length = int(win_length // 4)
    # NOTE(review): unlike stock librosa, this variant ignores the `window`
    # argument and always uses a Vorbis window -- confirm this is intentional.
    #fft_window = get_window(window, win_length, fftbins=True)
    fft_window = vorbis(win_length)
    # Pad the window out to n_fft size
    fft_window = util.pad_center(fft_window, n_fft)
    # Reshape so that the window can be broadcast
    fft_window = fft_window.reshape((-1, 1))
    # Check audio is valid
    util.valid_audio(y)
    # Pad the time series so that frames are centered
    if center:
        y = np.pad(y, int(n_fft // 2), mode=pad_mode)
    # Window the time series.
    y_frames = util.frame(y, frame_length=n_fft, hop_length=hop_length)
    # Pre-allocate the STFT matrix
    stft_matrix = np.empty((int(1 + n_fft // 2), y_frames.shape[1]),
                           dtype=dtype,
                           order='F')
    # how many columns can we fit within MAX_MEM_BLOCK?
    n_columns = int(util.MAX_MEM_BLOCK / (stft_matrix.shape[0] *
                                          stft_matrix.itemsize))
    # Fill the spectrogram in memory-bounded column blocks, keeping only the
    # non-negative frequency bins of each FFT.
    for bl_s in range(0, stft_matrix.shape[1], n_columns):
        bl_t = min(bl_s + n_columns, stft_matrix.shape[1])
        stft_matrix[:, bl_s:bl_t] = fft.fft(fft_window *
                                            y_frames[:, bl_s:bl_t],
                                            axis=0)[:stft_matrix.shape[0]]
    return stft_matrix
import sys
def user(cmd, directory=None, auto_assert=True, return_io=False,
         bash_only=False, silent=True):
    """Used in system tests to emulate a user action.

    Runs `cmd` through the shell (or through a registered special handler
    unless `bash_only` is set) and optionally checks/returns its output.

    Args:
        cmd: command as a string, or a list/tuple of tokens joined by spaces.
        directory: working directory for the subprocess (None = inherit).
        auto_assert: if True, assert the command exited with status 0.
        return_io: if True, return (returncode, stdout, stderr) instead of
            just the returncode.
        bash_only: if True, skip the `_special_user_commands` dispatch table.
        silent: if False, echo captured stdout/stderr to this process.

    Returns:
        The return code, or (return code, stdout, stderr) when `return_io`.
    """
    if type(cmd) in [list, tuple]:
        cmd = ' '.join(cmd)
    if not bash_only:
        # Handle special cases via the registered per-prefix handlers.
        for case in _special_user_commands:
            if cmd.startswith(case):
                cmd = ''.join(cmd.split(case)[1:]).strip()
                return _special_user_commands[case](
                    cmd,
                    directory=directory,
                    auto_assert=auto_assert,
                    return_io=return_io,
                    silent=silent
                )
    ret = -1
    # FIX: out/err were previously unbound if Popen/communicate raised,
    # producing a NameError in the print/return paths below.
    out = None
    err = None
    try:
        p = Popen(cmd, shell=True, cwd=directory, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        if out is not None and not isinstance(out, str):
            out = out.decode('utf-8')
        if err is not None and not isinstance(err, str):
            err = err.decode('utf-8')
        ret = p.returncode
    except CalledProcessError as exc:
        ret = exc.returncode
    if not silent:
        if out is not None:
            print(out, file=sys.stdout, end='')
        if err is not None:
            print(err, file=sys.stderr, end='')
    if auto_assert:
        # FIX: use `ret` — `p` may be unbound if Popen itself raised.
        assert ret == 0, \
            "user command '" + cmd + "' returned " + str(ret)
    if return_io:
        return ret, out, err
    return ret
def findMatches(arg_by_ref, checkForName=False):
    """Finds POIs with the same geometry in 2 datasets.

    For each POI in the first dataset, check whether there is a corresponding
    POI in the 2nd one. If it exists, move the POI from the second dataset to a
    resulting dataset B. In any case, the POIs from the first dataset are going
    to be moved in the resulting dataset A.

    arg_by_ref   array   -- The array containing the 2 datasets; mutated in
                            place so it ends up holding the leftovers.
    checkForName boolean -- Whether to also check for same name
    returns      tuple   -- The two resulting datasets (res_A, res_B)
    """
    dataA = arg_by_ref[0]
    dataB = arg_by_ref[1]
    definition = list(dataA.columns.values)
    # FIX: DataFrame.append was removed in pandas 2.0 (and cost O(n) per call);
    # collect the pieces and build each result with a single pd.concat instead.
    res_A_parts = []
    res_B_parts = []
    for index, poiA in dataA.iterrows():
        wkt = poiA.WKT
        if checkForName:
            poiB = dataB.loc[(dataB.WKT == wkt) & (dataB[NAME] == poiA[NAME])]
        else:
            poiB = dataB.loc[dataB.WKT == wkt]
        if poiB.WKT.count() > 0:
            res_B_parts.append(poiB)
            dataB = dataB.drop(poiB.index)
        # Every POI of the first dataset is moved to the A result.
        res_A_parts.append(poiA.to_frame().T)
        dataA = dataA.drop(index)
    res_A = pd.concat(res_A_parts) if res_A_parts else pd.DataFrame(columns=definition)
    res_B = pd.concat(res_B_parts) if res_B_parts else pd.DataFrame(columns=definition)
    arg_by_ref[0] = dataA
    arg_by_ref[1] = dataB
    return (res_A, res_B)
def _decicelsius_to_kelvins(temperatures_decicelsius):
"""Converts from temperatures from decidegrees Celsius to Kelvins.
:param temperatures_decicelsius: numpy array of temperatures in decidegrees
Celsius.
:return: temperatures_kelvins: numpy array of temperatures in Kelvins, with
same shape as input.
"""
return temperatures_decicelsius * 0.1 + 273.15 | 880d42637970c680cd241b5418890468443c6a5b | 28,529 |
def emails_to_warn():
    """Return the e-mail addresses that should receive warnings about
    error messages in the chestfreezer (falls back to the defaults)."""
    return _get_array_option_with_default('emails_to_warn', DEFAULT_EMAILS_TO_WARN)
import matplotlib.collections as collections
from matplotlib.colors import LogNorm
from mpl_toolkits.axes_grid1 import make_axes_locatable
import time
def plot_bondcurrents(f, idx_elec, only='+', E=0.0, k='avg', zaxis=2, avg=True, scale='raw', xyz_origin=None,
                      vmin=None, vmax=None, lw=5, log=False, adosmap=False, ADOSmin=None, ADOSmax=None, arrows=False,
                      lattice=False, ps=20, ados=False, atoms=None, out=None, ymin=None, ymax=None, xmin=None, xmax=None,
                      spsite=None, dpi=180, units='angstrom'):
    """ Read bond currents from tbtrans output and plot them

    Parameters
    ----------
    f : string
        TBT.nc file
    idx_elec : int
        the electrode of originating electrons
    only : {'+', '-', 'all'}
        If "+" is supplied only the positive orbital currents are used, for "-",
        only the negative orbital currents are used, else return the sum of both.
    E : float or int,
        A float for energy in eV, int for explicit energy index
    k : bool, int or array_like
        whether the returned bond current is k-averaged,
        an explicit k-point or a selection of k-points
    zaxis : int
        index of out-of plane direction
    avg : bool
        if "True", then it averages all currents coming from each atom and plots
        them in a homogeneous map
        if "False" it plots ALL bond currents as lines originating from each atom
    scale : {'%', 'raw', 'radial'}
        whether values are percent (change vmin and vmax accordingly between
        0% and 100%), raw values, or weighted by the distance from
        ``xyz_origin`` ('radial', avg mode only)
    vmin : float
        min value in colormap. All data below this will be blue
    vmax : float
        max value in colormap. All data greater than this will be yellow
    lattice : bool
        whether you want xy coord of atoms plotted as black dots in the figure
    ps : float
        size of these dots
    spsite : list of int
        special atoms in the lattice that you want to plot as red crosses
    atoms : np.array or list
        list of atoms for which reading and plotting bondcurrents
    out : string
        name of final png figure
    units : {'angstrom', 'nm'}
        units used for the x/y coordinates and axis labels

    Returns
    -------
    bc_list, vmin, vmax, i_list, j_list
        bond currents (possibly log-transformed), the colormap limits, and
        the source/target atom indices of each entry.

    Notes
    -----
    - atoms must be 0-based
    - Be sure that atoms belong to a single plane (say, only graphene, no tip)
    """
    t = time.time()
    print('\n***** BOND-CURRENTS (2D map) *****\n')
    nc = si.get_sile(f)
    elec = nc.elecs[idx_elec]
    # Read bond currents from TBT.nc file
    bc, energy, geom = read_bondcurrents(f, idx_elec, only, E, k)
    # If needed, select only selected atoms from bc_bg.
    bc_coo = bc.tocoo()
    i_list, j_list, bc_list = bc_coo.row, bc_coo.col, bc_coo.data
    if atoms is None:
        print('Reading bond-currents among all atoms in device region')
        atoms = nc.a_dev
        del bc_coo
    else:
        # Only choose atoms with positive indices
        atoms = atoms[atoms >= 0]
        select = np.logical_and(np.in1d(i_list, atoms), np.in1d(j_list, atoms))
        i_list, j_list, bc_list = i_list[select], j_list[select], bc_list[select]
        del bc_coo, select
    print('Number of bond-current entries: {}'.format(np.shape(bc_list)))
    print('MIN bc among selected atoms (from file) = {}'.format(np.min(bc_list)))
    print('MAX bc among selected atoms (from file) = {}'.format(np.max(bc_list)))
    # FIX: resolve the units once, up front. Previously `unitstr` was only set
    # inside the `avg` branch, so `avg=False` crashed with a NameError at the
    # axis-label step, and coordinates were never converted to nm there.
    if units == 'angstrom':
        unitstr, pos_scale, a_mask = '$\AA$', 1.0, 1.54
    elif units == 'nm':
        unitstr, pos_scale, a_mask = 'nm', 0.1, 0.1 * 1.54
    else:
        raise ValueError('units must be "angstrom" or "nm", got {}'.format(units))
    # Plot
    cmap = cm.viridis
    if out is None:
        figname = 'BondCurrents_{}_E{}.png'.format(elec, energy)
    else:
        figname = '{}_{}_E{}.png'.format(out, elec, energy)
    fig, ax = plt.subplots()
    ax.set_aspect('equal')
    if log:
        bc_list = np.log(bc_list + 1)
        norm = LogNorm()
    else:
        norm = None
    if zaxis == 2:
        xaxis, yaxis = 0, 1
    elif zaxis == 0:
        xaxis, yaxis = 1, 2
    elif zaxis == 1:
        xaxis, yaxis = 0, 2
    if avg:
        # Plot bond currents as avg 2D map
        atoms_sort = np.sort(atoms)
        bc_avg = bc.sum(1).A.ravel()[atoms_sort]
        # FIX: `scale is 'radial'` compared string identity (relies on CPython
        # interning and emits a SyntaxWarning); use equality instead.
        if scale == 'radial':
            _, r = geom.close_sc(xyz_origin, R=np.inf, idx=atoms_sort, ret_rij=True)
            bc_avg = np.multiply(bc_avg, r)
        x = pos_scale * geom.xyz[atoms_sort, xaxis]
        y = pos_scale * geom.xyz[atoms_sort, yaxis]
        if scale == '%':
            if vmin is None:
                vmin = np.amin(bc_avg) * 100 / np.amax(bc_avg)
            if vmax is None:
                vmax = 100
            vmin = vmin * np.amax(bc_avg) / 100
            vmax = vmax * np.amax(bc_avg) / 100
        else:
            if vmin is None:
                vmin = np.amin(bc_avg)
            if vmax is None:
                vmax = np.amax(bc_avg)
        coords = np.column_stack((x, y))
        # FIX: renamed min/max -> xy_min/xy_max to stop shadowing the builtins.
        img, xy_min, xy_max = mask_interpolate(coords, bc_avg, oversampling=30, a=a_mask)
        # Note that we tell imshow to show the array created by mask_interpolate
        # faithfully and not to interpolate by itself another time.
        image = ax.imshow(img.T, extent=(xy_min[0], xy_max[0], xy_min[1], xy_max[1]),
                          origin='lower', interpolation='none', cmap='viridis',
                          vmin=vmin, vmax=vmax)
    else:
        if vmin is None:
            vmin = np.min(bc_list)
        if vmax is None:
            vmax = np.max(bc_list)
        # Plot bond currents as half-segments
        start_list = zip(pos_scale * geom.xyz[i_list, xaxis], pos_scale * geom.xyz[i_list, yaxis])
        half_end_list = zip(.5 * pos_scale * (geom.xyz[i_list, xaxis] + geom.xyz[j_list, xaxis]),
                            .5 * pos_scale * (geom.xyz[i_list, yaxis] + geom.xyz[j_list, yaxis]))
        line_list = list(map(list, zip(start_list, half_end_list)))  # segments length = 1/2 bonds length
        linewidths = lw * bc_list / np.max(bc_list)
        lattice_bonds = collections.LineCollection(line_list, cmap=cmap, linewidths=linewidths, norm=norm)
        lattice_bonds.set_array(bc_list / np.amax(bc_list))
        lattice_bonds.set_clim(vmin / np.amax(bc_list), vmax / np.amax(bc_list))
        ax.add_collection(lattice_bonds)
        image = lattice_bonds
    if lattice:
        x, y = pos_scale * geom.xyz[atoms, xaxis], pos_scale * geom.xyz[atoms, yaxis]
        ax.scatter(x, y, s=ps * 2, marker='o', facecolors='None', linewidth=0.8, edgecolors='k')
    if spsite is not None:
        xs, ys = pos_scale * geom.xyz[spsite, xaxis], pos_scale * geom.xyz[spsite, yaxis]
        ax.scatter(xs, ys, s=ps * 2, marker='x', color='red')
    ax.autoscale()
    ax.margins(0.)
    plt.ylim(ymin, ymax)
    plt.xlim(xmin, xmax)
    plt.xlabel('x ({})'.format(unitstr))
    plt.ylabel('y ({})'.format(unitstr))
    plt.gcf()
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    if avg:
        axcb = plt.colorbar(image, cax=cax, format='%f', ticks=[vmin, vmax])
        if vmin == 0.:
            axcb.ax.set_yticklabels(['0', '$\geq$ {:.3e}'.format(vmax)])
        else:
            axcb.ax.set_yticklabels(['$\leq$ {:.3e}'.format(vmin), '$\geq$ {:.3e}'.format(vmax)])
        print('MIN bc among selected atoms (in final plot) = {}'.format(vmin))
        print('MAX bc among selected atoms (in final plot) = {}'.format(vmax))
    else:
        axcb = plt.colorbar(image, cax=cax, format='%f', ticks=[vmin / np.amax(bc_list), vmax / np.amax(bc_list)])
        if scale == '%':
            # FIX: the original referenced an undefined `max_newbc_bg` here
            # (guaranteed NameError); percentages are taken relative to the
            # largest plotted bond current instead.
            vmin, vmax = vmin * 100 / np.amax(bc_list), vmax * 100 / np.amax(bc_list)
            axcb.ax.set_yticklabels(['{:.1f} %'.format(vmin), '{:.1f} %'.format(vmax)])
            print('MIN bc among selected atoms (in final plot) = {:.1f} %'.format(vmin))
            print('MAX bc among selected atoms (in final plot) = {:.1f} %'.format(vmax))
        else:
            axcb.ax.set_yticklabels(['{:.3e}'.format(vmin), '{:.3e}'.format(vmax)])
            print('MIN bc among selected atoms (in final plot) = {}'.format(vmin))
            print('MAX bc among selected atoms (in final plot) = {}'.format(vmax))
    plt.savefig(figname, bbox_inches='tight', transparent=True, dpi=dpi)
    print('Successfully plotted to "{}"'.format(figname))
    print('Done in {} sec'.format(time.time() - t))
    return bc_list, vmin, vmax, i_list, j_list
import types
from typing import Sequence
from typing import Tuple
def get_public_symbols(
    root_module: types.ModuleType) -> Sequence[Tuple[str, types.FunctionType]]:
    """Returns `(symbol_name, symbol)` for all symbols of `root_module`."""
    # The module's `__all__` declares its public API; resolve each name.
    return [(name, getattr(root_module, name))
            for name in getattr(root_module, '__all__')]
def NewStandardEnv(packager, provider):
    """NewStandardEnv(object packager, object provider) object

    NewStandardEnv returns a new *Env with the given params plus standard declarations.
    """
    # Delegate to the native checker binding and wrap the raw handle.
    raw_handle = _checker.checker_NewStandardEnv(packager.handle, provider.handle)
    return Env(handle=raw_handle)
def get_ecs_secret_access_key(config_fpath, bucket_name):
    """Return the ECS secret access key.

    :param config_fpath: path to the dtool config file
    :param bucket_name: name of the bucket in a ECS namespace
    :returns: the ECS secret access key or an empty string
    """
    # The config key is namespaced per bucket; missing keys yield "".
    config_key = ECS_SECRET_ACCESS_KEY_KEY_PREFIX + bucket_name
    return get_config_value_from_file(config_key, config_fpath, "")
def solve_naked_quads(sudoku, verbose):
    """Run the naked n-tuple strategy with tuple size 4 (naked quads):
    exclude the candidates of seen quad-value cell quads from unsolved
    cells in their unit."""
    return solve_naked_n_tuples(sudoku, 4, verbose)
def prototypical_spectra_plot(
    dataset,
    results_df,
    plot_type="imshow",
    fig=None,
    fig_kws={},
    plot_kws={},
    cbar_kws={},
    **kwargs
):
    """Plot the prototypical spectra from the calibration samples.

    One subplot per row of ``results_df`` is created; each row must carry an
    ``hdf_path`` column that locates a prototypical EEM inside ``dataset.hdf``.

    Args:
        dataset (pyeem.datasets.Dataset): Dataset whose ``hdf`` store holds
            the prototypical spectra referenced by ``results_df``.
        results_df (pandas.DataFrame): One row per prototypical spectrum,
            providing the ``hdf_path`` key into the HDF store.
        plot_type (str, optional): Forwarded to ``eem_plot`` (e.g. "imshow",
            "surface", "surface_contour"). Defaults to "imshow".
        fig (matplotlib.pyplot.figure, optional): NOTE(review): currently
            ignored — a new figure is always created below; confirm whether
            reusing a caller-supplied figure was intended. Defaults to None.
        fig_kws (dict, optional): Optional keyword arguments to include for the figure. Defaults to {}.
        plot_kws (dict, optional): Optional keyword arguments sent to the matplotlib plot call. Defaults to {}.
        cbar_kws (dict, optional): Optional keyword arguments to include for the colorbar. Defaults to {}.

    Returns:
        list of matplotlib.axes.Axes: All axes created (including hidden
        filler axes for unused grid cells).
    """
    # NOTE(review): the mutable-default dicts above are not mutated here
    # (dict(default, **fig_kws) copies), but consider None defaults anyway.
    nspectra = len(results_df.index.unique())
    nrows, ncols = _get_subplot_dims(nspectra)
    nplots = nrows * ncols
    # Set the fig_kws as a mapping of default and kwargs
    default_fig_kws = dict(
        tight_layout={"h_pad": 5, "w_pad": 0.05}, figsize=(ncols ** 2, nrows * ncols)
    )
    # Set the fig_kws (caller-supplied values win over the defaults).
    fig_kws = dict(default_fig_kws, **fig_kws)
    fig = plt.figure(**fig_kws)
    # Surface plots need 3D axes.
    projection = None
    if plot_type in ["surface", "surface_contour"]:
        projection = "3d"
    axes = []
    for i in range(1, ncols * nrows + 1):
        ax = fig.add_subplot(nrows, ncols, i, projection=projection)
        axes.append(ax)
    # Hide the trailing grid cells that have no spectrum to show.
    for i in range(nspectra, nplots):
        axes[i].axis("off")
        axes[i].set_visible(False)
        # axes[i].remove()
    ax_idx = 0
    for index, row in results_df.iterrows():
        # Load this row's prototypical EEM from the HDF store.
        proto_eem_df = pd.read_hdf(dataset.hdf, key=row["hdf_path"])
        # Metadata lives in the MultiIndex levels; each is expected to hold a
        # single unique value per spectrum (.item() raises otherwise).
        source_name = proto_eem_df.index.get_level_values("source").unique().item()
        proto_conc = proto_eem_df.index.get_level_values("proto_conc").unique().item()
        source_units = (
            proto_eem_df.index.get_level_values("source_units").unique().item()
        )
        intensity_units = (
            proto_eem_df.index.get_level_values("intensity_units").unique().item()
        )
        title = "Prototypical Spectrum: {0}\n".format(source_name.title())
        title += "Concentration: {0} {1}".format(proto_conc, source_units)
        # Keep only the emission-wavelength index level for plotting.
        idx_names = proto_eem_df.index.names
        drop_idx_names = [
            idx_name for idx_name in idx_names if idx_name != "emission_wavelength"
        ]
        proto_eem_df = proto_eem_df.reset_index(level=drop_idx_names, drop=True)
        eem_plot(
            proto_eem_df,
            plot_type=plot_type,
            intensity_units=intensity_units,
            title=title,
            ax=axes[ax_idx],
            fig_kws=fig_kws,
            plot_kws=plot_kws,
            cbar_kws=cbar_kws,
            **kwargs
        )
        ax_idx += 1
    # Layout tweaks; 3D surface plots get extra horizontal padding by default.
    pad = kwargs.get("tight_layout_pad", 1.08)
    h_pad = kwargs.get("tight_layout_hpad", None)
    w_pad = kwargs.get("tight_layout_wpad", None)
    rect = kwargs.get("tight_layout_rect", None)
    if plot_type in ["surface", "surface_contour"]:
        w_pad = kwargs.get("tight_layout_wpad", 25)
    plt.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
    return axes
import os
def parse(name, **kwargs):
    """Parse a C/C++ file into a validated clang translation unit.

    Extra keyword arguments are forwarded to ``Index.parse``.
    """
    index = clang.cindex.Index.create()
    assert os.path.exists(name)
    translation_unit = index.parse(name, **kwargs)
    return _ensure_parse_valid(translation_unit)
def generate_gate_piover8(
    c_sys: CompositeSystem, is_physicality_required: bool = True
) -> "Gate":
    """Return the Gate class for the pi/8 (T) gate on the composite system.

    Parameters
    ----------
    c_sys: CompositeSystem
        Composite system the gate acts on; must consist of exactly one
        elemental system (the T gate is a single-qubit gate).
    is_physicality_required: bool = True
        whether the generated object is physicality required, by default True

    Returns
    ----------
    Gate
        The Gate class for the pi/8 (T) gate on the composite system.
    """
    assert len(c_sys.elemental_systems) == 1
    hs_matrix = generate_gate_piover8_mat()
    return Gate(
        c_sys=c_sys, hs=hs_matrix, is_physicality_required=is_physicality_required
    )
def get_ftext_trials_fast(review_id):
    """
    retrieve all ftext trials related to a review
    @param review_id: pmid of review
    @return: all registered trials and their linked publications
    """
    connection = dblib.create_con(VERBOSE=True)
    cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
    query = """
    SELECT tr.nct_id, tr.brief_title, tr.overall_status, tr.brief_summary, tr.enrollment, tr.completion_date
    FROM tregistry_entries tr
    INNER JOIN freetext_review_rtrial rt ON tr.nct_id = rt.nct_id
    LEFT JOIN trialpubs_rtrial t on tr.nct_id = t.nct_id
    WHERE rt.review_id = %s
    GROUP BY tr.nct_id, rt.review_id, rt.nct_id
    """
    cursor.execute(query, (review_id,))
    registered_trials = list(cursor.fetchall())
    return {'reg_trials': registered_trials}
from typing import List
from typing import Tuple
def find_edges(names: List[str]) -> List[Tuple[str, str]]:
    """
    Given a set of short lineages, return a list of pairs of parent-child
    relationships among lineages.
    """
    longnames = [decompress(name) for name in names]
    edges = []
    for child in longnames:
        if child == "A":
            continue  # A is root
        # Walk up the ancestry until we hit another lineage from the input set.
        parent = get_parent(child)
        while parent is not None and parent not in longnames:
            parent = get_parent(parent)
        if parent is None or parent == child:
            continue
        edges.append((child, parent) if child < parent else (parent, child))
    edges = [(compress(a), compress(b)) for a, b in edges]
    # The result must form a tree over the inputs: unique edges, n-1 of them.
    assert len(set(edges)) == len(edges)
    assert len(edges) == len(names) - 1
    return edges
import argparse
import os
def get_args():
    """Parse and return the command-line arguments.

    The positional ``text`` argument may be a literal string or a path to a
    file; when it names an existing file, the file's contents (with trailing
    whitespace stripped) are used instead.
    """
    parser = argparse.ArgumentParser(
        description='Apples and bananas',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('text',
                        metavar='text',
                        help='Input text or file')
    parser.add_argument('-v',
                        '--vowel',
                        # FIX: typo in user-facing help text ("subsitutue").
                        help='The vowel to substitute',
                        metavar='vowel',
                        type=str,
                        choices=['a', 'e', 'i', 'o', 'u'],
                        default='a')
    args = parser.parse_args()
    if os.path.isfile(args.text):
        # FIX: use a context manager so the file handle is always closed.
        with open(args.text) as handle:
            args.text = handle.read().rstrip()
    return args
def get_plot_spec_binstat_abs(energy, bins = 50, range = (0, 5)):
    """
    Create `PlotSpec` for plot of some stat of abs energy resolution vs TrueE.
    """
    # pylint: disable=redefined-builtin
    label_x = 'True %s Energy [GeV]' % (energy)
    label_y = '(Reco - True) %s Energy [GeV]' % (energy)
    return PlotSpec(
        title   = None,
        label_x = label_x,
        label_y = label_y,
        bins_x  = bins,
        range_x = range,
        bins_y  = None,
        range_y = None,
        minor   = True,
        grid    = True,
    )
def generate_token(data):
    """Generate a token for given data object"""
    # Sign the payload with the app secret; the salt namespaces the token.
    secret_key = current_app.config['SECRET_KEY']
    salt = current_app.config['SECURITY_PASSWORD_SALT']
    return URLSafeTimedSerializer(secret_key).dumps(data, salt=salt)
import time
def monitor_gcp(vm_name: str, job_arguments: dict):
    """Monitor status of job based on vm_name. Requires stable connection."""
    # Pick the gcloud listing command: TPU jobs vs plain compute instances.
    if job_arguments["use_tpus"]:
        check_cmd = [
            "gcloud",
            "alpha",
            "compute",
            "tpus",
            "--zone",
            "europe-west4-a",
            "--verbosity",
            "critical",
        ]
    else:
        check_cmd = [
            "gcloud",
            "compute",
            "instances",
            "list",
            "--verbosity",
            "critical",
        ]
    # Keep retrying until the command succeeds (transient failures expected).
    while True:
        try:
            out = sp.check_output(check_cmd)
            break
        except sp.CalledProcessError as exc:
            print(exc.stderr, exc.returncode)
            time.sleep(1)
    # Drop the header row and the trailing empty line, then collect the names
    # of all jobs currently staging or running.
    running_job_names = []
    for raw_line in out.split(b"\n")[1:-1]:
        fields = raw_line.decode("utf-8").split()
        if fields[-1] in ["STAGING", "RUNNING"]:
            running_job_names.append(fields[0])
    return vm_name in running_job_names
import sys
def CleanError(ErrorMessage, subproc=False):
    """Clean Errors from Log File when using import statement"""
    # The second-to-last entry of the traceback carries the error line info;
    # if it is missing, the debugger was probably imported inside the script.
    try:
        error = ErrorMessage[-2]
    except:
        sys.stdout.write("\n%s%s%s%s%s" % (bcolors.red, bcolors.underline, bcolors.bold, "Something Went Wrong - Seems You might have Imported Debuggy in the Script", bcolors.end))
        sys.exit(1)
    if subproc:
        # Subprocess output arrives as one raw string rather than a list.
        try:
            lineno = int(ErrorMessage.split('\n')[1].split(',')[1].strip(' line'))
        except:
            sys.exit(1)
    else:
        lineno = int(ErrorMessage[1].split(',')[1].strip(' line'))
    return (lineno, error)
def five_top_workers(month, year):
    """
    Top 5 presence users with information about them.

    Collects each user's data for the given month of `year`, then delegates
    ranking to `sorted_months_dict` / `five_top_user_data`.
    """
    dict_months = []
    monthly_grouped = group_by_month(get_data(), year)
    for user in monthly_grouped:
        try:
            # FIX: dict.items() is not subscriptable on Python 3 — the old
            # `user.items()[0]` raised TypeError on every user, so the bare
            # except silently dropped everyone. Take the single (name, months)
            # pair via an iterator instead.
            name, months = next(iter(user.items()))
            dict_months.append((name, months[month]))
        except Exception:
            # Skip users without data for the requested month.
            pass
    sorted_dict = sorted_months_dict(dict_months)
    return five_top_user_data(dict_months, sorted_dict)
import collections
import re
def get_assignment_map_from_checkpoint(tvars, init_checkpoint, prefix=""):
    """Compute the union of the current variables and checkpoint variables."""
    # Map graph-variable names (without the trailing ":N" suffix) to variables.
    name_to_variable = collections.OrderedDict()
    for var in tvars:
        var_name = var.name
        match = re.match("^(.*):\\d+$", var_name)
        if match is not None:
            var_name = match.group(1)
        name_to_variable[var_name] = var
    assignment_map = collections.OrderedDict()
    initialized_variable_names = {}
    # Keep only checkpoint variables that also exist in the current graph
    # (after applying the optional name prefix).
    for checkpoint_entry in tf.train.list_variables(init_checkpoint):
        ckpt_name = checkpoint_entry[0]
        if prefix + ckpt_name not in name_to_variable:
            continue
        assignment_map[ckpt_name] = prefix + ckpt_name
        initialized_variable_names[ckpt_name] = 1
        initialized_variable_names[ckpt_name + ":0"] = 1
    return assignment_map, initialized_variable_names
def u_rce_hh80(lats, thermal_ro, rot_rate=ROT_RATE_EARTH, radius=RAD_EARTH):
    """Zonal wind in gradient balance with equilibrium temperatures."""
    # Gradient-balance factor from the thermal Rossby number.
    balance_factor = (1 + 2 * thermal_ro) ** 0.5 - 1
    return rot_rate * radius * cosdeg(lats) * balance_factor
def CreatePiecewiseFunction(**params):
    """Create and return a piecewise function. Optionally, parameters can be
    given to assign to the piecewise function.
    """
    pwf = servermanager.piecewise_functions.PiecewiseFunction()
    pipeline_controller = servermanager.ParaViewPipelineController()
    # Initialize, apply caller-supplied properties, then register the proxy.
    pipeline_controller.InitializeProxy(pwf)
    SetProperties(pwf, **params)
    pipeline_controller.RegisterOpacityTransferFunction(pwf)
    return pwf
import json
def cancelCardTransactionPayload(cancel_time):
    """
    Build the JSON payload for the cancelCardTransaction API call.

    Note: All parameters are of type String unless otherwise stated below.
    :param cancel_time: Date and time of the request. Format - YYYY-MM-DD HH:mm:ss
    :return: JSON Payload for API call
    """
    return json.dumps({"cancel_time": cancel_time})
import re
def remove_elongation(word):
    """Collapse character elongation (tatweel-style repeats) in `word`.

    Any run of three or more identical word characters is reduced to a single
    occurrence of that character.

    :param word: the input word to remove elongation
    :return: delongated word
    """
    # FIX: the previous implementation substituted the *literal* matched text
    # globally, one distinct match at a time. A shorter run's literal could
    # partially consume a longer run (e.g. "aaa aaaa" -> "a aa" instead of
    # "a a"). A single backreference substitution collapses every run
    # correctly in one pass.
    return re.sub(r'(\w)\1{2,}', r'\1', word)
def coevolve_alignment(method, alignment, **kwargs):
    """ Apply coevolution method to alignment (for intramolecular coevolution)

    method: f(alignment,**kwargs) -> 2D array of coevolution scores
    alignment: alignment object for which coevolve scores should be
        calculated
    **kwargs: parameters to be passed to method()
    """
    # Run method-specific input validation before the generic alignment check.
    if method == sca_alignment:
        sca_input_validation(alignment, **kwargs)
    if method == ancestral_state_alignment:
        ancestral_states_input_validation(alignment, **kwargs)
    validate_alignment(alignment)
    return method(alignment, **kwargs)
from textwrap import dedent, wrap
def compute_known_facts(known_facts, known_facts_keys):
    """Compute the various forms of knowledge compilation used by the
    assumptions system.

    This function is typically applied to the results of the ``get_known_facts``
    and ``get_known_facts_keys`` functions defined at the bottom of
    this file.

    Returns the full text of a generated Python module (as a string)
    containing the known facts in CNF form and as a compressed lookup dict.
    """
    # Template for the generated module; ``dedent`` strips the common leading
    # whitespace so the emitted file starts at column 0. The two %s slots are
    # filled with the CNF clause list and the lookup-dict entries below.
    fact_string = dedent('''\
    """
    The contents of this file are the return value of
    ``sympy.assumptions.ask.compute_known_facts``.

    Do NOT manually edit this file.
    Instead, run ./bin/ask_update.py.
    """

    from sympy.core.cache import cacheit
    from sympy.logic.boolalg import And, Not, Or
    from sympy.assumptions.ask import Q

    # -{ Known facts in Conjunctive Normal Form }-
    @cacheit
    def get_known_facts_cnf():
        return And(
            %s
        )

    # -{ Known facts in compressed sets }-
    @cacheit
    def get_known_facts_dict():
        return {
            %s
        }
    ''')
    # Compute the known facts in CNF form for logical inference
    # LINE separates generated entries; HANG indents wrapped continuation lines.
    LINE = ",\n "
    HANG = ' '*8
    cnf = to_cnf(known_facts)
    # One string per CNF clause, joined into the body of And(...).
    c = LINE.join([str(a) for a in cnf.args])
    # Map each single fact to the set of facts it implies, sorted for a
    # deterministic (diff-friendly) generated file.
    mapping = single_fact_lookup(known_facts_keys, cnf)
    items = sorted(mapping.items(), key=str)
    keys = [str(i[0]) for i in items]
    values = ['set(%s)' % sorted(i[1], key=str) for i in items]
    # Render "key: set([...])" entries, wrapping long lines with a hanging
    # indent so the generated dict stays readable.
    m = LINE.join(['\n'.join(
        wrap("%s: %s" % (k, v),
             subsequent_indent=HANG,
             break_long_words=False))
        for k, v in zip(keys, values)]) + ','
    return fact_string % (c, m)
def mro(*bases):
    """Calculate the Method Resolution Order of bases using the C3 algorithm.

    Suppose you intended creating a class K with the given base classes. This
    function returns the MRO which K would have, *excluding* K itself (since
    it doesn't yet exist), as if you had actually created the class.

    Another way of looking at this, if you pass a single class K, this will
    return the linearization of K (the MRO of K, *including* itself).

    Found at:
    http://code.activestate.com/recipes/577748-calculate-the-mro-of-a-class/
    """
    # Work on the linearizations of every base, plus the bases themselves.
    sequences = [list(base.__mro__) for base in bases]
    sequences.append(list(bases))
    result = []
    while True:
        remaining = [seq for seq in sequences if seq]
        if not remaining:
            # Everything merged — done.
            return tuple(result)
        # A head is a valid merge candidate iff it appears in no tail.
        for seq in remaining:
            head = seq[0]
            if not any(head in other[1:] for other in remaining):
                break
        else:
            raise TypeError("inconsistent hierarchy, no C3 MRO is possible")
        result.append(head)
        # Remove the chosen candidate from every sequence head.
        for seq in remaining:
            if seq[0] == head:
                del seq[0]
import os
import subprocess
def getPeaksAt(peaks, bigwigs, folder='', bigwignames=None, peaknames=None, window=1000, title='', numpeaks=4000, numthreads=8,
               width=5, length=10, torecompute=False, name='temp/peaksat.pdf', refpoint="TSS", scale=None,
               sort=False, withDeeptools=True, onlyProfile=False, cluster=1, vmax=None, vmin=None, overlap=False,
               legendLoc=None):
    """
    Plot bigwig coverage around peak centers, either via deeptools
    (computeMatrix + plotHeatmap/plotProfile) or directly via pyBigWig.

    In the pyBigWig branch, for each bigwig a (numpeaks x 2*window) count
    matrix centered on each (sorted) peak is read and rendered as a heatmap.

    Args:
        peaks: pandas DataFrame of peaks, a list of peak-file paths, or a
            single path (deeptools branch); a DataFrame is required for the
            pyBigWig branch.
        bigwigs: bigwig file name(s), looked up relative to `folder`.
        window: half-width (bp) of the region around each reference point.
        scale: optional dict mapping bigwig name -> multiplicative factor
            (pyBigWig branch only).
        name: output file; its extension-stripped base names the matrix .gz.
        ... remaining keyword arguments are forwarded to the deeptools
        command line or control sorting/labels as in the original API.

    Returns:
        (cov, fig) in the pyBigWig branch; None in the deeptools branch
        (the plot is written to `name` by the external command).
    """
    # FIX: mutable default arguments ([]) were shared across calls and mutated
    # below (peaknames.pop); use None sentinels instead.
    bigwignames = [] if bigwignames is None else bigwignames
    peaknames = [] if peaknames is None else peaknames
    if withDeeptools:
        if isinstance(peaks, pd.DataFrame):
            # FIX: write the DataFrame out *before* rebinding `peaks` to the
            # file name — the previous order called .to_csv on a str.
            peaks.to_csv('peaks.bed', sep='\t', index=False, header=False)
            peaks = 'peaks.bed '
        elif type(peaks) == list:
            # Keep only peak files with more than 20 lines; drop the matching
            # label for any file that is filtered out.
            pe = ''
            i = 0
            for n, p in enumerate(peaks):
                if 20 < int(os.popen('wc -l ' + p).read().split(' ')[0]):
                    pe += p + ' '
                elif len(peaknames) > 0:
                    peaknames.pop(n - i)
                    i += 1
            peaks = pe
        elif type(peaks) == str:
            peaks += ' '
        else:
            raise ValueError(' we dont know this filetype')
        if type(bigwigs) is list:
            pe = ''
            for val in bigwigs:
                pe += folder + val + ' '
            bigwigs = pe
        else:
            bigwigs = folder + bigwigs + ' '
        h.createFoldersFor(name)
        cmd = ''
        # Recompute the matrix only if it is missing or explicitly requested.
        if not os.path.exists('.'.join(name.split('.')[:-1]) + ".gz") or torecompute:
            cmd += "computeMatrix reference-point -S "
            cmd += bigwigs
            cmd += " --referencePoint " + refpoint
            cmd += " --regionsFileName " + peaks
            cmd += " --missingDataAsZero"
            cmd += " --outFileName " + '.'.join(name.split('.')[:-1]) + ".gz"
            cmd += " --upstream " + str(window) + " --downstream " + str(window)
            cmd += " --numberOfProcessors " + str(numthreads) + ' && '
        # NOTE(review): this list branch prepends --matrixFile before the
        # program name, which deeptools will reject — confirm intent.
        if type(name) is list:
            cmd += " --matrixFile " + '.gz '.join(name) + ".gz"
        cmd += "plotHeatmap" if not onlyProfile else 'plotProfile'
        cmd += " --matrixFile " + '.'.join(name.split('.')[:-1]) + ".gz"
        cmd += " --outFileName " + name
        cmd += " --refPointLabel " + refpoint
        if vmax is not None:
            cmd += " -max " + str(vmax)
        if vmin is not None:
            cmd += " -min " + str(vmin)
        if cluster > 1:
            cmd += " --perGroup --kmeans " + str(cluster)
        if overlap:
            if onlyProfile:
                cmd += " --plotType overlapped_lines"
            else:
                raise ValueError("overlap only works when onlyProfile is set")
        if legendLoc:
            cmd += " --legendLocation " + legendLoc
        if len(peaknames) > 0:
            pe = ''
            for i in peaknames:
                pe += ' ' + i
            cmd += " --regionsLabel" + pe
        if type(bigwigs) is list:
            if len(bigwignames) > 0:
                pe = ''
                for i in bigwignames:
                    pe += ' ' + i
                cmd += " --samplesLabel" + pe
        if title:
            cmd += " --plotTitle " + title
        data = subprocess.run(cmd, shell=True, capture_output=True)
        print(data)
    else:
        # Center each window on the summit when available, else on the midpoint.
        if 'relative_summit_pos' in peaks.columns:
            center = [int((val['start'] + val['relative_summit_pos'])) for k, val in peaks.iterrows()]
        else:
            center = [int((val['start'] + val['end']) / 2) for k, val in peaks.iterrows()]
        pd.set_option('mode.chained_assignment', None)
        peaks['start'] = [c - window for c in center]
        peaks['end'] = [c + window for c in center]
        # FIX: plt.subplots() has no `title` kwarg (it raised TypeError);
        # set the title on the figure instead.
        fig, ax = plt.subplots(1, len(bigwigs), figsize=[width, length])
        fig.suptitle(title if title else 'Chip Heatmap')
        if sort:
            peaks = peaks.sort_values(by=["foldchange"], ascending=False)
        if numpeaks > len(peaks):
            numpeaks = len(peaks) - 1
        cov = {}
        maxs = []
        for num, bigwig in enumerate(bigwigs):
            bw = pyBigWig.open(folder + bigwig)
            co = np.zeros((numpeaks, window * 2), dtype=int)
            for i, (k, val) in enumerate(peaks.iloc[:numpeaks].iterrows()):
                try:
                    co[i] = np.nan_to_num(bw.values(str(val.chrom), val.start, val.end), 0)
                except RuntimeError as e:
                    # Region outside the bigwig's chromosome bounds; leave zeros.
                    print(str(val.chrom), val.start, val.end)
            # FIX: close the bigwig handle instead of leaking it.
            bw.close()
            cov[bigwig] = co
            maxs.append(co.max())
        for num, bigwig in enumerate(bigwigs):
            # FIX: `scale is dict` compared a value against the type object and
            # was always False; resolve the per-bigwig factor with isinstance.
            factor = scale[bigwig] if isinstance(scale, dict) else 1
            sns.heatmap(cov[bigwig] * factor, ax=ax[num], vmax=max(maxs), yticklabels=[], cmap=cmaps[num],
                        cbar=True)
            ax[num].set_title(bigwig.split('.')[0])
        fig.subplots_adjust(wspace=0.1)
        fig.show()
        fig.savefig(name)
        return cov, fig
def CalculatepHfromTA(param, TA, val, TP, TSi):
    """ SUB CalculatepHfromTATC, version 04.01, 10-13-96, written by Ernie Lewis.
    Inputs: param ('TC', 'pCO2' or 'CO3'), TA, val (value of that param), TP, TSi
    Output: pH
    This calculates pH from TA and the given carbon parameter using K1 and K2
    by Newton's method. It tries to solve for the pH at which Residual = 0.
    The starting guess is pH = 8.
    Though it is coded for H on the total pH scale, for the pH values
    occuring in seawater (pH > 6) it will be equally valid on any pH scale
    (H terms negligible) as long as the K Constants are on that scale.
    """
    # Declare global constants
    global K0, K1, K2, KW, KB
    # Set iteration parameters
    pHGuess = 8.0  # this is the first guess
    pHTol = 1.0e-4  # tolerance for iterations end
    ln10 = np.log(10.0)
    # creates a vector holding the first guess for all samples
    if hasattr(TA, 'shape'):
        pH = np.ones(TA.shape) * pHGuess
    else:
        pH = pHGuess
    deltapH = pHTol + 1.0
    # Begin iteration to find pH
    while np.any(abs(deltapH) > pHTol):
        H, Beta = CalculateHfrompH(pH)
        NCAlk = CalculateNCAlkfrompH(H, TP, TSi)
        # NOTE: string comparison must use '==', not 'is'; identity of
        # string literals is implementation-dependent (CPython may or may
        # not intern them) and raises SyntaxWarning on Python >= 3.8.
        if param == 'TC':
            CAlk = val * K1 * (H + 2 * K2) / Beta
            # find Slope dTA/dpH (not exact, but keeps all important terms)
            Slope = ln10 * (
                val * K1 * H * (H * H + K1 * K2 + 4.0 * H * K2)
                / Beta / Beta + TB * KB * H / (KB + H) / (KB + H) + KW / H + H
            )
        elif param == 'pCO2':
            HCO3 = K0 * K1 * val / H
            CO3 = K0 * K1 * K2 * val / (H * H)
            CAlk = HCO3 + 2 * CO3
            # find Slope dTA/dpH (not exact, but keeps all important terms)
            Slope = ln10 * (
                HCO3 + 4 * CO3 + TB * KB * H / (KB + H) / (KB + H) + KW / H + H
            )
        elif param == 'CO3':
            # here `val` IS the carbonate ion concentration [CO3--]
            HCO3 = H * val / K2
            CAlk = HCO3 + 2 * val
            # find Slope dTA/dpH (not exact, but keeps all important terms)
            # BUGFIX: the original referenced an undefined name `CO3` here,
            # which raised NameError whenever this branch executed.
            Slope = ln10 * (
                HCO3 + 4 * val + TB * KB * H / (KB + H) / (KB + H) + KW / H + H
            )
        else:
            raise ValueError('Unknown carbon param: {}'.format(param))
        TA_calc = CAlk + NCAlk
        Residual = TA - TA_calc
        deltapH = Residual / Slope  # this is Newton's method
        # to keep the jump from being too big
        while np.any(abs(deltapH) > 1):
            deltapH = deltapH / 2.0
        pH = pH + deltapH  # Is on the same scale as K1 and K2 were calculated
    return pH
# BUGFIX: `operator.inv` is the *bitwise* inversion operator (~); applying it
# to a float64 transform matrix raises TypeError. The matrix inverse from
# numpy.linalg is what affine_transform() needs.
from numpy.linalg import inv
def transform_image(image, shiftx, shifty, angle, order=1):
    """
    Apply shift and rotation to the image.
    The translation is applied first, then the rotation. If no rotation is
    requested (``angle=0``), then ``scipy.ndimage.shift()`` is called to
    perform a translation. Otherwise, ``scipy.ndimage.affine_transform()`` is
    called. In both cases the settings ``mode='wrap', prefilter=False`` are
    used. Prefilter *must* be turned off because it applies lossy image
    sharpening leading to artifacts.
    Parameters
    ----------
    image : numpy.ndarray
        2D image input.
    shiftx : float
        Shift in the x-axis in pixels.
    shifty : float
        Shift in the y-axis in pixels.
    angle : float
        Rotation angle in radians (positive is clockwise).
    order : int
        (Optional, default: 1) Spline interpolation order. 1 for bilinear, 3
        for bicubic (bilinear is the original behavior).
    Returns
    -------
    numpy.ndarray
        Transformed image.
    Notes
    -----
    The transformation is implemented as sequence of affine transformations.
    The ``scipy`` module takes a matrix of the form (ndim + 1, ndim + 1),
    where it assumes that the transformation is specified using homogeneous
    coordinates. This matrix has the 2x2 rotation matrix in the top left
    corner, and the linear shifts in the top right. They are applied in this
    order:
    .. code-block:: text
        1    0   shiftx
        0    1   shifty
        0    0   1
        (translation by shift amounts)
        1    0   -(X-1)/2
        0    1   -(Y-1)/2
        0    0   1
        (translation to center rotation on the IDL rot center)
        cos  sin 0
        -sin cos 0
        0    0   1
        (clockwise rotation)
        1    0   +(X-1)/2
        0    1   +(Y-1)/2
        0    0   1
        (undo translation for center of rotation)
    """
    if shiftx == 0 and shifty == 0 and angle == 0:
        # Identity transform: return the input unchanged.
        return image
    elif angle == 0:
        return shift(image, [shifty, shiftx],
                     order=order, mode='wrap', prefilter=False)
    else:
        # The original IDL implementation performs the linear translation
        # first (wrapping at borders), then rotates the image clockwise by an
        # angle in degrees. The center of the rotation is (X-1)/2, (Y-1)/2. In
        # both steps, bilinear interpolation is used.
        # Numpy array coordinates are (y, x). This swaps dx and dy in the
        # translation part, and the position of the -sin element in the
        # rotation part, compared to the standard version for (x, y, 1).
        # Beware that the coordinate transforms are calculated in the
        # conventional (x, y) sense, but are written in numpy's (y, x) order
        # when implementing them in the transformation matrix.
        cx, sx = np.cos(angle), np.sin(angle)
        # Center of rotation
        rot_x = 0.5 * (image.shape[1] - 1)
        rot_y = 0.5 * (image.shape[0] - 1)
        dx = cx * (shiftx - rot_x) + sx * (shifty - rot_y) + rot_x
        dy = -sx * (shiftx - rot_x) + cx * (shifty - rot_y) + rot_y
        tx = np.array([[cx, -sx, dy],
                       [sx, cx, dx],
                       [0, 0, 1]], dtype=np.float64)
        # The prefilter option, which is turned on by default, applies an
        # image sharpening. It must not be applied. The mode is set to 'wrap'
        # to emulate the behavior of the original implementation of image
        # shift. The spline interpolation order is 1 for bilinear, 3 for
        # bicubic (bilinear is the original behavior).
        # affine_transform expects the inverse mapping (output -> input
        # coordinates), hence inv(tx).
        return affine_transform(image, inv(tx),
                                order=order, mode='wrap', prefilter=False)
import json
import time
from typing import Callable
import dill
def instate():
    """
    Run a calculation exactly like calculate(), but without persisting the
    results to the database. Use this to bring the server state up to date
    for further analysis of the model.
    :return: JSON payload with the evaluation result, the simulation id and
        the wall-clock calculation time
    """
    model_name = request.args.get('name')
    if model_name is None:
        return "Please provide 'name' argument to specify which model to instate"

    # Simulation options may arrive as a JSON request body on POST.
    options = None
    if request.method == 'POST':
        options = json.loads(request.get_data(as_text=True))

    # ----------------------- MODEL UPDATE --------------------------------
    parameters, msg = update_params(name=model_name)
    if msg is not None:
        return msg
    model, idf = update_model(name=model_name, parameters=parameters)

    # ----------------------- CALCULATIONS --------------------------------
    start = time.perf_counter()
    impact_result, cost_result, energy_result, sim_id = run(
        name=model_name, model=model, idf=idf, simulation_options=options)
    exec_time = time.perf_counter() - start  # measured execution time

    # ----------------------- EVALUATION --------------------------------
    eval_type = Callable[[pd.DataFrame, pd.DataFrame, pd.DataFrame], pd.Series]
    # The evaluation function is stored serialized (dill) in Redis.
    evaluate: eval_type = dill.loads(R.get(f'{model_name}:evaluate_func'))
    result = evaluate(impacts=impact_result.impacts,
                      costs=cost_result.costs,
                      energy=energy_result)

    return jsonify({
        'result': result.to_dict(),
        'simulation_id': sim_id,
        'calculation_time': exec_time,
    })
import pickle
def pickler(obj=None, filename: str = None, mode: str = 'pickle'):
    """
    Pickle ``obj`` to ``filename``, or unpickle ``filename`` and return its
    content (useful to save the result of long running calculations).

    Parameters
    ----------
    obj :
        the object to pickle (ignored when unpickling)
    filename : str
        file to pickle to / unpickle from
    mode : str
        one of 'pickle' or 'unpickle'; any other value is a no-op and
        ``None`` is returned (kept for backward compatibility)

    Returns
    -------
    The unpickled object when ``mode == 'unpickle'``, otherwise ``None``.
    """
    unpickled = None
    if mode == 'pickle':
        # Use a context manager so the handle is closed even on error
        # (the original leaked the file object).
        with open(filename, 'wb') as fh:
            pickle.dump(obj, fh)
    elif mode == 'unpickle':
        with open(filename, 'rb') as fh:
            unpickled = pickle.load(fh)
    return unpickled
def csrgeam(m, n, descrA, csrValA, csrRowPtrA, csrColIndA, descrB, csrValB,
            csrRowPtrB, csrColIndB, handle=None, alpha=1.0, beta=0.0,
            nnzA=None, nnzB=None, check_inputs=True):
    """Add two sparse CSR matrices: C = alpha*A + beta*B.

    Higher level wrapper around the cusparse<t>csrgeam routines; returns
    the tuple (descrC, csrValC, csrRowPtrC, csrColIndC).
    """
    if check_inputs:
        # All CSR components must already live on the GPU.
        for gpu_arr in (csrValA, csrRowPtrA, csrColIndA,
                        csrValB, csrRowPtrB, csrColIndB):
            if not isinstance(gpu_arr, pycuda.gpuarray.GPUArray):
                raise ValueError("all csr* inputs must be a pyCUDA gpuarray")
        for descr in (descrA, descrB):
            if cusparseGetMatType(descr) != CUSPARSE_MATRIX_TYPE_GENERAL:
                raise ValueError("Only general matrix type supported")
    if handle is None:
        handle = misc._global_cusparse_handle
    if nnzA is None:
        nnzA = csrValA.size
    if nnzB is None:
        nnzB = csrValB.size
    dtype = csrValA.dtype
    # Basic size/dtype sanity checks.
    if check_inputs:
        if csrValA.size != nnzA:
            raise ValueError("length of csrValA array must match nnzA")
        if csrValB.size != nnzB:
            raise ValueError("length of csrValB array must match nnzB")
        if dtype != csrValB.dtype:
            raise ValueError("incompatible dtypes")
        if csrRowPtrA.size != m + 1:
            raise ValueError("bad csrRowPtrA size")
        if csrRowPtrB.size != m + 1:
            raise ValueError("bad csrRowPtrB size")
    # Output matrix descriptor and row-pointer array.
    descrC = cusparseCreateMatDescr()
    cusparseSetMatType(descrC, CUSPARSE_MATRIX_TYPE_GENERAL)
    alloc = misc._global_cusparse_allocator
    csrRowPtrC = gpuarray.zeros((m + 1, ), dtype=np.int32, allocator=alloc)
    # Determine nnzC and fill in csrRowPtrC.
    nnzC = _csrgeamNnz(m, n, descrA, csrRowPtrA, csrColIndA, descrB,
                       csrRowPtrB, csrColIndB, handle=handle, descrC=descrC,
                       csrRowPtrC=csrRowPtrC, nnzA=nnzA, nnzB=nnzB,
                       check_inputs=False)
    # Allocate the remaining arrays of C now that nnzC is known.
    csrValC = gpuarray.zeros((nnzC, ), dtype=dtype, allocator=alloc)
    csrColIndC = gpuarray.zeros((nnzC, ), dtype=np.int32, allocator=alloc)
    # Pick the typed cusparse routine for this dtype.
    fn = {np.dtype(np.float32): cusparseScsrgeam,
          np.dtype(np.float64): cusparseDcsrgeam,
          np.dtype(np.complex64): cusparseCcsrgeam,
          np.dtype(np.complex128): cusparseZcsrgeam}.get(dtype)
    if fn is None:
        raise ValueError("unsupported sparse matrix dtype: %s" % dtype)
    fn(handle, m, n, alpha, descrA, nnzA, csrValA, csrRowPtrA, csrColIndA,
       beta, descrB, nnzB, csrValB, csrRowPtrB, csrColIndB, descrC,
       csrValC, csrRowPtrC, csrColIndC)
    return (descrC, csrValC, csrRowPtrC, csrColIndC)
def is_json_request_accept(req):
    """Test if http request 'accept' header configured for JSON response.
    :param req: HTTP request
    :return: True if need to return JSON response.
    """
    accept_type = type(req.accept)
    if accept_type is accept.NoHeaderType:
        # No Accept header at all -> JSON is the default response format.
        return True
    # Otherwise only a valid header asking for JSON (or anything) qualifies.
    return accept_type is accept.ValidHeaderType and (
        req.accept.header_value in ('application/json', '*/*'))
def make_hierarchy(parent_ps, relative_size, make_subsys, *args, **kwargs):
    """Replace each particle of *parent_ps* with a freshly built subsystem.

    Every subsystem is rescaled to the parent particle's mass, scaled in
    radius by *relative_size* times the parent's radial size, and moved to
    the parent particle's center-of-mass position/velocity.
    """
    parent_size = parent_ps.radial_size
    hierarchy = ParticleSystem()
    for particle in parent_ps:
        child = make_subsys(*args, **kwargs)
        child.dynrescale_total_mass(particle.mass)
        child.dynrescale_radial_size(relative_size * parent_size)
        child.com_to_origin()
        child.com_move_to(particle.com_r, particle.com_v)
        hierarchy.append(child)
    # Re-number ids sequentially across the whole hierarchy.
    hierarchy.id = range(hierarchy.n)
    return hierarchy
import os
def device_exists(device):
    """Return True when the given ethernet device is present in sysfs."""
    sysfs_path = f'/sys/class/net/{device}'
    return os.path.exists(sysfs_path)
def get_site_stats(array, player_names):
    """
    Summarize the values sampled for a latent or response site.

    Returns mean, std and the 25/50/75% quantiles per row of the
    transposed samples (one row per player for 2-D input).
    """
    # Only multi-dimensional sample arrays carry per-player columns.
    columns = player_names if len(array.shape) > 1 else None
    samples = pd.DataFrame(array, columns=columns).transpose()
    stats = samples.apply(pd.Series.describe, axis=1)
    return stats[["mean", "std", "25%", "50%", "75%"]]
def new(data=None, custom=None):
    """Return a fresh instance of a KangarooTwelve object.
    Args:
        data (bytes/bytearray/memoryview):
            Optional.
            The very first chunk of the message to hash.
            It is equivalent to an early call to :meth:`update`.
        custom (bytes):
            Optional.
            A customization byte string.
    :Return: A :class:`K12_XOF` object
    """
    xof = K12_XOF(data, custom)
    return xof
def document_edit(document_id: int):
    """Edit an existing document entry.

    Args:
        document_id: ID of the document to be edited
    """
    document = Upload.get_by_id(document_id)
    if not document:
        return abort(404)

    editable_fields = ('name', 'description', 'type')
    form = DocumentEditForm()
    if request.method == 'GET':
        # Pre-populate the form with the document's current values.
        for field_name in editable_fields:
            getattr(form, field_name).data = getattr(document, field_name)
    elif form.validate_on_submit():
        for field_name in editable_fields:
            setattr(document, field_name, getattr(form, field_name).data)
        uploaded = form.file.data
        if uploaded:
            # A new file was provided: swap out the stored one.
            document.replace(uploaded)
        db.session.commit()
        return redirect_return()
    return render_template('upload/document.html', form=form)
from typing import Union
from typing import List
def get_output_tensors(graph: Union[tf.Graph, GraphDef]) -> List[str]:
    """
    Return the names of the graph's output tensors.
    Args:
        graph: Graph or GraphDef object
    Returns:
        List of tensor names
    """
    tensor_names = []
    for output_node in get_output_nodes(graph):
        tensor_names.append(output_node.tensor)
    return tensor_names
def create_build_list(select_repo, all_repos_opt):
    """Create a list of repos to build depending on a menu that the user picks from.

    NOTE: this module uses Python 2 `print` statements.

    Args:
        select_repo: comma-separated string of menu indexes into
            repo_info.REPO_LIST (ignored when all_repos_opt is True).
        all_repos_opt: when True, every repo in repo_info.REPO_LIST is built.

    Returns:
        List of repo names to build. Exits the process when the selection
        resolves to an empty list.
    """
    if all_repos_opt is True:
        # Shortcut: the whole known repo list is built.
        build_list = repo_info.REPO_LIST
        print "Building repos: " + str(build_list)
        print "\n"
        return build_list
    # If the user has selected the repos to build, the indexes are used to select
    # the repo names from the menu and they are appended to the build_list
    select_repo_list = select_repo.split(',')
    print "select_repo_list:", select_repo_list
    # Convert the index strings to ints (raises ValueError on bad input).
    select_repo_map = map(int, select_repo_list)
    print "select_repo_map:", select_repo_map
    build_list = []
    for repo_num in select_repo_map:
        repo_name = repo_info.REPO_LIST[repo_num]
        build_list.append(repo_name)
    if not build_list:
        print "No applicable repos selected."
        exit()
    else:
        print "Building repos: " + str(build_list)
        print "\n"
        return build_list
import os
import sys
def homeFolder():
    """ home folder for current user """
    current = os.path.abspath(os.curdir)
    parts = current.split(os.sep)
    # Keep drive + first folder on Windows, root + two levels elsewhere.
    keep = 2 if sys.platform == 'win32' else 3
    return os.sep.join(parts[:keep])
def hinton(matrix, significant=None, max_weight=None, ax=None):
    """Draw Hinton diagram for visualizing a weight matrix."""
    if ax is None:
        ax = plt.gca()
    if not max_weight:
        # One scale per row: smallest power of two >= the row's max magnitude.
        max_weight = [2 ** np.ceil(np.log(np.abs(row).max()) / np.log(2))
                      for row in matrix]
    ax.patch.set_facecolor('gray')
    ax.set_aspect('equal', 'box')
    ax.xaxis.set_major_locator(plt.NullLocator())
    ax.yaxis.set_major_locator(plt.NullLocator())
    for (row, col), weight in np.ndenumerate(matrix):
        face = 'white' if weight > 0 else 'black'
        edge = face
        # |z| > 2.575: presumably the two-sided 99% z threshold — highlight
        # significant cells with a blue border. TODO confirm the convention.
        if significant is not None and np.abs(significant[row][col]) > 2.575:
            edge = 'blue'
        size = np.sqrt(np.abs(weight) / max_weight[row])
        square = plt.Rectangle([row - size / 2, col - size / 2], size, size,
                               facecolor=face, edgecolor=edge)
        ax.add_patch(square)
    ax.autoscale_view()
    ax.invert_yaxis()
    return ax
def to_bin(s):
    """Return the 8-bit binary representation of each character of *s*.

    :param s: string to represent as binary
    :return: newline-separated, zero-padded 8-bit binary codes, one per
        character (empty string for empty input)
    """
    # NOTE: the original guarded each character with `if not c: continue`,
    # but a 1-character string is always truthy (even '\x00'), so the
    # branch was unreachable and has been removed.
    return '\n'.join("{:08b}".format(ord(c)) for c in s)
def get_bppair(bamfile, bp_cand_df, \
        seq_len = 50, seed_len = 5, min_nt = 5,
        match_method = 'fuzzy_match'):
    """
    get the bppairs from bp_cand_stats (a list of bps)
    parameters:
        bamfile - alignment file the breakpoints were called from
        bp_cand_df - DataFrame of breakpoint candidates; presumably has
            columns Chrom, Coord, Clip (verify against caller)
        seq_len - # bases within breakend
        seed_len - # of bases up and down stream of the breakend
            in assembled b sequence. up: seed_len; down: seed_len respectively
        min_nt - minimum number of nucleotides (passed to get_breakpoint_duo)
        match_method - matching strategy forwarded to join_breakpoint
    returns:
        DataFrame with columns Chrom1/Coord1/Clip1, Chrom2/Coord2/Clip2,
        Count, offset, Seq, sorted by Count descending
    """
    # Sort candidates and extract (Chrom, Coord, Clip) triples; itertuples
    # yields (Index, Chrom, Coord, Clip, ...), hence the x[1:4] slice.
    bp_cand_df_sorted = bp_cand_df.sort_values(['Chrom', 'Coord', 'Clip'])
    bplist = [x[1:4] for x in bp_cand_df_sorted.itertuples()]
    # get the breakpoint pairs
    # note: searching use a different (shorter) seq_len parameter
    # to increase running efficiency
    bpduo = get_breakpoint_duo(bamfile, bplist, seq_len, seed_len, min_nt)
    # count the # of supporting reads (clip reads)
    # note: counting use a different (longer) seq_len parameter
    # to ensure the reads are fully coverred in the breakend
    # Each row becomes: bp1 triple + bp2 triple + join_breakpoint() result
    # + (offset, seq); row[7] is presumably the offset field — TODO confirm
    # against get_breakpoint_duo's row layout.
    tt = [list(row[0:3] + row[3:6] \
            + tuple(join_breakpoint(bamfile, row[0:3], row[3:6], \
            offset = row[7], match_method = match_method)) \
            + row[7:9])\
            for row in bpduo]
    # format output
    # Keep pairs where both sides have supporting reads (row[6][1], row[7][1])
    # and the two join results agree on the sequence key (row[6][0] == row[7][0]);
    # the Count column is the sum of both sides' support.
    t2 = [row[0:6] + [row[6][1] + row[7][1]] + row[8:10] \
        for row in tt \
        if row[6][1] > 0 and row[7][1] > 0 \
        and row[6][0] == row[7][0]]
    colnames = ["Chrom1", "Coord1", "Clip1",
                "Chrom2", "Coord2", "Clip2", 'Count', 'offset', 'Seq']
    bp_pair_df = pd.DataFrame(t2, columns = colnames)
    return bp_pair_df.sort_values(
        'Count', ascending=False).reset_index(drop=True)
def get_dropdown_items(df: pd.DataFrame, attribute: str) -> list:
    """
    Build the list of dropdown options for a given attribute name.
    :param df: Pandas DataFrame object which contains the attribute
    :param attribute: str, can be either port, vessel_type, year, or month
    :return: list of unique attribute values (vessel_type is prefixed
        with "All" and sorted)
    :raises KeyError: for any other attribute name
    """
    if attribute == "vessel_type":
        # Vessel types get an explicit "All" option and are sorted.
        return ["All", *sorted(df["ship_type"].unique().tolist())]
    column_for = {"port": "port", "year": "year", "month": "month"}
    if attribute not in column_for:
        raise KeyError("Invalid value for `argument`")
    return df[column_for[attribute]].unique().tolist()
import time
def RunFromFile():
    """Take robot commands as input"""
    left = ev3.LargeMotor("outC")
    assert left.connected  # left motor
    right = ev3.LargeMotor("outA")
    assert right.connected  # right motor
    driver = ReadInDirection()
    start = time.time()
    # Keep executing commands until the driver reports it is done.
    while driver.run():
        pass
    elapsed_end = time.time()
    # Halt both motors before returning.
    left.run_forever(speed_sp=(0))
    right.run_forever(speed_sp=(0))
    return elapsed_end - start
def veljavna(barva, di, dj, polje, i, j):
    """Return True when a move at square (i, j) is valid in direction (di, dj).

    The step (di, dj) selects one of the eight directions on the 8x8 board
    *polje* (e.g. di == 1 and dj == 1 walks the diagonal towards the lower
    right corner). A move is valid when at least one opponent piece lies
    directly in that direction, immediately followed by a piece of the
    player's own colour *barva*.
    """
    steps = 1
    while True:
        row = i + steps * di
        col = j + steps * dj
        if not (0 <= row <= 7 and 0 <= col <= 7):
            # Walked off the board before closing the capture line.
            return False
        if polje[row][col] != drugi(barva):
            break
        steps += 1
    # At least one opponent piece must be bracketed by our own colour.
    return steps > 1 and polje[row][col] == barva
def _do_filter(items, scores, filter_out, return_scores, n):
    """Filter items out of the recommendations.

    Pairs each recommended item with its score and delegates the removal of
    the *filter_out* items to _recommend_items_and_maybe_scores.
    """
    paired = zip(items, scores)
    return _recommend_items_and_maybe_scores(
        best=paired, return_scores=return_scores, n=n,
        filter_items=filter_out)
def userlogout(request):
    """
    Log out a client from the application.
    This funtion uses django's authentication system to clear the session,
    etc. The view redirects to the 'next' GET parameter when provided,
    otherwise to the index page.
    Parameters:
        request -- An HttpRequest
    Returns:
        An HttpResponseRedirect.
    """
    session_key = request.session.session_key
    logout(request)
    # Remove the (now stale) session row explicitly.
    Session.objects.filter(session_key=session_key).delete()
    destination = request.GET.get('next')
    if destination is not None:
        return HttpResponseRedirect(destination)
    return HttpResponseRedirect('/')
import os
def shard_filename(path, prefix, lang_pair, tag, shard_num, total_shards):
    """Create filename for data shard."""
    basename = "%s-%s-%s-%.5d-of-%.5d" % (
        prefix, lang_pair, tag, shard_num, total_shards)
    return os.path.join(path, basename)
def get_builder_plugin():
    """
    Resolve the builder plugin name default.

    Resolution order when no CLI option was given: the user's gitconfig
    preference, then the cirrus.conf build section, and finally a guess
    based on the python interpreter cirrus is running under.
    """
    # TODO look up in git config
    config = load_configuration()
    if config.has_gitconfig_param('builder'):
        builder = str(config.get_gitconfig_param('builder'))
        if builder:
            LOGGER.info("Using Builder Plugin from gitconfig: {}".format(builder))
            return builder
    builder = config.get('build', {}).get('builder')
    if builder is not None:
        LOGGER.info("Using Builder Plugin from cirrus.conf: {}".format(builder))
        return builder
    # fall back to old defaults
    if is_anaconda():
        LOGGER.info("Using default CondaPip builder")
        return "CondaPip"
    LOGGER.info("Using default VirtualenvPip builder")
    return "VirtualenvPip"
def regularize(dn, a0, method):
    """Regularization (amplitude limitation) of radial filters.

    Amplitude limitation of radial filter coefficients; methods according
    to (cf. Rettberg, Spors : DAGA 2014).

    Parameters
    ----------
    dn : numpy.ndarray
        Values to be regularized
    a0 : float
        Parameter for regularization (not required for all methods)
    method : {'none', 'discard', 'hardclip', 'softclip', 'Tikh', 'wng'}
        Method used for regularization/amplitude limitation
        (none, discard, hardclip, Tikhonov, White Noise Gain).

    Returns
    -------
    dn : numpy.ndarray
        Regularized values (dn * hn).
    hn : array_like
        The applied limiting gains.
    """
    over_limit = np.abs(dn) > a0
    if method == 'none':
        hn = np.ones_like(dn)
    elif method == 'discard':
        # Zero out everything above the limit.
        hn = np.ones_like(dn)
        hn[over_limit] = 0
    elif method == 'hardclip':
        # Scale values above the limit down to exactly a0.
        hn = np.ones_like(dn)
        hn[over_limit] = a0 / np.abs(dn[over_limit])
    elif method == 'softclip':
        scaling = np.pi / 2
        hn = a0 / abs(dn)
        hn = 2 / np.pi * np.arctan(scaling * hn)
    elif method == 'Tikh':
        a0 = np.sqrt(a0 / 2)
        root = np.sqrt(1 - 1/(a0**2))
        alpha = (1 - root) / (1 + root)
        hn = 1 / (1 + alpha**2 * np.abs(dn)**2)
    elif method == 'wng':
        hn = 1/(np.abs(dn)**2)
    else:
        raise ValueError('method must be either: none, ' +
                         'discard, hardclip, softclip, Tikh or wng')
    return dn * hn, hn
from typing import Optional
def get_fields_by_queue(client: Client, queue: Optional[list]) -> list:
    """
    Collect the distinct field json keys across the given queues.
    Args:
        client: Client for the api.
        queue: optional list of queue ids; when empty or None, all queue
            ids in the system are used.
    Returns:
        list of field json keys (order of first appearance, no duplicates).
    """
    queue_ids = queue if queue else get_queue_ids(client)
    fields: list = []
    # These two keys trigger an internal server error (500) on the server
    # when queried with related tickets, so they are excluded.
    skipped_keys = ('related_tickets', 'referring_tickets')
    for queue_id in queue_ids:
        client.update_token()
        response = client.queues_list_fields_request(queue_number=str(queue_id))
        for field in response.get('Fields', []):
            key = field.get('jsonKey')
            if key not in fields and key not in skipped_keys:
                fields.append(key)
    return fields
def emitter_20():
    """Interval, emit from center, velocity fixed speed around 360 degrees"""
    # NOTE: the docstring above is returned at runtime as the demo label,
    # so its text is part of the function's behavior.
    def make_particle(emitter):
        return arcade.LifetimeParticle(
            filename_or_texture=TEXTURE,
            change_xy=arcade.rand_on_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
            lifetime=DEFAULT_PARTICLE_LIFETIME,
            scale=DEFAULT_SCALE,
            alpha=DEFAULT_ALPHA
        )
    emitter = arcade.Emitter(
        center_xy=CENTER_POS,
        emit_controller=arcade.EmitterIntervalWithTime(
            DEFAULT_EMIT_INTERVAL, DEFAULT_EMIT_DURATION),
        particle_factory=make_particle
    )
    return emitter_20.__doc__, emitter
def add_mask_rncc_losses(model, blob_mask):
    """Add Mask R-CNN specific losses"""
    # Sigmoid cross-entropy between the predicted masks and the targets,
    # scaled by the configured mask loss weight.
    mask_loss_scale = model.GetLossScale() * cfg.MRCNN.WEIGHT_LOSS_MASK
    loss_mask = model.net.SigmoidCrossEntropyLoss(
        [blob_mask, 'masks_init32'],
        'loss_mask',
        scale=mask_loss_scale
    )
    gradients = blob_utils.get_loss_gradients(model, [loss_mask])
    model.AddLosses('loss_mask')
    return gradients
def fixed_discount(order: Order):
    """
    Flat 5k discount, applied regardless of the order contents.
    """
    fixed_amount = Decimal("5000")
    return fixed_amount
def index():
    """
    Render the index page.
    Just welcomes the user and asks them to start a quiz.
    """
    template_name = 'index.html'
    return render_template(template_name)
from typing import BinaryIO
def parse_element(stream: BinaryIO):
    """Parse the content of the UPF file to determine the element.
    :param stream: a filelike object with the binary content of the file.
    :return: the symbol of the element following the IUPAC naming standard.
    :raises ValueError: when neither the v2 nor the v1 pattern matches.
    """
    content = stream.read().decode('utf-8')
    # Try the newer UPF v2 pattern first, then fall back to v1.
    for pattern in (REGEX_ELEMENT_V2, REGEX_ELEMENT_V1):
        match = pattern.search(content)
        if match is not None:
            return match.group('element')
    raise ValueError('could not parse the element from the UPF content.')
from typing import Optional
from typing import List
from typing import Any
from typing import Type
def mention_subclass(
    class_name: str,
    cardinality: Optional[int] = None,
    values: Optional[List[Any]] = None,
    table_name: Optional[str] = None,
) -> Type[Mention]:
    """Create new mention.
    Creates and returns a Mention subclass with provided argument names,
    which are Context type. Creates the table in DB if does not exist yet.
    Import using:
    .. code-block:: python
        from fonduer.candidates.models import mention_subclass
    :param class_name: The name of the class, should be "camel case" e.g.
        NewMention
    :param table_name: The name of the corresponding table in DB; if not
        provided, is converted from camel case by default, e.g. new_mention
    :param values: The values that the variable corresponding to the Mention
        can take. By default it will be [True, False].
    :param cardinality: The cardinality of the variable corresponding to the
        Mention. By default is 2 i.e. is a binary value, e.g. is or is not
        a true mention.
    :raises ValueError: on an inconsistent values/cardinality combination, or
        when a subclass of the same name already exists with a different spec.
    """
    # Default table name is the snake_case version of the class name.
    if table_name is None:
        table_name = camel_to_under(class_name)
    # If cardinality and values are None, default to binary classification
    if cardinality is None and values is None:
        values = [True, False]
        cardinality = 2
    # Else use values if present, and validate proper input
    elif values is not None:
        if cardinality is not None and len(values) != cardinality:
            raise ValueError("Number of values must match cardinality.")
        if None in values:
            raise ValueError("`None` is a protected value.")
        # Note that bools are instances of ints in Python...
        if any([isinstance(v, int) and not isinstance(v, bool) for v in values]):
            raise ValueError(
                (
                    "Default usage of values is consecutive integers."
                    "Leave values unset if trying to define values as integers."
                )
            )
        cardinality = len(values)
    # If cardinality is specified but not values, fill in with ints
    elif cardinality is not None:
        values = list(range(cardinality))
    args = ["context"]
    # The spec tuple is used to detect conflicting redefinitions below.
    class_spec = (args, table_name, cardinality, values)
    if class_name in mention_subclasses:
        # Reuse an identical previously-created subclass; refuse a
        # redefinition with a different specification.
        if class_spec == mention_subclasses[class_name][1]:
            return mention_subclasses[class_name][0]
        else:
            raise ValueError(
                f"Mention subclass {class_name} "
                f"already exists in memory with incompatible "
                f"specification: {mention_subclasses[class_name][1]}"
            )
    else:
        # Set the class attributes == the columns in the database
        class_attribs = {
            # Declares name for storage table
            "__tablename__": table_name,
            # Connects mention_subclass records to generic Mention records
            "id": Column(
                Integer, ForeignKey("mention.id", ondelete="CASCADE"), primary_key=True
            ),
            # Store values & cardinality information in the class only
            "values": values,
            "cardinality": cardinality,
            # Polymorphism information for SQLAlchemy
            "__mapper_args__": {"polymorphic_identity": table_name},
            # Helper method to get argument names
            "__argnames__": args,
        }
        # Each mention row links back to the document it was found in.
        class_attribs["document_id"] = Column(
            Integer, ForeignKey("document.id", ondelete="CASCADE")
        )
        class_attribs["document"] = relationship(
            "Document",
            backref=backref(table_name + "s", cascade="all, delete-orphan"),
            foreign_keys=class_attribs["document_id"],
        )
        # Create named arguments, i.e. the entity mentions comprising the
        # relation mention.
        unique_args = []
        for arg in args:
            # Primary arguments are constituent Contexts, and their ids
            class_attribs[arg + "_id"] = Column(
                Integer, ForeignKey("context.id", ondelete="CASCADE")
            )
            class_attribs[arg] = relationship(
                "Context", foreign_keys=class_attribs[arg + "_id"]
            )
            unique_args.append(class_attribs[arg + "_id"])
        # Add unique constraints to the arguments
        class_attribs["__table_args__"] = (UniqueConstraint(*unique_args),)
        # Create class
        C = type(class_name, (Mention,), class_attribs)
        # Create table in DB
        if Meta.engine and not Meta.engine.has_table(table_name):
            C.__table__.create(bind=Meta.engine)  # type: ignore
        # Cache the class together with its spec for future lookups.
        mention_subclasses[class_name] = C, class_spec
        # Make this dynamically created class picklable
        # https://stackoverflow.com/a/39529149
        globals()[class_name] = C
        return C
def undupe_column_names(df, template="{} ({})"):
    """
    Rename df column names so there are no duplicates (in place).

    E.g. if there are two columns named "dog", the second column is
    reformatted to "dog (2)".

    Parameters
    ----------
    df : pandas.DataFrame
        dataframe whose column names should be de-duplicated
    template : template taking two arguments (old_name, int) used to
        rename duplicate columns

    Returns
    -------
    df : pandas.DataFrame
        dataframe that was renamed in place, for convenience in chaining
    """
    taken = set()
    renamed = []
    for original in df.columns:
        candidate = original
        counter = 1
        # Bump the counter until the templated name is unused.
        while candidate in taken:
            counter += 1
            candidate = template.format(original, counter)
        renamed.append(candidate)
        taken.add(candidate)
    df.columns = renamed
    return df
import os
def p(*args):
    """
    Join the temporary directory path with the provided path components.
    """
    # temp_dir is a module-level directory defined elsewhere in this file.
    return os.path.join(temp_dir, *args)
def get_example_data(dataset_name):
    """
    Smart package loader that locates text files inside our package.
    :param dataset_name: file name of the example dataset
    :return: raw content of the packaged resource
    """
    resource_path = 'examples/' + dataset_name
    provider = get_provider('ebu_tt_live')
    manager = ResourceManager()
    return provider.get_resource_string(manager, resource_path)
def js(data):
    """Minify JS (currently a pass-through no-op)."""
    # No minification is applied for now.
    return data
def _has_endpoint_name_flag(flags):
"""
Detect if the given flags contain any that use ``{endpoint_name}``.
"""
return '{endpoint_name}' in ''.join(flags) | e8827da778c97d3be05ec82ef3367686616d3a88 | 28,592 |
def convert_example(example,
                    tokenizer,
                    max_seq_len=512,
                    max_response_len=128,
                    max_knowledge_len=256,
                    mode='train'):
    """Convert all examples into necessary features.

    Expects *example* to be a dict with 'goal', 'knowledge', 'history' and
    (for non-test modes) 'response'. In train/dev mode the returned features
    additionally carry 'masked_positions' and 'labels' for teacher-forced
    response generation; in test mode the encoding only appends a start
    token for the response.
    """
    goal = example['goal']
    knowledge = example['knowledge']
    # Flatten the goal and knowledge (lists of token lists) into a single
    # space-separated string fed to the tokenizer as the knowledge segment.
    goal_knowledge = ' '.join([' '.join(lst) for lst in goal + knowledge])
    if mode != 'test':
        tokenized_example = tokenizer.dialogue_encode(
            example['history'],
            response=example['response'],
            knowledge=goal_knowledge,
            task_type='knowledge',
            max_seq_len=max_seq_len,
            max_response_len=max_response_len,
            max_knowledge_len=max_knowledge_len,
            return_length=True)
        # Locate the response segment: assumes dialogue_encode places a cls
        # token right before the response (search starts at index 1 to skip
        # the leading cls) — TODO confirm against the tokenizer.
        response_start = tokenized_example['input_ids'].index(
            tokenizer.cls_token_id, 1)
        response_end = tokenized_example['seq_len']
        # Use to gather the logits corresponding to the labels during training
        tokenized_example['masked_positions'] = list(
            range(response_start, response_end - 1))
        # Labels are the response tokens shifted one position to the left.
        tokenized_example['labels'] = tokenized_example['input_ids'][
            response_start + 1:response_end]
        return tokenized_example
    else:
        tokenized_example = tokenizer.dialogue_encode(
            example['history'],
            knowledge=goal_knowledge,
            task_type='knowledge',
            max_seq_len=max_seq_len,
            max_knowledge_len=max_knowledge_len,
            add_start_token_as_response=True)
        # Keep the reference response (when available) for evaluation.
        if 'response' in example:
            tokenized_example['response'] = example['response']
        return tokenized_example
def mag2Jy(info_dict, Mag):
    """Convert a magnitude into flux density in Jy.

    Parameters
    -----------
    info_dict: dictionary
        must provide the band zero-point flux under key 'Flux_zero_Jy'
    Mag: array or float
        AB or vega magnitude

    Returns
    -------
    fluxJy: array or float
        flux density in Jy
    """
    zero_point_flux = info_dict['Flux_zero_Jy']
    return zero_point_flux * 10**(-0.4 * Mag)
def load_array(data_arrays, batch_size, is_train=True):
    """Construct a batched MindSpore data iterator over in-memory arrays.

    Wraps ``data_arrays`` in an ``ArrayData`` source, exposes one string
    column name per input array, shuffles when ``is_train`` is True, and
    batches the result.
    Defined in :numref:`sec_utils`"""
    n_columns = len(data_arrays)
    source = ArrayData(data_arrays)
    # One generic column name ("0", "1", ...) per input array.
    column_names = [str(i) for i in range(n_columns)]
    iterator = ds.GeneratorDataset(source=source, column_names=column_names,
                                   shuffle=is_train)
    return iterator.batch(batch_size)
def do_get_batched_targets(parser, token):
    """
    Retrieves the list of broadcasters for an action and stores them in a
    context variable which has ``broadcasters`` property.

    Example usage::

        {% get_batched_targets action_id_list parent_action_id as batched_targets %}
    """
    # Expected tokens: tag name, action_id_list, parent_action_id, 'as', varname.
    bits = token.contents.split()
    if len(bits) != 5:
        # Bug fix: the previous message claimed "exactly two arguments",
        # although four tokens must follow the tag name.
        raise TemplateSyntaxError("'%s' tag takes exactly four arguments" % bits[0])
    if bits[3] != 'as':
        # Bug fix: 'as' is the third argument after the tag name, not the second.
        raise TemplateSyntaxError("third argument to '%s' tag must be 'as'" % bits[0])
    return GetBatchedTargets(bits[1], bits[2], bits[4])
def task_success_slack_alert(context):
    """
    Callback task that can be used in DAG to alert of successful task completion

    Args:
        context (dict): Context variable passed in from Airflow

    Returns:
        None: Calls the SlackWebhookOperator execute method internally
    """
    # The Slack webhook token is stored as the password of the Airflow
    # connection identified by SLACK_CONN_ID.
    slack_webhook_token = BaseHook.get_connection(SLACK_CONN_ID).password
    slack_msg = """
            :large_blue_circle: Task Succeeded!
            *Task*: {task}
            *Dag*: {dag}
            *Execution Time*: {exec_date}
            *Log Url*: {log_url}
            """.format(
        task=context.get("task_instance").task_id,
        dag=context.get("task_instance").dag_id,
        # NOTE(review): `ti` is never referenced in the template above —
        # confirm it can be dropped.
        ti=context.get("task_instance"),
        exec_date=context.get("execution_date"),
        # Slack link markup: <url| LOGS> renders as a clickable "LOGS" link.
        log_url="<"+context.get("task_instance").log_url+"| LOGS>",
    )
    success_alert = SlackWebhookOperator(
        task_id="slack_test",
        http_conn_id="slack",
        webhook_token=slack_webhook_token,
        message=slack_msg,
        username="airflow",
    )
    # Sends the message immediately rather than scheduling a task.
    return success_alert.execute(context=context)
import base64
def return_img_stream(img_local_path):
    """Utility: read a local image file and return its base64-encoded stream.

    :param img_local_path: absolute path of a single local image file
    :return: base64-encoded content of the image (bytes)
    """
    with open(img_local_path, 'rb') as image_file:
        raw_bytes = image_file.read()
    return base64.b64encode(raw_bytes)
from typing import Optional
from typing import Dict
from typing import Union
from typing import Any
def filter_log_on_max_no_activities(log: EventLog, max_no_activities : int = 25, parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> EventLog:
    """
    Keep only the ``max_no_activities`` most frequent activities of a log.

    Parameters
    -------------
    log
        Log
    max_no_activities
        Maximum number of activities
    parameters
        Parameters of the algorithm

    Returns
    -------------
    filtered_log
        Filtered version of the event log
    """
    if parameters is None:
        parameters = {}
    if PARAMETER_CONSTANT_ACTIVITY_KEY in parameters:
        activity_key = parameters[PARAMETER_CONSTANT_ACTIVITY_KEY]
    else:
        activity_key = DEFAULT_NAME_KEY
    # Deliberately mutates the caller's dict so downstream filters see the key.
    parameters[PARAMETER_CONSTANT_ATTRIBUTE_KEY] = activity_key
    # Rank every activity by how often it occurs, most frequent first.
    occurrence_counts = get_attribute_values(log, activity_key)
    ranked = sorted(occurrence_counts.items(), key=lambda pair: pair[1],
                    reverse=True)
    # Slicing already caps at len(ranked) when max_no_activities is larger.
    keep = [activity for activity, _ in ranked[:max_no_activities]]
    if len(keep) < len(ranked):
        log = apply_events(log, keep, parameters=parameters)
    return log
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.