| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
from stevedore import ExtensionManager
def get_available_adapters() -> dict:
"""Get information on all available adapters
Returns:
(dict) Where keys are adapter names and values are descriptions
"""
return _output_plugin_info(ExtensionManager(namespace='materialsio.adapter'))
|
5384931189492a6369a885498f4dfeaed315cb94
| 3,648,100
|
def _must_find_n(session, obj_outer, cls_inner, name_inner):
"""Searches the database for a "namespaced" object, such as a nic on a node.
Raises NotFoundError if there is none. Otherwise returns the object.
Arguments:
session - a SQLAlchemy session to use.
obj_outer - the "owner" object
cls_inner - the "owned" class
name_inner - the name of the "owned" object
"""
obj_inner = _namespaced_query(session, obj_outer, cls_inner, name_inner)
if obj_inner is None:
raise NotFoundError("%s %s on %s %s does not exist." %
(cls_inner.__name__, name_inner,
obj_outer.__class__.__name__, obj_outer.label))
return obj_inner
|
25546e1f528c54ae7283f6a0d404065a337eb977
| 3,648,101
|
def list_providers():
"""
Get list of names of all supported cloud providers
:rtype: list
"""
return [cls.provider_name() for cls in BaseHandler.__subclasses__()]
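# Hypothetical illustration of the registry pattern behind list_providers(): a provider
# registers itself simply by subclassing BaseHandler (the class below is made up).
class ExampleAwsHandler(BaseHandler):
    @classmethod
    def provider_name(cls):
        return 'aws-example'

# After this definition, list_providers() would include 'aws-example' in its result.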
|
96f75695a40c9969cbd1507b3b3214df44039f1e
| 3,648,102
|
def GetRPCProxy(address=None, port=None, url=GOOFY_RPC_URL):
"""Gets an instance (for client side) to access the goofy server.
Args:
address: Address of the server to be connected.
port: Port of the server to be connected.
url: Target URL for the RPC server. Default to Goofy RPC.
"""
address = address or DEFAULT_GOOFY_ADDRESS
port = port or DEFAULT_GOOFY_PORT
return jsonrpc.ServerProxy(
'http://%s:%d%s' % (address, port, url))
|
3e66abf1e6961c9d2dd2f2fa562e43528d6e99a4
| 3,648,103
|
import argparse
def getparser():
"""
Use argparse to add arguments from the command line
Parameters
----------
createlapserates : int
Switch for processing lapse rates (default = 0 (no))
createtempstd : int
Switch for processing hourly temp data into monthly standard deviation (default = 0 (no))
Returns
-------
    argparse.ArgumentParser object with the command-line arguments defined
    (call parse_args() on it to obtain their values).
"""
parser = argparse.ArgumentParser(description="select pre-processing options")
# add arguments
parser.add_argument('-createlapserates', action='store', type=int, default=0,
help='option to create lapse rates or not (1=yes, 0=no)')
parser.add_argument('-createtempstd', action='store', type=int, default=0,
help='option to create temperature std of daily data or not (1=yes, 0=no)')
return parser
|
6ccde8f0e02124fa537205da398a842c88a62046
| 3,648,104
|
def dict_hash_table_100_buckets():
"""Test for hash table with 100 buckets, dictionary."""
ht = HashTable(100, naive_hash)
for word in dictionary_words:
ht.set(word, word)
return ht
|
eef6fa89811f6ac9d1ad17d11dc5aa38346a4e16
| 3,648,105
|
import numpy as np
def white(N):
"""
White noise.
:param N: Amount of samples.
    White noise has a constant power density. Its narrowband spectrum is therefore flat.
The power in white noise will increase by a factor of two for each octave band,
and therefore increases with 3 dB per octave.
"""
return np.random.randn(N)
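# Quick usage sketch: white() draws standard-normal samples, so for large N the sample
# mean should be close to 0 and the sample standard deviation close to 1.
noise = white(100000)
assert abs(noise.mean()) < 0.05 and abs(noise.std() - 1.0) < 0.05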
|
5404c24b0cb79e3866d10eed1630ccdfebe9fae1
| 3,648,106
|
import warnings
import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix
def read_HiCPro(bedfile, matfile):
"""
Fast loading of the .matrix and .bed files derived from HiC-Pro
Parameters
----------
bedfile : str,
path to the .bed file which contains fragments info
matfile : str,
path to the .matrix file which contains contact counts
Returns
-------
counts : the interaction contacts map
    lengths : the number of bins in each chromosome
chrs : the chromosome names
"""
### read and parse fragments file at first
bed_df = pd.read_csv(bedfile, sep='\t', comment="#", header=None, names=['chrs', 'starts', 'ends', 'idxs'])
# get lengths for each chromosome
chrs, indices, lengths = np.unique(bed_df.chrs.values, return_index=True, return_counts=True)
chrs = list(chrs[indices.argsort()])
lengths = lengths[indices.argsort()]
base = bed_df.idxs[0] # start index: 0 or 1
### read and parse counts file then
n = lengths.sum()
shape = (n, n)
# This is the interaction count files
mat_df = pd.read_csv(matfile, sep='\t', comment="#", header=None)
row, col, data = mat_df.values.T
row = row.astype(int)
col = col.astype(int)
# If there are NAs remove them
mask = np.isnan(data)
if np.any(mask):
        warnings.warn(f'NAs detected in {matfile}. Removing NAs and replacing with 0.')
row = row[np.invert(mask)] # invert True and False for mask
col = col[np.invert(mask)]
data = data[np.invert(mask)].astype(int)
# if index start from 1
if base not in [0, 1]:
raise ValueError('indices should start either at 0 or 1')
if base == 1:
col -= 1
row -= 1
# convert to a coo_matrix (lower triangular)
counts = coo_matrix((data, (row, col)), shape=shape)
# whether the matrix is lower or upper triangular
if np.all(row <= col):
triangular_upper = True
elif np.all(row >= col):
triangular_upper = False
else:
raise ValueError('The HiC matrix is neither lower nor upper triangular!')
# We need to deal with the fact that we should not duplicate entries for the diagonal
counts = counts.toarray()
if triangular_upper:
counts = counts + np.triu(counts, 1).T
else:
counts = counts + np.tril(counts, -1).T
return counts, lengths, chrs
|
1c52cdf62a20d4ff8168919afbd890890653de93
| 3,648,107
|
def get_objective_by_task(target, task):
"""Returns an objective and a set of metrics for a specific task."""
if task == 'classification':
if target.nunique() == 2:
objective = 'binary'
else:
objective = 'multi'
elif task == 'regression':
objective = 'regression'
else:
raise_invalid_task_error(task)
return objective
|
7859f84ba89246b735c61b3b31e421b38692de34
| 3,648,108
|
import numpy as np
import cooler
import bioframe
def pileupGenes(GenePositions,filename,pad=500000,doBalance=False,
TPM=0,CTCFWapldKO=False,TPMlargerthan=True,
minlength=0,maxlength=5000000,OE=None, useTTS=False):
"""
This function piles up Hi-C contact maps around genes, centered on TSSs or TTSs.
Inputs
------
GenePositions - pandas dataframe - with genes and their transcription intensity
filename - str - is path to cooler file
pad - int - half of the window size in bp
    OE - dict or None - per-chromosome "expected" values used to compute observed over expected
useTTS - bool - False to pile on TSS, True to pile on TTS
other parameters do some optional filtering of the data frame
"""
sortString="start"
if useTTS:
sortString="end"
OrderedPositions=GenePositions.sort_values(sortString)
c = cooler.Cooler(filename)
res = c.info['bin-size']
chromsizes = bioframe.fetch_chromsizes('mm9')
chrmList = list(chromsizes.index)
runningCount = 0
pile = []
for mychr in chrmList: #Iterate over chromosomes
mychrstrCooler=mychr
mychrstrDataFrame=mychr#Chromosomes in the dataframe GenePositions
#are labeled 1 to 19, X Y and M, while in the cooler file they are labeld 0 to 21
current = OrderedPositions[OrderedPositions["chrom"] == mychrstrDataFrame]
if len(current) <= 0:
continue
#identify + and - so we can reorient genes
#genes for which strand is +, and current gene is not too long and not too short
currentPlusStrand=current[(current['strand']=='+')&(current['gene_length']<maxlength)
&(current['gene_length']>minlength)]
#genes for which strand is -, and current gene is not too long and not too short
currentMinusStrand=current[(current['strand']=='-')&(current['gene_length']<maxlength)
&(current['gene_length']>minlength)]
if TPMlargerthan: #filter by TPM > threshold
if CTCFWapldKO:
currentPlusStrand=currentPlusStrand[(currentPlusStrand['TPM_dKO+Adeno-Cre_30251-30253']>=TPM)]
currentMinusStrand=currentMinusStrand[(currentMinusStrand['TPM_dKO+Adeno-Cre_30251-30253']>=TPM)]
else:
currentPlusStrand=currentPlusStrand[(currentPlusStrand['TPM_wildtype']>=TPM)]
currentMinusStrand=currentMinusStrand[(currentMinusStrand['TPM_wildtype']>=TPM)]
else: #filter by TPM < thresh
if CTCFWapldKO:
currentPlusStrand=currentPlusStrand[(currentPlusStrand['TPM_dKO+Adeno-Cre_30251-30253']<=TPM)&(currentPlusStrand['next_TPM_dKO']>0)]
                currentMinusStrand=currentMinusStrand[(currentMinusStrand['TPM_dKO+Adeno-Cre_30251-30253']<=TPM)
                                                      &(currentMinusStrand['TPM_dKO+Adeno-Cre_30251-30253']>0)]
else:
currentPlusStrand=currentPlusStrand[(currentPlusStrand['TPM_wildtype']<=TPM)
&(currentPlusStrand['next_TPM_wildtype']>0)]
currentMinusStrand=currentMinusStrand[(currentMinusStrand['TPM_wildtype']<=TPM)
&(currentMinusStrand['TPM_wildtype']>0)]
centerString="start"
if useTTS:
centerString="end"
for st, end in zip(currentPlusStrand[centerString].values, currentPlusStrand[centerString].values):
reg1 = '{}:{}-{}'.format(mychrstrCooler, int(np.floor((st - pad) / res) * res),
int(np.floor((st + pad) / res) * res),)
reg2 = '{}:{}-{}'.format(mychrstrCooler,int(np.floor((end - pad) / res) * res),
int(np.floor((end + pad) / res) * res))
#from balanced matrix, fetch regions
try:
mat = c.matrix(balance=doBalance).fetch(reg1, reg2)
if OE!=None:#Divide by expected
mat=mat/OE[mychr]
pile.append(mat)
except Exception as e:
print(e)
#mat = np.nan * np.ones((pad * 2 //res, pad * 2 //res))
print('Cannot retrieve a window:', reg1, reg2)
centerString="end"
if useTTS:
centerString="start"
for st, end in zip(currentMinusStrand[centerString].values, currentMinusStrand[centerString].values):
reg1 = '{}:{}-{}'.format(mychrstrCooler, int(np.floor((st - pad) / res) * res),
int(np.floor((st + pad) / res) * res),)
reg2 = '{}:{}-{}'.format(mychrstrCooler,int(np.floor((end - pad) / res) * res),
int(np.floor((end + pad) / res) * res))
try:
temp=c.matrix(balance=doBalance).fetch(reg1, reg2)
if OE!=None:#Divide by expected
temp=temp/OE[mychr]
mat = temp[::-1].T[::-1].T #Rotate matrix 180 degrees to align genes
pile.append(mat)
except Exception as e:
print(e)
#mat = np.nan * np.ones((pad * 2 //res, pad * 2 //res))
print('Cannot retrieve a window:', reg1, reg2)
return pile
|
671f575b09131f44f07d659bc8e195a16cd9e2f9
| 3,648,109
|
import torch.nn as nn
def model_cnn_2layer(in_ch, in_dim, width, linear_size=128):
"""
CNN, small 2-layer (default kernel size is 4 by 4)
Parameter:
in_ch: input image channel, 1 for MNIST and 3 for CIFAR
in_dim: input dimension, 28 for MNIST and 32 for CIFAR
width: width multiplier
"""
model = nn.Sequential(
nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4), linear_size),
nn.ReLU(),
nn.Linear(linear_size, 10)
)
return model
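# Usage sketch (hedged): `Flatten` is defined elsewhere in the original module; a simple
# view-based stand-in is assumed here so the shapes can be checked end to end.
import torch
import torch.nn as nn

class Flatten(nn.Module):  # assumed equivalent of the module-level helper
    def forward(self, x):
        return x.view(x.size(0), -1)

# MNIST-sized input: two stride-2 convolutions reduce 28 -> 14 -> 7, which is why the
# first linear layer expects 8 * width * (in_dim // 4) ** 2 input features.
demo_model = model_cnn_2layer(in_ch=1, in_dim=28, width=1)
assert demo_model(torch.randn(2, 1, 28, 28)).shape == (2, 10)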
|
a08ec035cde3dd024ec32d85adb0af045fe845eb
| 3,648,110
|
import sys
import os
from subprocess import call
import numpy as np
def snack_raw_formants_tcl(wav_fn, frame_shift, window_size, pre_emphasis, lpc_order, tcl_shell_cmd):
"""Implement snack_formants() by calling Snack through Tcl shell
tcl_shell_cmd is the name of the command to invoke the Tcl shell.
Note this method can only be used if Tcl is installed.
The vectors returned here are the raw Snack output, without padding.
For more info, see documentation for snack_raw_formants().
"""
# File path for wav file provided to Tcl script
in_file = wav_fn
# ERROR: wind_dur parameter must be between [0.0001, 0.1].
# ERROR: frame_step parameter must be between [1/sampling rate, 0.1].
# invalid/inconsistent parameters -- exiting.
# HACK: Tcl shell expects double backslashes in Windows path
if sys.platform == 'win32' or sys.platform == 'cygwin': # pragma: no cover
in_file = in_file.replace('\\', '\\\\')
tcl_file = os.path.join(os.path.dirname(wav_fn), 'tclforsnackformant.tcl')
# Write Tcl script to compute Snack formants
f = open(tcl_file, 'w')
script = "#!/usr/bin/env bash\n"
script += '# the next line restarts with tclsh \\\n'
script += 'exec {} "$0" "$@"\n\n'.format(tcl_shell_cmd)
    # HACK: The variable user_snack_lib_path is a hack we use in continuous
# integration testing. The reason is that we may not have the
# permissions to copy the Snack library to the standard Tcl library
# location. This is a workaround to load the Snack library from a
# different location, where the location is given by
# user_snack_lib_path.
if user_snack_lib_path is not None:
script += 'pkg_mkIndex {} snack.tcl libsnack.dylib libsound.dylib\n'.format(user_snack_lib_path)
script += 'lappend auto_path {}\n\n'.format(user_snack_lib_path)
script += 'package require snack\n\n'
script += 'snack::sound s\n\n'
script += 's read {}\n\n'.format(in_file)
script += 'set fd [open [file rootname {}].frm w]\n'.format(in_file)
script += 'puts $fd [join [s formant -windowlength {} -framelength {} -windowtype Hamming -lpctype 0 -preemphasisfactor {} -ds_freq 10000 -lpcorder {}]\n\n]\n'.format(window_size / 1000, frame_shift / 1000, pre_emphasis, lpc_order)
script += 'close $fd\n\n'
script += 'exit'
f.write(script)
f.close()
# Run Tcl script
try:
return_code = call([tcl_shell_cmd, tcl_file])
except OSError: # pragma: no cover
os.remove(tcl_file)
raise OSError('Error while attempting to call Snack via Tcl shell. Is Tcl shell command {} correct?'.format(tcl_shell_cmd))
else:
if return_code != 0: # pragma: no cover
os.remove(tcl_file)
raise OSError('Error when trying to call Snack via Tcl shell script.')
# Load results from f0 file and save into return variables
frm_file = os.path.splitext(wav_fn)[0] + '.frm'
num_cols = len(sformant_names)
if os.path.isfile(frm_file):
frm_results = np.loadtxt(frm_file, dtype=float).reshape((-1, num_cols))
estimates_raw = {}
for i in range(num_cols):
estimates_raw[sformant_names[i]] = frm_results[:, i]
# Cleanup and remove f0 file
os.remove(frm_file)
else: # pragma: no cover
raise OSError('Snack Tcl shell error -- unable to locate .frm file')
# Cleanup and remove Tcl script file
os.remove(tcl_file)
return estimates_raw
|
a9ac0dcbfd1568774b15db6fa28fb228e50ab29e
| 3,648,111
|
from datasets.posetrack.poseval.py import evaluate_simple
def _run_eval(annot_dir, output_dir, eval_tracking=False, eval_pose=True):
"""
Runs the evaluation, and returns the "total mAP" and "total MOTA"
"""
(apAll, _, _), mota = evaluate_simple.evaluate(
annot_dir, output_dir, eval_pose, eval_tracking,
cfg.TRACKING.DEBUG.UPPER_BOUND_4_EVAL_UPPER_BOUND)
return apAll[-1][0], mota[-4][0]
|
e851006974b51c44c7ac7a59c4d54a3725b36004
| 3,648,112
|
def reserve_api():
"""Helper function for making API requests to the /reserve API endpoints
:returns: a function that can be called to make a request to /reserve
"""
def execute_reserve_api_request(method, endpoint, **kwargs):
master_api_client = master_api()
return master_api_client(method, "/reserve%s" % endpoint, **kwargs)
return execute_reserve_api_request
|
d9e07fcd72742685443cd83f44eaed074a8152dc
| 3,648,113
|
import re
def extract_user_id(source_open_url):
"""
    Extract the numeric user id from the given profile open-URL.
    :param source_open_url: e.g. "sslocal://profile?refer=video&uid=6115075278"
    :return: the user id as a string, or None if it cannot be extracted
"""
if source_open_url[10:17] != 'profile':
return None
try:
        res = re.search(r"\d+$", source_open_url).group(0)
return res.strip()
except (AttributeError, KeyError):
return None
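# Usage sketch mirroring the docstring example:
assert extract_user_id("sslocal://profile?refer=video&uid=6115075278") == "6115075278"
assert extract_user_id("sslocal://detail?groupid=123") is None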
|
36d3e41c4361a29306650fc67c9f396efe92cd66
| 3,648,114
|
def prolog_rule(line):
"""Specify prolog equivalent"""
def specify(rule):
"""Apply restrictions to rule"""
rule.prolog.insert(0, line)
return rule
return specify
|
dde4840dc2f8f725d4c4c123aed7c978ec1948f9
| 3,648,115
|
import numpy as np
def load_GloVe_model(path):
    """
    Load a GloVe model from a text file.
    :param path: model path
    :return: dict mapping each word to its embedding vector
"""
print("Load GloVe Model.")
with open(path, 'r') as f:
content = f.readlines()
model = {}
for line in content:
splitLine = line.split()
word = splitLine[0]
        embedding = np.array(splitLine[1:], dtype=float)
model[word] = embedding
print("Done.", len(model), " words loaded!\n")
return model
|
40e8fe203b195621b776ea3650bb531956769b48
| 3,648,116
|
import numpy
def quadratic_program() -> MPQP_Program:
"""a simple mplp to test the dimensional correctness of its functions"""
A = numpy.array(
[[1, 1, 0, 0], [0, 0, 1, 1], [-1, 0, -1, 0], [0, -1, 0, -1], [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0],
[0, 0, 0, -1]])
b = numpy.array([350, 600, 0, 0, 0, 0, 0, 0]).reshape(8, 1)
c = 25 * make_column([1, 1, 1, 1])
F = numpy.array([[0, 0], [0, 0], [-1, 0], [0, -1], [0, 0], [0, 0], [0, 0], [0, 0]])
Q = 2.0 * numpy.diag([153, 162, 162, 126])
CRa = numpy.vstack((numpy.eye(2), -numpy.eye(2)))
CRb = numpy.array([1000, 1000, 0, 0]).reshape(4, 1)
H = numpy.zeros((F.shape[1], Q.shape[0]))
prog = MPQP_Program(A, b, c, H, Q, CRa, CRb, F)
prog.scale_constraints()
return prog
|
71d5d9b872572e05b86e640006c58cc407c76ec4
| 3,648,117
|
import os
import base64
import requests as rq  # assumption: 'rq' in the body below refers to the requests library
def load_img(str_img):
    """
    Load an image from a local path, a URL, or raw bytes and return it as a
    base64-encoded string (or None if it cannot be loaded).
    """
str_b64 = None
    if isinstance(str_img, str) and os.path.exists(str_img) and is_path_img(str_img):
with open(str_img, 'rb') as f:
content = f.read()
content_b64 = base64.b64encode(content)
str_b64 = content_b64.decode('utf-8')
    elif isinstance(str_img, str) and str_img.startswith('http'):
res = rq.get(str_img)
if res.status_code == 200:
content_b64 = base64.b64encode(res.content)
str_b64 = content_b64.decode('utf-8')
    elif isinstance(str_img, bytes):
        str_b64 = base64.b64encode(str_img).decode('utf-8')
return str_b64
|
6a2549794003b1c4218ce985162e25f4bc8680f3
| 3,648,118
|
import numpy as np
def snr(flux, axis=0):
""" Calculates the S/N ratio of a spectra.
Translated from the IDL routine der_snr.pro """
signal = np.nanmedian(flux, axis=axis)
noise = 1.482602 / np.sqrt(6.) * np.nanmedian(np.abs(2.*flux - \
np.roll(flux, 2, axis=axis) - np.roll(flux, -2, axis=axis)), \
axis=axis)
return signal, noise, signal / noise
|
964362545e2fb8a0e7f15df71d90c5ce3e2f5815
| 3,648,119
|
def subtract_images(img_input, img_output, img_height, img_width):
"""Subtract input and output image and compute difference image and ela image"""
input_data = img_input.T
output_data = img_output.T
if len(input_data) != len(output_data):
raise Exception("Input and Output image have different sizes!")
diff = abs(input_data - output_data)
diff = diff.reshape(img_height, img_width, 3)
diff = np.clip(diff, 0, 255)
if auto:
args.multiplier = np.divide(255, diff.max())
diff_multiplied = diff * args.multiplier
diff_multiplied = np.clip(diff_multiplied, 0, 255)
if args.cupy:
diff_img = Image.fromarray(np.asnumpy(diff).astype(np.uint8), 'RGB')
diff_img_multiplied = Image.fromarray(np.asnumpy(diff_multiplied).astype(np.uint8), 'RGB')
else:
diff_img = Image.fromarray(diff.astype(np.uint8), 'RGB')
diff_img_multiplied = Image.fromarray(diff_multiplied.astype(np.uint8), 'RGB')
return diff_img, diff_img_multiplied
|
7b2b2df57055cc73bec85bed2a5bece8187ddcdf
| 3,648,120
|
import ctypes
def k4a_playback_get_track_name(playback_handle, track_index, track_name, track_name_size):
"""
K4ARECORD_EXPORT k4a_buffer_result_t k4a_playback_get_track_name(k4a_playback_t playback_handle,
size_t track_index,
char *track_name,
size_t *track_name_size);
"""
_k4a_playback_get_track_name = record_dll.k4a_playback_get_track_name
_k4a_playback_get_track_name.restype = k4a_buffer_result_t
_k4a_playback_get_track_name.argtypes = (k4a_playback_t,\
ctypes.c_size_t,\
ctypes.POINTER(ctypes.c_char),\
ctypes.POINTER(ctypes.c_size_t))
return _k4a_playback_get_track_name(playback_handle, track_index, track_name, track_name_size)
|
872c5aa73f9520f178fdbdfe47c314f9043282c0
| 3,648,121
|
import pandas as pd
from pmdarima.arima.utils import ndiffs
def recommend_lowercase_d(data: pd.Series, **kwargs) -> int:
"""Returns the recommended value of differencing order 'd' to use
Parameters
----------
data : pd.Series
The data for which the differencing order needs to be calculated
    **kwargs: Keyword arguments that can be passed to the difference test.
Values are:
alpha : float, optional
Significance Value, by default 0.05
test : str, optional
The test to use to test the order of differencing, by default 'kpss'
max_d : int, optional
maximum differencing order to try, by default 2
Returns
-------
int
The differencing order to use
"""
recommended_lowercase_d = ndiffs(data, **kwargs)
return recommended_lowercase_d
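# Hedged usage sketch (pmdarima's ndiffs assumed, names below are illustrative): a random
# walk typically needs one difference to become stationary, so this usually prints 1.
import numpy as np
import pandas as pd
rng = np.random.default_rng(0)
random_walk = pd.Series(np.cumsum(rng.normal(size=500)))
print(recommend_lowercase_d(random_walk, test='kpss', max_d=2))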
|
a249ed104c00e62f386f82c5c4aaecc7cf0c4001
| 3,648,122
|
from typing import Dict
import json
def get_player_current_games_to_move(username: str) -> Dict:
"""Public method that returns an array of Daily Chess games
where it is the player's turn to act
Parameters:
username -- username of the player
"""
r = _internal.do_get_request(f"/player/{username}/games/to-move")
return json.loads(r.data.decode('utf-8'))
|
1ce906d24fa278703d708d41ed753c2cb24b48d0
| 3,648,123
|
def get_rb_data_attribute(xmldict, attr):
"""Get Attribute `attr` from dict `xmldict`
Parameters
----------
xmldict : dict
Blob Description Dictionary
attr : str
Attribute key
Returns
-------
sattr : int
        Attribute value
"""
try:
sattr = int(xmldict["@" + attr])
except KeyError:
raise KeyError(
f"Attribute @{attr} is missing from "
"Blob Description. There may be some "
"problems with your file"
)
return sattr
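# Usage sketch with a minimal blob-description dict (attribute keys carry an "@" prefix):
assert get_rb_data_attribute({"@blobid": "2", "@size": "737"}, "size") == 737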
|
dfc48ad47f67b2303874154ce4a164a176c1f4bf
| 3,648,124
|
from PythonPhot import photfunctions
import numpy as np
import pdb
def dopythonphot(image, xc, yc, aparcsec=0.4, system='AB', ext=None,
psfimage=None, psfradpix=3, recenter=False, imfilename=None,
ntestpositions=100, snthresh=0.0, zeropoint=None,
filtername=None, exptime=None, pixscale=None,
skyannarcsec=[6.0, 12.0], skyval=None,
skyalgorithm='sigmaclipping',
target=None, printstyle=None, exact=True, fitsconvention=True,
phpadu=None, returnflux=False, showfit=False,
verbose=False, debug=False):
""" Measure the flux through aperture(s) and/or psf fitting using the
PythonPhot package.
Inputs:
image : string giving image file name OR a list or 2-tuple giving
the header and data array as [hdr,data]
xc,yc : aperture center in pixel coordinates
aparcsec : aperture radius in arcsec, or a string with a comma-separated
list of aperture radii
psfimage : filename for a fits file containing a psf model
system : report AB or Vega mags ('AB','Vega')
snthresh : If the measured flux is below <snthresh>*fluxerr then the
resulting magnitude is reported as a lower limit.
zeropoint : fix the zeropoint (if not provided, we look it up from
hardcoded tables)
skyannarcsec : inner and outer radius of the sky annulus (in arcsec)
target : name of the target object (for printing in snanastyle)
printstyle : None or 'default' = report MJD, filter, and photometry
'verbose' or 'long' = include target name and position
'snana' = report mags in the format of a SNANA .dat file.
fitsconvention : xc,yc position follows the fits convention with (1,1)
as the lower left pixel. Otherwise, follow the python/pyfits
convention with (0,0) as the lower left pixel.
returnflux : instead of returning a list of strings containing all the
flux and magnitude information, simply return a single flux val
Note : No recentering is done (i.e. this does forced photometry at the
given pixel position)
"""
if debug == 1:
pdb.set_trace()
imhdr, imdat = getheaderanddata(image, ext=ext)
if imfilename is None:
if isinstance(image, str):
imfilename = image
elif 'FILENAME' in imhdr:
imfilename = imhdr['FILENAME']
else:
imfilename = 'unknown'
if imdat.dtype != 'float64':
imdat = imdat.astype('float64', copy=False)
if not filtername:
if 'FILTER1' in imhdr:
if 'CLEAR' in imhdr['FILTER1']:
filtername = imhdr['FILTER2']
else:
filtername = imhdr['FILTER1']
else:
filtername = imhdr['FILTER']
if not exptime:
if 'EXPTIME' in imhdr:
exptime = imhdr['EXPTIME']
else:
            raise RuntimeError(
                "Cannot determine exposure time for %s" % imfilename)
if not pixscale:
pixscale = getpixscale(imhdr, ext=ext)
if not np.iterable(aparcsec):
aparcsec = np.array([aparcsec])
elif not isinstance(aparcsec, np.ndarray):
aparcsec = np.array(aparcsec)
appix = np.array([ap / pixscale for ap in aparcsec])
skyannpix = np.array([skyrad / pixscale for skyrad in skyannarcsec])
if len(appix) >= 1:
assert skyannpix[0] >= np.max(
appix), "Sky annulus must be >= largest aperture."
camera = getcamera(imhdr)
# Define the conversion factor from the values in this image
# to photons : photons per ADU.
if phpadu is None:
if 'BUNIT' not in imhdr:
if camera == 'WFC3-IR' and 'EXPTIME' in imhdr:
phpadu = imhdr['EXPTIME']
else:
phpadu = 1
elif imhdr['BUNIT'].lower() in ['cps', 'electrons/s']:
phpadu = imhdr['EXPTIME']
elif imhdr['BUNIT'].lower() in ['counts', 'electrons']:
phpadu = 1
assert (
phpadu is not None), "Can't determine units from the image header."
if fitsconvention:
xpy, ypy = xc - 1, yc - 1
else:
xpy, ypy = xc, yc
if recenter:
xim, yim = getxycenter([imhdr, imdat], xc, yc,
fitsconvention=True, radec=False,
verbose=verbose)
if verbose:
print("Recentered position (x,y) : %.2f %.2f" % (xim, yim))
ra, dec = xy2radec(imhdr, xim, yim)
print("Recentered position (ra,dec) : %.6f %.6f" % (ra, dec))
output_PythonPhot = photfunctions.get_flux_and_err(
imdat, psfimage, [xpy, ypy],
psfradpix=psfradpix, apradpix=appix, ntestpositions=ntestpositions,
skyannpix=skyannpix, skyalgorithm=skyalgorithm, setskyval=skyval,
recenter_target=False, recenter_fakes=True, exact=exact,
exptime=exptime, ronoise=1, phpadu=phpadu,
showfit=showfit, verbose=verbose, debug=debug)
apflux, apfluxerr, psfflux, psffluxerr, sky, skyerr = output_PythonPhot
if not np.iterable(apflux):
apflux = np.array([apflux])
apfluxerr = np.array([apfluxerr])
# Define aperture corrections for each aperture
if zeropoint is not None:
zpt = zeropoint
apcor = np.zeros(len(aparcsec))
aperr = np.zeros(len(aparcsec))
else:
zpt = hstzpt_apcorr.getzpt(image, system=system)
if camera == 'WFC3-IR':
# TODO: allow user to choose an alternate EE table?
apcor, aperr = hstzpt_apcorr.apcorrWFC3IR(filtername, aparcsec)
elif camera == 'WFC3-UVIS':
apcor, aperr = hstzpt_apcorr.apcorrWFC3UVIS(filtername, aparcsec)
elif camera == 'ACS-WFC':
apcor, aperr = hstzpt_apcorr.apcorrACSWFC(filtername, aparcsec)
# record the psf flux as a final infinite aperture for printing purposes
if psfimage is not None:
aparcsec = np.append(aparcsec, np.inf)
apflux = np.append(apflux, [psfflux])
apfluxerr = np.append(apfluxerr, [psffluxerr])
apcor = np.append(apcor, 0)
# apply aperture corrections to flux and mags
# and define upper limit mags for fluxes with significance <snthresh
mag, magerr = np.zeros(len(apflux)), np.zeros(len(apflux))
for i in range(len(apflux)):
if np.isfinite(aparcsec[i]):
# For actual aperture measurements (not the psf fitting flux),
# apply aperture corrections to the measured fluxes
# Flux rescaled to larger aperture:
apflux[i] *= 10 ** (0.4 * apcor[i])
# Flux error rescaled:
df = apfluxerr[i] * 10 ** (0.4 * apcor[i])
# Systematic err from aperture correction :
dfap = 0.4 * np.log(10) * apflux[i] * aperr[i]
apfluxerr[i] = np.sqrt(df ** 2 + dfap ** 2) # total flux err
if verbose > 1:
print(" FERRTOT FERRSTAT FERRSYS")
print(" %.5f %.5f %.5f" % (apfluxerr[i], df, dfap))
if apflux[i] < abs(apfluxerr[i]) * snthresh:
# no real detection. Report mag as an upper limit
sigmafactor = snthresh or 3
mag[i] = -2.5 * np.log10(sigmafactor * abs(apfluxerr[i])) \
+ zpt - apcor[i]
magerr[i] = -9.0
else:
# Good detection. convert to a magnitude (ap correction already
# applied)
mag[i] = -2.5 * np.log10(apflux[i]) + zpt
magerr[i] = 1.0857 * apfluxerr[i] / apflux[i]
if debug:
pdb.set_trace()
if returnflux:
return apflux
if 'EXPSTART' in imhdr and 'EXPEND' in imhdr:
mjdobs = (imhdr['EXPEND'] + imhdr['EXPSTART'])/2.
else:
mjdobs = 0.0
    if printstyle == 'snana':
        # Convert to SNANA fluxcal units and construct a SNANA-style OBS
        # line, e.g.
        # OBS: 56456.500 H wol 0.000 8.630 25.160 -9.000
        # (fluxcal is needed for the OBS lines below even when not verbose)
        fluxcal = apflux * 10 ** (0.4 * (27.5 - zpt))
        fluxcalerr = apfluxerr * 10 ** (0.4 * (27.5 - zpt))
        if verbose:
            print('VARLIST: MJD FLT FIELD FLUXCAL FLUXCALERR MAG '
                  'MAGERR ZPT')
    elif verbose:
        if printstyle is not None and printstyle.lower() in ['long', 'verbose']:
print('# TARGET RA DEC MJD FILTER '
' APER FLUX FLUXERR MAG MAGERR MAGSYS '
' ZP SKY SKYERR IMAGE')
else:
print('# MJD FILTER APER FLUX FLUXERR MAG '
'MAGERR MAGSYS ZP SKY SKYERR')
if printstyle is not None:
printstyle = printstyle.lower()
ra, dec = 0, 0
if (printstyle is not None and
printstyle.lower() in ['snana', 'long', 'verbose']):
if not target and 'FILENAME' in imhdr.keys():
target = imhdr['FILENAME'].split('_')[0]
elif not target:
target = 'target'
ra, dec = xy2radec(imhdr, xc, yc, ext=ext)
maglinelist = []
for iap in range(len(aparcsec)):
if printstyle == 'snana':
magline = 'OBS: %8.2f %6s %s %8.3f %8.3f '\
'%8.3f %8.3f %.3f' % (
float(mjdobs), FilterAlpha[filtername], target,
fluxcal[iap], fluxcalerr[iap], mag[iap], magerr[iap],
zpt)
elif printstyle in ['long', 'verbose']:
magline = '%-15s %10.5f %10.5f %.3f %6s %4.2f %9.4f %8.4f '\
' %9.4f %8.4f %5s %7.4f %7.4f %6.4f %s' % (
target, ra, dec, float(mjdobs), filtername,
aparcsec[iap],
apflux[iap], apfluxerr[iap], mag[iap], magerr[iap],
system,
zpt, sky, skyerr, imfilename)
else:
magline = '%.3f %6s %4.2f %9.4f %8.4f %9.4f %8.4f %5s ' \
'%7.4f %7.4f %6.4f' % (
float(mjdobs), filtername, aparcsec[iap],
apflux[iap], apfluxerr[iap], mag[iap], magerr[iap],
system,
zpt, sky, skyerr)
maglinelist.append(magline)
return maglinelist
|
d204355c8b5b860c4cb4ef3304ca949969404bd8
| 3,648,125
|
import os
import zipfile
def zip_dir(source_dir, archive_file, fnmatch_list=None):
"""Creates an archive of the given directory and stores it in the given
archive_file which may be a filename as well. By default, this function
will look for a .cfignore file and exclude any matching entries from the
archive.
"""
if fnmatch_list is None:
fnmatch_list = []
cwd = os.getcwd()
try:
with zipfile.ZipFile(
archive_file,
mode='w',
compression=zipfile.ZIP_DEFLATED) as zipf:
if os.path.isdir(source_dir):
os.chdir(source_dir)
files = list_files(source_dir, fnmatch_list)
for f in files:
name = f['fn'].replace(source_dir, '')
compress = zipfile.ZIP_STORED if f['fn'].endswith(
'/') else zipfile.ZIP_DEFLATED
zipf.write(f['fn'], arcname=name, compress_type=compress)
else:
zipf.write(
source_dir,
arcname=os.path.basename(source_dir),
compress_type=zipfile.ZIP_DEFLATED)
finally:
os.chdir(cwd)
return archive_file
|
ea605c1c05eb06dab7c119b5072e181bd9d3b4e6
| 3,648,126
|
def tls_params(mqtt_config):
"""Return the TLS configuration parameters from a :class:`.MQTTConfig`
object.
Args:
mqtt_config (:class:`.MQTTConfig`): The MQTT connection settings.
Returns:
dict: A dict {'ca_certs': ca_certs, 'certfile': certfile,
'keyfile': keyfile} with the TLS configuration parameters, or None if
no TLS connection is used.
.. versionadded:: 0.6.0
"""
# Set up a dict containing TLS configuration parameters for the MQTT
# client.
if mqtt_config.tls.hostname:
return {'ca_certs': mqtt_config.tls.ca_file,
'certfile': mqtt_config.tls.client_cert,
'keyfile': mqtt_config.tls.client_key}
# Or don't use TLS.
else:
return None
|
4b5d214a50fea60f5cb325fc7a0c93dfa9cb3c02
| 3,648,127
|
import flask
def is_admin():
"""Check if current user is an admin."""
try:
return flask.g.admin
except AttributeError:
return False
|
0922a93afacc4002068b60f1ab8a6594f0ddb44a
| 3,648,128
|
def statistic():
""" RESTful CRUD Controller """
return crud_controller()
|
02ed03d6d7d159046c12cbea98cc69c7bb0ae024
| 3,648,129
|
import argparse
def parse_args():
""" Parses command line arguments.
:return: argparse parser with parsed command line args
"""
parser = argparse.ArgumentParser(description='Godot AI Bridge (GAB) - DEMO Environment Action Client')
parser.add_argument('--id', type=int, required=False, default=DEFAULT_AGENT,
help=f'the id of the agent to which this action will be sent (default: {DEFAULT_AGENT})')
parser.add_argument('--host', type=str, required=False, default=DEFAULT_HOST,
help=f'the IP address of host running the GAB action listener (default: {DEFAULT_HOST})')
parser.add_argument('--port', type=int, required=False, default=DEFAULT_PORT,
help=f'the port number of the GAB action listener (default: {DEFAULT_PORT})')
parser.add_argument('--verbose', required=False, action="store_true",
help='increases verbosity (displays requests & replies)')
return parser.parse_args()
|
ff3e118179cb1a8d6f5d5d0067c7b11241522632
| 3,648,130
|
def segment_range_to_fragment_range(segment_start, segment_end, segment_size,
fragment_size):
"""
Takes a byterange spanning some segments and converts that into a
byterange spanning the corresponding fragments within their fragment
archives.
Handles prefix, suffix, and fully-specified byte ranges.
:param segment_start: first byte of the first segment
:param segment_end: last byte of the last segment
:param segment_size: size of an EC segment, in bytes
:param fragment_size: size of an EC fragment, in bytes
:returns: a 2-tuple (frag_start, frag_end) where
* frag_start is the first byte of the first fragment, or None if this
is a suffix byte range
* frag_end is the last byte of the last fragment, or None if this is a
prefix byte range
"""
# Note: segment_start and (segment_end + 1) are
# multiples of segment_size, so we don't have to worry
# about integer math giving us rounding troubles.
#
# There's a whole bunch of +1 and -1 in here; that's because HTTP wants
# byteranges to be inclusive of the start and end, so e.g. bytes 200-300
# is a range containing 101 bytes. Python has half-inclusive ranges, of
# course, so we have to convert back and forth. We try to keep things in
# HTTP-style byteranges for consistency.
# the index of the first byte of the first fragment
fragment_start = ((
segment_start // segment_size * fragment_size)
if segment_start is not None else None)
# the index of the last byte of the last fragment
fragment_end = (
# range unbounded on the right
None if segment_end is None else
# range unbounded on the left; no -1 since we're
# asking for the last N bytes, not to have a
# particular byte be the last one
((segment_end + 1) // segment_size
* fragment_size) if segment_start is None else
# range bounded on both sides; the -1 is because the
# rest of the expression computes the length of the
# fragment, and a range of N bytes starts at index M
# and ends at M + N - 1.
((segment_end + 1) // segment_size * fragment_size) - 1)
return (fragment_start, fragment_end)
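# Worked example with made-up sizes: 1024-byte segments and 128-byte fragments.
# Bytes 2048-4095 cover segments 2..3, which live in fragment bytes 256..511.
assert segment_range_to_fragment_range(2048, 4095, 1024, 128) == (256, 511)
# A suffix byte range (no start) keeps frag_start as None:
assert segment_range_to_fragment_range(None, 4095, 1024, 128) == (None, 512)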
|
e20c9bb55d9d3e90beb20bed7a170d1066611ba9
| 3,648,131
|
import collections.abc
def update_dict(d, u):
""" Recursively update dict d with values from dict u.
Args:
d: Dict to be updated
u: Dict with values to use for update
Returns: Updated dict
"""
for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):
default = v.copy()
default.clear()
            r = update_dict(d.get(k, default), v)
d[k] = r
else:
d[k] = v
return d
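# Usage sketch: nested mappings are merged recursively instead of being overwritten.
base = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
update_dict(base, {'db': {'port': 6432}, 'debug': True})
assert base == {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}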
|
e0228d3d0946f20b4bad8bfbc94a725f62bddfc5
| 3,648,132
|
def get_dataset(args, tokenizer, evaluate=False):
"""Convert the text file into the GPT-2 TextDataset format.
Args:
tokenizer: The GPT-2 tokenizer object.
        evaluate: Whether to evaluate on the dataset.
"""
file_path = args.eval_data_file if evaluate else args.train_data_file
if args.line_by_line:
return LineByLineTextDataset(
tokenizer=tokenizer, file_path=file_path, block_size=args.block_size
)
else:
return TextDataset(
tokenizer=tokenizer,
file_path=file_path,
block_size=args.block_size,
overwrite_cache=args.overwrite_cache,
)
|
493bd0a5ca548b08052717d7e343e4f8b8911cb1
| 3,648,133
|
def test_function_decorators():
"""Function Decorators."""
# Function decorators are simply wrappers to existing functions. Putting the ideas mentioned
# above together, we can build a decorator. In this example let's consider a function that
# wraps the string output of another function by p tags.
# This is the function that we want to decorate.
def greeting(name):
return "Hello, {0}!".format(name)
# This function decorates another functions output with <p> tag.
def decorate_with_p(func):
def function_wrapper(name):
return "<p>{0}</p>".format(func(name))
return function_wrapper
# Now, let's call our decorator and pass the function we want decorate to it.
my_get_text = decorate_with_p(greeting)
# Here we go, we've just decorated the function output without changing the function itself.
assert my_get_text('John') == '<p>Hello, John!</p>' # With decorator.
assert greeting('John') == 'Hello, John!' # Without decorator.
    # Now, Python makes creating and using decorators a bit cleaner for the programmer
    # through some syntactic sugar. There is a neat shortcut for that, which is to mention
    # the name of the decorating function before the function to be decorated. The name
    # of the decorator should be prepended with an @ symbol.
@decorate_with_p
def greeting_with_p(name):
return "Hello, {0}!".format(name)
assert greeting_with_p('John') == '<p>Hello, John!</p>'
    # Now let's consider that we want to decorate our greeting function with one more
    # function that wraps the string output in a div.
# This will be our second decorator.
def decorate_with_div(func):
def function_wrapper(text):
return "<div>{0}</div>".format(func(text))
return function_wrapper
    # With the basic approach, decorating greeting would be along the lines of
    # greeting_with_div_p = decorate_with_div(decorate_with_p(greeting))
# With Python's decorator syntax, same thing can be achieved with much more expressive power.
@decorate_with_div
@decorate_with_p
def greeting_with_div_p(name):
return "Hello, {0}!".format(name)
assert greeting_with_div_p('John') == '<div><p>Hello, John!</p></div>'
# One important thing to notice here is that the order of setting our decorators matters.
# If the order was different in the example above, the output would have been different.
# Passing arguments to decorators.
# Looking back at the example before, you can notice how redundant the decorators in the
    # example are: two decorators (decorate_with_div, decorate_with_p), each with the same
# functionality but wrapping the string with different tags. We can definitely do much better
# than that. Why not have a more general implementation for one that takes the tag to wrap
# with as a string? Yes please!
def tags(tag_name):
def tags_decorator(func):
def func_wrapper(name):
return "<{0}>{1}</{0}>".format(tag_name, func(name))
return func_wrapper
return tags_decorator
@tags('div')
@tags('p')
def greeting_with_tags(name):
return "Hello, {0}!".format(name)
assert greeting_with_tags('John') == '<div><p>Hello, John!</p></div>'
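    # To make the ordering point above concrete: swapping the two decorators swaps the nesting.
    @tags('p')
    @tags('div')
    def greeting_with_tags_swapped(name):
        return "Hello, {0}!".format(name)
    assert greeting_with_tags_swapped('John') == '<p><div>Hello, John!</div></p>'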
|
03b3ba299ceb7a75b0de1674fabe892243abd8b3
| 3,648,134
|
def make_loci_field( loci ):
""" make string representation of contig loci """
codes = [L.code for L in loci]
return c_delim2.join( codes )
|
a643f70f02c3b79214d76f82bb4379ea0c0e1e84
| 3,648,135
|
from tqdm import tqdm
import numpy as np
import pandas as pd
def compute_wilderness_impact1(ground_truth_all, prediction_all, video_list, known_classes, tiou_thresholds=np.linspace(0.5, 0.95, 10)):
""" Compute wilderness impact for each video (WI=Po/Pc < 1)
"""
wi = np.zeros((len(tiou_thresholds), len(known_classes)))
# # Initialize true positive and false positive vectors.
tp_u2u = np.zeros((len(tiou_thresholds), len(prediction_all)))
tp_k2k = np.zeros((len(tiou_thresholds), len(known_classes), len(prediction_all))) # TPc in WACV paper
fp_u2k = np.zeros((len(tiou_thresholds), len(known_classes), len(prediction_all))) # FPo in WACV paper
fp_k2k = np.zeros((len(tiou_thresholds), len(known_classes), len(prediction_all))) # FPc in WACV paper
fp_k2u = np.zeros((len(tiou_thresholds), len(prediction_all)))
fp_bg2u = np.zeros((len(tiou_thresholds), len(prediction_all)))
fp_bg2k = np.zeros((len(tiou_thresholds), len(known_classes), len(prediction_all)))
ground_truth_by_vid = ground_truth_all.groupby('video-id')
prediction_by_vid = prediction_all.groupby('video-id')
def _get_predictions_with_vid(prediction_by_vid, video_name):
"""Get all predicitons of the given video. Return empty DataFrame if there
is no predcitions with the given video.
"""
try:
return prediction_by_vid.get_group(video_name).reset_index(drop=True)
        except KeyError:
return pd.DataFrame()
# compute the TP, FPo and FPc for each predicted segment.
vidx_offset = 0
all_scores, all_max_tious = [], []
for video_name in tqdm(video_list, total=len(video_list), desc='Compute WI'):
ground_truth = ground_truth_by_vid.get_group(video_name).reset_index()
prediction = _get_predictions_with_vid(prediction_by_vid, video_name)
if prediction.empty:
vidx_offset += len(prediction)
all_scores.extend([0] * len(prediction)) # only for confidence score
all_max_tious.extend([0] * len(prediction))
continue # no predictions for this video
all_scores.extend(prediction['score'].values.tolist())
lock_gt = np.zeros((len(tiou_thresholds),len(ground_truth)))
for idx, this_pred in prediction.iterrows():
tiou_arr = segment_iou(this_pred[['t-start', 't-end']].values,
ground_truth[['t-start', 't-end']].values)
# attach each prediction with the gt that has maximum tIoU
max_iou = tiou_arr.max()
max_jdx = tiou_arr.argmax()
all_max_tious.append(max_iou)
label_pred = this_pred['label']
label_gt = int(ground_truth.loc[max_jdx]['label'])
for tidx, tiou_thr in enumerate(tiou_thresholds):
if max_iou > tiou_thr:
if label_pred == label_gt and lock_gt[tidx, max_jdx] == 0:
if label_gt == 0:
tp_u2u[tidx, vidx_offset + idx] = 1 # true positive (u2u), not used by WI by default
else:
tp_k2k[tidx, label_pred-1, vidx_offset + idx] = 1 # true positive (k2k)
lock_gt[tidx, max_jdx] = 1 # lock this ground truth
else:
if label_gt == 0: # false positive (u2k)
fp_u2k[tidx, label_pred-1, vidx_offset + idx] = 1
else: # false positive (k2k, k2u)
if label_pred == 0:
fp_k2u[tidx, vidx_offset + idx] = 1
else:
fp_k2k[tidx, label_pred-1, vidx_offset + idx] = 1
else: # GT is defined to be background (known), must be FP
if label_pred == 0:
fp_bg2u[tidx, vidx_offset + idx] = 1
else:
fp_bg2k[tidx, label_pred-1, vidx_offset + idx] = 1
# move the offset
vidx_offset += len(prediction)
stats = {'tp_k2k': tp_k2k, 'tp_u2u': tp_u2u, 'fp_k2k': fp_k2k, 'fp_k2u': fp_k2u, 'fp_u2k': fp_u2k, 'fp_bg2k': fp_bg2k, 'fp_bg2u': fp_bg2u,
'scores': all_scores, 'max_tious': all_max_tious}
# Here we assume the background detections (small tIoU) are from the background class, which is a known class
fp_k2u += fp_bg2u
fp_k2k += fp_bg2k
    tp_k2k_sum = np.sum(tp_k2k, axis=-1).astype(float)
    fp_u2k_sum = np.sum(fp_u2k, axis=-1).astype(float)
    fp_k2k_sum = np.sum(fp_k2k, axis=-1).astype(float)
wi = fp_u2k_sum / (tp_k2k_sum + fp_k2k_sum + 1e-6)
return wi, stats
|
5f921d93993df1431c7b703df591f710cb2b5628
| 3,648,136
|
import os
def write_data_header(ping, status):
""" write output log file header
    :param ping: p1125 ping response
    :param status: p1125 information
:return: success <True/False>
"""
try:
with open(os.path.join(CSV_FILE_PATH, filename), "w+") as f:
f.write("# This file is auto-generated by p1125_example_mahrs_csv.py\n".format(filename))
f.write("# {}\n".format(filename))
f.write("# p1125_ping = {}\n".format(ping))
f.write("# p1125_status = {}\n".format(status))
f.write("# p1125_settings = {{'VOUT': {}, 'TIME_CAPTURE_WINDOW_S': {}, 'TIME_TOTAL_RUN_S': {}, "
"'CONNECT_PROBE': {}, 'DOWN_SAMPLE_FACTOR': {}}}\n".format(VOUT,
TIME_CAPTURE_WINDOW_S, TIME_TOTAL_RUN_S, CONNECT_PROBE, DOWN_SAMPLE_FACTOR))
f.write("# time, uA, Max uA\n")
except Exception as e:
logger.error(e)
return False
return True
|
c36a664da20eed120c7682b6c8555ad3e9278455
| 3,648,137
|
from functools import wraps
def require_client(func):
"""
Decorator for class methods that require a client either through keyword
argument, or through the object's client attribute.
Returns:
        A wrapped version of the function. The object client attribute will
be passed in as the client keyword if None is provided.
Raises:
AssertionError : Raised when the method is called without a client
keyword set and no client attribute.
"""
@wraps(func)
async def wrapper(self, *args, **kwargs):
client = kwargs.get("client", None) or getattr(self, "client", None)
if client is None:
msg = (
"{0} object does not have a client -- {0}.{1} will do "
"nothing. To set a client, initialize the object with "
"{0}(..., client=your_client). Alternatively, you can "
"use the client keyword argument in the method."
).format(
self.__class__.__name__,
func.__name__,
)
raise AssertionError(msg)
else:
kwargs["client"] = client
return await func(self, *args, **kwargs)
return wrapper
|
28cfd4821405a132cde4db8b95c68987db76b99d
| 3,648,138
|
import numpy as np
from functools import partial
def extract_subwindows_image(image, scoremap, mask, input_window_size, output_window_size, mode, mean_radius,
flatten=True, dataset_augmentation=False, random_state=42, sw_extr_stride=None,
sw_extr_ratio=None, sw_extr_score_thres=None, sw_extr_npi=None):
"""
Extract subwindows from the multi-spectral provided image.
Parameters
----------
image: array-like of shape (width, height, n_features)
The multi-spectral image.
scoremap: array-like of shape (width, height)
The corresponding scoremap.
mask: array-like of shape (width, height)
The corresponding mask.
input_window_size: tuple of two int
The size (width, height) of input subwindows.
output_window_size: tuple of two int
The size (width, height) of output subwindows.
mode: {'random', 'scoremap_constrained', 'sliding'}
The mode of extraction for input suwbindows.
mean_radius: int
The mean radius of objects
dataset_augmentation: bool, optional (default=False)
If dataset augmentation must be performed.
random_state: int, optional (default=42)
An optional seed to make random number generator predictable.
Returns
-------
X: array-like of shape (n_subwindows, input_width * input_height * n_features)
The input subwindows.
y: array-like of shape (n_subwindows, output_width * output_height)
The output subwindows.
"""
input_window_size_half = half_size(input_window_size)
output_window_size_half = half_size(output_window_size)
if dataset_augmentation:
np.random.seed(random_state)
methods = [np.fliplr, np.flipud, np.rot90,
partial(np.rot90, k=2), partial(np.rot90, k=3)]
else:
methods = []
if mode == 'random':
if sw_extr_npi is None:
raise ValueError('number_per_image parameter required/invalid')
window_centers = _extract_random(mask, sw_extr_npi)
elif mode == 'scoremap_constrained':
if sw_extr_ratio is None:
raise ValueError('bg_ratio parameter required/invalid')
if sw_extr_score_thres is None:
raise ValueError('score_threshold required/invalid')
window_centers = _extract_scoremap_constrained(mask, scoremap, sw_extr_ratio,
sw_extr_score_thres)
else:
raise ValueError('unknown mode')
X, y = list(), list()
for window_center in window_centers:
top, right, bottom, left = subwindow_box(input_window_size,
input_window_size_half,
window_center)
input_window = image[slice(top, bottom), slice(left, right), :]
top, right, bottom, left = subwindow_box(output_window_size,
output_window_size_half,
window_center)
output_window = scoremap[slice(top, bottom), slice(left, right)]
if flatten:
X.append(input_window.ravel())
y.append(output_window.ravel())
else:
X.append(input_window)
y.append(output_window)
# TODO
if dataset_augmentation:
for method in methods:
X.append(method(input_window).ravel())
if output_window.ndim > 1:
y.append(method(output_window).ravel())
else:
y.append(output_window.ravel())
del window_centers
return np.array(X), np.array(y)
|
477d66513d93a7682325ab0e28711f968b34171a
| 3,648,139
|
import base64
def format_template(string, tokens=None, encode=None):
"""Create an encoding from given string template."""
if tokens is None:
tokens = {}
format_values = {"config": config,
"tokens": tokens}
result = string.format(**format_values)
if encode == "base64":
result = base64.b64encode(result.encode("utf-8")).decode("utf-8")
else:
assert encode is None, f"Unknown encoding {encode}"
return result
|
91dafa48c0a9f17bbd663bcc14cfb800f6f57877
| 3,648,140
|
import os
import sys
def exists_case_sensitive(path: str) -> bool:
"""Returns if the given path exists and also matches the case on Windows.
When finding files that can be imported, it is important for the cases to match because while
file os.path.exists("module.py") and os.path.exists("MODULE.py") both return True on Windows,
Python can only import using the case of the real file.
"""
result = os.path.exists(path)
if (
sys.platform.startswith("win") or sys.platform == "darwin"
) and result: # pragma: no cover
directory, basename = os.path.split(path)
result = basename in os.listdir(directory)
return result
|
a879f950bcfb6739db3bfe620a75591de92a7a35
| 3,648,141
|
import six
def make_datastore_api(client):
"""Create an instance of the GAPIC Datastore API.
:type client: :class:`~google.cloud.datastore.client.Client`
:param client: The client that holds configuration details.
:rtype: :class:`.datastore.v1.datastore_client.DatastoreClient`
:returns: A datastore API instance with the proper credentials.
"""
parse_result = six.moves.urllib_parse.urlparse(
client._base_url)
host = parse_result.netloc
if parse_result.scheme == 'https':
channel = make_secure_channel(
client._credentials, DEFAULT_USER_AGENT, host)
else:
channel = insecure_channel(host)
return GAPICDatastoreAPI(
channel=channel, lib_name='gccl', lib_version=__version__)
|
63ab5acf85bcd2df9d266d285887c624a7554c64
| 3,648,142
|
def _mul_certain(left, right):
"""Multiplies two values, where one is certain and the other is uncertain,
and returns the result."""
if _is_number(left):
return Uncertain(
value=right.value * left,
delta=right.delta,
)
return Uncertain(
value=left.value * right,
delta=left.delta,
)
|
ae6159d1f59daea13794aa0ee0fb8baadf647471
| 3,648,143
|
def felica_RequestSystemCode(): # -> (int, List[int]):
"""
Sends FeliCa Request System Code command
:returns: (status, systemCodeList)
status 1: Success, < 0: error
systemCodeList System Code list (Array length should longer than 16)
"""
cmd = bytearray([FELICA_CMD_REQUEST_SYSTEM_CODE]) + _felicaIDm[:8]
status, response = felica_SendCommand(cmd)
responseLength = len(response)
if (status != 1):
DMSG("Request System Code command failed\n")
return -1, []
numSystemCode = response[9]
# length check
if (responseLength < 10 + 2 * numSystemCode):
DMSG("Request System Code command failed (wrong response length)\n")
return -2, []
systemCodeList = []
for i in range(numSystemCode):
systemCodeList.append((response[10 + i * 2] << 8) + response[10 + i * 2 + 1])
return 1, systemCodeList
|
ff380077daef948a6a3ad274a9accccea22fcba1
| 3,648,144
|
from typing import Optional
from typing import Callable
import types
def get_regularizer(
regularizer_type: str, l_reg_factor_weight: float
) -> Optional[Callable[[tf.Tensor], Optional[tf.Tensor]]]:
"""Gets a regularizer of a given type and scale.
Args:
regularizer_type: One of types.RegularizationType
l_reg_factor_weight: Scale for regularization.
Returns:
A function with weights parameter that applies regularization.
"""
if regularizer_type == types.RegularizationType.NONE:
return None
elif regularizer_type == types.RegularizationType.L1:
return slim.l1_regularizer(scale=l_reg_factor_weight)
elif regularizer_type == types.RegularizationType.L2:
return slim.l2_regularizer(scale=l_reg_factor_weight)
else:
raise ValueError(f"Unknown regularization type {regularizer_type}")
|
c9f7f8dd227a7446c9b15aebf1e2f1065a3d810d
| 3,648,145
|
from sqlalchemy import MetaData
def metadata():
"""Returns shared metadata instance with naming convention."""
naming_convention = {
'ix': 'ix_%(column_0_label)s',
'uq': 'uq_%(table_name)s_%(column_0_name)s',
'ck': 'ck_%(table_name)s_%(constraint_name)s',
'fk': 'fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s',
'pk': 'pk_%(table_name)s'}
return MetaData(naming_convention=naming_convention)
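# Hedged usage sketch: with this convention SQLAlchemy derives deterministic constraint
# names, e.g. the unique constraint on users.email below is rendered as "uq_users_email".
from sqlalchemy import Column, Integer, String, Table
demo_meta = metadata()
Table('users', demo_meta,
      Column('id', Integer, primary_key=True),
      Column('email', String(255), unique=True))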
|
25363d243a2474dfbd43d57ff794899bfe30a44d
| 3,648,146
|
import time
def date_rss(dte=None):
    """Date in RSS format."""
    struct = time.localtime() if dte is None else dte.timetuple()
    return time.strftime('%a, %d %b %Y %H:%M:%S %z', struct)
|
6c343b675be5b89051fe9d76a9fb6437e89611bb
| 3,648,147
|
def warp_containing_points(img, pts, H, border=4, shape_only=False):
"""
display = img.copy()
for pt in pts.reshape((-1,2)).astype(int):
cv2.circle(display, tuple(pt), 4, (255, 0, 0),
-1, cv2.LINE_AA)
debug_show('warp', display)
"""
pts2 = cv2.perspectiveTransform(pts, H)
x0, y0, w, h = cv2.boundingRect(pts2)
print("got bounding rect", x0, y0, w, h)
T = translation(-x0 + border, -y0 + border)
TH = np.dot(T, H)
if shape_only:
return (h + 2 * border, w + 2 * border), TH
else:
dst = cv2.warpPerspective(
img, TH, (w + 2 * border, h + 2 * border), borderMode=cv2.BORDER_REPLICATE
)
return dst, TH
|
b60fdd8a9f548252a40515298ca0f35dd3d497e6
| 3,648,148
|
def generate_parallelogrammatic_board(width=5, height=5):
"""
Creates a board with a shape of a parallelogram.
Width and height specify the size (in fields) of the board.
"""
return [[1] * height for _ in range(width)]
|
1c9bd6e6e26f6693b434d44e6dbe4085ba9236b8
| 3,648,149
|
def function_is_even(latex_dict: dict) -> str:
"""
colloquially,
sympy.cos(x)==sympy.cos(-x)
sympy.cos(x) - sympy.cos(-x) == 0
>>> latex_dict = {}
>>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
>>> latex_dict['feed'] = [parse_latex('')]
>>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> function_is_even(latex_dict)
    'no check performed'
"""
logger.info("[trace]")
return "no check performed"
|
0169440f0ebe373efdc18b79a1b8f48220fada13
| 3,648,150
|
def summarize_curriculum(
curriculum: AbstractCurriculum,
) -> str:
"""
Generate a detailed string summarizing the contents of the curriculum.
:return: A string that would print as a formatted outline of this curriculum's contents.
"""
def maybe_plural(num: int, label: str):
return f"{num} {label}" + ("" if num == 1 else "s")
block_summaries = []
for i_block, block in enumerate(curriculum.learn_blocks_and_eval_blocks()):
task_summaries = []
for i_task, task_block in enumerate(block.task_blocks()):
variant_summaries = []
for i_variant, task_variant in enumerate(task_block.task_variants()):
variant_summary = (
f"\n\t\t\tTask variant {i_variant+1}, "
f"{task_variant.task_label} - {task_variant.variant_label}: "
+ (
f"{maybe_plural(task_variant.num_episodes, 'episode')}."
if task_variant.num_episodes is not None
else f"{maybe_plural(task_variant.num_steps, 'step')}."
)
)
variant_summaries.append(variant_summary)
task_summary = (
f"\n\t\tTask {i_task+1}, {task_block.task_label}: "
f"{maybe_plural(len(variant_summaries), 'variant')}"
)
task_summaries.append(task_summary + "".join(variant_summaries))
block_summary = (
f"\n\n\tBlock {i_block+1}, "
f"{'learning' if block.is_learning_allowed else 'evaluation'}: "
f"{maybe_plural(len(task_summaries), 'task')}"
)
block_summaries.append(block_summary + "".join(task_summaries))
curriculum_summary = (
f"This curriculum has {maybe_plural(len(block_summaries), 'block')}"
+ "".join(block_summaries)
)
return curriculum_summary
|
010f3fb38473d0a572616204c381bde04f83fb0e
| 3,648,151
|
def get_solarsample():
"""
NAME:
get_solarsample
PURPOSE:
get the RC sample at solar abundances
INPUT:
None so far
OUTPUT:
sample
HISTORY:
2015-03-18 - Started - Bovy (IAS)
"""
# Get the full sample first
data= get_rcsample()
# Now cut it
lowfeh= _solar_lowfeh(0.)
highfeh= _solar_highfeh(0.)
indx= (data[_FEHTAG] > lowfeh)*(data[_FEHTAG] <= highfeh)\
*(data[_AFETAG] > _solar_lowafe(data[_FEHTAG]))\
*(data[_AFETAG] <= _solar_highafe(data[_FEHTAG]))
return data[indx]
|
e511dbfdf013528f1bba3c7d794d776a2791e918
| 3,648,152
|
from typing import List
def perform_context_selection(
estimation_tasks: List[EstimationTask],
) -> List[EstimationTask]:
"""Changes the circuits in estimation tasks to involve context selection.
Args:
        estimation_tasks: list of estimation tasks
    Returns:
        The estimation tasks with the context-selection circuit appended to each circuit.
    """
output_estimation_tasks = []
for estimation_task in estimation_tasks:
(
context_selection_circuit,
frame_operator,
) = get_context_selection_circuit_for_group(estimation_task.operator)
frame_circuit = estimation_task.circuit + context_selection_circuit
new_estimation_task = EstimationTask(
frame_operator, frame_circuit, estimation_task.number_of_shots
)
output_estimation_tasks.append(new_estimation_task)
return output_estimation_tasks
|
814a5dd72a6c4b76c52af606c3136a6fd1cb46d9
| 3,648,153
|
import torch
def where(condition, x, y):
"""Wrapper of `torch.where`.
Parameters
----------
condition : DTensor of bool
Where True, yield x, otherwise yield y.
x : DTensor
The first tensor.
y : DTensor
The second tensor.
"""
return torch.where(condition, x, y)
|
0ec419e19ab24500f1be6c511eb472d1d929fe2c
| 3,648,154
|
def ukhls_wave_prefix(columns, year):
""" Determine wave prefix for ukhls wave data.
Parameters
----------
columns : list
A list of column names to add wave prefixes to.
year : int
Which wave year is being processed.
Returns
-------
columns : list
Column names with wave prefixes added.
"""
#wave_letter = alphabet[year - 2008]
wave_letter = get_wave_letter(year)
exclude = ["pidp"]
for i, item in enumerate(columns):
if item not in exclude:
columns[i] = wave_letter + "_" + item # Looks stupid but needed to update the list.
return columns
|
726cc3a153ad9c43ebd99b96c405ab4bbd8ff56c
| 3,648,155
|
from typing import Iterable
from typing import Optional
from typing import List
from typing import cast
def sort_converters(converters: Iterable[Optional[GenericConverter]]) -> List[GenericConverter]:
"""
Sort a list of converters according to their priority.
"""
converters = cast(Iterable[GenericConverter], filter(bool, converters))
return sorted(converters, key=lambda c: c.priority, reverse=True)
|
8112bfe4da1154c0b0e5aa421cf3c6d90148cbed
| 3,648,156
|
import os
def _make_path_relative(origin, dest):
"""
Return the relative path between origin and dest.
If it's not possible return dest.
If they are identical return ``os.curdir``
Adapted from `path.py <http://www.jorendorff.com/articles/python/path/>`_ by Jason Orendorff.
"""
origin = os.path.abspath(origin).replace('\\', '/')
dest = os.path.abspath(dest).replace('\\', '/')
#
orig_list = splitall(os.path.normcase(origin))
# Don't normcase dest! We want to preserve the case.
dest_list = splitall(dest)
#
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
#
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
#
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
return os.curdir
else:
# return os.path.join(*segments).replace('\\', '/')
return os.path.join(*segments)
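# Usage sketch (illustrative; assumes the elided splitall helper splits a path
# into its components, e.g. splitall('/a/b') -> ['/', 'a', 'b']):
#   _make_path_relative('/a/b', '/a/c/d')  -> os.path.join('..', 'c', 'd')
#   _make_path_relative('/a/b', '/a/b')    -> os.curdir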
|
977588d2cf64c42f5e620d78ebceb60d1ef220fe
| 3,648,157
|
def print_mf_weight_statistics():
""" Prints debug info about size of weights. """
def callback(i_epoch, model, loss_train, loss_val, subset=None, trainer=None, last_batch=None):
models, labels = [], []
        try:
            models.append(model.outer_transform)
            labels.append("outer transform weights:")
        except AttributeError:
            pass
        try:
            models.append(model.inner_transform)
            labels.append("inner transform weights:")
        except AttributeError:
            pass
        try:
            models.append(model.transform)
            labels.append("transform weights:")
        except AttributeError:
            pass
        try:
            models.append(model.encoder)
            labels.append("encoder weights:")
        except AttributeError:
            pass
        try:
            models.append(model.decoder)
            labels.append("decoder weights:")
        except AttributeError:
            pass
subset_str = " " if subset is None or trainer is None else " {:>2d} / {:>2d}:".format(subset, trainer)
for model_, label_ in zip(models, labels):
weights = np.hstack([param.detach().cpu().numpy().flatten() for param in model_.parameters()])
logger.debug(
"{} {:26.26s} mean {:>8.5f}, std {:>8.5f}, range {:>8.5f} ... {:>8.5f}".format(
subset_str, label_, np.mean(weights), np.std(weights), np.min(weights), np.max(weights)
)
)
return callback
|
dd5d14bddb7acff6547bb87a6804bf2c719a4a34
| 3,648,158
|
def createPolygon(fire):
"""
create a Polygon object from list of points
"""
points = []
for coordinate in fire["geometry"]["coordinates"][0]:
points.append(tuple(coordinate))
polygon = Polygon(points)
return polygon
|
ce985b494d0d56f9b44a684ab187fb290d0c5d4f
| 3,648,159
|
def change_anim_nodes(node_object="", in_tangent='linear', out_tangent='linear'):
"""
Changes the setting on all anim nodes.
    :param node_object: <str> node whose connected anim curves will be modified.
    :param in_tangent: <str> in-tangent type to set, e.g. 'linear'.
    :param out_tangent: <str> out-tangent type to set, e.g. 'linear'.
    :return: <bool> True for success. <bool> False for failure.
"""
anim_nodes = object_utils.get_connected_anim(node_object)
cmds.keyTangent(anim_nodes, itt=in_tangent, ott=out_tangent)
return True
|
649f740669c2f292bfc180895a2ce5d295b2ef68
| 3,648,160
|
def is_x_degenerated(x, codon, table):
"""Determine if codon is x-fold degenerated.
@param codon the codon
@param table code table id
@param true if x <= the degeneration of the codon
"""
return (x <= len(altcodons(codon, table)))
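# Usage sketch (illustrative; assumes the elided altcodons helper returns all codons
# encoding the same amino acid under the given code table):
#   is_x_degenerated(4, "GCT", 1)  -> True   (alanine GC* is four-fold degenerate)
#   is_x_degenerated(6, "GCT", 1)  -> False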
|
b6a6bd8afc21a8e9b94dc7aa086255b6dfa44e85
| 3,648,161
|
import torch
def get_long_tensor(tokens_list, batch_size, pad_id=constant.PAD_ID):
""" Convert (list of )+ tokens to a padded LongTensor. """
sizes = []
x = tokens_list
while isinstance(x[0], list):
sizes.append(max(len(y) for y in x))
x = [z for y in x for z in y]
tokens = torch.LongTensor(batch_size, *sizes).fill_(pad_id)
for i, s in enumerate(tokens_list):
tokens[i, :len(s)] = torch.LongTensor(s)
return tokens
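# Usage sketch (illustrative; constant.PAD_ID is defined elsewhere, commonly 0):
#   get_long_tensor([[1, 2, 3], [4, 5]], batch_size=2, pad_id=0)
#   -> tensor([[1, 2, 3],
#              [4, 5, 0]])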
|
d088277fcd8f599d142ff7bdb1b8e018c5b6c1cb
| 3,648,162
|
def business():
"""
show business posts
"""
business = Post.query.filter_by(category="Business").all()
return render_template('business.html', post=business)
|
2fd3a46391681a25cdb3291f0a92e110dc0d4eb3
| 3,648,163
|
import numpy as np
def tile_image(x_gen, tiles=()):
"""Tiled image representations.
Args:
x_gen: 4D array of images (n x w x h x 3)
tiles (int pair, optional): number of rows and columns
Returns:
Array of tiled images (1 x W x H x 3)
"""
n_images = x_gen.shape[0]
if not tiles:
for i in range(int(np.sqrt(n_images)), 0, -1):
if n_images % i == 0:
break
n_rows = i
n_cols = n_images // i
else:
n_rows, n_cols = tiles
full = [np.hstack(x_gen[c * n_rows:(c + 1) * n_rows]) for c in range(n_cols)]
return np.expand_dims(np.vstack(full), 0)
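# Minimal usage sketch: six 8x8 RGB images are arranged into a single tiled image.
if __name__ == "__main__":
    batch = np.zeros((6, 8, 8, 3))
    print(tile_image(batch).shape)  # (1, 24, 16, 3): 3 strips of 2 images stacked vertically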
|
d95a9cd12f6ba4239724b84efd701575678f217f
| 3,648,164
|
def note_updated_data(note, role):
"""Return the data for updated date
:param note: the note that holds the data
:type note: :class:`jukeboxcore.djadapter.models.Note`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the updated date
:rtype: depending on role
:raises: None
"""
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
dt = note.date_updated
return dt_to_qdatetime(dt)
|
987e78ae0c5a7a4570b619520d8b20f592adae07
| 3,648,165
|
def _xyz_atom_coords(atom_group):
"""Use this method if you need to identify if CB is present in atom_group and if not return CA"""
tmp_dict = {}
for atom in atom_group.atoms():
if atom.name.strip() in {"CA", "CB"}:
tmp_dict[atom.name.strip()] = atom.xyz
if 'CB' in tmp_dict:
return tmp_dict['CB']
elif 'CA' in tmp_dict:
return tmp_dict['CA']
else:
return float('inf'), float('inf'), float('inf')
|
fd7ef43b1935f8722b692ad28a7e8b309033b720
| 3,648,166
|
import pathlib
import git
def InitBareRepository(path):
"""Returns the Repo object"""
assert isinstance(path, str)
pathlib.Path(path).parent.mkdir(parents=True,exist_ok=True)
return git.Repo.init(path,bare=True)
|
84b321c7b27ee8ab7101339671361f45ec474e91
| 3,648,167
|
def get_http_exception(code):
"""Return an exception class based on its code"""
    try:
        return http_exceptions[int(code)]
    except (KeyError, ValueError, TypeError):
        return None
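# Usage sketch (illustrative; assumes http_exceptions is a module-level mapping
# from integer status codes to exception classes):
#   get_http_exception(404)    -> the exception class registered for 404
#   get_http_exception("oops") -> None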
|
f7b5077331b4425d5ee49a8c61849bb6ba822049
| 3,648,168
|
def _zoom(restricted_func_and_grad, wolfe_one, wolfe_two, a_lo, phi_lo,
dphi_lo, a_hi, phi_hi, dphi_hi, g_0, pass_through):
"""
Implementation of zoom. Algorithm 3.6 from Wright and Nocedal, 'Numerical
Optimization', 1999, pg. 59-61. Tries cubic, quadratic, and bisection methods
of zooming.
"""
state = _ZoomState(
done=False,
failed=False,
j=0,
a_lo=a_lo,
phi_lo=phi_lo,
dphi_lo=dphi_lo,
a_hi=a_hi,
phi_hi=phi_hi,
dphi_hi=dphi_hi,
a_rec=(a_lo + a_hi) / 2.,
phi_rec=(phi_lo + phi_hi) / 2.,
a_star=1.,
phi_star=phi_lo,
dphi_star=dphi_lo,
g_star=g_0,
nfev=0,
ngev=0,
)
delta1 = 0.2
delta2 = 0.1
def body(state):
# Body of zoom algorithm. We use boolean arithmetic to avoid using jax.cond
# so that it works on GPU/TPU.
dalpha = (state.a_hi - state.a_lo)
a = jnp.minimum(state.a_hi, state.a_lo)
b = jnp.maximum(state.a_hi, state.a_lo)
cchk = delta1 * dalpha
qchk = delta2 * dalpha
# This will cause the line search to stop, and since the Wolfe conditions
# are not satisfied the minimization should stop too.
threshold = jnp.where((jnp.finfo(dalpha).bits < 64), 1e-5, 1e-10)
state = state._replace(failed=state.failed | (dalpha <= threshold))
# Cubmin is sometimes nan, though in this case the bounds check will fail.
a_j_cubic = _cubicmin(state.a_lo, state.phi_lo, state.dphi_lo, state.a_hi,
state.phi_hi, state.a_rec, state.phi_rec)
use_cubic = (state.j > 0) & (a_j_cubic > a + cchk) & (a_j_cubic < b - cchk)
a_j_quad = _quadmin(state.a_lo, state.phi_lo, state.dphi_lo, state.a_hi, state.phi_hi)
use_quad = (~use_cubic) & (a_j_quad > a + qchk) & (a_j_quad < b - qchk)
a_j_bisection = (state.a_lo + state.a_hi) / 2.
use_bisection = (~use_cubic) & (~use_quad)
a_j = jnp.where(use_cubic, a_j_cubic, state.a_rec)
a_j = jnp.where(use_quad, a_j_quad, a_j)
a_j = jnp.where(use_bisection, a_j_bisection, a_j)
# TODO(jakevdp): should we use some sort of fixed-point approach here instead?
phi_j, dphi_j, g_j = restricted_func_and_grad(a_j)
phi_j = phi_j.astype(state.phi_lo.dtype)
dphi_j = dphi_j.astype(state.dphi_lo.dtype)
g_j = g_j.astype(state.g_star.dtype)
state = state._replace(nfev=state.nfev + 1,
ngev=state.ngev + 1)
hi_to_j = wolfe_one(a_j, phi_j) | (phi_j >= state.phi_lo)
star_to_j = wolfe_two(dphi_j) & (~hi_to_j)
hi_to_lo = (dphi_j * (state.a_hi - state.a_lo) >= 0.) & (~hi_to_j) & (~star_to_j)
lo_to_j = (~hi_to_j) & (~star_to_j)
state = state._replace(
**_binary_replace(
hi_to_j,
state._asdict(),
dict(
a_hi=a_j,
phi_hi=phi_j,
dphi_hi=dphi_j,
a_rec=state.a_hi,
phi_rec=state.phi_hi,
),
),
)
# for termination
state = state._replace(
done=star_to_j | state.done,
**_binary_replace(
star_to_j,
state._asdict(),
dict(
a_star=a_j,
phi_star=phi_j,
dphi_star=dphi_j,
g_star=g_j,
)
),
)
state = state._replace(
**_binary_replace(
hi_to_lo,
state._asdict(),
dict(
a_hi=state.a_lo,
phi_hi=state.phi_lo,
dphi_hi=state.dphi_lo,
a_rec=state.a_hi,
phi_rec=state.phi_hi,
),
),
)
state = state._replace(
**_binary_replace(
lo_to_j,
state._asdict(),
dict(
a_lo=a_j,
phi_lo=phi_j,
dphi_lo=dphi_j,
a_rec=state.a_lo,
phi_rec=state.phi_lo,
),
),
)
state = state._replace(j=state.j + 1)
# Choose higher cutoff for maxiter than Scipy as Jax takes longer to find
# the same value - possibly floating point issues?
    state = state._replace(failed=state.failed | (state.j >= 30))
return state
state = lax.while_loop(lambda state: (~state.done) & (~pass_through) & (~state.failed),
body,
state)
return state
|
ec4d1381e831c19f342d69136d5ce936fc20042e
| 3,648,169
|
from typing import Union
import tensorflow as tf
def _gradients_input(model: Union[tf.keras.models.Model, 'keras.models.Model'],
x: tf.Tensor,
target: Union[None, tf.Tensor]) -> tf.Tensor:
"""
Calculates the gradients of the target class output (or the output if the output dimension is equal to 1)
with respect to each input feature.
Parameters
----------
model
Tensorflow or keras model.
x
Input data point.
target
Target for which the gradients are calculated if the output dimension is higher than 1.
Returns
-------
Gradients for each input feature.
"""
with tf.GradientTape() as tape:
tape.watch(x)
preds = _run_forward(model, x, target)
grads = tape.gradient(preds, x)
return grads
|
816c1f932533de4cdfd96525c44a2c11bae60254
| 3,648,170
|
from Totoro.dbclasses import Set as Set
def fixBadSets(sets):
"""Splits bad sets into a series of valid, incomplete sets."""
toRemove = []
toAdd = []
for ss in sets:
if ss.getStatus(silent=True)[0] != 'Bad':
continue
toRemove.append(ss)
if len(ss.totoroExposures) == 1:
raise exceptions.TotoroError(
'found bad set with one exposure. This is probably a bug.')
elif len(ss.totoroExposures) == 2:
# If the bad set has two exposures, splits it.
toAdd += [Set.fromExposures(exp) for exp in ss.totoroExposures]
else:
# Tests all possible combinations of two exposures to check if one
# of them is a valid set.
validSets = []
for ii, jj in [[0, 1], [0, 2], [1, 2]]:
testSet = Set.fromExposures([ss.totoroExposures[ii], ss.totoroExposures[jj]])
if testSet.getStatus(silent=True)[0] != 'Bad':
validSets.append(testSet)
if len(validSets) == 0:
# If no valid combinations, each exposures goes to a set.
toAdd += [Set.fromExposures(exp) for exp in ss.totoroExposures]
else:
# Otherwise, selects the combination that produces an
# incomplete set with maximum SN2.
signalToNoise = [np.nansum(xx.getSN2Array()) for xx in validSets]
maxSet = validSets[np.argmax(signalToNoise)]
toAdd.append(maxSet)
missingExposure = [
exp for exp in ss.totoroExposures if exp not in maxSet.totoroExposures
]
toAdd.append(Set.fromExposures(missingExposure))
for ss in toRemove:
sets.remove(ss)
for ss in toAdd:
sets.append(ss)
return sets
|
fcb0716c0d798999f9572344bdce042bedaec28c
| 3,648,171
|
from typing import OrderedDict
def get_databases():
"""Return an ordered dict of (dbname: database). The order is
according to search preference, the first DB to contain a document
should be assumed to be the authoritative one."""
sql_dbs = [
_SQLDb(
XFormInstanceSQL._meta.db_table,
lambda id_: XFormInstanceSQL.get_obj_by_id(id_),
"XFormInstance",
lambda doc: XFormInstanceSQLRawDocSerializer(doc).data,
),
_SQLDb(
CommCareCaseSQL._meta.db_table,
lambda id_: CommCareCaseSQL.get_obj_by_id(id_),
"CommCareCase",
lambda doc: CommCareCaseSQLRawDocSerializer(doc).data,
),
_SQLDb(
SQLLocation._meta.db_table,
lambda id_: SQLLocation.objects.get(location_id=id_),
'Location',
lambda doc: doc.to_json()
),
]
all_dbs = OrderedDict()
for db in sql_dbs:
all_dbs[db.dbname] = db
couchdbs_by_name = couch_config.all_dbs_by_db_name
for dbname in sorted(couchdbs_by_name):
all_dbs[dbname] = _CouchDb(couchdbs_by_name[dbname])
return all_dbs
|
01180d4cafbcf0b1d48e7c7554069a7ee5bf1a65
| 3,648,172
|
import os
import sys
def get_app_data_path(app_name):
    """Returns the OS-specific path to Application Data for the given App.
    Creates the path if it doesn't already exist.
    NOTE: Darwin: https://developer.apple.com/reference/foundation/1414224-nssearchpathfordirectoriesindoma?language=objc
    """
    assert type(app_name) == str
    if sys.platform.startswith('darwin'):
        from AppKit import \
            NSSearchPathForDirectoriesInDomains, \
            NSApplicationSupportDirectory, \
            NSUserDomainMask  # pip install pyobjc
app_data_path = os.path.join(
NSSearchPathForDirectoriesInDomains(
NSApplicationSupportDirectory,
NSUserDomainMask,
True)[0], app_name)
elif sys.platform.startswith('win32'):
app_data_path = os.path.join(os.environ['APPDATA'], app_name)
elif sys.platform.startswith('linux') \
or sys.platform.startswith('freebsd'): # freebsd is untested
app_data_path = os.path.expanduser(os.path.join("~", "." + app_name))
else:
raise NotImplementedError("The platform, {}, is not supported."
.format(sys.platform))
if not os.path.exists(app_data_path):
os.mkdir(app_data_path)
return app_data_path
|
d95215bec859e6d9858a9c342cb26808814f9713
| 3,648,173
|
def list_examples():
"""List all examples"""
examples = ExampleModel.query()
form = ExampleForm()
if form.validate_on_submit():
example = ExampleModel(
example_name=form.example_name.data,
example_description=form.example_description.data,
added_by=users.get_current_user()
)
try:
example.put()
example_id = example.key.id()
flash(u'Example %s successfully saved.' % example_id, 'success')
return redirect(url_for('list_examples'))
except CapabilityDisabledError:
flash(u'App Engine Datastore is currently in read-only mode.', 'info')
return redirect(url_for('list_examples'))
return render_template('list_examples.html', examples=examples, form=form)
|
740e7005e1cadae22ba77095b43dadd6b4219012
| 3,648,174
|
import re
def parse_direct_mention(message_text):
"""
Finds a direct mention (a mention that is at the beginning) in message text
and returns the user ID which was mentioned. If there is no direct mention, returns None
"""
matches = re.search(_MENTION_REGEX, message_text)
# the first group contains the username, the second group contains the remaining message
return (matches.group(1), matches.group(2).strip()) if matches else (None, None)
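# Usage sketch (illustrative; assumes _MENTION_REGEX is the usual Slack pattern
# r"^<@(|[WU].+?)>(.*)"):
#   parse_direct_mention("<@U012ABC> deploy now")  -> ("U012ABC", "deploy now")
#   parse_direct_mention("no mention here")        -> (None, None)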
|
b442dc276cde0e28b56fbf855999feaeb199bff2
| 3,648,175
|
import types
def prepare_deep(schema: types.Schema, schemas: types.Schemas):
"""
Resolve $ref and merge allOf including for object properties and items.
Assume the schema is a valid JSONSchema.
Args:
schema: The schema to prepare.
schemas: The schemas from which to resolve any $ref.
Returns:
The prepared schema.
"""
schema = prepare(schema=schema, schemas=schemas)
# Resolve $ref in any properties
properties = schema.get(types.OpenApiProperties.PROPERTIES, None)
if properties is not None:
for name, prop_schema in properties.items():
properties[name] = prepare_deep(schema=prop_schema, schemas=schemas)
# Resolve $ref of any items
items_schema = peek.items(schema=schema, schemas={})
if items_schema is not None:
schema[types.OpenApiProperties.ITEMS] = prepare_deep(
schema=items_schema, schemas=schemas
)
return schema
|
e77f7f0e59a6400c2e7df78b0ec2a14bcc0b3ea6
| 3,648,176
|
def _resize_and_center_fundus(image, diameter):
"""
Helper function for scale normalizing image.
"""
copy = image.copy()
# Find largest contour in image.
contours = _find_contours(image)
# Return unless we have gotten some result contours.
if contours is None:
return None
center, radius = contours
# Calculate the min and max-boundaries for cropping the image.
x_min = max(0, int(center[0] - radius))
y_min = max(0, int(center[1] - radius))
z = int(radius*2)
x_max = x_min + z
y_max = y_min + z
# Crop the image.
copy = copy[y_min:y_max, x_min:x_max]
# Scale the image.
fx = fy = (diameter / 2) / radius
copy = cv2.resize(copy, (0, 0), fx=fx, fy=fy)
# Add padding to image.
shape = copy.shape
# Get the border shape size.
top = bottom = int((diameter - shape[0])/2)
left = right = int((diameter - shape[1])/2)
# Add 1 pixel if necessary.
if shape[0] + top + bottom == diameter - 1:
top += 1
if shape[1] + left + right == diameter - 1:
left += 1
# Define border of the image.
border = [top, bottom, left, right]
# Add border.
copy = cv2.copyMakeBorder(copy, *border,
borderType=cv2.BORDER_CONSTANT,
value=[0, 0, 0])
# Return the image.
return copy
|
f301740cec8ca423334ffd127005b90f54105ce5
| 3,648,177
|
import numpy as onp
def _pad_X_delta(X, delta, indices, padded_group_size):
"""Currently Unused."""
X_group = onp.take(X, indices, axis=0)
X_group = onp.pad(X_group, [(0, padded_group_size - X_group.shape[0]),
(0, 0)])
delta_group = onp.take(delta, indices, axis=0)
delta_group = onp.pad(delta_group, (
0,
padded_group_size - delta_group.shape[0],
))
return X_group, delta_group
|
d286b922efb6482382af8cd804c5e51b15d93088
| 3,648,178
|
def union(x, y=None):
"""Get sorted list of elements combined for two iterables."""
x, y = de_list_pair(x, y)
return sorted(list(set(x) | set(y)))
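# Usage sketch (illustrative; assumes the elided de_list_pair helper simply returns
# the two iterables, using x for both when y is None):
#   union([3, 1, 2], [2, 4])  -> [1, 2, 3, 4]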
|
c24deb82e60569196e7a2d691db192c0ffcf91dd
| 3,648,179
|
from typing import get_origin
from typing import Tuple
def is_tuple(typ) -> bool:
"""
Test if the type is `typing.Tuple`.
"""
try:
return issubclass(get_origin(typ), tuple)
except TypeError:
return typ in (Tuple, tuple)
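# Minimal usage sketch:
if __name__ == "__main__":
    print(is_tuple(Tuple[int, str]))  # True
    print(is_tuple(tuple))            # True
    print(is_tuple(list))             # False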
|
c8c75f4b1523971b20bbe8c716ced53199150b95
| 3,648,180
|
import functools
import unittest
def _skip_if(cond, reason):
"""Skip test if cond(self) is True"""
def decorator(impl):
@functools.wraps(impl)
def wrapper(self, *args, **kwargs):
if cond(self):
raise unittest.SkipTest(reason)
else:
impl(self, *args, **kwargs)
return wrapper
return decorator
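# Minimal usage sketch: the decorated test is reported as skipped because the
# condition evaluates to True for the test instance.
if __name__ == "__main__":
    class _DemoTest(unittest.TestCase):
        @_skip_if(lambda self: True, "condition holds in this sketch")
        def test_example(self):
            self.fail("never reached")
    unittest.main()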
|
4141cc1f99c84633bdf2e92941d9abf2010c11f6
| 3,648,181
|
from typing import Dict
import shutil
def _smash_all(job_context: Dict) -> Dict:
"""Perform smashing on all species/experiments in the dataset.
"""
start_smash = log_state("start smash", job_context["job"].id)
# We have already failed - return now so we can send our fail email.
if job_context['job'].success is False:
return job_context
try:
job_context['unsmashable_files'] = []
job_context['num_samples'] = 0
# Smash all of the sample sets
logger.debug("About to smash!",
dataset_count=len(job_context['dataset'].data),
job_id=job_context['job'].id)
# Once again, `key` is either a species name or an experiment accession
for key, input_files in job_context.pop('input_files').items():
job_context = _smash_key(job_context, key, input_files)
smashing_utils.write_non_data_files(job_context)
# Finally, compress all files into a zip
final_zip_base = "/home/user/data_store/smashed/" + str(job_context["dataset"].pk)
shutil.make_archive(final_zip_base, 'zip', job_context["output_dir"])
job_context["output_file"] = final_zip_base + ".zip"
except Exception as e:
logger.exception("Could not smash dataset.",
dataset_id=job_context['dataset'].id,
processor_job_id=job_context['job_id'],
num_input_files=job_context['num_input_files'])
job_context['dataset'].success = False
job_context['job'].failure_reason = "Failure reason: " + str(e)
job_context['dataset'].failure_reason = "Failure reason: " + str(e)
job_context['dataset'].save()
# Delay failing this pipeline until the failure notify has been sent
job_context['job'].success = False
job_context['failure_reason'] = str(e)
return job_context
job_context['dataset'].success = True
job_context['dataset'].save()
logger.debug("Created smash output!",
archive_location=job_context["output_file"])
log_state("end smash", job_context["job"].id, start_smash);
return job_context
|
b1ada544140c10c73e59430631e3bda67a160587
| 3,648,182
|
from pyspark.sql import Window
from pyspark.sql import functions as func
from pyspark.sql.functions import col, rand
def permute_columns(df,
column_to_order: str,
ind_permute: bool = False,
columns_to_permute: list = []):
"""
Author: Allison Wu
Description: This function permutes the columns specified in columns_to_permute
:param df:
:param column_to_order:
:param ind_permute:
:param columns_to_permute:
:return: permuted_df
"""
window = Window.partitionBy().orderBy(col(column_to_order))
window_rand = Window.partitionBy().orderBy(rand())
df = df. \
withColumn('id', func.row_number().over(window)). \
withColumn('rand_id', func.row_number().over(window_rand))
rand_df = df. \
select(['rand_id'] + columns_to_permute).\
withColumnRenamed('rand_id', 'id')
for c in columns_to_permute:
rand_df = rand_df.\
withColumnRenamed(c, f'rand_{c}')
permuted_df = df.join(rand_df, ['id'], how = 'inner').cache()
return permuted_df
|
89da810d93586d66963d9a3ee3c7d6c0960596c9
| 3,648,183
|
def reset_dismissed(institute_id, case_name):
"""Reset all dismissed variants for a case"""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
controllers.reset_all_dimissed(store, institute_obj, case_obj)
return redirect(request.referrer)
|
c16dcc7e23be620212ee6756051db6e089014678
| 3,648,184
|
import os
def create_xst_script(config):
"""
given the configuration file create a script that will
build the verilog files declared within the configuration file
Args:
        config (dictionary): configuration dictionary
Return:
(string) script file name
Raises:
Nothing
"""
xst_abs_dir = create_xst_dir(config)
flags = get_xst_flags(config)
#print "Flags: %s" % str(flags)
xst_dir = os.path.join(config["build_dir"], XST_DIR)
temp_dir = create_temp_dir(config)
project_dir = os.path.join(xst_dir, PROJECT_FILENAME)
top_module = config["top_module"]
output_file = os.path.join(xst_dir, top_module)
xst_script_fn = os.path.join(xst_abs_dir, XST_SCRIPT_FILENAME)
fp = open(xst_script_fn, "w")
fp.write("set -tmpdir \"%s\"%s" % (temp_dir, os.linesep))
fp.write("set -xsthdpdir \"%s\"%s" % (xst_dir, os.linesep))
#fp.write("set -xsthdpini \"%s\"%s" % (xst_dir, os.linesep))
fp.write("run%s" % os.linesep)
fp.write("-ifn %s%s" % (project_dir, os.linesep))
fp.write("-ofn %s%s" % (output_file, os.linesep))
fp.write("-ofmt NGC%s" % (os.linesep))
fp.write("-p %s%s" % (config["device"], os.linesep))
fp.write("-top %s%s" % (top_module, os.linesep))
coregen_files = coregen_utils.get_target_files(config)
if len(coregen_files) > 0:
fp.write("-sd %s%s" % (coregen_utils.get_coregen_dir(config, absolute = True), os.linesep))
#print "flags[lso] = %s" % str(flags["-lso"]["value"])
if ("-lso" not in flags.keys()) or (len(flags["-lso"]["value"]) == 0):
#print "creating custom lso file"
flags["-lso"]["value"] = create_lso_file(config)
for flag in flags:
if len(flags[flag]["value"]) == 0:
continue
#print "flag: %s: %s" % (flag, flags[flag]["value"])
fp.write("%s %s%s" % (flag, flags[flag]["value"], os.linesep))
fp.close()
return xst_script_fn
|
f9308dc4ace542d63ef2075a73502a5b1dd2fe97
| 3,648,185
|
def texts_from_array(x_train, y_train, x_test=None, y_test=None,
class_names = [],
max_features=MAX_FEATURES, maxlen=MAXLEN,
val_pct=0.1, ngram_range=1, preprocess_mode='standard', verbose=1):
"""
Loads and preprocesses text data from arrays.
Args:
x_train(list): list of training texts
y_train(list): list of integers representing classes
        x_test(list): list of validation/test texts
        y_test(list): list of integers representing classes
class_names (list): list of strings representing class labels
shape should be (num_examples,1) or (num_examples,)
max_features(int): max num of words to consider in vocabulary
        maxlen(int): each document can be at most <maxlen> words. 0 is used as padding ID.
ngram_range(int): size of multi-word phrases to consider
e.g., 2 will consider both 1-word phrases and 2-word phrases
limited by max_features
val_pct(float): Proportion of training to use for validation.
                        Has no effect if x_test and y_test are supplied.
preprocess_mode (str): Either 'standard' (normal tokenization) or 'bert'
tokenization and preprocessing for use with
BERT text classification model.
verbose (boolean): verbosity
"""
if not class_names:
classes = list(set(y_train))
classes.sort()
class_names = ["%s" % (c) for c in classes]
if x_test is None or y_test is None:
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=val_pct)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# return preprocessed the texts
preproc_type = tpp.TEXT_PREPROCESSORS.get(preprocess_mode, None)
    if preproc_type is None: raise ValueError('unsupported preprocess_mode')
preproc = preproc_type(maxlen,
max_features,
classes = class_names,
ngram_range=ngram_range)
trn = preproc.preprocess_train(x_train, y_train, verbose=verbose)
val = preproc.preprocess_test(x_test, y_test, verbose=verbose)
return (trn, val, preproc)
|
ecb120070ac76d21da34fd5f0d27799fe5ba093b
| 3,648,186
|
def display2(depts, level=0):
"""
[[a, 1], [b, 2], [c, 3], [d, 3], [a, 1]]
:param depts:
:return:
"""
lists = []
for d in depts:
lists.append([d, level])
children = Department.objects.filter(parent_id=d.id)
if children:
lists.extend(display2(children, level + 1))
return lists
|
6187c91dd5653ab3f7a7e68d6b811ffda2afb035
| 3,648,187
|
def _get_target_id_to_skill_opportunity_dict(suggestions):
"""Returns a dict of target_id to skill opportunity summary dict.
Args:
suggestions: list(BaseSuggestion). A list of suggestions to retrieve
opportunity dicts.
Returns:
dict. Dict mapping target_id to corresponding skill opportunity dict.
"""
target_ids = set(s.target_id for s in suggestions)
opportunity_id_to_opportunity_dict = {
opp_id: (opp.to_dict() if opp is not None else None)
for opp_id, opp in opportunity_services.get_skill_opportunities_by_ids(
list(target_ids)).items()
}
opportunity_id_to_skill = {
skill.id: skill
for skill in skill_fetchers.get_multi_skills([
opp['id']
for opp in opportunity_id_to_opportunity_dict.values()
if opp is not None])
}
for opp_id, skill in opportunity_id_to_skill.items():
if skill is not None:
opportunity_id_to_opportunity_dict[opp_id]['skill_rubrics'] = [
rubric.to_dict() for rubric in skill.rubrics]
return opportunity_id_to_opportunity_dict
|
faf708abb5876a56b06282b99c4cc7221c33c5bd
| 3,648,188
|
def default_loc_scale_fn(
is_singular=False,
loc_initializer=tf.random_normal_initializer(stddev=0.1),
untransformed_scale_initializer=tf.random_normal_initializer(
mean=-3., stddev=0.1),
loc_regularizer=None,
untransformed_scale_regularizer=None,
loc_constraint=None,
untransformed_scale_constraint=None):
"""Makes closure which creates `loc`, `scale` params from `tf.get_variable`.
This function produces a closure which produces `loc`, `scale` using
`tf.get_variable`. The closure accepts the following arguments:
dtype: Type of parameter's event.
shape: Python `list`-like representing the parameter's event shape.
name: Python `str` name prepended to any created (or existing)
`tf.Variable`s.
trainable: Python `bool` indicating all created `tf.Variable`s should be
added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
add_variable_fn: `tf.get_variable`-like `callable` used to create (or
access existing) `tf.Variable`s.
Args:
is_singular: Python `bool` indicating if `scale is None`. Default: `False`.
loc_initializer: Initializer function for the `loc` parameters.
The default is `tf.random_normal_initializer(mean=0., stddev=0.1)`.
untransformed_scale_initializer: Initializer function for the `scale`
parameters. Default value: `tf.random_normal_initializer(mean=-3.,
stddev=0.1)`. This implies the softplus transformed result has mean
approximately `0.05` and std. deviation approximately `0.005`.
loc_regularizer: Regularizer function for the `loc` parameters.
The default (`None`) is to use the `tf.get_variable` default.
untransformed_scale_regularizer: Regularizer function for the `scale`
parameters. The default (`None`) is to use the `tf.get_variable` default.
loc_constraint: An optional projection function to be applied to the
loc after being updated by an `Optimizer`. The function must take as input
the unprojected variable and must return the projected variable (which
must have the same shape). Constraints are not safe to use when doing
asynchronous distributed training.
The default (`None`) is to use the `tf.get_variable` default.
untransformed_scale_constraint: An optional projection function to be
applied to the `scale` parameters after being updated by an `Optimizer`
(e.g. used to implement norm constraints or value constraints). The
function must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are not
safe to use when doing asynchronous distributed training. The default
(`None`) is to use the `tf.get_variable` default.
Returns:
default_loc_scale_fn: Python `callable` which instantiates `loc`, `scale`
parameters from args: `dtype, shape, name, trainable, add_variable_fn`.
"""
def _fn(dtype, shape, name, trainable, add_variable_fn):
"""Creates `loc`, `scale` parameters."""
loc = add_variable_fn(
name=name + '_loc',
shape=shape,
initializer=loc_initializer,
regularizer=loc_regularizer,
constraint=loc_constraint,
dtype=dtype,
trainable=trainable)
if is_singular:
return loc, None
untransformed_scale = add_variable_fn(
name=name + '_untransformed_scale',
shape=shape,
initializer=untransformed_scale_initializer,
regularizer=untransformed_scale_regularizer,
constraint=untransformed_scale_constraint,
dtype=dtype,
trainable=trainable)
scale = (np.finfo(dtype.as_numpy_dtype).eps +
tf.nn.softplus(untransformed_scale))
return loc, scale
return _fn
|
54eeec1b5739a71473abfa5a5ffb7aa5aa572505
| 3,648,189
|
from skimage.morphology import disk
from cv2 import dilate
def db_eval_boundary(args):
"""
Compute mean,recall and decay from per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
Returns:
F (float): boundaries F-measure
P (float): boundaries precision
R (float): boundaries recall
"""
foreground_mask, gt_mask, ignore_mask, bound_th, class_id, pred_is_boundary = args
assert np.atleast_3d(foreground_mask).shape[2] == 1
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th*np.linalg.norm(foreground_mask.shape))
# print(bound_pix)
# print(gt.shape)
# print(np.unique(gt))
foreground_mask[ignore_mask] = 0
gt_mask[ignore_mask] = 0
# Get the pixel boundaries of both masks
if pred_is_boundary:
fg_boundary = foreground_mask
else:
fg_boundary = seg2bmap(foreground_mask)
gt_boundary = seg2bmap(gt_mask)
    def binary_dilation(x, d): return dilate(
        x.astype(np.uint8), d).astype(bool)
fg_dil = binary_dilation(fg_boundary, disk(bound_pix))
gt_dil = binary_dilation(gt_boundary, disk(bound_pix))
# Get the intersection
gt_match = gt_boundary * fg_dil
fg_match = fg_boundary * gt_dil
# Area of the intersection
n_fg = np.sum(fg_boundary)
n_gt = np.sum(gt_boundary)
# % Compute precision and recall
if n_fg == 0 and n_gt > 0:
precision = 1
recall = 0
elif n_fg > 0 and n_gt == 0:
precision = 0
recall = 1
elif n_fg == 0 and n_gt == 0:
precision = 1
recall = 1
else:
precision = np.sum(fg_match) / float(n_fg)
recall = np.sum(gt_match) / float(n_gt)
# Compute F measure
if precision + recall == 0:
F = 0
else:
F = 2 * precision * recall / (precision + recall)
return F, precision
|
0aa23d0d8b45681e50f3b99c72dfcb5851dd92a6
| 3,648,190
|
from math import pi
def angle_trunc(a):
"""
helper function to map all angles onto [-pi, pi]
"""
while a < 0.0:
a += pi * 2
return ((a + pi) % (pi * 2)) - pi
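# Minimal usage sketch:
if __name__ == "__main__":
    print(angle_trunc(3 * pi / 2))   # -1.5707963... i.e. -pi/2
    print(angle_trunc(-3 * pi / 2))  #  1.5707963... i.e.  pi/2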
|
36e5d97affab5f3a1155b837df9985fe26795d76
| 3,648,191
|
from typing import Optional
import pysam
def get_tag_or_default(
alignment: pysam.AlignedSegment, tag_key: str, default: Optional[str] = None
) -> Optional[str]:
"""Extracts the value associated to `tag_key` from `alignment`, and returns a default value
if the tag is not present."""
try:
return alignment.get_tag(tag_key)
except KeyError:
return default
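# Usage sketch (illustrative; `aln` is a pysam.AlignedSegment read from a BAM file):
#   get_tag_or_default(aln, "CB", default="unknown")
#   -> the read's cell-barcode tag value, or "unknown" if the CB tag is absent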
|
bf3b1495224d7409c410f96daa20af8f70a27efa
| 3,648,192
|
def vtln_warp_mel_freq(vtln_low_cutoff, vtln_high_cutoff, low_freq, high_freq,
vtln_warp_factor, mel_freq):
"""
Inputs:
vtln_low_cutoff (float): lower frequency cutoffs for VTLN
vtln_high_cutoff (float): upper frequency cutoffs for VTLN
low_freq (float): lower frequency cutoffs in mel computation
high_freq (float): upper frequency cutoffs in mel computation
vtln_warp_factor (float): Vtln warp factor
mel_freq (Tensor): given frequency in Mel
Outputs:
Tensor: mel_freq after vtln warp
"""
return mel_scale(vtln_warp_freq(vtln_low_cutoff, vtln_high_cutoff, low_freq, high_freq,
vtln_warp_factor, inverse_mel_scale(mel_freq)))
|
e6432c0298e559dc958011bdf5d52c3e92544213
| 3,648,193
|
def _squared_loss_and_spatial_grad_derivative(X, y, w, mask, grad_weight):
"""
Computes the derivative of _squared_loss_and_spatial_grad.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Design matrix.
y : ndarray, shape (n_samples,)
Target / response vector.
w : ndarray shape (n_features,)
        Unmasked, ravelized weights map.
    mask : ndarray of booleans
        Mask defining the image support of w (image_buffer[mask] = w).
    grad_weight : float
        l1_ratio * alpha
Returns
-------
ndarray, shape (n_features,)
Derivative of _squared_loss_and_spatial_grad function.
"""
data_section = np.dot(X, w) - y
image_buffer = np.zeros(mask.shape)
image_buffer[mask] = w
return (np.dot(X.T, data_section)
- grad_weight * _div(_gradient(image_buffer))[mask])
|
05ce984609f5d31fda33d7fb0263ecc056c2f0ed
| 3,648,194
|
import ctypes
def destructor(cfunc):
"""
Make a C function a destructor.
Destructors accept pointers to void pointers as argument. They are also wrapped as a staticmethod for usage in
classes.
:param cfunc: The C function as imported by ctypes.
:return: The configured destructor.
"""
cfunc.argtypes = [ctypes.POINTER(ctypes.c_void_p)]
cfunc.restype = None
return staticmethod(cfunc)
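# Usage sketch (hypothetical library and symbol names, shown for illustration only):
#   lib = ctypes.CDLL("libexample.so")
#   class Handle:
#       # C signature assumed: void example_destroy(void **handle)
#       _destroy = destructor(lib.example_destroy)
#   Handle._destroy(ctypes.byref(handle_ptr))  # frees and clears the handle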
|
05abd181649a2178d4dce704ef93f61eb5418092
| 3,648,195
|
def invalid_auth_header(jwt):
"""Produce invalid JWT tokens for use in tests."""
return {'Authorization': 'Bearer ' + jwt.create_jwt(claims=TestJwtClaims.invalid, header=JWT_HEADER)}
|
91f82b4a9be3740e115da4182325c71fa84440b7
| 3,648,196
|
def update_file_info_in_job(job, file_infos):
"""
Update the 'setup.package.fileInformations' data in the JSON to append new file information.
"""
for file_info in file_infos:
try:
job['setup']['package']['fileInformations'].append(file_info)
except (KeyError, TypeError, AttributeError):
# If we get here, 'setup.package.fileInformations' does not exist yet.
print('Job file input is missing required setup.package.fileInformations data.')
exit(1)
return job
|
9902173548d72fcd35c8f80bb44b59aac27d9401
| 3,648,197
|
def _FirstStatementsInScriptElements(contents):
"""Returns a list of first statements found in each <script> element."""
soup = parse_html.BeautifulSoup(contents)
script_elements = soup.find_all('script', src=None)
return [_FirstStatement(e.get_text()) for e in script_elements]
|
7b2a3bddfa63a6ab4765906862037adf786c253a
| 3,648,198
|
import numpy as np
from PIL import Image as pil_img
def load_image(input_file_path):
"""
Load the 'input_file_path' and return a 2D numpy array of the image it contains.
"""
image_array = np.array(pil_img.open(input_file_path).convert('L'))
return image_array
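# Usage sketch ('photo.png' is a placeholder path):
#   img = load_image("photo.png")
#   img.shape  -> (height, width); 8-bit grayscale values from PIL's 'L' conversion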
|
b5783b9bcca55be355a91c6e9e2d2fcd09d1989b
| 3,648,199
|