content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def rhand(x, y, z, iopt, parmod, exname, inname):
    """Compute the right-hand-side vector of the geomagnetic field line
    equation (a subsidiary routine for the field-line tracer ``step``).

    :param x,y,z: GSM coordinates at which the field is evaluated.
    :param iopt: option flag forwarded to the external model.
    :param parmod: parameter array forwarded to the external model.
    :param exname: name of the subroutine for the external field.
    :param inname: name of the subroutine for the internal field.
    :return: (r1, r2, r3) -- unit field direction scaled by ds3.
    """
    # common /geopack1/ a(15),psi,aa(10),ds3,bb(8)
    global a, psi, aa, ds3, bb
    # Total field = external (magnetospheric) + internal (main) field.
    ex_bx, ex_by, ex_bz = call_external_model(exname, iopt, parmod, psi, x, y, z)
    in_bx, in_by, in_bz = call_internal_model(inname, x, y, z)
    total_bx = ex_bx + in_bx
    total_by = ex_by + in_by
    total_bz = ex_bz + in_bz
    # Normalize by |B| and scale by the step parameter ds3.
    scale = ds3 / np.sqrt(total_bx ** 2 + total_by ** 2 + total_bz ** 2)
    return total_bx * scale, total_by * scale, total_bz * scale
def unique_slug_generator_by_email(instance, new_slug=None):
    """Generate a unique slug for a Django model instance from its email.

    This is for a Django project and it assumes your instance
    has a model with a ``slug`` field and an ``email`` field.
    If the candidate slug is already taken, a 4-character random suffix is
    appended and the check is retried recursively.

    :param instance: model instance the slug is generated for.
    :param new_slug: candidate slug to test; when None the slug is derived
        by slugifying ``instance.email``.
    :return: a slug string that is unique among instances of the model.
    """
    slug = new_slug if new_slug is not None else slugify(instance.email)
    Klass = instance.__class__
    # Bug fix: uniqueness must be checked against the slug field itself.
    # The previous ``filter(email=slug)`` could never match (a slugified
    # email never equals the raw email), so duplicate slugs were returned.
    qs_exists = Klass.objects.filter(slug=slug).exists()
    if qs_exists:
        new_slug = "{slug}-{randstr}".format(
            slug=slug,
            randstr=random_string_generator(size=4)
        )
        return unique_slug_generator_by_email(instance, new_slug=new_slug)
    return slug
import logging
def register_provider(price_core_min=1):
    """Register a provider on the contract and authenticate its ORCID.

    Mines one block, registers ``accounts[0]`` as a provider with the given
    core-minute price, verifies the emitted GPG fingerprint, authenticates
    the account's ORCID (asserting that a second attempt reverts), and
    checks the stored ORCID bytes round-trip correctly.

    :param price_core_min: price per core-minute used in the price vector.
    :return: block number at which the provider was registered.
    """
    mine(1)
    owner = accounts[0]
    web3.eth.defaultAccount = owner
    price_vector = [price_core_min, price_data_transfer, price_storage, price_cache]
    tx = config.ebb.registerProvider(
        GPG_FINGERPRINT,
        provider_email,
        federation_cloud_id,
        ipfs_address,
        available_core_num,
        price_vector,
        commitmentBlockNum,
        {"from": owner},
    )
    registered_block = tx.block_number
    print(f"Block number when the provider is registered={registered_block}")
    fingerprint = remove_zeros_gpg_fingerprint(tx.events["LogProviderInfo"]["gpgFingerprint"])
    assert fingerprint == GPG_FINGERPRINT
    logging.info(f"gpg_fingerprint={fingerprint}")
    orc_id = "0000-0001-7642-0442"
    orc_id_as_bytes = str.encode(orc_id)
    assert not config.ebb.isOrcIDVerified(owner), "orc_id initial value should be false"
    config.ebb.authenticateOrcID(owner, orc_id_as_bytes, {"from": owner})
    assert config.ebb.isOrcIDVerified(owner), "isOrcIDVerified is failed"
    # orc_id should only set once for the same user
    with brownie.reverts():
        config.ebb.authenticateOrcID(owner, orc_id_as_bytes, {"from": owner})
    *_, stored = config.ebb.getRequesterInfo(owner)
    assert orc_id == stored.decode("utf-8").replace("\x00", ""), "orc_id set false"
    return registered_block
from typing import Callable
def get_signature_and_params(func: Callable):
    """Get the parameters and signature from a coroutine.

    func: Callable
        The coroutine from whom the information should be extracted.

    Returns
    -------
    Tuple[List[Union[:class:`str`, :class:`inspect.Parameter`]]]
        Signature and list of parameters of the coroutine.
    """
    if isclass(func):
        # Classes are inspected through their initializer; the bare default
        # object.__init__ carries no parameters worth reporting.
        func = getattr(func, "__init__")
        if func is object.__init__:
            return [], []
    # NOTE(review): on the empty-class path above the return types are plain
    # lists, while below ``sig`` is an inspect mapping proxy -- callers
    # apparently tolerate both; confirm before unifying.
    sig = signature(func).parameters
    params = list(sig)
    if should_pass_cls(func):
        # Drop the implicit self/cls parameter from the name list.
        del params[0]
    return sig, params
def quadratic_formula(polynomial):
    """Return the zeros of a single-variable polynomial of degree 2.

    ``polynomial.term_matrix`` is expected to hold rows of
    ``[coefficient, degree]`` after a header row, ordered by descending
    degree with zero-coefficient terms omitted.

    :param polynomial: polynomial object exposing ``term_matrix``.
    :return: both zeros as a tuple, or a single zero when repeated.
    """
    if len(polynomial.term_matrix) == 3:
        if polynomial.term_matrix[2][1] == 1:
            # a*x**2 + b*x = x*(a*x + b) -> roots 0 and -b/a
            a, b = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0]
            return 0, -b/a
        # a*x**2 + c -> roots +/- sqrt(-c/a)
        a, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0]
        return (-c/a)**.5, -(-c/a)**.5
    if len(polynomial.term_matrix) == 2:
        # Only the x**2 term is present.
        a, b, c = polynomial.term_matrix[1][0], 0, 0
    else:
        # Full quadratic; the len == 3 cases returned above, so this branch
        # covers len >= 4 (the old unreachable "elif len == 3" was removed).
        a, b, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0], polynomial.term_matrix[3][0]
    # Bug fix: the quadratic formula divides by (2*a); the previous "/2*a"
    # multiplied by a instead, giving wrong roots whenever a != 1.
    ans1 = (-b + (b**2 - 4*a*c)**.5)/(2*a)
    ans2 = (-b - (b**2 - 4*a*c)**.5)/(2*a)
    if ans1 == ans2:
        return ans1
    return ans1, ans2
def handle_domain_deletion_commands(client: Client, demisto_args: dict) -> str:
    """
    Removes domains from the inbound blacklisted list.
    :type client: ``Client``
    :param client: Client to use.
    :type demisto_args: ``dict``
    :param demisto_args: The demisto arguments.
    :return: A message which says that the domains were successfully deleted from the list.
    :rtype: ``str``
    """
    demisto_args = handle_args(demisto_args)
    requested = demisto_args.get('domain')
    # A domain argument is mandatory -- fail fast with a descriptive error.
    if not requested:
        raise DemistoException('A domain must be provided in order to remove it from the inbound blacklisted list.')
    # Normalize the argument into the comma-separated form the API expects.
    demisto_args['domain'] = ','.join(argToList(requested))
    response = client.inbound_blacklisted_domain_remove_command(demisto_args)
    # The endpoint signals success with 204 No Content.
    if response.status_code != 204:
        raise DemistoException(
            f'Failed to remove the Domains from the inbound blacklisted list [{response.status_code}]')
    return 'Domains were successfully removed from the inbound blacklisted list'
def create_l2_lag_interface(name, phys_ports, lacp_mode="passive", mc_lag=False, fallback_enabled=False,
                            vlan_ids_list=None, desc=None, admin_state="up", **kwargs):
    """
    Perform a POST call to create a Port table entry for L2 LAG interface.

    :param name: Alphanumeric name of LAG Port
    :param phys_ports: List of physical ports to aggregate (e.g. ["1/1/1", "1/1/2", "1/1/3"])
    :param lacp_mode: Should be either "passive" or "active." Defaults to "passive" if not specified.
    :param mc_lag: Boolean to determine if the LAG is multi-chassis. Defaults to False if not specified.
    :param fallback_enabled: Boolean to determine if the LAG uses LACP fallback. Defaults to False if not specified.
    :param vlan_ids_list: Optional list of integer VLAN IDs to add as trunk VLANS. Defaults to empty list if not specified.
    :param desc: Optional description for the interface. Defaults to nothing if not specified.
    :param admin_state: Optional administratively-configured state of the port.
        Defaults to "up" if not specified
    :param kwargs:
        keyword s: requests.session object with loaded cookie jar
        keyword url: URL in main() function
    :return: True if successful, False otherwise
    """
    # Bug fix: the default used to be a mutable ``[]`` shared across calls;
    # None is used as the sentinel and replaced with a fresh list here.
    if vlan_ids_list is None:
        vlan_ids_list = []
    if kwargs["url"].endswith("/v1/"):
        return _create_l2_lag_interface_v1(name, phys_ports, lacp_mode, mc_lag, fallback_enabled, vlan_ids_list, desc,
                                           admin_state, **kwargs)
    else:  # Updated else for when version is v10.04
        success = _create_l2_lag_interface(name, phys_ports, lacp_mode, mc_lag, fallback_enabled, vlan_ids_list, desc,
                                           admin_state, **kwargs)
        # v10.04 sets the MC-LAG/fallback options through a follow-up PUT.
        if mc_lag or fallback_enabled:
            return success and _update_l2_lag_interface(name, mc_lag, fallback_enabled, **kwargs)
        else:
            return success
import argparse
def main_args_parser() -> argparse.Namespace:
    """ Implements an easy user-friendly command-line interface.

    It creates three main subparsers (classify, train, eval) and adds the
    appropriate arguments to each one.

    Returns
    -------
    args: argparse.Namespace
        input arguments provided by the user
    """
    #region - MAIN parser
    parser = argparse.ArgumentParser(description='Handwritten long number recognition')
    subparsers = parser.add_subparsers(dest='mode',
                                       help='<required> program execution mode: classify with a pre-trained model or re-train the model',
                                       required=True)
    #endregion
    #region - CLASSIFY subparser
    parser_classify = subparsers.add_parser('classify',
                                            help='classify an input image using the pre-trained model',
                                            description='CLASSIFY mode: classify an input image using a pre-trained model')
    image_from = parser_classify.add_mutually_exclusive_group()
    image_from.add_argument('-f', '--folder',
                            type=str,
                            help='input image from folder, if not specified from webcam',
                            metavar='PATH_TO_IMAGE',
                            default=None)
    which_model = parser_classify.add_mutually_exclusive_group()
    which_model.add_argument('-a', '--augmentation',
                             action='store_true',
                             help='use model trained WITH data augmentation')
    which_model.add_argument('-m', '--model',
                             type=str,
                             help='user custom model from path',
                             metavar='PATH_TO_MODEL')
    parser_classify.add_argument('-d', '--device',
                                 type=str,
                                 help='(default=cpu) device to be used for computations {cpu, cuda:0, cuda:1, ...}',
                                 default='cpu')
    #endregion
    #region - TRAIN subparser
    parser_train = subparsers.add_parser('train',
                                         help='re-train the model in your machine and save it to reuse in classify phase',
                                         description='TRAIN mode: re-train the model in your machine and save it to reuse in classify phase')
    parser_train.add_argument('-a', '--augmentation',
                              action='store_true',
                              help='set data-augmentation procedure ON (RandomRotation and RandomResizedCrop)')
    parser_train.add_argument('-s', '--splits',
                              nargs=2,
                              type=float,
                              help='(default=[0.7,0.3]) proportions for the dataset split into training and validation set',
                              default=[0.7,0.3],
                              metavar=('TRAIN', 'VAL'))
    parser_train.add_argument('-b', '--batch_size',
                              type=int,
                              help='(default=64) mini-batch size',
                              default=64)
    parser_train.add_argument('-e', '--epochs',
                              type=int,
                              help='(default=10) number of training epochs',
                              default=10)
    # Bug fix: the help text used to claim "(default=10)" while the actual
    # default is 0.001.
    parser_train.add_argument('-l', '--learning_rate',
                              type=float,
                              help='(default=0.001) learning rate',
                              default=0.001)
    parser_train.add_argument('-w', '--num_workers',
                              type=int,
                              help='(default=3) number of workers',
                              default=3)
    parser_train.add_argument('-d', '--device',
                              type=str,
                              help='(default=cpu) device to be used for computations {cpu, cuda:0, cuda:1, ...}',
                              default='cpu')
    #endregion
    #region - EVAL subparser
    parser_eval = subparsers.add_parser('eval',
                                        help='evaluate the model accuracy on the test set of MNIST',
                                        description='EVAL mode: evaluate the model accuracy on the test set of MNIST')
    parser_eval.add_argument('model',
                             type=str,
                             help='<required> path to the model to be evaluated',
                             metavar='PATH_TO_MODEL')
    parser_eval.add_argument('-d', '--device',
                             type=str,
                             help='(default=cpu) device to be used for computations {cpu, cuda:0, cuda:1, ...}',
                             default='cpu')
    #endregion
    args = parser.parse_args()
    return args
def get_train_image_matrices(folder_name, num_images=4):
    """Gets image matrices for training images.

    Reads ``num_images`` consecutive ``.tif`` files, numbered starting at 4,
    from ``./input_data/train_images/<folder_name>/``.

    :param folder_name: String with name of training image folder in
        input_data/train_images directory path.
    :param num_images: Integer with number of images.
    :return: Matrices from training images.
    """
    base_path = './input_data/train_images/' + folder_name + '/'
    return [
        utils.read_image(image_name=base_path + str(image_num) + '.tif')
        for image_num in range(4, 4 + num_images)
    ]
import torch
def to_chainer_device(device):
    """Create a chainer device from a given torch device.

    Args:
        device (torch.device): Device to be converted.

    Returns:
        A ``chainer.device`` object corresponding to the given input.

    Raises:
        TypeError: if ``device`` is not a ``torch.device``.
        ValueError: if the device type is neither ``cpu`` nor ``cuda``.
    """
    if not isinstance(device, torch.device):
        raise TypeError('The argument should be torch device.')
    kind = device.type
    if kind == 'cpu':
        return chainer.get_device('@numpy')
    if kind == 'cuda':
        # torch leaves ``index`` as None for the current device; chainer
        # wants an explicit ordinal, so fall back to GPU 0.
        ordinal = device.index if device.index is not None else 0
        return chainer.get_device('@cupy:{}'.format(ordinal))
    raise ValueError('{} is not supported.'.format(kind))
def decode_region(code):
    """ Returns the region name for the given region code.
        For example: decode_region("be") => "Belgium".
        Returns None implicitly when the code is unknown.
    """
    # Bug fix: dict.iteritems() only exists in Python 2; use items().
    # The upper-casing is hoisted out of the loop as well.
    wanted = code.upper()
    for tag, (language, region, iso639, iso3166) in LANGUAGE_REGION.items():
        if iso3166 == wanted:
            return region
import zipfile
import os
def unzip_whatsapp_file(whatsapp_file):
    """
    unzips a whatsapp .zip file and returns the path of the _chat.txt file that was extracted from the zip

    Parameters
    ----------
    whatsapp_file: str
        path to a .zip file with the exported data from Whatsapp.

    Returns
    -------
    str
        path to the _chat.txt file that was extracted from the .zip
    """
    zip_dir = os.path.split(whatsapp_file)[0]
    # Use a context manager so the archive handle is closed even when
    # extraction raises (the original leaked the handle on error).
    with zipfile.ZipFile(whatsapp_file, 'r') as zip_ref:
        # NOTE(review): extractall on an untrusted archive is vulnerable to
        # path traversal ("zip slip"); confirm exports are trusted input.
        zip_ref.extractall(zip_dir)
    return os.path.join(zip_dir, '_chat.txt')
def neighbourhood_peaks(signal, n=10):
    """Computes the number of peaks from a defined neighbourhood of the signal.

    Reference: Christ, M., Braun, N., Neuffer, J. and Kempa-Liehr A.W. (2018). Time Series FeatuRe Extraction on basis
    of Scalable Hypothesis tests (tsfresh -- A Python package). Neurocomputing 307 (2018) 72-77

    Parameters
    ----------
    signal : nd-array
        Input from which the number of neighbourhood peaks is computed
    n : int
        Number of peak's neighbours to the left and to the right

    Returns
    -------
    int
        The number of peaks from a defined neighbourhood of the signal
    """
    signal = np.array(signal)
    core = signal[n:-n]
    # A sample counts as a peak when it strictly exceeds every neighbour
    # within n positions on both sides; np.roll aligns each shifted copy
    # of the signal with the core slice.
    is_peak = np.ones(core.shape, dtype=bool)
    for offset in range(1, n + 1):
        is_peak &= core > np.roll(signal, offset)[n:-n]
        is_peak &= core > np.roll(signal, -offset)[n:-n]
    return np.sum(is_peak)
def check_success(env, policy, act_noise_pct, render=False):
    """Tests whether a given policy solves an environment

    Args:
        env (metaworld.envs.MujocoEnv): Environment to test
        policy (metaworld.policies.policies.Policy): Policy that's supposed to
            succeed in env
        act_noise_pct (float): Decimal value indicating std deviation of the
            noise as a % of action space
        render (bool): Whether to render the env in a GUI

    Returns:
        (bool, int): Success flag, Trajectory length
    """
    noise_scale = act_noise_pct * (env.action_space.high - env.action_space.low)
    env.reset()
    env.reset_model()
    obs = env.reset()
    assert obs.shape == env.observation_space.shape
    num_steps = 0
    done = False
    success = False
    while not (success or done):
        action = np.random.normal(policy.get_action(obs), noise_scale)
        try:
            obs, _, done, info = env.step(action)
            if render:
                env.render()
            num_steps += 1
            success |= bool(info['success'])
        except ValueError:
            # Some envs raise on out-of-range actions; treat it as failure.
            break
    return success, num_steps
import glob
def import_data(file_regex, index_col_val=None, parse_dates=None,
                date_format=None):
    """
    takes in a regular expression describing the filepath to
    the data files and returns a pandas dataFrame

    Usage1:
        var_name = import_data.import_data("./hackathon_data/*20*.dat")
    Usage2:
        var_name = import_data.import_data("./hackathon_data/*20*.dat",
            "column to index with", "column of dates", "format of dates")
    """
    matched_files = sorted(glob.glob(file_regex))
    frames = []
    has_index = index_col_val is not None
    has_dates = parse_dates is not None and date_format is not None
    for path in matched_files:
        if has_index and has_dates:
            frame = pd.read_csv(path, parse_dates=[parse_dates],
                                index_col=index_col_val,
                                date_parser=lambda x:
                                parse_date(x, date_format))
        elif has_index:
            frame = pd.read_csv(path, index_col=index_col_val)
        elif has_dates:
            frame = pd.read_csv(path, parse_dates=[parse_dates],
                                date_parser=lambda x:
                                parse_date(x, date_format))
        else:
            frame = pd.read_csv(path)
        frames.append(frame)
    ret = pd.concat(frames)
    # Drop rows whose index failed to parse.
    ret = ret[ret.index.notnull()]
    ret.on_promotion.replace(('Y', 'N'), (1, 0), inplace=True)
    return ret
def shift_num_right_by(num: int, digits: int) -> int:
    """Shift a number to the right by discarding some digits

    We actually use string conversion here since division can provide
    wrong results due to precision errors for very big numbers. e.g.:
    6150000000000000000000000000000000000000000000000 // 1e27
    6.149999999999999e+21 <--- wrong
    """
    if digits <= 0:
        # Bug fix: discarding zero digits is a no-op. Previously
        # str(num)[:-0] produced an empty slice, so the function wrongly
        # returned 0 for digits == 0.
        return num
    try:
        return int(str(num)[:-digits])
    except ValueError:
        # this can happen if num is 0, in which case the shifting code above will raise
        # https://github.com/rotki/rotki/issues/3310
        # Also log if it happens for any other reason
        if num != 0:
            log.error(f'At shift_num_right_by() got unexpected value {num} for num')
        return 0
def index_handler(request):
    """
    List latest 6 articles, or post a new article.

    GET requests are dispatched to the article-list view and POST requests
    to the article-creation view; any other method falls through to None.
    """
    dispatch = {
        'GET': get_article_list,
        'POST': post_article,
    }
    handler = dispatch.get(request.method)
    if handler is not None:
        return handler(request)
def getRawOutput(seqs, tmpfile, command, func):
    """
    Returns output from a given subprocess command that is run iteratively
    on a given sequence list. It writes to a temporary file on the disk that
    should be specified. Also, a text processing function can be passed that
    takes the command line program's output + the original sequence and
    translates them into a list of values. This is then cast into a Pandas
    Series so care should be taken when using this function.

    :param seqs: iterable of sequence strings; missing values (NaN/None)
        yield a one-element NaN Series.
    :param tmpfile: path of the scratch file each sequence is written to.
    :param command: zero-argument callable producing the command to run.
    :param func: callable mapping (command, sequence) -> list of values.
    :return: list of pandas Series, one per input sequence.
    """
    results = []
    # TODO: parallelize this. Sequentially is too slow for >10000 runs
    # Fixes vs. the original: Python 2 ``xrange`` replaced by direct
    # iteration; the file handle is managed by ``with`` so it is closed
    # even when func() raises; the null check now happens *before* the
    # write, so "None" is no longer formatted into the scratch file;
    # the unused ``raw`` list was removed.
    with open(tmpfile, 'w') as f:
        for seq in seqs:
            # If we're missing the sequence data then just add a blank column
            if pd.isnull(seq):
                results.append(pd.Series([np.nan]))
                continue
            # Write the sequence to the temporary file (overwrite in place)
            f.seek(0)
            f.write('>seq\n%s' % (seq))
            f.truncate()
            f.flush()
            # Run command on the temporary file; Process text
            probs_list = func(command(), seq)
            results.append(pd.Series(probs_list if len(probs_list) > 0 else [np.nan]))
    return results
import sys
def is_windows() -> bool:
    """Report whether the interpreter is running on Windows.

    ``sys.platform`` is the literal string ``"win32"`` on every Windows
    build (including 64-bit), so an exact comparison is sufficient.
    """
    return "win32" == sys.platform
import os
import multiprocessing
def hillis_steeles_scan_inclusive_parallel(inp_list):
    """
    Takes in a list, performs hillis-steeles inclusive scan parallelly, returns a shared memory array with the results
    input: inp_list, 1D list of n elements.
    returns: 1D Array, newArr of n elements.
    """
    numProcessors = os.cpu_count()
    # pad_if_needed presumably pads the input to a length the algorithm
    # requires; removeLast records whether a padding element must be
    # dropped from the final result -- TODO confirm the padding rule.
    inp_list, removeLast = pad_if_needed(inp_list)
    n = len(inp_list)
    prev_list = list(inp_list)
    # Shared-memory int array ('i') so worker processes can write results
    # in place; seeded with the input values.
    newArr = multiprocessing.Array('i',n)
    newArr[:] = inp_list
    # Hillis-Steele runs ceil(log2(n)) sweeps; in sweep j each active
    # element combines with the value 2**j positions to its left.
    for j in range(int(np.ceil(np.log2(n)))):
        procArr = []
        # Partition the active range (the n - 2**j updatable elements)
        # across the available processors.
        division = get_division(n - 2**j, numProcessors)
        for i in range(len(division)-1):
            # Translate the partition boundaries into absolute indices,
            # clamped to the end of the list.
            ind1, ind2 = division[i] + 2**j - 1, division[i + 1] + 2**j
            if ind2 > len(prev_list):
                ind2 = len(prev_list)
            p = multiprocessing.Process(target=hillis_steeles_loop, args=(j, prev_list, newArr, (ind1, ind2)))
            procArr.append(p)
        for p in procArr:
            p.start()
        # Wait for every worker before starting the next sweep.
        for p in procArr:
            p.join()
        # Snapshot this sweep's output; the next sweep reads from the copy
        # so reads never race with the workers' in-place writes.
        prev_list = list(newArr)
    if removeLast:
        # NOTE(review): slicing a multiprocessing.Array yields a plain
        # list, so the padded case returns a list rather than an Array --
        # confirm callers accept both.
        newArr = newArr[:-1]
    return newArr
import math
import os
def confusion_matrices(prediction, matrix=False, save=False, OutputPath=None, name=None):
    """
    This function plot the confusion matrix at every level of the classification tree.

    :param prediction: table with ground-truth columns ``label_level_1`` ..
        ``label_level_5`` and predicted columns ``level_1`` .. ``level_5``
        (presumably a pandas DataFrame -- confirm with callers).
    :param matrix: when True, also return the five confusion matrices.
    :param save: when True, save each figure as a PDF under ``OutputPath``.
    :param OutputPath: directory the PDFs are written to (required if ``save``).
    :param name: tag inserted into the saved file names (required if ``save``).
    """
    # Levels are cast to str so that missing predictions (NaN) form their own
    # label class; math.nan is included explicitly in each label list.
    # Level 1: blank vs. not blank.
    cnf_matrix_level_1 = confusion_matrix(prediction['label_level_1'].values,prediction['level_1'].values, labels=['blank', 'not_blank'])
    # Level 2: animal vs. no animal.
    cnf_matrix_level_2 = confusion_matrix(prediction['label_level_2'].values.astype('str'),prediction['level_2'].values.astype('str'), labels=['animal', 'no_animal',math.nan])
    # Level 3: coarse classes.
    cnf_matrix_level_3 = confusion_matrix(prediction['label_level_3'].values.astype('str'),prediction['level_3'].values.astype('str'), labels=['bird', 'mammal', 'human', 'pickup', math.nan])
    # Level 4: mammal size split.
    cnf_matrix_level_4 = confusion_matrix(prediction['label_level_4'].values.astype('str'),prediction['level_4'].values.astype('str'), labels=['small_mammal', 'large_mammal', math.nan])
    # Level 5: species ids (numeric codes stringified via float, e.g. '9.0').
    cnf_matrix_level_5 = confusion_matrix(prediction['label_level_5'].values.astype('str'),prediction['level_5'].values.astype('float32').astype('str'), labels=['9.0','5.0','6.0','14.0', '0.0','7.0','11.0','1.0','4.0','12.0','13.0','15.0','16.0',math.nan])
    # Each plot call draws onto the current matplotlib figure, which is
    # captured right afterwards so it can optionally be saved below.
    plot_confusion_matrices(cnf_matrix_level_1, ['Blank', 'Not blank'], size=(8,4))
    fig1 = plt.gcf()
    plot_confusion_matrices(cnf_matrix_level_2, ['Animal', 'No animal',math.nan])
    fig2 = plt.gcf()
    plot_confusion_matrices(cnf_matrix_level_3, ['Bird', 'Mammal', 'Human', 'PickupSetup', math.nan])
    fig3 = plt.gcf()
    plot_confusion_matrices(cnf_matrix_level_4, ['small mammal', 'Large mammal', math.nan])
    fig4 = plt.gcf()
    plot_confusion_matrices(cnf_matrix_level_5, ['Mouse','squirrel','hare','hedgehog','ass','horse','fox','marten','cat', 'dog','mouflon','deer','boar', math.nan])
    fig5 = plt.gcf()
    if save:
        fig1.savefig(os.path.join(OutputPath, 'Confusion_'+name+'_level_1.pdf'), format='pdf', dpi=1000, bbox_inches="tight")
        fig2.savefig(os.path.join(OutputPath, 'Confusion_'+name+'_level_2.pdf'), format='pdf', dpi=1000, bbox_inches="tight")
        fig3.savefig(os.path.join(OutputPath, 'Confusion_'+name+'_level_3.pdf'), format='pdf', dpi=1000, bbox_inches="tight")
        fig4.savefig(os.path.join(OutputPath, 'Confusion_'+name+'_level_4.pdf'), format='pdf', dpi=1000, bbox_inches="tight")
        fig5.savefig(os.path.join(OutputPath, 'Confusion_'+name+'_level_5.pdf'), format='pdf', dpi=1000, bbox_inches="tight")
    if matrix:
        return cnf_matrix_level_1, cnf_matrix_level_2, cnf_matrix_level_3, cnf_matrix_level_4, cnf_matrix_level_5
import numpy
def back_propogation(weights, aa, zz, y1hot, lam=0.0):
    """Perform a back propogation step

    Args:
        weights (``list`` of numpy.ndarray): weights between each layer
        aa (``list`` of numpy.ndarray): activation of nodes for
            each layer. The last item in the list is the hypothesis.
        zz (``list`` of numpy.ndarray): input into nodes for each layer.
        y1hot (numpy.ndarray) 2-D array of one-hot vectors (1 per row)
        lam (``float``): regularization parameter

    Returns:
        weights_grad (``list`` of numpy.ndarray): d_J/d_weight
    """
    weights_grad = []
    m = y1hot.shape[0]
    n_layers = len(weights) + 1
    # Output-layer delta: hypothesis minus one-hot targets.
    di_plus_1 = aa[-1] - y1hot
    i = n_layers - 2
    while i > 0:
        # Propagate the delta back through layer i, gating by the sigmoid
        # derivative of that layer's pre-activation (bias column prepended).
        ones_col = numpy.ones(zz[i].shape[0])
        di = (
            di_plus_1.dot(weights[i]) *
            sigmoid_gradient(numpy.c_[ones_col, zz[i]])
        )
        di = di[:, 1:]  # drop the bias column's delta
        weights_grad.append(di_plus_1.T.dot(aa[i]))
        i -= 1
        di_plus_1 = di.copy()
    # Bug fix: use di_plus_1 here -- it always holds the current delta,
    # whereas ``di`` is unbound when the network has no hidden layer
    # (n_layers == 2) and the loop above never runs. For deeper networks
    # di_plus_1 == di at this point, so behavior is unchanged.
    weights_grad.append(di_plus_1.T.dot(aa[0]))
    # we built it backwards
    weights_grad.reverse()
    # normalize by m
    weights_grad = [wg/m for wg in weights_grad]
    # add regularization (skip first columns)
    for i in range(n_layers-1):
        weights_grad[i][:, 1:] += lam/m * weights[i][:, 1:]
    return weights_grad
async def get_reverse_objects_topranked_for_lst(entities):
    """
    get pairs that point to the given entity as the primary property
    primary properties are those with the highest rank per property
    see https://www.wikidata.org/wiki/Help:Ranking
    """
    # Certain entities make the lookup take too long, so they are removed
    # up front and answered with an empty result instead.
    skipped = {entity for entity in ['Q2'] if entity in entities}
    for entity in skipped:
        entities.remove(entity)
    # short-circuit, if nothing is left
    if not entities:
        return {entity: [] for entity in skipped}
    # run the query
    res = await runQuerySingleKey(cacheReverseObjectTop, entities, """
    SELECT ?base ?prop ?parent
    WHERE {
      hint:Query hint:optimizer "None".
      VALUES ?base { %s }
      ?parent ?prop ?base .
      [] wikibase:directClaim ?prop .
    }
    """)
    # add the skipped entities again
    for entity in skipped:
        res[entity] = []
    return res
import numpy as np
from scipy import stats as stat


def compute_confidence_interval(data, confidence=0.95):
    """
    Function to determine the confidence interval
    :param data: input data
    :param confidence: confidence level
    :return: confidence interval (half-width around the mean)
    """
    # Bug fix: this previously did ``import stat`` -- the stdlib module of
    # file-status constants, which has no ``sem``/``t``. The intended module
    # is ``scipy.stats``; ``numpy`` was also never imported.
    a = 1.0 * np.array(data)
    n = len(a)
    # Standard error of the mean, scaled by the Student-t critical value.
    se = stat.sem(a)
    h = se * stat.t.ppf((1 + confidence) / 2., n - 1)
    return h
def adjacent_powerset(iterable):
    """
    Returns every combination of elements in an iterable where elements remain ordered and adjacent.
    For example, adjacent_powerset('ABCD') returns ['A', 'AB', 'ABC', 'ABCD', 'B', 'BC', 'BCD', 'C', 'CD', 'D']

    Args:
        iterable: an iterable (must support len() and slicing, e.g. str/list/tuple)

    Returns:
        a list of element groupings
    """
    size = len(iterable)
    groupings = []
    for start in range(size):
        for stop in range(start + 1, size + 1):
            groupings.append(iterable[start:stop])
    return groupings
from typing import Iterator
from typing import Counter
import tqdm
def export_ngrams(
    docs: Iterator[str], nlp: spacy.language.Language, n: str, patterns=False
) -> Counter:
    """
    Extracts n-gram frequencies of a series of documents

    Parameters
    ----------
    docs : Iterator[str]
        An iterator of documents, e.g. abstracts
    nlp : spacy.language.Language
        A spaCy language model, e.g. en_core_web_sm
    n : str
        Order of the n-grams: either a single integer ("2") or an
        inclusive range ("2-4").
    patterns : bool, optional
        Further analysis of neighboring tokens, by default False.
        If True, a spaCy matcher will be used to filter most of the stopword
        combinations that might not be of interest.
        The matcher will also extract bigrams made up of three tokens, like
        "Alzheimer's disease" and "human-like AI", while filtering most of the
        other punctuation.

    Returns
    -------
    Counter
        n-gram frequencies

    Raises
    ------
    ValueError
        If ``n`` has a malformed range, or if 'patterns' is used for
        n-grams outside 1..5.
    """
    n_grams = Counter()
    # Parse the requested order(s): "a-b" yields the inclusive range a..b.
    if "-" in n:
        parts = n.split("-")
        if len(parts) != 2:
            raise ValueError(f"Order of n-grams has wrong format: {n}")
        # Potential ValueErrors might be raised here
        start = int(parts[0])
        end = int(parts[1])
        if start > end:
            # Just switch it instead of raising an error
            end, start = start, end
        ns = range(start, end + 1)
    else:
        ns = [int(n)]
    if patterns:
        # The matcher masks are only defined for orders 1..5.
        if not all(1 <= i <= 5 for i in ns):
            raise ValueError("Patterns can only be used for n-grams with n <= 5.")
        matcher = Matcher(nlp.vocab)
        for i in ns:
            matcher.add(f"{i}-grams", ngram_masks[i])
        for doc in tqdm(nlp.pipe(docs)):
            matches = matcher(doc)
            # Keep matches that are not flanked by a hyphen/asterisk, i.e.
            # not fragments of a larger hyphenated compound.
            # NOTE(review): ("-") is the string "-", not a 1-tuple, so
            # "text not in (\"-\")" is a substring test; it behaves like the
            # intended ("-",) for non-empty tokens -- confirm before changing.
            candidates = (
                doc[start:end].text
                for _, start, end in matches
                if (start - 1 >= 0 and doc[start - 1].text not in ("-") or start == 0)
                if (
                    end != len(doc)
                    and doc[end].text not in ("-", "*")
                    or end == len(doc)
                )
            )
            # some n-grams are part of bigger m-grams and might
            # start or end with a '-' because of that
            n_grams.update(
                c
                for c in candidates
                if not c[0] in ("-", "*", "%") and not c.endswith("-")
            )
    else:
        # Plain whitespace n-grams, computed per sentence.
        for doc in tqdm(nlp.pipe(docs)):
            for sent in doc.sents:
                for i in ns:
                    n_words = ngrams(sent.text.split(), n=i)
                    n_grams.update(list(" ".join(words) for words in n_words))
    return n_grams
import argparse
def getArguments():
    """
    Gets the name of the gameFile.

    :return: The arguments provided by the user
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        'gameFile',
        help='The ini formatted file with the game configuration',
    )
    return arg_parser.parse_args()
import torch
def solve2D_system(
pde_system, conditions, xy_min=None, xy_max=None,
single_net=None, nets=None, train_generator=None, shuffle=True, valid_generator=None,
optimizer=None, criterion=None, additional_loss_term=None, batch_size=16,
max_epochs=1000,
monitor=None, return_internal=False, return_best=False
):
"""Train a neural network to solve a PDE with 2 independent variables.
:param pde_system: The PDEsystem to solve. If the PDE is :math:`F_i(u_1, u_2, ..., u_n, x, y) = 0` where :math:`u_i` is the i-th dependent variable and :math:`x` and :math:`y` are the independent variables,
then `pde_system` should be a function that maps :math:`(u_1, u_2, ..., u_n, x, y)` to a list where the i-th entry is :math:`F_i(u_1, u_2, ..., u_n, x, y)`.
:type pde_system: function
:param conditions: The initial/boundary conditions. The ith entry of the conditions is the condition that :math:`x_i` should satisfy.
:type conditions: list[`neurodiffeq.pde.DirichletBVP2D` or `neurodiffeq.pde.IBVP1D` or `neurodiffeq.pde.NoCondition`]
:param xy_min: The lower bound of 2 dimensions, if we only care about :math:`x \\geq x_0` and :math:`y \\geq y_0`, then `xy_min` is `(x_0, y_0)`, only needed when train_generator or valid_generator are not specified, defaults to None
:type xy_min: tuple[float, float], optional
:param xy_max: The upper bound of 2 dimensions, if we only care about :math:`x \\leq x_1` and :math:`y \\leq y_1`, then `xy_min` is `(x_1, y_1)`, only needed when train_generator or valid_generator are not specified, defaults to None
:type xy_max: tuple[float, float], optional
:param single_net: The single neural network used to approximate the solution. Only one of `single_net` and `nets` should be specified, defaults to None
:param single_net: `torch.nn.Module`, optional
:param nets: The neural networks used to approximate the solution, defaults to None.
:type nets: list[`torch.nn.Module`], optional
:param train_generator: The example generator to generate 1-D training points, default to None.
:type train_generator: `neurodiffeq.pde.ExampleGenerator2D`, optional
:param shuffle: Whether to shuffle the training examples every epoch, defaults to True.
:type shuffle: bool, optional
:param valid_generator: The example generator to generate 1-D validation points, default to None.
:type valid_generator: `neurodiffeq.pde.ExampleGenerator2D`, optional
:param optimizer: The optimization method to use for training, defaults to None.
:type optimizer: `torch.optim.Optimizer`, optional
:param criterion: The loss function to use for training, defaults to None.
:type criterion: `torch.nn.modules.loss._Loss`, optional
:param additional_loss_term: Extra terms to add to the loss function besides the part specified by `criterion`. The input of `additional_loss_term` should be the same as `pde_system`
:type additional_loss_term: function
:param batch_size: The size of the mini-batch to use, defaults to 16.
:type batch_size: int, optional
:param max_epochs: The maximum number of epochs to train, defaults to 1000.
:type max_epochs: int, optional
:param monitor: The monitor to check the status of nerual network during training, defaults to None.
:type monitor: `neurodiffeq.pde.Monitor2D`, optional
:param return_internal: Whether to return the nets, conditions, training generator, validation generator, optimizer and loss function, defaults to False.
:type return_internal: bool, optional
:param return_best: Whether to return the nets that achieved the lowest validation loss, defaults to False.
:type return_best: bool, optional
:return: The solution of the PDE. The history of training loss and validation loss.
Optionally, the nets, conditions, training generator, validation generator, optimizer and loss function.
The solution is a function that has the signature `solution(xs, ys, as_type)`.
:rtype: tuple[`neurodiffeq.pde.Solution`, dict]; or tuple[`neurodiffeq.pde.Solution`, dict, dict]
"""
########################################### subroutines ###########################################
def train(train_generator, net, nets, pde_system, conditions, criterion, additional_loss_term, shuffle, optimizer):
train_examples_x, train_examples_y = train_generator.get_examples()
train_examples_x, train_examples_y = train_examples_x.reshape((-1, 1)), train_examples_y.reshape((-1, 1))
n_examples_train = train_generator.size
idx = np.random.permutation(n_examples_train) if shuffle else np.arange(n_examples_train)
train_loss_epoch = 0.0
batch_start, batch_end = 0, batch_size
while batch_start < n_examples_train:
if batch_end > n_examples_train:
batch_end = n_examples_train
batch_idx = idx[batch_start:batch_end]
xs, ys = train_examples_x[batch_idx], train_examples_y[batch_idx]
train_loss_batch = calculate_loss(xs, ys, net, nets, pde_system, conditions, criterion, additional_loss_term)
train_loss_epoch += train_loss_batch.item() * (batch_end - batch_start) / n_examples_train
optimizer.zero_grad()
train_loss_batch.backward()
optimizer.step()
batch_start += batch_size
batch_end += batch_size
return train_loss_epoch
def valid(valid_generator, net, nets, pde_system, conditions, criterion, additional_loss_term):
valid_examples_x, valid_examples_y = valid_generator.get_examples()
xs, ys = valid_examples_x.reshape((-1, 1)), valid_examples_y.reshape((-1, 1))
valid_loss_epoch = calculate_loss(xs, ys, net, nets, pde_system, conditions, criterion, additional_loss_term)
valid_loss_epoch = valid_loss_epoch.item()
return valid_loss_epoch
def calculate_loss(xs, ys, net, nets, pde_system, conditions, criterion, additional_loss_term):
us = _trial_solution_2input(net, nets, xs, ys, conditions)
Fuxys = pde_system(*us, xs, ys)
loss = sum(
criterion(Fuxy, torch.zeros_like(xs))
for Fuxy in Fuxys
)
if additional_loss_term is not None:
loss += additional_loss_term(*us, xs, ys)
return loss
###################################################################################################
if single_net and nets:
raise RuntimeError('Only one of net and nets should be specified')
# defaults to use a single neural network
if (not single_net) and (not nets):
net = FCNN(n_input_units=2, n_output_units=len(conditions), n_hidden_units=32, n_hidden_layers=1, actv=nn.Tanh)
if single_net:
# mark the Conditions so that we know which condition correspond to which output unit
for ith, con in enumerate(conditions):
con.set_impose_on(ith)
if not train_generator:
if (xy_min is None) or (xy_max is None):
raise RuntimeError('Please specify xy_min and xy_max when train_generator is not specified')
train_generator = ExampleGenerator2D((32, 32), xy_min, xy_max, method='equally-spaced-noisy')
if not valid_generator:
if (xy_min is None) or (xy_max is None):
raise RuntimeError('Please specify xy_min and xy_max when valid_generator is not specified')
valid_generator = ExampleGenerator2D((32, 32), xy_min, xy_max, method='equally-spaced')
if (not optimizer) and single_net: # using a single net
optimizer = optim.Adam(single_net.parameters(), lr=0.001)
if (not optimizer) and nets: # using multiple nets
all_parameters = []
for net in nets:
all_parameters += list(net.parameters())
optimizer = optim.Adam(all_parameters, lr=0.001)
if not criterion:
criterion = nn.MSELoss()
loss_history = {'train': [], 'valid': []}
if return_best:
valid_loss_epoch_min = np.inf
solution_min = None
for epoch in range(max_epochs):
train_loss_epoch = train(train_generator, single_net, nets, pde_system, conditions, criterion, additional_loss_term, shuffle, optimizer)
loss_history['train'].append(train_loss_epoch)
valid_loss_epoch = valid(valid_generator, single_net, nets, pde_system, conditions, criterion, additional_loss_term)
loss_history['valid'].append(valid_loss_epoch)
if monitor and epoch % monitor.check_every == 0:
monitor.check(single_net, nets, conditions, loss_history)
if return_best and valid_loss_epoch < valid_loss_epoch_min:
valid_loss_epoch_min = valid_loss_epoch
solution_min = Solution(single_net, nets, conditions)
if return_best:
solution = solution_min
else:
solution = Solution(single_net, nets, conditions)
if return_internal:
internal = {
'single_net': single_net,
'nets': nets,
'conditions': conditions,
'train_generator': train_generator,
'valid_generator': valid_generator,
'optimizer': optimizer,
'criterion': criterion
}
return solution, loss_history, internal
else:
return solution, loss_history | f9763819a3df3477df88dea395c45d7a357c25c7 | 30,627 |
def model_criterion(preds, labels):
    """
    Cross-entropy training criterion.

    :param preds: raw model logits, shape (batch, n_classes)
    :param labels: integer class targets, shape (batch,)
    :return: scalar cross-entropy loss tensor
    """
    criterion = nn.CrossEntropyLoss()
    return criterion(preds, labels)
def get_dbmapping(syn: Synapse, project_id: str) -> dict:
    """Look up the database mapping table attached to a project.

    Args:
        syn: Synapse connection
        project_id: Synapse id of the project holding the mapping

    Returns:
        dict with keys ``synid`` (Synapse id of the mapping table) and
        ``df`` (the mapping table as a pd.DataFrame)
    """
    project = syn.get(project_id)
    # The project annotation "dbMapping" holds the table's Synapse id.
    mapping_synid = project.annotations.get("dbMapping", "")[0]
    mapping_df = get_syntabledf(syn, f'select * from {mapping_synid}')
    return {'synid': mapping_synid, 'df': mapping_df}
def end_position(variant_obj):
    """Return the inclusive end coordinate of a variant.

    The end spans the longer of the reference and alternative alleles,
    counted from ``variant_obj['position']``.
    """
    ref_len = len(variant_obj['reference'])
    alt_len = len(variant_obj['alternative'])
    return variant_obj['position'] + max(ref_len, alt_len) - 1
def cast_to_server(server_params, topic, msg):
    """
    Invoke a remote method that does not return anything

    :param server_params: connection parameters identifying the target server
    :param msg: message payload to send
    :param topic: topic the message is cast on
    :return: whatever the underlying RPC implementation's fire-and-forget
        ``cast_to_server`` returns (typically None)
    """
    return _get_impl().cast_to_server(cfg.CONF, server_params, topic, msg)
def get_classification_systems():
    """Retrieve all classification systems available in service.

    Queries every ``LucClassificationSystem`` row and serializes the result
    with ``ClassificationSystemSchema`` (``many=True`` -> list of dicts).
    """
    system = db.session.query(LucClassificationSystem).all()
    return ClassificationSystemSchema().dump(system, many=True)
def read_annotations(filename, tagset, labeled):
    """Read a TSV annotation file into BERT-style sentence lists.

    Each input line is ``token<TAB>tag``; blank lines separate sentences.
    Every sentence is wrapped in ``[CLS]``/``[SEP]`` sentinel entries whose
    label is -100 (the "ignore" label).

    :param filename: path to the TSV file
    :param tagset: mapping from tag string to integer label id
    :param labeled: if False, tokens get label 0 and the tag column is ignored
    :return: list of sentences; each sentence is a list of
             [token, label, sentenceID, filename] entries
             (sentinels use sentenceID=-1 and filename=None)
    """
    def _sentinel(token):
        # [CLS]/[SEP] rows carry the ignore-label and dummy metadata.
        return [token, -100, -1, -1, None]

    sentences = []
    sentence = [_sentinel("[CLS]")]
    sentence_id = 0
    with open(filename, encoding="utf-8") as f:
        for line in f:
            # (A previous `len(line) > 0` guard was removed: iterating a file
            # never yields an empty string.)
            if line == '\n':
                # Blank line: close the current sentence, start a new one.
                sentence_id += 1
                sentence.append(_sentinel("[SEP]"))
                sentences.append(sentence)
                sentence = [_sentinel("[CLS]")]
            else:
                cols = line.rstrip().split('\t')
                # Tag column is only consulted when labels are requested.
                label = tagset[cols[1]] if labeled else 0
                sentence.append([cols[0], label, sentence_id, filename])
    # Keep a trailing sentence only if it contains real tokens.
    sentence.append(_sentinel("[SEP]"))
    if len(sentence) > 2:
        sentences.append(sentence)
    return sentences
def find_rmse(data_1, data_2, ax=0):
    """
    Root-mean-square error between two arrays.

    Inputs
    ------
    data_1 (np.array)
    data_2 (np.array)
    ax     (int) axis (or axes) to average over

    Outputs
    -------
    RMSE between data_1 and data_2 along ``ax``
    """
    squared_error = np.square(data_1 - data_2)
    return np.sqrt(np.mean(squared_error, axis=ax))
def validate_tag_update(update):
    """
    Property: ResourceUpdateConstraint.TagUpdateOnProvisionedProduct

    Validate that ``update`` is an allowed tag-update value and return it
    unchanged; raise ValueError otherwise.
    """
    allowed = ("ALLOWED", "NOT_ALLOWED")
    if update in allowed:
        return update
    raise ValueError("{} is not a valid tag update value".format(update))
def bollinger_band(df: pd.DataFrame, window: int = 20, window_dev: int = 2) -> pd.DataFrame:
    """Compute Bollinger Bands from OHLC data.

    Adds ``typical_price``, the middle band ``BOLA`` (rolling mean), and the
    upper/lower bands ``BOLU``/``BOLD`` (mean +/- ``window_dev`` population
    standard deviations) to a copy of ``df``.
    """
    out = df.copy()
    out["typical_price"] = (df["close"] + df["low"] + df["high"]) / 3
    rolling = out["typical_price"].rolling(window=window)
    middle = rolling.mean()
    # Population standard deviation (ddof=0), as in the classic definition.
    deviation = window_dev * rolling.std(ddof=0)
    out["BOLA"] = middle
    out["BOLU"] = middle + deviation
    out["BOLD"] = middle - deviation
    return out
def close_corner_contour(contour: np.ndarray, shape: tuple) -> np.ndarray:
    """Close a contour that wraps around an image corner.

    A contour covering a corner cannot be closed simply by joining its first
    and last points, because the corner area would be missed. If the contour
    touches two adjacent image edges, the matching corner point is appended;
    otherwise the contour is returned unchanged.

    Parameters
    ----------
    contour : (n,2) np.ndarray
        List of coordinates describing a contour.
    shape : tuple
        Shape of the source image, used to locate the far edges.

    Returns
    -------
    contour : (n+1,2) or (n,2) np.ndarray
    """
    x_lo, y_lo = contour.min(axis=0)
    x_hi, y_hi = contour.max(axis=0)
    x_edge, y_edge = np.array(shape) - 1

    touches_left = (x_lo == 0)
    touches_right = (x_hi == x_edge)
    touches_bottom = (y_lo == 0)
    touches_top = (y_hi == y_edge)

    # Branch order matches the original: bottom-left wins over other corners.
    if touches_bottom and touches_left:
        corner = (0, 0)
    elif touches_top and touches_left:
        corner = (0, y_edge)
    elif touches_top and touches_right:
        corner = (x_edge, y_edge)
    elif touches_bottom and touches_right:
        corner = (x_edge, 0)
    else:
        # Not a corner contour: nothing to add.
        return contour
    return np.vstack([contour, corner])
import os
import argparse
def writable_prefix(prefix):
    """
    Checks if this prefix is writable and exists.

    A prefix without a directory component (e.g. ``"out"``) is resolved
    against the current working directory.

    :param prefix: str - prefix to check
    :raises argparse.ArgumentTypeError: if the prefix's directory does not
        exist or is not writable
    :return: str - prefix
    """
    # os.path.dirname("") is "" for a bare prefix; fall back to the cwd so
    # bare prefixes are not wrongly rejected as "directory does not exist".
    directory = os.path.dirname(prefix) or os.curdir
    if not os.path.exists(directory):
        error = "Output directory %s does not exist (%s)" % (directory, prefix)
        raise argparse.ArgumentTypeError(error)
    if not os.access(directory, os.W_OK):
        error = "Output directory %s is not writable (%s)" % (directory, prefix)
        raise argparse.ArgumentTypeError(error)
    return prefix
def simulate_spatial_ratiometric_reading(
    do, temperature, sealed_patch_do=0, sealed_patch_kwargs={}, unsealed_patch_kwargs={}
):
    """Simulate a "spatial ratiometric" reading using a sealed DO patch as
    the ratiometric reference.

    Args:
        do: Dissolved Oxygen partial pressure in mmHg in the unsealed patch
        temperature: Temperature in degrees Celsius
        sealed_patch_do: Optional (default=0). DO partial pressure in mmHg in
            the sealed patch
        sealed_patch_kwargs: Optional extra args for the sealed-patch reading
        unsealed_patch_kwargs: Optional extra args for the unsealed-patch reading

    Returns:
        Ratio of the normalized optical readings: unsealed / sealed.
    """
    numerator = get_optical_reading_normalized(do, temperature, **unsealed_patch_kwargs)
    denominator = get_optical_reading_normalized(sealed_patch_do, temperature, **sealed_patch_kwargs)
    return numerator / denominator
import json
async def create_pool(uri, **kwargs) -> asyncpg.pool.Pool:
    """Creates a connection pool to the specified PostgreSQL server.

    Registers binary codecs so ``jsonb`` columns are transparently converted
    to/from Python objects on every pooled connection.

    :param uri: connection string 'postresql://user:password@host/database'
    :param kwargs: extra options forwarded to ``asyncpg.create_pool``
    :return: the created pool, or None if connecting failed (errors are
        logged, not raised)
    """
    # jsonb binary wire format: one version byte (0x01) followed by UTF-8 JSON.
    def _encode_jsonb(value):
        return b'\x01' + json.dumps(value).encode('utf-8')

    def _decode_jsonb(value):
        return json.loads(value[1:].decode('utf-8'))

    # Run once per new pooled connection to install the jsonb codecs.
    async def init(con):
        await con.set_type_codec('jsonb', schema='pg_catalog', encoder=_encode_jsonb, decoder=_decode_jsonb,
                                 format="binary")
    try:
        log.debug("Creating connection pool")
        pool = await asyncpg.create_pool(uri, init=init, **kwargs)
    except ValueError:
        log.error("PostgreSQL error: Invalid URI, check postgresql.txt. "
                  "Format must be 'postresql://user:password@host/database'")
    except asyncpg.PostgresError as e:
        log.error(f"PostgreSQL error: {e}")
    except TimeoutError:
        log.error("PostgreSQL error: Connection timed out.")
    except Exception as e:
        log.error(f"Unexpected error: {e.__class__.__name__}: {e}")
    else:
        return pool
from .interactive._iplot_state import iplot_state
from ._state_visualization import plot_state as plot
from .interactive._iplot_state import iplot_state
from ._state_visualization import plot_state as plot
def plot_state(rho, method='city', filename=None, options=None, mode=None,
               show=False):
    """Plot a quantum state.

    This function provides several methods to plot a quantum state. There are
    two rendering backends either done in python using matplotlib or using js
    in a jupyter notebook using an externally hosted graphing library. To use
    the js you need to be running in jupyter and have network connectivity to
    the external server where the js library is hosted.

    Args:
        rho (ndarray): statevector or density matrix representation of a
            quantum state
        method (str): The plotting method to use. Valid choices are:
            - 'city': Plots the cityscape, two 3d bargraphs of the mixed state
              rho) of the quantum state. This is the default.
            - 'paulivec': Plot the paulivec representation, a bar graph of the
              mixed state rho over the pauli matrices, of a quantum state
            - 'qsphere': Plot the qsphere representation of the quantum state
            - 'bloch': Plot the bloch vector for each qubit in the quantum state
            - 'wigner': Plot the equal angle slice spin Wigner function of an
              arbitrary quantum state.
        filename (str): If using the `mpl` mode save the output visualization
            as an image file to this path
        options (dict): An dict with options for visualization in `interactive`
            mode. The valid fields are:
            - width (int): graph horizontal size, must be specified with
              height to have an effect
            - height (integer): graph vertical size, must be specified with
              width to have an effect
            - slider (bool): activate slider (only used for the `paulivec`
              method)
        mode (str): The visualization mode to use, either `mpl` or
            `interactive`. Interactive requires running in jupyter and external
            network connectivity to work. By default this will use `mpl` unless
            you are running in jupyter and you have external connectivity.
        show (bool): If set to true the rendered image will open in a new
            window (mpl only)

    Returns:
        None: If used in interactive mode there is no return
        matplotlib.Figure: If used in mpl mode the matplotlib.Figure of the
        histogram will be returned.

    Raises:
        VisualizationError: If invalid mode is specified
        ImportError: If matplotlib is used but it's not installed or configured
    """
    fig = None
    if not mode:
        # No mode requested: prefer the interactive backend when available.
        if INTERACTIVE:
            iplot_state(rho, method=method, options=options)
        elif HAS_MATPLOTLIB:
            fig = plot(rho, method=method, filename=filename, show=show)
        else:
            raise ImportError(_MSG % "plot_state")
    else:
        if mode == 'interactive':
            iplot_state(rho, method=method, options=options)
        elif mode == 'mpl':
            if HAS_MATPLOTLIB:
                fig = plot(rho, method=method, filename=filename, show=show)
            else:
                raise ImportError(_MSG % "plot_state")
        else:
            # BUG FIX: the '%s' placeholder was never substituted; format in
            # the offending mode so the error message is actionable.
            raise VisualizationError(
                "Invalid mode: %s, valid choices are 'interactive' "
                "or 'mpl'" % mode)
    if HAS_MATPLOTLIB:
        if fig:
            # NOTE(review): the figure is closed before being returned,
            # presumably to avoid duplicate inline display -- confirm.
            plt.close(fig)
    return fig
from datetime import datetime
def tick_format(ticktime):
    """
    Format an ISO-8601 tick timestamp (e.g. '2021-03-04T05:06:07.000Z')
    as 'HH:MM:SS UTC Weekday DD Month'.
    """
    parsed = datetime.strptime(ticktime, '%Y-%m-%dT%H:%M:%S.%fZ')
    return parsed.strftime("%H:%M:%S UTC %A %d %B")
def add_finite_filter_to_scorer(score_func):
    """Wrap a scorer so records with non-finite y_true are dropped first.

    sklearn scorers (and others) can't handle zero-length arrays; if nothing
    survives the filtering, the wrapped scorer returns None instead of calling
    through.

    :param score_func: function mapping (y_true, y_pred, **kwargs) to a number
    :return: scorer that ignores NA / infinite elements of y_true
    """
    def filtered_scorer(y_true, y_pred, **kwargs):
        kept_true, kept_pred = valid_elements_for_evaluation(y_true, y_pred)
        if len(kept_true) == 0:
            # Nothing left to score.
            return None
        return score_func(kept_true, kept_pred, **kwargs)
    return filtered_scorer
import gc
def lsst_fit(lc, grp):
    """Take full mock LC and SDSS cadence to find best_fit params.

    Args:
        lc: Kali LC object, full mock LC.
        grp: HDF5 group storing the MCMC chains.

    Returns:
        pd.DataFrame of best-fit CARMA(1,0) parameters per (ra, dec, band),
        with a 'ref2chain' column of HDF5 references to the stored chains.

    NOTE(review): depends on module-level names (maf, bands, cad_min,
    cad_max, nsteps, nwalkers, dtype) -- confirm they are defined first.
    """
    best_param = []  # store best-fit params
    ref_ls = []
    task = kali.carma.CARMATask(1, 0, nsteps=nsteps, nwalkers=nwalkers, nthreads=1)
    for cad_idx in range(cad_min, cad_max):
        # for new maf output
        cad = maf['cad'][cad_idx]
        ra = maf['ra'][cad_idx]
        dec = maf['dec'][cad_idx]
        # loop over required bands
        for band in bands:
            # start fitting
            task.clear()
            # Down-sample the full LC to this band's cadence before fitting.
            lc_down = lsstlc(ra, dec, cad[cad['filter'] == band]['expDate'], lc, fix_dt=True, band=band)
            task.fit(lc_down)
            # fitted params and chains to array and pass back
            fit = list(task.bestTau)
            fit.append(band)
            fit.append(ra)
            fit.append(dec)
            best_param.append(fit)
            mcmc_rec = np.rec.array([task.LnPosterior, task.Chain[0], task.Chain[1], task.rootChain[0], task.rootChain[1]], dtype=dtype)
            # create hdf5 dataset given id as combination of ra, dec and band
            dset = grp.create_dataset('{}_{}_{}'.format(ra, dec, band), dtype=dtype, data=mcmc_rec, shape=())
            # create reference to this dataset and store in para_fit dataframe
            ref_ls.append(dset.ref)
    df_p = pd.DataFrame(best_param, columns=['tau', 'sigma', 'band', 'ra', 'dec'])
    df_p['ref2chain'] = ref_ls
    # flush data into file
    grp.file.flush()
    # Free the (potentially large) chain arrays before returning.
    gc.collect()
    return df_p
def getStudioModeStatus():
    """
    Indicates if Studio Mode is currently enabled.

    :return: request payload built by ``__createJSON`` for the
        "GetStudioModeStatus" call (takes no parameters).
    """
    return __createJSON("GetStudioModeStatus", {})
from typing import Tuple
def calc_long_short_prec(
    pred: pd.Series, label: pd.Series, date_col="datetime", quantile: float = 0.2, dropna=False, is_alpha=False
) -> Tuple[pd.Series, pd.Series]:
    """
    calculate the precision for long and short operation

    :param pred: prediction scores; index is **pd.MultiIndex** named
        **[datetime, instruments]**.
    :param label: realized labels with the same index layout as ``pred``.
    :param date_col: name of the date index level to group by.
    :param quantile: fraction of instruments taken on each side per date.
    :param dropna: if True, drop rows where pred or label is NaN first.
    :param is_alpha: if True, demean ``label`` within each date before
        scoring (i.e. treat labels as alphas relative to the cross-section).

    Returns
    -------
    (pd.Series, pd.Series)
        long precision and short precision in time level
    """
    if is_alpha:
        # NOTE(review): Series.mean(level=...) is deprecated in newer pandas
        # (use .groupby(level=...).transform('mean')) -- confirm version.
        label = label - label.mean(level=date_col)
    # Need at least 1/quantile distinct instruments to form a quantile bucket.
    if int(1 / quantile) >= len(label.index.get_level_values(1).unique()):
        raise ValueError("Need more instruments to calculate precision")
    df = pd.DataFrame({"pred": pred, "label": label})
    if dropna:
        df.dropna(inplace=True)
    group = df.groupby(level=date_col)
    # Number of instruments selected per date on each side.
    N = lambda x: int(len(x) * quantile)
    # find the top/low quantile of prediction and treat them as long and short target
    long = group.apply(lambda x: x.nlargest(N(x), columns="pred").label).reset_index(level=0, drop=True)
    short = group.apply(lambda x: x.nsmallest(N(x), columns="pred").label).reset_index(level=0, drop=True)
    groupll = long.groupby(date_col)
    # Precision = fraction of picks whose label has the expected sign.
    l_dom = groupll.apply(lambda x: x > 0)
    l_c = groupll.count()
    groups = short.groupby(date_col)
    s_dom = groups.apply(lambda x: x < 0)
    s_c = groups.count()
    return (l_dom.groupby(date_col).sum() / l_c), (s_dom.groupby(date_col).sum() / s_c)
def prepare_tuple_argument(arg, n, arg_name, validate_args=False):
    """Helper which processes `Tensor`s to tuples in standard form.

    Broadcasts ``arg`` (which must have size 1 or ``n``) to length ``n`` and
    returns it as a list of ``n`` scalar Tensors.

    Args:
      arg: Tensor-like argument of size 1 or ``n``.
      n: target length (e.g. the rank of a convolution).
      arg_name: name used in error messages.
      validate_args: if True and the size is not statically known, add a
        runtime assertion instead of failing silently.

    Returns:
      List of ``n`` scalar Tensors.

    Raises:
      ValueError: if the statically-known size of ``arg`` is neither 1 nor
        ``n``.
    """
    arg_size = ps.size(arg)
    arg_size_ = tf.get_static_value(arg_size)
    assertions = []
    if arg_size_ is not None:
        # Size known at graph-construction time: fail fast.
        if arg_size_ not in (1, n):
            raise ValueError('The size of `{}` must be equal to `1` or to the rank '
                             'of the convolution (={}). Saw size = {}'.format(
                                 arg_name, n, arg_size_))
    elif validate_args:
        # Size only known at runtime: check it with an op, if requested.
        assertions.append(assert_util.assert_equal(
            ps.logical_or(arg_size == 1, arg_size == n),
            True,
            message=('The size of `{}` must be equal to `1` or to the rank of the '
                     'convolution (={})'.format(arg_name, n))))
    with tf.control_dependencies(assertions):
        arg = ps.broadcast_to(arg, shape=[n])
        arg = ps.unstack(arg, num=n)
    return arg
import zipfile
def isValidLibreOfficeFile(file_path):
    """
    Return true if given file is valid LibreOffice ods file containing
    manifest.xml, false otherwise.

    The archive is opened read-only: the previous append mode ('a') could
    create a missing file or modify a non-zip file as a side effect of
    merely validating it.
    """
    try:
        with zipfile.ZipFile(file_path, 'r') as open_document:
            open_document.open(DOCUMENT_MANIFEST_PATH)
        return True
    except (KeyError, zipfile.BadZipFile, FileNotFoundError):
        # Manifest missing, not a zip archive, or no such file.
        return False
def mean_absolute_percentage_error(y_true, y_pred, zeros_strategy='mae'):
    """
    MAPE with configurable behaviour around zero true values, similar to
    sklearn's mean_absolute_percentage_error.

    Strategies:
      - 'epsilon': divide by max(|y_true|, machine eps) everywhere
      - 'mae': as above, but entries where y_true < eps (note: unsigned
        comparison, so this includes all negative true values) contribute
        their plain absolute error instead of a percentage

    :param y_true: array of true values
    :param y_pred: array of predictions
    :param zeros_strategy: 'epsilon' or 'mae'
    :return: mean of the per-element errors
    """
    eps = np.finfo(np.float64).eps
    abs_err = np.abs(y_pred - y_true)
    if zeros_strategy == 'epsilon':
        per_elem = abs_err / np.maximum(np.abs(y_true), eps)
    elif zeros_strategy == 'mae':
        per_elem = abs_err / np.maximum(np.abs(y_true), eps)
        # Fall back to plain absolute error where y_true is below eps.
        per_elem = np.where(y_true < eps, abs_err, per_elem)
    else:
        raise ValueError(f'Undefined zeros_strategy {zeros_strategy}')
    return np.mean(per_elem)
import json
def __get_pretty_body__(headers, body):
    """
    Return a pretty printed body using the Content-Type header information

    :param headers: Headers for the request/response (dict)
    :param body: Body to pretty print (string)
    :return: Body pretty printed (string); returned unchanged when the
        content type is absent or unrecognised
    """
    if HEADER_CONTENT_TYPE not in headers:
        return body
    content_type = headers[HEADER_CONTENT_TYPE]
    if content_type == HEADER_REPRESENTATION_XML:
        return parseString(body).toprettyxml()
    if HEADER_REPRESENTATION_JSON in content_type:
        return json.dumps(json.loads(body), sort_keys=True, indent=4)
    return body
def bell_sigmoid(ds, a=None, bc=None, d=None, inplace=True):
    """
    Apply a fuzzy membership function to data using a bell-shaped sigmoidal
    function. Requires a low left inflection (a), a mid-point (bc), and a low
    right inflection (d) point to set the bounds in which to rescale all
    values to. Values at or closer to the bc inflection point will be
    boosted, whereas values on the right and left sides will be reduced.
    The output dataset will have values rescaled to 0-1.

    Parameters
    ----------
    ds: xarray dataset/array
        A dataset with x, y dims.
    a : int
        Lower left slope inflection point.
    bc : int
        Mid slope inflection point.
    d : int
        Lower right slope inflection point.
    inplace : bool
        If False, operate on a deep copy of ``ds``.

    Returns
    ----------
    ds : xarray dataset or array.
    """
    # check inputs: a valid bell curve requires a <= bc < d
    if a is None or bc is None or d is None:
        raise ValueError('Must provide values for a, bc and d.')
    elif a > bc or a > d:
        raise ValueError('Value for \'a\' must be less than value for \'bc\' and \'d\'.')
    elif bc >= d:
        # BUG FIX: the condition was inverted (`bc < d`), which raised on
        # every valid input; bc == d must also be rejected because the right
        # slope divides by (d - bc).
        raise ValueError('Value for \'bc\' must be less than value for \'d\'.')

    # create copy
    if not inplace:
        ds = ds.copy(deep=True)

    # create masks to handle out of bound values
    mask_lt_bc = xr.where((ds >= a) & (ds <= bc), True, False)
    mask_gt_bc = xr.where((ds > bc) & (ds <= d), True, False)

    # perform inc sigmoidal (left side of bell curve)
    left = np.cos((1 - ((ds - a) / (bc - a))) * (np.pi / 2))**2
    left = left.where(mask_lt_bc, 0.0)

    # perform dec sigmoidal (right side of bell curve)
    right = np.cos(((ds - bc) / (d - bc)) * (np.pi / 2))**2
    right = right.where(mask_gt_bc, 0.0)

    # sum the two halves; each point belongs to at most one mask
    ds = left + right
    return ds
import os
import logging
def post_binary(binary, byte_start, byte_data):
    """Accept a binary file or packet.

    Writes ``byte_data`` into the binary's backing file starting at
    ``byte_start`` and updates the model's ``upload_progress``.

    binary = binary to get the file to write to from (model with
        ``data.path``, ``total_size``, ``upload_progress``, ``save()``)
    byte_start = offset from beginning of file to begin writing
    byte_data = an iterable that contains data (byte chunks)

    Returns (done, message): done is True once the upload is complete
    ('EOF'), False with a progress message otherwise.
    """
    result = True
    message = 'EOF'
    # raise an exception if start is past end of file or return true with an
    # EOF marker
    if byte_start > binary.total_size:
        # TODO use a better exception
        raise Exception
    elif byte_start == binary.total_size:
        return result, message
    # Open file
    with open(binary.data.path, "r+b") as dest:
        bytes_written = 0
        byte_start = int(byte_start)
        dest.seek(0, os.SEEK_END)
        # seek to byte start marker
        if dest.tell() != byte_start:
            dest.seek(byte_start, os.SEEK_SET)
        # Write bytes and throw if we go past end
        for chunk in byte_data:
            logging.info("Writing chunk of size %d bytes." % len(chunk))
            dest.write(chunk)
            bytes_written += len(chunk)
            if dest.tell() > binary.total_size:
                # TODO use a better exception
                raise Exception
        dest.flush()
        # Persist progress only when it advanced (chunks may arrive out of order).
        if dest.tell() > binary.upload_progress:
            binary.upload_progress = dest.tell()
            binary.save()
        # EOF check
        if binary.upload_progress < binary.total_size:
            result = False
            # NOTE(review): uses dest.tell(), so this must stay inside the
            # `with` block (the file handle is closed outside it).
            message = ("Uploaded binary: %s, (%s/%s) pointer at %s" %
                       (binary.pk, binary.upload_progress, binary.total_size,
                        dest.tell()))
    return result, message
def get_composite(name, error=DontCatchError, error_message=None,
                  identifier=None, component_category='unknown'):
    """
    Gets a Composite Singleton

    :param:

     - `name`: name to register singleton (clients that want same singleton, use same name)
     - `error`: exception to catch (``DontCatchError`` default)
     - `error_message`: message to log on catching the error (default "<name> component error")
     - `identifier`: an identifier for the component (defaults to ``name``)
     - `component_category`: classifier for Composite.components

    :return: Composite singleton
    """
    registry = singletons.setdefault(SingletonEnum.composite, {})
    if name not in registry:
        message = error_message if error_message is not None else "{0} component error".format(name)
        ident = identifier if identifier is not None else name
        registry[name] = Composite(error=error,
                                   error_message=message,
                                   identifier=ident,
                                   component_category=component_category)
    return registry[name]
def is_list_of_float(value):
    """
    Check whether an object is a non-empty list whose elements are all floats.

    :param value: object to inspect
    :return: True for a non-empty list of floats, False otherwise
    """
    if not isinstance(value, list) or not value:
        return False
    return all(isinstance(item, float) for item in value)
def get_example_params(example_index):
    """
    Gets used variables for almost all visualizations, like the image, model etc.

    Args:
        example_index (int): Image id to use from examples.
            NOTE(review): currently unused -- the image comes from the
            module-level ``img_path`` regardless of this argument; confirm.
    returns:
        original_image (PIL.Image): Original image read from the file
        prep_img: Processed image (output of preprocess_image)
        target_class (int): Target class for the image (module-level value)
    """
    # Pick one of the examples
    # Read image
    # NOTE(review): relies on module-level img_path / target_class.
    original_image = Image.open(img_path).convert('RGB')
    # Process image
    prep_img = preprocess_image(original_image)
    # Define model
    #pretrained_model = model
    return (original_image,
            prep_img,
            target_class)
from geometrylab.geometry import Polyline
def bezier_curve(points, nTimes=500, is_poly=False, is_crv=False):
    """
    Given a set of control points, return the bezier curve defined by them.

    points should be a list of lists or tuples such as
    [[x0, y0, z0], ..., [xn, yn, zn]].

    nTimes is the number of time steps, defaults to 500.
    If is_poly, the control polygon itself is returned as a Polyline.
    If is_crv, the sampled curve is wrapped in a Polyline.

    See http://processingjs.nihongoresources.com/bezierinfo/

    Hui Note: the returned curve points run in reverse direction:
    Q[0] == points[-1]; Q[-1] == points[0]
    """
    if is_poly:
        return Polyline(points, closed=False)
    n_ctrl = len(points)
    t = np.linspace(0.0, 1.0, nTimes)
    # Bernstein basis, one row per control point.
    basis = np.array([bernstein_poly(i, n_ctrl - 1, t) for i in range(0, n_ctrl)])
    xs = np.dot(np.array([p[0] for p in points]), basis)
    ys = np.dot(np.array([p[1] for p in points]), basis)
    zs = np.dot(np.array([p[2] for p in points]), basis)
    # Flip so the curve runs from the last control point to the first.
    curve_pts = np.flip(np.c_[xs, ys, zs], axis=0)
    if is_crv:
        return Polyline(curve_pts, closed=False)
    return curve_pts
import functools
def decorator_with_keywords(func=None, **dkws):
    # NOTE: ONLY ACCEPTS KW ARGS
    """
    A decorator that can handle optional keyword arguments.

    Applied bare (``@decorator``), ``func`` is the decorated function and the
    wrapped function is returned directly. Called with keyword arguments
    (``@decorator(opt=...)``), ``func`` is None and a decorating function is
    returned instead, as expected.
    """
    def _decorate(target):
        @functools.wraps(target)
        def _wrapper(*args, **kws):
            return target(*args, **kws)
        return _wrapper

    return _decorate(func) if func else _decorate
from datetime import datetime
def parse_line(line):
    """
    Extract the timestamp, channel, username and message from a log line.

    :param line: A line from our log files.
    :return: (datetime, channel, username, message) tuple.
    """
    timestamp_raw = line.split()[0].strip()
    segments = line.split(' :')
    message = segments[-1].strip('\n')
    # segments[1] looks like 'user!host#channel'.
    parts = segments[1].split('#')
    username = parts[0].split('!')[0]
    channel = parts[1]
    timestamp = datetime.strptime(timestamp_raw, '%Y-%m-%d_%H:%M:%S')
    return timestamp, channel, username, message
def semi_lagrangian(field: GridType,
                    velocity: Field,
                    dt: float,
                    integrator=euler) -> GridType:
    """
    Semi-Lagrangian advection with simple backward lookup.

    Traces each grid point of `field` backwards along `velocity` for `dt`
    using `integrator`, then samples `field` at the traced locations.

    Args:
        field: quantity to be advected, stored on a grid (CenteredGrid or StaggeredGrid)
        velocity: vector field; need not be compatible with `field`
        dt: time increment
        integrator: ODE integrator for solving the movement

    Returns:
        Field with same sample points as `field`
    """
    backtraced = integrator(field.elements, velocity, -dt)
    sampled = reduce_sample(field, backtraced)
    return field.with_values(sampled)
def pptx_to_bbox(left, top, width, height):
    """Convert matplotlib bounding box format to pptx format.

    Parameters
    ----------
    left : float
    top : float
    width : float
    height : float

    Returns
    -------
    tuple
        (bottom, left, width, height), where bottom = top - height.
    """
    bottom = top - height
    return bottom, left, width, height
from typing import Counter
def reindex(labels):
    """
    Given a list of labels, reindex them as integers from 1 to n_labels,
    ordered by nonincreasing prevalence (the most common label becomes 1).
    """
    ranking = {
        label: rank
        for rank, (label, _count) in enumerate(Counter(labels).most_common(), start=1)
    }
    return [ranking[label] for label in labels]
def _validate_vg(module, vg):
    """
    Check the current state of volume group.

    :param module: Ansible module argument spec.
    :param vg: Volume Group name.
    :return: True (VG in varyon state) or False (VG in varyoff state) or
             None (VG does not exist), message.
    """
    lsvg_cmd = module.get_bin_path('lsvg', True)

    rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd)
    if rc != 0:
        module.fail_json(msg="Failed executing %s command." % lsvg_cmd)

    rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd)
    if rc != 0:
        module.fail_json(msg="Failed executing %s command." % lsvg_cmd)

    # BUG FIX: lsvg prints one VG name per line; the previous raw substring
    # test (`vg in output`) would match e.g. 'vg1' against 'datavg10'.
    active_vgs = current_active_vgs.split()
    all_vgs = current_all_vgs.split()

    if vg in all_vgs and vg not in active_vgs:
        msg = "Volume group %s is in varyoff state." % vg
        return False, msg
    if vg in active_vgs:
        msg = "Volume group %s is in varyon state." % vg
        return True, msg
    msg = "Volume group %s does not exist." % vg
    return None, msg
from typing import List
def news_items(news_index_page) -> List[News]:
    """Fixture providing 10 News objects attached to news_index_page
    """
    return [
        _create_news_page(f"Test News Page {index}", news_index_page)
        for index in range(0, 10)
    ]
def is_annotated(procedure):
    """Return True if procedure is annotated."""
    procedure = annotatable(procedure)
    try:
        # Both the attribute access and are_for() may raise AttributeError.
        annotations = procedure.func_annotations
        return annotations.are_for(procedure) and bool(annotations)
    except AttributeError:
        return False
import io
import os
def read_matrix(name):
    """\
    Helper function to read a matrix from /ref_matrix. The file extension .txt
    is added automatically.

    :return: A tuple of bytearrays, one per row of digits in the file
    """
    path = os.path.join(os.path.dirname(__file__), 'ref_matrix/{0}.txt'.format(name))
    rows = []
    with io.open(path, 'rt') as fh:
        for line in fh:
            rows.append(bytearray(int(ch) for ch in line if ch != '\n'))
    return tuple(rows)
def _enable_disable_pim_config(tgen, topo, input_dict, router, build=False):
"""
Helper API to enable or disable pim on interfaces
Parameters
----------
* `tgen` : Topogen object
* `topo` : json file data
* `input_dict` : Input dict data, required when configuring from testcase
* `router` : router id to be configured.
* `build` : Only for initial setup phase this is set as True.
Returns
-------
list of config
"""
config_data = []
# Enable pim on interfaces
for destRouterLink, data in sorted(topo[router]["links"].items()):
if "pim" in data and data["pim"] == "enable":
# Loopback interfaces
if "type" in data and data["type"] == "loopback":
interface_name = destRouterLink
else:
interface_name = data["interface"]
cmd = "interface {}".format(interface_name)
config_data.append(cmd)
config_data.append("ip pim")
# pim global config
if "pim" in input_dict[router]:
pim_data = input_dict[router]["pim"]
del_action = pim_data.setdefault("delete", False)
for t in [
"join-prune-interval",
"keep-alive-timer",
"register-suppress-time",
]:
if t in pim_data:
cmd = "ip pim {} {}".format(t, pim_data[t])
if del_action:
cmd = "no {}".format(cmd)
config_data.append(cmd)
return config_data | 4408ac212126895ba161f834e7e076a0c14d864f | 30,666 |
from typing import Union
def linear_interpolation_formula(
    left: Union[float, np.ndarray],
    right: Union[float, np.ndarray],
    gamma: Union[float, np.ndarray],
) -> Union[float, np.ndarray]:
    """
    Compute the linear interpolation weighted by gamma on each point of two same shape array.

    :param left: value(s) returned when gamma == 0.
    :param right: value(s) returned when gamma == 1.
    :param gamma: interpolation weight(s), typically in [0, 1].
    :return: gamma * right + (1 - gamma) * left
    """
    # Annotations use np.ndarray (the actual array type); the original
    # np.array is a factory function and not a valid type.
    return gamma * right + (1 - gamma) * left
def estimate_visib_mask_est(d_test, d_est, visib_gt, delta, visib_mode='bop19'):
    """Estimates a mask of the visible object surface in the estimated pose.

    For an explanation of why the visibility mask is calculated differently for
    the estimated and the ground-truth pose, see equation (14) and related text in
    Hodan et al., On Evaluation of 6D Object Pose Estimation, ECCVW'16.

    :param d_test: Distance image of a scene in which the visibility is estimated.
    :param d_est: Rendered distance image of the object model in the est. pose.
    :param visib_gt: Visibility mask of the object model in the GT pose (from
      function estimate_visib_mask_gt).
    :param delta: Tolerance used in the visibility test.
    :param visib_mode: See _estimate_visib_mask.
    :return: Visibility mask.
    """
    base_mask = _estimate_visib_mask(d_test, d_est, delta, visib_mode)
    # Also count pixels that are visible in the GT pose and rendered in the
    # estimated pose.
    gt_and_rendered = np.logical_and(visib_gt, d_est > 0)
    return np.logical_or(base_mask, gt_and_rendered)
import itertools
def combine_assertions(input_filename, output_filename):
    """
    Take in a tab-separated, sorted "CSV" file, indicated by
    `input_filename`, whose rows should be grouped together into assertions,
    and write a msgpack stream of assertions to `output_filename`.

    The input file should be made from multiple sources of assertions by
    concatenating and sorting them; sorting guarantees that all edges of the
    same assertion appear consecutively. Assertions with a non-positive
    weight are diverted to `<output_filename>.reject`.
    """
    def uri_of(line):
        "Group lines by their URI (their first column)."
        return line.split('\t', 1)[0]

    accepted = MsgpackStreamWriter(output_filename)
    rejected = MsgpackStreamWriter(output_filename + '.reject')
    with open(input_filename, encoding='utf-8') as stream:
        for _, lines in itertools.groupby(stream, uri_of):
            assertion = make_assertion(lines)
            if assertion is None:
                continue
            target = accepted if assertion['weight'] > 0 else rejected
            target.write(assertion)
    accepted.close()
    rejected.close()
import os
def find_testclass(package, program, testclass, file_required=False):
    """Find the relative path of the test-class file"""
    filename = f'{program.lower()}.clas.testclasses.abap'
    for directory, _, filenames in os.walk('.'):
        if filename in filenames:
            # Strip the leading "./" from the walked path.
            return os.path.join(directory, filename)[2:]
    if file_required:
        return None
    # No file on disk: fall back to the symbolic package notation.
    return '{}/{}=>{}'.format(package, program, testclass)
def sanitize_mobile_number(number):
    """Add country code and strip leading zeroes from the phone number."""
    digits = str(number).lstrip("0")
    return f"254{digits}"
def fakebaraxis(ticks, painter=fakebarpainter(),*args, **kwargs):
    """Return a PyX linear axis that can be used to make fake bar plots.
    Use "keyticks" to create the ticks expected by this function.

    :param ticks: manual ticks placed on the axis; also fixes the axis
        range to [-0.75, len(ticks) - 0.25].
    :param painter: axis painter used to draw the axis.
    :param args, kwargs: forwarded to ``axis.linear``.
    """
    # NOTE(review): the default ``fakebarpainter()`` is evaluated once at
    # definition time, so all calls share a single painter instance —
    # presumably intentional for a stateless painter, but worth confirming.
    return axis.linear(
        min=-0.75,
        max=len(ticks)-0.25,
        parter=None,  # no automatic partitioning; only manual ticks are shown
        manualticks=ticks,
        painter=painter,
        *args,
        **kwargs
    )
import csv
def get_score_sent_pairs_from_tsv(tsv_filepath, encoding="ISO-8859-1"):
    """expects tokenized sentences in tsv file!"""
    with open(tsv_filepath, encoding=encoding) as handle:
        rows = csv.reader(handle, delimiter='\t')
        # Column 0 is the score, column 1 the sentence; extra columns are ignored.
        return [[float(row[0]), row[1]] for row in rows]
def test_timings_trie(port, individual_test_timings):
    """Breaks a test name into chunks by directory and puts the test time as a value in the lowest part, e.g.
    foo/bar/baz.html: 1ms
    foo/bar/baz1.html: 3ms
    becomes
    foo: {
        bar: {
            baz.html: 1,
            baz1.html: 3
        }
    }
    """
    trie = {}
    for timing in individual_test_timings:
        milliseconds = int(1000 * timing.test_run_time)
        add_path_to_trie(timing.test_name, milliseconds, trie)
    return trie
def prop_end(wf, **kwargs):
    """Set variables needed to properly conclude a propagation run.

    Parameters
    ----------
    wf : obj
        The current WaveFront class object

    Returns
    -------
    wf.wfarr : numpy ndarray
        Wavefront array
    sampling : float
        Sampling in meters

    Other Parameters
    ----------------
    EXTRACT : int
        Returns the dx by dx pixel central portion of the wavefront.
    NOABS : bool
        If set, the complex-values wavefront field is returned. By default, the
        intensity (modulus squared) of the field is returned.
    """
    sampling = proper.prop_get_sampling(wf)

    if kwargs.get("NOABS"):
        # Keep the complex field, only re-center it.
        wf.wfarr = proper.prop_shift_center(wf.wfarr)
    else:
        # Default: return the intensity (modulus squared) of the field.
        wf.wfarr = proper.prop_shift_center(np.abs(wf.wfarr)**2)

    if "EXTRACT" in kwargs:
        extract = kwargs["EXTRACT"]
        ny, nx = wf.wfarr.shape
        # Floor division: the original "/" produced float slice indices,
        # which raises TypeError on Python 3.
        wf.wfarr = wf.wfarr[ny//2 - extract//2:ny//2 + extract//2,
                            nx//2 - extract//2:nx//2 + extract//2]

    return (wf.wfarr, sampling)
def add_others_ta(df, close, fillna=False):
    """Add others analysis features to dataframe.

    Args:
        df (pandas.core.frame.DataFrame): Dataframe base.
        close (str): Name of 'close' column.
        fillna(bool): if True, fill nan values.

    Returns:
        pandas.core.frame.DataFrame: Dataframe with new features.
    """
    close_series = df[close]
    df['others1'] = daily_return(close_series, fillna=fillna)
    df['others2'] = cumulative_return(close_series, fillna=fillna)
    return df
def _fixture_union(caller_module, name, fixtures, idstyle, scope="function", ids=fixture_alternative_to_str,
                   unpack_into=None, autouse=False, **kwargs):
    """
    Internal implementation for fixture_union: creates a parametrized fixture
    named `name` in `caller_module` whose parameter selects which of the given
    `fixtures` is active for a test.

    :param caller_module: module object that will receive the generated fixture.
    :param name: name of the union fixture to create.
    :param fixtures: tuple/set/list of fixture symbols or fixture name strings.
    :param idstyle: style used to generate test ids for the alternatives.
    :param scope: pytest fixture scope for the generated fixture.
    :param ids: callable or list used for the parametrization ids.
    :param unpack_into: optional argnames to unpack the selected fixture into.
    :param autouse: pytest autouse flag for the generated fixture.
    :param kwargs: forwarded to the fixture decorator.
    :return: the generated fixture function.
    """
    # test the `fixtures` argument to avoid common mistakes
    if not isinstance(fixtures, (tuple, set, list)):
        raise TypeError("fixture_union: the `fixtures` argument should be a tuple, set or list")
    # validate the idstyle
    idstyle = IdStyle(idstyle)
    # first get all required fixture names
    f_names = []
    for f in fixtures:
        # possibly get the fixture name if the fixture symbol was provided
        f_names.append(get_fixture_name(f) if not isinstance(f, str) else f)
    if len(f_names) < 1:
        raise ValueError("Empty fixture unions are not permitted")
    # then generate the body of our union fixture. It will require all of its dependent fixtures and receive as
    # a parameter the name of the fixture to use
    @with_signature("(%s, request)" % ', '.join(f_names))
    def _new_fixture(request, **all_fixtures):
        # When the request is not "used" (an alternative not selected for this
        # test), return the NOT_USED sentinel instead of a real fixture value.
        if not is_used_request(request):
            return NOT_USED
        else:
            alternative = request.param
            if isinstance(alternative, UnionFixtureAlternative):
                fixture_to_use = alternative.fixture_name
                # Return the value of the selected dependent fixture.
                return all_fixtures[fixture_to_use]
            else:
                raise TypeError("Union Fixture %s received invalid parameter type: %s. Please report this issue."
                                "" % (name, alternative.__class__))
    _new_fixture.__name__ = name
    # finally create the fixture per se.
    # WARNING we do not use pytest.fixture but pytest_fixture_plus so that NOT_USED is discarded
    f_decorator = pytest_fixture_plus(scope=scope,
                                      params=[UnionFixtureAlternative(_name, idstyle) for _name in f_names],
                                      autouse=autouse, ids=ids, **kwargs)
    fix = f_decorator(_new_fixture)
    # Dynamically add fixture to caller's module as explained in https://github.com/pytest-dev/pytest/issues/2424
    check_name_available(caller_module, name, if_name_exists=WARN, caller=param_fixture)
    setattr(caller_module, name, fix)
    # if unpacking is requested, do it here
    if unpack_into is not None:
        _unpack_fixture(caller_module, argnames=unpack_into, fixture=name)
    return fix
def service_class(cls):
    """
    A class decorator enabling the instances of the class to be used
    as a ``services``-provider in `JSONRpc Objects`_
    and `BSONRpc Objects`_.

    Use decorators ``request``, ``notification``, ``rpc_request`` and
    ``rpc_notification`` to expose methods for the RPC peer node.
    """
    cls._request_handlers = {}
    cls._notification_handlers = {}
    # Collect every member that was tagged by one of the exposing decorators.
    for attr_name, member in cls.__dict__.items():
        if hasattr(member, '_request_handler'):
            cls._request_handlers[attr_name] = member
        if hasattr(member, '_notification_handler'):
            cls._notification_handlers[attr_name] = member
    return cls
def lightcurveplain(request, tcs_transient_objects_id):
    """Django view rendering a transient's lightcurve as plain text.

    Args:
        request: Django HTTP request object.
        tcs_transient_objects_id: primary key of the TcsTransientObjects row;
            404 is raised when no such transient exists.

    Returns:
        HttpResponse with content type text/plain built from the
        psdb/lightcurve.txt template.
    """
    transient = get_object_or_404(TcsTransientObjects, pk=tcs_transient_objects_id)
    mjdLimit = 55347.0 # Hard wired to 31st May 2010
    # 2012-07-18 KWS Changed this code to call the custom query from a
    # dedicated file full of custom queries for lightcurves.
    recurrences = lightcurvePlainQuery(transient.id, mjdLimit = mjdLimit, djangoRawObject = CustomAllObjectOcurrencesPresentation)
    return render(request, 'psdb/lightcurve.txt',{'transient' : transient, 'table' : recurrences }, content_type="text/plain")
def locate_address(ip_list, ip_attack):
    """
    Look up the country code for an attacking IP address.

    :param ip_list: iterable of rows where row[0] is the range start,
        row[1] the range end (both integers, inclusive) and row[2] the
        country code string.
    :param ip_attack: attacking ip as an integer.
    :return: country code of the first matching range, or None when no
        range contains the address.
    """
    for row in ip_list:
        # Chained comparison expresses the inclusive range test directly;
        # the original dead `else: pass` branch is dropped.
        if row[0] <= ip_attack <= row[1]:
            return row[2]
    return None
import scipy
def invertnd(f, x, *other_vars, kind='linear', vectorized=False):
    """
    Invert a multivariate function numerically.

    Evaluates ``f`` on the full cartesian grid of ``x`` and ``other_vars``,
    then interpolates the inverse relation with ``scipy.interpolate.griddata``.

    Args:
        f: Function to invert
        x: Domain to invert the function on (range of inverted function)
        *other_vars: Domain to invert the function on (parameters of inverted function)
        kind: Specifies the kind of interpolation as a string ('linear', 'nearest', 'cubic')
            (cubic only available for 1 or 2 variables)
        vectorized: Specifies if the input function is vectorized
    Returns:
        Inverted function where the first argument corresponds to the output of the original function
    """
    n = len(x)
    # reshape_dim is reused: exactly one entry at a time is set to the axis
    # length so each variable broadcasts along its own grid axis.
    reshape_dim = np.ones(len(other_vars) + 1, dtype=int)
    reshape_dim[0] = n
    x_reshape = np.reshape(x, reshape_dim)
    reshape_dim[0] = 1
    if not np.issubdtype(x_reshape.dtype, np.number):
        raise ValueError('Input domain is not numeric')
    # Tile x across every other-variable axis to build the full grid.
    dim = [1, *(len(v) for v in other_vars)]
    x_arr = np.tile(x_reshape, dim)
    dim[0] = n
    v_arrs = []
    for i, v in enumerate(other_vars):
        reshape_dim[i + 1] = len(v)
        v_reshape = np.reshape(v, reshape_dim)
        reshape_dim[i + 1] = 1
        if not np.issubdtype(v_reshape.dtype, np.number):
            raise ValueError('Input domain is not numeric')
        # Tile this variable across all axes except its own.
        dim[i + 1] = 1
        v_arrs.append(np.tile(v_reshape, dim))
        dim[i + 1] = len(v)
    if vectorized:
        y = f(x_arr, *v_arrs)
    else:
        # Scalar f: walk the nested grid arrays and apply f point by point.
        def recursive_f(x_in, *v_in):
            if hasattr(x_in, '__iter__'):
                return [recursive_f(x_n, *v_n) for x_n, v_n in zip(x_in, zip(*v_in))]
            return f(x_in, *v_in)
        y = np.array(recursive_f(x_arr, *v_arrs))
    if not np.issubdtype(y.dtype, np.number):
        raise ValueError('Input function is not numeric')
    # Scattered interpolation data: (f(x, v...), v...) -> x
    points = np.array(list(zip(y.flat, *(v.flat for v in v_arrs))))
    values = np.array(x_arr.flat)
    def f_inverse(x_new, *v_new):
        return scipy.interpolate.griddata(points, values, (x_new, *v_new), method=kind)
    return f_inverse
def most_mentioned(msgs, limit=20):
    """Top mentions by '@' references"""
    counts = {}
    for message in msgs:
        for handle in preproc.extract_ats_from_text(message['text']):
            counts[handle] = counts.get(handle, 0) + 1
    ranked = sorted(counts.items(), key=lambda item: item[1], reverse=True)
    return ranked[:limit]
import string
def is_valid_matlab_field_label(label):
    """ Check that passed string is a valid MATLAB field label """
    letters = string.ascii_letters
    # Must begin with a letter; empty strings fail this check too.
    if not label.startswith(tuple(letters)):
        return False
    allowed = set(letters + string.digits + "_")
    return set(label) <= allowed
def identity(n, dtype=DEFAULT_FLOAT_DTYPE):
    """
    Returns the identity tensor.

    Args:
        n (int): Number of rows and columns in the output, must be larger than 0.
        dtype (Union[mstype.dtype, str], optional): Designated tensor dtype, can
            be in format of np.float32, or `float32`. Default is mstype.float32.

    Returns:
        result (Tensor): A tensor of shape (n,n). A tensor where all elements
        are equal to zero, except for the diagonal, whose values are equal to one.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.identity(2))
        [[1. 0.]
        [0. 1.]]
    """
    # Delegate to eye() after normalizing the dtype specification.
    checked_dtype = _check_dtype(dtype)
    return eye(n, dtype=checked_dtype)
def _chain_connectivity(edges, chains):
"""Returns chain connectivity treated as clustered entities represented by nodes"""
chain_connectivity = np.empty((len(edges), 2), dtype=np.int64)
starts = defaultdict(list)
for section_index, chain in enumerate(chains):
starts[chain[0][0]].append(section_index)
i_connection = 0
for section_index, chain in enumerate(chains):
section_end = chain[-1][1]
for child_section in starts[section_end]:
if section_index != child_section:
chain_connectivity[i_connection, 0] = section_index
chain_connectivity[i_connection, 1] = child_section
i_connection += 1
return chain_connectivity[:i_connection] | 5687b433a1c47a75641442aef1b8cff4c8cb4e17 | 30,685 |
def reading2celsius(self, reading):
    """Convert a raw sensor reading to degrees Celsius."""
    # Reading is scaled by 50 to Kelvin, then shifted to Celsius.
    return reading / 50 - 273.15
import sqlite3
def evaluate_csp(website_id, test_weights):
    """
    Score a site's Content-Security-Policy directives from the results.db
    sqlite database and return a per-directive weighted score dict.

    Checks:
    no fallback to default:
        base-uri
        form-action
        frame-ancestors
        report-to/uri
        sandbox
        upgrade-insecure-requests
    src:
        child-src
        connect-src
        default-src
        font-src
        frame-src - fallsback to child-src which falls back to default
        img-src
        manifest-src
        media-src
        object-src
        style-src
        script-src
        strict-dynamic
        unsafe-hashes
        worker-src
    if a check is to be done on script-src for example but it's not explicitly defined but default-src is, use the score from default-src instead
    """
    # All scores default to 0 until a directive (or its fallback) is found.
    score_dict = {'default-src': 0, 'child-src': 0, 'connect-src': 0, 'font-src': 0, 'frame-src': 0, 'img-src': 0, 'manifest-src': 0, 'media-src': 0, 'object-src': 0, 'script-src': 0, 'style-src': 0, 'worker-src': 0, 'report-to/uri': 0, 'base-uri': 0, 'form-action': 0, 'frame-ancestors': 0, 'sandbox': 0, 'upgrade-insecure-requests': 0}
    csp_data = None
    with closing(sqlite3.connect("results.db")) as connection:
        with closing(connection.cursor()) as cursor:
            # default-src must be evaluated first so later directives can
            # fall back to its score; child-src feeds frame-src the same way.
            csp_src_directives = ["default-src","child-src","connect-src","font-src","frame-src","img-src","manifest-src","media-src","object-src","script-src","style-src","worker-src"]
            csp_default_directive_score = 0
            csp_child_src_directive_score = 0
            # The site's final scheme (after redirects) affects source checks.
            cursor.execute("SELECT scheme FROM website WHERE id = ?", (website_id,))
            redirected_scheme = cursor.fetchone()
            if redirected_scheme != None:
                redirected_scheme = redirected_scheme[0]
            else:
                #Assume http
                redirected_scheme = "http"
            for directive in csp_src_directives:
                cursor.execute("SELECT csp_data FROM csp WHERE website_id = ? AND csp_type = ?", (website_id, directive))
                csp_data = cursor.fetchall()
                if len(csp_data) > 0:
                    # Directive explicitly present: score its source list.
                    result = csp_src_check(csp_data, redirected_scheme)
                    if directive == "default-src":
                        csp_default_directive_score = result
                    elif directive == "child-src":
                        csp_child_src_directive_score = result
                    score_dict[directive] = round(result * test_weights[directive], 4)
                elif directive == "frame-src":
                    # frame-src falls back to child-src (which itself may have
                    # fallen back to default-src below).
                    score_dict[directive] = round(csp_child_src_directive_score * test_weights[directive], 4)
                elif directive == "child-src":
                    score_dict[directive] = round(csp_default_directive_score * test_weights[directive], 4)
                    csp_child_src_directive_score = csp_default_directive_score
                elif directive != "default-src":
                    # Every other missing src directive falls back to default-src.
                    score_dict[directive] = round(csp_default_directive_score * test_weights[directive], 4)
            # Non-src directives: no fallback, presence alone may score 1.
            csp_directives = ["base-uri","form-action","frame-ancestors","report-to","report-uri","sandbox","upgrade-insecure-requests"]
            for directive in csp_directives:
                cursor.execute("SELECT csp_data FROM csp WHERE website_id = ? AND csp_type = ?", (website_id, directive))
                csp_data = cursor.fetchall()
                if len(csp_data) > 0:
                    result = 0
                    if directive == 'base-uri' or directive == 'form-action':
                        result = csp_src_check(csp_data, redirected_scheme)
                    elif directive == 'frame-ancestors':
                        result = csp_frame_ancestors_check(csp_data, redirected_scheme)
                    elif directive == 'report-to' or directive == 'report-uri':
                        result = 1
                    elif directive == 'sandbox':
                        result = 1
                    elif directive == 'upgrade-insecure-requests':
                        result = 1
                    if directive == 'report-to' or directive == 'report-uri':
                        # Both reporting variants share a single score slot.
                        score_dict['report-to/uri'] = round(result * test_weights['report-to/uri'], 4)
                    else:
                        score_dict[directive] = round(result * test_weights[directive], 4)
    return score_dict
def get_analysis_alias_from_metadata(eload_cfg):
    """
    Returns analysis alias only if we find a metadata spreadsheet and it has
    exactly one analysis. Otherwise provides an error message and raises an error.
    """
    spreadsheet = eload_cfg.query('submission', 'metadata_spreadsheet')
    if not spreadsheet:
        logger.error("Can't assign analysis alias: no metadata found!")
    else:
        analyses = EvaXlsxReader(spreadsheet).analysis
        if len(analyses) == 1:
            return analyses[0].get('Analysis Alias')
        if len(analyses) > 1:
            logger.error("Can't assign analysis alias: multiple analyses found in metadata!")
        else:
            logger.error("Can't assign analysis alias: no analyses found in metadata!")
    logger.error("Try running upgrade_config and passing an analysis alias explicitly.")
    raise ValueError("Can't find an analysis alias for config upgrade.")
from imcsdk.mometa.bios.BiosProfileManagement import BiosProfileManagement
from imcsdk.mometa.bios.BiosProfileManagement import BiosProfileManagementConsts


def bios_profile_backup_running(handle, server_id=1, **kwargs):
    """
    Backups up the running configuration of various bios tokens to create a
    'cisco_backup_profile'.
    Will overwrite the existing backup profile if it exists.

    Args:
        handle (ImcHandle)
        server_id (int): Id of the server to perform
                         this operation on C3260 platforms
        kwargs : Key-Value paired arguments for future use

    Returns:
        BiosProfile object corresponding to the backup profile created

    Raises:
        ImcOperationError if the backup profile is not created

    Examples:
        bios_profile_backup_running(handle, server_id=1)
    """
    # The original line-continuation import was split mid-statement (leaving a
    # stray `BiosProfileManagementConsts` expression inside the function body);
    # both imports are now complete statements above.
    mo = BiosProfileManagement(parent_mo_or_dn=_get_bios_dn(handle, server_id))
    mo.admin_action = BiosProfileManagementConsts.ADMIN_ACTION_BACKUP
    mo.set_prop_multiple(**kwargs)
    handle.set_mo(mo)
    return _get_bios_profile(handle, name='cisco_backup_profile')
def variantCombinations(items):
    """ Calculates variant combinations for given list of options. Each item in the items list represents
    unique value with it's variants.

    :param list items: non-empty list of values to be combined

    >>> c = variantCombinations([["1.1", "1.2"], ["2.1", "2.2"], ["3.1", "3.2"]])
    >>> len(c)
    8
    >>> for combination in c: print(combination)
    ['1.1', '2.1', '3.1']
    ['1.1', '2.1', '3.2']
    ['1.1', '2.2', '3.1']
    ['1.1', '2.2', '3.2']
    ['1.2', '2.1', '3.1']
    ['1.2', '2.1', '3.2']
    ['1.2', '2.2', '3.1']
    ['1.2', '2.2', '3.2']
    """
    # The original asserted `... and list`, which is always truthy (it tested
    # the builtin, not the argument) and let empty input recurse forever.
    assert isinstance(items, list) and items
    if len(items) == 1:
        result = items[0]
    else:
        result = []
        subItems = variantCombinations(items[1:])
        for masterItem in items[0]:
            for subItem in subItems:
                if isinstance(subItem, list):
                    item = [masterItem]
                    item.extend(subItem)
                    result.append(item)
                else:
                    result.append([masterItem, subItem])
    return result
import regex
def bm_regex(regex_string):
    """Compile best multiline regex."""
    # BESTMATCH + MULTILINE flags from the third-party `regex` module.
    flags = regex.B | regex.M
    return regex.compile(regex_string, flags)
def test_enable_8021q_1(monkeypatch):
    """Verify that enable_802q_1 function return exception when 802.1q is not supported by current os.
    """
    def mockreturn(command):
        # Stub: every SSH command "succeeds" with empty output, so the host
        # looks like an OS without 802.1q support.
        return CmdStatus("", "", 0)
    # monkeypatch.setattr(CLISSHNetNS, 'exec_command', mockreturn)
    lh = GenericLinuxHost(LH_CFG, OPTS)
    monkeypatch.setattr(lh.ssh, 'exec_command', mockreturn)
    # enable_8021q must raise with the exact unsupported-OS message.
    with pytest.raises(Exception) as excepinfo:
        lh.enable_8021q()
    result = "Current OS doesn't support 802.1q."
    assert result == str(excepinfo.value)
def qlearning_dataset(env, dataset=None, terminate_on_end=False, **kwargs):
    """
    Returns datasets formatted for use by standard Q-learning algorithms,
    with observations, actions, next_observations, rewards, and a terminal
    flag.

    Args:
        env: An OfflineEnv object.
        dataset: An optional dataset to pass in for processing. If None,
            the dataset will default to env.get_dataset()
        terminate_on_end (bool): Set done=True on the last timestep
            in a trajectory. Default is False, and will discard the
            last timestep in each trajectory.
        **kwargs: Arguments to pass to env.get_dataset().

    Returns:
        A dictionary with keys 'observations', 'actions',
        'next_observations', 'rewards' and 'terminals' (N-dim arrays).
    """
    if dataset is None:
        dataset = env.get_dataset(**kwargs)

    num_samples = dataset['rewards'].shape[0]
    # Newer dataset versions carry an explicit 'timeouts' field; otherwise
    # episode ends are inferred from the env's step limit.
    has_timeouts = 'timeouts' in dataset

    observations = []
    actions = []
    next_observations = []
    rewards = []
    terminals = []

    episode_step = 0
    for i in range(num_samples - 1):
        done = bool(dataset['terminals'][i])
        if has_timeouts:
            is_final_step = dataset['timeouts'][i]
        else:
            is_final_step = (episode_step == env._max_episode_steps - 1)

        if is_final_step and not terminate_on_end:
            # Drop the last transition of a truncated episode instead of
            # marking it terminal.
            episode_step = 0
            continue

        observations.append(dataset['observations'][i].astype(np.float32))
        next_observations.append(dataset['observations'][i + 1].astype(np.float32))
        actions.append(dataset['actions'][i].astype(np.float32))
        rewards.append(dataset['rewards'][i].astype(np.float32))
        terminals.append(done)

        if done or is_final_step:
            episode_step = 0
        episode_step += 1

    return {
        'observations': np.array(observations),
        'actions': np.array(actions),
        'next_observations': np.array(next_observations),
        'rewards': np.array(rewards),
        'terminals': np.array(terminals),
    }
import torch
def build_save_dataset(corpus_type, fields, opt):  # corpus_type: train or valid
    """Build the (train or valid) dataset from the configured corpora and save
    it to disk as one or more .pt shards; returns the list of saved paths."""
    assert corpus_type in ["train", "valid"]  # Judging whether it is train or valid
    if corpus_type == "train":
        src_corpus = opt.train_src  # paths for the source, target and structure data
        tgt_corpus = opt.train_tgt
        structure_corpus = opt.train_structure
        mask_corpus = opt.train_mask
        relation_lst_corpus = opt.train_relation_lst
        relation_mat_corpus = opt.train_relation_mat
        align_corpus = opt.train_align
    else:
        src_corpus = opt.valid_src
        tgt_corpus = opt.valid_tgt
        structure_corpus = opt.valid_structure
        mask_corpus = opt.valid_mask
        relation_lst_corpus = opt.valid_relation_lst
        relation_mat_corpus = opt.valid_relation_mat
        align_corpus = opt.valid_align
    # Positive shard_size: delegate to the sharded builder and return its paths.
    if opt.shard_size > 0:
        return build_save_in_shards_using_shards_size(
            src_corpus,
            tgt_corpus,
            structure_corpus,
            mask_corpus,
            relation_lst_corpus,
            relation_mat_corpus,
            align_corpus,
            fields,
            corpus_type,
            opt,
        )
    # We only build a monolithic dataset.
    # But since the interfaces are uniform, it would be not hard to do this should users need this feature.
    src_iter = make_text_iterator_from_file(src_corpus)
    tgt_iter = make_text_iterator_from_file(tgt_corpus)
    structure_iter = make_text_iterator_from_file(structure_corpus)
    mask_iter = make_text_iterator_from_file(mask_corpus)
    relation_iter = make_text_iterator_from_file(relation_lst_corpus)
    relation_iter_2 = make_text_iterator_from_file(relation_mat_corpus)
    align_iter = make_text_iterator_from_file(align_corpus)
    dataset = build_dataset(
        fields,
        src_iter,
        tgt_iter,
        structure_iter,
        mask_iter,
        relation_iter,
        relation_iter_2,
        align_iter,
        src_seq_length=opt.src_seq_length,
        tgt_seq_length=opt.tgt_seq_length,
        src_seq_length_trunc=opt.src_seq_length_trunc,
        tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
    )
    # We save fields in vocab.pt seperately, so make it empty.
    dataset.fields = []
    pt_file = "{:s}_{:s}.pt".format(opt.save_data, corpus_type)
    logger.info(" * saving %s dataset to %s." % (corpus_type, pt_file))
    torch.save(dataset, pt_file)
    return [pt_file]
def gaussian_filter(image, sigma):
    """Returns image filtered with a gaussian function of variance sigma**2"""
    rows, cols = image.shape
    i, j = np.meshgrid(np.arange(rows), np.arange(cols), indexing='ij')
    center = (int(rows / 2.0), int(cols / 2.0))
    # Normalized 2-D Gaussian centered in the middle of the grid.
    norm = 1.0 / (2.0 * np.pi * sigma * sigma)
    kernel = norm * np.exp(-0.5 * (((i - center[0]) / sigma) ** 2 +
                                   ((j - center[1]) / sigma) ** 2))
    # Shift the peak to the origin so the FFT convolution does not translate
    # the image.
    kernel = np.roll(kernel, (-center[0], -center[1]), axis=(0, 1))
    # Circular convolution via the real FFT.
    return np.fft.irfft2(np.fft.rfft2(image) * np.fft.rfft2(kernel))
def get_lattice_points(strand):
    """
    Get the lattice-point information of a strand.

    @param strand: string literal holding the pair of lattice points for the
        strand, e.g. "[(0, 0), (1, 2)]"
    @return: the strand's start and end lattice points
    """
    # ast.literal_eval only accepts Python literals, unlike eval() which
    # would execute arbitrary expressions from the input string.
    import ast

    points = ast.literal_eval(strand)
    strand_from = points[0]
    strand_to = points[1]
    return strand_from, strand_to
def while_(condition):
    """
    A while loop that can be used in a workchain outline.

    Use as::

        while_(cls.conditional)(
            cls.step1,
            cls.step2
        )

    Each step can, of course, also be any valid workchain step e.g. conditional.

    :param condition: The workchain method that will return True or False
    """
    # Thin factory around the _While construct.
    return _While(condition)
def new(init=None):
    """Create and return a new Whirlpool hash object.

    If *init* is given, that string is hashed immediately.
    """
    return Whirlpool(init)
def extract_bcr(tab, rep_col='CDR3_aa'):
    """ Extract BCR repertoire for each patient
    Args:
      tab: data table from TRUST BCR outputs
      rep_col: 'CDR3_aa' or 'complete_CDR3_sequences' or a list of keys
    Output: a Series vector containing lists of BCR CDR3 sequences,
      indexed by patient id (normal samples prefixed with 'Normal_')
    """
    # TCGA barcodes encode the patient in chars 0-11 and the sample type in 13-14.
    tab['patient'] = tab.TCGA_id.str.slice(0,12)
    tab['Sample_Type'] = tab.TCGA_id.str.slice(13,15)
    ## https://gdc.cancer.gov/resources-tcga-users/tcga-code-tables/sample-type-codes
    ## Code	Definition	Short Letter Code
    ## 01	Primary Solid Tumor	TP
    ## 02	Recurrent Solid Tumor	TR
    ## 03	Primary Blood Derived Cancer - Peripheral Blood	TB
    ## 04	Recurrent Blood Derived Cancer - Bone Marrow	TRBM
    ## 05	Additional - New Primary	TAP
    ## 06	Metastatic	TM
    ## 07	Additional Metastatic	TAM
    ## 08	Human Tumor Original Cells	THOC
    ## 09	Primary Blood Derived Cancer - Bone Marrow	TBM
    ## 10	Blood Derived Normal	NB
    ## 11	Solid Tissue Normal	NT
    ## 12	Buccal Cell Normal	NBC
    ## 40	Recurrent Blood Derived Cancer - Peripheral Blood	TRB
    ## 50	Cell Lines	CELL
    ## 60	Primary Xenograft Tissue	XP
    ## 61	Cell Line Derived Xenograft Tissue	XCL
    tumor = tab[tab.Sample_Type.isin(['01','02','06','07'])]  ## select tumor samples
    normal = tab[tab.Sample_Type.isin(['10','11'])]  ## select normal samples
    # NOTE(review): this assigns into a slice of `tab` and may trigger pandas'
    # SettingWithCopyWarning — consider normal = normal.copy() first.
    normal['patient'] = 'Normal_'+normal['patient']  ## rename Normal sample Ids
    print('Tumor data of shape', tumor.shape)
    print('Normal data of shape', normal.shape)
    # Group each subset by patient and collect that patient's sequences via to_list.
    out = [ tumor.groupby('patient')[rep_col].apply(to_list),
            normal.groupby('patient')[rep_col].apply(to_list) ]
    return pd.concat(out)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.