| content (string, 35-416k chars) | sha1 (string, 40 chars) | id (int64, 0-710k) |
|---|---|---|
import json
def harmonize_credentials(secrets_file=None, cromwell_username=None, cromwell_password=None):
"""
Takes all of the valid ways of providing authentication to cromwell and returns a username
and password
:param str cromwell_password:
:param str cromwell_username:
:param str secrets_file: json file containing fields cromwell_user and cromwell_password
:return tuple: (string of cromwell username, string of cromwell password)
"""
if cromwell_username is None or cromwell_password is None:
if secrets_file is None:
            raise ValueError('One form of cromwell authentication must be provided, please pass '
                             'either cromwell_username and cromwell_password or a secrets_file.')
else:
with open(secrets_file) as f:
secrets = json.load(f)
cromwell_username = secrets['cromwell_user']
cromwell_password = secrets['cromwell_password']
return cromwell_username, cromwell_password
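# A minimal usage sketch; the credential values below are illustrative, not from the source:
user, password = harmonize_credentials(cromwell_username="alice", cromwell_password="s3cret")
assert (user, password) == ("alice", "s3cret")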
|
f0802b3e65ebec76393090f608c77abea312867b
| 16,087
|
import requests
import json
import os
def get_topN_json( mainURL = 'https://tanimislam.sfo3.digitaloceanspaces.com/covid19movies/covid19_topN_LATEST.json',
verify = True,
topN_json = None ):
"""
Gets the summary COVID-19 cumulative cases and deaths for the top :math:`N > 1` (by population) MSA_\ s in the United States.
:param str mainURL: location of the top N JSON_ file showing the summary COVID-19 statistics of the top :math:`N > 1` MSA\ s in the US.
    :param bool verify: whether to verify SSL connections to ``mainURL``. Default is ``True``.
    :param str topN_json: optional path to a local copy of the top N JSON_ file. If given, it is loaded instead of fetching ``mainURL``.
    :return: a :py:class:`list` of summary statistics, one :py:class:`dict` per MSA, with whitespace and dots removed from the keys.
    .. _MSA: https://en.wikipedia.org/wiki/Metropolitan_statistical_area
"""
if topN_json is None:
response = requests.get( mainURL, verify = verify )
if response.status_code != 200:
raise ValueError( "Error, could not access %s." % mainURL )
json_data = json.loads( response.content )
else:
assert( os.path.isfile( topN_json ) )
json_data = json.load( open( topN_json, 'r' ) )
def _reformat_entry( entry ):
return dict(map(lambda tup: ( '_'.join( tup[0].split()).replace(".",""), tup[1] ), entry.items()))
return list(map(_reformat_entry, json_data))
|
235f57fbd07f221d1e05fd54c7e308c45f5086bc
| 16,088
|
import pathlib
def lambda_filtered_paths(directory: str):
"""
    Return list of filepaths for lambda layers and functions. Unnecessary
    files are filtered out.
"""
paths = pathlib.Path(directory).rglob("*")
return [
f
for f in paths
if not any(pat in str(f) for pat in ["__pycache__", ".mypy_cache", "~"])
]
|
1638e821a249244fde95e26a027176d1e6d87491
| 16,089
|
def rescale(values, old_min, old_max, new_min, new_max):
"""Rescale a set of values into a new min and max
"""
output = []
for v in values:
new_v = (new_max - new_min) * (v - old_min) / (old_max - old_min) + new_min
output.append(new_v)
return output
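# Quick sanity check with illustrative values: map [0, 10] onto [0, 1].
assert rescale([0, 5, 10], 0, 10, 0.0, 1.0) == [0.0, 0.5, 1.0]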
|
c07173fca2f6ba0d1e1e32c257b9e4f4a39fe5a7
| 16,091
|
def KeyValuePairMessagesToMap(key_value_pair_messages):
"""Transform a list of KeyValuePair message to a map.
Args:
key_value_pair_messages: a list of KeyValuePair message.
Returns:
a map with a string as key and a string as value
"""
return {msg.key: msg.value for msg in key_value_pair_messages}
|
7ab0d9a3dea7da762a559efa00ae50247ee8d2d4
| 16,092
|
from pathlib import Path
import yaml
import zipfile
import hashlib
def create_zip(validate_dir: Path):
"""
    Create a zip file containing:
      config.yml
      {self.train_dir_}/specs.yml
      {self.train_dir_}/weights/{epoch:04d}*.pt
      {self.validate_dir_}/params.yml
"""
existing_zips = list(validate_dir.glob("*.zip"))
if len(existing_zips) == 1:
existing_zips[0].unlink()
elif len(existing_zips) > 1:
msg = (
f"Looks like there are too many torch.hub zip files " f"in {validate_dir}."
)
raise NotImplementedError(msg)
params_yml = validate_dir / "params.yml"
with open(params_yml, "r") as fp:
params = yaml.load(fp, Loader=yaml.SafeLoader)
epoch = params["epoch"]
xp_dir = validate_dir.parents[3]
config_yml = xp_dir / "config.yml"
train_dir = validate_dir.parents[1]
weights_dir = train_dir / "weights"
specs_yml = train_dir / "specs.yml"
hub_zip = validate_dir / "hub.zip"
with zipfile.ZipFile(hub_zip, "w") as z:
z.write(config_yml, arcname=config_yml.relative_to(xp_dir))
z.write(specs_yml, arcname=specs_yml.relative_to(xp_dir))
z.write(params_yml, arcname=params_yml.relative_to(xp_dir))
for pt in weights_dir.glob(f"{epoch:04d}*.pt"):
z.write(pt, arcname=pt.relative_to(xp_dir))
sha256_hash = hashlib.sha256()
with open(hub_zip, "rb") as fp:
for byte_block in iter(lambda: fp.read(4096), b""):
sha256_hash.update(byte_block)
hash_prefix = sha256_hash.hexdigest()[:10]
target = validate_dir / f"{hash_prefix}.zip"
hub_zip.rename(target)
return target
|
8551e5507976f6ebe4c999092fa24f199f073ffb
| 16,093
|
def concatenate_pos(sentence, position, element, pos_rem):
"""
    Concatenate an element into the sentence (a list of tokens) at a position.
    Input: sentence, position, element to concatenate, number of letters to remove
    Output: sentence
    """
    # We perform the concatenation
    sentence = sentence[:position + 1] + element + sentence[position + 1:]
    # We remove the superfluous part
    sentence[position] = sentence[position][:len(sentence[position]) - pos_rem]
return sentence
|
2a457ebab9e56858e251a17548e3652e3ac826c4
| 16,095
|
def parse_result(results):
"""
Given a string, return a dictionary of the different
key:value pairs separated by semicolons
"""
if not results:
return {}
rlist = results.split(";")
keyvalpairs = [pair.split(":") for pair in rlist]
keydict = {
pair[0].strip(): pair[1].strip() for pair in keyvalpairs if len(pair) == 2
}
return keydict
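# Illustrative check: well-formed pairs are kept, a malformed fragment is dropped.
assert parse_result("status: ok; code: 200; garbage") == {"status": "ok", "code": "200"}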
|
eca808c2baa0b5c95e6fd052f2afabf53b05bd3a
| 16,096
|
def case(bin_spec: str, default: str = "nf") -> str:
""" Return the case specified in the bin_spec string """
c = default
if "NF" in bin_spec:
c = "nf"
elif "ÞF" in bin_spec:
c = "þf"
elif "ÞGF" in bin_spec:
c = "þgf"
elif "EF" in bin_spec:
c = "ef"
return c
|
b2fdab5d1a48e1d20c3a561707033970cac55356
| 16,097
|
def ul(depth_and_txt_list):
"""
Wants a list of tuples containing depths and txt.
Don't know how robust this function is to depth errors.
depth_and_txt_list = [ (0, 'Things I want for x-mas'),
(1, 'Computer'),
(1, 'Fancy shoes'),
(0, 'New Years Resolutions'),
(1, 'Exercise more, program less'),
(1, 'Eat out less'),
(2, 'Except for Taco Time'),
(2, 'And Subway') ]
yields:
<ul>
<li>Things I want for x-mas
<ul>
<li>Computer</li>
<li>Fancy shoes</li>
</ul></li>
<li>New Years Resolutions
<ul>
<li>Exercise more, program less</li>
<li>Eat out less
<ul>
<li>Except for Taco Time</li>
<li>And Subway</li>
</ul></li>
</ul></li>
</ul>
"""
d = [depth_and_txt_list[0][0]]
    li = ['\t'*d[-1]+'<ul>\n']
    for (req_d, txt) in depth_and_txt_list:
if req_d == d[-1]:
li.append('\t'*d[-1]+'<li>%s'%txt)
li.append('</li>\n')
d.append(req_d)
elif req_d > d[-1]:
li.pop()
li.append('\n'+'\t'*req_d+'<ul>\n')
li.append('\t'*req_d+'<li>%s'%txt)
li.append('</li>\n')
d.append(req_d)
elif req_d < d[-1]:
li.append('\t'*req_d+'</ul></li>\n')
li.append('\t'*req_d+'<li>%s'%txt)
li.append('</li>\n')
d.append(req_d)
for i in range(d[-1]-d[0]):
li.append('\t'*d[-1]+'</ul></li>\n')
d.append(d[-1]-1)
li.append('\t'*d[-1]+'</ul>\n')
return ''.join(li)
|
3303e01efd6b2bb6ee0f296471dca6baf11a4da7
| 16,099
|
def name_func(testcase_func, _, param):
"""Create a name for the test function."""
return '{}_{}_{}'.format(testcase_func.__name__, param.args[0],
param.args[1].__name__)
|
804f593850cff07758a61bd0ae2ccd92b2e46b19
| 16,100
|
def get_spk_agent_one_hot_vec(context, agent_index_dict, max_n_agents):
"""
:param context: 1D: n_prev_sents, 2D: n_words
:param agent_index_dict: {agent id: agent index}
:param max_n_agents: the max num of agents that appear in the context (=n_prev_sents+1); int
:return: 1D: n_prev_sents, 2D: max_n_agents
"""
speaking_agent_one_hot_vector = []
for c in context:
vec = [0 for i in range(max_n_agents)]
speaker_id = c[1]
vec[agent_index_dict[speaker_id]] = 1
speaking_agent_one_hot_vector.append(vec)
return speaking_agent_one_hot_vector
|
9353e4f3f447048bb2e0c0f692c287eda40b1d8a
| 16,101
|
def Convert(string):
"""converts string to list"""
li = list(string.split(" "))
return li
|
a446d46be5d7c2df7139460a461e0825784f5e89
| 16,102
|
import pathlib
def get_paths_to_patient_files(path_to_imgs, append_mask=True):
"""
Get paths to all data samples, i.e., CT & PET images (and a mask) for each patient.
Parameters
----------
path_to_imgs : str
        A path to a directory with patients' data. Each folder in the directory must correspond to a single patient.
append_mask : bool
Used to append a path to a ground truth mask.
Returns
-------
list of tuple
A list wherein each element is a tuple with two (three) `pathlib.Path` objects for a single patient.
The first one is the path to the CT image, the second one - to the PET image. If `append_mask` is True,
the path to the ground truth mask is added.
"""
path_to_imgs = pathlib.Path(path_to_imgs)
#patients = [p for p in os.listdir(path_to_imgs) if os.path.isdir(path_to_imgs / p)]
patients = [f.name.split("_")[0] for f in path_to_imgs.rglob("*_ct*")]
print(str(patients))
paths = []
for p in patients:
path_to_ct = path_to_imgs / (p + '_ct.nii.gz')
path_to_pt = path_to_imgs / (p + '_pt.nii.gz')
if append_mask:
path_to_mask = path_to_imgs / (p + '_gtvt.nii.gz')
paths.append((path_to_ct, path_to_pt, path_to_mask))
else:
paths.append((path_to_ct, path_to_pt))
return paths
|
61480fee3e300d2ca97e819fae875cf4c7a637e1
| 16,103
|
def check_chars_in_positions(password, left, right, in_char):
"""
Check if password is valid based on if char count is in exactly one of
position left or position right
returns bool (True = valid password)
"""
is_in_left = password[left-1] == in_char
is_in_right = password[right-1] == in_char
# need to xor the two
return is_in_left != is_in_right
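# Illustrative checks (positions are 1-based, matching the left-1/right-1 indexing):
assert check_chars_in_positions("abcde", 1, 3, "a") is True   # only position 1 matches
assert check_chars_in_positions("abcda", 1, 5, "a") is False  # both positions match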
|
36a80525307ecf359cf631079e128617c2d22bc3
| 16,104
|
from typing import Dict
def _create_stats_dict_from_values(
total_sum_w: float, total_sum_w2: float, total_sum_wx: float, total_sum_wx2: float
) -> Dict[str, float]:
"""Create a statistics dictionary from the provided set of values.
This is particularly useful for ensuring that the dictionary values are created uniformly.
Args:
        total_sum_w: Total sum of the weights (i.e., the frequencies).
        total_sum_w2: Total sum of the weights squared (i.e., sum of Sumw2 array).
total_sum_wx: Total sum of weights * x.
total_sum_wx2: Total sum of weights * x * x.
Returns:
Statistics dict suitable for storing in the metadata.
"""
return {
"_total_sum_w": total_sum_w,
"_total_sum_w2": total_sum_w2,
"_total_sum_wx": total_sum_wx,
"_total_sum_wx2": total_sum_wx2,
}
|
4ef02ef12b903a4a0a14f3c5fa9ce7edf11f6380
| 16,107
|
def mask_to_surface_type(ds, surface_type, surface_type_var="land_sea_mask"):
"""
Args:
ds: xarray dataset, must have variable slmsk
surface_type: one of ['sea', 'land', 'seaice', 'global']
Returns:
input dataset masked to the surface_type specified
"""
if surface_type == "global":
return ds
elif surface_type not in ["sea", "land", "seaice"]:
raise ValueError(
"Must mask to surface_type in ['sea', 'land', 'seaice', 'global']."
)
surface_type_codes = {"sea": 0, "land": 1, "seaice": 2}
mask = ds[surface_type_var].astype(int) == surface_type_codes[surface_type]
ds_masked = ds.where(mask)
return ds_masked
|
0ee07b852c142d0041841288c54bbc68afdd65f8
| 16,108
|
import os
from datetime import datetime
def name_out(file_csv):
""" Return name of out file with datetime. Example: function('my_file.txt')"""
name = os.path.basename(file_csv)
file_name = os.path.splitext(name)[0]
file_type = os.path.splitext(name)[1]
    file_location = os.path.dirname(file_csv)
    date = "_" + datetime.now().strftime('%d-%m-%Y.%H-%M-%S')
    # os.path.join handles the no-directory case, unlike dirname(...) + "/"
    return os.path.join(file_location, file_name + date + file_type)
|
62e6541d607725b77b3a3cea5214f31dc6218b50
| 16,109
|
import numpy as np
def _interpolate_lsf(en,lsfener,lsfdata,lsfepix,):
"""
interpolate the LSF data for a different energy
parameters
===========
    en : float (not a list/array)
       energy (keV) for which the LSF is desired
    lsfener : numpy array
       list of energies at which we have LSF data
    lsfepix : numpy array
       for a given energy, gives the LSF value for a channel
       the size of each channel is 0.5 pixel
    lsfdata : numpy array
       2-d array. first index relates to lsfener, second to channels
    returns
    =======
    lsf[channels] for energy en
    method
    ========
    the LSF data near en are linearly interpolated for each channel
"""
if not ((type(en) == float) | (type(en) == np.float32)):
print("en = ",en)
print("type en = ", type(en))
raise IOError("_interpolate_lsf only works on one *en* element at a time")
# find index of the nearest LSF
indx = np.argsort(lsfener) # indices
jj = lsfener.searchsorted(en,sorter=indx)
j = indx[jj-1]
k = lsfener.shape[0]
if j == 0:
lsf = lsfdata[0,:].flatten()
elif ((j > 0) & (j < k) ):
e1 = lsfener[j-1]
e2 = lsfener[j]
frac = (en-e1)/(e2-e1)
lsf1 = lsfdata[j-1,:].flatten()
lsf2 = lsfdata[j,:].flatten()
lsf = ((1-frac) * lsf1 + frac * lsf2)
else:
lsf = lsfdata[k-1,:].flatten()
return lsf
|
27f81fc26b362f88b3f3309d33f77fa687dd2856
| 16,110
|
def postfix_to_prefix(expression):
""" Algorithm for Postfix to Prefix:
Read the Postfix expression from left to right
If the symbol is an operand, then push it onto the Stack
If the symbol is an operator, then pop two operands from the Stack
Create a string by concatenating the two operands and the operator before them.
string = operator + operand2 + operand1
And push the resultant string back to Stack
    Repeat the above steps until end of Postfix expression."""
# input postfix expression
exprstn = expression
stack2 = []
operators = set(['+','-','*','/','^'])
for i in exprstn:
if i in operators:
op1 = stack2.pop()
op2 = stack2.pop()
temp = i + op2 + op1
stack2.append(temp)
else:
stack2.append(i)
    # the finished prefix expression is the single element left on the stack
    return stack2
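# Illustrative check: "ab+c*" (postfix for (a+b)*c) leaves the prefix form
# "*+abc" as the single element on the returned stack.
assert postfix_to_prefix("ab+c*") == ["*+abc"]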
|
7f47cac2a5d309fd89d9f495ab27655bd2e2ed00
| 16,111
|
def clip(value, lower, upper):
""" clip a value between lower and upper """
if upper < lower:
lower, upper = upper, lower # Swap variables
return min(max(value, lower), upper)
|
c7324ecd1e6e734a613071c26c2c00c33d2c487e
| 16,112
|
import re
def build_subst_table(file):
"""
Read the substitutions defined in ``file`` and build a substitution table.
"""
table = {}
for line in file:
old, new = line.rstrip('\r\n').split(':')
table[re.compile(r'\b%s\b' % old.strip())] = new.strip()
return table
|
dfcd37f354ef4d51eafe2c61f4d715803d14dbba
| 16,114
|
import asyncio
async def do_after_sleep(delay: float, coro, *args, **kwargs):
"""
Performs an action after a set amount of time.
This function only calls the coroutine after the delay,
preventing asyncio complaints about destroyed coros.
:param delay: Time in seconds
:param coro: Coroutine to run
:param args: Arguments to pass to coroutine
:param kwargs: Keyword arguments to pass to coroutine
:return: Whatever the coroutine returned.
"""
await asyncio.sleep(delay)
return await coro(*args, **kwargs)
|
913998318c8438b2afaf9fcb6e687c4e4a8149e6
| 16,115
|
def normalizeAddress(address):
"""We need this because we internally store email addresses in this format
in the black- and whitelists
"""
if address.startswith("<"):
return address
else:
return "<" + address + ">"
|
fb544e1b96d3f48484e5b5848ccc886a423b30b8
| 16,116
|
from typing import Dict
def extract_tags(repo_info: Dict) -> Dict:
"""Extracts the tags from a repository's metadata.
Args:
repo_info: The repository's metadata.
Returns:
The repository's tags.
"""
tags = {}
repo_tags = repo_info.get("tags")
if repo_tags:
for repo_tag in repo_tags:
# Restrict splitting to the first ":" in case the value also contains a ":"
split_tags = repo_tag.split(":", maxsplit=1)
if len(split_tags) == 2:
tags[split_tags[0]] = split_tags[1]
return tags
|
108e343a84e7a076a6d42e4a3c2cdf0c12389e6b
| 16,117
|
def xgcd(a: int, b: int) -> tuple:
"""Extended Euclidean (GCD) algorithm
gcd(a, b) = u*a + v*b
    Returns gcd, u, v
"""
x, x1, y, y1 = 1, 0, 0, 1
while b:
q, a, b = a // b, b, a % b
x, x1 = x1, x - q * x1
y, y1 = y1, y - q * y1
return a, x, y
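# Illustrative check of the Bezout identity: gcd(240, 46) = 2 = u*240 + v*46.
g, u, v = xgcd(240, 46)
assert g == 2 and u * 240 + v * 46 == 2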
|
3dba9fee305f2c423f84607177ca1b9025d33978
| 16,118
|
def max_insertion(seqs, gene, domain):
"""
    length of largest insertion; returns 100 if no insertions are found
"""
seqs = [i[2] for i in list(seqs.values()) if i[2] != [] and i[0] == gene and i[1] == domain]
lengths = []
for seq in seqs:
for ins in seq:
lengths.append(int(ins[2]))
if lengths == []:
return 100
return max(lengths)
|
0dd4cde839243672ef3942e38a21386ee2e82b47
| 16,119
|
def flatten(data):
"""Recursively flatten lists"""
result = []
for item in data:
        if isinstance(item, list):
            # extend, not append, so nested lists actually flatten
            result.extend(flatten(item))
else:
result.append(item)
return result
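# Illustrative check: nested lists collapse into one flat list.
assert flatten([1, [2, [3, 4]], 5]) == [1, 2, 3, 4, 5]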
|
bb4a7a91f921f67bed3d70e74246e1cc8eec825c
| 16,120
|
def sentinel(start_sentinel, request, start_server):
"""Starts redis-sentinel instance with one master -- masterA."""
# Adding master+slave for normal (no failover) tests:
master_no_fail = start_server('master-no-fail')
start_server('slave-no-fail', slaveof=master_no_fail)
# Adding master+slave for failover test;
masterA = start_server('masterA')
start_server('slaveA', slaveof=masterA)
return start_sentinel('main', masterA, master_no_fail)
|
588b8637af12a7705e9f3f5bc2655c85fe3a8164
| 16,121
|
import sys
def minimumPayment(currentBalance, percent):
"""Take in a current balance and minimum payment percent, then return the
minimum payment"""
    currentBalance = float(currentBalance) #convert argument to float
    percent = float(percent) #convert argument to float
    # validate before converting to a fraction; the original divided first,
    # so the check could never trigger for realistic inputs
    if percent >= 100:
        sys.exit("Please provide a minimum payment percentage value less " + \
            "than 100 (eg. 2.5, 13, etc.).")
    percent = percent / 100
    payment = currentBalance * percent
return payment
|
34e4d1906e92d62e09aa7cf23d529ead50f0bad3
| 16,122
|
def get_train_test_modifiers(modifier=None):
"""Append the modifier to 'train' and 'test'"""
modifier_train = 'train'
modifier_test = 'test'
if modifier is not None:
modifier_train = modifier_train + '_' + modifier
modifier_test = modifier_test + '_' + modifier
return modifier_train, modifier_test
|
6adb8e2a6bb427de059ef7cd127476342ba39a3f
| 16,123
|
def logp_gradient(variable, calculation_set=None):
"""
Calculates the gradient of the joint log posterior with respect to variable.
Calculation of the log posterior is restricted to the variables in calculation_set.
"""
return variable.logp_partial_gradient(variable, calculation_set) + sum(
[child.logp_partial_gradient(variable, calculation_set) for child in variable.children])
|
8c1dd9207e1c08826d51053cb887a86d06306367
| 16,124
|
def mock_db_url(tmpdir):
"""Fixture to provide mock db url
This url is intended to be the location of where to place the local sqlite
databases for each unit test"""
dbname="dcae_cli.test.db"
config_dir = tmpdir.mkdir("config")
return "/".join(["sqlite://", str(config_dir), dbname])
|
a6f06b15edfb685ebb0ff38ae421091f06039563
| 16,125
|
import requests
import json
import subprocess
def rpc(args, command, params):
"""
Run the command. This could be either over the CLI or the API.
Here we run over the API either using `rclone rc --loopback` which
is useful for making sure state is saved properly or to an
existing rclone rcd if `--rc` is used on the command line.
"""
if args.rc:
kwargs = {
"json": params,
}
if args.user:
kwargs["auth"] = (args.user, args.password)
r = requests.post('http://localhost:5572/'+command, **kwargs)
if r.status_code != 200:
raise ValueError(f"RC command failed: Error {r.status_code}: {r.text}")
return r.json()
cmd = ["rclone", "-vv", "rc", "--loopback", command, "--json", json.dumps(params)]
result = subprocess.run(cmd, stdout=subprocess.PIPE, check=True)
return json.loads(result.stdout)
|
07965414a2fe951d49ef99e3ed46019541b72da5
| 16,126
|
def normcase(s):
"""Normalize case of pathname. Has no effect under Posix"""
if not isinstance(s, (bytes, str)):
raise TypeError("normcase() argument must be str or bytes, "
"not '{}'".format(s.__class__.__name__))
return s
|
2f6d5e6306c3115a17ad37ecbebcf67aae056406
| 16,128
|
def reproject_extent(extent, current_extent):
"""
    Changes an extent from its current spatial reference to the spatial reference on another extent object
    :param extent: the extent to reproject
    :param current_extent: the extent whose spatial reference should be used
    :return: the reprojected extent
"""
return extent.projectAs(current_extent.spatialReference)
|
b64e98c15cda62e1a4855050a63b7ff7fcd38610
| 16,129
|
def _get_feature_help(module: object) -> str:
"""
Get the docstring of the imported module
:param module: the imported module object
:return: The docstring as string
"""
return module.__doc__ or "[ERROR] Missing the module docstring"
|
804f3586137adf7a22231225bd326fff2fcbd704
| 16,130
|
def clean_num_list(array):
"""
    Patch function to make the array consistent with usage: if an element
    is itself a sequence, keep only its first item.
Parameters
----------
array : 1-D array
"""
new_array = []
for i in array:
try:
new_array.append(i[0])
        except (TypeError, IndexError):
new_array.append(i)
return new_array
|
86844949a0976187b1036b8aec03ed9b0c850339
| 16,131
|
def get_model_parameters(model):
"""FUNCTION::GET_MODEL_PARAMETERS: Gets all the parameters of a model.
---
Arguments:
---
>- model {keras.Model} -- The model to analyze.
Returns:
---
>- {list[string]} -- layer Names
>- {list[np.array]} -- layer Weights
>- {list[tensor]} -- layer Outputs
>- {list[tensor]} -- layer Activations."""
layerNames = []; layerOutputs = []; layerWeights = []; layerAct = []
for layer in model.layers:
layerNames.append(layer.name)
layerOutputs.append(layer.output)
layerAct.append(layer.activation)
        layerWeights.append(layer.get_weights())  # call the method, not append the bound method
return layerNames,layerWeights,layerOutputs,layerAct
|
96341de438b0359425381ebed400ab38da35304a
| 16,132
|
def displayavailableappts(vaccdata):
"""
Displays number of appointments, on this date, at this location, if there are appts available
:param vaccdata: Dataframe of Location, Date, Appointments
    :return: Dataframe filtered to rows with at least one appointment
"""
if vaccdata[vaccdata.Appointments >= 1].empty:
print("No available appointments.")
return vaccdata[vaccdata.Appointments >= 1]
|
0acd5e2afd714d637a42610839200a9ebca34ca3
| 16,133
|
def two_letter_language_code(feed):
"""
feed.language conforms to
http://www.rssboard.org/rss-language-codes
sometimes it is of the form de-de, de-au providing a hint of dialect
thus, we only pick the first two letters of this code
:param feed:
:return:
"""
return feed.language[:2]
|
f4da54d15e73a1c317a90cce08de4a44d55ae273
| 16,135
|
from typing import Type
import importlib
def _string_to_class(string: str) -> Type:
"""
Parse a string into a Python class.
Args:
string: a fully qualified string of a class, e.g. 'mypackage.foo.MyClass'.
Returns:
The class.
"""
components = string.split(".")
class_name = components[-1]
module_name = ".".join(components[:-1])
module = importlib.import_module(module_name)
cls = getattr(module, class_name)
assert isinstance(cls, type)
return cls
|
e756b35a5d34aaeffa8ddc4e96130da3a3bc5d04
| 16,137
|
def _MakeList(input):
""" input is a tuple.
Example:
(a, b, c) --> [a, b, c]
(a) --> [a]
([a, b, c]) --> [a, b, c]
"""
if len(input) == 0:
raise ValueError(
'input cannot be empty.')
elif len(input) == 1:
output = input[0]
if not isinstance(output, list):
output = [output]
else:
output = list(input)
return output
|
72ed4da8a244762f468a5dac52583ad5f01d9179
| 16,138
|
def roundup(val, width):
"""roundup a value to N x width-bits"""
x = (width >> 3) - 1
return (val + x) & ~x
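# Illustrative checks: width is given in bits, so width=32 rounds up to 4-byte multiples.
assert roundup(5, 32) == 8
assert roundup(8, 32) == 8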
|
45a303ca8f2ba5312fe89e73542606dbe68ed627
| 16,142
|
def upload_file(word_id, body=None): # noqa: E501
"""uploads an image
# noqa: E501
:param word_id: ID of word to update
:type word_id: str
:param body:
:type body: dict | bytes
:rtype: ApiResponse
"""
"""
# READY BUT COMMENTED OUT UNTIL SECURITY IS IMPLEMENTED"
if connexion.request.is_json:
body = Object.from_dict(connexion.request.get_json()) # noqa: E501
try:
doc_ref = config.words_ref.document(word_id)
name = doc_ref.get().to_dict()['name']
destination_blob_name = 'images/' + name
except:
print('Cannot get the filename')
return None
# Uploads the file to the bucket.
# Instantiates a client
storage_client = storage.Client()
bucket = storage_client.bucket('words-storage-romank')
blob = bucket.blob(destination_blob_name)
# Upload the file
blob.upload_from_string(body)
# Update the filename in the Firestore database
doc_ref.update({'imageurl': name})
"""
return True
|
ef586e3db77ec17f5d349c4e38b55ce4773810a6
| 16,143
|
def heuristic(checkpoints):
"""На полном ходу летим к следующему флагу"""
return f"{checkpoints[0]} {checkpoints[1]} 200"
|
984c0dc2007aaabc1c8cf7ed9744453dfc1c8f21
| 16,146
|
import hashlib
def get_file_md5(filename, blocksize=2**20):
"""get file md5"""
md5 = hashlib.md5()
with open(filename, "rb") as fin:
while True:
buf = fin.read(blocksize)
if not buf:
break
md5.update(buf)
return md5.hexdigest()
|
c1456e1c942002d8c92c35869f21a2fab58fa091
| 16,148
|
def sum_keep_digits(m: int, n: int, digit_count: int) -> int:
"""Finds the last base-10 digits of the sum of two non-negative integers.
Args:
m: The first non-negative integer addend.
n: The second non-negative integer addend.
digit_count: The number of least significant digits of the resulting
sum that should be preserved.
Returns:
An integer consisting of the last ``digit_count`` digits (from most to
least significant) of the sum ``m + n`` when written in base 10.
"""
mod = 10**digit_count
return ((m % mod) + (n % mod)) % mod
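# Illustrative check: only the last three digits of 999 + 1002 = 2001 survive.
assert sum_keep_digits(999, 1002, 3) == 1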
|
c09bf6a9bf681deae133bb787e2f1b50711dd24a
| 16,150
|
import os
def get_pilot_id():
"""
Get the pilot id from the environment variable GTAG
:return: pilot id (string)
"""
return os.environ.get("GTAG", "unknown")
|
50dd90bc2fe80efa419a01022f31b49964c210c0
| 16,151
|
def remove_deprecated_elements(deprecated, elements, version):
"""Remove deprecated items from a list or dictionary"""
    # Attempt to parse the major, minor, and revision
    (major, minor, revision) = version.split('.')
    # Sanitize alphas and betas from revision number
    revision = revision.split('-')[0]
    # Compare components numerically; string comparison would order '10' < '9'
    major, minor, revision = int(major), int(minor), int(revision)
    # Iterate over deprecation lists and remove any keys that were deprecated
    # prior to the current version (strict inequality on the higher components,
    # otherwise e.g. 2.0.0 would match a 2.5.0 deprecation)
    for dep in deprecated:
        if (major > dep['major']) \
                or (major == dep['major'] and minor > dep['minor']) \
                or (major == dep['major'] and minor == dep['minor']
                    and revision >= dep['revision']):
if type(elements) is list:
for key in dep['keys']:
if key in elements:
elements.remove(key)
if type(elements) is dict:
for key in dep['keys']:
if key in elements:
del elements[key]
return elements
|
d7cbfbf5b26e10644d24450a53fb742e1461c58f
| 16,153
|
def collapse_hits(hits):
"""
For hits with same HPC k-mer content and distance, collapse into the last base
"""
    collapsed = []
    for i, j in zip(hits[:-1], hits[1:]):
        if j[0] == i[0] + 1 and j[1] == i[1]:
            # j extends the current run of identical k-mer content
            continue
        # i is the last base of its run
        collapsed.append(i)
    if hits:
        # the final hit always closes a run
        collapsed.append(hits[-1])
    return collapsed
|
0fa53a68d48b6ef22784e82a799bf57166eae91f
| 16,154
|
import argparse
import os
def CheckExt(choices):
"""Wrapper to return the class
"""
class Act(argparse.Action):
"""Class to allow checking of filename extensions in argparse. Taken
from https://stackoverflow.com/questions/15203829/python-argparse-file-extension-checking
"""
def __call__(self, parser, namespace, fname, option_string=None):
ext = os.path.splitext(fname)[1][1:]
if ext not in choices:
option_string = '({})'.format(
option_string) if option_string else ''
parser.error("Wrong filetype: file doesn't end with {}{}".format(
choices, option_string))
else:
setattr(namespace, self.dest, fname)
return Act
|
dd566f58d92daca9e23f10f1eee14838a6d4c1df
| 16,156
|
def split_layouts_by_arrow(s: str) -> tuple:
"""
Splits a layout string by first arrow (->).
:param s: string to split
:return: tuple containing source and target layouts
"""
arrow = s.find('->')
if arrow != -1:
source_layout = s[:arrow]
target_layout = s[arrow + 2:]
if source_layout == '':
source_layout = None
if target_layout == '':
target_layout = None
return source_layout, target_layout
else:
return s, None
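# Illustrative checks: a missing side of the arrow comes back as None.
assert split_layouts_by_arrow("qwerty->dvorak") == ("qwerty", "dvorak")
assert split_layouts_by_arrow("->dvorak") == (None, "dvorak")
assert split_layouts_by_arrow("qwerty") == ("qwerty", None)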
|
30781f9a5d7b8b342c2a7d9fefb2085b2e990994
| 16,157
|
import pathlib
def parse_bind_volume(bind_arg):
"""unpack volume bind bind_arg (e.g., /foo:/bar) to dict where the key is a
resolve pathlib.Path and the value is an unresolved (in-container)
pathlib.PosixPath."""
# can be up to three, but we only want the first two
bind_arg = bind_arg.split(":")
src, dst = bind_arg[:2]
assert len(bind_arg) < 4, "unexpected number of bind_arg"
return {pathlib.Path(src).resolve(): pathlib.PosixPath(dst)}
|
ea2b4ac8ba03e3829ab4db69684ebf49767e0c94
| 16,158
|
from functools import reduce
from math import factorial
import operator
def binCoeff(n, k):
    """
    n choose k (the binomial coefficient)
    """
    if k < n / 2.0:
        return binCoeff(n, n - k)
    else:
        # (k+1)*(k+2)*...*n equals n!/k!; divide by (n-k)! to get n choose k
        # (the original returned the bare product, which is not the coefficient)
        return reduce(operator.mul, range(k + 1, n + 1), 1) // factorial(n - k)
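# Illustrative checks against Pascal's triangle: C(5, 2) = 10, C(6, 3) = 20.
assert binCoeff(5, 2) == 10
assert binCoeff(6, 3) == 20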
|
57a69350b704fb7e0dd34ebb82fecdf0d7b73c95
| 16,159
|
def strip_lines(s, side='r'):
"""
Splits the given string into lines (at newlines), strips each line
and rejoins. Is careful about last newline and default to stripping
on the right only (side='r'). Use 'l' for left strip or anything
else to strip both sides.
"""
strip = (str.rstrip if side == 'r' else str.lstrip if side == 'l'
else str.strip)
end = '\n' if s.endswith('\n') else ''
return '\n'.join([strip(line) for line in s.splitlines()]) + end
|
4995710e9df31e7e210621b3f9be0af38642324e
| 16,160
|
def check_gap_extendable(covratio, mincov, checkext, blocks, chrom):
"""
    :param covratio: minimum coverage ratio required to accept an extension
    :param mincov: coverage threshold below which blocks are returned unchanged
    :param checkext: extension step sizes to try
    :param blocks: list of alignment block tuples
    :param chrom: masked chromosome sequence
    :return: the (possibly extended) list of blocks
"""
ts = blocks[0][1]
qs = blocks[0][6]
te = blocks[-1][2]
qe = blocks[-1][7]
max_len = max(te - ts, qe - qs)
tcov = sum([b[2] - b[1] for b in blocks])
qcov = sum([b[7] - b[6] for b in blocks])
assert tcov == qcov, 'Coverage not equal ({} vs {}) for blocks: {}'.format(tcov, qcov, blocks)
if tcov < mincov:
return blocks
outblocks = []
for ext in checkext:
new_len = max_len // ext * ext + ext
if chrom[qs:qs+new_len].mask.sum() == qcov:
# no overlapping blocks from other chains in this gap, extend
if qcov / (qs+new_len - qs) < covratio:
continue
outblocks = [(blocks[0][0], ts, ts+new_len, blocks[0][3],
blocks[0][4],
blocks[0][5], qs, qs+new_len, blocks[0][8])]
chrom[qs:qs+new_len].mask = 1
break
if not outblocks:
if chrom[qs:qs+max_len].mask.sum() == qcov:
if not qcov / (qs+max_len - qs) < covratio:
outblocks = [(blocks[0][0], ts, ts+max_len, blocks[0][3],
blocks[0][4],
blocks[0][5], qs, qs+max_len, blocks[0][8])]
chrom[qs:qs+max_len].mask = 1
else:
outblocks = blocks
else:
outblocks = blocks
return outblocks
|
6e5dfe1742c21caaed5835f3b111f65baa793bf5
| 16,161
|
def _draw_bbox_pil(canvas, bbox, color, stroke):
"""Draws BBox onto PIL.ImageDraw
:param bbox: BBoxDiim
:param color: Color
:param stroke: int
:returns PIL.ImageDraw
"""
xyxy = bbox.xyxy_int
if stroke == -1:
canvas.rectangle(xyxy, fill=color.rgb_int)
else:
canvas.rectangle(xyxy, outline=color.rgb_int, width=stroke)
return canvas
|
a5dd958cd18a9f9129567af55d5057b105d4fe59
| 16,162
|
def _prepare_url_params(tile_id, bbox, end_date, start_date, absolute_orbit):
""" Constructs dict with URL params
:param tile_id: original tile identification string provided by ESA (e.g.
'S2A_OPER_MSI_L1C_TL_SGS__20160109T230542_A002870_T10UEV_N02.01')
:type tile_id: str
:param bbox: bounding box of requested area in WGS84 CRS
:type bbox: common.BBox
:param start_date: beginning of time range in ISO8601 format
:type start_date: str
:param end_date: end of time range in ISO8601 format
:type end_date: str
:param absolute_orbit: An absolute orbit number of Sentinel-2 L1C products as defined by ESA
:type absolute_orbit: int
:return: dictionary with parameters as properties when arguments not None
:rtype: dict
"""
url_params = {
'identifier': tile_id,
'startDate': start_date,
'completionDate': end_date,
'orbitNumber': absolute_orbit,
'box': bbox
}
return {key: str(value) for key, value in url_params.items() if value}
|
029d6b2ecd0871fde521a98d3edb09f2a6bf947b
| 16,164
|
def is_match(source, *patterns):
"""
    gets a value indicating whether the given file name ends with any of the given patterns.
:param str source: source file path.
:param str patterns: file name end pattern. for example it could
be `'.py', '.html'`. it will match all file
names if no pattern is provided.
:rtype: bool
"""
if len(patterns) <= 0:
return True
patterns = tuple(item.lower() for item in patterns)
return source.lower().endswith(patterns)
|
88190d3fdb384f78b2a8f300b8e7e89b2428a19b
| 16,165
|
def padding_string(pad, pool_size):
"""Get string defining the border mode.
Parameters
----------
pad: tuple[int]
Zero-padding in x- and y-direction.
pool_size: list[int]
Size of kernel.
Returns
-------
padding: str
Border mode identifier.
"""
if pad == (0, 0):
padding = 'valid'
elif pad == (pool_size[0] // 2, pool_size[1] // 2):
padding = 'same'
elif pad == (pool_size[0] - 1, pool_size[1] - 1):
padding = 'full'
else:
raise NotImplementedError(
"Padding {} could not be interpreted as any of the ".format(pad) +
"supported border modes 'valid', 'same' or 'full'.")
return padding
|
985828faf9c072b623776be23e8ef0b7c680aba8
| 16,166
|
def apply_eqn(x, eqn):
"""
Given a value "x" and an equation tuple in the format:
(m, b)
where m is the slope and b is the y-intercept,
return the "y" generated by:
y = mx + b
"""
m, b = eqn
return (m * x) + b
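# Illustrative check: with (m, b) = (2, 1), y = 2*3 + 1 = 7.
assert apply_eqn(3, (2, 1)) == 7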
|
1d5078905a20e6b83c2186f6aefbedaa70863fea
| 16,167
|
def ext_euclid_alg (m, n):
""" Extended Euclidean algorithm for gcd.
    Finds the greatest common divisor of
    two integers m and n and solves for integers
    a, b such that am + bn = gcd(m, n). From Knuth, TAOCP
vol. I, p. 14.
Variables --
q, r -- quotient and remainder
a, b -- am + bn = gcd
apme, bpme -- a prime and b prime
t -- temporary
"""
m, n = abs(m), abs(n)
q, r = m // n, m % n
apme = b = 1
a = bpme = 0
while r != 0:
m, n = n, r
t = apme
apme = a
a = t - q * a
t = bpme
bpme = b
b = t - q * b
""" reset q and r """
q, r = m // n, m % n
return (n, a, b)
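# Illustrative check: gcd(12, 8) = 4, with 1*12 + (-1)*8 = 4.
assert ext_euclid_alg(12, 8) == (4, 1, -1)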
|
f6ee18c045dd6c26023ff70ab43dde550071023c
| 16,168
|
def drop_empty_properties(field):
"""Remove empty properties, as they cause the validation to fail.
"""
return {
key: value
for key, value
in field.items()
if value
}
|
03341ab59c7730ff0938633dafa474649afd503e
| 16,169
|
import torch
def get_prior_precision(args, device):
""" Obtain the prior precision parameter from the cmd arguments """
if type(args.prior_precision) is str: # file path
prior_precision = torch.load(args.prior_precision, map_location=device)
elif type(args.prior_precision) is float:
prior_precision = args.prior_precision
else:
        raise ValueError('args.prior_precision must be a float or a file path string')
return prior_precision
|
b3a34d3831c42b490f1750c47de06743e5ff0b8a
| 16,170
|
import subprocess
def update_node():
"""When called the node will run apt update followed by apt upgrade"""
node_update_log_1 = subprocess.check_output(["sudo", "apt", "update"])
node_update_log_2 = subprocess.check_output(["sudo", "apt", "upgrade", "-y"])
return [node_update_log_1, node_update_log_2]
|
ff28a65e6b84ac5b31b81558f06813149d2bff09
| 16,171
|
def _split_tree_ensemble_atts(attrs, split):
"""
Splits the attributes of a TreeEnsembleRegressor into
multiple trees in order to do the summation in double instead of floats.
"""
trees_id = list(sorted(set(attrs['nodes_treeids'])))
results = []
index = 0
while index < len(trees_id):
index2 = min(index + split, len(trees_id))
subset = set(trees_id[index: index2])
indices_node = []
indices_target = []
for j, v in enumerate(attrs['nodes_treeids']):
if v in subset:
indices_node.append(j)
for j, v in enumerate(attrs['target_treeids']):
if v in subset:
indices_target.append(j)
if (len(indices_node) >= len(attrs['nodes_treeids']) or
len(indices_target) >= len(attrs['target_treeids'])):
raise RuntimeError( # pragma: no cover
"Initial attributes are not consistant."
"\nindex=%r index2=%r subset=%r"
"\nnodes_treeids=%r\ntarget_treeids=%r"
"\nindices_node=%r\nindices_target=%r" % (
index, index2, subset,
attrs['nodes_treeids'], attrs['target_treeids'],
indices_node, indices_target))
ats = {}
for name, att in attrs.items():
if name == 'nodes_treeids':
new_att = [att[i] for i in indices_node]
new_att = [i - att[0] for i in new_att]
elif name == 'target_treeids':
new_att = [att[i] for i in indices_target]
new_att = [i - att[0] for i in new_att]
elif name.startswith("nodes_"):
new_att = [att[i] for i in indices_node]
assert len(new_att) == len(indices_node)
elif name.startswith("target_"):
new_att = [att[i] for i in indices_target]
assert len(new_att) == len(indices_target)
elif name == 'name':
new_att = "%s%d" % (att, len(results))
else:
new_att = att
ats[name] = new_att
results.append(ats)
index = index2
return results
|
fee9e54cd850a205130c2e4ac3bf6a1f4b2ea57f
| 16,172
|
from typing import Union
import os
import json
def load_loose_json(load_path_or_vectorized_data: Union[str, list]):
"""Load vectorized jsonl data. Takes either path to the file or iterable
Arguments:
load_path_or_vectorized_data {Union[str, list]} -- Path to vectorized data or iterable containing data
Raises:
ValueError: Error in input
Returns:
list -- Loaded data
"""
    if isinstance(load_path_or_vectorized_data, str):
        assert os.path.exists(
            load_path_or_vectorized_data
        ), "[ERROR] - Load path does not exist."
        # use a context manager so the file handle is closed rather than leaked
        with open(load_path_or_vectorized_data, "r", encoding="utf-8") as data_file:
            return [json.loads(line) for line in data_file]
    elif isinstance(load_path_or_vectorized_data, list):
        data = load_path_or_vectorized_data
    else:
        raise ValueError(load_path_or_vectorized_data)
    return [json.loads(line) for line in data]
|
e315a370e4c444434fefc9a9d4eb4e7a5e25cdf6
| 16,174
|
def parse_besancon_dict(bd):
"""
Turn a dict like default_keys into a list of tuples (must be a list of
tuples because there are some repeated entries, which dictionaries do not
support)
"""
http_dict = []
    for key, val in bd.items():  # .iteritems() is Python 2 only
if type(val) is list:
if "[]" in key:
for listval in val:
http_dict.append((key,listval))
else:
for ii,listval in enumerate(val):
if type(listval) is list:
for jj,lv in enumerate(listval):
http_dict.append((key+"[%i][%i]" % (ii,jj),lv))
else:
http_dict.append((key+"[%i]" % (ii) , listval))
else:
http_dict.append((key , val))
return http_dict
|
05a893dc8f790a01ab5c159dee6a8de969f4d594
| 16,175
|
def has_unaccent(cr):
""" Test if the database has an unaccent function.
The unaccent is supposed to be provided by the PostgreSQL unaccent contrib
module but any similar function will be picked by OpenERP.
"""
cr.execute("SELECT proname FROM pg_proc WHERE proname='unaccent'")
return len(cr.fetchall()) > 0
|
5f132439fff3c52f9d0a406bcef98581d9355b6c
| 16,176
|
import os
def gen_entry_points(pkgdir, e_prefix):
""" Generate entry_points={'console_scripts': []} records
relative to `pkgdir`.
"""
results = []
root = pkgdir
for f in os.listdir(pkgdir):
        # Skip sub-directories (os.listdir yields bare names, so join with pkgdir)
        if os.path.isdir(os.path.join(pkgdir, f)):
continue
# Skip non-script files and __init__
if not f.endswith('.py') or f.endswith('__init__.py'):
continue
# Python module name is derived from the filename without its extension
modname = os.path.splitext(f)[0]
# Python module path is the absolute path using "." instead of "/"
modpath = os.path.join(root, modname).replace(os.sep, ".")
# Create record
result = "{}_{}={}:{}".format(e_prefix, modname, modpath, "main")
# Save record
results += [result]
return results
|
7516fec813ffc9e52a8ef5a305e41458e8bd3bd7
| 16,178
|
def bit_count(val):
"""
Fast way to count 1's in a 64 bit integer. Based on Hamming weight
"""
val = val - ((val >> 1) & 0x5555555555555555)
val = (val & 0x3333333333333333) + ((val >> 2) & 0x3333333333333333)
return (((val + (val >> 4)) & 0xF0F0F0F0F0F0F0F) * 0x101010101010101) >> 56
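# Illustrative checks of the Hamming-weight trick:
assert bit_count(0xFF) == 8
assert bit_count(0b1010101) == 4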
|
fe9983efc6651e72e5db2cbb701162b58bad19ce
| 16,179
|
from enum import Enum
def enumify(TheModel, name_field="name", val_field="id"):
"""
    Converts a model's rows into an enum.
    Can be an effective cache for mostly unchanging data.
    Limitation: No auto updates. If you update the model and you are using a process manager like gunicorn you
    would need to restart to ensure enums are updated
eg.
>>> class Week(BaseModel):
day = CharField()
num = IntField()
>>> weekenum = enumify(Week, 'day', 'num')
    >>> print(weekenum.monday.value)
"""
fields = getattr(TheModel, name_field), getattr(TheModel, val_field)
data = list(
(name.replace(" ", "_").lower(), v)
for (name, v) in TheModel.select(*fields).tuples()
)
return Enum(TheModel.__name__, data)
|
b8112d3feb4756f8a41e160f90d7b0a8c9736f73
| 16,182
|
def convert_labels_to_ids(labels):
"""
Converts labels to integer IDs
"""
label_to_id = {}
counter = 0
id_list = []
for label in labels:
if label not in label_to_id.keys():
label_to_id[label] = counter
counter += 1
id_list.append(label_to_id[label])
return id_list
|
4f9fffcd59884a20b3a8bcf40858e0ed9a7fc7da
| 16,184
|
def to_redfish_version(ver32):
"""
Converts a PLDM ver32 number to a Redfish version in the format vMajor_Minor_Errata
"""
if ver32 == 0xFFFFFFFF: # un-versioned
return ''
else:
return 'v'+str((ver32 >> 24) & 0x0F)+'_'+str((ver32 >> 16) & 0x0F)+'_'+str((ver32 >> 8) & 0x0F)
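# Illustrative checks: 0x01020300 encodes major=1, minor=2, errata=3.
assert to_redfish_version(0x01020300) == 'v1_2_3'
assert to_redfish_version(0xFFFFFFFF) == ''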
|
56ccd29c01b7307fa8ee47ae5e7fa4fb89163523
| 16,185
|
import requests
def retry_get_request(url, max_retries):
"""Run a get request with multiple retries"""
response = requests.Response()
num_retries = 0
while num_retries < max_retries:
try:
response = requests.get(url)
        except requests.RequestException:
            pass  # treat connection errors like a bad status and retry
if response.status_code == 200:
return response
print("Bad status code ({}). Retrying.".format(response.status_code))
num_retries += 1
return response
|
559d9770081e531738eae2a3e3a608dc346a317b
| 16,186
|
def restrictToHostsOnCurrentStage(statsboardData, hostsOnCurrentStage):
"""
Removes data for hosts not on current stage from statsboardData, and
returns the new version.
:param statsboardData:
:param hostsOnCurrentStage:
:return:
"""
# NOTE: can be optimized if necessary.
newData = []
for dataSlice in statsboardData:
if "tags" in dataSlice:
if "host" in dataSlice["tags"]:
if dataSlice["tags"]["host"] in hostsOnCurrentStage:
newData.append(dataSlice)
return newData
|
740f2c55a14472c36b3f6109fe2e89ef5880e04b
| 16,187
|
def POUF_unblind(params, ai, zi):
""" Unblind the messages to recover the raw outputs of the POUF """
G, g, o = params
xi = [a.mod_inverse(o) * z for a,z in zip(ai, zi)]
return xi
|
2fb5d1e2d758422dd7df9ff16baa1c834e320f7b
| 16,188
|
from typing import List
def align_lines(lines: list, column_separator: str = "|") -> List[str]:
"""
Pads lines so that all rows in single column match. Columns separated by '|' in every line.
:param lines: list of lines
:param column_separator: column separator. default is '|'
:return: list of lines
"""
rows = []
col_len: List[int] = []
for line in lines:
line = str(line)
cols = []
for col_index, col in enumerate(line.split(column_separator)):
col = str(col).strip()
cols.append(col)
if col_index >= len(col_len):
col_len.append(0)
col_len[col_index] = max(col_len[col_index], len(col))
rows.append(cols)
lines_out: List[str] = []
for row in rows:
cols_out = []
for col_index, col in enumerate(row):
if col_index == 0:
col = col.ljust(col_len[col_index])
else:
col = col.rjust(col_len[col_index])
cols_out.append(col)
lines_out.append(" ".join(cols_out))
return lines_out
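# Illustrative check: the first column is left-justified, later columns right-justified.
assert align_lines(["a|1", "bb|22"]) == ["a   1", "bb 22"]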
|
2ec537752bfce3d261d7202e9b9266a9ae0dd87f
| 16,189
|
def check_point(x, y, d, size):
"""
    Checks whether the point belongs to the wall screening the exit.
    The point is in Feder's coordinate system.
"""
if 0 <= x <= size:
return y >= d/2 or y <= -d/2
|
17177da9ba35f2f5dc82537383ce4f0426a76304
| 16,190
|
import argparse
def get_args():
"""
Supports the command-line arguments listed below.
"""
parser = argparse.ArgumentParser(
description='String generator')
    parser.add_argument('--length', type=int, default=64, help="String Length")
parser.add_argument('--hc', action='store_true', help="Force hash collision")
parser.add_argument('--level', type=int, choices=[1, 2], default=1,
help="Level of hash collision. Depends on --hc flag")
parser.add_argument('--count', type=int, default=10, help="Number of strings to generate")
parser.add_argument('--hc_val', type=int, default=45, help="Hash collision value")
parser.add_argument('--store', type=str, required=True, choices=['console', 'file', 'sqlite', 'redis'],
default="console", help="Where to store generated data")
args = parser.parse_args()
return args
|
6f1fd3b588a68ec5bfcdbc00982f5cf1d4eac65c
| 16,191
|
from typing import SupportsIndex
def ireplace(text: str, old_: str, new_: str, count_: SupportsIndex = -1) -> str:
"""Case-insensitive :py:meth:`str.replace` alternative.
:param text: String to search through.
:param old_: Case-insensitive substring to look for.
:param new_: Case-sensitive substring to replace with.
:param count_: Maximum number of occurrences to replace. -1 (the default value) means replace all occurrences.
:return: A string with all substrings matching old_ replaced with new_.
"""
    index: int = 0
    replaced: int = 0
    count: int = count_.__index__()
    if not old_:
        return text.replace('', new_)
    if text.lower() == old_.lower() and count != 0:
        return new_
    # keep replacing until no match remains or the requested count is reached
    # (the original loop condition only ran for negative counts)
    while index < len(text) and (count < 0 or replaced < count):
        index_l = text.lower().find(old_.lower(), index)
        if index_l == -1:
            return text
        text = text[:index_l] + new_ + text[index_l + len(old_):]
        index = index_l + len(new_)
        replaced += 1
    return text
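# Illustrative checks: matching ignores case; count_ caps the number of replacements.
assert ireplace("Hello hello", "HELLO", "hi") == "hi hi"
assert ireplace("Hello hello", "HELLO", "hi", 1) == "hi hello"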
|
8509314192458c867b6471a381dc6470a2546902
| 16,192
|
def Get_qvals_mtx_v2(qvals_result, distance_bins):
    """Makes the q-values array used for clusterisation by trimming distance_bins columns from each side"""
    return qvals_result[:, distance_bins:qvals_result.shape[1] - distance_bins]
|
2d8d5e643a156ecebc17dc520f65a97ae2684e19
| 16,193
|
def alpha(requestContext, seriesList, alpha):
"""
Assigns the given alpha transparency setting to the series. Takes a float value between 0 and 1.
"""
for series in seriesList:
series.options['alpha'] = alpha
return seriesList
|
25ef9bad1b39549585bdb175e487236591d0816f
| 16,194
|
def ask_yes_no_question(question):
"""Prints a question on the CLI that the user must respond
with a yes or no.
:param question: Text of the question. Include question mark on it.
:type question: str
:return: Whether the user said 'y' (True) or 'n' (False).
:rtype: bool
"""
answer = ''
while answer not in ('y', 'n'):
print(f'{question} [y/n](n):')
answer = input()
if not answer:
answer = 'n'
if answer == 'y':
return True
return False
|
6a70b135879e0fcd27ba6bc7fbc330a0660bd729
| 16,196
|
def to_uptime_format(seconds):
"""
    Convert seconds into a friendly days/hours/minutes/seconds string
:param seconds:
:return:
"""
one_minute_seconds = 60
one_hour_seconds = 60 * 60
one_day_seconds = 24 * 60 * 60
days = seconds // one_day_seconds
left_seconds = seconds % one_day_seconds
hours = left_seconds // one_hour_seconds
left_seconds = left_seconds % one_hour_seconds
minutes = left_seconds // one_minute_seconds
_seconds = left_seconds % one_minute_seconds
result = ''
if days:
result += '%d day(s), ' % days
if hours:
result += '%d hour(s), ' % hours
if minutes:
result += '%d minute(s), ' % minutes
result += '%d second(s)' % _seconds
return result
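# Illustrative check: 90061 seconds = 1 day, 1 hour, 1 minute, 1 second.
assert to_uptime_format(90061) == '1 day(s), 1 hour(s), 1 minute(s), 1 second(s)'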
|
6e908d6341010bfe63535763149d60e42de98e78
| 16,197
|
def group_recognition_results(data, clump_threshold_ms=1000):
"""Groups together objects that have been detected in previous frames."""
sorted_data = sorted(data, key=lambda k: (str(k['found']), int(k['start'])))
results = []
prev_result = None
for result in sorted_data:
if not prev_result:
prev_result = result
else:
clumped_together = (result['start'] - prev_result['end'] < clump_threshold_ms) or (
prev_result['start'] == result['start'])
same_object = prev_result['found'] == result['found']
if clumped_together and same_object:
prev_result['end'] = result['end']
else:
results.append(prev_result)
prev_result = result
if prev_result:
results.append(prev_result)
return results
|
7116fdcaef5fe99e4cffcb40037cd3e5a7644f98
| 16,198
|
def oc2_query_features(method):
"""One-use-only decorator
Use on one function you implement when inheriting
from OpenC2CmdDispatchBase.
Will be called when we receive query-features command
Example:
        class MyDispatch(OpenC2CmdDispatchBase):
...
@oc2_query_features
def some_function(oc2_cmd):
...
"""
method.oc2_query_features = True
return method
|
0f84ff57b130e7aaf68a1205bc39a345102043e9
| 16,200
|
import os
def get_woufile(shot, time_ms, fit_name='fit1', v3fit_dir='/home/v3fit/fits'):
"""
Use this to load the woutfile (V3Fit reconstruction output) for a particular shot and time point.
"""
fname = os.path.join(v3fit_dir, str(shot), '{0:.1f}'.format(time_ms), fit_name, 'wout_{0:}.nc'.format(fit_name))
if not os.path.isfile(fname):
raise FileNotFoundError('Could not find woutfile for requested shot.')
else:
return fname
|
5288c71c8ba2201a9cc02062568d61ac26b32d92
| 16,202
|
def get_mincostroute_edge2edge(id_rootedge, id_targetedge, D, P):
"""
Returns cost and shortest path from rootedge to a specific targetedge.
D, P must be precalculated for rootnode with function dijkstraPlainEdge
"""
route = [id_targetedge]
    if id_targetedge not in P:  # dict.has_key() is Python 2 only
return 0.0, []
e = id_targetedge
while e != id_rootedge:
id_edge = P[e]
route.append(id_edge)
e = id_edge
route.reverse()
return D[id_targetedge], route
|
310b174c7cadb90f1a83c8ec16c00cfd94e224a8
| 16,203
|
def akmcsInfocheck(akmcsInfo):
""" Helper function to check the AKMCS dictionary.
Checks akmcsInfo dict and sets default values, if required
parameters are not supplied.
Args:
akmcsInfo (dict): Dictionary that contains AKMCS information.
Returns:
akmcsInfo: Checked/Modified AKMCS Information.
"""
if "init_samp" not in akmcsInfo:
raise ValueError('akmcsInfo["init_samp"] must be defined')
if "maxupdate" not in akmcsInfo:
akmcsInfo["maxupdate"] = 120
print("Maximum update is set to ", akmcsInfo["maxupdate"])
else:
print("Maximum update is set to ", akmcsInfo["maxupdate"], " by user.")
if "problem" not in akmcsInfo:
raise ValueError('akmcsInfo["problem"] must be defined')
return akmcsInfo
|
5e70785cb2c6050f9e10b8936e64ed19b71cf4d5
| 16,204
|
def is_null(value, replace: str = ""):
"""
Checks if something is empty and will replace it with the given value
Parameters
----------
value
A thing to check if empty
replace : string
The thing to replace it with
"""
return value if value else replace
|
ae81cf7842de03653b7496984f577d67c3edecf2
| 16,205
|
from typing import Any
from typing import Union
def compare_values(
test_value: Any, expected_value: Any, tolerance: Union[float, None] = None
):
"""A dispatch function to compare value against an expecte value.
:param Any test_value: The value to test.
:param Any expected_value: The value expected.
:param Union[float,None] tolerance: The tolerance (absolute) level to use when testing.
:return: A tuple with first position being bool and the result of the comparison and the
second is the comaprison message (if True the message is an empty string).
:rtype: Tuple[bool, str]
"""
return compare_values(str(test_value), str(expected_value))
|
d6322c85135c7a6a6338f75a4abcf2851e933ef6
| 16,206
|
def mean(values):
"""
Mean function.
"""
m = 0.0
for value in values:
m = m + value/len(values)
return m
|
4415b4f9aa373b54418683301ffe751249270d4c
| 16,207
|
def set_pianoroll_shape(pianoroll, data_shape):
"""Set the pianoroll shape and return the pianoroll."""
pianoroll.set_shape(data_shape)
return pianoroll
|
ca326624a83252411aa996b31bd6cfd8dd6c5baa
| 16,209
|
def evs_to_policy(search_policy_evs, *, temperature=1.0, use_softmax=True):
"""Compute policy targets from EVs.
Args:
search_policy_evs: Float tensor [T, B, 7, max_actions]. Invalid values are marked with -1.
temperature: temperature for softmax. Ignored if softmax is not used.
use_softmax: whether to apply exp() before normalizing.
Returns:
search_policy_probs: Float tensor [T, B, 7, max_actions].
"""
search_policy_probs = search_policy_evs.clone().float()
invalid_mask = search_policy_evs < -0.5
if use_softmax:
search_policy_probs /= temperature
# Using -1e8 instead of -inf so that softmax is defined even if all
# orders are masked out.
search_policy_probs[invalid_mask] = -1e8
search_policy_probs = search_policy_probs.softmax(-1)
else:
search_policy_probs.masked_fill_(invalid_mask, 0.0)
search_policy_probs /= (invalid_mask + 1e-20).sum(-1, keepdim=True)
return search_policy_probs.to(search_policy_evs.dtype)
|
46ac6e9fee0a8c010aef23519cbc594f0c0eb09d
| 16,210
|
def extract_list (data_str, to_float=False):
""" Extract a list of floating point values from a string
"""
split_str = data_str.split(',')
if to_float == True:
split_str = [float(x) for x in split_str]
return split_str
|
09825f9d7533bfbe9812bd6956b096cc33cb0be1
| 16,211
|
def prompt(message, values=None):
"""Prompt a message and return the user input.
Set values to None for binary input ('y' or 'n'), to 0 for integer input, or to a list of possible inputs.
"""
if values is None:
message += ' (y/n)'
message += ' > '
while 1:
ans = input(message)
if values is None:
if ans.lower() == 'y':
return True
elif ans.lower() == 'n':
return False
elif values == 0:
try:
read = int(ans)
            except ValueError:
continue
return read
elif ans in values:
return ans
|
4d6b87b84ed1dfd95722e0055a8df3b1b34f1e84
| 16,212
|
def words_vec(w2v, words, use_norm=False):
"""
Return a dict that maps the given words to their embeddings.
"""
if callable(getattr(w2v, 'words_vec', None)):
return w2v.words_vec(words, use_norm)
return {word: w2v.wv.word_vec(word, use_norm) for word in words if word in w2v.wv}
|
46447f389800e36d7149da245fb0d38be24bbbe3
| 16,213
|
def soft_rescale(data):
"""Soft scale data back to [0, 1]
If data is in [0, 1], do nothing. Otherwise, scale the side outside this bound back to [0, 1]
Args:
        data: array supporting .min() and .max() (e.g., a numpy array)
    Returns:
        the array rescaled so its values lie within [0, 1]
"""
a_max = max(data.max(), 1)
a_min = min(data.min(), 0)
data = (data - a_min) / (a_max - a_min)
return data
|
748f1b5ef4f7a30b54bdb7b01201b19e9a58cb5c
| 16,214
|