content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
from typing import List
def load_actions(action_file: str) -> List[str]:
    """
    Load the set of unique actions stored in an action file.

    :param action_file: path to the file listing one action per line
    :return: the de-duplicated action lines, as produced by load_uniq_lines
    """
    unique_actions = load_uniq_lines(action_file)
    return unique_actions
def get_formula(name, sbml_model, params, assignment_rules, replacements):
    """
    Build a string representation of an SBML function definition with its
    formal arguments substituted by the given parameter values.

    @param name: function name
    @type name: str
    @param sbml_model: libsbml model
    @type sbml_model: libsbml.model
    @param params: parameter values
    @type params: list
    @param assignment_rules: dictionary of assignment rules
    @type assignment_rules: dict
    @param replacements: dictionary of replacements
    @type replacements: dict
    @return: formula
    @rtype: str
    """
    func_def = sbml_model.getFunctionDefinition(name)
    arg_names = [func_def.getArgument(idx).getName()
                 for idx in range(func_def.getNumArguments())]
    bindings = dict(zip(arg_names, params))
    # TODO: the formulaToString method does not work for functions such as log, exp, etc ...
    # TODO: unify the math conversion (decide whether to use the ast_to_string parser or the libsbml mehtod
    body = ast_to_string(func_def.getBody(), sbml_model, assignment_rules, replacements)
    # Pad with spaces so that replacement only hits whole, space-delimited tokens.
    formula = ' ' + body + ' '
    for arg_name, value in bindings.items():
        formula = formula.replace(' ' + arg_name + ' ', str(value))
    return formula
def ConvertFile(filename_in, filename_out, loglevel='INFO'):
    """
    Converts an ANSYS input file to a python pyansys script.
    Parameters
    ----------
    filename_in : str
        Filename of the ansys input file to read in.
    filename_out : str
        Filename of the python script to write a translation to.
    Returns
    -------
    clines : list
        List of lines translated
    """
    translated = []
    with open(filename_in) as file_in, open(filename_out, 'w') as file_out:
        # Script preamble: import pyansys and start an ANSYS session.
        file_out.write('import pyansys\n')
        file_out.write('ansys = pyansys.ANSYS(loglevel="%s")\n' % loglevel)
        for line in file_in.readlines():
            converted = ConvertLine(line)
            file_out.write(converted)
            translated.append(converted)
        # Always terminate the generated script with an explicit exit.
        exit_line = 'ansys.Exit()\n'
        file_out.write(exit_line)
        translated.append(exit_line)
    return translated
from typing import Optional
def get_metadata_saml(idp_id: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMetadataSamlResult:
    """
    Use this data source to retrieve SAML IdP metadata from Okta.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_okta as okta
    example = okta.idp.get_metadata_saml(id="<idp id>")
    ```
    :param str idp_id: The id of the IdP to retrieve metadata for.
    """
    __args__ = {'idpId': idp_id}
    # Fill in default invoke options, including the provider version.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    invoke_result = pulumi.runtime.invoke('okta:idp/getMetadataSaml:getMetadataSaml', __args__, opts=opts, typ=GetMetadataSamlResult).value
    # Re-wrap the raw result in its awaitable counterpart.
    return AwaitableGetMetadataSamlResult(
        assertions_signed=invoke_result.assertions_signed,
        authn_request_signed=invoke_result.authn_request_signed,
        encryption_certificate=invoke_result.encryption_certificate,
        entity_id=invoke_result.entity_id,
        http_post_binding=invoke_result.http_post_binding,
        http_redirect_binding=invoke_result.http_redirect_binding,
        id=invoke_result.id,
        idp_id=invoke_result.idp_id,
        metadata=invoke_result.metadata,
        signing_certificate=invoke_result.signing_certificate)
from typing import Dict
def _average_latency(row: Dict):
"""
Calculate average latency for Performance Analyzer single test
"""
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum(int(row.get(f, 0)) for f in avg_sum_fields)
return avg_latency | f321cb4d55af605298225f2f0146a9a71ee7895b | 30,504 |
from typing import Dict
from typing import Any
import json
import re
def load_dict_from_string(string: str) -> Dict[str, Any]:
    """Convert string to JSON string, convert to a dictionary, and return.

    The input is first normalized by ``jsonify_string`` (defined elsewhere),
    then parsed with ``json.loads``. String values that look like
    nan/inf are converted to floats, and values containing "unit." are
    evaluated with ``eval``.

    :param string: raw text to parse
    :return: the resulting dictionary
    :raises ValueError: if the jsonified string is not valid JSON
    """
    logger.debug('Loading Dictionary from string: "{}"'.format(string))
    json_string = jsonify_string(string)
    try:
        dict_ = json.loads(json_string)
    except json.decoder.JSONDecodeError:
        # Re-raise as ValueError with the offending (jsonified) string.
        raise ValueError('Error loading from json string: "{}"'.format(json_string))
    # Provide specialized handling of certain strings
    for key, val in dict_.items():
        if isinstance(val, str):
            # NOTE: re.match only anchors at the start, so e.g. "inf_rate"
            # would also be converted -- presumably intentional; verify.
            match = re.match(r"([+-]?nan|[+-]?inf)", val)
            if match:
                dict_[key] = float(match.group(1))
            elif "unit." in val:
                # SECURITY: eval on parsed input -- only safe if the source
                # string is trusted. Flagged for review.
                dict_[key] = eval(val)
    return dict_
def _(expr, assumptions):
    """
    Integer**Integer -> !Prime

    Prime-predicate handler for Pow expressions (sympy assumptions system).
    Returns False when base and exponent are known integers, a delegated
    numeric result when the expression is a number, and None (undecided)
    otherwise.
    """
    # Concrete numbers get the shared numeric primality check.
    if expr.is_number:
        return _PrimePredicate_number(expr, assumptions)
    # Per the docstring, an integer power of an integer is treated as
    # not prime. NOTE(review): this assumes exponent > 1 is guaranteed by
    # the caller/dispatcher -- confirm upstream.
    if ask(Q.integer(expr.exp), assumptions) and \
       ask(Q.integer(expr.base), assumptions):
        return False
import tqdm
import torch
def train(model, trainLoader, optimizer, loss_function, device, trainParams):
    """
    Function to train the model for one iteration. (Generally, one iteration = one epoch, but here it is one step).
    It also computes the training loss, CER and WER. The CTC decode scheme is always 'greedy' here.

    Args:
        model: the network to train (set to train mode inside the loop).
        trainLoader: iterable of (input, target, inputLen, targetLen) batches.
        optimizer: torch optimizer stepped once per batch.
        loss_function: CTC-style loss taking (output, target, inputLen, targetLen).
        device: torch device that batches are moved to.
        trainParams: dict; must contain "eosIx" and "spaceIx".
    Returns:
        (mean loss, mean CER, mean WER) over all batches.
    """
    trainingLoss = 0
    trainingCER = 0
    trainingWER = 0
    # NOTE(review): `tqdm` must be the callable (from tqdm import tqdm);
    # a bare `import tqdm` would make this call fail -- confirm the import.
    for batch, (inputBatch, targetBatch, inputLenBatch, targetLenBatch) in enumerate(tqdm(trainLoader, leave=False, desc="Train",
                                                                                          ncols=75)):
        inputBatch, targetBatch = (inputBatch.float()).to(device), (targetBatch.int()).to(device)
        inputLenBatch, targetLenBatch = (inputLenBatch.int()).to(device), (targetLenBatch.int()).to(device)
        optimizer.zero_grad()
        model.train()
        outputBatch = model(inputBatch)
        # cudnn is disabled for the loss computation (commonly needed for
        # deterministic/double-backward CTC loss).
        with torch.backends.cudnn.flags(enabled=False):
            loss = loss_function(outputBatch, targetBatch, inputLenBatch, targetLenBatch)
        loss.backward()
        optimizer.step()
        trainingLoss = trainingLoss + loss.item()
        # Greedy CTC decode on detached outputs; metrics accumulate per batch.
        predictionBatch, predictionLenBatch = ctc_greedy_decode(outputBatch.detach(), inputLenBatch, trainParams["eosIx"])
        trainingCER = trainingCER + compute_cer(predictionBatch, targetBatch, predictionLenBatch, targetLenBatch)
        trainingWER = trainingWER + compute_wer(predictionBatch, targetBatch, predictionLenBatch, targetLenBatch, trainParams["spaceIx"])
    # Average over the number of batches (not the number of samples).
    trainingLoss = trainingLoss/len(trainLoader)
    trainingCER = trainingCER/len(trainLoader)
    trainingWER = trainingWER/len(trainLoader)
    return trainingLoss, trainingCER, trainingWER
def reorder(A, B):
    """Change coefficient order from y**2 xy x**2 to x**2 xy y**2 in both A and B.
    Parameters
    ----------
    A : array
        polynomial coefficients
    B : array
        polynomial coefficients
    Returns
    -------
    A2, B2: numpy arrays
        coefficients with changed order
    """
    degree = polynomial_degree(len(A))
    A_flipped = np.zeros((len(A)))
    B_flipped = np.zeros((len(B)))
    # The coefficients form a triangle; reverse each row of the triangle.
    for row in range(degree + 1):
        base = row * (row + 1) // 2
        for col in range(row + 1):
            A_flipped[base + col] = A[base + row - col]
            B_flipped[base + col] = B[base + row - col]
    return A_flipped, B_flipped
import functools
import collections
def collate_revs(old, new, key=lambda x: x, merge=lambda old, new: new):
    """
    Given revision sets old and new, each containing a series
    of revisions of some set of objects, collate them based on
    these rules:
    - all items from each set are yielded in stable order
    - items in old are yielded first
    - items in new are yielded last
    - items that match are yielded in the order in which they
      appear, giving preference to new
    Items match based on the 'key' parameter (identity by default).
    Items are merged using the 'merge' function, which accepts the old
    and new items to be merged (returning new by default).
    This algorithm requires fully materializing both old and new in memory.
    >>> rev1 = ['a', 'b', 'c']
    >>> rev2 = ['a', 'd', 'c']
    >>> result = list(collate_revs(rev1, rev2))
    'd' must appear before 'c'
    >>> result.index('d') < result.index('c')
    True
    'b' must appear before 'd' because it came chronologically
    first.
    >>> result.index('b') < result.index('d')
    True
    >>> result
    ['a', 'b', 'd', 'c']
    >>> list(collate_revs(['a', 'b', 'c'], ['d']))
    ['a', 'b', 'c', 'd']
    >>> list(collate_revs(['b', 'a'], ['a', 'b']))
    ['a', 'b']
    >>> list(collate_revs(['a', 'c'], ['a', 'b', 'c']))
    ['a', 'b', 'c']
    Given two sequences of things out of order, regardless
    of which order in which the items are merged, all
    keys should always be merged.
    >>> from more_itertools import consume
    >>> left_items = ['a', 'b', 'c']
    >>> right_items = ['a', 'c', 'b']
    >>> consume(collate_revs(left_items, right_items, merge=print))
    a a
    c c
    b b
    >>> consume(collate_revs(right_items, left_items, merge=print))
    a a
    b b
    c c
    The merge should not suppress non-True items:
    >>> consume(collate_revs([0, 1, 2, None, ''], [0, None, ''], merge=print))
    None None
    <BLANKLINE>
    0 0
    """
    # Sentinel standing for "no counterpart on the other side"; cannot
    # collide with real items (unlike None, 0, '').
    missing = object()
    def maybe_merge(*items):
        """
        Merge any non-null items
        """
        def not_missing(ob):
            return ob is not missing
        return functools.reduce(merge, filter(not_missing, items))
    # Ordered key -> item maps so each revision keeps its original order.
    new_items = collections.OrderedDict((key(el), el) for el in new)
    old_items = collections.OrderedDict((key(el), el) for el in old)
    # use the old_items as a reference
    for old_key, old_item in _mutable_iter(old_items):
        if old_key not in new_items:
            yield old_item
            continue
        # yield all new items that appear before the matching key
        # (partition_dict / _swap_on_miss are helpers defined elsewhere
        # in this module.)
        before, match_new, new_items = _swap_on_miss(partition_dict(new_items, old_key))
        for new_key, new_item in before.items():
            # ensure any new keys are merged with previous items if
            # they exist
            yield maybe_merge(new_item, old_items.pop(new_key, missing))
        yield merge(old_item, match_new)
    # finally, yield whatever is leftover
    # yield from new_items.values()
    for item in new_items.values():
        yield item
from typing import Tuple
from typing import List
def load_conversation(
    filename: str,
    dictionary: corpora.Dictionary,
    with_symbol: bool=True
) -> (Tuple[List[int], List[int]]):
    """Load a conversation corpus.

    Args:
        filename (str): corpus file.
            Each line must hold (source string, separator, target string),
            with the words of each side separated by spaces, e.g.
                "何 が 好き です か ?,Python が 好き です 。"
        dictionary: word dictionary (accepted for interface compatibility;
            not used in this function).
        with_symbol: when True, append the END symbol to the source tokens
            and wrap the target tokens in END symbols.
    Returns:
        A list of (source tokens, target tokens) pairs.
        NOTE(review): the annotation suggests id lists like ([1, 2, 3],
        [4, 5, 6]), but the function returns token strings -- confirm.
    """
    # Fix: close the corpus file deterministically (the original leaked the
    # handle returned by open()).
    with open(filename) as corpus:
        pairs = [sent.split(config.SEPARATOR) for sent in corpus]
    if with_symbol:
        tokens = [
            (list(src.split()) + [config.END_SYMBOL],
             [config.END_SYMBOL] + dst.split() + [config.END_SYMBOL])
            for src, dst in pairs
        ]
    else:
        tokens = [
            (list(src.split()), dst.split())
            for src, dst in pairs
        ]
    print("loaded sentences from {}".format(filename))
    return tokens
def close_incons_reduction(incons: list):
    """
    Two step:
    0. under the same backends pair
    1. the same input, choose largest.(done before)
    * 2. different inputs with small distance. Do not update(not used)
    """
    def _find_close(candidate: tuple, kept: list):
        """Return (True, kept_item) when a kept inconsistency lies within distance_threshold."""
        for kept_item in kept:
            if abs(candidate[1] - kept_item[1]) <= distance_threshold:
                return True, kept_item
        return False, None

    unique_incons = list()
    merged_into = dict()
    for incon in incons:
        found, neighbour = _find_close(incon, unique_incons)
        if found:
            # Record which retained inconsistency this one collapsed into.
            merged_into[incon] = neighbour
        else:
            unique_incons.append(incon)
    return unique_incons, merged_into
def mf2tojf2(mf2):
    """Convert an mf2 parse dict into its jf2 representation.

    Delegates the (recursive) property flattening to flattenProperties.
    """
    items = mf2.get("items", [])
    return flattenProperties(items, isOuter=True)
def import_all(filename):
    """Import run parameters from a CSV file supplied by the user.

    Values are kept as strings; conversion to int is done by later
    functions. The parser is currently not robust to missing or
    incorrect arguments in the file.

    inputs: filename - name of input file, currently as inputs.csv
    output: file_list - list of parameters specified by user
    """
    import csv  # local import so this function is self-contained

    with open(filename, 'r') as file:
        file_read = csv.reader(file)
        file_list = list(file_read)
    # Fix: the original used the Python 2 print statement, which is a
    # SyntaxError under Python 3.
    print("Imported file: " + str(filename))
    return file_list
from typing import Union
from pathlib import Path
from typing import Dict
from typing import Tuple
from typing import Any
def fill_database(path: Union[str, Path], settings: SettingsConfig,
                  inputs: MeasurementInputs, alchemy: Alchemy,
                  parent_location_id: int, sex_id: int, child_prior: Dict[str, Dict[str, np.ndarray]],
                  mulcov_prior: Dict[Tuple[str, str, str], _Prior],
                  options: Dict[str, Any]) -> DismodFiller:
    """
    Fill a DisMod database at the specified path with the inputs, model, and settings
    specified, for a specific parent and sex ID, with options to override the priors.

    Args:
        path: location of the DisMod database file to fill.
        settings: model settings configuration.
        inputs: measurement inputs for the model.
        alchemy: grid alchemy object used to build the model grids.
        parent_location_id: parent location to model.
        sex_id: sex to model.
        child_prior: prior overrides keyed by rate then prior attribute.
        mulcov_prior: covariate-multiplier prior overrides keyed by
            (covariate, rate/measure, name) tuples.
        options: forwarded as keyword arguments to fill_for_parent_child.
    Returns:
        The DismodFiller after it has populated the database.
    """
    df = DismodFiller(
        path=path, settings_configuration=settings, measurement_inputs=inputs,
        grid_alchemy=alchemy, parent_location_id=parent_location_id, sex_id=sex_id,
        child_prior=child_prior, mulcov_prior=mulcov_prior,
    )
    # Actually writes the model into the database; `options` override defaults.
    df.fill_for_parent_child(**options)
    return df
def ydhms2dt(year, doy, hh, mm, ss):
    """
    ydhms2dt Take a year, day-of-year, etc and convert it into a datetime object
    Usage: dto = ydhms2dt(year,doy,hh,mm,ss)
    Input: year - 4 digit integer
           doy - 3 digit, or less integer, (1 <= doy <= 366)
           hh - 2 digit, or less int, (0 <= hh < 24)
           mm - 2 digit, or less int, (0 <= mm < 60)
           ss - float
    Output: 'dto' a datetime object
    """
    # Split the float seconds into whole seconds and microseconds
    # (truncated, matching the original int() behaviour).
    frac, sec = modf(float(ss))
    microseconds = int(frac * 1e6)
    # Bug fix: the original used the literals `01` for month and day, which
    # is a SyntaxError under Python 3 (leading-zero "octal" notation).
    dto = dt.datetime(int(year), 1, 1, int(hh), int(mm), int(sec), microseconds)
    # Day-of-year is 1-based, so offset from January 1st by (doy - 1) days.
    return dto + dt.timedelta(days=(int(doy) - 1))
def create_test_db(verbosity=1, autoclobber=False):
    """
    Creates a test database, prompting the user for confirmation if the
    database already exists. Returns the name of the test database created.

    NOTE: legacy Python 2 / old-Django code (print statements,
    ``except Exception, e``, ``raw_input``); it will not parse under
    Python 3 and is kept verbatim.
    """
    # If the database backend wants to create the test DB itself, let it
    creation_module = get_creation_module()
    if hasattr(creation_module, "create_test_db"):
        creation_module.create_test_db(settings, connection, verbosity, autoclobber)
        # NOTE(review): returns None here although the docstring promises
        # the test database name -- confirm callers tolerate this.
        return
    if verbosity >= 1:
        print "Creating test database..."
    # If we're using SQLite, it's more convenient to test against an
    # in-memory database. Using the TEST_DATABASE_NAME setting you can still choose
    # to run on a physical database.
    if settings.DATABASE_ENGINE == "sqlite3":
        if settings.TEST_DATABASE_NAME and settings.TEST_DATABASE_NAME != ":memory:":
            TEST_DATABASE_NAME = settings.TEST_DATABASE_NAME
            # Erase the old test database
            if verbosity >= 1:
                print "Destroying old test database..."
            # For file-based SQLite the "database" is just a file on disk.
            if os.access(TEST_DATABASE_NAME, os.F_OK):
                if not autoclobber:
                    confirm = raw_input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % TEST_DATABASE_NAME)
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print "Destroying old test database..."
                        os.remove(TEST_DATABASE_NAME)
                    except Exception, e:
                        sys.stderr.write("Got an error deleting the old test database: %s\n" % e)
                        sys.exit(2)
                else:
                    print "Tests cancelled."
                    sys.exit(1)
            if verbosity >= 1:
                print "Creating test database..."
        else:
            TEST_DATABASE_NAME = ":memory:"
    else:
        # Engine-specific CREATE DATABASE suffix (charset, tablespace, ...).
        suffix = {
            'postgresql': get_postgresql_create_suffix,
            'postgresql_psycopg2': get_postgresql_create_suffix,
            'mysql': get_mysql_create_suffix,
            'mysql_old': get_mysql_create_suffix,
        }.get(settings.DATABASE_ENGINE, lambda: '')()
        if settings.TEST_DATABASE_NAME:
            TEST_DATABASE_NAME = settings.TEST_DATABASE_NAME
        else:
            TEST_DATABASE_NAME = TEST_DATABASE_PREFIX + settings.DATABASE_NAME
        qn = connection.ops.quote_name
        # Create the test database and connect to it. We need to autocommit
        # if the database supports it because PostgreSQL doesn't allow
        # CREATE/DROP DATABASE statements within transactions.
        cursor = connection.cursor()
        _set_autocommit(connection)
        try:
            cursor.execute("CREATE DATABASE %s %s" % (qn(TEST_DATABASE_NAME), suffix))
        except Exception, e:
            sys.stderr.write("Got an error creating the test database: %s\n" % e)
            if not autoclobber:
                confirm = raw_input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % TEST_DATABASE_NAME)
            if autoclobber or confirm == 'yes':
                # Drop-and-recreate on collision with an existing test DB.
                try:
                    if verbosity >= 1:
                        print "Destroying old test database..."
                    cursor.execute("DROP DATABASE %s" % qn(TEST_DATABASE_NAME))
                    if verbosity >= 1:
                        print "Creating test database..."
                    cursor.execute("CREATE DATABASE %s %s" % (qn(TEST_DATABASE_NAME), suffix))
                except Exception, e:
                    sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                    sys.exit(2)
            else:
                print "Tests cancelled."
                sys.exit(1)
    # Point the connection at the freshly created test database and build
    # the schema (plus the cache table, if the cache lives in the DB).
    connection.close()
    settings.DATABASE_NAME = TEST_DATABASE_NAME
    call_command('syncdb', verbosity=verbosity, interactive=False)
    if settings.CACHE_BACKEND.startswith('db://'):
        cache_name = settings.CACHE_BACKEND[len('db://'):]
        call_command('createcachetable', cache_name)
    # Get a cursor (even though we don't need one yet). This has
    # the side effect of initializing the test database.
    cursor = connection.cursor()
    return TEST_DATABASE_NAME
import _ctypes
def key_import(data, key_type=KEY_TYPE.SYMMETRIC, password=b''):
    """Import a key (or key-generation parameters) from raw *data*.

    The imported handle is wrapped in a Key object.
    """
    imported = _ctypes.c_void_p()
    pwd = _ctypes.c_char_p(password)
    # Delegate to the native yaca library; the handle is filled in-place.
    _lib.yaca_key_import(key_type.value, pwd,
                         data, len(data), _ctypes.byref(imported))
    return Key(imported)
def test_training_arguments_timestamp(monkeypatch, grim_config):
    """Test TrainingWrapperArguments correctly applies a timestamp."""
    def mock_return():
        # Fixed timestamp so the assertion below is deterministic.
        return '2019-06-29_17-13-41'
    # Patch the timestamp helper used by the wrapper.
    monkeypatch.setattr(grimagents.common, "get_timestamp", mock_return)
    grim_config['--timestamp'] = True
    arguments = TrainingWrapperArguments(grim_config)
    arguments_string = arguments.get_arguments_as_string()
    # The run id is expected to become '<run-id>-<timestamp>'.
    assert '--run-id 3DBall-2019-06-29_17-13-41' in arguments_string
def to_vsizip(zipfn, relpth):
    """Build a GDAL /vsizip/ virtual path for *relpth* inside *zipfn*."""
    template = "/vsizip/{}/{}"
    return template.format(zipfn, relpth)
import sys
def readMashDBParams(dbPrefix, kmers, sketch_sizes, mash_exec = 'mash'):
    """Get kmers lengths and sketch sizes from existing database
    Calls :func:`~getKmersFromReferenceDatabase` and :func:`~getSketchSize`
    Uses passed values if db missing
    Args:
        dbPrefix (str)
            Prefix for sketch DB files
        kmers (list)
            Kmers to use if db not found
        sketch_sizes (list)
            Sketch size to use if db not found
        mash_exec (str)
            Location of mash executable
            Default = 'mash'
    Returns:
        kmers (list)
            List of k-mer lengths used in database
        sketch_sizes (list)
            List of sketch sizes used in database
    """
    db_kmers = getKmersFromReferenceDatabase(dbPrefix)
    if db_kmers:
        # Database found: its parameters override the command-line values.
        kmers = db_kmers
        sketch_sizes = getSketchSize(dbPrefix, kmers, mash_exec)
    else:
        sys.stderr.write("Couldn't find mash sketches in " + dbPrefix + "\n"
                         "Using command line input parameters for k-mer and sketch sizes\n")
    return kmers, sketch_sizes
import fastr
def crossval(config, label_data, image_features,
param_grid=None, use_fastr=False,
fastr_plugin=None, tempsave=False,
fixedsplits=None, ensemble={'Use': False}, outputfolder=None,
modus='singlelabel'):
"""
Constructs multiple individual classifiers based on the label settings
Parameters
----------
config: dict, mandatory
Dictionary with config settings. See the Github Wiki for the
available fields and formatting.
label_data: dict, mandatory
Should contain the following:
patient_IDs (list): IDs of the patients, used to keep track of test and
training sets, and label data
label (list): List of lists, where each list contains the
label status for that patient for each
label
label_name (list): Contains the different names that are stored
in the label object
image_features: numpy array, mandatory
Consists of a tuple of two lists for each patient:
(feature_values, feature_labels)
param_grid: dictionary, optional
Contains the parameters and their values wich are used in the
grid or randomized search hyperparamater optimization. See the
construct_classifier function for some examples.
use_fastr: boolean, default False
If False, parallel execution through Joblib is used for fast
execution of the hyperparameter optimization. Especially suited
for execution on mutlicore (H)PC's. The settings used are
specified in the config.ini file in the IOparser folder, which you
can adjust to your system.
If True, fastr is used to split the hyperparameter optimization in
separate jobs. Parameters for the splitting can be specified in the
config file. Especially suited for clusters.
fastr_plugin: string, default None
Determines which plugin is used for fastr executions.
When None, uses the default plugin from the fastr config.
tempsave: boolean, default False
If True, create a .hdf5 file after each cross validation containing
the classifier and results from that that split. This is written to
the GSOut folder in your fastr output mount. If False, only
the result of all combined cross validations will be saved to a .hdf5
file. This will also be done if set to True.
fixedsplits: string, optional
By default, random split cross validation is used to train and
evaluate the machine learning methods. Optionally, you can provide
a .xlsx file containing fixed splits to be used. See the Github Wiki
for the format.
ensemble: dictionary, optional
Contains the configuration for constructing an ensemble.
modus: string, default 'singlelabel'
Determine whether one-vs-all classification (or regression) for
each single label is used ('singlelabel') or if multilabel
classification is performed ('multilabel').
Returns
----------
panda_data: pandas dataframe
Contains all information on the trained classifier.
"""
if tempsave:
# Define all possible regressors
regressors = ['SVR', 'RFR', 'SGDR', 'Lasso', 'ElasticNet']
# Process input data
patient_IDs = label_data['patient_IDs']
label_value = label_data['label']
label_name = label_data['label_name']
if outputfolder is None:
logfilename = os.path.join(os.getcwd(), 'classifier.log')
else:
logfilename = os.path.join(outputfolder, 'classifier.log')
print("Logging to file " + str(logfilename))
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logging.basicConfig(filename=logfilename, level=logging.DEBUG)
N_iterations = config['CrossValidation']['N_iterations']
test_size = config['CrossValidation']['test_size']
classifier_labelss = dict()
logging.debug('Starting classifier')
# We only need one label instance, assuming they are all the sample
feature_labels = image_features[0][1]
# Check if we need to use fixedsplits:
if fixedsplits is not None and '.xlsx' in fixedsplits:
# fixedsplits = '/home/mstarmans/Settings/RandomSufflingOfData.xlsx'
wb = xlrd.open_workbook(fixedsplits)
wb = wb.sheet_by_index(1)
if modus == 'singlelabel':
print('Performing Single class classification.')
logging.debug('Performing Single class classification.')
elif modus == 'multilabel':
print('Performing Multi label classification.')
logging.debug('Performing Multi class classification.')
label_value = [label_value]
label_name = [label_name]
else:
m = ('{} is not a valid modus!').format(modus)
logging.debug(m)
raise ae.WORCKeyError(m)
for i_class, i_name in zip(label_value, label_name):
if modus == 'singlelabel':
i_class_temp = i_class.ravel()
save_data = list()
for i in range(0, N_iterations):
print(('Cross validation iteration {} / {} .').format(str(i + 1), str(N_iterations)))
logging.debug(('Cross validation iteration {} / {} .').format(str(i + 1), str(N_iterations)))
random_seed = np.random.randint(5000)
# Split into test and training set, where the percentage of each
# label is maintained
if any(clf in regressors for clf in param_grid['classifiers']):
# We cannot do a stratified shuffle split with regression
stratify = None
else:
if modus == 'singlelabel':
stratify = i_class_temp
elif modus == 'multilabel':
# Create a stratification object from the labels
# Label = 0 means no label equals one
# Other label numbers refer to the label name that is 1
stratify = list()
for pnum in range(0, len(i_class[0])):
plabel = 0
for lnum, slabel in enumerate(i_class):
if slabel[pnum] == 1:
plabel = lnum + 1
stratify.append(plabel)
# Sklearn multiclass requires rows to be objects/patients
# i_class = i_class.reshape(i_class.shape[1], i_class.shape[0])
i_class_temp = np.zeros((i_class.shape[1], i_class.shape[0]))
for n_patient in range(0, i_class.shape[1]):
for n_label in range(0, i_class.shape[0]):
i_class_temp[n_patient, n_label] = i_class[n_label, n_patient]
i_class_temp = i_class_temp
else:
raise ae.WORCKeyError('{} is not a valid modus!').format(modus)
if fixedsplits is None:
# Use Random Split. Split per patient, not per sample
unique_patient_IDs, unique_indices =\
np.unique(np.asarray(patient_IDs), return_index=True)
if any(clf in regressors for clf in param_grid['classifiers']):
unique_stratify = None
else:
unique_stratify = [stratify[i] for i in unique_indices]
try:
unique_PID_train, indices_PID_test\
= train_test_split(unique_patient_IDs,
test_size=test_size,
random_state=random_seed,
stratify=unique_stratify)
except ValueError as e:
e = str(e) + ' Increase the size of your validation set.'
raise ae.WORCValueError(e)
# Check for all IDs if they are in test or training
indices_train = list()
indices_test = list()
patient_ID_train = list()
patient_ID_test = list()
for num, pid in enumerate(patient_IDs):
if pid in unique_PID_train:
indices_train.append(num)
# Make sure we get a unique ID
if pid in patient_ID_train:
n = 1
while str(pid + '_' + str(n)) in patient_ID_train:
n += 1
pid = str(pid + '_' + str(n))
patient_ID_train.append(pid)
else:
indices_test.append(num)
# Make sure we get a unique ID
if pid in patient_ID_test:
n = 1
while str(pid + '_' + str(n)) in patient_ID_test:
n += 1
pid = str(pid + '_' + str(n))
patient_ID_test.append(pid)
# Split features and labels accordingly
X_train = [image_features[i] for i in indices_train]
X_test = [image_features[i] for i in indices_test]
if modus == 'singlelabel':
Y_train = i_class_temp[indices_train]
Y_test = i_class_temp[indices_test]
elif modus == 'multilabel':
Y_train = i_class_temp[indices_train, :]
Y_test = i_class_temp[indices_test, :]
else:
raise ae.WORCKeyError('{} is not a valid modus!').format(modus)
else:
# Use pre defined splits
indices = wb.col_values(i)
indices = [int(j) for j in indices[1:]] # First element is "Iteration x"
train = indices[0:121]
test = indices[121:]
# Convert the numbers to the correct indices
ind_train = list()
for j in train:
success = False
for num, p in enumerate(patient_IDs):
if str(j).zfill(3) == p[0:3]:
ind_train.append(num)
success = True
if not success:
raise ae.WORCIOError("Patient " + str(j).zfill(3) + " is not included!")
ind_test = list()
for j in test:
success = False
for num, p in enumerate(patient_IDs):
if str(j).zfill(3) == p[0:3]:
ind_test.append(num)
success = True
if not success:
raise ae.WORCIOError("Patient " + str(j).zfill(3) + " is not included!")
X_train = np.asarray(image_features)[ind_train].tolist()
Y_train = np.asarray(i_class_temp)[ind_train].tolist()
patient_ID_train = patient_IDs[ind_train]
X_test = np.asarray(image_features)[ind_test].tolist()
Y_test = np.asarray(i_class_temp)[ind_test].tolist()
patient_ID_test = patient_IDs[ind_test]
# Find best hyperparameters and construct classifier
config['HyperOptimization']['use_fastr'] = use_fastr
config['HyperOptimization']['fastr_plugin'] = fastr_plugin
n_cores = config['General']['Joblib_ncores']
trained_classifier = random_search_parameters(features=X_train,
labels=Y_train,
param_grid=param_grid,
n_cores=n_cores,
**config['HyperOptimization'])
# Create an ensemble if required
if ensemble['Use']:
trained_classifier.create_ensemble(X_train, Y_train)
# We only want to save the feature values and one label array
X_train = [x[0] for x in X_train]
X_test = [x[0] for x in X_test]
temp_save_data = (trained_classifier, X_train, X_test, Y_train,
Y_test, patient_ID_train, patient_ID_test, random_seed)
save_data.append(temp_save_data)
# Create a temporary save
if tempsave:
panda_labels = ['trained_classifier', 'X_train', 'X_test', 'Y_train', 'Y_test',
'config', 'patient_ID_train', 'patient_ID_test',
'random_seed']
panda_data_temp =\
pd.Series([trained_classifier, X_train, X_test, Y_train,
Y_test, config, patient_ID_train,
patient_ID_test, random_seed],
index=panda_labels,
name='Constructed crossvalidation')
panda_data = pd.DataFrame(panda_data_temp)
n = 0
filename = os.path.join(fastr.config.mounts['tmp'], 'GSout', 'RS_' + str(i) + '.hdf5')
while os.path.exists(filename):
n += 1
filename = os.path.join(fastr.config.mounts['tmp'], 'GSout', 'RS_' + str(i + n) + '.hdf5')
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
panda_data.to_hdf(filename, 'SVMdata')
del panda_data, panda_data_temp
[classifiers, X_train_set, X_test_set, Y_train_set, Y_test_set,
patient_ID_train_set, patient_ID_test_set, seed_set] =\
zip(*save_data)
panda_labels = ['classifiers', 'X_train', 'X_test', 'Y_train', 'Y_test',
'config', 'patient_ID_train', 'patient_ID_test',
'random_seed', 'feature_labels']
panda_data_temp =\
pd.Series([classifiers, X_train_set, X_test_set, Y_train_set,
Y_test_set, config, patient_ID_train_set,
patient_ID_test_set, seed_set, feature_labels],
index=panda_labels,
name='Constructed crossvalidation')
if modus == 'singlelabel':
i_name = ''.join(i_name)
elif modus == 'multilabel':
i_name = ','.join(i_name)
classifier_labelss[i_name] = panda_data_temp
panda_data = pd.DataFrame(classifier_labelss)
return panda_data | 90d543973861152f2e89acc6e444b3e1a9c465f5 | 30,521 |
def flw3i8e(ex, ey, ez, ep, D, eq=None):
    """
    Compute element stiffness (conductivity)
    matrix for 8 node isoparametric field element.
    Parameters:
        ex = [x1,x2,x3,...,x8]
        ey = [y1,y2,y3,...,y8]      element coordinates
        ez = [z1,z2,z3,...,z8]
        ep = [ir]                   Ir: Integration rule
        D = [[kxx,kxy,kxz],
             [kyx,kyy,kyz],
             [kzx,kzy,kzz]]         constitutive matrix
        eq                          heat supply per unit volume
    Output:
        Ke                          element 'stiffness' matrix (8 x 8)
        fe                          element load vector (8 x 1)
                                    (fe is only returned when eq is given)
    """
    ir = ep[0]
    ngp = ir*ir*ir
    # Fix: compare against None with `is`, not `==` (identity comparison;
    # also safe if eq is ever an array-like).
    if eq is None:
        q = 0
    else:
        q = eq
    if ir == 2:
        # 2x2x2 Gauss rule: points at +-1/sqrt(3), unit weights.
        g1 = 0.577350269189626
        w1 = 1
        gp = np.mat([
            [-1, -1, -1],
            [1, -1, -1],
            [1, 1, -1],
            [-1, 1, -1],
            [-1, -1, 1],
            [1, -1, 1],
            [1, 1, 1],
            [-1, 1, 1]
        ])*g1
        w = np.mat(np.ones((8, 3)))*w1
    elif ir == 3:
        # 3x3x3 Gauss rule: points at +-sqrt(3/5) and 0, weights 5/9 and 8/9.
        g1 = 0.774596669241483
        g2 = 0.
        w1 = 0.555555555555555
        w2 = 0.888888888888888
        gp = np.mat(np.zeros((27, 3)))
        w = np.mat(np.zeros((27, 3)))
        I1 = np.array([-1, 0, 1, -1, 0, 1, -1, 0, 1])
        I2 = np.array([0, -1, 0, 0, 1, 0, 0, 1, 0])
        gp[:, 0] = np.mat([I1, I1, I1]).reshape(27, 1)*g1
        gp[:, 0] = np.mat([I2, I2, I2]).reshape(27, 1)*g2+gp[:, 0]
        I1 = abs(I1)
        I2 = abs(I2)
        w[:, 0] = np.mat([I1, I1, I1]).reshape(27, 1)*w1
        w[:, 0] = np.mat([I2, I2, I2]).reshape(27, 1)*w2+w[:, 0]
        I1 = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1])
        I2 = np.array([0, 0, 0, 1, 1, 1, 0, 0, 0])
        gp[:, 1] = np.mat([I1, I1, I1]).reshape(27, 1)*g1
        gp[:, 1] = np.mat([I2, I2, I2]).reshape(27, 1)*g2+gp[:, 1]
        I1 = abs(I1)
        I2 = abs(I2)
        w[:, 1] = np.mat([I1, I1, I1]).reshape(27, 1)*w1
        w[:, 1] = np.mat([I2, I2, I2]).reshape(27, 1)*w2+w[:, 1]
        I1 = np.array([-1, -1, -1, -1, -1, -1, -1, -1, -1])
        I2 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])
        I3 = abs(I1)
        gp[:, 2] = np.mat([I1, I2, I3]).reshape(27, 1)*g1
        gp[:, 2] = np.mat([I2, I3, I2]).reshape(27, 1)*g2+gp[:, 2]
        w[:, 2] = np.mat([I3, I2, I3]).reshape(27, 1)*w1
        w[:, 2] = np.mat([I2, I3, I2]).reshape(27, 1)*w2+w[:, 2]
    else:
        info("Used number of integration points not implemented")
        return
    # Combined weight per Gauss point.
    wp = np.multiply(np.multiply(w[:, 0], w[:, 1]), w[:, 2])
    xsi = gp[:, 0]
    eta = gp[:, 1]
    zet = gp[:, 2]
    r2 = ngp*3
    # Trilinear shape functions evaluated at all Gauss points (ngp x 8).
    N = np.multiply(np.multiply((1-xsi), (1-eta)), (1-zet))/8.
    N = np.append(N, np.multiply(np.multiply(
        (1+xsi), (1-eta)), (1-zet))/8., axis=1)
    N = np.append(N, np.multiply(np.multiply(
        (1+xsi), (1+eta)), (1-zet))/8., axis=1)
    N = np.append(N, np.multiply(np.multiply(
        (1-xsi), (1+eta)), (1-zet))/8., axis=1)
    N = np.append(N, np.multiply(np.multiply(
        (1-xsi), (1-eta)), (1+zet))/8., axis=1)
    N = np.append(N, np.multiply(np.multiply(
        (1+xsi), (1-eta)), (1+zet))/8., axis=1)
    N = np.append(N, np.multiply(np.multiply(
        (1+xsi), (1+eta)), (1+zet))/8., axis=1)
    N = np.append(N, np.multiply(np.multiply(
        (1-xsi), (1+eta)), (1+zet))/8., axis=1)
    # Shape function derivatives w.r.t. (xsi, eta, zet), interleaved rows.
    dNr = np.mat(np.zeros((r2, 8)))
    dNr[0:r2:3, 0] = np.multiply(-(1-eta), (1-zet))
    dNr[0:r2:3, 1] = np.multiply((1-eta), (1-zet))
    dNr[0:r2:3, 2] = np.multiply((1+eta), (1-zet))
    dNr[0:r2:3, 3] = np.multiply(-(1+eta), (1-zet))
    dNr[0:r2:3, 4] = np.multiply(-(1-eta), (1+zet))
    dNr[0:r2:3, 5] = np.multiply((1-eta), (1+zet))
    dNr[0:r2:3, 6] = np.multiply((1+eta), (1+zet))
    dNr[0:r2:3, 7] = np.multiply(-(1+eta), (1+zet))
    dNr[1:r2+1:3, 0] = np.multiply(-(1-xsi), (1-zet))
    dNr[1:r2+1:3, 1] = np.multiply(-(1+xsi), (1-zet))
    dNr[1:r2+1:3, 2] = np.multiply((1+xsi), (1-zet))
    dNr[1:r2+1:3, 3] = np.multiply((1-xsi), (1-zet))
    dNr[1:r2+1:3, 4] = np.multiply(-(1-xsi), (1+zet))
    dNr[1:r2+1:3, 5] = np.multiply(-(1+xsi), (1+zet))
    dNr[1:r2+1:3, 6] = np.multiply((1+xsi), (1+zet))
    dNr[1:r2+1:3, 7] = np.multiply((1-xsi), (1+zet))
    dNr[2:r2+2:3, 0] = np.multiply(-(1-xsi), (1-eta))
    dNr[2:r2+2:3, 1] = np.multiply(-(1+xsi), (1-eta))
    dNr[2:r2+2:3, 2] = np.multiply(-(1+xsi), (1+eta))
    dNr[2:r2+2:3, 3] = np.multiply(-(1-xsi), (1+eta))
    dNr[2:r2+2:3, 4] = np.multiply((1-xsi), (1-eta))
    dNr[2:r2+2:3, 5] = np.multiply((1+xsi), (1-eta))
    dNr[2:r2+2:3, 6] = np.multiply((1+xsi), (1+eta))
    dNr[2:r2+2:3, 7] = np.multiply((1-xsi), (1+eta))
    dNr = dNr/8.
    Ke1 = np.mat(np.zeros((8, 8)))
    fe1 = np.mat(np.zeros((8, 1)))
    # Jacobian transpose at all Gauss points, stacked (3 rows per point).
    JT = dNr*np.mat([ex, ey, ez]).T
    for i in range(ngp):
        indx = np.array([3*(i+1)-2, 3*(i+1)-1, 3*(i+1)])
        detJ = np.linalg.det(JT[indx-1, :])
        if detJ < 10*np.finfo(float).eps:
            info("Jacobi determinant == 0")
        JTinv = np.linalg.inv(JT[indx-1, :])
        B = JTinv*dNr[indx-1, :]
        # Fix: np.asscalar was removed in NumPy 1.23; .item() is the
        # documented replacement.
        Ke1 = Ke1+B.T*D*B*detJ*wp[i].item()
        fe1 = fe1+N[i, :].T*detJ*wp[i]
    if eq is not None:
        return Ke1, fe1*q
    else:
        return Ke1
def cidr_to_netmask(value):
    """
    Converts a CIDR prefix-length to a network mask.

    Uses the standard-library ``ipaddress`` module instead of the
    third-party ``netaddr`` package; behavior is unchanged (accepts a
    prefix length as int or string, returns the dotted-quad mask).

    Examples:
        >>> "{{ '24'|cidr_to_netmask }}" -> "255.255.255.0"
    """
    import ipaddress
    # ip_interface tolerates host bits being set (like netaddr.IPNetwork did)
    return str(ipaddress.ip_interface("1.1.1.1/{}".format(value)).netmask)
def rgb_to_name(rgb_triplet: IntTuple, spec: str = CSS3) -> str:
    """
    Map a 3-tuple of integers (an ``rgb()`` triplet) to its normalized
    color name, when one exists.

    ``spec`` selects which specification's color-name list is consulted;
    the default is CSS3. Raises ``ValueError`` when no name matches.
    """
    normalized = normalize_integer_triplet(rgb_triplet)
    hex_value = rgb_to_hex(normalized)
    return hex_to_name(hex_value, spec=spec)
def get_rr_Lix(N, Fmat, psd, x):
    """Apply the inverse rank-reduced Cholesky factor to a vector.

    Computes L^{-1} x directly from the rank-reduced representation of L,
    so the full factor never has to be materialized (saving memory and
    time).

    @param N: Vector with the elements of the diagonal matrix N
    @param Fmat: (n x m) matrix holding the reduced-rank basis
    @param psd: PSD of the rank-reduced approximation
    @param x: vector to transform
    @return L^{-1} x
    """
    num_rows = N.shape[0]
    num_modes = Fmat.shape[1]
    Z, B, D = get_rr_cholesky_rep(N, Fmat, psd)
    sqrt_D = np.sqrt(D)
    BD = (B.T * sqrt_D).T
    result = np.zeros(num_rows)
    running = np.zeros(num_modes)
    # forward substitution, one row at a time
    for row in range(num_rows):
        result[row] = (x[row] - np.dot(Z[row, :], running)) / sqrt_D[row]
        running = running + result[row] * BD[row, :]
    return result
def ParameterSet_Create(*args):
    """
    Create() -> ParameterSet
    ParameterSet_Create(std::string const & publicID) -> ParameterSet

    SWIG-generated wrapper: forwards directly to the factory in the
    ``_DataModel`` C extension module. The overload actually invoked
    (no-arg vs. publicID string) is resolved by the underlying C++
    implementation -- not verifiable from this file.
    """
    return _DataModel.ParameterSet_Create(*args)
def generate_html_from_cli_args(cli_dict_for_command):
    """
    Turn the dict into an HTML representation of the CLI args and options.

    Recognized keys:
      - "usage": rendered as an emphasized usage line.
      - "args": iterable of argument records; the first element of each
        record becomes a ``<li>`` item.
    An "epilog" entry is currently ignored (rendering not implemented).

    :param cli_dict_for_command: dict describing one CLI command
    :return: HTML fragment string, prefixed with two newlines
    """
    parts = []
    if "usage" in cli_dict_for_command:
        parts.append("\n<em>usage: " + str(cli_dict_for_command["usage"]) + "</em><br>\n")
    # NOTE: "epilog" is deliberately not rendered yet.
    if "args" in cli_dict_for_command:
        parts.append("<ul>\n")
        for arg in cli_dict_for_command["args"]:
            parts.append(f"<li>{arg[0]}</li>\n")
        parts.append("</ul>\n")
    return "\n\n" + "".join(parts)
def conical_sigma_Mach_walldeflection(Mach, deflection, gamma=defg._gamma):
    """Solve for the conical shock angle sigma from the upstream Mach
    number and the wall deflection.

    Args:
        Mach: upstream Mach number
        deflection: wall deflection angle
        gamma: ratio of specific heats (defaults to defg._gamma)

    Returns:
        shock angle sigma whose conical deflection matches ``deflection``.
    """
    def deflection_of_sigma(sigma):
        # 1-D wrapper around the forward relation, for the secant solver
        return conical_deflection_Mach_sigma(Mach, sigma, gamma)

    # start from the Mach angle shifted by the requested deflection
    initial_guess = degree.asin(1./Mach) + deflection
    return ITS.secant_solve(deflection_of_sigma, deflection, initial_guess)
def get_hashrate_info(results, miner, algo):
    """Look up hashrate information for a miner/algo combination.

    Returns:
        dict with the hashrate info, or None (after logging a warning)
        when the model/algo pair is unknown.
    """
    hashrate_info = results.get_hashrate_info(miner, algo)
    if hashrate_info is not None:
        return hashrate_info
    logger.warning("Model/Algo combination does not exist for "
                   "miner model '{}' and algo '{}'".format(miner.model.model, algo))
    return hashrate_info
import hashlib
def verify_verification_code(doctype, document_name, verification_code):
    """This method verifies the user verification code by fetching the originally sent code by the system from cache.

    Args:
        doctype (str): Name of the DocType.
        document_name (str): Name of the document of the DocType.
        verification_code (int): User verification code

    Returns:
        boolean: True/False upon verification code being verified.
    """
    try:
        # the cache key and the expected hash are both bound to the
        # current session user, so one user cannot consume another's code
        employee_user_email = frappe.session.user
        # NOTE(review): md5 is used only as a cache-key/compare digest here,
        # not for password storage; still, a stronger hash would be cheap.
        cache_search_key = hashlib.md5((employee_user_email + doctype + document_name).encode('utf-8')).hexdigest()
        verification_hash = hashlib.md5((employee_user_email + doctype + document_name + str(verification_code)).encode('utf-8')).hexdigest()
        # no cached entry means no code was ever sent (or it expired)
        if not frappe.cache().get(cache_search_key):
            return False
        # frappe's cache returns bytes, hence the decode before comparing
        if verification_hash != frappe.cache().get(cache_search_key).decode('utf-8'):
            return False
        # successful match consumes the code (one-time use)
        frappe.cache().delete(cache_search_key)
        return True
    except Exception as e:
        # NOTE(review): broad catch -- any unexpected error is surfaced to
        # the user via frappe.throw rather than being silently swallowed
        frappe.throw(e)
def orient1(ppos, apos, bpos):
    """
    ORIENT1 return orientation of PP wrt. the line [PA, PB].

    The result is the signed 2x2 determinant of the edge vectors
    (PA - PP) and (PB - PP), one value per row, as a flat array.
    """
    npts = ppos.shape[0]
    # edge vectors, stored in the dtype of PPOS so precision matches the
    # incoming point array exactly (mirrors the original scratch matrix)
    vec_a = np.empty((npts, 2), dtype=ppos.dtype)
    vec_b = np.empty((npts, 2), dtype=ppos.dtype)
    vec_a[:, 0] = apos[:, 0] - ppos[:, 0]
    vec_a[:, 1] = apos[:, 1] - ppos[:, 1]
    vec_b[:, 0] = bpos[:, 0] - ppos[:, 0]
    vec_b[:, 1] = bpos[:, 1] - ppos[:, 1]
    sign = vec_a[:, 0] * vec_b[:, 1] - vec_a[:, 1] * vec_b[:, 0]
    return np.reshape(sign, (sign.size))
def get_storm_data(storm_path):
    """ Obtain raster grid of the storm with rasterio

    Arguments:
        *storm_path* (string) -- path to location of storm

    Returns:
        (array, affine_storm) -- float32 numpy grid of band 1 and its
        affine transform
    """
    # ``rio`` is assumed to be rasterio imported at module level
    with rio.open(storm_path) as src:
        # Read as numpy array
        array = src.read(1)
        array = np.array(array,dtype='float32')
        # NOTE(review): ``src.affine`` is the rasterio<1.0 attribute name;
        # rasterio>=1.0 renamed it to ``src.transform`` -- confirm the
        # pinned rasterio version.
        affine_storm = src.affine
    return array,affine_storm
import functools
def wrap_with_spectral_norm(module_class,
                            sn_kwargs=None,
                            pow_iter_collection=None):
  """Build a constructor that applies spectral normalization to a module class.

  Given a Sonnet AbstractModule class (the class itself, not an instance)
  and an optional dict of keyword arguments for the spectral_norm function,
  this returns a callable usable exactly like the original class's
  constructor, except that spectral normalization is applied to the weights
  the module creates. Internally this is a partially-applied
  SpectralNormWrapper.

  When `pow_iter_collection` is not None it names a TensorFlow global
  collection. Each access of the module's weight matrix builds ops that run
  one power-iteration step approximating the weight's first singular value
  and store the new approximation in an internal variable. At build time the
  wrapped module accepts a boolean 'enable_power_iteration' keyword
  (default True): if True, the returned weight carries a control dependency
  on the update op; otherwise the update op is instead added to the
  `pow_iter_collection` collection and it is up to the user to run it.

  Args:
    module_class: Sonnet module class to wrap with spectral normalization.
    sn_kwargs: extra keyword arguments for the spectral_norm function.
    pow_iter_collection: optional name of a global collection for the
      power-iteration update ops.

  Returns:
    An snt.AbstractModule class representing the original with spectral norm.
  """
  sn_kwargs = sn_kwargs if sn_kwargs else {}
  return functools.partial(
      SpectralNormWrapper, module_class, sn_kwargs, pow_iter_collection)
def ask_daemon_sync(view, ask_type, ask_kwargs, location=None):
    """Synchronous request to the Jedi daemon bound to a view.

    :type view: sublime.View
    :type ask_type: str
    :type ask_kwargs: dict or None
    :type location: (int, int) or None
    """
    payload = ask_kwargs if ask_kwargs else {}
    daemon = _get_daemon(view)
    return daemon.request(ask_type, payload,
                          *_prepare_request_data(view, location))
import logging
import time
def _etl_epacems(etl_params, datapkg_dir, pudl_settings, ds_kwargs):
    """Extract, transform and load CSVs for EPA CEMS.

    Args:
        etl_params (dict): ETL parameters required by this data source.
        datapkg_dir (path-like): The location of the directory for this
            package, which will contain a datapackage.json file and a data
            directory in which the CSV files are stored.
        pudl_settings (dict): a dictionary filled with settings that mostly
            describe paths to various resources and outputs.
        ds_kwargs (dict): keyword arguments used to construct the Datastore.

    Returns:
        list: Names of PUDL DB tables output by the ETL for this data source
        (empty when no years/states were requested).
    """
    epacems_dict = pudl.etl._validate_params_epacems(etl_params)
    epacems_years = epacems_dict['epacems_years']
    epacems_states = epacems_dict['epacems_states']
    # If we're not doing CEMS, stop here to avoid printing messages like
    # "Reading EPA CEMS data...", which could be confusing.
    if not epacems_states or not epacems_years:
        logger.info('Not ingesting EPA CEMS.')
        # BUGFIX: previously fell through and ran the full ETL anyway; an
        # empty table list matches the intent stated in the comment above.
        return []
    # NOTE: This is a generator for raw dataframes
    epacems_raw_dfs = pudl.extract.epacems.extract(
        epacems_years, epacems_states, Datastore(**ds_kwargs))
    # NOTE: This is a generator for transformed dataframes
    epacems_transformed_dfs = pudl.transform.epacems.transform(
        epacems_raw_dfs=epacems_raw_dfs,
        datapkg_dir=datapkg_dir)
    logger.info("Loading tables from EPA CEMS into PUDL:")
    if logger.isEnabledFor(logging.INFO):
        start_time = time.monotonic()
    epacems_tables = []
    # run the cems generator dfs through the load step
    for transformed_df_dict in epacems_transformed_dfs:
        pudl.load.csv.dict_dump(transformed_df_dict,
                                "EPA CEMS",
                                datapkg_dir=datapkg_dir)
        epacems_tables.append(list(transformed_df_dict.keys())[0])
    if logger.isEnabledFor(logging.INFO):
        delta_t = time.strftime("%H:%M:%S", time.gmtime(
            time.monotonic() - start_time))
        logger.info(f"Loading EPA CEMS took {delta_t}")
    return epacems_tables
def GetNvccOptions(argv):
  """Collect the -nvcc_options values from argv.

  Args:
    argv: A list of strings, possibly the argv passed to main().

  Returns:
    1. The string that can be passed directly to nvcc.
    2. The leftover options.
  """
  parser = ArgumentParser()
  parser.add_argument('-nvcc_options', nargs='*', action='append')
  parsed, remaining = parser.parse_known_args(argv)
  if not parsed.nvcc_options:
    return ([], remaining)
  merged = _update_options(sum(parsed.nvcc_options, []))
  return (['--' + option for option in merged], remaining)
import json
def show_node(request, name='', path='', revision=''):
    """
    View for show_node page, which provides context for show_node.html.
    Shows description for yang modules.

    :param request: Array with arguments from webpage data submission.
    :param name: Module name (first url argument when not supplied).
    :param path: Path for node.
    :param revision: revision for yang module, if specified (latest is
        looked up when omitted).
    :return: returns context for show_node.html
    """
    alerts = []
    context = {}
    try:
        if not revision:
            revision = get_latest_mod(name)
            revision = revision.split('@')[1]
        # context manager closes the query-template file handle
        # (previously json.load(open(...)) leaked it)
        with open('search/templates/json/show_node.json', 'r') as query_file:
            query = json.load(query_file)
        query['query']['bool']['must'][0]['match_phrase']['module.keyword']['query'] = name
        query['query']['bool']['must'][1]['match_phrase']['path']['query'] = path
        query['query']['bool']['must'][2]['match_phrase']['revision']['query'] = revision
        hits = es.search(index='yindex', doc_type='modules', body=query)['hits']['hits']
        if len(hits) == 0:
            alerts.append('Could not find data for {} at {}'.format(name, path))
        else:
            result = hits[0]['_source']
            context['show_node'] = result
            context['properties'] = json.loads(result['properties'])
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed; any failure becomes a user-facing alert.
        alerts.append('Module and path must be specified')
    context['alerts'] = alerts
    return render(request, 'search/show_node.html', context)
def compute_F1(TP, TN, FP, FN):
    """
    Return the F1 score and accuracy (in percent) from confusion-matrix
    counts (true/false positives and negatives).
    """
    f1_score = (2 * TP) / (2 * TP + FN + FP)
    accuracy = 100. * (TP + TN) / (TP + TN + FP + FN)
    return f1_score, accuracy
def name_full_data():
    """Full name data."""
    identifiers = [
        {"identifier": "0000-0001-8135-3489", "scheme": "orcid"},
        {"identifier": "gnd:4079154-3", "scheme": "gnd"},
    ]
    affiliations = [
        {"id": "cern"},
        {"name": "CustomORG"},
    ]
    return {
        "name": "Doe, John",
        "given_name": "John",
        "family_name": "Doe",
        "identifiers": identifiers,
        "affiliations": affiliations,
    }
def platypus(in_file, data):
    """Filter Platypus calls: drop the Q20 filter, replacing it with a
    depth/quality based cutoff.

    Platypus uses its own VCF nomenclature: TC == DP, FR == AF.
    Platypus gVCF output appears to have an 0/1 index problem so the
    reference block regions are 1 base outside regions of interest; we
    therefore skip region limiting for gVCF inputs.
    """
    expression = ('(FR[0] <= 0.5 && TC < 4 && %QUAL < 20) || '
                  '(TC < 13 && %QUAL < 10) || '
                  '(FR[0] > 0.5 && TC < 4 && %QUAL < 50)')
    if vcfutils.is_gvcf_file(in_file):
        limit_regions = None
    else:
        limit_regions = "variant_regions"
    return cutoff_w_expression(
        in_file, expression, data, name="PlatQualDepth",
        extra_cmd="| sed 's/\\tQ20\\t/\\tPASS\\t/'", limit_regions=limit_regions)
def delete(i):
    """
    Alias for the 'rm' function (kept for API compatibility).

    Input: { See 'rm' function }
    Output: { See 'rm' function }
    """
    return rm(i)
import logging
def _get_remote_image_id(s3_object) -> str:
    """
    Get the image id of the docker cache which is represented by the S3 object

    :param s3_object: S3 object
    :return: Image id as string or None if object does not exist
    """
    try:
        metadata = s3_object.metadata
        if S3_METADATA_IMAGE_ID_KEY in metadata:
            return metadata[S3_METADATA_IMAGE_ID_KEY]
        logging.debug('No cached image available for {}'.format(s3_object.key))
    except botocore.exceptions.ClientError as e:
        # a 404 simply means the cache entry was never uploaded
        if e.response['Error']['Code'] == "404":
            logging.debug('{} does not exist in S3 yet'.format(s3_object.key))
        else:
            raise
    return None
def test_circuit_str(default_compilation_configuration):
    """Test function for `__str__` method of `Circuit`"""

    def f(x):
        return x + 42

    parameters = {"x": hnp.EncryptedScalar(hnp.UnsignedInteger(3))}
    sample_inputs = range(2 ** 3)
    circuit = hnp.compile_numpy_function(
        f, parameters, sample_inputs, default_compilation_configuration
    )
    assert str(circuit) == format_operation_graph(circuit.op_graph)
def add_game():
    """Render the add-game form and create a Game on valid submission.

    Admin-only view: redirects to the game list after a submission attempt
    (whether the insert succeeded or the name already existed).
    """
    check_admin()
    form = GameForm()
    # Checks if form is valid
    if form.validate_on_submit():
        game = Game(name=form.name.data)
        try:
            db.session.add(game)
            db.session.commit()
            flash('Game successfully added')
        except Exception:
            # Narrowed from a bare ``except:``; roll back so the session
            # stays usable after the failed commit (most likely cause is a
            # unique-name constraint violation).
            db.session.rollback()
            flash('Error: game name already exists')
        return redirect(url_for('admin.list_games'))
    # ``add_game=True`` tells the shared template it is in "add" mode
    # (previously a local variable shadowing this function's name)
    return render_template('admin/games/game.html',
                           action='Add',
                           add_game=True,
                           form=form,
                           title='Add Game')
def requires_moderation(page):
    """Return True when the page has at least one queued moderator entry."""
    return page.get_moderator_queryset().count() > 0
from typing import Optional
def _get_node_info(
        node: NodeObject,
        current_path: str,
        node_type: str,
        label: Optional[str] = None,
        is_leaf: bool = True
) -> NodeInfo:
    """
    Build a NodeInfo from a NodeObject for the object-explorer tree.

    :param node: NodeObject to convert. Its name becomes the label (unless
        ``label`` is given) and its name is appended to ``current_path`` to
        form the node's path.
    :param current_path: URI provided in the request to expand/refresh
    :param node_type: Node type, determines icon used in UI
    :param label: Overrides node.name as the display name when provided
    :param is_leaf: Whether the node is a leaf (default True). Non-leaf
        nodes get a trailing slash so they behave as folders.
    :return: NodeInfo based on the NodeObject provided
    """
    node_info: NodeInfo = NodeInfo()
    node_info.is_leaf = is_leaf
    node_info.label = node.name if label is None else label
    node_info.metadata = ObjectMetadata(node.urn, None, type(node).__name__, node.name, None)
    node_info.node_type = node_type
    # folders carry a trailing slash so the client treats the URI as expandable
    suffix = '' if is_leaf else '/'
    node_info.node_path = urljoin(current_path, str(node.name) + suffix)
    return node_info
def get_peers():
    """Retrieve PeerIds and SSIDs for peers that are ready for OOB transfer"""
    # PeerState=1 marks "ready" peers -- state encoding is defined by the
    # EphemeralState schema (not visible here; confirm there)
    query = 'SELECT Ssid, PeerId from EphemeralState WHERE PeerState=1'
    # exec_query/db_path_peer are module-level helpers for the peer database
    data = exec_query(query, db_path_peer)
    return data
from bs4 import BeautifulSoup
def get_html_text(html):
    """
    Return the raw text of an ad
    """
    if not html:
        return ""
    return BeautifulSoup(html, "html.parser").get_text(" ")
def valid_parentheses(string):
    """
    Takes a string of parentheses, and determines if the order of the parentheses is valid.

    Non-parenthesis characters are ignored.

    :param string: a string of parentheses and characters.
    :return: true if the string is valid, and false if it's invalid.
    """
    # A running depth counter suffices: the stack in the previous version
    # only ever held copies of '(' anyway.
    depth = 0
    for ch in string:
        if ch == "(":
            depth += 1
        elif ch == ")":
            if depth == 0:
                # closing paren with nothing open
                return False
            depth -= 1
    return depth == 0
import os
import ctypes
def find_microbit():
    """
    Returns a path on the filesystem that represents the plugged in BBC
    micro:bit that is to be flashed. If no micro:bit is found, it returns
    None.
    Works on Linux, OSX and Windows. Will raise a NotImplementedError
    exception if run on any other operating system.
    """
    # Check what sort of operating system we're on.
    if os.name == 'posix':
        # 'posix' means we're on Linux or OSX (Mac).
        # Call the unix "mount" command to list the mounted volumes.
        # NOTE(review): ``check_output`` must be imported from subprocess at
        # module level -- the import is not visible in this block.
        mount_output = check_output('mount').splitlines()
        # third whitespace-separated field is the mount point; assumes the
        # mount path itself contains no spaces -- TODO confirm
        mounted_volumes = [x.split()[2] for x in mount_output]
        for volume in mounted_volumes:
            if volume.endswith(b'MICROBIT'):
                return volume.decode('utf-8')  # Return a string not bytes.
        # falls through and returns None when no MICROBIT volume is mounted
    elif os.name == 'nt':
        # 'nt' means we're on Windows.
        def get_volume_name(disk_name):
            """
            Each disk or external device connected to windows has an attribute
            called "volume name". This function returns the volume name for
            the given disk/device.
            Code from http://stackoverflow.com/a/12056414
            """
            vol_name_buf = ctypes.create_unicode_buffer(1024)
            ctypes.windll.kernel32.GetVolumeInformationW(
                ctypes.c_wchar_p(disk_name), vol_name_buf,
                ctypes.sizeof(vol_name_buf), None, None, None, None, 0)
            return vol_name_buf.value
        #
        # In certain circumstances, volumes are allocated to USB
        # storage devices which cause a Windows popup to raise if their
        # volume contains no media. Wrapping the check in SetErrorMode
        # with SEM_FAILCRITICALERRORS (1) prevents this popup.
        #
        old_mode = ctypes.windll.kernel32.SetErrorMode(1)
        try:
            # probe every possible drive letter for a MICROBIT volume
            for disk in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
                path = '{}:\\'.format(disk)
                if (os.path.exists(path) and
                        get_volume_name(path) == 'MICROBIT'):
                    return path
        finally:
            # always restore the previous error mode
            ctypes.windll.kernel32.SetErrorMode(old_mode)
    else:
        # No support for unknown operating systems.
        raise NotImplementedError('OS "{}" not supported.'.format(os.name))
def in_2core_graph_slow(cats: ArrayLike) -> BoolArray:
    """
    Mark observations that survive iterative singleton removal.

    Parameters
    ----------
    cats: {DataFrame, ndarray}
        Array containing the category codes of pandas categoricals
        (nobs, ncats)

    Returns
    -------
    retain : ndarray
        Boolean array that marks non-singleton entries as True

    Notes
    -----
    Reference implementation; can be very slow to remove all singleton
    nodes in some graphs.
    """
    if isinstance(cats, DataFrame):
        cats = np.column_stack([np.asarray(cats[col].cat.codes) for col in cats])
    if cats.shape[1] == 1:
        return in_2core_graph(cats)
    nobs, ncats = cats.shape
    kept_rows = np.arange(nobs)
    dropped = 1
    # repeatedly strip rows whose code is unique in any column
    while dropped > 0 and cats.shape[0] > 0:
        is_singleton = np.zeros(cats.shape[0], dtype=bool)
        for col in range(ncats):
            values, counts = np.unique(cats[:, col], return_counts=True)
            is_singleton |= np.isin(cats[:, col], values[counts == 1])
        dropped = int(is_singleton.sum())
        if dropped:
            keep = ~is_singleton
            cats = cats[keep]
            kept_rows = kept_rows[keep]
    retain = np.zeros(nobs, dtype=bool)
    retain[kept_rows] = True
    return retain
def has_form_encoded_header(header_lines):
    """Return whether any header line declares form-encoded content"""
    for raw_line in header_lines:
        if ":" not in raw_line:
            continue
        (name, value) = raw_line.split(":", 1)
        if name.lower() == "content-type" and "x-www-form-urlencoded" in value:
            return True
    return False
from pathlib import Path
def get_best_checkpoint_path(path: Path) -> Path:
    """
    Return the checkpoint file path inside the given checkpoint folder.

    NOTE(review): despite "best" in the name, this appends the *last*-
    checkpoint filename constant -- confirm the last checkpoint is indeed
    treated as the best one by the surrounding training code.

    :param path: path to checkpoint folder
    :return: ``path`` joined with LAST_CHECKPOINT_FILE_NAME_WITH_SUFFIX
    """
    return path / LAST_CHECKPOINT_FILE_NAME_WITH_SUFFIX
def storage_charge_rule(model, technology, timepoint):
    """
    Constrain combined charging and discharging to installed power capacity.

    Charge and discharge rate limits are currently the same, both bounded
    by the technology's total installed capacity.
    """
    total_flow = model.Charge[technology, timepoint] + model.Provide_Power[technology, timepoint]
    return total_flow <= model.capacity[technology]
def blck_void(preprocessor: Preprocessor, args: str, contents: str) -> str:
	"""The void block: runs the commands it contains but emits nothing"""
	if args.strip():
		preprocessor.send_warning("extra-arguments", "the void block takes no arguments")
	preprocessor.context.update(preprocessor.current_position.end, "in void block")
	# parse for side effects only; the rendered output is discarded
	preprocessor.parse(contents)
	preprocessor.context.pop()
	return ""
import typing
def resize_image(image: np.ndarray,
                 width: typing.Optional[int] = None,
                 height: typing.Optional[int] = None,
                 interpolation=cv2.INTER_AREA):
    """
    Resize an image to the given width and/or height.

    With both dimensions given, aspect ratio is NOT preserved; with a
    single dimension the other is scaled proportionally; with neither,
    the original image is returned unchanged.
    """
    src_h, src_w = image.shape[:2]
    if not width and not height:
        return image
    if width and height:
        target = (width, height)
    elif width:
        scale = width / float(src_w)
        target = (width, int(src_h * scale))
    else:
        scale = height / float(src_h)
        target = (int(src_w * scale), height)
    return cv2.resize(image, target, interpolation=interpolation)
def attr(*args, **kwargs):
    """Decorator that attaches attributes to classes or functions so the
    unit-test runner can filter on them.

    Positional names are set to True; keyword arguments are set verbatim.
    """
    def decorate(target):
        for flag in args:
            setattr(target, flag, True)
        for key, value in kwargs.items():
            setattr(target, key, value)
        return target
    return decorate
def convert_units(table_name, value, value_unit, targets):
    """
    Convert a value from one unit to each of a set of target units.

    Targets the table cannot convert to are silently skipped.

    @param table_name Name of table units are contained in
    @param value Value to convert
    @param value_unit Unit value is currently in
    @param targets List of units to convert to
    @return List of {'dest_unit', 'converted_value'} dicts
    """
    table = get_table(table_name)
    conversions = []
    for dest in targets:
        try:
            converted = table.convert(value_unit, dest, value)
        except ValueError:
            # this target unit is incompatible -- skip it
            continue
        conversions.append({'dest_unit': dest, 'converted_value': converted})
    return conversions
import os
def find_vital_library_path(use_cache=True):
    """
    Discover the path to a VITAL C interface library based on the directory
    structure this file is in, and then to system directories in the
    LD_LIBRARY_PATH.

    :param use_cache: Store and use the cached path, preventing redundant
        searching (default = True).
    :type use_cache: bool

    :return: The string path to the VITAL C interface library
    :rtype: str

    :raises RuntimeError: If vital library path was not found.
    """
    # module-level cache; __LIBRARY_PATH_CACHE__, __LIBRARY_NAME_RE__ and
    # __LIBRARY_NAME__ are defined elsewhere in this module
    global __LIBRARY_PATH_CACHE__
    if use_cache and __LIBRARY_PATH_CACHE__:
        return __LIBRARY_PATH_CACHE__
    # Otherwise, find the Vital C library
    # search order: this file's directory first, then entries from the
    # dynamic-linker path variables (Linux and macOS)
    search_dirs = [os.path.dirname(os.path.abspath(__file__))]
    # NOTE this does not cover all possible systems
    if 'LD_LIBRARY_PATH' in os.environ:
        search_dirs.extend(os.environ['LD_LIBRARY_PATH'].split(_system_path_separator()))
    if 'DYLD_LIBRARY_PATH' in os.environ:
        search_dirs.extend(os.environ['DYLD_LIBRARY_PATH'].split(_system_path_separator()))
    for d in search_dirs:
        # _search_up_directory walks upward from d looking for the library
        # name pattern (helper defined elsewhere in this module)
        r = _search_up_directory(d, __LIBRARY_NAME_RE__)
        if r is not None:
            if use_cache:
                __LIBRARY_PATH_CACHE__ = r
            return r
    # No library found in any paths given at this point
    raise RuntimeError("Failed to find a valid '%s' library!"
                       % __LIBRARY_NAME__)
def hex2binary(hex_num):
    """ converts from hexadecimal to binary """
    # Looks up each hex digit in the module-level table ``h``.
    # NOTE(review): assumes hex_num has exactly two hex digits -- extra
    # digits are silently ignored, fewer raise IndexError; confirm callers.
    # NOTE(review): whether leading zeros survive depends on the value type
    # stored in ``h`` (zero-padded string vs int run through str()) --
    # verify against the definition of ``h``.
    hex1 = h[hex_num[0]]
    hex2 = h[hex_num[1]]
    return str(hex1) + str(hex2)
def nn(value: int) -> int:
    """Casts value to closest non negative value (clamps negatives to 0)."""
    # max() expresses the clamp directly instead of a conditional
    return max(0, value)
def resolve_vcf_counts_data(vcf_data, maf_data, matched_normal_sample_id, tumor_sample_data_col):
    """ Resolves VCF allele counts data.

    Mutates ``maf_data`` in place with tumor (and, when present, normal)
    ref/alt/depth counts derived from the VCF record, and returns it.

    NOTE(review): ``tumor_sample_data_col`` is unused in this function --
    confirm whether it can be dropped or is required by the call signature.
    """
    # allele list is REF followed by the comma-separated ALT alleles,
    # in VCF column order
    vcf_alleles = [vcf_data["REF"]]
    vcf_alleles.extend(vcf_data["ALT"].split(","))
    # per-sample FORMAT data is assumed to have been mapped upstream into
    # MAPPED_TUMOR_FORMAT_DATA / MAPPED_NORMAL_FORMAT_DATA -- TODO confirm
    tumor_sample_format_data = vcf_data["MAPPED_TUMOR_FORMAT_DATA"]
    normal_sample_format_data = None
    if matched_normal_sample_id in vcf_data.keys():
        normal_sample_format_data = vcf_data["MAPPED_NORMAL_FORMAT_DATA"]
    variant_allele_idx = get_vcf_variant_allele_idx(tumor_sample_format_data, normal_sample_format_data, vcf_alleles)
    (t_ref_count, t_alt_count, t_depth) = resolve_vcf_allele_depth_values(tumor_sample_format_data, vcf_alleles, variant_allele_idx, vcf_data)
    maf_data["t_ref_count"] = t_ref_count
    maf_data["t_alt_count"] = t_alt_count
    maf_data["t_depth"] = t_depth
    # only resolve values for normal allele depths if "NORMAL" data is present in VCF
    if normal_sample_format_data:
        (n_ref_count, n_alt_count, n_depth) = resolve_vcf_allele_depth_values(normal_sample_format_data, vcf_alleles, variant_allele_idx, vcf_data)
        maf_data["n_ref_count"] = n_ref_count
        maf_data["n_alt_count"] = n_alt_count
        maf_data["n_depth"] = n_depth
    return maf_data
def example_profile_metadata_target():
    """Generates an example profile metadata document.
    >>> root = example_profile_metadata_target()
    >>> print_tree(root)
    <?xml version='1.0' encoding='UTF-8'?>
    <Profile xmlns="http://soap.sforce.com/2006/04/metadata">
      <classAccesses>
        <apexClass>ARTransactionsTest</apexClass>
        <enabled>false</enabled>
      </classAccesses>
      <classAccesses>
        <apexClass>AccountAddressManager</apexClass>
        <enabled>true</enabled>
      </classAccesses>
      <classAccesses>
        <apexClass>AccountHierarchyBuilder</apexClass>
        <enabled>true</enabled>
      </classAccesses>
      <classAccesses>
        <apexClass>TransactionTestData</apexClass>
        <enabled>false</enabled>
      </classAccesses>
    </Profile>
    <BLANKLINE>
    """
    # Start from the "source" example document and append the two extra
    # classAccesses entries that distinguish the target document (matching
    # the doctest output above).
    root = example_profile_metadata_source()
    example_class_access_element(root, 'AccountHierarchyBuilder', 'true')
    example_class_access_element(root, 'TransactionTestData', 'false')
    return root
def separate(expr, deep=False):
    """Rewrite or separate a power of product to a product of powers
    but without any expanding, ie. rewriting products to summations.
    >>> from sympy import *
    >>> x, y, z = symbols('x', 'y', 'z')
    >>> separate((x*y)**2)
    x**2*y**2
    >>> separate((x*(y*z)**3)**2)
    x**2*y**6*z**6
    >>> separate((x*sin(x))**y + (x*cos(x))**y)
    x**y*cos(x)**y + x**y*sin(x)**y
    #>>> separate((exp(x)*exp(y))**x)
    #exp(x*y)*exp(x**2)
    Notice that summations are left un touched. If this is not the
    requested behaviour, apply 'expand' to input expression before:
    >>> separate(((x+y)*z)**2)
    z**2*(x + y)**2
    >>> separate((x*y)**(1+z))
    x**(1 + z)*y**(1 + z)
    """
    # NOTE(review): relies on a legacy sympy API (Basic.sympify, attribute
    # access like Basic.Pow, and iterating an expression's args via
    # ``for t in expr``) -- confirm the sympy version this file targets.
    expr = Basic.sympify(expr)
    if isinstance(expr, Basic.Pow):
        # NOTE(review): ``terms`` is assigned but never used
        terms, expo = [], separate(expr.exp, deep)
        #print expr, terms, expo, expr.base
        if isinstance(expr.base, Mul):
            # distribute the exponent over each factor of the product base
            t = [ separate(Basic.Pow(t,expo), deep) for t in expr.base ]
            return Basic.Mul(*t)
        elif isinstance(expr.base, Basic.exp):
            # exp(a)**e -> exp(a*e), recursing into ``a`` when deep
            if deep == True:
                return Basic.exp(separate(expr.base[0], deep)*expo)
            else:
                return Basic.exp(expr.base[0]*expo)
        else:
            return Basic.Pow(separate(expr.base, deep), expo)
    elif isinstance(expr, (Basic.Add, Basic.Mul)):
        # recurse into each argument of sums/products
        return type(expr)(*[ separate(t, deep) for t in expr ])
    elif isinstance(expr, Basic.Function) and deep:
        # deep=True also recurses into function arguments
        return expr.func(*[ separate(t) for t in expr])
    else:
        return expr
import os
def get_sub_dirs(path: str):
    """Get sub-directories contained in a specified directory.

    Args:
        path (str): path to directory

    Returns:
        str: sorted list of directory names
        str: list of matching absolute paths
    """
    # next() on the os.walk generator yields (dirpath, dirnames, filenames)
    # for `path` itself; StopIteration means the path does not exist.
    # (Replaces a Python-2 ``.next()`` call guarded by AttributeError.)
    try:
        dirs = next(os.walk(path))[1]
    except StopIteration:
        dirs = []
    dirs.sort()
    dir_paths = [os.path.abspath(os.path.join(path, name)) for name in dirs]
    return dirs, dir_paths
def app_base(request):
    """
    Render the HTML needed to bootstrap the Angular application; the only
    pyramid entry point for the Angular UI.

    :param request: A pyramid request object, default for a view
    :return: A dictionary of variables to be rendered into the template
    """
    dev_endpoints = ['localhost', '0.0.0.0', '127.0.', '192.168.', '10.19.', 'dev.squizzlezig.com']
    hostname = request.host.split(':', 1)[0]
    is_dev = any(
        hostname.startswith(endpoint) or request.remote_addr.startswith(endpoint)
        for endpoint in dev_endpoints
    )
    return {'is_dev': is_dev, 'some_key': request.registry.settings['some_key']}
from typing import List
import re
def check_lists(document: Document, args: Args) -> List[Issue]:
    """Check that markdown lists items:
    - Are preceded by a blank line.
    - Are not left empty.
    - End with a period if they're a list of sentences.
    - End without a period if they're a list of items."""
    issues = []
    markdown_list_re = re.compile(r"\s*(\d+\.|-) \s*(.*)\n")
    is_front_matter = False
    is_inside_list = False
    for number, line in enumerate(document.lines):
        # We skip lines inside the front matter as that's YAML data.
        if line.startswith("---"):
            is_front_matter = not is_front_matter
        if is_front_matter:
            continue
        match = markdown_list_re.match(line)
        if not match:
            is_inside_list = False
            continue
        # The first item of a list must be preceded by a blank line.
        if not is_inside_list:
            is_inside_list = True
            if document.lines[number - 1].strip() != "":
                issues.append(
                    Issue(
                        line=number + 1,
                        column_start=0,
                        column_end=0,
                        message="Missing blank line before list.",
                        rule=Rules.missing_blank_line_before_list,
                    )
                )
        content = match.group(2).strip()
        # BUGFIX: check for empty items first. The period checks below
        # index content[-1], which raised IndexError on empty content and
        # made the empty-item branch unreachable.
        if content == "":
            issues.append(
                Issue(
                    line=number + 1,
                    column_start=match.start(),
                    column_end=match.end(),
                    message="Empty list item.",
                    rule=Rules.empty_lists,
                )
            )
            continue
        # A run of capitalized words (optionally *emphasized*) is treated
        # as an item list; anything else is treated as a sentence.
        is_pascal_case_sequence = (
            re.match(r"^\*?[A-Z]\w*\*?( [A-Z]\w*)*\*?$", content) is not None
        )
        if is_pascal_case_sequence and content.endswith("."):
            issues.append(
                Issue(
                    line=number + 1,
                    column_start=match.start(2),
                    column_end=match.end(2),
                    message="List item ends with a period.",
                    rule=Rules.list_item_ends_with_period,
                )
            )
        elif not is_pascal_case_sequence and content[-1] not in ".?!":
            issues.append(
                Issue(
                    line=number + 1,
                    column_start=match.start(2),
                    column_end=match.end(2),
                    message="Sentence in list does not end with a period.",
                    rule=Rules.list_item_does_not_end_with_period,
                )
            )
    return issues
def transform_one(mt, vardp_outlier=100_000) -> Table:
    """transforms a gvcf into a form suitable for combining
    The input to this should be some result of either :func:`.import_vcf` or
    :func:`.import_vcfs` with `array_elements_required=False`.
    There is a strong assumption that this function will be called on a matrix
    table with one column.

    :param mt: single-column gVCF matrix table.
    :param vardp_outlier: VarDP values above this threshold are replaced with
        DP -- presumably a guard against corrupt VarDP annotations, TODO confirm.
    :return: localized :class:`Table` whose entries use local-allele fields
        (LA/LAD/LGT/LPGT/LPL) instead of global ones.
    """
    mt = localize(mt)
    # Compile the row transformation once per row schema and cache it so
    # subsequent gVCFs with the same schema reuse the compiled function.
    if mt.row.dtype not in _transform_rows_function_map:
        f = hl.experimental.define_function(
            lambda row: hl.rbind(
                hl.len(row.alleles), '<NON_REF>' == row.alleles[-1],
                lambda alleles_len, has_non_ref: hl.struct(
                    locus=row.locus,
                    # Drop the trailing <NON_REF> allele when present.
                    alleles=hl.cond(has_non_ref, row.alleles[:-1], row.alleles),
                    rsid=row.rsid,
                    __entries=row.__entries.map(
                        lambda e:
                        hl.struct(
                            DP=e.DP,
                            END=row.info.END,
                            GQ=e.GQ,
                            # LA: local allele indices (an identity range here).
                            LA=hl.range(0, alleles_len - hl.cond(has_non_ref, 1, 0)),
                            LAD=hl.cond(has_non_ref, e.AD[:-1], e.AD),
                            LGT=e.GT,
                            LPGT=e.PGT,
                            # Keep PL only where it is meaningful; the trailing
                            # <NON_REF> PL entries are sliced off.
                            LPL=hl.cond(has_non_ref,
                                        hl.cond(alleles_len > 2,
                                                e.PL[:-alleles_len],
                                                hl.null(e.PL.dtype)),
                                        hl.cond(alleles_len > 1,
                                                e.PL,
                                                hl.null(e.PL.dtype))),
                            MIN_DP=e.MIN_DP,
                            PID=e.PID,
                            # RGQ: the PL of the (ref, <NON_REF>) genotype.
                            RGQ=hl.cond(
                                has_non_ref,
                                e.PL[hl.call(0, alleles_len - 1).unphased_diploid_gt_index()],
                                hl.null(e.PL.dtype.element_type)),
                            SB=e.SB,
                            # Site-level INFO is kept only for rows without an
                            # END field (i.e. not reference blocks).
                            gvcf_info=hl.case()
                            .when(hl.is_missing(row.info.END),
                                  hl.struct(
                                      ClippingRankSum=row.info.ClippingRankSum,
                                      BaseQRankSum=row.info.BaseQRankSum,
                                      MQ=row.info.MQ,
                                      MQRankSum=row.info.MQRankSum,
                                      MQ_DP=row.info.MQ_DP,
                                      QUALapprox=row.info.QUALapprox,
                                      RAW_MQ=row.info.RAW_MQ,
                                      ReadPosRankSum=row.info.ReadPosRankSum,
                                      VarDP=hl.cond(row.info.VarDP > vardp_outlier,
                                                    row.info.DP, row.info.VarDP)))
                            .or_missing()
                        ))),
            ),
            mt.row.dtype)
        _transform_rows_function_map[mt.row.dtype] = f
    transform_row = _transform_rows_function_map[mt.row.dtype]
    return Table(TableMapRows(mt._tir, Apply(transform_row._name, TopLevelReference('row')))) | 7961d5ea3d0b0e58332552c9c3c72692f34868db | 30,568
def volume_rebalance(volume: str) -> Result:
    """Kick off a rebalance of the given Gluster volume.

    Placeholder: ``volume rebalance`` is a long-running command and polling
    for completion is not implemented yet.
    Usage: volume rebalance <VOLNAME> fix-layout start | start [force]|stop|status

    :param volume: str. The name of the volume to start rebalancing
    :return: Result. Ok or Err
    """
    return run_command("gluster", ["volume", "rebalance", volume, "start"], True, True)
import json


def get_droplet_ip():
    """Return the droplet's IPv4 address from the cached droplet-info file."""
    with open('droplet_info.json', 'r') as cache:
        info = json.load(cache)
    return info['networks']['v4'][0]['ip_address']
def country_converter(text_input, abbreviations_okay=True):
    """
    Function that detects a country name in a given word.
    :param text_input: Any string.
    :param abbreviations_okay: means it's okay to check the list for abbreviations, like MX or GB.
    :return: a ``(country_code, country_name)`` tuple; both are empty strings
        when nothing matched.
    """
    # Set default values
    country_code = ""
    country_name = ""
    if len(text_input) <= 1:  # Too short, can't return anything for this.
        pass
    elif (
        len(text_input) == 2 and abbreviations_okay is True
    ):  # This is only two letters long
        text_input = text_input.upper()  # Convert to upper case
        for country in COUNTRY_LIST:
            if text_input == country[1]:  # Matches exactly (two-letter code column)
                country_code = text_input
                country_name = country[0]
    elif len(text_input) == 3 and abbreviations_okay is True:  # three letters long code
        text_input = text_input.upper()  # Convert to upper case
        for country in COUNTRY_LIST:
            if text_input == country[2]:  # Matches exactly (three-letter code column)
                country_code = country[1]
                country_name = country[0]
    else:  # It's longer than three, probably a name. Or abbreviations are disabled.
        text_input = text_input.title()
        for country in COUNTRY_LIST:
            if text_input == country[0]:  # It's an exact match
                country_code = country[1]
                country_name = country[0]
                # NOTE(review): this early return skips the comma-trimming at
                # the bottom, so an exact match can keep a ", ..." suffix --
                # confirm that is intended.
                return country_code, country_name  # Exit the loop, we're done.
            elif text_input in country[0] and len(text_input) >= 3:
                # Partial match: the loop keeps going, so a later entry can
                # overwrite an earlier partial match.
                country_code = country[1]
                country_name = country[0]
    if country_code == "" and country_name == "":  # Still nothing
        # Now we check against a list of associated words per country.
        for country in COUNTRY_LIST:
            try:
                country_keywords = country[
                    4
                ]  # These are keywords associated with it.
                for keyword in country_keywords:
                    if text_input.title() == keyword:  # A Match!
                        country_code = country[1]
                        country_name = country[0]
            except IndexError:
                # No keywords associated with this country.
                pass
    if "," in country_name:  # There's a comma.
        country_name = country_name.split(",")[0].strip()
        # Take first part if there's a comma (Taiwan, Province of China)
    return country_code, country_name | 19bdd3be63ee2a1165d8fc121203694da9732fea | 30,571
def lat_long_to_idx(gt, lon, lat):
    """Convert a (lon, lat) coordinate into (row, col) raster array indexes.

    :param gt: GDAL geotransform (e.g. gdal.Open(x).GetGeoTransform()).
    :type gt: GDAL Geotransform tuple.
    :param lon: Longitude.
    :type lon: float
    :param lat: Latitude.
    :type lat: float
    :return: (row, col) integer indexes.
    """
    row = int((lat - gt[3]) / gt[5])
    col = int((lon - gt[0]) / gt[1])
    return (row, col)
def bugs_mapper(bugs, package):
    """
    Update package bug tracker and support email and return package.
    https://docs.npmjs.com/files/package.json#bugs
    The url to your project's issue tracker and / or the email address to
    which issues should be reported.
    { "url" : "https://github.com/owner/project/issues"
    , "email" : "project@hostname.com"
    }
    You can specify either one or both values. If you want to provide only a
    url, you can specify the value for "bugs" as a simple string instead of an
    object.

    :param bugs: the package.json "bugs" value (str or dict).
    :param package: package object whose attributes are updated in place.
    :return: the same package object, for chaining.
    :raises Exception: if `bugs` is neither a string nor a dict.
    """
    # Python 3 fix: `basestring` only exists in Python 2 and raised a
    # NameError at runtime here; `str` covers the simple-string form.
    if isinstance(bugs, str):
        package.bug_tracking_url = bugs
    elif isinstance(bugs, dict):
        package.bug_tracking_url = bugs.get('url')
        support_contact = bugs.get('email')
        if support_contact:
            package.support_contacts = [support_contact]
    else:
        raise Exception('Invalid package.json bugs item')
    return package
def BSMlambda(delta: float, S: float, V: float) -> float:
    """Option elasticity ("lambda") -- an expression of leverage, not a true greek.

    Percentage change in the option price per percentage change in the
    underlying asset's price.

    Arguments
    ---------
    delta : float
        BSM delta of the option
    S : float
        Spot price of the underlying
    V : float
        Spot price of the option

    Returns
    -------
    float
        lambda
    """
    leverage = S / V
    return leverage * delta
import pwd


def uid_to_name(uid):
    """
    Find the username associated with a user ID.

    :param uid: The user ID (an integer).
    :returns: The username (a string) or :data:`None` if :func:`pwd.getpwuid()`
              fails to locate a user for the given ID.
    """
    try:
        return pwd.getpwuid(uid).pw_name
    except KeyError:
        # pwd.getpwuid() signals "no such uid" with KeyError; catching only
        # that (instead of the old broad `except Exception`) avoids masking
        # unrelated bugs such as passing a non-integer uid.
        return None
def fit_poly(data, error_func, degree = 3):
    """Fit a polynomial of the given degree to data via the supplied error function.

    Parameters
    ----------
    data: 2D array where each row is a point (X0, Y)
    error_func: function that computes the error between a polynomial and observed data
    degree: polynomial degree

    Returns the polynomial (np.poly1d) that minimizes the error function.
    """
    # Start from the all-ones coefficient vector.
    initial_guess = np.poly1d(np.ones(degree + 1, dtype = np.float32))
    # Plot the initial guess for visual reference (optional side effect).
    xs = np.linspace(-5, 5, 21)
    plt.plot(xs, np.polyval(initial_guess, xs), 'm--', linewidth = 2.0, label = 'Initial guess')
    # Minimize the error function over the coefficients.
    fitted = spo.minimize(error_func, initial_guess, args = (data, ), method = 'SLSQP', options = {'disp': True})
    return np.poly1d(fitted.x)
def find_skyrmion_center_2d(fun, point_up=False):
    """
    Find the centre the skyrmion, suppose only one skyrmion
    and only works for 2d mesh.
    `fun` accept a dolfin function.
    `point_up` : the core of skyrmion, points up or points down.

    Returns the (x, y) coordinates of the skyrmion core.
    """
    V = fun.function_space()
    mesh = V.mesh()
    # Dof coordinates; the (3, -1) reshape assumes a 3-component vector
    # function space -- TODO confirm for the meshes used here.
    coods = V.dofmap().tabulate_all_coordinates(mesh).reshape(3, -1)[0]
    coods.shape = (-1, mesh.topology().dim())
    xs = coods[:, 0]
    ys = coods[:, 1]
    mxys = fun.vector().array().reshape(3, -1)
    mzs = mxys[2]
    # Flip the sign so the core always sits at the minimum of mzs below.
    if point_up:
        mzs = - mxys[2]
    # Coarse stage: bounding box of all dofs deep inside the core (mz < -0.9).
    mins = [i for i, u in enumerate(mzs) if u < -0.9]
    xs_max = np.max(xs[mins])
    xs_min = np.min(xs[mins])
    ys_max = np.max(ys[mins])
    ys_min = np.min(ys[mins])
    # Refinement stage: evaluate the function on a 101x101 grid inside the box.
    xs_refine = np.linspace(xs_min, xs_max, 101)
    ys_refine = np.linspace(ys_min, ys_max, 101)
    coods_refine = np.array([(x, y) for x in xs_refine for y in ys_refine])
    mzs_refine = np.array([fun(xy)[2] for xy in coods_refine])
    min_id = np.argmin(mzs_refine)
    if point_up:
        min_id = np.argmax(mzs_refine)
    center = coods_refine[min_id]
    return center[0], center[1] | 030c704681a48cdeca1f880f08fe9fb039572640 | 30,577
import time


def test(num_games, opponent, silent):
    """ Test running a number of games

    :param num_games: how many games to play in total
    :param opponent: "minimax" to pit AutoPlayer against the minimax player;
        any other value plays two plain AutoPlayers against each other
    :param silent: if truthy, suppress per-game progress output
    """
    def autoplayer_creator(state):
        """ Create a normal autoplayer instance """
        return AutoPlayer(state)
    def minimax_creator(state):
        """ Create a minimax autoplayer instance """
        return AutoPlayer_MiniMax(state)
    players = [None, None]
    if opponent == "minimax":
        players[0] = ("AutoPlayer", autoplayer_creator)
        players[1] = ("MiniMax", minimax_creator)
    else:
        players[0] = ("AutoPlayer A", autoplayer_creator)
        players[1] = ("AutoPlayer B", autoplayer_creator)
    # Per-player tallies, indexed like `players`.
    gameswon = [0, 0]
    totalpoints = [0, 0]
    sumofmargin = [0, 0]
    t0 = time.time()
    # Run games
    for ix in range(num_games):
        if not silent:
            print("\nGame {0}/{1} starting".format(ix + 1, num_games))
        if ix % 2 == 1:
            # Odd game: swap players (alternate who goes first for fairness)
            players[0], players[1] = players[1], players[0]
            p1, p0 = test_game(players, silent)
            # Swap back
            players[0], players[1] = players[1], players[0]
        else:
            # Even game
            p0, p1 = test_game(players, silent)
        # Accumulate wins and winning margins; draws count for neither player.
        if p0 > p1:
            gameswon[0] += 1
            sumofmargin[0] += p0 - p1
        elif p1 > p0:
            gameswon[1] += 1
            sumofmargin[1] += p1 - p0
        totalpoints[0] += p0
        totalpoints[1] += p1
    t1 = time.time()
    print(
        "Test completed, {0} games played in {1:.2f} seconds, "
        "{2:.2f} seconds per game".format(
            num_games, t1 - t0, (t1 - t0) / num_games)
    )
    def reportscore(player):
        """ Report the result of a number of games """
        # The margin average divides by games won, so guard against zero wins.
        if gameswon[player] == 0:
            print(
                "{2} won {0} games and scored an average of {1:.1f} points per game"
                .format(
                    gameswon[player],
                    float(totalpoints[player]) / num_games,
                    players[player][0],
                )
            )
        else:
            print(
                "{3} won {0} games with an average margin of {2:.1f} and "
                "scored an average of {1:.1f} points per game"
                .format(
                    gameswon[player],
                    float(totalpoints[player]) / num_games,
                    float(sumofmargin[player]) / gameswon[player],
                    players[player][0],
                )
            )
    reportscore(0)
    reportscore(1) | c7560e2d298039b5f201b57779e14b4a38054160 | 30,578
import webbrowser


def pseudo_beaker(UserId: str, SessionId: str, replay=True, scope=True, browser=None, OrgId: str=None, is_staging: bool=True) -> dict:
    """
    Mimic the Beaker admin tool in opening up one or both of session replay and
    Scope tools for a given User Id and Session Id.
    Option to specify a browser (e.g. "safari", "chrome") otherwise the system
    default is used.

    Returns the dict of looked-up URLs.
    """
    urls = get_beaker_lookup(UserId, SessionId, OrgId, is_staging)
    opener = webbrowser if browser is None else webbrowser.get(browser)
    if replay:
        opener.open_new(urls["session_url"])
    if scope:
        opener.open_new(urls["scope_url"])
    return urls
def table_parse(table):
    """Extract the text content of an HTML table into nested lists.

    :param table: BeautifulSoup-style tag supporting ``find_all``; each ``tr``
        row contains ``td`` cells exposing a ``text`` attribute.
    :return: list of rows, each a list of stripped, non-empty cell strings.
    """
    parsed = []
    for row in table.find_all('tr'):
        texts = [cell.text.strip() for cell in row.find_all('td')]
        parsed.append([text for text in texts if text])
    return parsed
import pandas
import numpy


def scan_mv_preprocessing_fill_pivot_nan(df):
    """
    Value imputation.
    Impute missing data in pivot table.

    Parameters
    ----------
    df : dataframe
        Pivot table data with potentially missing values; the index must have
        a 'Group' level and a 'Component Name' column is skipped if present.

    Returns
    -------
    df : dataframe
        Pivot table data with no missing values.
    """
    df_new = pandas.DataFrame()
    for group in set(df.index.get_level_values('Group')):
        # Explicit .copy() makes the intent clear and silences pandas'
        # SettingWithCopyWarning on the per-analyte assignment below.
        df_group = df.loc[df.index.get_level_values('Group') == group].copy()
        for analyte in df_group.columns[~df_group.columns.isin(['Component Name'])]:
            series_fill = df_group[analyte].copy()
            # Missing at random: fill with the rounded group mean.
            series_fill[pandas.isna(series_fill)] = round(numpy.nanmean(series_fill))
            # Missing not at random (whole group missing): half the minimum
            # across groups processed so far.
            # NOTE(review): this reads df_new, so the result depends on group
            # processing order and fails if the first group is all-NaN --
            # confirm the intended behaviour.
            if True in set(pandas.isna(series_fill)):
                series_fill = numpy.nanmin(df_new[analyte])/2
            df_group[analyte] = series_fill
        # DataFrame.append() was removed in pandas 2.0; concat is the
        # supported replacement.
        df_new = df_group if df_new.empty else pandas.concat([df_new, df_group])
    df_filled = df_new.copy()
    return df_filled
def image_show(request,item_container):
    """Render the description/detail page for an image file.

    (Original docstring, German: "zeigt die Beschreibung der Datei an".)
    """
    app_name = 'image'
    # Base template variables for the generic "show" view of this item.
    vars = get_item_vars_show(request, item_container, app_name)
    file_path = DOWNLOAD_PATH + item_container.container.path
    file_name = file_path + item_container.item.name
    width, height = get_image_size(file_name)
    # p: whether the containing folder is access-protected; passed on to the
    # size/mtime/url helpers -- presumably it selects the protected file path.
    p = item_container.container.is_protected()
    vars['size'] = get_file_size(item_container, p)
    vars['width'] = width
    vars['height'] = height
    vars['mtime'] = get_file_modification_date(item_container, _('german'), p)
    vars['link'] = show_link(get_file_url(item_container, p), _(u'Download/Anzeigen'))
    return render_to_response ( 'app/file/base_details.html', vars ) | b68286bedd92aba7991e8994bf76d84bfe5d4c2e | 30,582
import logging
import collections
import functools


def train_and_eval():
    """Train and evaluate StackOver NWP task.

    Reads all configuration from FLAGS, builds the federated iterative
    process (custom or tff.learning based), and drives the training loop.
    """
    logging.info('Show FLAGS for debugging:')
    for f in HPARAM_FLAGS:
        logging.info('%s=%s', f, FLAGS[f].value)
    hparam_dict = collections.OrderedDict([
        (name, FLAGS[name].value) for name in HPARAM_FLAGS
    ])
    # Preprocessing yields the per-client dataset computation plus the
    # train/validation/test client splits.
    train_dataset_computation, train_set, validation_set, test_set = _preprocess_stackoverflow(
        FLAGS.vocab_size, FLAGS.num_oov_buckets, FLAGS.sequence_length,
        FLAGS.num_validation_examples, FLAGS.client_batch_size,
        FLAGS.client_epochs_per_round, FLAGS.max_elements_per_user)
    input_spec = train_dataset_computation.type_signature.result.element
    stackoverflow_metrics = _get_stackoverflow_metrics(FLAGS.vocab_size,
                                                       FLAGS.num_oov_buckets)
    if FLAGS.use_tff_learning:
        iterative_process, evaluate_fn, server_state_update_fn = _build_tff_learning_model_and_process(
            input_spec, stackoverflow_metrics)
    else:
        iterative_process, evaluate_fn, server_state_update_fn = _build_custom_model_and_process(
            input_spec, stackoverflow_metrics)
    # Fuse dataset construction into the process so clients are materialized
    # from ids on the fly.
    iterative_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        dataset_computation=train_dataset_computation, process=iterative_process)
    # Client selection: random sampling per round, or epoch-based shuffling
    # when a total number of epochs is specified.
    if FLAGS.total_epochs is None:
        def client_dataset_ids_fn(round_num: int, epoch: int):
            return _sample_client_ids(FLAGS.clients_per_round, train_set, round_num,
                                      epoch)
        logging.info('Sample clients for max %d rounds', FLAGS.total_rounds)
        total_epochs = 0
    else:
        client_shuffer = training_loop.ClientIDShuffler(FLAGS.clients_per_round,
                                                        train_set)
        client_dataset_ids_fn = client_shuffer.sample_client_ids
        logging.info('Shuffle clients for max %d epochs and %d rounds',
                     FLAGS.total_epochs, FLAGS.total_rounds)
        total_epochs = FLAGS.total_epochs
    training_loop.run(
        iterative_process,
        client_dataset_ids_fn,
        validation_fn=functools.partial(evaluate_fn, dataset=validation_set),
        total_epochs=total_epochs,
        total_rounds=FLAGS.total_rounds,
        experiment_name=FLAGS.experiment_name,
        train_eval_fn=None,
        test_fn=functools.partial(evaluate_fn, dataset=test_set),
        root_output_dir=FLAGS.root_output_dir,
        hparam_dict=hparam_dict,
        rounds_per_eval=FLAGS.rounds_per_eval,
        rounds_per_checkpoint=FLAGS.rounds_per_checkpoint,
        rounds_per_train_eval=2000,
        server_state_epoch_update_fn=server_state_update_fn) | 30eb088295ae8bc9ef5cd67fc042802f91c85627 | 30,583
def common_kwargs(cfg, bin_count, pointing):
    """Build the prepfold keyword arguments shared by all folds.

    :param cfg: run configuration dictionary (``run_ops`` / ``source`` keys)
    :param bin_count: number of profile bins to fold with
    :param pointing: pointing identifier used to generate the output name
    :return: dict mapping prepfold command-line switches to their values
    """
    kwargs = {}
    if cfg["run_ops"]["mask"]:
        kwargs["-mask"] = cfg["run_ops"]["mask"]
    kwargs.update({
        "-o": generate_prep_name(cfg, bin_count, pointing),
        "-n": bin_count,
        "-start": cfg["source"]["enter_frac"],
        "-end": cfg["source"]["exit_frac"],
        "-runavg": "",
        "-noxwin": "",
        "-noclip": "",
        "-nsub": 256,
        "-pstep": 1,
        "-pdstep": 2,
        "-dmstep": 1,
        "-npart": 120,
        "-npfact": 1,
        "-ndmfact": 1,
    })
    if bin_count >= 300:
        # Skipping the p-dot search greatly reduces search time.
        kwargs["-nopdsearch"] = ""
    if bin_count in (100, 50):
        # Initial fold: do a large period/DM search.
        kwargs["-npfact"] = 4
        kwargs["-ndmfact"] = 3
    if cfg["source"]["ATNF_P"] < 0.005:
        # Period less than 5 ms: widen the search, coarser DM, fewer parts.
        kwargs["-npfact"] = 4
        kwargs["-ndmfact"] = 3
        kwargs["-dmstep"] = 3
        kwargs["-npart"] = 40
    # Catalogue DM/period, overridden by locally-measured values if available.
    kwargs["-dm"] = cfg["source"]["ATNF_DM"]
    kwargs["-p"] = cfg["source"]["ATNF_P"]
    if cfg["source"]["my_DM"]:
        kwargs["-dm"] = cfg["source"]["my_DM"]
    if cfg["source"]["my_P"]:
        kwargs["-p"] = cfg["source"]["my_P"]
    return kwargs
def quatMultiply(q1,q2):
    """Compose two quaternions.

    Parameters
    ----------
    q1: 1 x 4 numpy array
        representing a quaternion
    q2: 1 x 4 numpy array
        representing a quatnernion

    Returns
    -------
    qM: 1 x 4 numpy array
        representing a quaternion that is the rotation of q1 followed by
        the rotation of q2

    Notes
    -----
    q2 * q1 is the correct order for applying rotation q1 and then
    rotation q2
    """
    w, x, y, z = q2
    # Left-multiplication matrix of q2.
    left = np.array([[w, -x, -y, -z],
                     [x,  w, -z,  y],
                     [y,  z,  w, -x],
                     [z, -y,  x,  w]])
    return np.dot(left, q1)
def expandingPrediction(input_list, multiple=5):
    """Repeat each prediction *multiple* times, preserving order.

    :param input_list: sequence of predictions
    :param multiple: how many copies of each element to emit
    :return: flat list with every element duplicated ``multiple`` times
    """
    return [prediction for prediction in input_list for _ in range(multiple)]
def pages_siblings_menu(context, page, url='/'):
    """Get the parent page of the given page and render a nested list of its
    child pages. Good for rendering a secondary menu.

    :param page: the page where to start the menu from.
    :param url: not used anymore.
    """
    language = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
    resolved = get_page_from_string_or_id(page, language)
    if resolved:
        context.update({'children': resolved.get_siblings(), 'page': resolved})
    return context
def aes(img, mask=None, canny_edges=None, canny_sigma=2):
    """Calculate the Average Edge Strength

    Reference:
    Aksoy, M., Forman, C., Straka, M., Çukur, T., Hornegger, J., & Bammer, R. (2012).
    Hybrid prospective and retrospective head motion correction to mitigate cross-calibration errors.
    Magnetic Resonance in Medicine, 67(5), 1237–1251. https://doi.org/10.1002/mrm.23101

    Args:
        img (np.array): Image
        mask (np.array, optional): Brain mask. Defaults to None.
        canny_edges (np.array, optional): Edges to use for calculation, calculates if `None`. Defaults to None.
        canny_sigma (int, optional): Sigma for canny edge detection filter. Defaults to 2.

    Returns:
        float, np.array, np.array: aes, edges, canny edge mask
    """
    # Normalize by the 99th-percentile intensity inside the mask.
    imax = np.quantile(img[mask == 1], 0.99)
    if canny_edges is None:
        # Detect edges slice by slice along the third axis, then keep only
        # those inside the brain mask.
        canny_edges = np.zeros_like(img)
        for slice_index in range(img.shape[2]):
            canny_edges[:, :, slice_index] = canny(
                img[:, :, slice_index], sigma=canny_sigma)
        canny_edges *= mask
    img_edges = sobel(img/imax) * canny_edges
    mean_strength = np.mean(img_edges[canny_edges == 1])
    return mean_strength, img_edges, canny_edges
def projection(basis, vectors):
    """
    The vectors live in a k dimensional space S and the columns of the basis are vectors of the same
    space spanning a subspace of S. Gives a representation of the projection of vector into the space
    spanned by basis in term of the basis.

    :param basis: an n-by-k array, a matrix whose vertical columns are the vectors of the basis
    :param vectors: an m-by-k array, a vector to be represented in the basis
    :return: an m-by-k array
    """
    # First project onto the basis, then map back into the ambient space.
    coefficients = matrix_mult(vectors, basis)
    return matrix_mult(coefficients, basis.T)
import json
import os


def _build_execution_context(worker_index, ports):
    """
    Create execution context for the model.
    :param worker_index: The index of this worker in a distributed setting.
    :param ports: A list of port numbers that will be used in setting up the servers.
    :return: The generated execution context.
    """
    # Build the TF_CONFIG payload tf.distribute expects: every worker lives on
    # localhost and this process is worker `worker_index`.
    TF_CONFIG = {'cluster': {'worker': [f'localhost:{ports[i]}' for i in range(_NUM_WORKERS)]},
                 'task': {'index': worker_index, 'type': 'worker'}}
    os.environ["TF_CONFIG"] = json.dumps(TF_CONFIG)
    # Read it back through the environment (round-trips what was just written).
    tf_config_json = json.loads(os.environ["TF_CONFIG"])
    cluster = tf_config_json.get('cluster')
    cluster_spec = tf.train.ClusterSpec(cluster)
    execution_context = {
        constants.TASK_TYPE: tf_config_json.get('task', {}).get('type'),
        constants.TASK_INDEX: tf_config_json.get('task', {}).get('index'),
        constants.CLUSTER_SPEC: cluster_spec,
        constants.NUM_WORKERS: tf.train.ClusterSpec(cluster).num_tasks(constants.WORKER),
        constants.NUM_SHARDS: tf.train.ClusterSpec(cluster).num_tasks(constants.WORKER),
        constants.SHARD_INDEX: tf_config_json.get('task', {}).get('index'),
        # Worker 0 is the chief by convention.
        constants.IS_CHIEF: tf_config_json.get('task', {}).get('index') == 0
    }
    return execution_context | c036656637bae0ec543fe31445305bb32e3572a6 | 30,590
import numpy


def grab(sequence, random = numpy.random):
    """
    Return a randomly-selected element from the sequence.

    :param sequence: indexable collection to sample from
    :param random: RNG exposing ``randint`` (defaults to ``numpy.random``)
    """
    index = random.randint(len(sequence))
    return sequence[index]
def csl_url_args_retriever():
    """Returns the style and locale passed as URL args for CSL export."""
    args = resource_requestctx.args
    return args.get("style"), args.get("locale")
from glob import glob
import os


def list_available_filter():
    """
    List all available filter responses.

    Scans ``FILTER_DIR`` for ``<instrument>_<band>.dat`` response files.

    Returns
    -------
    filter_dict : dict
        Dictionary of band names mapping to lists of available instruments.
    """
    # BUG FIX: the file imported the `glob` module but called `glob(...)`
    # directly, which raises TypeError ("module is not callable"); import the
    # function instead.
    filter_dict = {}
    for filter_file in glob(FILTER_DIR + '*_*.dat'):
        # File names follow the "<instrument>_<band>.dat" convention.
        instrument, band = os.path.basename(filter_file).split('.')[0].split('_')
        # setdefault replaces the manual "is the key present?" branching.
        filter_dict.setdefault(band, []).append(instrument)
    return filter_dict
from random import random


def select_parents(population, m):
    """Roulette-wheel selection of one parent from *population*.

    Individuals are weighted by ``fitness_function(child, m)``: fitter
    children occupy a larger slice of the wheel.

    :param population: iterable of candidate individuals
    :param m: extra argument forwarded to ``fitness_function``
    :return: the selected individual. NOTE(review): floating-point rounding
        can let the wheel spin past the last slice, in which case this
        returns None -- confirm callers tolerate that.
    """
    # BUG FIX: the file imported the `random` module but called `random()`
    # directly, which raises TypeError; import the function instead.
    fitness_population = sorted(
        population,
        key=lambda child: fitness_function(child, m),
        reverse=True)
    total_cost = get_population_cost(fitness_population, m)
    rand_num = random()
    for item in fitness_population:
        percentage = fitness_function(item, m) / total_cost
        if percentage > rand_num:
            # (An unreachable `break` after this return was removed.)
            return item
        # Not selected: shrink the target by this slice and move on.
        rand_num -= percentage
def rsqrt(x: Tensor):
    """Computes reciprocal of square root of x element-wise.
    Args:
        x: input tensor
    Returns:
        output tensor: 1/sqrt(x) element-wise (inf for 0 and nan for negative
        inputs, as the example shows)
    Examples:
        >>> x = tf.constant([2., 0., -2.])
        >>> rsqrt(x)
        <Tensor: shape=(3,), dtype=float32,
        numpy=array([0.707, inf, nan], dtype=float32)>
    """
    # Thin wrapper delegating to TensorFlow's fused rsqrt kernel.
    return tf.math.rsqrt(x) | 39b4574311eb74ccef18ddb936d1d92fbb0c1fd9 | 30,595
def averageObjPeg(objpegpts, planet, catalog=None, sceneid='NO_POL'):
    """
    Average peg points.

    :param objpegpts: iterable of objects exposing getPeg(),
        getAverageHeight() and getProcVelocity().
    :param planet: planet model handed to the peg averaging routine.
    :param catalog: optional ISCE catalog for recording inputs/outputs.
    :param sceneid: scene label used in log and catalog entries.
    :return: averaged peg with `averageheight` and `averagevelocity` set.
    """
    logger.info('Combining individual peg points: %s' % sceneid)
    peg = stdproc.orbit.pegManipulator.averagePeg([gp.getPeg() for gp in objpegpts], planet)
    # Plain arithmetic means of the per-scene heights and velocities.
    pegheights = [gp.getAverageHeight() for gp in objpegpts]
    pegvelocities = [gp.getProcVelocity() for gp in objpegpts]
    peg.averageheight = float(sum(pegheights)) / len(pegheights)
    peg.averagevelocity = float(sum(pegvelocities)) / len(pegvelocities)
    if catalog is not None:
        isceobj.Catalog.recordInputsAndOutputs(catalog, peg,
                                               "runSetmocomppath.averagePeg.%s" % sceneid,
                                               logger,
                                               "runSetmocomppath.averagePeg.%s" % sceneid)
    return peg | 92e41d33d3aa21ee6036e3f1a6550d81d793129e | 30,596
def _bytes_chr_py2(i):
    """
    Returns a byte string of length 1 whose ordinal value is i in Python 2.
    Do not call directly, use bytes_chr instead.

    (Under Python 2, chr() already returns a `str`, i.e. a byte string.)
    """
    return chr(i) | de524d1ec303cc297d7981570ef30aa9ae6840ed | 30,597
from typing import Any


def convert(parser: Any) -> c2gtypes.ParserRep:
    """Convert a docopt parser to a dict.

    (The summary previously said "getopt"; the argument below is a docopt
    parser.)

    Args:
        parser (Any): docopt parser
    Returns:
        c2gtypes.ParserRep: dictionary representing parser object
    """
    return {"parser_description": "", "widgets": extract(parser)} | cf6e53bd514bdb114c3bc5d3b7429c6a8f17881d | 30,598
def redirect_vurlkey(request, vurlkey, *args, **kwargs):
    """Look up the Vurl whose base58-encoded index is *vurlkey* and return an
    HTTP redirect to its target URL."""
    return Vurl.get_with_vurlkey(vurlkey.encode('utf-8')).http_response()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.