content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def get_maxlevel(divs, maxlevel):
    """Return the maximum 'level' value found in a nested list of div dicts.

    Recurses into each entry's optional 'subdivs' list; *maxlevel* seeds
    the running maximum.
    """
    for entry in divs:
        maxlevel = max(maxlevel, entry['level'])
        subdivs = entry.get('subdivs', None)
        if subdivs:
            maxlevel = get_maxlevel(subdivs, maxlevel)
    return maxlevel
def prenut(ra, dec, mjd, degrees=True):
    """Precess coordinate system to FK5 J2000.

    Args:
        ra: array-like, right ascension.
        dec: array-like, declination.
        mjd: array-like, modified Julian date.
        degrees: if True, ra/dec are in degrees; otherwise radians.

    Returns:
        (ra, dec) precessed, in the same units as the input.
    """
    # Conversion factor to radians (pysla works in radians — TODO confirm).
    if degrees:
        c = np.pi / 180.
    else:
        c = 1.
    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # use the explicit float64 dtype instead.
    raout = ra.astype(np.float64) * c
    decout = dec.astype(np.float64) * c
    # pysla.prenut appears to mutate raout/decout in place — TODO confirm.
    pysla.prenut(raout,
                 decout,
                 mjd.astype(np.float64))
    return raout / c, decout / c
def make_model_and_optimizer(conf):
    """Build the TasNet model and its optimizer from a config dictionary.

    Args:
        conf: Dictionary containing the output of hierachical argparse.
    Returns:
        model, optimizer.

    Keeping construction in one place makes reloading for resuming and
    evaluation very simple.
    """
    model = TasNet(conf["filterbank"], conf["masknet"])
    # The optimizer is configured entirely from the "optim" sub-config.
    optimizer = make_optimizer(model.parameters(), **conf["optim"])
    return model, optimizer
from re import T
import numpy
def trange(start: int, end: int, step: int = 1, dtype=None) -> numpy.ndarray:
    """
    Generate a tensor like a python range.

    FIX: the original annotations used ``T.Dtype``/``T.Tensor`` where ``T``
    was ``re.T`` (the regex TEMPLATE flag), which raises AttributeError the
    moment the module is imported.

    Args:
        start: The start of the range.
        end: The end of the range.
        step: The step of the range.
        dtype: optional numpy dtype for the result.
    Returns:
        tensor: A vector ranging from start to end in increments of step.
    """
    return numpy.arange(start, end, step, dtype=dtype)
def get_molecules(topology):
    """Group atom records from *topology* into per-molecule lists.

    Returns a dict keyed by molecule id with 'atoms', 'types' and 'charge'
    lists, or None when the topology has no 'atoms' entry.
    """
    if 'atoms' not in topology:
        return None
    grouped = {}
    for atom in topology['atoms']:
        # Record layout: [index, molecule id, atom type, charge, ...]
        record = grouped.setdefault(
            atom[1], {'atoms': [], 'types': [], 'charge': []})
        record['atoms'].append(atom[0])
        record['types'].append(atom[2])
        record['charge'].append(atom[3])
    return grouped
def disk_partitions(all):
    """Return disk partitions as a list of nt_partition tuples.

    ``all`` is forwarded to the C extension; presumably, when truthy,
    non-physical devices are included as well — TODO confirm.
    """
    raw = _psutil_mswindows.get_disk_partitions(all)
    return [nt_partition(*fields) for fields in raw]
from typing import Literal
def simple_dataset() -> Dataset:
    """
    Build a simple dataset with no BNodes that can be used in tests.

    Assumptions/assertions should not be made about the quads in it, other
    than that it contains no blank nodes.
    """
    ds = Dataset()
    default = ds.default_context
    default.add((EGSCHEMA.subject, EGSCHEMA.predicate, EGSCHEMA.object))
    default.add((EGURN.subject, EGURN.predicate, EGURN.object))
    default.add((EGHTTP.subject, EGHTTP.predicate, Literal("typeless")))
    # Quads in the example-schema named graph.
    schema_graph = ds.get_context(EGSCHEMA.graph)
    schema_graph.add((EGSCHEMA.subject, EGSCHEMA.predicate, EGSCHEMA.object))
    schema_graph.add((EGSCHEMA.subject, EGSCHEMA.predicate, Literal(12)))
    schema_graph.add(
        (
            EGHTTP.subject,
            EGHTTP.predicate,
            Literal("日本語の表記体系", lang="jpx"),
        )
    )
    schema_graph.add((EGURN.subject, EGSCHEMA.predicate, EGSCHEMA.subject))
    # Quads in the URN-identified named graph.
    urn_graph = ds.get_context(EGURN.graph)
    urn_graph.add((EGSCHEMA.subject, EGSCHEMA.predicate, EGSCHEMA.object))
    urn_graph.add((EGSCHEMA.subject, EGHTTP.predicate, EGHTTP.object))
    urn_graph.add(
        (EGSCHEMA.subject, EGHTTP.predicate, Literal("XSD string", datatype=XSD.string))
    )
    return ds
import unicodedata
import re
def slugify(value, allow_unicode=False):
    """Django-style slugify with project-specific substitutions.

    Adapted from https://github.com/django/django/blob/master/django/utils/text.py
    First '-' is mapped to 'ng' and '.' to 'pt'; then the value is
    normalized (ASCII-folded unless *allow_unicode*), lower-cased, stripped
    of non-word characters, and runs of whitespace/dashes are collapsed to
    single dashes. Leading/trailing dashes and underscores are removed.
    """
    text = str(value).replace('-', 'ng').replace('.', 'pt')
    if allow_unicode:
        text = unicodedata.normalize('NFKC', text)
    else:
        decomposed = unicodedata.normalize('NFKD', text)
        text = decomposed.encode('ascii', 'ignore').decode('ascii')
    text = re.sub(r'[^\w\s-]', '', text.lower())
    collapsed = re.sub(r'[-\s]+', '-', text)
    return collapsed.strip('-_')
def is_between(start, stop, p):
    """Check whether the query point ``p`` lies on the segment from
    ``start`` to ``stop``.

    Arguments:
    ----------
    start: array(shape=(D, 1))
    stop: array(shape=(D, 1))
    p: array(shape=(D, 1))
    """
    # Validate that all three inputs are column vectors.
    assert_col_vectors(start, stop)
    assert_col_vectors(stop, p)
    to_p = p - start
    segment = stop - start
    # Projection of (p - start) onto the segment direction; negative means
    # p lies "behind" the start point.
    projection = np.dot(segment.reshape(1, -1), to_p)
    # On-segment iff |start->p| + |p->stop| equals |start->stop|.
    total = distance(stop, start)
    from_start = distance(start, p)
    to_stop = distance(stop, p)
    if projection < 0 or not np.allclose(from_start + to_stop, total):
        return False
    return True
def get_elasticsearch_type():
    """Return the name of the main Elasticsearch document type, as
    configured in the application settings."""
    return settings.ELASTICSEARCH_TYPE
def run_cmd_code(cmd, directory='/'):
    """Run *cmd* in a shell and return its outputs plus the return code.

    Parameters
    ----------
    cmd : string
        command to run in a shell
    directory : string, default to '/'
        directory where to run the command

    Returns
    -------
    std_out, std_err, return_code
        a triplet with standard output, standard error output and return
        code (bytes, bytes, int).
    """
    process = Popen(cmd, shell=True, cwd=directory, stdout=PIPE, stderr=PIPE)
    std_out, std_err = process.communicate()
    exit_code = process.wait()
    return std_out, std_err, exit_code
def unpack_le32(data):
    """
    Unpack a little-endian 32-bit value from a bytearray.

    :param data: 32-bit little endian bytearray representation of an integer
    :return: integer value
    """
    # Project-level validation of the input buffer (length 4 — TODO confirm).
    _check_input_array(data, 4)
    value = 0
    for shift, byte in zip((0, 8, 16, 24), data):
        value |= byte << shift
    return value
def InductionsFromPrescribedCtCq_ST(vr_bar, Ct, Cq, Lambda, bSwirl):
    """
    Stream-tube-theory inductions from prescribed Ct and Cq.

    Based on the script fGetInductions_Prescribed_CT_CQ_ST.

    Returns (a_ST, a_prime_ST): the axial and tangential induction factors.
    """
    lambda_r = Lambda * vr_bar
    # Axial induction from momentum theory: Ct = 4 a (1 - a).
    a_ST = 0.5 * (1.0 - np.sqrt(1.0 - Ct))
    # Tangential induction only when swirl is modelled; 0 otherwise.
    if bSwirl:
        a_prime_ST = Cq / (4.0 * (1.0 - a_ST) * lambda_r)
    else:
        a_prime_ST = 0
    return a_ST, a_prime_ST
from typing import get_origin
from typing import Union
from typing import get_args
def unwrap_Optional_type(t: type) -> type:
    """ Given an Optional[...], return the wrapped type.

    Non-Union types are returned unchanged; a Union with more than one
    non-None member is returned as a Union of the remaining members.
    """
    if get_origin(t) is not Union:
        return t
    # Optional[X] is spelled Union[X, NoneType]: strip NoneType out.
    remaining = tuple(arg for arg in get_args(t) if arg is not type(None))
    if len(remaining) == 1:
        return remaining[0]
    return Union[remaining]
import subprocess
def tox_get_python_executable(envconfig):
    """Return a python executable for the given python base name.

    The first plugin/hook which returns an executable path will determine it.
    ``envconfig`` is the testenv configuration which contains
    per-testenv configuration, notably the ``.envname`` and ``.basepython``
    setting. Falls through (returns None) when pyenv is missing or, with
    fallback enabled, when ``pyenv which`` fails.
    """
    try:
        # pylint: disable=no-member
        pyenv_bin = (getattr(py.path.local.sysfind('pyenv'), 'strpath', 'pyenv')
                     or 'pyenv')
        cmd = [pyenv_bin, 'which', envconfig.basepython]
        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True
        )
        out, err = proc.communicate()
    except OSError:
        err = '\'pyenv\': command not found'
        LOG.warning(
            "pyenv doesn't seem to be installed, you probably "
            "don't want this plugin installed either."
        )
    else:
        if proc.poll() == 0:
            return out.strip()
        if not envconfig.tox_pyenv_fallback:
            raise PyenvWhichFailed(err)
        LOG.debug("`%s` failed thru tox-pyenv plugin, falling back. "
                  "STDERR: \"%s\" | To disable this behavior, set "
                  "tox_pyenv_fallback=False in your tox.ini or use "
                  " --tox-pyenv-no-fallback on the command line.",
                  ' '.join([str(x) for x in cmd]), err)
def _normalize_angle(x, zero_centered=True):
"""Normalize angles.
Take angles in radians and normalize them to [-pi, pi) or [0, 2 * pi)
depending on `zero_centered`.
"""
if zero_centered:
return (x + np.pi) % (2 * np.pi) - np.pi
else:
return x % (2 * np.pi) | 2e73a9fb20743f4721c954a48ca838c1eaca5edd | 29,415 |
def g_fam(arr):
    """
    Return the element-wise logical OR of neighbouring elements.

    Element ``i`` of the result is True when ``arr[i]`` or ``arr[i + 1]``
    is truthy. The result is one element shorter than *arr*; an empty or
    single-element input yields an empty list.
    """
    # zip(arr, arr[1:]) pairs each element with its successor, replacing
    # the original index-driven while loop and `arr.__len__()` call.
    return [bool(left or right) for left, right in zip(arr, arr[1:])]
import time
def main_archive(args, cfg: Configuration):
    """Start running archival.

    Polls the running jobs and archives them every 60 seconds. The loop
    never exits, so the trailing ``return 0`` is unreachable in practice.
    """
    # Initial fetch, mirroring the original flow (side effects only —
    # the result is refreshed at the top of every loop iteration).
    jobs = Job.get_running_jobs(cfg.log)
    print('...starting archive loop')
    while True:
        jobs = Job.get_running_jobs(cfg.log)
        archive.archive(cfg.directories, jobs)
        print('Sleeping 60s until next iteration...')
        time.sleep(60)
    return 0
def k2j(k, E, nu, plane_stress=False):
    """
    Convert a stress intensity factor K to the J-integral.

    J = K**2 / E' with E' = E for plane stress and E' = E / (1 - nu**2)
    for plane strain.

    Parameters
    ----------
    k: float
        Stress intensity factor.
    E: float
        Young's modulus in GPa.
    nu: float
        Poisson's ratio
    plane_stress: bool
        True for plane stress or False (default) for plane strain condition.

    Returns
    -------
    float
    """
    # FIX: the effective modulus E / (1 - nu**2) belongs to the plane
    # STRAIN condition; the original applied it when plane_stress was True
    # (and its docstring also mis-stated the default).
    if not plane_stress:
        E = E / (1 - nu ** 2)
    return k ** 2 / E
def get_worker_bonus(job_id, worker_id, con=None):
    """Return the bonus (in cents) recorded for a worker on a job.

    :param job_id:
    :param worker_id:
    :param con: optional database connection
    :return: the bonus in cents, or 0 when no bonus row exists
    """
    row = _get_worker_bonus_row(job_id, worker_id, con)
    return 0 if row is None else row["bonus_cents"]
def get_attribute_distribution():
    """
    Attribute weights based on position and prototype, in this order:
    [potential, confidence, iq, speed, strength, agility, awareness, stamina,
    injury, run_off, pass_off, special_off, run_def, pass_def, special_def]

    All weights are currently zero placeholders; building the table
    programmatically avoids ~90 lines of duplicated ``[0, ...]`` literals.
    """
    # Position -> tuple of prototype names; each pair maps to 15 weights.
    prototypes = {
        'QB': ('Gunslinger', 'Scrambler', 'Field General'),
        'HB': ('Elusive', 'Power', 'All-Purpose'),
        'FB': ('Blocking', 'Rushing'),
        'WR': ('Possession', 'Deep Threat', 'Route Runner'),
        'TE': ('Blocking', 'Receiving', 'Hybrid'),
        'LT': ('Pass Protector', 'Run Blocker'),
        'LG': ('Pass Protector', 'Run Blocker'),
        'C': ('Pass Protector', 'Run Blocker'),
        'RG': ('Pass Protector', 'Run Blocker'),
        'RT': ('Pass Protector', 'Run Blocker'),
        'DE': ('Pass Rusher', 'Run Stuffer'),
        'DT': ('Pass Rusher', 'Run Stuffer'),
        'OLB': ('Coverage', 'Run Stuffer'),
        'MLB': ('Coverage', 'Run Stuffer'),
        'CB': ('Ball Hawk', 'Shutdown'),
        'FS': ('Ball Hawk', 'Shutdown'),
        'SS': ('Ball Hawk', 'Run Stuffer'),
        'K': ('Accurate', 'Power'),
        'P': ('Coffin Corner', 'Power'),
    }
    # A fresh list per prototype so callers can mutate entries independently.
    return {
        position: {proto: [0] * 15 for proto in protos}
        for position, protos in prototypes.items()
    }
def predict(product: Product):
    """Return ML predictions, see /docs for more information.

    Args:
        product: (Product) the parsed data from user request
    Returns:
        A dictionnary with the predicted nutrigrade
        and the related probability
    """
    # Numeric features first — insertion order defines the model's input
    # column order, so do not reorder these.
    features = {
        "energy": round(float(product.energy)),
        "saturated_fat": round(float(product.saturated_fat)),
        "sugars": round(float(product.sugars)),
        "salt": round(float(product.salt)),
        "fibers": round(float(product.fibers)),
    }
    # One-hot columns for the PNNS food group, all off by default.
    group_columns = (
        "group1_Beverages",
        "group1_Cereals and potatoes",
        "group1_Composite foods",
        "group1_Fat and sauces",
        "group1_Fruits and vegetables",
        "group1_Milk and dairy products",
        "group1_Sugary snacks",
        "group1_unknown",
    )
    for column in group_columns:
        features[column] = 0
    # If the category is recognised, switch its one-hot flag on.
    formatted_category = "group1_{0}".format(product.pnns_group)
    if formatted_category in features:
        features[formatted_category] = 1
    sample = list(features.values())
    # Predict the nutrigrade !
    nutrigrade = model.predict([sample])[0]
    probability = model.predict_proba([sample]).argmax(1).item()
    return {"nutrigrade": nutrigrade, "probability": probability}
def get_stock_historicals(symbol, interval="5minute", span="week"):
    """Returns the historical data for a SYMBOL with data at every time INTERVAL over a given SPAN."""
    valid_spans = ['day', 'week', 'month', '3month', 'year', '5year']
    valid_intervals = ['5minute', '10minute', 'hour', 'day', 'week']
    assert span in valid_spans
    assert interval in valid_intervals
    data = robin_stocks.stocks.get_stock_historicals(symbol, interval, span)
    # Post-process in place before returning.
    process_historicals(data)
    return data
import re
def re_match_both2(item, args):
    """Match two regexes (each with capture groups) against two columns.

    *args* is (col1, expr1, col2, expr2). Returns a two-element list:
    ["<expr1 group 1>-g1" or "-g2", ''] when both expressions match (g1/g2
    says which of expr2's first two groups participated), else ['', ''].
    """
    (re_col1, re_expr1, re_col2, re_expr2) = args
    # Lazily compile and memoise both patterns in the shared cache.
    for expr in (re_expr1, re_expr2):
        if expr not in compiled_res:
            compiled_res[expr] = re.compile(expr)
    # Both columns must match for a non-empty result.
    match1 = compiled_res[re_expr1].search(item[re_col1])
    match2 = compiled_res[re_expr2].search(item[re_col2])
    if not (match1 and match2):
        return ['', '']
    if match2.group(1) is None and match2.group(2) is None:
        return ['', '']
    grp = "g2" if match2.group(1) is None else "g1"
    return ["%s-%s" % (match1.group(1), grp), '']
def at_least(actual_value, expected_value):
    """Assert that actual_value is at least expected_value.

    Returns the (truthy) comparison result on success and raises
    AssertionError otherwise.
    """
    outcome = actual_value >= expected_value
    if not outcome:
        raise AssertionError(
            "{!r} is LESS than {!r}".format(actual_value, expected_value)
        )
    return outcome
import re
import string
def clean_text(text):
    """ Clean text : lower text + Remove '\n', '\r', URL, '’', numbers and double space + remove Punctuation

    Args:
        text (str)
    Return:
        text (str)
    """
    # Ordered (pattern, replacement) pipeline — order matters: URLs must be
    # removed before punctuation, and space-collapsing comes last.
    replacements = [
        ('\n', ' '),
        ('\r', ' '),
        ('\[.*?\]', ' '),
        ('https?://\S+|www\.\S+', ' '),
        ('[%s]' % re.escape(string.punctuation), ' '),  # remove punctuation
        ('’', ' '),
        ('\w*\d\w*', ' '),
        (' +', ' '),
    ]
    cleaned = str(text).lower()
    for pattern, substitute in replacements:
        cleaned = re.sub(pattern, substitute, cleaned)
    return cleaned
import fnmatch
def fnmatch_list(filename, pattern_list):
    """ Check filename against a single pattern or a list of patterns.

    Args:
        filename: name to test.
        pattern_list: one fnmatch pattern (str) or a list of patterns.
    Returns:
        True if any pattern matches, False otherwise.
    """
    # FIX: the module-level `import fnmatch` binds the MODULE, so the bare
    # `fnmatch(...)` call below raised TypeError; import the function here.
    from fnmatch import fnmatch
    # isinstance is the idiomatic type check (was `type(x) != list`).
    if not isinstance(pattern_list, list):
        pattern_list = [pattern_list]
    return any(fnmatch(filename, pattern) for pattern in pattern_list)
def math_div_str(numerator, denominator, accuracy=0, no_div=False):
    """Render a division as a human-friendly string.

    :param numerator: the numerator
    :param denominator: the denominator
    :param accuracy: number of decimal places for the quotient
    :param no_div: when |numerator| < |denominator|, return "n/d" verbatim
        (e.g. 3/5) instead of the reduced "1/x" form (e.g. 1/1.6)
    :return: 0 when either operand is 0, otherwise a string
    """
    if denominator == 0 or numerator == 0:
        return 0
    if abs(numerator) < abs(denominator):
        if no_div:
            return '%d/%d' % (numerator, denominator)
        return '1/' + str(int(round(denominator / numerator, 0)))
    # Exact division needs no decimals regardless of the requested accuracy.
    if not numerator % denominator:
        accuracy = 0
    quotient = round(float(numerator) / float(denominator), accuracy)
    return str(int(quotient)) if accuracy == 0 else str(quotient)
from datetime import datetime
def create_comments(post):
    """
    Helper to create remote comments from a post payload.

    :param post: dict whose 'posts'[0]['comments'] holds raw comment dicts
    :return: list of Comment instances
    """
    comments = []
    raw_post = post.get('posts')[0]
    for raw in raw_post.get('comments'):
        new_comment = Comment()
        new_comment.author = create_author(raw.get('author'))
        new_comment.comment = raw.get('comment')
        new_comment.contentType = raw.get('contentType')
        new_comment.content = new_comment.get_comment()
        new_comment.id = raw.get('id')
        # Timestamps arrive as naive ISO strings; localize them to UTC.
        parsed = datetime.strptime(raw.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ')
        new_comment.published = utc.localize(parsed)
        comments.append(new_comment)
    return comments
def zero_fuel(distance_to_pump, mpg, fuel_left):
    """
    Decide whether the remaining fuel is enough to reach the pump.

    The car averages *mpg* miles per gallon, the pump is
    *distance_to_pump* miles away, and *fuel_left* gallons remain.
    All inputs are positive.

    :param distance_to_pump: an integer value, positive.
    :param mpg: an integer value, positive.
    :param fuel_left: an integer value, positive.
    :return: True if the journey is possible on the fuel left, else False.
    """
    gallons_needed = distance_to_pump / mpg
    return gallons_needed <= fuel_left
import itertools
def get_routing_matrix(
    lambda_2,
    lambda_1_1,
    lambda_1_2,
    mu_1,
    mu_2,
    num_of_servers_1,
    num_of_servers_2,
    system_capacity_1,
    system_capacity_2,
    buffer_capacity_1,
    buffer_capacity_2,
    routing_function=get_weighted_mean_blocking_difference_between_two_systems,
    alpha=0,
):
    """
    Get the optimal distribution matrix that consists of the proportion of
    individuals to be distributed to each hospital for all possible
    combinations of thresholds of the two hospitals (T_1, T_2). For every
    set of thresholds, the entry holds the proportion of individuals to
    distribute to hospital 1.

    Parameters
    ----------
    lambda_2, lambda_1_1, lambda_1_2, mu_1, mu_2 : float
    num_of_servers_1, num_of_servers_2 : int
    system_capacity_1, system_capacity_2 : int
    buffer_capacity_1, buffer_capacity_2 : int
    routing_function : function, optional
        The function to use to get the optimal distribution of patients

    Returns
    -------
    numpy array
        The matrix with proportions for all threshold combinations
    """
    # Parameters shared by every best-response computation.
    shared_kwargs = dict(
        lambda_2=lambda_2,
        lambda_1_1=lambda_1_1,
        lambda_1_2=lambda_1_2,
        mu_1=mu_1,
        mu_2=mu_2,
        num_of_servers_1=num_of_servers_1,
        num_of_servers_2=num_of_servers_2,
        system_capacity_1=system_capacity_1,
        system_capacity_2=system_capacity_2,
        buffer_capacity_1=buffer_capacity_1,
        buffer_capacity_2=buffer_capacity_2,
        routing_function=routing_function,
        alpha=alpha,
    )
    routing_matrix = np.zeros((system_capacity_1, system_capacity_2))
    # Thresholds are 1-based; matrix indices are 0-based.
    for threshold_1 in range(1, system_capacity_1 + 1):
        for threshold_2 in range(1, system_capacity_2 + 1):
            routing_matrix[threshold_1 - 1, threshold_2 - 1] = (
                calculate_class_2_individuals_best_response(
                    threshold_1=threshold_1,
                    threshold_2=threshold_2,
                    **shared_kwargs,
                )
            )
    return routing_matrix
def astrange_to_symrange(astrange, arrays, arrname=None):
    """ Converts an AST range (array, [(start, end, skip)]) to a symbolic math
    range, using the obtained array sizes and resolved symbols. """
    arrdesc = None
    if arrname is not None:
        arrdesc = arrays[arrname]
        # Scalars have no range to speak of.
        if arrdesc.shape is None:
            return None
        # No explicit range given: produce the full extent of the array.
        if astrange is None:
            return [
                (symbolic.pystr_to_symbolic(0),
                 symbolic.pystr_to_symbolic(symbolic.symbol_name_or_value(s)) -
                 1, symbolic.pystr_to_symbolic(1)) for s in arrdesc.shape
            ]
        missing_slices = len(arrdesc.shape) - len(astrange)
        if missing_slices < 0:
            raise ValueError(
                'Mismatching shape {} - range {} dimensions'.format(
                    arrdesc.shape, astrange))
        # Pad trailing dimensions with full slices (mutates the caller's
        # list, matching the original behavior).
        astrange.extend([(None, None, None)] * missing_slices)
    result = []
    for dim, rng in enumerate(astrange):
        if isinstance(rng, tuple):
            begin_ast, end_ast, skip_ast = rng
            # Defaults: begin 0, end = dimension size - 1, skip 1.
            if begin_ast is None:
                begin = symbolic.pystr_to_symbolic(0)
            else:
                begin = symbolic.pystr_to_symbolic(unparse(begin_ast))
            if end_ast is not None:
                end = symbolic.pystr_to_symbolic(unparse(end_ast)) - 1
            elif arrname is None:
                raise SyntaxError('Cannot define range without end')
            else:
                end = symbolic.pystr_to_symbolic(
                    symbolic.symbol_name_or_value(arrdesc.shape[dim])) - 1
            if skip_ast is None:
                skip = symbolic.pystr_to_symbolic(1)
            else:
                skip = symbolic.pystr_to_symbolic(unparse(skip_ast))
        else:
            # A single element was indexed rather than a slice.
            begin = symbolic.pystr_to_symbolic(unparse(rng))
            end = begin
            skip = symbolic.pystr_to_symbolic(1)
        result.append((begin, end, skip))
    return result
def get_union(*args):
    """Return the union of all input iterables as a list (order unspecified)."""
    merged = set()
    for items in args:
        merged |= set(items)
    return list(merged)
def retrieve(func):
    """
    Decorator for Zotero read API methods; calls _retrieve_data() and passes
    the result to the correct processor, based on a lookup
    """
    @wraps(func)
    def wrapped_f(self, *args, **kwargs):
        """
        Returns result of _retrieve_data().

        func's return value is part of a URI ('/users/123/items?key=abc123');
        it is intercepted here and passed to _retrieve_data.
        """
        if kwargs:
            self.add_parameters(**kwargs)
        retrieved = self._retrieve_data(func(self, *args))
        # we now always have links in the header response
        self.links = self._extract_links()
        # determine content and format, based on url params
        content = (
            self.content.search(self.request.url)
            and self.content.search(self.request.url).group(0)
            or "bib"
        )
        # MIME type -> internal format tag; JSON is the default.
        formats = {
            "application/atom+xml": "atom",
            "application/x-bibtex": "bibtex",
            "application/json": "json",
            "text/html": "snapshot",
            "text/plain": "plain",
            "application/pdf; charset=utf-8": "pdf",
            "application/pdf": "pdf",
            "application/msword": "doc",
            "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "xlsx",
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx",
            "application/vnd.openxmlformats-officedocument.presentationml.presentation": "pptx",
            "application/zip": "zip",
            "application/epub+zip": "zip",
            "audio/mpeg": "mp3",
            "video/mp4": "mp4",
            "audio/x-wav": "wav",
            "video/x-msvideo": "avi",
            "application/octet-stream": "octet",
            "application/x-tex": "tex",
            "application/x-texinfo": "texinfo",
            "image/jpeg": "jpeg",
            "image/png": "png",
            "image/gif": "gif",
            "image/tiff": "tiff",
            "application/postscript": "postscript",
            "application/rtf": "rtf",
        }
        # Strip any "; charset=..." segment before the lookup.
        content_type_header = self.request.headers["Content-Type"].lower() + ";"
        mime = content_type_header[0:content_type_header.index(";")]
        fmt = formats.get(mime, "json")
        # clear all query parameters
        self.url_params = None
        # tag requests get their own processing path
        if "tags" in self.request.url:
            self.tag_data = False
            return self._tags_data(retrieved.json())
        if fmt == "atom":
            parsed = feedparser.parse(retrieved.text)
            # dispatch to the processor registered for this content type
            processor = self.processors.get(content)
            return processor(parsed)
        if fmt == "snapshot":
            # we need to dump as a zip!
            self.snapshot = True
        if fmt == "bibtex":
            parser = bibtexparser.bparser.BibTexParser(
                common_strings=True, ignore_nonstandard_types=False)
            return parser.parse(retrieved.text)
        if fmt != "json":
            # binary payload — hand back the raw bytes
            return retrieved.content
        # no need to do anything special, return JSON
        return retrieved.json()
    return wrapped_f
def specificity(y, z):
    """True negative rate `tn / (tn + fp)`.

    A module-level ``pseudocount`` is added to the denominator,
    presumably to avoid division by zero — TODO confirm.
    """
    tp, tn, fp, fn = contingency_table(y, z)
    denominator = tn + fp + pseudocount
    return tn / denominator
import os
import re
def untag_file(fname, tag, comment=False):
    """Removes all of a given tag from a given TeX file or list of files.

    Positional arguments:
    fname -- file path of file to be edited, or list of file paths
    tag -- tag to be removed (letters only, e.g. 'textit' for \textit{...})

    Keyword arguments:
    comment -- True to process comment lines, False otherwise (default False)

    Returns:
    number of tag removals made

    It is assumed that any instances of the given tag begin and end on the
    same line.
    """
    total_removed = 0
    # Normalise the input to a list of file names.
    if isinstance(fname, str):
        targets = [fname]
    elif isinstance(fname, (list, tuple)):
        targets = fname
    else:
        raise TypeError("input must be a file or list of files")
    for name in targets:
        if not os.path.exists(name):
            raise FileNotFoundError("input path does not exist")
        path = os.path.abspath(name)
        if not os.path.isfile(path):
            raise FileNotFoundError("input path is not a file name")
        # Stream edits through a temporary sibling file.
        outfile = path + ".tmp"
        with open(name, mode='r') as fin, open(outfile, mode='w') as fout:
            for line in fin:
                # Split at the first unescaped '%' so comments are spared
                # unless the caller asked to process them too.
                if comment == True:
                    parts = re.split("(?<!\\\\)%", line)
                else:
                    parts = [line]
                # Only the pre-comment part is untagged.
                (parts[0], line_removed) = _untag_string(parts[0], tag)
                total_removed += line_removed
                print("%".join(parts), file=fout, end="")
        # Atomically-ish swap the temporary file into place.
        os.remove(path)
        os.rename(outfile, path)
    return total_removed
import json
import time
def deploy_template(template):
    """
    Create or update a CloudFormation stack from *template*.

    :type template: WavycloudStack
    :return: cloudformation waiter object (None for a no-op change set)
    """
    stack_name = template.stack_name
    logger.debug(pformat(get_stacks_by()))
    logger.debug(pformat(json.loads(template.to_json())))
    policy = template.get_template_policy()
    cloudformation.validate_template(TemplateBody=template.to_json())
    if stack_name not in get_stacks_by():
        # Fresh stack: create it and hand back the create waiter.
        logger.info("Creating Stack: {}".format(stack_name))
        cloudformation.create_stack(StackName=stack_name, TemplateBody=template.to_json(),
                                    Capabilities=['CAPABILITY_IAM'], StackPolicyBody=policy,
                                    Parameters=template.get_secret_params())
        return cloudformation.get_waiter('stack_create_complete')
    # Existing stack: go through a timestamped change set.
    logger.info("Updating Stack: {}".format(stack_name))
    changeset_name = 'changeset-{}'.format(time.strftime("%Y-%m-%dT%H-%M-%S"))
    cloudformation.create_change_set(StackName=stack_name,
                                     TemplateBody=template.to_json(),
                                     ChangeSetName=changeset_name, Capabilities=['CAPABILITY_IAM'],
                                     Parameters=template.get_secret_params())
    description = cloudformation.describe_change_set(ChangeSetName=changeset_name, StackName=stack_name)
    # NOTE(review): busy-polls with no sleep between describe calls —
    # consider adding a delay/backoff.
    while changeset_is_pending(description):
        description = cloudformation.describe_change_set(ChangeSetName=changeset_name,
                                                         StackName=stack_name)
    if changeset_has_delete(description):
        raise Exception("Changeset '{}' has Remove action. Please review and execute change manually")
    if not changset_is_empty(description):
        cloudformation.execute_change_set(ChangeSetName=changeset_name, StackName=stack_name)
        return cloudformation.get_waiter('stack_update_complete')
def get_model_inputs_from_database(scenario_id, subscenarios, subproblem, stage, conn):
    """
    Fetch new-binary-build storage inputs for the model.

    :param subscenarios: SubScenarios object with all subscenario info
    :param subproblem:
    :param stage:
    :param conn: database connection
    :return: (new_stor_costs, new_stor_build_size) result cursors
    """
    # Annualized cost inputs per project/vintage for binary-build storage.
    cost_cursor = conn.cursor()
    cost_query = """
        SELECT project, vintage, lifetime_yrs,
        annualized_real_cost_per_mw_yr,
        annualized_real_cost_per_mwh_yr
        FROM inputs_project_portfolios
        CROSS JOIN
        (SELECT period AS vintage
        FROM inputs_temporal_periods
        WHERE temporal_scenario_id = {}) as relevant_vintages
        INNER JOIN
        (SELECT project, vintage, lifetime_yrs,
        annualized_real_cost_per_mw_yr, annualized_real_cost_per_mwh_yr
        FROM inputs_project_new_cost
        WHERE project_new_cost_scenario_id = {}) as cost
        USING (project, vintage)
        WHERE project_portfolio_scenario_id = {}
        AND capacity_type = 'stor_new_bin'
        ;""".format(
        subscenarios.TEMPORAL_SCENARIO_ID,
        subscenarios.PROJECT_NEW_COST_SCENARIO_ID,
        subscenarios.PROJECT_PORTFOLIO_SCENARIO_ID,
    )
    new_stor_costs = cost_cursor.execute(cost_query)
    # Fixed MW/MWh build sizes for the same set of projects.
    size_cursor = conn.cursor()
    size_query = """SELECT project, binary_build_size_mw, binary_build_size_mwh
        FROM inputs_project_portfolios
        INNER JOIN
        (SELECT project, binary_build_size_mw, binary_build_size_mwh
        FROM inputs_project_new_binary_build_size
        WHERE project_new_binary_build_size_scenario_id = {})
        USING (project)
        WHERE project_portfolio_scenario_id = {}
        AND capacity_type = 'stor_new_bin';""".format(
        subscenarios.PROJECT_NEW_BINARY_BUILD_SIZE_SCENARIO_ID,
        subscenarios.PROJECT_PORTFOLIO_SCENARIO_ID,
    )
    new_stor_build_size = size_cursor.execute(size_query)
    return new_stor_costs, new_stor_build_size
def pad_image(image, padding):
    """
    Pad an image's canvas by *padding* pixels on every border, filling the
    padded area with a reflection of the data.

    :param image: Image to pad, either [H, W] or [H, W, 3]
    :param padding: Amount of padding to add to the image
    :return: Padded image; padding uses reflection along the border
    """
    rank = len(image.shape)
    if rank < 3:
        # Grayscale image (ground truth): pad both spatial axes.
        pad_widths = ((padding, padding), (padding, padding))
    elif rank == 3:
        # RGB image: leave the channel axis untouched.
        pad_widths = ((padding, padding), (padding, padding), (0, 0))
    else:
        assert False, "Method cannot pad 4D images"
    return np.lib.pad(image, pad_widths, 'reflect')
def delta_obj_size(object_image_size, model_image_size):
    """Compute the delta (unconstrained value in (-inf, inf)) of the object
    (width, height) relative to the model image (width, height), i.e. the
    inverse of the sigmoid scaling used at prediction time.

    Parameters
    ----------
    object_image_size : tuple
        True (width, height) of the object, already rescaled to the model
        image size.
    model_image_size : tuple
        (width, height) the model predicts on.

    Returns
    -------
    tuple
        The unconstrained (delta_w, delta_h) values.
    """
    obj_w, obj_h = object_image_size
    model_w, model_h = model_image_size

    def _inverse_sigmoid(obj_dim, model_dim):
        # inverse of obj_dim = model_dim * sigmoid(delta);
        # the 1e-6 guards against log(0) / nan when obj_dim == model_dim
        return -np.log((model_dim / obj_dim) + 1e-6 - 1)

    return _inverse_sigmoid(obj_w, model_w), _inverse_sigmoid(obj_h, model_h)
def get_relname_info(name):
    """
    Locate *name* (a row) in the release map defined above and return
    that release's properties (its columns).

    :param name: release name used as the lookup key
    :return: the properties object stored for that release
    :raises KeyError: if *name* is not present in ``RELEASES_BY_NAME``
    """
    return RELEASES_BY_NAME[name]
def swap(bee_permutation, n_bees):
    """Foraging stage using the swap mutation method.

    Simulates the foraging stage of the algorithm: from a single bee's
    index permutation, `n_bees` forager bees are created, each with two
    unique positions of the permutation exchanged.

    Parameters
    ----------
    bee_permutation: np.ndarray with shape (1, n_coordinates)
        Indexing permutation of the discrete bee coordinates.
    n_bees: int
        Number of forager bees to create.

    Returns
    -------
    forager_bees: np.ndarray with shape (n_bees, n)
        The new indexing permutations after the swap mutation.

    Examples
    --------
    >>> bee_permutation = np.arange(10)[np.newaxis, :]
    >>> swap(bee_permutation, 2)
    array([[6, 1, 2, 3, 4, 5, 0, 7, 8, 9],
           [0, 1, 9, 3, 4, 5, 6, 7, 8, 2]])
    """
    # Replicate the permutation and draw the two column indices to exchange
    forager_bees, pair_idx = _prepare_array(bee_permutation, n_bees)

    # Per-row swap of the two chosen columns; the column vector of row
    # indices coerces the fancy indexing into shape (n_bees, 2). See
    # https://stackoverflow.com/questions/59936869/swap-multiple-indices-in-2d-array
    rows = np.arange(n_bees)[:, np.newaxis]
    forager_bees[rows, pair_idx] = forager_bees[rows, pair_idx[:, ::-1]]
    return forager_bees.astype(int)
def update_boundaries(x=None):
    """
    Trackbar callback: re-read all six HSV sliders, rebuild the threshold
    mask and refresh the preview window. Called every time a slider moves.
    """
    # refresh the upper and lower HSV bounds from the slider positions
    for channel, slider in enumerate(('Hmax', 'Smax', 'Vmax')):
        maxHSV[channel] = cv2.getTrackbarPos(slider, 'image')
    for channel, slider in enumerate(('Hmin', 'Smin', 'Vmin')):
        minHSV[channel] = cv2.getTrackbarPos(slider, 'image')

    # build a bitmap from the new thresholds and mask the original frame
    mask_hsv = cv2.inRange(hsv, minHSV, maxHSV)
    display = cv2.bitwise_and(frame, frame, mask=mask_hsv)
    cv2.imshow('image', display)

    return x  # unneeded, only silences warnings about the unused argument
def get_stream(stream_id, return_fields=None, ignore_exceptions=False):
    """Retrieve the information for a single stream given its ID.

    .. versionchanged:: 3.1.0
       Changed the default ``return_fields`` value to ``None`` and adjusted the function accordingly.

    :param stream_id: The ID of the stream to retrieve
    :type stream_id: int, str
    :param return_fields: Specific fields to return if not all of the default fields are needed (Optional)
    :type return_fields: list, None
    :param ignore_exceptions: Determines whether or not exceptions should be ignored (Default: ``False``)
    :type ignore_exceptions: bool
    :returns: A dictionary with the data for the stream
    :raises: :py:exc:`khorosjx.errors.exceptions.InvalidDatasetError`,
             :py:exc:`khorosjx.errors.exceptions.GETRequestError`
    """
    # The API helpers require an established core connection
    verify_core_connection()

    # Fetch the raw API response and narrow it down on success
    response = core.get_data('streams', stream_id, return_json=False, all_fields=True)
    if errors.handlers.check_api_response(response, ignore_exceptions=ignore_exceptions):
        response = core.get_fields_from_api_response(response.json(), 'stream', return_fields)
    return response
def get_grade_mdata():
    """Return default mdata map for Grade"""
    def _display_text(text):
        # Standard DisplayText payload with the default language/script/format
        return {
            'text': text,
            'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
            'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
            'formatTypeId': str(DEFAULT_FORMAT_TYPE),
        }

    def _decimal_mdata(label):
        # Shared shape of all DECIMAL-syntax entries in this map
        return {
            'element_label': _display_text(label),
            'instructions': _display_text('enter a decimal value.'),
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_decimal_values': [None],
            'syntax': 'DECIMAL',
            'decimal_scale': None,
            'minimum_decimal': None,
            'maximum_decimal': None,
            'decimal_set': [],
        }

    return {
        'output_score': _decimal_mdata('output score'),
        'grade_system': {
            'element_label': _display_text('grade system'),
            'instructions': _display_text('accepts an osid.id.Id object'),
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_id_values': [''],
            'syntax': 'ID',
            'id_set': [],
        },
        'input_score_end_range': _decimal_mdata('input score end range'),
        'input_score_start_range': _decimal_mdata('input score start range'),
    }
def flatten_dict_join_keys(dct, join_symbol=" ", simplify_iterables=False):
    """Flatten a nested dict, joining nested keys with *join_symbol*.

    :param dct: dict to flatten
    :param join_symbol: separator inserted between joined key parts (default " ")
    :param simplify_iterables: if True, lists and ndarrays are first expanded
        so that each of their elements becomes its own key
    :return: flat dict with joined string keys
    """
    def _join(outer_key, inner_key):
        return outer_key + join_symbol + inner_key

    if simplify_iterables:
        dct = list_to_dict_in_structure(
            ndarray_to_list_in_structure(dct), keys_to_str=True)
    return dict(flatten_dict(dct, join=_join))
import logging
def prev_factor(target, current):
    """Return the factor of *target* immediately below *current*.

    (The previous docstring said "next highest factor above current", but the
    code — and its own log message — select the *previous* factor.)

    Assumes *current* is itself a factor of *target*; otherwise
    ``candidates.index(current)`` raises ``ValueError``.
    Returns 1 when *target* has a single factor.
    """
    assert(current<=target)
    candidates = factors(target)
    if len(candidates) == 1:
        return 1
    # NOTE(review): if `current` is the first factor in `candidates`,
    # index 0 - 1 == -1 wraps around and returns the *last* (largest)
    # factor — confirm this wraparound is intended.
    logging.info("Selecting previous factor %d of %d given %d" % (candidates[candidates.index(current)-1], target, current))
    return candidates[candidates.index(current)-1]
def compute_weight_BTEL1010(true_energy, simtel_spectral_slope=-2.0):
    """Compute the per-event weight for requirement B-TEL-1010-Intensity-Resolution.

    Re-weights events simulated with spectral slope *simtel_spectral_slope*
    so that they follow the B-TEL-1010 target slope of -2.62; every pixel of
    the same image (row of the data table) therefore gets the same weight.

    Parameters
    ----------
    true_energy: array_like
        True event energies (same units as the 200.0 pivot).
    simtel_spectral_slope: float
        Spectral slope from the simulation.
    """
    target_slope = -2.62  # spectral slope required by B-TEL-1010
    slope_delta = target_slope - simtel_spectral_slope
    return np.power(true_energy / 200.0, slope_delta)
def blog_post(post_url):
    """Render the blog post stored under *post_url*, or abort with 404."""
    post = Post.query.filter_by(url=post_url).first()
    if post is None:
        abort(404)
    # convert the stored timezone offset (hours) into a timedelta for the template
    tz_offset = timedelta(hours=post.timezone)
    return render_template('blog_post.html', post=post, tz_diff=tz_offset)
def l2_normalization(
        inputs,
        scaling=False,
        scale_initializer=init_ops.ones_initializer(),
        reuse=None,
        variables_collections=None,
        outputs_collections=None,
        data_format='NHWC',
        trainable=True,
        scope=None):
    """Implement L2 normalization on every feature (i.e. spatial normalization).

    NOTE(review): despite the wording above and the inline comment below, the
    active code normalizes across the *channel* axis (the commented-out
    `norm_dim` lines were the spatial variant) — confirm which is intended.

    Should be extended in some near future to other dimensions, providing a more
    flexible normalization framework.

    Args:
      inputs: a 4-D tensor with dimensions [batch_size, height, width, channels].
      scaling: whether or not to add a post scaling operation along the dimensions
        which have been normalized.
      scale_initializer: An initializer for the weights.
      reuse: whether or not the layer and its variables should be reused. To be
        able to reuse the layer scope must be given.
      variables_collections: optional list of collections for all the variables or
        a dictionary containing a different list of collection per variable.
      outputs_collections: collection to add the outputs.
      data_format: NHWC or NCHW data format.
      trainable: If `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
      scope: Optional scope for `variable_scope`.
    Returns:
      A `Tensor` representing the output of the operation.
    """
    with variable_scope.variable_scope(
            scope, 'L2Normalization', [inputs], reuse=reuse) as sc:
        inputs_shape = inputs.get_shape()
        inputs_rank = inputs_shape.ndims
        dtype = inputs.dtype.base_dtype
        if data_format == 'NHWC':
            # channels-last: normalize over the last (channel) axis
            # norm_dim = tf.range(1, inputs_rank-1)
            norm_dim = tf.range(inputs_rank-1, inputs_rank)
            params_shape = inputs_shape[-1:]
        elif data_format == 'NCHW':
            # channels-first: normalize over axis 1 (the channel axis)
            # norm_dim = tf.range(2, inputs_rank)
            norm_dim = tf.range(1, 2)
            # NOTE(review): the parentheses do not make a tuple — this is the
            # bare Dimension inputs_shape[1]; confirm model_variable accepts it.
            params_shape = (inputs_shape[1])
        # Normalize along spatial dimensions.
        outputs = nn.l2_normalize(inputs, norm_dim, epsilon=1e-12)
        # Additional scaling.
        if scaling:
            scale_collections = utils.get_variable_collections(
                variables_collections, 'scale')
            # one learnable gamma per channel, broadcast over the normalized axes
            scale = variables.model_variable('gamma',
                                             shape=params_shape,
                                             dtype=dtype,
                                             initializer=scale_initializer,
                                             collections=scale_collections,
                                             trainable=trainable)
            if data_format == 'NHWC':
                outputs = tf.multiply(outputs, scale)
            elif data_format == 'NCHW':
                # reshape gamma to (C, 1, 1) so it broadcasts over H and W
                scale = tf.expand_dims(scale, axis=-1)
                scale = tf.expand_dims(scale, axis=-1)
                outputs = tf.multiply(outputs, scale)
                # outputs = tf.transpose(outputs, perm=(0, 2, 3, 1))

        return utils.collect_named_outputs(outputs_collections,
                                           sc.original_name_scope, outputs)
def generate_com_filter(size_u, size_v):
    """
    Generate a center-of-mass base convolution filter.

    Channel 0 holds the signed, normalized horizontal offset of each cell from
    the center column; channel 1 holds the vertical offset from the center row.
    Offsets are normalized by (size - 1).

    :param size_u: filter width (must be >= 2, since offsets divide by size_u - 1)
    :param size_v: filter height (must be >= 2)
    :return: ndarray of shape (size_v, size_u, 2)
    """
    # vectorized equivalent of the original per-cell double loop
    u_offsets = (np.arange(size_u) - size_u // 2) / (size_u - 1)
    v_offsets = (np.arange(size_v) - size_v // 2) / (size_v - 1)
    _filter = np.zeros((size_v, size_u, 2))  # 0 channel is for u, 1 channel is for v
    _filter[:, :, 0] = u_offsets[np.newaxis, :]
    _filter[:, :, 1] = v_offsets[:, np.newaxis]
    return _filter
from typing import List
def retrieve_scores_grouped_ordered_pair_by_slug(panelist_slug: str,
                                                 database_connection: mysql.connector.connect
                                                 ) -> List[tuple]:
    """Return (score, count) tuples for the panelist identified by slug.

    Resolves the slug to a panelist ID and delegates to
    ``retrieve_scores_grouped_ordered_pair_by_id``; returns None when the
    slug cannot be resolved.

    Arguments:
        panelist_slug (str)
        database_connection (mysql.connector.connect)
    """
    panelist_id = utility.convert_slug_to_id(panelist_slug, database_connection)
    if not panelist_id:
        return None

    return retrieve_scores_grouped_ordered_pair_by_id(panelist_id,
                                                      database_connection,
                                                      pre_validated_id=True)
from datetime import datetime
def set_job_id():
    """Define the job id used for output paths.

    Uses ``FLAGS.job_id`` when provided; otherwise falls back to the current
    timestamp formatted as ``YYYYmmdd-HHMMSS``.

    Returns:
        job_id: Identifier for output paths.
    """
    job_id = FLAGS.job_id
    if not job_id:
        # Fix: with `from datetime import datetime` the class itself is in
        # scope, so the previous `datetime.datetime.now()` raised
        # AttributeError.
        job_id = datetime.now().strftime('%Y%m%d-%H%M%S')
    return job_id
def is_owner(obj, user):
    """Check whether *user* is among the owners of the slice *obj*.

    Mirrors the short-circuit semantics of ``obj and user in obj.owners``:
    a falsy *obj* is returned unchanged, otherwise a bool.
    """
    if not obj:
        return obj
    return user in obj.owners
from typing import Dict
from typing import List
from typing import Optional
def boxplot_errors_wrt_RUL(
    results_dict: Dict[str, List[PredictionResult]],
    nbins: int,
    y_axis_label: Optional[str] = None,
    x_axis_label: Optional[str] = None,
    ax=None,
    **kwargs,
):
    """Boxplot of the difference between true and predicted RUL over
    cross-validated results, binned by RUL.

    Parameters
    ----------
    results_dict: Dict[str, List[PredictionResult]]
        Results of the fitted models, keyed by model name.
    nbins: int
        Number of RUL bins.
    y_axis_label: Optional[str]
        Optional label for the y axis.
    x_axis_label: Optional[str]
        Optional label for the x axis.
    ax:
        Optional axis to draw into; a new figure/axis pair is created
        when omitted.

    Keyword arguments
    -----------------
    **kwargs
        Forwarded to ``plt.subplots`` when a new figure is created.

    Returns
    -------
    fig, ax
    """
    if ax is not None:
        fig = ax.figure
    else:
        fig, ax = plt.subplots(**kwargs)

    bin_edges, model_results = models_cv_results(results_dict, nbins)
    return _boxplot_errors_wrt_RUL_multiple_models(
        bin_edges,
        model_results,
        fig=fig,
        ax=ax,
        y_axis_label=y_axis_label,
        x_axis_label=x_axis_label,
    )
def create_client(admin_user: str, key_file: str) -> CloudChannelServiceClient:
    """Create the Channel Service API client, impersonating *admin_user*.

    Returns:
        The created Channel Service API client.
    """
    # [START channel_create_client]
    # Set up credentials with user impersonation
    base_credentials = service_account.Credentials.from_service_account_file(
        key_file, scopes=["https://www.googleapis.com/auth/apps.order"])
    delegated_credentials = base_credentials.with_subject(admin_user)

    # Create the API client
    client = channel.CloudChannelServiceClient(credentials=delegated_credentials)
    print("=== Created client")
    # [END channel_create_client]
    return client
import warnings
def fourier_map(sinogram: np.ndarray, angles: np.ndarray,
                intp_method: str = "cubic",
                count=None, max_count=None) -> np.ndarray:
    """2D Fourier mapping with the Fourier slice theorem

    Computes the inverse of the Radon transform using Fourier
    interpolation.

    Warning: This is the naive reconstruction that assumes that
    the image is rotated through the upper left pixel nearest to
    the actual center of the image. We do not have this problem for
    odd images, only for even images.

    Parameters
    ----------
    sinogram: (A,N) ndarray
        Two-dimensional sinogram of line recordings.
    angles: (A,) ndarray
        Angular positions of the `sinogram` in radians equally
        distributed from zero to PI.
    intp_method: {'cubic', 'nearest', 'linear'}, optional
        Method of interpolation. For more information see
        `scipy.interpolate.griddata`. One of

        - "nearest": instead of interpolating, use the points closest
          to the input data
        - "linear": bilinear interpolation between data points
        - "cubic": interpolate using a two-dimensional poolynimial
          surface
    count, max_count: multiprocessing.Value or `None`
        Can be used to monitor the progress of the algorithm.
        Initially, the value of `max_count.value` is incremented
        by the total number of steps. At each step, the value
        of `count.value` is incremented.

    Returns
    -------
    out: ndarray
        The reconstructed image.

    See Also
    --------
    scipy.interpolate.griddata: interpolation method used
    """
    # four progress steps: FFT, coordinate setup, interpolation, inverse FFT
    if max_count is not None:
        with max_count.get_lock():
            max_count.value += 4

    if len(sinogram[0]) % 2 == 0:
        warnings.warn("Fourier interpolation with slices that have" +
                      " even dimensions leads to image distortions!")
    # projections
    p_x = sinogram

    # Fourier transform of the projections
    # The sinogram data is shifted in Fourier space
    P_fx = np.fft.fft(np.fft.ifftshift(p_x, axes=-1), axis=-1)

    if count is not None:
        with count.get_lock():
            count.value += 1

    # This paragraph could be useful for future version if the reconstruction
    # grid is to be changed. They directly affect *P_fx*.
    # if False:
    #    # Resize everyting
    #    factor = 10
    #    P_fx = np.zeros((len(sinogram), len(sinogram[0])*factor),
    #                    dtype=np.complex128)
    #    newp = np.zeros((len(sinogram), len(sinogram[0])*factor),
    #                    dtype=np.complex128)
    #    for i in range(len(sinogram)):
    #        x = np.linspace(0, len(sinogram[0]), len(sinogram[0]))
    #        dint = intp.interp1d(x, sinogram[i])
    #        xn = np.linspace(0, len(sinogram[0]), len(sinogram[0])*factor)
    #        datan = dint(xn)
    #        datfft = np.fft.fft(datan)
    #        newp[i] = datan
    #        # shift datfft
    #        P_fx[i] = np.roll(1*datfft,0)
    #    p_x = newp
    # if False:
    #    # Resize the input image
    #    P_fx = np.zeros(p_x.shape, dtype=np.complex128)
    #    for i in range(len(sinogram)):
    #        factor = 2
    #        x = np.linspace(0, len(sinogram[0]), len(sinogram[0]))
    #        dint = intp.interp1d(x, sinogram[i])#, kind="nearest")
    #        xn = np.linspace(0, len(sinogram[0]), len(sinogram[0])*factor)
    #        datan = dint(xn)
    #        datfft = np.fft.fft(datan)
    #        fftint = intp.interp1d(xn, datfft)
    #        start = (len(xn) - len(x))/2
    #        end = (len(xn) + len(x))/2
    #        fidata = fftint(x)
    #        datfft = np.fft.fftshift(datfft)
    #        datfft = datfft[start:end]
    #        datfft = np.fft.ifftshift(datfft)
    #        P_fx[i] = 1*datfft

    # angles need to be normalized to 2pi
    # if angles star with 0, then the image is falsly rotated
    # unfortunately we still have ashift in the data.
    ang = (angles.reshape(-1, 1))
    # compute frequency coordinates fx
    fx = np.fft.fftfreq(len(p_x[0]))
    fx = fx.reshape(1, -1)
    # fy is zero
    # polar -> Cartesian frequency coordinates of each projection's slice
    # (Fourier slice theorem)
    fxl = (fx) * np.cos(ang)
    fyl = (fx) * np.sin(ang)
    # now fxl, fyl, and P_fx have same shape

    # DEBUG: plot coordinates of positions of projections in fourier domain
    # from matplotlib import pylab as plt
    # plt.figure()
    # for i in range(len(fxl)):
    #    plt.plot(fxl[i],fyl[i],"x")
    # plt.axes().set_aspect('equal')
    # plt.show()

    # flatten everything for interpolation
    Xf = fxl.flatten()
    Yf = fyl.flatten()
    Zf = P_fx.flatten()

    # rintp defines the interpolation grid
    rintp = np.fft.fftshift(fx.reshape(-1))

    if count is not None:
        with count.get_lock():
            count.value += 1

    # The code block yields the same result as griddata (below)
    # interpolation coordinates
    # Rf = np.zeros((len(Xf),2))
    # Rf[:,0] = 1*Xf
    # Rf[:,1] = 1*Yf
    # reconstruction coordinates
    # Nintp = len(rintp)
    # Xn, Yn = np.meshgrid(rintp,rintp)
    # Rn = np.zeros((Nintp**2, 2))
    # Rn[:,0] = Xn.flatten()
    # Rn[:,1] = Yn.flatten()
    #
    # if intp_method.lower() == "bilinear":
    #    Fr = intp.LinearNDInterpolator(Rf,Zf.real)
    #    Fi = intp.LinearNDInterpolator(Rf,Zf.imag)
    # elif intp_method.lower() == "nearest":
    #    Fr = intp.NearestNDInterpolator(Rf,Zf.real)
    #    Fi = intp.NearestNDInterpolator(Rf,Zf.imag)
    # else:
    #    raise NotImplementedError("Unknown interpolation type: {}".format(
    #                               intp_method.lower()))
    # Fcomp = (Fr(Rn) + 1j*Fi(Rn)).reshape(Nintp,Nintp)

    # The actual interpolation
    Fcomp = intp.griddata((Xf, Yf), Zf, (rintp[None, :], rintp[:, None]),
                          method=intp_method)

    if count is not None:
        with count.get_lock():
            count.value += 1

    # remove nans
    # (griddata leaves NaN outside the convex hull of the sample points)
    Fcomp[np.where(np.isnan(Fcomp))] = 0
    # back to real space; any imaginary residue is discarded via f.real
    f = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(Fcomp)))

    if count is not None:
        with count.get_lock():
            count.value += 1

    return f.real
import string
import random
def password_generator(length=12, chars=None):
    """
    Generate a random password of *length* characters drawn from *chars*.

    Uses the ``secrets`` module (CSPRNG) instead of ``random``, which is not
    suitable for security-sensitive values such as passwords.

    :param length: number of characters in the password (default 12)
    :param chars: pool of characters to draw from; defaults to ASCII letters
        plus digits
    :return: the generated password string
    """
    import secrets  # local import: cryptographically strong randomness

    if not chars:
        chars = string.ascii_letters + string.digits
    return ''.join(secrets.choice(chars) for _ in range(length))
def get_node_hint(node):
    """Return the 'capabilities:node' hint associated with the node,
    or None when the capability is absent."""
    capabilities_dict = capabilities_to_dict(
        node.get('properties').get('capabilities'))
    return capabilities_dict.get('node')
def execPregionsExactCP(y, w, p=2,rho='none', inst='none', conseq='none'):
    # TODO: explain what the p-regions model is
    """P-regions model
    The p-regions model, devised by [Duque_Church_Middleton2009]_,
    clusters a set of geographic areas into p spatially contiguous
    regions while minimizing within cluster heterogeneity.
    In Clusterpy, the p-regions model is formulated as a mixed
    integer-programming (MIP) problem and solved using the
    Gurobi optimizer. ::
        How to run it: layer.cluster(...)
        layer.cluster('pRegionsExact',vars,<p>,<wType>,<std>,<dissolve>,<dataOperations>)
    :keyword vars: Area attribute(s) (e.g. ['SAR1','SAR2','POP'])
    :type vars: list
    :keyword p: Number of spatially contiguous regions to be generated. Default value p = 2.
    :type p: integer
    :keyword wType: Type of first-order contiguity-based spatial matrix: 'rook' or 'queen'. Default value wType = 'rook'.
    :type wType: string
    :keyword std: If = 1, then the variables will be standardized.
    :type std: binary
    :keyword dissolve: If = 1, then you will get a "child" instance of the layer that contains the new regions. Default value = 0. Note: Each child layer is saved in the attribute layer.results. The first algorithm that you run with dissolve=1 will have a child layer in layer.results[0]; the second algorithm that you run with dissolve=1 will be in layer.results[1], and so on. You can export a child as a shapefile with layer.result[<1,2,3..>].exportArcData('filename')
    :type dissolve: binary
    :keyword dataOperations: Dictionary which maps a variable to a list of operations to run on it. The dissolved layer will contains in it's data all the variables specified in this dictionary. Be sure to check the input layer's fieldNames before use this utility.
    :type dataOperations: dictionary
    The dictionary structure must be as showed bellow.
    >>> X = {}
    >>> X[variableName1] = [function1, function2,....]
    >>> X[variableName2] = [function1, function2,....]
    Where functions are strings which represents the name of the
    functions to be used on the given variableName. Functions
    could be,'sum','mean','min','max','meanDesv','stdDesv','med',
    'mode','range','first','last','numberOfAreas. By deffault just
    ID variable is added to the dissolved map.
    """
    # print "Running p-regions model (Duque, Church and Middleton, 2009)"
    # print "Number of areas: ", len(y)
    # print "Number of regions: ", p, "\n"
    start = tm.time()
    # PARAMETERS
    # Number of areas
    n = len(y)
    l=n-p
    # Area iterator
    numA = range(n)
    # Pairwise squared Euclidean distances, upper triangle only:
    # the distance between areas i and j (i < j) is stored at d[i][j-i-1]
    d={}
    temp=range(n-1)
    for i in temp:
        list1=[]
        for j in numA:
            if i<j:
                list1.append(distanceA2AEuclideanSquared([y[i],y[j]])[0][0])
        d[i]=list1
    #-----------------------------------
    try:
        # CONSTRUCTION OF THE MODEL
        # Tolerance to non-integer solutions
        tol = 1e-5#1e-9 #min value: 1e-9
        # SUBTOUR ELIMINATION CONSTRAINTS
        # Lazy-constraint callback: detects cycles in the incumbent tree
        # solution and cuts them off with a cardinality constraint.
        def subtourelim(model, where):
            if where == GRB.callback.MIPSOL:
                vals = model.cbGetSolution(model.getVars())
                varsx = model.getVars()[n*n:]
                varsx1 = [varsx[i:i+n] for i in range(0,len(varsx),n)]
                t1 = [vals[i:i+n] for i in range(0,n*n,n)]
                x1 = [vals[n*n+i:n*n+i+n] for i in range(0,n*n,n)]
                num = list(numA)
                cycle = [] #sets of areas involved in cycles
                while num:
                    area = num[0]
                    c =[area]
                    acum = 0
                    k = 0
                    while True:
                        if k==n:
                            break
                        if x1[area][k]>=1-tol:#==1:
                            acum = 1
                            break
                        k += 1
                    f=num.remove(area)
                    for j in numA:
                        if t1[area][j]>=1-tol:#==1:
                            c.append(j)
                            k=0
                            while True:
                                if k==n:
                                    break
                                if x1[j][k]>=1-tol:#==1:
                                    acum += 1
                                    break
                                k += 1
                            if num.count(j)!=0:
                                b =num.remove(j)
                    if acum==len(c) and acum>1:
                        cycle.append(c)
                if len(cycle):
                    # add a subtour elimination constraint
                    for cycle_k in cycle:
                        temp1 = 0
                        card = len(cycle_k)
                        for i in cycle_k:
                            for j in cycle_k:
                                if j in w[i]:
                                    temp1 += varsx1[i][j]
                        if temp1!=0:
                            model.cbLazy(temp1 <= card-1)
        # Create the new model
        m=Model("pRegions")
        # Create variables
        # t_ij
        # 1 if areas i and j belongs to the same region
        # 0 otherwise
        t = []
        for i in numA:
            t_i = []
            for j in numA:
                t_i.append(m.addVar(vtype=GRB.BINARY,name="t_"+str([i,j])))
            t.append(t_i)
        # x_ij
        # 1 if arc between adjacent areas i and j is selected for a tree graph
        # 0 otherwise
        x = []
        for i in numA:
            x_i=[]
            for j in numA:
                x_i.append(m.addVar(vtype=GRB.BINARY,name="x_"+str([i,j])))
            x.append(x_i)
        # Integrate new variables
        m.update()
        # Objective function: total within-region pairwise dissimilarity
        of=0
        for i in numA:
            for j in range(i+1,n):
                of+=t[i][j]*d[i][j-i-1]
        m.setObjective(of, GRB.MINIMIZE)
        # Constraints 1, 5
        temp = 0
        for i in numA:
            for j in w[i]:
                temp += x[i][j]
                m.addConstr(x[i][j]-t[i][j]<=tol,"c5_"+str([i,j]))
        m.addConstr(temp == l-tol,"c1")
        # Constraint 2
        i = 0
        for x_i in x:
            temp =[]
            for j in w[i]:
                temp.append(x_i[j])
            m.addConstr(quicksum(temp) <=1-tol, "c2_"+str(i))
            i += 1
        # Constraints 3, 4 (transitivity and symmetry of the t variables)
        for i in numA:
            for j in numA:
                if i!=j:
                    m.addConstr(t[i][j]-t[j][i]<=tol,"c4_"+str([i,j]))
                    for em in numA:
                        if em!=j:
                            m.addConstr(t[i][j]+t[i][em]-t[j][em]<=1-tol,"c3_"+str([i,j,em]))
        # Redundant constraint
        # NOTE(review): the constraint names below reuse `em` left over from
        # the previous loop, so several constraints share the same name —
        # confirm this is harmless for the solver.
        for i in numA:
            for j in numA:
                if i!=j:
                    m.addConstr(x[i][j]+x[j][i]<=1,"c3_"+str([i,j,em]))
        m.update()
        #Writes the .lp file format of the model
        #m.write("test.lp")
        #To reduce memory use
        #m.setParam('Threads',1)
        #m.setParam('NodefileStart',0.1)
        # To disable optimization output
        #m.setParam('OutputFlag',False)
        #m.setParam('ScaleFlag',0)
        # To set the tolerance to non-integer solutions
        m.setParam('IntFeasTol', tol)
        m.setParam('LogFile', 'CP-'+str(conseq)+'-'+str(n)+'-'+str(p)+'-'+str(rho)+'-'+str(inst))
        # To enable lazy constraints
        m.params.LazyConstraints = 1
        m.params.timeLimit = 1800
        #m.params.ResultFile= "resultados.sol"
        m.optimize(subtourelim)
        time = tm.time()-start
        # for v in m.getVars():
        #     if v.x >0:
        #         print v.varName, v.x
        #import pdb; pdb.set_trace()
        # sol = [0 for k in numA]
        # num = list(numA)
        # regID=0 #Number of region
        # while num:
        #     area = num[0]
        #     sol[area]=regID
        #     f = num.remove(area)
        #     for j in numA:
        #         if t[area][j].x>=1-tol:#==1:
        #             sol[j] = regID
        #             if num.count(j)!=0:
        #                 b = num.remove(j)
        #     regID += 1
        # print 'FINAL SOLUTION:', sol
        # print 'FINAL OF:', m.objVal
        # print 'FINAL bound:', m.objBound
        # print 'GAP:', m.MIPGap
        # print "running time", time
        # print "running timeGR", m.Runtime
        output = { "objectiveFunction": m.objVal,
        "bestBound": m.objBound,
        "running time": time,
        "algorithm": "pRegionsExactCP",
        #"regions" : len(sol),
        "r2a": "None",#sol,
        "distanceType" :  "EuclideanSquared",
        "distanceStat" : "None",
        "selectionType" : "None",
        "ObjectiveFunctionType" : "None"}
        print "Done"
        return output
    except GurobiError:
        print 'Error reported'
def generate_scale(name, octave, major=True):
    """
    Build the sequence of MIDI note numbers for a scale (do re mi fa sol
    la si do).

    `name` is the base note name, `octave` the octave the scale starts in,
    and `major` selects a major (True) or minor (False) progression.
    """
    progression = major_scale_progression if major else minor_scale_progression
    root = note_number(name + str(octave))
    return [root + offset for offset in progression]
def tensor_product(a, b, reshape=True):
    """
    Compute the row-wise tensor (outer) product of two matrices a and b.

    If a is (n, m_a) and b is (n, m_b), then the result is
    (n, m_a * m_b) if reshape = True, or (n, m_a, m_b) otherwise.

    Parameters
    ----------
    a : array-like of shape (n, m_a)
    b : array-like of shape (n, m_b)
    reshape : bool, default True
        Whether to collapse the result to 2 dimensions, i.e. (n, m_a * m_b),
        or keep the 3-dimensional tensor (n, m_a, m_b).

    Returns
    -------
    dense np.ndarray of shape (n, m_a * m_b) if reshape = True,
    or (n, m_a, m_b) otherwise.

    Raises
    ------
    ValueError
        If a and b do not have the same number of rows (samples).
    """
    assert a.ndim == 2, 'matrix a must be 2-dimensional, but found {} dimensions'.format(a.ndim)
    assert b.ndim == 2, 'matrix b must be 2-dimensional, but found {} dimensions'.format(b.ndim)

    na, ma = a.shape
    nb, mb = b.shape

    if na != nb:
        raise ValueError('both arguments must have the same number of samples')

    # densify sparse inputs; .toarray() is the supported API (the .A
    # shorthand is deprecated and absent on scipy sparse arrays)
    if sp.sparse.issparse(a):
        a = a.toarray()
    if sp.sparse.issparse(b):
        b = b.toarray()

    # broadcasted per-row outer product: (n, m_a, 1) * (n, 1, m_b)
    tensor = a[..., :, None] * b[..., None, :]

    if reshape:
        return tensor.reshape(na, ma * mb)

    return tensor
def disable_user( request, username ):
    """
    Toggle a user account between enabled and disabled.

    Disabled users cannot log in. Staff accounts may only be toggled by
    other staff members.
    """
    userModel = get_user_model()
    try:
        user = userModel.objects.get( username= username )
    except userModel.DoesNotExist:
        raise Http404( "User doesn't exist." )

    # staff accounts are protected from non-staff requesters
    if user.is_staff and not request.user.is_staff:
        return HttpResponseForbidden( "Can't disable a staff member." )

    user.is_active = not user.is_active
    user.save()

    if user.is_active:
        message = "'{}' account is now active.".format( user )
    else:
        message = "'{}' account is now disabled.".format( user )
    utilities.set_message( request, message )

    return HttpResponseRedirect( user.get_url() )
from datetime import datetime
def get_next_event(user: User) -> Event | None:
    """
    Return the next event today for *user*, or None when no event remains.
    """
    # Take a single snapshot of "now" so weekday and minute-of-day stay
    # consistent even across a minute boundary. This also fixes the
    # previous `datetime.datetime.now()` calls, which are invalid under
    # `from datetime import datetime`.
    now = datetime.now()
    current_time = now.hour * 60 + now.minute
    return Event.query \
        .join(Event.subject, aliased=True) \
        .filter(Subject.user == user,
                Event.weekday == now.weekday(),
                Event.start_time > current_time,
                ) \
        .order_by(asc(Event.start_time)).first()
import math
def affineToText(matrix):
    """
    Convert a libcv affine matrix into a human readable summary of the
    tilt angle, rotation angle and shift it encodes.
    """
    tilt_product = matrix[0, 0] * matrix[1, 1]
    rot_component = (matrix[0, 1] - matrix[1, 0]) / 2.0

    # recover the tilt angle; invert the product when its magnitude exceeds 1
    # so the acos argument stays in range
    if abs(tilt_product) > 1:
        tilt = math.degrees(math.acos(1.0 / tilt_product))
    else:
        tilt = math.degrees(math.acos(tilt_product))
    if tilt > 90.0:
        tilt = tilt - 180.0

    # recover the rotation angle; saturate at 180 degrees when out of range
    if abs(rot_component) < 1:
        rot = math.degrees(math.asin(rot_component))
    else:
        rot = 180.0

    return ("tiltang = %.2f, rotation = %.2f, shift = %.2f,%.2f" %
            (tilt, rot, matrix[2, 0], matrix[2, 1]))
import numpy
def createBridgeSets(blocksize, operating, MPSS):
    """Create the index ("iidx") sets used for bridges.

    On each of the MPSS passes, the x-range shrinks inward by ``operating``
    on both sides while the y-range grows outward by ``operating``; the
    (x, y) index pairs of each pass form one set.

    :param blocksize: block dimensions; only ``blocksize[0]`` is used
    :param operating: number of points each range boundary moves per pass
    :param MPSS: number of passes (sets) to generate
    :return: tuple of (sets, sets reversed)
    """
    sets = tuple()
    x_upper = blocksize[0] - operating
    x_lower = operating
    y_upper = int(blocksize[0] / 2 + operating)
    y_lower = int(blocksize[0] / 2 - operating)
    # removed the unused `xts`/`xbs` locals the original carried
    for _ in range(MPSS):
        sets += (tuple(product(numpy.arange(x_lower, x_upper, 1),
                               numpy.arange(y_lower, y_upper, 1))),)
        x_lower += operating
        x_upper -= operating
        y_lower -= operating
        y_upper += operating
    return sets, sets[::-1]
def get_uniq_id_with_dur(meta, deci: int = 3):
    """
    Return basename with offset and end time labels.

    The offset/end times are rounded to *deci* decimals and scaled to
    integers (e.g. 1.234 s -> "1234") before being appended as
    "<basename>_<offset>_<endtime>".
    """
    bare_uniq_id = get_uniqname_from_filepath(meta['audio_filepath'])
    # no trimming information at all: the bare basename is the ID
    if meta['offset'] is None and meta['duration'] is None:
        return bare_uniq_id
    if meta['offset']:
        offset = str(int(round(meta['offset'], deci) * pow(10, deci)))
    else:
        # covers both offset == 0 and offset is None (with a duration set)
        offset = 0
    if meta['duration']:
        # NOTE(review): if meta['offset'] is None while a duration is given,
        # this addition raises TypeError — confirm upstream guarantees the
        # offset is numeric whenever duration is set.
        endtime = str(int(round(meta['offset'] + meta['duration'], deci) * pow(10, deci)))
    else:
        endtime = 'NULL'
    uniq_id = f"{bare_uniq_id}_{offset}_{endtime}"
    return uniq_id
def delete(table, whereclause = None, **kwargs):
    """Return a ``DELETE`` clause element.

    This can also be called from a table directly via the table's
    ``delete()`` method.

    table
      The table to delete rows from.

    whereclause
      A ``ClauseElement`` describing the ``WHERE`` condition of the
      ``DELETE`` statement.
    """
    return _Delete(table, whereclause, **kwargs)
import random
import string
def oversized_junk():
    """
    Return a string of random lowercase letters that is over 4096 bytes long.
    """
    # 4097 characters guarantees the result exceeds a 4096-byte limit.
    letters = [random.choice(string.ascii_lowercase) for _ in range(4097)]
    return "".join(letters)
def dcm_to_unrescaled(dcm_dict, save_path=None, show=True, return_resolution=False):
    """
    Stack the dcm files of a scan into a single array without rescaling.

    :param dcm_dict: directory containing the dcm files
    :param save_path: optional path where the stacked array is saved
    :param show: whether to print progress information
    :param return_resolution: if True, also return the voxel resolution
    :return: the stacked array in float32 (with the resolution when requested)
    """
    stacked, resolution = read_in_CT.stack_dcm_files_simplest(dcm_dict, show=show)
    if save_path is not None:
        if show:
            print("save array to:", save_path)
        Functions.save_np_to_path(save_path, stacked)
    return (stacked, resolution) if return_resolution else stacked
import os
def plot_posterior_pair(
    hddm_model=None,
    axes_limits="samples",  # 'samples' or dict({'parameter_name': [lower bound, upper bound]})
    font_scale=1.5,
    height=2,
    aspect_ratio=1,
    n_subsample=1000,
    kde_levels=50,
    model_ground_truth=None,
    save=False,
    save_path=None,
    show=True,
):
    """Basic pair plot useful for inspecting posterior parameters.

       At this point can be used only for single subject data.
       Works for all models listed in hddm (e.g. 'ddm', 'angle', 'weibull', 'levy', 'ornstein')

    Arguments:
        hddm_model: hddm model object <default=None>
            If you supply a ground truth model, the data you supplied to the hddm model should include trial by trial parameters.
        axes_limits: str or dict <default='samples'>
            Either a string that says 'samples', which makes axes limits depends on the posterior sample values directly,
            (separately for each parameter). Or a dictionary with keys parameter names, and values a 2-list with a lower
            and an upper bound ([lower, upper]).
        height: float <default=10>
            Figure height in inches.
        aspect_ratio: float <default=1>
            Aspect ratio of figure
        n_subsample: int <default=1000>
            Number of posterior samples to use for figure finally. Subsamples from the provided traces.
        kde_levels: int <default=50>
            Number of contour levels used in the lower-triangle KDE plots.
        ground_truth_parameters: dict <default=None>
            Ground truth parameters (will be shown in the plot if supplied). Supplied as a dict of the form (e.g. DDM)
            {'v': 1, 'a': 2, 'z': 0.5, 't': 2}
        model_fitted: str <default=None>
            String that supplies which model was fitted to the data.
        font_scale: float <default= 1.5>
            Scale of fonts. Not always predictable which is the best value, so
            the setting is left to the user.
        save: bool <default= False>
            Whether or not to save the figure.
        save_path: str <default=None>
            Directory to write figures to when ``save`` is True (defaults to "figures/").
        show: bool <default=True>
            Whether to display each figure with ``plt.show()``.
    Return: plot object
    """
    if hddm_model == None:
        return "No data supplied --> please supply a HDDM model (including traces)"
    model_fitted = hddm_model.model
    # Traces grouped by condition and subject (optionally with ground-truth vectors).
    data = filter_subject_condition_traces(
        hddm_model=hddm_model, model_ground_truth=model_ground_truth
    )
    # print(data)
    sns.set()
    sns.set_theme(
        style="ticks", rc={"axes.spines.right": False, "axes.spines.top": False}
    )
    sns.set_context("notebook", font_scale=font_scale)
    # One pair plot is produced per (condition, subject) combination.
    plot_cnt = 0
    for c_tmp in data.keys():
        for s_tmp in data[c_tmp].keys():
            # Turn traces into dataframe:
            # Get all ground truths
            gt_dict = {}
            # AF COMMENT: Is this sorting of trace names necessrary ?
            # Reorder trace names to match the model's canonical parameter order,
            # collecting the matching ground-truth values along the way.
            sorted_trace_names_tmp = data[c_tmp][s_tmp]["trace_names"].copy()
            for trace_name_tmp in data[c_tmp][s_tmp]["trace_names"]:
                if (
                    trace_name_tmp.split("_")[0].split("(")[0]
                    in model_config[model_fitted]["params"]
                ):
                    tmp_param = trace_name_tmp.split("_")[0].split("(")[0]
                    idx_tmp = model_config[model_fitted]["params"].index(tmp_param)
                    sorted_trace_names_tmp[idx_tmp] = trace_name_tmp
                    if model_ground_truth is not None:
                        gt_dict[trace_name_tmp] = data[c_tmp][s_tmp][
                            "gt_parameter_vector"
                        ][idx_tmp]
                else:
                    pass
            data[c_tmp][s_tmp]["trace_names"] = sorted_trace_names_tmp.copy()
            data[c_tmp][s_tmp]["traces"] = pd.DataFrame(
                data[c_tmp][s_tmp]["traces"], columns=data[c_tmp][s_tmp]["trace_names"]
            )
            # Build the pair grid: KDE marginals on the diagonal,
            # 2-D KDE contours in the lower triangle.
            g = sns.PairGrid(
                data[c_tmp][s_tmp]["traces"].sample(n_subsample),
                height=height,
                aspect=aspect_ratio,
                diag_sharey=False,
            )
            g = g.map_diag(sns.kdeplot, color="black", shade=False)  # shade = True,
            g = g.map_lower(
                sns.kdeplot,
                thresh=0.01,
                n_levels=kde_levels,
                shade=False,
                cmap="Purples_d",
            )  # 'Greys'
            # Hide upper triangular part
            for i, j in zip(*np.triu_indices_from(g.axes, 1)):
                g.axes[i, j].set_visible(False)
            # Get x and y labels of graph as determined by the posterior_samples panda
            xlabels, ylabels = [], []
            for ax in g.axes[-1, :]:
                xlabel = ax.xaxis.get_label_text()
                ax.set_xlabel(ax.get_xlabel(), rotation=45)
                xlabels.append(xlabel)
            for ax in g.axes[:, 0]:
                ylabel = ax.yaxis.get_label_text()
                ylabels.append(ylabel)
                # ax.yaxis.set_label_text('')
                ax.set_ylabel("")
            # NOTE(review): the docstring documents axes_limits == 'samples', but this
            # branch triggers on 'model'; with 'samples' neither branch runs and the
            # limits are left to seaborn — confirm this is intended.
            if axes_limits == "model":
                for i in range(len(xlabels)):
                    for j in range(len(ylabels)):
                        try:
                            g.axes[j, i].set_xlim(
                                model_config[model_fitted]["param_bounds"][0][
                                    model_config[model_fitted]["params"].index(
                                        xlabels[i]
                                    )
                                ],
                                model_config[model_fitted]["param_bounds"][1][
                                    model_config[model_fitted]["params"].index(
                                        xlabels[i]
                                    )
                                ],
                            )
                            g.axes[j, i].set_ylim(
                                model_config[model_fitted]["param_bounds"][0][
                                    model_config[model_fitted]["params"].index(
                                        ylabels[j]
                                    )
                                ],
                                model_config[model_fitted]["param_bounds"][1][
                                    model_config[model_fitted]["params"].index(
                                        ylabels[j]
                                    )
                                ],
                            )
                        except:
                            print(
                                "ERROR: It looks like you are trying to make axis limits dependend on model specific parameters, but the column-names of your posterior traces do not align with the requested model's parameters"
                            )
            elif type(axes_limits) == dict:
                for i in range(len(xlabels)):
                    for j in range(len(ylabels)):
                        try:
                            g.axes[j, i].set_xlim(
                                axes_limits[xlabels[i]][0], axes_limits[xlabels[i]][1]
                            )
                            g.axes[j, i].set_ylim(
                                axes_limits[ylabels[j]][0], axes_limits[ylabels[j]][1]
                            )
                        except:
                            print(
                                "ERROR: Does your axes_limits dictionary match the column names of your posterior_samples DataFrame?"
                            )
                            return
            for ax in g.axes.flat:
                plt.setp(ax.get_xticklabels(), rotation=45)
            g.fig.suptitle(
                model_fitted.upper()
                + " , condition: "
                + str(c_tmp)
                + ", subject: "
                + str(s_tmp),
                y=1.03,
                fontsize=24,
            )
            # posterior_samples_key_set = np.sort(posterior_samples.keys())
            # If ground truth is available add it in:
            if model_ground_truth is not None:
                # Red dots in the lower triangle mark the ground-truth parameter pair.
                for i in range(g.axes.shape[0]):
                    for j in range(i + 1, g.axes.shape[0], 1):
                        g.axes[j, i].plot(
                            data[c_tmp][s_tmp]["gt_parameter_vector"][
                                i
                            ],  # [xlabels[i]],
                            data[c_tmp][s_tmp]["gt_parameter_vector"][
                                j
                            ],  # [ylabels[j]],
                            ".",
                            color="red",
                            markersize=10,
                        )
                # Red dots on the diagonal mark each marginal ground truth.
                for i in range(g.axes.shape[0]):
                    if i == 0:
                        y_lims_tmp = g.axes[i, i].get_ylim()
                        g.axes[i, i].set_ylim(0, y_lims_tmp[1])
                    g.axes[i, i].plot(
                        data[c_tmp][s_tmp]["gt_parameter_vector"][
                            i
                        ],  # [xlabels[i]], # ground_truth_parameters[xlabels[i]],
                        g.axes[i, i].get_ylim()[0],
                        ".",
                        color="red",
                        markersize=10,
                    )
                    # AF-COMMENT: The yaxis ticks are supposed to be turned off only for the
                    # diagonal, but seemingly this is applied across the board....
                    g.axes[i, i].yaxis.set_ticks([])
            if save == True:
                if save_path is None:
                    save_path = "figures/"
                    if os.path.exists("figures"):
                        pass
                    else:
                        os.mkdir("figures")
                elif type(save_path) == str:
                    pass
                else:
                    return "Error: please specify a save_path as a string"
                plt.savefig(
                    save_path + "posterior_pair_plot_" + str(plot_cnt) + ".png",
                    format="png",
                    transparent=True,
                )
            if show:
                plt.show()
            # NOTE(review): this second save block references ``datatype``, which is
            # not defined in this function (NameError unless it exists as a global),
            # and ``frameon`` is not a valid savefig kwarg in recent matplotlib —
            # looks like dead/duplicated code; confirm and remove.
            if save == True:
                plt.savefig(
                    "figures/" + "pair_plot_" + model_fitted + "_" + datatype + ".png",
                    format="png",
                    transparent=True,
                    frameon=False,
                )
                plt.close()
            plot_cnt += 1
    # Show
    return
def change_unit_of_metrics(metrics):
    """Change order of metrics from bpd to nats for binarized mnist only"""
    if hparams.data.dataset_source != 'binarized_mnist':
        return metrics

    # Convert from bpd to nats for comparison
    def to_nats(value):
        return value * jnp.log(2.) * get_effective_n_pixels()

    metrics['kl_div'] = to_nats(metrics['kl_div'])
    metrics['avg_kl_divs'] = jax.tree_map(to_nats, metrics['avg_kl_divs'])
    metrics['avg_recon_loss'] = to_nats(metrics['avg_recon_loss'])
    return metrics
import os
def get_ff(filename):
    """Get path to a file in ffxml directory
    """
    relative_path = os.path.join('ffxml', filename)
    return resource_filename('mosdef_slitpore', relative_path)
def step(ram: dict, regs: dict, inputs: list[int]) -> tuple:
    """Advance robot by a single step
    Runs the intcode fetch/decode/execute/store cycle until exactly two
    output values have been produced, then saves the machine state back
    into ``regs``.
    :param ram: memory contents
    :param regs: register map
    :param inputs: input queue
    :return: updated pc; new color and turn direction
    :raises HaltOpcode: when the Halt instruction is reached
    """
    pc = regs['pc']
    relative_base = regs['rb']
    output_values = list()
    while True:
        # Fetch and decode the current instruction.
        instruction = ram[pc]
        opcode, operand_modes = decode(instruction=instruction)
        halt = ISA[opcode].name == 'Halt'
        if halt:
            raise HaltOpcode
        # Resolve the load operands according to their addressing modes.
        load_modes = operand_modes[:ISA[opcode].load_args]
        operands = fetch(instruction_pointer=pc,
                         load_modes=load_modes, ram=ram,
                         relative_base=relative_base,
                         opcode=opcode, input_stack=inputs)
        output = execute(opcode=opcode, operands=operands)
        # Write back the result (the last operand mode is the store mode).
        store_mode = operand_modes[-ISA[opcode].store_args:][0]
        store(opcode=opcode, store_mode=store_mode, output=output,
              instruction_pointer=pc, ram=ram,
              relative_base=relative_base)
        # Collect any emitted outputs and adjust the relative base if needed.
        output_values.extend(push_output(opcode=opcode, output=output))
        relative_base += shift_base(opcode=opcode, output=output)
        next_instruction_pointer = jump_next_instruction(
            opcode=opcode, instruction_pointer=pc, operands=operands)
        pc = next_instruction_pointer
        # A robot step is complete once two outputs (color, turn) are available.
        if len(output_values) == 2:
            break
    # Persist the machine state for the next step.
    regs['pc'] = pc
    regs['rb'] = relative_base
    return tuple(output_values)
def replace_start(text,
                  pattern,
                  repl,
                  ignore_case=False,
                  escape=True):
    """Like :func:`replace` except it only replaces `text` with `repl` if
    `pattern` matches the start of `text`.
    Args:
        text (str): String to replace.
        pattern (str): String pattern to find and replace.
        repl (str): String to substitute `pattern` with.
        ignore_case (bool, optional): Whether to ignore case when replacing.
            Defaults to ``False``.
        escape (bool, optional): Whether to escape `pattern` when searching.
            This is needed if a literal replacement is desired when `pattern`
            may contain special regular expression characters. Defaults to
            ``True``.
    Returns:
        str: Replaced string.
    Example:
        >>> replace_start('aabbcc', 'b', 'X')
        'aabbcc'
        >>> replace_start('aabbcc', 'a', 'X')
        'Xabbcc'
    .. versionadded:: 4.1.0
    """
    return replace(text,
                   pattern,
                   repl,
                   ignore_case=ignore_case,
                   escape=escape,
                   from_start=True)
def setup(params):
  """Sets up the environment that BenchmarkCNN should run in.
  Args:
    params: Params tuple, typically created by make_params or
            make_params_from_flags.
  Returns:
    A potentially modified params.
  Raises:
    ValueError: invalid parames combinations.
  """
  # Set up environment variables before doing any other global initialization to
  # make sure it uses the appropriate environment variables.
  params = _set_environ_vars(params)
  # horovod needs to be initialized before create_config_proto() call since
  # it will be used in config generation if enabled.
  if params.variable_update == 'horovod':
    import horovod.tensorflow as hvd  # pylint: disable=g-import-not-at-top
    hvd.init()
  # Platform-specific initialization consumes the session config.
  platforms_util.initialize(params, create_config_proto(params))
  if not params.job_name:
    # Create a dummy session to initialize TF global variables using the input
    # params. Otherwise, ListDevices function may create global devices using
    # the default config instead of using the user provided config.
    #
    # TODO(hinsu): Find a way to achieve the same for distributed benchmark. It
    # is not legal to create distributed session after local session. It is also
    # not possible to create distributed session here as that results in
    # multiple creation of ClusterManager and Server.
    with tf.Session(config=create_config_proto(params)) as sess:
      del sess
  return params
import copy
def copy_excel_cell_range(
        src_ws: openpyxl.worksheet.worksheet.Worksheet,
        min_row: int = None,
        max_row: int = None,
        min_col: int = None,
        max_col: int = None,
        tgt_ws: openpyxl.worksheet.worksheet.Worksheet = None,
        tgt_min_row: int = 1,
        tgt_min_col: int = 1,
        with_style: bool = True
) -> openpyxl.worksheet.worksheet.Worksheet:
    """
    copies all cells from the source worksheet [src_ws] starting from [min_row] row
    and [min_col] column up to [max_row] row and [max_col] column
    to target worksheet [tgt_ws] starting from [tgt_min_row] row
    and [tgt_min_col] column.

    @param src_ws: source worksheet
    @param min_row: smallest row index in the source worksheet (1-based index)
    @param max_row: largest row index in the source worksheet (1-based index)
    @param min_col: smallest column index in the source worksheet (1-based index)
    @param max_col: largest column index in the source worksheet (1-based index)
    @param tgt_ws: target worksheet.
        If None, then the copy will be done to the same (source) worksheet.
    @param tgt_min_row: target row index (1-based index)
    @param tgt_min_col: target column index (1-based index)
    @param with_style: whether to copy cell style. Default: True
    @return: target worksheet object
    """
    # ``copy`` must be the shallow-copy *function*; the surrounding file only
    # does ``import copy`` (the module), which is not callable, so importing the
    # function locally fixes the ``copy(cell.font)`` calls below.
    from copy import copy
    if tgt_ws is None:
        tgt_ws = src_ws
    # https://stackoverflow.com/a/34838233/5741205
    for row in src_ws.iter_rows(min_row=min_row, max_row=max_row,
                                min_col=min_col, max_col=max_col):
        for cell in row:
            tgt_cell = tgt_ws.cell(
                row=cell.row + tgt_min_row - 1,
                column=cell.col_idx + tgt_min_col - 1,
                value=cell.value
            )
            if with_style and cell.has_style:
                # Style parts are mutable objects shared between cells in
                # openpyxl, so each one is copied individually.
                tgt_cell.font = copy(cell.font)
                tgt_cell.border = copy(cell.border)
                tgt_cell.fill = copy(cell.fill)
                tgt_cell.number_format = copy(cell.number_format)
                tgt_cell.protection = copy(cell.protection)
                tgt_cell.alignment = copy(cell.alignment)
    return tgt_ws
def index(request):
    """Render the site home page using the ``index.html`` template."""
    return render(request, 'index.html')
import ast
def get_teams_selected(request, lottery_info):
""" get_teams_selected updates the teams
selected by the user
@param request (flask.request object): Object containing
args attributes
@param lottery_info (dict): Dictionary keyed by
reverse standings order, with dictionary
values containing 'name' and 'id' keys
for the team
Returns:
- teams_selected (list): Teams previously
selected by the user
"""
teams_selected = []
selections = ast.literal_eval(request.args['teams_selected'])
for val in selections:
team_name = selections[val].split(' ')[-1]
if team_name != '':
for x in range(len(lottery_info), 0, -1):
if lottery_info[x]['name'] == team_name:
teams_selected.append(x)
return teams_selected | 35edfab322ce5ad039f869027552c664f9e6b576 | 29,478 |
from testtools import TestCase
def assert_fails_with(d, *exc_types, **kwargs):
    """Assert that ``d`` will fail with one of ``exc_types``.

    The normal way to use this is to return the result of
    ``assert_fails_with`` from your unit test.

    Equivalent to Twisted's ``assertFailure``.

    :param Deferred d: A ``Deferred`` that is expected to fail.
    :param exc_types: The exception types that the Deferred is expected to
        fail with.
    :param type failureException: An optional keyword argument.  If provided,
        will raise that exception instead of
        ``testtools.TestCase.failureException``.
    :return: A ``Deferred`` that will fail with an ``AssertionError`` if ``d``
        does not fail with one of the exception types.
    """
    failureException = kwargs.pop('failureException', None)
    if failureException is None:
        # Avoid circular imports.
        failureException = TestCase.failureException
    expected_names = ", ".join(exc_type.__name__ for exc_type in exc_types)

    def on_success(result):
        # The deferred succeeded although a failure was expected.
        raise failureException(
            "%s not raised (%r returned)" % (expected_names, result))

    def on_failure(failure):
        # A failure of the wrong type is still a test failure.
        if not failure.check(*exc_types):
            raise failureException("%s raised instead of %s:\n %s" % (
                failure.type.__name__, expected_names, failure.getTraceback()))
        return failure.value

    return d.addCallbacks(on_success, on_failure)
def temporal_affine_backward(dout, cache):
    """
    Backward pass for temporal affine layer.
    Input:
    - dout: Upstream gradients of shape (N, T, M)
    - cache: Values from forward pass
    Returns a tuple of:
    - dx: Gradient of input, of shape (N, T, D)
    - dw: Gradient of weights, of shape (D, M)
    - db: Gradient of biases, of shape (M,)
    """
    x, w, b, out = cache
    N, T, D = x.shape
    M = b.shape[0]
    # Collapse the batch and time axes so this reduces to a plain affine backward.
    dout_flat = dout.reshape(N * T, M)
    x_flat = x.reshape(N * T, D)
    dx = dout_flat.dot(w.T).reshape(N, T, D)
    dw = x_flat.T.dot(dout_flat)
    db = dout.sum(axis=(0, 1))
    return dx, dw, db
def _make_cmake(config_info):
    """This function initializes a CMake builder for building the project.

    Option values from the project configuration are translated into
    CMake configure arguments and extra builder arguments before the
    builder is wrapped for the dev-pipeline framework.
    """
    # Always export compile_commands.json for tooling support.
    configure_args = ["-DCMAKE_EXPORT_COMPILE_COMMANDS=ON"]
    cmake_args = {}
    options, option_fns = _make_all_options()
    def _add_value(value, key):
        # Translate an extra-argument config value into a builder kwarg.
        args_key, args_value = _EX_ARG_FNS[key](value)
        cmake_args[args_key] = args_value
    # Regular options become CMake configure flags.
    devpipeline_core.toolsupport.args_builder(
        "cmake",
        config_info,
        options,
        lambda v, key: configure_args.extend(option_fns[key](v)),
    )
    # Extra arguments are collected into cmake_args via _add_value.
    devpipeline_core.toolsupport.args_builder(
        "cmake", config_info, _EX_ARGS, _add_value
    )
    cmake = CMake(cmake_args, config_info, configure_args)
    # Honor an explicit build type (e.g. Debug/Release) when configured.
    build_type = config_info.config.get("cmake.build_type")
    if build_type:
        cmake.set_build_type(build_type)
    return devpipeline_build.make_simple_builder(cmake, config_info)
def box_to_delta(box, anchor):
    """((x1,y1) = upper left corner, (x2, y2) = lower right corner):
      * box center point, width, height = (x, y, w, h)
      * anchor center point, width, height = (x_a, y_a, w_a, h_a)
      * anchor = (x1=x_a-w_a/2, y1=y_a-h_a/2, x2=x_a+w_a/2, y2=y_a+h_a/2)
      * box = (x1=x-w/2, y1=y-h/2, x2=x+w/2, y2=y+w/2)
      * box_delta = ((x-x_a)/w_a, (y-y_a)/h_a, log(w/w_a), log(h/h_a))
    :param tuple box: box coordinates
    :param tuple anchor: anchor coordinates
    :return: box delta coordinates as described above
    """
    x1, y1, x2, y2 = box
    x1_a, y1_a, x2_a, y2_a = anchor
    # Box size and centre.
    w = x2 - x1
    h = y2 - y1
    x = x1 + w / 2
    y = y1 + h / 2
    # Anchor size and centre.
    w_a = x2_a - x1_a
    h_a = y2_a - y1_a
    x_a = x1_a + w_a / 2.0
    y_a = y1_a + h_a / 2.0
    # Normalised centre offsets plus log-scale size ratios.
    return (x - x_a) / w_a, (y - y_a) / h_a, log(w / w_a), log(h / h_a)
def mutual_information(prob1, prob2, prob_joint):
    """
    Calculates mutual information between two random variables
    Arguments
    ------------------
    prob1 (numpy array):
        The probability distribution of the first variable
        prob1.sum() should be 1
    prob2 (numpy array):
        The probability distrubiont of the second variable
        Again, prob2.sum() should be 1
    prob_joint (two dimensional numpy array):
        The joint probability, marginazling over the
        different axes should give prob1 and prob2
    Returns
    ------------------
    mutual information (scalar):
        The mutual information between two variables
    Examples
    ------------------
    A mixed joint:
    >>> p_joint = np.array((0.3, 0.1, 0.2, 0.4)).reshape((2, 2))
    >>> p1 = p_joint.sum(axis=1)
    >>> p2 = p_joint.sum(axis=0)
    >>> mutual_information(p1, p2, p_joint)
    0.12451124978365299
    An uninformative joint:
    >>> p_joint = np.array((0.25, 0.25, 0.25, 0.25)).reshape((2, 2))
    >>> p1 = p_joint.sum(axis=1)
    >>> p2 = p_joint.sum(axis=0)
    >>> mutual_information(p1, p2, p_joint)
    0.0
    A very coupled joint:
    >>> p_joint = np.array((0.4, 0.05, 0.05, 0.4)).reshape((2, 2))
    >>> p1 = p_joint.sum(axis=1)
    >>> p2 = p_joint.sum(axis=0)
    >>> mutual_information(p1, p2, p_joint)
    0.58387028280246378
    Using the alternative definition of mutual information
    >>> p_joint = np.array((0.4, 0.2, 0.1, 0.3)).reshape((2, 2))
    >>> p1 = p_joint.sum(axis=1)
    >>> p2 = p_joint.sum(axis=0)
    >>> MI = mutual_information(p1, p2, p_joint)
    >>> x1 = entropy(p1)
    >>> x2 = entropy(p2)
    >>> x3 = joint_entropy(p_joint)
    >>> np.isclose(MI, x1 + x2 - x3)
    True
    """
    # MI = sum over p(x, y) * log2( p(x, y) / (p(x) p(y)) )
    independent = np.outer(prob1, prob2)
    ratio = prob_joint / independent
    return np.sum(prob_joint * np.log2(ratio))
def hog(img, num_bins, edge_num_cells=2):
    """ Histogram of oriented gradients
    :param img: image to process
    :param num_bins: number of orientation bins per cell
    :param edge_num_cells: cut img into cells: 2 = 2x2, 3 = 3x3 etc.
    :return: concatenated per-cell magnitude-weighted orientation histograms
    """
    if edge_num_cells != 2:
        raise NotImplementedError
    w, h = img.shape[:2]
    # Integer division is required: ``w / 2`` yields a float in Python 3 and
    # floats are not valid slice indices, so the slicing below would raise.
    cut_x = w // 2
    cut_y = h // 2
    # Per-pixel gradients, then magnitude/orientation.
    gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
    gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
    mag, ang = cv2.cartToPolar(gx, gy)
    # Quantise angles [0, 2*pi) into num_bins integer bins.
    # NOTE(review): a pixel with ang exactly 2*pi maps to bin == num_bins,
    # which would lengthen that cell's bincount — confirm whether clamping
    # (e.g. ``% num_bins``) is wanted.
    ang = np.int32(num_bins * ang / (2 * np.pi))
    bin_cells = (ang[:cut_x, :cut_y], ang[cut_x:, :cut_y],
                 ang[:cut_x, cut_y:], ang[cut_x:, cut_y:])
    mag_cells = (mag[:cut_x, :cut_y], mag[cut_x:, :cut_y],
                 mag[:cut_x, cut_y:], mag[cut_x:, cut_y:])
    # Magnitude-weighted orientation histogram for each of the 2x2 cells.
    hists = [np.bincount(
        b.ravel(), m.ravel(), num_bins) for b, m in zip(bin_cells, mag_cells)]
    hist = np.hstack(hists)
    return hist
def get_graph(adj) -> nx.classes.graph.Graph:
    """
    Returns a nx graph from zero-padded adjacency matrix.

    @param adj: square 0/1 adjacency matrix, possibly padded with all-zero
        rows/columns (e.g. a fixed-size NxN array where only the first k
        nodes are populated); the padding is stripped before building the
        graph.
    @return: undirected networkx graph over the non-padded nodes
    """
    # remove all zeros rows and columns
    adj = adj[~np.all(adj == 0, axis=1)]
    adj = adj[:, ~np.all(adj == 0, axis=0)]
    # NOTE(review): ``np.asmatrix`` and ``nx.from_numpy_matrix`` are deprecated
    # (the latter removed in networkx 3.0) — consider migrating to
    # ``nx.from_numpy_array(adj)``; confirm against the pinned networkx version.
    adj = np.asmatrix(adj)
    return nx.from_numpy_matrix(adj)
import re
import os
import six
def cert_config(pth_config, config_str, pth_prepend="cert="):
    """ Update config file located at pth_config with a string(config_str).

    @param pth_config: path of the config file to create or update
    @param config_str: list of config lines; the first line containing
        ``pth_prepend`` supplies the certificate path
    @param pth_prepend: key prefix that marks the certificate entry
    @return: human-readable status message describing what was done
    """
    # Determine path of cert from config_str list of strings (first str containing pth_prepend)
    path_cert = [x.split(pth_prepend) for x in config_str if pth_prepend in x][0][1]
    # Normalise the path and strip any trailing newline characters.
    path_cert = re.sub('[\n\r]', '', _path_update(path_cert))
    if not os.path.exists(pth_config):  # if config file does not exist, create and populate
        if not os.path.exists(os.path.dirname(pth_config)):
            os.makedirs(os.path.dirname(pth_config))
        with open(pth_config, 'w', encoding='utf-8') as file:
            for line in config_str:
                # Use re to print filename consistently.
                file.write(six.text_type(re.sub('{}.*$'.format(pth_prepend), ''.join([pth_prepend, path_cert]), line)))
        status = "".join(["Created and updated ", pth_config])
    else:  # If config file exists, replace or append if pth_prepend not present
        with open(pth_config, encoding='utf-8') as f:
            pip_contents = f.readlines()  # Read contents
        pip_contents = [_path_update(x) for x in pip_contents]  # Update windows paths for string literals
        if pth_prepend not in '\t'.join(pip_contents):  # Append pip.ini cert
            with open(pth_config, 'a', encoding='utf-8') as file:
                # Use re to print filename consistently.
                file.write(six.text_type(re.sub('{}.*$'.format(pth_prepend),
                                                ''.join([pth_prepend, path_cert]),
                                                '{}{}\r\n'.format(pth_prepend, path_cert))))
            status = "".join(["Appended to ", pth_config])
        else:  # Update path_cert:
            if path_cert not in '\t'.join(pip_contents):
                # Update pip.ini cert location:
                # Keep a backup of the original file before rewriting it.
                config_loc_backup = _backup_file(pth_config)
                copy2(pth_config, config_loc_backup)  # Make backup copy of original
                with open(pth_config, 'w', encoding='utf-8') as file:
                    for line in pip_contents:
                        # Use re to print filename consistently.
                        file.write(re.sub('{}.*$'.format(pth_prepend),
                                          ''.join([pth_prepend, path_cert]),
                                          line))
                status = "Backed up config to {}.\nUpdated config file to contain new location.".format(config_loc_backup)
            else:
                status = "Config file already refers to right location."
    return status
import logging
import os
def analysis_directed(net, label, outpath):
    """
    Analyze directed network.

    :param net: networkit graph (expected to be directed)
    :param label: name used as a prefix for generated files/plots
    :param outpath: directory where plots and data files are written
    :return: dict of summary statistics (node/edge counts, degree extremes,
        reciprocity, LSCC node percentage)
    """
    result_dict = dict()
    # Check whether graph is directed
    is_directed = net.isDirected()
    if not is_directed:
        # NOTE(review): analysis continues even when the graph is undirected —
        # confirm whether an early return is intended here.
        logging.error('Input graph should be directed.')
    else:
        logging.info('Directed graph')
    # Check whether the output directory exists
    if not os.path.exists(outpath):
        os.mkdir(outpath)
    # Profiling
    nodes = net.numberOfNodes()
    edges = net.numberOfEdges()
    result_dict['d_num_of_nodes'] = nodes
    result_dict['d_num_of_edges'] = edges
    logging.info('Number of nodes: {0}'.format(nodes))
    logging.info('Number of edges: {0}'.format(edges))
    pf = nk.profiling.Profile.create(net, preset="minimal")
    pf.output("HTML", outpath)
    # os.rename(outpath+label+'.html', outpath+label+'-directed.html')
    logging.info('\n')
    # In-degree distribution
    logging.info('Write indegree to file...')
    networkit_util.get_and_write_deg_dist(net, label, outpath, degree_type='in')
    logging.info('Start to plot in-degree distribution...')
    uniqe_deg_seq = networkit_plot.plot_indeg_dist(net, label, outpath)
    min_indeg = min(uniqe_deg_seq)
    max_indeg = max(uniqe_deg_seq)
    result_dict['d_min_indeg'] = min_indeg
    result_dict['d_max_indeg'] = max_indeg
    logging.info('Min in-degree: {0}'.format(min_indeg))
    logging.info('Max in-degree: {0}'.format(max_indeg))
    logging.info('\n')
    # Out-degree distribution
    logging.info('Write outdegree to file...')
    networkit_util.get_and_write_deg_dist(net, label, outpath, degree_type='out')
    logging.info('Start to plot out-degree distribution...')
    uniqe_deg_seq = networkit_plot.plot_outdeg_dist(net, label, outpath)
    min_outdeg = min(uniqe_deg_seq)
    max_outdeg = max(uniqe_deg_seq)
    # Fixed: these previously stored/logged min_indeg instead of min_outdeg.
    result_dict['d_min_outdeg'] = min_outdeg
    result_dict['d_max_outdeg'] = max_outdeg
    logging.info('Min out-degree: {0}'.format(min_outdeg))
    logging.info('Max out-degree: {0}'.format(max_outdeg))
    logging.info('\n')
    # CCDF
    logging.info('Start to plot complementary cumulative (in/out) degree distribution...')
    networkit_plot.plot_ccum_degree_dist(net, label, outpath, degree_type='in')
    networkit_plot.plot_ccum_degree_dist(net, label, outpath, degree_type='out')
    logging.info('Plot cc (in/out) degree distribution done.\n')
    # In-Out-degree
    logging.info('Plot outdegree vs indegree...')
    networkit_plot.plot_out_in_degree_comparision(net, label, outpath)
    logging.info('Plot out vs in done.\n')
    # Powerlaw
    degree_analysis.get_deg_seq(net, label, outpath, degree_type='in')
    degree_seq_filename = outpath + label + '-in-degree'
    degree_analysis.power_law_analysis(degree_seq_filename, label, outpath, degree_type='in')
    degree_analysis.get_deg_seq(net, label, outpath, degree_type='out')
    degree_seq_filename = outpath + label + '-out-degree'
    degree_analysis.power_law_analysis(degree_seq_filename, label, outpath, degree_type='out')
    # Reciprocity
    logging.info('Calculating reciprocity...')
    reciprocity = networkit_util.get_reciprocity(net)
    result_dict['d_reciprocity'] = reciprocity
    logging.info('Reciprocity: {0}'.format(reciprocity))
    logging.info('Reciprocity done.\n')
    # Connected components
    # Weakly connected components
    logging.info('Plot wcc distribution...')
    wcc = networkit_plot.plot_wcc_dist(net, label, outpath)
    logging.info('Number of weakly connected components: {0}'.format(len(wcc)))
    lwcc = np.max(wcc)
    logging.info('The largest weakly connected component size: {0}'.format(lwcc))
    logging.info('')
    # Strongly connected components
    logging.info('Plot scc distribution...')
    scc = networkit_plot.plot_scc_dist(net, label, outpath)
    logging.info('Number of strongly connected components: {0}'.format(len(scc)))
    lscc = np.max(scc)
    logging.info('The largest strongly connected component size: {0}'.format(lscc))
    # ``lscc`` is reused here as the subgraph of the largest SCC.
    lscc = networkit_util.get_lscc_subgraph(net)
    lscc_nodes_per = lscc.numberOfNodes() / net.numberOfNodes()
    result_dict['d_lscc_nodes_percentage'] = lscc_nodes_per
    logging.info('LCC nodes percentage: {0}'.format(lscc_nodes_per))
    logging.info('LCC edges percentage: {0}'.format(lscc.numberOfEdges() / net.numberOfEdges()))
    # Macro structure
    components_analysis.run(net, label, outpath)
    logging.info('Components done.\n')
    # Pagerank
    logging.info('Calculating pagerank...')
    networkit_plot.plot_pagerank(net, label, outpath)
    centrality_name = 'pagerank'
    centrality_filename = outpath + label + '-' + centrality_name + '-falseid-value'
    paras = {'centrality_filename': centrality_filename, 'label': label, 'outpath': outpath,
             'centrality_name': centrality_name}
    networkit_plot.plot_ccum_centrality_dist(**paras)
    logging.info('Pagerank done.\n')
    return result_dict
import requests
import json
import re
def build_response_message(object_spec, response_message, namespace):

    """Function to build the response message used to inform users of policy decisions

    Posts ``object_spec`` to OPA, aggregates the per-policy decision messages
    into a single string, and increments the Prometheus pass/fail counters for
    each policy in the response. On any OPA communication failure a HIGH-severity
    failure message is returned instead.
    """

    try:

        opa_response = requests.post(
            opa_url,
            json=object_spec,
            headers={"Content-Type": "application/json"},
            timeout=5,
        )

    except requests.exceptions.RequestException as exception:

        app.logger.info(f"Call to OPA was unsuccessful")
        print(f"Exception:\n{exception}")

        response_message = "[FAIL] HIGH - Call to OPA was unsuccessful. Please contact your cluster administrator"

        return response_message

    if opa_response and opa_response.status_code == 200:

        app.logger.info("Call to OPA was successful")
        app.logger.debug(f"Opa Response Headers: {opa_response.headers}")
        app.logger.debug(f"OPA Response Text:\n{opa_response.text}")

    else:

        app.logger.info(
            f"Request to OPA returned an error {opa_response.status_code}, the response is:\n{opa_response.text}"
        )

        response_message = "[FAIL] HIGH - Call to OPA was unsuccessful. Please contact your cluster administrator"

        return response_message

    # Load OPA request results as JSON
    opa_response_json = json.loads(opa_response.text)["decisions"]

    app.logger.debug(f"OPA JSON:\n{opa_response_json}")

    # Build response message from "msg" component of each object in the OPA response
    messages = []

    # Note this entire statement can likely be broken down into a simpler chained
    # generator/list comprehension statement.....I tried, but couldn't get it to work
    # Something similar to:
    # opa_response_msg = ", ".join(reason['msg'] for reason in decision['reasons'] for decision in opa_response_json)
    for decision in opa_response_json:

        for reason in decision["reasons"]:

            messages.append(reason["msg"])

    # Sort messages for consistent output
    messages.sort()

    opa_response_msg = ", ".join(messages)

    # Cleanup artifacts from OPA response before sending to K8s API
    # (strips leading/trailing list brackets and quotes around each message)
    response_message = re.sub(r"^\[\'|\'\]$|\'(\, )\'", r"\1", opa_response_msg)

    app.logger.debug(f"response_message:\n{response_message}")

    # Increment Prometheus counter for each policy object in the OPA response
    for policy_obj in opa_response_json:

        # Normalise "policy-foo_bar" to the metric label "foo-bar".
        policy_name = re.sub("policy-", "", policy_obj["policy"]).replace("_", "-")

        app.logger.debug(f"Policy Object: {policy_obj}")
        app.logger.debug(f"Policy Name: {policy_name}")

        if policy_obj["reasons"]:

            for reason in policy_obj["reasons"]:

                app.logger.debug(f"Policy Failed")

                # Increment Prometheus Counters
                magtape_metrics_policies.labels(
                    count_type="total", policy=policy_name, ns=namespace
                ).inc()
                magtape_metrics_policies.labels(
                    count_type="fail", policy=policy_name, ns=namespace
                ).inc()

        else:

            app.logger.debug(f"Policy Passed")

            # Increment Prometheus Counters
            magtape_metrics_policies.labels(
                count_type="total", policy=policy_name, ns=namespace
            ).inc()
            magtape_metrics_policies.labels(
                count_type="pass", policy=policy_name, ns=namespace
            ).inc()

    return response_message
def findSamplesInRage(pointIds, minVal, maxVal):
    """Select sample IDs whose last two digits fall within [minVal, maxVal].

    Args:
        pointIds (ndarray): array of sample IDs.
        minVal (Number): inclusive lower bound of the ID-suffix range.
        maxVal (Number): inclusive upper bound of the ID-suffix range.

    Returns:
        ndarray: boolean mask, True where ``id % 100`` lies in the range.
    """
    # The last two decimal digits encode the sample's position in its group.
    suffix = pointIds % 100
    above_min = suffix >= minVal
    below_max = suffix <= maxVal
    return above_min & below_max
def test_results_form_average(fill_market_trade_databases):
    """Tests averages are calculated correctly by ResultsForm, compared to a direct calculation
    """
    Mediator.get_volatile_cache().clear_cache()

    market_df, trade_df, order_df = get_sample_data()

    trade_df, _ = MetricSlippage().calculate_metric(trade_order_df=trade_df, market_df=market_df, bid_benchmark='mid',
                                                    ask_benchmark='mid')

    results_form = BarResultsForm(market_trade_order_list=['trade_df'],
                                  metric_name='slippage',
                                  aggregation_metric='mean',
                                  aggregate_by_field=['ticker', 'venue'], scalar=10000.0,
                                  weighting_field='executed_notional_in_reporting_currency')

    results_df = results_form.aggregate_results(market_trade_order_df=trade_df, market_df=market_df,
                                                market_trade_order_name='trade_df')

    slippage_average = float(results_df[0][0].values[0])

    # Directly calculate the notional-weighted average slippage (in bp)
    def grab_slippage(trade_df):
        return 10000.0 * ((trade_df['slippage'] * trade_df['executed_notional_in_reporting_currency']).sum() \
               / trade_df['executed_notional_in_reporting_currency'].sum())

    slippage_average_comp = grab_slippage(trade_df)

    # Check the average slippage.
    # BUG FIX: compare the *absolute* difference — a raw "x - y < eps" check
    # passes for any large negative difference and hides real mismatches.
    assert abs(slippage_average - slippage_average_comp) < eps

    slippage_average_venue = results_df[1][0]['venue'][venue_filter]
    slippage_average_venue_comp = grab_slippage(trade_df[trade_df['venue'] == venue_filter])

    # Check the average slippage by venue (absolute difference, as above)
    assert abs(slippage_average_venue - slippage_average_venue_comp) < eps
from operator import mod
def _prot_builder_from_seq(sequence):
    """
    Build a protein from a template.
    Adapted from fragbuilder

    Parameters
    ----------
    sequence : str
        One-letter residue codes; each must be a key of ``templates_aa``.

    Returns
    -------
    tuple
        ``(coords, names, elements, occupancies, bfactors, offsets,
        exclusions)`` — stacked atom coordinates, atom names, element
        symbols, per-atom occupancies/b-factors, cumulative per-residue
        atom offsets and the 1-3 bonded-exclusion list.

    NOTE(review): ``mod`` imported above is ``operator.mod`` (binary ``%``);
    the one-argument calls ``mod(v2 - v1)`` / ``mod(axis2)`` below read as a
    vector-norm ("modulus") helper and would raise TypeError as written —
    confirm which ``mod`` is actually meant to be in scope.
    """
    names = []
    bonds_mol = []
    # Seed the chain with the first residue's template.
    pept_coords, pept_at, bonds, _, _, offset = templates_aa[sequence[0]]
    names.extend(pept_at)
    bonds_mol.extend(bonds)
    offsets = [0, offset]
    # Append each remaining residue, translating/rotating its template so it
    # forms a peptide bond with the growing chain.
    for idx, aa in enumerate(sequence[1:]):
        tmp_coords, tmp_at, bonds, _, _, offset = templates_aa[aa]
        # Pick the three reference atoms that define the bond geometry.
        if sequence[0] == 'B' and idx == 0:
            # Special-cased cap residue 'B' — presumably an acetyl-like group;
            # TODO confirm against templates_aa['B'] atom ordering.
            v3 = pept_coords[0 + offsets[idx]]  # C
            v2 = pept_coords[2 + offsets[idx]]  # CH3
            v1 = (pept_coords[5 + offsets[idx]] + pept_coords[3 + offsets[idx]]) / 2  # HH31 / HH33
            #['C', 'O', 'CH3', 'HH31', 'HH32', 'HH33'],
        else:
            v3 = pept_coords[2 + offsets[idx]]  # C
            v2 = pept_coords[1 + offsets[idx]]  # CA
            v1 = pept_coords[0 + offsets[idx]]  # N
        # Place the new residue's N at bond length along the C->(CA-N) direction.
        # NOTE(review): one-argument mod() — see docstring note.
        connectionpoint = v3 + (v2 - v1) / mod(v2 - v1) * constants.peptide_bond_lenght
        connectionvector = tmp_coords[0] - connectionpoint
        # translate
        tmp_coords = tmp_coords - connectionvector
        # first rotation
        v4 = v3 - v2 + connectionpoint
        axis1 = perp_vector(tmp_coords[1], connectionpoint, v4)
        angle1 = get_angle(tmp_coords[1], connectionpoint, v4)
        center1 = connectionpoint
        ba = axis1 - center1
        tmp_coords = tmp_coords - center1
        tmp_coords = tmp_coords @ rotation_matrix_3d(ba, angle1)
        tmp_coords = tmp_coords + center1
        # second rotation: align the torsional angle of the peptide bond
        axis2 = tmp_coords[1] - connectionpoint
        axis2 = axis2 / mod(axis2) + connectionpoint
        d3 = tmp_coords[1]
        d4 = tmp_coords[2]
        angle2 = constants.pi + get_torsional(v3, connectionpoint, d3, d4)
        if aa == 'P':
            # NOTE(review): "-= -90°" *adds* 90 degrees; the double negative
            # looks suspicious — confirm the intended proline correction.
            # Also `bmb.constants` — `bmb` is not imported in this block and
            # plain `constants` is used everywhere else; verify.
            angle2 -= - 90 * bmb.constants.degrees_to_radians
        center2 = connectionpoint
        ba = axis2 - center2
        tmp_coords = tmp_coords - center2
        tmp_coords = tmp_coords @ rotation_matrix_3d(ba, angle2)
        tmp_coords = tmp_coords + center2
        names.extend(tmp_at)
        offsets.append(offsets[idx + 1] + offset)
        pept_coords = np.concatenate([pept_coords, tmp_coords])
        # create a list of bonds from the template-bonds by adding the offset
        prev_offset = offsets[-3]
        last_offset = offsets[-2]
        bonds_mol.extend([(i + last_offset, j + last_offset)
                          for i, j in bonds] + [(2 + prev_offset, last_offset)])
        # NOTE(review): offsets is appended twice per iteration (here and
        # above) — looks deliberate given the [-3]/[-2] indexing, but verify.
        offsets.append(offsets[-1] + offset)
    exclusions = _exclusions_1_3(bonds_mol)
    # generate a list with the names of chemical elements
    elements = []
    for i in names:
        element = i[0]
        # Names like "1HH3" start with a digit; the element symbol follows it.
        if element in ['1', '2', '3']:
            element = i[1]
        elements.append(element)
    occupancies = [1.] * len(names)
    bfactors = [0.] * len(names)
    return (pept_coords,
            names,
            elements,
            occupancies,
            bfactors,
            offsets,
            exclusions)
def connect_thread():
    """Spin up a ``SlaveService`` on a background thread and return a
    connection to it. Useful for testing purposes.

    See :func:`rpyc.utils.factory.connect_thread`.

    :returns: an RPyC connection exposing ``SlaveService``
    """
    return factory.connect_thread(SlaveService, remote_service=SlaveService)
from typing import Tuple
def yxyx_to_albu(yxyx: np.ndarray,
                 img_size: 'Tuple[PosInt, PosInt]') -> np.ndarray:
    """Convert unnormalized ``[ymin, xmin, ymax, xmax]`` boxes to
    Albumentations format, i.e. normalized ``[xmin, ymin, xmax, ymax]``.

    (Docstring fix: the previous text claimed the output order was
    ``[ymin, xmin, ymax, xmax]``, but the code — and the Albumentations
    convention — put x before y.)

    Args:
        yxyx: (N, 4) array of pixel-coordinate boxes as
            ``[ymin, xmin, ymax, xmax]``.
        img_size: ``(height, width)`` of the reference image.

    Returns:
        (N, 4) float array of ``[xmin, ymin, xmax, ymax]`` clamped to [0, 1].
    """
    h, w = img_size
    ymin, xmin, ymax, xmax = yxyx.T
    # Normalize each coordinate by the matching image dimension.
    ymin, ymax = ymin / h, ymax / h
    xmin, xmax = xmin / w, xmax / w
    # Stack in Albumentations order, then clamp everything in one pass.
    xyxy = np.stack([xmin, ymin, xmax, ymax], axis=1).reshape((-1, 4))
    np.clip(xyxy, 0., 1., out=xyxy)
    return xyxy
def parse_tags(source):
    """Split *source* into whitespace-separated tokens, grouping any
    parenthesized substring into a sub-list.

    Example: ``"a b (c d) e"`` -> ``['a', 'b', ['c', 'd'], 'e']``

    Only top-level parentheses delimit groups; nested parentheses are kept
    verbatim inside the enclosing group's tokens. Intended for easy manual
    tag entry (text box or command-line argument) where a full format such
    as JSON would be overkill.

    Args:
        source: the string to parse.

    Returns:
        list whose items are strings (bare tags) and lists of strings
        (parenthesized groups), in source order.

    Raises:
        AssertionError: if the parentheses in *source* are unbalanced.
    """
    unmatched_count = 0   # current parenthesis nesting depth
    start_pos = 0         # start of the unprocessed plain-text run
    opened = False        # currently inside a top-level group?
    open_pos = 0          # index of the current group's opening '('
    finished = []
    for cur_pos, character in enumerate(source):
        if character == '(':
            unmatched_count += 1
            if not opened:
                open_pos = cur_pos
                opened = True
        elif character == ')':
            unmatched_count -= 1
            # Closing the outermost '(' ends the current group.
            if opened and unmatched_count == 0:
                # Flush any plain tags that preceded the group.
                clean = source[start_pos:open_pos].strip()
                if clean:
                    finished.extend(clean.split())
                # Strip the bounding parentheses and tokenize the group.
                group = source[open_pos + 1:cur_pos].split()
                finished.append(group)
                opened = False
                start_pos = cur_pos + 1
    # NOTE: assert is stripped under -O; kept so callers that expect
    # AssertionError on unbalanced input keep working.
    assert unmatched_count == 0
    # Flush any trailing plain tags after the last group.
    remainder = source[start_pos:].strip()
    if remainder:
        finished.extend(remainder.split())
    return finished
def NonZeroMin(data):
    """Returns the smallest non-zero value in an array.

    Parameters
    ----------
    data : array-like
        A list, tuple or array of numbers.

    Returns
    -------
    An integer or real value, depending on data's dtype.

    Raises
    ------
    ValueError
        If *data* contains no non-zero values.
    """
    # 1) Ensure we have an ndarray. (BUG FIX: the old check compared a type
    #    object against the *string* 'numpy.ndarray', which is always True.)
    if not isinstance(data, np.ndarray):
        data = np.asarray(data)
    # 2) Keep only the non-zero entries and return their minimum.
    return data[np.nonzero(data)].min()
def get_99_pct_params_ln(x1: float, x2: float):
    """Convenience wrapper for the 0.5%-99.5% inter-quantile range.

    :param x1: the lower value such that pr(X > x1) = 0.005
    :param x2: the higher value such that pr(X < x2) = 0.995
    """
    lower_q, upper_q = 0.005, 0.995
    return get_lognormal_params_from_qs(x1, x2, lower_q, upper_q)
def summarize_data(data):
    """Summarize per-user cost: the median ``cost_user`` for every
    (scenario, strategy, confidence, decile) combination, with columns
    renamed for presentation.
    """
    group_cols = ['scenario', 'strategy', 'confidence', 'decile']
    # Subset the desired columns.
    subset = data[group_cols + ['cost_user']]
    # Median cost per user within each group.
    summary = subset.groupby(group_cols)['cost_user'].median().reset_index()
    # Presentation-friendly column names.
    summary.columns = ['Scenario', 'Strategy', 'Confidence', 'Decile',
                       'Cost Per User ($)']
    return summary
def gen_cam(image, mask):
    """Generate a CAM (class activation map) overlay.

    :param image: [H, W, C] original image
    :param mask: [H, W] activation map with values in the range 0..1
    :return: tuple (cam, heatmap)
    """
    # Render the mask with the JET colormap (cv2 returns BGR), then flip
    # the channel order to RGB.
    colored = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
    heatmap = colored[..., ::-1]  # bgr to rgb
    # Blend the heatmap with the original image.
    overlay = np.float32(heatmap) + np.float32(image)
    return norm_image(overlay), heatmap.astype(np.uint8)
def SetInputFilePath(path):
    """Set the input file name stored in the database.

    Used by the debugger and other parts of IDA. Call this when the
    database has been moved to another location, or when using remote
    debugging.

    @param path: new input file path
    """
    return idaapi.set_root_filename(path)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.