| content (string, lengths 35 to 416k) | sha1 (string, length 40) | id (int64, 0 to 710k) |
|---|---|---|
import os
def get_linux_memory_limit(): # pragma: no cover
"""This generally shows the full address space by default.
    >>> limit = get_linux_memory_limit()
    >>> assert isinstance(limit, int)
"""
if os.path.isfile("/sys/fs/cgroup/memory/memory.limit_in_bytes"):
with open("/sys/fs/cgroup/memory/memory.limit_in_bytes") as limit:
mem = int(limit.read())
return mem
else:
raise RuntimeError("get_linux_memory_limit() failed.")
|
3d167b1c5eb69e4735fd5e36289ba83ce4f9b050
| 22,232
|
def autoBindEvents (sink, source, prefix='', weak=False, priority=None):
"""
Automatically set up listeners on sink for events raised by source.
Often you have a "sink" object that is interested in multiple events
raised by some other "source" object. This method makes setting that
up easy.
You name handler methods on the sink object in a special way. For
    example, let's say you have an object mySource which raises events of
types FooEvent and BarEvent. You have an object mySink which wants to
listen to these events. To do so, it names its handler methods
"_handle_FooEvent" and "_handle_BarEvent". It can then simply call
autoBindEvents(mySink, mySource), and the handlers are set up.
You can also set a prefix which changes how the handlers are to be named.
For example, autoBindEvents(mySink, mySource, "source1") would use a
handler named "_handle_source1_FooEvent".
"weak" has the same meaning as with addListener().
Returns the added listener IDs (so that you can remove them later).
"""
    if len(prefix) > 0 and prefix[0] != '_':
        prefix = '_' + prefix
    if not hasattr(source, '_eventMixin_events'):
# If source does not declare that it raises any events, do nothing
print("Warning: source class %s doesn't specify any events!" % (
source.__class__.__name__,))
return []
events = {}
for e in source._eventMixin_events:
        if isinstance(e, str):
events[e] = e
else:
events[e.__name__] = e
listeners = []
# for each method in sink
for m in dir(sink):
# get the method object
a = getattr(sink, m)
if callable(a):
# if it has the revent prefix signature,
if m.startswith("_handle" + prefix + "_"):
event = m[8+len(prefix):]
# and it is one of the events our source triggers
if event in events:
# append the listener
listeners.append(source.addListener(events[event], a, weak=weak,
priority=priority))
#print("autoBind: ",source,m,"to",sink)
elif len(prefix) > 0 and "_" not in event:
print("Warning: %s found in %s, but %s not raised by %s" %
(m, sink.__class__.__name__, event,
source.__class__.__name__))
return listeners
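# Illustrative usage sketch (editor's addition; MySink, FooEvent and my_source are
# hypothetical revent-style names, not from the original source):
# class MySink(object):
#     def _handle_FooEvent(self, event):
#         ...  # called whenever my_source raises FooEvent
# listener_ids = autoBindEvents(MySink(), my_source)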
|
40e1cba92eee53fa74bad8d0d260ba0cd54c7407
| 22,233
|
def CustomLanguageModel(path):
"""
Custom language model. This is actually a dummy wrapper, and you may also pass the path to your custom .klm model
directly to the recognizer.
:param str path: Path to a .klm language model
:return: path to .klm language model
:rtype: str
"""
return path
|
f1d6ac452e0374edb2ac0ac3f60e6ef9d2fc66ad
| 22,235
|
def find_row_in_array(array, column, value):
"""
Find a single row in a record array.
Parameters
----------
array : numpy.recarray
The record array to be searched.
column : str
The name of the column of the array to search.
value : int, str, or float
The value sought in ``column``
Raises
------
ValueError
An error is raised if more than one row is found.
Returns
-------
    row : numpy.recarray row or None
        The found row from ``array``, or None if no match is found.
Examples
--------
>>> from gisutils import utils
>>> import numpy
    >>> x = numpy.array(
    ...     [
    ...         ('A1', 'Ocean', 'A1_x'), ('A2', 'Ocean', 'A2_x'),
    ...         ('B1', 'A1', 'None'), ('B2', 'A1', 'B2_x'),
    ...     ], dtype=[('ID', '<U5'), ('DS_ID', '<U5'), ('Cu', '<U5'),]
    ... )
    >>> utils.find_row_in_array(x, 'ID', 'A1')
    ('A1', 'Ocean', 'A1_x')
"""
rows = list(filter(lambda x: x[column] == value, array))
if len(rows) == 0:
row = None
elif len(rows) == 1:
row = rows[0]
else:
raise ValueError("more than one row where {} == {}".format(column, value))
return row
|
92be94ce7671db74f3742f6049def4949c6d22fd
| 22,236
|
from typing import Dict
from typing import Any
def as_lta_record(catalog_record: Dict[str, Any]) -> Dict[str, Any]:
"""Cherry pick keys from a File Catalog record to include in Bundle metadata."""
# As created by the nersc_verifier component...
# ---------------------------------------------
# "uuid": bundle["uuid"],
# "logical_name": hpss_path,
# "checksum": bundle["checksum"],
# "locations": [
# {
# "site": "NERSC",
# "path": hpss_path,
# "hpss": True,
# "online": False,
# }
# ],
# "file_size": bundle["size"],
# # note: 'lta' is an application-private metadata field
# "lta": bundle,
KEYS = ['checksum', 'file_size', 'logical_name', 'meta_modify_date', 'uuid']
lta_record = {k: catalog_record[k] for k in KEYS}
return lta_record
|
96eb177bb8de6a8faa5f2647e922e70f2516187e
| 22,239
|
import torch
def _positive_function(positive):
"""
Returns the torch function belonging to a positive string
"""
if positive == 'Abs':
return torch.abs
elif positive == 'Square':
return torch.square
elif positive == 'Softplus':
return torch.nn.Softplus()
elif positive == 'ReLU':
return torch.nn.ReLU()
elif positive == 'ReLU6':
return torch.nn.ReLU6()
elif positive == '4':
return lambda x: torch.pow(x, 4)
elif positive == 'Exp':
return torch.exp
elif positive == 'None':
return torch.nn.Identity()
else:
error = f"Positve transformation {positive} not supported!"
raise ValueError(error)
|
9166e0f5f4bbb0dce8f43e7d87cb905b7a570476
| 22,240
|
def battle(p1_current_unit, p2_current_unit):
"""
    @purpose: To carry out a round of battle between two units
    @complexity:
        Best & Worst Case: O(1)
@parameter:
p1_current_unit: The current unit of p1
p2_current_unit: The current unit of p2
@precondition: Two units from both sides are passed
    @postcondition: A gladiatorial battle is carried out, and the units are returned after battle
"""
# Battle is on
# Compare the unit's speeds
if p1_current_unit.getSpeed() > p2_current_unit.getSpeed():
# if u1 > u2 in speed
# u1 attack first
atk = p1_current_unit.attack()
dmg = p2_current_unit.defend(atk)
print("Player 1's " + p1_current_unit.unit_name() + " attacks for " + str(dmg) + " damage!")
# after attacking, check if u2 still alive
# if yes, u2 retaliates
        if p2_current_unit.isAlive():
atk = p2_current_unit.attack()
dmg = p1_current_unit.defend(atk)
print("Still standing, Player 2's " + p2_current_unit.unit_name() + " attacks for " + str(dmg) + " damage!")
# check if u2's attack killed u1
            if not p1_current_unit.isAlive():
print("Player 1 lost one unit!")
else:
# if no, u2 dead
print("Player 2 lost one unit!")
# if u1 < u2 in speed
# u2 attack first
elif p1_current_unit.getSpeed() < p2_current_unit.getSpeed():
atk = p2_current_unit.attack()
dmg = p1_current_unit.defend(atk)
print("Player 2's " + p2_current_unit.unit_name() + " attacks for " + str(dmg) + " damage!")
# check if u1 still alive
# if yes, u1 retaliates
        if p1_current_unit.isAlive():
atk = p1_current_unit.attack()
dmg = p2_current_unit.defend(atk)
print("Still standing, Player 1's " + p1_current_unit.unit_name() + " attacks for " + str(dmg) + " damage!")
# check if u1's retaliation killed u2
            if not p2_current_unit.isAlive():
print("Player 2 lost one unit!")
else:
print("Player 1 lost one unit!")
else:
# equal speed
print("Player 1's " + p1_current_unit.unit_name() + " and 2's " + p2_current_unit.unit_name() + " attacks!")
atk1 = p1_current_unit.attack()
atk2 = p2_current_unit.attack()
dmg1 = p2_current_unit.defend(atk1)
print("Player 2 receive " + str(dmg1) + " damage!")
dmg2 = p1_current_unit.defend(atk2)
print("Player 1 receive " + str(dmg2) + " damage!")
if p2_current_unit.isAlive() == False:
print("Player 2 lost one unit!")
if p1_current_unit.isAlive() == False:
print("Player 1 lost one unit!")
# return the units post battle
return p1_current_unit, p2_current_unit
|
09ea8e36f58027e832734b653113717613237a1d
| 22,241
|
import decimal
import math
def factorPR(n: int) -> int:
"""Return a factor of n using the Pollard Rho method.
The return value is 1, if n is prime, and a non-trivial factor,
otherwise. Note: This method will occasionally fail to find a
non-trivial factor when one exists.
Examples:
>>> factorPR(2017*2027*12353948231) # product of primes
2017
>>> factorPR(8) == factorPR(4) == 2 # fails
False
"""
numsteps = 2 * int(decimal.Decimal(n).sqrt().sqrt())
for slow in [2, 3, 4, 6]:
fast = slow
for _ in range(numsteps):
slow = (slow * slow + 1) % n
fast = (fast * fast + 1) % n
fast = (fast * fast + 1) % n
g = math.gcd(fast - slow, n)
if g != 1:
if g == n:
break
else:
return g
return 1
|
a429f4e5c7fa603615a8bd4f687fc076a4becc56
| 22,242
|
import os
def oci_config_profile():
"""
Returns the OCI config profile location.
"""
return os.environ.get("OCI_CONFIG_PROFILE", "DEFAULT")
|
d7ebbc950d0f0831e0e670ac43276656dc01b5a2
| 22,243
|
def parse_uri(uri):
"""Parse the doozerd URI scheme to get node addresses"""
if uri.startswith("doozer:?"):
        _, params = uri.split("?", 1)
addrs = []
for param in params.split("&"):
key, value = param.split("=", 1)
if key == "ca":
addrs.append(value)
return addrs
else:
raise ValueError("invalid doozerd uri")
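# Illustrative usage (editor's addition; addresses are hypothetical):
# >>> parse_uri("doozer:?ca=127.0.0.1:8046&ca=127.0.0.1:8047")
# ['127.0.0.1:8046', '127.0.0.1:8047']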
|
deddb9da40303656baf47942153500ca878596bf
| 22,244
|
from typing import List
from typing import Any
def _get_lemmas(synsets: List[Any]) -> List[str]:
"""
Return all the lemma names associated with a list of synsets.
"""
return [lemma_name for synset in synsets for lemma_name in synset.lemma_names()]
|
43b6be39b733c9fee82958476b32b4ab296c5276
| 22,245
|
def compile_per_chrom(hbar_list):
"""Return [{chr: upper: lower:}]"""
if hbar_list == []:
return []
mylist = []
comp = {"chr": None , "xranges":[], "upper":[], "lower":[]}
mylist.append(comp)
for i in hbar_list:
# same chromosome, add to lists
if mylist[-1]['chr'] is None:
mylist[-1]['chr'] = i["chr"]
mylist[-1]['xranges'].append(i["xranges"])
mylist[-1]['upper'].append(i["hbar_upper"])
mylist[-1]['lower'].append(i["hbar_lower"])
elif mylist[-1]['chr'] == i["chr"]:
mylist[-1]['xranges'].append(i["xranges"])
mylist[-1]['upper'].append(i["hbar_upper"])
mylist[-1]['lower'].append(i["hbar_lower"])
else:
mylist.append({"chr": i["chr"],
"xranges":[i["xranges"]],
"upper":[i["hbar_upper"]],
"lower":[i["hbar_lower"]]})
return mylist
|
c8eac7ae54bd39245414cc95533e379453c21bce
| 22,246
|
def get_search_context(pos_tags, i, n, head=0, tail=0):
"""
    Return the tokens pos_tags[i:i+n] joined by spaces, optionally widened by up
    to `head` leading and `tail` trailing context tokens (when within bounds).
    If any context is requested (head + tail > 0), the tokens of the original
    span are uppercased to mark them within the wider window.
"""
j = i + n
if i - head >= 0:
i -= head
if j + tail < len(pos_tags):
j += tail
n = j - i
words = [ pos_tag[0].upper() if ((head+tail > 0) and (head <= k < n-tail))
else pos_tag[0] for pos_tag, k in zip(pos_tags[i:j], range(n)) ]
return ' '.join(words)
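# Illustrative usage (editor's addition; NLTK-style (word, tag) pairs are hypothetical):
# the core span pos_tags[2:3] is uppercased once context is added around it.
# >>> tags = [('a', 'DT'), ('quick', 'JJ'), ('fox', 'NN'), ('ran', 'VBD'), ('off', 'RP')]
# >>> get_search_context(tags, 2, 1, head=1, tail=1)
# 'quick FOX ran'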
|
f346ea0b8fd31493e734755f0bece054125cf9d1
| 22,247
|
def markSigLoops(key, loops):
"""
Mark the significance of different loops.
"""
    sig = lambda x: x.binomial_p_value <= 1e-10 and x.FDR <= 0.05 and x.ES >= 2
for loop in loops:
if sig(loop):
loop.significant = 1
else:
loop.significant = 0
return key, loops
|
5976c4e554dc8cea4cd12d638facbd696a6ee1ab
| 22,248
|
def get_context(nts):
"""return context of nucleotide"""
if len(nts) < 5:
return None
if nts[2] == 'C':
if nts[3] == 'G':
context = 'CG'
elif nts[4] == 'G':
context = 'CHG'
else:
context = 'CHH'
else:
if nts[1] == 'C':
context = 'CG'
elif nts[0] == 'C':
context = 'CHG'
else:
context = 'CHH'
return context
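# Illustrative usage (editor's addition; 5-nt windows are hypothetical, cytosine at index 2):
# >>> get_context('AACGA')
# 'CG'
# >>> get_context('AACTA')
# 'CHH'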
|
200de8e65277c6153974ce1575e574151cb85d53
| 22,249
|
def normalize(x):
"""
    Min-max normalize input data to the range [0.0, 1.0].
    Args:
        x (NDarray): Input array.
Returns:
Normalized NDarray
"""
return (x-x.min())/(x.max()-x.min()) # Normalize (0.0-1.0)
|
5e6adbaff542afd54490665bc1764aa3c2688545
| 22,250
|
def get_linear_equation_parameters(rho, sens_1, spec_1, cov_1, sens_2, spec_2, cov_2):
"""
Get the slope and intercept of the linear equation of the intersection of two planes (equation A3.3)
    Returns (None, None) if the planes are parallel (ZeroDivisionError).
"""
try:
denominator = (cov_1 * (1 - rho) * (1 - spec_1) + cov_1 - 1) - (cov_2 * (1 - rho) * (1 - spec_2) + cov_2 - 1)
slope = ((cov_2 * rho * (1 - sens_2) + cov_2 - 1) - (cov_1 * rho * (1 - sens_1) + cov_1 - 1)) / denominator
intercept = ((1 - cov_2) - (1 - cov_1)) / denominator
if str(intercept) == '-0.0':
intercept = 0.0
if str(slope) == '-0.0':
slope = 0.0
return slope, intercept
except ZeroDivisionError:
return None, None
|
15321849940c7e09ed1a6ea84e20fd0b4d9b82d0
| 22,251
|
import json
import os
def load_config(config_file):
"""
Load the SCM configuration from a JSON configuration file.
Expects that the config file has three values:
{
"local": <local path to repo>,
"remote": <path to remote repo>,
"rev": <target revision>
}
"""
    with open(config_file) as f:
        data = json.load(f)
if not os.path.isabs(data['local']):
# assume that the data path is relative to the configuration file.
config_file_dir = os.path.dirname(config_file)
data['local'] = os.path.join(config_file_dir, data['local'])
return data
|
d1a62aa2a40b7250f094837eb62aeb3c2e2e6bed
| 22,252
|
def find_epsilon(epsilon, epoch, update_epsilon, start_updates=499):
""" Updates epsilon ("random guessing rate") based on epoch. """
if epsilon <= 0.1:
epsilon = 0.1
elif (not (epoch % update_epsilon)) and epoch > start_updates:
epsilon -= 0.1
return epsilon
|
b68bb8395663eb0774931915f30f42a3d0de5761
| 22,253
|
import functools
def compose_functions(*func_list):
"""
    Compose functions right-to-left, i.e. compose_functions(f, g)(x) == f(g(x)).
    References:
        https://mathieularose.com/function-composition-in-python/
"""
def apply_composition(f, g):
def compose(x):
return f(g(x))
return compose
composed_func = functools.reduce(apply_composition, func_list)
return composed_func
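# Illustrative usage (editor's addition): composition is right-to-left, so the result is f(g(x)).
# >>> compose_functions(lambda x: x + 1, lambda x: x * 2)(3)
# 7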
|
2300562314f6ebd4fe1155181f933f3db3446091
| 22,254
|
import inspect
def LoadHitSignal(ref_date, option_only=True):
"""
Load signal hit history in database.
Return all or option only signal with wide or long format
Args:
ref_date (str): Date in YYYY-MM-DD format, e.g. 2018-01-01
option_only (bool): Specify whether the signal are for option only stocks. Default true
Returns:
df.signal (Dataframe): Stock price dataframe with calculated signal in the input date only
Example:
LoadHitSignal(ref_date = '2020-01-10')
"""
    # NOTE: currently a stub; it returns its own function name instead of the documented dataframe.
    func_name = inspect.stack()[0][3]
    return func_name
|
07837a1a9c91c703037ba86822693a9704989e2c
| 22,255
|
def augument_data(df):
"""As many labels seem to be given incorrectly (messages contain category keywords but aren't labeled as them)
, when label keyword occurs in text it should be relevant for the label
INPUT:
df (pd.DataFrame) - cleaned data with messages and categories
OUTPUT:
    df (pd.DataFrame) - augmented data """
category_names = list(df.columns[5:-1])
for category in category_names:
category_str= category.replace("_", " ")
#numberofcases = len(df[(df["message"].str.contains(category_str)) & (df[category]==0)])
print(len(df[(df["message"].str.contains(category_str)) & (df[category]==0)]), "changes made to column", category)
df.loc[(df["message"].str.contains(category_str)) & (df[category]==0), category] = 1
print(len(df.loc[((df[category]==1) & (df["related"]==0) ), "related"] ), "updates to column 'related'")
df.loc[((df[category]==1) & (df["related"]==0) ), "related"] = 1
df.loc[((df["related"]==1) & (df["not_related"]==1) ), "not_related"] = 0
return df
|
01e0a46426035850ccfa1fcaf88bc605d08bb515
| 22,256
|
import argparse
import numpy
import copy
import pickle
def score_spoof(seq, correl, settings):
"""
Spoof a score for a given residue sequence according to some internal rules.
This function is used only for testing purposes, when settings.SPOOF = True. It plays no role in production.
Spoofing is done by assigning a random magnitude to each position proportional to the covariance score and a random
type of residue (positive, negative, aromatic, hydrophobic, polar, bulky, small) that is preferred in that position.
Then a random synergy score is applied for each pair of residues and this is multiplied by each individual score.
Finally, a small amount of random noise is added.
Parameters
----------
seq : list
The sequence of three-letter residue codes for this mutant
correl : list
seq-length list of correlation scores by residue
settings : argparse.Namespace
Settings namespace
Returns
-------
score : float
Spoofed score value for the given sequence
"""
all_resnames = ['ARG', 'HIS', 'LYS', 'ASP', 'GLU', 'SER', 'THR', 'ASN', 'GLN', 'CYS', 'GLY', 'PRO', 'ALA', 'VAL',
'ILE', 'LEU', 'MET', 'PHE', 'TYR', 'TRP']
noise = 0.001 # weight factor for random noise (multiplied by random from normal distribution)
syn_weight = 0.001 # weight factor for synergy terms
# First, set up the latent algorithm if it hasn't already been
if not settings.spoof_latent:
mutability = 0.05 # likelihood that a residue will be assigned a different ideal type than its current type
settings.spoof_latent = argparse.Namespace() # initialize
settings.spoof_latent.weights = [] # randomly chosen weights proportional to correl
settings.spoof_latent.ideals = [] # randomly chosen ideal residue type, odds to be different from wild type are given by mutability
settings.spoof_latent.synergies = numpy.ones([len(seq),len(seq)]) # random synergy map
resid = -1 # initialize resid
for res in seq:
resid += 1 # increment resid
weight = numpy.random.normal() * correl[resid]
settings.spoof_latent.weights.append(weight)
if res in ['ARG', 'LYS', 'HIS']:
wt_type = 'positive'
elif res in ['ASP', 'GLU']:
wt_type = 'negative'
elif res in ['TRP', 'PHE', 'TYR']:
wt_type = 'aromatic'
elif res in ['LEU', 'ILE', 'VAL', 'MET']:
wt_type = 'hydrophobic'
elif res in ['SER', 'THR', 'GLN', 'ASN']:
wt_type = 'polar'
elif res in ['ALA', 'GLY']:
wt_type = 'small'
elif res == 'CYS':
wt_type = 'CYS'
elif res == 'PRO':
wt_type = 'PRO'
else:
raise RuntimeError('Unrecognized residue name: ' + res)
if numpy.random.rand() > mutability:
settings.spoof_latent.ideals.append(wt_type)
else:
settings.spoof_latent.ideals.append(['positive', 'negative', 'aromatic', 'hydrophobic', 'polar', 'small', 'CYS', 'PRO'][numpy.random.randint(0, 8)])
for other_index in range(len(seq)):
if other_index == resid:
continue
settings.spoof_latent.synergies[resid, other_index] = numpy.random.normal()
settings.spoof_latent.synergies[other_index, resid] = settings.spoof_latent.synergies[resid, other_index]
if not settings.dont_dump:
temp_settings = copy.copy(settings) # initialize temporary copy of settings to modify
temp_settings.__dict__.pop('env') # env attribute is not picklable
pickle.dump(temp_settings, open(settings.working_directory + '/settings.pkl', 'wb'))
score = 0 # initialize score
resid = -1
for res in seq:
resid += 1
ideal = settings.spoof_latent.ideals[resid]
if res in ['ARG', 'LYS', 'HIS'] and ideal == 'positive':
ideal_type = True
elif res in ['ASP', 'GLU'] and ideal == 'negative':
ideal_type = True
elif res in ['TRP', 'PHE', 'TYR'] and ideal == 'aromatic':
ideal_type = True
elif res in ['LEU', 'ILE', 'VAL', 'MET'] and ideal == 'hydrophobic':
ideal_type = True
elif res in ['SER', 'THR', 'GLN', 'ASN'] and ideal == 'polar':
ideal_type = True
elif res in ['ALA', 'GLY'] and ideal == 'small':
ideal_type = True
elif res == 'CYS' and ideal == 'CYS':
ideal_type = True
elif res == 'PRO' and ideal == 'PRO':
ideal_type = True
else:
ideal_type = False
this_score = 0
if not ideal_type:
this_score += settings.spoof_latent.weights[resid] * -1
else:
this_score += settings.spoof_latent.weights[resid]
this_score += noise * numpy.random.normal()
syn_score = 0
for other_index in range(len(seq)):
if other_index == resid:
continue
syn_score += syn_weight * settings.spoof_latent.synergies[resid, other_index] * this_score
score += this_score + syn_score
return score
|
4828c2db861c6f0c317674ba23a669a220b4a598
| 22,257
|
def rsplit(_str, seps):
    """
    Splits _str by the first sep in seps that is found from the right side.
    Returns a tuple without the separator, or None if no separator is found.
    """
    for idx, ch in enumerate(reversed(_str)):
        if ch in seps:
            return _str[:len(_str) - idx - 1], _str[len(_str) - idx:]
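# Illustrative usage (editor's addition): the split happens at the right-most matching separator.
# >>> rsplit("a.b:c", ".:")
# ('a.b', 'c')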
|
9755b328e0b414721a7db4fe52293100bb03d1a8
| 22,258
|
def compare(numA, numB):
"""
compare(numA, numB):
Compares two numbers. Returns:
1, if the first number is greater than the second,
0, if they are equal,
-1, if the first number is smaller than the second.
Parameters
----------
numA: integer or float
    numB: integer or float
Returns
-------
integer
"""
if numA > numB:
return 1
if numA < numB:
return -1
if numA == numB:
return 0
|
7ad62cb677882d22b32adb517a31a4149685ecef
| 22,259
|
def get_map_winrate(map_list, map_uni, results):
""" Computes the inrate of the team over each map
Returns 3 dictionnaries with unique map names as keys.
map_wr stores the winrate for each map
map_w stores the number of wins for each map
map_p stores the number of times each map was played
"""
map_w = [0]*len(map_uni)
map_p = [0]*len(map_uni)
for i in range(0, len(map_uni)):
map_p[i] = sum([1 for mapp in map_list if mapp == map_uni[i]])
map_w[i] = sum([1 for j in range(0, len(map_list)) if ((map_list[j] == map_uni[i]) and (results[j] == 1))])
map_wr = [map_w[i]/map_p[i]*100 for i in range(0, len(map_uni))]
return map_wr, map_w, map_p
|
e3ef1208f9c2ef14f94f3ee7f0c2f8ce0fe1e547
| 22,262
|
def atom_footer():
"""No templating here, just returning a
text string. We may want to expand this later."""
return "</feed>\n"
|
b8ea5305c0b7a1d6691f2e009e12d55a8d6a51ed
| 22,263
|
def ratio_col(df, df_cols): # Tested [Y]
"""
    This function computes the ratio between two columns and returns it as a Series.
    Note: the numerator column of the input Dataframe is overwritten in place.
Args
df (pd.DataFrame): Dataframe containing the columns to compute a ratio.
df_cols (tuple): A tuple containing the names of columns in the Dataframe to use in computing the ratio.
Format is (<numerator>, <denominator>)
Return
        (pd.Series) The ratio of the two columns specified in the df_cols argument
"""
df[df_cols[0]] = df[df_cols[0]].div(df[df_cols[1]].values, axis=0)
return df[df_cols[0]]
|
a4bfc13a5e87604ddae865f6df3b9b123359be52
| 22,264
|
def clean_hanging_newline(t):
"""
Many editors will silently add a newline to the final line of a
document (I'm looking at you, Vim). This function fixes this common
problem at the risk of removing a hanging newline in the rare cases
where the user actually intends it.
"""
if t and t[-1] == "\n":
return t[:-1]
return t
|
16c80b00530ef333ce1ad39ebf083e12a04ab58b
| 22,266
|
def has_video_search_args(request):
"""
Returns whether the object has any video search filter args in it
"""
search_kws = (
"q",
"location",
"channel_ids",
"collection_id",
"tag_ids",
"date",)
for kw in search_kws:
if getattr(request, kw, None) is not None:
return True
|
49dcbdc681be0867c80fc9c8e90a1cbf10a9f5bb
| 22,267
|
def _parse_one_level_list(dat):
"""Get list information from queued message
Args:
dat (bytes): received message data
Returns:
list: list of information
"""
results = []
count = int.from_bytes(dat[:2], 'big')
for i in range(count):
base = 2 + 32 * i
results.append(dat[base:base + 32])
return results
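# Illustrative usage (editor's addition; the payload is hypothetical):
# a 2-byte big-endian count followed by 32-byte entries.
# >>> dat = (2).to_bytes(2, 'big') + b'A' * 32 + b'B' * 32
# >>> _parse_one_level_list(dat) == [b'A' * 32, b'B' * 32]
# True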
|
6c090cb9e396c1a5ae273f250fd7b0a88fcf418a
| 22,269
|
def parse_predecessor_ids(predecessor_ids_string):
"""Parses a comma seperated list of task IDs
Args:
predecessor_ids_string: [string] comma separated task IDs
Returns:
List of task IDs as integers
"""
predecessor_ids = []
predecessor_ids_strings = predecessor_ids_string.split(',')
for predecessor_id_string in predecessor_ids_strings:
if predecessor_id_string != '':
predecessor_ids.append(int(predecessor_id_string))
return predecessor_ids
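# Illustrative usage (editor's addition): empty fields are skipped.
# >>> parse_predecessor_ids('3,5,,8')
# [3, 5, 8]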
|
a8c344c5c51a5735899515c10df7b51349d4971e
| 22,270
|
import re
def clean_description(solic_description):
"""
Separates 'DESCRIPTION' and 'KEYWORDS' section of solicitation text.
"""
stringer = solic_description
description = stringer[stringer.find('DESCRIPTION:')+len('DESCRIPTION: ') : stringer.find('KEYWORDS:')]
description = description.replace('</p><p>', '')
keywords = stringer[stringer.find('KEYWORDS:')+len('KEYWORDS: ') : stringer.find('<p>References:')]
keywords = keywords.split(',')
keywords = [w.replace('</p>','') for w in keywords]
keywords = [w.replace('<p>','') for w in keywords]
keywords = [w.replace(' p ','') for w in keywords]
keywords = [w.replace('nbsp','') for w in keywords]
keywords = [re.sub(pattern = r'[^\w]', repl = " ", string = word ) for word in keywords]
keywords = [w.lower() for w in keywords]
keywords = [i for i in keywords if i]
keywords = [w.strip() for w in keywords]
return description, keywords
|
5bc5582bd06f00255597894654517f810484bd92
| 22,272
|
import socket
def _receive_devices(port: int, sock: socket.socket, devices: list) -> list:
"""
After sending the device discovery request, collect all available
devices
Args:
port (int): Local port
sock (socket.socket): Socket object
devices (list): List of available devices.
[(device_name: str, ip: str)]
Returns:
list: Updated list of available devices.
[(device_name: str, ip: str)]
"""
try:
while True:
data, _ = sock.recvfrom(port)
split_data = data.decode("utf-8").split(":")
if len(split_data) == 3:
devices.append((split_data[0].strip(), split_data[2].strip()))
else:
raise ValueError(data)
except (socket.timeout, OSError):
pass
return devices
|
176a6606e5179f28edefa56c0d79474715d15b5b
| 22,274
|
def update_lr(optimizer,lr):
"""Scheduler to update learning rate at every iteration"""
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
|
5e4b8bc9b435b502b1e4c7c2eb4299be7222092d
| 22,277
|
def parse_patient_record(patient_record):
"""Parse patient record.
Args:
patient_record (object): object from DICOMDIR level 0 object
Returns:
children_object
appending_keys
"""
patient_id = patient_record.PatientID
#patient_name = patient_record.PatientName
return patient_record.children, patient_id
|
cce9b64bb07e3303dcab89f79310f9c878c91889
| 22,278
|
from typing import List
import os
def find_yaml_files(base_dir: str) -> List[str]:
"""find all of yaml files with recursion mode
Args:
base_dir (str): the base directory of files
Returns:
List[str]: the yaml files
"""
files_or_directories: List[str] = []
# 1. find all files/directories in the current dir
for file_or_dir in os.listdir(base_dir):
path = os.path.join(base_dir, file_or_dir)
if os.path.isdir(path):
files_or_directories.extend(
find_yaml_files(path)
)
continue
        # 2. otherwise keep the file if it has a YAML extension
if file_or_dir.endswith('.yml') or file_or_dir.endswith('.yaml'):
files_or_directories.append(path)
return files_or_directories
|
9441a4f8ae04422c09bea366dc78814cdb3f5781
| 22,279
|
def extract_gif_param(proc: str):
"""
Extracts the parameter for an animated GIF, currently just the frame
display duration in milliseconds, from a string that ends with an
integer, in parentheses.
"""
a = proc.strip(")").split("(")
assert len(a) == 2
return int(a[1])
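# Illustrative usage (editor's addition; the processing string is hypothetical):
# >>> extract_gif_param('gif(40)')
# 40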
|
0b9f5f6cc7ecfe38ad36731fc28d932316b5b0b3
| 22,280
|
def test_unknown_template_error(a, b):
"""
>>> test_unknown_template_error(1, 2)
Traceback (most recent call last):
...
InvalidTemplateError: Unknown template type: T2
"""
return a + b
|
c4bd0fcce99ce0ea361a57f3702381cd9de19509
| 22,281
|
def hm(seg):
""" simple format hour:minute
"""
    return '%02d:%02d' % (seg // 60, seg % 60)
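# Illustrative usage (editor's addition): 125 minutes formats as hours:minutes.
# >>> hm(125)
# '02:05'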
|
5d664a646123c8b440b00de7f7012c248fa9f5f1
| 22,283
|
def work(num,x = 0):
"""thread worker function"""
for i in range(10000000):
x += 1
return x,x
|
708248cc304feea87beec1379d6530571d26dd8e
| 22,284
|
def R(a0,a1,a2,a3,a4,T):
"""
Troscompt et al (2009) coefficients using Faure et al (2004) equation:
log10(R) = sum(a_n T^{-n/6})
where n=0..4, R is presumably cm^3 s^-1
"""
return a0 + a1*T**(-1./6.) + a2*T**(-2./6.) + a3*T**(-3./6.) + a4*T**(-4./6.)
|
ea5111f0c745bfc396271092596b932822cd4ada
| 22,285
|
def permission_check(check):
"""
Class decorator for subclasses of PublicTask to sprinkle in re-usable
permission checks::
@permission_check(user_id_matches)
class MyTask(PublicTask):
def run_public(self, user_id):
pass
"""
def decorator(cls):
cls.check_permission = staticmethod(check)
return cls
return decorator
|
14880bf052c7659447dbd9388174860f8f74a133
| 22,286
|
def accepts(*types):
""" Function decorator. Checks that inputs given to decorated function
are of the expected type.
Parameters:
types -- The expected types of the inputs to the decorated function.
Must specify type for each parameter.
"""
def decorator(f):
def newf(*args):
assert len(args) == len(types)
argtypes = tuple(map(type, args))
if argtypes != types:
a = "in %s " % f.__name__
a += "got %s but expected %s" % (argtypes,types)
                raise TypeError(a)
return f(*args)
return newf
return decorator
|
0820d57edcc8e670eb18d1f250947ce25db8e87a
| 22,287
|
import numpy
def U_from_array(U_array):
"""Converts the U array from the optimizer to a bunch of column vectors.
Args:
U_array, numpy.array[N] The U coordinates in v, av, v, av, ...
Returns:
numpy.matrix[2, N/2] with [[v, v, v, ...], [av, av, av, ...]]
"""
return numpy.matrix(U_array).reshape((2, -1), order='F')
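# Illustrative usage (editor's addition): interleaved [v, av, v, av] unpacks column-major into two rows.
# >>> U_from_array(numpy.array([1.0, 2.0, 3.0, 4.0]))
# matrix([[1., 3.],
#         [2., 4.]])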
|
2b753ea171cd03cccc73a58c249f1db8232988a6
| 22,292
|
def python_list_to_cons_list(python_list):
"""
    Convert a plain Python list into a cons-style list.
    :param python_list: a Python list
    :return: a cons-style list
"""
if type(python_list) is list:
if len(python_list) == 0:
return None
else:
return python_list[0], python_list_to_cons_list(python_list[1:])
else:
print('Error! Not a Python list:', python_list)
exit(2)
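# Illustrative usage (editor's addition):
# >>> python_list_to_cons_list([1, 2, 3])
# (1, (2, (3, None)))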
|
714781d5aab025febc91fea0c2ac0a72a0761425
| 22,293
|
import torch
def log_mean_exp(mtx):
"""
    Returns the log of the mean of the exponential of the given matrix, taken over the columns of each row.
    Input: Tensor - a matrix of size n x k.
    Output: Tensor - a vector of length n.
"""
m, _ = torch.max(mtx, dim=1, keepdim=True)
outputs = m + (mtx - m).exp().mean(dim=1, keepdim=True).log()
outputs = outputs.squeeze(1)
return outputs
|
6a203d0ec2481888854d40cabffca7d1f3702a48
| 22,298
|
def get_GOI_record(record, *args):
""" defines a list of records corresponding to the GOI """
chrom = 'chr' + str(args[0])
start = int(args[1])
end = int(args[2])
if record['CHROM'] == chrom:
if end >= record['POS'] >= start:
return 1
else:
return 0
else:
return 0
|
8d0a4b543968be2de6329ef6c9b5a2ea52a821cc
| 22,299
|
import math
def calcFrag(values):
"""Given a set of parsed values that were generated by macs2 predictd,
see get_size fn, this fn calculates the estimated fragment size and
the sd.
**IMPORTANT: this fn is a python translation of the R code in
chilin2/modules/macs2_fragment/qc.py -- stat_frag_std
RETURNS: (estimated frag, sd)
"""
#calculate the estimated frag size: xmax
ymax = max(values['ycorr'])
i_ymax = values['ycorr'].index(ymax)
xmax = values['xcorr'][i_ymax]
#print(ymax, xmax)
#find expected
p_expect=sum([x* p/100.0 for (x,p) in zip(values['x'],values['positive'])])
m_expect=sum([x* m/100.0 for (x,m) in zip(values['x'],values['minus'])])
#print(p_expect, m_expect)
#calc sd
p_sd = math.sqrt(sum([((x - p_expect)**2)* p/100.0 \
for (x,p) in zip(values['x'],values['positive'])]))
m_sd = math.sqrt(sum([((x - m_expect)**2)* m/100.0 \
for (x,m) in zip(values['x'],values['minus'])]))
#print(p_sd, m_sd)
#FINAL avg std error
avg_sd = (p_sd + m_sd) /2.0
return (xmax, avg_sd)
|
dd4079265ca300760e364cfbfbcf5705dcf265f5
| 22,300
|
def solution(integers):
"""
Finds the two entries that sum to 2020 and returns their product.
Raises `ValueError` if there is no solution.
"""
inverse = set()
for n in integers:
if 2020 - n in inverse:
return n * (2020 - n)
inverse.add(n)
raise ValueError('no solution found')
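# Illustrative usage (editor's addition; Advent of Code 2020 sample): 1721 + 299 == 2020.
# >>> solution([1721, 979, 366, 299])
# 514579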
|
188aff6b889b7903361ac347fa3806f94a86d39e
| 22,301
|
import logging
def get_gp_kwargs(gp_config):
"""Extract keyword argument parameters for the Gaussian process layer."""
covmat_momentum = gp_config.get('covmat_momentum', 0.999)
# Extracts model parameter.
logging.info('gp_config.covmat_momentum = %s', covmat_momentum)
covmat_momentum = None if covmat_momentum < 0. else covmat_momentum
covmat_kwargs = dict(momentum=covmat_momentum)
# Assembles into kwargs dictionary.
gp_layer_kwargs = dict(covmat_kwargs=covmat_kwargs)
return gp_layer_kwargs
|
4909d8b5231bbae20ae17e1cdc1ae17b0ad6714f
| 22,302
|
def canon(raw_attr_name: str) -> str:
"""
Canonicalize input attribute name for indy proofs and credential offers.
Args:
raw_attr_name: raw attribute name
Returns:
canonicalized attribute name
"""
if raw_attr_name: # do not dereference None, and "" is already canonical
return raw_attr_name.replace(" ", "").lower()
return raw_attr_name
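# Illustrative usage (editor's addition): spaces are removed and case is folded.
# >>> canon('Given Name')
# 'givenname'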
|
71858810bc3a65864f4df3c8a2d9c714c12b3692
| 22,305
|
def TOC_schmoker1979(rho_b, A=157.0, B=58.1):
"""
    Schmoker (1979) method of TOC calculation from bulk density to estimate
TOC in devonian shales.
bulk density units: g/cc
"""
TOC = (A / rho_b) - B
return TOC
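# Illustrative usage (editor's addition; the bulk density value is hypothetical):
# 2.5 g/cc gives about 4.7 (157/2.5 - 58.1).
# >>> round(TOC_schmoker1979(2.5), 2)
# 4.7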
|
530e86cbd3ba1629549c0d5fcc6b0263397d3fa2
| 22,308
|
def read_vocabulary_prefixes(lm_filename, vocab_offset, vocab_length):
"""
Extract the set of all possible prefixes of vocabulary words from LM file
in kenlm binary format.
Args:
lm_filename (pathlib.Path)
vocab_offset (int)
vocab_length (int or None), None defaults to spanning until the end of file
Return:
set of str with all possible prefixes of the words in the vocabulary.
"""
if vocab_length is None:
vocab_length = lm_filename.stat().st_size - vocab_offset
if vocab_length <= 0:
raise RuntimeError("lm_vocabulary_offset parameter beyond the end of file.")
elif vocab_offset + vocab_length > lm_filename.stat().st_size:
raise RuntimeError("lm_vocabulary_offset + lm_vocabulary_length beyond the end of file.")
with open(str(lm_filename), 'rb') as lm_file:
lm_file.seek(vocab_offset)
vocab_data = lm_file.read(vocab_length)
if len(vocab_data) < 6 or vocab_data[:6] != b'<unk>\0':
raise RuntimeError(
"LM vocabulary section does not start with \"<unk>\\0\". Wrong value of lm_vocabulary_offset parameter? "
"lm_vocabulary_offset should point to \"<unk>\" in lm_file."
)
if vocab_data[-1:] != b'\0':
raise RuntimeError(
"The last byte is LM vocabulary strings section is not 0. Wrong value of lm_vocabulary_length parameter? "
"Omitting this parameter results in vocabulary strings section spanning to the end of file."
)
vocab_list = vocab_data[:-1].decode('utf8').split('\0')
def all_prefixes_word(word):
# Skipping the empty prefix
for prefix_len in range(1, len(word)+1):
yield word[:prefix_len]
def all_prefixes_vocab(vocab_list):
for word in vocab_list:
for prefix in all_prefixes_word(word):
yield prefix
yield ''
return set(all_prefixes_vocab(vocab_list))
|
3842b3a6f09dcbe868dd61f284e3b674d79eb83f
| 22,309
|
def get_dtype(table,name):
""" get the dtype of a field in a table (recarray)
given its name
"""
return table.dtype.fields[name][0].descr[0][1]
|
6d63ab0f80b955124ccc94363a860c345c1f92b5
| 22,311
|
def internal(fn):
"""
    Decorator which does not affect functionality; it is used as a marker
    indicating that this object is not interesting for users and is only used internally
"""
return fn
|
fa9a31962a4f45f7794ec42fc4f2507c52c4535a
| 22,312
|
import click
def _cb_shape(ctx, param, value):
"""
Click callback to validate `--shape`.
Returns
-------
tuple
(height, width)
"""
for v in value:
if not v >= 1:
raise click.BadParameter('values must be >= 1')
return value
|
5d978379ab21239dec12340347266cab7f0d14f2
| 22,313
|
def parse_database_credentials(db_credentials):
"""
Parsing database credentials to needed format
:param db_credentials: Dictionary
:return: Dictionary
"""
#
# Distinguish between database config scheme of TYPO3 v8+ and TYPO3 v7-
#
if 'Connections' in db_credentials:
_db_config = db_credentials['Connections']['Default']
_db_config['name'] = _db_config['dbname']
else:
_db_config = db_credentials
_db_config['user'] = _db_config['username']
_db_config['name'] = _db_config['database']
if 'port' not in _db_config:
_db_config['port'] = 3306
return _db_config
|
c8e23607f8c2715c8532f0d0d8f653268a2e2813
| 22,315
|
def remove_automaton_states(waut, pred_fn):
"""
Make a copy of L{waut}, with states removed for which L{pred_fn} does not
hold.
@param waut: Input automaton.
@type waut: L{WeightedAutomaton}
@param pred_fn: Predicate function.
@type pred_fn: C{func} from L{WeightedState} to C{bool}
@return: Resulting automaton.
@rtype: L{WeightedAutomaton}
"""
res_waut = waut.copy()
for state in waut.get_states():
if not pred_fn(state):
res_waut.remove_state(res_waut.get_state(state.number))
return res_waut
|
e182ada8642894ef20238c7069e612e09b5ff020
| 22,316
|
from typing import List
def golomb_lst(number: int=1) -> List[int]:
"""Последовательность Голомба
Вычисления проводятся по рекурсивной формуле на основе списка.
Wiki:
https://en.wikipedia.org/wiki/Golomb_sequence
:param number: количество чисел, defaults to 1
:type number: int, optional
:return: n первых чисел последовательности Голомба
:rtype: List[int]
"""
sequence = [1]
for i in range(1, number):
sequence.append(1 + sequence[i - sequence[sequence[i - 1] - 1]])
return sequence
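# Illustrative usage (editor's addition): the first eight terms of the Golomb sequence.
# >>> golomb_lst(8)
# [1, 2, 2, 3, 3, 4, 4, 4]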
|
3f429c89081b5c9b30ebef192e0124086b1737cb
| 22,317
|
from typing import Set
def get_subclasses(cls) -> Set:
"""Returns the subclasses of the specified class, recursively."""
return set(cls.__subclasses__()).union(
[s for c in cls.__subclasses__() for s in get_subclasses(c)])
|
2247d1e0ae33904d2019591cd939702fdf4cc26a
| 22,319
|
def safe_module_name(n):
"""Returns a module name which should not conflict with any other symbol."""
if n:
return "_mod_" + n.replace(".", "_")
return n
|
2c12f48a97a983f69fa39b3b94eb642157d212bf
| 22,320
|
def available(test):
"""
Check if MongoDB is compiled with OpenSSL support
"""
return 'OpenSSLVersion' in test.tester.info \
or 'openssl' in test.tester.info
|
e502dd7041f6f3eff4deda6c37640e6c19aef4fb
| 22,322
|
def staff_check(user):
"""A method that checks if a user is a memeber of staff and returns true.
It is used by the @user_passes_test() decorator to lock away views that should
only be accessed by staff memebers.
Returns:
Bool: The boolean indicating if a user is a staff memeber or not.
"""
return user.is_staff
|
e5832ceb205c31c9d6ff3bdacfaf5c7f6135c024
| 22,323
|
import glob
import os
from datetime import datetime
def get_restart_times(wdir, end_time):
"""
Search for restart files, select the most recent one, and update the model start and end times.
Parameters
----------
wdir : str
Path of the simulation folder that contains the restart files.
end_time : datetime.datetime
End time of simulation.
Returns
-------
    run_hours : float, int, or None
        Remaining simulation hours for the restart run; -1 if the run is already complete, None if no restart files exist.
rst_opt : dict
Updated namelist options for restart run.
"""
# check if already finished
runlogs = glob.glob(wdir + "/run_*.log")
if len(runlogs) > 0:
if len(runlogs) > 1:
timestamp = sorted([r.split("/")[-1].split("_")[1].split(".")[0] for r in runlogs])[-1]
runlog = wdir + "/run_{}.log".format(timestamp)
else:
runlog = runlogs[0]
with open(runlog) as f:
runlog = f.readlines()
if "d01 {} wrf: SUCCESS COMPLETE WRF\n".format(end_time) in runlog:
print("Run already complete")
return -1, None
# search rst files and determine start time
rstfiles = os.popen("ls -t {}/wrfrst*".format(wdir)).read()
if rstfiles == "":
print("no restart files found. Run from start...")
return None, None
restart_time = rstfiles.split("\n")[0].split("/")[-1].split("_")[-2:]
print("Restart run from {}".format(" ".join(restart_time)))
start_time_rst = datetime.strptime("_".join(restart_time), '%Y-%m-%d_%H:%M:%S')
times = {}
rst_date, rst_time = restart_time
times["start"] = rst_date.split("-")
times["start"].extend(rst_time.split(":"))
end_time_dt = datetime.strptime(end_time, '%Y-%m-%d_%H:%M:%S')
end_d, end_t = end_time.split("_")
times["end"] = end_d.split("-")
times["end"].extend(end_t.split(":"))
run_hours = (end_time_dt - start_time_rst).total_seconds() / 3600
if run_hours <= 0:
print("Run already complete")
return -1, None
rst_opt = "restart .true."
for se in ["start", "end"]:
for unit, t in zip(["year", "month", "day", "hour", "minute", "second"], times[se]):
rst_opt += " {}_{} {}".format(se, unit, t)
return run_hours, rst_opt
|
b9245f5a2bca98bcf1f84c45c7fb09a9a402458f
| 22,324
|
def compute_source_marker(line, column, expression, size):
"""Computes source marker location string.
>>> def test(l, c, e, s):
... s, marker = compute_source_marker(l, c, e, s)
... out = s + '\\n' + marker
...
... # Replace dot with middle-dot to work around doctest ellipsis
... print(out.replace('...', '···'))
>>> test('foo bar', 4, 'bar', 7)
foo bar
^^^
>>> test('foo ${bar}', 4, 'bar', 10)
foo ${bar}
^^^
>>> test(' foo bar', 6, 'bar', 6)
··· oo bar
^^^
>>> test(' foo bar baz ', 6, 'bar', 6)
··· o bar ···
^^^
The entire expression is always shown, even if ``size`` does not
    accommodate it.
>>> test(' foo bar baz ', 6, 'bar baz', 10)
··· oo bar baz
^^^^^^^
>>> test(' foo bar', 10, 'bar', 5)
··· o bar
^^^
>>> test(' foo bar', 10, 'boo', 5)
··· o bar
^
"""
s = line.lstrip()
column -= len(line) - len(s)
s = s.rstrip()
try:
i = s[column:].index(expression)
except ValueError:
# If we can't find the expression
# (this shouldn't happen), simply
# use a standard size marker
marker = "^"
else:
column += i
marker = "^" * len(expression)
if len(expression) > size:
offset = column
size = len(expression)
else:
window = (size - len(expression)) / 2.0
offset = column - window
offset -= min(3, max(0, column + window + len(expression) - len(s)))
offset = int(offset)
if offset > 0:
s = s[offset:]
r = s.lstrip()
d = len(s) - len(r)
s = "... " + r
column += 4 - d
column -= offset
# This also adds to the displayed length
size += 4
if len(s) > size:
s = s[:size].rstrip() + " ..."
return s, column * " " + marker
|
dcfd8bc74a83f3b2c7431a2a97c16c58c1b84742
| 22,325
|
def _get_value_for_key(lines, key):
"""Given list of |lines| with colon separated key value pairs,
return the value of |key|."""
for line in lines:
parts = line.split(':')
if parts[0].strip() == key:
return parts[1].strip()
return None
|
e08bad43f5b095632ef217a4ef7c8a6344d5d32f
| 22,326
|
def extract_course(transcript, course):
""" (str, int) -> str
Return a string containing a course code, course mark and final mark,
in that order. The second argument specifies the order in which the course
appears in the transcript.
>>> extract_course('MAT,90,94,ENG,92,NE,CHM,80,85', 2)
'ENG,92,NE'
>>> extract_course('MAT,90,94,ENG,92,NE,CHM,80,85', 4)
''
"""
if 1 <= course < 4:
        index = 0
        if course == 2:
            index = 10
        elif course == 3:
            index = 20
return transcript[index: index + 9]
else:
return ''
|
6983fa4ec608b068e83a05457d18ca190f37f50a
| 22,328
|
import re
def cleanStr(x):
""" Clean strings so they behave.
For some modules, uniqIDs and groups cannot contain spaces, '-', '*',
'/', '+', or '()'. For example, statsmodel parses the strings and interprets
them in the model.
:Arguments:
x (str): A string that needs cleaning
:Returns:
x (str): The cleaned string.
"""
if isinstance(x, str):
x = re.sub(r'^-([0-9].*)', r'__\1', x)
x = x.replace(' ', '_')
x = x.replace('.', '_')
x = x.replace('-', '_')
x = x.replace('*', '_')
x = x.replace('/', '_')
x = x.replace('+', '_')
x = x.replace('(', '_')
x = x.replace(')', '_')
x = x.replace('[', '_')
x = x.replace(']', '_')
x = x.replace('{', '_')
x = x.replace('}', '_')
x = x.replace('"', '_')
x = x.replace('\'', '_')
x = re.sub(r'^([0-9].*)', r'_\1', x)
return x
|
f04752aba472c870abbe4336c5284a6c6d35e409
| 22,330
|
import os
def get_cal_id():
"""
    Gets the ID of the calendar that this script should write to. This ID should
belong to the user that logged in from get_google_creds()
1. From the cal_id.txt file
2. From GOOGLE_MCU_CALENDAR_ID environment variable
"""
if os.path.exists('cal_id.txt'):
with open('cal_id.txt', 'r', encoding='UTF-8') as reader:
return reader.read().strip()
if 'GOOGLE_MCU_CALENDAR_ID' in os.environ:
return os.environ['GOOGLE_MCU_CALENDAR_ID']
# This would normally be secret, but this project is so people can add this calendar
# to their calendars, and this information is on the iCal url, so why hide it?
return "unofficial.mcu.calendar@gmail.com"
|
7473d23114d2d8a4fb502d6cfaec1a75b627ba0a
| 22,331
|
from typing import Optional
def prompt_yes_no(question: str, default: Optional[bool] = None) -> bool:
"""
Prompts the user a yes/no question and returns their choice.
"""
if default is True:
prompt = "[Y/n]"
elif default is False:
prompt = "[y/N]"
else:
prompt = "[y/n]"
text = f"{question} {prompt} "
wrong_reply = "Please reply with 'yes'/'y' or 'no'/'n'."
while True:
response = input(text).strip().lower()
if response in {"yes", "ye", "y"}:
return True
if response in {"no", "n"}:
return False
if response == "" and default is not None:
return default
print(wrong_reply)
|
d66ac36e51795f5b63fd0ddf482ae9e1529bc02a
| 22,333
|
def first(iterable, condition=lambda x: True):
"""Return the first item in the `iterable` that satisfies the `condition`.
If the condition is not given, returns the first item of the iterable.
Raises `StopIteration` if no item satisfying the condition is found.
Parameters
----------
iterable : iterable
condition : callable
callable which returns true when the element is eligible as return value
"""
return next(x for x in iterable if condition(x))
|
b031650e39a1acf5185a6760622c2197e34b21e1
| 22,334
|
import numpy
def gramian_eigenspaces(gramian, eps=1e-5):
"""Returns eigenspaces of a Gramian matrix."""
# .eigh() will give us an orthonormal basis, while .eigvals() typically would
    # not (even for a Hermitian matrix).
eigvals, eigvecsT = numpy.linalg.eigh(gramian)
eigenspaces = [] # [(eigval, [eigvec1, eigvec2, ...]), ...]
for eigval, eigvec in zip(eigvals, eigvecsT.T):
matching_eigenspaces = (
espace for espace in eigenspaces if abs(espace[0] - eigval) <= eps)
try:
espace = next(matching_eigenspaces)
espace[1].append(eigvec)
except StopIteration: # Did not have a matching eigenvalue.
eigenspaces.append((eigval, [eigvec]))
return [(eigval, numpy.vstack(eigvecs))
for eigval, eigvecs in eigenspaces]
|
3e017cf98cec2cf258bd35f01416c93e58d8506c
| 22,335
|
def get_persistence(simplexTree, max_dimension):
"""Calculate the persistent homology of the abstract simplicial complex,
filtering by positive values and dimensions.
    :param simplexTree: a simplicial complex, as returned by `build_local_complex`
:type simplexTree: simplexTree
:param max_dimension: max dimension of persistent homology to be returned.
:type max_dimension: int.
:returns: persistence diagram in the Gudhi format.
:rtype: list of tuples (dim, (birth, death)).
"""
pd = simplexTree.persistence()
return [p for p in pd if (p[0] <= max_dimension) & (p[1][0] >= 0.)]
|
2b55afcc0503f55b8f08903c1164898abc675308
| 22,336
|
def find_modules(nn_module, type):
"""
Find and return modules of the input `type`
"""
return [module for module in nn_module.modules() if isinstance(module, type)]
|
d580e570843b7504ab91291fc3a173480c63f376
| 22,337
|
def how_many_can_list(num_market_listings: int, number_to_sell: int, num_in_inventory: int) -> int:
"""
How many items I can actually list on market to have number_to_sell on sale
    :param num_market_listings: Number of own listings on the market.
:param number_to_sell: Max number on sale
:param num_in_inventory: Number in inventory
:return: number that can be listed
"""
    if number_to_sell > num_market_listings:
        toList = number_to_sell - num_market_listings
        return min(toList, num_in_inventory)
    return 0
|
29a9844448ebd68710920b83378025ad9ffd74a3
| 22,338
|
from typing import Any
def sort_object(data_object: Any) -> Any:
""" helper function to sort objects """
if isinstance(data_object, dict):
for key, value in data_object.items():
if isinstance(value, (dict, list)):
data_object[key] = sort_object(value)
return dict(sorted(data_object.items()))
if isinstance(data_object, list) and data_object:
if not all(isinstance(entry, type(data_object[0])) for entry in data_object):
return data_object
if isinstance(data_object[0], (dict, list)):
return [sort_object(entry) for entry in data_object]
return sorted(data_object)
return data_object
|
f53899d6b42a047095185e94118b5d99be290708
| 22,339
|
from pathlib import Path
from typing import List
import os
def _collect_files(path: Path) -> List[Path]:
"""Collect all .py files in a package."""
    files_list: List[Path] = []
if os.path.isdir(path):
files = os.listdir(path)
for file in files:
files_list.extend(_collect_files((Path(path) / file).absolute()))
else:
if path.name.endswith(".py") and not path.name.startswith("__"):
files_list.append(Path(path).absolute())
return files_list
|
e8a9d8656758601afdca48fb2f57ae634d4b19b4
| 22,340
|
import six
def _encode_envvars(env):
"""Encode a hash of values.
:param env: A hash of key=value items.
:type env: `dict`.
"""
    for key, value in env.items():
        env[key] = six.text_type(value)
    return env
|
ca26d2a2e09b1f74161e3e01cd9d2c46282f1b9a
| 22,341
|
def index_of(index: int, of: int): # pylint: disable=invalid-name
"""Return '$index of $of'
Arguments:
index {int} -- index
of {int} -- count
Returns:
str -- string
"""
return '{!r} of {!r}'.format(index, of)
|
e51053c9baf1c8f7ed9d932c05fa39cec405588f
| 22,342
|
def predict_xlogvar_from_epslogvar(*, eps_logvar, logsnr):
"""Scale Var[eps] by (1+exp(-logsnr)) / (1+exp(logsnr)) = exp(-logsnr)."""
return eps_logvar - logsnr
|
9f6e6e6d49ff2d6f7622d59439961a298d262693
| 22,343
|
import math
def dcg_trec(r, k=None):
"""The `trec_eval` version of DCG
:param r: results
:param k: cut-off
    :return: sum rel_i / log2(i + 2)
"""
result = sum([rel / math.log(rank + 2, 2) for rank, rel in enumerate(r[:k])])
return result
|
5223776ddfe0a42eaf826cb72f69d1c91ec2f094
| 22,344
|
import os
def list_files(folder):
"""Return list of files under a folder"""
return [ x for x in os.listdir(folder) if os.path.isfile(os.path.join(folder, x)) ]
|
b4702c4b89bd5407fc831558a2f808adeedd0f8a
| 22,348
|
import sys
import codecs
def _b(message):
"""convert string to correct format for buffer object"""
    if sys.hexversion >= 0x030000f0:  # Python 3.0.0 final or later
return codecs.latin_1_encode(message)[0]
return message
|
bf146ff9e00b000510bba55c18067b11530ab406
| 22,349
|
import torch
def nentr(p, base=None):
"""
Calculates entropy of p to the base b. If base is None, the natural logarithm is used.
:param p: batches of class label probability distributions (softmax output)
:param base: base b
:return:
"""
eps = torch.tensor([1e-16], device=p.device)
if base:
base = torch.tensor([base], device=p.device, dtype=torch.float32)
return (p.mul(p.add(eps).log().div(base.log()))).sum(dim=1).abs()
else:
return (p.mul(p.add(eps).log())).sum(dim=1).abs()
|
8f54acbf406b1ed17a368c3a4808bf7add279079
| 22,350
|
def _strdel(s):
"""文字列の先頭と最後の分離記号を取り除く"""
return s.strip('<>|/(){} \n \t')
|
d1449c57d2de35fdcf15e6df9823111c2e1b69e4
| 22,351
|
def rainbow(x):
""" Eq. 3 of sron_colourschemes.pdf """
r = (0.472 - 0.567*x + 4.05*x*x) / (1.0 + 8.72*x - 19.17*x*x + 14.1*x*x*x)
g = 0.108932 - 1.22635*x + 27.284*x**2 - 98.577*x**3 + 163.3*x**4 - 131.395*x**5 + 40.634*x**6
b = 1.0 / (1.97 + 3.54*x - 68.5*x**2 + 243.*x**3 - 297.*x**4 + 125.*x**5)
return r, g, b
|
7565f1fb921f81c57102a6651dac27146a4fdc6c
| 22,352
|
import torch
def th_nearest_interp2d(input, coords):
"""
2d nearest neighbor interpolation torch.Tensor
"""
# take clamp of coords so they're in the image bounds
x = torch.clamp(coords[:, :, 0], 0, input.size(1) - 1).round()
y = torch.clamp(coords[:, :, 1], 0, input.size(2) - 1).round()
stride = torch.LongTensor(input.stride())
x_ix = x.mul(stride[1]).long()
y_ix = y.mul(stride[2]).long()
input_flat = input.view(input.size(0), -1).contiguous()
mapped_vals = input_flat.gather(1, x_ix.add(y_ix))
return mapped_vals.view_as(input)
|
8000f8da1bde82a7ad190fb26dc0af33b2a0f037
| 22,355
|
import unicodedata
def strip_accents_and_lowercase(s: str) -> str:
"""
Strip accents for greek.
Args:
s (str): Input text.
Returns:
str: Output text without accents.
"""
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn').lower()
|
1c93aab8f57862daeb7787fb6a4f7e7a158fb087
| 22,356
|
import os
def read_data_files_from_folder(foldername):
"""
Read all source files in folder
    Return a list of file contents, whereby each file content is a single string, together with the file names
:param foldername: name of the folder in which the data files to be read are located
    :return: (data, file_names) - a list of file contents and the corresponding file names
"""
# Helper variables
data = list()
file_names = list()
files_in_folder = os.listdir(foldername + "/")
# Loop over files in folder
for path in files_in_folder:
if path[0] != "." and path[-3:] == ".ll":
# If this isn't a hidden file and it is an LLVM IR file ('.ll' extension),
# open file and import content
with open(os.path.join(foldername, path)) as f:
data.append(f.read())
            # Add file name to list
file_names.append(path)
return data, file_names
|
c149b4d6ee8e82cba1c1545a1605b02753a3fa65
| 22,357
|
import random
def create_random_individual(variable_to_position, intervals_min, intervals_max):
"""
-> Create an individual (a dict structure)
where variable match to number of intervals for discretization
(init randomly between intervals_min and intervals_max)
-> intervals_min is always >= 2
    -> variable_to_position is a dict, obtained from the dichotomization.extract_matrix_from
function
"""
individual = {}
for variable_name in variable_to_position.values():
        intervals = random.randint(intervals_min, intervals_max)
individual[variable_name] = intervals
return individual
|
a637db461a7d511fe7196ac4fb5718104c1ca1c2
| 22,358
|
def qubo_to_ising(Q, offset=0.0):
"""Convert a QUBO problem to an Ising problem.
Map a quadratic unconstrained binary optimization (QUBO) problem :math:`x' Q x`
defined over binary variables (0 or 1 values), where the linear term is contained along
the diagonal of Q, to an Ising model defined on spins (variables with {-1, +1} values).
Return h and J that define the Ising model as well as the offset in energy
between the two problem formulations:
.. math::
x' Q x = offset + s' J s + h' s
See :meth:`~dimod.utilities.ising_to_qubo` for the inverse function.
Args:
Q (dict[(variable, variable), coefficient]):
QUBO coefficients in a dict of form {(u, v): coefficient, ...}, where keys
are 2-tuples of variables of the model and values are biases
associated with the pair of variables. Tuples (u, v) represent interactions
and (v, v) linear biases.
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
(dict, dict, float): A 3-tuple containing:
dict: Linear coefficients of the Ising problem.
dict: Quadratic coefficients of the Ising problem.
float: New energy offset.
Examples:
This example converts a QUBO problem of two variables that have positive
biases of value 1 and are positively coupled with an interaction of value 1
to an Ising problem, and shows the new energy offset.
>>> Q = {(1, 1): 1, (2, 2): 1, (1, 2): 1}
>>> dimod.qubo_to_ising(Q, 0.5)[2]
1.75
"""
h = {}
J = {}
linear_offset = 0.0
quadratic_offset = 0.0
for (u, v), bias in Q.items():
if u == v:
if u in h:
h[u] += .5 * bias
else:
h[u] = .5 * bias
linear_offset += bias
else:
if bias != 0.0:
J[(u, v)] = .25 * bias
if u in h:
h[u] += .25 * bias
else:
h[u] = .25 * bias
if v in h:
h[v] += .25 * bias
else:
h[v] = .25 * bias
quadratic_offset += bias
offset += .5 * linear_offset + .25 * quadratic_offset
return h, J, offset
|
d2df1b581612ab7f93aaf762915d62583a9df148
| 22,359
|
def find_indices(lst, element):
""" Returns the indices for all occurrences of 'element' in 'lst'.
Args:
lst (list): List to search.
element: Element to find.
Returns:
        list: List of indices
"""
result = []
offset = -1
while True:
try:
offset = lst.index(element, offset+1)
except ValueError:
return result
result.append(offset)
|
59df6c2dd7a4c8fd43895210503f7ae03d83618b
| 22,360
|
def convertVCFGenotype(vcfGenotype):
"""
Parses the VCF genotype
"""
if vcfGenotype is not None:
delim = "/"
if "|" in vcfGenotype:
delim = "|"
if "." in vcfGenotype:
genotype = [-1]
else:
            genotype = list(map(int, vcfGenotype.split(delim)))
else:
genotype = [-1]
return genotype
|
e505d1aa3e6c93e110672509084b6ee02d4f7307
| 22,362
|
def get_paths(cursor):
"""Get the currently watched paths."""
sql = "select path from games"
cursor.execute(sql)
paths = [row[0] for row in cursor.fetchall()]
return paths
|
1e3cd541970583bfc452b46bf9c5e635bf5555f4
| 22,363
|
import math
def angle(pos_x, pos_y):
""" Angle in degrees of 2D point """
angle_rad = math.atan(abs(pos_y/pos_x))
angle_degree = math.degrees(angle_rad)
return angle_degree
|
9d3b83c3bcb2415af50f5bad7268dd9a72542530
| 22,366
|
def _split_storage_url(storage_object_url):
""" Returns a list containing the bucket id and the object id. """
return storage_object_url.split("/")[2:]
|
8506d5071c3061cd73fc0e8ece09279ef39c377a
| 22,367
|
def encipher(message: str, cipher_map: dict) -> str:
"""
Enciphers a message given a cipher map.
:param message: Message to encipher
:param cipher_map: Cipher map
:return: enciphered string
>>> encipher('Hello World!!', create_cipher_map('Goodbye!!'))
'CYJJM VMQJB!!'
"""
return "".join(cipher_map.get(ch, ch) for ch in message.upper())
|
57053d93841dcc3982e18664a1a3ef6d85766788
| 22,370
|
def get_struct_instance_field_type(obj, field_name):
"""
:param obj: A ctypes struct instance
:param field_name: A name of a field in the struct
:return: The declared type of the field
"""
for field in obj._fields_:
current_field_name = field[0]
if current_field_name == field_name:
return field[1]
raise KeyError(field_name)
|
255e088d9f36db652d0cbb4f1c21846b35a9d748
| 22,371
|
import pkg_resources
import os
def image_path(image_name):
"""
Return the absolute path to an image
Parameters
----------
image_name : str
Name of image
Returns
-------
path : str
Full path to image
"""
try:
if pkg_resources.resource_exists('flika.images', image_name):
return pkg_resources.resource_filename('flika.images', image_name)
else:
raise RuntimeError("image does not exist: %s" % image_name)
except NotImplementedError: # workaround for mac app
result = os.path.dirname(__file__)
return os.path.join(result.replace('site-packages.zip', 'flika'),
image_name)
|
371b272e303f6a8ae2d12007d127160ad4450c26
| 22,372
|