| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def render_template(template, defaults):
"""Render script template to string"""
if not isinstance(template, Template):
filename = template.format(**defaults)
template = Template(filename=filename)
return template.format(**defaults)
| 15,500
|
def get_hash(string):
"""
FNV1a hash algo. Generates a (signed) 64-bit FNV1a hash.
See http://www.isthe.com/chongo/tech/comp/fnv/index.html for math-y details.
"""
encoded_trimmed_string = string.strip().encode('utf8')
assert isinstance(encoded_trimmed_string, bytes)
i64 = FNV1_64A_INIT
for byte in encoded_trimmed_string:
i64 = i64 ^ byte
i64 = (i64 * FNV_64_PRIME) % (2 ** 64)
# wrap the result into the full signed BIGINT range of the underlying RDBMS
if i64 > BIGGEST_64_INT:
i64 = SMALLEST_64_INT + (i64 - BIGGEST_64_INT - 1) # optimized CPU ops
return i64
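
# The snippet above assumes four module-level constants. A minimal sketch of
# plausible definitions (the standard 64-bit FNV-1a parameters plus the signed
# BIGINT bounds of the target RDBMS):
FNV1_64A_INIT = 0xcbf29ce484222325   # FNV-1a 64-bit offset basis
FNV_64_PRIME = 0x100000001b3         # FNV 64-bit prime
BIGGEST_64_INT = 2 ** 63 - 1         # largest signed 64-bit integer
SMALLEST_64_INT = -(2 ** 63)         # smallest signed 64-bit integer

# Usage: the result always fits into a signed 64-bit (BIGINT) column.
assert SMALLEST_64_INT <= get_hash("example") <= BIGGEST_64_INT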
| 15,501
|
def load_seq_sizes(def_vars):
"""Load sequence sizes."""
seq1_sizes, seq2_sizes = {}, {}
if def_vars.get("SEQ1_LEN"):
seq1_sizes = read_chrom_sizes(def_vars.get("SEQ1_LEN"))
elif def_vars.get("SEQ1_CTGLEN"):
seq1_sizes = read_chrom_sizes(def_vars.get("SEQ1_CTGLEN"))
else:
clean_die("", "Cannot find SEQ1_LEN|SEQ1_CTGLEN in the DEF file")
if def_vars.get("SEQ2_LEN"):
seq2_sizes = read_chrom_sizes(def_vars.get("SEQ2_LEN"))
elif def_vars.get("SEQ1_CTGLEN"):
seq2_sizes = read_chrom_sizes(def_vars.get("SEQ2_CTGLEN"))
else:
clean_die("", "Cannot find SEQ2_LEN|SEQ2_CTGLEN in the DEF file")
return seq1_sizes, seq2_sizes
| 15,502
|
def extract_source(source_path: Path) -> (Path, Optional[str]):
"""Extract the source archive, return the extracted path and optionally the commit hash stored inside."""
extracted_path = source_path.with_name(source_path.stem)
commit_hash = None
# Determine the source archive type before extracting it
# Inspired by: https://stackoverflow.com/a/13044946/7597273
magic_dict = {
b'\x1f\x8b\x08': 'gz',
b'\x42\x5a\x68': 'bz2',
b'\x50\x4b\x03\x04': 'zip',
}
max_len = max(len(x) for x in magic_dict)
with source_path.open('rb') as f:
file_start: bytes = f.read(max_len)
for magic, archive_type in magic_dict.items():
if file_start.startswith(magic):
break
else:
raise TypeError(f'Unknown source archive type: `{source_path.name}`')
if archive_type in ('gz', 'bz2'):
with TarFile.open(str(source_path), 'r:' + archive_type) as tar:
# Commit hash (if downloaded from GitHub)
commit_hash = tar.pax_headers.get('comment')
# Update extracted path because:
# `<commit-hash>[.tar.gz]` extracts a folder named `repo-name-<commit-hash>`
# `<branch-name>[.tar.gz]` extracts a folder named `repo-name-<branch-name>`
root_files = [name for name in tar.getnames() if '/' not in name]
if len(root_files) == 1:
extracted_path = source_path.with_name(root_files[0])
tar.extractall(str(extracted_path.parent))
elif archive_type == 'zip':
with ZipFile(str(source_path), 'r') as zipf:
# Commit hash (if downloaded from GitHub)
if zipf.comment:
commit_hash = zipf.comment.decode('utf-8')
# Update extracted path because:
# `<commit-hash>[.zip]` extracts a folder named `repo-name-<commit-hash>`
# `<branch-name>[.zip]` extracts a folder named `repo-name-<branch-name>`
root_folders = []
root_files = []
for name in zipf.namelist():
if name.count('/') == 1 and name.endswith('/'):
root_folders.append(name.rstrip('/'))
if name.count('/') == 0:
root_files.append(name)
# If only one root folder
if len(root_folders) == 1 and len(root_files) == 0:
extracted_path = source_path.with_name(root_folders[0])
zipf.extractall(str(extracted_path.parent))
return extracted_path, commit_hash
| 15,503
|
def correct_distribution(lines):
    """
    Balance the distribution of angles
    Define an ideal value of samples per bin. If the count per bin is greater
    than the average, then randomly remove the items only for that bin
    """
angles = np.float32(np.array(lines)[:, 3])
num_bins = 21
hist, bins = plot_histogram( num_bins, angles, 'Histogram - before distribution correction')
#correct the distribution
ideal_samples = len(angles)/num_bins * 1.5
keep_prob = [1 if hist[i] < ideal_samples else ideal_samples/hist[i] for i in range(num_bins) ]
remove_list = []
for x, y in ((i,j) for i in range(len(angles)) for j in range(num_bins)):
if angles[x] > bins[y] and angles[x] <= bins[y+1]:
if np.random.rand() > keep_prob[y]:
remove_list.append(x)
lines = np.delete(lines, remove_list, axis=0)
# check if distribution is ok
angles = np.float32(np.array(lines)[:, 3])
hist = plot_histogram(num_bins , angles, 'Histogram - after distribution correction')
return lines
| 15,504
|
def numDockedWindows():
""" Determine the amount of docked windows (i.e. visible on all desktops).
return - Number of windows.
"""
stdout = runCommand(COMMAND_LIST_WINDOWS)
result = -2 # first two windows are actually not windows and don't count
for i in iter(stdout.splitlines()):
if i[POS_COMMAND_LIST_WINDOWS_DOCKED] == CHAR_COMMAND_LIST_WINDOWS_DOCKED:
result += 1
return result
| 15,505
|
def get_logger(name, level='DEBUG', ext_logger=None):
"""
retrieves a logger with colored logs installed
Args:
name: string used to describe logger names
level: log level to use
        ext_logger: External logger object; if None, a new logger is created.
Returns:
log: instance of a logger with coloredlogs installed
"""
    fmt = '%(name)s %(levelname)s %(message)s'
if ext_logger is None:
log = logging.getLogger(name)
else:
log = ext_logger
coloredlogs.install(fmt=fmt, level=level, logger=log)
return log
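
# Usage sketch (assumes `logging` and `coloredlogs` are imported at module level):
log = get_logger("demo", level="INFO")
log.info("colored, formatted output")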
| 15,506
|
def train():
"""
Configuring, training, evaluating and making predictions on gathered data within our model rules.
"""
# retrieving processed data, ready to pipe into Tensorflow Estimators
genomes, groups = _preprocess_data(db.all())
# first 1000 genomes/groups for training
train_x = {'genome': genomes[:1000]}
train_y = groups[:1000]
# second 1000 genomes/groups for evaluating
test_x = {'genome': genomes[1000:2000]}
test_y = groups[1000:2000]
# the last 100 genomes/groups for prediction
pred_x = {'genome': genomes[2000:2100]}
pred_y = groups[2000:2100]
"""Basic feature_column, operating on each of genomes arrays"""
feature_column =\
tf.feature_column.categorical_column_with_identity('genome', num_buckets=len(groups_dictionary))
"""Embedding feature_column, optimizing and allowing for gathering more meaningful information"""
embedding_column = tf.feature_column.embedding_column(
feature_column,
dimension=250 # picked arbitrarily, best setup may differ
)
print('[CLASSIFIER SETUP]')
# Deep Neural Network classifier
classifier = tf.estimator.DNNClassifier(
hidden_units=[100], # picked arbitrarily, best setup may differ
feature_columns=[embedding_column],
n_classes=len(groups_dictionary) # number of options for classifying
)
print('[TRAINING]')
classifier.train(
input_fn=lambda: _train_input_fn(train_x, train_y),
steps=len(genomes) # picked arbitrarily, best setup may differ
)
print('[EVALUATING]')
classifier.evaluate(
input_fn=lambda: _eval_input_fn(test_x, test_y)
)
print('[PREDICTION]')
predictions = classifier.predict(
input_fn=lambda: _predict_input_fn(pred_x)
)
correct = 0
for prediction, expect in zip(predictions, pred_y):
class_id = prediction['class_ids'][0]
probability = prediction['probabilities'][class_id]
if class_id == expect:
correct += 1
print(class_id, '{:.1f}'.format(100 * probability), expect, str(class_id == expect))
print('Correct:', correct)
| 15,507
|
def get_value(hive, key_name, value_name):
"""
>>> get_value(
... HKEY_LOCAL_MACHINE,
... "SOFTWARE/Microsoft/Windows/CurrentVersion/Explorer/StartMenu",
... "Type")
[1, 'group']
>>> get_value(
... HKEY_CURRENT_USER,
... "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\CLSID\\{645FF040-5081-101B-9F08-00AA002F954E}\\DefaultIcon",
... "Full")
[2, '%SystemRoot%\\\\System32\\\\shell32.dll,32', 'C:\\\\WINDOWS\\\\System32\\\\shell32.dll,32']
"""
assert hive in (HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER, HKEY_USERS, HKEY_CLASSES_ROOT)
result = None
key_name = key_name.replace("/", "\\")
with reg_connect_registry(None, hive) as reghandle:
with reg_open_key(reghandle, key_name) as keyhandle:
try:
rval, rtype = win32api.RegQueryValueEx(keyhandle, value_name)
result = [rtype, rval]
if rtype == win32con.REG_EXPAND_SZ:
result.append(_expand_path_variables(rval))
except Exception, e:
print e
return None
return result
| 15,508
|
def addScore(appId: str, scoreName: str, value: int, userId: str,
checksum: str=Header(None), db=Depends(Database)):
""" Add user score to leaderboard
"""
validateParameters(appId=appId, scoreName=scoreName, value=value, userId=userId,
checksum=checksum)
with db.transaction() as store:
leaderboard = Leaderboards(appId=appId, userId=userId, scoreName=scoreName,
value=value)
store.merge(leaderboard)
store.commit()
userRankChecksum = computeChecksum(appId=appId, userId=userId,
scoreName=scoreName)
return getUserRank(appId, scoreName, userId, userRankChecksum, db)
| 15,509
|
def has_affect(builtin: str) -> bool:
"""Return `True` if the given builtin can affect accessed attributes."""
if builtin not in PYTHON_BUILTINS:
raise ValueError(f"'{builtin}' is not a Python builtin")
return builtin in PYTHON_ATTR_BUILTINS
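
# Illustrative only: the real PYTHON_BUILTINS / PYTHON_ATTR_BUILTINS frozensets
# live elsewhere in the project; these hypothetical values just show how the
# two sets interact in has_affect().
PYTHON_ATTR_BUILTINS = frozenset({"getattr", "setattr", "delattr", "hasattr", "vars"})
PYTHON_BUILTINS = PYTHON_ATTR_BUILTINS | frozenset({"len", "isinstance", "print"})

assert has_affect("setattr") is True
assert has_affect("len") is False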
| 15,510
|
def get_plot_for_different_k_values(similarity, model_name):
"""
    This function plots points after applying a clustering method for k=3,4,5,6 and collects the silhouette score for each k
    :param similarity: Contains our dataset (the similarity of RIPE monitors)
    :param model_name: 'Spectral' for spectral clustering; anything else falls back to KMeans
    :return: A list containing the silhouette score for each k
"""
silhouette_scores = []
f = plt.figure()
f.add_subplot(2, 2, 1)
for i in range(3, 7):
if model_name == 'Spectral':
sc = SpectralClustering(n_clusters=i, affinity='precomputed').fit(similarity)
else:
sc = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=1e-04, random_state=0).fit(similarity)
silhouette_scores.append(silhouette_score(similarity, sc.labels_))
f.add_subplot(2, 2, i - 2)
plt.scatter(similarity[:, 0], similarity[:, 1], s=5, c=sc.labels_, label="n_cluster-" + str(i))
plt.legend()
plt.show()
return silhouette_scores
| 15,511
|
def main():
"""Profiles various versions of LCA."""
nshort = 6
tshort = 2
nmed = 3
tmed = 6
nlong = 1
# Setup variables for inference
numDict = int(2048)
numBatch = int(128)
dataSize = int(256)
dictsIn = np.random.randn(numDict,dataSize)
# LCA requires that dictionary be unit norm
dictsIn = skp.normalize(dictsIn, axis=1)
stimuli = np.random.randn(numBatch,dataSize)
batchCoeffs = np.random.randn(numBatch,numDict)
coeffs = np.zeros((numBatch, numDict))
eta = .01
lamb = .05
nIter = 300
adapt = .99
softThresh = 0
thresh = np.random.randn(numBatch)
#LCA
params = """Parameters:
numDict: """+str(numDict)+"""
numBatch: """+str(numBatch)+"""
dataSize: """+str(dataSize)+"""
nIter: """+str(nIter)+"""\n"""
print params
start = timer()
lcan.infer(dictsIn,stimuli,eta,lamb,nIter,adapt)
dt = timer()-start
if dt < tshort:
n_times = nshort
elif dt < tmed:
n_times = nmed
else:
n_times = nlong
for ii in xrange(n_times-1):
start = timer()
lcan.infer(dictsIn,stimuli,eta,lamb,nIter,adapt)
dt = dt+timer()-start
dt = dt/(n_times)
print '---------------Numpy based LCA----------------'
print 'Average time over '+str(n_times)+' trials:'
print '%f s' % dt
dictsIn = np.array(dictsIn,order='F')
stimuli = np.array(stimuli,order='F')
coeffs = np.array(coeffs,order='F')
batchCoeffs = np.array(batchCoeffs,order='F')
thresh = np.array(thresh,order='F')
start = timer()
lcaf.lca(dictsIn,stimuli,eta,lamb,nIter,softThresh,adapt,coeffs,batchCoeffs,thresh,numDict,numBatch,dataSize)
dt = timer()-start
if dt < tshort:
n_times = nshort
elif dt < tmed:
n_times = nmed
else:
n_times = nlong
for ii in xrange(n_times-1):
start = timer()
lcaf.lca(dictsIn,stimuli,eta,lamb,nIter,softThresh,adapt,coeffs,batchCoeffs,thresh,numDict,numBatch,dataSize)
dt = dt+timer()-start
dt = dt/(n_times)
print '---------------Fortran based LCA--------------'
print 'Average time over '+str(n_times)+' trials:'
print '%f s' % dt
dictsIn = np.array(dictsIn,dtype=np.float32,order='F')
stimuli = np.array(stimuli,dtype=np.float32,order='F')
start = timer()
lcag.infer(dictsIn,stimuli,eta,lamb,nIter,adapt)
dt = timer()-start
if dt < tshort:
n_times = nshort
elif dt < tmed:
n_times = nmed
else:
n_times = nlong
for ii in xrange(n_times-1):
start = timer()
lcag.infer(dictsIn,stimuli,eta,lamb,nIter,adapt)
dt = dt+timer()-start
dt = dt/(n_times)
print '----------------GPU based LCA-----------------'
print 'Average time over '+str(n_times)+' trials:'
print '%f s' % dt
| 15,512
|
def RandomBrightness(image, delta, seed=None):
"""Adjust the brightness of RGB or Grayscale images.
Tips:
        delta takes extreme values in the interval [-1, 1]: values > 1 push toward white, values < -1 toward black.
        a suitable interval is [-0.5, 0.5].
        0 means the pixel values do not change.
Args:
image: Tensor or array. An image.
delta: if int, float, Amount to add to the pixel values.
if list, tuple, randomly picked in the interval
`[delta[0], delta[1])` to add to the pixel values.
seed: A Python integer. Used to create a random seed. See
`tf.set_random_seed` for behavior.
Returns:
A brightness-adjusted tensor of the same shape and type as `image`.
Raises:
        ValueError: if `delta` has an invalid type.
"""
if isinstance(delta, (int, float)):
assert -1<=delta<=1, 'delta should be in the interval [-1, 1].'
image = tf.image.adjust_brightness(image, delta)
elif isinstance(delta, (list, tuple)):
assert -1<=delta[0]<delta[1]<=1, 'delta should be 1 >= delta[1] > delta[0] >= -1.'
random_delta = tf.random.uniform([], delta[0], delta[1], seed=seed)
image = tf.image.adjust_brightness(image, random_delta)
else:
raise ValueError('delta should be one of int, float, list, tuple.')
return image
| 15,513
|
async def volume(ctx, volume: float):
"""Sets the volume of the bot, (0-2)."""
server = ctx.message.server.id
players[server].volume = volume
| 15,514
|
def fetch_rfc(number):
"""
RFC fetcher
>>> fetch_rfc("1234")
(u'https://tools.ietf.org/html/rfc1234', 'Tunneling IPX traffic through IP networks')
"""
url = "https://tools.ietf.org/html/rfc%s" % (number, )
xml, dummy_response = fetch_and_parse_xml(url)
title = xml.xpath('string(//meta[@name="DC.Title"]/@content)')
return url, (title or None)
| 15,515
|
def samefile(path1, path2, user=None):
"""
Return :obj:`True` if both path arguments refer to the same path.
"""
def tr(p):
return abspath(normpath(realpath(p)), user=user)
return tr(path1) == tr(path2)
| 15,516
|
def get_list_coord(G, o, d):
"""Get the list of intermediate coordinates between
nodes o and d (inclusive).
Arguments:
G {networkx} -- Graph
o {int} -- origin id
d {int} -- destination id
Returns:
list -- E.g.: [(x1, y1), (x2, y2)]
"""
edge_data = G.get_edge_data(o, d)[0]
try:
return ox.LineString(edge_data['geometry']).coords
    except Exception:
return [(G.node[o]['x'], G.node[o]['y']), (G.node[d]['x'], G.node[d]['y'])]
| 15,517
|
def todays_date():
"""
Returns today's date in YYYYMMDD format.
"""
now = datetime.datetime.now()
date_str = "{0}{1}{2}".format(now.year, now.strftime('%m').zfill(2), now.strftime('%d').zfill(2))
return date_str
| 15,518
|
def kl_divergence(p_probs, q_probs):
""""KL (p || q)"""
kl_div = p_probs * np.log(p_probs / q_probs)
return np.sum(kl_div[np.isfinite(kl_div)])
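
# Quick check with two discrete distributions; zero-probability bins produce
# non-finite terms, which the isfinite mask above silently drops.
import numpy as np

p = np.array([0.5, 0.5, 0.0])
q = np.array([0.9, 0.1, 0.0])
print(kl_divergence(p, q))  # ~0.5108 nats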
| 15,519
|
def _dbize(ec, org, rxn, cof, all_smiles):
"""Place data into MongoDB."""
#Connect to mongodb.
client = MongoClient()
db = client.BrendaDB
ec_collection = db.ec_pages
rxn_collection = db.rxn_pages
cpd_collection = db.cpd_pages
#Build dictionary of reactions and organisms
r_o_dict = {}
for k, v in rxn.iteritems():
p_ = []
r_ = []
#The substrates/products are in name format from the dump, so adding smiles data here.
if len(v[1]) > 0 and len(v[2]) > 0:
for comp in v[1]:
if comp in all_smiles:
smiles = all_smiles[str(comp)]
id = hashlib.sha1(smiles).hexdigest()
inchi = pybel.readstring('smi', smiles).write('inchi').strip('\t\n')
inchikey = rdki.InchiToInchiKey(inchi)
r_.append(id)
cpd_collection.update({"_id": id}, {"$set": {"smiles": smiles, "inchi": inchi, "inchikey": inchikey, "name": comp}}, upsert=True)
else:
r_.append('')
for comp in v[2]:
if comp in all_smiles:
smiles = all_smiles[str(comp)]
id = hashlib.sha1(smiles).hexdigest()
inchi = pybel.readstring('smi', smiles).write('inchi').strip('\t\n')
inchikey = rdki.InchiToInchiKey(inchi)
p_.append(id)
cpd_collection.update({"_id": id}, {"$set": {"smiles": smiles, "inchi": inchi, "inchikey": inchikey, "name": comp}}, upsert=True)
else:
p_.append('')
#A reaction doc is generated containing the names/smiles of both products and reactants as well as a
#stoichiometry vector. The id field is a hash of the final dictionary, and gets added into the rxn/org dict
#for inclusion in the ec pages. Upsert option adds to anything that matches the query and creates a new
#entry if there is no match.
r_entry = {"r_name": v[1], "p_name": v[2], "r_smiles": r_, "p_smiles": p_, "s": v[3]}
rxn_collection.update({"_id": hashlib.sha1(str(r_entry)).hexdigest()}, {"$set": {"rxn": r_entry}}, upsert=True)
r_o_dict[k] = (v[0], hashlib.sha1(str(r_entry)).hexdigest())
else:
continue
#Iterate through a dictionary of organisms to create the ec pages. Each doc is for a particular organism and lists
#all of the ecs present in it, followed by a list of reactions in each ec listing, with cofactors.
for k, v in org.iteritems():
rxns_in = [x[1] for x in r_o_dict.values() if k in x[0]]
cofs_in = [{"name": x[1], "link": ''} for x in cof if k in x[0]]
for d in cofs_in:
if d["name"] in all_smiles:
d["link"] = hashlib.sha1(all_smiles[str(d["name"])]).hexdigest()
else:
d["link"] = ''
ec_collection.update({"org": v}, {"$set": {"ec." + ec.replace('.', '_'): {"rxns": rxns_in, "cofactors": cofs_in}}}, upsert=True)
| 15,520
|
def colors_to_main(colors, main_colors):
""" Mapping image colors to main colors and count pixels
:param: colors: all colors in image
:param: main_colors: input main colors
(blue, green, yellow, purple, pink, red, orange, brown, silver, white, gray, black)
:return: colors
"""
colors.sort(reverse=True)
main_color_init(main_colors)
for c1 in colors:
color_flag = lab_to_color(c1.lab)
smallest_diff = 1000
smallest_index = None
for n, c2 in enumerate(main_colors):
if color_flag is not None:
if c2.name == color_flag:
smallest_index = n
break
else:
if c2.name in ['white', 'silver', 'gray', 'black']:
continue
color_diff = diff_cie76(c1.lab, c2.lab)
if color_diff < smallest_diff:
smallest_diff = color_diff
smallest_index = n
main_colors[smallest_index].count += c1.count
colors = [color for color in main_colors]
colors.sort(reverse=True)
return colors
| 15,521
|
def await_lock(lock):
"""
Wait for a lock without blocking the main (Qt) thread.
See await_future() for more details.
"""
elapsed = 0 # total time elapsed waiting for the lock
interval = 0.02 # the interval (in seconds) between acquire attempts
timeout = 60.0 # the total time allotted to acquiring the lock
end_time = time.time() + timeout
# wait until the lock is available
while time.time() < end_time:
#
# attempt to acquire the given lock without blocking (via 'False').
# if we successfully acquire the lock, then we can return (success)
#
if lock.acquire(False):
logger.debug("Acquired lock!")
return
#
# the lock is not available yet. we need to sleep so we don't choke
# the cpu, and try to acquire the lock again next time through...
#
logger.debug("Awaiting lock...")
time.sleep(interval)
#
# if we are executing (well, blocking) as the main thread, we need
# to flush the event loop so IDA does not hang
#
if QT_AVAILABLE and is_mainthread():
flush_qt_events()
#
# we spent 60 seconds trying to acquire the lock, but never got it...
# to avoid hanging IDA indefinitely (or worse), we abort via signal
#
raise RuntimeError("Failed to acquire lock after %f seconds!" % timeout)
| 15,522
|
def get_arg_value_wrapper(
decorator_func: t.Callable[[ArgValGetter], Decorator],
name_or_pos: Argument,
func: t.Callable[[t.Any], t.Any] = None,
) -> Decorator:
"""
Call `decorator_func` with the value of the arg at the given name/position.
`decorator_func` must accept a callable as a parameter to which it will pass a mapping of
parameter names to argument values of the function it's decorating.
`func` is an optional callable which will return a new value given the argument's value.
Return the decorator returned by `decorator_func`.
"""
def wrapper(args: BoundArgs) -> t.Any:
value = get_arg_value(name_or_pos, args)
if func:
value = func(value)
return value
return decorator_func(wrapper)
| 15,523
|
def delete_index(c, core, server=DEFAULT_SOLR_URL):
"""
Delete index data.
"""
client = SolrClient(core=core, base_url=server)
client.delete_all_index()
| 15,524
|
def yaml_request(request: quart.local.LocalProxy) -> bool:
"""Given a request, return True if it contains a YAML request body"""
return request.content_type in (
"text/vnd.yaml",
"text/yaml",
"text/x-yaml",
"application/vnd.yaml",
"application/x-yaml",
"application/yaml",
)
| 15,525
|
def validatePath(path):
"""
Returns the validated path.
:param path: string or unicode - Path to format
.. note:: Only useful if you are coding for both Linux and Windows for fixing slash problems.
e.g. Corrects 'Z://something' -> 'Z:'
Example::
fpath = xbmc.validatePath(somepath)
"""
return unicode()
| 15,526
|
def fancy_scatter(x, y, values=None, bins=60, names=['x', 'y'],
marg=False, marg_perc=15, nbins=[3, 3]):
""" Scatter plot of paramters with number desnity contours
overlaid. Marginalized 1D distributions also plotted.
Make sure that the data is appropriately cleaned before using
this routine
"""
# Simple cuts to remove bad-points
from scipy.stats import binned_statistic_2d as bs2d
ixs = np.isfinite(x) & np.isfinite(y)
range1 = np.percentile(x[ixs], [5, 95])
range2 = np.percentile(y[ixs], [5, 95])
# Use interquartile ranges to suitably set the ranges for the bins
width1, width2 = np.diff(range1), np.diff(range2)
if values is None:
res = bs2d(x[ixs], y[ixs], None, 'count', bins,
range=[[range1[0] - width1, range1[1] + width1],
[range2[0] - width2, range2[1] + width2]])
else:
res = bs2d(x[ixs], y[ixs], values[ixs], 'mean', bins,
range=[[range1[0] - width1, range1[1] + width1],
[range2[0] - width2, range2[1] + width2]])
if not marg:
fig, ax2d = plt.subplots(1, figsize=(6, 5))
else:
fig = plt.figure(figsize=(6, 6))
ax2d = fig.add_subplot(223)
ax1 = fig.add_subplot(221, sharex=ax2d)
ax3 = fig.add_subplot(224, sharey=ax2d)
if values is None:
# plot the 2d histogram
ax2d.imshow(np.log10(res.statistic.T), origin='lower', extent=[res.x_edge[0],
res.x_edge[-1], res.y_edge[0], res.y_edge[-1]], aspect='auto',
cmap=plt.cm.binary, interpolation='nearest')
# overlay contours
levels = np.linspace(0, np.log(res.statistic.max()), 10)[2:]
ax2d.contour(np.log(res.statistic.T), levels, colors='k',
extent=[res.x_edge[0], res.x_edge[-1],
res.y_edge[0], res.y_edge[-1]])
else:
# plot the binned statistic image
cmap_multicolor = plt.cm.jet
cmap_multicolor.set_bad('w', 1.)
cs = ax2d.imshow(res.statistic.T, origin='lower', extent=[res.x_edge[0],
res.x_edge[-1], res.y_edge[0], res.y_edge[-1]], aspect='auto',
cmap=cmap_multicolor, interpolation='nearest')
cb = plt.colorbar(cs)
cb.set_clim(np.nanpercentile(res.statistic, 50),
np.nanpercentile(res.statistic, 60))
ax2d.set_xlabel(r'${}$'.format(names[0]))
ax2d.set_ylabel(r'${}$'.format(names[1]))
if marg:
ax1.hist(x, bins=res.x_edge, histtype='step', range=range1, color='k')
ax3.hist(y, bins=res.y_edge, histtype='step', range=range2, color='k',
orientation='horizontal')
ax1.xaxis.set_visible(False)
ax3.yaxis.set_visible(False)
ax1.set_ylabel(r'$\mathrm{N}$', fontsize=16)
ax3.set_xlabel(r'$\mathrm{N}$', fontsize=16)
# Put percentile cuts on the marginalized plots
p1_cuts = np.percentile(x[ixs], [marg_perc, 100 - marg_perc])
p2_cuts = np.percentile(y[ixs], [marg_perc, 100 - marg_perc])
for ele in np.linspace(*p1_cuts, nbins[0]+1):
ax2d.axvline(ele, color='r', linewidth=0.6)
for ele in np.linspace(*p2_cuts, nbins[1]+1):
ax2d.axhline(ele, color='r', linewidth=0.6)
# print the bins along each dimension along with the count in each bin
print(names[0], np.linspace(*p1_cuts, nbins[0]+1))
print(names[1], np.linspace(*p2_cuts, nbins[1]+1))
bin_counts = bs2d(x[ixs], y[ixs], None, 'count',
bins=[np.linspace(*p1_cuts, nbins[0]+1),
np.linspace(*p2_cuts, nbins[1]+1)]).statistic
# To be consistent with the way it is plotted
print(np.flipud(bin_counts.T))
plt.tight_layout()
plt.show()
| 15,527
|
def add_dingbot(dingbot_id=None, *args, **kwargs):
"""Add a dingbot config with interactive input"""
home = os.path.expanduser('~')
configfp = os.path.join(home, '.easydingbot')
if os.path.exists(configfp):
with open(configfp) as f:
config_dict = json.load(f)
if 'default' in config_dict:
if not dingbot_id:
dingbot_id = input('Please input the dingbot id ("default" if empty, "q" to quit) > ')
if dingbot_id.lower() == 'q':
exit()
if not dingbot_id:
dingbot_id = 'default'
else:
            print('It\'s the first time a dingbot is being set up; "default" will be used as the first dingbot id.')
dingbot_id = 'default'
else:
dingbot_id = 'default'
config_dict = {}
webhook_pattern = r'^https://oapi.dingtalk.com/robot/send\?access_token=.*'
while True:
webhook = input('Please input the webhook string ("q" to quit) > ')
if webhook.lower() == 'q':
exit()
elif re.search(webhook_pattern, webhook):
break
else:
print('Invalid input, the format should be like "https://oapi.dingtalk.com/robot/send?access_token=XXX", please check and retry.')
secret = input('Please input the secret string ("q" to quit) > ')
if secret.lower() == 'q':
exit()
config_dict[dingbot_id] = {
'webhook': webhook,
'secret': secret
}
with open(configfp, 'w') as f:
json.dump(config_dict, f)
| 15,528
|
def get_raster_availability(layer, bbox=None):
"""retrieve metadata for raster tiles that cover the given bounding box
for the specified data layer.
Parameters
----------
layer : str
dataset layer name. (see get_available_layers for list)
bbox : (sequence of float|str)
bounding box of in geographic coordinates of area to download tiles
in the format (min longitude, min latitude, max longitude, max latitude)
Returns
-------
metadata : geojson FeatureCollection
returns metadata including download urls as a FeatureCollection
"""
base_url = 'https://www.sciencebase.gov/catalog/items'
params = [
('parentId', layer_dict[layer]),
('filter', 'tags=IMG'),
('max', 1000),
('fields', 'webLinks,spatial,title'),
('format', 'json'),
]
if bbox:
xmin, ymin, xmax, ymax = [float(n) for n in bbox]
polygon = 'POLYGON (({}))'.format(','.join([(repr(x) + ' ' + repr(y)) for x,y in [
(xmin, ymax),
(xmin, ymin),
(xmax, ymin),
(xmax, ymax),
(xmin, ymax)]]))
params.append(('filter', 'spatialQuery={{wkt:"{}",relation:"{}"}}'.format(polygon, 'intersects')))
features = []
url = base_url
while url:
r = requests.get(url, params)
print('retrieving raster availability from %s' % r.url)
params = [] # not needed after first request
content = r.json()
for item in content['items']:
feature = Feature(geometry=Polygon(_bbox2poly(item['spatial']['boundingBox'])), id=item['id'],
properties={
'name': item['title'],
'layer': layer,
'format': '.img',
'download url': [x for x in item['webLinks'] if x['type']=='download'][0]['uri']}
)
features.append(feature)
if content.get('nextlink'):
url = content['nextlink']['url']
else:
break
return FeatureCollection(features)
| 15,529
|
def absolute_error(observed, modeled):
"""Calculate the absolute error between two arrays.
:param observed: Array of observed data
:type observed: numpy.ndarray
:param modeled: Array of modeled data
:type modeled: numpy.ndarray
:rtype: numpy.ndarray
"""
error = observed - modeled
return error
| 15,530
|
def prepare_tempdir(suffix: Optional[str] = None) -> str:
"""Preapres a temprary directory, and returns the path to it.
f"_{version}" will be used as the suffix of this directory, if provided.
"""
suffix = "_" + suffix if suffix else None
dir_str = mkdtemp(suffix, "warsawgtfs_")
return dir_str
| 15,531
|
def parse_number(string):
"""
Retrieve a number from the string.
Parameters
----------
string : str
the string to parse
Returns
-------
number : float
the number contained in the string
"""
num_str = string.split(None, 1)[0]
number = float(num_str)
return number
| 15,532
|
def catch_exception(func):
"""
Decorator that catches exception and exits the code if needed
"""
def exit_if_failed(*args, **kwargs):
try:
result = func(*args, **kwargs)
except (NonZeroExitCodeException, GitLogParsingException) as exception:
Logger.error(exception.message)
quit()
else:
return result
return exit_if_failed
| 15,533
|
def _coord_byval(coord):
"""
Turns a COORD object into a c_long.
This will cause it to be passed by value instead of by reference. (That is what I think at least.)
When runing ``ptipython`` is run (only with IPython), we often got the following error::
Error in 'SetConsoleCursorPosition'.
ArgumentError("argument 2: <class 'TypeError'>: wrong type",)
argument 2: <class 'TypeError'>: wrong type
It was solved by turning ``COORD`` parameters into a ``c_long`` like this.
More info: http://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx
"""
return c_long(coord.Y * 0x10000 | coord.X & 0xFFFF)
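
# A minimal sketch of the packing, using a stand-in for the real ctypes COORD
# structure (which has SHORT X and Y fields):
from collections import namedtuple
from ctypes import c_long

COORD = namedtuple("COORD", ["X", "Y"])
print(_coord_byval(COORD(X=5, Y=3)))  # c_long(196613), i.e. 3 * 0x10000 + 5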
| 15,534
|
def create_tables(cur, conn):
"""
Creates all tables in Redshift.
    cur and conn are the cursor and connection from the psycopg2 API to the Redshift DB.
"""
for q in sql_q.create_table_queries:
print('executing query: {}'.format(q))
cur.execute(q)
conn.commit()
| 15,535
|
def _list_flows(output_format='dict', **kwargs) -> Union[Dict, pd.DataFrame]:
"""
Perform the api call that return a list of all flows.
Parameters
----------
output_format: str, optional (default='dict')
The parameter decides the format of the output.
- If 'dict' the output is a dict of dict
- If 'dataframe' the output is a pandas DataFrame
kwargs: dict, optional
Legal filter operators: uploader, tag, limit, offset.
Returns
-------
flows : dict, or dataframe
"""
api_call = "flow/list"
    if kwargs:
for operator, value in kwargs.items():
api_call += "/%s/%s" % (operator, value)
return __list_flows(api_call=api_call, output_format=output_format)
| 15,536
|
def main(argv):
"""Entry point."""
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '{}\nUsage: {} ARGS\n{}'.format(e, sys.argv[0], FLAGS)
sys.exit(1)
with open(FLAGS.definition_file, 'r') as def_file:
if FLAGS.struct_file:
def_file.seek(0)
output = StructBasedOutput(FLAGS.struct_file, FLAGS.define_file)
ParseRegisterDefinitions(def_file, output)
if FLAGS.define_file:
def_file.seek(0)
ParseRegisterDefinitions(def_file, DefineBasedOutput(FLAGS.define_file))
| 15,537
|
def _take_many_sparse_from_tensors_map(sparse_map_op,
sparse_handles,
rank=None,
name=None):
"""Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
The input `sparse_handles` must be a string matrix of shape `[N, 1]` where
`N` is the minibatch size and the rows correspond to packed outputs of
`add_sparse_to_tensors_map`. The ranks of the original `SparseTensor` objects
must all match. When the final `SparseTensor` is created, it has rank one
higher than the ranks of the incoming `SparseTensor` objects (they have been
concatenated along a new row dimension).
The output `SparseTensor` object's shape values for all dimensions but the
first are the max across the input `SparseTensor` objects' shape values
for the corresponding dimensions. Its first shape value is `N`, the minibatch
size.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `sparse.reorder` to restore index ordering.
For example, if the serialized input is a `[2, 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Args:
sparse_map_op: The `Operation` that created the original handles.
Usually this is, e.g., `add_sparse_to_tensors_map(...).op`.
sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`.
The serialized and packed `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` representing the deserialized `SparseTensor`s,
concatenated along the `SparseTensor`s' first dimension.
All of the serialized `SparseTensor`s must have had the same rank and type.
"""
if not isinstance(sparse_map_op, ops.Operation):
raise TypeError("sparse_map_op be an Operation")
if sparse_map_op.type not in ("AddSparseToTensorsMap",
"AddManySparseToTensorsMap"):
raise TypeError(
"sparse_map_op must be one of AddSparseToTensorsMap or "
"AddSparseToTensorsMap. Instead, found `%s`." % sparse_map_op.type)
with ops.colocate_with(sparse_map_op):
shared_name = sparse_map_op.get_attr("shared_name") or sparse_map_op.name
output_indices, output_values, output_shape = (
gen_sparse_ops.take_many_sparse_from_tensors_map(
sparse_handles,
dtype=sparse_map_op.get_attr("T"),
container=sparse_map_op.get_attr("container"),
shared_name=shared_name,
name=name))
# Feed rank data back in, if available
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
| 15,538
|
def rts_smooth(kalman_filter, state_count=None):
"""
Compute the Rauch-Tung-Striebel smoothed state estimates and estimate
covariances for a Kalman filter.
Args:
kalman_filter (KalmanFilter): Filter whose smoothed states should be
returned
state_count (int or None): Number of smoothed states to return.
If None, use ``kalman_filter.state_count``.
Returns:
(list of MultivariateNormal): List of multivariate normal distributions.
The mean of the distribution is the estimated state and the covariance
is the covariance of the estimate.
"""
if state_count is None:
state_count = kalman_filter.state_count
state_count = int(state_count)
if state_count < 0 or state_count > kalman_filter.state_count:
raise ValueError("Invalid state count: {}".format(state_count))
# No states to return?
if state_count == 0:
return []
# Initialise with final posterior estimate
states = [None] * state_count
states[-1] = kalman_filter.posterior_state_estimates[-1]
priors = kalman_filter.prior_state_estimates
posteriors = kalman_filter.posterior_state_estimates
# Work backwards from final state
for k in range(state_count-2, -1, -1):
process_mat = kalman_filter.process_matrices[k+1]
cmat = posteriors[k].cov.dot(process_mat.T).dot(
np.linalg.inv(priors[k+1].cov))
# Calculate smoothed state and covariance
states[k] = MultivariateNormal(
mean=posteriors[k].mean + cmat.dot(states[k+1].mean -
priors[k+1].mean),
cov=posteriors[k].cov + cmat.dot(states[k+1].cov -
priors[k+1].cov).dot(cmat.T)
)
return states
| 15,539
|
def play_episode(args, sess, env, qnet, e):
"""
Actually plays a single game and performs updates once we have enough
experiences.
:param args: parser.parse_args
:param sess: tf.Session()
:param env: gym.make()
:param qnet: class which holds the NN to play and update.
:param e: chance of a random action selection.
:return: reward earned in the game, update value of e, transitions updated
against.
"""
done = False
_ = env.reset()
reward = 0 # total reward for this episode
turn = 0
transitions = 0 # updates * batch_size
terminal = True # Anytime we lose a life, and beginning of episode.
while not done:
if terminal:
terminal = False
# To make sure that the agent doesn't just learn to set up well for
# the way the game starts, begin the game by not doing anything and
# letting the ball move.
for _ in range(np.random.randint(1, args.random_starts)):
# Perform random actions at the beginning so the network doesn't
# just learn a sequence of steps to always take.
img, _, done, info = env.step(env.action_space.sample())
img = preprocess_img(img)
state = np.stack((img, img, img, img), axis=2)
lives = info['ale.lives']
if done:
# If lost our last life during random_start, nothing left to play
break
# Perform an action
action = qnet.predict(sess, np.array([state]))[0]
if np.random.rand(1) < e:
action = qnet.rand_action()
img, r, done, info = env.step(action)
# Store as an experience
img = np.reshape(preprocess_img(img), (85, 80, 1))
next_state = np.concatenate((state[:, :, 1:], img), axis=2)
if info['ale.lives'] < lives:
terminal = True
qnet.add_experience(state, action, r, next_state, terminal)
# Updates
if qnet.exp_buf_size() > args.begin_updates and\
turn % (qnet.batch_size // 8) == 0:
# Once we have enough experiences in the buffer we can
# start learning. We want to use each experience on average 8 times
# so that's why for a batch size of 8 we would update every turn.
qnet.update(sess)
transitions += qnet.batch_size
if e > args.e_f:
# Reduce once for every update on 8 states. This makes e
# not dependent on the batch_size.
e -= (qnet.batch_size*(args.e_i - args.e_f)) / args.e_anneal
# Prep for the next turn
state = next_state
reward += r
turn += 1
return reward, e, transitions
| 15,540
|
def to_categorical(y, num_columns):
"""Returns one-hot encoded Variable"""
y_cat = np.zeros((y.shape[0], num_columns))
y_cat[range(y.shape[0]), y.astype(int)] = 1.0
return Variable(FloatTensor(y_cat))
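
# Usage sketch, assuming the snippet's (legacy) module-level imports:
#   import numpy as np
#   from torch import FloatTensor
#   from torch.autograd import Variable
y = np.array([0, 2, 1])
print(to_categorical(y, num_columns=3))  # one-hot rows for classes 0, 2 and 1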
| 15,541
|
@contextmanager  # assumes `from contextlib import contextmanager` at module level
def suppress_vtk_warnings():
    """A context manager to suppress VTK warnings.
    This is handy when trying to find something dynamically with VTK.
    **Example**
    with suppress_vtk_warnings():
        x = tvtk.VolumeRayCastMapper()
    """
try:
obj = vtk.vtkObject()
obj.GlobalWarningDisplayOff()
yield
finally:
obj.GlobalWarningDisplayOn()
| 15,542
|
def npi_provider_extension(source):
"""
Add non empty fields in NPI Record to NPI Extension in Practitioner
Profile
:param source:
:return: profile['extension'] {}
"""
extn = {}
for key, value in source.items():
if isinstance(value, str):
if value == "":
pass
else:
extn[key] = value
if isinstance(value, int):
if value:
extn[key] = value
else:
pass
if isinstance(value, list):
if len(value) > 0:
extn[key] = value
else:
pass
if isinstance(value, dict):
if len(value) > 0:
extn[key] = value
else:
pass
extn_dict = {'url' : settings.FHIR_SERVER + "/StructureDefinition/NPI_Provider_Record",
'NPI_Provider_Record' : extn
}
if settings.DEBUG:
print("extension:", extn_dict)
print("=====================")
return extn_dict
| 15,543
|
def extract_light_positions_for_rays(ray_batch, scene_info, light_pos):
"""Extract light positions for a batch of rays.
Args:
ray_batch: [R, M] tf.float32.
scene_info: Dict.
light_pos: Light position.
Returns:
light_positions: [R, 3] tf.float32.
"""
ray_sids = extract_slice_from_ray_batch( # [R,]
ray_batch=ray_batch, key='metadata')
light_positions = scene_utils.extract_light_positions_for_sids(
sids=ray_sids, # [R, 3]
scene_info=scene_info,
light_pos=light_pos)
return light_positions
| 15,544
|
def user_choice():
"""
takes input from the user and performs the tasks accordingly
"""
userchoice = """
Enter 1 for solving available problems.
Enter 2 for giving your own sudoku problem.
    Enter any key to go back to main menu.
    """
    print(userchoice)
    i_p = input(colored("\nEnter your choice 1 or 2: ", 'yellow', attrs=['bold']))
if i_p == '1':
tasks()
elif i_p == '2':
user_input_board()
else:
return
    # End of function
| 15,545
|
def cdf(x, c, loc=0, scale=1):
"""Return the cdf
:param x:
:type x:
:param c:
:type c:
:param loc:
:type loc:
:param scale:
:type scale:
:return:
:rtype:
"""
x = (x - loc) / scale
try:
c = round(c, 15)
x = np.log(1 - c * x) / c
return 1.0 / (1 + np.exp(x))
except ZeroDivisionError:
return 1.0 / (1 + np.exp(-x))
| 15,546
|
def solve_eq(preswet, func):
"""Solve the peicewise-linear stability of a parcel
INPUTS: variables from the most ascent of a parcel
preswet: pressure
func : piecewise linear function to solve (tw-te)
OUTPUTS:
solutions: zeros of the function (tw-te)
stability: indication of the stability of this solution.
NOTE ABOUT STABILITY
Stability is the sign of (d(func)/dP). So if you have used tw-te
like you were supposed to, d(tw-te)/dP>0 means this is a stbale
equilibrium level (flip the sign to envision d(tw-te)/dz).
"""
    from numpy import sign, diff, zeros
# Sorry to be annoying but I'm going to force you to use
# a monotonically increasing variable
assert (sign(diff(preswet)) == 1).all(), \
"Use a monotonically increasing abscissa"
# Identify changes in sign of function
dsign = sign(func)
isdiff = zeros(dsign.shape, dtype=bool)
isdiff[1:] = abs(diff(dsign)).astype(bool)
# shift to get the value on the other side
# of the x-axis
shift = zeros(dsign.shape, dtype=bool)
shift[:-1] = isdiff[1:]
shift[-1] = isdiff[0]
# solve by linear interpolation between
# values points
sols = zeros((isdiff.sum()))
stab = zeros((isdiff.sum()))
for ii in range(isdiff.sum()):
f0 = func[isdiff][ii]
f1 = func[shift][ii]
p0 = preswet[isdiff][ii]
p1 = preswet[shift][ii]
slope = (f1-f0) / (p1-p0)
sols[ii] = p0-f0 / slope
stab[ii] = sign(slope)
# Debug with plots ###
# fig=figure()
# ax=fig.add_subplot(111)
# ax.plot(preswet,func)
# ax.plot(sols,zeros(sols.shape),ls='',marker='o')
# ax.plot(preswet[isdiff],func[isdiff],ls='',marker='+',mew=2)
# ax.plot(preswet[shift],func[shift],ls='',marker='x',mew=2)
# ax.grid(True)
# show()
return sols, stab
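
# Quick numeric check, assuming numpy is available as in the snippet above:
# one sign change of (tw - te) between 800 and 900 hPa gives a single root.
import numpy as np

preswet = np.array([800.0, 900.0, 1000.0])  # monotonically increasing
func = np.array([-1.0, 0.5, 2.0])           # tw - te, crosses zero once
sols, stab = solve_eq(preswet, func)
print(sols, stab)  # [866.66...] [1.]  (root by linear interpolation, d(func)/dP > 0)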
| 15,547
|
def load_dict(file_name):
"""
Reads JSON or YAML file into a dictionary
"""
if file_name.lower().endswith(".json"):
with open(file_name) as _f:
return json.load(_f)
with open(file_name) as _f:
return yaml.full_load(_f)
| 15,548
|
def test_two(caplog, no_job_dirs):
"""Test with two artifacts in one job.
:param caplog: pytest extension fixture.
:param str no_job_dirs: Test with --no-job-dirs.
"""
jobs_artifacts = [('spfxkimxcj6faq57', 'artifacts.py', 12479), ('spfxkimxcj6faq57', 'README.rst', 1270)]
config = dict(always_job_dirs=False, no_job_dirs=no_job_dirs, dir=None)
actual = artifacts_urls(config, jobs_artifacts)
expected = dict([
(py.path.local('artifacts.py'), (API_PREFIX + '/buildjobs/spfxkimxcj6faq57/artifacts/artifacts.py', 12479)),
(py.path.local('README.rst'), (API_PREFIX + '/buildjobs/spfxkimxcj6faq57/artifacts/README.rst', 1270)),
])
assert actual == expected
messages = [r.message for r in caplog.records]
if no_job_dirs:
assert 'Only one job ID, automatically setting job_dirs = False.' not in messages
else:
assert 'Only one job ID, automatically setting job_dirs = False.' in messages
| 15,549
|
def get_next_number(num, proxies='', auth=''):
"""
Returns the next number in the chain
"""
url = 'http://www.pythonchallenge.com/pc/def/linkedlist.php'
res = requests.get('{0}?nothing={1}'.format(url, num), proxies=proxies, auth=auth)
dat = res.content
pattern = re.compile(r'next nothing is (\d+)')
match = pattern.findall(dat)
if match:
        return get_next_number(match[0], proxies=proxies, auth=auth)
else:
if "Divide" in dat:
            return get_next_number(int(num)/2, proxies=proxies, auth=auth)
else:
return dat
| 15,550
|
def gzip_str(g_str):
"""
    JSON-encode the given data and compress it with GZIP
Args:
g_str (str): string of data
Returns:
GZIP bytes data
"""
compressed_str = io.BytesIO()
with gzip.GzipFile(fileobj=compressed_str, mode="w") as file_out:
file_out.write((json.dumps(g_str).encode()))
bytes_obj = compressed_str.getvalue()
return bytes_obj
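
# Round-trip sketch (uses the same gzip/json modules the snippet already
# imports): the payload is JSON-encoded before compression, so it can be
# restored with gzip.decompress followed by json.loads.
blob = gzip_str({"status": "ok", "count": 3})
restored = json.loads(gzip.decompress(blob).decode())
print(restored)  # {'status': 'ok', 'count': 3}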
| 15,551
|
def test_cli_argparser_raises_no_exceptions():
"""An invalid keyword to ArgumentParser was causing an exception in Python 3."""
with pytest.raises(SystemExit):
parse_args(None)
| 15,552
|
def get_summoner_spells():
"""
https://developer.riotgames.com/api/methods#!/968/3327
Returns:
SummonerSpellList: all the summoner spells
"""
request = "{version}/summoner-spells".format(version=cassiopeia.dto.requests.api_versions["staticdata"])
params = {"tags": "all"}
if _locale:
params["locale"] = _locale
return cassiopeia.type.dto.staticdata.SummonerSpellList(cassiopeia.dto.requests.get(request, params, True))
| 15,553
|
def run_expr(expr, vranges):
""" Evaluate expr for every value of free variables
given by vranges and return the tensor of results.
TODO(yzhliu): move to utils
"""
def _compute_body(*us):
vmap = {v: u + r.min for (v, r), u in zip(vranges.items(), us)}
return tir.stmt_functor.substitute(expr, vmap)
A = te.compute([r.extent.value for v, r in vranges.items()], _compute_body)
args = [tvm.nd.empty(A.shape, A.dtype)]
sch = te.create_schedule(A.op)
mod = tvm.build(sch, [A])
mod(*args)
return args[0].asnumpy()
| 15,554
|
def test_update_quotas_1st_time_using_name(host):
"""Configurate tenant quotas and verify it works properly at the first time"""
update_quotas(host, name=tenant, instances=12, cores=10, ram=128)
verify_updated_quotas(host, name=tenant, instances=12, cores=10, ram=128)
| 15,555
|
def cache_cleanup(dry_run: bool = False) -> list:
"""Perform cleanup in the scraper cache directory. Returns list of deleted directories.
:param dry_run: in case of value True - DRY RUN MODE is on and no cleanup will be done.
:return: list of deleted directories
"""
log.debug("cache_cleanup(): cleaning up scraper cache (delete dry runs results).")
deleted_dirs = [] # list of deleted directories
for filename in os.listdir(const.SCRAPER_CACHE_PATH):
file_path = os.path.join(const.SCRAPER_CACHE_PATH, filename)
try:
if os.path.isdir(file_path) and file_path.endswith(const.SCRAPER_CACHE_DRY_RUN_DIR_SUFFIX):
log.info(f"Found DRY RUN directory: {file_path} - to be deleted!")
if dry_run:
log.warning("DRY RUN mode is active! No cleanup will be performed!")
else:
shutil.rmtree(file_path)
deleted_dirs.append(file_path)
elif os.path.isfile(file_path) or os.path.islink(file_path):
log.debug(f"Found file/symlink: {file_path}. Skipped.")
except Exception as e: # exception with cleanup (deletion of the dir/file/link)
log.error(f"Failed to delete {file_path}. Reason: {e}")
return deleted_dirs
| 15,556
|
def solve_covariance(u) -> np.ndarray:
"""Solve covariance matrix from moments
Parameters
----------
u : List[np.ndarray]
List of moments as defined by the ``get_moments()`` method call
of a BayesPy node object.
"""
cov = u[1] - np.outer(u[0], u[0])
return cov if cov.shape != (1, 1) else np.array(cov.sum())
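
# Quick check: build moments u = [E[x], E[x x^T]] from a known mean and
# covariance and confirm that the covariance is recovered.
import numpy as np

mean = np.array([1.0, -2.0])
cov = np.array([[2.0, 0.3], [0.3, 1.0]])
u = [mean, cov + np.outer(mean, mean)]
print(solve_covariance(u))  # ~[[2.0, 0.3], [0.3, 1.0]]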
| 15,557
|
def graph_multiple(input_list):
"""visualize measured height"""
fig_stem = "__".join([pathlib.Path(i).stem.strip("_height")[:10] for i in input_list])
fig_name = pathlib.Path(pathlib.Path.cwd() / "cv2" / "{0}.png".format(fig_stem))
pyplot.rcParams["xtick.direction"] = "in"
pyplot.rcParams["ytick.direction"] = "in"
pyplot.figure(figsize=(4.5, 3), dpi=300)
for input in input_list:
with open(input) as f:
reader = csv.reader(f)
time_list = []
height_list = []
for idx, data in enumerate(reader):
if idx == 0:
continue
time_list.append(float(data[0]))
height_list.append(float(data[1]))
if not time_list:
print("no data exists in {0}!".format(input))
return
label_name = pathlib.Path(input).stem.strip("_height")
pyplot.plot(time_list, height_list, label=label_name)
pyplot.xlabel("time $\it{s}$")
pyplot.ylabel("climbing height $\it{mm}$")
pyplot.xlim(xmin=0)
# pyplot.xticks([0, 100, 200, 300], [0, 100, 200, 300], rotation=0)
pyplot.ylim(ymin=0)
# pyplot.yticks([0,250,500,750,1000], [0,250,500,750,1000], rotation=0)
pyplot.grid(which="minor")
pyplot.legend(bbox_to_anchor=(1, 1.01), loc="lower right", borderaxespad=0)
pyplot.savefig(fig_name, bbox_inches="tight")
# pyplot.show()
pyplot.close()
generate_script_multiple_data(input_list)
| 15,558
|
def interact_ids(*columns: Array) -> Array:
"""Create interactions of ID columns."""
    interacted = columns[0].flatten().astype(object)
if len(columns) > 1:
interacted[:] = list(zip(*columns))
return interacted
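
# Usage sketch: element-wise tuples of the ID columns, returned with dtype=object.
import numpy as np

print(interact_ids(np.array([1, 1, 2]), np.array([10, 20, 10])))
# array of (id1, id2) tuples: (1, 10), (1, 20), (2, 10)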
| 15,559
|
def _get_depthwise():
"""
We ask the user to input a value for depthwise.
Depthwise is an integer hyperparameter that is used in the
mobilenet-like model. Please refer to famous_cnn submodule
or to mobilenets paper
# Default: 1
"""
    pooling = ''
    while pooling not in ['avg', 'max']:
        demand = "Please choose a value for pooling argument that is `avg`\
            or `max`\n"
        pooling = str(get_input(demand))
    return pooling
| 15,560
|
def LoadPartitionConfig(filename):
"""Loads a partition tables configuration file into a Python object.
Args:
filename: Filename to load into object
Returns:
Object containing disk layout configuration
"""
valid_keys = set(('_comment', 'metadata', 'layouts', 'parent'))
valid_layout_keys = set((
'_comment', 'num', 'blocks', 'block_size', 'fs_blocks', 'fs_block_size',
'uuid', 'label', 'format', 'fs_format', 'type', 'features',
'size', 'fs_size', 'fs_options', 'erase_block_size', 'hybrid_mbr',
'reserved_erase_blocks', 'max_bad_erase_blocks', 'external_gpt',
'page_size', 'size_min', 'fs_size_min'))
valid_features = set(('expand',))
config = _LoadStackedPartitionConfig(filename)
try:
metadata = config['metadata']
for key in ('block_size', 'fs_block_size'):
metadata[key] = ParseHumanNumber(metadata[key])
unknown_keys = set(config.keys()) - valid_keys
if unknown_keys:
raise InvalidLayout('Unknown items: %r' % unknown_keys)
if len(config['layouts']) <= 0:
raise InvalidLayout('Missing "layouts" entries')
if not BASE_LAYOUT in config['layouts'].keys():
raise InvalidLayout('Missing "base" config in "layouts"')
for layout_name, layout in config['layouts'].iteritems():
if layout_name == '_comment':
continue
for part in layout:
unknown_keys = set(part.keys()) - valid_layout_keys
if unknown_keys:
raise InvalidLayout('Unknown items in layout %s: %r' %
(layout_name, unknown_keys))
if part.get('num') == 'metadata' and 'type' not in part:
part['type'] = 'blank'
if part['type'] != 'blank':
for s in ('num', 'label'):
if not s in part:
raise InvalidLayout('Layout "%s" missing "%s"' % (layout_name, s))
if 'size' in part:
if 'blocks' in part:
raise ConflictingOptions(
'%s: Conflicting settings are used. '
'Found section sets both \'blocks\' and \'size\'.' %
part['label'])
part['bytes'] = ParseHumanNumber(part['size'])
if 'size_min' in part:
size_min = ParseHumanNumber(part['size_min'])
if part['bytes'] < size_min:
part['bytes'] = size_min
part['blocks'] = part['bytes'] / metadata['block_size']
if part['bytes'] % metadata['block_size'] != 0:
raise InvalidSize(
'Size: "%s" (%s bytes) is not an even number of block_size: %s'
% (part['size'], part['bytes'], metadata['block_size']))
if 'fs_size' in part:
part['fs_bytes'] = ParseHumanNumber(part['fs_size'])
if 'fs_size_min' in part:
fs_size_min = ParseHumanNumber(part['fs_size_min'])
if part['fs_bytes'] < fs_size_min:
part['fs_bytes'] = fs_size_min
if part['fs_bytes'] <= 0:
raise InvalidSize(
'File system size "%s" must be positive' %
part['fs_size'])
if part['fs_bytes'] > part['bytes']:
raise InvalidSize(
'Filesystem may not be larger than partition: %s %s: %d > %d' %
(layout_name, part['label'], part['fs_bytes'], part['bytes']))
if part['fs_bytes'] % metadata['fs_block_size'] != 0:
raise InvalidSize(
'File system size: "%s" (%s bytes) is not an even number of '
'fs blocks: %s' %
(part['fs_size'], part['fs_bytes'], metadata['fs_block_size']))
if part.get('format') == 'ubi':
part_meta = GetMetadataPartition(layout)
page_size = ParseHumanNumber(part_meta['page_size'])
eb_size = ParseHumanNumber(part_meta['erase_block_size'])
ubi_eb_size = eb_size - 2 * page_size
if (part['fs_bytes'] % ubi_eb_size) != 0:
# Trim fs_bytes to multiple of UBI eraseblock size.
fs_bytes = part['fs_bytes'] - (part['fs_bytes'] % ubi_eb_size)
raise InvalidSize(
'File system size: "%s" (%d bytes) is not a multiple of UBI '
'erase block size (%d). Please set "fs_size" to "%s" in the '
'"common" layout instead.' %
(part['fs_size'], part['fs_bytes'], ubi_eb_size,
ProduceHumanNumber(fs_bytes)))
if 'blocks' in part:
part['blocks'] = ParseHumanNumber(part['blocks'])
part['bytes'] = part['blocks'] * metadata['block_size']
if 'fs_blocks' in part:
max_fs_blocks = part['bytes'] / metadata['fs_block_size']
part['fs_blocks'] = ParseRelativeNumber(max_fs_blocks,
part['fs_blocks'])
part['fs_bytes'] = part['fs_blocks'] * metadata['fs_block_size']
if part['fs_bytes'] > part['bytes']:
raise InvalidLayout(
'Filesystem may not be larger than partition: %s %s: %d > %d' %
(layout_name, part['label'], part['fs_bytes'], part['bytes']))
if 'erase_block_size' in part:
part['erase_block_size'] = ParseHumanNumber(part['erase_block_size'])
if 'page_size' in part:
part['page_size'] = ParseHumanNumber(part['page_size'])
part.setdefault('features', [])
unknown_features = set(part['features']) - valid_features
if unknown_features:
raise InvalidLayout('%s: Unknown features: %s' %
(part['label'], unknown_features))
except KeyError as e:
raise InvalidLayout('Layout is missing required entries: %s' % e)
return config
| 15,561
|
def get_own():
"""Returns the instance on which the caller is running.
Returns a boto.ec2.instance.Instance object augmented by tag attributes.
IMPORTANT: This method will raise an exception if the network
fails. Don't forget to catch it early because we must recover from
this, fast. Also, it will throw an exception if you are not running
under EC2, so it is preferable to use is_running_on_ec2 before calling
this method.
"""
try:
instance_id = _query("instance-id")
if not instance_id:
raise NoEc2Instance(
"Can't find own instance id. Are you running under EC2?")
return filter(lambda i: i.id == instance_id, all())[0]
except EC2ResponseError:
raise NoEc2Instance("Cannot find instance %r" % instance_id)
| 15,562
|
def create_report(
name: str,
description: str,
published: datetime,
author: Identity,
object_refs: List[_DomainObject],
external_references: List[ExternalReference],
object_marking_refs: List[MarkingDefinition],
report_status: int,
report_type: str,
confidence_level: int,
labels: List[str],
files: List[Mapping[str, str]],
) -> STIXReport:
"""Create a report."""
return STIXReport(
created_by_ref=author,
name=name,
description=description,
published=published,
object_refs=object_refs,
labels=labels,
external_references=external_references,
object_marking_refs=object_marking_refs,
confidence=confidence_level,
report_types=[report_type],
custom_properties={
"x_opencti_report_status": report_status,
"x_opencti_files": files,
},
)
| 15,563
|
def get_curve_points(
road: Road,
center: np.ndarray,
road_end: np.ndarray,
placement_offset: float,
is_end: bool,
) -> list[np.ndarray]:
"""
:param road: road segment
:param center: road intersection point
:param road_end: end point of the road segment
:param placement_offset: offset based on placement tag value
:param is_end: whether the point represents road end
"""
width: float = road.width / 2.0 * road.scale
direction: np.ndarray = (center - road_end) / np.linalg.norm(
center - road_end
)
if is_end:
direction = -direction
left: np.ndarray = turn_by_angle(direction, np.pi / 2.0) * (
width + placement_offset
)
right: np.ndarray = turn_by_angle(direction, -np.pi / 2.0) * (
width - placement_offset
)
return [road_end + left, center + left, center + right, road_end + right]
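
# A plausible sketch of the `turn_by_angle` helper the snippet relies on
# (a standard counterclockwise 2-D rotation); the project's actual
# implementation may differ.
import numpy as np

def turn_by_angle(vector: np.ndarray, angle: float) -> np.ndarray:
    """Rotate a 2-D vector counterclockwise by `angle` radians."""
    rotation = np.array(
        [[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]
    )
    return rotation @ vector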
| 15,564
|
def default_inputs_folder_at_judge(receiver):
"""
When a receiver is added to a task and `receiver.send_to_judge` is checked,
this function will be used to automatically set the name of the folder with inputs at judge server.
When this function is called SubmitReceiver object is created but is not saved in database yet.
"""
return '{}-{}'.format(submit_settings.JUDGE_INTERFACE_IDENTITY, receiver.id)
| 15,565
|
def remove_extension(string):
""" Removes the extention from a string, as well as the directories.
This function may fail if more than one . is in the file, such as ".tar.gz"
Args:
string: (string): either a path or a filename that for a specific file, with extension.
(e.g. /usr/dir/sample.mitograph or sample.mitograph)
Returns:
filename_without_extension (str): just the filename without the extension (e.g. "sample")
"""
# Remove all enclosing directories, only get the name of file.
cur_filename_with_extension = remove_enclosing_dirs(string)
# Remove the extension by splitting the string at each "." and only taking first part.
filename_without_extension = cur_filename_with_extension.split(".")[0]
return filename_without_extension
| 15,566
|
def _theme_static(path):
"""
Serve arbitrary files.
"""
return static_app.static(path, 'theme')
| 15,567
|
def create_link(link: schemas.Link, db: Session = Depends(get_db)):
"""Create link
"""
# Check if the target already exists
db_link = crud.get_link_by_target(db=db, target=link.target)
if db_link:
raise HTTPException(status_code=400, detail="link already registered")
response = crud.create_link(db=db, link=link)
payload = {'link': response.link}
return JSONResponse(content=payload)
| 15,568
|
def encode_rsa_public_key(key):
"""
Encode an RSA public key into PKCS#1 DER-encoded format.
:param PublicKey key: RSA public key
:rtype: bytes
"""
return RSAPublicKey({
'modulus': int.from_bytes(key[Attribute.MODULUS], byteorder='big'),
'public_exponent': int.from_bytes(key[Attribute.PUBLIC_EXPONENT],
byteorder='big'),
}).dump()
| 15,569
|
def is_block_valid(new_block, old_block):
"""
    Simple check of whether the block is valid.
"""
if old_block["Index"] + 1 != new_block["Index"]:
return False
if old_block["Hash"] != new_block["PrevHash"]:
return False
if caculate_hash(new_block) != new_block["Hash"]:
return False
return True
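
# A hedged, self-contained sketch (not part of the original snippet) of the three
# checks performed above. caculate_hash() is defined elsewhere in this module; a
# hypothetical SHA-256 stand-in over a fixed field order is used here instead, so the
# block layout is an assumption rather than the original one.
import hashlib
import json

def _sketch_hash(block):
    payload = json.dumps({k: block[k] for k in ("Index", "PrevHash", "Data")}, sort_keys=True)
    return hashlib.sha256(payload.encode()).hexdigest()

old_block = {"Index": 0, "PrevHash": "", "Data": "genesis"}
old_block["Hash"] = _sketch_hash(old_block)
new_block = {"Index": 1, "PrevHash": old_block["Hash"], "Data": "payload"}
new_block["Hash"] = _sketch_hash(new_block)
# The same three conditions checked by is_block_valid:
assert old_block["Index"] + 1 == new_block["Index"]
assert old_block["Hash"] == new_block["PrevHash"]
assert _sketch_hash(new_block) == new_block["Hash"]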
| 15,570
|
def add_get_parameter(url, key, value):
"""
Utility method to add an HTTP request parameter to a GET request
"""
if '?' in url:
return url + "&%s" % urllib.urlencode([(key, value)])
else:
return url + "?%s" % urllib.urlencode([(key, value)])
| 15,571
|
def get_simple_match(text):
"""Returns a word instance in the dictionary, selected by a simplified String match"""
# Try to find a matching word
try:
result = word.get(word.normalized_text == text)
return result
except peewee.DoesNotExist:
return None
| 15,572
|
def validate_value_is_unique(attribute: models.Attribute, value: models.AttributeValue):
"""Check if the attribute value is unique within the attribute it belongs to."""
duplicated_values = attribute.values.exclude(pk=value.pk).filter(slug=value.slug)
if duplicated_values.exists():
raise ValidationError(
{
"name": ValidationError(
f"Value with slug {value.slug} already exists.",
code=AttributeErrorCode.ALREADY_EXISTS.value,
)
}
)
| 15,573
|
async def test_in_zone_works_for_passive_zones(hass):
"""Test working in passive zones."""
latitude = 32.880600
longitude = -117.237561
assert await setup.async_setup_component(
hass,
zone.DOMAIN,
{
"zone": [
{
"name": "Passive Zone",
"latitude": latitude,
"longitude": longitude,
"radius": 250,
"passive": True,
}
]
},
)
assert zone.in_zone(hass.states.get("zone.passive_zone"), latitude, longitude)
| 15,574
|
def ipv_plot_df(points_df, sample_frac=1, marker='circle_2d', size=0.2, **kwargs):
"""Plot vertices in a dataframe using ipyvolume."""
if sample_frac < 1:
xyz = random_sample(points_df, len(points_df), sample_frac)
else:
xyz = dict(x=points_df['x'].values, y=points_df['y'].values, z=points_df['z'].values)
fig = ipv.scatter(**xyz, marker=marker, size=size, **kwargs)
return fig
| 15,575
|
def exec_command_stdout(*command_args, **kwargs):
"""
Capture and return the standard output of the command specified by the
passed positional arguments, optionally configured by the passed keyword
arguments.
Unlike the legacy `exec_command()` and `exec_command_all()` functions, this
modern function is explicitly designed for cross-platform portability. The
return value may be safely used for any purpose, including string
manipulation and parsing.
.. NOTE::
If this command's standard output contains _only_ pathnames, this
function does _not_ return the correct filesystem-encoded string expected
by PyInstaller. If this is the case, consider calling the
filesystem-specific `exec_command()` function instead.
    Parameters
    ----------
    command_args : list
        Variadic list whose:
        1. Mandatory first element is the absolute path, relative path,
           or basename in the current `${PATH}` of the command to run.
        2. Optional remaining elements are arguments to pass to this command.
    encoding : str, optional
        Optional name of the encoding with which to decode this command's
        standard output (e.g., `utf8`), passed as a keyword argument. If
        unpassed, this output will be decoded in a portable manner specific
        to the current platform, shell environment, and system settings with
        Python's built-in `universal_newlines` functionality.
All remaining keyword arguments are passed as is to the
`subprocess.check_output()` function.
Returns
----------
unicode or str
Unicode string of this command's standard output decoded according to
the "encoding" keyword argument. This string's type depends on the
current Python version as follows:
* Under Python 2.7, this is a decoded `unicode` string.
* Under Python 3.x, this is a decoded `str` string.
"""
# Value of the passed "encoding" parameter, defaulting to None.
encoding = kwargs.pop('encoding', None)
# If no encoding was specified, the current locale is defaulted to. Else, an
# encoding was specified. To ensure this encoding is respected, the
# "universal_newlines" option is disabled if also passed. Nice, eh?
kwargs['universal_newlines'] = encoding is None
# Standard output captured from this command as a decoded Unicode string if
# "universal_newlines" is enabled or an encoded byte array otherwise.
stdout = subprocess.check_output(command_args, **kwargs)
# Return a Unicode string, decoded from this encoded byte array if needed.
return stdout if encoding is None else stdout.decode(encoding)
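
# A hedged usage sketch (not part of the original snippet): capture the output of a
# small command. Any command on the PATH would do; sys.executable is used here so the
# example works wherever Python itself is installed, and subprocess is assumed to be
# imported at module level (the function above already depends on it).
import sys

output = exec_command_stdout(sys.executable, '-c', 'print("hello")', encoding='utf8')
assert output.strip() == 'hello'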
| 15,576
|
def admin_login():
"""
This function is used to show the admin login page
:return: admin_login.html
"""
return render_template("admin_login.html")
| 15,577
|
def input_file_location(message):
"""
This function performs basic quality control of user input. It
calls for a filepath with a pre-specified message. The function
then checks if the given filepath leads to an actual existing
file. If no file exists at the given location, the function will
throw an error message and ask for a new file location.
:param message: String. Contains the message asking for a filepath
:return filepath: String. Contains a filepath leading to the file.
"""
filepath = input(message)
flag = path.isfile(filepath)
while not flag:
filepath = input("Error: file not found! \n"
"Please specify full relative filepath leading to the required file")
flag = path.isfile(filepath)
print("%s succesfully located"%(filepath))
return filepath
| 15,578
|
def delete_tag(
access_key: str,
url: str,
owner: str,
dataset: str,
*,
tag: str,
) -> None:
"""Execute the OpenAPI `DELETE /v2/datasets/{owner}/{dataset}/tags/{tag}`.
Arguments:
access_key: User's access key.
url: The URL of the graviti website.
owner: The owner of the dataset.
dataset: Name of the dataset, unique for a user.
tag: The name of the tag to be deleted.
Examples:
>>> delete_tag(
... "ACCESSKEY-********",
... "https://api.graviti.com",
... "graviti-example",
... "MNIST",
... tag="tag-2"
... )
"""
url = f"{url}/v2/datasets/{owner}/{dataset}/tags/{tag}"
open_api_do("DELETE", access_key, url)
| 15,579
|
def test_copy_globals():
"""
Checks that a restored function does not refer to the same globals dictionary,
but a copy of it. Therefore it cannot reassign global values.
"""
global_var_writer(10)
assert global_var_reader() == 10
assert global_var == 10
reader = Function.from_object(global_var_reader).eval()
writer = Function.from_object(global_var_writer).eval()
writer(20)
assert reader() == 10
assert global_var == 10
| 15,580
|
def check_dfs():
"""
This function checks if the dfs is running
"""
process = subprocess.Popen("jps", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
results = str(stdout)
flag = 0
if results.find("NameNode") > -1:
flag += 1
if results.find("SecondaryNameNode") > -1:
flag += 1
if results.find("DataNode") > -1:
flag += 1
if flag == 3:
print("INFO: dfs is up and running!")
else:
print("WARN: dfs is not running correctly or not running at all. Run <jps> for more information")
| 15,581
|
def create_contact():
"""
Get a contact form submission
"""
data = request.get_json(force=True)
contact = ContactDAO.create(**data)
return jsonify(contact.to_dict())
| 15,582
|
def smartSum(x, key, value):
""" create a new page in x if key is not a page of x
otherwise add value to x[key] """
if key not in list(x.keys()):
x[key] = value
else:
x[key] += value
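
# A short usage sketch (not part of the original snippet), accumulating values per key:
totals = {}
for key, value in [("a", 1), ("b", 2), ("a", 3)]:
    smartSum(totals, key, value)
assert totals == {"a": 4, "b": 2}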
| 15,583
|
def make_my_tuple_video(LOGGER, image, width, height, frames, codec, metric, target, subsampling, param, uuid=None):
""" make unique tuple for unique directory, primary key in DB, etc.
"""
(filepath, tempfilename) = os.path.split(image)
filename, extension = os.path.splitext(tempfilename)
my_tuple = '{filename}_{extension}_{width}x{height}x{frames}_{codec}_{metric}_{target}_{subsampling}_{param}_' \
.format(filename=filename, extension=extension[1:], image=ntpath.basename(image), width=width, height=height,
frames=frames, codec=codec, metric=metric, target=target, subsampling=subsampling, param=param)
if uuid is not None:
my_tuple = my_tuple + uuid
if len(my_tuple) > 255: # limits due to max dir name or file name length on UNIX
LOGGER.error("ERROR : Tuple too long : " + my_tuple)
assert len(my_tuple) < 256
return my_tuple
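
# A hedged usage sketch (not part of the original snippet); all parameter values are
# hypothetical and only show how the directory / primary-key string is composed. The
# module-level imports the function relies on (os, ntpath) are assumed to be present.
import logging

key = make_my_tuple_video(
    logging.getLogger(__name__), '/data/videos/foreman.yuv', 352, 288, 300,
    codec='hevc', metric='vmaf', target=95, subsampling='420', param='medium')
# -> 'foreman_yuv_352x288x300_hevc_vmaf_95_420_medium_'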
| 15,584
|
def separate_sets(
hand: DefaultDict[int, int], huro_count: int, koutsu_first: bool = True
) -> Tuple[List[Tile], List[List[Tile]], Tile]:
"""Helper function for seperating player's remaining hands into sets.
It should either be 14, 11, 8, 5, or 2 tiles.
The arg koutsu_first would change the priority for koutsu and shuntsu,
for example in the usecase for checking 全帯么九, shuntsu should have
priority over koutsu.
"""
def check_koutsu(sets_to_find):
if remain_tiles[tile_index] >= 3: # check for Koutsu
remain_tiles[tile_index] -= 3
sets_to_find -= 1
koutsu.append(Tile.from_index(tile_index))
return sets_to_find
def check_shuntsu(sets_to_find):
if remain_tiles[tile_index + 2] > 0: # check for Shuntsu
chii_n = min(
remain_tiles[tile_index],
remain_tiles[tile_index + 1],
remain_tiles[tile_index + 2]
)
if chii_n > 0:
remain_tiles[tile_index] -= chii_n
remain_tiles[tile_index + 1] -= chii_n
remain_tiles[tile_index + 2] -= chii_n
sets_to_find -= chii_n
for _ in range(chii_n):
shuntsu.append([
Tile.from_index(tile_index),
Tile.from_index(tile_index + 1),
Tile.from_index(tile_index + 2)
])
return sets_to_find
for possible_jantou in hand.keys():
if hand[possible_jantou] >= 2: # try using it as jantou
remain_tiles = copy.deepcopy(hand)
remain_tiles[possible_jantou] -= 2
koutsu = []
shuntsu = []
sets_to_find = 4 - huro_count
for tile_index in sorted(remain_tiles.keys()):
if tile_index < Tile(Suit.MANZU.value, 1).index:
if remain_tiles[tile_index] == 3:
sets_to_find -= 1
koutsu.append(Tile.from_index(tile_index))
else: # numbered tiles
if koutsu_first:
sets_to_find = check_koutsu(sets_to_find)
sets_to_find = check_shuntsu(sets_to_find)
else:
sets_to_find = check_shuntsu(sets_to_find)
sets_to_find = check_koutsu(sets_to_find)
if sets_to_find == 0:
return koutsu, shuntsu, Tile.from_index(possible_jantou)
return [], [], None
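
# A hedged, self-contained sketch (not part of the original snippet) of the same greedy
# idea on plain integer tile indices: pick a pair (jantou), then peel off triplets
# (koutsu) and runs (shuntsu). The Tile/Suit machinery above is deliberately left out,
# so this is a simplified illustration rather than the original algorithm.
from collections import Counter

def separate_sets_sketch(tiles):
    """Return (triplets, runs, pair) for a hand of integer indices, or None."""
    counts = Counter(tiles)
    for pair in [t for t, c in counts.items() if c >= 2]:
        remain = Counter(counts)
        remain[pair] -= 2
        triplets, runs = [], []
        for t in sorted(remain):
            while remain[t] >= 3:                      # koutsu first, as in the default above
                remain[t] -= 3
                triplets.append(t)
            while remain[t] and remain[t + 1] and remain[t + 2]:
                remain[t] -= 1
                remain[t + 1] -= 1
                remain[t + 2] -= 1
                runs.append((t, t + 1, t + 2))
        if sum(remain.values()) == 0:
            return triplets, runs, pair
    return None

# 111 234 456 789 + a pair of 9s, written as abstract indices:
hand = [1, 1, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 9, 9]
assert separate_sets_sketch(hand) == ([1], [(2, 3, 4), (4, 5, 6), (7, 8, 9)], 9)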
| 15,585
|
def units(legal_codes):
"""
Return sorted list of the unique units for the given
dictionaries representing legal_codes
"""
return sorted(set(lc["unit"] for lc in legal_codes))
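
# A short usage sketch (not part of the original snippet), with hypothetical legal codes:
assert units([{"unit": "by-sa"}, {"unit": "by"}, {"unit": "by-sa"}]) == ["by", "by-sa"]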
| 15,586
|
def resize_terms(terms1, terms2, patterns_to_pgS, use_inv):
"""
Resize the terms to ensure that the probabilities are the same on both sides.
This is necessary to maintain the null hypothesis that D = 0 under no introgression.
    Inputs:
    terms1 --- a set of patterns to count and add to each other to determine introgression
    terms2 --- a set of other patterns to count and add to each other to determine introgression
    patterns_to_pgS --- a mapping of site patterns to their p(gt|st) values
    use_inv --- boolean for determining if inverse site patterns will be used
Outputs:
terms1 --- a set of patterns to count and add to each other to determine introgression
terms2 --- a set of other patterns to count and add to each other to determine introgression
"""
terms1 = list(terms1)
terms2 = list(terms2)
# Create a mapping of pgtst to trees for each term
pgtst_to_trees1 = defaultdict(set)
pgtst_to_trees2 = defaultdict(set)
for tree in terms1:
# Round the probability to the 15th digit to prevent the randomness issues with small values
prob = float(format(patterns_to_pgS[tree], '.15f'))
pgtst_to_trees1[prob].add(tree)
for tree in terms2:
# Round the probability to the 15th digit to prevent the randomness issues with small values
prob = float(format(patterns_to_pgS[tree], '.15f'))
pgtst_to_trees2[prob].add(tree)
# Balance terms
terms1_prob_counts = defaultdict(int)
terms2_prob_counts = defaultdict(int)
# Round each probability and count the number of times it occurs
for tree in terms1:
prob = float(format(patterns_to_pgS[tree], '.15f'))
terms1_prob_counts[prob] += 1
for tree in terms2:
prob = float(format(patterns_to_pgS[tree], '.15f'))
terms2_prob_counts[prob] += 1
# Iterate over each probability
for prob in terms1_prob_counts:
# Get the number of times each probability occurs
count1, count2 = terms1_prob_counts[prob], terms2_prob_counts[prob]
removed = set([])
# The number of site patterns to remove is the difference in counts
num_remove = abs(count2 - count1)
        if use_inv:
            # When using inverses, each removal also removes the inverse pattern,
            # so only half as many explicit removals are needed.
            num_remove = num_remove // 2
# If probabilities do not occur an equal number of times remove site patterns until they do
if count1 > count2:
for i in range(num_remove):
# Get a pattern to remove and remove it from the possible removals
r = sorted(list(pgtst_to_trees1[prob])).pop(0)
pgtst_to_trees1[prob].remove(r)
removed.add(r)
terms1_remove = True
if count1 < count2:
for i in range(num_remove):
# Get a pattern to remove and remove it from the possible removals
r = sorted(list(pgtst_to_trees2[prob])).pop(0)
pgtst_to_trees2[prob].remove(r)
removed.add(r)
terms1_remove = False
if use_inv:
# Remove site patterns and their inverses
rm = set([])
inv_rm = pattern_inverter(removed)
for pattern in inv_rm:
rm.add(''.join(pattern))
removed = removed.union(rm)
# Iterate over each pattern to be removed and remove it
for pattern in removed:
if terms1_remove:
terms1.remove(pattern)
else:
terms2.remove(pattern)
terms1, terms2 = tuple(terms1), tuple(terms2)
return terms1, terms2
| 15,587
|
def get_network_list():
"""Get a list of networks.
---
tags:
- network
"""
return jsonify([
network.to_json(include_id=True)
for network in manager.cu_list_networks()
])
| 15,588
|
def bfixpix(data, badmask, n=4, retdat=False):
"""Replace pixels flagged as nonzero in a bad-pixel mask with the
    average of their nearest `n` good neighboring pixels (four by default).
:INPUTS:
data : numpy array (two-dimensional)
badmask : numpy array (same shape as data)
:OPTIONAL_INPUTS:
n : int
number of nearby, good pixels to average over
retdat : bool
If True, return an array instead of replacing-in-place and do
_not_ modify input array `data`. This is always True if a 1D
array is input!
:RETURNS:
another numpy array (if retdat is True)
:TO_DO:
Implement new approach of Popowicz+2013 (http://arxiv.org/abs/1309.4224)
"""
# 2010-09-02 11:40 IJC: Created
#2012-04-05 14:12 IJMC: Added retdat option
# 2012-04-06 18:51 IJMC: Added a kludgey way to work for 1D inputs
# 2012-08-09 11:39 IJMC: Now the 'n' option actually works.
if data.ndim==1:
data = np.tile(data, (3,1))
badmask = np.tile(badmask, (3,1))
ret = bfixpix(data, badmask, n=2, retdat=True)
return ret[1]
nx, ny = data.shape
badx, bady = np.nonzero(badmask)
nbad = len(badx)
if retdat:
data = np.array(data, copy=True)
for ii in range(nbad):
thisloc = badx[ii], bady[ii]
rad = 0
numNearbyGoodPixels = 0
while numNearbyGoodPixels<n:
rad += 1
xmin = max(0, badx[ii]-rad)
xmax = min(nx, badx[ii]+rad)
ymin = max(0, bady[ii]-rad)
ymax = min(ny, bady[ii]+rad)
x = np.arange(nx)[xmin:xmax+1]
y = np.arange(ny)[ymin:ymax+1]
yy,xx = np.meshgrid(y,x)
#print ii, rad, xmin, xmax, ymin, ymax, badmask.shape
rr = abs(xx + 1j*yy) * (1. - badmask[xmin:xmax+1,ymin:ymax+1])
numNearbyGoodPixels = (rr>0).sum()
closestDistances = np.unique(np.sort(rr[rr>0])[0:n])
numDistances = len(closestDistances)
localSum = 0.
localDenominator = 0.
for jj in range(numDistances):
localSum += data[xmin:xmax+1,ymin:ymax+1][rr==closestDistances[jj]].sum()
localDenominator += (rr==closestDistances[jj]).sum()
#print badx[ii], bady[ii], 1.0 * localSum / localDenominator, data[xmin:xmax+1,ymin:ymax+1]
data[badx[ii], bady[ii]] = 1.0 * localSum / localDenominator
if retdat:
ret = data
else:
ret = None
return ret
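
# A hedged usage sketch (not part of the original snippet): repair a single hot pixel in
# a small synthetic frame. With retdat=True a fixed copy is returned and the input array
# is left untouched.
import numpy as np

frame = np.ones((5, 5))
frame[2, 2] = 1000.0                      # a hypothetical hot pixel
mask = np.zeros_like(frame)
mask[2, 2] = 1
fixed = bfixpix(frame, mask, n=4, retdat=True)
assert fixed[2, 2] == 1.0                 # replaced by the average of good neighbours
assert frame[2, 2] == 1000.0              # original array unchanged because retdat=True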
| 15,589
|
def handle_create_cfn_stack(stack_name: str, url: str, s3_bucket: str, cfn_client,
runner_role: str = ''):
"""
Creates a cfn stack for use in testing. Will wait until stack is finished being created to exit.
:param stack_name: Name of the stack
:param url: CFN Template URL
    :param s3_bucket: cfn param used to store ip addresses for load balancer -> Neptune connection
    :param cfn_client: CloudFormation client used to create the stack and poll its status
    :param runner_role: The iam role for cfn to use for resource creation (OPTIONAL)
"""
logging.info(f'''creating cfn stack with params:
name={stack_name}
url={url}
s3_bucket={s3_bucket}
runner_role={runner_role}''')
create_cfn_stack(stack_name, url, s3_bucket, runner_role, cfn_client)
stack = loop_until_stack_is_complete(stack_name, cfn_client)
logging.info(f'stack creation finished. Name={stack_name}, stack={stack}')
| 15,590
|
def less():
"""
Render LESS files to CSS.
"""
for path in glob('%s/less/*.less' % env.static_path):
filename = os.path.split(path)[-1]
name = os.path.splitext(filename)[0]
out_path = '%s/www/css/%s.less.css' % (env.static_path, name)
try:
local('node_modules/less/bin/lessc %s %s' % (path, out_path))
except:
            print('It looks like "lessc" isn\'t installed. Try running: "npm install"')
raise
| 15,591
|
def parse_discount(element):
"""Given an HTML element, parse and return the discount."""
try:
# Remove any non integer characters from the HTML element
discount = re.sub("\D", "", element)
except AttributeError:
discount = "0"
return discount
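
# A short usage sketch (not part of the original snippet):
assert parse_discount("Save 25% today!") == "25"
assert parse_discount("No discount") == ""    # no digits leaves an empty string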
| 15,592
|
def read_files(allVCFs):
"""
Load all vcfs and count their number of entries
"""
# call exists in which files
call_lookup = defaultdict(list)
# total number of calls in a file
file_abscnt = defaultdict(float)
for vcfn in allVCFs:
v = parse_vcf(vcfn)
# disallow intra vcf duplicates
seen = {}
for entry in v:
key = entry_key(entry)
if key in seen:
continue
seen[key] = True
bisect.insort(call_lookup[key], vcfn)
file_abscnt[vcfn] += 1
return call_lookup, file_abscnt
| 15,593
|
def parse_dat_file(dat_file):
"""
Parse a complete dat file.
dat files are transposed wrt the rest of the data formats here. In addition, they only contain integer fields,
so we can use np.loadtxt.
First 6 columns are ignored.
Note: must have a bims and info file to process completely.
Parameters
----------
dat_file: str
Path for dat file to process.
Returns
-------
data: array-like
"""
data = np.loadtxt(dat_file)
data = data[:, 6:].T
return data
| 15,594
|
def test_env(monkeypatch, tmp_path):
"""Test that the environment variable is respected"""
data_dir = tmp_path / "envpath"
data_dir.mkdir()
monkeypatch.setenv("PPX_DATA_DIR", str(data_dir))
ppx.set_data_dir()
proj = ppx.MassiveProject(MSVID)
out_path = Path(data_dir, MSVID)
assert proj.local == out_path
| 15,595
|
def parse_cmd_arguments(mode='split_audioset', default=False, argv=None):
"""Parse command-line arguments.
Args:
mode (str): The mode of the experiment.
default (optional): If True, command-line arguments will be ignored and
only the default values will be parsed.
argv (optional): If provided, it will be treated as a list of command-
line argument that is passed to the parser in place of sys.argv.
Returns:
The Namespace object containing argument names and values.
"""
description = 'Continual learning on Audioset task.'
parser = argparse.ArgumentParser(description=description)
    dnum_tasks = 1
    dnum_classes_per_task = 10
    dval_set_size = 500
    if mode == 'split_audioset':
        dnum_tasks = 10
        dnum_classes_per_task = 10
        dval_set_size = 500
    elif mode == 'audioset':
        dnum_tasks = 1
        dnum_classes_per_task = 100
        dval_set_size = 5000
cli.cl_args(parser, show_beta=True, dbeta=0.005,
show_from_scratch=True, show_multi_head=True,
show_split_head_cl3=False, show_cl_scenario=False,
show_num_tasks=True, dnum_tasks=dnum_tasks,
show_num_classes_per_task=True,
dnum_classes_per_task=dnum_classes_per_task)
cli.train_args(parser, show_lr=True, show_epochs=False,
dbatch_size=64, dn_iter=5000,
dlr=1e-3, show_clip_grad_value=False, show_clip_grad_norm=True,
show_momentum=False, show_adam_beta1=True)
seq.rnn_args(parser, drnn_arch='32', dnet_act='tanh')
cli.hypernet_args(parser, dhyper_chunks=-1, dhnet_arch='50,50',
dtemb_size=32, demb_size=32, dhnet_act='relu')
# Args of new hnets.
nhnet_args = cli.hnet_args(parser, allowed_nets=['hmlp', 'chunked_hmlp',
'structured_hmlp', 'hdeconv', 'chunked_hdeconv'], dhmlp_arch='50,50',
show_cond_emb_size=True, dcond_emb_size=32, dchmlp_chunk_size=1000,
dchunk_emb_size=32, show_use_cond_chunk_embs=True,
dhdeconv_shape='512,512,3', prefix='nh_',
pf_name='new edition of a hyper-', show_net_act=True, dnet_act='relu',
show_no_bias=True, show_dropout_rate=True, ddropout_rate=-1,
show_specnorm=True, show_batchnorm=False, show_no_batchnorm=False)
seq.new_hnet_args(nhnet_args)
cli.init_args(parser, custom_option=False, show_normal_init=False,
show_hyper_fan_init=True)
cli.eval_args(parser, dval_iter=250, show_val_set_size=True,
dval_set_size=dval_set_size)
magroup = cli.miscellaneous_args(parser, big_data=False,
synthetic_data=True, show_plots=True, no_cuda=True,
show_publication_style=False)
seq.ewc_args(parser, dewc_lambda=5000., dn_fisher=-1, dtbptt_fisher=-1,
dts_weighting_fisher='last')
seq.si_args(parser, dsi_lambda=1.)
seq.context_mod_args(parser, dsparsification_reg_type='l1',
dsparsification_reg_strength=1., dcontext_mod_init='constant')
seq.miscellaneous_args(magroup, dmask_fraction=0.8, dclassification=True,
dts_weighting='last', show_use_ce_loss=False)
# Replay arguments.
rep_args = seq.replay_args(parser)
cli.generator_args(rep_args, dlatent_dim=100)
cli.main_net_args(parser, allowed_nets=['simple_rnn'],
dsrnn_rec_layers='32', dsrnn_pre_fc_layers='',
dsrnn_post_fc_layers='',
show_net_act=True, dnet_act='tanh', show_no_bias=True,
show_dropout_rate=False, show_specnorm=False, show_batchnorm=False,
prefix='dec_', pf_name='replay decoder')
args = None
if argv is not None:
if default:
warnings.warn('Provided "argv" will be ignored since "default" ' +
'option was turned on.')
args = argv
if default:
args = []
config = parser.parse_args(args=args)
config.mode = mode
### Check argument values!
cli.check_invalid_argument_usage(config)
seq.check_invalid_args_sequential(config)
if config.train_from_scratch:
# FIXME We could get rid of this warning by properly checkpointing and
# loading all networks.
warnings.warn('When training from scratch, only during accuracies ' +
'make sense. All other outputs should be ignored!')
return config
| 15,596
|
def get_number_packets_start_end(trace, features):
"""
Gets the number of incoming & outcoming packets in the first and last 30 packets
"""
first = trace[:30]
last = trace[-30:]
packets_in, packets_out = counts_in_out_packets(first)
features.append(packets_in)
features.append(packets_out)
packets_in, packets_out = counts_in_out_packets(last)
features.append(packets_in)
features.append(packets_out)
| 15,597
|
def emails():
"""A strategy for generating email addresses as unicode strings. The
    address format is specified in :rfc:`5322#section-3.4.1`. Values shrink
towards shorter local-parts and host domains.
This strategy is useful for generating "user data" for tests, as
mishandling of email addresses is a common source of bugs. Future
updates will generate more complicated addresses allowed by the RFC.
"""
from hypothesis.provisional import domains
local_chars = string.ascii_letters + string.digits + "!#$%&'*+-/=^_`{|}~"
local_part = text(local_chars, min_size=1, max_size=64)
# TODO: include dot-atoms, quoted strings, escaped chars, etc in local part
return builds(u'{}@{}'.format, local_part, domains()).filter(
lambda addr: len(addr) <= 255)
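
# A hedged usage sketch (not part of the original snippet), assuming hypothesis is
# installed: the strategy plugs into @given like any other.
from hypothesis import given

@given(emails())
def test_address_has_local_and_host_parts(address):
    local, _, host = address.rpartition("@")
    assert local and host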
| 15,598
|
def next_whole_token(
wordpiece_subtokens,
initial_tokenizer,
subword_tokenizer):
"""Greedily reconstitutes a whole token from a WordPiece list.
This function assumes that the wordpiece subtokens were constructed correctly
from a correctly subtokenized CuBERT tokenizer, but the sequence may be
truncated and thus incomplete.
The implementation is done in two stages: recognizing the first whole token
and then finding the correspondence of that first whole token to a prefix of
the subtoken sequence.
The implementation assumes that untokenization can do the best job on the full
context. So, it first untokenizes the whole sequence, and chooses the first
whole token.
To figure out the subtoken prefix that corresponds to that whole token, the
implementation greedily untokenizes longer and longer subtoken prefixes, until
the whole token is recognized in the output.
The reason for this somewhat expensive implementation is that the logic for
merging subtokens (for WordPiece and then for CuBERT) is intricate, and does
not export how many initial subtokens were consumed for each output token of
the next higher abstraction. What's more, a subtoken may align itself with
the previous or the next whole token, when the subtoken sequence is
incomplete.
Args:
wordpiece_subtokens: The subtokens to scan through.
initial_tokenizer: A CuBERT tokenizer.
subword_tokenizer: A SubwordTextEncoder.
Returns:
The first whole token matched, and the end index of the first subtoken index
after the first whole token. wordpiece_subtokens[0:end_index] should be
the subtokens corresponding to the whole token returned.
Raises:
ValueError if no whole token can be parsed.
"""
wordpiece_ids = wordpiece_ids_from_wordpiece_tokens(wordpiece_subtokens,
subword_tokenizer)
full_cubert_subtokens: List[str] = (
subword_tokenizer._subtoken_ids_to_tokens( # pylint: disable=protected-access
wordpiece_ids))
full_cubert_subtokens.append(
unified_tokenizer.quote_special(unified_tokenizer.TokenKind.EOS.name))
full_whole_tokens = initial_tokenizer.untokenize_agnostic(
full_cubert_subtokens)
if len(full_whole_tokens) < 2:
# It all came out a jumble. Reject it.
raise ValueError(f'Whole tokens {full_whole_tokens} ended up '
f'undifferentiable in {wordpiece_subtokens}.')
whole_token = full_whole_tokens[0]
for end_index in range(1, len(wordpiece_ids) + 1):
prefix_list = wordpiece_ids[:end_index]
partial_cubert_subtokens: List[str] = (
subword_tokenizer._subtoken_ids_to_tokens( # pylint: disable=protected-access
prefix_list))
# We strip EOS in `code_to_cubert_sentences`, so we have to add it back
# here.
partial_cubert_subtokens.append(
unified_tokenizer.quote_special(unified_tokenizer.TokenKind.EOS.name))
partial_whole_tokens = initial_tokenizer.untokenize_agnostic(
partial_cubert_subtokens)
if len(partial_whole_tokens) > 1:
if partial_whole_tokens[0] == whole_token:
return whole_token, end_index
# We got here because we couldn't match the whole token we found from the
# full sequence
raise ValueError('Could not find a whole token in %r' %
(wordpiece_subtokens,))
| 15,599