| content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
def create_multiple_new_predictions(monkeypatch):
"""
    Mock the user and prediction model methods so that a new prediction can be
    created without a duplicate already existing in the predictions table.
"""
@classmethod
async def mockfunc_get_one_by_username(cls, username):
"""Return a user record from the users table."""
hashed_pwd = bcrypt.hash(API_USER_PASSWORD)
d = {"id": 1, "username": API_USER_NAME, "password_hash": hashed_pwd}
return d
@classmethod
async def mock_get_one_by_url(cls, url):
"""
Return None to indicate no pre-existing prediction with specified url
in the predictions table.
"""
return None
@classmethod
async def mock_create_new_record(cls, notes):
"""
Do nothing, to allow new prediction to be added to predictions table.
"""
pass
monkeypatch.setattr(
DBUser, "get_one_by_username", mockfunc_get_one_by_username
)
monkeypatch.setattr(DBPrediction, "get_one_by_url", mock_get_one_by_url)
monkeypatch.setattr(DBPrediction, "create", mock_create_new_record) | 27608239ba1d1d5da0cf464d9ab1f752ec7057b6 | 32,700 |
def tidy_split(df, column, sep, keep=False):
"""
Split the values of a column and expand so the new DataFrame has one split
value per row. Filters rows where the column is missing.
Params
------
df : pandas.DataFrame
dataframe with the column to split and expand
column : str
the column to split and expand
sep : str
the string used to split the column's values
keep : bool
        whether to retain the presplit value as its own row
Returns
-------
pandas.DataFrame
Returns a dataframe with the same columns as `df`.
"""
indexes = list()
new_values = list()
df = df.dropna(subset=[column])
for i, presplit in enumerate(df[column].astype(str)):
values = presplit.split(sep)
if keep and len(values) > 1:
indexes.append(i)
new_values.append(presplit)
for value in values:
indexes.append(i)
new_values.append(value)
new_df = df.iloc[indexes, :].copy()
new_df[column] = new_values
return new_df | 4e4138cf4f5fab924d4e9e792db5e1c954ee8032 | 32,701 |
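A hypothetical usage sketch (not part of the original record), assuming pandas is installed and tidy_split is in scope:
import pandas as pd

df = pd.DataFrame({"id": [1, 2], "tags": ["a|b|c", "d"]})
# Expands to one row per tag: ids become [1, 1, 1, 2], tags ['a', 'b', 'c', 'd']
print(tidy_split(df, "tags", sep="|"))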
def greedyClustering(v_space, initial_pt_index, k, style):
"""
Generate `k` centers, starting with the `initial_pt_index`.
Parameters:
----------
    v_space: 2D array.
        The coordinate matrix of the initial geometry.
        Each row holds the coordinates of one vertex, so the row number is the vertex's index.
    initial_pt_index: Int.
        The index of the initial point.
    k: Int.
        The number of centers to generate.
style: String.
Indicate "last" or "mean" to choose the style of evaluation function.
"last": Calculate the farthest point by tracking the last generated center point.
Minimum distance threshold applied.
"mean": Calculate a point with the maximum average distance to all generated centers;
Calculate a point with the minimum distance variance of all generated centers;
Minimum distance threshold applied.
Returns:
----------
center_indices_list: List of int.
Containing the indices of all k centers.
"""
if style == "last":
center_indices_list = []
center_indices_list.append(initial_pt_index)
min_dist_thrshld = 0.01 # Unit: m. The radius of FM ball.
for j in range(k):
center_coord_temp = v_space[center_indices_list[j],:]
max_dist_temp = 0.0
new_center_index_temp = 0
for i in range(v_space.shape[0]):
if i in center_indices_list: continue
coord_temp = v_space[i,:]
dist_temp = np.linalg.norm(center_coord_temp.reshape(-1,3) - coord_temp.reshape(-1,3))
dist_list = []
for index in center_indices_list:
dist_temp_eachCenter = np.linalg.norm(coord_temp.reshape(-1,3) - v_space[index,:].reshape(-1,3))
dist_list.append(dist_temp_eachCenter)
min_dist_temp = np.min(dist_list)
if dist_temp > max_dist_temp and min_dist_temp >= min_dist_thrshld:
max_dist_temp = dist_temp
new_center_index_temp = i
if new_center_index_temp not in center_indices_list:
center_indices_list.append(new_center_index_temp)
return center_indices_list
elif style == "mean":
center_indices_list = []
center_indices_list.append(initial_pt_index)
min_dist_thrshld = 0.01 # Unit: m. The radius of FM ball.
while(True):
max_dist_thrshld = 0.0
new_center_index_temp = 0
for i in range(v_space.shape[0]):
if i in center_indices_list: continue
coord_temp = v_space[i,:]
dist_list = []
for index in center_indices_list:
dist_temp = np.linalg.norm(coord_temp.reshape(-1,3) - v_space[index,:].reshape(-1,3))
dist_list.append(dist_temp)
avg_dist_temp = np.mean(dist_list)
min_dist_temp = np.min(dist_list)
if avg_dist_temp > max_dist_thrshld and min_dist_temp >= min_dist_thrshld:
max_dist_thrshld = avg_dist_temp
new_center_index_temp = i
if new_center_index_temp not in center_indices_list:
center_indices_list.append(new_center_index_temp)
if len(center_indices_list) >= k: break
var_thrshld = 1e5
new_center_index_temp = 0
# ================= Picking only several points to calculate the distance variance (abandoned) ================= #
# picked_num_temp = int(np.ceil(len(center_indices_list)*0.3)) # Pick several center points to compute the distance variance.
# picked_indices_temp = generateFMIndices(picked_num_temp, len(center_indices_list))
# picked_indices_temp = [center_indices_list[i] for i in copy.deepcopy(picked_indices_temp)]
# ============================================================================================================== #
for i in range(v_space.shape[0]):
if i in center_indices_list: continue
coord_temp = v_space[i,:]
dist_list = []
# for index in picked_indices_temp: # Picking only several points to calculate the distance variance (abandoned).
for index in center_indices_list:
dist_temp = np.linalg.norm(coord_temp.reshape(-1,3) - v_space[index,:].reshape(-1,3))
dist_list.append(dist_temp)
var_dist_temp = np.var(dist_list)
min_dist_temp = np.min(dist_list)
if var_dist_temp < var_thrshld and min_dist_temp >= min_dist_thrshld:
var_thrshld = var_dist_temp
new_center_index_temp = i
if new_center_index_temp not in center_indices_list:
center_indices_list.append(new_center_index_temp)
if len(center_indices_list) >= k: break
return center_indices_list
else:
        print("Invalid style indicator. Will start training based on the optimal FM indices.")
return [] | ac90d1d6c461a969a2bcb71f3df3a822496b3a65 | 32,702 |
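For illustration only, a hedged sketch of calling the function on random points; it assumes numpy is imported as np, which the function body itself relies on:
import numpy as np

np.random.seed(0)
v_space = np.random.rand(200, 3)  # 200 candidate vertices in 3-D (unit: m)
centers = greedyClustering(v_space, initial_pt_index=0, k=5, style="last")
print(centers)  # indices of well-separated center vertices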
def s2_matrix(beta, gamma, device=None):
"""
Returns a new tensor corresponding to matrix formulation of the given input tensors representing
SO(3) group elements.
Args:
beta (`torch.FloatTensor`): beta attributes of group elements.
gamma (`torch.FloatTensor`): gamma attributes of group elements.
device (Device, optional): computation device. Defaults to None.
Returns:
(`torch.FloatTensor`): matrix representation of the group elements.
"""
R_beta_y = rotation_matrix(beta, "y", device)
R_gamma_z = rotation_matrix(gamma, "z", device)
return R_gamma_z @ R_beta_y | 20d08d1b75f22bddfaf5295751f05d43ea7fe6bb | 32,703 |
import threading
def cache(func):
"""Thread-safe caching."""
lock = threading.Lock()
results = {}
def wrapper(*args, **kwargs):
identifier = checksum(args, kwargs)
if identifier in results:
return results[identifier]
with lock:
if identifier in results:
return results[identifier]
result = func(*args, **kwargs)
results[identifier] = result
return result
return wrapper | c17a6550ec91edcfcad6d898a1f81fa4b878757a | 32,704 |
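The record leaves checksum undefined; below is a minimal, hypothetical sketch of exercising the decorator, assuming a simple repr-based checksum helper defined in the same module:
import hashlib

def checksum(args, kwargs):
    # Hypothetical stand-in: hash the repr of the call arguments.
    return hashlib.sha1(repr((args, sorted(kwargs.items()))).encode()).hexdigest()

@cache
def slow_square(x):
    return x * x

print(slow_square(4), slow_square(4))  # the second call is served from the cache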
import subprocess
def run(cmd):
""" Run system command, returns exit-code and stdout """
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
txt = p.stdout.read()
return p.wait(), txt | 303af44fd885edeed25721f0a16c3fad6a01f102 | 32,705 |
import platform
import subprocess as sp
def hide_console():
"""Startup-info for subprocess.Popen which hides the console on
Windows.
"""
if platform.system() != 'Windows':
return None
si = sp.STARTUPINFO()
si.dwFlags |= sp.STARTF_USESHOWWINDOW
si.wShowWindow = sp.SW_HIDE
return si | faf25cdf48ffddd2ae7185c457d1abe60a0ec181 | 32,706 |
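A hedged usage sketch: the hidden console only matters on Windows, and on other platforms hide_console() returns None, which Popen accepts unchanged:
import subprocess as sp
import sys

proc = sp.Popen([sys.executable, "--version"], stdout=sp.PIPE,
                startupinfo=hide_console())
print(proc.communicate()[0].decode().strip())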
from typing import Dict
from typing import Any
def merge_dicts(dict1: Dict[str, Any],
dict2: Dict[str, Any],
*dicts: Dict[str, Any]) -> Dict[str, Any]:
"""
Merge multiple dictionaries, producing a merged result without modifying
the arguments.
:param dict1: the first dictionary
:param dict2: the second dictionary
:param dicts: additional dictionaries
:return: The merged dictionary. Keys in dict2 overwrite duplicate keys in
dict1
"""
res = dict1.copy()
res.update(dict2)
for d in dicts:
res.update(d)
return res | 869399774cc07801e5fa95d9903e6a9f2dadfc25 | 32,707 |
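A small usage sketch with illustrative values only:
defaults = {"host": "localhost", "port": 8080}
overrides = {"port": 9090}
print(merge_dicts(defaults, overrides, {"debug": True}))
# {'host': 'localhost', 'port': 9090, 'debug': True}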
def decode(model, inputs):
"""Decode inputs."""
decoder_inputs = encode_onehot(np.array(['='])).squeeze()
decoder_inputs = jnp.tile(decoder_inputs, (inputs.shape[0], 1))
return model(
inputs, decoder_inputs, train=False, max_output_len=get_max_output_len()) | 632b57ab86b9dd670d3e7203197575130ded7057 | 32,708 |
from itertools import combinations
from typing import Sequence
from typing import List
def combinations_all(data: Sequence) -> List:
"""
Return all combinations of all length for given sequence
Args:
data: sequence to get combinations of
Returns:
List: all combinations
"""
comb = []
for r in range(1, len(data) + 1):
comb.extend(combinations(data, r=r))
return comb | 7e0b31189a5afe3ac027a4c947aca08b3a2075ff | 32,709 |
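A quick illustration of the output ordering (shortest combinations first):
print(combinations_all("abc"))
# [('a',), ('b',), ('c',), ('a', 'b'), ('a', 'c'), ('b', 'c'), ('a', 'b', 'c')]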
def templateSummary():
    """
    Controller that displays an analysis summary of a survey template.
    """
# Load Model
tablename = "survey_template"
s3db[tablename]
s3db.survey_complete
crud_strings = s3.crud_strings[tablename]
def postp(r, output):
if r.interactive:
if len(get_vars) > 0:
dummy, template_id = get_vars.viewing.split(".")
else:
template_id = r.id
form = s3db.survey_build_template_summary(template_id)
output["items"] = form
output["sortby"] = [[0, "asc"]]
output["title"] = crud_strings.title_analysis_summary
output["subtitle"] = crud_strings.subtitle_analysis_summary
return output
s3.postp = postp
# remove CRUD generated buttons in the tabs
s3db.configure(tablename,
listadd=False,
deletable=False,
)
output = s3_rest_controller("survey", "template",
method = "list",
rheader=s3.survey_template_rheader
)
s3.actions = None
return output | 6151e040b8e1c2491b3e282d0bb90fe278bf6dd8 | 32,710 |
import time
from itertools import groupby
from operator import itemgetter
def suffix_array(text, _step=16):
    """Analyze all common strings in the text.
    Short substrings of length _step are first pre-sorted. The results are then
    repeatedly merged so that the guaranteed number of compared characters
    (bytes) is doubled in every iteration until all substrings are sorted
    exactly.
Arguments:
text: The text to be analyzed.
_step: Is only for optimization and testing. It is the optimal length
of substrings used for initial pre-sorting. The bigger value is
faster if there is enough memory. Memory requirements are
approximately (estimate for 32 bit Python 3.3):
len(text) * (29 + (_size + 20 if _size > 2 else 0)) + 1MB
Return value: (tuple)
(sa, rsa, lcp)
sa: Suffix array for i in range(1, size):
assert text[sa[i-1]:] < text[sa[i]:]
rsa: Reverse suffix array for i in range(size):
assert rsa[sa[i]] == i
lcp: Longest common prefix for i in range(1, size):
assert text[sa[i-1]:sa[i-1]+lcp[i]] == text[sa[i]:sa[i]+lcp[i]]
if sa[i-1] + lcp[i] < len(text):
assert text[sa[i-1] + lcp[i]] < text[sa[i] + lcp[i]]
>>> suffix_array(text='banana')
([5, 3, 1, 0, 4, 2], [3, 2, 5, 1, 4, 0], [0, 1, 3, 0, 0, 2])
Explanation: 'a' < 'ana' < 'anana' < 'banana' < 'na' < 'nana'
The Longest Common String is 'ana': lcp[2] == 3 == len('ana')
It is between tx[sa[1]:] == 'ana' < 'anana' == tx[sa[2]:]
"""
tx = text
t0 = time.time()
size = len(tx)
step = min(max(_step, 1), len(tx))
sa = list(range(len(tx)))
# log.debug('%6.3f pre sort', time.time() - t0)
sa.sort(key=lambda i: tx[i:i + step])
# log.debug('%6.3f after sort', time.time() - t0)
grpstart = size * [False] + [True] # a boolean map for iteration speedup.
# It helps to skip yet resolved values. The last value True is a sentinel.
rsa = size * [None]
stgrp, igrp = '', 0
for i, pos in enumerate(sa):
st = tx[pos:pos + step]
if st != stgrp:
grpstart[igrp] = (igrp < i - 1)
stgrp = st
igrp = i
rsa[pos] = igrp
sa[i] = pos
grpstart[igrp] = (igrp < size - 1 or size == 0)
# log.debug('%6.3f after group', time.time() - t0)
while grpstart.index(True) < size:
# assert step <= size
nmerge = 0
nextgr = grpstart.index(True)
while nextgr < size:
igrp = nextgr
nextgr = grpstart.index(True, igrp + 1)
glist = []
for ig in range(igrp, nextgr):
pos = sa[ig]
if rsa[pos] != igrp:
break
newgr = rsa[pos + step] if pos + step < size else -1
glist.append((newgr, pos))
glist.sort()
for ig, g in groupby(glist, key=itemgetter(0)):
g = [x[1] for x in g]
sa[igrp:igrp + len(g)] = g
grpstart[igrp] = (len(g) > 1)
for pos in g:
rsa[pos] = igrp
igrp += len(g)
nmerge += len(glist)
# log.debug('%6.3f for step=%d nmerge=%d', time.time() - t0, step, nmerge)
step *= 2
del grpstart
# create LCP array
lcp = size * [None]
h = 0
for i in range(size):
if rsa[i] > 0:
j = sa[rsa[i] - 1]
while i != size - h and j != size - h and tx[i + h] == tx[j + h]:
h += 1
lcp[rsa[i]] = h
if h > 0:
h -= 1
if size > 0:
lcp[0] = 0
# log.debug('%6.3f end', time.time() - t0)
return sa, rsa, lcp | 1ed14feb5fa69b5d01d99128c56559c983df4e04 | 32,711 |
def get_main_image():
"""Rendering the scatter chart"""
yearly_temp = []
yearly_hum = []
for city in data:
yearly_temp.append(sum(get_city_temperature(city))/12)
yearly_hum.append(sum(get_city_humidity(city))/12)
plt.clf()
plt.scatter(yearly_hum, yearly_temp, alpha=0.5)
plt.title('Yearly Average Temperature/Humidity')
plt.xlim(70, 95)
plt.ylabel('Yearly Average Temperature')
plt.xlabel('Yearly Average Relative Humidity')
for i, txt in enumerate(CITIES):
plt.annotate(txt, (yearly_hum[i], yearly_temp[i]))
img = BytesIO()
plt.savefig(img)
img.seek(0)
return img | b9845a44b868353e878b53beb6faf9c17bdf07d6 | 32,712 |
def get_PhotoImage(path, scale=1.0):
"""Generate a TKinter-compatible photo image, given a path, and a scaling
factor.
Parameters
----------
path : str
Path to the image file.
scale : float, default: 1.0
Scaling factor.
Returns
-------
img : `PIL.ImageTk.PhotoImage <https://pillow.readthedocs.io/en/4.2.x/\
reference/ImageTk.html#PIL.ImageTk.PhotoImage>`_
Tkinter-compatible image. This can be incorporated into a GUI using
tk.Label(parent, image=img)
"""
image = Image.open(path).convert('RGBA')
[w, h] = image.size
new_w = int(w * scale)
new_h = int(h * scale)
image = image.resize((new_w, new_h), Image.ANTIALIAS)
return ImageTk.PhotoImage(image) | 02402574e0641a1caced9fe0b07434db5c84dee5 | 32,713 |
import gc
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
"""Meta-CLustering Algorithm for a consensus function.
Parameters
----------
hdf5_file_name : file handle or string
cluster_runs : array of shape (n_partitions, n_samples)
verbose : bool, optional (default = False)
N_clusters_max : int, optional (default = None)
Returns
-------
A vector specifying the cluster label to which each sample has been assigned
by the MCLA approximation algorithm for consensus clustering.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
"""
print('\n*****')
print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')
    if N_clusters_max is None:
N_clusters_max = int(np.nanmax(cluster_runs)) + 1
N_runs = cluster_runs.shape[0]
N_samples = cluster_runs.shape[1]
print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")
hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
w = hypergraph_adjacency.sum(axis = 1)
N_rows = hypergraph_adjacency.shape[0]
print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
"Starting computation of Jaccard similarity matrix.")
# Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
with tables.open_file(hdf5_file_name, 'r+') as fileh:
FILTERS = get_compression_filter(4 * (N_rows ** 2))
similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
'similarities_MCLA', tables.Float32Atom(),
(N_rows, N_rows), "Matrix of pairwise Jaccard "
"similarity scores", filters = FILTERS)
scale_factor = 100.0
print("INFO: Cluster_Ensembles: MCLA: "
"starting computation of Jaccard similarity matrix.")
squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())
squared_sums = hypergraph_adjacency.sum(axis = 1)
squared_sums = np.squeeze(np.asarray(squared_sums))
chunks_size = get_chunk_size(N_rows, 7)
for i in range(0, N_rows, chunks_size):
n_dim = min(chunks_size, N_rows - i)
temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
temp = np.squeeze(np.asarray(temp))
x = squared_sums[i:min(i+chunks_size, N_rows)]
x = x.reshape(-1, 1)
x = np.dot(x, np.ones((1, squared_sums.size)))
y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))
temp = np.divide(temp, x + y - temp)
temp *= scale_factor
Jaccard_matrix = np.rint(temp)
similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix
del Jaccard_matrix, temp, x, y
gc.collect()
# Done computing the matrix of pairwise Jaccard similarity scores.
print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
"pairwise Jaccard similarity scores.")
cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
cluster_labels = one_to_max(cluster_labels)
# After 'cmetis' returns, we are done with clustering hyper-edges
# We are now ready to start the procedure meant to collapse meta-clusters.
N_consensus = np.amax(cluster_labels) + 1
fileh = tables.open_file(hdf5_file_name, 'r+')
FILTERS = get_compression_filter(4 * N_consensus * N_samples)
clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
tables.Float32Atom(), (N_consensus, N_samples),
'Matrix of mean memberships, forming meta-clusters',
filters = FILTERS)
chunks_size = get_chunk_size(N_samples, 7)
for i in range(0, N_consensus, chunks_size):
x = min(chunks_size, N_consensus - i)
matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
M = np.zeros((x, N_samples))
for j in range(x):
coord = np.where(matched_clusters[0] == j)[0]
M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
clb_cum[i:min(i+chunks_size, N_consensus)] = M
# Done with collapsing the hyper-edges into a single meta-hyper-edge,
# for each of the (N_consensus - 1) meta-clusters.
del hypergraph_adjacency
gc.collect()
# Each object will now be assigned to its most associated meta-cluster.
chunks_size = get_chunk_size(N_consensus, 4)
N_chunks, remainder = divmod(N_samples, chunks_size)
if N_chunks == 0:
null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
else:
szumsz = np.zeros(0)
for i in range(N_chunks):
M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
szumsz = np.append(szumsz, M.sum(axis = 0))
if remainder != 0:
M = clb_cum[:, N_chunks*chunks_size:N_samples]
szumsz = np.append(szumsz, M.sum(axis = 0))
null_columns = np.where(szumsz == 0)[0]
if null_columns.size != 0:
print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
"in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)
random_state = np.random.RandomState()
tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
(N_consensus, N_samples), "Temporary matrix to help with "
"collapsing to meta-hyper-edges", filters = FILTERS)
chunks_size = get_chunk_size(N_samples, 2)
N_chunks, remainder = divmod(N_consensus, chunks_size)
if N_chunks == 0:
tmp[:] = random_state.rand(N_consensus, N_samples)
else:
for i in range(N_chunks):
tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
if remainder !=0:
tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)
expr = tables.Expr("clb_cum + (tmp / 10000)")
expr.set_output(clb_cum)
expr.eval()
expr = tables.Expr("abs(tmp)")
expr.set_output(tmp)
expr.eval()
chunks_size = get_chunk_size(N_consensus, 2)
N_chunks, remainder = divmod(N_samples, chunks_size)
if N_chunks == 0:
sum_diag = tmp[:].sum(axis = 0)
else:
sum_diag = np.empty(0)
for i in range(N_chunks):
M = tmp[:, i*chunks_size:(i+1)*chunks_size]
sum_diag = np.append(sum_diag, M.sum(axis = 0))
if remainder != 0:
M = tmp[:, N_chunks*chunks_size:N_samples]
sum_diag = np.append(sum_diag, M.sum(axis = 0))
fileh.remove_node(fileh.root.consensus_group, "tmp")
# The corresponding disk space will be freed after a call to 'fileh.close()'.
inv_sum_diag = np.reciprocal(sum_diag.astype(float))
if N_chunks == 0:
clb_cum *= inv_sum_diag
max_entries = np.amax(clb_cum, axis = 0)
else:
max_entries = np.zeros(N_samples)
for i in range(N_chunks):
clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
if remainder != 0:
clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)
cluster_labels = np.zeros(N_samples, dtype = int)
winner_probabilities = np.zeros(N_samples)
chunks_size = get_chunk_size(N_samples, 2)
for i in reversed(range(0, N_consensus, chunks_size)):
ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
cluster_labels[ind[1]] = i + ind[0]
winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]
# Done with competing for objects.
cluster_labels = one_to_max(cluster_labels)
print("INFO: Cluster_Ensembles: MCLA: delivering "
"{} clusters.".format(np.unique(cluster_labels).size))
print("INFO: Cluster_Ensembles: MCLA: average posterior "
"probability is {}".format(np.mean(winner_probabilities)))
if cluster_labels.size <= 7:
print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
print(winner_probabilities)
        print("INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
print(clb_cum)
fileh.remove_node(fileh.root.consensus_group, "clb_cum")
fileh.close()
return cluster_labels | f7059c0afd6f346d82ec36eae90f6c3fa8459dad | 32,714 |
import logging
import sys
def parse_sampleids(samplelabel,ids):
"""
Parse the label id according to the given sample labels
Parameter:
samplelabel: a string of labels, like '0,2,3' or 'treat1,treat2,treat3'
ids: a {samplelabel:index} ({string:int})
Return:
(a list of index, a list of index labels)
"""
# labels
idsk=[""]*len(ids);
    for (k, v) in ids.items():
idsk[v]=k;
if samplelabel == None:
groupidslabel=(ids.keys());
groupids=[ids[x] for x in groupidslabel];
return (groupids,groupidslabel);
try:
groupids=[int(x) for x in samplelabel.split(',')];
groupidslabel=[idsk[x] for x in groupids];
except ValueError:
groupidstr=samplelabel.split(',');
groupids=[];
groupidslabel=[];
for gp in groupidstr:
if gp not in ids:
logging.error('Sample label '+gp+' does not match records in your count table.');
logging.error('Sample labels in your count table: '+','.join(idsk));
sys.exit(-1);
groupids+=[ids[gp]];
groupidslabel+=[idsk[ids[gp]]];
logging.debug('Given sample labels: '+samplelabel);
logging.debug('Converted index: '+' '.join([str(x) for x in groupids]));
return (groupids,groupidslabel); | 448795c6f7736f52dfdb2e9d894b185fae1e17e2 | 32,715 |
from datetime import datetime
def iso_date(iso_string):
""" from iso string YYYY-MM-DD to python datetime.date
Note: if only year is supplied, we assume month=1 and day=1
    This function is no longer used; dates from lists are always strings
"""
if len(iso_string) == 4:
iso_string = iso_string + '-01-01'
d = datetime.strptime(iso_string, '%Y-%m-%d')
return d.date() | 7f29b22744d384187e293c546d4c28790c211e99 | 32,716 |
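A short illustration of the year-only fallback described in the docstring:
print(iso_date("2019-07-14"))  # 2019-07-14
print(iso_date("2019"))        # 2019-01-01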
def summary_dist_xdec(res, df1, df2):
"""
res is dictionary of summary-results DataFrames.
df1 contains results variables for baseline policy.
df2 contains results variables for reform policy.
returns augmented dictionary of summary-results DataFrames.
"""
# create distribution tables grouped by xdec
res['dist1_xdec'] = \
create_distribution_table(df1, 'weighted_deciles',
'expanded_income')
df2['expanded_income_baseline'] = df1['expanded_income']
res['dist2_xdec'] = \
create_distribution_table(df2, 'weighted_deciles',
'expanded_income_baseline')
del df2['expanded_income_baseline']
# return res dictionary
return res | 408bf1c8916d5338dbc01f41acb57dcc37e009e9 | 32,717 |
def is_app_running(appname):
"""Tries to determine if the application in appname is currently
running"""
display.display_detail('Checking if %s is running...' % appname)
proc_list = get_running_processes()
matching_items = []
if appname.startswith('/'):
# search by exact path
matching_items = [item for item in proc_list
if item == appname]
elif appname.endswith('.app'):
# search by filename
matching_items = [item for item in proc_list
if '/'+ appname + '/Contents/MacOS/' in item]
else:
# check executable name
matching_items = [item for item in proc_list
if item.endswith('/' + appname)]
if not matching_items:
# try adding '.app' to the name and check again
matching_items = [item for item in proc_list
if '/'+ appname + '.app/Contents/MacOS/' in item]
if matching_items:
# it's running!
display.display_debug1('Matching process list: %s' % matching_items)
display.display_detail('%s is running!' % appname)
return True
# if we get here, we have no evidence that appname is running
return False | 4d8a7c50ce38b36c6900d3482a8e6aadb98c7b5d | 32,718 |
def moore_to_basu(moore, rr, lam):
"""Returns the coordinates, speeds, and accelerations in BasuMandal2007's
convention.
Parameters
----------
moore : dictionary
        A dictionary containing values for the q's, u's and u dots.
rr : float
Rear wheel radius.
lam : float
Steer axis tilt.
Returns
-------
basu : dictionary
A dictionary containing the coordinates, speeds and accelerations.
"""
m = moore
basu = {}
s3 = sin(m['q3'])
c3 = cos(m['q3'])
s4 = sin(m['q4'])
c4 = cos(m['q4'])
basu['x'] = rr * s3 * s4 - m['q1']
basu['y'] = rr * c3 * s4 + m['q2']
basu['z'] = rr * c4
basu['theta'] = -m['q3']
basu['psi'] = pi / 2.0 - m['q4']
basu['phi'] = pi + lam - m['q5']
basu['betar'] = -m['q6']
basu['psif'] = -m['q7']
basu['betaf'] = -m['q8']
basu['xd'] = rr * (c3 * s4 * m['u3'] + s3 * c4 * m['u4']) - m['u1']
basu['yd'] = rr * (-s3 * s4 * m['u3'] + c3 * c4 * m['u4']) + m['u2']
basu['zd'] = -rr * m['u4'] * s4
basu['thetad'] = -m['u3']
basu['psid'] = -m['u4']
basu['phid'] = -m['u5']
basu['betard'] = -m['u6']
basu['psifd'] = -m['u7']
basu['betafd'] = -m['u8']
basu['xdd'] = (rr * (-s3 * s4 * m['u3']**2 + c3 * c4 * m['u3'] * m['u4'] +
c3 * s4 * m['u3p'] + c3 * c4 * m['u3'] * m['u4'] - s3
* s4 * m['u4']**2 + s3 * c4 * m['u4p']) - m['u1p'])
basu['ydd'] = (m['u2p'] - rr * c3 * s4 * m['u3']**2 - rr * s3 * c4 *
m['u3'] * m['u4'] - rr * s3 * s4 * m['u3p'] - rr * s3 * c4 *
m['u3'] * m['u4'] - rr * c3 * s4 * m['u4']**2 + rr * c3 * c4
* m['u4p'])
basu['zdd'] = -rr * (m['u4p'] * s4 + m['u4']**2 * c4)
basu['thetadd'] = -m['u3p']
basu['psidd'] = -m['u4p']
basu['phidd'] = -m['u5p']
basu['betardd'] = -m['u6p']
basu['psifdd'] = -m['u7p']
basu['betafdd'] = -m['u8p']
return basu | f599c2f5226dc4a12de73e1ddc360b6176d915ed | 32,719 |
def get_username(sciper):
"""
return username of user
"""
attribute = 'uid'
response = LDAP_search(
pattern_search='(uniqueIdentifier=' + sciper + ')',
attribute=attribute
)
return response[0]['attributes'][attribute][0] | 9da92bb2f1b0b733a137ed0bf62d8817c9c13ed8 | 32,720 |
def number_to_string(s, number):
"""
    :param s: the word the user entered
    :param number: string of digit indices describing a possible anagram
    :return: the rearranged word
"""
word = ''
for i in number:
word += s[int(i)]
return word | 7997b20264d0750e2b671a04aacb56c2a0559d8c | 32,721 |
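A one-line illustration with made-up values:
print(number_to_string("dog", "210"))  # god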
def mean(num_lst):
"""
Calculates the mean of a list of numbers
Parameters
----------
num_lst : list
List of numbers to calculate the average of
Returns
-------
The average/mean of num_lst
Examples
--------
>>> mean([1,2,3,4,5])
3.0
"""
return sum(num_lst) / len(num_lst) | bc6f86fc793bad165afc8f319a3094f3fae91361 | 32,722 |
import pandas
def development_create_database(df_literature, df_inorganics, df_predictions, inp):
"""
Create mass transition database.
Create mass transition database based on literature, inorganic and prediction data.
Parameters
----------
df_literature : dataframe
Dataframe with parsed literature data.
df_inorganics : dataframe
Dataframe with parsed inorganics data.
df_predictions : dataframe
Dataframe with parsed prediction data.
inp : dict
Input dictionary.
Returns
-------
df_database : dataframe
Dataframe with joined mass transitions.
"""
# Preallocate database
df_database = pandas.DataFrame()
# Append database with input
df_database = df_database.append(df_literature, sort = False)
df_database = df_database.append(df_inorganics, sort = False)
df_database = df_database.append(df_predictions, sort = False)
# Fill database with standard parameter
df_database['EP [V]'].fillna(value = 10, inplace = True)
df_database['CXP [V]'].fillna(value = 4, inplace = True)
df_database['DP [V]'].fillna(value = 0, inplace = True)
df_database = df_database.reset_index(drop = True)
# Regression for DP
inp = development_create_database_linear_declustering_potential(df_literature, inp)
DP_slope = inp['DP_slope']
DP_intercept = inp['DP_intercept']
# Cycle analytes
for index, row in df_database.iterrows():
# Set declustering potential
if row['DP [V]'] == 0:
df_database.at[index,'DP [V]'] = round(DP_slope*row['Q1 [m/z]'] + DP_intercept)
elif row['DP [V]'] >= 200:
df_database.at[index,'DP [V]'] = 200
else:
None
# Set minimal and maximal collision energy
if row['CE [V]'] < 5:
df_database.at[index,'CE [V]'] = 5
elif row['CE [V]'] > 130:
df_database.at[index,'CE [V]'] = 130
else:
None
# Set entrance potential
if row['EP [V]'] > 15:
df_database.at[index,'EP [V]'] = 15
# Set collision cell exit potential
if row['CXP [V]'] > 55:
df_database.at[index, 'CXP [V]'] = 55
# Prioritize mass transitions of inhouse and literature data before predictions
df_database = df_database.drop_duplicates(subset=['compound_id','mode','Q1 [m/z]','Q3 [m/z]'], keep = 'first')
return df_database | 58b52d84ca98d770d3ace4a0b4dae4b369883284 | 32,723 |
import requests
from typing import IO
import hashlib
def _stream_to_file(
r: requests.Response,
file: IO[bytes],
chunk_size: int = 2**14,
progress_bar_min_bytes: int = 2**25,
) -> str:
"""Stream the response to the file, returning the checksum.
:param progress_bar_min_bytes: Minimum number of bytes to display a progress bar for. Default is 32MB
"""
# check header to get content length, in bytes
total_length = int(r.headers.get("content-length", 0))
md5 = hashlib.md5()
streamer = r.iter_content(chunk_size=chunk_size)
display_progress = total_length > progress_bar_min_bytes
if display_progress:
progress.start()
task_id = progress.add_task("Downloading", total=total_length)
for chunk in streamer: # 16k
file.write(chunk)
md5.update(chunk)
if display_progress:
progress.update(task_id, advance=len(chunk))
if display_progress:
progress.stop()
return md5.hexdigest() | 44d0529a5fdb0a14ac4dcddbfecf23442678a75a | 32,724 |
def get_user(key: str, user: int, type_return: str = 'dict', **kwargs):
"""Retrieve general user information."""
params = {
'k': key,
'u': user,
'm': kwargs['mode'] if 'mode' in kwargs else 0,
'type': kwargs['type_'] if 'type_' in kwargs else None,
'event_days': kwargs['event_days'] if 'event_days' in kwargs else 1}
r = req.get(urls['user'], params=params)
return from_json(r.text, type_return) | 1b7a5c144267c012a69aff2f02f771d848e8883a | 32,725 |
def infer_schema(example, binary_features=[]):
"""Given a tf.train.Example, infer the Spark DataFrame schema (StructFields).
Note: TensorFlow represents both strings and binary types as tf.train.BytesList, and we need to
disambiguate these types for Spark DataFrames DTypes (StringType and BinaryType), so we require a "hint"
from the caller in the ``binary_features`` argument.
Args:
:example: a tf.train.Example
:binary_features: a list of tf.train.Example features which are expected to be binary/bytearrays.
Returns:
A DataFrame StructType schema
"""
def _infer_sql_type(k, v):
# special handling for binary features
if k in binary_features:
return BinaryType()
if v.int64_list.value:
result = v.int64_list.value
sql_type = LongType()
elif v.float_list.value:
result = v.float_list.value
sql_type = DoubleType()
else:
result = v.bytes_list.value
sql_type = StringType()
if len(result) > 1: # represent multi-item tensors as Spark SQL ArrayType() of base types
return ArrayType(sql_type)
else: # represent everything else as base types (and empty tensors as StringType())
return sql_type
return StructType([StructField(k, _infer_sql_type(k, v), True) for k, v in sorted(example.features.feature.items())]) | bd952c278fafa809342b27755e2208b72bd25964 | 32,726 |
import sys
def mongo_sync_status(remongo=False, update_all=False, user=None, xform=None):
"""Check the status of records in the mysql db versus mongodb. At a
minimum, return a report (string) of the results.
Optionally, take action to correct the differences, based on these
parameters, if present and defined:
remongo -> if True, update the records missing in mongodb
(default: False)
update_all -> if True, update all the relevant records (default: False)
user -> if specified, apply only to the forms for the given user
(default: None)
xform -> if specified, apply only to the given form (default: None)
"""
qs = XForm.objects.only('id_string', 'user').select_related('user')
if user and not xform:
qs = qs.filter(user=user)
elif user and xform:
qs = qs.filter(user=user, id_string=xform.id_string)
else:
qs = qs.all()
total = qs.count()
found = 0
done = 0
total_to_remongo = 0
report_string = ""
for xform in queryset_iterator(qs, 100):
# get the count
user = xform.user
instance_count = Instance.objects.filter(xform=xform).count()
userform_id = "%s_%s" % (user.username, xform.id_string)
mongo_count = mongo_instances.find(
{common_tags.USERFORM_ID: userform_id}).count()
if instance_count != mongo_count or update_all:
line = "user: %s, id_string: %s\nInstance count: %d\t"\
"Mongo count: %d\n---------------------------------"\
"-----\n" % (
user.username, xform.id_string, instance_count,
mongo_count)
report_string += line
found += 1
total_to_remongo += (instance_count - mongo_count)
# should we remongo
if remongo or (remongo and update_all):
if update_all:
sys.stdout.write(
"Updating all records for %s\n--------------------"
"---------------------------\n" % xform.id_string)
else:
sys.stdout.write(
"Updating missing records for %s\n----------------"
"-------------------------------\n"
% xform.id_string)
update_mongo_for_xform(
xform, only_update_missing=not update_all)
done += 1
sys.stdout.write(
"%.2f %% done ...\r" % ((float(done) / float(total)) * 100))
# only show stats if we are not updating mongo, the update function
# will show progress
if not remongo:
line = "Total # of forms out of sync: %d\n" \
"Total # of records to remongo: %d\n" % (found, total_to_remongo)
report_string += line
return report_string | 3e643de52e51d85913cb01a859bef17b7e47d939 | 32,727 |
import collections
def create_batches_of_sentence_ids(sentences, batch_equal_size, max_batch_size):
"""
Groups together sentences into batches
If max_batch_size is positive, this value determines the maximum number of sentences in each batch.
If max_batch_size has a negative value, the function dynamically creates the batches such that each batch contains abs(max_batch_size) words.
    Returns a list of lists with sentence ids.
"""
batches_of_sentence_ids = []
if batch_equal_size == True:
sentence_ids_by_length = collections.OrderedDict()
sentence_length_sum = 0.0
for i in range(len(sentences)):
length = len(sentences[i])
if length not in sentence_ids_by_length:
sentence_ids_by_length[length] = []
sentence_ids_by_length[length].append(i)
for sentence_length in sentence_ids_by_length:
if max_batch_size > 0:
batch_size = max_batch_size
else:
batch_size = int((-1 * max_batch_size) / sentence_length)
for i in range(0, len(sentence_ids_by_length[sentence_length]), batch_size):
batches_of_sentence_ids.append(sentence_ids_by_length[sentence_length][i:i + batch_size])
else:
current_batch = []
max_sentence_length = 0
for i in range(len(sentences)):
current_batch.append(i)
if len(sentences[i]) > max_sentence_length:
max_sentence_length = len(sentences[i])
if (max_batch_size > 0 and len(current_batch) >= max_batch_size) \
or (max_batch_size <= 0 and len(current_batch)*max_sentence_length >= (-1 * max_batch_size)):
batches_of_sentence_ids.append(current_batch)
current_batch = []
max_sentence_length = 0
if len(current_batch) > 0:
batches_of_sentence_ids.append(current_batch)
return batches_of_sentence_ids | 8db116e73e791d7eb72f080b408bb52d60481db7 | 32,728 |
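An illustrative call with toy sentences, showing both batching modes:
sentences = [["a"], ["b", "c"], ["d", "e"], ["f", "g", "h"]]
print(create_batches_of_sentence_ids(sentences, batch_equal_size=False, max_batch_size=2))
# [[0, 1], [2, 3]]
print(create_batches_of_sentence_ids(sentences, batch_equal_size=True, max_batch_size=2))
# [[0], [1, 2], [3]]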
def com_com_distances_axis(universe, mda_selection_pairs, fstart=0, fend=-1, fstep=1, axis='z'):
"""Center of mass to Center of mass distance in one dimension (along an axis).
    This function computes the distance between the centers of mass of pairs of MDAnalysis atom selections
    across the MD trajectory, but only uses the 1d coordinate of the specified axis.
Args:
universe (MDAnalysis.Universe): The MDAnalysis universe object to run the analysis on.
mda_selection_pairs (list): A list of 2 element lists or tuples containing pairs of MDAnalsysis
atom selection objects to compute the distance between.
fstart (int): Optional, the first frame to include in the analysis. Default: 0 (or the first frame)
fend (int): Optional, the last frame to include in the analysis. Default: -1 (or the last frame)
fstep (int): Optional, the interval between frames in the analysis when looping from fstart to fend.
Default: 1 (or every frame)
axis (str): Optional, the 1d axis to compute the distance in. Default: 'z' (or the z axis)
Returns:
(np.array), (list): Returns two outputs. The first is an Numpy array with the timeseries simulation times
corresponding to the frames in the analysis. The second is list of Numpy arrays with the distances; the
order in the list corresponds to the atom selection pairs in the mda_selection_pairs input.
"""
dir_ind = 2
    if axis == 'x':
        dir_ind = 0
    elif axis == 'y':
dir_ind = 1
#indices = mda_selection.indices
fstart, fend = _adjust_frame_range_for_slicing(fstart, fend, len(universe.trajectory))
times = []
pair_dists = []
for pair in mda_selection_pairs:
pair_dists.append([])
for frame in universe.trajectory[fstart:fend:fstep]:
times.append(frame.time)
i = 0
for pair in mda_selection_pairs:
sel_1 = pair[0]
sel_2 = pair[1]
com_1 = sel_1.atoms.center_of_mass()
com_2 = sel_2.atoms.center_of_mass()
norm_val_1 = com_1[dir_ind]
norm_val_2 = com_2[dir_ind]
dist = np.abs(norm_val_2 - norm_val_1)
pair_dists[i].append(dist)
i+=1
times = np.array(times)
i=0
for vals in pair_dists:
pair_dists[i] = np.array(vals)
i+=1
return times, pair_dists | 7005d13e1f18597865ca5c5dc14ec09efd7c63e1 | 32,729 |
def min_vertex_cover(G, sampler=None, **sampler_args):
"""Returns an approximate minimum vertex cover.
Defines a QUBO with ground states corresponding to a minimum
vertex cover and uses the sampler to sample from it.
A vertex cover is a set of vertices such that each edge of the graph
is incident with at least one vertex in the set. A minimum vertex cover
is the vertex cover of smallest size.
Parameters
----------
G : NetworkX graph
The graph on which to find a minimum vertex cover.
sampler
A binary quadratic model sampler. A sampler is a process that
samples from low energy states in models defined by an Ising
equation or a Quadratic Unconstrained Binary Optimization
Problem (QUBO). A sampler is expected to have a 'sample_qubo'
and 'sample_ising' method. A sampler is expected to return an
iterable of samples, in order of increasing energy. If no
sampler is provided, one must be provided using the
`set_default_sampler` function.
sampler_args
Additional keyword parameters are passed to the sampler.
Returns
-------
vertex_cover : list
List of nodes that form a minimum vertex cover, as
determined by the given sampler.
Examples
--------
This example uses a sampler from
`dimod <https://github.com/dwavesystems/dimod>`_ to find a minimum vertex
cover for a Chimera unit cell. Both the horizontal (vertices 0,1,2,3) and
vertical (vertices 4,5,6,7) tiles connect to all 16 edges, so repeated
executions can return either set.
>>> import dwave_networkx as dnx
>>> import dimod
>>> sampler = dimod.ExactSolver() # small testing sampler
>>> G = dnx.chimera_graph(1, 1, 4)
>>> G.remove_node(7) # to give a unique solution
>>> dnx.min_vertex_cover(G, sampler)
[4, 5, 6]
Notes
-----
Samplers by their nature may not return the optimal solution. This
function does not attempt to confirm the quality of the returned
sample.
References
----------
https://en.wikipedia.org/wiki/Vertex_cover
https://en.wikipedia.org/wiki/Quadratic_unconstrained_binary_optimization
.. [AL] Lucas, A. (2014). Ising formulations of many NP problems.
Frontiers in Physics, Volume 2, Article 5.
"""
return min_weighted_vertex_cover(G, None, sampler, **sampler_args) | b8681077d0bbb8504cdf5c96250e668bbcfe6d4e | 32,730 |
def update_message_id(message_id, peername):
"""
Update message id for peername in global PEERS_MESSAGE_IDS dict.
Return True iff message_id was updated, else False.
"""
global PEERS_MESSAGE_IDS
if peername not in PEERS_MESSAGE_IDS.keys():
print('[DEBUG] No record of latest message_id for this peer.')
PEERS_MESSAGE_IDS[peername] = int(message_id)
return True
elif int(message_id) > PEERS_MESSAGE_IDS[peername]:
print('[DEBUG] message_id still fresh.')
PEERS_MESSAGE_IDS[peername] = int(message_id)
return True
else:
print('[DEBUG] update_message_id decided that message id'
'%d belonging to peer %s is old. PEERS_MESSAGE_IDS is:'
% (int(message_id), peername))
print(PEERS_MESSAGE_IDS)
return False | 93905e9a485a6f1688fb082de2ebb53d1c048139 | 32,731 |
def quantile_bin_array(data, bins=6):
"""Returns symbolified array with equal-quantile binning.
Parameters
----------
data : array
Data array of shape (time, variables).
bins : int, optional (default: 6)
Number of bins.
Returns
-------
symb_array : array
Converted data of integer type.
"""
T, N = data.shape
# get the bin quantile steps
bin_edge = int(np.ceil(T / float(bins)))
symb_array = np.zeros((T, N), dtype='int32')
# get the lower edges of the bins for every time series
edges = np.sort(data, axis=0)[::bin_edge, :].T
bins = edges.shape[1]
# This gives the symbolic time series
symb_array = (data.reshape(T, N, 1) >= edges.reshape(1, N, bins)).sum(
axis=2) - 1
return symb_array.astype('int32') | 87d8c64a30581b700d1a4674e4527882be99444f | 32,732 |
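A hedged usage sketch on random data; it assumes numpy is imported as np, which the function body already relies on:
import numpy as np

np.random.seed(0)
data = np.random.randn(100, 2)             # (time, variables)
symb = quantile_bin_array(data, bins=4)
print(symb.shape, symb.min(), symb.max())  # (100, 2) 0 3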
import _socket
def wrap_socket(sock: _socket.socket) -> AsyncSocket:
"""
Wraps a standard socket into an async socket
"""
return AsyncSocket(sock) | 70a6829bdf9048514ffe5bd5b831952f1ecd8e89 | 32,733 |
def _calculate_outer_product_steps(signed_steps, n_steps, dim_x):
"""Calculate array of outer product of steps.
Args:
signed_steps (np.ndarray): Square array with either pos or neg steps returned
by :func:`~estimagic.differentiation.generate_steps.generate_steps` function
n_steps (int): Number of steps needed. For central methods, this is
the number of steps per direction. It is 1 if no Richardson extrapolation
is used.
dim_x (int): Dimension of input vector x.
Returns:
outer_product_steps (np.ndarray): Array with outer product of steps. Has
dimension (n_steps, 1, dim_x, dim_x).
"""
outer_product_steps = np.array(
[np.outer(signed_steps[j], signed_steps[j]) for j in range(n_steps)]
).reshape(n_steps, 1, dim_x, dim_x)
return outer_product_steps | 18aeadc5cb7866e6b99b5da9a2b9e6bc6ebb7c44 | 32,734 |
def compute_lima_on_off_image(n_on, n_off, a_on, a_off, kernel):
"""Compute Li & Ma significance and flux images for on-off observations.
Parameters
----------
n_on : `~gammapy.maps.WcsNDMap`
Counts image
n_off : `~gammapy.maps.WcsNDMap`
Off counts image
a_on : `~gammapy.maps.WcsNDMap`
Relative background efficiency in the on region
a_off : `~gammapy.maps.WcsNDMap`
Relative background efficiency in the off region
kernel : `astropy.convolution.Kernel2D`
Convolution kernel
Returns
-------
images : dict
Dictionary containing result maps
Keys are: significance, n_on, background, excess, alpha
See also
--------
gammapy.stats.significance_on_off
"""
# Kernel is modified later make a copy here
kernel = deepcopy(kernel)
kernel.normalize("peak")
n_on_conv = n_on.convolve(kernel.array).data
a_on_conv = a_on.convolve(kernel.array).data
alpha_conv = a_on_conv / a_off.data
significance_conv = significance_on_off(
n_on_conv, n_off.data, alpha_conv, method="lima"
)
with np.errstate(invalid="ignore"):
background_conv = alpha_conv * n_off.data
excess_conv = n_on_conv - background_conv
return {
"significance": n_on.copy(data=significance_conv),
"n_on": n_on.copy(data=n_on_conv),
"background": n_on.copy(data=background_conv),
"excess": n_on.copy(data=excess_conv),
"alpha": n_on.copy(data=alpha_conv),
} | a9bde10722cbed4dab79f157ee478c9b5ba35d86 | 32,735 |
def data_preprocess(ex, mode='uniform', z_size=20):
"""
    Convert image dtype and scale the image to the range [-1, 1]
:param z_size:
:param ex:
:param mode:
:return:
"""
image = ex['image']
image = tf.image.convert_image_dtype(image, tf.float32)
image = tf.reshape(image, [-1])
image = image * 2 - 1.0
if mode == 'uniform':
input_z = tf.random.uniform(
shape=(z_size,), minval=-1.0, maxval=1.0
)
elif mode == 'normal':
input_z = tf.random.normal(shape=(z_size,))
return input_z, image | 95131b5e03afbc0a3c797570a48d49ca93f15116 | 32,736 |
def p2wpkh(pubkey: PubKey, network: str = 'mainnet') -> bytes:
"""Return the p2wpkh (bech32 native) SegWit address."""
network_index = _NETWORKS.index(network)
ec = _CURVES[network_index]
pubkey = to_pubkey_bytes(pubkey, True, ec)
h160 = hash160(pubkey)
return b32address_from_witness(0, h160, network) | ecb4c60871e0dc362d3576d2f02d8e07cd47614e | 32,737 |
import re
def is_not_from_subdomain(response, site_dict):
"""
Ensures the response's url isn't from a subdomain.
:param obj response: The scrapy response
:param dict site_dict: The site object from the JSON-File
:return bool: Determines if the response's url is from a subdomain
"""
root_url = re.sub(re_url_root, '', site_dict["url"])
return get_allowed_domain(response.url) == root_url | d3fa99cc8a91942de5f3ec9cb8249c62c7488821 | 32,738 |
import random
def get_affiliation():
"""Return a school/organization affiliation."""
return random.choice(AFFILIATIONS) | 41356c95447352b9ab96db5783efb5ce511e0d00 | 32,739 |
def update_item_feature(train, num_item, user_features, lambda_item, nz_users_indices, robust=False):
"""
Update item feature matrix
:param train: training data, sparse matrix of shape (num_item, num_user)
:param num_item: number of items
:param user_features: factorized user features, dense matrix of shape (num_feature, num_user)
:param lambda_item: ridge regularization parameter
:param nz_users_indices: list of arrays, contains the non-zero indices of each row (item) in train
    :param robust: True to enable robustness against singular matrices
:return: item_features: updated factorized item features, dense matrix of shape (num_feature, num_item)
"""
num_features = user_features.shape[0]
item_features = np.zeros((num_features, num_item))
for item in range(num_item):
y = train[item, nz_users_indices[item]].todense().T # non-zero elements of line n° item of train
x = user_features[:, nz_users_indices[item]] # corresponding columns of user_features
nnz = nz_users_indices[item].shape[0]
# Solution to ridge problem min(|X.T @ w - y|^2 + lambda * |w|^2)
wy = x.dot(y)
if not robust:
w = np.linalg.solve(x.dot(x.T) + lambda_item * nnz * np.identity(num_features), wy)
else:
w = np.linalg.lstsq(x.dot(x.T) + lambda_item * nnz * np.identity(num_features), wy)[0]
item_features[:, item] = w.ravel()
return item_features | 95b56754830cdcd8ddbfcabc25adcad1698903af | 32,740 |
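A small, hypothetical ALS-style sketch, assuming numpy and scipy.sparse are available and every rating is observed:
import numpy as np
import scipy.sparse as sp

np.random.seed(0)
ratings = np.random.randint(1, 6, size=(10, 8))   # 10 items x 8 users
train = sp.csr_matrix(ratings)
user_features = np.random.rand(3, 8)              # 3 latent features per user
nz_users = [train[i].nonzero()[1] for i in range(10)]
item_features = update_item_feature(train, 10, user_features, 0.1, nz_users)
print(item_features.shape)  # (3, 10)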
def generate_mutation(model, mutation_type):
"""
Generate a model mutation.
Create the mutation class
Parameters:
model (dict): the model dictionary from settings
mutation_type (str): the mutation type (create, delete, update)
Returns:
graphene.Mutation.Field: the mutation field
"""
mutation_class_name = "{}{}".format(
model['name'].title(), mutation_type.title())
model_class = import_string(model['path'])
arguments = get_arguments(model_class, mutation_type)
mutate = get_mutate(model, mutation_type, model_class, mutation_class_name)
# create the mutation class
globals()[mutation_class_name] = type(mutation_class_name, (graphene.Mutation,), {
'__module__': __name__,
"Arguments": type("Arguments", (), arguments),
"message": graphene.String(),
"ingredient": graphene.Field(globals()["{}Type".format(model['name'].title())]),
"mutate": mutate
})
return globals()[mutation_class_name].Field() | 1e1c39a76508c8f33179087786c08203af57c036 | 32,741 |
import warnings
import subprocess
def call_and_return_stdout(args: tp.Union[str, tp.List[str]],
timeout: tp.Optional[tp.Union[str, int]] = None,
encoding: tp.Optional[str] = None,
expected_return_code: tp.Optional[int] = None,
**kwargs) -> tp.Union[bytes, str]:
"""
    Call a process and return its stdout.
Everything in kwargs will be passed to subprocess.Popen
A bytes object will be returned if encoding is not defined, else stdout will be decoded
according to specified encoding.
.. deprecated:: Use :code:`subprocess.check_output` instead.
:param args: arguments to run the program with. Can be either a string or a list of strings.
:param timeout: amount of seconds to wait for the process result. If process does not complete
within this time, it will be sent a SIGKILL. Can be also a time string. If left at default,
ie. None, timeout won't be considered at all.
:param encoding: encoding with which to decode stdout. If none is passed, it will be returned as
a bytes object
:param expected_return_code: an expected return code of this process. 0 is the default. If
        process returns anything else, ProcessFailed will be raised. If left default (None) return
code won't be checked at all
:raises ProcessFailed: process' result code was different from the requested
:raises TimeoutError: timeout was specified and the process didn't complete
"""
warnings.warn('This is deprecated, use subprocess.check_output instead', DeprecationWarning)
kwargs['stdout'] = subprocess.PIPE
stdout_list = []
proc = subprocess.Popen(args, **kwargs)
fut = read_nowait(proc, stdout_list)
if timeout is not None:
timeout = parse_time_string(timeout)
try:
proc.wait(timeout=timeout)
except subprocess.TimeoutExpired:
proc.kill()
proc.wait()
raise TimeoutError('Process did not complete within %s seconds' % (timeout,))
finally:
fut.result()
if encoding is None:
result = b''.join(stdout_list)
else:
result = ''.join((row.decode(encoding) for row in stdout_list))
if expected_return_code is not None:
if proc.returncode != expected_return_code:
raise ProcessFailed(proc.returncode, result)
return result | 781de2cc7b3dbdfc3676bcb234db33ba985b7633 | 32,742 |
def _wf_to_char(string):
"""Wordfast &'XX; escapes -> Char"""
if string:
for code, char in WF_ESCAPE_MAP:
string = string.replace(code, char.encode('utf-8'))
string = string.replace("\\n", "\n").replace("\\t", "\t")
return string | 9270f4ff5a03265956d006bd08d04e417a0c5a14 | 32,743 |
import datetime
import numpy as np
import pandas as pd
def agg_15_min_load_profile(load_profile_df):
"""
Aggregates 1-Hz load profile by taking average demand over 15-min
increments.
"""
s_in_15min = 15 * 60
# prepare idx slices
start_idxs = np.arange(0, len(load_profile_df), s_in_15min)
end_idxs = np.arange(s_in_15min, len(load_profile_df) + s_in_15min, s_in_15min)
# generate list of avg kw over 15-min increments
avg_15min_kw = [] #init
for s_idx, e_idx in zip(start_idxs, end_idxs):
avg_15min_kw.append(load_profile_df['power_kW'][s_idx:e_idx].mean())
times = [] #init
for hour in range(24):
for minute in range(0, 60, 15):
times.append(str(datetime.time(hour, minute, 0)))
# create pd.DataFrame
agg_15min_load_profile_df = pd.DataFrame({'time': times,
'avg_power_kw': avg_15min_kw})
return agg_15min_load_profile_df | 6a92abf10b6f976d4b48bc7e6bfb4c0e44b1f4c5 | 32,744 |
import os
def icon(image):
# type (str) -> dict
"""Creates the application folder icon info for main menu items"""
return {"icon": os.path.join(MEDIA_URI, image)} | 151e20b26bcdcb001d3bcfc51e833157e1c202d3 | 32,745 |
def get_gitbuilder_hash(project=None, branch=None, flavor=None,
machine_type=None, distro=None,
distro_version=None):
"""
Find the hash representing the head of the project's repository via
querying a gitbuilder repo.
Will return None in the case of a 404 or any other HTTP error.
"""
# Alternate method for github-hosted projects - left here for informational
# purposes
# resp = requests.get(
# 'https://api.github.com/repos/ceph/ceph/git/refs/heads/master')
# hash = .json()['object']['sha']
(arch, release, _os) = get_distro_defaults(distro, machine_type)
if distro is None:
distro = _os.name
bp = get_builder_project()(
project,
dict(
branch=branch,
flavor=flavor,
os_type=distro,
os_version=distro_version,
arch=arch,
),
)
return bp.sha1 | 980abab1d3ff8bf1acdd0aec43f6ce5d5a2b6c45 | 32,746 |
def get_b16_add_conv_config():
"""Returns the ViT-B/16 configuration."""
config = ml_collections.ConfigDict()
config.patches = ml_collections.ConfigDict({'size': (16, 16)})
config.split = 'non-overlap'
config.slide_step = 12
config.hidden_size = 768
config.transformer = ml_collections.ConfigDict()
config.transformer.mlp_dim = 3072
config.transformer.num_heads = 12
config.transformer.num_layers = 12
config.transformer.attention_dropout_rate = 0.0
config.transformer.dropout_rate = 0.1
config.classifier = 'token'
config.representation_size = None
config.in_planes = 64
config.n_conv_layers = 2
config.kernel_size = 7
config.stride = max(1, (7 // 2) - 1)
config.padding = max(1, (7 // 2))
config.activation = nn.ReLU
config.conv_bias = False
config.pooling_kernel_size = 3
config.pooling_stride = 2
config.pooling_padding = 1
config.max_pool = True
return config | eae5f7f33acaf5931b11c7ed2f6d1b554c8a5254 | 32,747 |
def real_proto(request) -> programl_pb2.ProgramGraph:
"""A test fixture which enumerates one of 100 "real" protos."""
return request.param | 84f604626a1545e370aa92ab509329cc23e26aa5 | 32,748 |
def flat(arr):
"""Return arr flattened except for last axis."""
shape = arr.shape[:-1]
n_features = arr.shape[-1]
    return arr.reshape(np.prod(shape), n_features) | 8b9dd1b92c4fffe087345fa74fbc535e2ee41fbf | 32,749 |
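A one-liner showing the reshape, assuming numpy is imported as np:
import numpy as np

print(flat(np.zeros((4, 5, 3))).shape)  # (20, 3)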
import unittest
import sys
def test():
"""Runs the tests without code coverage"""
tests = unittest.TestLoader().discover("project/tests", pattern="test*.py")
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
sys.exit(result) | eac0edb1cdd76085cb0a958f10ee15876cf3fd34 | 32,750 |
from datetime import datetime
import time
def wait_while(f_logic, timeout, warning_timeout=None, warning_text=None, delay_between_attempts=0.5):
"""
    The inner loop keeps running as long as evaluating `f_logic()` is treated as `True`.
"""
warning_flag = False
start_time = datetime.now()
while True:
try:
result = f_logic()
except Exception:
pass
else:
if not result:
return True
elaps_time = (datetime.now() - start_time).total_seconds()
if warning_timeout is not None and elaps_time > warning_timeout and not warning_flag:
text_addon = '. {}'.format(warning_text) if warning_text else ''
logger.warning("Waiting time exceeded {}{}".format(warning_timeout, text_addon))
warning_flag = True
if timeout is not None and elaps_time > timeout:
return False
time.sleep(delay_between_attempts) | 0261083b54b1572833ea146663862fce5fe690a5 | 32,751 |
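An illustrative, hypothetical call; the lock-file path is made up, and the warning branch assumes a configured logger in the original module:
import os

done = wait_while(lambda: os.path.exists("/tmp/job.lock"),
                  timeout=30, warning_timeout=10,
                  warning_text="job is taking longer than expected")
print("lock released" if done else "timed out")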
def acute_lymphocytic_leukemia1():
"""Human Acute Lymphocytic Leukemia dataset (Patient 1).
This dataset was introduced in :cite:`Gawad_2014` and was used in:
* :cite:`B-SCITE` Figure 5.
* :cite:`infSCITE` Figure S16.
The size is n_cells × n_muts = 111 × 20
Returns
-------
:class:`anndata.AnnData`
        An anndata in which `.X` is the noisy input.
"""
adata = scp.io.read(
scp.ul.get_file("scphylo.datasets/real/acute_lymphocytic_leukemia1.h5ad")
)
return adata | 76f15e7a19da71fe5e4451104698dfaffdbf1799 | 32,752 |
def extract_module(start_queue, g, locality="top", max_to_crawl=100, max_depth=10):
"""
([rdflib.URI], rdflib.Graph) -> rdflib.Graph
    start_queue (list of rdflib.URI): resources for which we extract the module
g (rdflib.Graph): RDF graph
"""
ontomodule = Graph()
ontomodule.namespace_manager = g.namespace_manager
visited = []
to_crawl = start_queue
depth = 0
# crawl until the queue is not empty
while to_crawl:
print("size of to_crawl: {}, size of visited: {}, depth: {}".format(
len(to_crawl), len(visited), depth))
next_node = to_crawl.pop()
# control the depth
depth = depth + 1
if depth > max_depth:
break
assert not any(isinstance(x, BNode) for x in to_crawl), "Caught BNodes"
if next_node not in visited:
# mark nodes which we have already visited
visited = visited + [next_node]
successor_objs = get_successors(next_node, g, locality=locality)
for successor_obj in successor_objs:
if len(to_crawl) <= max_to_crawl:
to_crawl = to_crawl + successor_obj["uris"]
# add all triples
for triple in successor_obj["triples"]:
ontomodule.add(triple)
return ontomodule | 78e60670a8c8ed53471062380d5a9cf0aab70848 | 32,753 |
import torch
def softmax(x):
"""Softmax activation function
Parameters
----------
x : torch.tensor
"""
return torch.exp(x) / torch.sum(torch.exp(x), dim=1).view(-1, 1) | 739219efe04174fe7a2b21fb8aa98816679f8389 | 32,754 |
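A brief sanity check for the `softmax` function above (assuming torch is installed and the function is in scope): each row of the output is a probability distribution.

import torch

x = torch.tensor([[1.0, 2.0, 3.0],
                  [1.0, 1.0, 1.0]])
probs = softmax(x)
print(probs.sum(dim=1))  # each row sums to 1 (up to floating-point error)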
def comp_raw_bool_eqs(eq1str, eq2str):
""" Will compare two boolean equations to see if they are the same.
The equations can be written using the characters '&', '+' and '!'
for 'and', 'or' and 'not' respectively.
"""
(eqn1, eqn1vars) = OqeFuncUtils.get_vars_bool_eqn(eq1str)
(eqn2, eqn2vars) = OqeFuncUtils.get_vars_bool_eqn(eq2str)
if eqn1 == '' or eqn2 == '':
return -1
varlist = []
for i in eqn1vars:
if i not in varlist:
varlist.append(i)
for i in eqn2vars:
if i not in varlist:
varlist.append(i)
return OqeFuncUtils.comp_bool_eqs(eqn1, eqn2, varlist) | ce7af289add4294bf8a2a7835414cfb51358bc92 | 32,755 |
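The entry above delegates the parsing and comparison to OqeFuncUtils helpers that are not shown here. As a rough, self-contained illustration of the underlying idea (brute-force truth-table comparison over the shared variables), here is an independent sketch; the operator translation and the helper names below are assumptions made for illustration, not the OqeFuncUtils API.

import itertools
import re

def _to_python(eq):
    # Translate '&', '+', '!' into Python's and/or/not (illustrative only).
    return eq.replace('&', ' and ').replace('+', ' or ').replace('!', ' not ')

def same_bool_eqs(eq1, eq2):
    variables = sorted(set(re.findall(r'[A-Za-z_]\w*', eq1 + ' ' + eq2)))
    p1, p2 = _to_python(eq1), _to_python(eq2)
    for values in itertools.product([False, True], repeat=len(variables)):
        env = dict(zip(variables, values))
        if bool(eval(p1, {}, env)) != bool(eval(p2, {}, env)):
            return False
    return True

print(same_bool_eqs('a & b', 'b & a'))       # True
print(same_bool_eqs('!(a + b)', '!a & !b'))  # True (De Morgan)
print(same_bool_eqs('a + b', 'a & b'))       # False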
import satpy
import xarray
def get_integrated_scene(glm_files, start_scene=None):
    """Get an integrated scene.
    Given a set of GLM files, get a scene where quantities are summed or
    averaged over time.
ms = satpy.MultiScene.from_files(
glm_files,
"glm_l2",
time_threshold=10,
group_keys=["start_time"])
ms.load(["flash_extent_density"])
with xarray.set_options(keep_attrs=True):
sc = ms.blend(sum, scene=start_scene)
return sc | 71744bc23f961e630013ace0a85b1788f92924ff | 32,756 |
import sublime
def _pt_to_test_name(what, pt, view):
    """Helper used to convert a Sublime point to a test/bench function name."""
fn_names = []
pat = TEST_PATTERN.format(WHAT=what, **globals())
regions = view.find_all(pat, 0, r'\1', fn_names)
if not regions:
sublime.error_message('Could not find a Rust %s function.' % what)
return None
# Assuming regions are in ascending order.
indices = [i for (i, r) in enumerate(regions) if r.a <= pt]
if not indices:
        sublime.error_message('No %s functions found above the current point.' % what)
return None
return fn_names[indices[-1]] | 580cacda4b31dff3fd05f4218733f5f0c6388ddb | 32,757 |
import numpy as np
def load_log_weights(log_weights_root, iw_mode):
    """Loads the log_weights from the disk. It assumes a file structure of <log_weights_root>/<iw_mode>/*.npy
    containing multiple npy files. This function loads all the weights into a single numpy array, concatenating all npy files.
    Finally, it caches the result in a file stored at <log_weights_root>/<iw_mode>.npy.
    On subsequent calls, it reuses the cached file.
Args:
log_weights_root (str or pathlib.Path)
iw_mode (str)
Returns:
np.ndarray: log importance weights
"""
agg_weights_file = log_weights_root / f"{iw_mode}.npy"
agg_weights_dir = log_weights_root / iw_mode
assert agg_weights_dir.exists() or agg_weights_file.exists()
if not agg_weights_file.exists():
log_weights = np.concatenate(
[np.load(weight_file) for weight_file in agg_weights_dir.glob("*.npy")])
np.save(agg_weights_file, log_weights)
else:
log_weights = np.load(agg_weights_file)
print(f"{log_weights_root} / {iw_mode} has {len(log_weights):,} traces")
return log_weights | 78f633d55e1d3eedc31851a315294e6a15d381a0 | 32,758 |
import requests
import logging
from time import sleep
def pipelines_is_ready():
"""
Used to show the "pipelines is loading..." message
"""
url = f"{API_ENDPOINT}/{STATUS}"
try:
if requests.get(url).status_code < 400:
return True
except Exception as e:
logging.exception(e)
sleep(1) # To avoid spamming a non-existing endpoint at startup
return False | 219b0935092d09311a05816cf0c4345f9abee9f6 | 32,759 |
import os
def invTransect(T, sorted_ring_list, warnifnotunique=True):
"""Finds a transect that ends at T.
In the case there are more than one, if warnifnotunique=True,
    the user will be warned, but this may slow down transect generation.
    NOTE: it is unclear whether `warnifnotunique` is actually implemented.
Returns:
list of tuples of form [(pt, ring_idx, seg_idx, t), ...]
"""
cur_ring = sorted_ring_list[-1]
cur_idx = len(sorted_ring_list) - 1
init_t, init_seg = pathT2tseg(cur_ring.path, T)
init_seg_idx = cur_ring.path.index(init_seg)
transect_info = [(cur_ring.point(T),
len(sorted_ring_list) - 1,
init_seg_idx,
init_t)]
cur_pt = transect_info[-1][0]
while cur_idx > 0:
# Find all rings this transect segment could be coming from
test_rings = []
for r_idx, r in list(enumerate(sorted_ring_list[:cur_idx]))[::-1]:
test_rings.append((r_idx, r))
if r.path.isclosed():
break
test_ring_results = []
for r_idx, test_ring in test_rings:
args = (cur_pt, test_ring.path, cur_ring)
inward_segt_list = isPointOutwardOfPath(*args, justone=False)
for seg_idx, t in inward_segt_list:
test_ring_results.append((r_idx, seg_idx, t))
# sort choices by distance to cur_pt
def dist(res_):
r_idx_, seg_idx_, t_ = res_
new_pt_ = sorted_ring_list[r_idx_].path[seg_idx_].point(t_)
return abs(cur_pt - new_pt_)
sorted_results = sorted(test_ring_results, key=dist)
        # Find the closest result such that the transect does not go through
        # any other rings on its way to cur_pt
for res in sorted_results:
wr_idx, wseg_idx, wt = res
new_pt = sorted_ring_list[wr_idx].path[wseg_idx].point(wt)
tr_line = Line(new_pt, cur_pt)
winner = not any(r.path.intersect(tr_line)
for ri, r in test_rings if ri != wr_idx)
if winner:
break
else:
if opt.skip_transects_that_dont_exist:
bdry_ring = sorted_ring_list[-1]
s_rel = bdry_ring.path.length(T1=T) / bdry_ring.path.length()
fn = sorted_ring_list[0].svgname + \
"_partial_transect_%s.svg" % s_rel
fn = os.path.join(opt.output_directory, fn)
wsvg([r.path for r in sorted_ring_list],
nodes=[tr[0] for tr in transect_info],
filename=fn)
warn("\nNo transect exists ending at relative arc "
"length %s. An svg displaying this partial transect has"
"been saved to:\n%s\n" % (s_rel, fn))
return []
elif opt.accept_transect_crossings:
wr_idx, wseg_idx, wt = sorted_results[0]
else:
disvg([r.path for r in sorted_ring_list],
nodes=[tr[0] for tr in transect_info]) # DEBUG line
bdry_ring = sorted_ring_list[-1]
s_rel = bdry_ring.path.length(T1=T) / bdry_ring.path.length()
raise Exception("No transect exists ending at relative arc "
"length %s." % s_rel)
# Record the closest choice
transect_info.append((sorted_ring_list[wr_idx].path[wseg_idx].point(wt),
cur_idx,
wseg_idx,
wt))
cur_ring = sorted_ring_list[wr_idx]
cur_pt = transect_info[-1][0]
cur_idx = wr_idx
# Erroneous Termination
if cur_idx < 0 and sorted_ring_list.index(cur_ring) != 0:
disvg([r.path for r in sorted_ring_list],
nodes=[tr[0] for tr in transect_info]) # DEBUG line
bdry_ring = sorted_ring_list[-1]
s_rel = bdry_ring.path.length(T1=T) / bdry_ring.path.length()
raise Exception("Something went wrong finding inverse transect at "
"relative arc length %s." % s_rel)
return transect_info | b553f17bec27b09411babf2d230cb12358af78a8 | 32,760 |
import cmath
import numpy as np
def gamma_from_RLGC(freq, R, L, G, C):
"""Get propagation constant gamma from RLGC transmission line parameters"""
w=2*np.pi*freq
return cmath.sqrt((R+1j*w*L)*(G+1j*w*C)) | 9e4f09dc233f87b3fa52b9c7488b7fb65791289d | 32,761 |
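A quick usage sketch for `gamma_from_RLGC` above (assuming numpy is installed and the function is in scope; the RLGC values are illustrative per-metre numbers, not from the original source): the real part of gamma is the attenuation constant and the imaginary part is the phase constant.

gamma = gamma_from_RLGC(freq=1e9, R=0.5, L=250e-9, G=1e-6, C=100e-12)
alpha = gamma.real   # attenuation constant (Np/m)
beta = gamma.imag    # phase constant (rad/m)
print(alpha, beta)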
from typing import Optional
from typing import List
from typing import Union
from pathlib import Path
def get_abs_paths(paths: Optional[List[Union[str, Path]]]) -> List[Union[str, Path]]:
"""Extract the absolute path from the given sources (if any).
:param paths: list of source paths, if empty this functions does nothing.
"""
if paths is None:
return []
paths_abs = []
for path in paths:
paths_abs.append(as_tcl_value(str(Path(path).absolute())))
return paths_abs | 93a785fbf679664b96c5228a9cf008cba7793765 | 32,762 |
async def async_setup_gateway_entry(hass: core.HomeAssistant, entry: config_entries.ConfigEntry) -> bool:
"""Set up the Gateway component from a config entry."""
host = entry.data[CONF_HOST]
euid = entry.data[CONF_TOKEN]
# Connect to gateway
gateway = IT600Gateway(host=host, euid=euid)
try:
await gateway.connect()
await gateway.poll_status()
except IT600ConnectionError as ce:
_LOGGER.error("Connection error: check if you have specified gateway's HOST correctly.")
return False
except IT600AuthenticationError as ae:
_LOGGER.error("Authentication error: check if you have specified gateway's EUID correctly.")
return False
hass.data[DOMAIN][entry.entry_id] = gateway
gateway_info = gateway.get_gateway_device()
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, gateway_info.unique_id)},
identifiers={(DOMAIN, gateway_info.unique_id)},
manufacturer=gateway_info.manufacturer,
name=gateway_info.name,
model=gateway_info.model,
sw_version=gateway_info.sw_version,
)
for component in GATEWAY_PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True | d70644b2c4423798007272777bb9c081e79fc778 | 32,763 |
import requests
def import_from_github(username, repo, commit_hash):
"""Import a GitHub project into the exegesis database. Returns True on
success, False on failure.
"""
url = 'https://api.github.com/repos/{}/{}/git/trees/{}?recursive=1'.format(
username, repo, commit_hash)
headers = {'Accept': 'application/vnd.github.v3+json'}
r = requests.get(url, headers=headers)
if r.status_code == 200:
response = r.json()
project = Project(name=(username + ':' + repo), source=Project.GITHUB)
project.save()
# Create the root directory.
Directory.objects.create(project=project, fullpath='', dirpath='',
name='')
for entry in response['tree']:
fullpath = entry['path']
last_slash = fullpath.rfind('/')
if last_slash != -1:
dirpath = fullpath[:last_slash+1]
name = fullpath[last_slash+1:]
else:
dirpath = ''
name = fullpath
if entry['type'] == 'tree':
Directory.objects.create(project=project, fullpath=fullpath,
dirpath=dirpath, name=name)
else:
Snippet.objects.create(project=project, fullpath=fullpath,
dirpath=dirpath, name=name,
downloaded=False, download_source=entry['url'])
return True
else:
return False | a685fc79ad374ab499823696102d22b5d49201ed | 32,764 |
import os
def MakeConfigFilenameRelative(absolute_filename):
"""Determine an absolute filename's path relative to the monolith.
Args:
absolute_filename: str
Returns:
str
NB:
looks at MONOLITHIC_CODEBASE_NAME to determine where the monolithic
codebase starts. e.g., if MONOLITHIC_CODEBASE_NAME is 'bar', then in
the path /foo/bar/baz/quux , the relative name is baz/quux.
If we cannot determine a relative name, because absolute_filename is not
in the monolithic codebase, we return absolute_filename.
"""
if not os.path.isabs(absolute_filename):
raise base.Error('%s is not an absolute path' % absolute_filename)
s = os.path.sep + MONOLITHIC_CODEBASE_NAME + os.path.sep
occurrence = absolute_filename.rfind(s)
if occurrence == -1:
return absolute_filename
return absolute_filename[occurrence + len(s):] | 45ca5417c7636762d04432dcb20a6ab195f6e6ce | 32,765 |
import getpass
import os
def get_checks():
""" Returns a list of Entry """
manager = Manager()
add = manager.add
JOY_DEVICE = '/dev/input/js0'
this_is_a_duckiebot = on_duckiebot()
this_is_a_laptop = on_laptop()
this_is_circle = on_circle()
username = getpass.getuser()
if this_is_a_duckiebot:
add(None,
"Camera is detected",
CommandOutputContains('sudo vcgencmd get_camera', 'detected=1'),
Diagnosis("The camera is not connected."))
add(None,
"Scipy is installed",
CanImportPackages(['scipy', 'scipy.io']),
Diagnosis("Scipy is not installed correctly."))
add(None,
"sklearn is installed",
CanImportPackages(['sklearn']),
Diagnosis("sklearn is not installed correctly."))
python_packages = [
# 'ros_node_utils',
'procgraph',
'comptests',
]
for p in python_packages:
add(None,
"%s is installed" % p,
CanImportPackages([p]),
Diagnosis("Dependency %r is not installed correctly." % p),
Suggestion(" pip install --user %s" % p))
add(None,
"Date is set correctly",
CheckDate(),
Diagnosis("The date is not set correctly."))
    not_root = add(None,
"Not running as root",
YouAreNotUser('root'),
Diagnosis("You should not run the code as root."))
if this_is_a_duckiebot:
not_ubuntu = add(not_root,
"Not running as ubuntu",
YouAreNotUser('ubuntu'),
Diagnosis("You should not run the code as ubuntu."))
add(not_ubuntu,
"Member of group sudo",
UserBelongsToGroup(username, "sudo"),
Diagnosis("You are not authorized to run sudo."))
add(not_ubuntu,
"Member of group input",
UserBelongsToGroup(username, "input"),
Diagnosis("You are not authorized to use the joystick."))
add(not_ubuntu,
"Member of group video",
UserBelongsToGroup(username, "video"),
Diagnosis("You are not authorized to read from the camera device."))
        add(not_ubuntu,
            "Member of group i2c",
            UserBelongsToGroup(username, "i2c"),
            Diagnosis("You are not authorized to use the motor shield."))
for g in ['sudo','input','video','i2c']:
add(None,
"User ubuntu member of group `%s`" % g,
UserBelongsToGroup("ubuntu", g),
Diagnosis("Image not created properly."))
if this_is_a_laptop or this_is_a_duckiebot:
good_ssh_configuration(manager)
required_packages = set()
if this_is_a_duckiebot or this_is_a_laptop or this_is_circle:
required_packages.update(make_list("""
vim byobu
git git-extras
htop atop iftop
aptitude apt-file
build-essential libblas-dev liblapack-dev libatlas-base-dev gfortran libyaml-cpp-dev
python-dev ipython python-sklearn
python-termcolor
ros-kinetic-desktop-full
ntpdate
python-pip
ipython
python-ruamel.yaml
virtualenv
libxml2-dev
libxslt1-dev
libffi-dev
bibtex2html
pdftk
python-frozendict
python-tables
mplayer
mencoder
"""))
if this_is_a_duckiebot:
required_packages.update(make_list("""
i2c-tools
python-smbus
"""))
if this_is_a_laptop or this_is_circle:
required_packages.update(make_list("""
git-lfs
"""))
# TODO
# suggested = ['emacs', 'zsh', 'nethogs']
for p in required_packages:
add(None, p, CheckPackageInstalled(p), Diagnosis('Package %r not installed.' % p))
forbidden_packages = ["python-roslaunch", "rosbash"]
for p in forbidden_packages:
add(None, p, CheckPackageNotInstalled(p), Diagnosis('Forbidden package %r is installed.' % p))
if not this_is_circle:
add_suite_git(manager)
if this_is_a_duckiebot:
add(None,
"Edimax detected",
CommandOutputContains('iwconfig', 'rtl8822bu'),
Diagnosis("It seems that the Edimax is not detected."))
add(None,
'The hostname is configured',
CheckHostnameConfigured(),
Diagnosis('You have not completed a proper setup.'))
add(None,
'/etc/hosts is sane',
CheckGoodHostsFile(),
Diagnosis('The contents of /etc/hosts will cause problems later on.'))
if this_is_a_duckiebot:
add(None,
'Correct kernel version',
GoodKernel(),
Diagnosis('You have been messing with the kernel.'),
Suggestion('You probably need to start with a pristine SD card.'))
add(None,
'Wifi name configured',
WifiNameConfigured(),
Diagnosis('You have not completed the Wifi configuration.'))
add(None,
"Messages are compiled",
CheckImportMessages(),
Diagnosis("The messages are not compiling correctly."))
# if not this_is_circle:
# add(None,
# 'Shell is bash',
# EnvironmentVariableIsEqualTo('SHELL', '/bin/bash'),
# Diagnosis('You have not set the shell to /bin/bash'),
# Suggestion('You can change the shell using `chsh`.'))
if this_is_a_duckiebot:
add(None,
"Joystick detected",
DeviceExists(JOY_DEVICE),
Diagnosis("The joystick is not found at %s" % JOY_DEVICE))
DUCKIETOWN_ROOT = DuckietownConstants.DUCKIETOWN_ROOT_variable
DUCKIEFLEET_ROOT = DuckietownConstants.DUCKIEFLEET_ROOT_variable
DUCKIETOWN_CONFIG_SEQUENCE = DuckietownConstants.DUCKIETOWN_CONFIG_SEQUENCE_variable
v = DUCKIETOWN_CONFIG_SEQUENCE
add(None,
'Provided environment variable %s.' % v,
EnvironmentVariableExists(v),
Diagnosis("%s is not set." % v),
Suggestion('You have to set %r in your environment (e.g. .bashrc)' % v))
variables_to_check = [DUCKIETOWN_ROOT, DUCKIEFLEET_ROOT, #DUCKIETOWN_CONFIG_SEQUENCE
]
existence = {}
for v in variables_to_check:
var_exists = add(None,
'Provided environment variable %s.' % v,
EnvironmentVariableExists(v),
Diagnosis("%s is not set." % v),
Suggestion('You have to set %r in your environment (e.g. .bashrc)' % v))
existence[v] = add(var_exists,
'Existence of path ${%s}' % v,
DirExists('${%s}' % v),
Diagnosis("%s is set but it points to a non-existing directory." % v)
)
add(existence[DUCKIETOWN_ROOT],
'Software repo downloaded with SSH scheme.',
GitCorrectRemote('${%s}' % DUCKIETOWN_ROOT),
Diagnosis("You downloaded the repo using https."),
)
# scuderia_exists = add(existence[DUCKIEFLEET_ROOT],
# 'Existence of scuderia file',
# ScuderiaFileExists(),
# Diagnosis('You do not have a scuderia file.'),
# SeeDocs('scuderia')
# )
git_lfs_installed = add(None, # @UnusedVariable
'Git LFS installed',
GitLFSInstalled(),
Diagnosis('You have not installed Git LFS'),
SeeDocs('git-lfs'))
#
# ok_scuderia = add(scuderia_exists,
# 'Validation of scuderia file',
# ValidScuderiaFile(),
# Diagnosis('You have an invalid scuderia file.'),
# SeeDocs('scuderia')
# )
if this_is_a_duckiebot:
add(None,
'This robot is mentioned in scuderia.',
ThisRobotInScuderiaFile(),
Diagnosis('You have not added the robot to the scuderia.'),
SeeDocs('scuderia'))
progs = ['roslaunch', 'rosrun']
for prog in progs:
add(None,
'Good path for "%s"' % prog,
CommandOutputContains('which %s' % prog, '/opt/ros/kinetic'),
Diagnosis('The program `%s` is not resolved to the one in /opt/ros' % prog))
# add(None,
# 'Hub is installed',
# CommandOutputContains('hub --version'),
# Diagnosis('The program "hub" is not installed'),
# SeeDocs("hub"))
machines_exists = add(None,
'Existence of machines file',
MachinesExists(),
Diagnosis('You have an invalid or missing machines file.'),
SeeDocs('machines'),
)
if this_is_a_duckiebot:
add(machines_exists,
'Machines file contains this robot',
MachinesValid(),
Diagnosis('You have an invalid machines file.'),
)
#
# add(machines_exists,
# 'Machines is updated',
# MachinesNewerThanScuderia(),
# Diagnosis('Scuderia was modified after machines created'),
# )
if True: # TODO
if this_is_a_laptop:
existence = add(None,
'Environment variable DUCKIETOWN_DATA',
EnvironmentVariableExists('DUCKIETOWN_DATA'),
Diagnosis("DUCKIETOWN_DATA is not set."
"""
The environment variable DUCKIETOWN_DATA must either:
1) be set to "n/a"
2) point to an existing path corresponding to Dropbox/duckietown-data.
(containing a subdirectory 'logs')
"""
))
logs = [
"${DUCKIETOWN_DATA}/logs/20160400-phase3-logs/dp45/20160406/20160406-226-All_red_lights_followTheLeader1-2cv.bag",
]
for l in logs:
add(existence,
'Log %r exists in DUCKIETOWN_DATA' % os.path.basename(l),
FileExists(l),
Diagnosis("The DUCKIETOWN_DATA folder does not contain the logs it should.")
)
if False:
# TODO: not sure if this is needed
if this_is_a_duckiebot:
add(None,
'Environment variable VEHICLE_NAME',
EnvironmentVariableExists('VEHICLE_NAME'),
Diagnosis("""
The environment variable VEHICLE_NAME must be the name of your robot
(if you are on the robot)."""),
Suggestion("""
Add this line to ~/.bashrc:
export VEHICLE_NAME= (your vehicle name)
"""))
try:
packagename2dir = get_list_of_packages_in_catkin_ws()
except DTConfigException:
pass
else:
for package_name, dirname in packagename2dir.items():
add_python_package_checks(add, package_name, dirname)
# TODO: DISPLAY is not set
# files in src/ or scripts/ are executable
# There is no file "util.py" copied from pkg_name
# add(None,
# 'Passwordless sudo',
# FileContains('/etc/'))
# TODO: date
return manager.entries | 36f5b3c8e6d22a687b215f71dbb10cacc73ac747 | 32,766 |
def substructure_matching_bonds(mol: dm.Mol, query: dm.Mol, **kwargs):
"""Perform a substructure match using `GetSubstructMatches` but instead
of returning only the atom indices also return the bond indices.
Args:
mol: A molecule.
query: A molecule used as a query to match against.
kwargs: Any other arguments to pass to `mol.GetSubstructMatches()`.
Returns:
atom_matches: A list of lists of atom indices.
bond_matches: A list of lists of bond indices.
"""
# NOTE(hadim): If more substructure functions are added here, consider moving it to
# a dedicated `substructure` module.
# Set default arguments
kwargs.setdefault("uniquify", True)
# Get the matching atom indices
atom_matches = list(mol.GetSubstructMatches(query, **kwargs))
    # Get the bonds to highlight from the query
query_bond_indices = [
(bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()) for bond in query.GetBonds()
]
# Retrieve the atom indices
    query_atom_indices = [atom.GetIdx() for atom in query.GetAtoms()]
bond_matches = []
for match in atom_matches:
# Map the atom of the query to the atom of the mol matching the query
atom_map = dict(zip(query_atom_indices, match))
        # For the atoms of this match, we use the map to retrieve the matching bonds
# in the mol.
mol_bond_indices = [(atom_map[a1], atom_map[a2]) for a1, a2 in query_bond_indices]
# Convert the bond atom indices to bond indices
mol_bond_indices = [mol.GetBondBetweenAtoms(a1, a2).GetIdx() for a1, a2 in mol_bond_indices]
bond_matches.append(mol_bond_indices)
return atom_matches, bond_matches | 1b6f4f7e17defae555ea750941be5ec71047cc87 | 32,767 |
def remove_element(nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
sz = len(nums)
while sz > 0 and nums[sz - 1] == val:
sz -= 1
i = 0
while i < sz:
if nums[i] == val:
nums[i], nums[sz - 1] = nums[sz - 1], nums[i]
sz -= 1
while sz > 0 and nums[sz - 1] == val:
sz -= 1
i += 1
return sz | 4d29e8a8d43f191fe83ab0683f5dff005db799ec | 32,768 |
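A small worked example for `remove_element` above (assuming the function is in scope): the return value is the count of kept elements, and the first `sz` positions of the list hold them, though not necessarily in their original order, since removed values are swapped to the tail.

nums = [3, 2, 2, 3]
sz = remove_element(nums, 3)
print(sz, nums[:sz])  # 2 [2, 2]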
def get_fm_file(file_name):
"""Read facilitymatcher file into dataframe. If not present, generate the file
via script"""
file_meta = set_facilitymatcher_meta(file_name, category='')
df = load_preprocessed_output(file_meta, paths)
if df is None:
log.info('%s not found in %s, writing facility matches to file',
file_name, output_dir)
if file_name == 'FacilityMatchList_forStEWI':
write_fm.write_facility_matches()
elif file_name == 'FRS_NAICSforStEWI':
write_naics.write_NAICS_matches()
df = load_preprocessed_output(file_meta, paths)
col_dict = {"FRS_ID": "str",
"FacilityID": "str",
"NAICS": "str"}
for k, v in col_dict.items():
if k in df:
df[k] = df[k].astype(v)
return df | b83743b8f56376148b12fc13c3731471cd24b6a5 | 32,769 |
from subprocess import PIPE, Popen
def call_dot(instr):
    """Call dot, returning stdout and stderr"""
    dot = Popen('dot -T png'.split(), stdout=PIPE, stderr=PIPE, stdin=PIPE)
    return dot.communicate(instr) | f77c9f340f3fcbebb101c5f59d57c92b56147a11 | 32,770 |
import typing as t
def add_bank_member_signal(
banks_table: BanksTable,
bank_id: str,
bank_member_id: str,
signal_type: t.Type[SignalType],
signal_value: str,
) -> BankMemberSignal:
"""
Add a bank member signal. Will deduplicate a signal_value + signal_type
tuple before writing to the database.
Calling this API also makes the signal (new or existing) available to
process into matching indices.
"""
return banks_table.add_bank_member_signal(
bank_id=bank_id,
bank_member_id=bank_member_id,
signal_type=signal_type,
signal_value=signal_value,
) | 214f064f152648c78d7ee5c6b56fb81c62cb4164 | 32,771 |
async def async_migrate_entry(hass, config_entry):
"""Migrate old entry."""
_LOGGER.debug("Migrating from version %s", config_entry.version)
# Flatten configuration but keep old data if user rollbacks HASS
if config_entry.version == 1:
config_entry.data = {**config_entry.data, **config_entry.data[CONF_DEVICE]}
config_entry.version = 2
_LOGGER.info("Migration to version %s successful", config_entry.version)
return True | 96bb3ba729a59188b91edd55e80a0d868d75e89b | 32,772 |
import os
def get_flow1_datasets_result_path(create=True):
""" local path for flow1 results file """
path = os.path.join(get_base_path(), 'flow1-datasets-results.json')
if not os.path.isfile(path):
open(path, 'w').close()
return path | 09ecf3a9c1c34f4f12e39a560f91f3f38aaf7010 | 32,773 |
from typing import List
def map_zones(full_system) -> List[Zone]:
"""Map *zones*."""
zones = []
if full_system:
for raw_zone in full_system.get("body", dict()).get("zones", list()):
zone = map_zone(raw_zone)
if zone:
zones.append(zone)
return zones | e5996460bc66a2882ac1cabee79fdff6e4da71cd | 32,774 |
def betternn(x, keep_prob):
"""
Builds a network that learns to recognize digits
    :param x: input tensor of shape (N_examples, 784) as standard MNIST image is 28x28=784
:param keep_prob: probability for dropout layer
:return: y - a tensor of shape (N_examples, 10) with
values equal to probabilities of example being given digit
"""
# input image is stored as 784 pixels, reshape it to (28,28,1) as it's greyscale
# -1 is special value that indicates that this dimension should be inferred to keep
# constant size
net = Network(tf.reshape(x, [-1, 28, 28, 1]))
net.add_layer(
# take 5x5 features and create 32 feature maps
ConvLayer([5, 5, 1, 32], [32], tf.nn.relu)
).add_layer(
# reduce size by factor of 2
PoolLayer()
).add_layer(
# this time create 64 feature maps
ConvLayer([5, 5, 32, 64], [64], tf.nn.relu)
).add_layer(
# reduce size again
PoolLayer()
).reshape_output(
# reduced size twice (so image is [28,28] -> [7,7]) and created 64 feature maps
# so flatten previous output
[-1, 7 * 7 * 64]
).add_layer(
# create 1024 features
FullyConnectedLayer([7 * 7 * 64, 1024], [1024], tf.nn.relu)
).add_layer(
# reduce complexity
DropoutLayer(keep_prob)
).add_layer(
# Map 1024 features to 10 classes representing digits
FullyConnectedLayer([1024, 10], [10], tf.nn.softmax)
)
return net.output | 83b1155daa564b257fc1379811ddbde72d18ec4f | 32,775 |
def get_valid_scsi_ids(devices, reserved_ids):
"""
Takes a list of dicts devices, and list of ints reserved_ids.
Returns:
- list of ints valid_ids, which are the SCSI ids that are not reserved
- int recommended_id, which is the id that the Web UI should default to recommend
"""
occupied_ids = []
for d in devices:
occupied_ids.append(d["id"])
unoccupied_ids = [i for i in list(range(8)) if i not in reserved_ids + occupied_ids]
unoccupied_ids.sort()
valid_ids = [i for i in list(range(8)) if i not in reserved_ids]
valid_ids.sort(reverse=True)
if len(unoccupied_ids) > 0:
recommended_id = unoccupied_ids[-1]
else:
recommended_id = occupied_ids.pop(0)
return valid_ids, recommended_id | a5b4341fbee75e7d555c917587678dc5ea918b9f | 32,776 |
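A usage sketch for `get_valid_scsi_ids` above (assuming the function is in scope): with IDs 0 and 6 occupied and ID 7 reserved, the highest free ID is recommended.

devices = [{"id": 0}, {"id": 6}]
reserved_ids = [7]
valid_ids, recommended_id = get_valid_scsi_ids(devices, reserved_ids)
print(valid_ids)       # [6, 5, 4, 3, 2, 1, 0] -- every id except the reserved 7
print(recommended_id)  # 5 -- highest id that is neither reserved nor occupied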
from hashlib import md5
def check_password_and_delete(target: dict, password) -> dict:
    """
    :param target: bash record, typically containing a "password" md5 hash and a "bash_id"
    :param password: plaintext password to verify against the stored hash
    :return: dict with a status "code" and a "result"/"reason" message
    """
if "password" in target:
if md5(str(password).encode()).hexdigest() == target["password"]:
target = dell(target["password"])
Bash().delete({
"bash_id": target["bash_id"]
})
            # the bash has been found with the correct password
            result = {
                "code": "200",
                "result": "The bash has been deleted successfully",
            }
else:
# incorrect password
result = {
"code": "400",
"reason": "The password for this bash is incorrect, please try again !",
}
else:
# successfully retrieve a public bash
result = {
"code": "403",
"reason": "This is a public bash, you can't delete it, even if you're the author",
}
return result | c06f5064d8a85065c3312d09bda9b9602b772ae1 | 32,777 |
def lingodoc_trigger_to_BIO(doc):
"""
:type doc: nlplingo.text.text_theory.Document
"""
ret = []
for sentence in doc.sentences:
token_labels = []
for token_index, token in enumerate(sentence.tokens):
token_labels.append(EventTriggerFeatureGenerator.get_event_type_of_token(token, sentence))
bio_labels = transform_sentence_labels_to_BIO(token_labels)
token_bio = []
for k, v in zip(sentence.tokens, bio_labels):
token_bio.append('{} {}'.format(k.text, v))
ret.append('\n'.join(token_bio))
return ret | 7237650ef6e9649b3d0e14867d907c9f6aa5b71a | 32,778 |
def build_coiled_coil_model():
"""Generates and returns a coiled-coil model."""
model_and_info = build_and_record_model(
request, model_building.HelixType.ALPHA)
return jsonify(model_and_info) | 9eeca3d1de581559129d3a4fe529c4e13727bd71 | 32,779 |
import tensorflow as tf
def get_input_fn(config, is_training,
num_cpu_threads=4):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
input_files = []
for input_pattern in config.pretrain_tfrecords.split(","):
input_files.extend(tf.io.gfile.glob(input_pattern))
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
name_to_features = {
"input_ori_ids": tf.io.FixedLenFeature([config.max_seq_length], tf.int64),
# "input_mask": tf.io.FixedLenFeature([config.max_seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([config.max_seq_length], tf.int64),
}
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
d = d.repeat()
d = d.shuffle(buffer_size=len(input_files))
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(num_cpu_threads, len(input_files))
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
d = d.apply(
tf.data.experimental.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=is_training,
cycle_length=cycle_length))
d = d.shuffle(buffer_size=100)
# We must `drop_remainder` on training because the TPU requires fixed
# size dimensions. For eval, we assume we are evaluating on the CPU or GPU
# and we *don"t* want to drop the remainder, otherwise we wont cover
# every sample.
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
num_parallel_batches=num_cpu_threads,
drop_remainder=True))
d = d.apply(tf.data.experimental.ignore_errors())
return d
return input_fn | 23992d8d4fd1bd09f12aa4c28c695a47edca420e | 32,780 |
def control_modes_available():
"""API to call the GetCtrlModesCountSrv service to get the list of available modes
in ctrl_pkg (autonomous/manual/calibration).
Returns:
dict: Execution status if the API call was successful, list of available modes
and error reason if call fails.
"""
webserver_node = webserver_publisher_node.get_webserver_node()
webserver_node.get_logger().info("Providing the number of available modes")
try:
get_ctrl_modes_req = GetCtrlModesSrv.Request()
get_ctrl_modes_res = call_service_sync(webserver_node.get_ctrl_modes_cli,
get_ctrl_modes_req)
control_modes_available = list()
for mode in get_ctrl_modes_res.modes:
control_modes_available.append(constants.MODE_DICT[mode])
data = {
"control_modes_available": control_modes_available,
"success": True
}
return jsonify(data)
except Exception as ex:
webserver_node.get_logger().error(f"Unable to reach get ctrl modes service: {ex}")
return jsonify(success=False, reason="Error") | eec46b860791d305cce14659a633b461c73143f0 | 32,781 |
from rasterio.warp import transform_bounds
def reproject_bbox(source_epsg=4326, dest_epsg=None, bbox=None):
"""
Basic function to reproject given coordinate
bounding box (in WGS84).
"""
# checks
# reproject bounding box
l, b, r, t = bbox
return transform_bounds(src_crs=source_epsg,
dst_crs=dest_epsg,
left=l, bottom=b, right=r, top=t) | 449a1cb793cb2239ed9d46449d03f77500fab031 | 32,782 |
import os
import sys
import py_compile
def compile_py_files(toc, workpath):
"""
Given a TOC or equivalent list of tuples, generates all the required pyc/pyo files, writing in a local directory
if required, and returns the list of tuples with the updated pathnames.
In the old system using ImpTracker, the generated TOC of "pure" modules already contains paths to nm.pyc or
nm.pyo and it is only necessary to check that these files are not older than the source. In the new system using
ModuleGraph, the path given is to nm.py and we do not know if nm.pyc/.pyo exists. The following logic works with
both (so if at some time modulegraph starts returning filenames of .pyc, it will cope).
"""
# For those modules that need to be rebuilt, use the build directory PyInstaller creates during the build process.
basepath = os.path.join(workpath, "localpycos")
# Copy everything from toc to this new TOC, possibly unchanged.
new_toc = []
for (nm, fnm, typ) in toc:
# Keep irrelevant items unchanged.
if typ != 'PYMODULE':
new_toc.append((nm, fnm, typ))
continue
if fnm in ('-', None):
            # If fnm represents a namespace then skip
continue
if fnm.endswith('.py'):
# We are given a source path, determine the object path, if any.
src_fnm = fnm
# Assume we want pyo only when now running -O or -OO
obj_fnm = src_fnm + ('o' if sys.flags.optimize else 'c')
if not os.path.exists(obj_fnm):
# Alas that one is not there so assume the other choice.
obj_fnm = src_fnm + ('c' if sys.flags.optimize else 'o')
else:
# fnm is not "name.py", so assume we are given name.pyc/.pyo
obj_fnm = fnm # take that name to be the desired object
src_fnm = fnm[:-1] # drop the 'c' or 'o' to make a source name
# We need to perform a build ourselves if obj_fnm does not exist, or if src_fnm is newer than obj_fnm, or if
# obj_fnm was created by a different Python version.
needs_compile = mtime(src_fnm) > mtime(obj_fnm)
if not needs_compile:
with open(obj_fnm, 'rb') as fh:
needs_compile = fh.read(4) != BYTECODE_MAGIC
if needs_compile:
try:
# TODO: there should be no need to repeat the compile, because ModuleGraph does a compile and stores the
# result in the .code member of the graph node. Should be possible to get the node and write the
# code to obj_fnm.
py_compile.compile(src_fnm, obj_fnm)
logger.debug("compiled %s", src_fnm)
except IOError:
# If we are compiling in a system directory, we probably do not have write permissions; thus we compile
# to a local directory and change the TOC entry accordingly.
ext = os.path.splitext(obj_fnm)[1]
split_nm = nm.split(".")
if "__init__" not in obj_fnm:
# If it is a normal module, use the last part of the qualified name as the module name and the first
# part as the leading path.
leading = split_nm[:-1]
mod_name = split_nm[-1]
else:
# In case of an __init__ module, use all the qualified name as the leading path and use "__init__"
# as the module name.
leading = split_nm
mod_name = "__init__"
leading = os.path.join(basepath, *leading)
if not os.path.exists(leading):
os.makedirs(leading)
obj_fnm = os.path.join(leading, mod_name + ext)
needs_compile = mtime(src_fnm) > mtime(obj_fnm)
if not needs_compile:
with open(obj_fnm, 'rb') as fh:
needs_compile = fh.read(4) != BYTECODE_MAGIC
if needs_compile:
# TODO: see above TODO regarding using node.code.
py_compile.compile(src_fnm, obj_fnm)
logger.debug("compiled %s", src_fnm)
# If we get to here, obj_fnm is the path to the compiled module nm.py
new_toc.append((nm, obj_fnm, typ))
return new_toc | 846a68a20d574f1dd3c9e56c1e032a1ef41d68a4 | 32,783 |
from bs4 import BeautifulSoup
import re
def parse_cluster_card_info(soup: BeautifulSoup):
"""
App lists from GET requests follow a redirect to the /cluster page, which
contains different HTML and selectors.
:param soup: A BeautifulSoup object of an app's card
:return: A dictionary of available basic app info
"""
icon = soup.select_one("img")
details_soup = soup.select_one("div.RZEgze")
relative_url = details_soup.select_one("div.p63iDd > a")
url = relative_url.attrs.get("href") if relative_url else None
app_id = None
if url:
app_id = extract_id_query(url)
title = details_soup.select_one("div.WsMG1c.nnK0zc")
developer_soup = details_soup.select_one("a.mnKHRc")
developer = None
developer_id = None
if developer_soup:
developer = developer_soup.select_one("div.KoLSrc")
developer_url = developer_soup.attrs.get("href") if developer else None
developer_id = extract_id_query(developer_url)
description = details_soup.select_one("div.b8cIId.f5NCO")
score_soup = details_soup.select_one("div.pf5lIe div")
score = None
if score_soup:
matches = re.search(r"([0-9]\.[0-9]) star", score_soup.text)
score = matches.groups()[0] if matches else None
price = None
price_button = details_soup.select_one("button span.VfPpfd")
if price_button:
price = price_button.text
full_price = None
full_price_button = details_soup.select_one("button span.SUZt4c")
if full_price_button:
full_price = full_price_button.text
free = price is None
if free is True:
price = "0"
full_price = "0"
return {
"app_id": app_id,
"url": url,
"icon": icon.attrs.get("data-src") if icon else None,
"title": title.text if title else None,
"developer": developer.text if developer else None,
"developer_id": developer_id,
"description": description.text if description else None,
"score": score,
"full_price": full_price,
"price": price,
"free": free,
} | 0d4d0ba75a4e29b4d33e1f1a4e40239adcd80626 | 32,784 |
import numpy as np
import pandas as pd
def get_joint_occurrence_df(df, row_column, col_column, top_k=10):
"""
Form a DataFrame where:
- index is composed of top_k top values in row_column.
- columns are composed of top_k top values in col_column.
- cell values are the number of times that the index and
column values occur together in the given DataFrame.
Note: Index of the DataFrame must be unique.
"""
df_s = df[[row_column, col_column]].copy()
# Get top row and column values.
top_rows = df[row_column].value_counts().iloc[:top_k]
top_cols = df[col_column].value_counts().iloc[:top_k]
# Drop rows that don't have a genre and style in the top list.
filter_lambda = lambda x: \
x[row_column] in top_rows and x[col_column] in top_cols
df_s = df_s[df_s.apply(filter_lambda, axis=1)]
fname = 'get_joint_occurrence_df'
print("{}: looking at co-occurrence of {} and {}".format(
fname, row_column, col_column))
print(" dropped {}/{} rows that don't have both vals in top-{}.".format(
        df.shape[0] - df_s.shape[0], df.shape[0], top_k))
# Construct joint occurence matrix
JM = np.zeros((top_k, top_k))
for i, row in enumerate(top_rows.index):
for j, col in enumerate(top_cols.index):
JM[i, j] = (
(df_s[row_column] == row) &
(df_s[col_column] == col)
).sum()
df_m = pd.DataFrame(JM, columns=top_cols.index, index=top_rows.index)
return df_m | 19701a0a355733c1eb8d3aa3046fc6e00daed120 | 32,785 |
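A small usage sketch for `get_joint_occurrence_df` above (assuming pandas is installed and the function is in scope; the toy genre/style data is invented for illustration): the result is a top_k x top_k co-occurrence count matrix.

import pandas as pd

toy = pd.DataFrame({
    "genre": ["rock", "rock", "jazz", "jazz", "rock"],
    "style": ["loud", "loud", "soft", "loud", "soft"],
})
jm = get_joint_occurrence_df(toy, "genre", "style", top_k=2)
print(jm)  # rows: top genres, columns: top styles, cells: co-occurrence counts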
def get_total_obs_num_samples(obs_length=None,
num_blocks=None,
length_mode='obs_length',
num_antennas=1,
sample_rate=3e9,
block_size=134217728,
num_bits=8,
num_pols=2,
num_branches=1024,
num_chans=64):
"""
Calculate number of required real voltage time samples for as given `obs_length` or `num_blocks`, without directly
using a `RawVoltageBackend` object.
Parameters
----------
obs_length : float, optional
Length of observation in seconds, if in `obs_length` mode
num_blocks : int, optional
Number of data blocks to record, if in `num_blocks` mode
length_mode : str, optional
Mode for specifying length of observation, either `obs_length` in seconds or `num_blocks` in data blocks
num_antennas : int
Number of antennas
sample_rate : float
Sample rate in Hz
block_size : int
Block size used in recording GUPPI RAW files
num_bits : int
Number of bits in requantized data (for saving into file). Can be 8 or 4.
num_pols : int
Number of polarizations recorded
num_branches : int
Number of branches in polyphase filterbank
num_chans : int
Number of coarse channels written to file
Returns
-------
num_samples : int
Number of samples
"""
tbin = num_branches / sample_rate
chan_bw = 1 / tbin
bytes_per_sample = 2 * num_pols * num_bits / 8
if length_mode == 'obs_length':
if obs_length is None:
raise ValueError("Value not given for 'obs_length'.")
num_blocks = int(obs_length * chan_bw * num_antennas * num_chans * bytes_per_sample / block_size)
elif length_mode == 'num_blocks':
if num_blocks is None:
raise ValueError("Value not given for 'num_blocks'.")
pass
else:
raise ValueError("Invalid option given for 'length_mode'.")
return num_blocks * int(block_size / (num_antennas * num_chans * bytes_per_sample)) * num_branches | 0d5c3de03723c79d31c7f77ece29226daaf4f442 | 32,786 |
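A quick arithmetic check for `get_total_obs_num_samples` above (assuming the function is in scope; the parameter values are the defaults, chosen only to show the rounding behaviour): the requested 1-second observation is first rounded down to a whole number of GUPPI RAW blocks, so the sample count comes out slightly below sample_rate * obs_length.

n = get_total_obs_num_samples(obs_length=1.0,
                              num_antennas=1,
                              sample_rate=3e9,
                              num_chans=64)
print(f"{n:.3e}")  # about 2.7e9 real samples, not the naive 3e9, due to whole-block rounding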
from fastapi import HTTPException, Path, Query, Request, Response
def documents_glossary_term(response: Response,
request: Request=Query(None, title=opasConfig.TITLE_REQUEST, description=opasConfig.DESCRIPTION_REQUEST),
term_id: str=Path(..., title="Glossary Term ID or Partial ID", description=opasConfig.DESCRIPTION_GLOSSARYID),
#search: str=Query(None, title="Document request from search results", description="This is a document request, including search parameters, to show hits"),
return_format: str=Query("HTML", title=opasConfig.TITLE_RETURNFORMATS, description=opasConfig.DESCRIPTION_RETURNFORMATS)
): # Note this is called by the Document endpoint if it detects a term_id in the DocumentID
"""
## Function
<b>Return a glossary entry for the specified {termID} if authenticated. If not, returns error.</b>
## Return Type
models.Documents
## Status
This endpoint is working.
## Sample Call
/v2/Documents/Glossary/{term_id}
## Notes
In V1 (and PEP-Easy 1.0), glossary entries are fetched via the /v1/Documents endpoint rather than here.
## Potential Errors
USER NEEDS TO BE AUTHENTICATED for glossary access at the term level. Otherwise, returns error.
Client apps should disable the glossary links when not authenticated.
"""
ret_val = None
ocd, session_info = opasAPISupportLib.get_session_info(request, response)
# is the user authenticated?
# is this document embargoed?
# the App should not call here if not authenticated.
if not session_info.authenticated:
response.status_code = httpCodes.HTTP_400_BAD_REQUEST
status_message = f"Must be logged in to view a glossary entry."
ocd.record_session_endpoint(api_endpoint_id=opasCentralDBLib.API_DOCUMENTS,
session_info=session_info,
params=request.url._url,
item_of_interest=term_id,
return_status_code = response.status_code,
status_message=status_message
)
raise HTTPException(
status_code = response.status_code,
detail = status_message
)
try:
try:
term_parts = term_id.split(".")
if len(term_parts) == 4:
term_id = term_parts[-2]
elif len(term_parts) == 3:
term_id = term_parts[-1]
else:
pass
logger.debug("Glossary View Request (term_id/return_format): %s/%s", term_id, return_format)
except Exception as e:
logger.debug("Error splitting term: %s", e)
#raise HTTPException(
#status_code=HTTP_500_INTERNAL_SERVER_ERROR,
#detail=status_message
#)
#Keep it as is
#termID = termID
ret_val = opasAPISupportLib.documents_get_glossary_entry(term_id,
retFormat=return_format,
authenticated = session_info.authenticated)
ret_val.documents.responseInfo.request = request.url._url
except Exception as e:
response.status_code = httpCodes.HTTP_400_BAD_REQUEST
status_message = f"View Glossary Error: {e}"
logger.error(status_message)
ocd.record_session_endpoint(api_endpoint_id=opasCentralDBLib.API_DOCUMENTS,
session_info=session_info,
params=request.url._url,
item_of_interest=term_id,
return_status_code = response.status_code,
status_message=status_message
)
raise HTTPException(
status_code=response.status_code,
detail=status_message
)
else:
status_message = opasCentralDBLib.API_STATUS_SUCCESS
response.status_code = httpCodes.HTTP_200_OK
ret_val.documents.responseInfo.request = request.url._url
ocd.record_session_endpoint(api_endpoint_id=opasCentralDBLib.API_DOCUMENTS,
session_info=session_info,
params=request.url._url,
item_of_interest=term_id,
return_status_code = response.status_code,
status_message=status_message
)
return ret_val | 0f20aa49fdd2cfb37390f2d0db2b88a97ae15d08 | 32,787 |
import numpy as np
def number(
x: Scalar,
c: str = 'csl',
w: int = 5,
) -> str:
"""
Return a notation of the number x in context c.
Input:
x (Scalar): number
c (str): context
w (int): width of the output string
Output:
s (str): notation of x
"""
S = 0 if x>=0 else 1 # 0 for + / 1 for -
m, e = f"{x:e}".split('e')
    m, e = float(m), int(e)  # mantissa and exponent
A, B = f"{x:f}".rstrip('0').strip('-').split('.')
a, b = len(A), len(B) # number of digits before / after the comma
if isinstance(x, int) or x%1==0:
if a+S<=w or a<=3:
if c == 'stm':
return f"{S*'-'+A.zfill(w-S)}"
if c == 'ttl':
return f"$ {S*'-'+A} $"
if c == 'csl':
return format(S*'-'+A, f'>{w}')
else:
if a+S<=w-2 and np.abs(x)>0.1:
if c == 'ttl':
return f"$ {S*'-'}{A}.{B[:w-S-1]} $"
if c == 'csl':
return format(f"{S*'-'}{A}.{B[:w-S-1-a]}", f'>{w}')
u = len(str(e))
if c == 'stm':
z = 0
q = len(str(m).strip('0').strip('.'))-2-S
while z+1<=q and w>=S+1+(z+1)+1+len(str(e-(z+1))):
z += 1
return f"{m*10**z:1.0f}e{e-z}"
if c == 'ttl':
return fr"$ {format(m, f'1.{max(0, w-S-3-u)}f')} \times 10^{{ {e} }} $"
if c == 'csl':
return format(f"{format(m, f'1.{max(0, w-S-3-u)}f')}e{e}", f'>{w}')
raise ValueError(f"unknown context: {c}") | 3a36d66f9166e82bb51e39e483f9366f83f72ff8 | 32,788 |
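A short usage sketch for the `number` formatter above (assuming numpy is installed and the function is in scope): it prints the console ('csl') notation for a few magnitudes to exercise the fixed-width and scientific branches.

for x in (42, -3.5, 0.00012345, 12345678):
    print(repr(number(x, c='csl', w=8)))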
from subprocess import PIPE, run
def remove_helm_repo(repos_array):
"""
Execute 'helm repo remove' command on input values
repos_array is an array of strings as:
['stable', 'local', 'nalkinscloud', ...]
:param repos_array: array of strings
:return: return code and value from execution command as dict
"""
status = 0
value = 'no errors found'
for repo in repos_array:
completed_process_object = run(["helm", "repo", "remove", repo], stdout=PIPE, stderr=PIPE)
# In case of a non 0 return code, update return from last iteration
if completed_process_object.returncode != 0:
status = completed_process_object.returncode
value = completed_process_object.stderr.decode('utf-8') + " *** Additional errors may occurred"
return {'status': status, 'value': value} | 4b2a778122caaabf1b7cca971d2d9f2b57dbf84e | 32,789 |
def evaluate_fio(baselines: dict, results: dict, test_name: str, failures: int,
tolerance: int) -> int:
"""
Evaluate the fio test results against the baseline.
Determine if the fio test results meet the expected threshold and display
the outcome with appropriate units.
Parameters
----------
baselines : dict
A ``dictionary`` of the baseline to compare results against.
results : dict
A ``dictionary`` of the parsed results.
test_name : str
A ``string`` of the name of the test being parsed.
failures : int
An ``integer`` of the number of results that have not met the
threshold.
tolerance : int
An ``int`` of the percentage below the threshold to still mark as
passing.
Returns
-------
int
Returns an ``integer`` of the number of results that have not met the
threshold.
"""
for test, value in baselines.items():
if test_name not in results.keys():
continue
if test_name == 'bandwidth':
unit = '(GB/s)'
expected = value / 1000000000
got = round(results[test_name][test] / 1000000000, 3)
elif test_name == 'iops':
unit = '(k IOPS)'
expected = value / 1000
got = round(results[test_name][test] / 1000, 3)
print(f' {TEST_MAPPING[test_name]} {test.title()} {unit}')
text = f' Expected: {expected}, Got: {got}'
result = metric_passes(expected, got, tolerance)
output, failures = result_text(result, failures)
text += f', Result: {output}'
print(text)
return failures | d5ad4ca319163409a526fe9d8b43be13de49680a | 32,790 |
def f1_3D(x1, x2, x3):
"""
    x1 dependent
from example 2.1 in iterative methods
"""
return -0.2*x2 - 0.2*x3 + 0.8 | 09e5a337f5fa62a4c3cddd9ae0dd701867c52b22 | 32,791 |
import typing
import rsa
def encrypt(message: typing.Union[str, bytes], n: int, e: int) -> bytes:
"""
Encrypt MESSAGE with public key specified by N and E
"""
pub_key = rsa.PublicKey(n, e)
if isinstance(message, str):
message = message.encode("utf-8")
elif isinstance(message, bytes):
pass
else:
raise Exception("Please format your message to binary or string")
message = rsa.encrypt(message, pub_key)
return message | fb89606b0d3263c5d10479970868c5336d14679b | 32,792 |
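A round-trip usage sketch for `encrypt` above (assuming the `rsa` package is installed and the function is in scope; the 512-bit key size is for demonstration only and too small for real use).

import rsa

pub_key, priv_key = rsa.newkeys(512)
ciphertext = encrypt("hello", pub_key.n, pub_key.e)
print(rsa.decrypt(ciphertext, priv_key))  # b'hello'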
import tensorflow as tf
def _safe_div(numerator, denominator):
"""Divides two tensors element-wise, returning 0 if the denominator is <= 0.
Args:
numerator: A real `Tensor`.
denominator: A real `Tensor`, with dtype matching `numerator`.
Returns:
0 if `denominator` <= 0, else `numerator` / `denominator`
"""
t = tf.truediv(numerator, denominator)
zero = tf.zeros_like(t, dtype=denominator.dtype)
condition = tf.greater(denominator, zero)
zero = tf.cast(zero, t.dtype)
return tf.where(condition, t, zero) | 04e9856b1283bf83cd63bb56c65f1d1e2667bcc6 | 32,793 |
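A tiny eager-mode check for `_safe_div` above (assuming TensorFlow 2.x is installed and the function is in scope): positions where the denominator is not positive come back as zero.

import tensorflow as tf

num = tf.constant([1.0, 4.0, 9.0])
den = tf.constant([2.0, 0.0, 3.0])
print(_safe_div(num, den).numpy())  # [0.5 0.  3. ]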
import tensorflow as tf
import tensorflow_hub as hub
def build_embedding_model():
""" Build model by stacking up a preprocessing layer
and an encoding layer.
Returns
-------
tf.keras.Model
The embedding model, taking a list of strings as input,
and outputting embeddings for each token of the input strings
"""
# Links for the pre-trained TensorFlow Hub preprocessing
# and encoding layers
tfhub_preprocessing = 'https://tfhub.dev/tensorflow/' \
'bert_en_uncased_preprocess/3'
tfhub_encoder = 'https://tfhub.dev/tensorflow/small_bert/' \
'bert_en_uncased_L-2_H-128_A-2/1'
# Define model input type and name
inputs = tf.keras.layers.Input(shape=(), dtype=tf.string, name='snippet')
# Define preprocessing layer
preprocessing_layer = hub.KerasLayer(tfhub_preprocessing,
name='preprocessing')
# Define encoding layer
encoder = hub.KerasLayer(tfhub_encoder,
trainable=True, name='BERT_encoder')
# Stack up the three layers
outputs = encoder(preprocessing_layer(inputs))
# Retrieve token embeddings i.e. the 'sequence_output' values
model_outputs = outputs['sequence_output']
# Return model
return tf.keras.Model(inputs, model_outputs) | ecf835f8543d9815c3c88b5b596c0c14d9524b66 | 32,794 |
def event_date_row(event_names_and_dates):
""" Returns the third row of the attendance csv. This is just a list of event dates.
:param list[(str, datetime)] event_names_and_dates:
A list of names and dates for each event that should appear on the csv
:returns:
the row to be printed
:rtype: [str]
"""
# =" " has to be added around the dates to make sure it isn't auto-formatted by Excel
event_dates = ['="' + str(dates) + '"' for _, dates in event_names_and_dates]
return ['', '', '', '', '', '', ''] + event_dates | 5b51eaef8cde99040a1aff9a0c6abaaef5e52896 | 32,795 |
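A usage sketch for `event_date_row` above (assuming the function is in scope; the event names and dates are invented for illustration): the seven leading blanks pad the header columns, and each date is wrapped in ="..." so spreadsheet software keeps it as literal text.

from datetime import date

events = [("Kickoff", date(2024, 1, 15)), ("Demo Day", date(2024, 2, 1))]
print(event_date_row(events))
# ['', '', '', '', '', '', '', '="2024-01-15"', '="2024-02-01"']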
def moving_tajima_d(ac, size, start=0, stop=None, step=None, min_sites=3):
"""Calculate the value of Tajima's D in moving windows of `size` variants.
Parameters
----------
ac : array_like, int, shape (n_variants, n_alleles)
Allele counts array.
size : int
The window size (number of variants).
start : int, optional
The index at which to start.
stop : int, optional
The index at which to stop.
step : int, optional
The number of variants between start positions of windows. If not
given, defaults to the window size, i.e., non-overlapping windows.
min_sites : int, optional
Minimum number of segregating sites for which to calculate a value. If
there are fewer, np.nan is returned. Defaults to 3.
Returns
-------
d : ndarray, float, shape (n_windows,)
Tajima's D.
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
... [[0, 0], [0, 1]],
... [[0, 0], [1, 1]],
... [[0, 1], [1, 1]],
... [[1, 1], [1, 1]],
... [[0, 0], [1, 2]],
... [[0, 1], [1, 2]],
... [[0, 1], [-1, -1]],
... [[-1, -1], [-1, -1]]])
>>> ac = g.count_alleles()
>>> D = allel.moving_tajima_d(ac, size=4, step=2)
>>> D
array([0.1676558 , 2.01186954, 5.70029703])
"""
d = moving_statistic(values=ac, statistic=tajima_d, size=size, start=start, stop=stop,
step=step, min_sites=min_sites)
return d | df5217180cf5b25ccb09ee88974d82290bff43ce | 32,796 |
def sample_member(user, name='Attila'):
"""
    Create and return a sample member
:param user:
:param name:
:return:
"""
return Member.objects.create(user=user, name=name) | ac171a5da2495436596bd6e597b0b9ea498c8bcf | 32,797 |
import uuid
def _create_feed(client, customer_id):
"""Creates a page feed with URLs
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a client customer ID str.
Returns:
A FeedDetails instance with information about the newly created feed.
"""
# Retrieve a new feed operation object.
feed_operation = client.get_type("FeedOperation")
# Create a new feed.
feed = feed_operation.create
feed.name = f"DSA Feed #{uuid.uuid4()}"
feed.origin = client.enums.FeedOriginEnum.USER
feed_attribute_type_enum = client.enums.FeedAttributeTypeEnum
# Create the feed's attributes.
feed_attribute_url = client.get_type("FeedAttribute")
feed_attribute_url.type_ = feed_attribute_type_enum.URL_LIST
feed_attribute_url.name = "Page URL"
feed_attribute_label = client.get_type("FeedAttribute")
feed_attribute_label.type_ = feed_attribute_type_enum.STRING_LIST
feed_attribute_label.name = "Label"
feed.attributes.extend([feed_attribute_url, feed_attribute_label])
# Retrieve the feed service.
feed_service = client.get_service("FeedService")
# Send the feed operation and add the feed.
response = feed_service.mutate_feeds(
customer_id=customer_id, operations=[feed_operation]
)
return response.results[0].resource_name | 738e940abd1a7ec90382c4011b26d757c8a916a4 | 32,798 |
def cal_pj_task_ind(_st_date, _ed_date):
"""
计算产品研发中心资源投入到非产品事务的指标。
:param _st_date: 起始日期
:param _ed_date: 截止日期
:return: 统计指标
"""
global extTask
_pj_info = handler.get_project_info("project_t")
# logging.log(logging.WARN, ">>> cal_pj_task_ind( %s, %s )" % (_st_date, _ed_date))
_pj_sum = 0
_npj_sum = 0
_project = {}
for _issue in extTask:
if _issue["updated"] is None:
continue
_issue_updated_date = _issue["updated"].split('T')[0]
"""判断任务是否在指定的时间段内"""
if handler.is_date_bef(_issue_updated_date, _st_date) or handler.is_date_aft(_issue_updated_date, _ed_date):
continue
_group = _issue['issue'].split('-')[0]
_month = int(_issue_updated_date.split('-')[1])
if _issue['project_alias'] is not None:
_pj_name = scan_project_name(_pj_info, _issue['project_alias'])
if _pj_name is None:
_pj_name = _issue['project_alias']
if _pj_name not in _project:
_project[_pj_name] = {_group: _issue['spent_time'],
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0,
8: 0,
9: 0,
10: 0,
11: 0,
12: 0,
13: 0,
"member": []
}
_project[_pj_name][_month] = _issue['spent_time']
_project[_pj_name][13] = _issue['spent_time']
else:
if _group not in _project[_pj_name]:
_project[_pj_name][_group] = _issue['spent_time']
else:
_project[_pj_name][_group] += _issue['spent_time']
_project[_pj_name][_month] += _issue['spent_time']
_project[_pj_name][13] += _issue['spent_time']
_pj_sum += _issue['spent_time']
_project[_pj_name]["member"].append(
{
"member": _issue["users"],
"date": _issue["updated"],
"summary": _issue["summary"],
"spent_time": _issue["spent_time"]
}
)
else:
"""试图从summary中寻找项目信息"""
_pj_name = scan_project_name(_pj_info, _issue['summary'])
if _pj_name is not None:
"""有项目信息"""
if _pj_name not in _project:
_project[_pj_name] = {_group: _issue['spent_time'],
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0,
8: 0,
9: 0,
10: 0,
11: 0,
12: 0,
13: 0,
"member": []
}
_project[_pj_name][_month] = _issue['spent_time']
_project[_pj_name][13] = _issue['spent_time']
else:
if _group not in _project[_pj_name]:
_project[_pj_name][_group] = _issue['spent_time']
else:
_project[_pj_name][_group] += _issue['spent_time']
_project[_pj_name][_month] += _issue['spent_time']
_project[_pj_name][13] += _issue['spent_time']
_pj_sum += _issue['spent_time']
_project[_pj_name]["member"].append(
{
"member": _issue["users"],
"date": _issue["updated"],
"summary": _issue["summary"],
"spent_time": _issue["spent_time"]
}
)
else:
"""无项目信息"""
print(u">>> %s" % _issue['summary'])
if u'其它' not in _project:
_project[u'其它'] = {_group: _issue['spent_time'],
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0,
8: 0,
9: 0,
10: 0,
11: 0,
12: 0,
13: 0,
"member": []
}
_project[u'其它'][_month] = _issue['spent_time']
_project[u'其它'][13] = _issue['spent_time']
else:
if _group not in _project[u'其它']:
_project[u'其它'][_group] = _issue['spent_time']
else:
_project[u'其它'][_group] += _issue['spent_time']
_project[u'其它'][_month] += _issue['spent_time']
_project[u'其它'][13] += _issue['spent_time']
_npj_sum += _issue['spent_time']
_project[u'其它']["member"].append(
{
"member": _issue["users"],
"date": _issue["updated"],
"summary": _issue["summary"],
"spent_time": _issue["spent_time"]
}
)
# logging.log(logging.WARN, ">>> return %s, %d, %d" % (_project, _pj_sum, _npj_sum))
return _project, _pj_sum, _npj_sum | 7a998a01a87abbc1f80147cf067b231429124001 | 32,799 |