content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import os
def cmd_openfile(pid,abs_filename,line_no=1,column_no=1):
    """Open a file in Visual Studio and move the caret to a position.

    Args:
        pid: Process id used to look up the Visual Studio DTE automation
            object via get_dte_obj().
        abs_filename: Absolute path of the file to open
            (Ex.) c:/project/my_app/src/main.cpp
        line_no: 1-based line number to move the caret to (default 1).
        column_no: 1-based column number to move the caret to (default 1).

    Returns:
        True on success, False otherwise.  On success the module-level
        g_cmd_result flag is also set to True.
    """
    dte = get_dte_obj(pid)
    if not dte:
        _vs_msg("Not found process")
        return False
    abs_filename = _to_unicode(abs_filename)
    # Normalize separators and case so the path compares reliably below.
    abs_filename = os.path.normpath(abs_filename).lower()
    if not os.path.isfile(abs_filename):
        _vs_msg("File not found."+abs_filename)
        return False
    # A file that is not part of the solution could otherwise be opened and
    # built anyway; require that the file is found in the solution first.
    ret = _search_file(dte,abs_filename)
    if None is ret:
        return False
    dte.ItemOperations.OpenFile(abs_filename)
    dte.ActiveDocument.Selection.StartOfDocument()
    dte.ActiveDocument.Selection.MoveToLineAndOffset(int(line_no),int(column_no),False)
    global g_cmd_result
    g_cmd_result=True
    return True
def optimize(s, probability, loBound, hiBound):
    """Find the largest withdrawal rate that keeps the failure probability
    at or below the requested level, by bisection on [loBound, hiBound].

    Args:
        s: Simulation object exposing the ``simulation``, ``withdrawal`` and
            ``visualization`` dicts plus ``init_simulation()``, ``simulate()``
            and ``latest_simulation``.
        probability: Allowed failure probability in percent.
        loBound: Lower bound of the withdrawal-rate search interval.
        hiBound: Upper bound of the withdrawal-rate search interval.

    Returns:
        The maximum feasible percentage withdrawal found.
    """
    total_months = s.simulation['n_ret_years'] * 12
    tolerance = 0.01  # stopping width of the bisection
    # Start in the middle of the search interval.
    step = (hiBound - loBound) / 2
    rate = loBound + step
    iteration = 0
    fail_pct = 0
    # Successive approximation: halve the step each round and move the
    # candidate rate up or down depending on the observed failure rate.
    while step > tolerance or fail_pct > probability:
        iteration += 1
        s.withdrawal['fixed_pct'] = rate
        s.init_simulation()
        s.simulate()
        exhaustion_months = [trial['exhaustion'] for trial in s.latest_simulation]
        surviving = exhaustion_months.count(total_months)
        fail_pct = 100 * (len(exhaustion_months) - surviving) / len(exhaustion_months)
        if s.visualization['textoutput'] == True:
            print(iteration, '. Entnahme: ', rate, ' Ausfallwahrscheinlichkeit: ', fail_pct, '%')
        step /= 2
        if step <= tolerance / 10:
            break
        if fail_pct > probability:
            rate -= step
        else:
            rate += step
    return rate
from typing import Tuple
import torch
def torch_data_loader(
        features: np.ndarray,
        labels: np.ndarray,
        batch_size: int,
        shuffle: bool = False,
        num_workers: int = 0
) -> torch.utils.data.DataLoader:
    """
    Creates a data loader over the given features and labels.
    Parameters
    ---------
    features: numpy.ndarray
        Input features.
    labels: numpy.ndarray
        Labels.
    batch_size: int
        Batch size
    shuffle: bool, optional, default: False
        Flag indicating whether to shuffle the dataset or not.
    num_workers: int, optional, default: 0
        Number of workers to use during sampling iteration.
    Returns
    -------
    torch.utils.data.DataLoader: data loader
    """
    # Wrap the numpy arrays as float tensors and pair them sample-wise.
    features = torch.Tensor(features)
    labels = torch.Tensor(labels)
    dataset = torch.utils.data.TensorDataset(features, labels)
    # A single loader is returned (the earlier annotation claiming a tuple
    # was wrong); shuffle now defaults to an explicit bool as DataLoader
    # expects, which matches the previous effective behavior (no shuffling).
    data_loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=shuffle
    )
    return data_loader
def delete_row_col(np_arr, row, col):
    """Return a copy of *np_arr* with the given row and column removed.

    np.delete always allocates a new array, so the input array is never
    modified by this function.
    """
    without_row = np.delete(np_arr, row, axis=0)
    return np.delete(without_row, col, axis=1)
def make_3d(adata,
            time_var='Metadata_Time',
            tree_var='Metadata_Trace_Tree',
            use_rep=None,
            n_pcs=50):
    """Return three dimensional representation of data.
    Args:
        adata: AnnData object holding observations and representations.
        time_var (str): Variable in .obs with timesteps
        tree_var (str): Variable in .obs with tree identifier
        use_rep (str): Representation of the data to use (e.g. 'X' or
            'X_pca'); defaults to 'X'.
        n_pcs (int): Number of PCs to use if use_rep is "X_pca"
    Returns:
        np.array of shape [samples x timesteps x features]
    """
    # Keep only observations with a finite (valid) tree identifier.
    adata = adata[np.isfinite(adata.obs[tree_var])]
    if use_rep is None:
        use_rep = 'X'
    X = choose_representation(adata,
                              rep=use_rep,
                              n_pcs=n_pcs)
    trees, trees_ix = np.unique(adata.obs[tree_var], return_inverse=True)
    time, time_ix = np.unique(adata.obs[time_var], return_inverse=True)
    # Allocate with the feature count of the chosen representation: for
    # 'X_pca' this is n_pcs, not adata.n_vars, so using adata.n_vars here
    # would make the scatter-assignment below fail.
    X_traj = np.zeros((len(trees), len(time), X.shape[1]), dtype=X.dtype)
    X_traj[trees_ix, time_ix, :] = X
    return X_traj
import re
def _natural_key(x):
""" Splits a string into characters and digits. This helps in sorting file
names in a 'natural' way.
"""
return [int(c) if c.isdigit() else c.lower() for c in re.split("(\d+)", x)] | 1fab7dffb9765b20f77ab759e43a23325b4441f4 | 3,627,705 |
def get_location_1(box_2d, dimension, rotation_x, rotation_y, rotation_z, proj_matrix):
    """
    Method 1: assume the 2D bbox center coincides with the projected 3D bbox center.

    Only a single center-point correspondence exists, so the system is hard
    to constrain.  Substituting the true Z value gives acceptable results,
    since Z is much larger than X and Y.

    Args:
        box_2d: 2D box as [xmin, ymin, xmax, ymax] in pixels.
        dimension: object size as [h, w, l].
        rotation_x, rotation_y: rotation angles fed to get_R.
        rotation_z: currently unused.
        proj_matrix: 3x4 camera projection matrix.

    Returns:
        list: estimated 3D location [x, y, z].
    """
    R = get_R(rotation_x, rotation_y)
    # format 2d corners
    xmin = box_2d[0]
    ymin = box_2d[1]
    xmax = box_2d[2]
    ymax = box_2d[3]
    h, w, l = dimension[0], dimension[1], dimension[2]
    # Offset from the object origin to its vertical center.
    constraints = [0, -h/2, 0]
    corners = [(xmin+xmax)/2, (ymin+ymax)/2]
    # create pre M (the term with I and the R*X)
    M = np.zeros([4, 4])
    for i in range(0, 4):
        M[i][i] = 1
    # create A, b.  Use the builtin float: np.float was deprecated in
    # NumPy 1.20 and removed in 1.24.
    A = np.zeros([2, 3], dtype=float)
    b = np.zeros([2, 1])
    RX = np.dot(R, constraints)
    M[:3, 3] = RX.reshape(3)
    M = np.dot(proj_matrix, M)
    # Two linear equations from the u and v image coordinates of the center.
    A[0, :] = M[0, :3] - corners[0] * M[2, :3]
    b[0] = corners[0] * M[2, 3] - M[0, 3]
    A[1, :] = M[1, :3] - corners[1] * M[2, :3]
    b[1] = corners[1] * M[2, 3] - M[1, 3]
    # Least-squares solve for the 3D location.
    loc, error, rank, s = np.linalg.lstsq(A, b, rcond=None)
    loc = [loc[0][0], loc[1][0], loc[2][0]]
    return loc
import os
def find_file(directory: str, search_file: str) -> str:
    """Finds relative path of file in given directory and its subdirectories.

    The match is case-insensitive and suffix-based; files below a ".test"
    path component are ignored, and when several candidates remain the one
    with the shortest absolute path wins.

    Args:
        directory (str): Directory to search in.
        search_file (str): File to search in directory.
    Returns:
        str: Path of the file relative to *directory*, or "" if not found.
    """
    paths = [
        os.path.join(root, file)
        for root, _, files in os.walk(directory)
        for file in files
        if file.lower().endswith(search_file.lower())
    ]
    # Filter *before* the emptiness check: previously, when every match sat
    # under a ".test" directory, min() was called on an empty list and raised.
    paths = [ele for ele in paths if ".test" not in ele]
    if not paths:
        return ""
    shortest_path = min(paths, key=len)
    # relpath only strips the directory *prefix*, unlike str.replace which
    # would also remove occurrences of `directory` in the middle of a path,
    # and it copes with trailing separators in `directory`.
    return os.path.relpath(shortest_path, directory)
def get_smoothing_kernel(x, y, smoothing_length):
    """Evaluate the dimensionless 2D kernel for the offset (x, y).

    x = x - xj, y = y - yj (offsets relative to particle j).

    NOTE(review): r_xy_2 = x + y only represents a squared distance if the
    caller already passes squared offsets (x = dx**2, y = dy**2), and then
    q_xy_2 = r^2 / h rather than r / h -- confirm the caller's convention
    and the argument expected by get_dimensionless_2D_kernel.
    """
    r_xy_2 = x + y
    q_xy_2 = r_xy_2 / smoothing_length
    return get_dimensionless_2D_kernel(q_xy_2)
def evolve_population(population: list, generation: int) -> list:
    """
    Evolve an existing population by doubling it (binary fission) and
    re-instantiating every member with a unique generation-scoped name.
    :param generation: Helps determine the starting point of the numbering
        system so the bacteria have unique names
    :param population: A list of fasta files representing the bacteria.
    :return: The doubled list of freshly constructed Bacterium objects,
        named "bacterium_<generation>_<index>".
    """
    doubled = population + population
    offspring = []
    for index, parent in enumerate(doubled):
        # 1-based numbering within the generation keeps names unique.
        name = "bacterium_{}_{}".format(generation, index + 1)
        offspring.append(Bacterium(parent.get_file(), name, parent.get_chroms()))
    return offspring
def create_siamese_trainer(
    model,
    optimizer,
    loss_fn,
    device=None,
    non_blocking=False,
    prepare_batch=_prepare_batch,
    output_transform=output_transform_trainer,
):
    """Factory function for creating an ignite trainer Engine for a siamese architecture.

    Args:
        model: siamese network module that receives, embeds and computes embedding distances between image pairs.
        optimizer: the optimizer to be used for the siamese network
        loss_fn: contrastive loss function
        device (optional): device type specification. (Default value = None)
        non_blocking: if True and the copy is between CPU and GPU, the copy may run asynchronously (Default value = False)
        prepare_batch (optional): batch preparation logic. Takes a batch, the device and the `non_blocking`
            option and returns the triplet tensors: the siamese pair tensors and the siamese target indicating pair similarity. (Default value = _prepare_batch)
        output_transform (optional): function that receives the result of a siamese network trainer engine (the
            siamese pair embeddings, siamese target and the loss module) and returns value to be assigned to the engine's
            `state.output` after each iteration, typically the loss value. (Default value = output_transform_trainer)

    Returns:
        a trainer engine with the update function
    """
    if device:
        model.to(device)
    def _update(_, batch):
        """Single training step executed by the ignite Engine.

        Args:
            _: the engine instance (unused).
            batch: one batch as produced by the data loader.

        Returns:
            Whatever `output_transform` produces, typically the loss value.
        """
        images_1, images_2, target = prepare_batch(batch, device=device, non_blocking=non_blocking) # unpack batch
        optimizer.zero_grad() # reset gradients
        model.train() # training mode
        embeddings_0, embeddings_1 = model(images_1[0], images_2[0]) # train over batch pairs. Actual images in `0` idx
        contrastive_loss = loss_fn(embeddings_0, embeddings_1, target) # compute the contrastive loss
        contrastive_loss.backward() # accumulate gradients
        optimizer.step() # update model weights
        return output_transform(embeddings_0, embeddings_1, target, contrastive_loss)
    return Engine(_update)
import json
def handle_methods(
    request,
    GET=None,
    POST=None,
    PUT=None,
    PATCH=None,
    DELETE=None,
    args=(),
    kwargs=None,
):
    """
    REST Method Handler.

    Return the view handle_methods(request).
    Add all allowed methods with their responses. These can either be a Django HttpResponse,
    or a callable function that takes a single request argument and calls it if the request is of the specified type.
    Any requests without a defined type will be returned with a 405 Method Not Allowed
    and a corresponding list of allowed methods.
    The supported methods are GET, POST, PUT, PATCH, and DELETE; additional
    non-standard methods may be supplied through `kwargs`.
    Example:
        def my_restful_view(request):
            return handle_methods(request,
                                  GET=HttpResponse("GET"),
                                  POST=HttpResponse("POST"),
                                  PUT=HttpResponse("PUT"),
                                  PATCH=my_restful_patch_function)
    """
    # Immutable/None defaults instead of []/{}: the old mutable defaults
    # were shared across calls, and kwargs.pop() below even mutated the
    # module-level default dict.
    if kwargs is None:
        kwargs = {}
    handlers = {
        "GET": GET,
        "POST": POST,
        "PUT": PUT,
        "PATCH": PATCH,
        "DELETE": DELETE,
    }
    def method_not_allowed():
        # 405 response advertising every method with a configured handler.
        allowed = [name for name, handler in handlers.items() if handler is not None]
        return HttpResponseNotAllowed(allowed)
    # Parse the request body into request.DATA according to content type;
    # fall back to form-encoded parsing on any parse error.
    try:
        if request.content_type.lower() == "application/json":
            data = json.loads(request.body)
        elif request.content_type.lower() == "multipart/form-data":
            data = (
                MultiPartParser(request.META, request, request.upload_handlers)
                .parse()[0]
                .dict()
            )
        else:
            data = QueryDict(request.body).dict()
    except Exception:
        data = QueryDict(request.body).dict()
    request.DATA = data
    if request.method in handlers:
        handler = handlers[request.method]
        if handler is None:
            return method_not_allowed()
        if callable(handler):
            return handler(request, *args, **kwargs)
        return handler
    # Fallback: non-standard methods may be passed as entries of kwargs.
    for method, function in kwargs.items():
        if request.method == method:
            if function is None:
                return method_not_allowed()
            if callable(function):
                # Remove the handler entry so it is not forwarded as a kwarg.
                kwargs.pop(method)
                return function(request, *args, **kwargs)
            return function
    return method_not_allowed()
def compute_bounding_box(points, convex_hull=None,
                         given_angles=None, max_error=None):
    """
    Computes the minimum area oriented bounding box of a set of points.
    Parameters
    ----------
    points : (Mx2) array
        The coordinates of the points.
    convex_hull : scipy.spatial.ConvexHull, optional
        The convex hull of the points, as computed by SciPy.
    given_angles : list of float, optional
        If set the minimum area bounding box of these angles will be checked
        (instead of the angles of all edges of the convex hull).
    max_error : float, optional
        The maximum error (distance) a point can have to the bounding box.
    Returns
    -------
    bbox : polygon
        The minimum area oriented bounding box as a shapely polygon.
    """
    if convex_hull is None:
        convex_hull = ConvexHull(points)
    hull_points = points[convex_hull.vertices]
    def _bbox_for(candidate_angles):
        # Rotating-calipers sweep over the candidate angles.
        corner_points = rotating_calipers_bbox(hull_points, candidate_angles)
        return Polygon(corner_points)
    if given_angles is None:
        bbox = _bbox_for(compute_edge_angles(points[convex_hull.simplices]))
    else:
        bbox = _bbox_for(given_angles)
        # The supplied angles may not fit well enough; fall back to the full
        # set of hull-edge angles when the error bound is violated.
        if max_error is not None and not check_error(points, bbox, max_error):
            bbox = _bbox_for(compute_edge_angles(points[convex_hull.simplices]))
    return bbox
import argparse
from typing import Tuple
from typing import List
import re
import sys
def parse_outputs_from_args(args: argparse.Namespace) -> Tuple[List[str], List[int]]:
    """Get a list of outputs specified in the args."""
    # Entries are separated by "," (optionally ", ") and each entry is
    # expected to be "<layer_name>:<port>".
    split_pairs = [entry.split(':') for entry in re.split(', |,', args.output_layers)]
    try:
        layer_names = [pair_name for pair_name, _ in split_pairs]
        ports = [int(pair_port) for _, pair_port in split_pairs]
        return layer_names, ports
    except ValueError:
        # Raised both by a missing ":<port>" (unpacking) and a non-integer port.
        log.error('Incorrect value for -oname/--output_layers option, please specify a port for each output layer.')
        sys.exit(-4)
def n_pitches_used(tensor):
    """Return the number of unique pitches used per bar.

    Expects a 5-D tensor; nonzero entries are counted along axis 3 and
    summed over axis 2, then averaged over axes 0 and 1 (presumably batch
    and bar dimensions -- TODO confirm axis meanings against the caller's
    tensor layout).

    Raises:
        ValueError: If *tensor* is not 5-dimensional.
    """
    if tensor.get_shape().ndims != 5:
        raise ValueError("Input tensor must have 5 dimensions.")
    # tf.count_nonzero is the TF1-era alias of tf.math.count_nonzero.
    return tf.reduce_mean(tf.reduce_sum(tf.count_nonzero(tensor, 3), 2), [0, 1])
from datetime import datetime
def survey():
    """Survey home page: single endpoint driving the whole experiment flow.

    The user's ``current_stage`` DB column ('simulation', 'risk',
    'risk_answer', 'demographics', 'cognitive', 'thankyou', ...) selects
    which template is rendered on GET and which DB updates run on POST;
    ``shuttle_dict`` (presumably module level -- not visible here) maps each
    stage to its successor.  Always ends by rendering a stage template or
    redirecting back to this view.
    """
    N_SIMULATION_PERIODS = get_n_periods()
    db = get_db()
    # The current user's row drives everything: stage, period, treatment.
    user_data = db.execute(
        "SELECT * FROM user WHERE id = ?",
        (session["user_id"],)
    ).fetchone()
    user_stage = user_data['current_stage']
    simulation_period = user_data['simulation_period']
    user_treatment_level = user_data['treatment_level']
    display_dict = {}
    # Pre-compute everything the simulation templates need (figure URL,
    # demand parameters, calculator fields and the history table) for both
    # the GET and POST branches below.
    if user_stage == 'simulation':
        fig_url = \
            blog_functions.get_fig_url(user_treatment_level, simulation_period)
        experiment_data, rec_param_demand_data = \
            blog_functions.get_experiment_data(
                db, simulation_period, user_treatment_level)
        rec_param_demand_data_cols = ['Q_rec', 'v', 'p']
        display_dict.update({x: int(rec_param_demand_data[x].tolist()[0])
                             for x in rec_param_demand_data_cols})
        show_recs = True
        calc_decision_suffixes = ['_Q']
        calc_decision_list = [
            x + y for x in ['calc', 'decision'] for y in calc_decision_suffixes
        ]
        display_dict.update(
            {x: 0 for x in calc_decision_list})
        display_dict.update(
            {'calc_errors': [],
             'calc_n_errors': 0,
             'decision_errors': [],
             'decision_n_errors': 0,
             'expected_profit': 0}
        )
        # need an empty dataframe before history is made
        temp_display_df_cols = ['Period',
                                'Ordered From Supplier',
                                'Demand',
                                'Profit ($)']
        if ((simulation_period >= 2)
                & (simulation_period <= N_SIMULATION_PERIODS)):
            # get the relevant historical data and display it as a table
            temp_exp_df = experiment_data.loc[
                (experiment_data['ID'] == user_treatment_level)
                & (experiment_data['Period'] < simulation_period)][
                ['Period', 'Demand']]
            # now get the full contracts table for this user
            temp_user_contracts_df = read_sql_query(
                "SELECT * FROM contracts WHERE user_id = "\
                + str(session["user_id"]), con=db
            )
            temp_display_df = temp_exp_df.merge(
                temp_user_contracts_df,
                how='left',
                left_on='Period', right_on='simulation_period')
            temp_display_df = blog_functions.get_contract_metrics(
                temp_display_df,
                display_dict['v'],
                display_dict['p'],
                'Demand',
                'q'
            )
            temp_display_df.rename(
                {'q': 'Ordered From Supplier',
                 'sales': 'Sales (Units)',
                 'lost_sales': 'Lost Sales (Units)',
                 'profit': 'Profit ($)'}, axis=1, inplace=True)
            cols = ['Period', 'Demand',
                    'Ordered From Supplier',
                    'Profit ($)']
            temp_display_df = temp_display_df[temp_display_df_cols]
        else:
            temp_display_df = DataFrame(columns=temp_display_df_cols)
    if request.method == 'GET':
        if user_stage == 'simulation':
            # "Calculate" only previews the expected profit; it does not
            # commit a decision, but the calculator usage is counted.
            if request.args.get('action') == 'Calculate':
                validate = blog_functions.validate_input()
                error_list = blog_functions.do_validate_instructions(
                    validate, display_dict, request, 'calc_Q', 'calc'
                )
                if len(error_list) == 0:
                    expected_profit = blog_functions.get_expected_profit(
                        int(display_dict['v']),
                        int(display_dict['p']),
                        int(request.args.get('calc_Q'))
                    )
                    display_dict.update({
                        'expected_profit': np_round(expected_profit, 2)
                    })
                    update_calculator_count = user_data['calculator_count'] + 1
                    db.execute("UPDATE user"
                               " SET calculator_count = ?"
                               " WHERE id = ?;",
                               (update_calculator_count,
                                session["user_id"]))
                    db.commit()
                return render_template("blog/" + user_stage + ".html",
                                       display_dict=display_dict,
                                       simulation_period=simulation_period,
                                       historical_table=temp_display_df.to_html(
                                           index=False,
                                           justify='left'),
                                       fig_url=fig_url,
                                       show_recs=show_recs)
            if simulation_period <= N_SIMULATION_PERIODS:
                return render_template("blog/" + user_stage + ".html",
                                       display_dict=display_dict,
                                       simulation_period=simulation_period,
                                       historical_table=temp_display_df.to_html(
                                           index=False,
                                           justify='left'),
                                       fig_url=fig_url,
                                       show_recs=show_recs)
            else:
                # Simulation finished: advance the user to the next stage.
                db.execute("UPDATE user"
                           " SET current_stage = ?"
                           " WHERE id = ?;",
                           (shuttle_dict[user_stage], session["user_id"]))
                db.commit()
        if user_stage == 'risk':
            return render_template("blog/" + user_stage + ".html",
                                   question_dict=QUESTION_DICT,
                                   risk_preference_dict=RISK_PREFERENCE_DICT)
        if user_stage == 'risk_answer':
            # Build the lottery-resolution text from the user's RP9 choice.
            given_answer = RISK_PREFERENCE_DICT['RP9'][user_data['RP9']]
            answer_list = ['You chose ' + given_answer + '.']
            answer_list.extend(
                ['The computer chose ' + UNFORTUNATE_RP9[user_data['RP9']][0] \
                 + ' points.'])
            answer_list.extend(['If you would have chosen "' + \
                RISK_PREFERENCE_DICT['RP9'][1 - user_data['RP9']] +
                '", you would have won ' + \
                UNFORTUNATE_RP9[user_data['RP9']][1] + ' points!'])
            return render_template("blog/" + user_stage + ".html",
                                   answer_list=answer_list)
        # Default GET: render the template matching the current stage.
        return render_template("blog/" + user_stage + ".html")
    if request.method == 'POST':
        if user_stage == 'demographics':
            gender = request.form.get('gender')
            age = request.form.get('age')
            sc = request.form.get('sc')
            procurement = request.form.get('procurement')
            db.execute("UPDATE user"
                       " SET gender = ?, age = ?, sc_exp = ?,"
                       " procurement_exp = ?, current_stage = ?"
                       " WHERE id = ?;",
                       (gender, age, sc, procurement,
                        shuttle_dict[user_stage], session["user_id"]))
            db.commit()
        if user_stage == 'cognitive':
            # Store the cognitive reflection test answers and timestamp the
            # moment the user enters the simulation.
            db.execute("UPDATE user"
                       " SET CRT1 = ?, CRT2 = ?, CRT3 = ?,"
                       " CRT4 = ?, CRT5 = ?, CRT6 = ?, CRT7 = ?,"
                       " current_stage = ?, enter_simulation = ?"
                       " WHERE id = ?;",
                       (request.form.get("CRT1"),
                        request.form.get("CRT2"),
                        request.form.get("CRT3"),
                        request.form.get("CRT4"),
                        request.form.get("CRT5"),
                        request.form.get("CRT6"),
                        request.form.get("CRT7"),
                        shuttle_dict[user_stage],
                        datetime.now(),
                        session["user_id"]))
            db.commit()
        if user_stage == 'simulation':
            if simulation_period <= N_SIMULATION_PERIODS:
                validate = blog_functions.validate_input()
                error_list = blog_functions.do_validate_instructions(
                    validate, display_dict, request, 'decision_Q', 'decision'
                )
                if len(error_list) == 0:
                    # Record the order decision for this period.
                    db.execute("INSERT INTO contracts"
                               "(user_id, simulation_period, q, time_stamp,"
                               " calculator_count)"
                               "VALUES (?, ?, ?, ?, ?);",
                               (session["user_id"],
                                simulation_period,
                                int(request.form.get('decision_Q')),
                                datetime.now(),
                                user_data['calculator_count'])
                               )
                    db.commit()
                    update_simulation_period = simulation_period + 1
                    if simulation_period < N_SIMULATION_PERIODS:
                        db.execute("UPDATE user"
                                   " SET simulation_period = ?"
                                   " WHERE id = ?",
                                   (update_simulation_period,
                                    session["user_id"]))
                        db.commit()
                    else:
                        # go to the risk survey
                        db.execute("UPDATE user"
                                   " SET current_stage = ?"
                                   " WHERE id = ?;",
                                   (shuttle_dict[user_stage], session["user_id"]))
                        db.commit()
                else:
                    # Invalid decision input: re-render with the error state
                    # collected into display_dict by do_validate_instructions.
                    return render_template("blog/" + user_stage + ".html",
                                           display_dict=display_dict,
                                           simulation_period=simulation_period,
                                           historical_table=temp_display_df.to_html(
                                               index=False,
                                               justify='left'),
                                           fig_url=fig_url,
                                           show_recs=show_recs)
        if user_stage == 'risk':
            fin_answer_dict = {x: request.form.get(x)
                               for x in QUESTION_DICT.keys()
                               }
            risk_answer_dict = {x: request.form.get(x)
                                for x in RISK_PREFERENCE_DICT.keys()
                                }
            # Parameter order must match the SQL below: stage first, then
            # the Fin and RP answers, then the user id.
            all_updates = [shuttle_dict[user_stage]]
            all_updates.extend([int(fin_answer_dict[x])
                                for x in fin_answer_dict.keys()])
            all_updates.extend([int(risk_answer_dict[x])
                                for x in risk_answer_dict.keys()])
            all_updates.extend([session["user_id"]])
            # NOTE(review): '..."RP9 = ?" "WHERE id = ?;"' concatenates with
            # no space before WHERE ("?WHERE") -- verify the DB driver
            # tokenizes this as intended.
            db.execute("UPDATE user"
                       " SET current_stage = ?,"
                       " Fin1 = ?, Fin2 = ?, Fin3 = ?, Fin4 = ?, Fin5 = ?, Fin6 = ?,"
                       " RP1 = ?, RP2 = ?, RP3 = ?, RP4 = ?, RP5 = ?, RP6 = ?,"
                       " RP7 = ?, RP8 = ?, RP9 = ?"
                       "WHERE id = ?;",
                       tuple(all_updates)
                       )
            db.commit()
        if user_stage == 'risk_answer':
            answer = request.form.get('RP10')
            db.execute("UPDATE user"
                       " SET current_stage = ?,"
                       " RP10 = ?"
                       "WHERE id = ?;",
                       (shuttle_dict[user_stage], answer, session["user_id"])
                       )
            db.commit()
        if user_stage == 'thankyou':
            # Final stage: persist feedback and end the session.
            feedback = request.form.get('feedback_input')
            db.execute("UPDATE user"
                       " SET feedback = ?, current_stage = ?"
                       " WHERE id = ?;",
                       (feedback, shuttle_dict[user_stage],
                        session["user_id"]))
            db.commit()
            session.clear()
            return redirect(url_for("blog.survey"))
    # Post/Redirect/Get: reload this view so the new stage is rendered.
    return redirect(url_for("blog.survey"))
import struct
def ReadXTrace(trace_filename):
    """
    Returns the trace for this XTrace dataset.
    @param trace_filename: location of file to read into XTrace object
    """
    # Fixed-width string field sizes used by the binary format.
    short_field = 32
    long_field = 64
    def read_string(fd, width):
        # Strings are stored as fixed-width, NUL-padded byte fields.
        raw, = struct.unpack('%ds' % width, fd.read(width))
        return raw.decode().strip('\0')
    with open(trace_filename, 'rb') as fd:
        # Header: dataset, request type, request and base id.
        dataset = read_string(fd, short_field)
        request_type = read_string(fd, short_field)
        request = read_string(fd, long_field)
        base_id = read_string(fd, short_field)
        # Counts of the node and edge records that follow.
        nnodes, nedges, = struct.unpack('ii', fd.read(8))
        nodes = []
        for _ in range(nnodes):
            node_id = read_string(fd, short_field)
            function_id = read_string(fd, long_field)
            timestamp, = struct.unpack('q', fd.read(8))
            nodes.append(XTraceNode(node_id, function_id, timestamp))
        edges = []
        for _ in range(nedges):
            # Edge record: indices into the node list plus a duration.
            source_index, destination_index, = struct.unpack('ii', fd.read(8))
            duration, = struct.unpack('q', fd.read(8))
            edges.append(XTraceEdge(nodes[source_index], nodes[destination_index], duration))
    return XTrace(dataset, nodes, edges, request_type, request, base_id)
import pandas as pd
import numpy as np
import mydatapreprocessing as mdp
def to_vue_plotly(data: np.ndarray | pd.DataFrame, names: list = None) -> dict:
"""Takes data (dataframe or numpy array) and transforms it to form, that vue-plotly understand.
Links to vue-plotly:
https://www.npmjs.com/package/vue-plotly
https://www.npmjs.com/package/@rleys/vue-plotly - fork for vue 3 version
Note:
In js, you still need to edit the function, it's because no need to have all x axis for every column.
Download the js function from project-starter and check for example.
Args:
data (np.array | pd.DataFrame): Plotted data.
names (list, optional): If using array, you can define names. If using pandas, columns are
automatically used. Defaults to None.
Returns:
dict: Data in form for plotting in frontend.
Example:
>>> import pandas as pd
>>> df = pd.DataFrame([[1, "a"], [2, "b"]], columns=["numbers", "letters"])
>>> to_vue_plotly(df)
{'x_axis': [0, 1], 'y_axis': [[1, 2]], 'names': ['numbers']}
"""
if isinstance(data, np.ndarray):
data = pd.DataFrame(data, columns=names)
data = pd.DataFrame(data)
numeric_data = data.select_dtypes(include="number").round(decimals=3)
# TODO fix datetime
try:
numeric_data = mdp.misc.add_none_to_gaps(numeric_data)
except Exception:
pass
numeric_data = numeric_data.where(np.isfinite(numeric_data), np.nan)
# TODO
# Remove dirty hack... editing lists
values_list = numeric_data.values.T.tolist()
for i, j in enumerate(values_list):
values_list[i] = [k if not np.isnan(k) else None for k in j]
# TODO use typed dict? May not work in VUE
return {
"x_axis": numeric_data.index.to_list(), # type: ignore
"y_axis": values_list,
"names": numeric_data.columns.values.tolist(),
} | e29814b89550ae0247342607bea90ec263dbf72a | 3,627,717 |
def get_model_name(file_path, sheet_name):
    """
    Return the model name, which is assumed to be in the first row, second
    column of the given sheet.
    Args:
        file_path: path to file containing the model
        sheet_name: name of the excel sheet where the model name is, should be 'general'
    Returns:
        Model name
    """
    sheet = pd.read_excel(file_path, sheet_name=sheet_name)
    # First row, second column (header row is consumed by read_excel).
    return sheet.iloc[0, 1]
def duplicate_ticket_view(request, uuid):
    """
    Create duplicate of a given ticket (found by uuid).
    The result is a new ticket, which has the same connection and validity_period.
    Does not allow duplicating shared tickets.
    Does not allow to duplicate if you are not the author of ticket.

    Raises (as DRF responses):
        PermissionDenied: if the ticket is shared (has a parent) or the
            requester is not its author.
        NotFound: if no ticket with the given uuid exists.
    """
    try:
        original_ticket = Ticket.objects.get(pk=uuid)
        # Raise exception if this ticket is a shared ticket
        if original_ticket.parent is not None:
            raise PermissionDenied(detail="This is a shared ticket and cannot be duplicated.")
        # Raise exception if this ticket's author is not request user
        if original_ticket.author != request.user:
            raise PermissionDenied(detail="You are not the author of ticket, you cannot duplicate it.")
        # GET only confirms existence; the actual duplication requires POST.
        if request.method == 'GET':
            return Response("Ticket found. Use POST method to with empty body to duplicate.")
        ticket = Ticket.objects.create(
            author=request.user,
            user=request.user,
            connection=original_ticket.connection,
            validityperiod=original_ticket.validityperiod)
        # Audit trail: record the event on both the source and the new ticket.
        TicketLog.addlog(original_ticket, 'duplicate', request=request)
        TicketLog.addlog(ticket, 'create', request=request)
        return Response(TicketSerializer(ticket).data, status=status.HTTP_202_ACCEPTED)
    except Ticket.DoesNotExist:
        raise NotFound(detail="Ticket not found")
from typing import Tuple
import math
def _projected_velocities_from_cog(beta: float, cog_speed: float) -> Tuple[float, float]:
"""
Computes the projected velocities at the rear axle using the Bicycle kinematic model using COG data
:param beta: [rad] the angle from rear axle to COG at instantaneous center of rotation
:param cog_speed: [m/s] Magnitude of velocity vector at COG
:return: Tuple with longitudinal and lateral velocities [m/s] at the rear axle
"""
# This gives COG longitudinal, which is the same as rear axle
rear_axle_forward_velocity = math.cos(beta) * cog_speed # [m/s]
# Lateral velocity is zero, by model assumption
rear_axle_lateral_velocity = 0
return rear_axle_forward_velocity, rear_axle_lateral_velocity | defbfa58d1e67b67ff4a118ebff03e62f4c1042c | 3,627,720 |
from typing import List
import math
def prime_factors(a:int) -> List[int]:
    """
    Returns the distinct prime factors of a number.
    Parameters:
        a (int): the number to return the prime factors of
    Returns:
        (list[int]): an unsorted list of the distinct prime factors of a
    """
    # prime numbers only have itself as a prime factor
    if is_prime(a):
        return [a]
    pfs = []
    # Check divisor pairs (i, a // i) up to sqrt(a).
    for i in range(1, math.floor(math.sqrt(a))+1):
        if a % i == 0:
            if is_prime(i):
                pfs.append(i)
            other = a//i
            # Guard against appending the same factor twice when a is a
            # perfect square (i == a // i): e.g. a == 4 used to yield [2, 2].
            if other != i and is_prime(other):
                pfs.append(other)
    return pfs
def add_ngram(sequences, token_indice, ngram_range=2):
    """
    Augment the input list of list (sequences) by appending n-grams values.
    Example: adding bi-gram
    >>> sequences = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]
    >>> token_indice = {(1, 3): 1337, (9, 2): 42, (4, 5): 2017}
    >>> add_ngram(sequences, token_indice, ngram_range=2)
    [[1, 3, 4, 5, 1337, 2017], [1, 3, 7, 9, 2, 1337, 42]]
    Example: adding tri-gram
    >>> sequences = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]
    >>> token_indice = {(1, 3): 1337, (9, 2): 42, (4, 5): 2017, (7, 9, 2): 2018}
    >>> add_ngram(sequences, token_indice, ngram_range=3)
    [[1, 3, 4, 5, 1337, 2017], [1, 3, 7, 9, 2, 1337, 42, 2018]]
    """
    augmented = []
    for sequence in sequences:
        # Work on a copy so the input sequences stay untouched.
        extended = sequence[:]
        for size in range(2, ngram_range + 1):
            # Note: len(extended) is re-read per size, so tokens appended for
            # smaller sizes participate in the larger n-gram windows.
            for start in range(len(extended) - size + 1):
                gram = tuple(extended[start:start + size])
                if gram in token_indice:
                    extended.append(token_indice[gram])
        augmented.append(extended)
    return augmented
import json
def get_sea_surface_height_trend_image():
    """Return a JSON response with a map-tile URL template for the sea
    surface height trend image (users/fbaart/ssh-trend-map) rendered via
    Earth Engine.  The 'time' band is shown with a fixed diverging palette
    over [-0.03, 0.03].  The JSON request body is read but not used here.
    """
    r = request.get_json()
    image = ee.Image('users/fbaart/ssh-trend-map')
    # Visualize the trend ('time' band) with a diverging palette.
    image = image.visualize(**{'bands': ['time'], 'min': -0.03, 'max': 0.03,
                               'palette': ["151d44", "156c72", "7eb390",
                                           "fdf5f4", "db8d77", "9c3060",
                                           "340d35"]})
    m = image.getMapId()
    mapid = m.get('mapid')
    token = m.get('token')
    # NOTE(review): the mapid/token tile URL scheme is the legacy Earth
    # Engine API -- confirm it is still supported by the EE version in use.
    url = 'https://earthengine.googleapis.com/map/{0}/{{z}}/{{x}}/{{y}}?token={1}'.format(
        mapid, token)
    response = Response(json.dumps({'url': url}), status=200,
                        mimetype='application/json')
    return response
def xy_to_rho(pt1, pt2):
    """Convert the line through two points into (rho, theta) form.

    rho is the distance from the origin to the line and theta is the angle
    of the perpendicular from the origin with the positive x-axis.
    """
    # find inverse of slope of line
    # NOTE(review): this chain maps m == 0 to minv = None and any other
    # falsy m (e.g. None for a vertical line, if xy_to_mb returns that) to
    # minv = 0 -- confirm this matches line_intersection's contract for
    # horizontal/vertical lines.
    m, b = xy_to_mb(pt1, pt2)
    minv = -1 / m if m else None if m == 0 else 0
    # find intersection point of line with line defined by rho, theta
    intersection = line_intersection(m, b, minv, 0)
    # rho is distance of line to origin
    rho = dist(intersection, (0, 0))
    # theta is angle of perpendicular line with positive x-axis
    theta = atan2(intersection[1], intersection[0])
    return rho, theta
from numpy import meshgrid, arange, ones, zeros, sin, cos, sqrt, clip
from scipy.special import jv as bessel
from numpy.random import poisson as poisson
def generate_image(image_parameters):
    """Generate image with particles.
    Input:
    image_parameters: list with the values of the image parameters in a dictionary:
        image_parameters['Particle Center X List']
        image_parameters['Particle Center Y List']
        image_parameters['Particle Radius List']
        image_parameters['Particle Bessel Orders List']
        image_parameters['Particle Intensities List']
        image_parameters['Image Half-Size']
        image_parameters['Image Background Level']
        image_parameters['Signal to Noise Ratio']
        image_parameters['Gradient Intensity']
        image_parameters['Gradient Direction']
        image_parameters['Ellipsoid Orientation']
        image_parameters['Ellipticity']
    Note: image_parameters is typically obtained from the function get_image_parameters()
    Output:
    image: image of the particle [2D numpy array of real numbers between 0 and 1]
    """
    # Unpack the parameter dictionary into locals.
    particle_center_x_list = image_parameters['Particle Center X List']
    particle_center_y_list = image_parameters['Particle Center Y List']
    particle_radius_list = image_parameters['Particle Radius List']
    particle_bessel_orders_list = image_parameters['Particle Bessel Orders List']
    particle_intensities_list = image_parameters['Particle Intensities List']
    image_half_size = image_parameters['Image Half-Size']
    image_background_level = image_parameters['Image Background Level']
    signal_to_noise_ratio = image_parameters['Signal to Noise Ratio']
    gradient_intensity = image_parameters['Gradient Intensity']
    gradient_direction = image_parameters['Gradient Direction']
    ellipsoidal_orientation_list = image_parameters['Ellipsoid Orientation']
    ellipticity = image_parameters['Ellipticity']
    ### CALCULATE IMAGE PARAMETERS
    # calculate image full size (odd, centered on the middle pixel)
    image_size = image_half_size * 2 + 1
    # calculate matrix coordinates from the center of the image
    image_coordinate_x, image_coordinate_y = meshgrid(arange(-image_half_size, image_half_size + 1),
                                                      arange(-image_half_size, image_half_size + 1),
                                                      sparse=False,
                                                      indexing='ij')
    ### CALCULATE BACKGROUND
    # initialize the image at the background level
    image_background = ones((image_size, image_size)) * image_background_level
    # add a linear intensity gradient to the image background along
    # gradient_direction, normalized by the image diagonal
    if gradient_intensity!=0:
        image_background = image_background + gradient_intensity * (image_coordinate_x * sin(gradient_direction) +
                                                                    image_coordinate_y * cos(gradient_direction) ) / (sqrt(2) * image_size)
    ### CALCULATE IMAGE PARTICLES
    image_particles = zeros((image_size, image_size))
    for particle_center_x, particle_center_y, particle_radius, particle_bessel_orders, particle_intensities, ellipsoidal_orientation in zip(particle_center_x_list, particle_center_y_list, particle_radius_list, particle_bessel_orders_list, particle_intensities_list, ellipsoidal_orientation_list):
        # calculate the radial distance from the center of the particle
        # normalized by the particle radius (the .001 epsilon avoids a
        # division by zero exactly at the particle center)
        # NOTE(review): radial_distance_from_particle is never used below —
        # only the elliptical distance feeds the profile; confirm before
        # removing.
        radial_distance_from_particle = sqrt((image_coordinate_x - particle_center_x)**2
                                             + (image_coordinate_y - particle_center_y)**2
                                             + .001**2) / particle_radius
        # for elliptical particles: rotate coordinates into the ellipse frame
        rotated_distance_x = (image_coordinate_x - particle_center_x)*cos(ellipsoidal_orientation) + (image_coordinate_y - particle_center_y)*sin(ellipsoidal_orientation)
        rotated_distance_y = -(image_coordinate_x - particle_center_x)*sin(ellipsoidal_orientation) + (image_coordinate_y - particle_center_y)*cos(ellipsoidal_orientation)
        elliptical_distance_from_particle = sqrt((rotated_distance_x)**2
                                                 + (rotated_distance_y / ellipticity)**2
                                                 + .001**2) / particle_radius
        # calculate particle profile as a sum of squared Bessel profiles,
        # one term per (order, intensity) pair
        for particle_bessel_order, particle_intensity in zip(particle_bessel_orders, particle_intensities):
            image_particle = 4 * particle_bessel_order**2.5 * (bessel(particle_bessel_order, elliptical_distance_from_particle) / elliptical_distance_from_particle)**2
            image_particles = image_particles + particle_intensity * image_particle
    # calculate image without noise as background image plus particle image,
    # clipped to the valid [0, 1] intensity range
    image_particles_without_noise = clip(image_background + image_particles, 0, 1)
    ### ADD NOISE
    # Poisson (shot) noise scaled so that the requested SNR is obtained
    image_particles_with_noise = poisson(image_particles_without_noise * signal_to_noise_ratio**2) / signal_to_noise_ratio**2
    return image_particles_with_noise
import numpy
def gaussian_filter(input, sigma, order=0, output=None, mode="reflect", cval=0.0, truncate=4.0):
    """Multidimensional Gaussian filter.

    Parameters
    ----------
    input : array_like
        The input array.
    sigma : scalar or sequence of scalars
        Standard deviation for the Gaussian kernel, given per axis as a
        sequence or as a single number applied to all axes.
    order : int or sequence of ints, optional
        Derivative order of the Gaussian per axis; 0 corresponds to plain
        convolution with a Gaussian kernel.
    output : ndarray, optional
        Array in which to place the result.
    mode : str or sequence, optional
        Boundary handling mode per axis (default "reflect").
    cval : scalar, optional
        Fill value used when mode is 'constant'.
    truncate : float
        Truncate the filter at this many standard deviations. Default is 4.0.

    Returns
    -------
    gaussian_filter : ndarray
        Returned array of same shape as `input`.

    Notes
    -----
    Implemented as a sequence of one-dimensional convolution filters; the
    intermediate arrays are stored in the output dtype, so outputs with
    limited precision can accumulate rounding error.
    """
    input = numpy.asarray(input)
    output = _get_output(output, input)
    orders = _normalize_sequence(order, input.ndim)
    sigmas = _normalize_sequence(sigma, input.ndim)
    modes = _normalize_sequence(mode, input.ndim)
    # Only filter along axes whose sigma is non-negligible.
    active_axes = [(axis, sigmas[axis], orders[axis], modes[axis])
                   for axis in range(input.ndim) if sigmas[axis] > 1e-15]
    if not active_axes:
        output[...] = input[...]
        return output
    for axis, axis_sigma, axis_order, axis_mode in active_axes:
        gaussian_filter1d(input, axis_sigma, axis, axis_order, output,
                          axis_mode, cval, truncate)
        # Each 1-D pass reads the result of the previous one.
        input = output
    return output
def run_test(target_call,
             num_steps,
             strategy,
             batch_size=None,
             log_steps=100,
             num_steps_per_batch=1,
             iterator=None):
    """Run a benchmark loop and return a TimeHistory object with stats.

    Args:
        target_call: Callable executed once per step.
        num_steps: Number of steps to run.
        strategy: None or tf.distribute.DistributionStrategy object.
        batch_size: Total batch size (forwarded to TimeHistory).
        log_steps: Interval of steps between logging of stats.
        num_steps_per_batch: Number of steps per batch; used to account for
            the total number of transitions or examples processed per
            iteration.
        iterator: Optional iterator whose next element is passed to each
            execute step.

    Returns:
        TimeHistory object containing step performance stats.
    """
    history = TimeHistory(batch_size, log_steps, num_steps_per_batch)

    def _run_one_step():
        # Four equivalent call shapes: (strategy?, iterator?).
        if strategy:
            if iterator:
                strategy.run(target_call, args=(next(iterator),))
            else:
                strategy.run(target_call)
        elif iterator:
            target_call(next(iterator))
        else:
            target_call()

    for _ in range(num_steps):
        history.on_batch_begin()
        _run_one_step()
        history.on_batch_end()
    return history
import tqdm
import logging
def remove_punctuation_from_text(data):
    """Strip punctuation from the ``text`` field of every node in a list of trees.

    DataFrame input is not supported yet and raises NotImplementedError.

    Returns:
        [pd.DataFrame or dictionary] -- conversations with cleaned text field
        (the input structure, mutated in place).
    """
    if isinstance(data, pd.DataFrame):
        raise NotImplementedError
    if isinstance(data, list):
        for conversation_tree in tqdm.tqdm(data):
            for node in conversation_tree.values():
                node.text = remove_punctuation(node.text)
    logging.info('Removed punctuation from text')
    return data
def querystring_parse(parameter_data):
    """Encode a dictionary as a querystring, keeping '/' characters unescaped."""
    encoded = urlencode(parameter_data)
    return encoded.replace("%2F", "/")
def _deployment_rollback(deployment_id):
    """Roll back the Marathon deployment with the given id.

    :param deployment_id: the deployment id
    :type deployment_id: str
    :returns: process return code (0 on success)
    :rtype: int
    """
    marathon_client = marathon.create_client()
    rollback_result = marathon_client.rollback_deployment(deployment_id)
    emitter.publish(rollback_result)
    return 0
def find_regular_bin_edges_from_centers(centers):
    """Compute bin (grid-cell) edge positions from center positions.

    Assumes a regular (evenly spaced) grid.

    Inputs:
        centers = bin/grid center position vector, shape [nb]
    Returns:
        edges = edge positions of bins (grid), shape [nb+1]
    """
    half_step = (centers[1] - centers[0]) * 0.5  # regular spacing assumed
    edges = np.zeros(np.size(centers) + 1, np.float64)
    edges[1:] = centers + half_step
    edges[0] = centers[0] - half_step
    edges.sort()  # guarantees ascending order even for descending centers
    return edges
def postordereval(parseTree):
    """Evaluate an expression parse tree with a postorder traversal.

    Leaf nodes hold numeric operands; interior nodes hold one of the
    operator tokens '+', '-', '*', '/'. Returns None for an empty tree.
    """
    ops = {'+': op.add, '-': op.sub, '*': op.mul, '/': op.truediv}
    if parseTree:
        evalLeft = postordereval(parseTree.getLeftChild())
        evalRight = postordereval(parseTree.getRightChild())
        # Compare against None rather than truthiness: a sub-result of 0 is
        # a valid operand and must not make this node look like a leaf.
        if evalLeft is not None and evalRight is not None:
            return ops[parseTree.getRoot()](evalLeft, evalRight)
        else:
            return parseTree.getRoot()
def non_max_suppression(boxes, scores, threshold, max_num):
    """Greedy non-maximum suppression over 3-D boxes.

    boxes: [N, (z1, y1, x1, z2, y2, x2)]. Notice that (z2, y2, x2) lays
        outside the box.
    scores: 1-D array of box scores.
    threshold: Float. IoU threshold to use for filtering.
    max_num: Int. The max number of boxes to keep.
    Returns an int32 array with the indices of the kept boxes.
    """
    # Box volumes, computed once up front.
    volume = ((boxes[:, 3] - boxes[:, 0]) *
              (boxes[:, 4] - boxes[:, 1]) *
              (boxes[:, 5] - boxes[:, 2]))
    # Candidate indices ordered by descending score.
    order = scores.argsort()[::-1]
    keep = []
    while len(order) > 0:
        # The highest-scoring remaining box is always kept.
        best = order[0]
        keep.append(best)
        if len(keep) >= max_num:
            break
        # IoU of the kept box against every remaining candidate.
        iou = compute_iou(boxes[best], boxes[order[1:]], volume[best],
                          volume[order[1:]])
        # Positions in order[1:] with too much overlap; +1 maps them back to
        # positions in `order`.
        suppressed = np.where(iou > threshold)[0] + 1
        # Drop the suppressed candidates and the box we just picked.
        order = np.delete(order, suppressed)
        order = np.delete(order, 0)
    return np.array(keep, dtype=np.int32)
def mask2rle(img):
    """Run-length encode a binary mask (1 -> mask, 0 -> background).

    Pixels are scanned column-major (via the transpose) and the result is
    the usual "start length start length ..." string with 1-based starts.
    - https://www.kaggle.com/paulorzp/rle-functions-run-lenght-encode-decode
    """
    padded = np.concatenate([[0], img.T.flatten(), [0]])
    # 1-based positions where the pixel value changes (run boundaries).
    runs = np.where(padded[1:] != padded[:-1])[0] + 1
    # Turn every second entry from an end position into a run length.
    runs[1::2] -= runs[::2]
    return ' '.join(map(str, runs))
import os
def fp(path):
    """Return *path* resolved against the SEIR_HOME base directory."""
    full_path = os.path.join(SEIR_HOME, path)
    return full_path
def rgb_to_hsv(color: np.ndarray) -> np.ndarray:
    """Convert a color from the RGB colorspace to the HSV colorspace.

    >>> rgb_to_hsv(np.array([10, 20, 30], np.uint8))
    array([105, 170, 30])

    Args:
        color: Color as a uint8 numpy array — shape (X, Y, 3) for a whole
            image, (N, 3) for a list of colors, or (3,) for a single color.

    Returns:
        The converted color(s) in a numpy array of the same shape as the
        given array.
    """
    conversion = cv2.COLOR_RGB2HSV
    return __convert_color__(color, conversion)
def plot_params(model):
    """Render the model's parameter values as text on a matplotlib figure.

    Writes one line per fitter parameter, per photometry parameter (one
    entry per filter) and per additional parameter, and returns the Figure.
    """
    # Text layout: (x0, y0) anchors the first line; dy is the line spacing.
    x0 = 0.05
    y0 = 0.95
    dy = 0.03
    fig = plt.figure(1, figsize=(10, 10))
    plt.subplots_adjust(left=0.1, top=0.95, bottom=0.05, right=0.95)
    ax_lab = fig.add_subplot(111)
    ax_lab.xaxis.set_visible(False)
    ax_lab.yaxis.set_visible(False)
    ax_lab.set_axis_off()
    ax_lab.text(x0, y0 - 0 * dy, 'Model Parameters:', fontsize=10)
    def get_param_value(pname):
        # Resolve a display name to the model attribute it comes from:
        # *_E / *_N select the East/North component of a 2-vector attribute,
        # and log10_thetaE reads thetaE_amp and reports its log10.
        if pname.endswith('_E') or pname.endswith('_N'):
            pname_act = pname[:-2]
        elif pname == 'log10_thetaE':
            pname_act = 'thetaE_amp'
        else:
            pname_act = pname
        pvalue = getattr(model, pname_act)
        if pname.endswith('_E'):
            pvalue = pvalue[0]
        if pname.endswith('_N'):
            pvalue = pvalue[1]
        if pname == 'log10_thetaE':
            pvalue = np.log10(pvalue)
        return pvalue
    # Fitter parameters: one text line each; extra precision for x* names.
    for ff in range(len(model.fitter_param_names)):
        pname = model.fitter_param_names[ff]
        pvalu = get_param_value(pname)
        fmt_str = '{0:s} = {1:.2f}'
        if pname.startswith('x'):
            fmt_str = '{0:s} = {1:.4f}'
        ax_lab.text(x0, y0 - (ff + 1) * dy,
                    fmt_str.format(pname, pvalu),
                    fontsize=10)
    # Photometry parameters are vector-valued: one line per filter index,
    # continuing below the fitter-parameter lines via nrow.
    nrow = len(model.fitter_param_names)
    for ff in range(len(model.phot_param_names)):
        pname = model.phot_param_names[ff]
        pvalu = get_param_value(pname)
        fmt_str = '{0:s} = {1:.2f}'
        for rr in range(len(pvalu)):
            ax_lab.text(x0, y0 - (nrow + 1) * dy,
                        fmt_str.format(pname + str(rr + 1), pvalu[rr]),
                        fontsize=10)
            nrow += 1
    # Additional parameters; multi-filter ones get one line per filter.
    # NOTE(review): nrow is reset to 0 here (text overlaps the top rows?),
    # and the scalar branch positions text with `ff` and x0 + 0.5 rather
    # than nrow — looks inconsistent with the loop above; confirm the
    # intended layout before changing.
    nrow = 0
    for ff in range(len(model.additional_param_names)):
        pname = model.additional_param_names[ff]
        pvalu = get_param_value(pname)
        fmt_str = '{0:s} = {1:.2f}'
        if pname in multi_filt_params:
            for rr in range(len(pvalu)):
                ax_lab.text(x0, y0 - (nrow + 1) * dy,
                            fmt_str.format(pname + str(rr + 1), pvalu[rr]),
                            fontsize=10)
                nrow += 1
        else:
            ax_lab.text(x0 + 0.5, y0 - (ff + 1) * dy,
                        fmt_str.format(pname, pvalu),
                        fontsize=10)
            nrow += 1
    return fig
import torch
def class_avg_chainthaw(model, nb_classes, loss_op, train, val, test, batch_size,
                        epoch_size, nb_epochs, checkpoint_weight_path,
                        f1_init_weight_path, patience=5,
                        initial_lr=0.001, next_lr=0.0001, verbose=True):
    """ Finetunes given model using chain-thaw and evaluates using F1.
    For a dataset with multiple classes, the model is trained once for
    each class, relabeling those classes into a binary classification task.
    The result is an average of all F1 scores for each class.
    # Arguments:
        model: Model to be finetuned.
        nb_classes: Number of classes in the given dataset.
        loss_op: Loss function to be used during training.
        train: Training data, given as a tuple of (inputs, outputs)
        val: Validation data, given as a tuple of (inputs, outputs)
        test: Testing data, given as a tuple of (inputs, outputs)
        batch_size: Batch size.
        epoch_size: Number of samples in an epoch.
        nb_epochs: Number of epochs.
        checkpoint_weight_path: Filepath where weights will be checkpointed to
            during training. This file will be rewritten by the function.
        f1_init_weight_path: Filepath where weights will be saved to and
            reloaded from before training each class. This ensures that
            each class is trained independently. This file will be rewritten.
        patience: Number of epochs with no improvement before early stopping.
        initial_lr: Initial learning rate. Will only be used for the first
            training step (i.e. the softmax layer)
        next_lr: Learning rate for every subsequent step.
        verbose: Verbosity flag.
    # Returns:
        Averaged F1 score.
    """
    # Unpack args
    X_train, y_train = train
    X_val, y_val = val
    X_test, y_test = test
    total_f1 = 0
    # One binary one-vs-rest pass per class; a 2-class problem needs only one.
    nb_iter = nb_classes if nb_classes > 2 else 1
    # Snapshot the starting weights so every class trains from the same point.
    torch.save(model.state_dict(), f1_init_weight_path)
    for i in range(nb_iter):
        if verbose:
            print('Iteration number {}/{}'.format(i+1, nb_iter))
        # Restore the initial weights so classes are trained independently.
        model.load_state_dict(torch.load(f1_init_weight_path))
        # Relabel the dataset into a binary task for class i.
        y_train_new, y_val_new, y_test_new = prepare_labels(y_train, y_val,
                                                            y_test, i, nb_classes)
        train_gen, X_val_resamp, y_val_resamp = \
            prepare_generators(X_train, y_train_new, X_val, y_val_new,
                               batch_size, epoch_size)
        if verbose:
            print("Training..")
        # Train using chain-thaw
        train_by_chain_thaw(model=model, train_gen=train_gen,
                            val_gen=[(X_val_resamp, y_val_resamp)],
                            loss_op=loss_op, patience=patience,
                            nb_epochs=nb_epochs,
                            checkpoint_path=checkpoint_weight_path,
                            initial_lr=initial_lr, next_lr=next_lr,
                            verbose=verbose)
        # Evaluate: pick the decision threshold on val, score F1 on test.
        # NOTE(review): assumes X_val/X_test are tensors the model accepts
        # directly and that outputs live on GPU (.cpu()) — confirm.
        y_pred_val = model(X_val).cpu().numpy()
        y_pred_test = model(X_test).cpu().numpy()
        f1_test, best_t = find_f1_threshold(y_val_new, y_pred_val,
                                            y_test_new, y_pred_test)
        if verbose:
            print('f1_test: {}'.format(f1_test))
            print('best_t: {}'.format(best_t))
        total_f1 += f1_test
    return total_f1 / nb_iter
def quality_scrub(df, target_cols = ['quality_1', 'quality_2', 'quality_3']):
    """
    Definition:
        Filters a dataframe so that no target_col contains 'no_cough'
    Args:
        df: Required. A dataframe containing the target columns
        target_cols: default = ['quality_1', 'quality_2', 'quality_3'].
    Returns:
        Returns a filtered dataframe where each target_col does not contain 'no_cough'
    """
    # Keep only rows where every target column differs from 'no_cough'.
    keep = (df[target_cols] != 'no_cough').all(axis=1)
    return df[keep]
def create_arrival_timer(model, name, descr = None):
    """Return a new timer that allows measuring the processing time of transacts."""
    timer = ArrivalTimerPort(model, name = name, descr = descr)
    timer.write('newArrivalTimer')
    return timer
from typing import Dict
from re import T
import torch
def get_default_transforms() -> Dict[str, T.Compose]:
    """Build the default image augmentation pipelines.

    NOTE(review): `T` must be torchvision.transforms for `.Compose` etc. to
    exist — the file-level `from re import T` looks wrong; confirm the import.

    Returns:
        Dict[str, T.Compose]: augmentation pipelines keyed by phase,
        'train' (flips/rotations + normalize) and 'val' (normalize only).
    """
    transform = {
        "train": T.Compose(
            [
                T.RandomHorizontalFlip(),
                T.RandomVerticalFlip(),
                MyRotateTransform([90, 180, 270]),
                T.ConvertImageDtype(torch.float),
                T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
            ]
        ),
        "val": T.Compose(
            [
                T.ConvertImageDtype(torch.float),
                T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
            ]
        ),
    }
    return transform
import subprocess
import os
def simple_shell(args, stdout=False):
    """Run a command via subprocess and return its exit code.

    Args:
        args: argument list for the command (executed without a shell).
        stdout: if True, let the command's output pass through to the
            console; otherwise stdout and stderr are discarded.

    Returns:
        The command's return code.
    """
    if stdout:
        return subprocess.call(args, shell=False)
    # subprocess.DEVNULL avoids the leaked open(os.devnull) file handle of
    # the previous implementation.
    return subprocess.call(args, shell=False, stdout=subprocess.DEVNULL,
                           stderr=subprocess.STDOUT)
def print_decorator(fct):
    """Decorator used in tests to capture the positional arguments of calls.

    Wraps *fct* so that the positional arguments of every call are recorded
    (exposed as ``wrapper.calls`` so tests can inspect output-producing calls
    such as ``print``) before delegating to the original function.
    """
    from functools import wraps

    recorded_calls = []

    @wraps(fct)
    def wrapper(*args):
        recorded_calls.append(args)
        return fct(*args)

    # Test hook: the original implementation kept this list unreachable,
    # which defeated its purpose.
    wrapper.calls = recorded_calls
    return wrapper
import logging
def parse_testresults(xml, test_id, domain):
    """Parse a results XML file (XCCDF-style) and build rule mappings.

    :param xml: path or file object accepted by etree.iterparse
    :param test_id: id of the TestResult element whose results to extract
    :param domain: namespace prefix prepended to every tag name
    :return: (lookup, testresults) — lookup maps rule id ->
        [title, fixtext]; testresults maps rule idref -> result text, plus
        'score' and 'max_score' entries
    """
    global_lookup = {}
    global_testresults = {}
    # iterparse fires each element on both "start" and "end" events;
    # build_lookup is attempted on every event and raises TypeError for
    # elements it does not handle, which is deliberately ignored.
    for event, element in etree.iterparse(xml, events=("start", "end")):
        try:
            global_id, global_title, global_fixtext = \
                build_lookup(element, domain)
            global_lookup.update({global_id: [global_title, global_fixtext]})
        except TypeError:
            pass
        # # Get results
        if element.tag == domain + 'TestResult' and \
           element.get('id') == test_id:
            # Go through TestResult tag and get all associated results only
            for ch in element:
                if ch.tag == domain + "rule-result":
                    for child in ch:
                        if child.tag == domain + "result":
                            global_testresults[ch.get('idref')] = child.text
                if ch.tag == domain + "score":
                    global_maximum_score = ch.get('maximum')
                    global_score = ch.text
            # NOTE(review): if the TestResult carries no <score> child,
            # global_maximum_score/global_score are unbound here and this
            # raises NameError — confirm a score element is guaranteed.
            global_testresults['max_score'] = global_maximum_score
            global_testresults['score'] = global_score
            logging.debug("Lookup build successful for all results")
    return global_lookup, global_testresults
def remodel_matrix(matrix, new_fire_cells, moisture_matrix):
    """Update the fire-spread matrix for newly ignited cells.

    matrix: 2-D structure for the fire spread area (mutated in place).
    new_fire_cells: iterable of (x, y) coordinate tuples of cells newly
        affected by the fire spread.
    moisture_matrix: per-cell moisture values; each new fire cell is set to
        half its moisture value.
    Returns the mutated matrix.
    """
    for coords in new_fire_cells:
        row, col = int(coords[0]), int(coords[1])
        matrix[row][col] = 0.5 * moisture_matrix[row][col]
    return matrix
import re
def getOffers(session, city):
    """Scrape the branch-office bargain page and return the listed offers.

    Parameters
    ----------
    session : ikabot.web.session.Session
    city : dict

    Returns
    -------
    offers : list[dict]
    """
    html = getMarketHtml(session, city)
    offer_pattern = r'short_text80">(.*?) *<br/>\((.*?)\)\s *</td>\s *<td>(\d+)</td>\s *<td>(.*?)/td>\s *<td><img src="skin/resources/icon_(\w+)\.png[\s\S]*?white-space:nowrap;">(\d+)\s[\s\S]*?href="\?view=takeOffer&destinationCityId=(\d+)&oldView=branchOffice&activeTab=bargain&cityId=(\d+)&position=(\d+)&type=(\d+)&resource=(\w+)"'
    offers = []
    for hit in re.findall(offer_pattern, html):
        (dest_city, seller, goods_per_minute, amount_available, good_kind,
         price, dest_city_id, city_id, position, offer_type, resource) = hit
        offers.append({
            'ciudadDestino': dest_city,
            'jugadorAComprar': seller,
            'bienesXminuto': int(goods_per_minute),
            # Strip thousands separators (and a stray '<') before parsing.
            'amountAvailable': int(amount_available.replace(',', '').replace('.', '').replace('<', '')),
            'tipo': good_kind,
            'precio': int(price),
            'destinationCityId': dest_city_id,
            'cityId': city_id,
            'position': position,
            'type': offer_type,
            'resource': resource,
        })
    return offers
def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):
    """Add a new softmax and fully-connected layer for training.

    We need to retrain the top layer to identify our new classes, so this
    function adds the right operations to the graph, plus the variables and
    gradient-descent training step that go with them.

    :param class_count: total number of classes to recognize.
    :param final_tensor_name: name string for the new final node that produces results.
    :param bottleneck_tensor: output of the main CNN graph.
    :return: The tensors for the training and cross entropy results, and tensors for the bottleneck input and ground truth input.
    """
    with tf.name_scope('input'):
        # Placeholder that defaults to the live bottleneck tensor but can be
        # fed precomputed (cached) bottleneck values instead.
        bottleneck_input = tf.placeholder_with_default(
            bottleneck_tensor, shape=[None, BOTTLENECK_TENSOR_SIZE],
            name='BottleneckInputPlaceholder')
        ground_truth_input = tf.placeholder(tf.float32,
                                            [None, class_count],
                                            name='GroundTruthInput')
    # Organizing the following ops as `final_training_ops` so they're easier
    # to see in TensorBoard
    layer_name = 'final_training_ops'
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            layer_weights = tf.Variable(tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count], stddev=0.001),
                                        name='final_weights')
            variable_summaries(layer_weights)
        with tf.name_scope('biases'):
            layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
            variable_summaries(layer_biases)
        with tf.name_scope('Wx_plus_b'):
            logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
            tf.summary.histogram('pre_activations', logits)
    final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
    tf.summary.histogram('activations', final_tensor)
    with tf.name_scope('cross_entropy'):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
            labels=ground_truth_input, logits=logits)
        with tf.name_scope('total'):
            cross_entropy_mean = tf.reduce_mean(cross_entropy)
    tf.summary.scalar('cross_entropy', cross_entropy_mean)
    with tf.name_scope('train'):
        # NOTE(review): `learning_rate` is not defined in this function —
        # presumably a module-level constant or flag; confirm it exists.
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
            cross_entropy_mean)
    return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
            final_tensor)
def generate_northern_ireland_data(directory, file_date, records):
    """Generate a synthetic Northern Ireland sample file.

    Builds `records` rows of randomly generated geography/sample codes,
    writes them to ``CIS_Direct_NI_<file_date>.csv`` inside ``directory``
    and returns the resulting DataFrame.

    NOTE(review): `_` is presumably a mimesis Field provider and `code_mask`
    a local helper — confirm against the surrounding module.
    """
    # Wrapped in a lambda so the Schema re-evaluates every provider for each
    # generated row (noqa: lambda assignment is intentional here).
    northern_ireland_data_description = lambda: { # noqa: E731
        "UIC": _("random.custom_code", mask="############", digit="#"),
        "Sample": _("random.custom_code", mask="#&&&", digit="#", char="&"),
        "oa11": code_mask(mask="N0000####", min_code=["N00000001", None], max_code=["N00004537", None]),
        "laua": code_mask(mask="N090000##", min_code=["N09000001", None], max_code=["N09000011", None]),
        "ctry": "N92000002",
        "GOR9D": "N99999999",
        "lsoa11": code_mask(mask="95&&##S#", min_code=["95AA01S1", None], max_code=["95ZZ16S2", None]),
        "msoa11": "N99999999",
        # keyword spelling mirrors the helper's (misspelled) parameter name
        "oac11": code_mask(mask="#&#", min_code=["1A1", None], max_code=["8B3", None], use_incremntal_letters=True),
        "CIS20CD": code_mask(mask="J06000###", min_code="J06000229", max_code="J06000233"),
        "rgn": "N92000002",
        "imd": code_mask(mask="00###", min_code=["00001", None], max_code=["00890", None]),
        "interim_id": 999,
    }
    schema = Schema(schema=northern_ireland_data_description)
    northern_ireland_data = pd.DataFrame(schema.create(iterations=records))
    # `directory` is expected to be a pathlib.Path (uses the / operator).
    northern_ireland_data.to_csv(directory / f"CIS_Direct_NI_{file_date}.csv", index=False)
    return northern_ireland_data
def create_message(address, subject, message_text, html=True, attachments=None):
    """Create a message for an email, using the low-level API.

    Arguments:
        address (str): Email address(es) of the receiver.
        subject (str): The subject of the email message.
        message_text (str): The text of the email message.
        html (bool): If True, treat message as HTML instead of plain text.
            Defaults to True.
        attachments (List[str]): A list of filepaths to attach to the email.
            Defaults to [].

    Returns:
        dict: A base64url encoded email JSON "object" ({'raw': ...}).
    """
    attachments = attachments or []
    message = MIMEMultipart()
    message['to'] = address
    message['subject'] = subject
    # Body: HTML or plain text depending on the flag.
    subtype = 'html' if html else 'plain'
    message.attach(MIMEText(message_text, subtype))
    # Attach each file as an application part with a download filename.
    for filepath in attachments:
        resolved = realpath(expanduser(filepath))
        with open(resolved, 'rb') as fd:
            part = MIMEApplication(fd.read(), Name=basename(filepath))
        part['Content-Disposition'] = 'attachment; filename="{}"'.format(basename(filepath))
        message.attach(part)
    # Gmail API expects the RFC 2822 message base64url-encoded under 'raw'.
    return {'raw': urlsafe_b64encode(message.as_string().encode()).decode()}
import logging
def whole_appendix(xml, cfr_part, letter):
    """Attempt to parse an appendix. Used when the entire appendix has been
    replaced/added or when we can use the section headers to determine our
    place. If the format isn't what we expect, display a warning.

    :param xml: lxml element tree for the notice (copied, not mutated)
    :param cfr_part: CFR part number as it appears in the header text
    :param letter: appendix letter (e.g. 'A')
    :return: parsed appendix from process_appendix, or None on any warning
    """
    # Work on a copy so header/extract surgery never mutates the caller's tree.
    xml = deepcopy(xml)
    hds = xml.xpath('//HD[contains(., "Appendix %s to Part %s")]'
                    % (letter, cfr_part))
    if len(hds) == 0:
        logging.warning("Could not find Appendix %s to part %s"
                        % (letter, cfr_part))
    elif len(hds) > 1:
        logging.warning("Too many headers for %s to part %s"
                        % (letter, cfr_part))
    else:
        hd = hds[0]
        # Mark the header as the appendix heading expected downstream.
        hd.set('SOURCE', 'HED')
        extract = hd.getnext()
        if extract is not None and extract.tag == 'EXTRACT':
            # Move the header inside the extract, then drop everything from
            # the first AMDPAR onward (trailing amendment instructions).
            extract.insert(0, hd)
            for trailing in dropwhile(lambda n: n.tag != 'AMDPAR',
                                      extract.getchildren()):
                extract.remove(trailing)
            return process_appendix(extract, cfr_part)
        logging.warning("Bad format for whole appendix")
import collections
def read_image_files(image_files, image_shape=None, crop=None, label_indices=None):
    """Read a list of image files, returning their data and affines.

    :param image_files: filepaths to read (e.g. scan file(s) plus a truth/label file).
    :param image_shape: target shape to resample each image to.
    :param crop: crop specification forwarded to read_image.
    :param label_indices: index or iterable of indices of files containing
        label maps; those are resampled with nearest-neighbor interpolation
        so linear interpolation cannot corrupt the labels.
        NOTE(review): when None it is normalized to [], which makes the
        "treat the last file as labels" check below unreachable — confirm
        the intended default behavior.
    :return: (list of images, list of affines)
    """
    if label_indices is None:
        label_indices = []
    elif not isinstance(label_indices, collections.abc.Iterable) or isinstance(label_indices, str):
        # collections.abc.Iterable: the bare collections.Iterable alias was
        # removed in Python 3.10 and would raise AttributeError.
        label_indices = [label_indices]
    image_list = list()
    affine_list = list()
    for index, image_file in enumerate(image_files):
        # Nearest-neighbor for label files; linear for image intensities.
        if (label_indices is None and (index + 1) == len(image_files)) \
                or (label_indices is not None and index in label_indices):
            interpolation = "nearest"
        else:
            interpolation = "linear"
        image, affine = read_image(image_file, image_shape=image_shape, crop=crop, interpolation=interpolation)
        image_list.append(image)
        affine_list.append(affine)
    return image_list, affine_list
import traceback
def wrap_unexpected_exceptions(f, execute_if_error=None):
    """A decorator that catches all exceptions from the function f and alerts
    the user about them.

    Self can be any object with a "logger" attribute and a "send_error"
    method. All exceptions are logged as "unexpected" exceptions and surfaced
    to the user via send_error. If there is an error, returns None if
    execute_if_error is None, or else returns the output of the function
    execute_if_error.

    Usage:
        @wrap_unexpected_exceptions
        def fn(self, ...):
            ..etc """
    @wraps(f)
    def wrapped(self, *args, **kwargs):
        try:
            result = f(self, *args, **kwargs)
        except Exception as e:
            self.logger.error(u"ENCOUNTERED AN INTERNAL ERROR: {}\n\tTraceback:\n{}".format(e, traceback.format_exc()))
            self.send_error(INTERNAL_ERROR_MSG.format(e))
            if execute_if_error is None:
                return None
            return execute_if_error()
        return result
    return wrapped
def store_topology(topology_file_string: str, fileformat="pdbx"):
    """Wrap a topology file's contents (such as pdbx) in a TopologyFile XML element."""
    element = etree.fromstring(f'<TopologyFile format="{fileformat}"/>')
    element.text = topology_file_string
    return element
def zeros_like(tab):
    """
    Wrapper around numpy.zeros_like that forces the hysop.constants.ORDER layout
    """
    result = np.zeros_like(tab, dtype=tab.dtype, order=ORDER)
    return result
def get_bert_embeddings(input_ids,
                        bert_config,
                        input_mask=None,
                        token_type_ids=None,
                        is_training=False,
                        use_one_hot_embeddings=False,
                        scope=None):
    """Build a BERT model and return its sequence-output embeddings."""
    bert = modeling.BertModel(
        config=bert_config,
        is_training=is_training,
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=token_type_ids,
        use_one_hot_embeddings=use_one_hot_embeddings,
        scope=scope)
    return bert.get_sequence_output()
import time
def blind_deconvolution_multiple_subjects(
        X, t_r, hrf_rois, hrf_model='scaled_hrf', shared_spatial_maps=False,
        deactivate_v_learning=False, deactivate_z_learning=False,
        deactivate_u_learning=False, n_atoms=10, n_times_atom=60, prox_z='tv',
        lbda_strategy='ratio', lbda=0.1, rho=2.0, delta_init=1.0,
        u_init_type='ica', eta=10.0, z_init=None, prox_u='l1-positive-simplex',
        max_iter=100, get_obj=0, get_time=False, random_seed=None,
        early_stopping=True, eps=1.0e-5, raise_on_increase=True, verbose=0):
    """ Multivariate Blind Deconvolution main function for mulitple subjects.
    Parameters
    ----------
    X : array, shape (n_voxels, n_times), fMRI data
    t_r : float, Time of Repetition, fMRI acquisition parameter, the temporal
        resolution
    hrf_rois : dict (key: ROIs labels, value: indices of voxels of the ROI)
        atlas HRF
    hrf_model : str, (default='3_basis_hrf'), type of HRF model, possible
        choice are ['3_basis_hrf', '2_basis_hrf', 'scaled_hrf']
    shared_spatial_maps : bool, whether or not to learn a single set of
        spatials maps accross subjects.
    deactivate_v_learning : bool, (default=False), option to force the
        estimated HRF to to the initial value.
    deactivate_z_learning : bool, (default=False), option to force the
        estimated z to its initial value.
    deactivate_u_learning : bool, (default=False), option to force the
        estimated u to its initial value.
    n_atoms : int, number of components on which to decompose the neural
        activity (number of temporal components and its associated spatial
        maps).
    n_times_atom : int, (default=30), number of points on which represent the
        Haemodynamic Response Function (HRF), this leads to the duration of the
        response function, duration = n_times_atom * t_r
    prox_z : str, (default='tv'), temporal proximal operator should be in
        ['tv', 'l1', 'l2', 'elastic-net']
    lbda_strategy str, (default='ratio'), strategy to fix the temporal
        regularization parameter, possible choice are ['ratio', 'fixed']
    lbda : float, (default=0.1), whether the temporal regularization parameter
        if lbda_strategy == 'fixed' or the ratio w.r.t lambda max if
        lbda_strategy == 'ratio'
    rho : float, (default=2.0), the elastic-net temporal regularization
        parameter
    delta_init : float, (default=1.0), the initialization value for the HRF
        dilation parameter
    u_init_type : str, (default='ica'), strategy to init u, possible value are
        ['gaussian_noise', 'ica', 'patch']
    eta : float, (default=10.0), the spatial sparsity regularization parameter
    z_init : None or array, (default=None), initialization of z, if None, z is
        initialized to zero
    prox_u : str, (default='l2-positive-ball'), constraint to impose on the
        spatial maps possible choice are ['l2-positive-ball',
        'l1-positive-simplex', 'positive']
    max_iter : int, (default=100), maximum number of iterations to perform the
        analysis
    get_obj : int, (default=0), level of cost-function tracking: 0 disables it,
        1 records the cost once per iteration, 2 records it after each of the
        three update steps (normalized by set_get_time_get_obj)
    get_time : bool or int, (default=False), duration tracking: 1 records the
        duration of each full iteration, 2 the duration of each update step
    random_seed : None, int, random-instance, (default=None), random-instance
        or random-seed used to initialize the analysis
    early_stopping : bool, (default=True), whether to early stop the analysis
    eps : float, (default=1.0e-4), stoppping parameter w.r.t evolution of the
        cost-function
    raise_on_increase : bool, (default=True), whether to stop the analysis if
        the cost-function increases during an iteration. This can be due to the
        fact that the temporal regularization parameter is set too high
    verbose : int, (default=0), verbose level, 0 no verbose, 1 low verbose,
        2 (or more) maximum verbose
    Return
    ------
    z : array, shape (n_subjects, n_atoms, n_times_valid), the estimated
        temporal components
    Dz : array, shape (n_subjects, n_atoms, n_times_valid), the estimated first
        order derivation temporal components
    u : array, shape (n_subjects or 1, n_atoms, n_voxels), the estimated
        spatial maps
    a : array, shape (n_subjects, n_hrf_rois, n_param_HRF), the estimated HRF
        parameters
    v : array, shape (n_subjects, n_hrf_rois, n_times_atom), the estimated HRFs
    v : array, shape (n_subjects, n_hrf_rois, n_times_atom), the initial used
        HRFs
    lbda : float, the temporal regularization parameter used
    lobj : array or None, shape (n_iter,) or (3 * n_iter,), the saved
        cost-function
    ltime : array or None, shape (n_iter,) or(3 * n_iter,), the saved duration
        per steps
    Throws
    ------
    CostFunctionIncreased : if the cost-function increases during an iteration,
        of the analysis. This can be due to the fact that the temporal
        regularization parameter is set too high
    """
    if isinstance(X, np.ndarray) and X.ndim == 2:
        X = [X]  # handle single subject case
    if not isinstance(X, list):  # break not valid cases
        raise ValueError("Subjects data X should a list.")
    n_subjects = len(X)
    for n in range(n_subjects):
        X[n] = X[n].astype(np.float64)
    if verbose > 1:
        print(f"[BDA] Seed used = {random_seed}")
    rng = check_random_state(random_seed)
    n_times, n_times_valid, n_voxels = [], [], []
    for n in range(n_subjects):
        n_voxels_, n_times_ = X[n].shape
        n_times_valid_ = n_times_ - n_times_atom + 1
        if n_times_valid_ < 2 * n_times_atom - 1:
            raise ValueError("'n_times_atom' is too high w.r.t the duration "
                             "of the acquisition, please reduce it.")
        n_voxels.append(n_voxels_)
        n_times.append(n_times_)
        n_times_valid.append(n_times_valid_)
    for n in range(1, n_subjects):
        if n_voxels[n] != n_voxels[0]:
            raise ValueError("All subjects do not have the same number of "
                             "voxels.")
    if n_subjects == 1 and shared_spatial_maps:
        print("Only 1 subject loaded, 'shared_spatial_maps' force to False")
        # Previously the message was printed but the flag was left unchanged;
        # actually enforce it (behavior-neutral with a single subject, but it
        # keeps the flag consistent with the message and the return shapes).
        shared_spatial_maps = False
    if (deactivate_v_learning and deactivate_u_learning
            and deactivate_z_learning):
        raise ValueError("'deactivate_v_learning', 'deactivate_z_learning' "
                         "and 'deactivate_u_learning' can't be set to True "
                         "all together.")
    if deactivate_z_learning:
        prox_u = 'positive'
        print("'deactivate_z_learning' is enable: 'prox_u' is forced to "
              "'positive'")
    if deactivate_z_learning and (z_init is None):
        raise ValueError("If 'deactivate_z_learning' is enable 'z_init' should"
                         " be provided")
    # split atlas
    rois_idx, _, n_hrf_rois = split_atlas(hrf_rois)
    constants = dict(rois_idx=rois_idx, hrf_model=hrf_model)
    get_obj = set_get_time_get_obj(get_obj, verbose, raise_on_increase,
                                   early_stopping)
    # v initialization
    v_hat, a_hat = init_v_hat(hrf_model, t_r, n_times_atom, n_subjects,
                              n_hrf_rois, constants, delta_init)
    v_init = v_hat[0][0, :]
    # H initialization
    H_hat = []
    for n in range(n_subjects):
        H_hat_ = np.empty((n_hrf_rois, n_times[n], n_times_valid[n]))
        for m in range(n_hrf_rois):
            H_hat_[m, :, :] = make_toeplitz(v_hat[n][m],
                                            n_times_valid=n_times_valid[n])
        H_hat.append(H_hat_)
    # z initialization
    z_hat = init_z_hat(z_init, n_subjects, n_atoms, n_times_valid)
    n_spatial_maps = 1 if shared_spatial_maps else n_subjects
    # u initialization
    u_hat = init_u_hat(X, v_hat, rng, u_init_type, eta, n_spatial_maps,
                       n_atoms, n_voxels, n_times, n_times_atom)
    # temporal regularization parameter
    lbda_new = []
    for n in range(n_subjects):
        u_idx = 0 if shared_spatial_maps else n
        lbda_ = check_lbda(lbda, lbda_strategy, X[n], u_hat[u_idx], H_hat[n],
                           rois_idx, prox_z)
        lbda_new.append(lbda_)
    lbda = lbda_new
    # spatial regularization parameter
    if prox_u == 'l2-positive-ball':
        def _prox(u_k):
            return _prox_positive_l2_ball(u_k, step_size=1.0)
        prox_u_func = _prox
    elif prox_u == 'l1-positive-simplex':
        def _prox(u_k):
            return _prox_l1_simplex(u_k, eta=eta)
        prox_u_func = _prox
    elif prox_u == 'positive':
        def _prox(u_k):
            return _prox_positive(u_k, step_size=1.0)
        prox_u_func = _prox
    else:
        raise ValueError(f"prox_u should be in ['l2-positive-ball', "
                         f"'l1-positive-simplex', 'positive'], got {prox_u}")
    constants['prox_z'] = prox_z
    constants['rho'] = rho
    constants['prox_u'] = prox_u_func
    if get_obj:
        _obj_value = 0.0
        for n in range(n_subjects):
            u_idx = 0 if shared_spatial_maps else n
            _obj_value += _obj(X=X[n], prox=prox_u_func, lbda=lbda[n],
                               u=u_hat[u_idx], z=z_hat[n], H=H_hat[n],
                               rois_idx=rois_idx, valid=True, rho=rho,
                               prox_z=prox_z) / n_subjects
        lobj = [_obj_value]
    if get_time:
        ltime = [0.0]
    # main loop
    with threadpool_limits(limits=1):
        for ii in range(max_iter):
            if get_time == 1:
                t0 = time.process_time()
            # use Toeplitz matrices for obj. func. computation (Numpy speed-up)
            for n in range(n_subjects):
                for m in range(n_hrf_rois):
                    H_hat[n][m, :, :] = make_toeplitz(
                        v_hat[n][m], n_times_valid=n_times_valid[n])
            if not deactivate_z_learning:
                if get_time == 2:
                    t0 = time.process_time()
                # Update z
                z_hat_new = []
                for n in range(n_subjects):
                    u_idx = 0 if shared_spatial_maps else n
                    constants['a'] = a_hat[n]
                    constants['v'] = v_hat[n]
                    constants['H'] = H_hat[n]
                    constants['u'] = u_hat[u_idx]
                    constants['X'] = X[n]
                    constants['lbda'] = lbda[n]
                    z_hat_new.append(_update_z(z_hat[n], constants))  # update
                z_hat = z_hat_new
                if get_time == 2:
                    ltime.append(time.process_time() - t0)
                if get_obj == 2:
                    _obj_value = 0.0
                    for n in range(n_subjects):
                        u_idx = 0 if shared_spatial_maps else n
                        _obj_value_ = _obj(X=X[n], prox=prox_u_func,
                                           lbda=lbda[n], u=u_hat[u_idx],
                                           z=z_hat[n], H=H_hat[n],
                                           rois_idx=rois_idx, valid=True,
                                           rho=rho, prox_z=prox_z)
                        _obj_value += _obj_value_ / n_subjects
                    lobj.append(_obj_value)
                    if verbose > 1:
                        if get_time:
                            print(f"[{ii + 1:03d}/{max_iter:03d}][001/003] "
                                  f"Temporal activations estimation done in "
                                  f"{ltime[-1]:.3f}s: cost = "
                                  f"{lobj[-1] / lobj[0]:.6f} (of "
                                  f"initial value)")
                        else:
                            print(f"[{ii + 1:03d}/{max_iter:03d}][1/3] "
                                  f"Temporal activations estimation done: "
                                  f"cost = {lobj[-1] / lobj[0]:.6f} "
                                  f"(of initial value)")
                # check if some z_k vanished
                msg = ("Temporal component vanished, may be 'lbda' is too "
                       "high, please try to reduce its value.")
                for n in range(n_subjects):
                    check_if_vanished(z_hat[n], msg)
            if not deactivate_u_learning:
                if get_time == 2:
                    t0 = time.process_time()
                # Update u
                u_hat_new = []
                if shared_spatial_maps:
                    B, C = [], []
                    for n in range(n_subjects):
                        B_, C_ = _precompute_B_C(X[n], z_hat[n], H_hat[n],
                                                 rois_idx)
                        B.append(B_)
                        C.append(C_)
                    constants['C'] = np.mean(C, axis=0)
                    constants['B'] = np.mean(B, axis=0)
                    u_hat[0] = _update_u(u_hat[0], constants)  # update
                else:
                    for n in range(n_subjects):
                        B, C = _precompute_B_C(X[n], z_hat[n], H_hat[n],
                                               rois_idx)
                        constants['C'] = C
                        constants['B'] = B
                        # update
                        u_hat_new.append(_update_u(u_hat[n], constants))
                    u_hat = u_hat_new
                if get_time == 2:
                    ltime.append(time.process_time() - t0)
                if get_obj == 2:
                    _obj_value = 0.0
                    for n in range(n_subjects):
                        u_idx = 0 if shared_spatial_maps else n
                        _obj_value_ = _obj(X=X[n], prox=prox_u_func,
                                           lbda=lbda[n], u=u_hat[u_idx],
                                           z=z_hat[n], H=H_hat[n],
                                           rois_idx=rois_idx, valid=True,
                                           rho=rho, prox_z=prox_z)
                        _obj_value += _obj_value_ / n_subjects
                    lobj.append(_obj_value)
                    if verbose > 1:
                        if get_time:
                            print(f"[{ii + 1:03d}/{max_iter:03d}][002/003] "
                                  f"Spatial maps estimation done in "
                                  f"{ltime[-1]:.3f}s: cost = "
                                  f"{lobj[-1] / lobj[0]:.6f} (of "
                                  f"initial value)")
                        else:
                            print(f"[{ii + 1:03d}/{max_iter:03d}][002/003] "
                                  f"Spatial maps estimation done: cost = "
                                  f"{lobj[-1] / lobj[0]:.6f} "
                                  f"(of initial value)")
            if not deactivate_v_learning:
                if get_time == 2:
                    t0 = time.process_time()
                # Update v
                a_hat_new, v_hat_new = [], []
                for n in range(n_subjects):
                    u_idx = 0 if shared_spatial_maps else n
                    constants['u'] = u_hat[u_idx]
                    constants['z'] = z_hat[n]
                    constants['X'] = X[n]
                    a_hat_, v_hat_ = _update_v(a_hat[n], constants)  # update
                    a_hat_new.append(a_hat_)
                    v_hat_new.append(v_hat_)
                a_hat, v_hat = a_hat_new, v_hat_new
                if get_time == 2:
                    ltime.append(time.process_time() - t0)
                if get_obj == 2:
                    _obj_value = 0.0
                    for n in range(n_subjects):
                        u_idx = 0 if shared_spatial_maps else n
                        _obj_value_ = _obj(X=X[n], prox=prox_u_func,
                                           lbda=lbda[n], u=u_hat[u_idx],
                                           z=z_hat[n], H=H_hat[n],
                                           rois_idx=rois_idx, valid=True,
                                           rho=rho, prox_z=prox_z)
                        _obj_value += _obj_value_ / n_subjects
                    lobj.append(_obj_value)
                    if verbose > 1:
                        if get_time:
                            print(f"[{ii + 1:03d}/{max_iter:03d}][003/003] "
                                  f"HRF estimation done in "
                                  f"{ltime[-1]:.3f}s: cost = "
                                  f"{lobj[-1] / lobj[0]:.6f} (of "
                                  f"initial value)")
                        else:
                            print(f"[{ii + 1:03d}/{max_iter:03d}][003/003] "
                                  f"HRF estimation done: cost = "
                                  f"{lobj[-1] / lobj[0]:.6f} "
                                  f"(of initial value)")
            if get_time == 1:
                ltime.append(time.process_time() - t0)
            if get_obj == 1:
                _obj_value = 0.0
                for n in range(n_subjects):
                    u_idx = 0 if shared_spatial_maps else n
                    _obj_value += _obj(X=X[n], prox=prox_u_func, lbda=lbda[n],
                                       u=u_hat[u_idx], z=z_hat[n], H=H_hat[n],
                                       rois_idx=rois_idx, valid=True,
                                       rho=rho, prox_z=prox_z) / n_subjects
                lobj.append(_obj_value)
                if verbose == 1:
                    if get_time:
                        print(f"[{ii + 1:03d}/{max_iter:03d}] Iteration done "
                              f"in {ltime[-1]:.3f}s: cost = "
                              f"{lobj[-1] / lobj[0]:.6f} (of initial value)")
                    else:
                        print(f"[{ii + 1:03d}/{max_iter:03d}] Iteration done: "
                              f"cost = {lobj[-1] / lobj[0]:.6f} (of "
                              f"initial value)")
            if ii > 2 and get_obj:
                try:
                    check_obj(lobj, ii + 1, max_iter,
                              early_stopping=early_stopping,
                              raise_on_increase=raise_on_increase, eps=eps,
                              level=get_obj)
                except EarlyStopping as e:
                    if verbose > 1:
                        print(str(e))
                    break
    Dz_hat = [np.diff(z_hat[n], axis=-1) for n in range(n_subjects)]
    return_vars = [z_hat, Dz_hat, u_hat, a_hat, v_hat, v_init, lbda]
    if get_obj and get_time:
        return_vars.extend([lobj, ltime])
        return return_vars
    elif get_obj and not get_time:
        return_vars.extend([lobj, None])
        return return_vars
    elif not get_obj and get_time:
        return_vars.extend([None, ltime])
        return return_vars
    else:
        return_vars.extend([None, None])
        return return_vars
def _TestRemovePhotos(tester, user_cookie, request_dict):
    """Called by the ServiceTester in order to test remove_photos service API call."""
    validator = tester.validator
    user_id, device_id = tester.GetIdsFromCookie(user_cookie)
    request_dict = deepcopy(request_dict)
    user = validator.GetModelObject(User, user_id)
    # Issue the remove_photos request against the service.
    actual_dict = tester.SendRequest('remove_photos', user_cookie, request_dict)
    op_dict = tester._DeriveNotificationOpDict(user_id, device_id, request_dict)
    # Each targeted post should now carry the REMOVED label.
    for ep_request in request_dict['episodes']:
        ep_id = ep_request['episode_id']
        validator.GetModelObject(Episode, ep_id)
        for ph_id in ep_request['photo_ids']:
            post = validator.GetModelObject(Post, DBKey(ep_id, ph_id))
            if post.IsRemoved():
                continue
            new_labels = post.labels.combine().union([Post.REMOVED])
            validator.ValidateUpdateDBObject(Post,
                                             episode_id=ep_id,
                                             photo_id=ph_id,
                                             labels=new_labels)
    # The notification must invalidate the photos of every touched episode.
    invalidate = {'episodes': [{'episode_id': ep_request['episode_id'], 'get_photos': True}
                               for ep_request in request_dict['episodes']]}
    validator.ValidateNotification('remove_photos', user_id, op_dict, invalidate)
    validator.ValidateViewpointAccounting(user.private_vp_id)
    tester._CompareResponseDicts('remove_photos', user_id, request_dict, {}, actual_dict)
    return actual_dict
import csv
def get_author_book_publisher_data(filepath):
    """Read author/book/publisher rows from a CSV file.

    Parameters
    ----------
    filepath : str
        Path to a CSV file whose first row is the header.

    Returns
    -------
    list of dict
        One dict per data row, keyed by the header fields.
    """
    # newline="" is the documented way to open files for the csv module;
    # it keeps universal-newline translation from corrupting quoted fields
    # that contain embedded line breaks.
    with open(filepath, newline="") as csvfile:
        return list(csv.DictReader(csvfile))
from typing import Optional
async def update_workflow_revision(
    # pylint: disable=W0622
    id: UUID,
    updated_workflow_dto: WorkflowRevisionFrontendDto,
) -> WorkflowRevisionFrontendDto:
    """Update or store a transformation revision of type workflow in the data base.
    If no DB entry with the provided id is found, it will be created.
    Updating a transformation revision is only possible if it is in state DRAFT
    or to change the state from RELEASED to DISABLED.
    This endpoint is deprecated and will be removed soon,
    use PUT /api/transformations/{id} instead.
    """
    logger.info("update workflow %s", id)
    if id != updated_workflow_dto.id:
        msg = (
            # fix: the first fragment was missing the f prefix, so "{id}"
            # was emitted literally instead of the path id
            f"The id {id} does not match "
            f"the id of the provided workflow revision DTO {updated_workflow_dto.id}"
        )
        logger.error(msg)
        raise HTTPException(status.HTTP_403_FORBIDDEN, detail=msg)
    updated_transformation_revision = updated_workflow_dto.to_transformation_revision()
    existing_transformation_revision: Optional[TransformationRevision] = None
    try:
        existing_transformation_revision = read_single_transformation_revision(
            id, log_error=False
        )
        logger.info("found transformation revision %s", id)
        check_modifiability(
            existing_transformation_revision, updated_transformation_revision
        )
    except DBNotFoundError:
        # base/example workflow deployment needs to be able to put
        # with an id and either create or update the workflow revision
        pass
    if existing_transformation_revision is not None:
        # preserve documentation and test wiring of the stored revision
        updated_transformation_revision.documentation = (
            existing_transformation_revision.documentation
        )
        updated_transformation_revision.test_wiring = (
            existing_transformation_revision.test_wiring
        )
        updated_transformation_revision = update_content(
            existing_transformation_revision, updated_transformation_revision
        )
        updated_transformation_revision = if_applicable_release_or_deprecate(
            existing_transformation_revision, updated_transformation_revision
        )
    try:
        persisted_transformation_revision = (
            update_or_create_single_transformation_revision(
                updated_transformation_revision
            )
        )
        logger.info("updated workflow %s", id)
    except DBIntegrityError as e:
        raise HTTPException(status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)) from e
    except DBNotFoundError as e:
        raise HTTPException(status.HTTP_404_NOT_FOUND, detail=str(e)) from e
    persisted_workflow_dto = WorkflowRevisionFrontendDto.from_transformation_revision(
        persisted_transformation_revision
    )
    logger.debug(persisted_workflow_dto.json())
    return persisted_workflow_dto
def tf_repeat(tensor, repeats):
    """
    Args:
        input: A Tensor. 1-D or higher.
        repeats: A list. Number of repeat for each dimension, length must be the same as the number of dimensions in input
    Returns:
        A Tensor. Has the same type as input. Has the shape of tensor.shape * repeats
    """
    # Append a unit axis, tile it, then collapse back to the scaled shape.
    expanded = tf.expand_dims(tensor, -1)
    tile_spec = [1] + repeats
    tiled = tf.tile(expanded, multiples=tile_spec)
    return tf.reshape(tiled, tf.shape(tensor) * repeats)
def apply_slim_collections(cost):
    """
    Add the cost with the regularizers in ``tf.GraphKeys.REGULARIZATION_LOSSES``.
    Args:
        cost: a scalar tensor
    Return:
        a scalar tensor, the cost after applying the collections.
    """
    reg_losses = set(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
    ctx = get_current_tower_context()
    if not reg_losses:
        return cost
    assert not ctx.has_own_variables, "REGULARIZATION_LOSSES collection doesn't work in replicated mode!"
    logger.info("Applying REGULARIZATION_LOSSES on cost.")
    reg_loss = tf.add_n(list(reg_losses), name="regularize_loss")
    cost = tf.add(reg_loss, cost, name='total_cost')
    add_moving_summary(reg_loss, cost)
    return cost
import re
def isGoodResult(name, show, log=True, season=-1):
    """
    Use an automatically-created regex to make sure the result actually is the show it claims to be
    """
    all_show_names = allPossibleShowNames(show, season=season)
    # list(...) keeps this working on Python 3, where map() returns an
    # iterator that cannot be concatenated to a list with "+".
    showNames = list(map(sanitizeSceneName, all_show_names)) + all_show_names
    for curName in set(showNames):
        # turn whitespace/separators in the show name into a flexible \W+ match
        escaped_name = re.sub('\\\\[\\s.-]', '\W+', re.escape(curName))
        if show.startyear:
            escaped_name += "(?:\W+"+str(show.startyear)+")?"
        curRegex = '^' + escaped_name + '\W+(?:(?:S\d[\dE._ -])|(?:\d\d?x)|(?:\d{4}\W\d\d\W\d\d)|(?:(?:part|pt)[\._ -]?(\d|[ivx]))|(Sea|sai)son\W+\d+\W+|E\d+\W+)'
        if log:
            logger.log(u"Checking if show "+name+" matches " + curRegex, logger.DEBUG)
        match = re.search(curRegex, name, re.I)
        if match:
            logger.log(u"Matched "+curRegex+" to "+name, logger.DEBUG)
            return True
    if log:
        logger.log(u"Provider gave result "+name+" but that doesn't seem like a valid result for "+show.name+" so I'm ignoring it")
    return False
import six
import itertools
def get_configuration(configuration_schema,
                      command_line_options=None,
                      environment_variables=None,
                      config_content=None,
                      django_settings=None):
    """Get configuration from all sources.
    Notes:
        * The priority is as follows: command line options first, then
          environment variables, than configuration from the configuration
          file, and at last the default values.
    Args:
        configuration_schema (dict): a match between each target option to its
            sources.
        command_line_options (iterable): the program arguments, as given by
            `sys.argv`.
        environment_variables (dict): the environment variables, as given by
            `os.environ`.
        config_content (str): content of the configuration file in YAML format.
        django_settings (dict): content of the Django settings file as dict.
    Returns:
        AttrDict: the merged configuration, lowest priority entries first so
            that higher priority sources overwrite them.
    """
    default_configuration = {
        target: option.default_value
        for target, option in six.iteritems(configuration_schema)}
    if command_line_options is None:
        cli_configuration = {}
    else:
        cli_configuration = get_command_line_configuration(
            configuration_schema=configuration_schema,
            command_line_options=command_line_options)
    if environment_variables is None:
        env_var_configuration = {}
    else:
        env_var_configuration = get_configuration_from_object(
            configuration_schema=configuration_schema,
            target_object=AttrDict(environment_variables))
    if config_content is None:
        file_configuration = {}
    else:
        file_configuration = get_file_configuration(
            configuration_schema=configuration_schema,
            config_content=config_content)
    if django_settings is None:
        django_configuration = {}
    else:
        django_configuration = get_configuration_from_object(
            configuration_schema=configuration_schema,
            target_object=django_settings)
    # consistency fix: use six.iteritems throughout instead of mixing it
    # with a bare iteritems name imported elsewhere
    return AttrDict(dict(itertools.chain(
        six.iteritems(default_configuration),
        six.iteritems(django_configuration),
        six.iteritems(file_configuration),
        six.iteritems(env_var_configuration),
        six.iteritems(cli_configuration),
    )))
from typing import Tuple
def convert_descriptor_to_type(desc: str) -> Tuple[str, int]:
    """ Converts a java descriptor to the java type, in the inverse of convert_descriptor_to_type()
    Returns the type, and the number of array levels (e.g. [[Z would return ('boolean', 2), not 'boolean[][]'
    Optionally will remap objects using the provided dictionary
    """
    parser = Parser(desc)
    # Each leading '[' adds one array dimension.
    array_depth = 0
    while parser.accept('['):
        array_depth += 1
    token = parser.peek()
    if token in JAVA_DESCRIPTOR_TO_TYPE:
        # Primitive descriptor (Z, I, J, ...).
        return JAVA_DESCRIPTOR_TO_TYPE[token], array_depth
    # Object descriptor: L<classname>;
    parser.expect('L')
    class_name = parser.accept_until(';')
    return class_name, array_depth
import pandas
def _normalize_similarity(df: pandas.DataFrame) -> None:
    """Normalizes similarity by combining cls and transformation (in place)."""
    combined = df["params.similarity.cls"] + "_" + df["params.similarity.transformation"]
    df["params.similarity"] = combined
    # The transformation suffix only matters for l1/l2 similarities;
    # strip it from every other similarity name.
    is_lp = (df["params.similarity"].str.startswith("l1_")
             | df["params.similarity"].str.startswith("l2_"))
    stripped = df.loc[~is_lp, "params.similarity"].apply(
        lambda v: v.replace("_bound_inverse", "").replace("_negative", ""))
    df.loc[~is_lp, "params.similarity"] = stripped
    def _pretty(name):
        parts = name.split("_")
        if len(parts) == 1:
            return parts[0]
        return f"{parts[0]} ({' '.join(parts[1:])})"
    df["params.similarity"] = df["params.similarity"].apply(_pretty)
def density_bounds(density, wi,
                   vo=.49,
                   ve=.5,
                   dt=.1,
                   exact=False):
    """THIS IS A BOUND, NOT THE ACTUAL VELOCITY.
    Min density bound for nnovation front as derived from MFT and compared with
    simulation results.
    Depends on obsolescence and growth rates. Make sure that density has already been
    cached into parquet for faster runtime.
    Parameters
    ----------
    density : list of ndarray
    wi : float
        Innovation probability at each attempt.
    vo : float, .49
        Obsolescence rate per unit time.
    ve : float, .5
        Firm growth rate per unit time (attempt at moving to the right).
    dt : float, .1
        Size of simulation time step.
    exact : bool, False
        If True, use exact formulation. Else use small dt approximation. Make sure that dt
        corresponds to the actual time step in the simulation and not the rate at which
        samples were recorded.
    Returns
    -------
    float
        MFT min density bound for innovation front to keep moving.
    list
        Avg density for each simulation.
    """
    # Occupancy of the rightmost site for each random trajectory...
    boundary_counts = [[trajectory[-1] for trajectory in sample] for sample in density]
    # ...histogrammed, then reduced to the mean occupancy per simulation.
    histograms = [np.bincount(counts, minlength=1) for counts in boundary_counts]
    sim_density = np.array([hist.dot(np.arange(hist.size)) / hist.sum()
                            for hist in histograms])
    # Minimum density required for the front to keep progressing.
    if exact:
        assert dt is not None
        mft_bound = np.log(1 - vo * dt) / np.log(1 - wi * ve * dt)
    else:
        mft_bound = vo / (wi * ve)
    return mft_bound, sim_density
def process_instructions(instructions):
    """Process instructions in order, starting from line 0"""
    pointer, accumulator = 0, 0
    visited = set()
    # Stop as soon as a line is about to execute a second time (loop detected).
    while pointer not in visited:
        try:
            instruction = instructions[pointer]
        except IndexError:
            # Jumped past the last instruction: normal termination.
            print(f'End encountered, accumulator: {accumulator}')
            return accumulator
        visited.add(pointer)
        pointer, accumulator = process_instruction(instruction, pointer, accumulator)
    # Infinite loop detected before reaching the end.
    return None
from typing import List
def check_absence_of_skip_series(
        movement: int,
        past_movements: List[int],
        max_n_skips: int = 2,
        **kwargs
) -> bool:
    """
    Check that there are no long series of skips.
    :param movement:
        melodic interval (in scale degrees) for line continuation
    :param past_movements:
        list of past movements
    :param max_n_skips:
        maximum allowed number of skips in a row
    :return:
        indicator whether a continuation is in accordance with the rule
    """
    # A step (or repeat) can never extend a series of skips.
    if abs(movement) < 2:
        return True
    # Not enough history yet to form a forbidden series.
    if len(past_movements) < max_n_skips:
        return True
    # Allowed only if at least one recent movement was not a skip.
    recent = past_movements[-max_n_skips:]
    return any(abs(step) <= 1 for step in recent)
import glob
import os
def get_preview_images_by_rootname(rootname):
    """Return a list of preview images available in the filesystem for
    the given ``rootname``.
    Parameters
    ----------
    rootname : str
        The rootname of interest (e.g. ``jw86600008001_02101_00007_guider2``).
    Returns
    -------
    preview_images : list
        A list of preview images available in the filesystem for the
        given ``rootname``.
    """
    # First five digits after the "jw" prefix identify the proposal.
    proposal = rootname.split('_')[0].split('jw')[-1][0:5]
    pattern = os.path.join(PREVIEW_IMAGE_FILESYSTEM,
                           'jw{}'.format(proposal),
                           '{}*'.format(rootname))
    matches = sorted(glob.glob(pattern))
    return [os.path.basename(match) for match in matches]
def get_discharge_measurements(sites=None, start=None, end=None, **kwargs):
    """
    Get discharge measurements from the waterdata service.
    Parameters (Additional parameters, if supplied, will be used as query parameters)
    ----------
    sites: array of strings
        If the qwdata parameter site_no is supplied, it will overwrite the sites parameter
    start: string
        If the qwdata parameter begin_date is supplied, it will overwrite the start parameter
    end: string
        If the qwdata parameter end_date is supplied, it will overwrite the end parameter
    Returns:
        DataFrame containing times series data from the NWIS json and Metadata as tuple
    """
    # qwdata-style keyword names take precedence over the generic ones.
    begin_date = kwargs.pop('begin_date', start)
    end_date = kwargs.pop('end_date', end)
    site_no = kwargs.pop('site_no', sites)
    return _discharge_measurements(site_no=site_no, begin_date=begin_date,
                                   end_date=end_date, **kwargs)
def get_full_frame_size(body_size):
    """
    Returns size of full frame for provided frame body size
    :param body_size: frame body size
    :return: size of full frame
    """
    # Full frame = header + body padded to a 16-byte boundary + MAC.
    padded_body_len = get_padded_len_16(body_size)
    return (eth_common_constants.FRAME_HDR_TOTAL_LEN
            + padded_body_len
            + eth_common_constants.FRAME_MAC_LEN)
def is_iterable(obj):
    """
    Returns *True* when an object *obj* is iterable and *False* otherwise.
    """
    try:
        iter(obj)
        return True
    except Exception:
        return False
def vels2waves(vels, restwav, hdr, usewcs=None, observatory="SPM"):
    """Heliocentric radial velocity (in km/s) to observed wavelength (in
    m, or whatever units restwav is in)
    """
    # Shift velocities from the heliocentric to the topocentric frame.
    correction = helio_topo_from_header(hdr, usewcs=usewcs,
                                        observatory=observatory)
    shifted_vels = np.array(vels) + correction
    # Non-relativistic Doppler shift of the rest wavelength.
    return restwav * (1.0 + shifted_vels / LIGHT_SPEED_KMS)
def make_blueprint(db_connection_string=None, configuration={}): # noqa
    """Create the 'pipelines' blueprint.

    Builds a ``Controllers`` instance from the given DB connection string
    and configuration, wires the pipeline list/edit/status/start/delete
    routes to it, and guards every handler with role-based permission
    checks via ``check_permission``.

    :param db_connection_string: connection string handed to ``Controllers``.
    :param configuration: configuration mapping handed to ``Controllers``.
    :returns: the fully wired ``Blueprint`` instance.
    """
    # NOTE(review): mutable default `configuration={}` is shared across calls —
    # safe only if Controllers never mutates it; confirm.
    controllers = Controllers(configuration=configuration,
                              connection_string=db_connection_string)
    # Create instance
    blueprint = Blueprint('pipelines', 'pipelines')
    # List pipelines, scoped by the caller's strongest matching role:
    # all pipelines, only the caller's own, or only public ones.
    @check_permission([Permissions.pipelinesListAll, Permissions.pipelinesListOwn, Permissions.pipelinesListPublic])
    def query_pipelines_(role=None, user=None):
        if role == Permissions.pipelinesListAll:
            return controllers.query_pipelines()
        elif role == Permissions.pipelinesListOwn:
            return controllers.query_pipelines(user=user)
        elif role == Permissions.pipelinesListPublic:
            return controllers.query_pipelines(public=True)
    # Expose configuration; pseudo-admins get the admin view.
    @check_permission([Permissions.pseudoAdmin, Permissions.login])
    def configuration_(role=None):
        return controllers.configuration(admin=role==Permissions.pseudoAdmin)
    # Create or edit a pipeline from the POSTed JSON body; the "edit all"
    # role may modify pipelines owned by other users.
    @check_permission([Permissions.pipelinesEditAll, Permissions.pipelinesEditOwn])
    def edit_pipeline_(role=None, user=None):
        if request.method == 'POST':
            body = request.json
            id = body.get('id')
            return controllers.create_or_edit_pipeline(id, body, user, role == Permissions.pipelinesEditAll)
        else:
            # Non-POST requests fall through to an empty response.
            return {}
    # Pipeline status, scoped like the list endpoint (all / own / public).
    @check_permission([Permissions.pipelinesStatusAll, Permissions.pipelinesStatusOwn, Permissions.pipelinesStatusPublic])
    def query_pipeline_(id, role=None, user=None):
        if role == Permissions.pipelinesStatusAll:
            return controllers.query_pipeline(id)
        elif role == Permissions.pipelinesStatusOwn:
            return controllers.query_pipeline(id, user=user)
        elif role == Permissions.pipelinesStatusPublic:
            return controllers.query_pipeline(id, public=True)
    # Delete a pipeline; ownership is enforced for the "own" role.
    @check_permission([Permissions.pipelinesDeleteAll, Permissions.pipelinesDeleteOwn])
    def delete_pipeline_(id, role=None, user=None):
        if role == Permissions.pipelinesDeleteAll:
            return controllers.delete_pipeline(id)
        elif role == Permissions.pipelinesDeleteOwn:
            return controllers.delete_pipeline(id, user=user)
    # Trigger execution of a pipeline.
    @check_permission([Permissions.pipelinesExecute])
    def start_pipeline_(id):
        return controllers.start_pipeline(id)
    # Register routes
    blueprint.add_url_rule(
        'pipelines', 'query_pipelines', query_pipelines_, methods=['GET'])
    blueprint.add_url_rule(
        'pipeline', 'edit_pipeline', edit_pipeline_, methods=['POST'])
    blueprint.add_url_rule(
        'pipeline/<id>', 'query_pipeline', query_pipeline_, methods=['GET'])
    blueprint.add_url_rule(
        'pipeline/start/<id>', 'start_pipeline', start_pipeline_, methods=['POST'])
    blueprint.add_url_rule(
        'pipeline/<id>', 'delete_pipeline', delete_pipeline_, methods=['DELETE'])
    blueprint.add_url_rule(
        'configuration', 'configuration', configuration_, methods=['GET'])
    # Return blueprint
    return blueprint
from typing import Any
from pathlib import Path
def jsonable(obj: Any):
    """Convert obj to a JSON-ready container or object.

    Scalars pass through unchanged, ``Path`` becomes an absolute path
    string, lists/tuples and dicts are converted recursively, and numpy
    arrays (or anything exposing ``__array__``) become nested lists.

    Args:
        obj: The object to convert.

    Returns:
        A structure built from JSON-friendly values.

    Raises:
        ValueError: If ``obj`` (or a nested value) has an unsupported type.
    """
    if obj is None:
        # JSON has a native null; previously None fell through to ValueError.
        return obj
    if isinstance(obj, (str, float, int, complex)):
        # NOTE(review): complex is accepted here although json.dumps cannot
        # serialize it — confirm whether callers pre-process complex values.
        return obj
    elif isinstance(obj, Path):
        return str(obj.resolve())
    elif isinstance(obj, (list, tuple)):
        # Preserve the container type (tuple stays tuple).
        return type(obj)(map(jsonable, obj))
    elif isinstance(obj, dict):
        # Recurse through the (key, value) pairs, then rebuild the dict.
        return dict(jsonable(list(obj.items())))
    elif isinstance(obj, np.ndarray):
        return obj.tolist()
    elif hasattr(obj, "__array__"):
        return np.array(obj).tolist()
    else:
        raise ValueError(f"Unknown type for JSON: {type(obj)}")
def read_data_from(file_: str) -> dict:
    """Load image tiles from file.

    The file is a sequence of blocks of the form ``Tile <id>:`` followed by
    rows of characters, with blank lines separating the blocks.

    Args:
        file_: path to the tile file.

    Returns:
        dict mapping tile id (int) to a 2-D numpy array of characters.
    """
    tiles = {}
    tile = []
    idx = None
    # Use a context manager so the file handle is closed (it was leaked before).
    with open(file_, "r") as handle:
        for line in handle.read().splitlines():
            if "Tile" in line:
                # "Tile 1234:" -> 1234
                idx = int(line[5:-1])
            elif line == "":
                tiles[idx] = np.array(tile)
                tile = []
            else:
                tile.append(list(line))
    # Flush the last tile when the file does not end with a blank line
    # (previously that tile was silently dropped).
    if tile:
        tiles[idx] = np.array(tile)
    return tiles
def create_graph(A, create_using=None, remove_self_loops=True):
    """
    Flexibly build a networkx graph from a numpy adjacency matrix.

    Params
    ------
    A (np.ndarray): A numpy array.
    create_using (nx.Graph or None): Create the graph using a specific networkx graph.
        Can be used for forcing an asymmetric matrix to create an undirected graph, for example.
    remove_self_loops (bool): If True, remove the diagonal of the matrix before creating the
        graph object.

    Returns
    -------
    G: A graph, typically a nx.Graph or nx.DiGraph.
    """
    if remove_self_loops:
        # Zero the diagonal in place so no self-loop edges are created.
        np.fill_diagonal(A, 0)
    if create_using is not None:
        # Caller forced a specific graph type.
        return nx.from_numpy_array(A, create_using=create_using)
    # Otherwise pick directedness from the matrix symmetry.
    graph_type = nx.Graph() if np.allclose(A, A.T) else nx.DiGraph()
    return nx.from_numpy_array(A, create_using=graph_type)
def delete(*tables):
    """
    Construct a :py:class:`~.Delete` instance; the positional arguments
    name the tables from which rows should really be removed.  In most
    cases you want :py:func:`~.delete_from` instead.
    """
    return Delete(*tables)
def create_string_for_failing_metrics(hpo_objects):
    """
    Build the data-quality-issue string to be inserted into the email output.

    Parameters
    ----------
    hpo_objects (list): contains all of the HPO objects. the
        DataQualityMetric objects will now be associated to
        the HPO objects appropriately.

    Returns
    -------
    issue_report (str): string with all of the data quality issues
        presented
    hpo_id (str): string that represents the HPO ID whose email
        is to be generated
    """
    unique_metrics, hpo_id = organize_relevant_dqms(hpo_objects)

    report_parts = []
    for issue_num, (metric_type, tables_or_classes) in enumerate(
            unique_metrics.items(), start=1):
        tables_affected = ', '.join(tables_or_classes)
        threshold = thresholds_full_name[metric_type]
        min_or_max_str = min_or_max[metric_type]
        report_parts.append(
            f"{issue_num}. {metric_type} "
            f"has failed in the following tables or classes:\n"
            f"\t{tables_affected}\n"
            f"\tWe aim for a {min_or_max_str} value of {threshold} "
            f"for this metric.\n\n")

    issue_report = "".join(report_parts)
    if not issue_report:  # no issues found
        issue_report = great_job

    return issue_report, hpo_id
import argparse
def _main(argv, standard_out, standard_error, standard_in):
    """Run internal main entry point.

    Parses command-line arguments (optionally seeded from a ``--config``
    file), validates the range options, then dispatches either to
    standard-in formatting or to file formatting.
    """
    # Values from a config file act as defaults that explicit
    # command-line flags can override.
    flargs = {}
    if "--config" in argv:
        flargs = find_config_file(argv)
    parser = argparse.ArgumentParser(description=__doc__, prog='docformatter')
    # --in-place and --check are mutually exclusive output modes.
    changes = parser.add_mutually_exclusive_group()
    changes.add_argument('-i', '--in-place', action='store_true',
                         help='make changes to files instead of printing '
                              'diffs')
    changes.add_argument('-c', '--check', action='store_true',
                         help='only check and report incorrectly formatted '
                              'files')
    parser.add_argument('-r', '--recursive', action='store_true',
                        default=bool(flargs.get('recursive', False)),
                        help='drill down directories recursively')
    parser.add_argument('-e', '--exclude', nargs='*',
                        help='exclude directories and files by names')
    parser.add_argument('--wrap-summaries',
                        default=int(flargs.get('wrap-summaries', 79)),
                        type=int,
                        metavar='length',
                        help='wrap long summary lines at this length; '
                             'set to 0 to disable wrapping '
                             '(default: %(default)s)')
    parser.add_argument('--wrap-descriptions',
                        default=int(flargs.get('wrap-descriptions', 72)),
                        type=int,
                        metavar='length',
                        help='wrap descriptions at this length; '
                             'set to 0 to disable wrapping '
                             '(default: %(default)s)')
    parser.add_argument('--blank', dest='post_description_blank',
                        action='store_true',
                        default=bool(flargs.get('blank', False)),
                        help='add blank line after description')
    parser.add_argument('--pre-summary-newline',
                        action='store_true',
                        default=bool(flargs.get('pre-summary-newline', False)),
                        help='add a newline before the summary of a '
                             'multi-line docstring')
    parser.add_argument('--make-summary-multi-line',
                        action='store_true',
                        default=bool(flargs.get('make-summary-multi-line',
                                                False)),
                        help='add a newline before and after the summary of a '
                             'one-line docstring')
    parser.add_argument('--force-wrap', action='store_true',
                        default=bool(flargs.get('force-wrap', False)),
                        help='force descriptions to be wrapped even if it may '
                             'result in a mess')
    parser.add_argument('--range', metavar='line', dest='line_range',
                        default=flargs.get('range', None), type=int, nargs=2,
                        help='apply docformatter to docstrings between these '
                             'lines; line numbers are indexed at 1')
    parser.add_argument('--docstring-length', metavar='length',
                        dest='length_range',
                        default=flargs.get('docstring-length', None),
                        type=int, nargs=2,
                        help='apply docformatter to docstrings of given '
                             'length')
    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('--config',
                        help='path to file containing docformatter options')
    parser.add_argument('files', nargs='+',
                        help="files to format or '-' for standard in")

    # argv[0] is the program name; parse only the actual arguments.
    args = parser.parse_args(argv[1:])

    # Both two-value range options must be positive and ordered
    # low-to-high; parser.error() exits with a usage message.
    if args.line_range:
        if args.line_range[0] <= 0:
            parser.error('--range must be positive numbers')
        if args.line_range[0] > args.line_range[1]:
            parser.error('First value of --range should be less than or equal '
                         'to the second')

    if args.length_range:
        if args.length_range[0] <= 0:
            parser.error('--docstring-length must be positive numbers')
        if args.length_range[0] > args.length_range[1]:
            parser.error('First value of --docstring-length should be less '
                         'than or equal to the second')

    # '-' anywhere in the file list means "read from standard in";
    # note that path has no return value, unlike the file path.
    if '-' in args.files:
        _format_standard_in(args,
                            parser=parser,
                            standard_out=standard_out,
                            standard_in=standard_in)
    else:
        return _format_files(args,
                             standard_out=standard_out,
                             standard_error=standard_error)
from typing import Coroutine
from typing import Any
def current_effective_deadline() -> Coroutine[Any, Any, float]:
    """
    Return the nearest deadline among all cancel scopes that are effective
    for the current task.

    :return: a clock value from the event loop's internal clock
        (``float('inf')`` when no deadline is in effect)
    :rtype: float
    """
    return get_asynclib().current_effective_deadline()
import time
def from_openid_response(openid_response):
    """Build an OpenID object from a successful openid response."""
    issued = int(time.time())
    # Simple-registration data; fall back to an empty sequence when absent.
    sreg_resp = sreg.SRegResponse.fromSuccessResponse(openid_response) or []
    # Attribute-exchange data, if the response carries any.
    ax_resp = ax.FetchResponse.fromSuccessResponse(openid_response)
    ax_args = {}
    if ax_resp is not None:
        ax_args = ax_resp.getExtensionArgs()
        ax_resp.parseExtensionArgs(ax_args)
        ax_args = ax_resp.data
    return OpenID(
        openid_response.identity_url,
        issued,
        openid_response.signed_fields,
        dict(sreg_resp),
        ax_args,
    )
from typing import Callable
import click
def dcos_login_pw_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    A decorator adding a ``--dcos-login-pw`` option that chooses the
    password to set the ``DCOS_LOGIN_PW`` environment variable to.
    """
    decorator = click.option(
        '--dcos-login-pw',
        type=str,
        default=DEFAULT_SUPERUSER_PASSWORD,
        help=(
            'The password to set the ``DCOS_LOGIN_PW`` environment variable '
            'to.'
        ),
        show_default=True,
    )
    return decorator(command)
from typing import Dict
from typing import Tuple
from typing import Any
def get_default_triggers() -> Dict[Tuple[Tuple[str, Any]], Dict[str, Any]]:
    """Expose ``_default_triggers`` via an accessor so callers treat it as read only."""
    return _default_triggers
def yesno_choice(title, callback_yes=None, callback_no=None):
    """
    Ask the user a yes/no question and dispatch to the matching callback.

    :param title: text to display (e.g.: 'Do you want to go to Copenaghen?' )
    :param callback_yes: callback function to be called in case of 'y' answer
    :param callback_no: callback function to be called in case of 'n' answer

    Return the callback result, or the raw 'y'/'n' answer when no callable
    callback is given.
    """
    print()
    print(f'{title} (y/n)')
    callbacks = {'y': callback_yes, 'n': callback_no}
    while True:
        answer = input()
        if answer in callbacks:
            callback = callbacks[answer]
            if callable(callback):
                return callback()
            return answer
        print('Wrong choice buddy ;) Retry:')
def is_tensor_object(x):
    """
    Check whether `x` is a tensor object.

    :class:`tf.Tensor`, :class:`tf.Variable` and :class:`TensorWrapper`
    are the types considered to be tensor objects.

    Args:
        x: The object to be tested.

    Returns:
        bool: A boolean indicating whether `x` is a tensor object.
    """
    tensor_types = (tf.Tensor, tf.Variable, TensorWrapper)
    return isinstance(x, tensor_types)
import json
def slack(text: str, webhookAddress: str) -> str:
    """Post *text* to a Slack incoming webhook and return the response body."""
    payload = bytes(json.dumps({"text": text}), "utf-8")
    response = urlopen(webhookAddress, payload)
    return response.read().decode('utf-8')
def wrap_col(string, str_length=11):
    """
    Hard-wrap *string* into chunks of *str_length* characters, but only
    when it contains a word longer than 25 characters; shorter input is
    returned unchanged.
    """
    has_long_word = any(len(word) > 25 for word in string.split(' '))
    if not has_long_word:
        return (string)
    chunks = [string[i:i + str_length].strip()
              for i in range(0, len(string), str_length)]
    return ('\n'.join(chunks) + '\n')
import re
def tokenize_text_with_special(text):
    """
    Tokenize a string into runs of alphanumeric characters and individual
    non-alphanumeric characters.  Does not filter any characters.

    :param text: The String to be tokenized.
    :return: Tokens
    """
    tokens = []
    current = ""
    for char in text:
        if re.match(alphanumeric, char):
            # Extend the current alphanumeric run.
            current += char
            continue
        # Non-alphanumeric: flush the pending run, then emit the char itself.
        if current:
            tokens.append(current)
            current = ""
        tokens.append(char)
    if current:
        tokens.append(current)
    return tokens
def rms_slope_from_profile(topography, short_wavelength_cutoff=None, window=None,
                           direction=None):
    """
    Compute the root mean square amplitude of the height derivative of a
    topography or line scan stored on a uniform grid. For a two-dimensional
    topography map, the derivative is taken in the x-direction.

    Parameters
    ----------
    topography : :obj:`SurfaceTopography` or :obj:`UniformLineScan`
        SurfaceTopography object containing height information.
    short_wavelength_cutoff : float
        All wavelengths below this cutoff will be set to zero amplitude.
    window : str, optional
        Window for eliminating edge effect. See scipy.signal.get_window.
        Only used if short wavelength cutoff is set.
        (Default: no window for periodic Topographies, "hann" window for
        nonperiodic Topographies)
    direction : str, optional
        Direction in which the window is applied. Possible options are
        'x', 'y' and 'radial'. If set to None, it chooses 'x' for line
        scans and 'radial' for topographies. Only used if short wavelength
        cutoff is set. (Default: None)

    Returns
    -------
    rms_slope : float
        Root mean square slope value.
    """
    if short_wavelength_cutoff is None:
        mask_function = None
    else:
        # Window first to suppress edge effects, then mask out all Fourier
        # components above the cutoff frequency when differentiating.
        topography = topography.window(window=window, direction=direction)
        mask_function = \
            lambda frequency: frequency[0] ** 2 < 1 / short_wavelength_cutoff ** 2
    if topography.dim == 1:
        slope_x = topography.derivative(1, mask_function=mask_function)
    elif topography.dim == 2:
        # Only the x-derivative enters the RMS value.
        slope_x, _slope_y = topography.derivative(1, mask_function=mask_function)
    else:
        raise ValueError(f'Cannot handle topographies of dimension {topography.dim}')
    reduction = Reduction(topography._communicator)
    return np.sqrt(reduction.mean(slope_x ** 2))
def _get_exec_driver():
    """
    Get the method to be used in shell commands.

    Returns one of 'docker-exec', 'lxc-attach' or 'nsenter', preferring an
    explicit ``docker.exec_driver`` config option and otherwise deriving
    it from ``docker info``.  The result is memoized in ``__context__``.
    """
    contextkey = "docker.exec_driver"
    if contextkey not in __context__:
        # An explicit config option always wins over auto-detection.
        from_config = __salt__["config.option"](contextkey, None)
        # This if block can be removed once we make docker-exec a default
        # option, as it is part of the logic in the commented block above.
        if from_config is not None:
            __context__[contextkey] = from_config
            return from_config

        # The execution driver was removed in Docker 1.13.1, docker-exec is now
        # the default.
        driver = info().get("ExecutionDriver", "docker-exec")
        if driver == "docker-exec":
            __context__[contextkey] = driver
        elif driver.startswith("lxc-"):
            __context__[contextkey] = "lxc-attach"
        elif driver.startswith("native-") and HAS_NSENTER:
            __context__[contextkey] = "nsenter"
        elif not driver.strip() and HAS_NSENTER:
            # Blank driver string: assume modern Docker and fall back to
            # nsenter, but warn so users can silence it via config.
            log.warning(
                "ExecutionDriver from 'docker info' is blank, falling "
                "back to using 'nsenter'. To squelch this warning, set "
                "docker.exec_driver. See the Salt documentation for the "
                "docker module for more information."
            )
            __context__[contextkey] = "nsenter"
        else:
            raise NotImplementedError(
                "Unknown docker ExecutionDriver '{}', or didn't find "
                "command to attach to the container".format(driver)
            )
    return __context__[contextkey]
def combine_results_jsons(drtdp_json, psrtdp_json, vi_json):
    """
    Merge the per-solver overall-results jsons into a single summary json.

    :param drtdp_json: a json for drtdp overall results
    :param psrtdp_json: a json for ps-rtdp overall results
    :param vi_json: a json for value iteration overall results
    :return: combined json
    """
    fmt = '{0:.3f}'.format
    # Value iteration results may be missing entirely.
    vi_cost = vi_json.get('best_cost')
    vi_time = fmt(vi_json['planning_time']) if 'planning_time' in vi_json else None
    combined = {
        "domain": drtdp_json['domain'],
        "actions": drtdp_json['actions'],
        "facts": drtdp_json['facts'],
        "num_agents": drtdp_json['num_agents'],
        "best_cost": (drtdp_json['best_cost'], psrtdp_json['best_cost'], vi_cost),
        # Message/expansion counts are reported in units of 10k.
        "messages": (fmt(drtdp_json['messages'] / 10000),
                     fmt(psrtdp_json['messages'] / 10000)),
        "expansions": (fmt(drtdp_json['expansions'] / 10000),
                       fmt(psrtdp_json['expansions'] / 10000)),
        "trajectories": (drtdp_json['trajectories'], psrtdp_json['trajectories']),
        "restarts": (drtdp_json['restarts'], psrtdp_json['restarts']),
        "planning_time": (fmt(drtdp_json['planning_time']),
                          fmt(psrtdp_json['planning_time']),
                          vi_time),
    }
    return combined
def preprocess_data(data_path, embeds_path, lang='fr'):
    """
    Load a pre-embedded dataset and its labels, in a random (but
    consistent) order.

    :param data_path: (str) filepath to csv
    :param embeds_path: (str) filepath to json
    :return: X_str (list of str), X (list of list of list), y (len(train) x 2) np array
    """
    # list (dataset) of list (command) of list (word embedding)
    embeddings = load_json(embeds_path)[lang]
    frame = pd.read_csv(data_path)
    strings = frame['string'].tolist()
    # Scale the (x, y) targets from percentages to the unit interval.
    labels = frame[['x', 'y']].values / 100
    return strings, embeddings, labels
def text_comp19_to_df():
    """
    Return a pandas DataFrame with the data of the TextComplexityDE19
    dataset.

    Downloads the dataset first when the ratings CSV is not present
    locally.  The result has columns ``raw_text``, ``rating`` and a
    constant ``source`` column (0), in that structure.
    """
    # Path to relevant csv file
    csv_path = join(
        dirname(dirname(dirname(abspath(__file__)))),
        "data",
        "TextComplexityDE19/ratings.csv",
    )

    # Download on demand, then read the CSV exactly once (the original
    # duplicated the read_csv call in both branches).
    print("Check for TextComplexityDE19/ratings.csv")
    if not isfile(csv_path):
        print("Downloading TextComplexityDE19 Dataset")
        downloader.download_TextComplexityDE19()
    print("Reading in TextComplexityDE19/ratings.csv")
    corpus = pd.read_csv(csv_path, encoding="windows-1252")

    # Rename columns and insert source of this dataframe for consistency
    corpus = corpus.rename(columns={"Sentence": "raw_text", "MOS_Complexity": "rating"})
    corpus.insert(2, "source", 0)

    # Delete all columns except the raw_text and the rating column
    corpus = corpus.drop(
        columns=[
            "ID",
            "Article_ID",
            "Article",
            "Votes_Complexity",
            "Std_Complexity",
            "Votes_Understandability",
            "MOS_Understandability",
            "Std_Understandability",
            "Vote_Lexical_difficulty",
            "MOS_Lexical_difficulty",
            "Std_Lexical_difficulty",
        ]
    )
    return corpus
def from_relay(func: relay.Function) -> IRModule:
    """Convert a Relay function into a Relax program.

    Walks the Relay expression tree in post order, translating each node
    into the equivalent Relax construct via a BlockBuilder.

    Parameters
    ----------
    func : relay.Function
        Relay function to be converted

    Returns
    -------
    mod : tvm.IRModule
        The Relax IRModule for compilation
    """
    # A map to store the mapping of Relay Expr to its corresponding Relax var
    var_map = {}
    # The output of the function
    output_var = None
    # Collected Relax placeholders corresponding to the Relay parameters.
    params = []
    convert_map = get_convert_map()

    def visit_func(node):
        # Translate a single Relay node; runs once per node in post order,
        # so operands are already present in var_map when a node is visited.
        nonlocal output_var
        if isinstance(node, relay.Var):
            if isinstance(node.type_annotation, relay.TensorType):
                var_map[node] = nn.Placeholder(
                    tuple(node.type_annotation.shape), node.type_annotation.dtype, node.name_hint
                )
                params.append(var_map[node])
            else:
                raise TypeError("The type of relay.Var to be translated must be of TensorType.")
        elif isinstance(node, relay.Call):
            args = node.args
            new_args = []
            for arg in args:
                if arg in var_map:
                    new_args.append(var_map[arg])

            op_name = node.op.name
            attrs = node.attrs
            compute_func = node.op.get_attr("FTVMCompute")
            if compute_func is None:
                # No TE compute registered: fall back to the explicit
                # operator conversion table.
                if node.op.name not in convert_map:
                    raise tvm.error.OpNotImplemented(
                        "Operator {} is not supported.".format(op_name)
                    )
                var = convert_operator(op_name, new_args, attrs)
            else:
                # Emit a TE call named after the last segment of the op name.
                name_hint = op_name.split(".")[-1]
                var = bb.emit_te(
                    compute_func, attrs, new_args, node.checked_type, primfunc_name_hint=name_hint
                )
            output_var = var
            var_map[node] = var
        elif isinstance(node, relay.Constant):
            # fill the shape and checked_type fields of the Constant
            new_constant = relay.Constant(node.data)
            var_map[node] = new_constant
        elif isinstance(node, relay.Tuple):
            new_fields = []
            for field in node.fields:
                if field in var_map:
                    new_fields.append(var_map[field])
                else:
                    raise RuntimeError("field is not in var_map.")
            new_tuple = relax.Tuple(new_fields)
            new_tuple_var = relax.BlockBuilder.current().emit(new_tuple)
            var_map[node] = new_tuple_var
            output_var = new_tuple_var
        elif isinstance(node, relay.TupleGetItem):
            if node.tuple_value in var_map:
                new_tuple = var_map[node.tuple_value]
                new_tuple_get_item_node = relax.TupleGetItem(new_tuple, node.index)
                new_tuple_get_item_var = relax.BlockBuilder.current().emit(new_tuple_get_item_node)
                var_map[node] = new_tuple_get_item_var
                output_var = new_tuple_get_item_var
            else:
                raise RuntimeError("tuple is not in var_map")
        elif isinstance(node, relay.Function):
            # The Function node is visited last; finalize with the last
            # emitted value as the function output.
            relax.BlockBuilder.current().emit_func_output(output_var, params)
        elif isinstance(node, tvm.ir.Op):
            pass
        else:
            raise TypeError("{} is not supported yet.".format(str(type(node))))

    bb = relax.BlockBuilder()
    with bb.function("main"):
        relay.analysis.post_order_visit(func, visit_func)
    return bb.get()
import os
def get_list_of_all_data_file_names(datadirectory):
    """
    Return the names of all data files (.txt) in the specified directory.
    """
    print('get_list_of_all_data_file_names', datadirectory)
    return [name for name in os.listdir(datadirectory) if name.endswith('txt')]
def function_profiler(naming='qualname'):
    """
    Decorator factory: wrap a function so each call runs inside a
    FunctionLogger context manager, which logs information about the call.
    """
    def decorator(func):
        def wrapped(*args, **kwargs):
            with FunctionLogger(func, naming):
                return func(*args, **kwargs)
        return wrapped
    return decorator
from unittest.mock import Mock
def mock_data_manager(components):
"""Return a mock data manager of a general model."""
dm = Mock()
dm.components = components
dm.fixed_components = []
return dm | e796dbe73e2ec7df650ceab450a3a5449a6af9ed | 3,627,798 |
def load_data_fashion_mnist(batch_size, resize=None):  #@save
    """Download the Fashion-MNIST dataset and load it into memory.

    Parameters
    ----------
    batch_size : int
        Number of samples per mini-batch.
    resize : int or tuple, optional
        If given, resize images to this size before converting to tensors.

    Returns
    -------
    tuple
        (train DataLoader, test DataLoader).
    """
    trans = [transforms.ToTensor()]
    if resize:
        # Resize must happen before the tensor conversion.
        trans.insert(0, transforms.Resize(resize))
    trans = transforms.Compose(trans)
    mnist_train = paddle.vision.datasets.FashionMNIST(mode="train", transform=trans)
    mnist_test = paddle.vision.datasets.FashionMNIST(mode="test", transform=trans)
    return (paddle.io.DataLoader(dataset=mnist_train,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 num_workers=get_dataloader_workers()),
            paddle.io.DataLoader(dataset=mnist_test,
                                 batch_size=batch_size,
                                 # fix: the evaluation split should not be
                                 # shuffled; it makes evaluation order
                                 # non-deterministic for no benefit.
                                 shuffle=False,
                                 num_workers=get_dataloader_workers()))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.