def test_load(cursor, run):
"""verify simple load by key"""
_ = run(MyTable.load, cursor, 123)
assert cursor.query_after == (
"SELECT 'my_table'.'id' AS 0_id, 'my_table'.'name' AS 0_name,"
" 'my_table'.'email' AS 0_email FROM 'my_table' AS 'my_table'"
" WHERE 'id'=123 LIMIT 1"
    )

def my_join(x):
"""
:param x: -> the list desired to join
:return:
"""
return ''.join(x) | 30,401 |
def config_parse_args(configfile=None):
""" Command line arguments and configuration file setting """
parser = argparse.ArgumentParser(
description='nanoping log importer to InfluxDB')
parser.add_argument('--config', type=str, required=True,
help='configuration file')
parser.add_argument('--interface', type=str, required=True,
help='interface name')
parser.add_argument('--log', type=str, required=False,
help='nanoping logfile location')
parser.add_argument('--db', type=str, required=False,
help='database name of InfluxDB')
parser.add_argument('--debug', action='store_true',
help='turn on debug output')
parser.add_argument('--dry', action='store_true',
help='dry run mode')
args = parser.parse_args()
config = configparser.ConfigParser()
config.read(args.config)
# override by command line option
if args.db is not None:
config['influxdb']['db'] = args.db
    return args, config

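# A hedged usage sketch for config_parse_args (not part of the original module).
# The [influxdb] section name is the one the function itself reads; the file
# name and the remaining keys are illustrative assumptions.
#
#   $ cat nanoping.ini
#   [influxdb]
#   db = nanoping
#
#   $ python importer.py --config nanoping.ini --interface eth0 --db override_db
#
# After parsing, config['influxdb']['db'] == 'override_db', because the --db
# option overrides the value read from the configuration file.
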
def run_example(FLAGS):
"""
Run parallel Sherpa optimization over a set of discrete hp combinations.
"""
# Iterate algorithm accepts dictionary containing lists of possible values.
hp_space = {'act': ['tanh', 'relu'],
'lrinit': [0.1, 0.01],
'momentum': [0.0],
'lrdecay': [0.0],
'arch': [[20,5], [20, 10], [10,10,10]],
'epochs': [20],
}
parameters = sherpa.Parameter.grid(hp_space)
alg = sherpa.algorithms.GridSearch()
stopping_rule = sherpa.algorithms.MedianStoppingRule(min_iterations=10, min_trials=5)
    filename = './bianchini.py'  # Python script to run.
    output_dir = './output'  # All files written to here.
if not FLAGS.local:
# Submit to SGE queue.
# env = '/home/pjsadows/profiles/auto.profile' # Script specifying environment variables.
env = FLAGS.env
opt = '-N example -P {} -q {} -l {}'.format(FLAGS.P, FLAGS.q, FLAGS.l)
        sched = SGEScheduler(environment=env, submit_options=opt, output_dir=output_dir)
else:
# Run on local machine.
sched = LocalScheduler() # Run on local machine without SGE.
    rval = sherpa.optimize(parameters=parameters,
                           algorithm=alg,
                           stopping_rule=stopping_rule,
                           output_dir=output_dir,
                           lower_is_better=True,
                           filename=filename,
                           scheduler=sched,
                           max_concurrent=FLAGS.max_concurrent)
print()
print('Best results:')
    print(rval)

def visualize_code_vectors(code_vectors, cmap='Paired', inter='none',
origin='upper', fontsize=16, aspect='auto',
colorbar=True):
"""
Document
"""
to_plot = np.array(code_vectors)
    # Plot parameters
    to_plot_title = 'Code Vectors in Time'
fig_size = (16, 12)
axes_position = [0.1, 0.1, 0.8, 0.8]
xlabel = 'Sensor Clusters'
ylabel = 'Time'
fig = plt.figure(figsize=fig_size)
ax = fig.add_axes(axes_position)
im = plt.imshow(to_plot, interpolation=inter, cmap=cmap,
origin=origin, aspect=aspect)
    # Set the labels and title
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(to_plot_title)
    # Set the tick names for x
# x_labels = np.arange(Nseries * Nseries + 1)
# ax.xaxis.set_major_formatter(plt.FixedFormatter(x_labels))
# ax.xaxis.set_major_locator(plt.MultipleLocator(1))
# Change the font sizes
axes = fig.get_axes()
for ax in axes:
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fontsize)
# Colorbar (This makes the axes to display proper)
if colorbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = fig.colorbar(im, cax=cax)
cbar.solids.set_edgecolor('face')
    return fig

def get_displacements_and_forces(disp_dataset):
"""Return displacements and forces of all atoms from displacement dataset.
This is used to extract displacements and forces from displacement dataset.
This method is considered more-or-less as a converter when the input is in
type-1.
Parameters
----------
disp_dataset : dict
Displacement dataset either in type-1 or type-2.
Returns
-------
displacements : ndarray
Displacements of all atoms in all supercells.
shape=(snapshots, supercell atoms, 3), dtype='double', order='C'
forces : ndarray or None
Forces of all atoms in all supercells.
shape=(snapshots, supercell atoms, 3), dtype='double', order='C'
None is returned when forces don't exist.
"""
if "first_atoms" in disp_dataset:
natom = disp_dataset["natom"]
disps = np.zeros(
(len(disp_dataset["first_atoms"]), natom, 3), dtype="double", order="C"
)
forces = None
for i, disp1 in enumerate(disp_dataset["first_atoms"]):
disps[i, disp1["number"]] = disp1["displacement"]
if "forces" in disp1:
if forces is None:
forces = np.zeros_like(disps)
forces[i] = disp1["forces"]
return disps, forces
elif "displacements" in disp_dataset:
if "forces" in disp_dataset:
forces = disp_dataset["forces"]
else:
forces = None
return disp_dataset["displacements"], forces | 30,405 |
def GetUserAllBasicData(user_url: str) -> Dict:
"""获取用户的所有基础信息
Args:
user_url (str): 用户个人主页 Url
Returns:
Dict: 用户基础信息
"""
result = {}
json_obj = GetUserJsonDataApi(user_url)
html_obj = GetUserPCHtmlDataApi(user_url)
anniversary_day_html_obj = GetUserNextAnniversaryDayHtmlDataApi(UserUrlToUserSlug(user_url))
result["name"] = json_obj["nickname"]
result["url"] = user_url
result["uslug"] = UserUrlToUserSlug(user_url)
result["gender"] = json_obj["gender"]
result["followers_count"] = json_obj["following_users_count"]
result["fans_count"] = json_obj["followers_count"]
result["articles_count"] = json_obj
result["wordage"] = json_obj["total_wordage"]
result["likes_count"] = json_obj["total_likes_count"]
try:
result["assets_count"] = html_obj.xpath("//div[@class='info']/ul/li[6]/div[@class='meta-block']/p")[0].text
result["assets_count"] = float(result["assets_count"].replace(".", "").replace("w", "000"))
except IndexError:
result["assets_count"] = None
if json_obj["total_wordage"] == 0 and json_obj["jsd_balance"] == 0:
result["FP_count"] = None
else:
result["FP_count"] = json_obj["jsd_balance"] / 1000
if result["assets_count"] and result["FP_count"]:
result["FTN_count"] = result["assets_count"] - result["FP_count"]
result["FTN_count"] = round(abs(result["FTN_count"]), 3)
else:
result["FTN_count"] = None
result["badges_list"] = html_obj.xpath("//li[@class='badge-icon']/a/text()")
result["badges_list"] = [item.replace(" ", "").replace("\n", "") for item in result["badges_list"]] # 移除空格和换行符
result["badges_list"] = [item for item in result["badges_list"] if item != ""] # 去除空值
result["last_update_time"] = datetime.fromtimestamp(json_obj["last_updated_at"])
try:
result["vip_info"] = {
"vip_type": {
"bronze": "铜牌",
"silver": "银牌",
"gold": "黄金",
"platina": "白金"
}[json_obj["member"]["type"]],
"expire_date": datetime.fromtimestamp(json_obj["member"]["expires_at"])
}
except KeyError:
result["vip_info"] = {
"vip_type": None,
"expire_date": None
}
result["introduction_html"] = json_obj["intro"]
if not result["introduction_html"]:
result["introduction_text"] = ""
else:
result["introduction_text"] = "\n".join(etree.HTML(result["introduction_html"]).xpath("//*/text()"))
result["next_anniversary_day"] = anniversary_day_html_obj.xpath('//*[@id="app"]/div[1]/div/text()')[0]
result["next_anniversary_day"] = datetime.fromisoformat("-".join(findall(r"\d+", result["next_anniversary_day"])))
    return result

def get_trace(session, trace_uuid):
"""Retrieves traces given a uuid.
Args:
sesssion: db session
trace_uuid: uuid of trace in question
Returns 2-tuple of plop, flamegraph input or None if trace doesn't exist
(or was garbage collected.
"""
trace = session.query(PerfProfile).filter(PerfProfile.uuid == trace_uuid).first()
if not trace:
raise InvalidUUID()
    return trace.plop_input, trace.flamegraph_input

def iter_package_families(paths=None):
"""Iterate over package families, in no particular order.
Note that multiple package families with the same name can be returned.
Unlike packages, families later in the searchpath are not hidden by earlier
families.
Args:
paths (list of str, optional): paths to search for package families,
defaults to `config.packages_path`.
Returns:
`PackageFamily` iterator.
"""
for path in (paths or config.packages_path):
repo = package_repository_manager.get_repository(path)
for resource in repo.iter_package_families():
            yield PackageFamily(resource)

def first_kind_discrete(orientations, order=4):
"""
Calc orientation tensors of first kind for given discrete vectors
"""
# Normalize orientations
orientations = [np.array(v) / np.linalg.norm(v) for v in orientations]
# Symmetrize orientations
# orientations_reversed = [-v for v in orientations]
# orientations = orientations + orientations_reversed
einsumStrings = {
1: "ij -> j",
2: "ij, ik -> jk",
3: "ij, ik, il -> jkl",
4: "ij, ik, il, im -> jklm",
5: "ij, ik, il, im, in -> jklmn",
6: "ij, ik, il, im, in, ip -> jklmnp",
}
    ori = orientations
    if order not in einsumStrings:
        raise Exception("Not implemented")
    N = 1.0 / len(orientations) * np.einsum(einsumStrings[order], *([ori] * order))
    return N

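# A quick sanity check for first_kind_discrete (illustrative, not from the
# original module): an isotropic planar set of unit vectors gives a
# second-order orientation tensor of diag(1/2, 1/2).
import numpy as np

vectors = [[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]]
N2 = first_kind_discrete(vectors, order=2)
assert np.allclose(N2, np.diag([0.5, 0.5]))
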
def _reorganize_beam_groups(ed_obj):
"""
Maps Beam --> Sonar/Beam_group1 and Beam_power --> Sonar/Beam_group2.
Parameters
----------
ed_obj : EchoData
EchoData object that was created using echopype version 0.5.x
Notes
-----
The function directly modifies the input EchoData object.
"""
# Map Beam --> Sonar/Beam_group1
if "Beam" in ed_obj.group_paths:
ed_obj._tree["Sonar"].add_child(ed_obj._tree["Beam"])
ed_obj._tree["Sonar/Beam"].name = "Beam_group1"
# Map Beam_power --> Sonar/Beam_group2
if "Beam_power" in ed_obj.group_paths:
ed_obj._tree["Sonar"].add_child(ed_obj._tree["Beam_power"])
ed_obj._tree["Sonar/Beam_power"].name = "Beam_group2" | 30,410 |
def write_tsv(headerfields, features, outfn):
"""Writes header and generator of lines to tab separated file.
headerfields - list of field names in header in correct order
features - generates 1 list per line that belong to header
outfn - filename to output to. Overwritten if exists
"""
with open(outfn, 'w') as fp:
write_tsv_line_from_list(headerfields, fp)
for line in features:
write_tsv_line_from_list([str(line[field]) for field
                                     in headerfields], fp)

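# Usage sketch (assumes write_tsv_line_from_list joins a list with tabs and
# appends a newline, as the code above implies; the field names are made up).
header = ['peptide', 'score']
rows = iter([{'peptide': 'AAK', 'score': 0.9}, {'peptide': 'LLR', 'score': 0.7}])
write_tsv(header, rows, 'out.tsv')  # out.tsv gets a header line plus two rows
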
def create_tables():
""" Create all tables for the db
    :returns: Nothing. A message is printed on connection error
"""
sql_create_web_addon_table = """ CREATE TABLE IF NOT EXISTS web_addon (
ID INTEGER PRIMARY KEY AUTOINCREMENT,
esoui_id text NOT NULL,
name text NOT NULL
); """
sql_create_local_addon_table = """ CREATE TABLE IF NOT EXISTS local_addon (
ID INTEGER PRIMARY KEY AUTOINCREMENT,
esoui_id text DEFAULT '',
folder_name text NOT NULL,
web_name text DEFAULT '',
local_version text DEFAULT '0',
web_version text DEFAULT '0'
); """
sql_create_correction_table = """ CREATE TABLE IF NOT EXISTS correction (
ID INTEGER PRIMARY KEY AUTOINCREMENT,
folder_name text NOT NULL,
web_name text NOT NULL
); """
# create a database connection
conn = create_connection()
if conn is not None:
create_table(sql_create_web_addon_table)
create_table(sql_create_local_addon_table)
create_table(sql_create_correction_table)
else:
print("Unable to connect to the database") | 30,412 |
def prep_pointcloud(input_dict,
root_path,
voxel_generator,
target_assigner,
db_sampler=None,
max_voxels=20000,
class_names=['Car'],
remove_outside_points=False,
training=True,
create_targets=True,
shuffle_points=False,
reduce_valid_area=False,
remove_unknown=False,
gt_rotation_noise=[-np.pi / 3, np.pi / 3],
gt_loc_noise_std=[1.0, 1.0, 1.0],
global_rotation_noise=[-np.pi / 4, np.pi / 4],
global_scaling_noise=[0.95, 1.05],
global_loc_noise_std=(0.2, 0.2, 0.2),
global_random_rot_range=[0.78, 2.35],
generate_bev=False,
without_reflectivity=False,
num_point_features=6,
anchor_area_threshold=1,
gt_points_drop=0.0,
gt_drop_max_keep=10,
remove_points_after_sample=True,
anchor_cache=None,
remove_environment=False,
random_crop=False,
reference_detections=None,
add_rgb_to_points=False,
lidar_input=False,
unlabeled_db_sampler=None,
out_size_factor=2,
min_gt_point_dict=None,
bev_only=False,
use_group_id=False,
out_dtype=np.float32):
"""convert point cloud to voxels, create targets if ground truths
exists.
"""
points = input_dict["points"]
if training:
gt_boxes = input_dict["gt_boxes"]
gt_names = input_dict["gt_names"]
difficulty = input_dict["difficulty"]
group_ids = None
if use_group_id and "group_ids" in input_dict:
group_ids = input_dict["group_ids"]
rect = input_dict["rect"]
Trv2c = input_dict["Trv2c"]
P2 = input_dict["P2"]
unlabeled_training = unlabeled_db_sampler is not None
image_idx = input_dict["image_idx"]
if reference_detections is not None:
C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(P2)
frustums = box_np_ops.get_frustum_v2(reference_detections, C)
frustums -= T
# frustums = np.linalg.inv(R) @ frustums.T
frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums)
frustums = box_np_ops.camera_to_lidar(frustums, rect, Trv2c)
surfaces = box_np_ops.corner_to_surfaces_3d_jit(frustums)
masks = points_in_convex_polygon_3d_jit(points, surfaces)
points = points[masks.any(-1)]
if remove_outside_points and not lidar_input:
image_shape = input_dict["image_shape"]
points = box_np_ops.remove_outside_points(points, rect, Trv2c, P2,
image_shape)
    if remove_environment and training:
selected = kitti.keep_arrays_by_name(gt_names, class_names)
gt_boxes = gt_boxes[selected]
gt_names = gt_names[selected]
difficulty = difficulty[selected]
if group_ids is not None:
group_ids = group_ids[selected]
points = prep.remove_points_outside_boxes(points, gt_boxes)
if training:
# print(gt_names)
selected = kitti.drop_arrays_by_name(gt_names, ["DontCare"])
gt_boxes = gt_boxes[selected]
gt_names = gt_names[selected]
difficulty = difficulty[selected]
if group_ids is not None:
group_ids = group_ids[selected]
gt_boxes = box_np_ops.box_camera_to_lidar(gt_boxes, rect, Trv2c)
if remove_unknown:
remove_mask = difficulty == -1
"""
gt_boxes_remove = gt_boxes[remove_mask]
gt_boxes_remove[:, 3:6] += 0.25
points = prep.remove_points_in_boxes(points, gt_boxes_remove)
"""
keep_mask = np.logical_not(remove_mask)
gt_boxes = gt_boxes[keep_mask]
gt_names = gt_names[keep_mask]
difficulty = difficulty[keep_mask]
if group_ids is not None:
group_ids = group_ids[keep_mask]
gt_boxes_mask = np.array(
[n in class_names for n in gt_names], dtype=np.bool_)
if db_sampler is not None:
sampled_dict = db_sampler.sample_all(
root_path,
gt_boxes,
gt_names,
num_point_features,
random_crop,
gt_group_ids=group_ids,
rect=rect,
Trv2c=Trv2c,
P2=P2)
if sampled_dict is not None:
sampled_gt_names = sampled_dict["gt_names"]
sampled_gt_boxes = sampled_dict["gt_boxes"]
sampled_points = sampled_dict["points"]
sampled_gt_masks = sampled_dict["gt_masks"]
# gt_names = gt_names[gt_boxes_mask].tolist()
gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0)
# gt_names += [s["name"] for s in sampled]
gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes])
gt_boxes_mask = np.concatenate(
[gt_boxes_mask, sampled_gt_masks], axis=0)
if group_ids is not None:
sampled_group_ids = sampled_dict["group_ids"]
group_ids = np.concatenate([group_ids, sampled_group_ids])
if remove_points_after_sample:
points = prep.remove_points_in_boxes(
points, sampled_gt_boxes)
points = np.concatenate([sampled_points, points], axis=0)
# unlabeled_mask = np.zeros((gt_boxes.shape[0], ), dtype=np.bool_)
if without_reflectivity:
used_point_axes = list(range(num_point_features))
used_point_axes.pop(3)
points = points[:, used_point_axes]
pc_range = voxel_generator.point_cloud_range
if bev_only: # set z and h to limits
gt_boxes[:, 2] = pc_range[2]
gt_boxes[:, 5] = pc_range[5] - pc_range[2]
prep.noise_per_object_v3_(
gt_boxes,
points,
gt_boxes_mask,
rotation_perturb=gt_rotation_noise,
center_noise_std=gt_loc_noise_std,
global_random_rot_range=global_random_rot_range,
group_ids=group_ids,
num_try=100)
# should remove unrelated objects after noise per object
gt_boxes = gt_boxes[gt_boxes_mask]
gt_names = gt_names[gt_boxes_mask]
if group_ids is not None:
group_ids = group_ids[gt_boxes_mask]
gt_classes = np.array(
[class_names.index(n) + 1 for n in gt_names], dtype=np.int32)
gt_boxes, points = prep.random_flip(gt_boxes, points)
gt_boxes, points = prep.global_rotation(
gt_boxes, points, rotation=global_rotation_noise)
gt_boxes, points = prep.global_scaling_v2(gt_boxes, points,
*global_scaling_noise)
# Global translation
gt_boxes, points = prep.global_translate(gt_boxes, points, global_loc_noise_std)
bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
mask = prep.filter_gt_box_outside_range(gt_boxes, bv_range)
gt_boxes = gt_boxes[mask]
gt_classes = gt_classes[mask]
if group_ids is not None:
group_ids = group_ids[mask]
# limit rad to [-pi, pi]
gt_boxes[:, 6] = box_np_ops.limit_period(
gt_boxes[:, 6], offset=0.5, period=2 * np.pi)
if shuffle_points:
# shuffle is a little slow.
np.random.shuffle(points)
# [0, -40, -3, 70.4, 40, 1]
voxel_size = voxel_generator.voxel_size
pc_range = voxel_generator.point_cloud_range
grid_size = voxel_generator.grid_size
# [352, 400]
voxels, coordinates, num_points = voxel_generator.generate(
points, max_voxels)
example = {
'voxels': voxels,
'num_points': num_points,
'coordinates': coordinates,
"num_voxels": np.array([voxels.shape[0]], dtype=np.int64)
}
example.update({
'rect': rect,
'Trv2c': Trv2c,
'P2': P2,
})
# if not lidar_input:
feature_map_size = grid_size[:2] // out_size_factor
feature_map_size = [*feature_map_size, 1][::-1]
if anchor_cache is not None:
anchors = anchor_cache["anchors"]
anchors_bv = anchor_cache["anchors_bv"]
matched_thresholds = anchor_cache["matched_thresholds"]
unmatched_thresholds = anchor_cache["unmatched_thresholds"]
else:
ret = target_assigner.generate_anchors(feature_map_size)
anchors = ret["anchors"]
anchors = anchors.reshape([-1, 7])
matched_thresholds = ret["matched_thresholds"]
unmatched_thresholds = ret["unmatched_thresholds"]
anchors_bv = box_np_ops.rbbox2d_to_near_bbox(
anchors[:, [0, 1, 3, 4, 6]])
example["anchors"] = anchors
# print("debug", anchors.shape, matched_thresholds.shape)
# anchors_bv = anchors_bv.reshape([-1, 4])
anchors_mask = None
if anchor_area_threshold >= 0:
coors = coordinates
dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
coors, tuple(grid_size[::-1][1:]))
dense_voxel_map = dense_voxel_map.cumsum(0)
dense_voxel_map = dense_voxel_map.cumsum(1)
anchors_area = box_np_ops.fused_get_anchors_area(
dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)
anchors_mask = anchors_area > anchor_area_threshold
# example['anchors_mask'] = anchors_mask.astype(np.uint8)
example['anchors_mask'] = anchors_mask
if generate_bev:
bev_vxsize = voxel_size.copy()
bev_vxsize[:2] /= 2
bev_vxsize[2] *= 2
bev_map = points_to_bev(points, bev_vxsize, pc_range,
without_reflectivity)
example["bev_map"] = bev_map
if not training:
return example
if create_targets:
targets_dict = target_assigner.assign(
anchors,
gt_boxes,
anchors_mask,
gt_classes=gt_classes,
matched_thresholds=matched_thresholds,
unmatched_thresholds=unmatched_thresholds)
example.update({
'labels': targets_dict['labels'],
'reg_targets': targets_dict['bbox_targets'],
'reg_weights': targets_dict['bbox_outside_weights'],
})
    return example

def fix_repo_url(repo_url, in_type='https', out_type='ssh', format_dict=format_dict):
""" Changes the repo_url format """
    for old, new in zip(format_dict[in_type], format_dict[out_type]):
repo_url = repo_url.replace(old, new)
    return repo_url

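# fix_repo_url depends on a module-level format_dict; a plausible shape is
# sketched below as an assumption -- the real mapping lives elsewhere.
format_dict = {
    'https': ['https://github.com/'],
    'ssh': ['git@github.com:'],
}
# fix_repo_url('https://github.com/user/repo.git')
# -> 'git@github.com:user/repo.git'
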
def add_nodes_to_graph(
G: nx.Graph,
protein_df: Optional[pd.DataFrame] = None,
verbose: bool = False,
) -> nx.Graph:
"""Add nodes into protein graph.
:param G: ``nx.Graph`` with metadata to populate with nodes.
:type G: nx.Graph
    :param protein_df: DataFrame of protein structure containing nodes & initial node metadata to add to the graph.
:type protein_df: pd.DataFrame, optional
:param verbose: Controls verbosity of this step.
:type verbose: bool
:returns: nx.Graph with nodes added.
:rtype: nx.Graph
"""
# If no protein dataframe is supplied, use the one stored in the Graph object
if protein_df is None:
protein_df = G.graph["pdb_df"]
# Assign intrinsic node attributes
chain_id = protein_df["chain_id"].apply(str)
residue_name = protein_df["residue_name"]
residue_number = protein_df["residue_number"] # .apply(str)
coords = np.asarray(protein_df[["x_coord", "y_coord", "z_coord"]])
b_factor = protein_df["b_factor"]
atom_type = protein_df["atom_name"]
nodes = protein_df["node_id"]
element_symbol = protein_df["element_symbol"]
G.add_nodes_from(nodes)
# Set intrinsic node attributes
nx.set_node_attributes(G, dict(zip(nodes, chain_id)), "chain_id")
nx.set_node_attributes(G, dict(zip(nodes, residue_name)), "residue_name")
nx.set_node_attributes(
G, dict(zip(nodes, residue_number)), "residue_number"
)
nx.set_node_attributes(G, dict(zip(nodes, atom_type)), "atom_type")
nx.set_node_attributes(
G, dict(zip(nodes, element_symbol)), "element_symbol"
)
nx.set_node_attributes(G, dict(zip(nodes, coords)), "coords")
nx.set_node_attributes(G, dict(zip(nodes, b_factor)), "b_factor")
# TODO: include charge, line_idx for traceability?
if verbose:
print(nx.info(G))
print(G.nodes())
    return G

def mc(dataset):
"""
Modulus calculation.
Calculates sqrt(real^2 + imag^2)
"""
    return np.sqrt(dataset.real ** 2 + dataset.imag ** 2)

def _get_fields_list(data: Data) -> List[Field]:
"""Extracts all nested fields from the data as a flat list."""
result = []
def map_fn(value):
if isinstance(value, GraphPieceBase):
# pylint: disable=protected-access
tf.nest.map_structure(map_fn, value._data)
else:
result.append(value)
tf.nest.map_structure(map_fn, data)
    return result

def constructAdvancedQuery(qryRoot):
"""
Turns a qry object into a complex Q object by calling its helper and supplying the selected format's tree.
"""
return constructAdvancedQueryHelper(
qryRoot["searches"][qryRoot["selectedtemplate"]]["tree"]
    )

def slice_and_dice(text=text):
"""Strip the whitespace (newlines) off text at both ends,
split the text string on newline (\n).
Next check if the first char of each (stripped) line is lowercase,
if so split the line into words and append the last word to
    the results list. Make sure that you strip off any trailing
    exclamation marks (!) and dots (.). Return the results list."""
results = []
for line in text.strip().split('\n'):
line = line.strip()
if line[0].islower():
last_word = line.split()[-1]
if last_word[-1] == '.' or last_word[-1] == '!':
last_word = last_word[:-1]
results.append(last_word)
    return results

def solution(data):
""" Solution to the problem """
seats, first_visible_seats, dim_y, dim_x = preprocess(data)
solver = Simulation(seats, first_visible_seats, dim_y, dim_x)
    return solver.solve()

def dump_line_delimited_json(data, filename):
"""Dump a list of objects to the file as line-delimited JSON.
Parameters
----------
data : list
filename : str
"""
with open(filename, "w") as f_out:
for obj in data:
f_out.write(json.dumps(obj))
f_out.write("\n")
logger.debug("Dumped data to %s", filename) | 30,421 |
def downloadNameActives():
""" """
nameActivesURL = exp_config.name_actives_url
nameActivesRemote = exp_config.name_actives_remote
nameActivesFile = nameActivesURL.split('/')[-1]
output_folder = os.path.split(nameActivesRemote)[0]
#nameActivesRemote = os.path.join(output_folder, nameActivesFile)
os.system('./download_file.sh pl_ns ' + nameActivesURL + ' ' + nameActivesFile + ' ' + output_folder)
os.system('./cpNameActives_S3.sh pl_ns ' + nameActivesRemote)
os.system('./download_file.sh pl_lns ' + nameActivesURL + ' ' + nameActivesFile + ' ' + output_folder)
    os.system('./cpNameActives_S3.sh pl_lns ' + nameActivesRemote)

def test_next(circ_number, total):
"""Test total_matching_neighbours against known input and output"""
    assert total_matching_neighbours(circ_number) == total

def add_received_ip_tags(
rows: beam.pvalue.PCollection[Row],
ips_with_metadata: beam.pvalue.PCollection[Tuple[DateIpKey, Row]]
) -> beam.pvalue.PCollection[Row]:
"""Add tags for answer ips (field received.ip) - asnum, asname, http, cert
Args:
rows: PCollection of measurement rows
ips_with_metadata: PCollection of dated ips with geo metadata
Returns:
        PCollection of measurement rows with tag information added to the received.ip row
"""
# PCollection[Tuple[DateIpKey,Row]]
received_keyed_by_ip_and_date = (
rows | 'key by received ips and dates' >> beam.Map(
lambda row: (_make_date_received_ip_key(row), row)).with_output_types(
Tuple[DateIpKey, Row]))
# Iterable[PCollection[Tuple[DateIpKey,Row]]]
partition_by_domain = (
received_keyed_by_ip_and_date | 'partition by domain' >> beam.Partition(
_get_domain_partition, NUM_DOMAIN_PARTITIONS))
collections = []
for i in range(0, NUM_DOMAIN_PARTITIONS):
elements = partition_by_domain[i]
# PCollection[Tuple[Tuple[date,ip],Dict[input_name_key,List[Row]]]]
grouped_received_metadata_and_rows = (({
IP_METADATA_PCOLLECTION_NAME: ips_with_metadata,
ROWS_PCOLLECION_NAME: elements
}) | f'group by received ip keys {i}' >> beam.CoGroupByKey())
# PCollection[Row]
domain_rows_with_tags = (
grouped_received_metadata_and_rows | f'tag received ips {i}' >>
beam.FlatMapTuple(lambda k, v: merge_metadata_with_rows(
k, v, field='received')).with_output_types(Row))
collections.append(domain_rows_with_tags)
# PCollection[Row]
rows_with_tags = (
collections |
'merge domain collections' >> beam.Flatten().with_output_types(Row))
    return rows_with_tags

def set_selector(*args):
"""set_selector(sel_t selector, ea_t paragraph) -> int"""
    return _idaapi.set_selector(*args)

def other_features(tweet):
"""This function takes a string and returns a list of features.
These include Sentiment scores, Text and Readability scores,
as well as Twitter specific features"""
tweet_text = tweet["text"]
##SENTIMENT
sentiment = sentiment_analyzer.polarity_scores(tweet_text)
words = local_tokenizer.tokenize(tweet_text) #Get text only
num_chars = sum(len(w) for w in words) #num chars in words
num_chars_total = len(tweet_text)
num_terms = len(tweet_text.split())
num_words = len(words)
num_unique_terms = len(set([x.lower() for x in words]))
caps_count = sum([1 if x.isupper() else 0 for x in tweet_text])
caps_ratio = caps_count / num_chars_total
twitter_objs = count_twitter_objs(tweet_text) #Count #, @, and http://
num_media = 0
if "media" in tweet["entities"]:
num_media = len(tweet["entities"]["media"])
retweet = 0
if "rt" in words or "retweeted_status" in tweet:
retweet = 1
has_place = 1 if "coordinates" in tweet else 0
author = tweet["user"]
is_verified = 1 if author["verified"] else 0
log_followers = 0 if author["followers_count"] == 0 else np.log(author["followers_count"])
log_friends = 0 if author["friends_count"] == 0 else np.log(author["friends_count"])
features = [num_chars, num_chars_total, num_terms, num_words,
num_unique_terms, sentiment['neg'], sentiment['pos'],
sentiment['neu'], sentiment['compound'],
twitter_objs[2], twitter_objs[1],
twitter_objs[0], retweet, num_media,
is_verified,
# log_followers, log_friends,
# has_place,
caps_ratio,
]
    return features

def template(spec_fn):
"""
>>> from Redy.Magic.Classic import template
>>> import operator
>>> class Point:
>>> def __init__(self, p):
>>> assert isinstance(p, tuple) and len(p) is 2
>>> self.x, self.y = p
>>> def some_metrics(p: Point):
>>> return p.x + 2 * p.y
>>> @template
>>> def comp_on_metrics(self: Point, another: Point, op):
>>> if not isinstance(another, Point):
>>> another = Point(another)
>>> return op(*map(some_metrics, (self, another)))
>>> class Space(Point):
>>> @comp_on_metrics(op=operator.lt)
>>> def __lt__(self, other):
>>> ...
>>> @comp_on_metrics(op=operator.eq)
>>> def __eq__(self, other):
>>> ...
>>> @comp_on_metrics(op=operator.gt)
>>> def __gt__(self, other):
>>> ...
>>> @comp_on_metrics(op=operator.le)
>>> def __le__(self, other):
>>> ...
>>> @comp_on_metrics(op=operator.ge)
>>> def __ge__(self, other):
>>> ...
>>> p = Space((0, 1))
>>> p > (1, 2)
>>> p < (3, 4)
>>> p >= (5, 6)
>>> p <= (7, 8)
>>> p == (9, 10)
"""
def specify(*spec_args, **spec_kwds):
def call(_):
def inner(*args, **kwds):
return spec_fn(*spec_args, *args, **spec_kwds, **kwds)
return inner
return call
    return specify

def remove_build_storage_paths(paths):
"""
Remove artifacts from build media storage (cloud or local storage).
:param paths: list of paths in build media storage to delete
"""
storage = get_storage_class(settings.RTD_BUILD_MEDIA_STORAGE)()
for storage_path in paths:
log.info('Removing %s from media storage', storage_path)
        storage.delete_directory(storage_path)

def list_sum(*argv, **kwargs):
"""
    Sum the items in the provided lists
    Arguments:
        - argv: lists (or scalars) of items to sum
    Options:
        - type: list item type (int if omitted)
    Note: all types provided by this lib are supported
    Returns the sum as a number in 'type' format
"""
_type_name = kwargs.get('type', 'int')
_type = type_factory(_type_name)
_result: _type = 0
try:
for _list in argv:
if isinstance(_list, (list, tuple)):
_result += sum([_type(_item) for _item in _list])
else:
_number = _type(_list)
_result += _number
except (ValueError, IndexError) as e:
raise FrameworkError(f"ROBOT_MATH.LIST_SUM: {e}")
else:
        return _result

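# Usage sketch (assumes type_factory('int') returns the built-in int):
# scalars, lists and tuples may be mixed freely.
assert list_sum([1, 2], 3, (4, 5)) == 15
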
def _on_connect(dbapi_connection, **_):
"""Set MySQL mode to TRADITIONAL on databases that don't set this automatically.
Without this, MySQL will silently insert invalid values in the database, causing very long debugging sessions in the
long run.
http://www.enricozini.org/2012/tips/sa-sqlmode-traditional/
"""
LOG.debug('Setting SQL Mode to TRADITIONAL.')
    dbapi_connection.cursor().execute("SET SESSION sql_mode='TRADITIONAL'")

def locate_file(start_path, file_name):
""" locate filename and return file path.
searching will be recursive upward until current working directory.
Args:
start_path (str): start locating path, maybe file path or directory path
Returns:
        str: located file path.
Raises:
exceptions.FileNotFound: If failed to locate file.
"""
if os.path.isfile(start_path):
start_dir_path = os.path.dirname(start_path)
elif os.path.isdir(start_path):
start_dir_path = start_path
else:
raise exceptions.FileNotFound("invalid path: {}".format(start_path))
file_path = os.path.join(start_dir_path, file_name)
if os.path.isfile(file_path):
return file_path
# current working directory
if os.path.abspath(start_dir_path) in [os.getcwd(), os.path.abspath(os.sep)]:
raise exceptions.FileNotFound("{} not found in {}".format(file_name, start_path))
# locate recursive upward
    return locate_file(os.path.dirname(start_dir_path), file_name)

def image_upload(request):
"""
If it's a post, then upload the image or store it locally based on config. Otherwise, return
the html of the upload.html template.
"""
if request.method == 'POST':
image_file = request.FILES['image_file']
image_type = request.POST['image_type']
if settings.USE_S3:
if image_type == 'private':
upload = UploadPrivate(file=image_file)
else:
upload = Upload(file=image_file)
upload.save()
image_url = upload.file.url
else:
filesystem_storage = FileSystemStorage()
filename = filesystem_storage.save(image_file.name, image_file)
image_url = filesystem_storage.url(filename)
return render(request, 'upload.html', {
'image_url': image_url
})
    return render(request, 'upload.html')

def test_expected(mapper):
""" Test the expected method. """
# set addresses with hostname inside (IP addresses are not valid on purpose)
hostname = socket.gethostname()
node_lst = ['292.168.0.1', '292.168.0.2', hostname]
mapper.node_names = node_lst
# find expected address from list of aliases of the same address
alias_lst = ['292.168.0.1', '10.0.200.1', '66.51.20.300']
assert mapper.expected(alias_lst) == '292.168.0.1'
# second try
random.shuffle(alias_lst)
assert mapper.expected(alias_lst) == '292.168.0.1'
# check with list containing no corresponding entry
alias_lst = ['292.168.0.3', '10.0.200.1', '66.51.20.300']
    assert mapper.expected(alias_lst) is None

def werbo_c(topics, word_embedding_model, weight=0.9, topk=10):
"""
computes Word embedding based RBO - centroid
Parameters
----------
topics: a list of lists of words
word_embedding_model: word embedding space in gensim word2vec format
    weight: p (float), default 0.9: Weight of each agreement at depth d:
    p**(d-1). When set to 1.0, there is no weight and the rbo reduces to average overlap.
topk: top k words on which the topic diversity will be computed
"""
if topk > len(topics[0]):
raise Exception('Words in topics are less than topk')
else:
collect = []
for list1, list2 in combinations(topics, 2):
word2index = get_word2index(list1, list2)
index2word = {v: k for k, v in word2index.items()}
indexed_list1 = [word2index[word] for word in list1]
indexed_list2 = [word2index[word] for word in list2]
rbo_val = werc(indexed_list1[:topk], indexed_list2[:topk], p=weight,
index2word=index2word, word2vec=word_embedding_model, norm=False)[2]
collect.append(rbo_val)
        return np.mean(collect)

def G_t(t, G, tau, Ge = 0.0):
"""this function returns the relaxation modulus in time"""
G_rel = np.zeros(np.size(t))
if np.size(G) == 1: #the model is the SLS
for i in range(np.size(t)):
G_rel[i] = Ge + G*np.exp(-t[i]/tau)
else: #the model has more than one arm
for i in range(np.size(t)):
G_rel[i] = Ge + sum(G[:]*np.exp(-t[i]/tau[:]))
    return G_rel

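# Worked example: a single-arm SLS with Ge=1.0, G=2.0, tau=0.5 starts at
# G(0) = Ge + G = 3.0 and decays towards Ge.
import numpy as np

t = np.array([0.0, 0.5, 5.0])
G_rel = G_t(t, G=2.0, tau=0.5, Ge=1.0)
# G_rel[0] == 3.0; G_rel[1] == 1 + 2*exp(-1) ~ 1.736; G_rel[2] ~ 1.0
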
def iou_set(set1, set2):
"""Calculate iou_set """
union = set1.union(set2)
    return len(set1.intersection(set2)) / len(union) if union else 0

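# Quick check: {1, 2} and {2, 3} share one of three distinct elements.
assert iou_set({1, 2}, {2, 3}) == 1 / 3
assert iou_set(set(), set()) == 0  # an empty union falls back to 0
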
def time_measured(fkt):
"""
Decorator to measure execution time of a function
It prints out the measured time
Parameters
----------
fkt : function
function that shall be measured
Returns
-------
None
"""
def fkt_wrapper(*args, **kwargs):
t1 = time.time()
return_vals = fkt(*args, **kwargs)
t2 = time.time()
print("Job needed: {} seconds".format(t2-t1))
return return_vals
    return fkt_wrapper

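# Usage: decorating a function prints its wall-clock time on every call.
@time_measured
def slow_add(a, b):
    time.sleep(0.1)
    return a + b

result = slow_add(1, 2)  # prints "Job needed: 0.1... seconds" and returns 3
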
def test_dict():
"""Test dict accessor works."""
js = pw.line(range(3)).dict
expected = {
'layout': {},
'data': [
{
'mode': 'lines+markers',
'marker': dict(size=6),
'text': "",
'y': [0, 1, 2],
'x': [0, 1, 2],
'yaxis': 'y',
'type': 'scatter',
}
],
}
    compare_figs(js, expected)

def test_find_codon(find_codon):
"""
A function to test another function that looks for a codon within
a coding sequence.
"""
synapto_nuc = ("ATGGAGAACAACGAAGCCCCCTCCCCCTCGGGATCCAACAACAACGAGAACAACAATGCAGCCCAGAAGA"
"AGCTGCAGCAGACCCAAGCCAAGGTGGACGAGGTGGTCGGGATTATGCGTGTGAACGTGGAGAAGGTCCT"
"GGAGCGGGACCAGAAGCTATCGGAACTGGGCGAGCGTGCGGATCAGCTGGAGCAGGGAGCATCCCAGTTC"
"GAGCAGCAGGCCGGCAAGCTGAAGCGCAAGCAATGGTGGGCCAACATGAAGATGATGATCATTCTGGGCG"
"TGATAGCCGTTGTGCTGCTCATCATCGTTCTGGTGTCGCTTTTCAATTGA")
assert find_codon('ATG', synapto_nuc) == 0
assert find_codon('AAT', synapto_nuc) == 54
assert find_codon('TGT', synapto_nuc) == -1
assert find_codon('TGC', synapto_nuc) == -1
    return None

def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(main, ["show-servers"])
assert result.exit_code == 0
help_result = runner.invoke(main, ["--help"])
assert help_result.exit_code == 0
assert "--help Show this message and exit." in help_result.output | 30,440 |
def add_keys3(a, A, b, B):
"""
    Compute a*A + b*B.
    :param a: scalar
    :param A: point
    :param b: scalar
    :param B: point
    :return: the resulting point a*A + b*B
"""
    return tcry.xmr_add_keys3_vartime_r(a, A, b, B)

def complement(csv_files, output_prefix, delimiter=',', include_cols=None,
target=None):
"""Remove all shared rows from a list of csv/tsv files."""
dfs, headers = read_tables(csv_files, delimiter)
dfs = remove_shared_rows(dfs, include_cols)
output_filenames = [local.path(output_prefix + f.name) for f in csv_files]
if target:
target_filename = local.path(output_prefix + target.name)
target_index = output_filenames.index(target_filename)
write_table_with_header(
df=dfs[target_index],
header=headers[target_index],
filename=target_filename,
sep=delimiter)
else:
for df, header, filename in zip(dfs, headers, output_filenames):
            write_table_with_header(df, header, filename, sep=delimiter)

def cause_state(value):
"""
Usage::
{{ value|cause_state}}
"""
    try:
        if isinstance(value, str):
            value = eval(value)
        if Bushfire.CAUSE_STATE_POSSIBLE == value:
            return Bushfire.CAUSE_STATE_CHOICES[Bushfire.CAUSE_STATE_POSSIBLE - 1][1]
        return Bushfire.CAUSE_STATE_CHOICES[Bushfire.CAUSE_STATE_KNOWN - 1][1]
    except Exception:
        return None

def ReadRawSAData(DataDirectory, fname_prefix):
"""
This function reads in the raw SA data to a pandas dataframe
Args:
DataDirectory: the data directory
fname_prefix: the file name prefix
Returns:
pandas dataframe with the raw SA data
Author: FJC
"""
# get the csv filename
fname_suffix = "_SAvertical.csv"
fname = fname_prefix+fname_suffix
df = pd.read_csv(DataDirectory+fname)
    return df

def not_valid_score(scores: List[int]):
"""Checks if the set of estimations is ambiguous (all scores are different)."""
return True if len(np.unique(scores)) == len(scores) else False | 30,445 |
def sendEmail(sendTo,textfile,logfile,img):
"""Retrieves the error.txt and an the taken image and sends an email
with those attached"""
# Open a plain text file for reading
msg = MIMEMultipart()
# Read the text file <-- Error msg from OCR module
if(textfile!=""):
fp = open(textfile, 'rb')
text = MIMEText(fp.read())
fp.close()
msg.attach(text)
if(logfile=='y'):
filename = "log.txt"
fp = open(filename)
log = MIMEText(fp.read())
fp.close()
log.add_header('Content-Disposition', 'attachment', filename=filename)
msg.attach(log)
msg['Subject'] = 'An event has occurred at the MS'
msg['From'] = "mass.checker@gmail.com"
msg['To'] = sendTo
# Load screenshot and attach to email
fp = open(img, 'rb')
img = MIMEImage(fp.read())
fp.close()
msg.attach(img)
# Send the message
server = smtplib.SMTP('smtp.gmail.com',587)
server.starttls()
server.login("mass.checker@gmail.com", "massspecchecker1234")
server.sendmail("mass.checker@gmail.com", sendTo, msg.as_string())
    server.quit()

def readd(
archive: str,
# uid: str = '{blake2b}', # option disabled for now
config: str = '',
hashes='blake2b',
v_hashes='dhash',
metadata='',
exts: str = '',
limit: int = 0,
thumb_sz: int = 64,
thumb_qual: int = 70,
thumb_type: str = 'webp',
dbname: str = 'imgdb.htm',
workers: int = 4,
shuffle: bool = False,
silent: bool = False,
verbose: bool = False,
):
""" This is a IRREVERSIBLE rename operation, be CAREFUL!
Be extra careful if changing the default UID flag, because you CAN OVERWRITE and LOSE your images!
This will rename and move all the images from the archive folder,
back into the archive folder, but with different names depending on the hash and UID.
This is useful to normalize your DB, if you want all your images to have the same thumb size,
same hashes, same visual hashes, same metadata.
Also useful if the already imported images don't have enough props, maybe you want to calculate
all the visual-hashes for all the images.
It's also possible that some images from the archive don't have the same hash anymore,
because they were edited: eg by updating some XMP properties like rating stars, category or description.
"""
add(
archive,
config=config,
operation='move',
archive=archive,
hashes=hashes,
v_hashes=v_hashes,
metadata=metadata,
exts=exts,
limit=limit,
thumb_sz=thumb_sz,
thumb_qual=thumb_qual,
thumb_type=thumb_type,
dbname=dbname,
workers=workers,
deep=True,
force=True,
shuffle=shuffle,
silent=silent,
verbose=verbose,
    )

def train(
train_op,
logdir,
log_every_n_steps=1,
graph=None,
master='',
is_chief=True,
global_step=None,
number_of_steps=None,
init_op=_USE_DEFAULT,
init_feed_dict=None,
init_fn=None,
summary_op=_USE_DEFAULT,
save_summaries_secs=600,
startup_delay_steps=0,
saver=None,
save_interval_secs=600,
sync_optimizer=None):
"""Runs a training loop using a TensorFlow supervisor.
When the sync_optimizer is supplied, gradient updates are applied
    synchronously. Otherwise, gradient updates are applied asynchronously.
Args:
train_op: A `Tensor` that, when executed, will apply the gradients and
return the loss value.
logdir: the directory where training logs are written to.
    log_every_n_steps: The frequency, in terms of global steps, at which the loss
      and global step are logged.
graph: The graph to pass to the supervisor. If no graph is supplied the
default graph is used.
master: The BNS name of the tensorflow master.
is_chief: Specifies whether or not the training is being run by the primary
replica during replica training.
global_step: The `Tensor` representing the global step. If left as `None`,
then slim.variables.get_or_create_global_step() is used.
number_of_steps: The max number of gradient steps to take during training.
If the value is left as None, training proceeds indefinitely.
init_op: The initialization operation.
init_feed_dict: A feed dictionary to use when executing the `init_op`.
init_fn: An optional callable to be executed after `init_op` is called. The
callable must accept one argument, the session being initialized.
summary_op: The summary operation.
save_summaries_secs: How often, in seconds, to save summaries.
startup_delay_steps: The number of steps to wait for before beginning. Note
that this must be 0 if a sync_optimizer is supplied.
saver: Saver to save checkpoints. If none, a default one will be created
and used.
save_interval_secs: How often, in seconds, to save the model to `logdir`.
sync_optimizer: an instance of tf.train.SyncReplicasOptimizer. If the
argument is supplied, gradient updates will be synchronous. If left as
`None`, gradient updates will be asynchronous.
Returns:
the value of the loss function after training.
Raises:
ValueError: if `train_op` is empty or if `startup_delay_steps` is
non-zero when `sync_optimizer` is supplied, or if `number_of_steps` is
negative.
"""
if train_op is None:
raise ValueError('train_op cannot be None.')
if sync_optimizer and startup_delay_steps > 0:
raise ValueError(
'startup_delay_steps must be zero when sync_optimizer is supplied.')
if number_of_steps is not None and number_of_steps <= 0:
raise ValueError(
'`number_of_steps` must be either None or a positive number.')
graph = graph or ops.get_default_graph()
if global_step is None:
global_step = variables.get_or_create_global_step()
saver = saver or tf_saver.Saver()
if init_op is None:
init_op = control_flow_ops.group(
tf_variables.initialize_all_variables(),
tf_variables.initialize_local_variables(),
tf_variables.initialize_all_tables())
if summary_op == _USE_DEFAULT:
summary_op = logging_ops.merge_all_summaries()
local_init_op = None
cleanup_op = None
if is_chief and sync_optimizer:
if not isinstance(sync_optimizer,
sync_replicas_optimizer.SyncReplicasOptimizer):
raise ValueError(
'`sync_optimizer` must be a tf.train.SyncReplicasOptimizer')
# Need to create these BEFORE the supervisor finalizes the graph:
local_init_op = sync_optimizer.get_init_tokens_op()
chief_queue_runner = sync_optimizer.get_chief_queue_runner()
cleanup_op = sync_optimizer.get_clean_up_op()
if number_of_steps:
should_stop_op = math_ops.greater_equal(global_step, number_of_steps)
else:
should_stop_op = constant_op.constant(False)
should_log_op = math_ops.equal(
math_ops.mod(global_step, log_every_n_steps), 0)
sv = supervisor.Supervisor(
graph=graph,
is_chief=is_chief,
logdir=logdir,
init_op=init_op,
init_feed_dict=init_feed_dict,
local_init_op=local_init_op,
summary_op=summary_op,
global_step=global_step,
saver=saver,
save_summaries_secs=save_summaries_secs,
save_model_secs=save_interval_secs,
init_fn=init_fn)
with sv.managed_session(master, start_standard_services=False) as sess:
if is_chief:
sv.start_standard_services(sess)
elif not is_chief and startup_delay_steps > 0:
_wait_for_step(sess, global_step,
                     min(startup_delay_steps, number_of_steps or sys.maxsize))
sv.start_queue_runners(sess)
if is_chief and sync_optimizer:
sv.start_queue_runners(sess, [chief_queue_runner])
total_loss = train_loop(
sv, sess, train_op, should_stop_op, should_log_op, global_step,
cleanup_op)
# This waits for service threads to finish.
sv.Stop()
if sv.is_chief:
logging.info('Finished training! Saving model to disk.')
sv.saver.save(sess, sv.save_path, global_step=sv.global_step)
    return total_loss

def dlopen_enclave_test(
name,
**kwargs):
"""Thin wrapper around enclave test, adds 'asylo-dlopen' tag and necessary linkopts
Args:
name: enclave_test name
**kwargs: same as enclave_test kwargs
"""
asylo = internal.package()
enclave_test(
name,
backends = [asylo + "/platform/primitives/dlopen"],
**kwargs
    )

def biom_to_pandas(biom_otu):
"""
Convert data from biom to SparseDataFrame (pandas) for easy access
:param biom_otu: Table
:rtype: DataFrame
"""
tmp_m = biom_otu.matrix_data
df = [SparseSeries(tmp_m[i].toarray().ravel()) for i in numpy.arange(tmp_m.shape[0])]
    return (SparseDataFrame(df, index=biom_otu.ids('observation'), columns=biom_otu.ids('sample')).to_dense())

def __request(logger, cache, method, url, headers, body, keep_alive):
"""Returns file content for client request"""
if method == "HEAD" or method == "GET":
if url.startswith("/private/"):
base64_auth = base64.b64encode(
(settings.PRIVATE_USERNAME + ":" + settings.PRIVATE_PASSWORD).encode("utf-8"))
if "Authorization" not in headers:
return None, None, None, None, 401, keep_alive
auth_method, auth_credentials = headers["Authorization"].split()
auth_credentials = auth_credentials.encode("utf-8")
if auth_credentials != base64_auth:
return None, None, None, None, 401, keep_alive
if url == "/":
url = "/index.html"
file_path = settings.HTDOCS_PATH + url
# Check if requested file is on the cache
file_content, file_lastmodified = cache.get(file_path)
# Check if browser cache is valid
if __is_browser_cache_valid(file_lastmodified, method, headers):
return None, None, None, None, 304, keep_alive
file_type, file_encoding = mimetypes.guess_type(file_path, True)
if file_content is None:
try:
# Simulate disk loading time of 100ms for cache system
time.sleep(settings.SIMULATE_DISK_DELAY)
# Get last modification time of the file
file_lastmodified = os.path.getmtime(file_path)
# Check if browser cache is valid
if __is_browser_cache_valid(file_lastmodified, method, headers):
return None, None, None, None, 304, keep_alive
# Return file content
with open(file_path, "rb") as file:
file_content = file.read()
# Update the cache with the newly opened file
cache.update(file_path, file_content, file_lastmodified)
except (FileNotFoundError, OSError):
return None, None, None, None, 404, keep_alive # Not Found
return file_content, file_type, file_encoding, file_lastmodified,\
"HEAD" if method == "HEAD" else 200, keep_alive # HEAD / OK
elif method == "POST":
if headers["Content-Type"] != "application/x-www-form-urlencoded":
return None, None, None, None, 415, keep_alive # Unsupported Media Type
# Get parameters from request
response = {}
if len(body) > 0:
parameters = body.split("&")
for parameter in parameters:
key, value = parameter.split("=")
response[key] = value
while True:
# Generate a random unique identifier
post_uid = str(uuid.uuid4())
post_file_path = "%s/%s.json" % (settings.UPLOADED_PATH, post_uid)
# Check if the generate identifier already exists and if so regenerate it
if not os.path.exists(post_file_path):
break
# Create the file and open it
with open(post_file_path, "w") as post_file:
logger.trace().info("Created %s file" % post_file_path)
# Write post data to the file
json.dump(response, post_file, indent=4)
response["uid"] = post_uid
return json.dumps(response).encode(settings.ENCODING),\
"application/json", "utf-8", None, 201, keep_alive # Created
elif method == "DELETE":
delete_path = "%s/%s.json" % (settings.UPLOADED_PATH, url)
# Check if specified file exists
if os.path.exists(delete_path):
# Delete file
os.remove(delete_path)
logger.trace().info("Deleted %s file" % delete_path)
# Return response to client
return None, None, None, None, 204, keep_alive
else:
# Return response to client
return None, None, None, None, 404, keep_alive
elif method == "PUT":
if headers["Content-Type"] != "application/x-www-form-urlencoded":
return None, None, None, None, 415, keep_alive # Unsupported Media Type
put_path = "%s/%s.json" % (settings.UPLOADED_PATH, url)
# Check if specified file exists
if not os.path.exists(put_path):
# Return response to client
return None, None, None, None, 404, keep_alive
# Get parameters from request
put_data = {}
if len(body) > 0:
parameters = body.split("&")
for parameter in parameters:
key, value = parameter.split("=")
put_data[key] = value
# Open the file
with open(put_path, "w") as put_file:
# Write post data to the file
json.dump(put_data, put_file, indent=4)
logger.trace().info("Updated %s file" % put_path)
# Return response to client
return None, None, None, None, 204, keep_alive
elif method == "PATCH":
if headers["Content-Type"] != "application/x-www-form-urlencoded":
return None, None, None, None, 415, keep_alive # Unsupported Media Type
patch_path = "%s/%s.json" % (settings.UPLOADED_PATH, url)
# Check if specified file exists
if not os.path.exists(patch_path):
# Return response to client
return None, None, None, None, 404, keep_alive
try:
# Read file json data
with open(patch_path, "r+") as patch_file:
patch_file_data = json.load(patch_file)
# Update/create json file with passed parameters
if len(body) > 0:
parameters = body.split("&")
for parameter in parameters:
key, value = parameter.split("=")
patch_file_data[key] = value
# Delete file content
patch_file.seek(0)
patch_file.truncate()
# Write updated data to the file
patch_file.write(json.dumps(patch_file_data, indent=4))
logger.trace().info("Updated %s file" % patch_file)
except json.JSONDecodeError as error:
logger.trace().error("An error has occurred while processing PATCH request due to malformed JSON: %s" %
error)
# Return response to client
return None, None, None, None, None, keep_alive
# Return response to client
return None, None, None, None, 204, keep_alive
else:
        return None, None, None, None, 501, keep_alive

def is_classmethod(method: t.Callable):
"""
A python method is a wrapper around a function that also
holds a reference to the class it is a method of.
When bound, it also holds a reference to the instance.
@see https://stackoverflow.com/questions/12935241/python-call-instance-method-using-func
:param method:
"""
# print(instance.a_method) # Bounded
# print(AClass.a_method) # Unbounded
bound_to: t.Type = getattr(method, '__self__', None)
# Bound to: <class '__main__.AClass'>, False
# If double decorated with staticmethod and classmethod
# Bound to: <__main__.AClass object at 0x7ffb18699fd0>, True
if not isinstance(bound_to, type):
# must be bound to a class
return False
name: str = method.__name__
# MRO = Method resolution order
# E.g. Class A: pass
# A.__mro__
# Output: (<class '__main__.AClass'>, <class 'object'>)
for cls in bound_to.__mro__:
# Get decorator
descriptor = vars(cls).get(name)
if descriptor is not None:
return isinstance(descriptor, classmethod)
    return False

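# Usage sketch: only the classmethod-decorated attribute reports True.
class AClass:
    def plain(self): ...

    @classmethod
    def cls_method(cls): ...

    @staticmethod
    def static_method(): ...

assert is_classmethod(AClass.cls_method)
assert not is_classmethod(AClass.plain)
assert not is_classmethod(AClass.static_method)
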
def masked_accuracy(y_true, y_pred):
"""An accuracy function that masks based on targets (value: 0.5)
Args:
y_true: The true training labels
y_pred: The predicted labels
Returns:
float: the masked accuracy
"""
a = kb.sum(kb.cast(kb.equal(y_true, kb.round(y_pred)), kb.floatx()))
c = kb.sum(kb.cast(kb.not_equal(y_true, 0.5), kb.floatx()))
acc = a / c
    return acc

def get_shap_interaction_values(x_df, explainer):
"""
Compute the shap interaction values for a given dataframe.
Also checks if the explainer is a TreeExplainer.
Parameters
----------
x_df : pd.DataFrame
DataFrame for which will be computed the interaction values using the explainer.
explainer : shap.TreeExplainer
explainer object used to compute the interaction values.
Returns
-------
shap_interaction_values : np.ndarray
Shap interaction values for each sample as an array of shape (# samples x # features x # features).
"""
if not isinstance(explainer, shap.TreeExplainer):
raise ValueError(f"Explainer type ({type(explainer)}) is not a TreeExplainer. "
f"Shap interaction values can only be computed for TreeExplainer types")
shap_interaction_values = explainer.shap_interaction_values(x_df)
# For models with vector outputs the previous function returns one array for each output.
# We sum the contributions here.
if isinstance(shap_interaction_values, list):
shap_interaction_values = np.sum(shap_interaction_values, axis=0)
    return shap_interaction_values

def plot_roc(
y_true: np.ndarray,
y_probas: np.ndarray,
labels: Optional[dict] = None,
classes_to_plot: Optional[list] = None,
plot_micro: Optional[bool] = False,
plot_macro: Optional[bool] = False,
title: str = "ROC Curve",
ax: Optional[matplotlib.axes.Axes] = None,
figsize: Optional[tuple] = None,
cmap: Union[str, matplotlib.colors.Colormap] = "Blues",
title_fontsize: Union[str, int] = "large",
text_fontsize: Union[str, int] = "medium",
) -> matplotlib.axes.Axes:
"""Plot ROC curve.
Parameters
----------
y_true : numpy.ndarray, (n_samples,)
Actual target values.
y_probas : numpy.ndarray, (n_samples, n_classes)
Predicted probabilities of each class.
labels: Optional[dict]
labels for y.
classes_to_plot : Optional[list]
Classes for which the ROC curve should be plotted.
If the class doesn't exists it will be ignored.
If ``None``, all classes will be plotted
(the default is ``None``).
plot_micro : Optional[bool]
Plot micro averaged ROC curve (the default is False)
plot_macro : Optional[bool]
Plot macro averaged ROC curve (the default is False)
title : str
Title for the ROC.
ax: Optional[`matplotlib.axes.Axes`] object
The axes on which plot was drawn.
figsize : Optional[tuple]
Size of the plot.
cmap : Union[str, `matplotlib.colors.Colormap`]
Colormap used for plotting.
https://matplotlib.org/tutorials/colors/colormaps.html
title_fontsize : Union[str, int]
Use 'small', 'medium', 'large' or integer-values
(the default is 'large')
text_fontsize : Union[str, int]
Use 'small', 'medium', 'large' or integer-values
(the default is 'medium')
Returns
-------
`matplotlib.axes.Axes` object
The axes on which plot was drawn.
References
----------
.. [1] https://github.com/reiinakano/scikit-plot
"""
classes = np.unique(y_true)
if not classes_to_plot:
classes_to_plot = classes
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
ax.set_title(label=title, fontsize=title_fontsize)
fpr_dict = {}
tpr_dict = {}
indices_to_plot = np.isin(classes, classes_to_plot)
for i, to_plot in enumerate(indices_to_plot):
fpr_dict[i], tpr_dict[i], _ = mt.roc_curve(y_true, y_probas[:, i], pos_label=classes[i])
if to_plot:
roc_auc = mt.auc(fpr_dict[i], tpr_dict[i])
color = plt.cm.get_cmap(cmap)(float(i) / len(classes))
class_name = labels[classes[i]] if labels else classes[i]
ax.plot(
fpr_dict[i],
tpr_dict[i],
lw=2,
color=color,
label=f"ROC curve of class {class_name} (AUC= {roc_auc:.2f})",
)
if plot_micro:
binarized_y_true = label_binarize(y_true, classes=classes)
if len(classes) == 2:
binarized_y_true = np.hstack((1 - binarized_y_true, binarized_y_true))
fpr, tpr, _ = mt.roc_curve(binarized_y_true.ravel(), y_probas.ravel())
        roc_auc = mt.auc(fpr, tpr)
ax.plot(
fpr,
tpr,
label=f"micro-average ROC curve (AUC = {roc_auc:.2f})",
color="deeppink",
linestyle=":",
linewidth=4,
)
if plot_macro:
# Compute macro-average ROC curve and it's area.
# First aggregate all the false positive rates
all_fpr = np.unique(np.concatenate([fpr_dict[i] for i, _ in enumerate(classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i, _ in enumerate(classes):
mean_tpr += interp(all_fpr, fpr_dict[i], tpr_dict[i])
# Finally average it and compute AUC
mean_tpr /= len(classes)
roc_auc = mt.auc(all_fpr, mean_tpr)
ax.plot(
all_fpr,
mean_tpr,
label=f"macro-average ROC curve (AUC = {roc_auc:.2f})",
color="navy",
linestyle=":",
linewidth=4,
)
ax.plot([0, 1], [0, 1], "k--", lw=2)
ax.set(xlim=[0.0, 1.0], ylim=[0.0, 1.05])
ax.set_xlabel(f"False Positive Rate", fontsize=text_fontsize)
ax.set_ylabel(f"True Positive Rate", fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
ax.legend(loc="lower right", fontsize=text_fontsize)
    return ax

def get_cpu_info(host_info=None):
    """populate host_info with cpu information"""
    if host_info is None:  # avoid a shared mutable default argument
        host_info = {}
if sys.platform == 'darwin':
host_info["host_cpu_model"] = \
popen(["sysctl", "-n", "machdep.cpu.brand_string"])
host_info["host_cpu_cores"] = \
popen(["sysctl", "-n", "machdep.cpu.core_count"])
host_info["host_logical_cpus"] = \
popen(["sysctl", "-n", "hw.logicalcpu"])
num_processor_result = popen(["system_profiler", "SPHardwareDataType"])
for x in num_processor_result.splitlines():
if x.strip().startswith("Number of Processors"):
host_info["host_physical_cpus"] = \
x.strip().split(":")[1].strip()
break
else:
with open(os.path.join(psutil.PROCFS_PATH, "cpuinfo")) as f:
nb_cpu = 0
nb_cores = 0
nb_units = 0
for p in f.readlines():
if ':' in p:
x, y = map(lambda x: x.strip(), p.split(':', 1))
if x.startswith("physical id"):
if nb_cpu < int(y):
nb_cpu = int(y)
if x.startswith("cpu cores"):
if nb_cores < int(y):
nb_cores = int(y)
if x.startswith("processor"):
if nb_units < int(y):
nb_units = int(y)
if x.startswith("model name"):
model = y
nb_cpu += 1
nb_units += 1
host_info["host_cpu_model"] = model
host_info["host_physical_cpus"] = str(nb_cpu)
host_info["host_cpu_cores"] = str(nb_cores)
host_info["host_logical_cpus"] = str(nb_units)
return host_info | 30,456 |
def _harmonize_input(data):
"""Harmonize different types of inputs by turning all inputs into dicts."""
if isinstance(data, (pd.DataFrame, pd.Series)) or callable(data):
data = {0: data}
elif isinstance(data, dict):
pass
else:
raise ValueError(
"Moments must be pandas objects or dictionaries of pandas objects."
)
return data | 30,457 |
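A quick illustration of the harmonization above (a sketch; pd is pandas):

import pandas as pd

s = pd.Series([1.0, 2.0])
assert _harmonize_input(s)[0] is s             # single objects are wrapped under key 0
assert _harmonize_input({"a": s})["a"] is s    # dicts pass through unchanged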
def setup_testCase(self: unittest.TestCase, stop_when_sick: bool, login_success: bool, is_sick: bool) -> None:
"""
    The setUp functions of several TestCases share common code, so it is extracted here.
    :param self: the test class instance
    :param stop_when_sick: whether to enable the stop_when_sick feature
    :param login_success: whether the (mocked) login succeeds
    :param is_sick: whether to simulate user data that reports sickness
:return: None
"""
self.config = generate_config(self, stop_when_sick=stop_when_sick)
self.sess = MockRequestsSession()
register_respond_to_mock(self.sess, login_success=login_success, is_sick=is_sick)
    # the type error here is deliberately ignored
self.prog = Program(ProgramUtils(PureUtils()), self.sess, self.config)
self.prog.main() | 30,458 |
def herd_closest_to_cluster(
x: np.ndarray,
y: np.ndarray,
t: np.ndarray,
features: np.ndarray,
nb_per_class: np.ndarray
) -> np.ndarray:
"""Herd the samples whose features is the closest to their class mean.
:param x: Input data (images, paths, etc.)
:param y: Labels of the data.
:param t: Task ids of the data.
:param features: Features of shape (nb_samples, nb_dim).
:param nb_per_class: Number of samples to herd per class.
:return: The sampled data x, y, t.
"""
if len(features.shape) != 2:
raise ValueError(f"Expected features to have 2 dimensions, not {len(features.shape)}d.")
indexes = []
for class_id in np.unique(y):
class_indexes = np.where(y == class_id)[0]
class_features = features[class_indexes]
        class_mean = np.mean(class_features, axis=0, keepdims=True)  # (1, nb_dim): mean over samples
dist_to_mean = np.linalg.norm(class_mean - class_features, axis=1)
tmp_indexes = dist_to_mean.argsort()[:nb_per_class]
indexes.append(class_indexes[tmp_indexes])
indexes = np.concatenate(indexes)
return x[indexes], y[indexes], t[indexes] | 30,459 |
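A small sketch of calling the herding above on synthetic data (passing a plain int for nb_per_class, which the slicing accepts despite the ndarray annotation):

import numpy as np

x = np.arange(6)                     # stand-in for images/paths
y = np.array([0, 0, 0, 1, 1, 1])     # two classes, three samples each
t = np.zeros(6, dtype=int)
features = np.random.rand(6, 16)     # (nb_samples, nb_dim)
x2, y2, t2 = herd_closest_to_cluster(x, y, t, features, nb_per_class=2)
assert len(x2) == 4                  # two herded samples per class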
def main():
"""
This code, which must run on the EV3 ROBOT:
    1. Makes the EV3 robot do various things.
2. Communicates via MQTT with the GUI code that runs on the LAPTOP.
"""
real_thing()
# run_test_ir(0)
# run_test_arm()
# run_test_camera() | 30,460 |
def generate_reference_config(config_entries: List[ConfigEntry]) -> dict:
"""
Generates a dictionary containing the expected config tree filled with default and example values
:return: a dictionary containing the expected config tree
"""
return config_entries_to_dict(config_entries, use_examples=True) | 30,461 |
def ShowStateTransition(dot_file, event_log_file, state_log_file, node_map_file):
"""
Take a dot file and log file and animate the state transition
"""
from matplotlib.pyplot import gca, figure
from numpy import genfromtxt
time_mult = __disp_defs__.TIME_MULTIPLIER
figure(figsize=__disp_defs__.FIGSIZE)
pos = __draw_state_nodes__(dot_file)
tran_list = __gen_tran__(event_log_file)
state_list = __gen_states__(state_log_file)
node_map = dict(genfromtxt(node_map_file, dtype=None))
node_patches, node_text = __generate_tran_nodes__(pos)
current_ax = gca()
__draw_state_transition__(tran_list, state_list, node_map, node_patches, node_text,
current_ax, time_mult) | 30,462 |
def reshape_tensor2list(tensor, n_steps, n_input):
"""Reshape tensor [?, n_steps, n_input] to lists of n_steps items with [?, n_input]
"""
# Prepare data shape to match `rnn` function requirements
# Current data input shape (batch_size, n_steps, n_input)
# Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
#
# Permuting batch_size and n_steps
tensor = tf.transpose(tensor, perm=[1, 0, 2], name='transpose')
# Reshaping to (n_steps*batch_size, n_input)
tensor = tf.reshape(tensor, [-1, n_input], name='reshape')
# Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    tensor = tf.split(tensor, n_steps, axis=0, name='split')  # TF >= 1.0 signature
return tensor | 30,463 |
def test_flow_1():
"""
This MUST NOT fail
We simply add two blocks, each one with only the coinbase transaction
"""
coinbase_0 = Tx(
[TxIn(OutPoint("00" * 32, 0), Script.from_hex("00030000aa"))],
[TxOut(10 ** 10, Script())],
)
origin_transactions = [coinbase_0]
origin_header = BlockHeader("00" * 32, generate_merkle_root(origin_transactions), 0)
origin = Block(origin_header, origin_transactions)
blockchain = Blockchain()
blockchain._add_block(origin)
assert blockchain.last_block_pow == origin.header.pow
coinbase_1 = Tx(
[TxIn(OutPoint("00" * 32, 0), Script.from_hex("00030000bb"))],
[TxOut(10 ** 10, Script())],
)
block_1_header = BlockHeader(
origin.header.pow, generate_merkle_root([coinbase_1]), 0
)
block_1 = Block(block_1_header, [coinbase_1])
blockchain._add_block(block_1)
assert blockchain.last_block_pow == block_1.header.pow
reset_blockchain() | 30,464 |
def parse_arguments() -> dict:
"""Parse sys.argv arguments.
Args:
None
Returns:
A dictionary of arguments.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent("""
additional information:
wtpython acts as a substitute for python. Simply add `wt` to the beginning
of the line and call your program with all the appropriate arguments:
$ wtpython [OPTIONS] <script.py> <arguments>"""),
)
parser.add_argument(
"-n",
"--no-display",
action="store_true",
default=False,
help="Run without display",
)
parser.add_argument(
"-c",
"--copy-error",
action="store_true",
default=False,
help="Copy error to clipboard",
)
parser.add_argument(
"--clear-cache",
action="store_true",
default=False,
help="Clear StackOverflow cache",
)
parser.add_argument(
"args",
nargs="*",
help="Arguments normally passed to wtpython",
)
opts = vars(parser.parse_args())
    if not opts['args']:
        parser.error("Please specify a script to run")  # parser.error() exits with status 2
    if not os.path.isfile(opts['args'][0]):
        parser.error(f"{opts['args'][0]} is not a file")
    return opts | 30,465 |
def squeeze(xray_obj, dimensions, dimension=None):
"""Squeeze the dimensions of an xray object."""
if dimension is None:
dimension = [d for d, s in dimensions.iteritems() if s == 1]
else:
if isinstance(dimension, basestring):
dimension = [dimension]
if any(dimensions[k] > 1 for k in dimension):
raise ValueError('cannot select a dimension to squeeze out '
'which has length greater than one')
return xray_obj.indexed(**{dim: 0 for dim in dimension}) | 30,466 |
def write(settings_path, settings_data, merge=True):
"""Write data to a settings file.
:param settings_path: the filepath
:param settings_data: a dictionary with data
:param merge: boolean if existing file should be merged with new data
"""
settings_path = Path(settings_path)
if settings_path.exists() and merge: # pragma: no cover
existing = DynaconfDict()
load(existing, str(settings_path))
object_merge(existing, settings_data)
with io.open(
str(settings_path),
"w",
encoding=default_settings.ENCODING_FOR_DYNACONF,
) as f:
f.writelines(
[f"{upperfy(k)} = {repr(v)}\n" for k, v in settings_data.items()]
) | 30,467 |
def server_db(request, cp_server, api_server):
"""Enable database access for unit test vectors."""
db = database.get_connection(read_only=False, integrity_check=False)
api_server.db = db # inject into api_server
cursor = db.cursor()
cursor.execute('''BEGIN''')
util_test.reset_current_block_index(db)
request.addfinalizer(lambda: cursor.execute('''ROLLBACK'''))
request.addfinalizer(lambda: util_test.reset_current_block_index(db))
return db | 30,468 |
def click_exception(exc, error_format):
"""
Return a ClickException object with the message from an input exception
in a desired error message format.
Parameters:
exc (exception or string):
The exception or the message.
error_format (string):
The error format (see ``--error-format`` general option).
Returns:
click.ClickException: The new exception.
"""
if error_format == 'def':
if isinstance(exc, zhmcclient.Error):
error_str = exc.str_def()
else:
assert isinstance(exc, six.string_types)
error_str = "classname: None, message: {msg}".format(msg=exc)
else:
assert error_format == 'msg'
if isinstance(exc, zhmcclient.Error):
error_str = "{exc}: {msg}".format(
exc=exc.__class__.__name__, msg=exc)
else:
assert isinstance(exc, six.string_types)
error_str = exc
new_exc = click.ClickException(error_str)
new_exc.__cause__ = None
return new_exc | 30,469 |
def build(program_code: str, data: Data = frozendict(), random_seed: Optional[int] = None) -> Model:
"""Build (compile) a Stan program.
Arguments:
program_code: Stan program code describing a Stan model.
data: A Python dictionary or mapping providing the data for the
model. Variable names are the keys and the values are their
associated values. Default is an empty dictionary, suitable
for Stan programs with no `data` block.
random_seed: Random seed, a positive integer for random number
generation. Used to make sure that results can be reproduced.
Returns:
Model: an instance of Model
Notes:
C++ reserved words and Stan reserved words may not be used for
variable names; see the Stan User's Guide for a complete list.
"""
# `data` must be JSON-serializable in order to send to httpstan
data = json.loads(DataJSONEncoder().encode(data))
async def go():
io = ConsoleIO()
# hack: use stdout instead of stderr because httpstan silences stderr during compilation
building_output = io.section().output
if not io.supports_ansi():
building_output.write("<comment>Building...</comment>")
async with stan.common.HttpstanClient() as client:
# Check to see if model is in cache.
model_name = httpstan.models.calculate_model_name(program_code)
resp = await client.post(f"/{model_name}/params", json={"data": data})
model_in_cache = resp.status != 404
task = asyncio.create_task(client.post("/models", json={"program_code": program_code}))
start = time.time()
while True:
done, pending = await asyncio.wait({task}, timeout=0.1)
if done:
break
if io.supports_ansi():
building_output.clear()
building_output.write(f"<comment>Building:</comment> {time.time() - start:0.1f}s")
building_output.clear() if io.supports_ansi() else building_output.write("\n")
# now that httpstan has released stderr, we can use error_output again
building_output = io.section().error_output
resp = task.result()
if resp.status != 201:
match = re.search(r"""ValueError\(['"](.*)['"]\)""", resp.json()["message"])
if not match: # unknown error, should not happen
raise RuntimeError(resp.json()["message"])
exception_body = match.group(1).encode().decode("unicode_escape")
error_type_match = re.match(r"(Semantic|Syntax) error", exception_body)
if error_type_match:
error_type = error_type_match.group(0)
exception_body_without_first_line = exception_body.split("\n", 1)[1]
building_output.write_line(f"<info>Building:</info> <error>{error_type}:</error>")
building_output.write_line(f"<error>{exception_body_without_first_line}</error>")
raise ValueError(error_type)
else:
raise RuntimeError(exception_body)
building_output.clear() if io.supports_ansi() else building_output.write("\n")
if model_in_cache:
building_output.write("<info>Building:</info> found in cache, done.")
else:
building_output.write(f"<info>Building:</info> {time.time() - start:0.1f}s, done.")
assert model_name == resp.json()["name"]
if resp.json().get("stanc_warnings"):
io.error_line("<comment>Messages from <fg=cyan;options=bold>stanc</>:</comment>")
io.error_line(resp.json()["stanc_warnings"])
resp = await client.post(f"/{model_name}/params", json={"data": data})
if resp.status != 200:
raise RuntimeError(resp.json()["message"])
params_list = resp.json()["params"]
assert len({param["name"] for param in params_list}) == len(params_list)
param_names, dims = zip(*((param["name"], param["dims"]) for param in params_list))
constrained_param_names = sum((tuple(param["constrained_names"]) for param in params_list), ())
return Model(model_name, program_code, data, param_names, constrained_param_names, dims, random_seed)
try:
return asyncio.run(go())
except KeyboardInterrupt:
return | 30,470 |
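A minimal usage sketch, assuming the module is exposed as stan (the PyStan 3 entry point):

import stan

program = "parameters { real y; } model { y ~ normal(0, 1); }"
model = stan.build(program, random_seed=1)   # compiled via httpstan, cached by model name
fit = model.sample(num_chains=2, num_samples=500)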
def _exec_with_broadcasting(func, *args, **keywords):
"""Main function to broadcast together the shapes of the input arguments
and return results with the broadcasted shape."""
# Identify arguments needing broadcasting
arg_ranks = []
arg_indices = []
    for k in list(range(len(args))) + list(keywords.keys()):  # works on Python 2 and 3
rank = func.BROADCAST_RANKS.get(k, None)
if rank is None: continue
# Get argument
if type(k) == int:
arg = args[k]
else:
arg = keywords[k]
# Ignore args that are not arrays
if not isinstance(arg, np.ndarray): continue
# Determine leading shape, if any
if rank == 0:
shape = arg.shape
else:
shape = arg.shape[:rank]
if shape == (): continue
if shape == (1,): continue
arg_ranks.append(rank)
arg_indices.append(k)
# Call function now if iteration is not needed
if not arg_indices:
return func.__call__(*args, **keywords)
# Broadcast the arrays
cspyce1.chkin(func.array.__name__)
(broadcasted_shape, reshaped_args) = _broadcast_arrays(arg_ranks, args)
if cspyce1.failed():
cspyce1.chkout(func.array.__name__)
return None
# Update the argument list with flattened arrays
args = list(args)
for (k,reshaped_arg) in zip(arg_indices, reshaped_args):
flattened_arg = np.ravel(reshaped_arg)
if type(k) == int:
args[k] = flattened_arg
else:
keywords[k] = flattened_arg
# Execute the function
results = func.__call__(*args, **keywords)
cspyce1.chkout(func.array.__name__)
if cspyce1.failed():
return results
# Reshape the results
if isinstance(results, np.ndarray):
return np.reshape(results, broadcasted_shape)
reshaped_results = []
for result in results:
reshaped_results.append(np.reshape(result, broadcasted_shape))
return reshaped_results | 30,471 |
def info_fba_optimization(input_file, model, target, sol):
"""
    Print fluxes through relevant reactions if the target can be produced.
    If FBA with the exchange reaction of the target as objective
    is feasible, this function prints the reaction rates of
    the relevant reactions.
--------------------------------------------
Argument:
    input_file--str input file in .csv format, dictionary-like
model--cobra.Model reference model in BiGG namespace
target--str target metabolite BiGG ID
sol--list of BiGG IDs of reactions found as solutions of
GapFilling for optimized growth on the indicated source.
"""
print('---{}---'.format(target.upper()))
ex_prod = model.reactions.get_by_id('EX_'+target+'_e')
    print('\nThe flux through the exchange reaction for {} is {} without the need for GapFilling'.format(target.upper(), ex_prod.flux))
biomass = get_biomass_equation(model)
print('\nThe growth rate is: ', biomass.flux)
metabs = get_metabolites(input_file)
for m in metabs:
        r = model.reactions.get_by_id('EX_'+m+'_e')
        print('\nThe flux through {} is: '.format(r.id), r.flux)
    for x in sol:
        r = model.reactions.get_by_id(x)
        print('\nThe flux through {} is: '.format(x), r.flux) | 30,472 |
def compose_paths(path_0, path_1):
"""
The binary representation of a path is a 1 (which means "stop"), followed by the
path as binary digits, where 0 is "left" and 1 is "right".
Look at the diagram at the top for these examples.
Example: 9 = 0b1001, so right, left, left
Example: 10 = 0b1010, so left, right, left
How it works: we write both numbers as binary. We ignore the terminal in path_0, since it's
not the terminating condition anymore. We shift path_1 enough places to OR in the rest of path_0.
Example: path_0 = 9 = 0b1001, path_1 = 10 = 0b1010.
Shift path_1 three places (so there is room for 0b001) to 0b1010000.
Then OR in 0b001 to yield 0b1010001 = 81, which is right, left, left, left, right, left.
"""
mask = 1
temp_path = path_0
while temp_path > 1:
path_1 <<= 1
mask <<= 1
temp_path >>= 1
mask -= 1
path = path_1 | (path_0 & mask)
return path | 30,473 |
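The docstring example checks out directly:

# path_0 = 9 = 0b1001 (right, left, left); path_1 = 10 = 0b1010 (left, right, left)
assert compose_paths(9, 10) == 0b1010001 == 81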
def combine(img1, img2, out_path, write=True):
"""combine(img1, img2, out_path, write=True)
Combines the data of two PyifxImages, ImageVolumes, or ImageLists to form new PyifxImages.
:type img1: pyifx.misc.PyifxImage, pyifx.misc.ImageVolume, list
:param img1: The first image to be added to the combination.
:type img2: pyifx.misc.PyifxImage, pyifx.misc.ImageVolume, list
:param img2: The second image to be added to the combination. Arguments of type ImageVolume and list can be used in conjunction, but images of type PyifxImage must be used together.
:type out_path: str
:param out_path: The path that the combine image(s) will be written to.
:type write: bool
:param write: Whether to write the image or not.
:return: PyifxImage instance, ImageVolume instance, or list with elements of type PyifxImage
:rtype: pyifx.misc.PyifxImage, pyifx.misc.ImageVolume, list
"""
INTERNAL._type_checker(img1, [PyifxImage, ImageVolume, list])
INTERNAL._type_checker(img2, [PyifxImage, ImageVolume, list])
INTERNAL._type_checker(out_path, [str])
INTERNAL._type_checker(write, [bool])
return INTERNAL._combine_handler(img1, img2, out_path, write=write) | 30,474 |
def invert_index(i, window, step):
"""Convert truncated squareform index back into row, col, and slice index
Task indexing for LD pruning is based on several optimizations that utilize a
cyclic, truncated squareform pattern for pairwise comparisons (between rows). This pattern
is primarily controlled by window and step parameters, where an example for window = 4 and
step = 3 would look like this:
row index row indexes of other rows to compare to
| |
0 | 1 2 3 4
1 | 2 3 4
2 | 3 4
3 | 4 5 6 7
4 | 5 6 7
5 | 6 7
6 | 7 8 9 10
... and so on ...
The parameter (`i`) indexes these comparisons where in the above, `i` = 0
corresponds to the comparison between rows 0 and 1, `i` = 1 to rows 0 and 2, `i` = 4
to rows 1 and 2, etc. This method converts this comparison index back into the
cycle number (arbitrarily called a "slice") as well as offsets within that cycle for the rows
being compared. The slice number itself indexes some row in the original array
and the offsets can be used to identify comparisons from that row index.
Examples for the same case above for given comparison index values are:
index -> (row, col, slice)
0 -> (0, 1, 0) -
1 -> (0, 2, 0) |
2 -> (0, 3, 0) |
3 -> (0, 4, 0) |
4 -> (1, 2, 0) |--> One "slice" (i.e. one cycle)
5 -> (1, 3, 0) |
6 -> (1, 4, 0) |
7 -> (2, 3, 0) |
8 -> (2, 4, 0) -
9 -> (0, 1, 1) # The pattern repeats here
Parameters
----------
i : int
Comparison index
window : int
Window size used to define pairwise comparisons
step : int
Step size used to define pairwise comparisons
Returns
-------
(i, j, s) : tuple
i = offset from slice (`s`) to first row in comparison
j = offset from slice (`s`) to second row in comparison
s = slice number/index
"""
assert window >= step
# Coerce to large float to avoid potential int overflow
window = np.float64(window)
step = np.float64(step)
# Number of pairs in a "slice" = window + (window - 1) + ... + (window - step)
p = _intsum(window) - _intsum(window - step)
# Calculate slice number (`s`) and offset into that slice (`k`)
s, k = np.int64(i // p), np.int64(i % p)
# Invert squareform index
# See: https://stackoverflow.com/questions/27086195/linear-index-upper-triangular-matrix
n = window + 1 # The "n" in this case is the size of the window + 1 since self comparisons are ignored
i = np.int64(n - 2 - math.floor(math.sqrt(-8 * k + 4 * n * (n - 1) - 7) / 2.0 - 0.5))
j = np.int64(k + i + 1 - n * (n - 1) / 2.0 + (n - i) * ((n - i) - 1) / 2.0)
assert i >= 0
assert j >= 0
assert s >= 0
return i, j, s | 30,475 |
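The index examples from the docstring can be verified directly (window=4, step=3 as in the diagram):

assert invert_index(4, window=4, step=3) == (1, 2, 0)
assert invert_index(9, window=4, step=3) == (0, 1, 1)   # the pattern repeats here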
def group_masses(ip, dm: float = 0.25):
"""
Groups masses in an isotope pattern looking for differences in m/z greater than the specified delta.
expects
:param ip: a paired list of [[mz values],[intensity values]]
:param dm: Delta for looking +/- within
:return: blocks grouped by central mass
:rtype: list
"""
num = 0
out = [[[], []]]
for ind, val in enumerate(ip[0]):
out[num][0].append(ip[0][ind])
out[num][1].append(ip[1][ind])
try:
if ip[0][ind + 1] - ip[0][ind] > dm:
num += 1
out.append([[], []])
except IndexError:
continue
return out | 30,476 |
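A small sketch with a synthetic pattern — two clusters separated by more than dm:

ip = [[100.0, 100.1, 105.0, 105.2], [10, 20, 5, 8]]
assert group_masses(ip, dm=0.25) == [
    [[100.0, 100.1], [10, 20]],    # gaps of 0.1 stay grouped
    [[105.0, 105.2], [5, 8]],      # the 4.9 jump starts a new block
]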
def normalize_ballots(ballots_resource):
"""Normalize the given ballots in place.
Arguments:
ballots_resource: a ballots resource.
"""
with ballots_resource.replacement() as temp_resource:
normalize_ballots_to(ballots_resource, temp_resource) | 30,477 |
def save_user_labels(*args):
"""
save_user_labels(func_ea, user_labels)
Save user defined labels into the database.
@param func_ea: the entry address of the function (C++: ea_t)
@param user_labels: collection of user defined labels (C++: const
user_labels_t *)
"""
return _ida_hexrays.save_user_labels(*args) | 30,478 |
def add_play():
"""Adds a new play"""
    # get_id() returns None for anonymous users, so this checks for a login
    if flask_login.current_user.get_id():
play_json = request.json
if request.json is None and request.data:
play_json = request.data
if play_json:
try:
# insert the created by information in play
print('play_json is %s' % type(play_json))
play_obj = play_json
if isinstance(play_json, basestring):
play_obj = json.loads(play_json)
play_obj['created_by'] = '%s' % flask_login.current_user.get_id()
play_json = json.dumps(play_obj)
#
new_play = get_db().add_play_from_json(play_json)
return jsonify(msg='added play %s' % new_play.id,
id=new_play.id,
data=new_play.to_json()), 201
except StructureError, error:
return jsonify('BAD JSON %s: %s' % (error, play_json)), 400
else:
return jsonify('Failed to find JSON data in your POST'), 404
return jsonify('You must be logged in to add a play'), 401 | 30,479 |
def get_compliment(file_list, file):
"""Returns a file path from the provided list that has the same name as the
    file passed as the second argument.
:param file_list: A list of paths with the same filetype.
:type file_list: list
:param file: A single path with an opposite filetype.
:type file: str
:return: A file path
:return type: str, None if nothing found
"""
compliment = os.path.splitext(file)[0]
for path in file_list:
if os.path.splitext(path)[0] == compliment:
return path | 30,480 |
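For example, pairing a transcript with its audio file (paths are hypothetical):

waves = ["audio/a.wav", "audio/b.wav"]
assert get_compliment(waves, "audio/b.txt") == "audio/b.wav"
assert get_compliment(waves, "audio/c.txt") is None   # implicit None when nothing matches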
def electron_cyclotron_emission_hardware(ods, pulse, fast_ece=False):
"""
Gathers DIII-D Electron cyclotron emission locations
:param pulse: int
:param fast_ece: bool
Use data sampled at high frequency
"""
unwrap(electron_cyclotron_emission_data)(ods, pulse, fast_ece=fast_ece, _measurements=False) | 30,481 |
def load_string_list(file_path, is_utf8=False):
"""
Load string list from mitok file
"""
try:
with open(file_path, encoding='latin-1') as f:
if f is None:
return None
l = []
for item in f:
item = item.strip()
if len(item) == 0:
continue
l.append(item)
except IOError:
print('open error %s' % file_path)
return None
else:
return l | 30,482 |
def flash_dev(
disk=None, image_path=None, copy_method="default", port=None, program_cycle_s=4
):
"""Flash a firmware image to a device.
Args:
disk: Switch -d <disk>.
image_path: Switch -f <image_path>.
copy_method: Switch -c <copy_method> (default: shell).
port: Switch -p <port>.
program_cycle_s: Sleep time.
"""
if copy_method == "default":
copy_method = "shell"
    result = host_tests_plugins.call_plugin(
"CopyMethod",
copy_method,
image_path=image_path,
serial=port,
destination_disk=disk,
)
sleep(program_cycle_s)
return result | 30,483 |
def _get_bucket_and_object(gcs_blob_path):
"""Extract bucket and object name from a GCS blob path.
Args:
gcs_blob_path: path to a GCS blob
Returns:
The bucket and object name of the GCS blob
Raises:
ValueError: If gcs_blob_path parsing fails.
"""
if not gcs_blob_path.startswith(_GCS_PATH_PREFIX):
raise ValueError(
f'GCS blob paths must start with gs://, got {gcs_blob_path}')
path = gcs_blob_path[len(_GCS_PATH_PREFIX):]
parts = path.split('/', 1)
if len(parts) < 2:
raise ValueError(
'GCS blob paths must be in format gs://bucket-name/object-name, '
f'got {gcs_blob_path}')
return parts[0], parts[1] | 30,484 |
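Assuming _GCS_PATH_PREFIX == 'gs://', the parsing behaves like this:

bucket, obj = _get_bucket_and_object("gs://my-bucket/some/object.txt")
assert (bucket, obj) == ("my-bucket", "some/object.txt")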
def dq_data(request):
"""Main home method and view."""
try:
cases = []
sdate, edate = None, None
sts = {0: 'Pending', 1: 'Open', 2: 'Closed'}
# Conditions
qa = request.GET.get('q_aspect')
va = request.GET.get('variance')
age = request.GET.get('age')
from_date = request.GET.get('from_date')
to_date = request.GET.get('to_date')
org_unit = request.GET.get('org_unit')
if from_date and to_date:
sdate = convert_date(from_date)
edate = convert_date(to_date)
cage = int(age) if age else 0
vid = int(va) if va else 0
qid = int(qa) if qa else 0
q2 = Q(case_category_id__in=('CTRF', 'CCCT'), age__lt=6)
q3 = Q(case_category_id__in=('CSAB', 'CSHV', 'CCCM', 'CORP'),
age__lt=11)
if qa:
acases = RPTCaseLoad.objects.filter(is_void=False)
if qid == 1:
acases = acases.filter(
Q(age__gte=25) | Q(dob__isnull=True) | Q(age__lt=0))
elif qid == 2:
acases = acases.filter(
Q(case_category_id='CDIS',
age__gt=15) | Q(case_category_id='CSIC',
age__gt=18) | q2 | q3)
elif qid == 3:
acases = acases.filter(
case_category_id__in=('CSHV', 'CSCS'), sex_id='SMAL')
elif qid == 4:
acases = acases.filter(
case_status=1, intervention__isnull=True)
else:
acases = RPTCaseLoad.objects.filter(
Q(age__gte=25) | Q(dob__isnull=True))
if vid == 1:
acases = acases.filter(age=cage)
elif vid == 2:
acases = acases.filter(age__gt=cage)
elif vid == 3:
acases = acases.filter(age__lt=cage)
if edate and sdate:
acases = acases.filter(case_date__range=(sdate, edate))
if org_unit:
acases = acases.filter(org_unit_id=org_unit)
else:
if not request.user.is_superuser:
acases = acases.filter(org_unit_id=org_unit)
for case in acases[:1000]:
cs = case.case_status
fname = case.case.person.first_name
sname = case.case.person.surname[0]
o_name = case.case.person.other_names
oname = o_name[0] if o_name else ''
dt = {"cpims_id": case.case.person_id}
dt['age'] = case.age
dt['case_category'] = case.case_category
dt['case_date'] = case.case_date
dt['sex'] = case.sex
dt['case_status'] = sts[cs] if cs in sts else 'Open'
dt['dob'] = case.dob
dt['org_unit'] = case.org_unit_name
dt['intervention'] = case.intervention
dt['org_unit'] = case.org_unit_name
dt['names'] = '%s %s%s' % (fname, sname, oname)
cases.append(dt)
result = {"data": cases}
return JsonResponse(result, content_type='application/json',
safe=False)
    except Exception as e:
        print('error - %s' % (e))
        raise | 30,485 |
def convert_to_differential(file_in, file_out):
"""
This function reformats the txt deAPA output file to file for
differential challenges
:param file_in: txt file to be reformatted
:param file_out: differential challenge output file
:return: N/A
"""
differential_out = open(file_out, "wt")
df = pd.read_csv(file_in, sep='\t')
rows = dict()
for index, row in df.iterrows():
# write differential file
# keep just the gene id obtained from the gene column
# this column has transcript id, gene id, chromosome, orientation
# e.g. ENSMUST00000203335.1|ENSMUSG00000045962.16|chr6|-
name = row['Gene'].split("|")[1]
# p val obtained from P_val column
significance = row['P_val']
if name not in rows:
rows[name] = [significance]
else:
rows[name].append(significance)
# only get the smallest p-value for the corresponding gene
for gene in rows:
output = [gene, str(min(rows[gene]))]
differential_out.write("\t".join(output) + "\n")
differential_out.close() | 30,486 |
def create_round_model(
protocol: Protocol,
ber: float,
n_tags: int) -> 'RoundModel':
"""
Factory function for creating round model.
    This routine is cached, so calling it multiple times won't add much
overhead.
Parameters
----------
protocol : Protocol
ber : float
n_tags : int
Returns
-------
    model : RoundModel
"""
return RoundModel(protocol, ber=ber, n_tags=n_tags) | 30,487 |
def AddPrivateIpv6GoogleAccessTypeFlag(api_version, parser, hidden=False):
"""Adds --private-ipv6-google-access-type={disabled|outbound-only|bidirectional} flag."""
messages = apis.GetMessagesModule('container', api_version)
util.GetPrivateIpv6GoogleAccessTypeMapper(
messages, hidden).choice_arg.AddToParser(parser) | 30,488 |
def local_maxima(a_list):
"""
Takes a NoteList object.
Returns a list of tuples of the form returned by note_onsets().
Each of these (int: bar #, float: beat #) tuples will represent the onset
of a note that is a local maximum in the melody in a_list.
"""
return local_extremities(a_list, maxima=True) | 30,489 |
def get_fitted_model(data: pd.DataFrame, dataframe: pd.DataFrame) -> CTGAN:
""" The function get_fitted_model uses a CTGAN Checkpoint (see chapter about checkpoints),
to load a trained CTGAN model if one is available with the desired hyperparameters, or
train a new one if none is available. The function then returns the trained CTGAN model.
The CTGAN model created here uses a 'Positive' constraint for the dataframe column 'duration',
which contains the duration of each activity. The 'reject_sampling' strategy is used as
handling strategy for this constraint.
    The function logs whether a pre-trained model was loaded or a new one was generated.
"""
cp = CTGANCheckpoint(
config.get_dataset_basename(), config.EPOCHS_CTGAN, config.ENABLED_DP_CTGAN, "{:.1f}".format(config.EPSILON_CTGAN))
return cp.load_if_exists_else_generate(config.RETRAIN_CTGAN, _fit_ctgan, data, dataframe) | 30,490 |
def test_relu():
"""Tests relu"""
pf.set_backend("pytorch")
_test_elementwise(
ops.relu, [-1.0, -0.1, 0.0, 0.1, 1.0], [0.0, 0.0, 0.0, 0.1, 1.0]
) | 30,491 |
def point_based_matching(point_pairs):
"""
This function is based on the paper "Robot Pose Estimation in Unknown Environments by Matching 2D Range Scans"
by F. Lu and E. Milios.
:param point_pairs: the matched point pairs [((x1, y1), (x1', y1')), ..., ((xi, yi), (xi', yi')), ...]
:return: the rotation angle and the 2D translation (x, y) to be applied for matching the given pairs of points
"""
x_mean = 0
y_mean = 0
xp_mean = 0
yp_mean = 0
n = len(point_pairs)
if n == 0:
return None, None, None
for pair in point_pairs:
(x, y), (xp, yp) = pair
x_mean += x
y_mean += y
xp_mean += xp
yp_mean += yp
x_mean /= n
y_mean /= n
xp_mean /= n
yp_mean /= n
s_x_xp = 0
s_y_yp = 0
s_x_yp = 0
s_y_xp = 0
for pair in point_pairs:
(x, y), (xp, yp) = pair
s_x_xp += (x - x_mean)*(xp - xp_mean)
s_y_yp += (y - y_mean)*(yp - yp_mean)
s_x_yp += (x - x_mean)*(yp - yp_mean)
s_y_xp += (y - y_mean)*(xp - xp_mean)
rot_angle = math.atan2(s_x_yp - s_y_xp, s_x_xp + s_y_yp)
translation_x = xp_mean - (x_mean*math.cos(rot_angle) - y_mean*math.sin(rot_angle))
translation_y = yp_mean - (x_mean*math.sin(rot_angle) + y_mean*math.cos(rot_angle))
return rot_angle, translation_x, translation_y | 30,492 |
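A quick sanity check on synthetic data: point pairs related by a 90-degree rotation plus a (1, 0) shift should recover exactly those parameters:

import math

# each pair is ((x, y), (x', y')) with (x', y') = R(90 deg) @ (x, y) + (1, 0)
pairs = [((1, 0), (1, 1)), ((0, 1), (0, 0)), ((2, 3), (-2, 2)), ((-1, 2), (-1, -1))]
angle, tx, ty = point_based_matching(pairs)
assert abs(angle - math.pi / 2) < 1e-9
assert abs(tx - 1.0) < 1e-9 and abs(ty) < 1e-9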
async def async_setup_platform(hass, config, async_add_devices,
discovery_info=None):
"""Setup the sensor platform."""
if discovery_info is None:
return
data = hass.data[DOMAIN].data
if not data.cars:
_LOGGER.info("No Cars found.")
return
devices = []
for car in data.cars:
for key, value in sorted(LOCKS.items()):
if value[5] is None or getattr(car.features, value[5]) is True:
devices.append(
MercedesMELock(
hass,
data,
key,
value[0],
car.finorvin,
value[1],
car.licenseplate,
value[2],
value[3],
value[4],
None))
async_add_devices(devices, True) | 30,493 |
def kb_ids2known_facts(kb_ids):
"""Creates list of all known facts from kb dict"""
facts = set()
for struct in kb_ids:
arrays = kb_ids[struct][0]
num_facts = len(arrays[0])
for i in range(num_facts):
fact = [x[i] for x in arrays]
facts.add(tuple(fact))
return facts | 30,494 |
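A toy input showing the expected layout (the second tuple element is unused by this function):

kb = {"likes(person, person)": ([[1, 1], [2, 3]], None)}   # two parallel id arrays
assert kb_ids2known_facts(kb) == {(1, 2), (1, 3)}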
def vm_name_check(vm_names, item):
"""
Check vm name
:param vm_names: dictionary of vm name
:param item: vm name item in xml
:return: None
"""
for name_i, name_str in vm_names.items():
name_len = len(name_str)
if name_len > 32 or name_len == 0:
key = "vm:id={},{}".format(name_i, item)
ERR_LIST[key] = "VM name length should be in range [1,32] bytes" | 30,495 |
def get_filepath(url, illustration, save_path='.', add_user_folder=False, add_rank=False):
"""return (filename,filepath)"""
if add_user_folder:
user_id = illustration.user_id
user_name = illustration.user_name
current_path = get_default_save_path()
cur_dirs = list(filter(os.path.isdir, [os.path.join(current_path, i) for i in os.listdir(current_path)]))
cur_user_ids = [os.path.basename(cur_dir).split()[0] for cur_dir in cur_dirs]
if user_id not in cur_user_ids:
dir_name = re.sub(r'[<>:"/\\|\?\*]', ' ', user_id + ' ' + user_name)
else:
dir_name = list(i for i in cur_dirs if os.path.basename(i).split()[0] == user_id)[0]
save_path = os.path.join(save_path, dir_name)
filename = url.split('/')[-1]
# name, ext = os.path.splitext(filename)
if add_rank:
# name = illustration.rank + ' - ' + name
filename = illustration.rank + ' - ' + filename
# filename = name + ' - ' + illustration.title + ext
filepath = os.path.join(save_path, filename)
return filename, filepath | 30,496 |
def PUtilAvgT (inUV, outUV, err, scratch=False, timeAvg=1.0):
""" Average A UV data set in Time
returns Averaged UV data object
inUV = Python UV object to copy
Any selection editing and calibration applied before average.
outUV = Predefined UV data if scratch is False, ignored if
scratch is True.
err = Python Obit Error/message stack
scratch = True if this is to be a scratch file (same type as inUV)
timeAvg = Averaging time in min
"""
################################################################
if inUV.myClass=='AIPSUVData':
raise TypeError("Function unavailable for "+inUV.myClass)
# Checks
if not inUV.UVIsA():
raise TypeError("inUV MUST be a Python Obit UV")
if ((not scratch) and (not outUV.UVIsA())):
raise TypeError("outUV MUST be a Python Obit UV")
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
#
# Save parameter
dim = [1,1,1,1,1]
inInfo = PGetList(inUV) #
InfoList.PAlwaysPutFloat (inInfo, "timeAvg", dim, [timeAvg])
# Create output for scratch
if scratch:
outUV = UV("None")
outUV.me = Obit.UVUtilAvgT(inUV.me, scratch, outUV.me, err.me)
if err.isErr:
OErr.printErrMsg(err, "Error averaging UV data")
# Get scratch file info
if scratch:
PUVInfo (outUV, err)
return outUV
# end PUtilAvgT | 30,497 |
def check_view_restrictions(request, page):
"""
Mimic default Wagtail core behaviour but throw a 403 exception instead of
a redirect.
See wagtail.wagtailcore.wagtail_hooks.check_view_restrictions
"""
restrictions = page.get_view_restrictions()
if restrictions:
passed_restrictions = request.session.get('passed_page_view_restrictions', [])
for restriction in restrictions:
if restriction.id not in passed_restrictions:
raise PermissionDenied | 30,498 |
def save_hist_batch(hist, idx_batch, idx_epoch, g_loss, d_loss, e_loss, d_x, d_g_z): #, d_fake
"""
    Save the batch data into the history dict after processing
"""
d_x = d_x.detach().cpu().numpy()
# d_fake = d_fake.detach().cpu().numpy()
d_g_z = d_g_z.detach().cpu().numpy()
g_loss = g_loss.item()
d_loss = d_loss.item()
e_loss = e_loss.item()
hist["g_losses"][idx_batch] = g_loss
hist["d_losses"][idx_batch] = d_loss
hist["e_losses"][idx_batch] = e_loss
hist["d_x_mean"][idx_batch] = d_x.mean()
# hist["d_fake_mean"][idx_batch] = d_fake.mean()
hist["d_g_z_mean"][idx_batch] = d_g_z.mean() | 30,499 |