content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import json
from datetime import datetime
def get_github_info(packages=packages):
    """Collect the latest tagged GitHub release for each PySAL subpackage.

    Queries the GitHub API once per package, records the tag name, tarball
    URL and publication date, dumps the raw (string-dated) results to
    ``tarballs.json``, and finally parses the publication dates into
    ``datetime`` objects for the returned mapping.

    Parameters
    ----------
    packages : iterable of str
        Subpackage names (defaults to the module-level ``packages``).

    Returns
    -------
    dict
        Maps package name to ``{'version', 'url', 'release_date'}``.
    """
    missing = []
    found = {}
    for pkg in packages:
        endpoint = f"https://api.github.com/repos/pysal/{pkg}/releases/latest"
        print(endpoint)
        payload = json.loads(gh_session.get(endpoint).text)
        if 'message' not in payload:
            found[pkg] = {'version': payload['tag_name'],
                          'url': payload['tarball_url'],
                          'release_date': payload['published_at']}
        elif payload['message'] == 'Not Found':
            print(f"{pkg} has no latest release")
            missing.append(pkg)
        else:
            print('Something else happened')
    # Dump while the dates are still strings -- datetime is not JSON-serializable.
    with open('tarballs.json', 'w') as fp:
        json.dump(found, fp)
    for info in found.values():
        info['release_date'] = datetime.strptime(info['release_date'],
                                                 '%Y-%m-%dT%H:%M:%SZ')
    return found
from typing import Dict
def matrix_str_to_dict(matrix_str: str) -> Dict[str, Dict[str, int]]:
    """Parse a whitespace-delimited scoring-matrix string into a nested dict.

    The first non-comment row lists the destination symbols; every following
    row starts with its source symbol and continues with integer scores.
    Blank and whitespace-only lines are now skipped as well -- the previous
    implementation indexed ``line[0]`` unconditionally and raised
    ``IndexError`` on an empty line (e.g. a trailing newline).

    :param matrix_str: matrix text, ``#``-prefixed lines are comments
    :return: mapping such that ``result[aa_from][aa_to] == score``
    """
    scoring_matrix: Dict[str, Dict[str, int]] = {}
    # Skip comments AND empty/whitespace-only lines.
    table = [line.split()
             for line in matrix_str.split("\n")
             if line.strip() and not line.startswith("#")]
    aa_tos = table[0]
    for row in table[1:]:
        aa_from = row[0]
        scores = [int(entry) for entry in row[1:]]  # convert scores to ints
        # 2-D dictionary: scoring_matrix[from][to] == score
        scoring_matrix[aa_from] = dict(zip(aa_tos, scores))
    return scoring_matrix
def restriction(model, sfield, residual, sc_dir):
    """Downsampling of grid, model, and fields to a coarser grid.

    The restriction of the residual is used as source term for the coarse grid.
    Corresponds to Equations 8 and 9 and surrounding text in [Muld06]_. In the
    case of the restriction of the residual, this function is a wrapper for the
    jitted functions :func:`emg3d.core.restrict_weights` and
    :func:`emg3d.core.restrict` (consult these functions for more details and
    corresponding theory).

    This function is called by :func:`emg3d.solver.multigrid`.

    Parameters
    ----------
    model : VolumeModel
        Input model; a :class:`emg3d.models.Model` instance.

    sfield : Field
        Input source field; a :class:`emg3d.fields.Field` instance.

    residual : Field
        Fine-grid residual; its restriction becomes the coarse source term.

    sc_dir : int
        Direction of semicoarsening.

    Returns
    -------
    cmodel : VolumeModel
        Coarse model.

    csfield : Field
        Coarse source field. Corresponds to restriction of fine-grid residual.

    cefield : Field
        Coarse electric field, complex zeroes.
    """
    # 1. RESTRICT GRID

    # We take every second element for the direction(s) of coarsening.
    rx, ry, rz = 2, 2, 2
    if sc_dir in [1, 5, 6]:  # No coarsening in x-direction.
        rx = 1
    if sc_dir in [2, 4, 6]:  # No coarsening in y-direction.
        ry = 1
    if sc_dir in [3, 4, 5]:  # No coarsening in z-direction.
        rz = 1

    # Compute distances of coarse grid (cell widths from the kept nodes).
    ch = [np.diff(model.grid.nodes_x[::rx]),
          np.diff(model.grid.nodes_y[::ry]),
          np.diff(model.grid.nodes_z[::rz])]

    # Create new `TensorMesh` instance for coarse grid
    cgrid = meshes.BaseMesh(ch, model.grid.origin)

    # 2. RESTRICT MODEL

    class VolumeModel:
        """Dummy class to create coarse-grid model."""

        def __init__(self, case, grid):
            """Initialize with case."""
            self.case = case
            self.grid = grid

    cmodel = VolumeModel(model.case, cgrid)
    cmodel.eta_x = _restrict_model_parameters(model.eta_x, sc_dir)
    if model.case in ['HTI', 'triaxial']:
        cmodel.eta_y = _restrict_model_parameters(model.eta_y, sc_dir)
    else:
        # Isotropic in y: alias the x-parameter instead of restricting again.
        cmodel.eta_y = cmodel.eta_x
    if model.case in ['VTI', 'triaxial']:
        cmodel.eta_z = _restrict_model_parameters(model.eta_z, sc_dir)
    else:
        # Isotropic in z: alias the x-parameter instead of restricting again.
        cmodel.eta_z = cmodel.eta_x
    cmodel.zeta = _restrict_model_parameters(model.zeta, sc_dir)

    # 3. RESTRICT FIELDS

    # Get the weights (Equation 9 of [Muld06]_).
    wx, wy, wz = _get_restriction_weights(model.grid, cmodel.grid, sc_dir)

    # Compute the source terms (Equation 8 in [Muld06]_).
    # Initiate zero field; core.restrict fills it in place from the residual.
    csfield = fields.Field(cgrid, dtype=sfield.field.dtype,
                           frequency=sfield._frequency)
    core.restrict(csfield.fx, csfield.fy, csfield.fz, residual.fx,
                  residual.fy, residual.fz, wx, wy, wz, sc_dir)

    # Initiate empty e-field (complex zeroes, same dtype/frequency as source).
    cefield = fields.Field(cgrid, dtype=sfield.field.dtype,
                           frequency=sfield._frequency)

    return cmodel, csfield, cefield
def check_good_not_bad(decider, good, bad):
    """Check if bad prof becomes GOOD by adding funcs it lacks from good prof"""
    # Augment a copy so the caller's profile is left untouched.
    augmented = bad.copy()
    augmented.update({func: val for func, val in good.items() if func not in bad})
    return decider.run(augmented) == StatusEnum.GOOD_STATUS
from typing import List
def filter_by_length(data: List[list], lower_bound: int, upper_bound: int) -> List[list]:
    """Replace, in place, every sentence whose word count is outside the
    accepted range with ``None``.

    :param data: [[word1, word2, word3], ..., [word1, word2]]
    :param lower_bound: 3
    :param upper_bound: 5
    :return: the same list, e.g. [[word1, word2, word3, ... ], ..., None]
    """
    for idx, sentence in enumerate(data):
        if not is_in_range(len(sentence), lower_bound, upper_bound):
            data[idx] = None
    return data
def run_games_and_record_payoffs(game_queries, evaluate_game, ckpt_to_policy):
    """Simulate games according to game queries and return results.

    Args:
      game_queries: set of tuples containing indices specifying each players strat
      evaluate_game: callable function that takes a list of policies as argument
      ckpt_to_policy: maps a strat (or checkpoint) to a policy
    Returns:
      dictionary: key=query, value=np.array of payoffs (1 for each player)
    """
    # Map each query straight to its evaluated payoffs.
    return {
        query: evaluate_game([ckpt_to_policy[ckpt] for ckpt in query])
        for query in game_queries
    }
def gnocchiclient(request):
    """Build a Gnocchi API client authenticated with the request's token.

    Parameters
    ----------
    request : HttpRequest
        Incoming request carrying the Keystone token and project id.

    Returns
    -------
    gnocchi_client.Client
        A v1 Gnocchi client bound to a Keystone token session.
    """
    auth_endpoint = getattr(settings, 'OPENSTACK_KEYSTONE_URL', None)
    loader = loading.get_plugin_loader('token')
    auth = loader.load_from_options(auth_url=auth_endpoint,
                                    token=request.user.token.id,
                                    project_id=request.user.project_id)
    sess = session.Session(auth=auth)
    # Lazy %-style arguments: the message is only formatted when DEBUG
    # logging is actually enabled (the original eagerly %-formatted it).
    LOG.debug('gnocchiclient connection created using token "%s" '
              'and endpoint "%s"', request.user.token.id, auth_endpoint)
    return gnocchi_client.Client('1', session=sess)
from typing import Type
from typing import Tuple
from typing import Any
def create_namespace_handler(
    file_type: Type[NamespaceFile],
    namespace_scope: Tuple[str, ...],
    namespace_extension: str,
) -> Type[NamespaceFile]:
    """Create handler that turns yaml namespace files into json.

    The returned class is a YAML-backed namespace file whose ``bind``
    converts the parsed YAML data to JSON and stores it in the pack as a
    ``file_type`` instance.
    """
    class AutoYamlNamespaceHandler(YamlFile, NamespaceFile):
        # Register the generated handler under the requested scope/extension.
        scope = namespace_scope
        extension = namespace_extension

        def bind(self, pack: Any, path: str):
            super().bind(pack, path)
            # Replace the YAML file with its JSON-converted counterpart ...
            pack[path] = file_type(dump_json(self.data))
            # ... then raise Drop() -- presumably so the YAML original is
            # discarded from the pack (beet-style); TODO confirm semantics.
            raise Drop()

    return AutoYamlNamespaceHandler
from typing import Union
import os
def trsh_job_on_server(server: str,
                       job_name: str,
                       job_id: Union[int, str],
                       job_server_status: str,
                       remote_path: str,
                       server_nodes: list = None):
    """
    Troubleshoot server errors.

    Args:
        server (str): The server name.
        job_name (str): The job's name (e.g., 'opt_a103').
        job_id (int, str): The job's ID on the server.
        job_server_status (str): The job server status (either 'initializing',
                                 'running', 'errored', or 'done').
        remote_path (str): The remote path to the job folder.
        server_nodes (list, optional): The nodes already tried on this server
                                       for this job.

    Returns: Tuple[str, bool]
        - The new node on the server (or None).
        - Whether to re-run the job, `True` to rerun.
    """
    server_nodes = server_nodes if server_nodes is not None else list()
    cluster_soft = servers[server]['cluster_soft']
    if job_server_status != 'done':
        logger.error(f'Job {job_name} has server status "{job_server_status}" on {server}.')

    # delete current server run
    if server == 'local':
        cmd = delete_command[cluster_soft] + ' ' + str(job_id)
        execute_command(cmd)
        return None, True
    else:
        with SSHClient(server) as ssh:
            ssh.delete_job(job_id)

    # find available node
    logger.error('Troubleshooting by changing node.')
    # Use a context manager here as well, consistent with the other SSH
    # usages (the original leaked this connection).
    with SSHClient(server) as ssh:
        nodes = ssh.list_available_nodes()
    for node in nodes:
        if node not in server_nodes:
            server_nodes.append(node)
            break
    else:
        logger.error(f'Could not find an available node on the server {server}')
        # TODO: continue troubleshooting; if all else fails, put the job to
        # sleep, and try again searching for a node
        return None, False

    # modify the submit file
    remote_submit_file = os.path.join(remote_path, submit_filename[cluster_soft])
    with SSHClient(server) as ssh:
        content = ssh.read_remote_file(remote_file_path=remote_submit_file)
    if cluster_soft.lower() == 'oge':
        node_assign = '#$ -l h='
        insert_line_num = 7
    elif cluster_soft.lower() == 'slurm':
        # NOTE(review): SLURM directives normally start with '#SBATCH';
        # '#$BATCH' looks wrong -- confirm against the submit templates
        # before changing the runtime string.
        node_assign = '#$BATCH -w, --nodelist='
        insert_line_num = 5
    else:
        # Other software?
        # Fixed: the original called the non-existent ``logger.denug`` and
        # would have raised AttributeError on this path.
        logger.debug(f'Unknown cluster software {cluster_soft} is encountered when '
                     f'troubleshooting by changing node.')
        return None, False
    for i, line in enumerate(content):
        if node_assign in line:
            content[i] = node_assign + node
            break
    else:
        content.insert(insert_line_num, node_assign + node)
    content = ''.join(content)  # convert list into a single string, not to upset paramiko

    # resubmit
    with SSHClient(server) as ssh:
        ssh.upload_file(remote_file_path=os.path.join(remote_path,
                        submit_filename[cluster_soft]), file_string=content)
    return node, True
def get_native_backend_config_dict():
    """ Get backend_config_dict for PyTorch Native backend (fbgemm/qnnpack). """
    binary_dtypes = [
        weighted_op_int8_dtype_config,
        default_op_fp16_dtype_config,
    ]
    share_qparams_dtypes = [
        default_op_quint8_dtype_config,
        default_op_fp16_dtype_config
    ]
    # Assemble the op configs in the same order as before.
    configs = list(_DEFAULT_OP_INT8_CONFIGS)
    configs += _get_linear_configs()
    configs += _get_conv_configs()
    configs += _get_binary_op_configs(binary_dtypes)
    configs += _get_fixed_qparams_op_configs()
    configs.append(_CAT_CONFIG)
    configs += _get_bn_configs()
    configs += _get_share_qparams_op_configs(share_qparams_dtypes)
    configs += _get_rnn_op_configs()
    configs += _get_embedding_op_configs()
    return {
        # optional
        "name": "native",
        "configs": configs,
    }
def dummy_content_widget(title='Dummy Content Widget', content="Dummy Content",
                         slug="dummy_content_widget", is_published=True):
    """Persist a throwaway content widget row for use in tests.

    Parameters
    ----------
    title : str
        Widget title.
    content : str
        Widget content.
    slug : str
        Url friendly representation of the widget title.
    is_published : bool
        Whether the widget will be published or not.

    Return
    -------
    ContentWidget model object.
        Contains data of the content widgets.
    """
    widget = ContentWidget(title=title,
                           content=content,
                           slug=slug,
                           is_published=is_published)
    db.session.add(widget)
    db.session.commit()
    return widget
import os
def get_ckpt_epoch(ckpt_dir):
    """Return the newest checkpoint filename and its epoch number.

    Args:
        ckpt_dir: directory to scan for ``.ckpt`` files.

    Returns:
        Tuple (newest_ckpt, max_epoch), or (None, None) when the directory
        is empty or contains no ``.ckpt`` files (the original raised
        ``ValueError: max() arg is an empty sequence`` in the latter case).
    """
    files = os.listdir(ckpt_dir)
    if not files:
        print("No ckpt files")
        return None, None
    ckpt_epoch = {}
    for file_name in files:
        if os.path.splitext(file_name)[1] == '.ckpt':
            ckpt_epoch[file_name] = get_epoch(file_name)
    if not ckpt_epoch:
        # Files exist, but none of them are checkpoints.
        print("No ckpt files")
        return None, None
    newest_ckpt = max(ckpt_epoch, key=ckpt_epoch.get)
    return newest_ckpt, ckpt_epoch[newest_ckpt]
def ask_note():
    """Prompt the user for optional task notes and return what they typed."""
    return input("Enter any additional task notes here >")
import os
def mkdir_for_girl(f_path):
    """Create the directory named after the title, if it does not exist yet.

    Uses EAFP (try/except) instead of the original exists()-then-mkdir
    check, which was vulnerable to a TOCTOU race when two processes tried
    to create the same directory concurrently.

    :param f_path: root path of the directory to create
    :return: the (possibly freshly created) directory path
    """
    try:
        os.mkdir(f_path)
    except FileExistsError:
        pass  # already there -- same outcome as the original exists() check
    return f_path
def main(prmtop_file, mdcrd_traj_file, inpcrd_file, get_B=False, target_length=15, cluster_size=10, simplify_only=False, rod=None, radius=5e-9, rod_out=None, unroll_rod=True, get_inhomogenous_beta=False, get_inhomogenous_kappa=False):
    """
    For an atomistic trajectory, this will create an equivalent rod trajectory,
    and use that trajectory to compute the material parameters of the rod.
    Cool huh?
    Parameters:
        prmtop_file, mdcrd_traj_file, inpcrd_file: paths to AMBER output files
        (strings)
        get_B: whether to also compute the anisotropic B matrices (bool)
        target_length: the number of elements in the rod to be created (int)
        simplify_only: if True, don't parameterise rod, just make it (bool)
        rod: if you already have a rod trajectory, supply a rod object here.
    Returns:
        analysis, an instance of FFEA_rod.anal_rod
        delta_omega, L_i, B - the values used to compute the anisotropic B
        matrix (None when get_B is False).
        inhomogeneous_beta, inhomogeneous_kappa - per-element constants
        (None when not requested).
    It also prints out the isotropic results, for reference.

    Fixed relative to the original: the body referenced the misspelled
    flags ``get_inhomogeneous_beta``/``get_inhomogeneous_kappa`` (the
    parameters are spelled ``get_inhomogenous_*``) and the final return
    used names that were undefined unless every flag was set, both of
    which raised NameError.
    """
    if not rod:
        print("Loading files...")
        topology = prmtop_file
        trajectory = mdcrd_traj_file
        u_initial = MDAnalysis.Universe(topology, inpcrd_file, format="INPCRD")
        initial_backbone = u_initial.select_atoms('protein and backbone').positions / 1e10
        u = MDAnalysis.Universe(topology, trajectory, format="TRJ")
        backbone = u.select_atoms('protein and backbone')
        backbone_traj = np.zeros([len(u.trajectory), len(backbone), 3])
        print("Retrieving backbone trajectory (warning: slow as hell, blame MDAnalysis)...")
        for frame_no in range(len(u.trajectory)):
            backbone_traj[frame_no] = backbone.positions
            try:
                u.trajectory.next()
            except (ValueError, StopIteration):
                break

        chain1_end = 1373  # index of the last atom in the first chain
        print("Constructing rod...")
        backbone_traj /= 1e10  # convert from A to m

        # 1) work out how many elements we need, create a rod
        # 2) set the indices of averaging clusters
        # 3) get the nearest (opposite) element lookup table

        def set_cluster_indices(target_length, cluster_size, chain1_end):
            """
            To construct the rod trajectory, only small clusters of atoms are
            averaged. This function sets the indexes of the nodes which will
            be used in each cluster.
            Params:
                target_length: target number of nodes, an int
                cluster_size: width (in atoms) of each cluster
                chain1_end: index of the atom at the end of the first chain
            Returns:
                cluster_indices, a 2-d array of integers where the first axis
                is the rod node id and the second is the atomistic atom id.
            """
            # note: these indices are defined for the FIRST chain only.
            cluster_indices = np.zeros([target_length, cluster_size], dtype=int)
            starting_cluster_index = int(cluster_size / 2)
            end_cluster_index = chain1_end - int(cluster_size / 2)
            cluster_center_indices = np.linspace(starting_cluster_index, end_cluster_index, target_length, dtype=int)
            for cluster_index in range(len(cluster_indices)):
                # Walk outward from the center: 0, +1, -1, +2, -2, ...
                i = 0
                for node_index in range(len(cluster_indices[cluster_index])):
                    cluster_indices[cluster_index][node_index] = cluster_center_indices[cluster_index] + i
                    if i == 0:
                        i += 1
                    elif i > 0:
                        i *= -1
                    elif i < 0:
                        i *= -1
                        i += 1
            return cluster_indices

        def get_nearest_node_table(initial_backbone, chain1_end):
            """
            Get a lookup table of the index of the nearest node in the opposite
            chain for each node.
            Params:
                initial_backbone - the initial (equilibrium) state of the
                atomistic structure.
                chain1_end - integer, index of the atom at the end of the first
                chain
            Returns:
                nearest_node_lookup_table, a 1-d array of integers where the
                index is the node index and the value is the index of the
                nearest node in the opposite chain.
            """
            chain1 = initial_backbone[:chain1_end]
            chain2 = initial_backbone[chain1_end:]
            chain1_nearest_node_table = np.zeros(len(chain1), dtype=int)
            for node_index in range(len(chain1)):
                distances_vec = chain1[node_index] - chain2
                distances_scalar = np.linalg.norm(distances_vec, axis=1)
                chain1_nearest_node_table[node_index] = np.argmin(distances_scalar)
            # Offset so indices point into the full backbone array.
            return chain1_nearest_node_table + chain1_end

        def get_node_traj(cluster_indices, node_index, backbone_traj, nearest_node_table):
            """
            Given the cluster indices and nearest nodes, (see above), get the
            cluster-averaged position of a particular rod node, for the whole
            trajectory. The position is the average of the cluster atoms in
            both chains. The material axis is the average vector that goes
            between those atoms (normalized).
            Params:
                cluster_indices: ndarray generated by set_cluster_indices
                node_index: index of the rod node to find a trajectory for
                backbone_traj: all-atom trajectory from mdanalysis as an ndarray
                nearest_node_table: generated from get_nearest_node_table
            Returns:
                node position as a 3-d array where the first index is the frame,
                second axis is the node index, and third axis is the dimension.
                material axis vector in the same structure.
            """
            cluster_size = len(cluster_indices[0])
            chain1_nodes = np.zeros([cluster_size, len(backbone_traj), 3])
            chain2_nodes = np.zeros([cluster_size, len(backbone_traj), 3])
            for i, curr_node_index in enumerate(cluster_indices[node_index]):
                chain1_nodes[i] = backbone_traj[:, curr_node_index]
                chain2_nodes[i] = backbone_traj[:, nearest_node_table[curr_node_index]]
            chain1_avg = np.average(chain1_nodes, axis=0)
            chain2_avg = np.average(chain2_nodes, axis=0)
            mataxis = chain1_nodes - chain2_nodes
            m_average = np.average(mataxis, axis=0)
            # Normalize the material axis frame-by-frame.
            absolutes = np.linalg.norm(m_average, axis=1)
            absolutes = np.swapaxes(np.array([absolutes, absolutes, absolutes]), 0, 1)
            m_average = m_average / absolutes
            return np.average([chain1_avg, chain2_avg], axis=0), m_average

        print("Setting cluster indices...")
        cluster_indices = set_cluster_indices(target_length, cluster_size, chain1_end)
        print("Calculating nearest node table...")
        nearest_node_table = get_nearest_node_table(initial_backbone, chain1_end)
        print("Calculating node positions and material axes...")
        rod_r = np.zeros([len(backbone_traj), target_length, 3])
        rod_m = np.zeros([len(backbone_traj), target_length, 3])
        for node_index in range(target_length):
            rod_r[:, node_index], rod_m[:, node_index] = get_node_traj(cluster_indices, node_index, backbone_traj, nearest_node_table)
        # Equilibrium configuration: single initial frame broadcast to all frames.
        equil_r = np.zeros([len(backbone_traj), target_length, 3])
        equil_m = np.zeros([len(backbone_traj), target_length, 3])
        for node_index in range(target_length):
            equil_r[:, node_index], equil_m[:, node_index] = get_node_traj(cluster_indices, node_index, np.array([initial_backbone]), nearest_node_table)

        print("Initializing FFEA rod object...")
        rod = FFEA_rod.FFEA_rod(num_elements=target_length)
        rod.current_r = rod_r
        rod.current_m = rod_m
        rod.equil_r = equil_r
        rod.equil_m = equil_m
        rod.material_params = np.ones([len(backbone_traj), target_length, 3])
        rod.B_matrix = np.ones([len(backbone_traj), target_length, 4])
        energy_shape = [len(rod.current_m), len(rod.current_m[0]), 3]
        rod.perturbed_x_energy_positive = np.zeros(energy_shape)
        rod.perturbed_y_energy_positive = np.zeros(energy_shape)
        rod.perturbed_z_energy_positive = np.zeros(energy_shape)
        rod.perturbed_x_energy_negative = np.zeros(energy_shape)
        rod.perturbed_y_energy_negative = np.zeros(energy_shape)
        rod.perturbed_z_energy_negative = np.zeros(energy_shape)
        rod.twisted_energy_positive = np.zeros(energy_shape)
        rod.twisted_energy_negative = np.zeros(energy_shape)

        print("Setting parameters...")
        FFEA_rod.rod_creator.set_params(rod, 1, 1, radius, 1)
        rod.num_frames = len(rod.current_r)
        if simplify_only:
            return rod

    analysis = FFEA_rod.anal_rod(rod)
    if unroll_rod:
        print("Unrolling...")
        analysis = unroll(analysis)
    try:
        analysis.p_i
        analysis.equil_p_i
    except AttributeError:
        rod.p_i = rod.get_p_i(rod.current_r)
        rod.equil_p_i = rod.get_p_i(rod.equil_r)
        analysis.p_i = analysis.rod.p_i
        analysis.p_i_equil = analysis.rod.equil_p_i
        analysis.equil_p_i = analysis.rod.equil_p_i

    temp = 300  # kelvin
    analytical_kbT = temp * 1.38064852 * 10**-23
    half_kbT = 0.5 * analytical_kbT
    print("Computing average constants...")
    analysis.get_stretch_energy()
    half_stretch_squared_avg = np.average(analysis.stretch_energy)
    kappa_stretch = half_kbT / half_stretch_squared_avg
    analysis.get_bending_response_mutual()
    half_bend_squared_avg = np.nanmean(analysis.bending_energy)
    B_isotropic = half_kbT / half_bend_squared_avg
    analysis.get_twist_amount()
    half_twist_squared_avg = np.average(analysis.twist_energy)
    beta = half_kbT / half_twist_squared_avg
    avg_stretch, stretch_error = get_avg_and_mean_error(analysis.stretch_energy, half_kbT)
    avg_bend, bend_error = get_avg_and_mean_error(analysis.bending_energy, half_kbT)
    avg_twist, twist_error = get_avg_and_mean_error(analysis.twist_energy, half_kbT)
    print("Done.")
    print("Kappa (stretch constant) = " + str(kappa_stretch) + "+/-" + str(stretch_error))
    print("B (isotropic bend constant) = " + str(B_isotropic) + "+/-" + str(bend_error))
    print("beta (twist constant) = " + str(beta) + "+/-" + str(twist_error))

    # Pre-initialise optional results so the final return never hits a
    # NameError when the corresponding flags are left False.
    delta_omega = None
    L_i = None
    B = None
    inhomogeneous_beta = None
    inhomogeneous_kappa = None

    if get_inhomogenous_beta or get_inhomogenous_kappa:
        print("Computing inhomogeneous constants...")
    if get_inhomogenous_beta:
        inhomogeneous_beta, beta_err = get_inhomogeneous_param(analysis, 1, kbT=analytical_kbT)
        inhomogeneous_beta = np.concatenate([[inhomogeneous_beta[0]], inhomogeneous_beta, [inhomogeneous_beta[-1]]])
        analysis.rod.material_params[:, :, 1] = inhomogeneous_beta
        print("beta (inhomogeneous)" + str(inhomogeneous_beta))
    if get_inhomogenous_kappa:
        inhomogeneous_kappa, kappa_err = get_inhomogeneous_param(analysis, 0, kbT=analytical_kbT)
        inhomogeneous_kappa = np.concatenate([inhomogeneous_kappa, [inhomogeneous_kappa[-1]]])
        analysis.rod.material_params[:, :, 0] = inhomogeneous_kappa
        print("kappa (inhomogeneous)" + str(inhomogeneous_kappa))

    if get_B:
        print("Computing B...")
        delta_omega, L_i = get_delta_omega(analysis, fast=True)
        B = []
        for element_no in range(len(delta_omega[0])):
            B.append(get_B_avg(delta_omega, temp, L_i, element_no))
        if rod_out:
            B_flat = np.ndarray.flatten(np.asarray(B))
            B_arr = np.zeros([np.shape(delta_omega)[1] + 2, 4])
            B_arr[1:-1] = np.reshape(B_flat, [np.shape(delta_omega)[1], 4])
            # Pad the end elements by repeating their neighbours.
            B_arr[0] = B_arr[1]
            B_arr[-1] = B_arr[-2]
            analysis.rod.B_matrix[:] = B_arr
            print("B (inhomogeneous, anisotropic): " + str(B_arr))
            print("Writing rod...")
            analysis.rod.write_rod(rod_out)

    return analysis, delta_omega, L_i, (np.array(B) if B is not None else None), inhomogeneous_beta, inhomogeneous_kappa
def Yt_1d_full():
    """
    1d Yt fully observed
    """
    observations = [2, 1.3, 2.5, 3.1]
    # One scalar observation per time step, shaped (T, 1, 1).
    return np.array(observations).reshape(4, 1, 1)
def threshold_isodata(image, nbins=256, shift=None, max_limit=None, min_limit=None):
    """Return threshold value based on ISODATA method.

    Histogram-based threshold, known as Ridler-Calvard method or intermeans.

    Parameters
    ----------
    image : array
        Input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.
    shift : int, optional
        Shift threshold value by percent up (positive) or down (negative).
    max_limit : int, optional
        Percent 0-100. If calculated threshold higher than max_limit,
        return corresponding to max_limit threshold value.
    min_limit : int, optional
        Percent 0-100. If calculated threshold lower than min_limit,
        return corresponding to min_limit threshold value.

    Returns
    -------
    threshold : float or int, corresponding input array dtype.
        Upper threshold value. All pixels intensities that less or equal of
        this value assumed as background.
        `foreground (cells) > threshold >= background`.

    References
    ----------
    .. [1] Ridler, TW & Calvard, S (1978), "Picture thresholding using an
           iterative selection method"
    .. [2] IEEE Transactions on Systems, Man and Cybernetics 8: 630-632,
           http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4310039
    .. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165,
           http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
    .. [4] ImageJ AutoThresholder code,
           http://fiji.sc/wiki/index.php/Auto_Threshold

    Examples
    --------
    >>> from skimage.data import coins
    >>> image = coins()
    >>> thresh = threshold_isodata(image)
    >>> binary = image > thresh
    """
    if max_limit is not None and min_limit is not None:
        if min_limit > max_limit:
            raise ValueError('min_limit greater than max_limit')
    hist, bin_centers = histogram(image, nbins)
    # On blank images (e.g. filled with 0) with int dtype, `histogram()`
    # returns `bin_centers` containing only one value. Speed up with it.
    if bin_centers.size == 1:
        return bin_centers[0]
    # It is not necessary to calculate the probability mass function here,
    # because the l and h fractions already include the normalization.
    pmf = hist.astype(np.float32)  # / hist.sum()
    cpmfl = np.cumsum(pmf, dtype=np.float32)
    cpmfh = np.cumsum(pmf[::-1], dtype=np.float32)[::-1]
    # Fixed: dtype was np.uint8, which silently wrapped for histograms with
    # more than 256 bins (e.g. wide-range integer images) and corrupted the
    # mean computation below.
    binnums = np.arange(pmf.size, dtype=np.intp)
    # low_mean and high_mean contain average value of pixels in sum of bins,
    # calculated from lower to higher and from higher to lower respectively.
    low_mean = np.ma.divide(np.cumsum(pmf * binnums, dtype=np.float32), cpmfl)
    high_mean = np.ma.divide(
        np.cumsum((pmf[::-1] * binnums[::-1]), dtype=np.float32)[::-1],
        cpmfh)
    allmean = (low_mean + high_mean) / 2.0
    threshold = bin_centers[np.nonzero(allmean.round() == binnums)[0][0]]
    ptp = (bin_centers[-1] - bin_centers[0]) / 100.  # Peak to peak range
    if shift:
        threshold += ptp * shift
    if max_limit is not None:
        # Limit foreground
        lim = bin_centers[0] + ptp * max_limit
        if threshold > lim:
            threshold = lim
    if min_limit is not None:
        # Limit background
        lim = bin_centers[0] + ptp * min_limit
        if threshold < lim:
            threshold = lim
    return threshold
def populate_task_view(request):
    """
    Populate the project_task view.

    Fixed relative to the original: ``members`` was never assigned when no
    ProjectTeam exists for the project, raising NameError when building the
    context; the template was also loaded twice.
    """
    project_id = request.GET.get('project_id')
    project_name = request.GET.get('project_name')
    project = Project.objects.get(id=int(project_id))
    status = Status.objects.all()
    milestones = Milestone.objects.filter(project_id=project.id)
    # Default mirrors the original's "no members" sentinel.
    members = ""
    if ProjectTeam.objects.filter(project_id=int(project_id)).exists():
        team = ProjectTeam.objects.get(project_id=project.id)
        team_members = ProjectTeamMember.objects.filter(project_team=team.id)
        member_users = [User.objects.get(id=member.member_id)
                        for member in team_members]
        if member_users:
            members = member_users
    template = loader.get_template('project_management/add_project_tasks.html')
    context = {
        'project_id': project_id,
        'project_name': project_name,
        'members': members,
        'milestones': milestones,
        'statuses': status
    }
    return HttpResponse(template.render(context, request))
def cleanup_queryset(queryset):
    """
    Remove multiple joins on the same table, if any
    WARNING: can alter the origin queryset order
    """
    # Re-query by primary key so duplicate rows from joins collapse.
    pks = [obj.pk for obj in queryset.all()]
    return queryset.model.objects.filter(pk__in=pks)
def svn_diff_output_fns_invoke_output_conflict(*args):
    """
    svn_diff_output_fns_invoke_output_conflict(svn_diff_output_fns_t _obj, void output_baton, apr_off_t original_start,
        apr_off_t original_length,
        apr_off_t modified_start, apr_off_t modified_length,
        apr_off_t latest_start, apr_off_t latest_length,
        svn_diff_t resolved_diff) -> svn_error_t
    """
    # Thin SWIG wrapper: forwards all arguments directly to the C binding.
    return _diff.svn_diff_output_fns_invoke_output_conflict(*args)
def capacity_factors():
    """
    Build the hard-coded capacity-factor lookup table, keyed first by fuel
    code and then by prime-mover code. Values default to 1.0 where no
    capacity-factor information could be found. The numbers are specific to
    the 2018 year and may lead to inaccurate results or KeyErrors if applied
    to other years.

    Returns:
    --------
    cf: dict
        A dictionary containing the capacity factors for different fuels
        and movers.
    """
    cf = {}
    cf['DFO'] = {'IC': 0.019, 'GT': 0.013, 'ST': 0.142, 'CA': 0.142, 'CT': 0.019}
    cf['NG'] = {'IC': 0.13, 'GT': 0.119, 'CA': 0.55, 'CT': 0.55,
                'ST': 0.126, 'CS': 0.55, 'FC': 0.729}
    cf['WAT'] = {'HY': 0.428}
    # All coal (BIT) movers share one factor.
    cf['BIT'] = dict.fromkeys(('ST', 'CT', 'GT', 'IC'), 0.536)
    cf['WDS'] = {'ST': 0.493}
    cf['RFO'] = {'IC': 0.019, 'ST': 0.142, 'CT': 0.019, 'GT': 0.013}
    cf['SUN'] = {'PV': 26.1}
    cf['KER'] = {'GT': 1.0}
    cf['PC'] = {'ST': 0.142}
    cf['PG'] = {'ST': 1.0}
    cf['SUB'] = {'ST': 0.436}
    # All landfill-gas movers share one factor.
    cf['LFG'] = dict.fromkeys(('CA', 'CT', 'IC', 'GT'), 0.733)
    cf['MWH'] = {'BA': 1.0}
    cf['OBS'] = {'ST': 0.493}
    cf['WND'] = {'WT': 0.374}
    cf['OBL'] = {'IC': 0.493}
    return cf
def _is_greater(list1: list, list2: list):
"""
return True if `list1[i] > list2[i]` for each `i`
"""
return all([list1[i] > list2[i] for i in range(len(list1))]) | 925fb214f741d6503b41b49d57a268506f05a048 | 29,321 |
def isenabled(handle):
    """Return True if the window is enabled"""
    # IsWindowEnabled returns a Win32 BOOL (an int); coerce to a Python bool.
    return bool(win32functions.IsWindowEnabled(handle))
def monte_carlo(d):
    """
    Calculate Monte Carlo value for π.
    Arguments:
        d: list of unsigned byte values.
    Returns:
        Approximation of π as Decimal
    """
    MONTEN = 6
    # Radius² of the inscribed quarter-circle in 3-byte coordinate space.
    in_circle = Decimal((256.0 ** (MONTEN // 2) - 1) ** 2)
    usable = len(d) // MONTEN * MONTEN
    # Weight consecutive bytes as base-256 digits: 256², 256, 1, repeating.
    weights = it.cycle([Decimal(256 ** 2), Decimal(256), Decimal(1)])
    weighted = (Decimal(b) * w for b, w in zip(d[:usable], weights))
    # Sum triplets of weighted bytes into 3-byte coordinate values.
    triplets = [weighted] * 3
    coords = [sum(group) for group in it.zip_longest(*triplets)]
    xs = coords[0::2]
    ys = coords[1::2]
    hits = Decimal(sum(x * x + y * y <= in_circle for x, y in zip(xs, ys)))
    return 4 * hits / len(xs)
def verifyIfRepo(dirName):
    """Return True when *dirName* holds a repo with a compatible shelve db.

    Any error while probing the database (missing file, bad format, ...)
    is treated as "not a repo" and yields False.
    """
    dbPath = getDbPath(dirName)
    # Fixed: ``print dbPath`` was Python 2 syntax (SyntaxError on Python 3).
    print(dbPath)
    try:
        # Collapse the redundant if/else that returned True/False by hand.
        return bool(verifyShelveVersionCompatibility(dbPath))
    except Exception:
        # NOTE(review): the original used a bare ``except:``; kept broad on
        # purpose (best-effort probe) but no longer swallows SystemExit etc.
        return False
def machine_setter(_latfile=None, _machine=None, _handle_name=None):
    """ set flame machine, prefer *_latfile*

    :param _latfile: path to a FLAME lattice file (preferred source)
    :param _machine: pre-built FLAME machine, used as fallback
    :param _handle_name: name used in log messages
    :return: FLAME machine object, or None if neither source is usable
    """
    if _latfile is not None:
        try:
            with open(_latfile, 'rb') as f:
                return Machine(f)
        except Exception:
            # Fixed: was a bare ``except:``, which also trapped
            # KeyboardInterrupt/SystemExit.
            if _machine is None:
                _LOGGER.error("{}: Failed to initialize flame machine".format(
                    _handle_name))
                return None
            _LOGGER.warning("{}: Failed to initialize flame machine, "
                            "use _machine instead".format(_handle_name))
            return _machine
    # No lattice file given: fall back to the supplied machine (may be None).
    return _machine
def vgg_preprocess_images(image_tensor):
    """
    :param image_tensor: float32 array of Batch x Height x Width x Channel images (range 0 - 1)
    :return: pre-processed images (ready to input to VGG)
    """
    # VGG per-channel means, in BGR order.
    bgr_means = tf.convert_to_tensor(
        np.array([103.939, 116.779, 123.68], dtype=np.float32))
    scaled = image_tensor * 255
    r, g, b = tf.split(axis=-1, num_or_size_splits=3, value=scaled)
    # Reorder RGB -> BGR and subtract the matching channel mean.
    channels = [b - bgr_means[0], g - bgr_means[1], r - bgr_means[2]]
    return tf.concat(axis=3, values=channels)
def computeIntCorrections(npix, factor):
    """Compute `newnpix` and `newfactor` such that `newnpix` is the
    nearest odd-valued integer to ``npix*factor``.
    Parameters
    ----------
    npix : int
        Odd-valued integer.
    factor : float
        Multiplicative factor.
    Returns
    -------
    newnpix : int
        Nearest odd integer to ``npix*factor``.
    newfactor : float
        `factor` but corrected such that ``npix*newfactor = newnpix``.
    Examples
    --------
    .. code-block:: python
        computeIntCorrections(3,2.)
        (7, 2.33333)  # factor was corrected
        computeIntCorrections(3,5)
        (15, 5.0)  # no corrections necessary
        computeIntCorrections(15,0.6)
        (9, 0.6)  # can also scale down
        computeIntCorrections(15,0.5)
        (7, 0.46667)  # factor was corrected
    """
    checkOdd(npix)
    scaled = npix * factor
    # Round to the nearest odd integer.
    # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    newnpix = int(2 * np.floor(scaled // 2) + 1)
    newfactor = newnpix / float(npix)
    return newnpix, newfactor
def LU_factor(A,LOUD=True):
    """Factor in place A in L*U=A. The lower triangular parts of A
    are the L matrix. The L has implied ones on the diagonal.

    Uses partial pivoting: rows are swapped so that the pivot of each
    column has the largest (scaled) magnitude.

    Args:
        A: N by N array (modified in place; on exit holds U in the upper
           triangle and the L multipliers strictly below the diagonal)
        LOUD: if True, print the scale factors, swaps and intermediate
              matrices for debugging/teaching purposes
    Returns:
        a vector holding the order of the rows, relative to the original order
    Side Effects:
        A is factored in place.
    """
    [Nrow, Ncol] = A.shape
    assert Nrow == Ncol
    N = Nrow
    #create scale factors
    # s[i] is the largest magnitude entry of row i, used to scale pivots.
    s = np.zeros(N)
    count = 0
    row_order = np.arange(N)
    for row in A:
        s[count] = np.max(np.fabs(row))
        count += 1
    if LOUD:
        print("s =",s)
    if LOUD:
        print("Original Matrix is\n",A)
    for column in range(0,N):
        #swap rows if needed
        # NOTE(review): dividing by the scalar s[column] does not change the
        # argmax; scaled partial pivoting would divide elementwise by
        # s[column:N] -- confirm intended pivoting strategy.
        largest_pos = np.argmax(np.fabs(A[column:N,column]/s[column])) + column
        if (largest_pos != column):
            if (LOUD):
                print("Swapping row",column,"with row",largest_pos)
                print("Pre swap\n",A)
            swap_rows(A,column,largest_pos)
            #keep track of changes to RHS
            tmp = row_order[column]
            row_order[column] = row_order[largest_pos]
            row_order[largest_pos] = tmp
            #re-order s
            tmp = s[column]
            s[column] = s[largest_pos]
            s[largest_pos] = tmp
            if (LOUD):
                print("A =\n",A)
        # Eliminate entries below the pivot; store the multiplier (factor)
        # in the eliminated position so A accumulates L and U together.
        for row in range(column+1,N):
            mod_row = A[row]
            factor = mod_row[column]/A[column,column]
            mod_row = mod_row - factor*A[column,:]
            #put the factor in the correct place in the modified row
            mod_row[column] = factor
            #only take the part of the modified row we need
            mod_row = mod_row[column:N]
            A[row,column:N] = mod_row
    return row_order
def get_zvals_from_grid(line, xyzgrid):
    """Sample z values along *line* from a scattered xyz grid.

    :param line: LineString or ARRAY whose first two columns are x/y coords.
    :param xyzgrid: DataFrame with x, y, z columns or a 3-column ARRAY.
    :return: array of z values, one per point on *line*.
    """
    # Wrap a bare array in a DataFrame so both input kinds are handled alike.
    if type(xyzgrid) == np.ndarray:
        # BUGFIX: columns=[['x','y','z']] created a one-level MultiIndex;
        # a flat list yields the plain x/y/z columns the code below expects.
        gridnew = pd.DataFrame(xyzgrid, columns=['x', 'y', 'z'])
    else:
        gridnew = xyzgrid.copy()
    # Extract the xy coordinates of the query line (works for LineString too).
    if type(line) == np.ndarray:
        xy = line[:, 0:2]
    else:
        xy = np.array(line)[:, 0:2]
    # Crop the grid to the line's bounding box to keep griddata fast.
    extents = (min(xy[:, 0]), min(xy[:, 1]), max(xy[:, 0]), max(xy[:, 1]))
    gridnew = crop_xyz_grid_by_extents(gridnew, extents)
    # Plain ndarray avoids label-alignment surprises inside griddata.
    gridnew = gridnew[['x', 'y', 'z']].values
    # 'nearest' avoids NaNs for query points outside the convex hull.
    zvals = sinterp.griddata(gridnew[:, 0:2], gridnew[:, 2], xy, method='nearest')
    return zvals
def HBIh(time, gsd=None, gh=None):
    """Simulate a Huber-Braun-type neuron model with an added Ih current.

    paras
        time: array of time points for the odeint integration grid.
        gsd: optional
            Conductance of slow depolarizing current (default 0.21).
        gh : optional
            Conductance of the hyperpolarization-activated Ih current
            (default 0.4).
    Return
        Var_t : array of shape (len(time), 5) with the trajectory of
            [v, ar, asd, ca, ah]; column 0 is the action potential.
    """
    def HyB(Var, t, tempF):
        # Right-hand side of the ODE system; tempF carries the
        # temperature-scaling factors (rho, phi).
        [rrho, pphi] = tempF
        [v, ar, asd, ca, ah] = Var
        ad = 1/(1+np.exp(-zd*(v-V0d)))
        isd = rrho*gsd*asd*(v - Ed)
        Imemb = isd + rrho*gd*ad*(v - Ed) + rrho*(gr*ar + gsr*(ca**2)/(ca**2+0.4**2))*(v-Er) + rrho*gl*(v - El) \
            + rrho*gh*ah*(v - Eh)
        arinf = 1/(1+np.exp(-zr*(v-V0r)))
        asdinf = 1/(1+np.exp(-zsd*(v-V0sd)))
        ahinf = 1/(1+np.exp(-zh*(v-V0h)))
        return np.array([-Imemb,
                         pphi*(arinf - ar)/tr,
                         pphi*(asdinf - asd)/tsd,
                         pphi*(-eta*isd - kappa*ca)/tsr,
                         pphi*(ahinf-ah)/th])
    # Model parameters: conductances, half-activation voltages/slopes,
    # time constants and reversal potentials.
    gd = 2.5; gr = 2.8; gsr = 0.28;
    gl = 0.06;
    V0d = -25; V0r = -25; zd = 0.25; zr = 0.25; tr = 2;
    V0sd = -40; zsd = 0.11; tsd = 10;
    eta = 0.014; kappa = 0.18; tsr = 35;
    V0h = -85; zh = -0.14; th = 125;
    Ed = 50; Er = -90; El = -80; Eh = -30;
    # BUGFIX: defaults were only applied when BOTH gsd and gh were None,
    # so supplying just one of them crashed with a None conductance.
    # Apply each default independently (both-None behavior is unchanged).
    if gsd is None:
        gsd = 0.21
    if gh is None:
        gh = 0.4
    temp = 36
    rho = 1.3**((temp-25.)/10)
    phi = 3**((temp-25.)/10)
    # Initial voltage, then the steady-state gating values at that voltage.
    v = -60
    ad = 1/(1+np.exp(-zd*(v-V0d)))
    ar = 1/(1+np.exp(-zr*(v-V0r)))
    asd = 1/(1+np.exp(-zsd*(v-V0sd)))
    ca = -eta*rho*gsd*asd*(v - Ed)/kappa
    ah = 1/(1+np.exp(-zh*(v-V0h)))
    # Run the simulation itself from the initial state vector.
    X = np.array([v, ar, asd, ca, ah])
    Var_t = integrate.odeint(HyB, X, time, args=((rho, phi),))
    return Var_t
import requests
import json
def discovery(uri="", data_format="json"):
    """
    Method description:
    Sends an HTTP DELETE request to *uri* on the OneM2M framework and returns
    the response status together with the URI list from the response body.

    NOTE(review): despite the name "discovery", this issues a DELETE and reads
    the "m2m:uril" key from the JSON response -- confirm the intended
    semantics with callers. (The previous docstring described deleting an AE
    with parameters that do not match this signature.)

    Parameters:
    uri         : [str] target URI for the request
    data_format : [str] payload format used for the Content-type header
    Returns:
    (status_code, uril) : HTTP status code [int] and the value of the
                          "m2m:uril" key (raises KeyError if absent)
    """
    headers = {
        "X-M2M-Origin": f"{credentials()}",
        "Content-type": "application/{}".format(data_format),
    }
    response = requests.delete(uri, headers=headers)
    print("Return code : {}".format(response.status_code))
    print("Return Content : {}".format(response.text))
    _resp = json.loads(response.text)
    return response.status_code, _resp["m2m:uril"]
async def test_map_two_iterables_expected_result(
    arange: ty.Type[ty.AsyncIterator[int]], stop: int
):
    """Verify that :class:`none.collection.a.map` yields the expected values
    when driven with two iterables.
    """

    async def _sum_pair(left: int, right: int) -> int:
        return left + right

    mapped = none.collection.a.map(_sum_pair, arange(stop), arange(stop))
    async for index, value in none.collection.a.enumerate(mapped):
        assert value == (index + index)
def dijkstra_search(problem):
    """
    Best first search that evaluates each node purely by its path cost g(n).
    :param problem:
    :return: final node
    """
    def path_cost(node):
        # g(n): accumulated cost from the start node.
        return node.path_cost

    return best_first_graph_search(problem, path_cost)
def _get_pair_nodes(root_node):
"""
Internal method to get "pair" nodes under root_node
"""
method_elem = root_node
in_configs_elem_list = method_elem.getElementsByTagName("inConfigs")
in_configs_elem = in_configs_elem_list[0]
pair_elems_list = in_configs_elem.getElementsByTagName("pair")
return pair_elems_list | c2b74f7a507394d2117cd6292116e62d34f3e556 | 29,334 |
import sys
def get_string(message="Enter your response", title="Title",
               default_response=""):
    """Simple text input box. Used to query the user and get a string back.
    :param message: Message displayed to the user, inviting a response
    :param title: Window title
    :param default_response: default response appearing in the text box
    :return: a string, or ``None`` if "cancel" is clicked or window
             is closed.
    >>> import easygui_qt as easy
    >>> reply = easy.get_string()
    .. image:: ../docs/images/get_string.png
    >>> reply = easy.get_string("new message", default_response="ready")
    .. image:: ../docs/images/get_string2.png
    """
    app = SimpleApp()
    dialog = VisibleInputDialog()
    flags = get_common_input_flags()
    text, ok = dialog.getText(None, title, message, qt_widgets.QLineEdit.Normal,
                              default_response, flags)
    app.quit()
    if not ok:
        # Cancelled or closed: fall through to an implicit None result.
        return None
    # On Python 2 the Qt result is normalized to a unicode string.
    if sys.version_info < (3,):
        return unicode(text)
    return text
def has_ao_2e_int_eri_lr(trexio_file) -> bool:
    """Check that ao_2e_int_eri_lr variable exists in the TREXIO file.

    Parameter is a ~TREXIO File~ object that has been created by a call to
    ~open~ function.

    Returns:
        True if the variable exists, False otherwise

    Raises:
        - Exception from trexio.Error class if TREXIO return code ~rc~ is
          TREXIO_FAILURE and prints the error message using string_of_error.
    """
    # The previous "try: ... except: raise" wrapper was a no-op and has been
    # removed; exceptions propagate unchanged either way.
    rc = pytr.trexio_has_ao_2e_int_eri_lr(trexio_file.pytrexio_s)
    if rc == TREXIO_FAILURE:
        raise Error(rc)
    # Any rc other than TREXIO_SUCCESS (and not FAILURE) means "not present".
    return rc == TREXIO_SUCCESS
def corelucy(image, h):
    """
    Make core for the LR estimation: the FFT of the ratio between the input
    image and its reblurred estimate, which produces the next iteration array
    maximizing the Poisson likelihood. Simplified version of the MATLAB
    corelucy function without damping, weights and externally defined
    functions.

    Parameters
    ----------
    image : ndarray
        Input image.
    h : ndarray
        Zero-padded OTF. h should have the same dimensions as image.

    Returns
    -------
    f : ndarray
        LR extimation core.

    References
    ----------
    .. [1] Acceleration of iterative image restoration algorithms, by D.S.C.
       Biggs and M. Andrews, Applied Optics, Vol. 36, No. 8, 1997.
    .. [2] Deconvolutions of Hubble Space Telescope Images and Spectra,
       R.J. Hanisch, R.L. White, and R.L. Gilliland. in "Deconvolution of
       Images and Spectra", Ed. P.A. Jansson, 2nd ed., Academic Press, 1997.
    """
    # Reblur the current estimate through the OTF, clamp to positive values,
    # then return the FFT of the observed/reblurred ratio.
    reblurred = np.real(ifftn(h * fftn(image, image.shape), image.shape))
    reblurred = _ensure_positive(reblurred)
    return fftn(image / reblurred)
def convert_to_bipartite(S):
    """
    convert a standard stoichiometric matrix (in a Pandas DataFrame)
    to a bipartite graph with an edge between every reactant and all its
    reactions

    :param S: DataFrame indexed by 'bigg.metabolite'; the column index is
        assumed to be named 'bigg.reaction' so melt() emits that column
        -- TODO confirm against callers.
    :return: tuple (B, mets, rxns): the bipartite graph, the metabolite
        node set and the reaction node set.
    """
    # convert the stoichiometric matrix to a sparse representation
    S_sparse = pd.melt(S.reset_index(),
                       id_vars='bigg.metabolite', value_name='coeff')
    S_sparse = S_sparse[S_sparse.coeff != 0]
    # remove the high-degree metabolites that we want to ignore for graph
    # distance, keeping only the cytosolic ('c') compartment.
    # BUGFIX: pass n= as a keyword -- the positional form of str.rsplit's
    # n argument is deprecated in recent pandas.
    met_comp = S_sparse['bigg.metabolite'].str.rsplit('_', n=1, expand=True)
    S_sparse = S_sparse[(~met_comp[0].isin(METS_TO_REMOVE)) & (met_comp[1] == 'c')]
    S_sparse['bigg.metabolite'] = met_comp[0].str.upper()
    mets = set(S_sparse['bigg.metabolite'].unique())
    rxns = set(S_sparse['bigg.reaction'].unique())
    B = nx.Graph()
    B.add_nodes_from(mets, bipartite=0)
    B.add_nodes_from(rxns, bipartite=1)
    # BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0; to_numpy()
    # is the supported replacement. Columns are (metabolite, reaction, coeff),
    # which matches networkx's (u, v, weight) triples.
    B.add_weighted_edges_from(S_sparse.to_numpy())
    return B, mets, rxns
def proj_l1_neg_tan_cone(u, v):
    """
    Project u onto the negative tangent cone of l1 ball with diameter ||v||_1
    at v. It's actually the tangent cone of l1 ball with bound ||v||_1 at -v.
    @param u: the vector to calculate projection from.
    @param v: the vector at whom the tangent cone forms.
    @return normalized projection of u.
    """
    # Delegates to proj_l1_tan_cone with v negated: the negative tangent
    # cone at v equals the tangent cone at -v for the same l1 bound.
    return proj_l1_tan_cone(u, -v)
def generate(prompt: str, context: str) -> str:
    """Generates a response for the given prompt and context.
    :param prompt: The prompt to generate a response for.
    :type prompt: str
    :param context: The context to generate a response for.
    :type context: str
    :return: The generated response.
    :rtype: str
    """
    # Dispatch to the handler registered for the current mode.
    handler = _generate[get_mode()]
    return handler(prompt, context)
def fista(input_size, eval_fun, regulariser,
          regulariser_function=None, thresholding_function=None, initial_x=0,
          L0=1., eta=2., update_L=True,
          verbose=1, verbose_output=0):
    """
    FISTA (Fast Iterative Shrinkage Thresholding Algorithm) is an algorithm to solve the convex minimization of
    y = f(x) + regulariser * g(x) with g(x) can be a continuous and non-differentiable function, such as L1 norm or total variation in compressed sensing.
    If f(x) = || Ax - b ||^2, then the gradient is Df(x) = 2 * A.T * (Ax - b).
    This is from A. Beck and M. Teboulle's paper in 2009: A Fast Iterative Shrinkage-Thresholding Algorithm for Linear Inverse Problems.
    Arguments
    ---------
    input_size: (int or tuple of ints) shape of the signal
    eval_fun: (function with two outputs) evaluation function to calculate f(x) and its gradient, Df(x)
    regulariser: (float) regulariser weights to be multiplied with the regulariser function, g(x)
    regulariser_function: (function or string) the regulariser function, g(x), or string to specify the regulariser, such as "l1" or "tv" (default: reg_l1)
    thresholding_function: (function) function to apply thresholding (or denoising) to the signal in the gradient descent.
        This is ignored if regulariser function is a string (default: soft_threshold_l1)
    initial_x: (int or array) 0 for zeros, 1 for random, or array with shape = input_size to specify the initial guess of the signal (default: 0)
    L0: (float) initial guess of the inverse of step size (default: 1)
    eta: (float) the increment of L0 if the step size is too large (default: 2)
    update_L: (bool or int) flag whether to update L or keep it fix (default: True)
    verbose: (bool or int) flag to show the iteration update (default: True)
    verbose_output: (bool or int) indicate whether the function should return the full information or just the signal (default: False)
    Returns
    -------
    The signal if (verbose_output == False) or a dictionary with the output signal (x), number of iterations (n_iter),
    evaluation function (fx), gradient (gradx), and regulariser function (gx) values
    """
    ############################### argument check ###############################
    eta = float(eta)
    initial_x = _get_initial_x(initial_x, input_size)
    regulariser_fun, thresholding_fun = _get_regulariser(regulariser, regulariser_function, thresholding_function)
    ############################### initialisation ###############################
    L = float(L0)
    x = initial_x
    y_next = x
    t_next = 1.
    F_prev = None
    ############################### main iteration ###############################
    n_iter = 1
    while True:
        # obtain the parameters from the previous iteration
        L_prev = L
        x_prev = x
        y = y_next
        t = t_next
        # calculate the function and the gradient of the evaluation function
        f_y, grad_y = eval_fun(y)
        g_y = regulariser_fun(y)
        F = f_y + g_y
        # print the message
        if verbose == 1:
            if printHeader(n_iter): print(header)
            if printContent(n_iter): print(contentFormat % (n_iter, f_y, g_y, F, np.sum(np.abs(grad_y)), np.sum(y > 0)))
        # check convergence (relative change of the objective) and update F_prev
        # BUGFIX: compare against None with "is not None", not "!=", which can
        # trigger elementwise comparison on array-valued objectives.
        if F_prev is not None and np.abs(F - F_prev) / (1e-10+np.abs(F_prev)) < 1e-6: break
        F_prev = F
        # find i_k for L=eta**i_k*L such that F(pL(yk)) <= QL(pL(yk), yk)
        L_test = L_prev
        while True:
            pLy = thresholding_fun(y - 1./L_test * grad_y, float(regulariser)/L_test)  # gradient descent with thresholding afterwards
            if not update_L: break
            pLy_min_y = pLy - y
            reg_pLy = regulariser_fun(pLy)
            f_pLy, grad_pLy = eval_fun(pLy)
            F_pLy = f_pLy + reg_pLy
            Q_pLy = f_y + np.sum(pLy_min_y * grad_y) + L_test/2. * np.sum(pLy_min_y * pLy_min_y) + reg_pLy
            if (F_pLy <= Q_pLy): break
            L_test *= eta
        # calculate the next parameters (momentum update from Beck & Teboulle)
        L = L_test
        x = pLy
        t_next = (1. + np.sqrt(1 + 4.*t**2))/2.
        y_next = x + ((t - 1.) / t_next) * (x - x_prev)
        n_iter += 1
    ############################### output ###############################
    if verbose_output:
        return {"x": y, "n_iter": n_iter, "fx": f_y, "gradx": grad_y, "gx": g_y}
    else:
        return y
def __ldns_pkt_set_question(*args):
    """Thin wrapper forwarding to the native ``_ldns.__ldns_pkt_set_question``.

    NOTE(review): the original docstring said "LDNS buffer."; the name
    suggests it sets a packet's question section -- confirm against the
    ldns C API before relying on that.
    """
    return _ldns.__ldns_pkt_set_question(*args)
import random
def get_nick(infraction_id: int, member_id: int) -> str:
    """Randomly select a nickname from the Superstarify nickname list.

    The RNG is seeded from the infraction and member ids so the same pair
    always yields the same nickname.
    """
    seed = str(infraction_id) + str(member_id)
    return random.Random(seed).choice(STAR_NAMES)
def find_best_path(starting):
    """Find the best moves using A*.

    :param starting: initial RoomState of the amphipod puzzle.
    :return: total energy cost of the cheapest sequence of moves reaching
        the finished configuration (prints the reconstructed path as a
        side effect); implicitly None if the open set is exhausted.
    """
    @dataclass(order=True)
    class PrioritizedState:
        """Dataclass to insert the RoomStates into the priority queue"""
        # Only `cost` participates in ordering; `config` is excluded so the
        # queue never tries to compare RoomState objects.
        cost: int
        config: RoomState=field(compare=False)
    open_set = PriorityQueue()
    open_set.put(PrioritizedState(0, starting))
    closed_set = set()
    came_from = {} # map of pod config with the (pods, moved_pods)
    # g_score: cheapest known cost to reach each pod configuration.
    g_score = defaultdict(lambda: float('inf'))
    g_score[starting.pods] = 0
    def h_func(state: RoomState):
        # As the heuristic for the A* we will use the sum of distances
        # to the entrance of that amphipod room
        _cost = 0
        for i, pod_idx in enumerate(state.pods):
            type_ = get_pod_type(i)
            dest = destination_columns[type_]
            column = pod_idx % state.grid.cols
            if column != dest:
                dist = mhd(state.grid.get_pos(pod_idx), (dest, 1))
                k = dist * move_cost[type_]
                _cost += k
        return _cost
    # f_score = g_score + heuristic; drives the priority queue ordering.
    f_score = defaultdict(lambda: float('inf'))
    f_score[starting.pods] = h_func(starting)
    max_done = -1
    while not open_set.empty():
        item = open_set.get()
        current = item.config
        closed_set.add(current.pods)
        if current.is_finished():
            # if reached the best state, reconstruct the movements using
            # the came_from dictionary
            _cost = g_score[current.pods]
            pods = current.pods
            path = []
            while g_score[pods] != 0:
                path.append(pods)
                pods, _ = came_from[pods]
            # Replay the path from the start, printing each step's cost
            # delta and the rendered board.
            current.pods = starting.pods
            current.moved_pods = []
            for pos in reversed(path):
                print(g_score[pos] - g_score[current.pods])
                current.pods = pos
                current.moved_pods = came_from[pos][1]
                print(current.render())
            return _cost
        # I render the first state which has more sorted to keep track of
        # the progress
        current_done = sum([1 for p in range(4*PODS_PER_TYPE) if current.is_done(p)])
        if current_done > max_done:
            print(current.render())
            max_done = current_done
        for _cost, new_state in get_moves(current):
            if new_state.pods in closed_set:
                continue
            tentative_g_score = g_score[current.pods] + _cost
            if tentative_g_score < g_score[new_state.pods]:
                # this is the better move
                came_from[new_state.pods] = (current.pods, new_state.moved_pods)
                g_score[new_state.pods] = tentative_g_score
                f_score[new_state.pods] = tentative_g_score + h_func(new_state)
                open_set.put(PrioritizedState(f_score[new_state.pods], new_state))
def Shard(ilist, shard_index, num_shards):
    """Shard a given list and return the group at index |shard_index|.

    Args:
      ilist: input list
      shard_index: 0-based sharding index
      num_shards: shard count

    Returns:
      The slice of |ilist| belonging to shard |shard_index|; the last shard
      absorbs any remainder when len(ilist) is not divisible by num_shards.
    """
    # BUGFIX: use floor division -- under Python 3 a bare "/" produces a
    # float, and float slice indices raise TypeError.
    chunk_size = len(ilist) // num_shards
    chunk_start = shard_index * chunk_size
    if shard_index == num_shards - 1:  # Exhaust the remainder in the last shard.
        chunk_end = len(ilist)
    else:
        chunk_end = chunk_start + chunk_size
    return ilist[chunk_start:chunk_end]
from typing import Any
from typing import Tuple
def get_oof_pred(
    model: Any,
    X_train: pd.DataFrame,
    y_train: pd.Series,
    X_test: pd.DataFrame,
    n_fold: int,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Get Out of Fold predictions for both the training and the test dataset.

    Args:
        model (Any): Any model class with `fit` and `predict` methods that
            accepts numpy arrays as input
        X_train (pd.DataFrame): features for the training dataset
        y_train (pd.Series): labels for the training dataset
        X_test (pd.DataFrame): features for the test dataset
        n_fold (int): number of folds for the out-of-fold prediction

    Returns:
        oof_train: out-of-fold predictions for the training dataset
        oof_test_mean: per-sample mean of the fold predictions on the test set
    """
    splitter = KFold(n_fold)
    oof_train = np.zeros(shape=(X_train.shape[0],))
    oof_test = np.zeros(shape=(n_fold, X_test.shape[0]))
    # One estimator instance, re-fitted on each fold's dev split.
    estimator = model()
    for fold_no, (dev_idx, val_idx) in enumerate(splitter.split(X_train)):
        X_dev, X_val, y_dev, _ = cv_split(X_train, y_train, dev_idx, val_idx)
        estimator.fit(X_dev, y_dev)
        oof_train[val_idx] = estimator.predict(X_val)
        oof_test[fold_no, :] = estimator.predict(X_test)
    return oof_train, oof_test.mean(axis=0)
async def get_aes_key(password: str, settings: EncryptionSettings) -> bytes:
    """Returns the 32 byte system encryption key.

    Derives a decryption key from *password* and uses it to decrypt the
    stored ``settings.encrypted_key`` chunk.
    """
    # NOTE(review): asserts are stripped under "python -O"; consider raising
    # TypeError if these checks are meant as real input validation.
    assert isinstance(password, str)
    assert isinstance(settings, EncryptionSettings)
    return await decrypt_chunk(get_decryption_key(password, settings), settings.encrypted_key)
def nextpow2(value):
    """
    Extracted from
    caiman.source_extraction.cnmf.deconvolution import axcov
    Find the smallest exponent such that 2^exponent is >= abs(value).
    Parameters:
    ----------
    value : int
    Returns:
    -------
    exponent : int
    """
    target = np.abs(value)
    exponent = 0
    while np.power(2, exponent) < target:
        exponent += 1
    return exponent
def workshopinquiry_accept_event(request, inquiry_id):
    """Accept workshop inquiry by creating a new event.

    GET renders an empty EventCreateForm; POST validates it, creates the
    event, attaches the inquiry's host as a 'host' task, marks the inquiry
    accepted ('a'), links it to the event, and redirects to the event page.
    """
    # Only pending ('p') inquiries can be accepted; others 404.
    wr = get_object_or_404(WorkshopInquiryRequest, state='p', pk=inquiry_id)
    if request.method == 'POST':
        form = EventCreateForm(request.POST)
        if form.is_valid():
            event = form.save()
            person = wr.host()
            if person:
                # Record the inquiry's host as a Task on the new event.
                Task.objects.create(event=event, person=person,
                                    role=Role.objects.get(name="host"))
            wr.state = 'a'  # accepted
            wr.event = event
            wr.save()
            return redirect(reverse('event_details',
                                    args=[event.slug]))
        else:
            messages.error(request, 'Fix errors below.')
    else:
        # non-POST request
        form = EventCreateForm()
    context = {
        'object': wr,
        'form': form,
    }
    return render(request, 'requests/workshopinquiry_accept_event.html',
                  context)
def load_level_data():
    """Read the `level.dat` file and return the `Data` compound.

    Returns None when the file is missing, unreadable, or the world was
    saved by a Minecraft version older than MIN_VERSION_ID.
    NOTE(review): the fall-through `return None` assumes `display_error`
    returns control here rather than exiting -- confirm.
    """
    try:
        level_data = nbt.load(LEVEL_DATA_PATH, gzipped=True).root['Data']
    except FileNotFoundError:
        display_error(f'Couldn\'t find any "{LEVEL_DATA_PATH}" file. Are you '
                      'sure that the current directory is a minecraft '
                      'world folder?')
    except Exception:  # pylint: disable = broad-except
        display_error(f'Couldn\'t load level data "{LEVEL_DATA_PATH}".')
    else:
        # Worlds without a 'Version' tag are treated as id 0 / "unknown".
        world_version = level_data.get('Version', {'Id': 0, 'Name': 'unknown'})
        if MIN_VERSION_ID <= world_version['Id']:
            return level_data
        version_name = world_version['Name']
        display_error(f'Minecraft version "{version_name}" is not compatible '
                      'with endermite.')
    return None
def separation_scorer(catalogue, name_TGSS, name_NVSS):
    """Score a TGSS/NVSS pair by angular separation.

    Returns a value in [0, 1] that falls linearly from 1 at zero separation
    to 0 at 40 arcsec. By set-up, only pairs present in *catalogue* score
    non-zero; any other pair returns 0.
    """
    if (name_TGSS, name_NVSS) not in catalogue.index:
        return 0
    # Catalogue stores degrees; convert to arcseconds before scoring.
    sep_arcsec = catalogue.loc[name_TGSS, name_NVSS].separation * 3600
    return max(0, (40 - sep_arcsec) / 40)
def mms_hpca_calc_anodes(fov=[0, 360], probe='1', suffix=''):
    """
    This function will sum (or average, for flux) the HPCA data over the requested field-of-view (fov)

    Parameters
    ----------
        fov : list of int
            field of view, in angles, from 0-360
        probe : str
            probe #, e.g., '4' for MMS4 (currently unused by this routine)
        suffix: str
            suffix of the loaded data

    Returns
    ----------
        List of tplot variables created.
    """
    # Count-like quantities are summed over the FoV; flux is averaged.
    sum_anodes = [a+suffix for a in ['*_count_rate', '*_RF_corrected', '*_bkgd_corrected', '*_norm_counts']]
    avg_anodes = ['*_flux'+suffix]
    output_vars = []
    fov_str = '_elev_'+str(fov[0])+'-'+str(fov[1])
    # The sum and average paths only differ in the reduction function, so
    # both delegate to a shared helper instead of duplicating the body.
    for pattern in sum_anodes:
        for var in tnames(pattern):
            output_vars.append(_mms_hpca_reduce_fov(var, fov, fov_str, mms_hpca_sum_fov))
    for pattern in avg_anodes:
        for var in tnames(pattern):
            output_vars.append(_mms_hpca_reduce_fov(var, fov, fov_str, mms_hpca_avg_fov))
    return output_vars


def _mms_hpca_reduce_fov(var, fov, fov_str, reduce_fn):
    """Collapse one tplot variable over the elevation FoV with *reduce_fn*,
    store the result as a new spectrogram variable and return its name."""
    species_map = {'hplus': 'H+', 'oplus': 'O+', 'heplus': 'He+', 'heplusplus': 'He++', 'oplusplus': 'O++'}
    var_species = var.split('_')[2]
    times, data, angles, energies = get_data(var)
    updated_spectra = reduce_fn(times, data, angles, energies, fov=fov)
    out_name = var + fov_str
    store_data(out_name, data={'x': times, 'y': updated_spectra, 'v': energies})
    options(out_name, 'spec', True)
    options(out_name, 'ylog', True)
    options(out_name, 'zlog', True)
    options(out_name, 'ztitle', species_map[var_species] + ' ' + var.split('_')[3] + ' (cm^2-s-sr-eV)^-1')
    options(out_name, 'ytitle', species_map[var_species] + ' Energy (eV)')
    options(out_name, 'Colormap', 'jet')
    return out_name
def get_host_call_fn(model_dir):
    """`host_call` function for creating training summaries when using TPU.

    :param model_dir: directory where the summary file writer emits events.
    :return: a `host_call_fn` closure suitable for a TPUEstimator host_call.
    """
    def host_call_fn(**kwargs):
        """Host_call_fn.
        Args:
          **kwargs: dict of summary name to tf.Tensor mapping. The value we see here
            is the tensor across all cores, concatenated along axis 0. This function
            will take make a scalar summary that is the mean of the whole tensor (as
            all the values are the same - the mean, trait of
            tpu.CrossShardOptimizer).
        Returns:
          A merged summary op.
        """
        gs = kwargs.pop('global_step')[0]
        with tf_summary.create_file_writer(model_dir).as_default():
            # Only record summaries every 10 global steps.
            with tf_summary.record_if(tf.equal(gs % 10, 0)):
                for name, tensor in kwargs.items():
                    # Take the mean across cores.
                    tensor = tf.reduce_mean(tensor)
                    tf_summary.scalar(name, tensor, step=gs)
            return tf.summary.all_v2_summary_ops()
    return host_call_fn
from datetime import datetime, timedelta


def determine_horizons_id(lines, now=None):
    """Attempts to determine the HORIZONS id of a target body that has multiple
    possibilities. The passed [lines] (from the .args attribute of the exception)
    are searched for the HORIZONS id (column 1) whose 'epoch year' (column 2)
    is closest to [now] (a passed-in datetime or defaulting to datetime.utcnow()).

    :param lines: iterable of whitespace-separated text lines.
    :param now: reference datetime; defaults to datetime.utcnow().
    :return: the winning HORIZONS id as an int, or None if nothing parsed.
    """
    now = now or datetime.utcnow()
    # BUGFIX: timedelta was referenced but never imported here.
    best_span = timedelta.max
    horizons_id = None
    for line in lines:
        chunks = line.split()
        if len(chunks) >= 5 and chunks[0].isdigit() and chunks[1].isdigit():
            try:
                epoch_yr = datetime.strptime(chunks[1], "%Y")
            except ValueError:
                # BUGFIX: the value must be a %-style lazy argument, not a
                # second positional that logging would fail to format.
                logger.warning("Unable to parse year of epoch from %s", line)
                continue
            # BUGFIX: keep the running best as abs() so an epoch in the
            # future (negative raw delta) cannot poison later comparisons.
            span = abs(now - epoch_yr)
            if span <= best_span:
                # New closer match to "now".
                horizons_id = int(chunks[0])
                best_span = span
    return horizons_id
def export_read_file(channel, start_index, end_index, bulkfile, output_dir, remove_pore=False):
    """Generate a read FAST5 file from channel and coordinates in a bulk FAST5 file
    Parameters
    ----------
    channel : int
        channel number from bulk FAST5 file
    start_index : int
        start index for read (time in seconds * sample_frequency)
    end_index : int
        end index for read (time in seconds * sample_frequency)
    bulkfile : h5py.File
        bulk FAST5 file opened as an h5py object
    output_dir : str
        output directory, must include trailing slash
    remove_pore : bool
        remove pore-like signal (>1500) from pore trace
    Returns
    -------
    str
        filename of the exported read
    """
    # Output filename embeds the run filename, slice coordinates and channel.
    out_filename = bulkfile["UniqueGlobalKey"]["context_tags"].attrs["filename"].decode('utf8')
    out_filename = '{fn}_bulkvis-read_{start}-{end}_ch_{ch}.fast5'.format(
        fn=out_filename,
        start=start_index,
        end=end_index,
        ch=channel
    )
    output_arg = "{dir}{fn}".format(
        dir=output_dir,
        fn=out_filename
    )
    readfile = h5py.File(output_arg, "w")
    read_id_str = "{ch}-{start}-{end}".format(
        ch=channel,
        start=start_index,
        end=end_index
    )
    version_num = 0.6
    ch_num = channel
    ch_str = "Channel_{ch}".format(ch=ch_num)
    # Copy run-level metadata groups from the bulk file into the read file.
    ugk = readfile.create_group("UniqueGlobalKey")
    bulkfile.copy('UniqueGlobalKey/context_tags', ugk)
    bulkfile.copy('UniqueGlobalKey/tracking_id', ugk)
    bulkfile.copy("IntermediateData/{ch}/Meta".format(ch=ch_str), ugk)
    # Rename the copied Meta group to channel_id and reshape its attributes
    # to match the single-read FAST5 layout.
    readfile["UniqueGlobalKey"]["channel_id"] = readfile["UniqueGlobalKey"]["Meta"]
    readfile["UniqueGlobalKey"]["channel_id"].attrs.create(
        'sampling_rate',
        readfile["UniqueGlobalKey"]["Meta"].attrs["sample_rate"],
        None,
        dtype='Float64'
    )
    del readfile["UniqueGlobalKey"]["Meta"]
    readfile["UniqueGlobalKey"]["channel_id"].attrs.create('channel_number', ch_num, None, dtype='<S4')
    # Drop bulk-only attributes that have no place in a read file.
    remove_attrs = ["description", "elimit", "scaling_used", "smallest_event", "threshold", "window", "sample_rate"]
    for attr in remove_attrs:
        del readfile["UniqueGlobalKey"]["channel_id"].attrs[attr]
    # Locate the first intermediate-data read entry at/after start_index to
    # recover its median_before and mux values.
    int_data_path = bulkfile["IntermediateData"][ch_str]["Reads"]
    int_dict = {
        'read_start': int_data_path["read_start"],
        'median_before': int_data_path["median_before"],
        'current_well_id': int_data_path["current_well_id"]
    }
    df = pd.DataFrame(data=int_dict)
    df = df.where(df.read_start > start_index).dropna()
    read_number = 0
    # Attribute name -> {value, h5py dtype} for the Read_0 group.
    attrs = {
        'duration': {'val': end_index - start_index, 'd': 'uint32'},
        'median_before': {'val': df.iloc[0].median_before, 'd': 'Float64'},
        'read_id': {'val': read_id_str, 'd': '<S38'},
        'read_number': {'val': read_number, 'd': 'uint16'},
        'start_mux': {'val': int(df.iloc[0].current_well_id), 'd': 'uint8'},
        'start_time': {'val': start_index, 'd': 'uint64'}
    }
    # Slice the raw signal; optionally smooth out pore-level samples first.
    dataset = bulkfile["Raw"][ch_str]["Signal"][()]
    if remove_pore:
        dataset = smooth_pore(dataset[start_index:end_index])
    else:
        dataset = dataset[start_index:end_index]
    readfile.create_group('Raw/Reads/Read_{n}'.format(n=read_number))
    readfile.attrs.create('file_version', version_num, None, dtype='Float64')
    # add read_### attrs
    for k, v in attrs.items():
        readfile["Raw"]["Reads"]["Read_{n}".format(n=read_number)].attrs.create(k, v['val'], None, dtype=v['d'])
    # maxshape uses the maximum uint64 value so the dataset stays resizable.
    ms = [18446744073709551615]
    readfile.create_dataset(
        'Raw/Reads/Read_{n}/Signal'.format(n=read_number),
        data=(dataset),
        maxshape=(ms),
        chunks=True,
        dtype='int16',
        compression="gzip",
        compression_opts=1
    )
    readfile.close()
    return out_filename
def escape_html(s: str) -> str:
    """
    Escape HTML special characters in *s*.
    :param str s: string
    :return: replaced html-string
    """
    # Single-pass translation is equivalent to the sequential replace chain
    # (where '&' was substituted first, so entities were never re-escaped).
    table = str.maketrans({
        '&': '&amp;',
        '<': '&lt;',
        '>': '&gt;',
        '"': '&quot;',
        "'": '&#39;',
    })
    return s.translate(table)
def CSCH(*args) -> Function:
    """
    The CSCH function returns the hyperbolic cosecant of any real number.
    Learn more:
    https://support.google.com/docs/answer/9116336
    """
    return Function("CSCH", args)
def _get_valid_name(proposed_name):
    """Return a slug name for a service, truncated to at most 40 characters."""
    # Slicing is a no-op when the slug is already short enough.
    return slugify(proposed_name)[:40]
def create_pif(headers, row):
    """
    Creates PIFs from lists of table row
    :param headers: header data from the table
    :param row: the row of data
    :return: ChemicalSystem containing the data from that row
    """
    sys_dict = {}
    # Parse header metadata: keywords, display names, units, and the
    # sub-system each column belongs to.
    keywords, names, units, systs = get_header_info(headers)
    sys_dict, all_condition = add_fields(keywords, names, units, systs, sys_dict, row)
    main_system = sys_dict['main']
    main_system.sub_systems = []
    if main_system.properties:
        # Attach the shared conditions to the main system's properties.
        main_system.properties = format_main_prop(main_system.properties, all_condition)
    if main_system.preparation:
        # Drop preparation steps with an empty name.
        main_system.preparation = [step for step in main_system.preparation if step.name != '']
    for item in sys_dict:
        if item != 'main':
            # Keep a sub-system only if its serialized form carries more than
            # one entry (i.e. it holds data beyond its bare identity).
            if len([s for s in sys_dict[item].as_dictionary()]) > 1:
                main_system.sub_systems.append(sys_dict[item])
    return main_system
def update_user(old_email, new_email=None, password=None):
    """Update the email and password of the user.
    Old_email is required, new_email and password are optional, if both parameters are
    empty update_user() will do nothing. Not asking for the current password is
    intentional, creating and updating are only possible while connected to the server
    via SSH. If a malicious person is on your server you got other problems than just
    protecting your blog account.
    :param old_email: the old email address of the user.
    :param new_email: the new email address of the user.
    :param password: the new password of the user.
    :return: True if the user was updated, even if no parameters where given. Otherwise
    it will return False if the user does not exist.
    """
    db.connect()
    try:
        try:
            user = User.get(User.email == old_email)
        except User.DoesNotExist:
            print("The user: {} does not exist".format(old_email))
            return False
        old_hash = user.password
        if new_email:
            user.email = new_email
        if password:
            user.password = bcrypt.hashpw(str.encode(password), bcrypt.gensalt(12))
        user.save()
        print("The user has been updated:\n"
              "old email: {}\n"
              "new email: {}\n"
              "password has been updated: {}".format(old_email,
                                                     old_email if new_email is None else new_email,
                                                     old_hash != user.password))
        return True
    finally:
        # Bug fix: the original leaked the open connection when the user did
        # not exist (db.close() was only reached on the success path).
        db.close()
from typing import Optional
def get_text(text_node: Optional[ET.Element]) -> Optional[str]:
    """Extract the stripped text of an element, or None when the element
    is missing or has no/empty text content."""
    if text_node is None or not text_node.text:
        return None
    return text_node.text.strip()
def function_d(d, d1, d2=1):
    """Return the sum of d, d1 and d2 (d2 defaults to 1)."""
    partial_sum = d + d1
    return partial_sum + d2
from unittest.mock import patch
def qgs_access_control_filter():
    """
    Mock some QgsAccessControlFilter methods:
    - __init__ which does not accept a mocked QgsServerInterface;
    - serverInterface to return the right server_iface.
    """
    class _StubFilter:
        def __init__(self, server_iface):
            self.server_iface = server_iface

        def serverInterface(self):  # noqa: ignore=N806
            return self.server_iface

    patched = patch.multiple(
        "geomapfish_qgisserver.accesscontrol.QgsAccessControlFilter",
        __init__=_StubFilter.__init__,
        serverInterface=_StubFilter.serverInterface,
    )
    with patched as mocks:
        yield mocks
def ppmv2pa(x, p):
    """Convert a gas amount in ppmv to its partial pressure in Pa.

    Parameters
    ----------
    x   Gas pressure [ppmv]
    p   total air pressure [Pa]

    Returns
    -------
    pressure [Pa]
    """
    # Same arithmetic order as the mathematical definition x*p/(1e6+x).
    denominator = 1e6 + x
    return x * p / denominator
def make_elastic_uri(schema: str, user: str, secret: str, hostname: str, port: int) -> str:
    """Make an Elasticsearch URI.
    :param schema: the schema, e.g. http or https.
    :param user: Elasticsearch username.
    :param secret: Elasticsearch secret.
    :param hostname: Elasticsearch hostname.
    :param port: Elasticsearch port.
    :return: the full Elasticsearch URI.
    """
    credentials = f"{user}:{secret}"
    location = f"{hostname}:{port}"
    return f"{schema}://{credentials}@{location}"
def _truncate(s: str, max_length: int) -> str:
"""Returns the input string s truncated to be at most max_length characters
long.
"""
return s if len(s) <= max_length else s[0:max_length] | 52c49c027057024eaa27a705a0d2c013bff7a2ce | 29,366 |
def verify_days_of_week_struct(week, binary=False):
    """Given a dictionary, verify its keys are the correct days of the week
    and that each value is a list of DAY_LENGTH non-negative integers
    (restricted to 0/1 when ``binary`` is True)."""
    # Keys must be exactly the expected day names.
    if set(week.keys()) != set(DAYS_OF_WEEK):
        return False
    for hours in week.values():
        # Each day's value must be a list with one entry per hour slot.
        if not isinstance(hours, list):
            return False
        if len(hours) != DAY_LENGTH:
            return False
        for count in hours:
            # Entries must be non-negative integers.
            if not isinstance(count, int):
                return False
            if count < 0:
                return False
            # In binary mode, only 0 and 1 are allowed.
            if count > 1 and binary is True:
                return False
    return True
def create_attention_mask_from_input_mask(from_tensor, to_mask):
    """Create 3D attention mask from a 2D tensor mask.
    Args:
      from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
      to_mask: int32 Tensor of shape [batch_size, to_seq_length].
    Returns:
      float Tensor of shape [batch_size, from_seq_length, to_seq_length].
    """
    batch_size, from_seq_length = bert_utils.get_shape_list(
        from_tensor, expected_rank=[2, 3])[:2]
    to_seq_length = bert_utils.get_shape_list(to_mask, expected_rank=2)[1]

    # Reshape the target mask so it broadcasts across every "from" position.
    to_mask = tf.cast(
        tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)

    # We don't care whether we attend *from* padding tokens (only *to*
    # padding matters), so the "from" side is all ones.
    broadcast_ones = tf.ones(
        shape=[batch_size, from_seq_length, 1], dtype=tf.float32)

    # Broadcasting the two produces [batch_size, from_seq_length, to_seq_length].
    return broadcast_ones * to_mask
def execute_inspection_visits_data_source(operator_context, return_value, non_data_function_args) -> BackendResult:
    """Execute inspections when the current operator is a data source and does not have parents in the DAG"""
    # pylint: disable=unused-argument
    inspection_count = len(singleton.inspections)
    # One row iterator per registered inspection over the source's output.
    iterators_for_inspections = iter_input_data_source(inspection_count, return_value, operator_context,
                                                      non_data_function_args)
    # Run every inspection visitor over its iterator and collect results.
    return_value = execute_visits_and_store_results(iterators_for_inspections, return_value)
    return return_value
def parse(template, delimiters=None, name='<string>'):
    """
    Parse a template string and return a ParsedTemplate instance.
    Arguments:
      template: a template string.
      delimiters: a 2-tuple of delimiters.  Defaults to the package default.
    Examples:
    >>> parsed = parse(u"Hey {{#who}}{{name}}!{{/who}}")
    >>> print(str(parsed).replace('u', ''))  # This is a hack to get the test to pass both in Python 2 and 3.
    ['Hey ', _SectionNode(key='who', index_begin=12, index_end=21, parsed=[_InterpolateNode(key='name'), '!'])]
    """
    # Raise TypeError (a subclass of the previously-raised bare Exception,
    # so existing `except Exception` callers still work) and accept str
    # subclasses via isinstance.
    if not isinstance(template, str):
        raise TypeError("Template is not str: %s" % type(template))
    parser = _Parser(delimiters)
    return parser.parse(template, name)
def get_phases(t, P, t0):
    """
    Given input times, a period (or posterior dist of periods)
    and time of transit center (or posterior), returns the
    phase at each time t, in [-0.5, 0.5). From juliet =]

    :param t: float or array of times.
    :param P: period (scalar or posterior samples; median is used).
    :param t0: time of transit center (scalar or posterior samples).
    :return: phase(s), same shape as t.
    """
    # Hoist the computation that was duplicated in both branches.
    phase = ((t - np.median(t0)) / np.median(P)) % 1
    if isinstance(t, float):
        # isinstance (instead of `type(t) is float`) also accepts float
        # subclasses such as np.float64, which previously fell through to
        # the array branch and failed.
        if phase >= 0.5:
            phase = phase - 1.0
    else:
        ii = np.where(phase >= 0.5)[0]
        phase[ii] = phase[ii] - 1.0
    return phase
async def my_profile(current_user: User = Depends(get_current_active_user)):
    """GET Current user's information.

    FastAPI route handler: the active user is resolved by the
    `get_current_active_user` dependency and echoed back unchanged.
    """
    return current_user
def are_2d_vecs_collinear(u1, u2):
    """Return True when two 2D vectors are collinear (within TOL_COLLINEAR)."""
    # A vector perpendicular to u1; its dot product with u2 vanishes
    # exactly when u1 and u2 are collinear.
    perpendicular = np.array([-u1[1], u1[0]])
    return np.abs(perpendicular.dot(u2)) < TOL_COLLINEAR
from datetime import datetime
import time
def TimeFromTicks(ticks: int) -> datetime.time:  # pylint: disable=invalid-name
    """
    Constructs an object holding a time value from the given ticks value.
    Ticks should be in number of seconds since the epoch.
    """
    # time.gmtime(ticks)[3:6] is (hour, minute, second) in UTC.
    # NOTE(review): `Time` is presumably the DB-API 2.0 time constructor
    # defined elsewhere in this module -- confirm.
    return Time(*time.gmtime(ticks)[3:6])
import importlib
def load_model(opt, dataloader):
    """ Load model based on the model name.
    Arguments:
        opt {[argparse.Namespace]} -- options
        dataloader {[dict]} -- dataloader class
    Returns:
        [model] -- Returned model
    """
    model_name = opt.model
    print('use model:', model_name)
    # Dynamically import lib.models.<name> and look up the class whose
    # name is the title-cased model name.
    module = importlib.import_module(f"lib.models.{model_name}")
    model_class = getattr(module, model_name.title())
    return model_class(opt, dataloader)
def process(cntrl):
    """ We have all our variables and parameters set in the object, attempt to
    login and post the data to the APIC.

    Returns a (status, message) tuple: status 0 on success with the
    formatted payload, 1 on failure with an error string.
    """
    # NOTE(review): `httplib` below is the Python 2 module name
    # (http.client in Python 3) -- confirm the target interpreter.
    if cntrl.aaaLogin() != 200:
        return (1, "Unable to login to controller")
    rc = cntrl.genericGET()
    if rc == 200:
        return (0, format_content(cntrl.get_content()))
    else:
        # Non-200: report the HTTP status code and its standard reason phrase.
        return (1, "%s: %s" % (rc, httplib.responses[rc]))
from typing import Callable
from typing import Iterator
def create_token_swap_augmenter(
    level: float, respect_ents: bool = True, respect_eos: bool = True
) -> Callable[[Language, Example], Iterator[Example]]:
    """Creates an augmenter that randomly swaps two neighbouring tokens.
    Args:
        level (float): The probability to swap two tokens.
        respect_ents (bool, optional): Should the pipeline respect entities? Defaults to True. In which
            case it will not swap a token inside an entity with a token outside the entity span, unless
            it is a one word span. If False it will disregard correcting the entity labels.
        respect_eos (bool, optional): Should it respect end-of-sentence boundaries? Defaults to True, indicating
            that it will not swap an end-of-sentence token. If False it will disregard correcting the sentence
            start as this becomes arbitrary.
    Returns:
        Callable[[Language, Example], Iterator[Example]]: The augmenter.
    """
    # Bind the configuration into the shared augmenter implementation.
    return partial(
        token_swap_augmenter,
        level=level,
        respect_eos=respect_eos,
        respect_ents=respect_ents,
    )
def getUnigram(str1):
    """
    Input: a whitespace-separated string, e.g., 'I am Denny'
    (the original docstring incorrectly described the input as a list)
    Output: a list of unigram tokens, e.g., ['I', 'am', 'Denny']
    """
    # str.split() always returns a list, so the original
    # `assert type(words) == list` could never fire and was removed.
    return str1.split()
def expval_and_stddev(items, exp_ops=''):
    """Compute expectation values from distributions.
    .. versionadded:: 0.16.0
    Parameters:
        items (list or dict or Counts or ProbDistribution or QuasiDistribution): Input
                                                                                 distributions.
        exp_ops (str or dict or list): String or dict representation of diagonal qubit
                                       operators used in computing the expectation value.
    Returns:
        float : Expectation value.
        ndarray: Array of expectation values
    Notes:
        Cannot mix Counts and dicts with M3 Distributions in the same call.
        The dict operator format is a sparse diagonal format
        using bitstrings as the keys.
    """
    # Delegates to the shared implementation; method=2 selects the
    # expectation-value-with-stddev computation in `_expval_std`.
    return _expval_std(items, exp_ops=exp_ops, method=2)
def newton_polish(polys,root,niter=100,tol=1e-8):
    """
    Perform Newton's method on a system of N polynomials in M variables.
    Parameters
    ----------
    polys : list
        A list of polynomial objects of the same type (MultiPower or MultiCheb).
    root : ndarray
        An initial guess for Newton's method, intended to be a candidate root from root_finder.
    niter : int
        A maximum number of iterations of Newton's method.
    tol : float
        Tolerance for convergence of Newton's method.
    Returns
    -------
    x1 : ndarray
        The terminal point of Newton's method, an estimation for a root of the system
    """
    m = len(polys)
    dim = max(poly.dim for poly in polys)
    # Buffers allocated once and reused by the closures below on every
    # iteration, avoiding a reallocation per Newton step.
    f_x = np.empty(m,dtype="complex_")
    jac = np.empty((m,dim),dtype="complex_")
    def f(x):
        # Evaluate the polynomial system at x into the shared buffer.
        #f_x = np.empty(m,dtype="complex_")
        for i, poly in enumerate(polys):
            f_x[i] = poly(x)
        return f_x
    def Df(x):
        # Evaluate the Jacobian (one gradient row per polynomial) at x.
        #jac = np.empty((m,dim),dtype="complex_")
        for i, poly in enumerate(polys):
            jac[i] = poly.grad(x)
        return jac
    i = 0
    x0, x1 = root, root
    while True:
        if i == niter:
            break
        # Newton step: solve J(x0) * delta = -f(x0).
        delta = np.linalg.solve(Df(x0),-f(x0))
        x1 = delta + x0
        # Converged when the step size drops below the tolerance.
        if np.linalg.norm(delta) < tol:
            break
        x0 = x1
        i+=1
    return x1
def COUNT(logic, n=2):
    """Count, over a rolling window of n periods, how many entries
    satisfy the condition.

    :param logic: boolean Series of per-period conditions.
    :param n: rolling window length.
    :return: Series of rolling counts (NaN until the window fills).
    """
    flags = pd.Series(np.where(logic, 1, 0), index=logic.index)
    return flags.rolling(n).sum()
import yaml
def get_model(name):
    """
    Get the wrapped model given the name. Current supported names:
    "COCO-Detection/retinanet_R_50_FPN";
    "COCO-Detection/retinanet_R_101_FPN";
    "COCO-Detection/faster_rcnn_R_50_FPN";
    "COCO-Detection/faster_rcnn_R_101_FPN";
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN";
    "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN";
    Args:
        -- name (string): model name.
    Returns:
        -- model: wrapped model with visualization function.
        -- args: visualization config.
        -- cfg: detector cfg.
        -- predictor: D2 default predictor instances.
    Raises:
        -- ValueError: if `name` is not one of the supported model names
           (the original fell through to a NameError).
    """
    # (visualization-config path, detectron2 config name, warp function)
    # per supported model -- replaces six near-identical elif branches.
    registry = {
        "COCO-Detection/retinanet_R_50_FPN": (
            "./config/retina.yaml",
            "COCO-Detection/retinanet_R_50_FPN_3x.yaml", warp_retina),
        "COCO-Detection/retinanet_R_101_FPN": (
            "./config/retina.yaml",
            "COCO-Detection/retinanet_R_101_FPN_3x.yaml", warp_retina),
        "COCO-Detection/faster_rcnn_R_50_FPN": (
            "./config/fasterrcnn.yaml",
            "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml", warp_rcnn),
        "COCO-Detection/faster_rcnn_R_101_FPN": (
            "./config/fasterrcnn.yaml",
            "COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml", warp_rcnn),
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN": (
            "./config/maskrcnn.yaml",
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", warp_rcnn),
        "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN": (
            "./config/maskrcnn.yaml",
            "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml", warp_rcnn),
    }
    try:
        args_path, d2_config_name, warp = registry[name]
    except KeyError:
        raise ValueError(f"Unsupported model name: {name!r}") from None
    # `with` guarantees the config stream is closed even if parsing fails.
    with open(args_path, 'r') as stream:
        args = config(yaml.load(stream, Loader=yaml.FullLoader))
    predictor, cfg = load_default_predictor(d2_config_name)
    model = warp(predictor.model)
    model.eval()
    return model, args, cfg, predictor
import os
import requests
import tqdm
import math
import torch
def load_model_params(model, model_name, model_uri, ignore_cache=False, device=None):
    """Load model parameters from disk or from the web.
    Parameters
    ----------
    model : torch.nn.modules.container.Sequential
        The model instance to load the parameters for.
    model_name : str
        The name of the model which should be loaded.
    model_uri : str
        Part of the URL or full URL to the model parameters. If not specified, then the latest version is pulled from
        the internet.
    ignore_cache : bool
        When true, all caches are ignored and the model parameters are forcefully downloaded.
    device : torch.device
        The device to use.
    Returns
    -------
    torch.nn.modules.container.Sequential
        The loaded PyTorch model instance.
    Raises
    ------
    ValueError
        When the model name is not supported, or the download is incomplete.
    """
    model_names = ['refxtract', 'titlextract']
    if model_name not in model_names:
        raise ValueError('The model name should be one of the following: {}.'.format(str(model_names)))
    base_url = "https://github.com/kmjjacobs/citextract-models/blob/master/" + model_name + "/"
    cache_path = os.path.join(user_cache_dir('citextract'), 'models')
    if not os.path.exists(cache_path):
        os.makedirs(cache_path)
    path = os.path.join(cache_path, model_name + '-' + project_settings.get(model_name + '_version') + '.torch')
    if not os.path.exists(path) or ignore_cache:
        url = base_url + model_name + "-" + project_settings.get(model_name + '_version') + ".torch?raw=true"
        if model_uri:
            # A URI containing "://" is treated as a full URL; otherwise it
            # is resolved relative to the repository base URL.
            if '://' in model_uri:
                url = model_uri
            else:
                url = base_url + model_uri
                url = url + ".torch" if ".torch" not in model_uri else url
            url += "?raw=true"
        response = requests.get(url, stream=True)
        total_size = int(response.headers.get('content-length', 0))
        wrote = 0
        with open(path, 'wb') as out_file:
            # Bug fix: the original computed math.ceil(total_size // 1024),
            # where integer division already floors, making ceil a no-op and
            # under-reporting the total by one chunk. Use true division.
            # NOTE(review): this calls `tqdm(...)` while the top of the file
            # does `import tqdm` (the module) -- confirm the file actually
            # has `from tqdm import tqdm` in scope, otherwise this call fails.
            for data in tqdm(response.iter_content(1024), total=math.ceil(total_size / 1024), unit='KB',
                             unit_scale=True):
                wrote = wrote + len(data)
                out_file.write(data)
        if 0 < total_size != wrote:
            raise ValueError('Error downloading the model parameters from URL "' + url + '".')
    model.load_state_dict(torch.load(path), strict=False)
    model.eval()
    return model.to(device)
def energy_sensor(
    gateway_nodes: dict[int, Sensor], energy_sensor_state: dict
) -> Sensor:
    """Load the energy sensor.

    Merges the recorded sensor state into the gateway's node registry and
    returns node id 1, which holds the energy sensor fixture.
    """
    nodes = update_gateway_nodes(gateway_nodes, energy_sensor_state)
    node = nodes[1]
    return node
def compute_one_decoding_video_metrics(iterator, feed_dict, num_videos):
    """Computes the average of all the metric for one decoding.
    Args:
      iterator: dataset iterator.
      feed_dict: feed dict to initialize iterator.
      num_videos: number of videos.
    Returns:
      all_psnr: 2-D Numpy array, shape=(num_samples, num_frames)
      all_ssim: 2-D Numpy array, shape=(num_samples, num_frames)
    """
    # Build the metric graph once; each sess.run below pulls the next video.
    output, target = iterator.get_next()
    metrics = psnr_and_ssim(output, target)
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        initalizer = iterator._initializer  # pylint: disable=protected-access
        if initalizer is not None:
            sess.run(initalizer, feed_dict=feed_dict)
        all_psnr, all_ssim = [], []
        for i in range(num_videos):
            print("Computing video: %d" % i)
            # Each run evaluates PSNR/SSIM for the next video in the dataset.
            psnr_np, ssim_np = sess.run(metrics)
            all_psnr.append(psnr_np)
            all_ssim.append(ssim_np)
        all_psnr = np.array(all_psnr)
        all_ssim = np.array(all_ssim)
        return all_psnr, all_ssim
import uuid
import os
def supplier_rif_file_path(instance, filename):
    """Generate a unique upload path for a new supplier RIF image.

    Keeps the original file's extension but replaces its name with a
    random UUID to avoid collisions.
    """
    extension = filename.split('.')[-1]
    unique_name = '{}.{}'.format(uuid.uuid4(), extension)
    return os.path.join('uploads/supplier/rif/', unique_name)
import os
import hashlib
def get_hash(name):
    """Return an MD5 digest of a sample of the file *name*.

    Only the first and last 64 KiB are hashed (not the whole file).
    Bug fix: for files smaller than 64 KiB the original crashed, because
    seeking to -64 KiB from the end lands before the start of the file
    (OSError); those files are now hashed from a single full read.
    """
    readsize = 64 * 1024
    with open(name, 'rb') as f:
        size = os.path.getsize(name)
        data = f.read(readsize)
        if size >= readsize:
            # Preserve the original behaviour for all sizes that worked
            # before, including the overlapping sample for files between
            # 64 KiB and 128 KiB.
            f.seek(-readsize, os.SEEK_END)
            data += f.read(readsize)
    return hashlib.md5(data).hexdigest()
def geocode_locations(df: gpd.GeoDataFrame, loc_col: str):
    """
    Geocode location names into polygon coordinates
    Parameters
    ----------
    df: Geopandas DataFrame
    loc_col:str
        name of column in df which contains locations
    Returns
    -------
    df with 'geometry' and 'address' columns filled from the geocoder
    (note: mutates the input frame in place and returns it).
    """
    # geopandas' geocode resolves each name via the configured geocoding
    # service, returning a frame with 'geometry' and 'address' columns.
    locations = geocode(df.loc[:, loc_col])
    df["geometry"] = locations.loc[:, "geometry"]
    df["address"] = locations.loc[:, "address"]
    return df
import json
def get_data():
    """Get dummy data returned from the server.

    Returns a JSON Flask-style Response with a fixed 'Heroes' payload.
    """
    # Keep the JWT lookup for its retrieval/validation side effect, but
    # don't bind the result -- the original stored it in a dead local.
    get_jwt()
    data = {'Heroes': ['Hero1', 'Hero2', 'Hero3']}
    json_response = json.dumps(data)
    return Response(json_response,
                    status=Status.HTTP_OK_BASIC,
                    mimetype='application/json')
import pandas
def coerce_integer(df):
    """
    Coerce every numeric column of *df* to integer, filling NaNs with
    zeros. This is somewhat heavy-handed in an attempt to force Esri to
    recognize sparse columns as integers.
    """
    # Numeric columns that must keep their original (float) dtype.
    keep_as_is = ["latitude", "longitude", "zipCode"]

    def to_int(column):
        if column.name in keep_as_is:
            return column
        if not pandas.api.types.is_numeric_dtype(column):
            return column
        return column.fillna(0).astype(int)

    return df.transform(to_int, axis=0)
def read_cfg(floc, cfg_proc=process_cfg):
    """
    Reads the given configuration file, returning a dict with the converted values supplemented by default values.

    :param floc: The location of the file to read.
    :param cfg_proc: The processor to use for the raw configuration values. Uses default values when the raw
        value is missing.
    :return: A dict of the processed configuration file's data.
    :raises InvalidDataError: if a dihedral entry cannot be parsed.
    """
    config = ConfigParser()
    good_files = config.read(floc)
    if good_files:
        main_proc = cfg_proc(dict(config.items(MAIN_SEC)), def_cfg_vals=DEF_CFG_VALS, req_keys=REQ_KEYS)
        if main_proc[NUM]:
            main_proc[NUM] = int(main_proc[NUM])
    else:
        # No readable config file: fall back to defaults only.
        main_proc = {GAU_TPL_FILE: None, CONFIG_NAME: floc}
        for key, def_val in DEF_CFG_VALS.items():
            main_proc[key] = def_val
    main_proc[DIH_DATA] = []
    if main_proc[DIH_ROT] is not None:
        try:
            # Multiple dihedrals are separated by ';', the 5 fields of each
            # by ','.
            dih_list = main_proc[DIH_ROT].split(";")
            for dih in dih_list:
                dih_data = dih.split(",")
                if len(dih_data) != 5:
                    raise IndexError
                # note: RDKit is zero-based with atom indices, thus subtracting one from each number
                dih_data[:4] = [int(x) - 1 for x in dih_data[:4]]
                # noinspection PyTypeChecker
                dih_data[4] = float(dih_data[4])
                main_proc[DIH_DATA].append(dih_data)
        except (ValueError, IndexError):
            raise InvalidDataError("Error in parsing dihedral entry. Enter multiple dihedrals by separating data "
                                   "with a semicolon (';'). Each dihedral should be specified with 5 values, were the "
                                   "first four are one-based integer atom ids, and the last value is the rotation "
                                   "increment in degrees. ")
    if main_proc[MAX_CONF]:
        main_proc[MAX_CONF] = int(main_proc[MAX_CONF])
    return main_proc
import os
def get_jsmol_input(request, pk):
    """Return a statement to be executed by JSmol.
    Go through the atomic structure data subsets of the representative
    data set of the given system. Pick the first one that comes with a
    geometry file and construct the "load data ..." statement for
    JSmol. If there are no geometry files return an empty response.
    """
    dataset = models.Dataset.objects.get(pk=pk)
    if not dataset:
        return HttpResponse()
    if dataset.input_files.exists():
        filename = os.path.basename(
            dataset.input_files.first().dataset_file.path)
        # Bug fix: the original computed `filename` but never used it -- the
        # URL contained a literal placeholder instead of the file name.
        return HttpResponse(
            f'load /media/data_files/dataset_{dataset.pk}/{filename} '
            '{1 1 1}')
    return HttpResponse()
from datetime import datetime
def convert_date(raw_date: str, dataserver=True):
    """
    Convert a raw date field into a value interpretable by the dataserver.

    The input is expected in YYYYMMDD form (the original docstring
    incorrectly said "mddyy").

    :param raw_date: date string in '%Y%m%d' form, e.g. '20200115'.
    :param dataserver: when True, append the dataserver's 'Z' suffix.
    :return: 'MM/DD/YYYY' (dataserver=False) or 'MM/DD/YYYYZ'.
    """
    parsed = datetime.strptime(raw_date, "%Y%m%d")
    output_format = "%m/%d/%YZ" if dataserver else "%m/%d/%Y"
    return parsed.strftime(output_format)
from re import VERBOSE
def getComputerMove(board):
    """
    Given a board and the computer's letter, determine where to move and return that move. \n
    Here is our algorithm for our Tic Tac Toe AI:
    """
    # NOTE(review): VERBOSE is imported from `re` at the top of this file;
    # re.VERBOSE is a truthy regex flag, so these debug prints always fire.
    # It was likely meant to be a module-level boolean -- confirm.
    copy = getBoardCopy(board)
    for i in range(1, NUMBER_SPACES):
        if isSpaceFree(copy, i):
            # Play out the next move on a new copy of the board so we don't affect the actual game
            makeMove(copy, COMPUTER_LETTER, i)
            # Check if the computer could win on their next move, and take it.
            if isWinner(copy, COMPUTER_LETTER):
                if VERBOSE:
                    print("Computer Decison 1: Best Move For Computer")
                return i
            # Check if the player could win on their next move, and block them.
            makeMove(copy, PLAYER_LETTER, i)
            if isWinner(copy, PLAYER_LETTER):
                if VERBOSE:
                    print("Computer Decison 2: Block Players Best Move")
                return i
    # Try to take one of the corners, if they are free.
    computer_next_move = chooseRandomMoveFromList(board, [1, 3, 7, 9])
    if computer_next_move is not None:
        if VERBOSE:
            print("Computer Decison 3: Go For A Corner")
        return computer_next_move
    # Try to take the center, if it is free.
    if isSpaceFree(board, 5):
        if VERBOSE:
            print("Computer Decison 4: Take The Center")
        return 5
    # Move on one of the sides.
    if VERBOSE:
        print("Computer Decison 5: Take A Side")
    return chooseRandomMoveFromList(board, [2, 4, 6, 8])
from .. import Plane
def triangular_prism(p1, p2, p3, height, ret_unique_vertices_and_faces=False):
    """
    Tesselate a triangular prism whose base is the triangle `p1`, `p2`, `p3`.
    If the vertices are oriented in a counterclockwise direction, the prism
    extends from behind them.
    Args:
        p1 (np.ndarray): A 3D point on the base of the prism.
        p2 (np.ndarray): A 3D point on the base of the prism.
        p3 (np.ndarray): A 3D point on the base of the prism.
        height (float): The height of the prism, which should be positive.
        ret_unique_vertices_and_faces (bool): When `True` return a vertex
            array containing the unique vertices and an array of faces (i.e.
            vertex indices). When `False`, return a flattened array of
            triangle coordinates.
    Returns:
        object:
        - With `ret_unique_vertices_and_faces=True`: a tuple containing
          an `6x3` array of vertices and a `8x3` array of triangle faces.
        - With `ret_unique_vertices_and_faces=False`: a `8x3x3` matrix of
          flattened triangle coordinates.
    """
    vg.shape.check(locals(), "p1", (3,))
    vg.shape.check(locals(), "p2", (3,))
    vg.shape.check(locals(), "p3", (3,))
    # Bug fix: the error message says "a number", but the original check
    # only accepted float, rejecting valid integer heights.
    if not isinstance(height, (int, float)):
        raise ValueError("`height` should be a number")
    base_plane = Plane.from_points(p1, p2, p3)
    lower_base_to_upper_base = height * -base_plane.normal
    vertices = np.vstack(([p1, p2, p3], [p1, p2, p3] + lower_base_to_upper_base))
    faces = np.array(
        [
            [0, 1, 2],  # base
            [0, 3, 4],
            [0, 4, 1],  # side 0, 3, 4, 1
            [1, 4, 5],
            [1, 5, 2],  # side 1, 4, 5, 2
            [2, 5, 3],
            [2, 3, 0],  # side 2, 5, 3, 0
            [5, 4, 3],  # base
        ],
    )
    return _maybe_flatten(vertices, faces, ret_unique_vertices_and_faces)
def chain_species_base(base, basesite, subunit, site1, site2, size, comp=1):
    """
    Return a MonomerPattern representing a chained species, chained to a base complex.
    Parameters
    ----------
    base : Monomer or MonomerPattern
        The base complex to which the growing chain will be attached.
    basesite : string
        Name of the site on complex where first subunit binds.
    subunit : Monomer or MonomerPattern
        The subunit of which the chain is composed.
    site1, site2 : string
        The names of the sites where one copy of `subunit` binds to the next.
    size : integer
        The number of subunits in the chain.
    comp : optional; a ComplexPattern to which the base molecule is attached.
    Returns
    -------
    A ComplexPattern corresponding to the chain.
    Notes
    -----
    Similar to pore_species, but never closes the chain.
    Examples
    --------
    Get the ComplexPattern object representing a chain of size 4 bound to a base, which is itself bound to a complex:
        Model()
        Monomer('Base', ['b1', 'b2'])
        Monomer('Unit', ['p1', 'p2'])
        Monomer('Complex1', ['s1'])
        Monomer('Complex2', ['s1', 's2'])
        chain_tetramer = chain_species_base(Base(b1=1, b2=ANY), 'b1', Unit, 'p1', 'p2', 4, Complex1(s1=ANY) % Complex2(s1=ANY, s2=ANY))
    Execution::
        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('Unit', ['p1', 'p2'])
        Monomer('Unit', ['p1', 'p2'])
        >>> Monomer('Base', ['b1', 'b2'])
        Monomer('Base', ['b1', 'b2'])
        >>> Monomer('Complex1', ['s1'])
        Monomer('Complex1', ['s1'])
        >>> Monomer('Complex2', ['s1', 's2'])
        Monomer('Complex2', ['s1', 's2'])
        >>> chain_species_base(Base(b2=ANY), 'b1', Unit, 'p1', 'p2', 4, Complex1(s1=ANY) % Complex2(s1=ANY, s2=ANY))
        MatchOnce(Complex1(s1=ANY) % Complex2(s1=ANY, s2=ANY) % Base(b1=1, b2=ANY) % Unit(p1=1, p2=2) % Unit(p1=2, p2=3) % Unit(p1=3, p2=4) % Unit(p1=4, p2=None))
    """
    _verify_sites(base, basesite)
    _verify_sites(subunit, site1, site2)
    if size <= 0:
        raise ValueError("size must be an integer greater than 0")
    # Attach the base to the surrounding complex (if any); bond 1 links the
    # base's attachment site to the first subunit.
    if comp == 1:
        compbase = base({basesite: 1})
    else:
        compbase = comp % base({basesite: 1})
    if size == 1:
        chainlink = compbase % subunit({site1: 1, site2: None})
    elif size == 2:
        chainlink = compbase % subunit({site1: 1, site2: 2}) % \
                    subunit({site1: 2, site2: None})
    else:
        # build up a ComplexPattern, starting with a single subunit
        chainbase = compbase
        chainlink = chainbase % subunit({site1: 1, site2: 2})
        for i in range(2, size):
            chainlink %= subunit({site1: i, site2: i+1})
        # The last subunit's forward site stays unbound (chain never closes).
        chainlink %= subunit({site1: size, site2: None})
        chainlink.match_once = True
    return chainlink
def find_one_item(itemname):
    """
    GET the one item in the shop whose title matches itemname.
    :param itemname: The title to look for in the shop.
    :type itemname: str
    :return: dict(str, Decimal, int). A dict representing the requested item.
    :raise: werkzeug.exceptions.NotFound
    """
    try:
        return MY_SHOP.get(itemname).dict()
    except KeyError:
        # abort(404) raises werkzeug's NotFound, so this never returns.
        abort(404, "There's no product named {}!".format(itemname))
import string
def PromGraph(data_source, title, expressions, **kwargs):
    """Create a graph that renders Prometheus data.
    :param str data_source: The name of the data source that provides
        Prometheus data.
    :param title: The title of the graph.
    :param expressions: List of tuples of (legend, expr), where 'expr' is a
        Prometheus expression. Or a list of dict where keys are Target's args.
    :param kwargs: Passed on to Graph.
    """
    ref_ids = string.ascii_uppercase
    expressions = list(expressions)
    if len(expressions) > len(ref_ids):
        raise ValueError(
            'Too many expressions. Can support at most {}, but got {}'.format(
                len(ref_ids), len(expressions)))
    # Dict-style expressions are forwarded verbatim as Target kwargs;
    # otherwise each entry is a (legend, expr) pair.
    if all(isinstance(expr, dict) for expr in expressions):
        targets = [
            G.Target(refId=ref_id, **spec)
            for spec, ref_id in zip(expressions, ref_ids)]
    else:
        targets = [
            G.Target(expr=expr, legendFormat=legend, refId=ref_id)
            for (legend, expr), ref_id in zip(expressions, ref_ids)]
    return G.Graph(
        title=title,
        dataSource=data_source,
        targets=targets,
        **kwargs
    )
import os
def create_output_subdirectory(subdirectory: str) -> str:
    """ Create (if needed) a subdirectory in the configured output
    directory and return its absolute path. """
    target = os.path.join(GLOBAL_CONFIG['output_directory_path'], subdirectory)
    target = os.path.abspath(target)
    # exist_ok makes repeated calls idempotent.
    os.makedirs(target, exist_ok=True)
    return target
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.