| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
import pathlib
from datetime import datetime
import traceback
import xarray as xr
def parse_amwg_obs(file):
    """Parser for AMWG atmospheric observational data files."""
file = pathlib.Path(file)
info = {}
try:
stem = file.stem
split = stem.split('_')
source = split[0]
temporal = split[-2]
if len(temporal) == 2:
month_number = int(temporal)
time_period = 'monthly'
temporal = datetime(2020, month_number, 1).strftime('%b').upper()
elif temporal == 'ANN':
time_period = 'annual'
else:
time_period = 'seasonal'
with xr.open_dataset(file, chunks={}, decode_times=False) as ds:
variable_list = [var for var in ds if 'long_name' in ds[var].attrs]
info = {
'source': source,
'temporal': temporal,
'time_period': time_period,
'variable': variable_list,
'path': str(file),
}
return info
except Exception:
return {INVALID_ASSET: file, TRACEBACK: traceback.format_exc()}
|
60e00a6d2c9ac426d20e1d02f6baab05e0619988
| 3,642,700
|
import numpy as np
def taylor(x,f,i,n):
"""taylor(x,f,i,n):
This function approximates the function f over the domain x,
using a taylor expansion centered at x[i]
with n+1 terms (starts counting from 0).
Args:
x: The domain of the function
f: The function that will be expanded/approximated
i: The ith term in the domain x that the expansion is centered around
n: The number of terms in the expansion (n+1 terms)
Returns:
(x,fapprox): A pair of numpy arrays where x is the original domain array and
f approx is the approximation of f over all of the domain points x using the
taylor expansion.
"""
a = x[i]
N = np.size(x)
fa = f[i]*np.ones_like(x)
D = ac.derivative(x[0],x[N-1],N)
fact = 1
    fapprox = fa.copy()  # copy so the in-place update below does not also mutate fa
Dk = np.eye(N)
for k in range(1,n+1):
fact = fact*k
Dk = np.matmul(Dk,D)
#fapprox += (np.matmul(np.matmul(Dk,fa),((x-a)**k)))/fact
fapprox = np.add(fapprox, (np.matmul(np.matmul(Dk,fa),((x-a)**k)))/fact, out=fapprox, casting="unsafe")
return (x,fapprox)
|
c6d5ed8b583dba8959554bb761e7a961c84624e8
| 3,642,701
|
def precision_and_recall_at_k(ground_truth, prediction, k=-1):
"""
:param ground_truth:
:param prediction:
:param k: how far down the ranked list we look, set to -1 (default) for all of the predictions
:return:
"""
if k == -1:
k = len(prediction)
prediction = prediction[0:k]
numer = len(set(ground_truth).intersection(set(prediction)))
prec = numer / k
recall = numer / len(ground_truth)
return prec, recall
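# Illustrative usage sketch: with ground_truth = [1, 2, 3] and the ranked
# prediction [2, 4, 1, 5] cut at k=2, the overlap is {2}, so
# precision = 1/2 = 0.5 and recall = 1/3.
example_prec, example_recall = precision_and_recall_at_k([1, 2, 3], [2, 4, 1, 5], k=2)
assert example_prec == 0.5 and abs(example_recall - 1 / 3) < 1e-9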
|
cf8543279c6d7874f99c5badeb3064b621fa36a4
| 3,642,702
|
def bubble_sort(array: list, key_func=lambda x: x) -> list:
"""
best:O(N) avg:O(N^2) worst:O(N^2)
"""
    if key_func is not None:
        assert callable(key_func)
for pos in range(0, len(array)):
for idx in range(0, len(array) - pos - 1):
if key_func(array[idx]) > key_func(array[idx + 1]):
array[idx], array[idx + 1] = array[idx + 1], array[idx]
return array
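# Illustrative usage sketch: the default key sorts ascending, while a custom
# key_func can reverse the order; the list is sorted in place and returned.
assert bubble_sort([3, 1, 2]) == [1, 2, 3]
assert bubble_sort([3, 1, 2], key_func=lambda x: -x) == [3, 2, 1]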
|
40e0baa0f9a36e73b2020db8c07c332dc973b919
| 3,642,703
|
from qiskit.aqua.operators import MatrixOperator
from qiskit.aqua.operators.legacy.op_converter import to_weighted_pauli_operator
import scipy.sparse
import numpy as np
def limit_paulis(mat, n=5, sparsity=None):
"""
Limits the number of Pauli basis matrices of a hermitian matrix to the n
highest magnitude ones.
Args:
mat (np.ndarray): Input matrix
        n (int): number of surviving Pauli matrices (default=5)
        sparsity (float): target fraction of nonzero entries (< 1); if given, overrides n
Returns:
scipy.sparse.csr_matrix: matrix
"""
# pylint: disable=import-outside-toplevel
# Bringing matrix into form 2**Nx2**N
__l = mat.shape[0]
if np.log2(__l) % 1 != 0:
k = int(2 ** np.ceil(np.log2(__l)))
m = np.zeros([k, k], dtype=np.complex128)
m[:__l, :__l] = mat
m[__l:, __l:] = np.identity(k - __l)
mat = m
# Getting Pauli matrices
# pylint: disable=invalid-name
op = MatrixOperator(matrix=mat)
op = to_weighted_pauli_operator(op)
paulis = sorted(op.paulis, key=lambda x: abs(x[0]), reverse=True)
g = 2**op.num_qubits
mat = scipy.sparse.csr_matrix(([], ([], [])), shape=(g, g),
dtype=np.complex128)
# Truncation
if sparsity is None:
for pa in paulis[:n]:
mat += pa[0] * pa[1].to_spmatrix()
else:
idx = 0
while mat[:__l, :__l].nnz / __l ** 2 < sparsity:
mat += paulis[idx][0] * paulis[idx][1].to_spmatrix()
idx += 1
n = idx
mat = mat.toarray()
return mat[:__l, :__l]
|
cd0b88316eb9cebda2a58ce30384f22fed17cb54
| 3,642,704
|
def list_clusters(configuration: Configuration = None,
secrets: Secrets = None) -> AWSResponse:
"""
List EKS clusters available to the authenticated account.
"""
client = aws_client("eks", configuration, secrets)
logger.debug("Listing EKS clusters")
return client.list_clusters()
|
289ef257e65cc68400a0562d647d4b694b4ec3bf
| 3,642,705
|
def tar_cat(tar, path):
"""
Reads file and returns content as bytes
"""
mem = tar.getmember(path)
with tar.extractfile(mem) as f:
return f.read()
|
f07f00156c34bd60eea7fcae5d923ea9f1650f6f
| 3,642,706
|
def __get_base_name(input_path):
""" /foo/bar/test/folder/image_label.ext --> test/folder/image_label.ext """
return '/'.join(input_path.split('/')[-3:])
|
5df2ef909f4b570cf6b6224031ad705d16ffff42
| 3,642,707
|
def or_ipf28(xpath):
"""change xpath to match ipf <2.8 or >2.9 (for noise range)"""
xpath28 = xpath.replace('noiseRange', 'noise').replace('noiseAzimuth', 'noise')
if xpath28 != xpath:
xpath += " | %s" % xpath28
return xpath
|
7bf508c48d5a6fc09edba340e2bfc9ec13513fc8
| 3,642,708
|
def make_form(x, current_dict, publication_dict):
"""Create or update a Taxon of rank Form.
Some forms have no known names between species and form.
These keep the form name in the ``infra_name`` field.
e.g.
Caulerpa brachypus forma parvifolia
Others have a known subspecies/variety/subvariety name in the
``infra_name`` field, and keep the form name in ``infra_name2``:
e.g.
Caulerpa cupressoides var. lycopodium forma elegans
    Arguments
    x An instance of HbvSpecies, rank_name "Form"
    current_dict A lookup dict for is_current
    publication_dict A lookup dict for publication_status
Return The created or updated instance of Taxon.
"""
if not tax_models.HbvParent.objects.filter(name_id=x.name_id).exists():
        logger.warning("[make_form] missing HbvParent with name_id {0}".format(x.name_id))
return None
parent_nid = tax_models.HbvParent.objects.get(name_id=x.name_id).parent_nid
dd = dict(
name=force_text(x.infra_name) if force_text(
x.infra_rank) == 'forma' else force_text(x.infra_name2),
rank=tax_models.Taxon.RANK_FORMA,
current=current_dict[x.is_current],
parent=tax_models.Taxon.objects.get(name_id=parent_nid),
author=x.author.replace("(", "").replace(")", "").strip() if x.author else "",
field_code=x.species_code
)
if x.informal is not None:
dd['publication_status'] = publication_dict[x.informal]
obj, created = tax_models.Taxon.objects.update_or_create(name_id=x.name_id, defaults=dd)
action = "Created" if created else "Updated"
logger.info("[make_form] {0} {1}.".format(action, obj))
return obj
|
43ce623d67db4142f804b0c01f8e6690831387cd
| 3,642,709
|
from functools import wraps
from flask import render_template
def render_view(func):
"""
Render this view endpoint's specified template with the provided context, with additional context parameters
as specified by context_config().
@app.route('/', methods=['GET'])
@render_view
def view_function():
return 'template_name.html', {'context': 'details'}
"""
@wraps(func)
def decorated_view(*args, **kwargs):
template, context = func(*args, **kwargs)
if 'config' in context:
context['config'].update(context_config())
else:
context['config'] = context_config()
return render_template(template, **context)
return decorated_view
|
07e2f4394af5c774d3aad421cacd8f89448525b6
| 3,642,710
|
from PIL import Image as PImage
def extract_and_resize_frames(path, resize_to=None):
"""
Iterate the GIF, extracting each frame and resizing them
Returns:
An array of all frames
"""
mode = analyseImage(path)["mode"]
im = PImage.open(path)
if not resize_to:
resize_to = (im.size[0] // 2, im.size[1] // 2)
i = 0
p = im.getpalette()
last_frame = im.convert("RGBA")
all_frames = []
try:
while True:
# print("saving %s (%s) frame %d, %s %s" % (path, mode, i, im.size, im.tile))
"""
If the GIF uses local colour tables, each frame will have its own palette.
If not, we need to apply the global palette to the new frame.
"""
if not im.getpalette():
im.putpalette(p)
new_frame = PImage.new("RGBA", im.size)
"""
Is this file a "partial"-mode GIF where frames update a region of a different size to the entire image?
If so, we need to construct the new frame by pasting it on top of the preceding frames.
"""
if mode == "partial":
new_frame.paste(last_frame)
new_frame.paste(im, (0, 0), im.convert("RGBA"))
new_frame.thumbnail(resize_to, PImage.ANTIALIAS)
all_frames.append(new_frame)
i += 1
last_frame = new_frame
im.seek(im.tell() + 1)
except EOFError:
pass
return all_frames
|
27632e7485f98697b4bfe1fbb6aeaee18a29b5db
| 3,642,711
|
def voter_star_off_save_doc_view(request):
"""
Show documentation about voterStarOffSave
"""
url_root = WE_VOTE_SERVER_ROOT_URL
template_values = voter_star_off_save_doc.voter_star_off_save_doc_template_values(url_root)
template_values['voter_api_device_id'] = get_voter_api_device_id(request)
return render(request, 'apis_v1/api_doc_page.html', template_values)
|
2f00fc92c43e7ebb6541f0e9e5bd773d3e3168a2
| 3,642,712
|
import torch
import torch.nn.functional as F
import einops
from torch.nn.modules.utils import _triple
def generate_offsets(size_map, flow_map=None, kernel_shape=(3, 3, 3), dilation=(1, 1, 1)):
"""
Generates offsets for deformable convolutions from scalar maps.
Maps should be of shape NxCxDxHxW, i.e. one set of parameters for every
    pixel. ``size_map`` expects a single channel,
    and ``flow_map`` should have 2 channels (or 4: forward and backward flow stacked)
"""
kernel_shape = _triple(kernel_shape)
dilation = _triple(dilation)
zeros = torch.zeros_like(size_map)
if flow_map is None:
flow_map = torch.cat([zeros, zeros], dim=1)
"""
at each pixel, size_map predicts the size of the object at that pixel
0. Inverse optical flow if not given
1. Sample sizes from previous/next frame according to optical flow
2. scale, and shift kernel shape
a. Multiply with sizes
b. Add optical flow shift
"""
if flow_map.shape[1] == 2:
flow_prev = flow_map
# TODO: properly inverse optical flow
flow_next = -flow_map
else:
flow_prev, flow_next = torch.split(flow_map, [2, 2], dim=1)
kz, ky, kx = kernel_shape
assert kz == ky == kx == 3, "not implemented for kernel shapes != 3x3x3"
N, _, D, H, W = size_map.shape
grid = grid_like(size_map)
grid_prev = einops.rearrange(grid + flow_prev, "n zyx d h w -> n d h w zyx")
grid_next = einops.rearrange(grid + flow_next, "n zyx d h w -> n d h w zyx")
size_prev = F.grid_sample(size_map, grid_prev, align_corners=False)
size_next = F.grid_sample(size_map, grid_next, align_corners=False)
base_offset = get_kernel_grid(kernel_shape, dilation, size_map.device)
base_offset = einops.rearrange(base_offset, "zyx kz ky kx -> kz ky kx zyx 1 1 1", zyx=3)
def _inflate(m):
return torch.cat([zeros, m, m], dim=1)
offsets = torch.cat([
base_offset[i] * _inflate(size)[:, None, None, None, ...]
for i, size in enumerate((size_prev, size_map, size_next))
], dim=1)
offsets = offsets + flow_map
return offsets - base_offset
|
f86bd6a87b887900820eb28e1a1e9f0c796e6195
| 3,642,713
|
import colorsys
import matplotlib.colors as mc
def lighten_color(color, amount=0.5):
""" Lightens the given color by multiplying (1-luminosity) by the given amount.
Input can be matplotlib color string, hex string, or RGB tuple.
Examples:
>> lighten_color("g", 0.3)
>> lighten_color("#F034A3", 0.6)
>> lighten_color((.3,.55,.1), 0.5)
"""
try:
c = mc.cnames[color]
    except KeyError:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
c = colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
return mc.to_hex(c)
|
4ef801fff6cb145a687a62dd18725258f1534abd
| 3,642,714
|
def unf_gas_density_kgm3(t_K, p_MPaa, gamma_gas, z):
"""
Equation for gas density
    :param t_K: temperature, K
    :param p_MPaa: absolute pressure, MPa
    :param gamma_gas: gas specific gravity relative to air
    :param z: z-factor (gas compressibility factor)
    :return: gas density, kg/m3
"""
m = gamma_gas * 0.029
p_Pa = 10 ** 6 * p_MPaa
rho_gas = p_Pa * m / (z * 8.31 * t_K)
return rho_gas
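# Illustrative worked example of the ideal-gas form rho = p*M / (z*R*T): for an
# air-like gas (gamma_gas = 1, so M = 0.029 kg/mol) at p = 0.101325 MPa,
# T = 300 K, z = 1, rho = 101325 * 0.029 / (8.31 * 300) ~= 1.18 kg/m3.
rho_example = unf_gas_density_kgm3(300.0, 0.101325, 1.0, 1.0)
assert abs(rho_example - 1.18) < 0.01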
|
6e41802367bbe70ab505ae5db89ee3e9a32e7d7c
| 3,642,715
|
def poll():
"""Get Modbus agent data.
Performance data from Modbus enabled targets.
Args:
None
Returns:
agentdata: AgentPolledData object for all data gathered by the agent
"""
# Initialize key variables.
config = Config()
_pi = config.polling_interval()
# Initialize AgentPolledData
agent_program = PATTOO_AGENT_OPCUAD
agentdata = AgentPolledData(agent_program, _pi)
# Get registers to be polled
tpp_list = config.target_polling_points()
arguments = [(tpp,) for tpp in tpp_list]
# Poll registers for all targets and update the TargetDataPoints
target_datapoints_list = _parallel_poller(arguments)
agentdata.add(target_datapoints_list)
# Return data
return agentdata
|
b922b59c659e40ddc2b341a4605465d53e8cdaa8
| 3,642,716
|
import time
import numpy as np
def dot_product_timer(x_shape=(5000, 5000),
y_shape=(5000, 5000),
mean=0,
std=10,
seed=8053):
"""
A timer for the formula array1.dot(array2).
Inputs:
x_shape: Tuple of 2 Int
Shape of array1;
y_shape: Tuple of 2 Int
Shape of array2;
mean: Float
Mean of the normal distribution used for random
selection of elements of array1 and array2;
std: Float
Standard deviation of the normal distribution used for random
selection of elements of array1 and array2;
seed: Int
Seed used in np.random.seed
Outputs:
delta_t: Float
Number of seconds for completing array1.dot(array2);
"""
np.random.seed(seed)
array1 = np.random.normal(mean, std, x_shape)
array2 = np.random.normal(mean, std, y_shape)
start = time.time()
array1.dot(array2)
delta_t = time.time() - start
return delta_t
|
91d03070ea6efbe8d29e7bb5323aad0f20cead90
| 3,642,717
|
import os
def availible_files(path:str, contains:str='') -> list:
    """Returns the available files in a directory
    Args:
        path(str): Path to directory
        contains(str, optional): Substring the file names must contain (Default value = '')
    Returns:
        list: Names of files in ``path`` whose names contain ``contains``
    """
return [f for f in os.listdir(path) if contains in f]
|
c43db03d06d849c017382daee9715c8dde91b61d
| 3,642,718
|
import os
import paddle.fluid as fluid
def init_pretraining_params(exe,
pretraining_params_path,
main_program):
"""init pretraining params"""
    assert os.path.exists(pretraining_params_path), \
        "[%s] can't be found." % pretraining_params_path
def existed_params(var):
if not isinstance(var, fluid.framework.Parameter):
return False
return os.path.exists(os.path.join(pretraining_params_path, var.name))
fluid.io.load_vars(
exe,
pretraining_params_path,
main_program=main_program,
predicate=existed_params)
print("Load pretraining parameters from {}.".format(
pretraining_params_path))
|
d15b3fbf2933e5f286d785c6debe309b0e1fa6c2
| 3,642,719
|
import numpy as np
def npareatotal(values, areaclass):
    """
    numpy area total procedure
    :param values: array of values to sum
    :param areaclass: integer array of zone labels, same length as values
    :return: array where each element holds the total of ``values`` over its zone
    """
return np.take(np.bincount(areaclass,weights=values),areaclass)
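# Illustrative usage sketch: each element is replaced by the total of ``values``
# over its zone, e.g. zones [0, 0, 1, 1] with values [1, 2, 3, 4] give per-zone
# totals [3, 7], broadcast back to [3, 3, 7, 7].
example_totals = npareatotal(np.array([1.0, 2.0, 3.0, 4.0]), np.array([0, 0, 1, 1]))
assert np.array_equal(example_totals, np.array([3.0, 3.0, 7.0, 7.0]))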
|
b5e79c7648569b84a9fad77dd9ee555392a676ab
| 3,642,720
|
from AeroelasticSE.FusedFAST import openFAST
def create_aerocode_wrapper(aerocode_params, output_params, options):
""" create wind code wrapper"""
solver = 'FAST'
# solver = 'HAWC2'
if solver=='FAST':
## TODO, changed when we have a real turbine
# aero code stuff: for constructors
w = openFAST(output_params) ## need better name than output_params
# w = openFAST(None, atm, output_params) ## need better name than output_params
w.setOutput(output_params)
elif solver == 'HAWC2':
w = openHAWC2(None)
        raise NotImplementedError("HAWC2 aerocode wrapper not implemented in runBatch.py yet")
else:
        raise ValueError("unknown aerocode: %s" % solver)
return w
|
1a530607555ce2714348184806405e68185f4012
| 3,642,721
|
import numpy as np
import scipy.linalg
def lqr_ofb_cost(K, R, Q, X, ss_o):
# type: (np.array, np.array, np.array, np.array, control.ss) -> np.array
"""
Cost for LQR output feedback optimization.
    @K gain matrix
    @R control weight matrix
    @Q process noise covariance matrix
@X initial state covariance matrix
@ss_o open loop state space system
@return cost
"""
K = np.matrix(K).T
A = np.matrix(ss_o.A)
B = np.matrix(ss_o.B)
C = np.matrix(ss_o.C)
A_c = A - B * K * C
Q_c = C.T * K.T * R * K * C + Q
P = scipy.linalg.solve_lyapunov(A_c.T, -Q_c)
J = np.trace(P * X)
return J
|
b0a22685c640c2970dad63628d1c87aac73b241a
| 3,642,722
|
import numpy as np
import scipy.sparse as sp
import scipy.linalg as la
from scipy.sparse.linalg import splu
from qutip import Qobj, liouvillian, steadystate
def steadystate_floquet(H_0, c_ops, Op_t, w_d=1.0, n_it=3, sparse=False):
"""
Calculates the effective steady state for a driven
system with a time-dependent cosinusoidal term:
.. math::
\\mathcal{\\hat{H}}(t) = \\hat{H}_0 +
\\mathcal{\\hat{O}} \\cos(\\omega_d t)
Parameters
----------
H_0 : :obj:`~Qobj`
A Hamiltonian or Liouvillian operator.
c_ops : list
A list of collapse operators.
Op_t : :obj:`~Qobj`
        The interaction operator which is multiplied by the cosine
w_d : float, default 1.0
The frequency of the drive
n_it : int, default 3
The number of iterations for the solver
sparse : bool, default False
Solve for the steady state using sparse algorithms.
Actually, dense seems to be faster.
Returns
-------
dm : qobj
Steady state density matrix.
.. note::
See: Sze Meng Tan,
https://copilot.caltech.edu/documents/16743/qousersguide.pdf,
Section (10.16)
"""
if sparse:
N = H_0.shape[0]
L_0 = liouvillian(H_0, c_ops).data.tocsc()
L_t = liouvillian(Op_t)
L_p = (0.5 * L_t).data.tocsc()
# L_p and L_m correspond to the positive and negative
# frequency terms respectively.
# They are independent in the model, so we keep both names.
L_m = L_p
L_p_array = L_p.todense()
L_m_array = L_p_array
Id = sp.eye(N ** 2, format="csc", dtype=np.complex128)
S = T = sp.csc_matrix((N ** 2, N ** 2), dtype=np.complex128)
for n_i in np.arange(n_it, 0, -1):
L = sp.csc_matrix(L_0 - 1j * n_i * w_d * Id + L_m.dot(S))
L.sort_indices()
LU = splu(L)
S = - LU.solve(L_p_array)
L = sp.csc_matrix(L_0 + 1j * n_i * w_d * Id + L_p.dot(T))
L.sort_indices()
LU = splu(L)
T = - LU.solve(L_m_array)
M_subs = L_0 + L_m.dot(S) + L_p.dot(T)
else:
N = H_0.shape[0]
L_0 = liouvillian(H_0, c_ops).full()
L_t = liouvillian(Op_t)
L_p = (0.5 * L_t).full()
L_m = L_p
Id = np.eye(N ** 2)
S, T = np.zeros((N ** 2, N ** 2)), np.zeros((N ** 2, N ** 2))
for n_i in np.arange(n_it, 0, -1):
L = L_0 - 1j * n_i * w_d * Id + np.matmul(L_m, S)
lu, piv = la.lu_factor(L)
S = - la.lu_solve((lu, piv), L_p)
L = L_0 + 1j * n_i * w_d * Id + np.matmul(L_p, T)
lu, piv = la.lu_factor(L)
T = - la.lu_solve((lu, piv), L_m)
M_subs = L_0 + np.matmul(L_m, S) + np.matmul(L_p, T)
return steadystate(Qobj(M_subs, type="super", dims=L_t.dims))
|
8e59e8f138116877678d7d203d4767c6fc6bd1fa
| 3,642,723
|
def gsl_blas_dtrmm(*args, **kwargs):
"""
gsl_blas_dtrmm(CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA,
CBLAS_DIAG_t Diag, double alpha,
gsl_matrix A, gsl_matrix B) -> int
"""
return _gslwrap.gsl_blas_dtrmm(*args, **kwargs)
|
5efee7571f49afc20c3f33d010caccb199613315
| 3,642,724
|
def scale(value, upper, lower, min_, max_):
"""Scales value between upper and lower values, depending on the given
    minimum and maximum value.
"""
numerator = ((lower - upper) * float((value - min_)))
denominator = float((max_ - min_))
return numerator / denominator + upper
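# Illustrative worked example of the formula
# ((lower - upper) * (value - min_)) / (max_ - min_) + upper:
# with upper=0, lower=10, min_=0, max_=10 and value=5 this gives (10*5)/10 + 0 = 5.0.
assert scale(5, upper=0, lower=10, min_=0, max_=10) == 5.0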
|
3e13c80b765cffb1e75a6856d343bd9a88c353e9
| 3,642,725
|
def conditional_response(view, video=None, **kwargs):
"""
Redirect to login page if user is anonymous and video is private.
Raise a permission denied error if user is logged in but doesn't have permission.
Otherwise, return standard template response.
Args:
view(TemplateView): a video-specific View object (ViewDetail, ViewEmbed, etc).
video(ui.models.Video): a video to display with the view
Returns:
TemplateResponse: the template response to render
"""
if not ui_permissions.has_video_view_permission(video, view.request):
if view.request.user.is_authenticated:
raise PermissionDenied
else:
return redirect_to_login(view.request.get_full_path())
context = view.get_context_data(video, **kwargs)
return view.render_to_response(context)
|
ea8e8176a979fcd46c0c72d5201c6f85c7b4ed48
| 3,642,726
|
def Flatten(nmap_list):
"""Flattens every `.NestedMap` in nmap_list and concatenate them."""
ret = []
for x in nmap_list:
ret += x.Flatten()
return ret
|
c630869b725d69338830e1a14ef920d6d1e87ade
| 3,642,727
|
from pyspark.sql import types as T
def get_data_schema() -> T.StructType:
"""
Return the kafka data schema
"""
return T.StructType(
[T.StructField('key', T.StringType()),
T.StructField('message', T.StringType())]
)
|
0cbc2fc6e7015c458e70b8d0ec6efb5fbc0d84f5
| 3,642,728
|
from typing import List
import random
import numpy as np
def build_graph(num: int = 0) -> (int, List[int]):
"""Build a graph of num nodes."""
if num < 3:
raise app.UsageError('Must request graph of at least 3 nodes.')
weight = 5.0
nodes = [(0, 1, 1.0), (1, 2, 2.0), (0, 2, 3.0)]
for i in range(num-3):
l = random.sample(range(0, 3 + i - 1), 2)
nodes.append((3 + i, l[0],
weight*np.random.random()))
nodes.append((3 + i, l[1],
weight*np.random.random()))
return num, nodes
|
05efb60ae5cdcc561c93cf2faba172ca5a3ff0d7
| 3,642,729
|
from typing import List
from typing import Collection
import numpy as np
def concatenate(boxes_list:List[Boxes], fields:Collection[str]=None) -> Boxes:
"""Merge multiple boxes to a single instance
B = A[:10]
C = A[10:]
D = concatenate([A, B])
D should be equal to A
"""
if not boxes_list:
if fields is None:
fields = []
return empty(*fields)
if fields is None:
# Get fields common to all sub-boxes
common_fields = set.intersection( *[set(x.get_fields()) for x in boxes_list] )
else:
common_fields = fields
coords = np.concatenate([x.get() for x in boxes_list], axis=0)
new_fields = dict()
for f in common_fields:
new_fields[f] = np.concatenate([x.get_field(f) for x in boxes_list], axis=0)
return Boxes(coords, **new_fields)
|
096067aea3d01e984befd2cadfce5a86c33580e9
| 3,642,730
|
import numpy as np
def detect_peaks(array, freq=0, cthr=0.2, unprocessed_array=False, fs=44100):
"""
    Function detects the peaks in array, based on the mirpeaks algorithm.
    :param array: Array in which to detect peaks
    :param freq: Scale representing the x axis (same length as array)
    :param cthr: Threshold for checking adjacent peaks
    :param unprocessed_array: Unprocessed (un-normalised) array; if False, defaults to the same as array.
    :param fs: Sample rate of the array
    :return: indices of peaks, values of peaks, peak positions on freq.
"""
# flatten the array for correct processing
array = array.flatten()
if np.isscalar(freq):
        # calculate the frequency scale - assuming a samplerate if none provided
freq = np.linspace(0, fs / 2.0, len(array))
if np.isscalar(unprocessed_array):
unprocessed_array = array
# add values to allow peaks at the first and last values
# to allow peaks at start and end (default of mir)
array_appended = np.insert(array, [0, len(array)], -2.0)
# unprocessed array to get peak values
array_unprocess_appended = np.insert(
unprocessed_array, [0, len(unprocessed_array)], -2.0
)
# append the frequency scale for precise freq calculation
freq_appended = np.insert(freq, [0, len(freq)], -1.0)
# get the difference values
diff_array = np.diff(array_appended)
# find local maxima
mx = (
np.array(
np.where((array >= cthr) & (
diff_array[0:-1] > 0) & (diff_array[1:] <= 0))
)
+ 1
)
# initialise arrays for output
finalmx = []
peak_value = []
peak_x = []
peak_idx = []
if np.size(mx) > 0:
# unpack the array if peaks found
mx = mx[0]
j = 0 # scans the peaks from beginning to end
mxj = mx[j] # the current peak under evaluation
jj = j + 1
bufmin = 2.0
bufmax = array_appended[mxj]
if mxj > 1:
oldbufmin = min(array_appended[: mxj - 1])
else:
oldbufmin = array_appended[0]
while jj < len(mx):
# if adjacent mx values are too close, returns no array
if mx[jj - 1] + 1 == mx[jj] - 1:
bufmin = min([bufmin, array_appended[mx[jj - 1]]])
else:
bufmin = min(
[bufmin, min(array_appended[mx[jj - 1]: mx[jj] - 1])])
if bufmax - bufmin < cthr:
# There is no contrastive notch
if array_appended[mx[jj]] > bufmax:
                    # new peak is significantly higher than the old peak,
                    # the peak is transferred to the new position
j = jj
mxj = mx[j] # the current peak
bufmax = array_appended[mxj]
oldbufmin = min([oldbufmin, bufmin])
bufmin = 2.0
elif array_appended[mx[jj]] - bufmax <= 0:
bufmax = max([bufmax, array_appended[mx[jj]]])
oldbufmin = min([oldbufmin, bufmin])
else:
# There is a contrastive notch
if bufmax - oldbufmin < cthr:
# But the previous peak candidate is too weak and therefore discarded
oldbufmin = min([oldbufmin, bufmin])
else:
# The previous peak candidate is OK and therefore stored
finalmx.append(mxj)
oldbufmin = bufmin
bufmax = array_appended[mx[jj]]
j = jj
mxj = mx[j] # The current peak
bufmin = 2.0
jj += 1
if bufmax - oldbufmin >= cthr and (
bufmax - min(array_appended[mx[j] + 1:]) >= cthr
):
# The last peak candidate is OK and stored
finalmx.append(mx[j])
""" Sort the values according to their level """
finalmx = np.array(finalmx, dtype=np.int64)
sort_idx = np.argsort(array_appended[finalmx])[::-1] # descending sort
finalmx = finalmx[sort_idx]
# indexes were for the appended array, -1 to return to original array index
peak_idx = finalmx - 1
peak_value = array_unprocess_appended[finalmx]
peak_x = freq_appended[finalmx]
""" Interpolation for more precise peak location """
corrected_value = []
corrected_position = []
for current_peak_idx in finalmx:
# if there enough space to do the fitting
if 1 < current_peak_idx < (len(array_unprocess_appended) - 2):
y0 = array_unprocess_appended[current_peak_idx]
ym = array_unprocess_appended[current_peak_idx - 1]
yp = array_unprocess_appended[current_peak_idx + 1]
p = (yp - ym) / (2 * (2 * y0 - yp - ym))
corrected_value.append(y0 - (0.25 * (ym - yp) * p))
if p >= 0:
correct_pos = ((1 - p) * freq_appended[current_peak_idx]) + (
p * freq_appended[current_peak_idx + 1]
)
corrected_position.append(correct_pos)
elif p < 0:
correct_pos = ((1 + p) * freq_appended[current_peak_idx]) - (
p * freq_appended[current_peak_idx - 1]
)
corrected_position.append(correct_pos)
else:
corrected_value.append(
array_unprocess_appended[current_peak_idx])
corrected_position.append(freq_appended[current_peak_idx])
if corrected_position:
peak_x = corrected_position
peak_value = corrected_value
peak_idx = peak_idx.astype(np.int64)
return peak_idx, np.array(peak_value, dtype=np.float64), np.array(peak_x, np.float64)
else:
return np.array([0], dtype=np.int64), np.array(
[0], dtype=np.float64), np.array([0], np.float64)
|
c11a09624085d505d36a9e374954dd6ba5c1e05a
| 3,642,731
|
def left_index_iter(shape):
"""Iterator for the left boundary indices of a structured grid."""
return range(0, shape[0] * shape[1], shape[1])
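# Illustrative usage sketch: for a 3 x 4 grid the left-edge node IDs are the
# first entry of every row, i.e. 0, 4 and 8.
assert list(left_index_iter((3, 4))) == [0, 4, 8]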
|
c7da6f5de48d0446cb0729593d3dc0eb95f5ab9a
| 3,642,732
|
import logging
import numpy as np
def calculate_precision_recall(df_merged):
"""Calculates precision and recall arrays going through df_merged row-wise."""
all_positives = get_all_positives(df_merged)
# Populates each row with 1 if this row is a true positive
# (at its score level).
df_merged["is_tp"] = np.where(
(df_merged["label_groundtruth"] == "SPEAKING_AUDIBLE") &
(df_merged["label_prediction"] == "SPEAKING_AUDIBLE"), 1, 0)
# Counts true positives up to and including that row.
df_merged["tp"] = df_merged["is_tp"].cumsum()
# Calculates precision for every row counting true positives up to
# and including that row over the index (1-based) of that row.
df_merged["precision"] = df_merged["tp"] / (df_merged.index + 1)
# Calculates recall for every row counting true positives up to
# and including that row over all positives in the groundtruth dataset.
df_merged["recall"] = df_merged["tp"] / all_positives
logging.info(
"\n%s\n",
df_merged.head(10)[[
"uid", "score", "label_groundtruth", "is_tp", "tp", "precision",
"recall"
]])
return np.array(df_merged["precision"]), np.array(df_merged["recall"])
|
80d2c82c99e0bbbab8460ff997fc1358f758f2f6
| 3,642,733
|
def combine(shards, judo_file):
"""combine
this class is passed the
"""
# Recombine the shards to create the kek
combined_shares = Shamir.combine(shards)
combined_shares_string = "{}".format(combined_shares)
# decrypt the dek uysing the recombined kek
decrypted_dek = decrypt(
judo_file['wrappedKey'],
unhexlify(combined_shares_string)
)
# decrypt the data using the dek
decrypted_data = decrypt(
judo_file['data'],
unhexlify(decrypted_dek)
)
decrypted_text = unhexlify(decrypted_data)
return(decrypted_data, decrypted_text)
|
3ba88307c3d0cb0a43473e89b731c61e9bbfe83d
| 3,642,734
|
def shiftRightUnsigned(e, numBits):
"""
:rtype: Column
>>> from pysparkling import Context
>>> from pysparkling.sql.session import SparkSession
>>> from pysparkling.sql.functions import shiftLeft, shiftRight, shiftRightUnsigned
>>> spark = SparkSession(Context())
>>> df = spark.range(-5, 4)
>>> df.select("id", shiftRight("id", 1), shiftRightUnsigned("id", 1)).show()
+---+-----------------+-------------------------+
| id|shiftright(id, 1)|shiftrightunsigned(id, 1)|
+---+-----------------+-------------------------+
| -5| -3| 9223372036854775805|
| -4| -2| 9223372036854775806|
| -3| -2| 9223372036854775806|
| -2| -1| 9223372036854775807|
| -1| -1| 9223372036854775807|
| 0| 0| 0|
| 1| 0| 0|
| 2| 1| 1|
| 3| 1| 1|
+---+-----------------+-------------------------+
"""
return col(ShiftRightUnsigned(parse(e), lit(numBits)))
|
4f528609bb72a44a99581bca997fbde2f19af861
| 3,642,735
|
def change_wallpaper_job(profile, force=False):
"""Centralized wallpaper method that calls setter algorithm based on input prof settings.
When force, skip the profile name check
"""
with G_WALLPAPER_CHANGE_LOCK:
if profile.spanmode.startswith("single") and profile.ppimode is False:
thrd = Thread(target=span_single_image_simple, args=(profile, force), daemon=True)
thrd.start()
elif ((profile.spanmode.startswith("single") and profile.ppimode is True) or
profile.spanmode.startswith("advanced")):
thrd = Thread(target=span_single_image_advanced, args=(profile, force), daemon=True)
thrd.start()
elif profile.spanmode.startswith("multi"):
thrd = Thread(target=set_multi_image_wallpaper, args=(profile, force), daemon=True)
thrd.start()
else:
            sp_logging.G_LOGGER.info("Unknown profile spanmode: %s", profile.spanmode)
return None
return thrd
|
b4013e847cae337f83af5f3282d5551a52b4a7b3
| 3,642,736
|
import sys
from typing import ForwardRef
from typing import _eval_type
from typing import _strip_annotations
import types
from typing import _get_defaults
from typing import _allowed_types
from typing import Optional
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
"""Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, adds Optional[t] if a
default value equal to None is set and recursively replaces all
'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
inherited members.
TypeError is raised if the argument is not of a type that can contain
annotations, and an empty dictionary is returned if no annotations are
present.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj (or the respective module's globals for classes),
and these are also used as the locals. If the object does not appear
to have globals, an empty dictionary is used.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
if getattr(obj, '__no_type_check__', None):
return {}
# Classes require a special treatment.
if isinstance(obj, type):
hints = {}
for base in reversed(obj.__mro__):
if globalns is None:
base_globals = sys.modules[base.__module__].__dict__
else:
base_globals = globalns
ann = base.__dict__.get('__annotations__', {})
for name, value in ann.items():
if value is None:
value = type(None)
if isinstance(value, str):
value = ForwardRef(value, is_argument=False)
value = _eval_type(value, base_globals, localns)
hints[name] = value
return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
if globalns is None:
if isinstance(obj, types.ModuleType):
globalns = obj.__dict__
else:
nsobj = obj
# Find globalns for the unwrapped object.
while hasattr(nsobj, '__wrapped__'):
nsobj = nsobj.__wrapped__
globalns = getattr(nsobj, '__globals__', {})
if localns is None:
localns = globalns
elif localns is None:
localns = globalns
hints = getattr(obj, '__annotations__', None)
if hints is None:
# Return empty annotations for something that _could_ have them.
if isinstance(obj, _allowed_types):
return {}
else:
raise TypeError('{!r} is not a module, class, method, '
'or function.'.format(obj))
defaults = _get_defaults(obj)
hints = dict(hints)
for name, value in hints.items():
if value is None:
value = type(None)
if isinstance(value, str):
value = ForwardRef(value)
value = _eval_type(value, globalns, localns)
if name in defaults and defaults[name] is None:
value = Optional[value]
hints[name] = value
return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
|
a847d42f25355c3109f650206c1673674f1201b5
| 3,642,737
|
import xlrd
def sheets_from_excel(xlspath):
"""
Reads in an xls(x) file,
returns an array of arrays, like:
Xijk, i = sheet, j = row, k = column
(but it's not a np ndarray, just nested arrays)
"""
wb = xlrd.open_workbook(xlspath)
n_sheets = wb.nsheets
sheet_data = []
    for sn in range(n_sheets):
        sheet = wb.sheet_by_index(sn)
        rows = [sheet.row_values(i) for i in range(sheet.nrows)]
if len(rows) > 0:
sheet_data.append(rows)
return sheet_data
|
11099d2929ef0078ae0e5b07a700bdb2021eaa56
| 3,642,738
|
import numpy
import logging
def fitStatmechPseudoRotors(Tlist, Cvlist, Nvib, Nrot, molecule=None):
"""
Fit `Nvib` harmonic oscillator and `Nrot` hindered internal rotor modes to
the provided dimensionless heat capacities `Cvlist` at temperatures `Tlist`
in K. This method assumes that there are enough heat capacity points
provided that the vibrational frequencies can be fit directly, but the
hindered rotors must be combined into a single "pseudo-rotor".
"""
# Construct the lower and upper bounds for each variable
bounds = []
# Bounds for harmonic oscillator frequencies
for i in range(Nvib):
bounds.append((hoFreqLowerBound, hoFreqUpperBound))
# Bounds for pseudo-hindered rotor frequency and barrier height
bounds.append((hrFreqLowerBound, hrFreqUpperBound))
bounds.append((hrBarrLowerBound, hrBarrUpperBound))
# Construct the initial guess
# Initial guesses within each mode type must be distinct or else the
# optimization will fail
x0 = numpy.zeros(Nvib + 2, numpy.float64)
# Initial guess for harmonic oscillator frequencies
if Nvib > 0:
x0[0] = 200.0
x0[1:Nvib] = numpy.linspace(800.0, 1600.0, Nvib-1)
# Initial guess for hindered rotor frequencies and barrier heights
x0[Nvib] = 100.0
x0[Nvib+1] = 300.0
# Execute the optimization
fit = PseudoRotorFit(Tlist, Cvlist, Nvib, Nrot)
fit.initialize(Neq=len(Tlist), Nvars=len(x0), Ncons=0, bounds=bounds, maxIter=maxIter)
x, igo = fit.solve(x0)
# Check that the results of the optimization are valid
if not numpy.isfinite(x).all():
raise StatmechFitError('Returned solution vector is nonsensical: x = {0}.'.format(x))
if igo == 8:
logging.warning('Maximum number of iterations reached when fitting spectral data for {0}.'.format(molecule.toSMILES()))
# Postprocess optimization results
vib = list(x[0:Nvib])
hind = []
for i in range(Nrot):
hind.append((x[Nvib], x[Nvib+1]))
return vib, hind
|
eb110aab6a5ed35bd2ec1bdb2ca262524fe44dcf
| 3,642,739
|
def add_numbers(a, b):
"""Sums the given numbers.
:param int a: The first number.
:param int b: The second number.
:return: The sum of the given numbers.
>>> add_numbers(1, 2)
3
>>> add_numbers(50, -8)
42
"""
return a + b
|
7d9a0c26618a2aee5a8bbff6a65e315c33594fde
| 3,642,740
|
def get_version(table_name):
"""Get the most recent version number held in a given table."""
db = get_db()
cur = db.cursor()
cur.execute("select * from {} order by entered_on desc".format(table_name))
return cur.fetchone()["version"]
|
7bc55bacf7aa84ccc9ba6f6bb51bbc51c1556395
| 3,642,741
|
def area(a, indices=(0, 1, 2, 3)):
"""
:param a:
:param indices:
:return:
"""
x0, y0, x1, y1 = indices
return (a[..., x1] - a[..., x0]) * (a[..., y1] - a[..., y0])
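# Illustrative usage sketch, assuming boxes are stored as [x0, y0, x1, y1]
# along the last axis: a 2 x 3 box has area 6.
import numpy as np
example_boxes = np.array([[0.0, 0.0, 2.0, 3.0]])
assert np.allclose(area(example_boxes), [6.0])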
|
17df4d4f4ad818be0b2ed7a1fe65aaeccbe63638
| 3,642,742
|
from xbbg.io import logs
def latest_file(path_name, keyword='', ext='', **kwargs) -> str:
"""
Latest modified file in folder
Args:
path_name: full path name
keyword: keyword to search
ext: file extension
Returns:
str: latest file name
"""
files = sort_by_modified(
all_files(path_name=path_name, keyword=keyword, ext=ext, full_path=True)
)
if not files:
logger = logs.get_logger(latest_file, level=kwargs.pop('log', 'warning'))
logger.debug(f'no file in folder: {path_name}')
return ''
return str(files[0]).replace('\\', '/')
|
7d6db9994525a5fc4f109c52bede04f2c568c906
| 3,642,743
|
def infer_tf_dtypes(image_array):
"""
Choosing a suitable tf dtype based on the dtype of input numpy array.
"""
return dtype_casting(
image_array.dtype[0], image_array.interp_order[0], as_tf=True)
|
fd8fc353fd6a76a1dae2a693a9121415393b8d50
| 3,642,744
|
import tensorflow as tf
import tensorflow_datasets as tfds
AUTOTUNE = tf.data.experimental.AUTOTUNE
def get_cifar10_datasets(n_devices, batch_size=256, normalize=False):
"""Get CIFAR-10 dataset splits."""
if batch_size % n_devices:
raise ValueError("Batch size %d isn't divided evenly by n_devices %d" %
(batch_size, n_devices))
train_dataset = tfds.load('cifar10', split='train[:90%]')
val_dataset = tfds.load('cifar10', split='train[90%:]')
test_dataset = tfds.load('cifar10', split='test')
def decode(x):
decoded = {
'inputs':
tf.cast(tf.image.rgb_to_grayscale(x['image']), dtype=tf.int32),
'targets':
x['label']
}
if normalize:
decoded['inputs'] = decoded['inputs'] / 255
return decoded
train_dataset = train_dataset.map(decode, num_parallel_calls=AUTOTUNE)
val_dataset = val_dataset.map(decode, num_parallel_calls=AUTOTUNE)
test_dataset = test_dataset.map(decode, num_parallel_calls=AUTOTUNE)
train_dataset = train_dataset.repeat()
train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
val_dataset = val_dataset.batch(batch_size, drop_remainder=True)
test_dataset = test_dataset.batch(batch_size, drop_remainder=True)
train_dataset = train_dataset.shuffle(
buffer_size=256, reshuffle_each_iteration=True)
return train_dataset, val_dataset, test_dataset, 10, 256, (batch_size, 32, 32,
1)
|
50dd1b02792ab13f4b6d42d52e6467503f319bb2
| 3,642,745
|
from disco.worker.pipeline.worker import Worker, Stage
from disco.core import Job, result_iterator
def predict(dataset, fitmodel_url, save_results=True, show=False):
"""
Function starts a job that makes predictions to input data with a given model
Parameters
----------
input - dataset object with input urls and other parameters
fitmodel_url - model created in fit phase
save_results - save results to ddfs
show - show info about job execution
Returns
-------
Urls with predictions on ddfs
"""
if dataset.params["y_map"] == []:
raise Exception("Logistic regression requires a target label mapping parameter.")
if "logreg_fitmodel" not in fitmodel_url:
raise Exception("Incorrect fit model.")
job = Job(worker=Worker(save_results=save_results))
# job parallelizes execution of mappers
job.pipeline = [
("split", Stage("map", input_chain=dataset.params["input_chain"], init=simple_init, process=map_predict))]
job.params = dataset.params # job parameters (dataset object)
job.params["thetas"] = [v for k, v in result_iterator(fitmodel_url["logreg_fitmodel"]) if k == "thetas"][
0] # thetas are loaded from ddfs
job.run(name="logreg_predict", input=dataset.params["data_tag"])
results = job.wait(show=show)
return results
|
dbf56e82a3ff81a899cf2c33fa83f8c0f1b73947
| 3,642,746
|
def format_string_to_json(balance_info):
"""
Format string to json.
e.g: '''Working Account|KES|481000.00|481000.00|0.00|0.00'''
=> {'Working Account': {'current_balance': '481000.00',
'available_balance': '481000.00',
'reserved_balance': '0.00',
'uncleared_balance': '0.00'}}
"""
balance_dict = frappe._dict()
for account_info in balance_info.split("&"):
account_info = account_info.split('|')
balance_dict[account_info[0]] = dict(
current_balance=fmt_money(account_info[2], currency="KES"),
available_balance=fmt_money(account_info[3], currency="KES"),
reserved_balance=fmt_money(account_info[4], currency="KES"),
uncleared_balance=fmt_money(account_info[5], currency="KES")
)
return dumps(balance_dict)
|
1be0d4d8ad3c5373e18e6f78957e18d8f0c0c846
| 3,642,747
|
from typing import Tuple
from typing import List
def get_relevant_texts(subject: Synset, doc_threshold: float) -> Tuple[List[str], List[int], int, int]:
"""Get all lines from all relevant articles. Also return the number of retrieved documents and retained ones."""
article_dir = get_article_dir(subject)
rel_path = get_relevant_scores_path(subject)
subject_name = get_concept_name(subject)
with rel_path.open() as f: # read file to get the ids of relevant articles
scores = [float(line) for line in f if line.strip()]
num_doc_retrieved = len(scores)
line_list = []
doc_id_list = []
num_doc_retained = 0
for doc_id, score in enumerate(scores):
path = article_dir / "{}.txt".format(doc_id)
try:
with path.open() as f:
lines = [line.strip() for line in f if line.strip()]
if len(lines) > 500: # ignore huge files
continue
text = "\n".join(lines)
if score >= doc_threshold or (len(text.split()) <= 200 and subject_name in text.lower()):
line_list.extend(lines)
doc_id_list.extend([doc_id] * len(lines))
num_doc_retained += 1
except FileNotFoundError:
logger.warning(f"Subject {subject.name()} - {path} does not exist!")
continue
return line_list, doc_id_list, num_doc_retrieved, num_doc_retained
|
150dca990fe67ed3fb5381e6d4a6bce8656f2619
| 3,642,748
|
import matplotlib.pyplot as plt
def plot_mae(X, y, model):
    """
    It is also useful to log the plots as artifacts.
    """
fig = plt.figure()
plt.scatter(y, model.predict(X))
plt.xlabel("Durée réelle du trajet")
plt.ylabel("Durée estimée du trajet")
image = fig
fig.savefig("MAE.png")
plt.close(fig)
return image
|
3bc4225f530f7f80ea903d55963cb0a33fe1cb45
| 3,642,749
|
from typing import Optional
from typing import List
import re
from configparser import ConfigParser
def compile_options(
rst_roles: Optional[List[str]],
rst_directives: Optional[List[str]],
*,
allow_autodoc: bool = False,
allow_toolbox: bool = False,
):
"""
Compile the list of allowed roles and directives.
:param rst_roles:
:param rst_directives:
:param allow_autodoc:
:param allow_toolbox:
"""
default_allowed_rst_directives = []
default_allowed_rst_roles = []
config = ConfigParser()
config.read("tox.ini", encoding="UTF-8")
if "flake8" in config:
if "rst-directives" in config["flake8"]:
default_allowed_rst_directives.extend(re.split(r"[\n,]", config["flake8"]["rst-directives"]))
if "rst-roles" in config["flake8"]:
default_allowed_rst_roles.extend(re.split(r"[\n,]", config["flake8"]["rst-roles"]))
domain: Domain
if allow_toolbox:
domain = Toolbox()
elif allow_autodoc:
domain = Autodoc()
else:
domain = Builtin()
if rst_roles is None:
rst_roles = sorted({*default_allowed_rst_roles, *domain.roles})
if rst_directives is None:
rst_directives = sorted({*default_allowed_rst_directives, *domain.directives})
return rst_roles, rst_directives
|
2c5ff56797ce8eb37dfd193fd0522c057e265da5
| 3,642,750
|
import types
def get_pure_function(method):
"""
    Retrieve the pure function for a method.
Depends on features specific to CPython
"""
assert(isinstance(method, types.MethodType))
assert(hasattr(method, 'im_func'))
return method.im_func
|
f0a7f25a38fd9da061f281f5c55453f8e7ae37d0
| 3,642,751
|
import numpy as np
import pandas as pd
def _agg_samples_2d(sample_df: pd.DataFrame) -> pd.DataFrame:
"""Aggregate ENN samples for plotting."""
def pct_95(x):
return np.percentile(x, 95)
def pct_5(x):
return np.percentile(x, 5)
enn_df = (sample_df.groupby(['x0', 'x1'])['y']
.agg([np.mean, np.std, pct_5, pct_95]).reset_index())
enn_df = enn_df.rename({'mean': 'y'}, axis=1)
enn_df['method'] = 'enn'
return enn_df
|
d2decff9ae5224ad77ce6f133ac0cf0099dda89f
| 3,642,752
|
from inspect import cleandoc
def get_np_num_array_str(data_frame_rows):
"""
Get a complete code str that creates a np array with random values
"""
test_code = cleandoc("""
from sklearn.preprocessing import StandardScaler
import pandas as pd
from numpy.random import randint
series = randint(0,100,size=({}))
df = pd.DataFrame(series, columns=["num"])
""".format(data_frame_rows))
return test_code
|
66a81bba8666a02770f1de233e458a5067e08f62
| 3,642,753
|
from typing import Any
def get_config(name: str = None, default: Any = _MISSING) -> Any:
"""Gets the global configuration.
Parameters
----------
name : str, optional
The name of the setting to get the value for. If no name is
given then the whole :obj:`Configuration` object is returned.
default : optional
The default value to return if `name` is provided but the
setting doesn't exist in the global configuration.
Returns
-------
:obj:`Configuration` or :obj:`object`
The global configuration object or the configuration setting
requested.
"""
global _GLOBAL_CONFIG
if not name:
return _GLOBAL_CONFIG.copy()
if default == _MISSING:
return _GLOBAL_CONFIG[name]
return _GLOBAL_CONFIG.get(name, default)
|
da43dd18c3841489cf6c909acb12a95b34179135
| 3,642,754
|
def domain_domain_distance(ptg1, ptg2, pdb_struct, domain_distance_dict):
"""
Return the distance between two domains, which will be defined as
the distance between their two closest SSEs
    (using SSE distance defined in ptdistmatrix.py)
Parameters:
ptg1 - PTGraph2 object for one domain
ptg2 - PTGraph2 object for the other domain
pdb_struct - parsed PDB structure from Bio.PDB
domain_distance_dict (In/Out) - dict { (dom1, dom2) : ret_tuple }
for memoizing domiain-domain distances. (dom1,dom2)
is tuple of two PTGraph2 objects, note both (dom1,dom2)
and (dom2,dom1) are always added
and ret_tuple is the return value tuple as defined below.
Return value:
tuple (dist, closest_sse1, closest_sse2, closest_res1, closest_res2)
distance in Angstroms between the two domains, as defined above and
closest_sse1, closest_sse2 are PTNode objects for the closest
SSEs in ptg1 and ptg2 domains respectively and
closest_res1 and closest_res2 are the closest residues in
closest_sse1 and closest_sse2 respectively.
"""
    # This function is memoized by the domain_distance_dict parameter,
    # to save recomputation of distances that were previously computed.
    if (ptg1, ptg2) in domain_distance_dict:
return domain_distance_dict[(ptg1, ptg2)]
min_dist = float("inf")
closest_sse1 = closest_sse2 = None
closest_res1 = closest_res2 = None
# exclude the terminus nodes
ptg1_sses = [ node for node in ptg1.iter_nodes()
if not isinstance(node, PTNodeTerminus) ]
ptg2_sses = [ node for node in ptg2.iter_nodes()
if not isinstance(node, PTNodeTerminus) ]
for sse1 in ptg1_sses:
for sse2 in ptg2_sses:
(dist, res1, res2) = calc_sse_sse_dist(sse1, sse2, pdb_struct)
if dist < min_dist:
min_dist = dist
closest_sse1 = sse1
closest_sse2 = sse2
closest_res1 = res1
closest_res2 = res2
ret_tuple12 = (min_dist,closest_sse1,closest_sse2,closest_res1,closest_res2)
ret_tuple21 = (min_dist,closest_sse2,closest_sse1,closest_res2,closest_res1)
domain_distance_dict[(ptg1, ptg2)] = ret_tuple12
domain_distance_dict[(ptg2, ptg1)] = ret_tuple21
# if verbose:
# sys.stderr.write('dist between domain ' + ptg1.domainid + ' and ' +
# ptg2.domainid + ' is ' + str(min_dist) + '\n')
return ret_tuple12
|
6f2f68714717a32da0182db814629ac0e55b59e8
| 3,642,755
|
import numpy as np
def pred_error(f_pred, prepare_data, data, iterator, max_len, n_words, filter_h):
""" compute the prediction error.
"""
valid_err = 0
for _, valid_index in iterator:
x = [data[0][t] for t in valid_index]
x = prepare_data(x,max_len,n_words,filter_h)
preds = f_pred(x)
targets = np.array([data[1][t] for t in valid_index],dtype='int32')
valid_err += (preds == targets).sum()
valid_err = 1. - numpy_floatX(valid_err) / len(data[0])
return valid_err
|
c8f667a2eb6b9cc67d96ea0b6848f27cd337a2f9
| 3,642,756
|
import pandas as pd
def standardize_10msample(frac: float=0.01):
"""Runs each data processing function in series to save a new .csv data file.
Intended for Pandas DataFrame. For Dask DataFrames, use standardize_10msample_dask
Args:
frac (float, optional): Fraction of data file rows to sample. Defaults to 0.01.
Returns:
        df_10msample(pd.core.frame.DataFrame): Finished DataFrame,
            which should match the result of reading the saved .csv back with .read_csv()
"""
sample_10m = '../data/10m_sample_common_passwords/10-million-combos.txt'
df_10msample = pd.read_csv(sample_10m, header=None, delimiter='\t').astype(str).sample(frac=frac)
df_10msample.columns = ['username', 'password']
df_10msample.drop('username', axis=1, inplace=True)
df_10msample['length'] = df_10msample['password'].apply(len)
strength_features(df_10msample)
df_10msample['class'] = df_10msample['password'].apply(withPassClass)
pass_class_expand(df_10msample)
to_csv(df_10msample, filename='../data/10m_sample_common_passwords/10m_standardized.csv')
return df_10msample
|
d834cc31220a34204966160bb72399a53b99ff5b
| 3,642,757
|
def is_ansible_managed(file_path):
"""
Gets whether the fail2ban configuration file at the given path is managed by Ansible.
:param file_path: the file to check if managed by Ansible
:return: whether the file is managed by Ansible
"""
with open(file_path, "r") as file:
return file.readline().strip() == ANSIBLE_MANAGED_LINE
|
a8e70d242f598ad26a00cf0b3ccc1a1494475ba8
| 3,642,758
|
import ctypes
def sumai(array):
"""
Return the sum of the elements of an integer array.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sumai_c.html
:param array: Input Array.
:type array: Array of ints
:return: The sum of the array.
:rtype: int
"""
n = ctypes.c_int(len(array))
array = stypes.toIntVector(array)
return libspice.sumai_c(array, n)
|
ece9b6a171dff66d4f66c7ce711b6a7a7b4c59a2
| 3,642,759
|
import os
import re
def _get_connection_dir(app):
"""Gets the connection dir to use for the IPKernelApp"""
connection_dir = None
# Check the pyxll config first
cfg = get_config()
if cfg.has_option("JUPYTER", "runtime_dir"):
connection_dir = cfg.get("JUPYTER", "runtime_dir")
        if not os.path.isabs(connection_dir):
connection_dir = os.path.join(os.path.dirname(pyxll.__file__), connection_dir)
# If not set in the pyxll config use the default from the kernel
if not connection_dir:
connection_dir = app.connection_dir
# If Excel is installed as a UWP then AppData will appear as a different folder when the
# child Python process is run so use a different path.
excel_path = _get_excel_path()
if "WindowsApps" in re.split(r"[/\\]+", excel_path):
_log.debug("Excel looks like a UWP app.")
if "AppData" in re.split(r"[/\\]+", connection_dir):
connection_dir = os.path.join(os.path.dirname(pyxll.__file__), ".jupyter", "runtime")
_log.warning("Jupyter's runtime directory is in AppData but Excel is installed as a UWP. ")
_log.warning(f"{connection_dir} will be used instead.")
_log.warning("Set 'runtime_dir' in the '[JUPYTER]' section of your pyxll.cfg to change this directory.")
return connection_dir
|
246094655a185fc1a2029ec0d5a899a641c62a13
| 3,642,760
|
import os
import zlib
def download(accession):
"""Downloads GEO file based on accession number. Returns a SOFTFile or ANNOTFile
instance.
For reading and unzipping binary chunks, see:
http://stackoverflow.com/a/27053335/1830334
http://stackoverflow.com/a/2424549/1830334
"""
if 'GPL' not in accession: # soft file
geo_file = SOFTFile(accession)
if 'GDS' in accession:
url = _construct_GDS_url(accession)
else:
url = _construct_GSE_url(accession)
else:
geo_file = ANNOTFile(accession)
url = _construct_GPL_url(accession)
if os.path.isfile(geo_file.path()): # avoid downloading the same file again if exists
return geo_file
CHUNK_SIZE = 1024
decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)
response = _get_file_by_url(url)
    with open(geo_file.path(), 'wb+') as f:
while True:
bin_chunk = response.read(CHUNK_SIZE)
if not bin_chunk:
break
string = decompressor.decompress(bin_chunk)
f.write(string)
return geo_file
|
4f207d7dbe2fdc99142690565c3266d7341af548
| 3,642,761
|
from typing import Union
from typing import Optional
from typing import Mapping
from typing import Any
def invoke(
node: Union[DAG, Task],
params: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, NodeOutput]:
"""
Invoke a node with a series of parameters.
Parameters
----------
node
Node to execute
params
Inputs to the task, indexed by input/parameter name.
Returns
-------
Serialized outputs of the task, indexed by output name.
Raises
------
ValueError
When any required parameters are missing
TypeError
When any of the outputs cannot be obtained from the return value of the task's function
SerializationError
When some of the outputs cannot be serialized with the specified Serializer
"""
if isinstance(node, DAG):
return _invoke_dag(node, params=params)
else:
return _invoke_task(node, params=params)
|
f05a49996912a52db37a809d078faaa208942e7f
| 3,642,762
|
def convert_acl_to_iam_policy(acl):
"""Converts the legacy ACL format to an IAM Policy proto."""
owners = acl.get('owners', [])
readers = acl.get('readers', [])
if acl.get('all_users_can_read', False):
readers.append('allUsers')
writers = acl.get('writers', [])
bindings = []
if owners:
bindings.append({'role': 'roles/owner', 'members': owners})
if readers:
bindings.append({'role': 'roles/viewer', 'members': readers})
if writers:
bindings.append({'role': 'roles/editor', 'members': writers})
return {'bindings': bindings}
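# Illustrative usage sketch: all_users_can_read adds 'allUsers' to the readers
# before they are mapped onto roles/viewer, and empty lists produce no binding.
example_policy = convert_acl_to_iam_policy(
    {'owners': ['user:alice@example.com'], 'all_users_can_read': True})
assert example_policy == {'bindings': [
    {'role': 'roles/owner', 'members': ['user:alice@example.com']},
    {'role': 'roles/viewer', 'members': ['allUsers']},
]}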
|
990cdb6a51a696cf2b7825af94cf4265b2229be9
| 3,642,763
|
import numpy as np
def get_valid_start_end(mask):
    """
    Args:
        mask (ndarray of bool): invalid mask of shape (ns, nt); True marks invalid entries
    Returns:
        (start_idx, end_idx): per-row index of the first valid entry and one past
        the last valid entry, both -1 when the row has no valid entries
    """
ns = mask.shape[0]
nt = mask.shape[1]
start_idx = np.full(ns, -1, dtype=np.int32)
end_idx = np.full(ns, -1, dtype=np.int32)
for s in range(ns):
# scan from start to the end
for t in range(nt):
if not mask[s][t]:
start_idx[s] = t
break
# reverse scan, from end to start
for t in range(nt - 1, -1, -1):
if not mask[s][t]:
end_idx[s] = t + 1
break
return start_idx, end_idx
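# Illustrative usage sketch: True marks invalid entries, so for the single row
# [True, False, False, True] the valid span is [1, 3).
example_start, example_end = get_valid_start_end(np.array([[True, False, False, True]]))
assert example_start[0] == 1 and example_end[0] == 3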
|
41520c051d25aed203e5db9f64497f75eaab4f6c
| 3,642,764
|
import typing as t
import pytest
def pahrametahrize(*args, **kwargs) -> t.Callable:
"""Pass arguments straight through to `pytest.mark.parametrize`."""
return pytest.mark.parametrize(*args, **kwargs)
|
43bbc1e8323956f1ed2e1da60abf23e5b35130ba
| 3,642,765
|
from datetime import datetime
def utcnow():
"""Return the current time in UTC with a UTC timezone set."""
return datetime.utcnow().replace(microsecond=0, tzinfo=UTC)
|
496c80cfa4a2b00b514346705fc0709739e2d3c0
| 3,642,766
|
def default_to(default, value):
"""
Ramda implementation of default_to
:param default:
:param value:
:return:
"""
return value or default
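# Illustrative usage sketch: because the implementation uses ``or``, any falsy
# value (0, '', []) also triggers the default, not just None.
assert default_to(5, None) == 5
assert default_to(5, 7) == 7
assert default_to(5, 0) == 5  # falsy value falls back to the default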
|
58338f67332a0ff116cd2ff46d65ee92bf59c360
| 3,642,767
|
def insertGraph():
"""
Create a new graph
"""
root = Xref.getroot().elem
ref = getNewRef()
elem = etree.Element(etree.QName(root, sgraph), reference=ref)
name = makeNewName(sgraph, elem)
root.append(elem)
Xref.setDirty()
return name, (elem, newDotGraph(name, ref, elem))
|
2a60fac192d6d3448c3e48637585af2d54bdf87f
| 3,642,768
|
import datetime
import psycopg2
from psycopg2 import extras, Error
def get_line_notif(line_data: str):
"""
Извлечь запись из таблицы.
:param line_data: запрашиваемая строка
"""
try:
connection = psycopg2.connect(
user=USER,
password=PASSWORD,
host="127.0.0.1",
port="5432",
database=DATABASE)
cursor = connection.cursor(cursor_factory=extras.DictCursor)
date_time = datetime.datetime.now()
hour = '0' + str(date_time.hour) if date_time.hour < 10 else date_time.hour
minute = '0' + str(date_time.minute) if date_time.minute < 10 else date_time.minute
day = '0' + str(date_time.day) if date_time.day < 10 else date_time.day
month = '0' + str(date_time.month) if date_time.month < 10 else date_time.month
if line_data in ("Мобильная Связь", "Подписки", "ЖКХ"):
cursor.execute(f'SELECT * from reminders WHERE date = \'{day}\' and ' +
f'time = \'{hour}:{minute}\' and type=\'{line_data}\';')
elif line_data == "Планер":
cursor.execute(f'SELECT * from reminders WHERE date = \'{date_time.date}\' ' +
f'and time = \'{hour}:{minute}\' and type=\'{line_data}\';')
elif line_data == "День Рождения":
cursor.execute(f'SELECT * from reminders WHERE date = \'{day}.{month}\' ' +
f'and time = \'{hour}:{minute}\' and type=\'{line_data}\';')
elif line_data == "Приём Лекарств":
cursor.execute('SELECT * from reminders WHERE ' +
f'time = \'{hour}:{minute}\' and type=\'{line_data}\';')
connection.commit()
except (Exception, Error) as error:
print(ERROR_MESSAGE, error)
finally:
res = cursor.fetchall()
if connection:
cursor.close()
connection.close()
return res
|
8fbeb195faaa1f49928e3d0e49310cc3d4bcb37f
| 3,642,769
|
import os
def load_alloc_model(matfilepath, prefix):
""" Load allocmodel stored to disk in bnpy .mat format.
Parameters
------
matfilepath : str
String file system path to folder where .mat files are stored.
Usually this path is a "taskoutpath" like where bnpy.run
saves its output.
prefix : str
Indicates which stored checkpoint to use.
Can look like 'Lap0005.000'.
Returns
------
allocModel : bnpy.allocmodel object
This object has valid set of global parameters
and valid hyperparameters that define its prior.
"""
apriorpath = os.path.join(matfilepath, 'AllocPrior.mat')
amodelpath = os.path.join(matfilepath, prefix + 'AllocModel.mat')
APDict = loadDictFromMatfile(apriorpath)
ADict = loadDictFromMatfile(amodelpath)
AllocConstr = AllocModelConstructorsByName[ADict['name']]
amodel = AllocConstr(ADict['inferType'], APDict)
amodel.from_dict(ADict)
return amodel
|
459426256a4eee25133b7baa8fca7811432b6238
| 3,642,770
|
def bouts_per_minute(boutlist):
"""Takes list of times of bouts in seconds, returns bpm = total_bouts / total_time."""
bpm = (total_bouts(boutlist) / total_time(boutlist)) * 60
return bpm
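# Hedged sketch of the helpers this function assumes; their real definitions are
# not shown in this snippet. A plausible reading: total_bouts counts bouts and
# total_time is the elapsed span (in seconds) covered by the bout times.
def total_bouts(boutlist):
    return len(boutlist)

def total_time(boutlist):
    return max(boutlist) - min(boutlist)

# Example: 5 bouts over 120 s -> (5 / 120) * 60 = 2.5 bouts per minute
print(bouts_per_minute([0, 30, 60, 90, 120]))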
|
949f0d8758d7fcc8a1e19d4772788504b5ba10a5
| 3,642,771
|
import re
def convert_to_snake_case(string: str) -> str:
"""Helper function to convert column names into snake case. Takes a string
of any sort and makes conversions to snake case, replacing double-
underscores with single underscores."""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string)
draft = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
return draft.replace('__', '_')
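# Usage sketch: typical column-name conversions.
assert convert_to_snake_case("CamelCaseName") == "camel_case_name"
assert convert_to_snake_case("HTTPResponseCode") == "http_response_code"
assert convert_to_snake_case("already_snake") == "already_snake"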
|
2a8de69a6915e87e46582a1af7a7897ff6fd97ce
| 3,642,772
|
def list_keys(client, keys):
"""
:param client: string
:param keys: list of candidate keys
:return: True if all keys exist, None otherwise
"""
objects = client.get_multi(keys)
if bool(objects):
return objects
else:
return None
|
4370053b76ea526e1f43309112f85f968ce76b6b
| 3,642,773
|
import os
import pickle
def load_config(config_name):
"""
Load a configuration object from a file and return the object. The given configuration name
must be a valid saved configuration.
:param config_name: The name of the configuration file to load from.
:return: The configuration object saved in that file.
"""
if config_name is None:
return None
file_name = config_name + ".dat"
file_path = os.path.join(util.working_directory(), CONFIG_DIRECTORY, file_name)
    with open(file_path, "rb") as config_file:
        config = pickle.load(config_file)
    return config
|
dfa99668bf3f1eae39f1678cbd0eda4e3fe151aa
| 3,642,774
|
import numpy as np
def estimate_variance(ip_image: np.ndarray, x: int, y: int, nbr_size: int) -> float:
    """Estimates local variances as described in pg. 6, eqn. 20"""
    nbrs = get_neighborhood(x, y, nbr_size, ip_image.shape[0], ip_image.shape[1])
    variances = []  # avoid shadowing the built-in vars()
    for channel in range(3):
        pixel_avg = 0
        for i, j in nbrs:
            pixel_avg += ip_image[i, j, channel]
        pixel_avg /= len(nbrs)
        pixel_var = 0
        for i, j in nbrs:
            pixel_var += (ip_image[i, j, channel] - pixel_avg) * (ip_image[i, j, channel] - pixel_avg)
        pixel_var /= len(nbrs)
        variances.append(pixel_var)
    return np.average(variances)
|
26932d333a50526f5f3bc4b10e5dd2b0bd15e871
| 3,642,775
|
def api_key_regenerate():
"""
Generate a new API key for the currently logged-in user.
"""
try:
return flask.jsonify({
constants.api.RESULT: constants.api.RESULT_SUCCESS,
constants.api.MESSAGE: None,
'api_key': database.user.generate_new_api_key(current_user.user_id).api_key,
}), constants.api.SUCCESS_CODE
    except Exception:
return flask.jsonify(constants.api.UNDEFINED_FAILURE), constants.api.UNDEFINED_FAILURE_CODE
|
59ccc904dc80386910370dae0752c4810107224c
| 3,642,776
|
def almost_equal_ignore_nan(a, b, rtol=None, atol=None):
"""Test that two NumPy arrays are almost equal (ignoring NaN in either array).
    Combines a relative and absolute measure of approximate equality.
If either the relative or absolute check passes, the arrays are considered equal.
Including an absolute check resolves issues with the relative check where all
array values are close to zero.
Parameters
----------
a : np.ndarray
b : np.ndarray
rtol : None or float
The relative threshold. Default threshold will be used if set to ``None``.
atol : None or float
The absolute threshold. Default threshold will be used if set to ``None``.
"""
a = np.copy(a)
b = np.copy(b)
nan_mask = np.logical_or(np.isnan(a), np.isnan(b))
a[nan_mask] = 0
b[nan_mask] = 0
return almost_equal(a, b, rtol, atol)
|
ca364b23e5a6106a98ba52629ccb152dc0d95214
| 3,642,777
|
def make_commands(manager):
"""Prototype"""
# pylint: disable=no-member
return (cmd_t(manager) for cmd_t in
AbstractTwitterFollowersCommand.__subclasses__())
|
54443970dc69b06c530b746cb42b418bc5a7ee42
| 3,642,778
|
import logging
from time import sleep
def copy_rds_snapshot(
target_snapshot_identifier: str,
source_snapshot_identifier: str,
target_kms: str,
wait: bool,
rds,
):
"""Copy snapshot from source_snapshot_identifier to target_snapshot_identifier and encrypt using target_kms"""
logger = logging.getLogger("copy_rds_snapshot")
xs = rds.copy_db_cluster_snapshot(
SourceDBClusterSnapshotIdentifier=source_snapshot_identifier,
TargetDBClusterSnapshotIdentifier=target_snapshot_identifier,
KmsKeyId=target_kms,
)["DBClusterSnapshot"]
if not wait:
return xs
else:
sleep(5)
waiter = rds.get_waiter("db_cluster_snapshot_available")
logger.warning(
"Waiting for snapshot {} to be created...".format(
xs["DBClusterSnapshotIdentifier"]
)
)
try:
waiter.wait(
DBClusterSnapshotIdentifier=xs["DBClusterSnapshotIdentifier"],
SnapshotType="manual",
Filters=[
{
"Name": "db-cluster-id",
"Values": [
xs["DBClusterIdentifier"],
],
},
],
WaiterConfig={"Delay": 10, "MaxAttempts": 100},
)
        except Exception:
logger.exception(
"Unable to wait for snapshot {} to be created for cluster {}".format(
xs["DBClusterSnapshotIdentifier"], xs["DBClusterIdentifier"]
)
)
else:
return xs
|
f7d3c3b9b5588afb9dd1b6e65fc3a51f6411e997
| 3,642,779
|
def get_other_menuitems():
"""
returns other menu items
each menu pk will be dict key
{0: QuerySet, 1: QuerySet, ..}
"""
menuitems = {}
all_objects = Menu.objects.all()
for obj in all_objects:
menuitems[obj.pk] = obj.menuitem_set.all()
return menuitems
|
7e868e3d434dd168dfe6d9938093044e97e2bc5c
| 3,642,780
|
from typing import Union
from typing import List
import os
import warnings
def gather_simulation_file_paths(in_folder: str, filePrefix: str = "",
fileSuffixes: Union[str, List[str]] = [".tre", ".tre.tar.gz"],
files_per_folder: int = 1,
verbose: bool = False) -> List[str]:
"""gather_simulation_file_paths
find energy trajectory files in a folder
Parameters
----------
in_folder : str
directory where the files should be searched
filePrefix : str, optional
prefix of the file name pattern (default "")
fileSuffixes : Union[str, List[str]]
suffixes of the file name pattern (default [".tre", ".tre.tar.gz"])
files_per_folder : int, optional
number of files per folder (default 1)
verbose : bool, optional
verbose output (default False)
Returns
-------
List[str]
list of sorted files
"""
files = []
if (isinstance(fileSuffixes, str)):
fileSuffixes = [fileSuffixes]
if (verbose): print("SEARCH PATTERN: " + filePrefix + " + * +" + str(fileSuffixes))
for dirname, dirnames, filenames in os.walk(in_folder):
if (str(dirname[-1]).isdigit() and os.path.basename(dirname).startswith("eq")):
continue
        # check actual in_dir for file pattern
tmp_files = [file for file in filenames if
(filePrefix in file and any([suffix in file for suffix in fileSuffixes]))]
if (len(tmp_files) == files_per_folder):
files.extend(list(map(lambda x: dirname + "/" + x, tmp_files)))
if verbose: print("walking to in_dir: ", os.path.basename(dirname), "found: ", len(tmp_files))
try:
keys = [[int(y) for y in x.split("_") if (y.isdecimal())][-1] for x in files]
sorted_files = list(map(lambda y: y[1], sorted(zip(keys, files), key=lambda x: x[0])))
    except Exception:
warnings.warn("Files are not all enumerated! no file sorting.")
sorted_files = files
if (verbose):
print("\nfoundFiles:\n")
print("\t" + "\n\t".join(sorted_files))
if (len(sorted_files) == 0):
raise ValueError("could not find any file with the prefix: " + filePrefix + " in folder : \n" + in_folder)
return sorted_files
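# Hedged usage sketch: the folder path and file-name prefix below are hypothetical;
# the call raises ValueError if no matching file is found under in_folder.
tre_files = gather_simulation_file_paths(in_folder="/tmp/sim_output",
                                         filePrefix="system_",
                                         fileSuffixes=[".tre", ".tre.tar.gz"],
                                         files_per_folder=1,
                                         verbose=True)
print(tre_files)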
|
1f5473b147a16dfc6cb2f5101d264f45161c8e6b
| 3,642,781
|
import random
def create_deck(shuffle=False):
"""Create a new deck of 52 cards"""
deck = [(s, r) for r in RANKS for s in SUITS]
if shuffle:
random.shuffle(deck)
return deck
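# Hedged usage sketch: SUITS and RANKS are not defined in this snippet; the values
# below are assumptions for illustration only.
SUITS = ["spades", "hearts", "diamonds", "clubs"]
RANKS = ["2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "A"]
deck = create_deck(shuffle=True)
print(len(deck))   # 52
print(deck[:3])    # e.g. three (suit, rank) tuples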
|
92b828ce373c48a0a403c519a2e25b0cb1ab7409
| 3,642,782
|
from contextlib import contextmanager
def mock_gate_util_provider_oldest_namespace_feed_sync(
monkeypatch, mock_distromapping_query
):
"""
Mocks for anchore_engine.services.policy_engine.engine.policy.gate_util_provider.GateUtilProvider.oldest_namespace_feed_sync
"""
# required for FeedOutOfDateTrigger.evaluate
# setup for anchore_engine.services.policy_engine.engine.feeds.feeds.FeedRegistry.registered_vulnerability_feed_names
init_feed_registry()
@contextmanager
def mock_session_scope():
"""
Mock context manager for anchore_engine.db.session_scope.
"""
yield None
def raise_no_active_grypedb(session):
raise NoActiveGrypeDB
def _setup_mocks(feed_group_metadata=None, grype_db_feed_metadata=None):
# required for FeedOutOfDateTrigger.evaluate
# mocks anchore_engine.services.policy_engine.engine.feeds.db.get_feed_group_detached
monkeypatch.setattr(
"anchore_engine.services.policy_engine.engine.policy.gate_util_provider.session_scope",
mock_session_scope,
)
if grype_db_feed_metadata:
monkeypatch.setattr(
"anchore_engine.services.policy_engine.engine.policy.gate_util_provider.get_most_recent_active_grypedb",
lambda x: grype_db_feed_metadata,
)
else:
monkeypatch.setattr(
"anchore_engine.services.policy_engine.engine.policy.gate_util_provider.get_most_recent_active_grypedb",
raise_no_active_grypedb,
)
# mocks anchore_engine.db.db_grype_db_feed_metadata.get_most_recent_active_grypedb
# if feed_group_metadata:
monkeypatch.setattr(
"anchore_engine.services.policy_engine.engine.policy.gate_util_provider.get_feed_group_detached",
lambda x, y: feed_group_metadata,
)
return _setup_mocks
|
c6cf043b49574be44114110f5c1092d06fe531a0
| 3,642,783
|
def ESMP_LocStreamGetBounds(locstream, localDe=0):
"""
Preconditions: An ESMP_LocStream has been created.\n
Postconditions: .\n
Arguments:\n
:RETURN: Numpy.array :: \n
:RETURN: Numpy.array :: \n
ESMP_LocStream :: locstream\n
"""
llde = ct.c_int(localDe)
# locstream rank is always one
locstreamrank = 1
exLB = np.zeros(locstreamrank, dtype=np.int32)
exUB = np.zeros(locstreamrank, dtype=np.int32)
rc = _ESMF.ESMC_LocStreamGetBounds(locstream.ptr, llde, exLB, exUB)
# adjust bounds to be 0 based
exLB = exLB - 1
if rc != constants._ESMP_SUCCESS:
raise ValueError('ESMC_LocStreamGetBounds() failed with rc = '+str(rc)+'. '+
constants._errmsg)
return exLB, exUB
|
179b24463cd8dd5f70ad63530a50b6fe4dd4dfb8
| 3,642,784
|
def reverse(collection):
"""
Reverses a collection.
Args:
collection: `dict|list|depset` - The collection to reverse
Returns:
`dict|list|depset` - A new collection of the same type, with items in the reverse order
of the input collection.
"""
forward_list = None
collection_type = type(collection)
if collection_type == "dict":
forward_list = collection.items()
elif collection_type == "list":
forward_list = collection
elif collection_type == "depset":
forward_list = collection.to_list()
else:
fail("Unsupported collection type: " + collection_type)
reverse_list = []
for value in forward_list:
reverse_list.insert(0, value)
ret = None
if collection_type == "dict":
ret = dict(reverse_list)
elif collection_type == "list":
ret = reverse_list
elif collection_type == "depset":
ret = depset(reverse_list)
else:
fail("Unsupported collection type: " + collection_type)
return ret
|
587bf847028f485783e74633b1aa2ed0ef003daa
| 3,642,785
|
def A_fast_full5(S, phase_factors, r, r_min, MY, MX):
""" Fastest version, takes precomputed phase factors, assumes S-matrix with beam tilt included
:param S: B x NY x NX
:param phase_factors: K x B
:param r: K x 2
:param out: K x MY x MX
:return: exit waves in out
"""
B = S.shape[0]
K, _ = r.shape
out = th.zeros((K, B, MY, MX), dtype=th.complex64, device=S.device)
K, B, MY, MX = out.shape
gpu = cuda.get_current_device()
stream = th.cuda.current_stream().cuda_stream
threadsperblock = gpu.MAX_THREADS_PER_BLOCK // 2
blockspergrid = m.ceil(np.prod(np.array((K, B, MY, MX))) / threadsperblock)
# 1 - get crops from S-matrix
split_kernel4[blockspergrid, threadsperblock, stream](th.view_as_real(S), r, th.view_as_real(out))
# threadsperblock = 128 # gpu.MAX_THREADS_PER_BLOCK
# blockspergrid = m.ceil(np.prod(np.array((K, B))) / threadsperblock)
# # 1 - get crops from S-matrix
# split_kernel2[blockspergrid, threadsperblock, stream](th.view_as_real(S), r, out)
out = out.view((K, B, MY * MX))
# 1.5 - convert to cupy
# 2 - complex batched matmul: K x 1 x B @ K x B x MY*MX --> K x 1 x MY * MX
# print(out.shape)
# print(phase_factors2.shape)
# print(out.dtype)
# print(phase_factors2.dtype)
phase_factors2 = phase_factors.unsqueeze(1)
exitwaves = phase_factors2 @ out
# 3 - reshape
exitwaves = exitwaves.view((K, MY, MX))
#4 convert to pytorch
return exitwaves
|
3bffd01037f317c88328a751958aca67bc90b2dd
| 3,642,786
|
from pathlib import Path
import requests
import logging
from lxml import etree
def get_metadata_for_druid(druid, redownload_mods):
"""Obtains a .mods metadata file for the roll specified by DRUID either
from the local mods/ folder or the Stanford Digital Repository, then
parses the XML to build the metadata dictionary for the roll.
"""
def get_value_by_xpath(xpath):
try:
return xml_tree.xpath(
xpath,
namespaces=NS,
)[0]
except IndexError:
return None
# Takes an array of potential xpaths, returns the first one that matches,
# or None
def get_value_by_xpaths(xpaths):
for xpath in xpaths:
value = get_value_by_xpath(xpath)
if value is not None:
return value
return value
mods_filepath = Path(f"input/mods/{druid}.mods")
if not mods_filepath.exists() or redownload_mods:
response = requests.get(f"{PURL_BASE}{druid}.mods")
try:
xml_tree = etree.fromstring(response.content)
except etree.XMLSyntaxError:
logging.error(
f"Unable to parse MODS metadata for {druid} - record is likely missing."
)
return None
with mods_filepath.open("w") as _fh:
_fh.write(etree.tostring(xml_tree, encoding="unicode", pretty_print=True))
else:
xml_tree = etree.parse(mods_filepath.open())
# The representation of the roll type in the MODS metadata continues to
# evolve. Hopefully this logic covers all cases.
roll_type = "NA"
type_note = get_value_by_xpath(
"x:physicalDescription/x:note[@displayLabel='Roll type']/text()"
)
scale_note = get_value_by_xpath(
"x:physicalDescription/x:note[@displayLabel='Scale']/text()"
)
if type_note is not None and type_note in ROLL_TYPES:
roll_type = ROLL_TYPES[type_note]
if (
scale_note is not None
and scale_note in ROLL_TYPES
and (roll_type == "NA" or type_note == "standard")
):
roll_type = ROLL_TYPES[scale_note]
if roll_type == "NA" or type_note == "standard":
for note in xml_tree.xpath("(x:note)", namespaces=NS):
if note is not None and note.text in ROLL_TYPES:
roll_type = ROLL_TYPES[note.text]
metadata = {
"title_prefix": get_value_by_xpath(
"(x:titleInfo[@usage='primary']/x:nonSort)[1]/text()"
),
"title": get_value_by_xpath(
"(x:titleInfo[@usage='primary']/x:title)[1]/text()"
),
"title_part_number": get_value_by_xpath(
"(x:titleInfo[@usage='primary']/x:partNumber)[1]/text()"
),
"title_part_name": get_value_by_xpath(
"(x:titleInfo[@usage='primary']/x:partName)[1]/text()"
),
"subtitle": get_value_by_xpath("(x:titleInfo/x:subTitle)[1]/text()"),
"composer": get_value_by_xpaths(
[
"x:name[descendant::x:roleTerm[text()='composer']]/x:namePart[not(@type='date')]/text()",
"x:name[descendant::x:roleTerm[text()='Composer']]/x:namePart[not(@type='date')]/text()",
"x:name[descendant::x:roleTerm[text()='composer.']]/x:namePart[not(@type='date')]/text()",
"x:name[descendant::x:roleTerm[text()='cmp']]/x:namePart[not(@type='date')]/text()",
]
),
"performer": get_value_by_xpaths(
[
"x:name[descendant::x:roleTerm[text()='instrumentalist']]/x:namePart[not(@type='date')]/text()",
"x:name[descendant::x:roleTerm[text()='instrumentalist.']]/x:namePart[not(@type='date')]/text()",
]
),
"arranger": get_value_by_xpaths(
[
"x:name[descendant::x:roleTerm[text()='arranger of music']]/x:namePart[not(@type='date')]/text()",
"x:name[descendant::x:roleTerm[text()='arranger']]/x:namePart[not(@type='date')]/text()",
]
),
"original_composer": get_value_by_xpaths(
[
"x:relatedItem[@displayLabel='Based on (work) :']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
"x:relatedItem[@displayLabel='Based on']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
"x:relatedItem[@displayLabele='Adaptation of (work) :']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
"x:relatedItem[@displayLabel='Adaptation of']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
"x:relatedItem[@displayLabel='Arrangement of :']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
"x:relatedItem[@displayLabel='Arrangement of']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
]
),
"label": get_value_by_xpaths(
[
"x:identifier[@type='issue number' and @displayLabel='Roll number']/text()",
"x:identifier[@type='issue number']/text()",
]
),
"publisher": get_value_by_xpaths(
[
"x:identifier[@type='publisher']/text()",
"x:originInfo[@eventType='publication']/x:publisher/text()",
"x:name[@type='corporate']/x:nameType/text()",
"x:name[descendant::x:roleTerm[text()='publisher.']]/x:namePart/text()",
]
),
"number": get_value_by_xpath("x:identifier[@type='publisher number']/text()"),
"publish_date": get_value_by_xpaths(
[
"x:originInfo[@eventType='publication']/x:dateIssued[@keyDate='yes']/text()",
"x:originInfo[@eventType='publication']/x:dateIssued/text()",
"x:originInfo/x:dateIssued[@point='start']/text()",
"x:originInfo[@displayLabel='publisher']/x:dateIssued/text()",
]
),
"publish_place": get_value_by_xpaths(
[
"x:originInfo[@eventType='publication']/x:place/x:placeTerm[@type='text']/text()",
"x:originInfo[@displayLabel='publisher']/x:place/x:placeTerm/text()",
]
),
"recording_date": get_value_by_xpaths(
[
"x:note[@type='venue']/text()",
"x:originInfo[@eventType='publication']/x:dateCaptured/text()",
]
),
# The call number is not consistently available in all MODS variants
# "call_number": get_value_by_xpath("x:location/x:shelfLocator/text()"),
"type": roll_type,
"PURL": PURL_BASE + druid,
}
return metadata
|
982c2a89e85b07692901f1452a62c144ab1181b7
| 3,642,787
|
import jax
def logistic_dataset_gen_data(num, w, dim, temp, rng_key):
"""Samples data from a standard Gaussian with binary noisy labels.
Args:
num: An integer denoting the number of data points.
w: An array of size dim x odim, the weight vector used to generate labels.
dim: An integer denoting the number of input dimensions.
temp: A float denoting the temperature parameter controlling label noise.
rng_key: JAX random number generator key.
Returns:
x: An array of size dim x num denoting data points.
y_pm: An array of size num x odim denoting +/-1 labels.
"""
rng_subkey = jax.random.split(rng_key, 3)
x = jax.random.normal(rng_subkey[0], (dim, num))
prob = jax.nn.sigmoid(-(1 / temp) * w.T.dot(x))
y = jax.random.bernoulli(rng_subkey[1], (prob))
y_pm = 2. * y - 1
return x, y_pm
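# Hedged usage sketch: draw 1000 points in 5 dimensions with a random weight
# vector and a moderate label-noise temperature.
key = jax.random.PRNGKey(0)
w_key, data_key = jax.random.split(key)
dim, odim = 5, 1
w = jax.random.normal(w_key, (dim, odim))
x, y_pm = logistic_dataset_gen_data(num=1000, w=w, dim=dim, temp=0.5, rng_key=data_key)
print(x.shape, y_pm.shape)  # (5, 1000) (1, 1000)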
|
99fed2fd2cdb1250a444a986dd182ab846477890
| 3,642,788
|
def sech(x):
"""Computes the hyperbolic secant of the input"""
return 1 / cosh(x)
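# Hedged usage sketch: cosh is not imported in this snippet; it is assumed to come
# from math (scalars) or numpy (arrays) in the original module.
from math import cosh
print(sech(0.0))  # 1.0
print(sech(2.0))  # ~0.2658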
|
1cded1fbf37070dbecba0f8518990c3eef8e6406
| 3,642,789
|
import numpy as np
import torch
def _map_triples_elements_to_ids(
triples: LabeledTriples,
entity_to_id: EntityMapping,
relation_to_id: RelationMapping,
) -> MappedTriples:
"""Map entities and relations to pre-defined ids."""
if triples.size == 0:
logger.warning('Provided empty triples to map.')
return torch.empty(0, 3, dtype=torch.long)
heads, relations, tails = slice_triples(triples)
# When triples that don't exist are trying to be mapped, they get the id "-1"
entity_getter = np.vectorize(entity_to_id.get)
head_column = entity_getter(heads, [-1])
tail_column = entity_getter(tails, [-1])
relation_getter = np.vectorize(relation_to_id.get)
relation_column = relation_getter(relations, [-1])
# Filter all non-existent triples
head_filter = head_column < 0
relation_filter = relation_column < 0
tail_filter = tail_column < 0
num_no_head = head_filter.sum()
num_no_relation = relation_filter.sum()
num_no_tail = tail_filter.sum()
if (num_no_head > 0) or (num_no_relation > 0) or (num_no_tail > 0):
logger.warning(
f"You're trying to map triples with {num_no_head + num_no_tail} entities and {num_no_relation} relations"
f" that are not in the training set. These triples will be excluded from the mapping.",
)
non_mappable_triples = (head_filter | relation_filter | tail_filter)
head_column = head_column[~non_mappable_triples, None]
relation_column = relation_column[~non_mappable_triples, None]
tail_column = tail_column[~non_mappable_triples, None]
logger.warning(
f"In total {non_mappable_triples.sum():.0f} from {triples.shape[0]:.0f} triples were filtered out",
)
triples_of_ids = np.concatenate([head_column, relation_column, tail_column], axis=1)
    triples_of_ids = np.array(triples_of_ids, dtype=np.int64)  # np.long was removed in recent NumPy versions
# Note: Unique changes the order of the triples
# Note: Using unique means implicit balancing of training samples
unique_mapped_triples = np.unique(ar=triples_of_ids, axis=0)
return torch.tensor(unique_mapped_triples, dtype=torch.long)
|
5d4db571e9b9d37329df7689b7e7629559580522
| 3,642,790
|
from typing import Tuple
import numpy as np
def pinf_two_networks(grgd: Tuple[float, float],
k: Tuple[float, float] = (3, 3),
alpha_i: Tuple[float, float] = (1, 1),
solpoints: int = 10,
eps: float = 1e-5,
method: str = "hybr"):
"""Find the fixed points for two recovery coupled ER networks (not-symmetric)
Args:
grgd (Tuple[float, float]): gamma_r / gamma_d ratio in each network
k (Tuple[float, float], optional): avg degree in each network. Defaults to (3, 3).
alpha_i (Tuple[float, float], optional): coupling strength in each network. Defaults to (1, 1).
solpoints (int, optional): number of guesses to feed solver. Defaults to 10.
eps (float, optional): precision of solution. Defaults to 1e-5.
method (str, optional): method to pass to `root`. Defaults to "hybr".
Returns:
List[np.ndarray]: a list of all solutions found
"""
g = list(map(u_factory, k))
    mu = (lambda p: (1 - alpha_i[0] * g[0](1 - p)),
          lambda p: (1 - alpha_i[1] * g[1](1 - p)))
def two_networks_self_consistent(f1f2):
cond1 = 1 / (1 + (grgd[0] * mu[1](f1f2[1]))) - f1f2[0]
cond2 = 1 / (1 + (grgd[1] * mu[0](f1f2[0]))) - f1f2[1]
return np.array([cond1, cond2], dtype=float).squeeze()
return get_all_sols_two_networks(
two_networks_self_consistent,
eps=eps,
method=method,
solpoints=solpoints,
)
|
c90db1bb6d9d314086887e4f5b98f422731b3853
| 3,642,791
|
def uncapped_flatprice_goal_reached(chain, uncapped_flatprice, uncapped_flatprice_finalizer, preico_funding_goal, preico_starts_at, customer) -> Contract:
"""A ICO contract where the minimum funding goal has been reached."""
time_travel(chain, preico_starts_at + 1)
wei_value = preico_funding_goal
uncapped_flatprice.functions.buy().transact({"from": customer, "value": wei_value})
return uncapped_flatprice
|
20a6a10b4cb1318e2be7fd1995d025b582ee4768
| 3,642,792
|
def depfile_name(request, tmp_path_factory):
"""A fixture for a temporary doit database file(s) that will be removed after running"""
depfile_name = str(tmp_path_factory.mktemp('x', True) / 'testdb')
def remove_depfile():
remove_db(depfile_name)
request.addfinalizer(remove_depfile)
return depfile_name
|
cbe99e664abeea52a038898f3e76547795bca30a
| 3,642,793
|
from collections import OrderedDict
def _convert_v3_response_to_v2(pbx_name, termtype, command, v3_response):
"""
Convert the v3 response to the legacy v2 xml format.
"""
logger.debug(v3_response)
obj = {
'command': {'@cmd': command, '@cmdType': termtype, '@pbxName': pbx_name}
}
if v3_response.get('error') is not None:
obj['command']['error'] = 'ERROR: {}'.format(v3_response['error'])
elif v3_response.get('screens') is not None:
screens = []
for i, screen in enumerate(v3_response['screens']):
screens.append(OrderedDict([('@page', i + 1), ('#text', screen)]))
obj['command']['screen'] = screens
elif v3_response.get('ossi_objects') is not None:
ossi_objects = []
for i, o in enumerate(v3_response['ossi_objects']):
fields = []
for field in o:
fields.append(OrderedDict([('@fid', field), ('#text', o[field])]))
od = OrderedDict([('@i', i + 1), ('field', fields)])
ossi_objects.append(od)
if len(ossi_objects) == 0:
ossi_objects = {}
obj['command']['ossi_object'] = ossi_objects
logger.debug(obj)
xml = xmltodict.unparse(obj, pretty=True, indent=' ')
return xml
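# Hedged usage sketch: `logger` and `xmltodict` are assumed to be available at
# module level in the original code; the v3 response below is illustrative only.
import logging
import xmltodict
logger = logging.getLogger(__name__)
print(_convert_v3_response_to_v2(
    pbx_name="pbx1", termtype="ossi", command="list station",
    v3_response={"ossi_objects": [{"8003ff00": "1001", "8005ff00": "Alice"}]},
))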
|
c4883706e4bbbf3297781e4bd91360c68c3ea786
| 3,642,794
|
from collections import OrderedDict
import collections
import warnings
def calculate(dbf, comps, phases, mode=None, output='GM', fake_points=False, broadcast=True, parameters=None, **kwargs):
"""
Sample the property surface of 'output' containing the specified
components and phases. Model parameters are taken from 'dbf' and any
state variables (T, P, etc.) can be specified as keyword arguments.
Parameters
----------
dbf : Database
Thermodynamic database containing the relevant parameters.
comps : str or sequence
Names of components to consider in the calculation.
phases : str or sequence
Names of phases to consider in the calculation.
mode : string, optional
See 'make_callable' docstring for details.
output : string, optional
Model attribute to sample.
fake_points : bool, optional (Default: False)
If True, the first few points of the output surface will be fictitious
points used to define an equilibrium hyperplane guaranteed to be above
all the other points. This is used for convex hull computations.
broadcast : bool, optional
If True, broadcast given state variable lists against each other to create a grid.
If False, assume state variables are given as equal-length lists.
points : ndarray or a dict of phase names to ndarray, optional
Columns of ndarrays must be internal degrees of freedom (site fractions), sorted.
If this is not specified, points will be generated automatically.
pdens : int, a dict of phase names to int, or a seq of both, optional
Number of points to sample per degree of freedom.
Default: 2000; Default when called from equilibrium(): 500
model : Model, a dict of phase names to Model, or a seq of both, optional
Model class to use for each phase.
sampler : callable, a dict of phase names to callable, or a seq of both, optional
Function to sample phase constitution space.
Must have same signature as 'pycalphad.core.utils.point_sample'
grid_points : bool, a dict of phase names to bool, or a seq of both, optional (Default: True)
Whether to add evenly spaced points between end-members.
The density of points is determined by 'pdens'
parameters : dict, optional
Maps SymPy Symbol to numbers, for overriding the values of parameters in the Database.
Returns
-------
Dataset of the sampled attribute as a function of state variables
Examples
--------
None yet.
"""
# Here we check for any keyword arguments that are special, i.e.,
# there may be keyword arguments that aren't state variables
pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)
points_dict = unpack_kwarg(kwargs.pop('points', None), default_arg=None)
model_dict = unpack_kwarg(kwargs.pop('model', FallbackModel), default_arg=FallbackModel)
callable_dict = unpack_kwarg(kwargs.pop('callables', None), default_arg=None)
sampler_dict = unpack_kwarg(kwargs.pop('sampler', None), default_arg=None)
fixedgrid_dict = unpack_kwarg(kwargs.pop('grid_points', True), default_arg=True)
parameters = parameters or dict()
if isinstance(parameters, dict):
parameters = OrderedDict(sorted(parameters.items(), key=str))
param_symbols = tuple(parameters.keys())
    param_values = np.atleast_1d(np.array(list(parameters.values()), dtype=float))  # np.float was removed in recent NumPy versions
if isinstance(phases, str):
phases = [phases]
if isinstance(comps, str):
comps = [comps]
if points_dict is None and broadcast is False:
raise ValueError('The \'points\' keyword argument must be specified if broadcast=False is also given.')
components = [x for x in sorted(comps) if not x.startswith('VA')]
# Convert keyword strings to proper state variable objects
# If we don't do this, sympy will get confused during substitution
statevar_dict = collections.OrderedDict((v.StateVariable(key), unpack_condition(value)) \
for (key, value) in sorted(kwargs.items()))
# XXX: CompiledModel assumes P, T are the only state variables
if statevar_dict.get(v.P, None) is None:
statevar_dict[v.P] = 101325
if statevar_dict.get(v.T, None) is None:
statevar_dict[v.T] = 300
str_statevar_dict = collections.OrderedDict((str(key), unpack_condition(value)) \
for (key, value) in statevar_dict.items())
all_phase_data = []
comp_sets = {}
largest_energy = 1e30
maximum_internal_dof = 0
# Consider only the active phases
active_phases = dict((name.upper(), dbf.phases[name.upper()]) \
for name in unpack_phases(phases))
for phase_name, phase_obj in sorted(active_phases.items()):
# Build the symbolic representation of the energy
mod = model_dict[phase_name]
# if this is an object type, we need to construct it
if isinstance(mod, type):
try:
model_dict[phase_name] = mod = mod(dbf, comps, phase_name, parameters=parameters)
except DofError:
# we can't build the specified phase because the
# specified components aren't found in every sublattice
# we'll just skip it
warnings.warn("""Suspending specified phase {} due to
some sublattices containing only unspecified components""".format(phase_name))
continue
if points_dict[phase_name] is None:
maximum_internal_dof = max(maximum_internal_dof, sum(len(x) for x in mod.constituents))
else:
maximum_internal_dof = max(maximum_internal_dof, np.asarray(points_dict[phase_name]).shape[-1])
for phase_name, phase_obj in sorted(active_phases.items()):
try:
mod = model_dict[phase_name]
except KeyError:
continue
# this is a phase model we couldn't construct for whatever reason; skip it
if isinstance(mod, type):
continue
if (not isinstance(mod, CompiledModel)) or (output != 'GM'):
if isinstance(mod, CompiledModel):
mod = Model(dbf, comps, phase_name, parameters=parameters)
# Construct an ordered list of the variables
variables, sublattice_dof = generate_dof(phase_obj, mod.components)
# Build the "fast" representation of that model
if callable_dict[phase_name] is None:
try:
out = getattr(mod, output)
except AttributeError:
raise AttributeError('Missing Model attribute {0} specified for {1}'
.format(output, mod.__class__))
# As a last resort, treat undefined symbols as zero
# But warn the user when we do this
# This is consistent with TC's behavior
undefs = list(out.atoms(Symbol) - out.atoms(v.StateVariable))
for undef in undefs:
out = out.xreplace({undef: float(0)})
warnings.warn('Setting undefined symbol {0} for phase {1} to zero'.format(undef, phase_name))
comp_sets[phase_name] = build_functions(out, list(statevar_dict.keys()) + variables,
include_obj=True, include_grad=False, include_hess=False,
parameters=param_symbols)
else:
comp_sets[phase_name] = callable_dict[phase_name]
phase_record = PhaseRecord_from_cython(comps, list(statevar_dict.keys()) + variables,
                                               np.array(dbf.phases[phase_name].sublattices, dtype=float),
param_values, comp_sets[phase_name], None, None)
else:
variables = sorted(set(mod.variables) - {v.T, v.P}, key=str)
sublattice_dof = mod.sublattice_dof
phase_record = PhaseRecord_from_compiledmodel(mod, param_values)
points = points_dict[phase_name]
if points is None:
points = _sample_phase_constitution(phase_name, phase_obj.constituents, sublattice_dof, comps,
tuple(variables), sampler_dict[phase_name] or point_sample,
fixedgrid_dict[phase_name], pdens_dict[phase_name])
points = np.atleast_2d(points)
fp = fake_points and (phase_name == sorted(active_phases.keys())[0])
phase_ds = _compute_phase_values(phase_obj, components, variables, str_statevar_dict,
points, phase_record, output,
maximum_internal_dof, broadcast=broadcast,
largest_energy=float(largest_energy), fake_points=fp)
all_phase_data.append(phase_ds)
# speedup for single-phase case (found by profiling)
if len(all_phase_data) > 1:
final_ds = _fast_concat(all_phase_data, dim='points')
final_ds['points'].values = np.arange(len(final_ds['points']))
final_ds.coords['points'].values = np.arange(len(final_ds['points']))
else:
final_ds = all_phase_data[0]
return final_ds
|
c69769fa322831cc021db497a329de816541a20a
| 3,642,795
|
def is_negative(value):
"""Checks if `value` is negative.
Args:
value (mixed): Value to check.
Returns:
bool: Whether `value` is negative.
Example:
>>> is_negative(-1)
True
>>> is_negative(0)
False
>>> is_negative(1)
False
.. versionadded:: 2.0.0
"""
return is_number(value) and value < 0
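# Hedged sketch of the is_number helper this predicate relies on (pydash-style);
# its real definition is not part of this snippet. Booleans are deliberately
# excluded so is_negative(False) stays False.
def is_number(value):
    return isinstance(value, (int, float)) and not isinstance(value, bool)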
|
ce0183d95a2394db18904f0ca7f1225e43cf671d
| 3,642,796
|
import torch
def get_optimizer_noun(lr, decay, mode, cnn_features, role_features):
""" To get the optimizer
mode 0: training from scratch
mode 1: cnn fix, verb fix, role training
mode 2: cnn fix, verb fine tune, role training
mode 3: cnn finetune, verb finetune, role training"""
if mode == 0:
set_trainable_param(cnn_features, True)
set_trainable_param(role_features, True)
optimizer = torch.optim.Adam([
{'params': cnn_features},
{'params': role_features}
], lr=lr, weight_decay=decay)
elif mode == 1:
set_trainable_param(role_features, True)
optimizer = torch.optim.Adam([
{'params': role_features}
], lr=lr, weight_decay=decay)
elif mode == 2:
set_trainable_param(role_features, True)
optimizer = torch.optim.Adam([
{'params': role_features}],
lr=1e-3)
elif mode == 3:
set_trainable_param(cnn_features, True)
set_trainable_param(role_features, True)
optimizer = torch.optim.Adam([
{'params': cnn_features},
{'params': role_features}
], lr=lr, weight_decay=decay)
return optimizer
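# Hedged usage sketch: set_trainable_param is assumed to toggle requires_grad on a
# list of parameters; the tensors below stand in for real CNN / role features.
def set_trainable_param(params, trainable):
    for p in params:
        p.requires_grad_(trainable)
cnn_features = [torch.nn.Parameter(torch.randn(4, 4))]
role_features = [torch.nn.Parameter(torch.randn(4, 4))]
opt = get_optimizer_noun(lr=1e-4, decay=1e-5, mode=0,
                         cnn_features=cnn_features, role_features=role_features)
print(opt)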
|
6ac2df23f6a50d3488302cfe2da6189a995c0d85
| 3,642,797
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
def lan_manifold(
parameter_df=None,
vary_dict={"v": [-1.0, -0.75, -0.5, -0.25, 0, 0.25, 0.5, 0.75, 1.0]},
model="ddm",
n_rt_steps=200,
max_rt=5,
fig_scale=1.0,
save=False,
show=True,
):
"""Plots lan likelihoods in a 3d-plot.
:Arguments:
parameter_df: pandas.core.frame.DataFrame <default=None>
DataFrame that holds a parameter vector and has parameter names as keys.
vary_dict: dict <default={'v': [-1.0, -0.75, -.5, -0.25, 0, 0.25, 0.5, 0.75, 1.0]}>
Dictionary where key is a valid parameter name, and value is either a list of numpy.ndarray() of values
of the respective parameter that you want to plot.
model: str <default='ddm'>
String that specifies the model to be used to plotting. (The plot loads the corresponding LAN)
n_rt_steps: int <default=200>
Numer of rt steps to include (x-axis)
max_rt: numeric <default=5.0>
The n_rt_steps argument splits the reaction time axis in to n_rt_step from 0 to max_rt.
fig_scale: numeric <default=1.0>
Basic handle to scale the figure.
save: bool <default=False>
Whether to save the plot.
show: bool <default=True>
Whether to show the plot.
:Returns:
empty
"""
# mpl.rcParams.update(mpl.rcParamsDefault)
# mpl.rcParams['text.usetex'] = True
# #matplotlib.rcParams['pdf.fonttype'] = 42
# mpl.rcParams['svg.fonttype'] = 'none'
assert (
model_config[model]["n_choices"] == 2
), "This plot works only for 2-choice models at the moment. Improvements coming!"
if parameter_df.shape[0] > 0:
parameters = parameter_df.iloc[0, :]
print("Using only the first row of the supplied parameter array !")
if type(parameter_df) == pd.core.frame.DataFrame:
parameters = np.squeeze(
parameters[model_config[model]["params"]].values.astype(np.float32)
)
else:
parameters = parameter_df
# Load Keras model and initialize batch container
torch_model = get_torch_mlp(model=model)
# Prepare data structures
# Data template
plot_data = np.zeros((n_rt_steps * 2, 2))
plot_data[:, 0] = np.concatenate(
(
[(i * (max_rt / n_rt_steps)) for i in range(n_rt_steps, 0, -1)],
[(i * (max_rt / n_rt_steps)) for i in range(1, n_rt_steps + 1, 1)],
)
)
plot_data[:, 1] = np.concatenate(
(np.repeat(-1, n_rt_steps), np.repeat(1, n_rt_steps))
)
n_params = model_config[model]["n_params"]
n_levels = vary_dict[list(vary_dict.keys())[0]].shape[0]
data_var = np.zeros(((n_rt_steps * 2) * n_levels, n_params + 3))
cnt = 0
vary_param_name = list(vary_dict.keys())[0]
for par_tmp in vary_dict[vary_param_name]:
tmp_begin = (n_rt_steps * 2) * cnt
tmp_end = (n_rt_steps * 2) * (cnt + 1)
parameters[model_config[model]["params"].index(vary_param_name)] = par_tmp
data_var[tmp_begin:tmp_end, :n_params] = parameters
data_var[tmp_begin:tmp_end, n_params : (n_params + 2)] = plot_data
data_var[tmp_begin:tmp_end, (n_params + 2)] = np.squeeze(
np.exp(torch_model(data_var[tmp_begin:tmp_end, :-1].astype(np.float32)))
)
cnt += 1
fig = plt.figure(figsize=(8 * fig_scale, 5.5 * fig_scale))
ax = fig.add_subplot(111, projection="3d")
ax.plot_trisurf(
data_var[:, -2] * data_var[:, -3],
data_var[:, model_config[model]["params"].index(vary_param_name)],
data_var[:, -1],
linewidth=0.5,
alpha=1.0,
cmap=cm.coolwarm,
)
ax.set_ylabel(vary_param_name.upper().replace("_", "-"), fontsize=16, labelpad=20)
ax.set_xlabel("RT", fontsize=16, labelpad=20)
ax.set_zlabel("Likelihood", fontsize=16, labelpad=20)
ax.set_zticks(
np.round(np.linspace(min(data_var[:, -1]), max(data_var[:, -1]), 5), 1)
)
ax.set_yticks(
np.round(
np.linspace(
min(data_var[:, model_config[model]["params"].index(vary_param_name)]),
max(data_var[:, model_config[model]["params"].index(vary_param_name)]),
5,
),
1,
)
)
ax.set_xticks(
np.round(
np.linspace(
min(data_var[:, -2] * data_var[:, -3]),
max(data_var[:, -2] * data_var[:, -3]),
5,
),
1,
)
)
ax.tick_params(labelsize=16)
ax.set_title(
model.upper().replace("_", "-") + " - MLP: Manifold", fontsize=20, pad=20
)
ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
# Save plot
if save:
if os.path.isdir("figures/"):
pass
else:
os.mkdir("figures/")
plt.savefig("figures/mlp_manifold_" + model + ".png", format="png")
if show:
return plt.show()
plt.close()
return
|
2faddac2b992b022ce27eb0c66a12968ff6d2da7
| 3,642,798
|
import numpy as np
# NOTE: _PAD is a module-level padding constant defined elsewhere in the original
# module; it must exist before this function definition is evaluated.
def _loc_str_to_pars(loc, x=None, y=None, halign=None, valign=None, pad=_PAD):
"""Convert from a string location specification to the specifying parameters.
If any of the specifying parameters: {x, y, halign, valign}, are 'None', they are set to
default values.
Returns
-------
x : float
y : float
halign : str
valign : str
"""
_valid_loc = [['t', 'u', 'b', 'l', 'c'], ['l', 'r', 'c']]
for ii, (ll, vv) in enumerate(zip(loc, _valid_loc)):
if ll not in vv:
err = "Unrecognized `loc`[{}] = '{}' (`loc` = '{}').".format(ii, ll, loc)
err += "\n\t`loc`[{}] must be one of '{}'".format(ii, vv)
raise ValueError(err)
pad = np.atleast_1d(pad)
if pad.size == 1:
pad = np.concatenate([pad, pad])
if loc[0] == 't' or loc[0] == 'u':
if valign is None:
valign = 'top'
if y is None:
y = 1 - pad[1]
elif loc[0] == 'b' or loc[0] == 'l':
if valign is None:
valign = 'bottom'
if y is None:
y = pad[1]
elif loc[0] == 'c':
if valign is None:
valign = 'center'
if y is None:
y = 0.5
if loc[1] == 'l':
if halign is None:
halign = 'left'
if x is None:
x = pad[0]
elif loc[1] == 'r':
if halign is None:
halign = 'right'
if x is None:
x = 1 - pad[0]
elif loc[1] == 'c':
if halign is None:
halign = 'center'
if x is None:
x = 0.5
return x, y, halign, valign
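# Hedged usage sketch: assumes the module-level _PAD constant (not shown in this
# snippet) is defined; pad is also passed explicitly here so the default is unused.
# 'tl' resolves to the top-left corner with default alignment.
x, y, halign, valign = _loc_str_to_pars("tl", pad=0.02)
print(x, y, halign, valign)  # 0.02 0.98 left top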
|
84094b2eaf39390a1d30fd26d8ae36ecd32a7665
| 3,642,799
|