content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def _make_decorator(
    obj: Wrappable,
    to_wrap: tp.Iterable[str]
) -> tp.Callable[[tp.Type[WrapperInjector]], tp.Type[WrapperInjector]]:
    """Makes the decorator function to use for wrapping.
    Parameters
    ----------
    obj : :obj:`ModuleType`, :obj:`type` or :obj:`object`
        The source object to wrap the `to_wrap` attributes of.
    to_wrap : Iterable[str]
        The names of the attributes of `obj` to wrap.
    Returns
    -------
    Callable[[Type[WrapperInjector]], Type[WrapperInjector]]
        The decorator to use for wrapping a new :obj:`WrapperInjector`
        class.
    """
    # Materialize once: `to_wrap` may be a one-shot iterator and is needed
    # both for `__wrapped__` and for the attribute lookup below.
    names = tuple(to_wrap)

    def _wrapper(cls: tp.Type[WrapperInjector]) -> tp.Type[WrapperInjector]:
        cls.__wrapped__ = names
        # Bug fix: the original rebound `to_wrap` inside this closure, which
        # made it a local name and raised UnboundLocalError on the preceding
        # read.  Use a distinct local name for the attribute mapping instead.
        attributes = {name: getattr(obj, name) for name in names}
        for name, attr in attributes.items():
            # NOTE(review): these attribute lookups are not name-mangled here
            # (plain function scope) — confirm WrapperInjector exposes
            # `__wrap_function` / `__wrap_method` under these exact names.
            if isinstance(attr, FunctionType):
                setattr(cls, name, cls.__wrap_function(attr))
            else:
                setattr(cls, name, cls.__wrap_method(attr))
        return cls
    return _wrapper
import numbers
import warnings
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
                             max_iter=100, tol=1e-4, verbose=0,
                             solver='lbfgs', coef=None,
                             class_weight=None, dual=False, penalty='l2',
                             intercept_scaling=1., multi_class='warn',
                             random_state=None, check_input=True,
                             max_squared_sum=None, sample_weight=None):
    """Compute a Logistic Regression model for a list of regularization
    parameters.
    This is an implementation that uses the result of the previous model
    to speed up computations along the set of solutions, making it faster
    than sequentially calling LogisticRegression for the different parameters.
    Note that there will be no speedup with liblinear solver, since it does
    not handle warm-starting.
    Read more in the :ref:`User Guide <logistic_regression>`.
    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Input data, target values.
    pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.
    Cs : int | array-like, shape (n_cs,)
        List of values for the regularization parameter or integer specifying
        the number of regularization parameters that should be used. In this
        case, the parameters will be chosen in a logarithmic scale between
        1e-4 and 1e4.
    fit_intercept : bool
        Whether to fit an intercept for the model. In this case the shape of
        the returned array is (n_cs, n_features + 1).
    max_iter : int
        Maximum number of iterations for the solver.
    tol : float
        Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
        will stop when ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.
    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.
    solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
        Numerical solver to use.
    coef : array-like, shape (n_features,), default None
        Initialization value for coefficients of logistic regression.
        Useless for liblinear solver.
    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.
    penalty : str, 'l1' or 'l2'
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties.
    intercept_scaling : float, default 1.
        Useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.
    multi_class : str, {'ovr', 'multinomial', 'auto'}, default: 'ovr'
        If the option chosen is 'ovr', then a binary problem is fit for each
        label. For 'multinomial' the loss minimised is the multinomial loss fit
        across the entire probability distribution, *even when the data is
        binary*. 'multinomial' is unavailable when solver='liblinear'.
        'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
        and otherwise selects 'multinomial'.
        .. versionadded:: 0.18
           Stochastic Average Gradient descent solver for 'multinomial' case.
        .. versionchanged:: 0.20
            Default will change from 'ovr' to 'auto' in 0.22.
    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag' or
        'liblinear'.
    check_input : bool, default True
        If False, the input arrays X and y will not be checked.
    max_squared_sum : float, default None
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.
    sample_weight : array-like, shape(n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.
    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept. For
        ``multiclass='multinomial'``, the shape is (n_classes, n_cs,
        n_features) or (n_classes, n_cs, n_features + 1).
    Cs : ndarray
        Grid of Cs used for cross-validation.
    n_iter : array, shape (n_cs,)
        Actual number of iteration for each Cs.
    Notes
    -----
    You might get slightly different results with the solver liblinear than
    with the others since this uses LIBLINEAR which penalizes the intercept.
    .. versionchanged:: 0.19
        The "copy" parameter was removed.
    """
    # An integer Cs means "this many C values on a log grid".
    if isinstance(Cs, numbers.Integral):
        Cs = np.logspace(-4, 4, Cs)
    solver = _check_solver(solver, penalty, dual)
    # Preprocessing.
    if check_input:
        X = check_array(X, accept_sparse='csr', dtype=np.float64,
                        accept_large_sparse=solver != 'liblinear')
        y = check_array(y, ensure_2d=False, dtype=None)
        check_consistent_length(X, y)
    _, n_features = X.shape
    classes = np.unique(y)
    random_state = check_random_state(random_state)
    multi_class = _check_multi_class(multi_class, solver, len(classes))
    if pos_class is None and multi_class != 'multinomial':
        if (classes.size > 2):
            raise ValueError('To fit OvR, use the pos_class argument')
        # np.unique(y) gives labels in sorted order.
        pos_class = classes[1]
    # If sample weights exist, convert them to array (support for lists)
    # and check length
    # Otherwise set them to 1 for all examples
    if sample_weight is not None:
        sample_weight = np.array(sample_weight, dtype=X.dtype, order='C')
        check_consistent_length(y, sample_weight)
        default_weights = False
    else:
        default_weights = (class_weight is None)
        sample_weight = np.ones(X.shape[0], dtype=X.dtype)
    # DAAL acceleration is only attempted for dense inputs with the lbfgs or
    # newton-cg solvers; `use_daal` is a module-level switch.
    daal_ready = use_daal and solver in ['lbfgs', 'newton-cg'] and not sparse.issparse(X)
    # If class_weights is a dict (provided by the user), the weights
    # are assigned to the original labels. If it is "balanced", then
    # the class_weights are assigned after masking the labels with a OvR.
    le = LabelEncoder()
    if isinstance(class_weight, dict) or multi_class == 'multinomial':
        class_weight_ = compute_class_weight(class_weight, classes, y)
        sample_weight *= class_weight_[le.fit_transform(y)]
    # For doing a ovr, we need to mask the labels first. for the
    # multinomial case this is not necessary.
    if multi_class == 'ovr':
        mask_classes = np.array([-1, 1])
        mask = (y == pos_class)
        y_bin = np.ones(y.shape, dtype=X.dtype)
        y_bin[~mask] = -1.
        # for compute_class_weight
        if class_weight == "balanced":
            class_weight_ = compute_class_weight(class_weight, mask_classes,
                                                 y_bin)
            sample_weight *= class_weight_[le.fit_transform(y_bin)]
        # The DAAL path additionally requires uniform (all-ones) sample
        # weights; otherwise fall back to the stock implementation.
        daal_ready = daal_ready and (default_weights or np.allclose(sample_weight, np.ones_like(sample_weight)))
        if daal_ready:
            # DAAL stores the bias term as an extra leading coefficient
            # (hence n_features + 1, and the np.roll calls further down that
            # restore scikit-learn's trailing-intercept layout); it also
            # expects 0/1 targets rather than -1/+1.
            w0 = np.zeros(n_features + 1, dtype=X.dtype)
            y_bin[~mask] = 0.
        else:
            w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
    else:
        daal_ready = daal_ready and (default_weights or np.allclose(sample_weight, np.ones_like(sample_weight)))
        if solver not in ['sag', 'saga']:
            if daal_ready:
                Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
            else:
                lbin = LabelBinarizer()
                Y_multi = lbin.fit_transform(y)
                if Y_multi.shape[1] == 1:
                    Y_multi = np.hstack([1 - Y_multi, Y_multi])
        else:
            # SAG multinomial solver needs LabelEncoder, not LabelBinarizer
            Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
        if daal_ready:
            w0 = np.zeros((classes.size, n_features + 1),
                          order='C', dtype=X.dtype)
        else:
            w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
                          order='F', dtype=X.dtype)
    if coef is not None:
        # it must work both giving the bias term and not
        if multi_class == 'ovr':
            if coef.size not in (n_features, w0.size):
                raise ValueError(
                    'Initialization coef is of shape %d, expected shape '
                    '%d or %d' % (coef.size, n_features, w0.size))
            if daal_ready:
                # Move the trailing intercept to the front for DAAL's layout.
                w0[-coef.size:] = np.roll(coef, 1, -1) if coef.size != n_features else coef
            else:
                w0[:coef.size] = coef
        else:
            # For binary problems coef.shape[0] should be 1, otherwise it
            # should be classes.size.
            n_classes = classes.size
            if n_classes == 2:
                n_classes = 1
            if (coef.shape[0] != n_classes or
                    coef.shape[1] not in (n_features, n_features + 1)):
                raise ValueError(
                    'Initialization coef is of shape (%d, %d), expected '
                    'shape (%d, %d) or (%d, %d)' % (
                        coef.shape[0], coef.shape[1], classes.size,
                        n_features, classes.size, n_features + 1))
            if daal_ready:
                w0[:, -coef.shape[1]:] = np.roll(coef, 1, -1) if coef.shape[1] != n_features else coef
            else:
                if n_classes == 1:
                    w0[0, :coef.shape[1]] = -coef
                    w0[1, :coef.shape[1]] = coef
                else:
                    w0[:, :coef.shape[1]] = coef
    # NOTE(review): DAAL's binary logistic loss apparently differs from the
    # 2-class multinomial loss by a factor of 2; this multiplier scales the
    # penalty and the coefficients accordingly (set to 2 below, undone with
    # `w0 *= 0.5` after optimization) — confirm against daal4py docs.
    C_daal_multiplier = 1
    # commented out because this is Py3 feature
    #def _map_to_binary_logistic_regression():
    #    nonlocal C_daal_multiplier
    #    nonlocal w0
    #    C_daal_multiplier = 2
    #    w0 *= 2
    if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
        if solver in ['lbfgs', 'newton-cg']:
            if daal_ready and classes.size == 2:
                # Binary case: optimize only one row of coefficients.
                w0_saved = w0
                w0 = w0[-1:, :]
            w0 = w0.ravel()
        target = Y_multi
        if solver == 'lbfgs':
            if daal_ready:
                if classes.size == 2:
                    # _map_to_binary_logistic_regression()
                    C_daal_multiplier = 2
                    w0 *= 2
                    daal_extra_args_func = _daal4py_logistic_loss_extra_args
                else:
                    daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
                func = _daal4py_loss_and_grad
            else:
                func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
        elif solver == 'newton-cg':
            if daal_ready:
                if classes.size == 2:
                    # _map_to_binary_logistic_regression()
                    C_daal_multiplier = 2
                    w0 *= 2
                    daal_extra_args_func = _daal4py_logistic_loss_extra_args
                else:
                    daal_extra_args_func = _daal4py_cross_entropy_loss_extra_args
                func = _daal4py_loss_
                grad = _daal4py_grad_
                hess = _daal4py_grad_hess_
            else:
                func = lambda x, *args: _multinomial_loss(x, *args)[0]
                grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
                hess = _multinomial_grad_hess
        warm_start_sag = {'coef': w0.T}
    else:
        target = y_bin
        if solver == 'lbfgs':
            if daal_ready:
                func = _daal4py_loss_and_grad
                daal_extra_args_func = _daal4py_logistic_loss_extra_args
            else:
                func = _logistic_loss_and_grad
        elif solver == 'newton-cg':
            if daal_ready:
                daal_extra_args_func = _daal4py_logistic_loss_extra_args
                func = _daal4py_loss_
                grad = _daal4py_grad_
                hess = _daal4py_grad_hess_
            else:
                func = _logistic_loss
                grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
                hess = _logistic_grad_hess
        warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
    coefs = list()
    n_iter = np.zeros(len(Cs), dtype=np.int32)
    # Fit one model per C, warm-starting w0 from the previous solution.
    for i, C in enumerate(Cs):
        if solver == 'lbfgs':
            if daal_ready:
                extra_args = daal_extra_args_func(classes.size, w0, X, target, 0., 0.5 / C / C_daal_multiplier,
                                                  fit_intercept, value=True, gradient=True, hessian=False)
            else:
                extra_args = (X, target, 1. / C, sample_weight)
            # Map `verbose` onto scipy's lbfgs `iprint` levels.
            iprint = [-1, 50, 1, 100, 101][
                np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
            w0, loss, info = optimize.fmin_l_bfgs_b(
                func, w0, fprime=None,
                args=extra_args,
                iprint=iprint, pgtol=tol, maxiter=max_iter)
            if daal_ready and C_daal_multiplier == 2:
                # Undo the factor-of-2 scaling applied for DAAL's binary loss.
                w0 *= 0.5
            if info["warnflag"] == 1:
                warnings.warn("lbfgs failed to converge. Increase the number "
                              "of iterations.", ConvergenceWarning)
            # In scipy <= 1.0.0, nit may exceed maxiter.
            # See https://github.com/scipy/scipy/issues/7854.
            n_iter_i = min(info['nit'], max_iter)
        elif solver == 'newton-cg':
            if daal_ready:
                # Wrap the daal4py loss/grad/hess callables so they carry the
                # precomputed objective object and penalty for this C.
                def make_ncg_funcs(f, value=False, gradient=False, hessian=False):
                    daal_penaltyL2 = 0.5 / C / C_daal_multiplier
                    _obj_, X_, y_, n_samples = daal_extra_args_func(
                        classes.size, w0, X, target, 0., daal_penaltyL2, fit_intercept,
                        value=value, gradient=gradient, hessian=hessian)
                    _func_ = lambda x, *args: f(x, _obj_, *args)
                    return _func_, (X_, y_, n_samples, daal_penaltyL2)
                loss_func, extra_args = make_ncg_funcs(func, value=True)
                grad_func, _ = make_ncg_funcs(grad, gradient=True)
                grad_hess_func, _ = make_ncg_funcs(hess, gradient=True)
                w0, n_iter_i = _newton_cg(grad_hess_func, loss_func, grad_func, w0, args=extra_args,
                                          maxiter=max_iter, tol=tol)
            else:
                args = (X, target, 1. / C, sample_weight)
                w0, n_iter_i = _newton_cg(hess, func, grad, w0, args=args,
                                          maxiter=max_iter, tol=tol)
        elif solver == 'liblinear':
            coef_, intercept_, n_iter_i, = _fit_liblinear(
                X, target, C, fit_intercept, intercept_scaling, None,
                penalty, dual, verbose, max_iter, tol, random_state,
                sample_weight=sample_weight)
            if fit_intercept:
                w0 = np.concatenate([coef_.ravel(), intercept_])
            else:
                w0 = coef_.ravel()
        elif solver in ['sag', 'saga']:
            if multi_class == 'multinomial':
                target = target.astype(np.float64)
                loss = 'multinomial'
            else:
                loss = 'log'
            # alpha is the L2 strength, beta the L1 strength for sag_solver.
            if penalty == 'l1':
                alpha = 0.
                beta = 1. / C
            else:
                alpha = 1. / C
                beta = 0.
            w0, n_iter_i, warm_start_sag = sag_solver(
                X, target, sample_weight, loss, alpha,
                beta, max_iter, tol,
                verbose, random_state, False, max_squared_sum, warm_start_sag,
                is_saga=(solver == 'saga'))
        else:
            raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
                             "'newton-cg', 'sag'}, got '%s' instead" % solver)
        if multi_class == 'multinomial':
            if daal_ready:
                if classes.size == 2:
                    multi_w0 = w0[np.newaxis, :]
                else:
                    multi_w0 = np.reshape(w0, (classes.size, -1))
            else:
                n_classes = max(2, classes.size)
                multi_w0 = np.reshape(w0, (n_classes, -1))
                if n_classes == 2:
                    multi_w0 = multi_w0[1][np.newaxis, :]
            coefs.append(np.require(multi_w0, requirements='O'))
        else:
            coefs.append(np.require(w0, requirements='O'))
        n_iter[i] = n_iter_i
    # DAAL kept the intercept first; restore scikit-learn's layout:
    # roll it to the end, or drop it entirely when fit_intercept is False.
    if daal_ready:
        if fit_intercept:
            for i, ci in enumerate(coefs):
                coefs[i] = np.roll(ci, -1, -1)
        else:
            for i, ci in enumerate(coefs):
                coefs[i] = np.delete(ci, 0, axis=-1)
    return coefs, np.array(Cs), n_iter
def merge_labels_below_minsize(labels: np.array,
                               min_size: int,
                               connectivity: int = 8) -> np.array:
    """
    Merge each label smaller than `min_size` into one of its connected
    neighbors. Label 0 is treated as background and left untouched.
    Parameters
    ----------
    labels : np.array
        2d label array; 0 is background and is ignored.
    min_size : int
        Minimum segment size to keep.
    connectivity : int
        4 or 8 connectivity accepted. Default 8. If `apply_mask_buffer`
        was used to compute distance, then connectivity must be 8.
        See: https://en.wikipedia.org/wiki/Pixel_connectivity
    Returns
    -------
    np.array:
        Updated 2d label array, relabeled sequentially.
    Note
    ----
    Sizes are computed once up front; merging is a single reassignment
    pass, not a recursive size update.
    """
    segment_sizes = get_superpixel_area_as_features(labels)
    all_labels = np.arange(0, labels.max() + 1)
    undersized = list(all_labels[segment_sizes.ravel() < min_size])
    adjacency = get_RAG_neighbors(labels,
                                  label_subset=undersized,
                                  connectivity=connectivity)

    def _reassign(label_arr):
        label = label_arr[0]
        # Background and sufficiently large segments keep their label.
        if (label == 0) or (label not in undersized):
            return label
        neighbors = adjacency.get(label)
        # Isolated undersized segments fall back to background.
        return neighbors[0] if len(neighbors) > 0 else 0

    merged_features = apply_func_to_superpixels(_reassign,
                                                labels,
                                                labels, dtype=int)
    labels = get_array_from_features(labels, merged_features)
    labels, _, _ = relabel_sequential(labels)
    return labels
def custom_cnn_model(config, labels, model_weights=None):
    """
    Convolutional Neural network architecture based on 'Photonic Human Identification based on
    Deep Learning of Back Scattered Laser Speckle Patterns' paper.
    :param config: Configuration list of models hyper & learning params, indexed as:
        [0] input image side length (square grayscale input),
        [1] number of conv filters, [2] conv kernel size,
        [3] max-pool window size, [4] dropout rate after pooling,
        [5] first dense layer width, [6] dropout rate after first dense block,
        [7] second dense layer width, [8] dropout rate after second dense block
    :param labels: List of data labels (its length sets the softmax output size)
    :param model_weights: Weights of pre-trained model (path passed to
        ``load_weights``); if None, training starts from random initialization
    :return: Compiled CNN model
    """
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(config[1], config[2], input_shape=(config[0], config[0], 1)),
        tf.keras.layers.MaxPooling2D(pool_size=(config[3], config[3])),
        tf.keras.layers.Dropout(config[4]),
        tf.keras.layers.Flatten(),
        # Dense -> BatchNorm -> ReLU ordering (activation applied after the
        # normalization, hence the separate Activation layers).
        tf.keras.layers.Dense(config[5]),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Activation(activation=tf.nn.relu),
        tf.keras.layers.Dropout(config[6]),
        tf.keras.layers.Dense(config[7]),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Activation(activation=tf.nn.relu),
        tf.keras.layers.Dropout(config[8]),
        tf.keras.layers.Dense(len(labels), activation=tf.nn.softmax)
    ])
    if model_weights is not None:
        print('loading pre-trained model')
        model.load_weights(model_weights, by_name=True)
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def check_day_crossover(tRxSeconds, tTxSeconds):
    """
    Correct a signal propagation time for a GPS day (week-second) rollover.
    :param tRxSeconds: received time in seconds of week
    :param tTxSeconds: transmitted time in seconds of week
    :return: corrected propagation time
    """
    prop_time = tRxSeconds - tTxSeconds
    # A plausible propagation time is far below half a day; anything larger
    # indicates the receive/transmit times straddle a day boundary.
    if prop_time <= DAYSEC / 2:
        return prop_time
    # Remove the whole-day offset; if the remainder is still implausibly
    # large (> 10 s), treat the measurement as invalid.
    remainder = prop_time - round(prop_time / DAYSEC) * DAYSEC
    return 0.0 if remainder > 10 else remainder
def matrix2xyx_extrinsic(rotation_matrices: np.ndarray) -> np.ndarray:
    """
    Recover extrinsic x-y-x Euler angles (in degrees) from rotation matrices.

    Rx(k3) @ Ry(k2) @ Rx(k1) = [[c2, s1s2, c1s2],
                                [s2s3, -s1c2s3+c1c3, -c1c2s3-s1c3],
                                [-s2c3, s1c2c3+c1s3, c1c2c3-s1s3]]
    """
    matrices = rotation_matrices.reshape((-1, 3, 3))
    angles = np.zeros((matrices.shape[0], 3))
    # The middle angle follows directly from the (0, 0) entry (c2).
    angles[:, 1] = np.arccos(matrices[:, 0, 0])
    # Gimbal-lock case (s2 = 0): detect via |c1*s2| below tolerance.
    locked = np.abs(matrices[:, 0, 2]) < 1e-4
    # With s2 = 0 only the sum of angles 1 and 3 is determined; fix
    # angle 3 = 0 and recover angle 1 from the second row.
    angles[locked, 0] = np.arctan2(-matrices[locked, 1, 2],
                                   matrices[locked, 1, 1])
    angles[locked, 2] = 0
    # Regular case (s2 > 0): first row gives angle 1, first column angle 3.
    free = np.invert(locked)
    angles[free, 0] = np.arctan2(matrices[free, 0, 1], matrices[free, 0, 2])
    angles[free, 2] = np.arctan2(matrices[free, 1, 0], -matrices[free, 2, 0])
    return np.rad2deg(angles)
def _merge_low_rank_eigendecomposition(S1, V1, S2, V2, rank=None):
    """Private helper merging two SVD-based low rank approximations.
    Given factors S1, V1 (shapes [K1], [M, K1]) and S2, V2 (shapes [K2],
    [M, K2]) of decompositions
        A1 = U1 @ np.diag(S1) @ V1.T
        A2 = U2 @ np.diag(S2) @ V2.T
    produce factors S, V of an approximate decomposition
    A = U @ np.diag(S) @ V.T of the row-wise concatenation of A1 and A2,
    without ever forming U1, U2, or U. Useful for merging
    eigendecompositions V @ np.diag(S**2) @ V.T of autocorrelation /
    covariance matrices A.T @ A.
    Parameters
    ----------
    S1, V1 : array
        Singular values and right factor of the first matrix.
    S2, V2 : array
        Singular values and right factor of the second matrix.
    rank : int
        Number of singular values kept after merging. `None` keeps all
        K1 + K2 values (capped by the row dimension of V1/V2).
    Returns
    -------
    S : array
        (Truncated) singular values of the merged decomposition.
    V : array
        Right factor of the merged decomposition.
    Notes
    -----
    The merge algorithm is described in more detail in [1]_.
    References
    ----------
    .. [1] Radim, Rehurek,
        "Scalability of Semantic Analysis in Natural Language Processing",
        2011.
    """
    k1, k2 = S1.size, S2.size
    # Default / clamp the requested rank to what is achievable.
    if not rank or rank > k1 + k2:
        rank = k1 + k2
    rank = min(rank, V1.shape[0], V2.shape[0])
    # Project V2 onto span(V1); orthogonalize the residual.
    Z = V1.T @ V2
    Q, R = np.linalg.qr(V2 - V1 @ Z, mode="reduced")
    # Small core matrix whose SVD yields the merged spectrum.
    top = np.concatenate([np.diag(S1), Z @ np.diag(S2)], axis=1)
    bottom = np.concatenate([np.zeros([k2, k1]), R @ np.diag(S2)], axis=1)
    B = np.concatenate([top, bottom], axis=0)
    U, S, VT = _truncated_svd(B, rank=rank)
    # Rotate back into the original basis.
    V = V1 @ U[:k1, :] + Q @ U[k1:, :]
    return S, V
def usgs(path):
    """Reads USGS-formatted ASCII files.
    Reads the ascii format spectral data from USGS and returns an object with the mean
    and +/- standard deviation. Reference: https://www.sciencebase.gov/catalog/item/5807a2a2e4b0841e59e3a18d
    Args:
        path: file path the the USGS spectra text file.
    Returns:
        s: an earthlib spectralObject with the USGS reflectance data.
    """
    # Fallback if the file has no "Name:" header line.
    spectrum_name = None
    # open the file and read header info
    with open(path, "r") as f:
        # Sentinel that cannot occur in a header line; once "First X Value:"
        # is parsed it is replaced by that value, so the next line containing
        # it is the first data row, which breaks the header loop.
        x_start = "gibberish"
        for line in f:
            if x_start in line:
                break
            if "Name:" in line:
                spectrum_name = line.strip().split("Name:")[-1].strip()
            if "X Units:" in line:
                band_unit = line.strip().split()
                band_unit = band_unit[-1].strip("()").capitalize()
            if "Y Units:" in line:
                refl_unit = line.strip().split()
                refl_unit = refl_unit[-1].strip("()").capitalize()
            if "First X Value:" in line:
                x_start = line.strip().split()[-1]
            if "Number of X Values:" in line:
                n_values = int(line.strip().split()[-1])
        # now that we got our header info, create the arrays
        band_centers = _np.empty(n_values)
        reflectance = _np.empty(n_values)
        # `line` still holds the first data row that broke the loop above.
        line = line.strip().split()
        band_centers[0] = float(line[0])
        reflectance[0] = float(line[1])
        # resume reading the remaining data rows
        i = 1
        for line in f:
            line = line.strip().split()
            band_centers[i] = float(line[0])
            reflectance[i] = float(line[1])
            i += 1
    # some files list data from last to first wavelength; flip BOTH arrays
    # so they stay aligned (previously only band_centers was reversed —
    # `reflectance[::1]` was a no-op copy, leaving the data mirrored).
    if band_centers[0] > band_centers[-1]:
        band_centers = band_centers[::-1]
        reflectance = reflectance[::-1]
    # convert units to nanometers and scale 0-1
    if band_unit.lower() == "micrometers":
        band_centers *= 1000.0
        band_unit = "Nanometers"
    if refl_unit.lower() == "percent":
        reflectance /= 100.0
    # create the spectral object
    s = spectralObject(
        1,
        n_values,
        band_centers=band_centers,
        band_unit=band_unit,
        band_quantity="Wavelength",
    )
    # assign relevant values
    s.spectra[0] = reflectance
    if spectrum_name:
        s.names[0] = spectrum_name
    return s
import json
def change_service(**kwargs):
    """Makes a given change to a MRS service

    Args:
        **kwargs: Additional options

    Keyword Args:
        service_id (int): The id of the service
        change_type (int): Type of change
        url_context_root (str): The context root for this service
        url_host_name (str): The host name for this service
        value (str): The value to be set as string or a dict if all are set
        session (object): The database session to use
        interactive (bool): Indicates whether to execute in interactive mode
        raise_exceptions (bool): If set to true exceptions are raised

    Returns:
        The result message as string
    """
    service_id = kwargs.get("service_id")
    change_type = kwargs.get("change_type")
    url_context_root = kwargs.get("url_context_root")
    url_host_name = kwargs.get("url_host_name")
    value = kwargs.get("value")
    session = kwargs.get("session")
    interactive = kwargs.get("interactive", core.get_interactive_default())
    raise_exceptions = kwargs.get("raise_exceptions", not interactive)
    try:
        session = core.get_current_session(session)
        # Make sure the MRS metadata schema exists and has the right version
        core.ensure_rds_metadata_schema(session)
        # List of services to be changed, initialized with service_id if given
        service_ids = [service_id] if service_id else []
        # Get the right service_id(s) if service_id is not given
        if not url_context_root and not service_id:
            # Check if there already is at least one service
            res = session.run_sql("""
                SELECT COUNT(*) AS service_count, MAX(id) AS id
                FROM `mysql_rest_service_metadata`.`service`
                """)
            row = res.fetch_one()
            service_count = row.get_field("service_count") if row else 0
            # If there is no service to change, error out.
            if service_count == 0:
                # Bug fix: the exception was constructed but never raised,
                # silently continuing with an empty service list.
                raise Exception("No service available.")
            # If there is exactly 1 service, use that one
            # elif service_count == 1:
            #     service_ids.append(row.get_field("id"))
            # If there are more services, let the user select one or all
            if interactive:
                # Disable/enable/delete may target several services at once.
                allow_multi_select = (
                    change_type == SERVICE_DISABLE or
                    change_type == SERVICE_ENABLE or
                    change_type == SERVICE_DELETE)
                if allow_multi_select:
                    caption = ("Please select a service index, type "
                               "'hostname/root_context' or type '*' "
                               "to select all: ")
                else:
                    caption = ("Please select a service index or type "
                               "'hostname/root_context'")
                services = get_services(session=session, interactive=False)
                selection = core.prompt_for_list_item(
                    item_list=services,
                    prompt_caption=caption,
                    item_name_property="host_ctx",
                    given_value=None,
                    print_list=True,
                    allow_multi_select=allow_multi_select)
                if not selection or selection == "":
                    raise ValueError("Operation cancelled.")
                if allow_multi_select:
                    service_ids = [item["id"] for item in selection]
                else:
                    service_ids.append(selection["id"])
        # Lookup the service id
        res = session.run_sql(
            """
            SELECT se.id FROM `mysql_rest_service_metadata`.`service` se
                LEFT JOIN `mysql_rest_service_metadata`.url_host h
                    ON se.url_host_id = h.id
            WHERE h.name = ? AND se.url_context_root = ?
            """,
            [url_host_name if url_host_name else "", url_context_root])
        row = res.fetch_one()
        if row:
            service_ids.append(row.get_field("id"))
        if len(service_ids) == 0:
            raise ValueError("The specified service was not found.")
        # Check the given value
        if interactive and not value:
            if change_type == SERVICE_SET_PROTOCOL:
                value = prompt_for_service_protocol()
            elif change_type == SERVICE_SET_COMMENTS:
                value = core.prompt_for_comments()
        if change_type == SERVICE_SET_PROTOCOL and not value:
            raise ValueError("No value given.")
        # Update all given services
        for service_id in service_ids:
            service = get_service(
                service_id=service_id, session=session, interactive=False,
                return_formatted=False)
            if change_type == SERVICE_SET_CONTEXT_ROOT:
                url_ctx_root = value
            elif change_type == SERVICE_SET_ALL:
                if isinstance(value, str):  # TODO: Check why dicts cannot be used
                    value = json.loads(value)
                url_ctx_root = value.get("url_context_root")
            if (change_type == SERVICE_SET_CONTEXT_ROOT or
                    change_type == SERVICE_SET_ALL):
                if interactive and not url_ctx_root:
                    url_ctx_root = prompt_for_url_context_root(
                        default=service.get('url_context_root'))
                # If the context root has changed, check if the new one is valid
                if service.get("url_context_root") != url_ctx_root:
                    if (not url_ctx_root or not url_ctx_root.startswith('/')):
                        raise ValueError(
                            "The url_context_root has to start with '/'.")
                    core.check_request_path(
                        url_ctx_root, session=session)
            params = [service_id]
            if change_type == SERVICE_DISABLE:
                sql = """
                    UPDATE `mysql_rest_service_metadata`.`service`
                    SET enabled = FALSE
                    WHERE id = ?
                    """
            elif change_type == SERVICE_ENABLE:
                sql = """
                    UPDATE `mysql_rest_service_metadata`.`service`
                    SET enabled = TRUE
                    WHERE id = ?
                    """
            elif change_type == SERVICE_DELETE:
                sql = """
                    DELETE FROM `mysql_rest_service_metadata`.`service`
                    WHERE id = ?
                    """
            elif change_type == SERVICE_SET_DEFAULT:
                # Only one service may be the default; clear the flag first.
                res = session.run_sql("""
                    UPDATE `mysql_rest_service_metadata`.`service`
                    SET is_default = FALSE
                    """)
                sql = """
                    UPDATE `mysql_rest_service_metadata`.`service`
                    SET is_default = TRUE
                    WHERE id = ?
                    """
            elif change_type == SERVICE_SET_CONTEXT_ROOT:
                sql = """
                    UPDATE `mysql_rest_service_metadata`.`service`
                    SET url_context_root = ?
                    WHERE id = ?
                    """
                params.insert(0, url_ctx_root)
            elif change_type == SERVICE_SET_PROTOCOL:
                sql = """
                    UPDATE `mysql_rest_service_metadata`.`service`
                    SET url_protocol = ?
                    WHERE id = ?
                    """
                params.insert(0, value)
            elif change_type == SERVICE_SET_COMMENTS:
                sql = """
                    UPDATE `mysql_rest_service_metadata`.`service`
                    SET comments = ?
                    WHERE id = ?
                    """
                params.insert(0, value)
            elif change_type == SERVICE_SET_ALL:
                sql = """
                    UPDATE `mysql_rest_service_metadata`.`service`
                    SET enabled = ?,
                        url_context_root = ?,
                        url_protocol = ?,
                        comments = ?,
                        is_default = ?
                    WHERE id = ?
                    """
                if str(value.get("is_default")).lower() == "true":
                    res = session.run_sql("""
                        UPDATE `mysql_rest_service_metadata`.`service`
                        SET is_default = FALSE
                        """)
                # Booleans may arrive as "true"/"1" strings; normalize here.
                params.insert(
                    0, (str(value.get("enabled")).lower() == "true" or
                        str(value.get("enabled")) == "1"))
                params.insert(1, url_ctx_root)
                params.insert(2, value.get("url_protocol", ""))
                params.insert(3, value.get("comments", ""))
                params.insert(
                    4, (str(value.get("is_default")).lower() == "true" or
                        str(value.get("is_default")) == "1"))
            else:
                raise Exception("Operation not supported")
            res = session.run_sql(sql, params)
            if res.get_affected_row_count() == 0:
                raise Exception(
                    f"The specified service with id {service_id} was not "
                    "found.")
        if change_type == SERVICE_SET_DEFAULT:
            return "The service has been made the default."
        if len(service_ids) == 1:
            msg = "The service has been "
        else:
            msg = "The services have been "
        if change_type == SERVICE_DISABLE:
            msg += "disabled."
        elif change_type == SERVICE_ENABLE:
            msg += "enabled."
        elif change_type == SERVICE_DELETE:
            msg += "deleted."
        else:
            msg += "updated."
        return msg
    except Exception as e:
        if raise_exceptions:
            raise
        else:
            print(f"Error: {str(e)}")
def text_3d(string, depth=0.5):
    """Create 3D text by extruding *string* to the given *depth*."""
    # Generate flat vector glyphs for the text.
    source = _vtk.vtkVectorText()
    source.SetText(string)

    # Extrude the glyph outlines along z to give them thickness.
    extruder = _vtk.vtkLinearExtrusionFilter()
    extruder.SetInputConnection(source.GetOutputPort())
    extruder.SetExtrusionTypeToNormalExtrusion()
    extruder.SetVector(0, 0, 1)
    extruder.SetScaleFactor(depth)

    # Triangulate so the result is a clean polygonal mesh.
    triangulator = _vtk.vtkTriangleFilter()
    triangulator.SetInputConnection(extruder.GetOutputPort())
    triangulator.Update()
    return pyvista.wrap(triangulator.GetOutput())
from typing import Optional
def get_secret_version(project: Optional[str] = None,
                       secret: Optional[str] = None,
                       version: Optional[str] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecretVersionResult:
    """
    Get a Secret Manager secret's version. For more information see the [official documentation](https://cloud.google.com/secret-manager/docs/) and [API](https://cloud.google.com/secret-manager/docs/reference/rest/v1/projects.secrets.versions).

    :param str project: The project to get the secret version for. If it
           is not provided, the provider project is used.
    :param str secret: The secret to get the secret version for.
    :param str version: The version of the secret to get. If it
           is not provided, the latest version is retrieved.
    """
    # Arguments forwarded verbatim to the provider invoke call.
    invoke_args = {
        'project': project,
        'secret': secret,
        'version': version,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    raw = pulumi.runtime.invoke('gcp:secretmanager/getSecretVersion:getSecretVersion', invoke_args, opts=opts, typ=GetSecretVersionResult).value
    # Re-wrap the plain result in its awaitable counterpart, field by field.
    return AwaitableGetSecretVersionResult(
        create_time=raw.create_time,
        destroy_time=raw.destroy_time,
        enabled=raw.enabled,
        id=raw.id,
        name=raw.name,
        project=raw.project,
        secret=raw.secret,
        secret_data=raw.secret_data,
        version=raw.version)
import logging
def logged_class(cls):
    """Class decorator that attaches a class-level logger.

    The logger is stored on the class as the ``logger`` attribute and is
    named ``"<module>.<ClassName>"`` so log records are easy to trace.
    """
    logger_name = "{0}.{1}".format(cls.__module__, cls.__name__)
    cls.logger = logging.getLogger(logger_name)
    return cls
import os
import inspect
def get_available_plugin_screens():
    """
    Gets the available screens in this package for dynamic instantiation.

    Scans every plugin sub-directory's ``screen`` folder, imports each
    module found there, and collects every class that inherits
    ``base.ScreenBase`` (classes whose name ends in ``Base`` are skipped
    as abstract helpers).
    """
    skip_files = ['__init__.py']
    found = []
    package_dir = os.path.dirname(__file__)
    for plugin in os.listdir(package_dir):
        if not os.path.isdir(os.path.join(package_dir, plugin)):
            continue
        # We are inside the plugin directory: look for screen modules in
        # its "screen" sub-directory.
        for module_file in os.listdir(os.path.join(package_dir, plugin,
                                                   "screen")):
            if module_file in skip_files or module_file[-3:] != '.py':
                continue
            module_name = plugin + ".screen." + module_file[:-3]
            module = __import__(module_name, globals(), locals(),
                                [module_name.rsplit(".", 1)[-1]])
            # Collect the concrete ScreenBase subclasses in this module.
            for name, obj in inspect.getmembers(module):
                if (inspect.isclass(obj)
                        and issubclass(obj, base.ScreenBase)
                        and not name.endswith("Base")):
                    found.append(obj)
    return found
def crop_point(image, height_rate, width_rate):
    """Crop a randomly positioned region of the image.

    The cropped region has area
    ``height_rate * width_rate * image_height * image_width`` and its
    top-left corner is drawn uniformly from the admissible range.

    Args:
        image: a Image instance.
        height_rate: float, in the interval (0, 1].
        width_rate: float, in the interval (0, 1].
    Returns:
        a Image instance.
    Raises:
        AssertionError: if either rate is not within (0, 1].
    """
    assert 0 < height_rate <= 1 and 0 < width_rate <= 1, 'height_rate and width_rate should be in the interval (0, 1].'
    img_w, img_h = image.size
    # Random top-left corner; with rate == 1 the offset is always 0.
    x0 = img_w * np.random.uniform(0, 1 - width_rate)
    y0 = img_h * np.random.uniform(0, 1 - height_rate)
    x1 = x0 + img_w * width_rate
    y1 = y0 + img_h * height_rate
    return image.crop((x0, y0, x1, y1))
import ast
def test_name_rename():
    """
    Test a simple transformer to rename
    """
    class Renamer(NodeTransformer):
        # Append a marker to every Name node so we can verify the visit.
        def visit_Name(self, node, meta):
            node.id = node.id + '_visited'
            return node

    tree = ast.parse("bob = frank")
    transform(tree, Renamer())
    target_node = tree.body[0].targets[0]
    value_node = tree.body[0].value
    assert target_node.id == "bob_visited"
    assert value_node.id == "frank_visited"
import logging
import random
import string
def fileobj_video(contents=None):
    """
    Create an "mp4" video file on storage and return a File model pointing to it.
    if contents is given and is a string, then write said contents to the file.
    If no contents is given, a random string is generated and set as the contents of the file.
    """
    if contents:
        logging.warning("input = {}".format(contents))
        file_bytes = contents
    else:
        # 20 random printable characters, utf-8 encoded.
        file_bytes = "".join(random.sample(string.printable, 20)).encode('utf-8')
    logging.warning("contents = {}".format(file_bytes))
    studio_file = create_studio_file(file_bytes, preset=format_presets.VIDEO_HIGH_RES, ext='mp4')
    return studio_file['db_file']
def _toIPv4AddrString(intIPv4AddrInteger):
"""Convert the IPv4 address integer to the IPv4 address string.
:param int intIPv4AddrInteger: IPv4 address integer.
:return: IPv4 address string.
:rtype: str
Example::
intIPv4AddrInteger Return
---------------------------------
3221225985 -> '192.0.2.1'
Test:
>>> _toIPv4AddrString(3221225985)
'192.0.2.1'
"""
return (
str((intIPv4AddrInteger >> 24) & 0xFF) + '.' +
str((intIPv4AddrInteger >> 16) & 0xFF) + '.' +
str((intIPv4AddrInteger >> 8) & 0xFF) + '.' +
str( intIPv4AddrInteger & 0xFF)) | ac5f55146eedaf0b7caca19327ae0a88c9d5282a | 25,416 |
def expand_case_matching(s):
    """Expands a string to a case insensitive globable string."""
    out = []
    depth = 0  # nesting depth inside [...] / {...} glob groups
    # On Windows, the drive prefix (e.g. "C:\") is kept verbatim.
    drive_match = WINDOWS_DRIVE_MATCHER.match(s) if ON_WINDOWS else None
    if drive_match:
        drive_prefix = drive_match.group(0)
        out.append(drive_prefix)
        s = s[len(drive_prefix):]
    for ch in s:
        if ch in {"[", "{"}:
            depth += 1
        elif ch in {"]", "}"}:
            depth -= 1
        elif depth == 0 and ch.isalpha():
            # Replace each letter with a [Uu] character class; casefold may
            # expand to several characters (e.g. German sharp s).
            folded = ch.casefold()
            if len(folded) == 1:
                ch = "[{0}{1}]".format(ch.upper(), ch.lower())
            else:
                pieces = ["[{0}{1}]?".format(f.upper(), f.lower())
                          for f in folded[:-1]]
                ch = "".join(pieces) + "[{0}{1}{2}]".format(
                    folded[-1].upper(), folded[-1].lower(), ch)
        out.append(ch)
    return "".join(out)
import re
def look_behind(s: str, end_idx: int) -> str:
    """
    Given a string containing semi-colons, find the span of text after the last
    semi-colon.
    """
    window = s[: (end_idx - 1)]
    # Zero-width lookbehind matches sit just after each ';'; remember the last.
    last_semi_end = 0
    for match in re.finditer(r"(?<=(;))", window):
        last_semi_end = match.end()
    return window[last_semi_end:end_idx].strip()
def sum_fspec(files, outname=None):
    """Take a bunch of (C)PDSs and sums them.

    Parameters
    ----------
    files : list of str
        Paths of the (C)PDS files to average; all must be the same kind.
    outname : str, optional
        Output file name; defaults to ``"tot_<ftype><ext>"``.

    Returns
    -------
    The averaged periodogram; it is also saved to ``outname``.
    """
    # Use the first file to determine the product type and the default name.
    ftype0, contents = get_file_type(files[0])
    pdstype = ftype0.replace("reb", "")
    outname = _assign_value_if_none(
        outname, "tot_" + ftype0 + HEN_FILE_EXTENSION
    )

    def check_and_distribute_files(files):
        # Yield each file's contents, asserting all files share one kind.
        ftype_ref = None
        for i, f in enumerate(files):
            # Bug fix: inspect each file `f`, not always `files[0]` --
            # otherwise mixed file kinds could never be detected.
            ftype, contents = get_file_type(f)
            if i == 0:
                ftype_ref = ftype
            else:
                assert ftype == ftype_ref, "Files must all be of the same kind"
            contents.fftlen = contents.segment_size
            yield contents

    tot_contents = average_periodograms(check_and_distribute_files(files))
    log.info("Saving %s to %s" % (pdstype, outname))
    save_pds(tot_contents, outname)
    return tot_contents
import six
import pytz
def make_aware(dt, tz=None):
    """
    Convert naive datetime object to tz-aware

    Parameters
    ----------
    dt : datetime.datetime
        Naive or aware datetime.
    tz : tzinfo or str, optional
        Target timezone (pytz timezone name accepted); defaults to UTC.

    Returns
    -------
    datetime.datetime
        A tz-aware datetime in the requested timezone.
    """
    if tz:
        if isinstance(tz, six.string_types):
            tz = pytz.timezone(tz)
    else:
        tz = pytz.utc
    if dt.tzinfo:
        # Bug fix: previously this converted an aware datetime to its *own*
        # timezone (`dt.astimezone(dt.tzinfo)`, a no-op), silently ignoring
        # the requested `tz`. Convert to the target timezone instead.
        return dt.astimezone(tz)
    else:
        # Naive input: attach the target zone (pytz-correct localization).
        return tz.localize(dt)
def decrypt(ctxt, kx, spice, blocksize):
    """ Main decryption function

    Dispatches to the size-specific decryption routine based on the
    block size.

    Args:
        ctxt: ciphertext
        kx: key expansion table
        spice: spice
        blocksize: size of block
    Returns:
        Decrypted ciphertext
    """
    spice_arr = int_to_arr(spice, 512)
    ct_arr = int_to_arr(ctxt, blocksize)
    args = (ct_arr, kx, spice_arr, blocksize)
    # Mask covering the partial final 64-bit word of the block.
    lmask = (1 << blocksize % 64) - 1
    if blocksize < 36:
        return tiny_decrypt(*args)
    if blocksize < 65:
        return short_decrypt(*args, lmask)
    if blocksize < 129:
        return medium_decrypt(*args, lmask)
    if blocksize < 513:
        return long_decrypt(*args, lmask)
    return extended_decrypt(*args, lmask)
from itertools import product
def listCombination(lists) -> list:
    """Return every combination of elements across the given lists.

    Takes a list of lists and returns all combinations formed by picking
    one element from each inner list (the Cartesian product), as a list
    of tuples.

    :param lists: a list of lists
    :return: list of tuples, one per possible combination
    """
    # itertools.product yields exactly these combinations; materializing it
    # replaces the previous manual append loop.
    return list(product(*lists))
import os
def DoChopTraj(trajf, chopf, startns, stopns, translate=False):
    """
    Chops a provided trajectory file based on a given
    start time and end time in nanoseconds. Assuming
    2 fs time step and writing results every 1000 steps.
    Helpful for seeing how PMF evolves over time.

    Parameters
    ----------
    trajf : str
        Input trajectory file; lines starting with '#' are skipped.
    chopf : str
        Output file (overwritten if it already exists).
    startns : start time in nanoseconds (0 keeps the start of the file)
    stopns : stop time in nanoseconds, or None to keep through end of file
    translate : Boolean, translate negative angles to positive ones,
        e.g., -90 translates to 270

    Returns
    -------
    True if successful, False otherwise
    """
    # Check existence *before* opening: previously the open() call ran
    # first, so a missing file raised instead of returning False.
    if not os.path.exists(trajf):
        print("%s not found in %s" % (trajf, os.getcwd()))
        return False
    # 2 fs step with output every 1000 steps -> 500 frames per ns.
    # Bug fix: plain division yields floats, which are invalid as list
    # slice bounds in Python 3; convert indices to int explicitly.
    if startns != 0:
        start_idx = int(1000 * startns / 2) + 1
    else:
        start_idx = 0
    stop_idx = int(1000 * stopns / 2) if stopns is not None else None
    with open(trajf, 'r') as f:
        lines = f.readlines()
    # Drop commented lines before index-based chopping.
    filtlines = [line for line in lines if not line.startswith('#')]
    subset = filtlines[start_idx:stop_idx]
    with open(chopf, 'w') as outf:
        for entry in subset:
            value = float(entry.split()[1])
            if translate and value < 0:
                value = value + 360
            elif not translate and value > 180:
                value = value - 360
            outf.write("%s %.14e \n" % (entry.split()[0], value))
    return True
import warnings
def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs):
    """DEPRECATED - use to_sql
    Write records stored in a DataFrame to a SQL database.
    Parameters
    ----------
    frame : DataFrame
    name : string
    con : DBAPI2 connection
    flavor : {'sqlite', 'mysql'}, default 'sqlite'
        The flavor of SQL to use.
    if_exists : {'fail', 'replace', 'append'}, default 'fail'
        - fail: If table exists, do nothing.
        - replace: If table exists, drop it, recreate it, and insert data.
        - append: If table exists, insert data. Create if does not exist.
    index : boolean, default False
        Write DataFrame index as a column
    Notes
    -----
    This function is deprecated in favor of ``to_sql``. There are however
    two differences:
    - With ``to_sql`` the index is written to the sql database by default. To
      keep the behaviour of this function you need to specify ``index=False``.
    - The new ``to_sql`` function supports sqlalchemy engines to work with
      different sql flavors.
    See also
    --------
    pandas.DataFrame.to_sql
    """
    # NOTE(review): the warning text spells "depreciated" (sic); it is a
    # runtime string and is deliberately kept byte-identical here.
    warnings.warn("write_frame is depreciated, use to_sql", FutureWarning)
    # for backwards compatibility, set index=False when not specified
    index = kwargs.pop('index', False)
    return to_sql(frame, name, con, flavor=flavor, if_exists=if_exists,
                  index=index, **kwargs)
def edge_slope(e):
    """Calculate the slope of an edge, 'inf' for vertical edges"""
    vec = edge_vector(e)
    run = round(vec.xy.length, 4)
    try:
        return vec.z / run
    except ZeroDivisionError:
        # Zero horizontal extent -> vertical edge.
        return float("inf")
def pkt_addrs(addr_fragment: str) -> tuple[Address, Address, Address, Address, Address]:
    """Return the address fields from (e.g): '01:078710 --:------ 01:144246'.

    The fragment is three 9-character address fields separated by single
    spaces. Returns ``(src, dst, addr0, addr1, addr2)``.
    Will raise an InvalidAddrSetError if the address fields are not valid.
    """
    # for debug: print(pkt_addrs.cache_info())
    try:
        # Slice the three 9-char fields at a 10-char stride (field + space).
        addrs = [id_to_address(addr_fragment[i : i + 9]) for i in range(0, 30, 10)]
    except ValueError as exc:
        raise InvalidAddrSetError(f"Invalid addr set: {addr_fragment}: {exc}")
    # Exactly one of the three layouts below is permitted; anything else
    # (e.g. all three fields populated, or src == '--:------' with a dst)
    # is rejected.
    if (
        not (
            # .I --- 01:145038 --:------ 01:145038  1F09 003 FF073F  # valid
            # .I --- 04:108173 --:------ 01:155341  2309 003 0001F4  # valid
            addrs[0] not in (NON_DEV_ADDR, NUL_DEV_ADDR)
            and addrs[1] == NON_DEV_ADDR
            and addrs[2] != NON_DEV_ADDR
        )
        and not (
            # .I --- 32:206250 30:082155 --:------  22F1 003 00020A  # valid
            # .I --- 29:151550 29:237552 --:------  22F3 007 00023C03040000  # valid
            addrs[0] not in (NON_DEV_ADDR, NUL_DEV_ADDR)
            and addrs[1] not in (NON_DEV_ADDR, addrs[0])
            and addrs[2] == NON_DEV_ADDR
        )
        and not (
            # .I --- --:------ --:------ 10:105624  1FD4 003 00AAD4  # valid
            addrs[2] not in (NON_DEV_ADDR, NUL_DEV_ADDR)
            and addrs[0] == NON_DEV_ADDR
            and addrs[1] == NON_DEV_ADDR
        )
    ):
        raise InvalidAddrSetError(f"Invalid addr set: {addr_fragment}")
    # Real devices are the non-'--' fields; at most two are populated.
    device_addrs = list(filter(lambda a: a.type != "--", addrs))  # dex
    src_addr = device_addrs[0]
    dst_addr = device_addrs[1] if len(device_addrs) > 1 else NON_DEV_ADDR
    if src_addr.id == dst_addr.id:  # incl. HGI_DEV_ADDR == HGI_DEV_ADDR
        src_addr = dst_addr
    return src_addr, dst_addr, *addrs
import torch
def token_downup(target_dict, source_dict):
    """Transform token features between different distribution.

    Aggregates source token features onto the target token layout via a
    (normalized) assignment matrix built from the two `idx_token` maps.

    Returns:
        x_out (Tensor[B, T, C]): token features on the target layout.
    Args:
        target_dict (dict): dict for target token information
            (keys used: 'idx_token', 'token_num', optionally 'agg_weight').
        source_dict (dict): dict for source token information
            (keys used: 'x', 'idx_token').
    """
    x_s = source_dict['x']
    # idx_token_* map each of the N_init initial tokens to a token index
    # (assumed in [0, S) for source and [0, T) for target -- TODO confirm).
    idx_token_s = source_dict['idx_token']
    idx_token_t = target_dict['idx_token']
    T = target_dict['token_num']
    B, S, C = x_s.shape
    N_init = idx_token_s.shape[1]

    # Per-initial-token aggregation weight; defaults to uniform ones.
    weight = target_dict['agg_weight'] if 'agg_weight' in target_dict.keys() else None
    if weight is None:
        weight = x_s.new_ones(B, N_init, 1)
    weight = weight.reshape(-1)

    # choose the way with fewer flops.
    if N_init < T * S:
        # use sparse matrix multiplication
        # Flops: B * N_init * (C+2)
        # Offset indices so one big (B*T, B*S) block-diagonal matrix can be
        # used instead of a batched matmul.
        idx_token_t = idx_token_t + torch.arange(B, device=x_s.device)[:, None] * T
        idx_token_s = idx_token_s + torch.arange(B, device=x_s.device)[:, None] * S
        coor = torch.stack([idx_token_t, idx_token_s], dim=0).reshape(2, B * N_init)

        # torch.sparse.spmm does not support fp16
        with torch.cuda.amp.autocast(enabled=False):
            # torch.sparse does not support grad for sparse matrix
            weight = weight.float().detach()
            # build a matrix with shape [B*T, B*S]
            A = torch.sparse.FloatTensor(coor, weight, torch.Size([B * T, B * S]))
            # normalize the matrix (row sums -> 1; +1e-6 avoids divide-by-zero)
            all_weight = A.type(torch.float32) @ x_s.new_ones(B * S, 1).type(torch.float32) + 1e-6
            weight = weight / all_weight[(idx_token_t).reshape(-1), 0]
            A = torch.sparse.FloatTensor(coor, weight, torch.Size([B * T, B * S]))
            # sparse matmul
            x_out = A.type(torch.float32) @ x_s.reshape(B * S, C).type(torch.float32)
    else:
        # use dense matrix multiplication
        # Flops: B * T * S * (C+2)
        idx_batch = torch.arange(B, device=x_s.device)[:, None].expand(B, N_init)
        coor = torch.stack([idx_batch, idx_token_t, idx_token_s], dim=0).reshape(3, B * N_init)
        weight = weight.detach()  # detach to reduce training time
        # build a matrix with shape [B, T, S]
        A = torch.sparse.FloatTensor(coor, weight, torch.Size([B, T, S])).to_dense()
        # normalize the matrix
        A = A / (A.sum(dim=-1, keepdim=True) + 1e-6)
        # dense matmul
        x_out = A @ x_s
    # Restore the caller's dtype (the sparse path computed in fp32).
    x_out = x_out.reshape(B, T, C).type(x_s.dtype)
    return x_out
def get_score(train_data,train_labels,test_data,test_labels,problem_type):
    """
    Fit a 3-nearest-neighbours model on the training split and score it on
    the test split.

    Returns the mean squared error from 3NN regression if
    problem_type == 'regression', otherwise the accuracy score from 3NN
    classification. (Note: the classification metric actually computed is
    accuracy, not the f1 score.)
    """
    if (problem_type=="classification"):
        predictor = KNeighborsClassifier(n_neighbors=3)
    else:
        predictor = KNeighborsRegressor(n_neighbors=3)
    predictor.fit(train_data,train_labels)
    predicted_labels = predictor.predict(test_data)
    if (problem_type=="regression"):
        score = mean_squared_error(test_labels,predicted_labels)
    else:
        score = accuracy_score(test_labels,predicted_labels)
    return score
def build_client_datasets_fn(train_dataset, train_clients_per_round):
    """Builds the function for generating client datasets at each round.

    Args:
      train_dataset: A `tff.simulation.ClientData` object.
      train_clients_per_round: The number of client participants in each round.

    Returns:
      A function which returns a list of `tff.simulation.ClientData` objects at
      a given round round_num.
    """
    def client_datasets(round_num):
        del round_num  # Unused.
        # Sample clients uniformly without replacement for this round.
        chosen_ids = np.random.choice(
            train_dataset.client_ids,
            size=train_clients_per_round,
            replace=False)
        datasets = []
        for client_id in chosen_ids:
            datasets.append(
                train_dataset.create_tf_dataset_for_client(client_id))
        return datasets

    return client_datasets
def get_simple_grid(xbounds, ybounds, shift_origin=None):
    """Build a Grid of unit-square pixels covering the given bounds.

    Each bound may be a single value (upper bound, lower defaults to 0)
    or a (min, max) pair. `shift_origin` optionally offsets every pixel.
    """
    xb = np.atleast_1d(xbounds)
    if len(xb) == 1:
        xmin, xmax = 0, xb[0]
    else:
        xmin, xmax = xb
    yb = np.atleast_1d(ybounds)
    if len(yb) == 1:
        ymin, ymax = 0, yb[0]
    else:
        ymin, ymax = yb
    pixel_grid = np.mgrid[xmin:xmax, ymin:ymax]
    flat_pixels = np.concatenate(pixel_grid.T, axis=0)
    if shift_origin is not None:
        # not += because conflict between int and float array
        flat_pixels = flat_pixels + shift_origin
    return Grid(flat_pixels, UNIT_SQUARE)
def query_hecate(session, ra, dec, _radius, _verbose: bool = True):
    """ Query the HECATE catalog

    Cone-searches HECATE around (ra, dec) within _radius and returns
    (match_flag, ra list, dec list, offset(arcsec) list, mag list,
    filter list, dist(Mpc) list, dist_err(Mpc) list, source list).
    Entries without a B magnitude are skipped.
    """
    m = 0
    gal_offset = []; mag = []; filt = []; dist = []; dist_err = []
    gal_ra = []; gal_dec = []; source = []
    # set up query
    try:
        query = session.query(HecateQ3cRecord)
        query = hecate_q3c_orm_filters(query, {'cone': f'{ra},{dec},{_radius}'})
    except Exception as _e3:
        if _verbose:
            print(f"{_e3}")
            print(f"Failed to execute query for RA, Dec = ({ra},{dec})")
        # Bug fix: previously execution fell through after a failure and hit
        # a NameError on the unbound `query`; return empty results instead.
        return m, gal_ra, gal_dec, gal_offset, mag, filt, dist, dist_err, source
    if len(query.all()) > 0:
        m += 1
        for _x in HecateQ3cRecord.serialize_list(query.all()):
            # NaN != NaN, so this skips entries with no B magnitude.
            if _x['bt'] == _x['bt']:
                mag.append(_x['bt'])
                filt.append('B')
                gal = SkyCoord(_x['ra']*u.deg, _x['dec']*u.deg)
                cand = SkyCoord(ra*u.deg, dec*u.deg)
                gal_offset.append(cand.separation(gal).arcsec)
                gal_ra.append(_x['ra'])
                gal_dec.append(_x['dec'])
                dist.append(_x['d'])  # Mpc
                dist_err.append(_x['e_d'])  # Mpc
                source.append('HECATE')
    return m, gal_ra, gal_dec, gal_offset, mag, filt, dist, dist_err, source
def all_main_characters(raw_data: AniListRawResponse) -> list[Character]:
    """Returns all of the main characters from the data."""
    media = anime_media(raw_data)
    return media["mainCharacters"]["nodes"]
from typing import List
def sequence_to_ngram(sequence: str, N: int) -> List[str]:
    """
    Chops a sequence into overlapping N-grams (substrings of length N).

    :param sequence: Sequence to convert to N-grams
    :type sequence: str
    :param N: Length of the N-grams
    :type N: int
    :return: List of n-grams (empty when len(sequence) < N)
    :rtype: List[str]
    """
    ngrams: List[str] = []
    for start in range(len(sequence) - N + 1):
        ngrams.append(sequence[start:start + N])
    return ngrams
def _convert_for_receive(profile):
    """Convert profile to be fed into the receive model.

    Args:
        profile (pandas.DataFrame): Profile to convert.
    Returns:
        tuple[pandas.DataFrame, pandas.DataFrame]: the converted profile
        (rows with a known age) and the rows that had no age.
    """
    missing_age = profile.age.isna()
    without_profile = profile[missing_age].reset_index(drop=True)
    with_profile = profile[~missing_age].reset_index(drop=True)
    # Apply the transformation pipeline, innermost step first.
    with_profile = _extract_age_bins(with_profile)
    with_profile = _explode_membership_date(with_profile)
    with_profile = _transform_gender(with_profile)
    with_profile = _transform_generation(with_profile)
    with_profile = _transform_age_group(with_profile)
    return with_profile, without_profile
def _format_path(path):
"""Format path to data for which an error was found.
:param path: Path as a list of keys/indexes used to get to a piece of data
:type path: collections.deque[str|int]
:returns: String representation of a given path
:rtype: str
"""
path_with_brackets = (
''.join('[{!r}]'.format(fragment) for fragment in path)
)
return '{}'.format(path_with_brackets) | 1809080453af154824e867cd8104cedbd616b937 | 25,435 |
import os
def common_mean_watson(Data1, Data2, NumSims=5000, print_result=True, plot='no', save=False, save_folder='.', fmt='svg'):
    """
    Conduct a Watson V test for a common mean on two directional data sets.
    This function calculates Watson's V statistic from input files through
    Monte Carlo simulation in order to test whether two populations of
    directional data could have been drawn from a common mean. The critical
    angle between the two sample mean directions and the corresponding
    McFadden and McElhinny (1990) classification is printed.
    Parameters
    ----------
    Data1 : a nested list of directional data [dec,inc] (a di_block)
    Data2 : a nested list of directional data [dec,inc] (a di_block)
    NumSims : number of Monte Carlo simulations (default is 5000)
    print_result : default is to print the test result (True)
    plot : the default is no plot ('no'). Putting 'yes' will the plot the CDF
        from the Monte Carlo simulations.
    save : optional save of plots (default is False)
    save_folder : path to where plots will be saved (default is current)
    fmt : format of figures to be saved (default is 'svg')
    Returns
    -------
    printed text : text describing the test result is printed
    result : a boolean where 0 is fail and 1 is pass
    angle : angle between the Fisher means of the two data sets
    critical_angle : critical angle for the test to pass
    Examples
    --------
    Develop two populations of directions using ``ipmag.fishrot``. Use the
    function to determine if they share a common mean.
    >>> directions_A = ipmag.fishrot(k=20, n=30, dec=40, inc=60)
    >>> directions_B = ipmag.fishrot(k=35, n=25, dec=42, inc=57)
    >>> ipmag.common_mean_watson(directions_A, directions_B)
    """
    pars_1 = pmag.fisher_mean(Data1)
    pars_2 = pmag.fisher_mean(Data2)
    cart_1 = pmag.dir2cart([pars_1["dec"], pars_1["inc"], pars_1["r"]])
    cart_2 = pmag.dir2cart([pars_2['dec'], pars_2['inc'], pars_2["r"]])
    Sw = pars_1['k'] * pars_1['r'] + pars_2['k'] * pars_2['r']  # k1*r1+k2*r2
    xhat_1 = pars_1['k'] * cart_1[0] + pars_2['k'] * cart_2[0]  # k1*x1+k2*x2
    xhat_2 = pars_1['k'] * cart_1[1] + pars_2['k'] * cart_2[1]  # k1*y1+k2*y2
    xhat_3 = pars_1['k'] * cart_1[2] + pars_2['k'] * cart_2[2]  # k1*z1+k2*z2
    Rw = np.sqrt(xhat_1**2 + xhat_2**2 + xhat_3**2)
    V = 2 * (Sw - Rw)
    # keep weighted sum for later when determining the "critical angle"
    # let's save it as Sr (notation of McFadden and McElhinny, 1990)
    Sr = Sw
    # do monte carlo simulation of datasets with same kappas as data,
    # but a common mean
    Vp = []  # set of Vs from simulations
    for k in range(NumSims):
        # get a set of N1 fisher distributed vectors with k1,
        # calculate fisher stats
        Dirp = []
        for i in range(pars_1["n"]):
            Dirp.append(pmag.fshdev(pars_1["k"]))
        pars_p1 = pmag.fisher_mean(Dirp)
        # get a set of N2 fisher distributed vectors with k2,
        # calculate fisher stats
        Dirp = []
        for i in range(pars_2["n"]):
            Dirp.append(pmag.fshdev(pars_2["k"]))
        pars_p2 = pmag.fisher_mean(Dirp)
        # get the V for these
        Vk = pmag.vfunc(pars_p1, pars_p2)
        Vp.append(Vk)
    # sort the Vs, get Vcrit (95th percentile one)
    Vp.sort()
    k = int(.95 * NumSims)
    Vcrit = Vp[k]
    # equation 18 of McFadden and McElhinny, 1990 calculates the critical
    # value of R (Rwc)
    Rwc = Sr - (Vcrit/2)
    # following equation 19 of McFadden and McElhinny (1990) the critical
    # angle is calculated. If the observed angle (also calculated below)
    # between the data set means exceeds the critical angle the hypothesis
    # of a common mean direction may be rejected at the 95% confidence
    # level. The critical angle is simply a different way to present
    # Watson's V parameter so it makes sense to use the Watson V parameter
    # in comparison with the critical value of V for considering the test
    # results. What calculating the critical angle allows for is the
    # classification of McFadden and McElhinny (1990) to be made
    # for data sets that are consistent with sharing a common mean.
    k1 = pars_1['k']
    k2 = pars_2['k']
    R1 = pars_1['r']
    R2 = pars_2['r']
    critical_angle = np.degrees(np.arccos(((Rwc**2) - ((k1 * R1)**2)
                                           - ((k2 * R2)**2))/
                                          (2 * k1 * R1 * k2 * R2)))
    D1 = (pars_1['dec'], pars_1['inc'])
    D2 = (pars_2['dec'], pars_2['inc'])
    angle = pmag.angle(D1, D2)
    if print_result == True:
        print("Results of Watson V test: ")
        print("")
        print("Watson's V:           " '%.1f' % (V))
        print("Critical value of V:  " '%.1f' % (Vcrit))
    if V < Vcrit:
        if print_result == True:
            print('"Pass": Since V is less than Vcrit, the null hypothesis')
            print('that the two populations are drawn from distributions')
            print('that share a common mean direction can not be rejected.')
        result = 1
    else:
        # Bug fix: this branch was `elif V > Vcrit`, leaving `result`
        # unbound (NameError at return) when V == Vcrit exactly; ties are
        # now treated as a failure to share a common mean.
        if print_result == True:
            print('"Fail": Since V is greater than Vcrit, the two means can')
            print('be distinguished at the 95% confidence level.')
        result = 0
    if print_result == True:
        print("")
        print("M&M1990 classification:")
        print("")
        print("Angle between data set means: " '%.1f' % (angle))
        print("Critical angle for M&M1990:   " '%.1f' % (critical_angle))
    if print_result == True:
        if V > Vcrit:
            print("")
        elif V < Vcrit:
            if critical_angle < 5:
                print("The McFadden and McElhinny (1990) classification for")
                print("this test is: 'A'")
            elif critical_angle < 10:
                print("The McFadden and McElhinny (1990) classification for")
                print("this test is: 'B'")
            elif critical_angle < 20:
                print("The McFadden and McElhinny (1990) classification for")
                print("this test is: 'C'")
            else:
                print("The McFadden and McElhinny (1990) classification for")
                print("this test is: 'INDETERMINATE;")
    if plot == 'yes':
        CDF = {'cdf': 1}
        # pmagplotlib.plot_init(CDF['cdf'],5,5)
        plt.figure(figsize=(3.5, 2.5))
        p1 = pmagplotlib.plot_cdf(CDF['cdf'], Vp, "Watson's V", 'r', "")
        p2 = pmagplotlib.plot_vs(CDF['cdf'], [V], 'g', '-')
        p3 = pmagplotlib.plot_vs(CDF['cdf'], [Vp[k]], 'b', '--')
        # pmagplotlib.draw_figs(CDF)
        if save == True:
            plt.savefig(os.path.join(
                save_folder, 'common_mean_watson') + '.' + fmt)
        pmagplotlib.show_fig(CDF['cdf'])
    return result, angle[0], critical_angle
def GetLayouts():
    """Returns the layout proxies on the active session.
    Layout proxies are used to place views in a grid."""
    proxy_manager = servermanager.ProxyManager()
    return proxy_manager.GetProxiesInGroup("layouts")
import os
def GetRelativePath(starting_dir, dest):
    """Creates a relative path from the starting_dir to the dest."""
    assert starting_dir
    assert dest
    start = os.path.realpath(starting_dir).rstrip(os.path.sep)
    target = os.path.realpath(dest).rstrip(os.path.sep)
    shared_prefix = GetCommonPath(start, target)
    if not shared_prefix:
        # Nothing in common: the absolute destination is the best we can do.
        return target
    start = start[len(shared_prefix):]
    target = target[len(shared_prefix):]
    if not start:
        # Destination is at or below the starting directory.
        return os.path.join(".", target) if target else '.'
    # One ".." per remaining path component of the starting directory.
    ups = os.path.splitdrive(start)[1].count(os.path.sep) + 1
    return "{}{}".format("..{}".format(os.path.sep) * ups, target)
def _calculate_mean_cvss():
    """Calculate the mean CVSS score across all known vulnerabilities.

    Runs a MongoDB aggregation over the osvdb collection and averages the
    calculated CVSS base scores; returns None when no result is available.
    """
    results = db.osvdb.aggregate([
        {"$unwind": "$cvss_metrics"},
        {"$group": {
            "_id": "null",
            "avgCVSS": {"$avg": "$cvss_metrics.calculated_cvss_base_score"}
        }}
    ])
    logger.info("There are {} entries in this aggregation.".format(
        len(results['result'])))
    logger.debug("The headers are: {}".format(results['result'][0].keys()))
    try:
        avgCVSS = results['result'][0]['avgCVSS']
    except (KeyError, IndexError, TypeError):
        # Narrowed from a bare `except:` -- only a missing/empty result set
        # should fall back to None, not unrelated errors (or KeyboardInterrupt).
        avgCVSS = None
    return avgCVSS
def decomposition_super1(centroid, highway, coherence, coordinates, input):
    """
    Function to perform Experiment 2: Differential Decomposition with level-specific weight

    Args:
        centroid: Cluster centroid of super pixels
        highway: Super pixels after Stage I Super pixeling (unused here)
        coherence: Coherence value at super pixel level
        coordinates: Coordinates of pixels in each highway cluster
        input: 4 channel input data
    Returns:
        decom_super_coh: Coherence estimate passed from super pixel to pixel level
    """
    # 300x300 pixel-level coherence map, initialised to zero.
    decom_super_coh = [[0] * 300 for _ in range(300)]
    # Min-max normalise both the input channels and the centroids so the
    # distance below is computed in a comparable [0, 1] feature space.
    input_min = input.min(axis=(0, 1), keepdims=True)
    input_max = input.max(axis=(0, 1), keepdims=True)
    input_norm = (input - input_min) / (input_max - input_min)
    c_min = centroid.min(axis=(0, 1), keepdims=True)
    c_max = centroid.max(axis=(0, 1), keepdims=True)
    c_norm = (centroid - c_min) / (c_max - c_min)
    # For every pixel of every cluster, scale the cluster's coherence by
    # (1 - distance to the cluster centre) and write it to the map.
    for cluster_idx, cluster in enumerate(coordinates):
        cluster_center = c_norm[0][cluster_idx]
        for point in cluster:
            px, py = point[0], point[1]
            pixel_feat = input_norm[px, py]
            distance = norm(cluster_center - pixel_feat)
            decom_super_coh[px][py] = coherence[cluster_idx] * (1 - distance)
    return decom_super_coh
from typing import List
import math
def get_deck_xs(bridge: Bridge, ctx: BuildContext) -> List[float]:
    """X positions of nodes on the bridge deck.
    First the required X positions 'RX' are determined, positions of loads and
    abutments etc.. After that a number of X positions are calculated between
    each pair of adjacent X positions 'RX_i' and 'RX_j', such that the maximum
    distance between X positions does not exceed 'bridge.base_mesh_deck_max_x'.
    """
    required = set()
    # From piers.
    for support in bridge.supports:
        for px in support.x_min_max_top():
            required.add(round_m(px))
    # Bridge ends.
    required.add(round_m(bridge.x_min))
    required.add(round_m(bridge.x_max))
    # From loads.
    for load_point in ctx.add_loads:
        required.add(round_m(load_point.x))
    # From material properties.
    for grid_x in get_deck_section_grid(bridge)[0]:
        required.add(round_m(grid_x))
    # Additional nodes requested by the Bridge.
    for extra_x in bridge.additional_xs:
        required.add(round_m(extra_x))
    required = sorted(required)
    print_i(f"Required node X positions on deck (from all sources) =\n  {required}")
    # Subdivide each adjacent pair so spacing <= base_mesh_deck_max_x.
    deck_xs = set()
    for lo, hi in zip(required, required[1:]):
        count = math.ceil((hi - lo) / bridge.base_mesh_deck_max_x) + 1
        for xv in np.linspace(lo, hi, num=count):
            deck_xs.add(round_m(xv))
    return sorted(deck_xs)
def helicsGetFederateByName(fed_name: str) -> HelicsFederate:
    """
    Get an existing `helics.HelicsFederate` from a core by name.
    The federate must have been created by one of the other functions and at least one of the objects referencing the created federate must still be active in the process.
    **Parameters**
    - **`fed_name`** - The name of the federate to retrieve.
    **Returns**: `helics.HelicsFederate`.
    """
    fn = loadSym("helicsGetFederateByName")
    err = helicsErrorInitialize()
    handle = fn(cstring(fed_name), err)
    if err.error_code == 0:
        return HelicsFederate(handle)
    raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
import functools
import logging
def persistant_property(*key_args):
    """Utility decorator for Persistable-based objects. Adds any arguments as properties
    that automatically loads and stores the value in the persistence table in the database.
    These arguments are created as permanent persistent properties.

    For each key ``k`` the decorated class gains: a ``_k`` attribute holding
    the cached value, a ``_k_loaded`` flag, and a ``k`` property whose getter
    loads from persistence and whose setter saves to persistence.
    """
    def _decorator(cls):
        @functools.wraps(cls)
        def wrapper(*args, **kwargs):
            # NOTE(review): the properties are (re)installed on the class on
            # every instantiation, not at decoration time — confirm this is
            # intentional.
            for key in key_args:
                # this _closure function is required since we're using a for loop and a closure
                # see http://www.discoversdk.com/blog/closures-in-python-3
                def _closure(key=key):
                    internal_key = f'_{key}' # internal value
                    internal_key_loaded = f'_{key}_loaded' # boolean set to True after it's loaded
                    def _getter(self):
                        # Best-effort load: on failure we log and fall back to
                        # whatever cached value is already on the instance.
                        try:
                            self.load_persistent_property(key)
                        except Exception as e:
                            logging.error(f"unable to load persistence key {key}: {e}")
                        return getattr(self, internal_key)
                    def _setter(self, value):
                        # NOTE(review): retry() receives the *result* of
                        # save_persistent_property(...), which has already run
                        # once — if retry expects a callable, this looks like a
                        # bug; confirm retry's contract.
                        try:
                            retry(self.save_persistent_property(key, value))
                        except Exception as e:
                            logging.error(f"unable to save persistence key {key}: {e}")
                        setattr(self, internal_key, value)
                    setattr(cls, internal_key, None)
                    setattr(cls, internal_key_loaded, False)
                    setattr(cls, key, property(_getter, _setter))
                _closure(key)
            return cls(*args, **kwargs)
        return wrapper
    return _decorator
import pkg_resources
import csv
def states():
    """
    Get a dictionary of Backpage city names mapped to their respective states.

    Reads the bundled ``resources/City_State_Pairs.csv`` resource, whose rows
    are ``city,state`` pairs.

    Returns:
        dictionary of Backpage city names mapped to their states
    """
    states = {}
    fname = pkg_resources.resource_filename(__name__, 'resources/City_State_Pairs.csv')
    # BUG FIX: the 'rU' (universal newlines) mode was deprecated and removed in
    # Python 3.11. The csv module's documented way to open files is mode 'r'
    # with newline=''.
    with open(fname, 'r', newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            states[row[0]] = row[1]
    return states
def matrix_set_diag(input_x, diagonal, k=0, alignment="RIGHT_LEFT"):
    """
    Return a batched matrix tensor with new batched diagonal values.

    Args:
        input_x (Tensor): ``(..., M, N)`` matrix whose diagonals are to be set.
        diagonal (Tensor): ``(..., max_diag_len)`` or ``(..., num_diags, max_diag_len)``
            values to place on the selected diagonals of the output.
        k (int, list, tuple): scalar or pair ``(k[0], k[1])`` giving the lower
            and upper diagonal indices of the band to set.
        alignment (str): how diagonals shorter than ``max_diag_len`` are padded.
            One of "RIGHT_LEFT" (default), "LEFT_RIGHT", "LEFT_LEFT" or
            "RIGHT_RIGHT"; e.g. "RIGHT_LEFT" right-aligns superdiagonals
            (left-pads the row) and left-aligns subdiagonals (right-pads).

    Returns:
        Tensor of shape ``(..., M, N)``, equal to `input_x` except on the
        specified diagonals of the innermost matrices.

    Supported Platforms:
        ``CPU`` ``GPU``
    """
    setter = MatrixSetDiag(alignment)
    # Normalise `k` into a 2-element int32 tensor (lower, upper).
    band = mnp.zeros((2,), dtype=mstype.int32)
    if isinstance(k, int):
        band += k
    elif isinstance(k, (list, tuple)):
        band = k
    else:
        _raise_value_error("input k to indicate diagonal region is invalid.")
    band = _to_tensor(band, dtype=mstype.int32)
    return setter(input_x, diagonal, band)
def execute_payment(pp_req):
    """Execute a payment previously authorized by the client.

    Looks up the PayPal payment referenced by ``pp_req['paymentId']`` and
    executes it on behalf of ``pp_req['PayerID']``.

    Returns:
        bool: True when PayPal reports a successful execution, else False.
    """
    payment = paypalrestsdk.Payment.find(pp_req['paymentId'])
    return bool(payment.execute({"payer_id": pp_req['PayerID']}))
def create_structural_eqs(X, Y, G, n_nodes_se=40, n_nodes_M=100, activation_se='relu'):
    """
    Method to create structural equations (F:U->X) and the original prediction model (M:X->Y). This also calculates and stores residuals.

    Parameters
    ----------
    X : pandas DataFrame
        input features of the dataset
    Y : pandas Series
        target to be predicted
    G : networkx.classes.digraph.DiGraph
        causal graph of the data
    n_nodes_se : int
        number of nodes for the neural network of the strutural equations (SE)
    n_nodes_M: int
        number of nodes in the neural network of the original model (M)
    activation_se: str
        type of activation for the structural equations

    Returns
    ----------
    struct_eq: keras.engine.functional.Functional - keras Model
        structural equations (F:U->X)
    final : keras.engine.functional.Functional - keras Model
        model in the latent space. Final model that uses structural equations and original prediction model: M^:U->Y. M^(u)=M(F(u))

    Additionally:
    In the folder data, residuals are stored
    In the folder data, the original prediction model (M - stored as "nn_model"), the model for child nodes and structural equations are stored.
    Performance metrics are printed in the terminal
    """
    # split dataset
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.1, random_state=42)
    # take all nodes except target >>> classification
    nodes = [n for n in list(G.nodes) if n != Y.name]
    # Standardise data
    # NOTE(review): X_train/X_test are standardised *in place* via .loc, so a
    # caller that passed shared DataFrames will observe the mutation — confirm
    # this is intended.
    scaler = StandardScaler()
    scaler.fit(X_train)
    X_train.loc[:, :] = scaler.transform(X_train)
    X_test.loc[:, :] = scaler.transform(X_test)
    # get root nodes
    root_nodes = [n for n, d in G.in_degree() if d == 0]
    # define variables where residuals and residual inputs will be stored
    U_train = X_train[root_nodes].copy()
    U_test = X_test[root_nodes].copy()
    res_inputs = []
    # define tf inputs, one for each node
    node_inputs = {n: keras.Input(shape=(1,), name=n) for n in nodes}
    # define dic to store the final X = F(U) with U = (roots, residuals) for each node
    # fill the root nodes directly with input layers
    X_n = {r: node_inputs[r] for r in root_nodes}
    # auxiliary while-loop variables
    added_nodes = []
    root_nodes_tmp = root_nodes
    # Sweep the DAG level by level: a node is processed once all of its parents
    # have been processed (or are roots).
    while set(root_nodes_tmp) != set(nodes):
        # loop until all nodes are either root or dealt with (root_nodes_tmp
        # contains root nodes and is updated with dealt with nodes)
        for n in nodes:
            parents = list(G.predecessors(n))
            # go on only when:
            # n has parents
            # parents are root_nodes or nodes already dealt with
            # n is not a root node and has not been dealt with yet
            if G.in_degree[n] != 0 and set(parents).issubset(set(root_nodes_tmp)) and not n in root_nodes_tmp:
                print("dealing with ", n, " with parents: ", parents)
                # build the model from parents to node n
                if len(parents) == 1:
                    parent = parents[0]
                    inputs = node_inputs[parent]
                    conc = tf.identity(inputs)
                    X_train_p = X_train[parent].values
                    X_test_p = X_test[parent].values
                else:
                    inputs = [node_inputs[p] for p in parents]
                    conc = layers.Concatenate()(inputs)
                    X_train_p = [X_train[p].values for p in parents]
                    X_test_p = [X_test[p].values for p in parents]
                # Small MLP mapping parents -> node n (one structural equation).
                x = layers.Dense(n_nodes_se, activation=activation_se)(conc)
                x = layers.Dense(n_nodes_se, activation=activation_se)(x)
                out = layers.Dense(1)(x)
                ff = keras.Model(inputs=inputs, outputs=out, name=n)
                ff.compile(loss=tf.losses.MeanSquaredError(), optimizer=tf.optimizers.Adam(learning_rate=0.0001))
                hist = ff.fit(X_train_p, X_train[n].values, batch_size=512,
                              epochs=200, verbose=0, validation_split=0.25, callbacks=[early_stopping])
                #plot history
                # plt.plot(hist.history['val_loss'])
                # plt.plot(hist.history['loss'])
                # plt.show()
                #
                # plt.figure()
                # pred_tmp=ff.predict(X_test_p)
                # plt.plot(X_test[n].values, pred_tmp.reshape(1,-1)[0], '.', alpha=0.2)
                score = ff.evaluate(X_train_p, X_train[n].values, verbose=0)
                print('The TRAIN score for model node ', n, ' is ', score)
                score = ff.evaluate(X_test_p, X_test[n].values, verbose=0)
                print('The TEST score for model node ', n, ' is ', score)
                # save picture of the model
                #dot_img_file = 'model_nn' + node_tmp +'.png'
                #keras.utils.plot_model(nn, to_file=dot_img_file, show_shapes=True)
                # plot model graph
                # keras.utils.plot_model(ff, show_shapes=True)
                # Calculate residuals as the value of the node - the prediction of the model for that node
                pred = ff.predict(X_train_p).reshape(X_train.shape[0],)
                U_train['r_' + n] = X_train[n].values - pred
                pred = ff.predict(X_test_p).reshape(X_test.shape[0],)
                U_test['r_' + n] = X_test[n].values - pred
                # build input for residual of node n
                res = keras.Input(shape=(1,), name="r_" + n)
                res_inputs.append(res)
                # create the reconstructed node as the built model ff + the residual
                X_n[n] = layers.Add(name=n + "_reconstructed")([ff([X_n[p] for p in parents]), res])
                # Save nn of the structural equation
                ff.save('models/'+str(n)+'.h5')
                added_nodes.append(n)
        # Add the node in the roots node, so the graph can be explored in the next dependence level
        root_nodes_tmp = root_nodes_tmp + added_nodes
        added_nodes = []
    # Define the structural equation model
    inputs = [X_n[r] for r in root_nodes] + res_inputs
    # Reorder the inputs and list "nodes" is
    # (input layer names look like "n" or "r_n"; strip both decorations to
    # recover the node name before matching against `nodes`)
    col_name_inputs = [i.name[:-2].split('r_')[-1] for i in inputs]
    inputs = list(np.array(inputs)[[col_name_inputs.index(col) for col in nodes]])
    # concatenate outputs to build a stacked tensor (actually a vector),
    # respecting the order of the original nodes (i.e. same order of X_in)
    X_out = tf.concat([X_n[x] for x in nodes], axis=1, name='X_out')
    struct_eq_tmp = keras.Model(inputs=inputs, outputs=X_out, name="struct_eq_tmp")
    dim_input_se = U_train.shape[1]
    inputs = keras.Input(shape=(dim_input_se,), name="U")
    # define the model struct_eq U->X
    x = keras.layers.Lambda(lambda x: tf.split(x, num_or_size_splits=dim_input_se, axis=1))(inputs)
    out_x = struct_eq_tmp(x)
    struct_eq = keras.Model(inputs=inputs, outputs=out_x, name="struct_eq")
    struct_eq.compile(loss=tf.losses.MeanSquaredError(), optimizer=tf.optimizers.Adam())
    struct_eq.save('models/nn_struct_eq.h5')
    # Save residual dataset
    columns_dataset_u = [i.split('r_')[-1] for i in U_train.columns]
    columns_dataset_u = list(np.array(U_train.columns)[[columns_dataset_u.index(col) for col in nodes]])
    U_train[columns_dataset_u].to_csv('data/res_train.csv', index=False)
    U_test[columns_dataset_u].to_csv('data/res_test.csv', index=False)
    ### Build M, standard ML model
    # model going from features X to target Y
    # the inputs are precisely the node inputs
    # X matrix -> Y
    X_in = keras.Input(shape=(len(nodes)), name='X_in')
    x = layers.Dense(n_nodes_M, activation='relu')(X_in)
    x = layers.Dense(int(n_nodes_M/2), activation='relu')(x)
    out = layers.Dense(2, activation='softmax')(x)
    M = keras.Model(inputs=X_in, outputs=out, name="M")
    M.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.Adam(learning_rate=0.001))
    hist=M.fit(X_train, y_train, batch_size=512, epochs=200, verbose=0,
               validation_split=0.25, callbacks=[early_stopping])
    # plt.plot(hist.history['val_loss'])
    # plt.plot(hist.history['loss'])
    # plt.show()
    M.save('models/nn_model.h5')
    ### Build a model from root_nodes + residuals to Y, i.e. Y^ = M(F(U))
    # matrix U -> Y
    inputs = keras.Input(shape=(U_train.shape[1],), name="U")
    out = M(struct_eq(inputs))
    final = keras.Model(inputs=inputs, outputs=out, name="final")
    final.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.Adam())
    # final.summary()
    # dot_img_file = 'final.png'
    # keras.utils.plot_model(final, to_file=dot_img_file, show_shapes=True)
    # final.save('final.h5')
    ### make predictions
    # Load final model (the weights are already computed in model M and
    # structural equation F, no need to fit)
    pred = final.predict(U_test)[:, 1]
    # Print report
    print(classification_report(y_test, pred > 0.5))
    return struct_eq, final
import logging
def twitch_checkdspstatus(_double_check: bool) -> bool:
    """
    Uses current Selenium browser to determine if DSP is online on Twitch.

    :param _double_check: Internally used to recursively call function again to double check if DSP is online
    :return: True if DSP is online. False if DSP is offline.
    """
    try:
        if _double_check:
            logging.debug("Double Checking Run. Refresh and Wait")
            # Bounce through a neutral page so the stream page is fully reloaded.
            trackerglobals.BROWSER.get("https://www.duckduckgo.com")
            sleep(2.0)
            trackerglobals.BROWSER.get(trackerglobals.URL)
            sleep(5.0)
            logging.debug("Refreshed. Checking the Second Time.")
        logging.debug("Checking if DSP is online.")
        # Wait up to 2s for the live-status indicator on the channel link.
        # BUG FIX: the attribute selector was missing its closing ']', which makes
        # Selenium raise InvalidSelectorException instead of the TimeoutException
        # this function relies on to report "offline".
        _ = WebDriverWait(trackerglobals.BROWSER, 2).until(EC.presence_of_element_located(
            (By.CSS_SELECTOR, 'a[href="/darksydephil"][status="tw-channel-status-indicator--live"]')))  # noqa
        if _double_check:
            logging.debug("DSP is online. Returning True.")
            return True
        # First positive hit: re-run once with a page refresh to confirm.
        return twitch_checkdspstatus(True)
    except TimeoutException:
        logging.debug("DSP is offline. Returning False.")
        return False
import random
import math
def random_walk(humans, dt, energy, temperature):
    """
    calculates location, speed and acceleration by adding random values to the speed

    Args:
        humans (list): list of all humans
        dt (float): time step in which the movement is calculated
        energy (float): amount of movement (total kinetic budget used for rescaling)
        temperature (float): scales the magnitude of the random velocity kicks

    Returns:
        humans (list): list of all humans
    """
    # FIX: removed the unused local `old_humans = humans` (dead alias).
    new_energy = 0
    for i, h in enumerate(humans):
        infection(humans, h, i)
        new_location = h.location + dt * h.velocity
        # Gaussian velocity kick, scaled by temperature.
        velocity_gen_x = random.gauss(0, 1)
        velocity_gen_y = random.gauss(0, 1)
        velocity_random = [
            velocity_gen_x * float(temperature)/15, velocity_gen_y * float(temperature)/15]
        new_velocity = h.velocity + velocity_random
        # handle maximum velocity based on total energy
        # NOTE(review): new_energy only accumulates over the humans processed so
        # far, so this normalisation factor differs per human within one call —
        # confirm the partial-sum scaling is intended.
        new_energy += np.linalg.norm(new_velocity)**2
        factor = math.sqrt(energy / new_energy)
        new_velocity = new_velocity*factor
        abs_speed = np.linalg.norm(new_velocity)**2
        factor_v = math.sqrt(abs_speed / energy)
        # Clamp runaway speeds relative to the population share of the budget.
        if factor_v > 3*(1/len(humans)):
            scaling = 0.03/factor_v
            new_velocity = new_velocity*scaling
        h.update(new_location, new_velocity)
    return humans
def format_name(name_format: str, state: State):
    """Format a checkpoint filename according to the ``name_format`` and the training :class:`~.State`.

    The following format variables are available:

    +------------------------+-------------------------------------------------------+
    | Variable               | Description                                           |
    +========================+=======================================================+
    | ``{rank}``             | The global rank, as returned by                       |
    |                        | :func:`~.dist.get_global_rank`.                       |
    +------------------------+-------------------------------------------------------+
    | ``{local_rank}``       | The local rank of the process, as returned by         |
    |                        | :func:`~.dist.get_local_rank`.                        |
    +------------------------+-------------------------------------------------------+
    | ``{world_size}``       | The world size, as returned by                        |
    |                        | :func:`~.dist.get_world_size`.                        |
    +------------------------+-------------------------------------------------------+
    | ``{local_world_size}`` | The local world size, as returned by                  |
    |                        | :func:`~.dist.get_local_world_size`.                  |
    +------------------------+-------------------------------------------------------+
    | ``{node_rank}``        | The node rank, as returned by                         |
    |                        | :func:`~.dist.get_node_rank`.                         |
    +------------------------+-------------------------------------------------------+
    | ``{epoch}``            | The total epoch count, as returned by                 |
    |                        | :meth:`~composer.core.time.Timer.epoch`.              |
    +------------------------+-------------------------------------------------------+
    | ``{batch}``            | The total batch count, as returned by                 |
    |                        | :meth:`~composer.core.time.Timer.batch`.              |
    +------------------------+-------------------------------------------------------+
    | ``{batch_in_epoch}``   | The batch count in the current epoch, as returned by  |
    |                        | :meth:`~composer.core.time.Timer.batch_in_epoch`.     |
    +------------------------+-------------------------------------------------------+
    | ``{sample}``           | The total sample count, as returned by                |
    |                        | :meth:`~composer.core.time.Timer.sample`.             |
    +------------------------+-------------------------------------------------------+
    | ``{sample_in_epoch}``  | The sample count in the current epoch, as returned by |
    |                        | :meth:`~composer.core.time.Timer.sample_in_epoch`.    |
    +------------------------+-------------------------------------------------------+
    | ``{token}``            | The total token count, as returned by                 |
    |                        | :meth:`~composer.core.time.Timer.token`.              |
    +------------------------+-------------------------------------------------------+
    | ``{token_in_epoch}``   | The token count in the current epoch, as returned by  |
    |                        | :meth:`~composer.core.time.Timer.token_in_epoch`.     |
    +------------------------+-------------------------------------------------------+

    .. note::

        If using DeepSpeed, and ``name_format`` does not end with an tarfile archive extension (``'.tar'``, ``'.tgz'``,
        ``'.tar.gz'``, ``'.tar.bz2'``, or ``'.tar.lzma'``), then ``'.tar'`` will be appended. DeepSpeed uses a tarball
        format as it saves model and optimizer states in separate files within the tarball.

    Consider the following scenario, where the current epoch count is ``1`` and the current batch count is ``42``:

    *   When not using DeepSpeed, then the rank zero process will call this function:

        .. testsetup:: composer.utils.checkpoint.format_name.no_deepspeed

            from composer.utils.checkpoint import format_name
            state.timer._batch._value = 42
            state.timer._epoch._value = 1

        .. doctest:: composer.utils.checkpoint.format_name.no_deepspeed

            >>> format_name("ep{epoch}-ba{batch}", state)
            'ep1-ba42'

    *   When using DeepSpeed, each rank (process) will call this function. ``'{rank}'`` should appear within
        ``name_format``, so each rank (process) will write to its own file. For example, on the rank zero process:

        .. testsetup:: composer.utils.checkpoint.format_name.deepspeed

            from composer.utils.checkpoint import format_name
            original_is_model_deepspeed = State.is_model_deepspeed
            setattr(State, 'is_model_deepspeed', property(lambda x: True))
            state.timer._batch._value = 42
            state.timer._epoch._value = 1

        .. doctest:: composer.utils.checkpoint.format_name.deepspeed

            >>> format_name("ep{epoch}-ba{batch}-rank{rank}", state)
            'ep1-ba42-rank0.tar'

        .. testcleanup:: composer.utils.checkpoint.format_name.deepspeed

            setattr(State, 'is_model_deepspeed', original_is_model_deepspeed)
    """
    # Every supported variable is supplied eagerly; str.format ignores keyword
    # arguments that name_format does not reference.
    checkpoint_name = name_format.format(
        rank=dist.get_global_rank(),
        local_rank=dist.get_local_rank(),
        world_size=dist.get_world_size(),
        local_world_size=dist.get_local_world_size(),
        node_rank=dist.get_node_rank(),
        epoch=int(state.timer.epoch),
        batch=int(state.timer.batch),
        batch_in_epoch=int(state.timer.batch_in_epoch),
        sample=int(state.timer.sample),
        sample_in_epoch=int(state.timer.sample_in_epoch),
        token=int(state.timer.token),
        token_in_epoch=int(state.timer.token_in_epoch),
    )
    if state.is_model_deepspeed and not _is_archive(checkpoint_name):
        # Deepspeed requires tarballs; appending `.tar`
        checkpoint_name += ".tar"
    return checkpoint_name
from mpi4py import MPI
def allsync(local_values, comm=None, op=None):
    """Reduce `local_values` across all MPI ranks.

    When `comm` is None (serial run), the values are returned unchanged;
    otherwise an allreduce is performed with `op` (defaults to ``MPI.MAX``).
    """
    if comm is None:
        return local_values
    reduce_op = op if op is not None else MPI.MAX
    return comm.allreduce(local_values, op=reduce_op)
def vec_bin_array(arr, m):
    """
    Arguments:
        arr: Numpy array of positive integers
        m: Number of bits of each integer to retain

    Returns a copy of arr with every element replaced with a length-m bit
    vector (most significant bit first). Bits are encoded as int8's.
    """
    # Render every integer as a zero-padded binary string, then sample the
    # character at each bit position across the whole array.
    as_strings = np.vectorize(lambda v: np.binary_repr(v).zfill(m))(arr)
    bits = np.zeros(list(arr.shape) + [m], dtype=np.int8)
    for pos in range(m):
        bits[..., pos] = np.vectorize(lambda s, p=pos: s[p] == '1')(as_strings).astype("int8")
    return bits
from typing import Iterator
from typing import List
def parse_bafs(stream: Iterator[str]) -> List[BAF]:
    """Parses allelic counts output from GATK ModelSegments, which is a SAM-style
    header comprising lines starting with @ followed by single line with column
    names (CONTIG, POSITION, REF_COUNT, ALT_COUNT, REF_NUCLEOTIDE, ALT_NUCLEOTIDE).

    :param stream: iterator over the lines of the allelic-counts file.
    :return: one BAF record per data line.
    """
    skip_header(stream)
    bafs: List[BAF] = []
    for line in stream:
        # BUG FIX: strip the trailing newline before splitting; otherwise the
        # last field (ALT_NUCLEOTIDE) keeps a '\n' suffix when lines come from
        # a file iterator.
        chromosome, position, ref_count, alt_count, ref_nucleotide, alt_nucleotide = line.rstrip('\n').split('\t')
        baf = BAF(chromosome=chromosome, position=int(position), ref_count=int(ref_count),
                  alt_count=int(alt_count), ref_nucleotide=ref_nucleotide, alt_nucleotide=alt_nucleotide)
        bafs.append(baf)
    return bafs
import mpmath
def logpdf(x, chi, c):
    """
    Logarithm of the PDF of the ARGUS probability distribution.
    """
    if c <= 0:
        raise ValueError('c must be positive')
    if chi <= 0:
        raise ValueError('chi must be positive')
    if x < 0 or x > c:
        # Outside the support [0, c] the PDF is 0, so the log-PDF is -inf.
        return mpmath.mp.ninf
    with mpmath.extradps(5):
        x = mpmath.mpf(x)
        chi = mpmath.mpf(chi)
        c = mpmath.mpf(c)
        z = x/c
        # log of the normalisation constant: chi^3 / (sqrt(2*pi) * Psi(chi))
        log_norm = (3*mpmath.log(chi)
                    - mpmath.log(2*mpmath.pi)/2
                    - mpmath.log(_psi(chi)))
        # log of z/c
        log_scale = -mpmath.log(c) + mpmath.log(z)
        # log of sqrt(1 - z^2)
        log_root = mpmath.log1p(-z**2)/2
        # exponent term: -chi^2/2 * (1 - z^2)
        log_exp = -chi**2/2*(1 - z**2)
        return log_norm + log_scale + log_root + log_exp
def sample(colors: list, max_colors: int = 8, sensitivity: int = 75) -> list:
    """
    Sample most common colors from a list of RGB color tuples.

    :param colors: list of RGB color tuples eg. [(0, 0, 0), (255, 255, 255)]
    :param max_colors: maximum number of colors to return
    :param sensitivity: how perceptively different (Euclidean Distance) a color
     must be from others to be included in the sampled palette.
    :returns: list of most common colors in RGB tuples (255, 255, 255)
    """
    # reduce all found colors using supplied sensitivity
    sampled_colors = []
    for color in colors:
        # if max_color limit reached stop looking
        if len(sampled_colors) == max_colors:
            break
        # clean-up any slight color differences in PIL sampling
        color = normalize_rgb_values(color)
        # the most common color (first seen) is always kept
        # (idiom fix: truthiness test instead of `== []`)
        if not sampled_colors:
            sampled_colors.append(color)
            continue
        # keep a color only if it is perceptively different (further than
        # `sensitivity`) from every color already in the palette
        if not any(
            color_distance(color, found) <= sensitivity for found in sampled_colors
        ):
            sampled_colors.append(color)
    return sampled_colors
def tf_example_to_feature_description(example,
                                      num_timesteps=DEFAULT_NUM_TIMESTEPS):
    """Takes a string tensor encoding an tf example and returns its features.

    Args:
        example: string tensor holding a serialized tf.train.Example.
        num_timesteps: number of timesteps each feature is expected to span.

    Returns:
        Dict mapping each feature name to a tf.io.FixedLenFeature spec of
        shape [num_timesteps, size].

    Raises:
        AssertionError: if not executing eagerly.
        ValueError: if a feature length is not divisible by num_timesteps.
    """
    if not tf.executing_eagerly():
        # BUG FIX: the message previously named the wrong function
        # ('tf_example_to_reverb_sample').
        raise AssertionError(
            'tf_example_to_feature_description() only works under eager mode.')
    example = tf.train.Example.FromString(example.numpy())
    ret = {}
    for k, v in example.features.feature.items():
        l = len(v.float_list.value)
        if l % num_timesteps:
            raise ValueError('Unexpected feature length %d. It should be divisible '
                             'by num_timesteps: %d' % (l, num_timesteps))
        size = l // num_timesteps
        ret[k] = tf.io.FixedLenFeature([num_timesteps, size], tf.float32)
    return ret
def setupmethod(f: F) -> F:
    """Wraps a method so that it performs a check in debug mode if the
    first request was already handled.
    """
    def wrapper_func(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
        # Guard clause: normal path first, loud failure if setup is finished.
        if not self._is_setup_finished():
            return f(self, *args, **kwargs)
        raise AssertionError(
            "A setup function was called after the first request "
            "was handled. This usually indicates a bug in the"
            " application where a module was not imported and"
            " decorators or other functionality was called too"
            " late.\nTo fix this make sure to import all your view"
            " modules, database models, and everything related at a"
            " central place before the application starts serving"
            " requests."
        )
    return t.cast(F, update_wrapper(wrapper_func, f))
import numpy
def generateStructuredGridPoints(nx, ny, v0, v1, v2, v3):
    """
    Generate structured grid points by bilinear blending of four corners.

    :param nx: number of x cells
    :param ny: number of y cells
    :param v0: south west corner
    :param v1: south east corner
    :param v2: north east corner
    :param v3: north west corner
    :returns array of size (nx + 1, ny + 1, 3)
    """
    # parametric coordinates in [0, 1] along each direction
    u = numpy.linspace(0., 1., nx + 1)
    v = numpy.linspace(0., 1., ny + 1)
    uu, vv = numpy.meshgrid(u, v, indexing='ij')
    pts = numpy.zeros(uu.shape + (3,), numpy.float64)
    # bilinear interpolation of the four corner points, component by component
    for comp in range(3):
        pts[..., comp] = ((1.0 - uu)*(1.0 - vv)*v0[comp] +
                          uu*(1.0 - vv)*v1[comp] +
                          uu*vv*v2[comp] +
                          (1.0 - uu)*vv*v3[comp])
    return pts
def Keywords(lang_id=0):
    """Returns Specified Keywords List
    @param lang_id: used to select specific subset of keywords
    """
    # NOTE(review): lang_id is currently ignored — the same keyword/builtin
    # pair is returned for every language id. Confirm this is intentional.
    return [PY_KW, PY_BIN]
def clear(keyword):
    """``clear`` property validation."""
    # The ``clear`` property accepts exactly these four keywords.
    return keyword in {'left', 'right', 'both', 'none'}
from typing import List
def PermissionsListOfUser(perm_list: List[str]) -> List[str]:
    """
    Takes a list of items and asserts that all of them are in the permissions list of
    a user.

    :param perm_list: A list of permissions encoded as ``str``
    :return: The input perm_list
    :raises Invalid: If the user does not have a permission in the list
    """
    # all() short-circuits on the first missing permission, matching the
    # original break-on-failure behaviour.
    if isinstance(perm_list, list) and all(
            flask.g.user.has_permission(perm) for perm in perm_list):
        return perm_list
    raise Invalid('permissions must be in the user\'s permissions list')
import collections
def PrepareForBuild(input_proto, output_proto, _config):
    """Prepare to build toolchain artifacts.

    The handlers (from _TOOLCHAIN_ARTIFACT_HANDLERS above) are called with:
        artifact_name (str): name of the artifact type.
        chroot (chroot_lib.Chroot): chroot.  Will be None if the chroot has not
            yet been created.
        sysroot_path (str): sysroot path inside the chroot (e.g., /build/atlas).
            Will be an empty string if the sysroot has not yet been created.
        build_target_name (str): name of the build target (e.g., atlas).  Will be
            an empty string if the sysroot has not yet been created.
        input_artifacts ({(str) name:[str gs_locations]}): locations for possible
            input artifacts.  The handler is expected to know which keys it should
            be using, and ignore any keys that it does not understand.
        profile_info ({(str) name: (str) value}) Dictionary containing profile
            information.

    They locate and modify any ebuilds and/or source required for the artifact
    being created, then return a value from toolchain_util.PrepareForBuildReturn.

    This function sets output_proto.build_relevance to the result.

    Args:
        input_proto (PrepareForToolchainBuildRequest): The input proto
        output_proto (PrepareForToolchainBuildResponse): The output proto
        _config (api_config.ApiConfig): The API call config.
    """
    if input_proto.chroot.path:
        chroot = controller_util.ParseChroot(input_proto.chroot)
    else:
        chroot = None
    # Group input-artifact GS locations by handler name; unknown artifact
    # types in the request inputs are silently skipped.
    input_artifacts = collections.defaultdict(list)
    for art in input_proto.input_artifacts:
        item = _TOOLCHAIN_ARTIFACT_HANDLERS.get(art.input_artifact_type)
        if item:
            input_artifacts[item.name].extend(
                ['gs://%s' % str(x) for x in art.input_artifact_gs_locations])
    profile_info = _GetProfileInfoDict(input_proto.profile_info)
    results = set()
    sysroot_path = input_proto.sysroot.path
    build_target = input_proto.sysroot.build_target.name
    for artifact_type in input_proto.artifact_types:
        # Unknown artifact_types are an error.
        handler = _TOOLCHAIN_ARTIFACT_HANDLERS[artifact_type]
        if handler.prepare:
            results.add(handler.prepare(
                handler.name, chroot, sysroot_path, build_target, input_artifacts,
                profile_info))
    # Translate the returns from the handlers we called.
    #   If any NEEDED => NEEDED
    #   elif any UNKNOWN => UNKNOWN
    #   elif any POINTLESS => POINTLESS
    #   else UNKNOWN.
    if toolchain_util.PrepareForBuildReturn.NEEDED in results:
        output_proto.build_relevance = PrepareForBuildResponse.NEEDED
    elif toolchain_util.PrepareForBuildReturn.UNKNOWN in results:
        output_proto.build_relevance = PrepareForBuildResponse.UNKNOWN
    elif toolchain_util.PrepareForBuildReturn.POINTLESS in results:
        output_proto.build_relevance = PrepareForBuildResponse.POINTLESS
    else:
        output_proto.build_relevance = PrepareForBuildResponse.UNKNOWN
    return controller.RETURN_CODE_SUCCESS
from pathlib import Path
import json
def for_properties(path: Path = Path('config.json')):
    """
    Simple externalized configuration loader. Properties are loaded from a file containing a JSON object.

    :param path: Path to the file.
    :return: Simple namespace with the key/value pairs matching the loaded json object.
    :raises ValueError: if `path` is falsy or does not point to an existing file.
    """
    if not path or not path.exists():
        raise ValueError(f"Configuration file [{path}] doesn't exist")
    # Every JSON object (including nested ones) becomes a SimpleNamespace so
    # values are reachable with attribute access.
    raw_text = path.read_text()
    return json.loads(raw_text, object_hook=lambda mapping: SimpleNamespace(**mapping))
import os
def guess_components(paths, stop_words=None, n_clusters=8):
    """Guess components from an iterable of paths.

    Args:
        paths: list of string containing file paths in the project.
        stop_words: stop words. Passed to TfidfVectorizer.
        n_clusters: number of clusters. Passed to MiniBatchKMeans.

    Returns:
        pandas.DataFrame

    See Also:
        sklearn.feature_extraction.text.TfidfVectorizer
        sklearn.cluster.MiniBatchKMeans
    """
    # Cluster on directory names only, normalised to forward slashes.
    dirs = [os.path.dirname(p.replace("\\", "/")) for p in paths]
    vectorizer = sklearn.feature_extraction.text.TfidfVectorizer(stop_words=stop_words)
    transformed_dirs = vectorizer.fit_transform(dirs)
    algo = sklearn.cluster.MiniBatchKMeans
    clustering = algo(compute_labels=True, n_clusters=n_clusters)
    clustering.fit(transformed_dirs)
    def __cluster_name(center, threshold):
        # Name a cluster by joining its dominant tf-idf features
        # (those with weight > threshold); "" if none dominate.
        # NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in
        # favour of get_feature_names_out() — confirm the pinned sklearn version.
        df = pd.DataFrame(
            data={"feature": vectorizer.get_feature_names(), "weight": center}
        )
        df.sort_values(by=["weight", "feature"], ascending=False, inplace=True)
        if (df["weight"] <= threshold).all():
            return ""
        df = df[df["weight"] > threshold]
        return ".".join(df["feature"].tolist())
    cluster_names = [
        __cluster_name(center, 0.4) for center in clustering.cluster_centers_
    ]
    # Map each path to the name of the cluster its directory fell into.
    components = [cluster_names[lbl] for lbl in clustering.labels_]
    rv = pd.DataFrame(data={"path": paths, "component": components})
    rv.sort_values(by="component", inplace=True)
    return rv
def get_instance_tags(ec2_client: boto3.Session.client, instance_id: str):
    """Return the tag values attached to one EC2 instance.

    The values are parsed through for selective hardening.
    """
    response = ec2_client.describe_tags(
        Filters=[
            {
                "Name": "resource-id",
                "Values": [
                    instance_id,
                ],
            },
        ],
    )
    # Only the tag values matter for hardening decisions; keys are dropped.
    return [tag["Value"] for tag in response["Tags"]]
from scorer.controller import prediction_app
def create_app(config_name=None) -> Flask:
    """Create a flask app instance.

    :param config_name: key into the ``config`` mapping selecting the
        configuration class to load.
    :return: the configured Flask application.
    """
    # BUG FIX: pass the module's __name__, not the literal string "__name__",
    # so Flask resolves the application's import name / root path correctly.
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    # import blueprints
    app.register_blueprint(prediction_app)
    _logger.debug("Application instance created")
    return app
from typing import Dict
def getmasterxpub(client: HardwareWalletClient, addrtype: AddressType = AddressType.WIT, account: int = 0) -> Dict[str, str]:
    """
    Get the master extended public key from a client

    :param client: The client to interact with
    :param addrtype: The address type, which selects the derivation scheme (default: segwit)
    :param account: The account number to derive
    :return: A dictionary containing the public key at the ``m/44'/0'/0'`` derivation path.
        Returned as ``{"xpub": <xpub string>}``.
    """
    return {"xpub": client.get_master_xpub(addrtype, account).to_string()}
from typing import Any
def serialize(
        obj: Any,
        annotation: Any,
        config: SerializerConfig
) -> str:
    """Convert the object to JSON.

    Dispatches to the typed serializer when the annotation carries type
    information, otherwise falls back to the untyped serializer.

    Args:
        obj (Any): The object to convert
        annotation (Any): The type annotation
        config (SerializerConfig): The serializer configuration

    Returns:
        str: The serialized object
    """
    if not _is_typed(annotation):
        return untyped_serialize(obj, config)
    return typed_serialize(obj, annotation, config)
def validate_dvprel(prop_type, pname_fid, validate):
    """
    Validates the DVPREL1/2.

    Parameters
    ----------
    prop_type : str
        the name of the property card being optimized (e.g. 'PSHELL')
    pname_fid : str / int
        the name or field id of the optimized property entry
    validate : bool
        if False, ``pname_fid`` is returned unchecked

    Returns
    -------
    pname_fid : str / int
        the (possibly normalized) property name/field id

    .. note:: words that start with integers (e.g., 12I/T**3) doesn't
              support strings
    """
    if validate:
        msg = 'DVPREL1: prop_type=%r pname_fid=%r is invalid' % (prop_type, pname_fid)
        #if prop_type == 'CELAS2':
            #assert pname_fid in ['K', 'GE', 'S'], msg
        #elif prop_type == 'CELAS4':
            #assert pname_fid in ['K'], msg
        if prop_type == 'PELAS':
            # normalize field ids to names
            if pname_fid in ['K1', 3]:
                pname_fid = 'K1'
            elif pname_fid in ['GE1', 4]:
                pname_fid = 'GE1'
            else:
                # BUGFIX: the message previously said 'PELAST' in this branch
                raise NotImplementedError('PELAS pname_fid=%r is invalid' % pname_fid)
            #assert pname_fid in [3, 4, 'K1', 'GE1'], msg
        elif prop_type == 'PELAST':
            if pname_fid in ['TKID', 3]:
                pname_fid = 'TKID'
            else:
                raise NotImplementedError('PELAST pname_fid=%r is invalid' % pname_fid)
            assert pname_fid in [3, 4, 'TKID'], msg

        elif prop_type == 'PROD':
            if pname_fid in ['A', 4]:
                pname_fid = 'A'
            elif pname_fid in ['J', 5]:
                pname_fid = 'J'
            #elif pname_fid in ['C', 6]:
                #pname_fid = 'C'
            else:
                raise NotImplementedError('PROD pname_fid=%r is invalid' % pname_fid)
            assert pname_fid in [4, 'A', 5, 'J'], msg
        elif prop_type == 'PTUBE':
            assert pname_fid in [4, 5], msg

        #elif prop_type == 'CBAR':
            #assert pname_fid in ['X1', 'X2'], msg
        elif prop_type == 'PBAR':
            assert pname_fid in [4, 5, 6, 7, 12, 13, 14, 15, 16, 17, 18, 19, 'A', 'I1', 'J'], msg
        elif prop_type == 'PBARL':
            assert pname_fid in [12, 13, 14, 15, 16, 17, 'DIM1', 'DIM2', 'DIM3', 'DIM4'], msg

        #elif prop_type == 'CBEAM':
            #assert pname_fid in ['X1', 'X2', 'X3', 'W1A', 'W2A', 'W3A', 'W1B', 'W2B', 'W3B'], msg
        elif prop_type == 'PBEAM':
            assert pname_fid in ['I1', 'I2', 'A', 'J',
                                 'I1(B)', 'I2(B)',
                                 '-8', '-9', '-10', '-14'], msg # -8
        elif prop_type == 'PBEAML':
            assert pname_fid in ['DIM1', 'DIM2', 'DIM3', 'DIM4', 'DIM5', 'DIM6',
                                 'DIM1(A)',
                                 'DIM1(B)', 'DIM2(B)', 'I1(B)', 'I2(B)',
                                 'NSM'], msg # 'DIM(B)'

        #elif prop_type == 'CQUAD4':
            #assert pname_fid in ['T1', 'T2', 'T3', 'T4'], msg
        elif prop_type == 'PSHELL':
            if pname_fid in ['T', 4]:
                pname_fid = 'T'
            elif pname_fid in [6]:  # 12I/T**3 doesn't support strings
                pass
            else:
                raise NotImplementedError('PSHELL pname_fid=%r is invalid' % pname_fid)
            #if cp_name in '12I/T**3':
                #cp_name =
            #assert pname_fid in ['T', 4, 6], msg
        elif prop_type == 'PCOMP':
            if isinstance(pname_fid, str):
                # e.g. 'T1' -> ('T', 1); only thickness/angle fields are optimizable
                word, num = break_word_by_trailing_integer(pname_fid)
                if word not in ['T', 'THETA']:
                    raise RuntimeError(msg)
            else:
                assert pname_fid in [3, #3-z0
                                     # 13-t1, 14-theta1, 17-t2, 18-theta2
                                     13, 14, 17, 18,
                                     23, 24, 27, 28,
                                     33, 34, 37, 38,
                                     43, 44, 47, 48], msg
        elif prop_type == 'PCOMPG':
            #if pname_fid in ['T', 4]:
                #pname_fid = 'T'
            #elif pname_fid in [6]:  # 12I/T**3 doesn't support strings
                #pass
            #else:
                #raise NotImplementedError('PSHELL pname_fid=%r is invalid' % pname_fid)
            #if cp_name in '12I/T**3':
            assert pname_fid in ['Z0', 'SB',
                                 15, 25, 75, 85], msg

        #elif prop_type == 'CBUSH':
            #assert pname_fid in ['X1', 'X2', 'X3', 'S', 'S1'], msg
        elif prop_type == 'PBUSH':
            assert pname_fid in [18,
                                 'K1', 'K2', 'K3', 'K4', 'K5', 'K6',
                                 'B2',
                                 'GE1', 'GE3', 'GE4', 'GE5', 'GE6',
                                 '-13'], msg
        elif prop_type == 'PBUSH1D':
            assert pname_fid in ['K', 'C'], msg
        elif prop_type == 'PBUSHT':
            assert pname_fid in ['TBID1', 'TGEID1', 'TGEID2'], msg

        # CGAP
        elif prop_type == 'PGAP':
            assert pname_fid in [5], msg
        elif prop_type == 'PVISC':
            assert pname_fid in ['CE1'], msg

        #elif prop_type == 'CDAMP2':
            #assert pname_fid in ['B'], msg
        elif prop_type == 'PDAMP':
            assert pname_fid in [3, 'B1'], msg

        #elif prop_type == 'CMASS2':
            #assert pname_fid in ['M'], msg
        #elif prop_type == 'CMASS4':
            #assert pname_fid in ['M'], msg
        elif prop_type == 'PMASS':
            assert pname_fid in [3], msg

        #elif prop_type == 'CONM2':
            #assert pname_fid in ['M', 'X1', 'X2', 'I11', 'I22'], msg

        elif prop_type == 'PSHEAR':
            if pname_fid in ['T', 4]:
                pname_fid = 'T'
            else:
                raise NotImplementedError('PSHEAR pname_fid=%r is invalid' % pname_fid)
        elif prop_type == 'PWELD':
            assert pname_fid in ['D'], msg

        elif prop_type == 'PBEND':
            raise RuntimeError('Nastran does not support the PBEND')
        else:
            raise NotImplementedError(msg)
    return pname_fid
def is_ligature(archar):
    """Tell whether *archar* is an Arabic ligature character.

    Ligatures are the LamAlef forms: LAM_ALEF, LAM_ALEF_HAMZA_ABOVE,
    LAM_ALEF_HAMZA_BELOW and LAM_ALEF_MADDA_ABOVE.

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True when the character is a known ligature
    @rtype:Boolean
    """
    found = archar in LIGUATURES
    return found
def _get_embl_key(line):
"""Return first part of a string as a embl key (ie 'AC M14399;' -> 'AC')"""
# embl keys have a fixed size of 2 chars
return line[:2] | b54f1a94f120f7ac63a0dd2a22bd47d5a5d5eeb9 | 25,471 |
import os
def size_too_big(path):
    """Return True when the file at *path* is too large (over 5MB)."""
    max_bytes = 5 * 1024 * 1024  # 5 MB == 5242880 bytes
    return os.path.getsize(path) > max_bytes
from h2o.job import H2OJob
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
from h2o import api
import os
def store_h2o_frame(data, directory, filename, force=False, parts=1):
    """
    Export a given H2OFrame to a path on the machine this python session is currently connected to.

    :param data: the Frame to save to disk.
    :param directory: the directory to the save point on disk.
    :param filename: the name to save the frame to.
    :param force: if True, overwrite any preexisting file with the same path
    :param parts: enables export to multiple 'part' files instead of just a single file.
        Convenient for large datasets that take too long to store in a single file.
        Use parts=-1 to instruct H2O to determine the optimal number of part files or
        specify your desired maximum number of part files. Path needs to be a directory
        when exporting to multiple files, also that directory must be empty.
        Default is ``parts = 1``, which is to export to a single file.
    :return string filepath: the path to which the file was stored.
    """
    # Make sure the target directory exists before building the path.
    if not os.path.isdir(directory):
        os.makedirs(directory)
    filepath = _make_local_path(os.path.join(directory, filename))

    # Validate argument types in the same order as before.
    for value, expected in ((data, H2OFrame), (filepath, str),
                            (force, bool), (parts, int)):
        assert_is_type(value, expected)

    payload = {"path": filepath, "num_parts": parts, "force": force}
    job = H2OJob(api("POST /3/Frames/%s/export" % data.frame_id, data=payload),
                 "Export File")
    job.poll()
    return filepath
import scipy
def get_region_data(region, lastday=-1, printrows=0, correct_anomalies=True,
                    correct_dow='r7'):
    """Get case counts and population for one municipality.

    It uses the global DFS['mun'], DFS['cases'] dataframe.

    Parameters:

    - region: region name (see below)
    - lastday: last day to include.
    - printrows: print this many of the most recent rows
    - correct_anomalies: correct known anomalies (hiccups in reporting)
      by reassigning cases to earlier dates.
    - correct_dow: None, 'r7' (only for extrapolated rolling-7 average)

    Special municipalities:

    - 'Nederland': all
    - 'HR:Zuid', 'HR:Noord', 'HR:Midden', 'HR:Midden+Zuid', 'HR:Midden+Noord':
      holiday regions.
    - 'MS:xx-yy': municipalities with population xx <= pop/1000 < yy'
    - 'P:xx': province

    Use data up to lastday.

    Return:

    - df: dataframe with added columns:

        - Delta: daily increase in case count (per capita).
        - Delta_dowc: daily increase, day-of-week correction applied
          based on national pattern in most recent 7 weeks.
        - Delta7r: daily increase as 7-day rolling average
          (last 3 days are estimated).
        - DeltaSG: daily increase, smoothed with (15, 2) Savitsky-Golay filter.

    - pop: population.
    """
    df1, npop = nl_regions.select_cases_region(DFS['cases'], region)

    # df1 will have index 'Date_of_report', columns:
    # 'Total_reported', 'Hospital_admission', 'Deceased'

    assert correct_dow in [None, 'r7']
    # lastday semantics: -1 means "use everything"; other negative values
    # (or positive, clipped below) trim the most recent rows.
    if lastday < -1 or lastday > 0:
        df1 = df1.iloc[:lastday+1]

    if len(df1) == 0:
        raise ValueError(f'No data for region={region!r}.')

    # nc: number of cases (daily increments of the cumulative counter)
    nc = df1['Total_reported'].diff()
    if printrows > 0:
        print(nc[-printrows:])

    # first diff is NaN; treat day 0 as zero new cases
    nc.iat[0] = 0
    df1['Delta'] = nc/npop
    if correct_anomalies:
        _correct_delta_anomalies(df1)
        nc = df1['Delta'] * npop

    nc7 = nc.rolling(7, center=True).mean()
    nc7[np.abs(nc7) < 1e-10] = 0.0 # otherwise +/-1e-15 issues.
    nc7a = nc7.to_numpy()

    # last 3 elements are NaN, use mean of last 4 raw (dow-corrected) to
    # get an estimated trend and use exponential growth or decay
    # for filling the data.
    if correct_dow == 'r7':
        # mean number at t=-1.5 days
        dow_correction = get_dow_correction((lastday-49, lastday)) # (7,) array
        df1['Delta_dowc'] = df1['Delta'] * dow_correction[df1.index.dayofweek]
        nc1 = np.mean(nc.iloc[-4:] * dow_correction[nc.index[-4:].dayofweek])
    else:
        nc1 = nc.iloc[-4:].mean() # mean number at t=-1.5 days

    # extrapolate the last 3 (NaN) rolling-average points exponentially
    # from the trend between t=-4 and the recent raw mean
    log_slope = (np.log(nc1) - np.log(nc7a[-4]))/1.5
    nc7.iloc[-3:] = nc7a[-4] * np.exp(np.arange(1, 4)*log_slope)

    # 1st 3 elements are NaN
    nc7.iloc[:3] = np.linspace(0, nc7.iloc[3], 3, endpoint=False)

    df1['Delta7r'] = nc7/npop
    # NOTE(review): this relies on scipy.signal being importable after a
    # top-level "import scipy" — confirm scipy.signal is imported elsewhere.
    df1['DeltaSG'] = scipy.signal.savgol_filter(
        nc/npop, 15, 2, mode='interp')

    return df1, npop
def MONTH(*args) -> Function:
    """Build a MONTH spreadsheet function call.

    MONTH returns the month of the year a specific date falls in, in
    numeric format.

    Learn more: https://support.google.com/docs/answer/3093052
    """
    function_name = "MONTH"
    return Function(function_name, args)
import os
def local_tmp_dir():
    """Return the ./tmp directory used by tests, creating it when absent."""
    path = "./tmp"
    if not os.path.isdir(path):
        os.mkdir(path)
    return path
import logging
import time
def Install(browser):
    """Installs |browser|, if necessary. It is not possible to install
    an older version of the already installed browser currently.

    Args:
      browser: specific browser to install (dict with 'name' and 'family').

    Returns:
      whether browser is installed.
    """
    # Only dynamic installation of browsers for Windows now.
    if not util.IsWindows():
        return True
    logging.info('Wants to install ' + browser['name'])
    version = GetVersionNumber(browser['family'])
    if version is None:
        logging.info('No version of %s is installed' % browser['family'])
    else:
        logging.info('Version %s of %s is installed already'
                     % (version, browser['family']))
    if not IsBrowserInstalled(browser):
        install_cmd = None
        # Download browser.
        logging.info('Downloading ' + browser['name'])
        if browser['family'] == 'ie':
            if browser['name'] == 'ie7':
                install_cmd = util.Download(_IE_7_URLS[util.GetOSPrefix()],
                                            SOFTWARE_PATH)
            elif browser['name'] == 'ie8':
                install_cmd = util.Download(_IE_8_URLS[util.GetOSPrefix()],
                                            SOFTWARE_PATH)
            install_cmd += ' /passive /no-default'
        elif browser['family'] == 'firefox':
            if util.IsWindows():
                install = util.Download(_FIREFOX_VERSIONS[browser['name']],
                                        SOFTWARE_PATH)
                install_cmd = install + ' -ms'
        elif browser['family'] == 'chrome':
            if util.IsWindows():
                install_cmd = util.Download(_CHROME_VERSIONS[browser['name']],
                                            SOFTWARE_PATH)
        else:
            logging.error('Browser %s is not currently supported' % browser['name'])
        # Run installation.
        if install_cmd is not None:
            logging.info('Installing browser: ' + install_cmd)
        if install_cmd is None or util.RunStr(install_cmd) != 0:
            logging.error('Could not install %s' % browser['name'])
            return False
        # Do post installation things.
        if browser['family'] == 'chrome':
            # BUGFIX: the Python-2-only ``file(...)`` builtin raised a
            # NameError on Python 3; create the 'First Run' sentinel with
            # open() so Chrome skips its first-run UI.
            with open(HOME_PATH + '\\Local Settings\\'
                      'Application Data\\Google\\Chrome\\Application\\'
                      'First Run', 'w'):
                pass
            # Wait for Chrome to install. Reboot to get rid of that first run UI.
            time.sleep(90)
            util.Reboot()
            logging.error('Could not reboot. Needed for Chrome installation.')
            return False
    else:
        logging.info(browser['name'] + ' already installed')
    return True
def get_query_string_from_process_type_string(process_type_string: str) -> str:  # pylint: disable=invalid-name
    """
    Take the process type string of a Node and create the queryable type string.

    :param process_type_string: the process type string
    :type process_type_string: str
    :return: string that can be used to query for subclasses of the process type using 'LIKE <string>'
    :rtype: str
    """
    # Entry-point style strings ("module:name") are used verbatim.
    if ':' in process_type_string:
        return process_type_string + '.'

    # Otherwise drop the last two dotted components (module + class name).
    base_path = process_type_string.rsplit('.', 2)[0]
    return base_path + '.'
def get_column(fn):
    """Get column from Cellomics filename.

    Parameters
    ----------
    fn : string
        A filename from the Cellomics high-content screening system.

    Returns
    -------
    column : string
        The column of the filename's well.

    Examples
    --------
    >>> fn = 'MFGTMP_140206180002_A01f00d0.TIF'
    >>> get_column(fn)
    '01'
    """
    semantics = cellomics_semantic_filename(fn)
    # the well is e.g. 'A01': row letter followed by the column digits
    return semantics['well'][1:]
def add_tables():
    """
    Generates tables in postgres database according to the SQLAlchemy
    model when this script is invoked directly via terminal.
    """
    metadata = database.Base.metadata
    return metadata.create_all(bind=database.engine)
import os
def isloggedin(userdir):
    """If user has sent us an in date, valid cookie then return updated cookie header,
    otherwise return False."""
    rawcookie = os.environ.get('HTTP_COOKIE')
    if rawcookie is None:
        return False
    try:
        cookiestring = SimpleCookie(rawcookie)['userid'].value
    except KeyError:
        return False
    decoded = decodestring(cookiestring, userdir)
    if not decoded:
        return False
    user, password, cookiepath = decoded
    # refresh the cookie so its expiry moves forward
    return user, makecookie(user, password, cookiepath)
import os
import time
def enable(name, start=False, **kwargs):
    """
    Start service ``name`` at boot.
    Returns ``True`` if operation is successful

    name
        the service's name

    start : False
        If ``True``, start the service once enabled.

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable <name> [start=True]
    """

    # non-existent service
    if not available(name):
        return False

    # if service is aliased, refuse to enable it
    alias = get_svc_alias()
    if name in alias:
        log.error("This service is aliased, enable its alias instead")
        return False

    # down_file: file that disables sv autostart
    svc_realpath = _get_svc_path(name)[0]
    down_file = os.path.join(svc_realpath, "down")

    # if service already enabled, remove down_file to
    # let service starts on boot (as requested)
    if enabled(name):
        if os.path.exists(down_file):
            try:
                os.unlink(down_file)
            except OSError:
                log.error("Unable to remove file %s", down_file)
                return False
        return True

    # let's enable the service
    if not start:
        # create a temp 'down' file BEFORE enabling service.
        # will prevent sv from starting this service automatically.
        log.trace("need a temporary file %s", down_file)
        if not os.path.exists(down_file):
            try:
                # pylint: disable=resource-leakage
                salt.utils.files.fopen(down_file, "w").close()
                # pylint: enable=resource-leakage
            except OSError:
                log.error("Unable to create file %s", down_file)
                return False

    # enable the service
    try:
        os.symlink(svc_realpath, _service_path(name))
    except OSError:
        # (attempt to) remove temp down_file anyway
        # BUGFIX: previously logged down_file instead of the symlink path
        log.error("Unable to create symlink %s", _service_path(name))
        if not start:
            os.unlink(down_file)
        return False

    # ensure sv is aware of this new service before continuing.
    # if not, down_file might be removed too quickly,
    # before 'sv' have time to take care about it.
    # Documentation indicates that a change is handled within 5 seconds.
    cmd = "sv status {}".format(_service_path(name))
    retcode_sv = 1
    count_sv = 0
    while retcode_sv != 0 and count_sv < 10:
        time.sleep(0.5)
        count_sv += 1
        call = __salt__["cmd.run_all"](cmd)
        retcode_sv = call["retcode"]

    # remove the temp down_file in any case.
    if (not start) and os.path.exists(down_file):
        try:
            os.unlink(down_file)
        except OSError:
            log.error("Unable to remove temp file %s", down_file)
            retcode_sv = 1

    # if an error happened, revert our changes
    if retcode_sv != 0:
        # BUGFIX: os.path.join was called with a list ([path, name]),
        # which raises a TypeError; the symlink we created above lives
        # at _service_path(name), so that is what must be removed.
        os.unlink(_service_path(name))
        return False
    return True
def label_vertices(ast, vi, vertices, var_v):
    """Label each node in the AST with a unique vertex id

    vi : vertex id counter
    vertices : list of all vertices (modified in place)
    """
    def walk(node):
        nonlocal vi
        # Only plain dicts are AST nodes; leaves (lists, scalars) pass through.
        if type(node) is not dict:
            return node
        node["vertex_id"] = vi
        vertices.append(node["tag"])
        vi += 1
        for key, child in node.items():
            if key != "tag":
                walk(child)
        return node

    return walk(ast)
import logging
def standardize_batch(inputs,
                      is_training,
                      decay=0.999,
                      epsilon=1e-3,
                      data_format="NHWC",
                      use_moving_averages=True,
                      use_cross_replica_mean=None):
  """Adds TPU-enabled batch normalization layer.

  This version does not apply trainable scale or offset!
  It normalizes a tensor by mean and variance.

  Details on Batch Normalization can be found in "Batch Normalization:
  Accelerating Deep Network Training by Reducing Internal Covariate Shift",
  Ioffe S. and Szegedy C. 2015 [http://arxiv.org/abs/1502.03167].

  Note #1: This method computes the batch statistic across all TPU replicas,
  thus simulating the true batch norm in the distributed setting. If one wants
  to avoid the cross-replica communication set use_cross_replica_mean=False.

  Note #2: When is_training is True the moving_mean and moving_variance need
  to be updated in each training step. By default, the update_ops are placed
  in `tf.GraphKeys.UPDATE_OPS` and they need to be added as a dependency to
  the `train_op`. For example:

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    if update_ops:
      updates = tf.group(*update_ops)
      total_loss = control_flow_ops.with_dependencies([updates], total_loss)

  Note #3: Reasonable values for `decay` are close to 1.0, typically in the
  multiple-nines range: 0.999, 0.99, 0.9, etc. Lower the `decay` value (trying
  `decay`=0.9) if model experiences reasonably good training performance but
  poor validation and/or test performance.

  Args:
    inputs: A tensor with 2 or 4 dimensions, where the first dimension is
      `batch_size`. The normalization is over all but the last dimension if
      `data_format` is `NHWC`, and the second dimension if `data_format` is
      `NCHW`.
    is_training: Whether or not the layer is in training mode. In training
      mode it would accumulate the statistics of the moments into the
      `moving_mean` and `moving_variance` using an exponential moving average
      with the given `decay`. When is_training=False, these variables are not
      updated, and the precomputed values are used verbatim.
    decay: Decay for the moving averages. See notes above for reasonable
      values.
    epsilon: Small float added to variance to avoid dividing by zero.
    data_format: Input data format. NHWC or NCHW.
    use_moving_averages: If True keep moving averages of mean and variance that
      are used during inference. Otherwise use accumlators.
    use_cross_replica_mean: If True add operations to do computes batch norm
      statistics across all TPU cores. These ops are not compatible with other
      platforms. The default (None) will only add the operations if running
      on TPU.

  Returns:
    The normalized tensor with the same type and shape as `inputs`.
  """
  if data_format not in {"NCHW", "NHWC"}:
    raise ValueError(
        "Invalid data_format {}. Allowed: NCHW, NHWC.".format(data_format))
  if use_cross_replica_mean is None:
    # Default to global batch norm only on TPUs.
    use_cross_replica_mean = (
        tpu_function.get_tpu_context().number_of_shards is not None)
    logging.debug("Automatically determined use_cross_replica_mean=%s.",
                  use_cross_replica_mean)

  inputs = tf.convert_to_tensor(inputs)
  inputs_dtype = inputs.dtype
  inputs_shape = inputs.get_shape()
  # NOTE(review): `.value` on a Dimension implies TF 1.x shape semantics.
  num_channels = inputs.shape[-1].value
  if num_channels is None:
    raise ValueError("`C` dimension must be known but is None")
  inputs_rank = inputs_shape.ndims
  if inputs_rank is None:
    raise ValueError("Inputs %s has undefined rank" % inputs.name)
  elif inputs_rank not in [2, 4]:
    raise ValueError(
        "Inputs %s has unsupported rank."
        " Expected 2 or 4 but got %d" % (inputs.name, inputs_rank))
  # Bring 2-D inputs into 4-D format.
  if inputs_rank == 2:
    new_shape = [-1, 1, 1, num_channels]
    if data_format == "NCHW":
      new_shape = [-1, num_channels, 1, 1]
    inputs = tf.reshape(inputs, new_shape)

  # Execute a distributed batch normalization
  # statistics are computed in float32 regardless of the input dtype
  axis = 1 if data_format == "NCHW" else 3
  inputs = tf.cast(inputs, tf.float32)
  reduction_axes = [i for i in range(4) if i != axis]
  if use_cross_replica_mean:
    mean, variance = tpu_ops.cross_replica_moments(inputs, reduction_axes)
  else:
    counts, mean_ss, variance_ss, _ = tf.nn.sufficient_statistics(
        inputs, reduction_axes, keep_dims=False)
    mean, variance = tf.nn.normalize_moments(
        counts, mean_ss, variance_ss, shift=None)

  # Swap in moving/accumulated statistics for inference.
  if use_moving_averages:
    mean, variance = _moving_moments_for_inference(
        mean=mean, variance=variance, is_training=is_training, decay=decay)
  else:
    mean, variance = _accumulated_moments_for_inference(
        mean=mean, variance=variance, is_training=is_training)

  # No trainable scale/offset by design (see docstring).
  outputs = tf.nn.batch_normalization(
      inputs,
      mean=mean,
      variance=variance,
      offset=None,
      scale=None,
      variance_epsilon=epsilon)
  outputs = tf.cast(outputs, inputs_dtype)

  # Bring 2-D inputs back into 2-D format.
  if inputs_rank == 2:
    outputs = tf.reshape(outputs, [-1] + inputs_shape[1:].as_list())
  outputs.set_shape(inputs_shape)
  return outputs
def bias_variable(shape):
    """Create a bias variable with appropriate initialization."""
    # Biases start at zero (an earlier revision initialized them to 0.1).
    zeros = tf.constant(0.0, shape=shape)
    return tf.Variable(zeros)
import sys
def getPortNumber():
    """
    Check the command-line arguments for the port number.

    The program can exit in this method if
        Too few arguments are passed into the program
        Too many arguments are passed into the program
        The port number argument is non-numeric
        The port number argument is less than 0 since port numbers cannot be negative
        The port number argument is between 0 and 1023 since those ports are reserved
        The port number argument is larger than 65535 since that is the max port number

    Returns
        number: The port number passed into the program
    """
    if len(sys.argv) == 1:
        printHelpMessage('You passed too few command-line arguments into the application')
    elif len(sys.argv) > 2:
        printHelpMessage('You passed too many command-line arguments into the application')
    elif sys.argv[1].find('.') != -1:
        printHelpMessage('Port number `{}` is a decimal'.format(sys.argv[1]))

    try:
        portNumber = int(sys.argv[1])
        if portNumber < 0:
            printHelpMessage(
                'Port number `{}` is negative'.format(portNumber)
            )
        elif portNumber < 1024:
            printHelpMessage(
                'Port number `{}` is reserved for common TCP/IP applications'.format(portNumber)
            )
        # BUGFIX: the maximum TCP/UDP port is 65535, not 65353.
        elif portNumber > 65535:
            printHelpMessage(
                'Port number `{}` is higher than the maximum port number'.format(portNumber)
            )
        return portNumber
    except ValueError:
        printHelpMessage('You Passed a Non-Numeric Port Number Into the Application')
def object_gatekeeper(obj, is_auth, ignore_standalone=False):
    """
    Decide whether *obj* may be shown to the requester.

    It's OK to use available_to_public here because the underlying logic is
    identical.

    :param obj: the object being checked; falsy objects are always denied
    :param is_auth: True when the requesting user is authenticated
    :param ignore_standalone: unused; kept for interface compatibility
    :return: truthy when access is allowed
    """
    if not obj:
        return False
    if is_auth:
        return True
    # Anonymous users only see objects explicitly flagged public.
    # getattr with a default replaces the previous bare ``except: pass``
    # so that only a *missing* attribute (not arbitrary runtime errors)
    # silently falls back to "not public".
    return getattr(obj, 'available_to_public', False)
def l1_l2_regularizer(scale_l1=1.0, scale_l2=1.0, scope=None):
  """Returns a function that can be used to apply L1 L2 regularizations.

  Args:
    scale_l1: A scalar multiplier `Tensor` for L1 regularization.
    scale_l2: A scalar multiplier `Tensor` for L2 regularization.
    scope: An optional scope name.

  Returns:
    A function with signature `l1_l2(weights)` that applies a weighted sum of
    L1 L2 regularization.

  Raises:
    ValueError: If scale is negative or if scale is not a float.
  """
  if not scope:
    scope = 'l1_l2_regularizer'
  regularizers = [l1_regularizer(scale_l1), l2_regularizer(scale_l2)]
  return sum_regularizer(regularizers, scope=scope)
def test013_ip_range():
    """
    to run:

    kosmos 'j.data.types.test(name="iprange")'
    """
    ipv4 = j.data.types.get("iprange", default="192.168.0.0/28")
    assert ipv4.default_get() == "192.168.0.0/28"
    for candidate, expected in (
        ("192.168.23.255/28", True),
        ("192.168.23.300/28", False),
        ("192.168.23.255/32", True),
    ):
        assert ipv4.check(candidate) is expected

    ipv6 = j.data.types.get("iprange")
    assert ipv6.default_get() == "::/128"
    for candidate, expected in (
        ("2001:db00::0/24", True),
        ("2001:db00::1/24", True),
        ("2001:db00::0/ffff:ff00::", False),
    ):
        assert ipv6.check(candidate) is expected

    j.data.types._log_info("TEST DONE LIST")
    return "OK"
def _create_hive_cursor():
    """
    Initializes a hive connection and returns a cursor to it
    :return: hive cursor
    """
    _print_info('Initializing hive cursor.')
    cursor = _initialize_hive_connection()
    return cursor
import os
import pickle
def load_pretrained_wts(featurizer_params, ExtendedEncoder_params):
    """Merge pre-trained and freshly-initialised parameters.

    If a checkpoint for the current restart step exists on disk it is
    loaded and returned verbatim. Otherwise, when
    ``config['initialize_pretrained']`` names a pretrained MLM model, its
    embedding weights are copied into both parameter trees; otherwise the
    randomly-initialised parameters are used as-is.

    Returns a (frozen) dict with keys 'comments_encoder' and 'mlm_predictor'.
    """
    # Resume path: a previously pickled parameter set for this step wins.
    param_idx = config['restart_from']//config['total_steps']
    if os.path.isfile(config['params_dir']+f'params_{param_idx}'):
        with open(config['params_dir']+f'params_{param_idx}', 'rb') as f:
            params = pickle.load(f)
        return params
    if config['initialize_pretrained']!='':
        pt_wts = get_pretrained_weights(config)
        # Copy matching embedding tensors from the pretrained checkpoint
        # into the (mutable copies of the) initialised parameter trees.
        featurizer_params = to_mutable_dict(featurizer_params)
        featurizer_params = copy_available_keys(pt_wts, featurizer_params,
                       [('embeddings/word_embeddings/weight', ('encoder/embedding/~/embed', 'embeddings')),
                        ('embeddings/position_embeddings/weight', ('encoder/embedding/position_embeddings', 'position_embeddings')),
                        ('embeddings/LayerNorm', ('encoder/embedding/layer_norm',))])

        # NOTE(review): the target paths differ only by the '~/' segment —
        # presumably the Haiku module nesting differs between the two
        # networks; confirm against the model definitions.
        ExtendedEncoder_params = to_mutable_dict(ExtendedEncoder_params)
        ExtendedEncoder_params = copy_available_keys(pt_wts, ExtendedEncoder_params,
                       [('embeddings/word_embeddings/weight', ('encoder/~/embedding/~/embed', 'embeddings')),
                        ('embeddings/position_embeddings/weight', ('encoder/~/embedding/position_embeddings', 'position_embeddings')),
                        ('embeddings/LayerNorm', ('encoder/~/embedding/layer_norm',))])
    else:
        print("No pretrained MLM model (e.g. distilbert, roberta..) was specified, initializing with random wts. Provide a pretrained \
model name in config['initialize_pretrained'], if you wish to use pretrained weights of that model.")

    params = to_immutable_dict( {'comments_encoder' : featurizer_params,
                                 'mlm_predictor' : ExtendedEncoder_params } )
    return params
def to_vector_single(text, embeddings, maxlen=300):
    """
    Given a string, tokenize it, then convert it to a sequence of word embedding
    vectors with the provided embeddings, introducing <PAD> and <UNK> padding token
    vector when appropriate
    """
    tokens = tokenizeAndFilterSimple(clean_text(text))
    # keep only the trailing maxlen tokens
    window = tokens[-maxlen:]

    # TBD: use better initializers (uniform, etc.)
    # TBD: padding should be left and which vector do we use for padding?
    # and what about masking padding later for RNN?
    vectors = np.zeros((maxlen, embeddings.embed_size), )
    for position, token in enumerate(window):
        vectors[position, :] = embeddings.get_word_vector(token).astype('float32')
    return vectors
import numpy
def eval_tensor_density(
    tens: tf_compat.Tensor, sess: tf_compat.Session = None
) -> float:
    """
    Get the density (fraction of non zero values) in a tensor

    :param tens: the tensor to get the density for
    :param sess: the session to use for evaluating the tensor,
        if not supplied will use the default session
    :return: the density of the tensor
    """
    session = sess if sess else tf_compat.get_default_session()
    values = session.run(tens)
    nonzeros = numpy.count_nonzero(values)
    return float(nonzeros) / float(values.size)
import collections
def get(key, default):
    """Get a config bloc from the YAML config file.

    Args:
        key (str): the (case-insensitive) name of the bloc
        default (dict): The default bloc if the key is not available

    Returns:
        dict: The config bloc (or the default one)
    """
    # BUGFIX: ``collections.Mapping`` was removed in Python 3.10;
    # the ABC lives in ``collections.abc``.
    from collections.abc import Mapping

    lowered = key.lower()
    # A missing key, or a nested mapping (a sub-bloc rather than a leaf
    # value), falls back to the supplied default.
    if lowered not in _YAML_DICT or isinstance(_YAML_DICT[lowered], Mapping):
        return default
    return _YAML_DICT[lowered]
def adaptsim(f, a, b, eps=1e-8, max_iter=10000):
    """Adaptive Simpson quadrature.

    The name follows Gander, W. and W. Gautschi, "Adaptive Quadrature --
    Revisited," BIT, Vol. 40, 2000, pp. 84-101 (available at
    https://people.inf.ethz.ch/gander/), although this implementation does
    not use the recursive scheme from that paper.

    Args:
        f: the function to integrate
        a, b: the integration interval
        eps: target accuracy; iteration stops once it is reached
        max_iter: maximum number of iterations; an Exception is raised
            when the target accuracy is not reached within this many steps

    Returns: (I, m, p)
        I: the approximate value of the integral
        m: the number of refinement levels
        p: the partition points

    Raises:
        Exception: accuracy not reached within max_iter steps
    """
    points = [a, b]       # accepted partition points
    pending = points      # subintervals still awaiting acceptance
    budgets = [eps]       # per-interval error budgets
    levels = 0            # m: refinement depth
    accepted = 0          # q: number of accepted subintervals
    total = 0             # running value of the integral

    for _ in range(int(max_iter)):
        n_budgets = len(budgets)
        if len(pending) <= 1:
            break
        left = pending[0]
        h = pending[1] - left

        # Simpson estimate on the whole interval vs. on its two halves.
        whole = h / 6 * ( f(left) + 4 * f(left + h/2) + f(left + h ) )
        first = h / 12 * ( f(left) + 4 * f(left + h/4) + f(left + h/2) )
        second = h / 12 * ( f(left + h/2) + 4 * f(left + 3*h/4) + f(left + h) )

        if abs(whole - first - second) <= 15 * budgets[0]:
            # Interval accepted: accumulate and drop it from the queue.
            total += first + second
            pending = pending[1:]
            if n_budgets >= 2:
                budgets = budgets[1:]
            accepted += 1
        else:
            # Refine: split the interval and halve the error budgets.
            levels += 1
            pending = [left, left + h/2] + pending[1:]
            if n_budgets == 1:
                budgets = [budgets[0]/2, budgets[0]/2]
            else:
                budgets = [budgets[0]/2, budgets[1]/2] + budgets[1:]

        if accepted == 0:
            points = pending
        else:
            points = points[:accepted] + pending
    else:
        raise Exception('无法在 max_iter 步内迭代到目标精度')

    return total, levels, points
def get_next_position(grid):
    """Returns best next position to send."""
    width = len(grid[0])
    # score each interior column and pick the most unprepared one
    scores = [inspect_around_position(grid, x) for x in range(1, width - 1)]
    best_index = scores.index(max(scores))
    return best_index + 2
from typing import Optional
def which_subdir(sha: str) -> Optional[str]:
    """ Determine which subset (if any) sha is represented in """
    fname = sha + '.json'
    for subdir, contents in subdir_contents.items():
        if fname in contents:
            # consume the entry so it is only matched once
            contents.remove(fname)
            return subdir
    subdir_contents[MISSING_FILE].add(fname)
    return MISSING_FILE
import numpy as np
def smoothedEnsembles(data,lat_bounds,lon_bounds):
    """
    Smoothes all ensembles by taking subsamples
    """
    ### Import modules
    print('\n------- Beginning of smoothing the ensembles per model -------')

    ### Save MM
    newmodels = data.copy()
    mmean = newmodels[-1, :, :, :, :]       # 7 for MMmean
    otherens = newmodels[:7, :, :, :, :]

    newmodeltest = np.empty(otherens.shape)
    n_models = otherens.shape[0]
    n_ens = otherens.shape[1]
    for modi in range(n_models):
        for sh in range(n_ens):
            # draw a random subsample of ensemble members (no repeats)
            picked = np.random.choice(np.arange(n_ens), size=n_models,
                                      replace=False)
            newmodeltest[modi, sh, :, :, :] = np.nanmean(
                otherens[modi][picked, :, :, :], axis=0)

    ### Add new class
    smoothClass = np.append(newmodeltest, mmean[np.newaxis, :, :, :], axis=0)
    print('--Size of smooth twin --->',newmodeltest.shape)

    print('--NEW Size of smoothedclass--->',smoothClass.shape)
    print('------- Ending of smoothing the ensembles per model -------')
    return smoothClass
def conv7x7_block(in_channels,
                  out_channels,
                  strides=1,
                  padding=3,
                  use_bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation="relu",
                  data_format="channels_last",
                  **kwargs):
    """
    7x7 version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 3
        Padding value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    # Delegate to the generic ConvBlock with the kernel size fixed at 7.
    # Extra keyword arguments are passed straight through so that a
    # duplicated keyword still raises a TypeError, as before.
    return ConvBlock(
        kernel_size=7,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        padding=padding,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation,
        data_format=data_format,
        **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.