| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
venv/lib/python3.6/site-packages/pykalman/unscented.py | QuantTraderEd/vnpy_crypto | 34 | 12776151 |
'''
=========================================
Inference for Non-Linear Gaussian Systems
=========================================
This module contains the Unscented Kalman Filter (Wan, <NAME> 2000)
for state estimation in systems with non-Gaussian noise and non-linear dynamics
'''
from collections import namedtuple
import numpy as np
from numpy import ma
from scipy import linalg
from .utils import array1d, array2d, check_random_state, get_params, preprocess_arguments
from .standard import _last_dims, _determine_dimensionality, _arg_or_default
# represents a collection of sigma points and their associated weights. one
# point per row
SigmaPoints = namedtuple(
'SigmaPoints',
['points', 'weights_mean', 'weights_covariance']
)
# represents mean and covariance of a multivariate normal distribution
Moments = namedtuple('Moments', ['mean', 'covariance'])
def points2moments(points, sigma_noise=None):
'''Calculate estimated mean and covariance of sigma points
Parameters
----------
points : [2 * n_dim_state + 1, n_dim_state] SigmaPoints
SigmaPoints object containing points and weights
sigma_noise : [n_dim_state, n_dim_state] array
additive noise covariance matrix, if any
Returns
-------
moments : Moments object of size [n_dim_state]
Mean and covariance estimated using points
'''
(points, weights_mu, weights_sigma) = points
mu = points.T.dot(weights_mu)
points_diff = points.T - mu[:, np.newaxis]
sigma = points_diff.dot(np.diag(weights_sigma)).dot(points_diff.T)
if sigma_noise is not None:
sigma = sigma + sigma_noise
return Moments(mu.ravel(), sigma)
def moments2points(moments, alpha=None, beta=None, kappa=None):
'''Calculate "sigma points" used in Unscented Kalman Filter
Parameters
----------
moments : [n_dim] Moments object
mean and covariance of a multivariate normal
alpha : float
Spread of the sigma points. Typically 1e-3.
beta : float
        Used to "incorporate prior knowledge of the distribution of the state".
        2 is optimal if the state is normally distributed.
    kappa : float
        secondary scaling parameter controlling how far the sigma points spread
        from the mean; defaults here to 3 - n_dim when unspecified.
Returns
-------
points : [2*n_dim+1, n_dim] SigmaPoints
sigma points and associated weights
'''
(mu, sigma) = moments
n_dim = len(mu)
mu = array2d(mu, dtype=float)
if alpha is None:
alpha = 1.0
if beta is None:
beta = 0.0
if kappa is None:
kappa = 3.0 - n_dim
# compute sqrt(sigma)
sigma2 = linalg.cholesky(sigma).T
# Calculate scaling factor for all off-center points
lamda = (alpha * alpha) * (n_dim + kappa) - n_dim
c = n_dim + lamda
# calculate the sigma points; that is,
# mu
# mu + each column of sigma2 * sqrt(c)
# mu - each column of sigma2 * sqrt(c)
# Each column of points is one of these.
points = np.tile(mu.T, (1, 2 * n_dim + 1))
points[:, 1:(n_dim + 1)] += sigma2 * np.sqrt(c)
points[:, (n_dim + 1):] -= sigma2 * np.sqrt(c)
# Calculate weights
weights_mean = np.ones(2 * n_dim + 1)
weights_mean[0] = lamda / c
weights_mean[1:] = 0.5 / c
weights_cov = np.copy(weights_mean)
weights_cov[0] = lamda / c + (1 - alpha * alpha + beta)
return SigmaPoints(points.T, weights_mean, weights_cov)
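# Illustrative sanity check (not part of the original pykalman source; the helper
# name is invented for this sketch): converting moments to sigma points and back
# recovers the same mean and covariance.
def _example_sigma_point_roundtrip():
    moments = Moments(np.zeros(2), np.eye(2))
    points = moments2points(moments)
    recovered = points2moments(points)
    assert np.allclose(recovered.mean, moments.mean)
    assert np.allclose(recovered.covariance, moments.covariance)
    return recovered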
def unscented_transform(points, f=None, points_noise=None, sigma_noise=None):
'''Apply the Unscented Transform to a set of points
Apply f to points (with secondary argument points_noise, if available),
then approximate the resulting mean and covariance. If sigma_noise is
available, treat it as additional variance due to additive noise.
Parameters
----------
points : [n_points, n_dim_state] SigmaPoints
points to pass into f's first argument and associated weights if f is
defined. If f is unavailable, then f is assumed to be the identity
function.
f : [n_dim_state, n_dim_state_noise] -> [n_dim_state] function
transition function from time t to time t+1, if available.
points_noise : [n_points, n_dim_state_noise] array
points to pass into f's second argument, if any
sigma_noise : [n_dim_state, n_dim_state] array
covariance matrix for additive noise, if any
Returns
-------
points_pred : [n_points, n_dim_state] SigmaPoints
points transformed by f with same weights
moments_pred : [n_dim_state] Moments
moments associated with points_pred
'''
n_points, n_dim_state = points.points.shape
(points, weights_mean, weights_covariance) = points
# propagate points through f
if f is not None:
if points_noise is None:
points_pred = [f(points[i]) for i in range(n_points)]
else:
points_noise = points_noise.points
points_pred = [f(points[i], points_noise[i]) for i in range(n_points)]
else:
points_pred = points
# make each row a predicted point
points_pred = np.vstack(points_pred)
points_pred = SigmaPoints(points_pred, weights_mean, weights_covariance)
# calculate approximate mean, covariance
moments_pred = points2moments(points_pred, sigma_noise)
return (points_pred, moments_pred)
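# Illustrative sketch (not part of the original source; helper name invented):
# push a Gaussian through a simple non-linear map and read off the approximated
# output moments.
def _example_unscented_transform():
    moments = Moments(np.array([1.0, 2.0]), 0.1 * np.eye(2))
    points = moments2points(moments)
    f = lambda x: np.array([x[0] * x[1], x[0] + x[1]])  # non-linear map R^2 -> R^2
    points_pred, moments_pred = unscented_transform(points, f)
    # moments_pred.mean is close to [1.0 * 2.0, 1.0 + 2.0] for this mildly
    # non-linear map; moments_pred.covariance is the matching 2x2 approximation.
    return moments_pred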
def unscented_correct(cross_sigma, moments_pred, obs_moments_pred, z):
'''Correct predicted state estimates with an observation
Parameters
----------
cross_sigma : [n_dim_state, n_dim_obs] array
cross-covariance between the state at time t given all observations
from timesteps [0, t-1] and the observation at time t
moments_pred : [n_dim_state] Moments
mean and covariance of state at time t given observations from
timesteps [0, t-1]
obs_moments_pred : [n_dim_obs] Moments
mean and covariance of observation at time t given observations from
times [0, t-1]
z : [n_dim_obs] array
observation at time t
Returns
-------
moments_filt : [n_dim_state] Moments
mean and covariance of state at time t given observations from time
steps [0, t]
'''
mu_pred, sigma_pred = moments_pred
obs_mu_pred, obs_sigma_pred = obs_moments_pred
n_dim_state = len(mu_pred)
n_dim_obs = len(obs_mu_pred)
if not np.any(ma.getmask(z)):
# calculate Kalman gain
K = cross_sigma.dot(linalg.pinv(obs_sigma_pred))
# correct mu, sigma
mu_filt = mu_pred + K.dot(z - obs_mu_pred)
sigma_filt = sigma_pred - K.dot(cross_sigma.T)
else:
# no corrections to be made
mu_filt = mu_pred
sigma_filt = sigma_pred
return Moments(mu_filt, sigma_filt)
def augmented_points(momentses):
'''Calculate sigma points for augmented UKF
Parameters
----------
momentses : list of Moments
means and covariances for multiple multivariate normals
Returns
-------
pointses : list of Points
sigma points for each element of momentses
'''
# stack everything together
means, covariances = zip(*momentses)
mu_aug = np.concatenate(means)
sigma_aug = linalg.block_diag(*covariances)
moments_aug = Moments(mu_aug, sigma_aug)
# turn augmented representation into sigma points
points_aug = moments2points(moments_aug)
# unstack everything
dims = [len(m) for m in means]
result = []
start = 0
for i in range(len(dims)):
end = start + dims[i]
part = SigmaPoints(
points_aug.points[:, start:end],
points_aug.weights_mean,
points_aug.weights_covariance
)
result.append(part)
start = end
# return
return result
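# Illustrative sketch (not part of the original source; helper name invented):
# stack a state distribution and a transition-noise distribution, then split the
# joint sigma points back into per-block SigmaPoints.
def _example_augmented_points():
    state = Moments(np.zeros(2), np.eye(2))
    noise = Moments(np.zeros(2), 0.5 * np.eye(2))
    points_state, points_noise = augmented_points([state, noise])
    # the augmented space has dimension 4, so each block holds 2 * 4 + 1 = 9 points
    assert points_state.points.shape == (9, 2)
    assert points_noise.points.shape == (9, 2)
    return points_state, points_noise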
def augmented_unscented_filter_points(mean_state, covariance_state,
covariance_transition,
covariance_observation):
"""Extract sigma points using augmented state representation
Primarily used as a pre-processing step before predicting and updating in
the Augmented UKF.
Parameters
----------
mean_state : [n_dim_state] array
mean of state at time t given observations from time steps 0...t
covariance_state : [n_dim_state, n_dim_state] array
covariance of state at time t given observations from time steps 0...t
covariance_transition : [n_dim_state, n_dim_state] array
covariance of zero-mean noise resulting from transitioning from time
step t to t+1
covariance_observation : [n_dim_obs, n_dim_obs] array
covariance of zero-mean noise resulting from observation state at time
t+1
Returns
-------
points_state : [2 * n_dim_state + 1, n_dim_state] SigmaPoints
sigma points for state at time t
points_transition : [2 * n_dim_state + 1, n_dim_state] SigmaPoints
sigma points for transition noise between time t and t+1
points_observation : [2 * n_dim_state + 1, n_dim_obs] SigmaPoints
sigma points for observation noise at time step t+1
"""
# get sizes of dimensions
n_dim_state = covariance_state.shape[0]
n_dim_obs = covariance_observation.shape[0]
# extract sigma points using augmented representation
state_moments = Moments(mean_state, covariance_state)
transition_noise_moments = (
Moments(np.zeros(n_dim_state), covariance_transition)
)
observation_noise_moments = (
Moments(np.zeros(n_dim_obs), covariance_observation)
)
(points_state, points_transition, points_observation) = (
augmented_points([
state_moments,
transition_noise_moments,
observation_noise_moments
])
)
return (points_state, points_transition, points_observation)
def unscented_filter_predict(transition_function, points_state,
points_transition=None,
sigma_transition=None):
"""Predict next state distribution
Using the sigma points representing the state at time t given observations
from time steps 0...t, calculate the predicted mean, covariance, and sigma
points for the state at time t+1.
Parameters
----------
transition_function : function
function describing how the state changes between times t and t+1
points_state : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to the state at time step t given
observations from time steps 0...t
points_transition : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to the noise in transitioning from time step
t to t+1, if available. If not, assumes that noise is additive
sigma_transition : [n_dim_state, n_dim_state] array
covariance corresponding to additive noise in transitioning from time
step t to t+1, if available. If not, assumes noise is not additive.
Returns
-------
points_pred : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to state at time step t+1 given observations
from time steps 0...t. These points have not been "standardized" by the
unscented transform yet.
moments_pred : [n_dim_state] Moments
mean and covariance corresponding to time step t+1 given observations
from time steps 0...t
"""
    assert points_transition is not None or sigma_transition is not None, \
        "either points_transition or sigma_transition must be provided"
(points_pred, moments_pred) = (
unscented_transform(
points_state, transition_function,
points_noise=points_transition, sigma_noise=sigma_transition
)
)
return (points_pred, moments_pred)
def unscented_filter_correct(observation_function, moments_pred,
points_pred, observation,
points_observation=None,
sigma_observation=None):
"""Integrate new observation to correct state estimates
Parameters
----------
observation_function : function
function characterizing how the observation at time t+1 is generated
moments_pred : [n_dim_state] Moments
mean and covariance of state at time t+1 given observations from time
steps 0...t
points_pred : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to moments_pred
    observation : [n_dim_obs] array
        observation at time t+1. If masked, treated as missing.
    points_observation : [2*n_dim_state+1, n_dim_obs] SigmaPoints
        sigma points corresponding to predicted observation at time t+1 given
        observations from times 0...t, if available. If not, noise is assumed
        to be additive.
    sigma_observation : [n_dim_obs, n_dim_obs] array
        covariance matrix corresponding to additive noise in observation at
        time t+1, if available. If missing, noise is assumed to be non-additive.
Returns
-------
moments_filt : [n_dim_state] Moments
mean and covariance of state at time t+1 given observations from time
steps 0...t+1
"""
# Calculate E[z_t | z_{0:t-1}], Var(z_t | z_{0:t-1})
(obs_points_pred, obs_moments_pred) = (
unscented_transform(
points_pred, observation_function,
points_noise=points_observation, sigma_noise=sigma_observation
)
)
# Calculate Cov(x_t, z_t | z_{0:t-1})
sigma_pair = (
((points_pred.points - moments_pred.mean).T)
.dot(np.diag(points_pred.weights_mean))
.dot(obs_points_pred.points - obs_moments_pred.mean)
)
# Calculate E[x_t | z_{0:t}], Var(x_t | z_{0:t})
moments_filt = unscented_correct(sigma_pair, moments_pred, obs_moments_pred, observation)
return moments_filt
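# Illustrative sketch (not part of the original source; helper name invented):
# a single predict/correct cycle with additive noise for a 1-D random walk that
# is observed directly.
def _example_predict_correct_step():
    Q = 0.1 * np.eye(1)   # additive transition noise covariance
    R = 0.5 * np.eye(1)   # additive observation noise covariance
    points_state = moments2points(Moments(np.zeros(1), np.eye(1)))
    points_pred, moments_pred = unscented_filter_predict(
        lambda x: x, points_state, sigma_transition=Q
    )
    points_pred = moments2points(moments_pred)
    moments_filt = unscented_filter_correct(
        lambda x: x, moments_pred, points_pred, np.array([0.3]),
        sigma_observation=R
    )
    return moments_filt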
def augmented_unscented_filter(mu_0, sigma_0, f, g, Q, R, Z):
'''Apply the Unscented Kalman Filter with arbitrary noise
Parameters
----------
mu_0 : [n_dim_state] array
mean of initial state distribution
sigma_0 : [n_dim_state, n_dim_state] array
covariance of initial state distribution
f : function or [T-1] array of functions
        state transition function(s). Takes in the current state and the
process noise and outputs the next state.
g : function or [T] array of functions
observation function(s). Takes in the current state and outputs the
current observation.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
    R : [n_dim_obs, n_dim_obs] array
        observation covariance matrix
    Z : [T, n_dim_obs] array
        Z[t] = observation at time t
Returns
-------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times [0,
t]
sigma_filt : [T, n_dim_state, n_dim_state] array
sigma_filt[t] = covariance of state at time t given observations from
times [0, t]
'''
# extract size of key components
T = Z.shape[0]
n_dim_state = Q.shape[-1]
n_dim_obs = R.shape[-1]
# construct container for results
mu_filt = np.zeros((T, n_dim_state))
sigma_filt = np.zeros((T, n_dim_state, n_dim_state))
# TODO use _augumented_unscented_filter_update here
for t in range(T):
# Calculate sigma points for augmented state:
# [actual state, transition noise, observation noise]
if t == 0:
mu, sigma = mu_0, sigma_0
else:
mu, sigma = mu_filt[t - 1], sigma_filt[t - 1]
# extract sigma points using augmented representation
(points_state, points_transition, points_observation) = (
augmented_unscented_filter_points(mu, sigma, Q, R)
)
# Calculate E[x_t | z_{0:t-1}], Var(x_t | z_{0:t-1}) and sigma points
# for P(x_t | z_{0:t-1})
if t == 0:
points_pred = points_state
moments_pred = points2moments(points_pred)
else:
transition_function = _last_dims(f, t - 1, ndims=1)[0]
(points_pred, moments_pred) = (
unscented_filter_predict(
transition_function, points_state,
points_transition=points_transition
)
)
# Calculate E[z_t | z_{0:t-1}], Var(z_t | z_{0:t-1})
observation_function = _last_dims(g, t, ndims=1)[0]
mu_filt[t], sigma_filt[t] = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
Z[t], points_observation=points_observation
)
)
return (mu_filt, sigma_filt)
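# Illustrative sketch (not part of the original source; helper name invented):
# run the augmented UKF on a short synthetic sequence. Functions are wrapped in
# 1-element object arrays so that _last_dims can index them.
def _example_augmented_filter():
    f = np.array([lambda x, w: x + w])   # transition: random walk driven by noise
    g = np.array([lambda x, v: x + v])   # observation: state plus noise
    Q = 0.1 * np.eye(1)
    R = 0.5 * np.eye(1)
    Z = ma.asarray([[0.1], [0.2], [0.15]])
    mu_filt, sigma_filt = augmented_unscented_filter(
        np.zeros(1), np.eye(1), f, g, Q, R, Z
    )
    return mu_filt, sigma_filt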
def augmented_unscented_smoother(mu_filt, sigma_filt, f, Q):
'''Apply the Unscented Kalman Smoother with arbitrary noise
Parameters
----------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times
[0, t]
sigma_filt : [T, n_dim_state, n_dim_state] array
sigma_filt[t] = covariance of state at time t given observations from
times [0, t]
f : function or [T-1] array of functions
        state transition function(s). Takes in the current state and the
process noise and outputs the next state.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
Returns
-------
mu_smooth : [T, n_dim_state] array
mu_smooth[t] = mean of state at time t given observations from times
[0, T-1]
sigma_smooth : [T, n_dim_state, n_dim_state] array
sigma_smooth[t] = covariance of state at time t given observations from
times [0, T-1]
'''
# extract size of key parts of problem
T, n_dim_state = mu_filt.shape
# instantiate containers for results
mu_smooth = np.zeros(mu_filt.shape)
sigma_smooth = np.zeros(sigma_filt.shape)
mu_smooth[-1], sigma_smooth[-1] = mu_filt[-1], sigma_filt[-1]
for t in reversed(range(T - 1)):
# get sigma points for [state, transition noise]
mu = mu_filt[t]
sigma = sigma_filt[t]
moments_state = Moments(mu, sigma)
moments_transition_noise = Moments(np.zeros(n_dim_state), Q)
(points_state, points_transition) = (
augmented_points([moments_state, moments_transition_noise])
)
# compute E[x_{t+1} | z_{0:t}], Var(x_{t+1} | z_{0:t})
f_t = _last_dims(f, t, ndims=1)[0]
(points_pred, moments_pred) = unscented_transform(
points_state, f_t, points_noise=points_transition
)
# Calculate Cov(x_{t+1}, x_t | z_{0:t-1})
sigma_pair = (
(points_pred.points - moments_pred.mean).T
.dot(np.diag(points_pred.weights_covariance))
.dot(points_state.points - moments_state.mean).T
)
# compute smoothed mean, covariance
smoother_gain = sigma_pair.dot(linalg.pinv(moments_pred.covariance))
mu_smooth[t] = (
mu_filt[t]
+ smoother_gain
.dot(mu_smooth[t + 1] - moments_pred.mean)
)
sigma_smooth[t] = (
sigma_filt[t]
+ smoother_gain
.dot(sigma_smooth[t + 1] - moments_pred.covariance)
.dot(smoother_gain.T)
)
return (mu_smooth, sigma_smooth)
def additive_unscented_filter(mu_0, sigma_0, f, g, Q, R, Z):
'''Apply the Unscented Kalman Filter with additive noise
Parameters
----------
mu_0 : [n_dim_state] array
mean of initial state distribution
sigma_0 : [n_dim_state, n_dim_state] array
covariance of initial state distribution
f : function or [T-1] array of functions
        state transition function(s). Takes in the current state and outputs
the next.
g : function or [T] array of functions
observation function(s). Takes in the current state and outputs the
current observation.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
    R : [n_dim_obs, n_dim_obs] array
        observation covariance matrix
    Z : [T, n_dim_obs] array
        Z[t] = observation at time t
Returns
-------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times [0,
t]
sigma_filt : [T, n_dim_state, n_dim_state] array
sigma_filt[t] = covariance of state at time t given observations from
times [0, t]
'''
# extract size of key components
T = Z.shape[0]
n_dim_state = Q.shape[-1]
n_dim_obs = R.shape[-1]
# construct container for results
mu_filt = np.zeros((T, n_dim_state))
sigma_filt = np.zeros((T, n_dim_state, n_dim_state))
for t in range(T):
# Calculate sigma points for P(x_{t-1} | z_{0:t-1})
if t == 0:
mu, sigma = mu_0, sigma_0
else:
mu, sigma = mu_filt[t - 1], sigma_filt[t - 1]
points_state = moments2points(Moments(mu, sigma))
# Calculate E[x_t | z_{0:t-1}], Var(x_t | z_{0:t-1})
if t == 0:
points_pred = points_state
moments_pred = points2moments(points_pred)
else:
transition_function = _last_dims(f, t - 1, ndims=1)[0]
(_, moments_pred) = (
unscented_filter_predict(
transition_function, points_state, sigma_transition=Q
)
)
points_pred = moments2points(moments_pred)
# Calculate E[x_t | z_{0:t}], Var(x_t | z_{0:t})
observation_function = _last_dims(g, t, ndims=1)[0]
mu_filt[t], sigma_filt[t] = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
Z[t], sigma_observation=R
)
)
return (mu_filt, sigma_filt)
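# Illustrative sketch (not part of the original source; helper name invented):
# the additive-noise filter takes single-argument transition and observation
# functions, again wrapped in 1-element object arrays for _last_dims.
def _example_additive_filter():
    f = np.array([lambda x: x])          # random-walk transition
    g = np.array([lambda x: x])          # direct observation of the state
    Q = 0.1 * np.eye(1)
    R = 0.5 * np.eye(1)
    Z = ma.asarray([[0.1], [0.2], [0.15]])
    mu_filt, sigma_filt = additive_unscented_filter(
        np.zeros(1), np.eye(1), f, g, Q, R, Z
    )
    return mu_filt, sigma_filt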
def additive_unscented_smoother(mu_filt, sigma_filt, f, Q):
    '''Apply the Unscented Kalman Smoother assuming additive noise
Parameters
----------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times
[0, t]
sigma_filt : [T, n_dim_state, n_dim_state] array
sigma_filt[t] = covariance of state at time t given observations from
times [0, t]
f : function or [T-1] array of functions
        state transition function(s). Takes in the current state and outputs
the next.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
Returns
-------
mu_smooth : [T, n_dim_state] array
mu_smooth[t] = mean of state at time t given observations from times
[0, T-1]
sigma_smooth : [T, n_dim_state, n_dim_state] array
sigma_smooth[t] = covariance of state at time t given observations from
times [0, T-1]
'''
# extract size of key parts of problem
T, n_dim_state = mu_filt.shape
# instantiate containers for results
mu_smooth = np.zeros(mu_filt.shape)
sigma_smooth = np.zeros(sigma_filt.shape)
mu_smooth[-1], sigma_smooth[-1] = mu_filt[-1], sigma_filt[-1]
for t in reversed(range(T - 1)):
# get sigma points for state
mu = mu_filt[t]
sigma = sigma_filt[t]
moments_state = Moments(mu, sigma)
points_state = moments2points(moments_state)
# compute E[x_{t+1} | z_{0:t}], Var(x_{t+1} | z_{0:t})
f_t = _last_dims(f, t, ndims=1)[0]
(points_pred, moments_pred) = (
unscented_transform(points_state, f_t, sigma_noise=Q)
)
# Calculate Cov(x_{t+1}, x_t | z_{0:t-1})
sigma_pair = (
(points_pred.points - moments_pred.mean).T
.dot(np.diag(points_pred.weights_covariance))
.dot(points_state.points - moments_state.mean).T
)
# compute smoothed mean, covariance
smoother_gain = sigma_pair.dot(linalg.pinv(moments_pred.covariance))
mu_smooth[t] = (
mu_filt[t]
+ smoother_gain
.dot(mu_smooth[t + 1] - moments_pred.mean)
)
sigma_smooth[t] = (
sigma_filt[t]
+ smoother_gain
.dot(sigma_smooth[t + 1] - moments_pred.covariance)
.dot(smoother_gain.T)
)
return (mu_smooth, sigma_smooth)
class UnscentedMixin(object):
"""Methods shared by all Unscented Kalman Filter implementations."""
def __init__(self, transition_functions=None, observation_functions=None,
transition_covariance=None, observation_covariance=None,
initial_state_mean=None, initial_state_covariance=None,
n_dim_state=None, n_dim_obs=None, random_state=None):
# determine size of state and observation space
n_dim_state = _determine_dimensionality(
[(transition_covariance, array2d, -2),
(initial_state_covariance, array2d, -2),
(initial_state_mean, array1d, -1)],
n_dim_state
)
n_dim_obs = _determine_dimensionality(
[(observation_covariance, array2d, -2)],
n_dim_obs
)
# set parameters
self.transition_functions = transition_functions
self.observation_functions = observation_functions
self.transition_covariance = transition_covariance
self.observation_covariance = observation_covariance
self.initial_state_mean = initial_state_mean
self.initial_state_covariance = initial_state_covariance
self.n_dim_state = n_dim_state
self.n_dim_obs = n_dim_obs
self.random_state = random_state
def _initialize_parameters(self):
"""Retrieve parameters if they exist, else replace with defaults"""
arguments = get_params(self)
defaults = self._default_parameters()
converters = self._converters()
processed = preprocess_arguments([arguments, defaults], converters)
return (
processed['transition_functions'],
processed['observation_functions'],
processed['transition_covariance'],
processed['observation_covariance'],
processed['initial_state_mean'],
processed['initial_state_covariance']
)
def _parse_observations(self, obs):
"""Safely convert observations to their expected format"""
obs = ma.atleast_2d(obs)
if obs.shape[0] == 1 and obs.shape[1] > 1:
obs = obs.T
return obs
def _converters(self):
return {
'transition_functions': array1d,
'observation_functions': array1d,
'transition_covariance': array2d,
'observation_covariance': array2d,
'initial_state_mean': array1d,
'initial_state_covariance': array2d,
'n_dim_state': int,
'n_dim_obs': int,
'random_state': check_random_state,
}
class UnscentedKalmanFilter(UnscentedMixin):
r'''Implements the General (aka Augmented) Unscented Kalman Filter governed
by the following equations,
.. math::
x_0 &\sim \text{Normal}(\mu_0, \Sigma_0) \\
x_{t+1} &= f_t(x_t, \text{Normal}(0, Q)) \\
z_{t} &= g_t(x_t, \text{Normal}(0, R))
Notice that although the input noise to the state transition equation and
the observation equation are both normally distributed, any non-linear
transformation may be applied afterwards. This allows for greater
generality, but at the expense of computational complexity. The complexity
of :class:`UnscentedKalmanFilter.filter()` is :math:`O(T(2n+m)^3)`
where :math:`T` is the number of time steps, :math:`n` is the size of the
state space, and :math:`m` is the size of the observation space.
If your noise is simply additive, consider using the
:class:`AdditiveUnscentedKalmanFilter`
Parameters
----------
transition_functions : function or [n_timesteps-1] array of functions
transition_functions[t] is a function of the state and the transition
noise at time t and produces the state at time t+1. Also known as
:math:`f_t`.
observation_functions : function or [n_timesteps] array of functions
observation_functions[t] is a function of the state and the observation
noise at time t and produces the observation at time t. Also known as
:math:`g_t`.
transition_covariance : [n_dim_state, n_dim_state] array
transition noise covariance matrix. Also known as :math:`Q`.
observation_covariance : [n_dim_obs, n_dim_obs] array
observation noise covariance matrix. Also known as :math:`R`.
initial_state_mean : [n_dim_state] array
mean of initial state distribution. Also known as :math:`\mu_0`
initial_state_covariance : [n_dim_state, n_dim_state] array
covariance of initial state distribution. Also known as
:math:`\Sigma_0`
n_dim_state: optional, integer
        the dimensionality of the state space. Only meaningful when you do not
        specify initial values for `transition_covariance`,
        `initial_state_mean`, or `initial_state_covariance`.
n_dim_obs: optional, integer
the dimensionality of the observation space. Only meaningful when you
do not specify initial values for `observation_covariance`.
random_state : optional, int or RandomState
seed for random sample generation
'''
def sample(self, n_timesteps, initial_state=None, random_state=None):
'''Sample from model defined by the Unscented Kalman Filter
Parameters
----------
n_timesteps : int
number of time steps
initial_state : optional, [n_dim_state] array
initial state. If unspecified, will be sampled from initial state
distribution.
random_state : optional, int or Random
random number generator
'''
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
n_dim_state = transition_covariance.shape[-1]
n_dim_obs = observation_covariance.shape[-1]
# logic for instantiating rng
if random_state is None:
rng = check_random_state(self.random_state)
else:
rng = check_random_state(random_state)
# logic for selecting initial state
if initial_state is None:
initial_state = rng.multivariate_normal(
initial_state_mean, initial_state_covariance
)
# logic for generating samples
x = np.zeros((n_timesteps, n_dim_state))
z = np.zeros((n_timesteps, n_dim_obs))
for t in range(n_timesteps):
if t == 0:
x[0] = initial_state
else:
transition_function = (
_last_dims(transition_functions, t - 1, ndims=1)[0]
)
transition_noise = (
rng.multivariate_normal(
np.zeros(n_dim_state),
transition_covariance.newbyteorder('=')
)
)
x[t] = transition_function(x[t - 1], transition_noise)
observation_function = (
_last_dims(observation_functions, t, ndims=1)[0]
)
observation_noise = (
rng.multivariate_normal(
np.zeros(n_dim_obs),
observation_covariance.newbyteorder('=')
)
)
z[t] = observation_function(x[t], observation_noise)
return (x, ma.asarray(z))
def filter(self, Z):
'''Run Unscented Kalman Filter
Parameters
----------
Z : [n_timesteps, n_dim_state] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
filtered_state_means : [n_timesteps, n_dim_state] array
filtered_state_means[t] = mean of state distribution at time t given
observations from times [0, t]
filtered_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
filtered_state_covariances[t] = covariance of state distribution at
time t given observations from times [0, t]
'''
Z = self._parse_observations(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(filtered_state_means, filtered_state_covariances) = (
augmented_unscented_filter(
initial_state_mean, initial_state_covariance,
transition_functions, observation_functions,
transition_covariance, observation_covariance,
Z
)
)
return (filtered_state_means, filtered_state_covariances)
def filter_update(self,
filtered_state_mean, filtered_state_covariance,
observation=None,
transition_function=None, transition_covariance=None,
observation_function=None, observation_covariance=None):
r"""Update a Kalman Filter state estimate
Perform a one-step update to estimate the state at time :math:`t+1`
        given an observation at time :math:`t+1` and the previous estimate for
time :math:`t` given observations from times :math:`[0...t]`. This
method is useful if one wants to track an object with streaming
observations.
Parameters
----------
filtered_state_mean : [n_dim_state] array
mean estimate for state at time t given observations from times
[1...t]
filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t given observations from
times [1...t]
observation : [n_dim_obs] array or None
observation from time t+1. If `observation` is a masked array and
any of `observation`'s components are masked or if `observation` is
None, then `observation` will be treated as a missing observation.
transition_function : optional, function
state transition function from time t to t+1. If unspecified,
`self.transition_functions` will be used.
transition_covariance : optional, [n_dim_state, n_dim_state] array
state transition covariance from time t to t+1. If unspecified,
`self.transition_covariance` will be used.
observation_function : optional, function
observation function at time t+1. If unspecified,
`self.observation_functions` will be used.
observation_covariance : optional, [n_dim_obs, n_dim_obs] array
observation covariance at time t+1. If unspecified,
`self.observation_covariance` will be used.
Returns
-------
next_filtered_state_mean : [n_dim_state] array
mean estimate for state at time t+1 given observations from times
[1...t+1]
next_filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t+1 given observations
from times [1...t+1]
"""
# initialize parameters
(transition_functions, observation_functions,
transition_cov, observation_cov,
_, _) = (
self._initialize_parameters()
)
def default_function(f, arr):
if f is None:
assert len(arr) == 1
f = arr[0]
return f
transition_function = default_function(
transition_function, transition_functions
)
observation_function = default_function(
observation_function, observation_functions
)
transition_covariance = _arg_or_default(
transition_covariance, transition_cov,
2, "transition_covariance"
)
observation_covariance = _arg_or_default(
observation_covariance, observation_cov,
2, "observation_covariance"
)
# Make a masked observation if necessary
if observation is None:
n_dim_obs = observation_covariance.shape[0]
observation = np.ma.array(np.zeros(n_dim_obs))
observation.mask = True
else:
observation = np.ma.asarray(observation)
# make sigma points
(points_state, points_transition, points_observation) = (
augmented_unscented_filter_points(
filtered_state_mean, filtered_state_covariance,
transition_covariance, observation_covariance
)
)
# predict
(points_pred, moments_pred) = (
unscented_filter_predict(
transition_function, points_state, points_transition
)
)
# correct
next_filtered_state_mean, next_filtered_state_covariance = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
observation, points_observation=points_observation
)
)
return (next_filtered_state_mean, next_filtered_state_covariance)
def smooth(self, Z):
'''Run Unscented Kalman Smoother
Parameters
----------
Z : [n_timesteps, n_dim_state] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
smoothed_state_means : [n_timesteps, n_dim_state] array
            smoothed_state_means[t] = mean of state distribution at time t given
observations from times [0, n_timesteps-1]
smoothed_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
            smoothed_state_covariances[t] = covariance of state distribution at
time t given observations from times [0, n_timesteps-1]
'''
Z = self._parse_observations(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(filtered_state_means, filtered_state_covariances) = self.filter(Z)
(smoothed_state_means, smoothed_state_covariances) = (
augmented_unscented_smoother(
filtered_state_means, filtered_state_covariances,
transition_functions, transition_covariance
)
)
return (smoothed_state_means, smoothed_state_covariances)
def _default_parameters(self):
return {
'transition_functions': lambda state, noise: state + noise,
'observation_functions': lambda state, noise: state + noise,
'transition_covariance': np.eye(self.n_dim_state),
'observation_covariance': np.eye(self.n_dim_obs),
'initial_state_mean': np.zeros(self.n_dim_state),
'initial_state_covariance': np.eye(self.n_dim_state),
'random_state': 0,
}
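# Illustrative usage sketch (not part of the original source; helper name
# invented): track a 1-D random walk with the augmented filter. Transition and
# observation functions take the state and a noise sample as separate arguments.
def _example_unscented_kalman_filter():
    ukf = UnscentedKalmanFilter(
        transition_functions=lambda x, w: x + w,
        observation_functions=lambda x, v: x + v,
        transition_covariance=0.1 * np.eye(1),
        observation_covariance=0.5 * np.eye(1),
        initial_state_mean=np.zeros(1),
        initial_state_covariance=np.eye(1),
        random_state=0,
    )
    observations = [0.0, 0.4, 0.3, 0.7]
    filtered_means, filtered_covs = ukf.filter(observations)
    smoothed_means, smoothed_covs = ukf.smooth(observations)
    return filtered_means, smoothed_means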
class AdditiveUnscentedKalmanFilter(UnscentedMixin):
r'''Implements the Unscented Kalman Filter with additive noise.
Observations are assumed to be generated from the following process,
.. math::
x_0 &\sim \text{Normal}(\mu_0, \Sigma_0) \\
x_{t+1} &= f_t(x_t) + \text{Normal}(0, Q) \\
z_{t} &= g_t(x_t) + \text{Normal}(0, R)
    While less general than the general-noise Unscented Kalman Filter, the Additive
version is more computationally efficient with complexity :math:`O(Tn^3)`
where :math:`T` is the number of time steps and :math:`n` is the size of
the state space.
Parameters
----------
transition_functions : function or [n_timesteps-1] array of functions
transition_functions[t] is a function of the state at time t and
produces the state at time t+1. Also known as :math:`f_t`.
observation_functions : function or [n_timesteps] array of functions
observation_functions[t] is a function of the state at time t and
produces the observation at time t. Also known as :math:`g_t`.
transition_covariance : [n_dim_state, n_dim_state] array
transition noise covariance matrix. Also known as :math:`Q`.
observation_covariance : [n_dim_obs, n_dim_obs] array
observation noise covariance matrix. Also known as :math:`R`.
initial_state_mean : [n_dim_state] array
mean of initial state distribution. Also known as :math:`\mu_0`.
initial_state_covariance : [n_dim_state, n_dim_state] array
covariance of initial state distribution. Also known as
:math:`\Sigma_0`.
n_dim_state: optional, integer
        the dimensionality of the state space. Only meaningful when you do not
        specify initial values for `transition_covariance`,
        `initial_state_mean`, or `initial_state_covariance`.
n_dim_obs: optional, integer
the dimensionality of the observation space. Only meaningful when you
do not specify initial values for `observation_covariance`.
random_state : optional, int or RandomState
seed for random sample generation
'''
def sample(self, n_timesteps, initial_state=None, random_state=None):
'''Sample from model defined by the Unscented Kalman Filter
Parameters
----------
n_timesteps : int
number of time steps
initial_state : optional, [n_dim_state] array
initial state. If unspecified, will be sampled from initial state
distribution.
'''
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
n_dim_state = transition_covariance.shape[-1]
n_dim_obs = observation_covariance.shape[-1]
# logic for instantiating rng
if random_state is None:
rng = check_random_state(self.random_state)
else:
rng = check_random_state(random_state)
# logic for selecting initial state
if initial_state is None:
initial_state = (
rng.multivariate_normal(
initial_state_mean,
initial_state_covariance
)
)
# logic for generating samples
x = np.zeros((n_timesteps, n_dim_state))
z = np.zeros((n_timesteps, n_dim_obs))
for t in range(n_timesteps):
if t == 0:
x[0] = initial_state
else:
transition_function = (
_last_dims(transition_functions, t - 1, ndims=1)[0]
)
transition_noise = (
rng.multivariate_normal(
np.zeros(n_dim_state),
transition_covariance.newbyteorder('=')
)
)
x[t] = transition_function(x[t - 1]) + transition_noise
observation_function = (
_last_dims(observation_functions, t, ndims=1)[0]
)
observation_noise = (
rng.multivariate_normal(
np.zeros(n_dim_obs),
observation_covariance.newbyteorder('=')
)
)
z[t] = observation_function(x[t]) + observation_noise
return (x, ma.asarray(z))
def filter(self, Z):
'''Run Unscented Kalman Filter
Parameters
----------
Z : [n_timesteps, n_dim_state] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
filtered_state_means : [n_timesteps, n_dim_state] array
filtered_state_means[t] = mean of state distribution at time t given
observations from times [0, t]
filtered_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
filtered_state_covariances[t] = covariance of state distribution at
time t given observations from times [0, t]
'''
Z = self._parse_observations(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(filtered_state_means, filtered_state_covariances) = (
additive_unscented_filter(
initial_state_mean, initial_state_covariance,
transition_functions, observation_functions,
transition_covariance, observation_covariance,
Z
)
)
return (filtered_state_means, filtered_state_covariances)
def filter_update(self,
filtered_state_mean, filtered_state_covariance,
observation=None,
transition_function=None, transition_covariance=None,
observation_function=None, observation_covariance=None):
r"""Update a Kalman Filter state estimate
Perform a one-step update to estimate the state at time :math:`t+1`
        given an observation at time :math:`t+1` and the previous estimate for
time :math:`t` given observations from times :math:`[0...t]`. This
method is useful if one wants to track an object with streaming
observations.
Parameters
----------
filtered_state_mean : [n_dim_state] array
mean estimate for state at time t given observations from times
[1...t]
filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t given observations from
times [1...t]
observation : [n_dim_obs] array or None
observation from time t+1. If `observation` is a masked array and
any of `observation`'s components are masked or if `observation` is
None, then `observation` will be treated as a missing observation.
transition_function : optional, function
state transition function from time t to t+1. If unspecified,
`self.transition_functions` will be used.
transition_covariance : optional, [n_dim_state, n_dim_state] array
state transition covariance from time t to t+1. If unspecified,
`self.transition_covariance` will be used.
observation_function : optional, function
observation function at time t+1. If unspecified,
`self.observation_functions` will be used.
observation_covariance : optional, [n_dim_obs, n_dim_obs] array
observation covariance at time t+1. If unspecified,
`self.observation_covariance` will be used.
Returns
-------
next_filtered_state_mean : [n_dim_state] array
mean estimate for state at time t+1 given observations from times
[1...t+1]
next_filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t+1 given observations
from times [1...t+1]
"""
# initialize parameters
(transition_functions, observation_functions,
transition_cov, observation_cov,
_, _) = (
self._initialize_parameters()
)
def default_function(f, arr):
if f is None:
assert len(arr) == 1
f = arr[0]
return f
transition_function = default_function(
transition_function, transition_functions
)
observation_function = default_function(
observation_function, observation_functions
)
transition_covariance = _arg_or_default(
transition_covariance, transition_cov,
2, "transition_covariance"
)
observation_covariance = _arg_or_default(
observation_covariance, observation_cov,
2, "observation_covariance"
)
# Make a masked observation if necessary
if observation is None:
n_dim_obs = observation_covariance.shape[0]
observation = np.ma.array(np.zeros(n_dim_obs))
observation.mask = True
else:
observation = np.ma.asarray(observation)
# make sigma points
moments_state = Moments(filtered_state_mean, filtered_state_covariance)
points_state = moments2points(moments_state)
# predict
(_, moments_pred) = (
unscented_filter_predict(
transition_function, points_state,
sigma_transition=transition_covariance
)
)
points_pred = moments2points(moments_pred)
# correct
(next_filtered_state_mean, next_filtered_state_covariance) = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
observation, sigma_observation=observation_covariance
)
)
return (next_filtered_state_mean, next_filtered_state_covariance)
def smooth(self, Z):
'''Run Unscented Kalman Smoother
Parameters
----------
Z : [n_timesteps, n_dim_state] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
smoothed_state_means : [n_timesteps, n_dim_state] array
            smoothed_state_means[t] = mean of state distribution at time t given
observations from times [0, n_timesteps-1]
smoothed_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
            smoothed_state_covariances[t] = covariance of state distribution at
time t given observations from times [0, n_timesteps-1]
'''
Z = ma.asarray(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(filtered_state_means, filtered_state_covariances) = self.filter(Z)
(smoothed_state_means, smoothed_state_covariances) = (
additive_unscented_smoother(
filtered_state_means, filtered_state_covariances,
transition_functions, transition_covariance
)
)
return (smoothed_state_means, smoothed_state_covariances)
def _default_parameters(self):
return {
'transition_functions': lambda state: state,
'observation_functions': lambda state: state,
'transition_covariance': np.eye(self.n_dim_state),
'observation_covariance': np.eye(self.n_dim_obs),
'initial_state_mean': np.zeros(self.n_dim_state),
'initial_state_covariance': np.eye(self.n_dim_state),
'random_state': 0,
}
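# Illustrative usage sketch (not part of the original source; helper name
# invented): the additive-noise variant takes single-argument state and
# observation functions.
def _example_additive_unscented_kalman_filter():
    akf = AdditiveUnscentedKalmanFilter(
        transition_functions=lambda x: x,
        observation_functions=lambda x: x,
        transition_covariance=0.1 * np.eye(1),
        observation_covariance=0.5 * np.eye(1),
        initial_state_mean=np.zeros(1),
        initial_state_covariance=np.eye(1),
    )
    filtered_means, filtered_covs = akf.filter([0.0, 0.4, 0.3, 0.7])
    return filtered_means, filtered_covs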
| 3.140625 | 3 |
src/cloudwatch/modules/flusher.py | klarna/collectd-cloudwatch | 1 | 12776152 |
import threading
import time
import os
import math
from client.putclient import PutClient
from logger.logger import get_logger
from metricdata import MetricDataStatistic, MetricDataBuilder
class Flusher(object):
"""
The flusher is responsible for translating Collectd metrics to CloudWatch MetricDataStatistic,
batching, aggregating and flushing metrics to CloudWatch endpoints.
Keyword arguments:
config_helper -- The ConfigHelper object with configuration loaded
"""
_LOGGER = get_logger(__name__)
_FLUSH_INTERVAL_IN_SECONDS = 60
_FLUSH_DELTA_IN_SECONDS = 1
_MAX_METRICS_PER_PUT_REQUEST = 20
_MAX_METRICS_TO_AGGREGATE = 2000
def __init__(self, config_helper):
self.lock = threading.Lock()
self.client = None
self.config = config_helper
self.metric_map = {}
self.last_flush_time = time.time()
self.nan_key_set = set()
def is_numerical_value(self, value):
"""
        Values passed from collectd to this plugin are assumed to be float or
        integer. If collectd ever passes string values, _add_values_to_metrics
        should be extended to convert them to float first.
        Returns:
            True if the value is a number and is not NaN
            False if the value is NaN or cannot be parsed as a number
"""
try:
return not math.isnan(float(value))
except ValueError:
return False
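    # Illustrative behaviour note (not part of the original plugin):
    #   is_numerical_value(1.5)            -> True
    #   is_numerical_value("2.5")          -> True   (numeric strings parse as float)
    #   is_numerical_value(float("nan"))   -> False
    #   is_numerical_value("not-a-number") -> False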
def add_metric(self, value_list):
"""
Translates Collectd metrics to CloudWatch format and stores them in flusher for further processing
such as batching and aggregating.
Keyword arguments:
value_list -- The ValueList object passed by Collectd to the write callback
"""
with self.lock:
# The flush operation should take place before adding metric for a new minute.
# Together with flush delta this ensures that old metrics are flushed before or at the start of a new minute.
self._flush_if_need(time.time())
if self.config.whitelist.is_whitelisted(self._get_metric_key(value_list)):
self._aggregate_metric(value_list)
def _flush_if_need(self, current_time):
"""
Checks if metrics should be flushed and starts the flush procedure
"""
if self._is_flush_time(current_time):
if self.config.debug and self.metric_map:
state = ""
for dimension_metrics in self.metric_map:
state += str(dimension_metrics) + "[" + str(self.metric_map[dimension_metrics][0].statistics.sample_count) + "] "
self._LOGGER.info("[debug] flushing metrics " + state)
self._flush()
def _is_flush_time(self, current_time):
return (current_time - self.last_flush_time) + self._FLUSH_DELTA_IN_SECONDS >= self._FLUSH_INTERVAL_IN_SECONDS
def record_nan_value(self, key, value_list):
        if key not in self.nan_key_set:
self._LOGGER.warning(
"Adding Metric value is not numerical, key: " + key + " value: " + str(value_list.values))
self.nan_key_set.add(key)
def _aggregate_metric(self, value_list):
"""
Selects existing metric or adds a new metric to the metric_map. Then aggregates values from ValueList with the selected metric.
If the size of metric_map is above the limit, new metric will not be added and the value_list will be dropped.
"""
nan_value_count = 0
key = self._get_metric_key(value_list)
if key in self.metric_map:
nan_value_count = self._add_values_to_metrics(self.metric_map[key], value_list)
else:
if len(self.metric_map) < self._MAX_METRICS_TO_AGGREGATE:
metrics = MetricDataBuilder(self.config, value_list).build()
nan_value_count = self._add_values_to_metrics(metrics, value_list)
if nan_value_count != len(value_list.values):
self.metric_map[key] = metrics
else:
self._LOGGER.warning("Batching queue overflow detected. Dropping metric.")
if nan_value_count:
self.record_nan_value(key, value_list)
def _get_metric_key(self, value_list):
"""
        Generates a key for the metric. The key combines plugin, plugin instance,
        type and type instance to ensure uniqueness.
        """
        return value_list.plugin + "-" + value_list.plugin_instance + "-" + value_list.type + "-" + value_list.type_instance
def _add_values_to_metrics(self, dimension_metrics, value_list):
"""
Aggregates values from value_list with existing metric
Add the valid value to the metric and just skip the nan value.
Returns:
return the count of the nan value in value_list
"""
        # count NaN values once per value_list; every valid value is added to each metric
        nan_value_count = 0
        for value in value_list.values:
            if self.is_numerical_value(value):
                for metric in dimension_metrics:
                    metric.add_value(value)
            else:
                nan_value_count += 1
        return nan_value_count
def _flush(self):
"""
Batches and puts metrics to CloudWatch
"""
self.last_flush_time = time.time()
self.client = PutClient(self.config)
if self.metric_map:
prepare_batch = self._prepare_batch()
try:
while True:
                    metric_batch = next(prepare_batch)
if not metric_batch:
break
self.client.put_metric_data(MetricDataStatistic.NAMESPACE, metric_batch)
if len(metric_batch) < self._MAX_METRICS_PER_PUT_REQUEST:
break
            except StopIteration as e:
self._LOGGER.error("_flush error: "+ str(e))
def _prepare_batch(self):
"""
Removes metrics from the metric_map and adds them to the batch.
The batch size is defined by _MAX_METRICS_PER_PUT_REQUEST.
"""
metric_batch = []
while self.metric_map:
key, dimension_metrics = self.metric_map.popitem()
for metric in dimension_metrics:
if len(metric_batch) < self._MAX_METRICS_PER_PUT_REQUEST:
metric_batch.append(metric)
else:
yield metric_batch
metric_batch = []
metric_batch.append(metric)
yield metric_batch
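    # Illustrative behaviour note (not part of the original plugin): with 45
    # single-value metrics queued, _prepare_batch() yields batches of 20, 20 and 5;
    # _flush() sends each batch and stops after the first one smaller than
    # _MAX_METRICS_PER_PUT_REQUEST.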
| 2.34375 | 2 |
scripts/vis_layout.py | d116626/covid | 0 | 12776153 |
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot, offline
def get_layout(themes, title="", x_name="", y_name="", tipo=None):
layout = go.Layout(
# automargin=True,
margin=dict(
l=themes["margin"]["l"],
r=themes["margin"]["r"],
t=themes["margin"]["t"],
b=themes["margin"]["b"],
),
barmode=themes["barmode"],
hovermode=themes["hovermode"],
autosize=True,
# automargin=True,
# title=dict(
# text=title,
# x=0.5,
# # y=0.9,
# xanchor='center',
# yanchor='top',
# font = dict(
# size=themes['title']['size'],
# color=themes['title']['color']
# )
# ),
# xaxis_title=x_name,
xaxis=dict(
tickfont=dict(
size=themes["axis_legend"]["size"],
color=themes["axis_legend"]["color"],
),
gridcolor=themes["axis_legend"]["gridcolor"],
zerolinecolor=themes["axis_legend"]["gridcolor"],
# linecolor=themes['axis_legend']['gridcolor'],
# linewidth=2,
# mirror=True,
tickformat=themes["axis_legend"]["tickformat"]["x"],
type=themes["axis_legend"]["type"]["x"],
),
yaxis_title=y_name,
yaxis=dict(
tickfont=dict(
size=themes["axis_legend"]["size"],
color=themes["axis_legend"]["color"],
),
gridcolor=themes["axis_legend"]["gridcolor"],
zerolinecolor=themes["axis_legend"]["gridcolor"],
# linecolor=themes['axis_legend']['gridcolor'],
# linewidth=2,
tickformat=themes["axis_legend"]["tickformat"]["y"],
type=tipo,
),
font=dict(
size=themes["axis_tilte"]["size"], color=themes["axis_tilte"]["color"]
),
legend=go.layout.Legend(
x=themes["legend"]["position"]["x"],
y=themes["legend"]["position"]["y"],
xanchor=themes["legend"]["position"]["xanchor"],
yanchor=themes["legend"]["position"]["yanchor"],
traceorder=themes["legend"]["traceorder"],
orientation=themes["legend"]["orientation"],
font=dict(
family=themes["legend"]["family"],
size=themes["legend"]["size"],
color=themes["legend"]["color"],
),
bgcolor=themes["legend"]["bgcolor"],
bordercolor=themes["legend"]["bordercolor"],
borderwidth=themes["legend"]["borderwidth"],
),
height=themes["altura"],
width=themes["largura"],
paper_bgcolor=themes["paper_bgcolor"],
plot_bgcolor=themes["plot_bgcolor"],
annotations=[
dict(
showarrow=False,
text=f"<b>{themes['source']['text']}<b>",
x=themes["source"]["position"]["x"],
y=themes["source"]["position"]["y"],
xref="paper",
yref="paper",
align="left",
# xanchor='right',
xshift=0,
yshift=0,
font=dict(
family=themes["source"]["family"],
size=themes["source"]["size"],
color=themes["source"]["color"],
),
)
],
)
return layout
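# Illustrative usage sketch (not part of the original script): `themes` is assumed
# to be a nested dict loaded from the project's style configuration; the minimal
# structure and the sample values below only cover the keys that get_layout()
# actually reads, and the function name, titles and data are invented.
def _example_get_layout():
    themes = {
        "margin": {"l": 60, "r": 20, "t": 40, "b": 60},
        "barmode": "group",
        "hovermode": "x",
        "axis_legend": {
            "size": 12,
            "color": "#444",
            "gridcolor": "#eee",
            "tickformat": {"x": "", "y": ","},
            "type": {"x": "date"},
        },
        "axis_tilte": {"size": 14, "color": "#222"},
        "legend": {
            "position": {"x": 0, "y": 1.1, "xanchor": "left", "yanchor": "top"},
            "traceorder": "normal",
            "orientation": "h",
            "family": "Arial",
            "size": 12,
            "color": "#222",
            "bgcolor": "rgba(0,0,0,0)",
            "bordercolor": "rgba(0,0,0,0)",
            "borderwidth": 0,
        },
        "altura": 450,
        "largura": 800,
        "paper_bgcolor": "white",
        "plot_bgcolor": "white",
        "source": {
            "text": "Fonte: dados de exemplo",
            "position": {"x": 0, "y": -0.2},
            "family": "Arial",
            "size": 10,
            "color": "#666",
        },
    }
    layout = get_layout(themes, title="Casos por dia", x_name="data", y_name="casos")
    fig = go.Figure(
        data=[go.Bar(x=["2020-03-01", "2020-03-02"], y=[10, 25])], layout=layout
    )
    return fig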
def get_layout_new(themes, var, scale):
layout = go.Layout(
hovermode=themes["hovermode"],
margin=dict(
l=themes["margin"]["l"],
r=themes["margin"]["r"],
t=themes["margin"]["t"],
b=themes["margin"]["b"],
),
barmode=themes["barmode"],
autosize=True,
title=dict(
text=themes["vars"][var]["title"],
x=0.5,
y=0.9,
xanchor="center",
yanchor="top",
font=dict(size=themes["title"]["size"], color=themes["title"]["color"]),
),
xaxis_title=themes["vars"][var]["x_title"],
xaxis=dict(
tickfont=dict(
size=themes["axis_legend"]["size"],
color=themes["axis_legend"]["color"],
),
gridcolor=themes["axis_legend"]["gridcolor"],
zerolinecolor=themes["axis_legend"]["gridcolor"],
linecolor=themes["axis_legend"]["gridcolor"],
# linewidth=2,
# mirror=True,
tickformat=themes["axis_legend"]["scale"][scale]["x"]["tickformat"],
type=themes["axis_legend"]["scale"][scale]["x"]["type"],
),
yaxis_title=themes["vars"][var]["y_title"],
yaxis=dict(
tickfont=dict(
size=themes["axis_legend"]["size"],
color=themes["axis_legend"]["color"],
),
gridcolor=themes["axis_legend"]["gridcolor"],
zerolinecolor=themes["axis_legend"]["gridcolor"],
# linecolor=themes['axis_legend']['gridcolor'],
# linewidth=2,
tickformat=themes["axis_legend"]["scale"][scale]["y"]["tickformat"],
type=themes["axis_legend"]["scale"][scale]["y"]["type"],
),
font=dict(
size=themes["axis_tilte"]["size"], color=themes["axis_tilte"]["color"]
),
legend=go.layout.Legend(
x=themes["legend"]["position"]["x"],
y=themes["legend"]["position"]["y"],
xanchor=themes["legend"]["position"]["xanchor"],
yanchor=themes["legend"]["position"]["yanchor"],
traceorder=themes["legend"]["traceorder"],
orientation=themes["legend"]["orientation"],
font=dict(
family=themes["legend"]["family"],
size=themes["legend"]["size"],
color=themes["legend"]["color"],
),
bgcolor=themes["legend"]["bgcolor"],
bordercolor=themes["legend"]["bordercolor"],
borderwidth=themes["legend"]["borderwidth"],
),
height=themes["altura"],
width=themes["largura"],
paper_bgcolor=themes["paper_bgcolor"],
plot_bgcolor=themes["plot_bgcolor"],
annotations=[
dict(
showarrow=False,
text=f"<b>{themes['source']['text']}<b>",
x=themes["source"]["position"]["x"],
y=themes["source"]["position"]["y"],
xref="paper",
yref="paper",
align="left",
# xanchor='right',
xshift=0,
yshift=0,
font=dict(
family=themes["source"]["family"],
size=themes["source"]["size"],
color=themes["source"]["color"],
),
)
],
)
return layout
def get_layout_bar(themes):
layout = go.Layout(
hovermode=themes["hovermode"],
margin=dict(
l=themes["margin"]["l"],
r=themes["margin"]["r"],
t=themes["margin"]["t"],
b=themes["margin"]["b"],
),
barmode=themes["barmode"],
autosize=True,
xaxis_title=themes["axis_legend"]["x"]["title"],
xaxis=dict(
tickfont=dict(
size=themes["axis_legend"]["size"],
color=themes["axis_legend"]["color"],
),
gridcolor=themes["axis_legend"]["gridcolor"],
zerolinecolor=themes["axis_legend"]["gridcolor"],
linecolor=themes["axis_legend"]["gridcolor"],
# linewidth=2,
# mirror=True,
tickformat=themes["axis_legend"]["scale"]["linear"]["x"]["tickformat"],
type=themes["axis_legend"]["scale"]["linear"]["x"]["type"],
),
yaxis_title=themes["axis_legend"]["y"]["title"],
yaxis=dict(
tickfont=dict(
size=themes["axis_legend"]["size"],
color=themes["axis_legend"]["color"],
),
gridcolor=themes["axis_legend"]["gridcolor"],
zerolinecolor=themes["axis_legend"]["gridcolor"],
# linecolor=themes['axis_legend']['gridcolor'],
# linewidth=2,
tickformat=themes["axis_legend"]["scale"]["linear"]["y"]["tickformat"],
type=themes["axis_legend"]["scale"]["linear"]["y"]["type"],
),
font=dict(
size=themes["axis_tilte"]["size"], color=themes["axis_tilte"]["color"]
),
legend=go.layout.Legend(
x=themes["legend"]["position"]["x"],
y=themes["legend"]["position"]["y"],
xanchor=themes["legend"]["position"]["xanchor"],
yanchor=themes["legend"]["position"]["yanchor"],
traceorder=themes["legend"]["traceorder"],
orientation=themes["legend"]["orientation"],
font=dict(
family=themes["legend"]["family"],
size=themes["legend"]["size"],
color=themes["legend"]["color"],
),
bgcolor=themes["legend"]["bgcolor"],
bordercolor=themes["legend"]["bordercolor"],
borderwidth=themes["legend"]["borderwidth"],
),
height=themes["altura"],
width=themes["largura"],
paper_bgcolor=themes["paper_bgcolor"],
plot_bgcolor=themes["plot_bgcolor"],
# annotations=[
# dict(
# showarrow=False,
# text=f"<b>{themes['source']['text']}<b>",
# x=themes["source"]["position"]["x"],
# y=themes["source"]["position"]["y"],
# xref="paper",
# yref="paper",
# align="left",
# # xanchor='right',
# xshift=0,
# yshift=0,
# font=dict(
# family=themes["source"]["family"],
# size=themes["source"]["size"],
# color=themes["source"]["color"],
# ),
# )
# ],
)
return layout
| 2.453125 | 2 |
setup.py | callat-qcd/espressodb | 8 | 12776154 |
# -*- coding: utf-8 -*-
"""Setup file for EspressoDB
"""
from espressodb import __version__
__author__ = "@cchang5, @ckoerber"
from os import path
from setuptools import setup, find_packages
CWD = path.abspath(path.dirname(__file__))
with open(path.join(CWD, "README.md"), encoding="utf-8") as inp:
LONG_DESCRIPTION = inp.read()
with open(path.join(CWD, "requirements.txt"), encoding="utf-8") as inp:
REQUIREMENTS = [el.strip() for el in inp.read().split(",")]
with open(path.join(CWD, "requirements-dev.txt"), encoding="utf-8") as inp:
REQUIREMENTS_DEV = [el.strip() for el in inp.read().split(",")]
setup(
name="espressodb",
python_requires=">=3.6",
version=__version__,
description="Science database interface using Django as the content manager.",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
url="https://github.com/callat-qcd/espressodb",
project_urls={
"Bug Reports": "https://github.com/callat-qcd/espressodb/issues",
"Source": "https://github.com/callat-qcd/espressodb",
"Documentation": "https://espressodb.readthedocs.io",
},
author=__author__,
author_email="<EMAIL>",
keywords=["Database", "Workflow", "Django"],
packages=find_packages(exclude=["docs", "tests", "example"]),
install_requires=REQUIREMENTS,
entry_points={"console_scripts": ["espressodb=espressodb.manage:main"]},
extras_require={"dev": REQUIREMENTS_DEV},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Topic :: Database :: Database Engines/Servers",
"Topic :: Database",
"Topic :: Scientific/Engineering",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.0",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: BSD License",
],
include_package_data=True,
)
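# Illustrative usage note (not part of the original file): from the repository root,
# a local editable install and a source/wheel build look like
#   pip install -e .
#   python setup.py sdist bdist_wheel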
| 1.625 | 2 |
parsers/Interrupts.py | ondrejholecek/fortimonitor | 9 | 12776155 |
from EasyParser import EasyParser
import re
import time
# FG1K2D-2 # diag hardware sysinfo interrupts
# CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7
# 0: 36 0 0 0 0 0 0 0 IO-APIC-edge timer
# 2: 0 0 0 0 0 0 0 0 XT-PIC-XT-PIC cascade
# 3: 0 3577171 0 0 0 0 0 0 IO-APIC-edge serial
# 4: 0 4688 0 0 0 0 0 0 IO-APIC-edge serial
# 8: 0 0 0 0 0 0 0 0 IO-APIC-edge rtc
# 16: 0 1832355 0 0 0 0 0 0 IO-APIC-fasteoi ehci_hcd:usb1, ehci_hcd:usb2, uhci_hcd:usb5, uhci_hcd:usb9, linux-kernel-bde, mgmt1
# 17: 0 0 3 0 0 0 0 0 IO-APIC-fasteoi uhci_hcd:usb3, uhci_hcd:usb6, mgmt2
# 18: 0 0 0 0 0 0 0 0 IO-APIC-fasteoi uhci_hcd:usb4, uhci_hcd:usb7
# 19: 0 0 0 0 0 0 0 0 IO-APIC-fasteoi uhci_hcd:usb8, net2280
# 64: 1 0 0 260298 0 0 0 0 PCI-MSI-edge ahci
# 65: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_0_vpn0
# 66: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_0_vpn1
# 67: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_0_vpn2
# 68: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_0_vpn3
# 69: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_0_kxp
# 70: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_1_vpn0
# 71: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_1_vpn1
# 72: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_1_vpn2
# 73: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_1_vpn3
# 74: 0 0 0 0 0 0 0 0 PCI-MSI-edge cp8_1_kxp
# 75: 5 1 0 0 0 0 0 0 PCI-MSI-edge np6_0-tx-rx0
# 76: 0 1 5 0 0 0 0 0 PCI-MSI-edge np6_0-tx-rx1
# 77: 0 0 1 0 5 0 0 0 PCI-MSI-edge np6_0-tx-rx2
# 78: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_0-err0
# 79: 0 0 17 0 0 0 0 0 PCI-MSI-edge np6_0-nturbo-tx-rx0
# 80: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_0-nturbo-err0
# 81: 16418964 0 0 1 0 0 0 0 PCI-MSI-edge np6_0-nturbo-ips-0
# 82: 0 16141636 0 1 0 0 0 0 PCI-MSI-edge np6_0-nturbo-ips-1
# 83: 0 0 0 14991882 0 0 0 0 PCI-MSI-edge np6_0-nturbo-ips-2
# 84: 0 0 0 1 15879562 0 0 0 PCI-MSI-edge np6_0-nturbo-ips-3
# 85: 0 0 0 0 1 16707050 0 0 PCI-MSI-edge np6_0-nturbo-ips-4
# 86: 0 0 0 0 1 0 16444822 0 PCI-MSI-edge np6_0-nturbo-ips-5
# 87: 0 0 0 0 1 0 0 16581448 PCI-MSI-edge np6_0-nturbo-ips-6
# 88: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_0-nturbo-ips-7
# 89: 0 0 0 0 0 1 7 0 PCI-MSI-edge np6_0-tx-rx3
# 90: 5 0 0 0 0 1 0 0 PCI-MSI-edge np6_0-tx-rx4
# 91: 0 0 5 0 0 1 0 0 PCI-MSI-edge np6_0-tx-rx5
# 92: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_0-err1
# 93: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_0-nturbo-err1
# 94: 207221826 0 0 0 0 0 1 0 PCI-MSI-edge np6_1-tx-rx0
# 95: 0 0 200639569 0 0 0 1 0 PCI-MSI-edge np6_1-tx-rx1
# 96: 0 0 0 0 240962811 0 1 0 PCI-MSI-edge np6_1-tx-rx2
# 97: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_1-err0
# 98: 0 1 479259756 0 0 0 0 0 PCI-MSI-edge np6_1-nturbo-tx-rx0
# 99: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_1-nturbo-err0
# 100: 0 0 1 0 0 0 240663469 0 PCI-MSI-edge np6_1-tx-rx3
# 101: 210887756 0 1 0 0 0 0 0 PCI-MSI-edge np6_1-tx-rx4
# 102: 0 0 202674599 0 0 0 0 0 PCI-MSI-edge np6_1-tx-rx5
# 103: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_1-err1
# 104: 0 0 0 0 0 0 0 0 PCI-MSI-edge np6_1-nturbo-err1
# NMI: 451378 451332 451379 451331 451379 451330 451380 451329 Non-maskable interrupts
# LOC: 27025393 27025374 27025356 27025338 27025320 27025302 27025284 27025266 Local timer interrupts
# SPU: 0 0 0 0 0 0 0 0 Spurious interrupts
# PMI: 451378 451332 451379 451331 451379 451330 451380 451329 Performance monitoring interrupts
# IWI: 0 0 0 0 0 0 0 0 IRQ work interrupts
# RES: 54764029 23029410 66355685 21516202 64664597 18859876 69639605 20136217 Rescheduling interrupts
# CAL: 1227 1315 1304 287 1295 1290 1323 1325 Function call interrupts
# TLB: 350 792 1188 1324 712 547 831 507 TLB shootdowns
# ERR: 0
# MIS: 0
# FG1K2D-2 # fnsysctl cat /proc/softirqs
# CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7
# HI: 0 0 0 0 0 0 0 0
# TIMER: 28521064 28525832 28520649 28526326 28524819 28526243 28524655 28526254
# NET_TX: 994 57592 871 518 854 502 578 462
# NET_RX: 576621254 1990912 889144076 0 350281983 2 353098308 0
# BLOCK: 476 301 193 275534 181 396 98 313
# BLOCK_IOPOLL: 0 0 0 0 0 0 0 0
# TASKLET: 14128586 1943262 12439627 1942008 9747759 1944864 9735439 1961939
# SCHED: 9818324 13579287 11060339 13505914 10051866 12468454 9796770 12164434
# HRTIMER: 0 0 0 0 0 0 0 0
# RCU: 26288609 14045430 23576147 14059434 19574070 15025426 19446047 15275527
class ParserInterrupts(EasyParser):
def prepare(self):
self.re_cpus = re.compile("^\s+CPU.*?(\d+)\s+\n")
def get(self, soft=True, hard=True, description=None):
interrupts = {}
collected_on = None
cpus = None
desc_re = None
if description != None:
desc_re = re.compile(description)
if hard:
hw = self.get_real('hard')
interrupts.update(hw['interrupts'])
collected_on = hw['collected_on']
cpus = hw['cpus']
if soft:
sw = self.get_real('soft')
interrupts.update(sw['interrupts'])
collected_on = sw['collected_on']
cpus = sw['cpus']
if collected_on == None or cpus == None:
raise Exception('Either soft or hard interrupts must be selected')
# filter out not matching
for irq in interrupts.keys():
if desc_re == None or desc_re.search(interrupts[irq]['description']) != None: continue
del interrupts[irq]
return {
'collected_on': collected_on,
'cpus' : cpus,
'interrupts' : interrupts,
}
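	# A usage sketch (assumes `parser` is a ParserInterrupts bound to an SSH session via EasyParser):
	#   data = parser.get(soft=False, description='np6_')
	#   data['interrupts']['75']['ticks']['total']  # summed ticks for IRQ 75 across CPUs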
def get_real(self, source):
if source == 'hard':
interrupts = self.sshc.clever_exec("diagnose hardware sysinfo interrupts")
elif source == 'soft':
interrupts = self.sshc.clever_exec("fnsysctl cat /proc/softirqs")
else:
raise Exception('Interrupts can be either "hard" or "soft"')
command_time = time.time()
result = {}
# count cpus
g = self.re_cpus.search(interrupts)
if g == None: raise Exception("Cannot count CPUs")
cpus = int(g.group(1))+1
# parse lines with entry for each cpu
tmp = "^\s*(\S+):" + "\s*(\d+)"*cpus
if source == 'hard': tmp += "\s+(.*?)[\r]*$"
re_interrupt = re.compile(tmp, re.M)
for iline in re_interrupt.findall(interrupts):
if source == 'hard':
try: int(iline[0])
except ValueError: itype = 'other'
else: itype = 'numeric'
if itype == 'numeric':
tmp = iline[-1].split(None, 1)
trigger = tmp[0]
desc = tmp[1]
elif itype == 'other':
trigger = 'other'
desc = iline[-1]
elif source == 'soft':
itype = 'soft'
trigger = 'other'
if iline[0] == 'NET_RX':
desc = 'Incoming packets (NAPI)'
elif iline[0] == 'NET_TX':
desc = 'Outgoing packets (NAPI)'
elif iline[0] == 'HI':
desc = 'High priority tasklet'
elif iline[0] == 'TASKLET':
desc = 'Normal priority tasklet'
elif iline[0] == 'TIMER':
desc = 'Normal timer'
elif iline[0] == 'HRTIMER':
desc = 'High-resolution timer'
elif iline[0] == 'RCU':
desc = 'RCU locking'
elif iline[0] == 'SCHED':
desc = 'Scheduler'
elif iline[0] in ('BLOCK', 'BLOCK_IOPOLL'):
desc = 'Block device (disk)'
else:
desc = 'softirq'
ticks = {'total':0}
for i in range(cpus):
ticks[i] = int(iline[1+i])
ticks['total'] += ticks[i]
result[iline[0]] = {
'type' : itype,
'trigger' : trigger,
'description' : desc,
'ticks' : ticks,
'source' : source,
}
# parse lines with single cpu column
re_single = re.compile('^\s*(ERR|MIS):\s*(\d+)', re.M)
for single in re_single.findall(interrupts):
ticks = {'total': int(single[1])}
for i in range(cpus): ticks[i] = ticks['total']
result[single[0]] = {
'type' : 'single',
'trigger' : 'other',
'description' : 'unknown',
'ticks' : ticks,
'source' : source,
}
return {
'collected_on': command_time,
'cpus' : cpus,
'interrupts' : result,
}
| 1.75
| 2
|
pytorch_yolo_v1/utils/torch_utils.py
|
ldylab/learning_yolo_family_with_pytorch
| 0
|
12776156
|
<filename>pytorch_yolo_v1/utils/torch_utils.py<gh_stars>0
import torch
def load_match_dict(model, model_path):
    # model: a single-GPU model; load the state dict before wrapping it with nn.DataParallel
pretrain_dict = torch.load(model_path)
model_dict = model.state_dict()
    # the pretrained dict may come from a multi-GPU model; strip the DataParallel '.module' prefix
pretrain_dict = {k.replace('.module', ''): v for k, v in pretrain_dict.items()}
# 1. filter out unnecessary keys
pretrain_dict = {k: v for k, v in pretrain_dict.items() if
k in model_dict and v.shape == model_dict[k].shape}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrain_dict)
# 3. load the new state dict
model.load_state_dict(model_dict)
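# A minimal usage sketch (the network class and checkpoint path are hypothetical):
#   net = MyYoloV1()                       # any single-GPU nn.Module
#   load_match_dict(net, 'pretrain.pth')   # call before wrapping with nn.DataParallel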
| 2.375
| 2
|
rl_algorithms/a2c/agent.py
|
mshukor/DSACfD
| 1
|
12776157
|
# -*- coding: utf-8 -*-
"""1-Step Advantage Actor-Critic agent for episodic tasks in OpenAI Gym.
- Author: <NAME>
- Contact: <EMAIL>
"""
import argparse
from typing import Tuple
import gym
import numpy as np
import torch
import wandb
from rl_algorithms.common.abstract.agent import Agent
from rl_algorithms.common.helper_functions import numpy2floattensor
from rl_algorithms.registry import AGENTS, build_learner
from rl_algorithms.utils.config import ConfigDict
@AGENTS.register_module
class A2CAgent(Agent):
"""1-Step Advantage Actor-Critic interacting with environment.
Attributes:
env (gym.Env): openAI Gym environment
args (argparse.Namespace): arguments including hyperparameters and training settings
hyper_params (ConfigDict): hyper-parameters
network_cfg (ConfigDict): config of network for training agent
optim_cfg (ConfigDict): config of optimizer
state_dim (int): state size of env
action_dim (int): action size of env
actor (nn.Module): policy model to select actions
critic (nn.Module): critic model to evaluate states
actor_optim (Optimizer): optimizer for actor
critic_optim (Optimizer): optimizer for critic
episode_step (int): step number of the current episode
i_episode (int): current episode number
transition (list): recent transition information
"""
def __init__(
self,
env: gym.Env,
env_info: ConfigDict,
args: argparse.Namespace,
hyper_params: ConfigDict,
learner_cfg: ConfigDict,
log_cfg: ConfigDict,
):
"""Initialize."""
Agent.__init__(self, env, env_info, args, log_cfg)
self.transition: list = list()
self.episode_step = 0
self.i_episode = 0
self.hyper_params = hyper_params
self.learner_cfg = learner_cfg
self.learner_cfg.args = self.args
self.learner_cfg.env_info = self.env_info
self.learner_cfg.hyper_params = self.hyper_params
self.learner_cfg.log_cfg = self.log_cfg
self.learner = build_learner(self.learner_cfg)
def select_action(self, state: np.ndarray) -> torch.Tensor:
"""Select an action from the input space."""
state = numpy2floattensor(state, self.learner.device)
selected_action, dist = self.learner.actor(state)
if self.args.test:
selected_action = dist.mean
else:
predicted_value = self.learner.critic(state)
log_prob = dist.log_prob(selected_action).sum(dim=-1)
self.transition = []
self.transition.extend([log_prob, predicted_value])
return selected_action
def step(self, action: torch.Tensor) -> Tuple[np.ndarray, np.float64, bool, dict]:
"""Take an action and return the response of the env."""
action = action.detach().cpu().numpy()
next_state, reward, done, info = self.env.step(action)
if not self.args.test:
done_bool = done
if self.episode_step == self.args.max_episode_steps:
done_bool = False
self.transition.extend([next_state, reward, done_bool])
return next_state, reward, done, info
def write_log(self, log_value: tuple):
i, score, policy_loss, value_loss = log_value
total_loss = policy_loss + value_loss
print(
"[INFO] episode %d\tepisode step: %d\ttotal score: %d\n"
"total loss: %.4f\tpolicy loss: %.4f\tvalue loss: %.4f\n"
% (i, self.episode_step, score, total_loss, policy_loss, value_loss)
)
if self.args.log:
wandb.log(
{
"total loss": total_loss,
"policy loss": policy_loss,
"value loss": value_loss,
"score": score,
}
)
def train(self):
"""Train the agent."""
# logger
if self.args.log:
self.set_wandb()
# wandb.watch([self.actor, self.critic], log="parameters")
for self.i_episode in range(1, self.args.episode_num + 1):
state = self.env.reset()
done = False
score = 0
policy_loss_episode = list()
value_loss_episode = list()
self.episode_step = 0
while not done:
if self.args.render and self.i_episode >= self.args.render_after:
self.env.render()
action = self.select_action(state)
next_state, reward, done, _ = self.step(action)
self.episode_step += 1
policy_loss, value_loss = self.learner.update_model(self.transition)
policy_loss_episode.append(policy_loss)
value_loss_episode.append(value_loss)
state = next_state
score += reward
# logging
policy_loss = np.array(policy_loss_episode).mean()
value_loss = np.array(value_loss_episode).mean()
log_value = (self.i_episode, score, policy_loss, value_loss)
self.write_log(log_value)
if self.i_episode % self.args.save_period == 0:
self.learner.save_params(self.i_episode)
self.interim_test()
# termination
self.env.close()
self.learner.save_params(self.i_episode)
self.interim_test()
| 2.390625
| 2
|
simulation/src/launch_tools/scripts/launch_tools/services_timer.py
|
LeonardII/KitCarFork
| 0
|
12776158
|
<gh_stars>0
#!/usr/bin/env python
"""Copyright (c) 2013, Systems, Robotics and Vision Group University of the Balearican
Islands All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Systems, Robotics and Vision Group, University of
the Balearican Islands nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import roslib
roslib.load_manifest("launch_tools")
import sys
import threading
import rospy
import rosservice
## Class for calling a service using a timer.
class TimedService(threading.Thread):
## The constructor
# @param self The object pointer.
# @param name The service name this class is going to call
    # @param period The desired timer period
def __init__(self, name, period):
threading.Thread.__init__(self)
self._service_name = name
self._service_period = period
## Run function required by threading library
def run(self):
rospy.wait_for_service(self._service_name)
rospy.Timer(rospy.Duration(self._service_period), self.callback)
rospy.loginfo(
"Initialized timer for service: \n\t* Name: %s\n\t* Period: %f ",
self._service_name,
self._service_period,
)
## Timer callback
# @param event The event that has generated this callback
def callback(self, event):
rospy.wait_for_service(self._service_name)
service_class = rosservice.get_service_class_by_name(self._service_name)
try:
service = rospy.ServiceProxy(self._service_name, service_class)
service()
rospy.loginfo("Service %s called.", self._service_name)
        except rospy.ServiceException as e:
rospy.logwarn("Service %s call failed: %s", self._service_name, e)
## @var _service_name
# The service name going to be called
_service_name = "service"
## @var _service_period
# The timer period to call the service
_service_period = 1.0
## Print usage for people who do not deserve to use this awesome python node.
def usage():
return "%s service period [service period ...]" % sys.argv[0]
## main function
if __name__ == "__main__":
rospy.init_node("services_timer")
if len(sys.argv) >= 3:
names = sys.argv[1 : len(sys.argv) : 2]
periods = sys.argv[2 : len(sys.argv) : 2]
rospy.loginfo("names : %s", names)
rospy.loginfo("periods : %s", periods)
ts_list = []
for name, period in zip(names, periods):
ts_list.append(TimedService(str(name), float(period)))
for ts in ts_list:
ts.start()
else:
rospy.loginfo(usage())
sys.exit(1)
rospy.spin()
| 1.726563
| 2
|
ListaDeExercicios/Exercicio12.py
|
LucasAlmeida0/Estudos
| 0
|
12776159
|
# 12 Given a person's height as input, build an algorithm that computes their ideal weight using the formula: (72.7 * height) - 58
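# Example: for a height of 1.80 m the ideal weight is (72.7 * 1.80) - 58 = 72.86.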
Altura = float(input("Enter your height: "))
PesoIdeal = (72.7 * Altura) - 58
print("Your ideal weight is {:.2f}".format(PesoIdeal))
| 3.609375
| 4
|
src/autostory/map_generators.py
|
gustavospiess/2021-1-JD-Eq1
| 0
|
12776160
|
from typing import NamedTuple
from random import choice, randint, shuffle
__doc__ = '''
This module is used to generate the graph of a game map.
The graph is divided into partitions in such a way that between any two
partitions there is at most one edge.
The partitions are linked in a tree structure: internally each partition can be
any ordinary graph, but between two partitions there is only one possible walk.
Every edge linking two partitions `a` and `b` is considered locked, and its key
is guaranteed to be in a partition bigger than min(a, b); this way, navigation
starting from the last partition can reach every vertex.
'''
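# A minimal usage sketch of raw() defined below (sizes are illustrative):
#   m = raw(size=3, size_factor=4)
#   assert m.initial in m.vertexes and m.final in m.vertexes
#   assert all(key.position not in key.door for key in m.keys)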
class Raw(NamedTuple):
vertexes: set
edges: set
keys: set
initial: 'Vertex'
final: 'Vertex'
class Vertex(NamedTuple):
area: int
sub_area: int
@property
def identifier(self) -> str:
return f'{self.area}_{self.sub_area}'
class Edge(NamedTuple):
origin: 'Vertex'
destin: 'Vertex'
class Key(NamedTuple):
position: 'Vertex'
door: 'Edge'
def raw(size = 3, size_factor = 4) -> Raw:
if not size or size < 3:
size = 3
if not size_factor or size_factor < 4:
size_factor = 4
vertexes = [Vertex(0, 0)]
edges = []
keys = []
for area_id in range(1, size):
vertexes.append(Vertex(area_id, 0))
minimum_sub_size = size_factor//2+1
maximum_sub_size = size_factor*2-1
sub_size = randint(minimum_sub_size, maximum_sub_size)
for sub_area_id in range(1, sub_size):
new_vertex = Vertex(area_id, sub_area_id)
minimum_connection = 1
maximum_connection = min(sub_area_id, 3)
connection_amount = randint(minimum_connection, maximum_connection)
for connection_id in range(connection_amount):
edges.append(Edge(
new_vertex,
choice(tuple(v for v in vertexes if v.area == area_id))
))
vertexes.append(new_vertex)
for area_id in range(0, size-1):
previous = [area_id + 1, randint(min(area_id+1, size-1), size-1)]
shuffle(previous)
key_area, door_area = previous
new_edge = Edge(
choice(tuple(v for v in vertexes if v.area == door_area)),
choice(tuple(v for v in vertexes if v.area == area_id)),
)
new_key = Key(
choice(tuple(v for v in vertexes if v.area == key_area and v not in new_edge)),
new_edge,
)
edges.append(new_edge)
keys.append(new_key)
return Raw(
vertexes = set(vertexes),
edges = set(edges),
keys = set(keys),
initial = vertexes[-1],
final = vertexes[0])
| 3.90625
| 4
|
network/setup.py
|
splitstrument/training
| 4
|
12776161
|
from setuptools import setup, find_packages
setup(name='unmix', version='1.0', packages=find_packages())
| 1.21875
| 1
|
contrib/aws/awsexecutor.py
|
lachnerm/benchexec
| 0
|
12776162
|
# BenchExec is a framework for reliable benchmarking.
# This file is part of BenchExec.
#
# Copyright (C) <NAME>
#
# SPDX-License-Identifier: Apache-2.0
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import io
import json
import logging
import os
import requests
import shutil
import sys
import time
import zipfile
import benchexec.util
from benchexec.model import MEMLIMIT, TIMELIMIT, CORELIMIT
sys.dont_write_bytecode = True # prevent creation of .pyc files
REQUEST_URL = {
"create": "{0}{1}/execution/create",
"upload": "{0}{1}/upload/{2}?file={3}",
"launch": "{0}{1}/execution/{2}/launch?verifier={3}&verifierS3={4}&tasks={5}&tasksS3={6}&commands={7}",
"progress": "{0}{1}/execution/{2}/progress",
"results": "{0}{1}/execution/{2}/results",
"clean": "{0}{1}/clean",
}
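# For illustration, with a hypothetical endpoint and token the "create" entry expands to:
#   REQUEST_URL["create"].format("https://aws.example.org/", "MYTOKEN")
#   -> "https://aws.example.org/MYTOKEN/execution/create"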
DEFAULT_CLOUD_TIMELIMIT = 300 # s
DEFAULT_CLOUD_MEMLIMIT = None
DEFAULT_CLOUD_MEMORY_REQUIREMENT = 7000000000 # 7 GB
DEFAULT_CLOUD_CPUCORE_REQUIREMENT = 2 # one core with hyperthreading
DEFAULT_CLOUD_CPUMODEL_REQUIREMENT = "" # empty string matches every model
STOPPED_BY_INTERRUPT = False
def init(config, benchmark):
benchmark.executable = benchmark.tool.executable()
benchmark.tool_version = benchmark.tool.version(benchmark.executable)
logging.info("Using %s version %s.", benchmark.tool_name, benchmark.tool_version)
def get_system_info():
return None
def execute_benchmark(benchmark, output_handler):
(toolpaths, awsInput) = getAWSInput(benchmark)
with open(benchmark.config.aws_config, "r") as conf_file:
conf = json.load(conf_file)[0]
aws_endpoint = conf["Endpoint"]
aws_token = conf["UserToken"]
try:
logging.info("Building archive files for the verifier-tool and the tasks...")
verifier_arc_name = benchmark.tool_name + "_" + benchmark.instance + ".zip"
verifier_arc_path = _createArchiveFile(
verifier_arc_name, toolpaths["absBaseDir"], toolpaths["absToolpaths"],
)
tasks_arc_name = "tasks_" + benchmark.instance + ".zip"
tasks_arc_path = _createArchiveFile(
tasks_arc_name, toolpaths["absBaseDir"], toolpaths["absSourceFiles"],
)
start_time = benchexec.util.read_local_time()
logging.info("Waiting for the AWS EC2-instance to set everything up...")
# Create
http_request = requests.get(
REQUEST_URL["create"].format(aws_endpoint, aws_token)
)
_exitWhenRequestFailed(http_request)
msg = http_request.json()
if (
msg.get("message") is not None
and msg.get("message") == "Token not authorized."
):
sys.exit("Invalid token submitted: " + aws_token)
requestId = msg["requestId"]
# Upload verifier
http_request = requests.get(
REQUEST_URL["upload"].format(
aws_endpoint, aws_token, requestId, verifier_arc_name
)
)
_exitWhenRequestFailed(http_request)
msg = http_request.json()
(verifier_uploadUrl, verifier_s3_key, verifier_aws_public_url) = (
msg["uploadUrl"],
msg["S3Key"],
msg["publicURL"],
)
        with open(verifier_arc_path, "rb") as arc_file:
            payload = arc_file.read()
headers = {"Content-Type": "application/zip"}
http_request = requests.request(
"PUT", verifier_uploadUrl, headers=headers, data=payload
)
_exitWhenRequestFailed(http_request)
# Upload tasks
http_request = requests.get(
REQUEST_URL["upload"].format(
aws_endpoint, aws_token, requestId, tasks_arc_name
)
)
_exitWhenRequestFailed(http_request)
msg = http_request.json()
(tasks_uploadUrl, tasks_s3_key, tasks_aws_public_url) = (
msg["uploadUrl"],
msg["S3Key"],
msg["publicURL"],
)
        with open(tasks_arc_path, "rb") as arc_file:
            payload = arc_file.read()
headers = {"Content-Type": "application/zip"}
http_request = requests.request(
"PUT", tasks_uploadUrl, headers=headers, data=payload
)
_exitWhenRequestFailed(http_request)
# Launch
http_request = requests.get(
REQUEST_URL["launch"].format(
aws_endpoint,
aws_token,
requestId,
verifier_aws_public_url,
verifier_s3_key,
tasks_aws_public_url,
tasks_s3_key,
json.dumps(awsInput),
)
)
_exitWhenRequestFailed(http_request)
# Progress
logging.info(
"Executing RunExec on the AWS workers. Depending on the size of the tasks, this might take a while."
)
progress_url = REQUEST_URL["progress"].format(
aws_endpoint, aws_token, requestId
)
initialized = False
# Give the ec2-instance some time for instantiation
while not initialized:
http_request = requests.get(progress_url)
_exitWhenRequestFailed(http_request)
msg = http_request.json()
if (
msg["message"] == "Internal server error"
or msg["instancesNotTerminatedTotal"] > 0
):
logging.info("waiting...")
time.sleep(10)
continue
initialized = True
logging.info("Done. Collecting the results back from AWS.")
# Results
http_request = requests.get(
REQUEST_URL["results"].format(aws_endpoint, aws_token, requestId)
)
_exitWhenRequestFailed(http_request)
for url in http_request.json()["urls"]:
logging.debug("Downloading file from url: %s", url)
result_file = requests.get(url)
zipfile.ZipFile(io.BytesIO(result_file.content)).extractall(
benchmark.log_folder
)
except KeyboardInterrupt:
stop()
finally:
if os.path.exists(verifier_arc_path):
os.remove(verifier_arc_path)
if os.path.exists(tasks_arc_path):
os.remove(tasks_arc_path)
if STOPPED_BY_INTERRUPT:
output_handler.set_error("interrupted")
end_time = benchexec.util.read_local_time()
handleCloudResults(benchmark, output_handler, start_time, end_time)
# Clean
requests.get(REQUEST_URL["clean"].format(aws_endpoint, aws_token))
def stop():
global STOPPED_BY_INTERRUPT
STOPPED_BY_INTERRUPT = True
def _exitWhenRequestFailed(http_request):
if http_request.status_code != 200:
sys.exit(
"Http-request failed (Server responded with status code: {0}).".format(
http_request.status_code
)
)
def getAWSInput(benchmark):
(
requirements,
numberOfRuns,
limitsAndNumRuns,
runDefinitions,
sourceFiles,
) = getBenchmarkData(benchmark)
(workingDir, toolpaths) = getToolData(benchmark)
absWorkingDir = os.path.abspath(workingDir)
absToolpaths = list(map(os.path.abspath, toolpaths))
absSourceFiles = list(map(os.path.abspath, sourceFiles))
absBaseDir = benchexec.util.common_base_dir(absSourceFiles + absToolpaths)
if absBaseDir == "":
sys.exit("No common base dir found.")
toolpaths = {
"absBaseDir": absBaseDir,
"workingDir": workingDir,
"absWorkingDir": absWorkingDir,
"toolpaths": toolpaths,
"absToolpaths": absToolpaths,
"sourceFiles": sourceFiles,
"absSourceFiles": absSourceFiles,
}
awsInput = {
"requirements": requirements,
"workingDir": os.path.relpath(absWorkingDir, absBaseDir),
}
if benchmark.result_files_patterns:
if len(benchmark.result_files_patterns) > 1:
sys.exit("Multiple result-files patterns not supported in cloud mode.")
awsInput.update({"resultFilePatterns": benchmark.result_files_patterns[0]})
awsInput.update({"limitsAndNumRuns": limitsAndNumRuns})
awsInput.update({"runDefinitions": runDefinitions})
return (toolpaths, awsInput)
def _zipdir(path, zipfile, absBaseDir):
for root, dirs, files in os.walk(path):
for file in files:
filepath = os.path.join(root, file)
zipfile.write(filepath, os.path.relpath(filepath, absBaseDir))
def _createArchiveFile(archive_name, absBaseDir, abs_paths):
archive_path = os.path.join(absBaseDir, archive_name)
if os.path.isfile(archive_path):
sys.exit(
"Zip file already exists: '{0}'; not going to overwrite it.".format(
os.path.normpath(archive_path)
)
)
zipf = zipfile.ZipFile(archive_path, "w", zipfile.ZIP_DEFLATED)
for file in abs_paths:
if not os.path.exists(file):
zipf.close()
if os.path.isfile(archive_path):
os.remove(archive_path)
sys.exit(
"Missing file '{0}', cannot run benchmark without it.".format(
os.path.normpath(file)
)
)
if os.path.isdir(file):
_zipdir(file, zipf, absBaseDir)
else:
zipf.write(file, os.path.relpath(file, absBaseDir))
zipf.close()
return archive_path
def getBenchmarkData(benchmark):
# get requirements
r = benchmark.requirements
requirements = {
"cpu_cores": DEFAULT_CLOUD_CPUCORE_REQUIREMENT
if r.cpu_cores is None
else r.cpu_cores,
"cpu_model": DEFAULT_CLOUD_CPUMODEL_REQUIREMENT
if r.cpu_model is None
else r.cpu_model,
"memory_in_mb": bytes_to_mb(
DEFAULT_CLOUD_MEMORY_REQUIREMENT if r.memory is None else r.memory
),
}
# get limits and number of Runs
timeLimit = benchmark.rlimits.get(TIMELIMIT, DEFAULT_CLOUD_TIMELIMIT)
memLimit = bytes_to_mb(benchmark.rlimits.get(MEMLIMIT, DEFAULT_CLOUD_MEMLIMIT))
coreLimit = benchmark.rlimits.get(CORELIMIT, None)
numberOfRuns = sum(
len(runSet.runs) for runSet in benchmark.run_sets if runSet.should_be_executed()
)
limitsAndNumRuns = {
"number_of_runs": numberOfRuns,
"time_limit_in_sec": timeLimit,
"mem_limit_in_mb": memLimit,
}
if coreLimit is not None:
limitsAndNumRuns.update({"core_limit": coreLimit})
# get Runs with args and sourcefiles
sourceFiles = set()
runDefinitions = []
for runSet in benchmark.run_sets:
if not runSet.should_be_executed():
continue
if STOPPED_BY_INTERRUPT:
break
# get runs
for run in runSet.runs:
runDefinition = {}
# wrap list-elements in quotations-marks if they contain whitespace
cmdline = ["'{}'".format(x) if " " in x else x for x in run.cmdline()]
cmdline = " ".join(cmdline)
log_file = os.path.relpath(run.log_file, benchmark.log_folder)
runDefinition.update(
{
"cmdline": cmdline,
"log_file": log_file,
"sourcefile": run.sourcefiles,
"required_files": run.required_files,
}
)
runDefinitions.append(runDefinition)
sourceFiles.update(run.sourcefiles)
sourceFiles.update(run.required_files)
if not runDefinitions:
sys.exit("Benchmark has nothing to run.")
return (requirements, numberOfRuns, limitsAndNumRuns, runDefinitions, sourceFiles)
def getToolData(benchmark):
workingDir = benchmark.working_directory()
if not os.path.isdir(workingDir):
sys.exit("Missing working directory '{0}', cannot run tool.".format(workingDir))
logging.debug("Working dir: " + workingDir)
toolpaths = benchmark.required_files()
validToolpaths = set()
for file in toolpaths:
if not os.path.exists(file):
sys.exit(
"Missing file '{0}', not runing benchmark without it.".format(
os.path.normpath(file)
)
)
validToolpaths.add(file)
return (workingDir, validToolpaths)
def bytes_to_mb(num_bytes):
    if num_bytes is None:
        return None
    return int(num_bytes / 1000 / 1000)
def handleCloudResults(benchmark, output_handler, start_time, end_time):
outputDir = benchmark.log_folder
if not os.path.isdir(outputDir) or not os.listdir(outputDir):
# outputDir does not exist or is empty
logging.warning(
"Cloud produced no results. Output-directory is missing or empty: %s",
outputDir,
)
if start_time and end_time:
usedWallTime = (end_time - start_time).total_seconds()
else:
usedWallTime = None
# write results in runs and handle output after all runs are done
executedAllRuns = True
runsProducedErrorOutput = False
for runSet in benchmark.run_sets:
if not runSet.should_be_executed():
output_handler.output_for_skipping_run_set(runSet)
continue
output_handler.output_before_run_set(runSet, start_time=start_time)
for run in runSet.runs:
filename = os.path.split(run.log_file)[1]
resultFilesDir = os.path.splitext(filename)[0]
awsFileDir = os.path.join(benchmark.log_folder, resultFilesDir)
logFile = os.path.join(awsFileDir, filename)
shutil.move(logFile, run.log_file)
dataFile = run.log_file + ".data"
shutil.move(logFile + ".data", dataFile)
errFile = run.log_file + ".stdError"
if os.path.exists(errFile):
shutil.move(logFile + ".stdError", errFile)
if os.path.isdir(awsFileDir):
if os.listdir(awsFileDir):
logging.info("Dir %s contains unhandled files", awsFileDir)
else:
os.rmdir(awsFileDir)
if os.path.exists(dataFile) and os.path.exists(run.log_file):
try:
values = parseCloudRunResultFile(dataFile)
if not benchmark.config.debug:
os.remove(dataFile)
except IOError as e:
logging.warning(
"Cannot extract measured values from output for file %s: %s",
run.identifier,
e,
)
output_handler.all_created_files.add(dataFile)
output_handler.set_error("missing results", runSet)
executedAllRuns = False
else:
output_handler.output_before_run(run)
run.set_result(values, ["host"])
output_handler.output_after_run(run)
else:
logging.warning("No results exist for file %s.", run.identifier)
output_handler.set_error("missing results", runSet)
executedAllRuns = False
if os.path.exists(run.log_file + ".stdError"):
runsProducedErrorOutput = True
# The directory structure differs between direct and webclient mode when using VCloud.
# Move all output files from "sibling of log-file" to "sibling of parent directory".
rawPath = run.log_file[: -len(".log")]
dirname, filename = os.path.split(rawPath)
vcloudFilesDirectory = rawPath + ".files"
benchexecFilesDirectory = os.path.join(
dirname[: -len(".logfiles")] + ".files", filename
)
if os.path.isdir(vcloudFilesDirectory) and not os.path.isdir(
benchexecFilesDirectory
):
shutil.move(vcloudFilesDirectory, benchexecFilesDirectory)
output_handler.output_after_run_set(
runSet, walltime=usedWallTime, end_time=end_time
)
output_handler.output_after_benchmark(STOPPED_BY_INTERRUPT)
if not executedAllRuns:
logging.warning("Some expected result files could not be found!")
if runsProducedErrorOutput and not benchmark.config.debug:
logging.warning(
"Some runs produced unexpected warnings on stderr, please check the %s files!",
os.path.join(outputDir, "*.stdError"),
)
def parseCloudRunResultFile(filePath):
def read_items():
with open(filePath, "rt") as file:
for line in file:
key, value = line.split("=", 1)
yield key, value
return parse_vcloud_run_result(read_items())
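# For illustration, the ".data" file is expected to contain key=value lines such as
# (values hypothetical):
#   cputime=1.23s
#   walltime=4.56s
#   memory=123456B
#   returnvalue=0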
def parse_vcloud_run_result(values):
result_values = collections.OrderedDict()
def parse_time_value(s):
if s[-1] != "s":
raise ValueError('Cannot parse "{0}" as a time value.'.format(s))
return float(s[:-1])
def set_exitcode(new):
if "exitcode" in result_values:
old = result_values["exitcode"]
assert (
old == new
), "Inconsistent exit codes {} and {} from VerifierCloud".format(old, new)
else:
result_values["exitcode"] = new
for key, value in values:
value = value.strip()
if key in ["cputime", "walltime"]:
result_values[key] = parse_time_value(value)
elif key == "memory":
result_values["memory"] = int(value.strip("B"))
elif key == "exitcode":
set_exitcode(benchexec.util.ProcessExitCode.from_raw(int(value)))
elif key == "returnvalue":
set_exitcode(benchexec.util.ProcessExitCode.create(value=int(value)))
elif key == "exitsignal":
set_exitcode(benchexec.util.ProcessExitCode.create(signal=int(value)))
elif (
key in ["host", "terminationreason", "cpuCores", "memoryNodes", "starttime"]
or key.startswith("blkio-")
or key.startswith("cpuenergy")
or key.startswith("energy-")
or key.startswith("cputime-cpu")
):
result_values[key] = value
elif key not in ["command", "timeLimit", "coreLimit", "memoryLimit"]:
result_values["vcloud-" + key] = value
return result_values
| 1.992188
| 2
|
tests/fork/conftest.py
|
AqualisDAO/curve-dao-contracts
| 217
|
12776163
|
<gh_stars>100-1000
import pytest
from brownie_tokens import MintableForkToken
class _MintableTestToken(MintableForkToken):
def __init__(self, address):
super().__init__(address)
@pytest.fixture(scope="session")
def MintableTestToken():
yield _MintableTestToken
@pytest.fixture(scope="module")
def USDC():
yield _MintableTestToken("<KEY>")
@pytest.fixture(scope="module")
def ThreeCRV():
yield _MintableTestToken("0x6c3F90f043a72FA612cbac8115EE7e52BDe6E490")
@pytest.fixture(scope="module")
def SUSD():
yield _MintableTestToken("0x57ab1ec28d129<PASSWORD>52df4df418<PASSWORD>a2d46d5f51")
@pytest.fixture(scope="module")
def SBTC():
yield _MintableTestToken("0xfE18be6b3Bd88A2D2A7f928d00292E7a9963CfC6")
| 1.867188
| 2
|
src/haydi/base/permutations.py
|
Kobzol/haydi
| 5
|
12776164
|
from .domain import Domain
from math import factorial
import itertools
import random
class Permutations(Domain):
def __init__(self, domain, name=None):
super(Permutations, self).__init__(name)
self._set_flags_from_domain(domain)
self.step_jumps = False # not implemented yet
self.domain = domain
def _compute_size(self):
return factorial(self.domain.size)
def create_iter(self, step=0):
        assert step == 0  # nonzero step not implemented yet
items = tuple(self.domain)
return itertools.permutations(items)
    def generate_one(self):
        # random.shuffle works in place and needs a mutable sequence, so copy first
        items = list(self.domain)
        random.shuffle(items)
        return tuple(items)
def _remap_domains(self, transformation):
return Permutations(transformation(self.domain), self.name)
| 3
| 3
|
tests/timetools_test.py
|
ziotom78/stripsim
| 0
|
12776165
|
<reponame>ziotom78/stripsim
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import unittest as ut
import os.path
import stripeline.timetools as tt
import numpy as np
class TestTimeTools(ut.TestCase):
def testSplitTimeRangeSimple(self):
'''Test split_time_range against a very simple input'''
result = tt.split_time_range(
time_length=2.0, num_of_chunks=2, sampfreq=2.0, time0=0.5)
self.assertEqual(len(result), 2)
self.assertEqual(result[0], tt.TimeChunk(
start_time=0.5, num_of_samples=2))
self.assertEqual(result[1], tt.TimeChunk(
start_time=1.5, num_of_samples=2))
def testSplitTimeRangeComplex(self):
'''Test split_time_range against a tricky input'''
result = tt.split_time_range(
time_length=10.0, num_of_chunks=4, sampfreq=1.0, time0=2.0)
self.assertEqual(len(result), 4)
self.assertEqual(result[0], tt.TimeChunk(
start_time=2.0, num_of_samples=2))
self.assertEqual(result[1], tt.TimeChunk(
start_time=5.0, num_of_samples=2))
self.assertEqual(result[2], tt.TimeChunk(
start_time=7.0, num_of_samples=2))
self.assertEqual(result[3], tt.TimeChunk(
start_time=10.0, num_of_samples=2))
class TestToiProviders(ut.TestCase):
'Test classes like ToiProvider and FitsToiProvider'
def test_split(self):
'Verify that "split_into_n" returns the expected results.'
self.assertEqual(tuple(tt.split_into_n(10, 4)), (2, 3, 2, 3))
self.assertEqual(tuple(tt.split_into_n(201, 2)), (100, 101))
def test_toi_splitting(self):
'Verify that "assign_toi_files_to_processes" returns the expected results.'
samples_per_processes = [110, 90]
fits_files = [tt.ToiFile(file_name='A.fits', num_of_samples=40),
tt.ToiFile(file_name='B.fits', num_of_samples=60),
tt.ToiFile(file_name='C.fits', num_of_samples=30),
tt.ToiFile(file_name='D.fits', num_of_samples=70)]
result = tt.assign_toi_files_to_processes(
samples_per_processes, fits_files)
self.assertEqual(len(result), 2)
self.assertEqual(len(result[0]), 3)
self.assertEqual(len(result[1]), 2)
segment0, segment1 = tuple(result)
self.assertEqual(segment0[0],
tt.ToiFileSegment(file_name='A.fits',
first_element=0,
num_of_elements=40))
self.assertEqual(segment0[1],
tt.ToiFileSegment(file_name='B.fits',
first_element=0,
num_of_elements=60))
self.assertEqual(segment0[2],
tt.ToiFileSegment(file_name='C.fits',
first_element=0,
num_of_elements=10))
self.assertEqual(segment1[0],
tt.ToiFileSegment(file_name='C.fits',
first_element=10,
num_of_elements=20))
self.assertEqual(segment1[1],
tt.ToiFileSegment(file_name='D.fits',
first_element=0,
num_of_elements=70))
def test_fits_tois(self):
'Verify that FitsToiProvider is able to load some real data from FITS files'
test_file_path = os.path.dirname(__file__)
file_names = [os.path.join(test_file_path, x) for x in ['toi_test_A.fits',
'toi_test_B.fits',
'toi_test_C.fits']]
file_layout = \
tt.FitsTableLayout(time_col=tt.FitsColumn(hdu=1, column='TIME'),
theta_col=tt.FitsColumn(hdu=2, column=0),
phi_col=tt.FitsColumn(hdu=2, column=1),
psi_col=tt.FitsColumn(hdu=2, column=2),
signal_cols=[
tt.FitsColumn(hdu=3, column='DET_Q1'),
tt.FitsColumn(hdu=3, column='DET_Q2'),
tt.FitsColumn(hdu=3, column='DET_U1'),
tt.FitsColumn(hdu=3, column='DET_U2')
])
# Create a set of FitsToiProviders, one for each MPI rank. Note that we do
# *not* really use MPI here (comm is None): we just want to check that
# the segment is loaded correctly for each rank
num_of_processes = 2
providers = [tt.FitsToiProvider(rank=i,
num_of_processes=num_of_processes,
file_names=file_names,
file_layout=file_layout,
comm=None)
for i in range(num_of_processes)]
# Check that get_time works
self.assertTrue(np.allclose(
providers[0].get_time(), np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])))
self.assertTrue(np.allclose(
providers[1].get_time(), np.array([8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0])))
# Check that get_pointings work
theta0, phi0 = providers[0].get_pointings()
theta1, phi1 = providers[1].get_pointings()
self.assertTrue(np.allclose(
theta0, np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6])))
self.assertTrue(np.allclose(
theta1, np.array([0.5, 0.4, 0.3, 0.0, 0.1, 0.2, 0.3, 0.4])))
self.assertTrue(np.allclose(
phi0, np.array([0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.0])))
self.assertTrue(np.allclose(
phi1, np.array([0.2, 0.4, 0.6, 0.0, 0.01, 0.02, 0.03, 0.04])))
# Check that get_signal works, both when passing an integer and a string
sig_from_idx = providers[0].get_signal(0)
sig_from_name = providers[0].get_signal('Q1')
self.assertTrue(np.allclose(sig_from_idx, sig_from_name))
self.assertTrue(np.allclose(
sig_from_idx, np.array([0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])))
| 2.5
| 2
|
Python/loop4.py
|
AungWinnHtut/CStutorial
| 0
|
12776166
|
<filename>Python/loop4.py
# loop4
userinput = input("Enter a letter in the range A - C : ")
while (userinput != "A") and (userinput != "a") and (userinput != "B") and (userinput != "b") and (userinput != "C") and (userinput != "c"):
userinput = input("Enter a letter in the range A-C : ")
| 3.78125
| 4
|
bleurt/score_test.py
|
yongchanghao/bleurt
| 416
|
12776167
|
<reponame>yongchanghao/bleurt
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for scoring function."""
import os
from bleurt import score
import tensorflow.compat.v1 as tf
tf.enable_eager_execution()
references = [
"An apple a day keeps the doctor away.",
"An apple a day keeps the doctor away."
]
candidates = [
"An apple a day keeps the doctor away.",
"An apple a day keeps doctors away."
]
ref_scores = [0.910811, 0.771989]
def get_test_checkpoint():
pkg = os.path.abspath(__file__)
pkg, _ = os.path.split(pkg)
ckpt = os.path.join(pkg, "test_checkpoint")
assert tf.io.gfile.exists(ckpt)
return ckpt
class ScoreTest(tf.test.TestCase):
def test_default_bleurt_score(self):
bleurt = score.BleurtScorer()
scores = bleurt.score(references=references, candidates=candidates)
self.assertLen(scores, 2)
self.assertAllClose(scores, ref_scores)
def test_positional_args_error(self):
bleurt = score.BleurtScorer()
with self.assertRaises(AssertionError):
_ = bleurt.score(references, candidates)
def test_bleurt_nulls(self):
bleurt = score.BleurtScorer()
test_references = []
test_candidates = []
scores = bleurt.score(
references=test_references, candidates=test_candidates)
self.assertLen(scores, 0)
def test_bleurt_empty(self):
bleurt = score.BleurtScorer()
test_references = [""]
test_candidates = [""]
scores = bleurt.score(
references=test_references, candidates=test_candidates)
self.assertLen(scores, 1)
def test_bleurt_score_with_checkpoint(self):
checkpoint = get_test_checkpoint()
bleurt = score.BleurtScorer(checkpoint)
scores = bleurt.score(references=references, candidates=candidates)
self.assertLen(scores, 2)
self.assertAllClose(scores, ref_scores)
def test_tf_bleurt_score_eager(self):
# Creates the TF Graph.
bleurt_ops = score.create_bleurt_ops()
tfcandidates = tf.constant(candidates)
tfreferences = tf.constant(references)
bleurt_out = bleurt_ops(references=tfreferences, candidates=tfcandidates)
# Computes the BLEURT scores.
self.assertIn("predictions", bleurt_out)
self.assertEqual(bleurt_out["predictions"].shape, (2,))
self.assertAllClose(bleurt_out["predictions"], ref_scores)
def test_tf_bleurt_positional_args_error(self):
# Creates the TF Graph.
bleurt_ops = score.create_bleurt_ops()
tfcandidates = tf.constant(candidates)
tfreferences = tf.constant(references)
with self.assertRaises(AssertionError):
_ = bleurt_ops(tfreferences, tfcandidates)
if __name__ == "__main__":
tf.test.main()
| 2.1875
| 2
|
qa/rpc-tests/preciousblock.py
|
jeffontenot/bitcoin
| 0
|
12776168
|
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test PreciousBlock code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class PreciousTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
def run_test(self):
print "Mine blocks A-B-C on Node 0"
self.nodes[0].generate(3)
assert(self.nodes[0].getblockcount() == 3)
hashC = self.nodes[0].getbestblockhash()
print "Mine competing blocks E-F-G on Node 1"
self.nodes[1].generate(3)
assert(self.nodes[1].getblockcount() == 3)
hashG = self.nodes[1].getbestblockhash()
assert(hashC != hashG)
print "Connect nodes and check no reorg occurs"
connect_nodes_bi(self.nodes,0,1)
sync_blocks(self.nodes[0:2])
assert(self.nodes[0].getbestblockhash() == hashC)
assert(self.nodes[1].getbestblockhash() == hashG)
print "Make Node0 prefer block G"
self.nodes[0].preciousblock(hashG)
assert(self.nodes[0].getbestblockhash() == hashG)
print "Make Node0 prefer block C again"
self.nodes[0].preciousblock(hashC)
assert(self.nodes[0].getbestblockhash() == hashC)
print "Make Node1 prefer block C"
self.nodes[1].preciousblock(hashC)
sync_chain(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
assert(self.nodes[1].getbestblockhash() == hashC)
print "Make Node1 prefer block G again"
self.nodes[1].preciousblock(hashG)
assert(self.nodes[1].getbestblockhash() == hashG)
print "Make Node0 prefer block G again"
self.nodes[0].preciousblock(hashG)
assert(self.nodes[0].getbestblockhash() == hashG)
print "Make Node1 prefer block C again"
self.nodes[1].preciousblock(hashC)
assert(self.nodes[1].getbestblockhash() == hashC)
print "Mine another block (E-F-G-)H on Node 0 and reorg Node 1"
self.nodes[0].generate(1)
assert(self.nodes[0].getblockcount() == 4)
sync_blocks(self.nodes[0:2])
hashH = self.nodes[0].getbestblockhash()
assert(self.nodes[1].getbestblockhash() == hashH)
print "Node1 should not be able to prefer block C anymore"
self.nodes[1].preciousblock(hashC)
assert(self.nodes[1].getbestblockhash() == hashH)
print "Mine competing blocks I-J-K-L on Node 2"
self.nodes[2].generate(4)
assert(self.nodes[2].getblockcount() == 4)
hashL = self.nodes[2].getbestblockhash()
print "Connect nodes and check no reorg occurs"
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes[0:3])
assert(self.nodes[0].getbestblockhash() == hashH)
assert(self.nodes[1].getbestblockhash() == hashH)
assert(self.nodes[2].getbestblockhash() == hashL)
print "Make Node1 prefer block L"
self.nodes[1].preciousblock(hashL)
assert(self.nodes[1].getbestblockhash() == hashL)
print "Make Node2 prefer block H"
self.nodes[2].preciousblock(hashH)
assert(self.nodes[2].getbestblockhash() == hashH)
if __name__ == '__main__':
PreciousTest().main()
| 2.375
| 2
|
pyTMD/model.py
|
tsutterley/pyTMD
| 47
|
12776169
|
#!/usr/bin/env python
u"""
model.py
Written by <NAME> (09/2021)
Retrieves tide model parameters for named tide models and
from model definition files
UPDATE HISTORY:
Written 09/2021
"""
import os
import re
import io
import copy
class model:
"""Retrieves tide model parameters for named models or
from a model definition file for use in the pyTMD tide
prediction programs
"""
def __init__(self, directory=os.getcwd(), **kwargs):
# set default keyword arguments
kwargs.setdefault('compressed',False)
kwargs.setdefault('format','netcdf')
kwargs.setdefault('verify',True)
# set initial attributes
self.atl03 = None
self.atl06 = None
self.atl07 = None
self.atl11 = None
self.atl12 = None
self.compressed = copy.copy(kwargs['compressed'])
self.constituents = None
self.description = None
self.directory = os.path.expanduser(directory)
self.format = copy.copy(kwargs['format'])
self.gla12 = None
self.grid_file = None
self.long_name = None
self.model_file = None
self.name = None
self.projection = None
self.reference = None
self.scale = None
self.type = None
self.variable = None
self.verify = copy.copy(kwargs['verify'])
self.version = None
def grid(self,m):
"""Create a model object from known tide grid files
"""
# model name
self.name = m
# select between known tide models
if (m == 'CATS0201'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'cats0201_tmd')
self.grid_file = self.pathfinder('grid_CATS')
elif (m == 'CATS2008'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'CATS2008')
self.grid_file = self.pathfinder('grid_CATS2008')
elif (m == 'CATS2008_load'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,
'CATS2008a_SPOTL_Load')
self.grid_file = self.pathfinder('grid_CATS2008a_opt')
elif (m == 'TPXO9-atlas'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas')
self.grid_file = self.pathfinder('grid_tpxo9_atlas')
self.version = 'v1'
elif (m == 'TPXO9-atlas-v2'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v2')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v2')
self.version = 'v2'
elif (m == 'TPXO9-atlas-v3'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v3')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v3')
self.version = 'v3'
elif (m == 'TPXO9-atlas-v4'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v4')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v4')
self.version = 'v4'
elif (m == 'TPXO9.1'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'TPXO9.1','DATA')
self.grid_file = self.pathfinder('grid_tpxo9')
self.version = '9.1'
elif (m == 'TPXO8-atlas'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'tpxo8_atlas')
self.grid_file = self.pathfinder('grid_tpxo8atlas_30_v1')
self.version = '8'
elif (m == 'TPXO7.2'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'TPXO7.2_tmd')
self.grid_file = self.pathfinder('grid_tpxo7.2')
self.version = '7.2'
elif (m == 'TPXO7.2_load'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'TPXO7.2_load')
self.grid_file = self.pathfinder('grid_tpxo6.2')
self.version = '7.2'
elif (m == 'AODTM-5'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'aodtm5_tmd')
self.grid_file = self.pathfinder('grid_Arc5km')
elif (m == 'AOTIM-5'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'aotim5_tmd')
self.grid_file = self.pathfinder('grid_Arc5km')
elif (m == 'AOTIM-5-2018'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'Arc5km2018')
self.grid_file = self.pathfinder('grid_Arc5km2018')
self.version = '2018'
elif (m == 'Gr1km-v2'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'greenlandTMD_v2')
self.grid_file = self.pathfinder('grid_Greenland8.v2')
self.version = 'v2'
else:
raise Exception("Unlisted tide model")
# return the model parameters
return self
def elevation(self,m):
"""Create a model object from known tidal elevation models
"""
# model name
self.name = m
# model type
self.type = 'z'
# select between known tide models
if (m == 'CATS0201'):
self.model_directory = os.path.join(self.directory,'cats0201_tmd')
self.grid_file = self.pathfinder('grid_CATS')
self.model_file = self.pathfinder('h0_CATS02_01')
self.format = 'OTIS'
self.projection = '4326'
# model description and references
self.reference = ('https://mail.esr.org/polar_tide_models/'
'Model_CATS0201.html')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'CATS2008'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'CATS2008')
self.grid_file = self.pathfinder('grid_CATS2008')
self.model_file = self.pathfinder('hf.CATS2008.out')
self.projection = 'CATS2008'
# model description and references
self.reference = ('https://www.esr.org/research/'
'polar-tide-models/list-of-polar-tide-models/cats2008/')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'CATS2008_load'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,
'CATS2008a_SPOTL_Load')
self.grid_file = self.pathfinder('grid_CATS2008a_opt')
self.model_file = self.pathfinder('h_CATS2008a_SPOTL_load')
self.projection = 'CATS2008'
# model description and references
self.reference = ('https://www.esr.org/research/'
'polar-tide-models/list-of-polar-tide-models/cats2008/')
self.atl03 = 'tide_load'
self.atl06 = 'tide_load'
self.atl07 = 'height_segment_load'
self.atl11 = 'tide_load'
self.atl12 = 'tide_load_seg'
self.gla12 = 'd_ldElv'
self.variable = 'tide_load'
self.long_name = "Load Tide"
self.description = ("Local displacement due to Ocean "
"Loading (-6 to 0 cm)")
elif (m == 'TPXO9-atlas'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas')
self.grid_file = self.pathfinder('grid_tpxo9_atlas')
model_files = ['h_q1_tpxo9_atlas_30','h_o1_tpxo9_atlas_30',
'h_p1_tpxo9_atlas_30','h_k1_tpxo9_atlas_30',
'h_n2_tpxo9_atlas_30','h_m2_tpxo9_atlas_30',
'h_s2_tpxo9_atlas_30','h_k2_tpxo9_atlas_30',
'h_m4_tpxo9_atlas_30','h_ms4_tpxo9_atlas_30',
'h_mn4_tpxo9_atlas_30','h_2n2_tpxo9_atlas_30']
self.model_file = self.pathfinder(model_files)
self.projection = '4326'
self.scale = 1.0/1000.0
self.version = 'v1'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/tides/'
'tpxo9_atlas.html')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'TPXO9-atlas-v2'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v2')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v2')
model_files = ['h_q1_tpxo9_atlas_30_v2','h_o1_tpxo9_atlas_30_v2',
'h_p1_tpxo9_atlas_30_v2','h_k1_tpxo9_atlas_30_v2',
'h_n2_tpxo9_atlas_30_v2','h_m2_tpxo9_atlas_30_v2',
'h_s2_tpxo9_atlas_30_v2','h_k2_tpxo9_atlas_30_v2',
'h_m4_tpxo9_atlas_30_v2','h_ms4_tpxo9_atlas_30_v2',
'h_mn4_tpxo9_atlas_30_v2','h_2n2_tpxo9_atlas_30_v2']
self.model_file = self.pathfinder(model_files)
self.projection = '4326'
self.scale = 1.0/1000.0
self.version = 'v2'
# model description and references
self.reference = 'https://www.tpxo.net/global/tpxo9-atlas'
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'TPXO9-atlas-v3'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v3')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v3')
model_files = ['h_q1_tpxo9_atlas_30_v3','h_o1_tpxo9_atlas_30_v3',
'h_p1_tpxo9_atlas_30_v3','h_k1_tpxo9_atlas_30_v3',
'h_n2_tpxo9_atlas_30_v3','h_m2_tpxo9_atlas_30_v3',
'h_s2_tpxo9_atlas_30_v3','h_k2_tpxo9_atlas_30_v3',
'h_m4_tpxo9_atlas_30_v3','h_ms4_tpxo9_atlas_30_v3',
'h_mn4_tpxo9_atlas_30_v3','h_2n2_tpxo9_atlas_30_v3',
'h_mf_tpxo9_atlas_30_v3','h_mm_tpxo9_atlas_30_v3']
self.model_file = self.pathfinder(model_files)
self.projection = '4326'
self.scale = 1.0/1000.0
self.version = 'v3'
# model description and references
self.reference = 'https://www.tpxo.net/global/tpxo9-atlas'
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'TPXO9-atlas-v4'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v4')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v4')
model_files = ['h_q1_tpxo9_atlas_30_v4','h_o1_tpxo9_atlas_30_v4',
'h_p1_tpxo9_atlas_30_v4','h_k1_tpxo9_atlas_30_v4',
'h_n2_tpxo9_atlas_30_v4','h_m2_tpxo9_atlas_30_v4',
'h_s2_tpxo9_atlas_30_v4','h_k2_tpxo9_atlas_30_v4',
'h_m4_tpxo9_atlas_30_v4','h_ms4_tpxo9_atlas_30_v4',
'h_mn4_tpxo9_atlas_30_v4','h_2n2_tpxo9_atlas_30_v4',
'h_mf_tpxo9_atlas_30_v4','h_mm_tpxo9_atlas_30_v4']
self.model_file = self.pathfinder(model_files)
self.projection = '4326'
self.scale = 1.0/1000.0
self.version = 'v4'
# model description and references
self.reference = 'https://www.tpxo.net/global/tpxo9-atlas'
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'TPXO9.1'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'TPXO9.1','DATA')
self.grid_file = self.pathfinder('grid_tpxo9')
self.model_file = self.pathfinder('h_tpxo9.v1')
self.projection = '4326'
self.version = '9.1'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/'
'tides/global.html')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'TPXO8-atlas'):
self.format = 'ATLAS'
self.model_directory = os.path.join(self.directory,'tpxo8_atlas')
self.grid_file = self.pathfinder('grid_tpxo8atlas_30_v1')
self.model_file = self.pathfinder('hf.tpxo8_atlas_30_v1')
self.projection = '4326'
self.version = '8'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/'
'tides/tpxo8_atlas.html')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'TPXO7.2'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'TPXO7.2_tmd')
self.grid_file = self.pathfinder('grid_tpxo7.2')
self.model_file = self.pathfinder('h_tpxo7.2')
self.projection = '4326'
self.version = '7.2'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/'
'tides/global.html')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'TPXO7.2_load'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'TPXO7.2_load')
self.grid_file = self.pathfinder('grid_tpxo6.2')
self.model_file = self.pathfinder('h_tpxo7.2_load')
self.projection = '4326'
self.version = '7.2'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/'
'tides/global.html')
self.atl03 = 'tide_load'
self.atl06 = 'tide_load'
self.atl07 = 'height_segment_load'
self.atl11 = 'tide_load'
self.atl12 = 'tide_load_seg'
self.gla12 = 'd_ldElv'
self.variable = 'tide_load'
self.long_name = "Load Tide"
self.description = ("Local displacement due to Ocean "
"Loading (-6 to 0 cm)")
elif (m == 'AODTM-5'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'aodtm5_tmd')
self.grid_file = self.pathfinder('grid_Arc5km')
self.model_file = self.pathfinder('h0_Arc5km.oce')
self.projection = 'PSNorth'
# model description and references
self.reference = ('https://www.esr.org/research/'
'polar-tide-models/list-of-polar-tide-models/'
'aodtm-5/')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'AOTIM-5'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'aotim5_tmd')
self.grid_file = self.pathfinder('grid_Arc5km')
self.model_file = self.pathfinder('h_Arc5km.oce')
self.projection = 'PSNorth'
# model description and references
self.reference = ('https://www.esr.org/research/'
'polar-tide-models/list-of-polar-tide-models/'
'aotim-5/')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'AOTIM-5-2018'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'Arc5km2018')
self.grid_file = self.pathfinder('grid_Arc5km2018')
self.model_file = self.pathfinder('h_Arc5km2018')
self.projection = 'PSNorth'
self.version = '2018'
# model description and references
self.reference = ('https://www.esr.org/research/'
'polar-tide-models/list-of-polar-tide-models/'
'aotim-5/')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'Gr1km-v2'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'greenlandTMD_v2')
self.grid_file = self.pathfinder('grid_Greenland8.v2')
self.model_file = self.pathfinder('h_Greenland8.v2')
self.projection = '3413'
self.version = 'v2'
# model description and references
self.reference = 'https://doi.org/10.1002/2016RG000546'
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'GOT4.7'):
self.format = 'GOT'
self.model_directory = os.path.join(self.directory,
'GOT4.7','grids_oceantide')
model_files = ['q1.d','o1.d','p1.d','k1.d','n2.d',
'm2.d','s2.d','k2.d','s1.d','m4.d']
self.model_file = self.pathfinder(model_files)
self.scale = 1.0/100.0
self.version = '4.7'
# model description and references
self.reference = ('https://denali.gsfc.nasa.gov/'
'personal_pages/ray/MiscPubs/'
'19990089548_1999150788.pdf')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'GOT4.7_load'):
self.format = 'GOT'
self.model_directory = os.path.join(self.directory,
'GOT4.7','grids_loadtide')
model_files = ['q1load.d','o1load.d',
'p1load.d','k1load.d','n2load.d',
'm2load.d','s2load.d','k2load.d',
's1load.d','m4load.d']
self.model_file = self.pathfinder(model_files)
self.scale = 1.0/1000.0
self.version = '4.7'
# model description and references
self.reference = ('https://denali.gsfc.nasa.gov/'
'personal_pages/ray/MiscPubs/'
'19990089548_1999150788.pdf')
self.atl03 = 'tide_load'
self.atl06 = 'tide_load'
self.atl07 = 'height_segment_load'
self.atl11 = 'tide_load'
self.atl12 = 'tide_load_seg'
self.gla12 = 'd_ldElv'
self.variable = 'tide_load'
self.long_name = "Load Tide"
self.description = ("Local displacement due to Ocean "
"Loading (-6 to 0 cm)")
elif (m == 'GOT4.8'):
self.format = 'GOT'
self.model_directory = os.path.join(self.directory,
'got4.8','grids_oceantide')
model_files = ['q1.d','o1.d','p1.d','k1.d','n2.d',
'm2.d','s2.d','k2.d','s1.d','m4.d']
self.model_file = self.pathfinder(model_files)
self.scale = 1.0/100.0
self.version = '4.8'
# model description and references
self.reference = ('https://denali.gsfc.nasa.gov/'
'personal_pages/ray/MiscPubs/'
'19990089548_1999150788.pdf')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'GOT4.8_load'):
self.format = 'GOT'
self.model_directory = os.path.join(self.directory,
'got4.8','grids_loadtide')
model_files = ['q1load.d','o1load.d',
'p1load.d','k1load.d','n2load.d',
'm2load.d','s2load.d','k2load.d',
's1load.d','m4load.d']
self.model_file = self.pathfinder(model_files)
self.scale = 1.0/1000.0
self.version = '4.8'
# model description and references
self.reference = ('https://denali.gsfc.nasa.gov/'
'personal_pages/ray/MiscPubs/'
'19990089548_1999150788.pdf')
self.atl03 = 'tide_load'
self.atl06 = 'tide_load'
self.atl07 = 'height_segment_load'
self.atl11 = 'tide_load'
self.atl12 = 'tide_load_seg'
self.gla12 = 'd_ldElv'
self.variable = 'tide_load'
self.long_name = "Load Tide"
self.description = ("Local displacement due to Ocean "
"Loading (-6 to 0 cm)")
elif (m == 'GOT4.10'):
self.format = 'GOT'
self.model_directory = os.path.join(self.directory,
'GOT4.10c','grids_oceantide')
model_files = ['q1.d','o1.d','p1.d','k1.d','n2.d',
'm2.d','s2.d','k2.d','s1.d','m4.d']
self.model_file = self.pathfinder(model_files)
self.scale = 1.0/100.0
self.version = '4.10'
# model description and references
self.reference = ('https://denali.gsfc.nasa.gov/'
'personal_pages/ray/MiscPubs/'
'19990089548_1999150788.pdf')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'GOT4.10_load'):
self.format = 'GOT'
self.model_directory = os.path.join(self.directory,
'GOT4.10c','grids_loadtide')
model_files = ['q1load.d','o1load.d',
'p1load.d','k1load.d','n2load.d',
'm2load.d','s2load.d','k2load.d',
's1load.d','m4load.d']
self.model_file = self.pathfinder(model_files)
self.scale = 1.0/1000.0
self.version = '4.10'
# model description and references
self.reference = ('https://denali.gsfc.nasa.gov/'
'personal_pages/ray/MiscPubs/'
'19990089548_1999150788.pdf')
self.atl03 = 'tide_load'
self.atl06 = 'tide_load'
self.atl07 = 'height_segment_load'
self.atl11 = 'tide_load'
self.atl12 = 'tide_load_seg'
self.gla12 = 'd_ldElv'
self.variable = 'tide_load'
self.long_name = "Load Tide"
self.description = ("Local displacement due to Ocean "
"Loading (-6 to 0 cm)")
elif (m == 'FES2014'):
self.format = 'FES'
self.model_directory = os.path.join(self.directory,
'fes2014','ocean_tide')
model_files = ['2n2.nc','eps2.nc','j1.nc','k1.nc',
'k2.nc','l2.nc','la2.nc','m2.nc','m3.nc','m4.nc',
'm6.nc','m8.nc','mf.nc','mks2.nc','mm.nc',
'mn4.nc','ms4.nc','msf.nc','msqm.nc','mtm.nc',
'mu2.nc','n2.nc','n4.nc','nu2.nc','o1.nc','p1.nc',
'q1.nc','r2.nc','s1.nc','s2.nc','s4.nc','sa.nc',
'ssa.nc','t2.nc']
self.model_file = self.pathfinder(model_files)
self.constituents = ['2n2','eps2','j1','k1','k2','l2',
'lambda2','m2','m3','m4','m6','m8','mf','mks2','mm',
'mn4','ms4','msf','msqm','mtm','mu2','n2','n4','nu2',
'o1','p1','q1','r2','s1','s2','s4','sa','ssa','t2']
self.scale = 1.0/100.0
self.version = 'FES2014'
# model description and references
self.reference = ('https://www.aviso.altimetry.fr/'
'en/data/products/auxiliary-products/'
'global-tide-fes.html')
self.atl03 = 'tide_ocean'
self.atl06 = 'tide_ocean'
self.atl07 = 'height_segment_ocean'
self.atl11 = 'tide_ocean'
self.atl12 = 'tide_ocean_seg'
self.gla12 = 'd_ocElv'
self.variable = 'tide_ocean'
self.long_name = "Ocean Tide"
self.description = ("Ocean Tides including diurnal and "
"semi-diurnal (harmonic analysis), and longer period "
"tides (dynamic and self-consistent equilibrium).")
elif (m == 'FES2014_load'):
self.format = 'FES'
self.model_directory = os.path.join(self.directory,
'fes2014','load_tide')
model_files = ['2n2.nc','eps2.nc','j1.nc','k1.nc',
'k2.nc','l2.nc','la2.nc','m2.nc','m3.nc','m4.nc',
'm6.nc','m8.nc','mf.nc','mks2.nc','mm.nc',
'mn4.nc','ms4.nc','msf.nc','msqm.nc','mtm.nc',
'mu2.nc','n2.nc','n4.nc','nu2.nc','o1.nc','p1.nc',
'q1.nc','r2.nc','s1.nc','s2.nc','s4.nc','sa.nc',
'ssa.nc','t2.nc']
self.model_file = self.pathfinder(model_files)
self.constituents = ['2n2','eps2','j1','k1','k2','l2',
'lambda2','m2','m3','m4','m6','m8','mf','mks2','mm',
'mn4','ms4','msf','msqm','mtm','mu2','n2','n4','nu2',
'o1','p1','q1','r2','s1','s2','s4','sa','ssa','t2']
self.scale = 1.0/100.0
self.version = 'FES2014'
# model description and references
self.reference = ('https://www.aviso.altimetry.fr/'
'en/data/products/auxiliary-products/'
'global-tide-fes.html')
self.atl03 = 'tide_load'
self.atl06 = 'tide_load'
self.atl07 = 'height_segment_load'
self.atl11 = 'tide_load'
self.atl12 = 'tide_load_seg'
self.gla12 = 'd_ldElv'
self.variable = 'tide_load'
self.long_name = "Load Tide"
self.description = ("Local displacement due to Ocean "
"Loading (-6 to 0 cm)")
else:
raise Exception("Unlisted tide model")
# return the model parameters
return self
def current(self,m):
"""Create a model object from known tidal current models
"""
# model name
self.name = m
# model type
self.type = ['u','v']
# select between tide models
if (m == 'CATS0201'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'cats0201_tmd')
self.grid_file = self.pathfinder('grid_CATS')
self.model_file = dict(u=self.pathfinder('UV0_CATS02_01'))
self.projection = '4326'
# model description and references
self.reference = ('https://mail.esr.org/polar_tide_models/'
'Model_CATS0201.html')
elif (m == 'CATS2008'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'CATS2008')
self.grid_file = self.pathfinder('grid_CATS2008')
self.model_file = dict(u=self.pathfinder('uv.CATS2008.out'))
self.projection = 'CATS2008'
elif (m == 'TPXO9-atlas'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas')
self.grid_file = self.pathfinder('grid_tpxo9_atlas')
model_files = {}
model_files['u'] = ['u_q1_tpxo9_atlas_30','u_o1_tpxo9_atlas_30',
'u_p1_tpxo9_atlas_30','u_k1_tpxo9_atlas_30',
'u_n2_tpxo9_atlas_30','u_m2_tpxo9_atlas_30',
'u_s2_tpxo9_atlas_30','u_k2_tpxo9_atlas_30',
'u_m4_tpxo9_atlas_30','u_ms4_tpxo9_atlas_30',
'u_mn4_tpxo9_atlas_30','u_2n2_tpxo9_atlas_30']
model_files['v'] = ['v_q1_tpxo9_atlas_30','v_o1_tpxo9_atlas_30',
'v_p1_tpxo9_atlas_30','v_k1_tpxo9_atlas_30',
'v_n2_tpxo9_atlas_30','v_m2_tpxo9_atlas_30',
'v_s2_tpxo9_atlas_30','v_k2_tpxo9_atlas_30',
'v_m4_tpxo9_atlas_30','v_ms4_tpxo9_atlas_30',
'v_mn4_tpxo9_atlas_30','v_2n2_tpxo9_atlas_30']
self.model_file = {}
for key,val in model_files.items():
self.model_file[key] = self.pathfinder(val)
self.projection = '4326'
self.scale = 1.0/100.0
self.version = 'v1'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/tides/'
'tpxo9_atlas.html')
elif (m == 'TPXO9-atlas-v2'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v2')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v2')
model_files = {}
model_files['u'] = ['u_q1_tpxo9_atlas_30_v2','u_o1_tpxo9_atlas_30_v2',
'u_p1_tpxo9_atlas_30_v2','u_k1_tpxo9_atlas_30_v2',
'u_n2_tpxo9_atlas_30_v2','u_m2_tpxo9_atlas_30_v2',
'u_s2_tpxo9_atlas_30_v2','u_k2_tpxo9_atlas_30_v2',
'u_m4_tpxo9_atlas_30_v2','u_ms4_tpxo9_atlas_30_v2',
'u_mn4_tpxo9_atlas_30_v2','u_2n2_tpxo9_atlas_30_v2']
model_files['v'] = ['v_q1_tpxo9_atlas_30_v2','v_o1_tpxo9_atlas_30_v2',
'v_p1_tpxo9_atlas_30_v2','v_k1_tpxo9_atlas_30_v2',
'v_n2_tpxo9_atlas_30_v2','v_m2_tpxo9_atlas_30_v2',
'v_s2_tpxo9_atlas_30_v2','v_k2_tpxo9_atlas_30_v2',
'v_m4_tpxo9_atlas_30_v2','v_ms4_tpxo9_atlas_30_v2',
'v_mn4_tpxo9_atlas_30_v2','v_2n2_tpxo9_atlas_30_v2']
self.model_file = {}
for key,val in model_files.items():
self.model_file[key] = self.pathfinder(val)
self.projection = '4326'
self.scale = 1.0/100.0
self.version = 'v2'
# model description and references
self.reference = 'https://www.tpxo.net/global/tpxo9-atlas'
elif (m == 'TPXO9-atlas-v3'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v3')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v3')
model_files = {}
model_files['u'] = ['u_q1_tpxo9_atlas_30_v3','u_o1_tpxo9_atlas_30_v3',
'u_p1_tpxo9_atlas_30_v3','u_k1_tpxo9_atlas_30_v3',
'u_n2_tpxo9_atlas_30_v3','u_m2_tpxo9_atlas_30_v3',
'u_s2_tpxo9_atlas_30_v3','u_k2_tpxo9_atlas_30_v3',
'u_m4_tpxo9_atlas_30_v3','u_ms4_tpxo9_atlas_30_v3',
'u_mn4_tpxo9_atlas_30_v3','u_2n2_tpxo9_atlas_30_v3']
model_files['v'] = ['v_q1_tpxo9_atlas_30_v3','v_o1_tpxo9_atlas_30_v3',
'v_p1_tpxo9_atlas_30_v3','v_k1_tpxo9_atlas_30_v3',
'v_n2_tpxo9_atlas_30_v3','v_m2_tpxo9_atlas_30_v3',
'v_s2_tpxo9_atlas_30_v3','v_k2_tpxo9_atlas_30_v3',
'v_m4_tpxo9_atlas_30_v3','v_ms4_tpxo9_atlas_30_v3',
'v_mn4_tpxo9_atlas_30_v3','v_2n2_tpxo9_atlas_30_v3']
self.model_file = {}
for key,val in model_files.items():
self.model_file[key] = self.pathfinder(val)
self.projection = '4326'
self.scale = 1.0/100.0
self.version = 'v3'
# model description and references
self.reference = 'https://www.tpxo.net/global/tpxo9-atlas'
elif (m == 'TPXO9-atlas-v4'):
self.model_directory = os.path.join(self.directory,'TPXO9_atlas_v4')
self.grid_file = self.pathfinder('grid_tpxo9_atlas_30_v4')
model_files = {}
model_files['u'] = ['u_q1_tpxo9_atlas_30_v4','u_o1_tpxo9_atlas_30_v4',
'u_p1_tpxo9_atlas_30_v4','u_k1_tpxo9_atlas_30_v4',
'u_n2_tpxo9_atlas_30_v4','u_m2_tpxo9_atlas_30_v4',
'u_s2_tpxo9_atlas_30_v4','u_k2_tpxo9_atlas_30_v4',
'u_m4_tpxo9_atlas_30_v4','u_ms4_tpxo9_atlas_30_v4',
'u_mn4_tpxo9_atlas_30_v4','u_2n2_tpxo9_atlas_30_v4']
model_files['v'] = ['v_q1_tpxo9_atlas_30_v4','v_o1_tpxo9_atlas_30_v4',
'v_p1_tpxo9_atlas_30_v4','v_k1_tpxo9_atlas_30_v4',
'v_n2_tpxo9_atlas_30_v4','v_m2_tpxo9_atlas_30_v4',
'v_s2_tpxo9_atlas_30_v4','v_k2_tpxo9_atlas_30_v4',
'v_m4_tpxo9_atlas_30_v4','v_ms4_tpxo9_atlas_30_v4',
'v_mn4_tpxo9_atlas_30_v4','v_2n2_tpxo9_atlas_30_v4']
self.model_file = {}
for key,val in model_files.items():
self.model_file[key] = self.pathfinder(val)
self.projection = '4326'
self.scale = 1.0/100.0
self.version = 'v4'
# model description and references
self.reference = 'https://www.tpxo.net/global/tpxo9-atlas'
elif (m == 'TPXO9.1'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'TPXO9.1')
self.grid_file = self.pathfinder('grid_tpxo9')
self.model_file = dict(u=self.pathfinder('u_tpxo9.v1'))
self.projection = '4326'
self.version = '9.1'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/tides/'
'global.html')
elif (m == 'TPXO8-atlas'):
self.format = 'ATLAS'
self.model_directory = os.path.join(self.directory,'tpxo8_atlas')
self.grid_file = self.pathfinder('grid_tpxo8atlas_30_v1')
self.model_file = dict(u=self.pathfinder('uv.tpxo8_atlas_30_v1'))
self.projection = '4326'
self.version = '8'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/tides/'
'tpxo8_atlas.html')
elif (m == 'TPXO7.2'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'TPXO7.2_tmd')
self.grid_file = self.pathfinder('grid_tpxo7.2')
self.model_file = dict(u=self.pathfinder('u_tpxo7.2'))
self.projection = '4326'
self.version = '7.2'
# model description and references
self.reference = ('http://volkov.oce.orst.edu/tides/'
'global.html')
elif (m == 'AODTM-5'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'aodtm5_tmd')
self.grid_file = self.pathfinder('grid_Arc5km')
self.model_file = dict(u=self.pathfinder('UV0_Arc5km'))
self.projection = 'PSNorth'
# model description and references
self.reference = ('https://www.esr.org/research/'
'polar-tide-models/list-of-polar-tide-models/'
'aodtm-5/')
elif (m == 'AOTIM-5'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'aotim5_tmd')
self.grid_file = self.pathfinder('grid_Arc5km')
self.model_file = dict(u=self.pathfinder('UV_Arc5km'))
self.projection = 'PSNorth'
# model description and references
self.reference = ('https://www.esr.org/research/'
'polar-tide-models/list-of-polar-tide-models/'
'aotim-5/')
elif (m == 'AOTIM-5-2018'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'Arc5km2018')
self.grid_file = self.pathfinder('grid_Arc5km2018')
self.model_file = dict(u=self.pathfinder('UV_Arc5km2018'))
self.projection = 'PSNorth'
self.version = '2018'
# model description and references
self.reference = ('https://www.esr.org/research/'
'polar-tide-models/list-of-polar-tide-models/'
'aotim-5/')
elif (m == 'Gr1km-v2'):
self.format = 'OTIS'
self.model_directory = os.path.join(self.directory,'greenlandTMD_v2')
self.grid_file = self.pathfinder('grid_Greenland8.v2')
self.model_file = dict(u=self.pathfinder('u_Greenland8_rot.v2'))
self.projection = '3413'
self.version = 'v2'
# model description and references
self.reference = 'https://doi.org/10.1002/2016RG000546'
elif (m == 'FES2014'):
self.format = 'FES'
model_directory = {}
model_directory['u'] = os.path.join(self.directory,
'fes2014','eastward_velocity')
model_directory['v'] = os.path.join(self.directory,
'fes2014','northward_velocity')
model_files = ['2n2.nc','eps2.nc','j1.nc','k1.nc',
'k2.nc','l2.nc','la2.nc','m2.nc','m3.nc','m4.nc',
'm6.nc','m8.nc','mf.nc','mks2.nc','mm.nc',
'mn4.nc','ms4.nc','msf.nc','msqm.nc','mtm.nc',
'mu2.nc','n2.nc','n4.nc','nu2.nc','o1.nc','p1.nc',
'q1.nc','r2.nc','s1.nc','s2.nc','s4.nc','sa.nc',
'ssa.nc','t2.nc']
self.model_file = {}
            for key,val in model_directory.items():
                self.model_directory = os.path.expanduser(val)
                self.model_file[key] = self.pathfinder(model_files)
self.constituents = ['2n2','eps2','j1','k1','k2','l2','lambda2',
'm2','m3','m4','m6','m8','mf','mks2','mm','mn4','ms4','msf',
'msqm','mtm','mu2','n2','n4','nu2','o1','p1','q1','r2','s1',
's2','s4','sa','ssa','t2']
self.scale = 1.0
self.version = 'FES2014'
# model description and references
            self.reference = ('https://www.aviso.altimetry.fr/en/data/products/'
'auxiliary-products/global-tide-fes.html')
else:
raise Exception("Unlisted tide model")
# return the model parameters
return self
@property
def gzip(self):
"""compression flag"""
return '.gz' if self.compressed else ''
@property
def suffix(self):
"""format suffix flag"""
return '.nc' if (self.format == 'netcdf') else ''
def pathfinder(self,model_file):
"""Completes file paths and appends file and gzip suffixes
"""
if isinstance(model_file,list):
output_file = [os.path.join(self.model_directory,
''.join([f,self.suffix,self.gzip])) for f in model_file]
valid = all([os.access(f, os.F_OK) for f in output_file])
elif isinstance(model_file,str):
output_file = os.path.join(self.model_directory,
''.join([model_file,self.suffix,self.gzip]))
valid = os.access(output_file, os.F_OK)
#-- check that (all) output files exist
if self.verify and not valid:
raise FileNotFoundError(output_file)
#-- return the complete output path
return output_file
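    # Illustrative sketch (paths are hypothetical): with model_directory set to
    # '/data/TPXO9_atlas_v2', format 'netcdf' and compressed True,
    # pathfinder('h_m2_tpxo9_atlas_30_v2') would return
    # '/data/TPXO9_atlas_v2/h_m2_tpxo9_atlas_30_v2.nc.gz', and raise
    # FileNotFoundError if verify is set and the file is missing.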
def from_file(self, definition_file):
"""Create a model object from an input definition file
"""
# variable with parameter definitions
parameters = {}
# Opening definition file and assigning file ID number
if isinstance(definition_file,io.IOBase):
fid = copy.copy(definition_file)
else:
fid = open(os.path.expanduser(definition_file), 'r')
# for each line in the file will extract the parameter (name and value)
for fileline in fid:
# Splitting the input line between parameter name and value
part = fileline.rstrip().split(maxsplit=1)
# filling the parameter definition variable
parameters[part[0]] = part[1]
# close the parameter file
fid.close()
# convert from dictionary to model variable
temp = self.from_dict(parameters)
# verify model name, format and type
assert temp.name
assert temp.format in ('OTIS','ATLAS','netcdf','GOT','FES')
assert temp.type
# verify necessary attributes are with model format
assert temp.model_file
# split model file into list if an ATLAS, GOT or FES file
# model files can be comma, tab or space delimited
# extract full path to tide model files
if re.search(r'[\s\,]+', temp.model_file):
temp.model_file = [os.path.expanduser(f) for f in
re.split(r'[\s\,]+',temp.model_file)]
temp.model_directory = os.path.dirname(temp.model_file[0])
else:
temp.model_file = os.path.expanduser(temp.model_file)
temp.model_directory = os.path.dirname(temp.model_file)
# extract full path to tide grid file
if temp.format in ('OTIS','ATLAS','netcdf'):
assert temp.grid_file
temp.grid_file = os.path.expanduser(temp.grid_file)
if temp.format in ('OTIS','ATLAS'):
assert temp.projection
# convert scale from string to float
if temp.format in ('netcdf','GOT','FES'):
assert temp.scale
temp.scale = float(temp.scale)
if temp.format in ('FES',):
assert temp.version
# split type into list if currents u,v
if re.search(r'[\s\,]+', temp.type):
temp.type = re.split(r'[\s\,]+',temp.type)
# convert boolean strings
if isinstance(temp.compressed,str):
temp.compressed = self.to_bool(temp.compressed)
# return the model parameters
return temp
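    # Example of the definition-file layout parsed above (values are hypothetical,
    # one whitespace-separated "parameter value" pair per line):
    #   name        GOT4.10
    #   format      GOT
    #   model_file  q1.d,o1.d,p1.d,k1.d,n2.d,m2.d,s2.d,k2.d,s1.d,m4.d
    #   type        z
    #   scale       0.01
    #   compressed  True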
def from_dict(self,d):
"""Create a model object from a python dictionary
"""
for key,val in d.items():
setattr(self,key,copy.copy(val))
# return the model parameters
return self
    def to_bool(self,val):
        """Converts strings of True/False to boolean values
        """
if val.lower() in ('y','yes','t','true','1'):
return True
elif val.lower() in ('n','no','f','false','0'):
return False
else:
raise ValueError('Invalid boolean string {0}'.format(val))
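    # Quick illustration of the accepted spellings: to_bool('Yes') -> True,
    # to_bool('0') -> False, and anything else raises ValueError.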
| 2.796875
| 3
|
src/normalisr/normalisr.py
|
lingfeiwang/normalisr
| 9
|
12776170
|
#!/usr/bin/python3
from .qc import qc_reads, qc_outlier
from .lcpm import lcpm, scaling_factor
from .norm import normcov, compute_var, normvar
from .de import de
from .coex import coex
from .binnet import binnet
from .gocovt import gotop, pccovt
assert __name__ != "__main__"
| 1.195313
| 1
|
src/hyphenator.py
|
AntiCompositeNumber/maplink-generator
| 2
|
12776171
|
#!/usr/bin/env python3
# coding: utf-8
# SPDX-License-Identifier: Apache-2.0
# Copyright 2021 AntiCompositeNumber
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import urllib.parse
import flask
import mwparserfromhell
import requests
from stdnum import isbn
bp = flask.Blueprint("hyphenator", __name__, url_prefix="/hyphenator")
flash = []
def get_wikitext(url):
wikitext_url = url + "&action=raw"
headers = {
"user-agent": "anticompositetools/hyphenator "
"(https://anticompositetools.toolforge.org/hyphenator; "
"<EMAIL>) python-requests/"
+ requests.__version__
}
for i in range(1, 5):
try:
request = requests.get(wikitext_url, headers=headers)
request.raise_for_status()
except Exception:
if request.status_code == 404:
flash.append(("That page does not exist.", "danger"))
raise
elif i == 4:
flash.append(("Unable to retrieve wikitext.", "danger"))
raise
else:
time.sleep(5 * i)
continue
else:
start_time = time.strftime("%Y%m%d%H%M%S", time.gmtime())
timestruct = time.strptime(
request.headers["Last-Modified"], "%a, %d %b %Y %H:%M:%S %Z"
)
edit_time = time.strftime("%Y%m%d%H%M%S", timestruct)
return (request.text, (edit_time, start_time))
def find_isbns(code):
for template in code.ifilter_templates():
if template.name.matches("ISBN") or template.name.matches("ISBNT"):
try:
raw_isbn = template.get("1").value.strip()
except ValueError:
continue
para = "1"
elif template.has("isbn", ignore_empty=True):
raw_isbn = template.get("isbn").value.strip()
para = "isbn"
elif template.has("ISBN", ignore_empty=True):
raw_isbn = template.get("ISBN").value.strip()
para = "ISBN"
else:
continue
yield (template, raw_isbn, para)
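# Illustrative note: find_isbns yields (template, raw_isbn, parameter_name) triples.
# For wikitext such as "{{cite book |isbn=0306406152}}" it would yield the cite
# template object, the string "0306406152", and the parameter name "isbn".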
def check_isbn(raw_isbn):
"""If the ISBN can be worked on, return True"""
if len(raw_isbn) == 17 or not isbn.is_valid(raw_isbn):
return False
else:
return True
def get_page_url(url):
parsed = urllib.parse.urlparse(url)
if parsed.path == "/w/index.php":
query_params = urllib.parse.parse_qs(parsed.query)
if "oldid" not in query_params:
title = query_params["title"][0]
else:
flash.append(("Invalid URL", "danger"))
raise ValueError # fix
elif "/wiki/" in parsed.path:
# Because some people expect invalid URLs to work anyway
title = urllib.parse.quote(urllib.parse.unquote(parsed.path[6:]), safe=":/")
else:
flash.append(("Invalid URL", "danger"))
raise ValueError # this one too
new_url = parsed.scheme + "://" + parsed.netloc + "/w/index.php?title=" + title
return new_url
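# Worked example (URL is illustrative): get_page_url("https://en.wikipedia.org/wiki/Book")
# returns "https://en.wikipedia.org/w/index.php?title=Book", which get_wikitext later
# extends with "&action=raw".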
def main(raw_url, convert=True):
url = get_page_url(raw_url)
wikitext, times = get_wikitext(url)
code = mwparserfromhell.parse(wikitext)
count = 0
for template, raw_isbn, para in find_isbns(code):
if not check_isbn(raw_isbn):
continue
new_isbn = isbn.format(raw_isbn, convert=convert)
if raw_isbn != new_isbn:
count += 1
template.add(para, new_isbn)
return code, times, count, url
@bp.route("/", methods=["GET"])
def form():
return flask.render_template("hyphenator-form.html")
@bp.route("/output", methods=["POST"])
def output():
def check_err(messages):
for message in messages:
if message[1] == "danger":
return True
return False
if flask.request.method == "POST":
pageurl = flask.request.form["page_url"]
convert = bool(flask.request.form.get("convert", False))
try:
newtext, times, count, url = main(pageurl, convert)
except Exception as err:
if not check_err(flash):
flash.append(
("An unhandled {0} exception occurred.".format(err), "danger")
)
for message in flash:
flask.flash(message[0], message[1])
return flask.redirect(flask.url_for("hyphenator.form"))
submit_url = url + "&action=submit"
return flask.render_template(
"hyphenator-output.html",
count=count,
submit_url=submit_url,
newtext=newtext,
edit_time=times[0],
start_time=times[1],
)
| 2.5
| 2
|
Scripts/1. Data collection/4.1_tag_count.py
|
NAIST-SE/Package_management_system
| 1
|
12776172
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 28 15:53:15 2020
@author: syful
"""
import xml.etree.ElementTree as et
import re
import pandas as pd
from datetime import datetime
start = datetime.now()
from tqdm.auto import tqdm
import numpy as np
from collections import Counter
import os
#Please specify your dataset directory.
os.chdir("your dataset directory")
Id=[]
CreationDate=[]
Score=[]
ViewCount=[]
Title=[]
Body=[]
Tags=[]
Tag_list=[]
df2 = pd.read_csv("syc01_PM_related_initial_post_detailed_syc.csv")
for element in df2['Tags']:
res = re.findall(r'\<(.*?)\>', element)
for tag_element in res:
Tag_list.append(tag_element)
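# Worked example of the extraction above (tag string is hypothetical):
# re.findall(r'\<(.*?)\>', '<python><pip><conda>') returns ['python', 'pip', 'conda'].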
tag=[]
count=[]
data=Counter(Tag_list)
for i in data:
tag.append(i)
count.append(data[i])
tag_counts = {'tags': tag, 'count': count}
df1 = pd.DataFrame(tag_counts)
df1.to_csv('syc02_filtered_tag.csv', header=True, index=False, encoding='utf-8')
| 2.65625
| 3
|
thefarm/daylight/daylight.py
|
harmsm/thefarm
| 0
|
12776173
|
<gh_stars>0
__description__ = \
"""
To change servers, make a subclass of DaylightServer and re-define the
_grab_from_server method.
"""
__date__ = "2017-04-12"
__author__ = "<NAME> (<EMAIL>)"
import urllib.request, json
from datetime import datetime
import logging, os
class DaylightException(Exception):
"""
Exception for this module.
"""
def __init__(self,*args,**kwargs):
logging.warning(args[0])
super().__init__(*args,**kwargs)
class DaylightServer:
"""
Get information about daylight (sunrise, sunset, etc.) from a server.
"""
def __init__(self,latitude,longitude,twilight="civil"):
"""
"""
self._latitude = latitude
self._longitude = longitude
# Deal with twilight definition
self._twilight = twilight
if self._twilight not in ["civil","nautical","astronomical"]:
err = "twilight \"{}\" not recognized. should be civil, nautical or astronomical\n".format(self._twilight)
raise DaylightException(err)
# Update
self.update()
self._icon_dict = {}
self._icon_dict["day"] = os.path.join("img","day.png")
        self._icon_dict["night"] = os.path.join("img","night.png")
def update(self):
"""
"""
try:
self._grab_from_server()
self._last_check_time = datetime.now()
except (ValueError,urllib.error.URLError) as e:
err = "Problem downloading sunrise/sunset times"
raise DaylightException(err)
except:
err = "Unknown problem updating sunrise/sunset times!"
raise DaylightException(err)
def _grab_from_server(self):
"""
Grab sunrise, sunset, twilight_begin, and twilight_end from the
sunrise-sunset.org server, storing the information as datetime.datetime
objects that can be accessed using the Daylight properties.
Sets the following (as datetime objects in current time zone):
self._twilight_begin
self._twilight_end
self._sunrise
self._sunset
"""
url_base = "https://api.sunrise-sunset.org/json?lat={:.7f}&lng={:.7f}&date=today&formatted=0"
time_format = "%Y-%m-%dT%H:%M:%S+00:00"
url = url_base.format(self._latitude,self._longitude)
# Grab the date
with urllib.request.urlopen(url) as u:
json_data = json.loads(u.read().decode())
# Make sure the read worked
if json_data['status'] != "OK":
err = "server json could not be read\n"
raise DaylightException(err)
# Parse resulting json
twilight_begin = json_data["results"]["{}_twilight_begin".format(self._twilight)]
twilight_end = json_data["results"]["{}_twilight_end".format(self._twilight)]
sunrise = json_data["results"]["sunrise"]
sunset = json_data["results"]["sunset"]
# Convert to dateime objects
utc_offset = datetime.now() - datetime.utcnow()
self._twilight_begin = datetime.strptime(twilight_begin,time_format) + utc_offset
self._twilight_end = datetime.strptime(twilight_end,time_format) + utc_offset
self._sunrise = datetime.strptime(sunrise,time_format) + utc_offset
self._sunset = datetime.strptime(sunset,time_format) + utc_offset
@property
def last_update(self):
return self._last_check_time
@property
def sunrise(self):
if self._sunrise.day != self._last_check_time.day:
self.update()
return self._sunrise
@property
def sunset(self):
if self._sunset.day != self._last_check_time.day:
self.update()
return self._sunset
@property
def twilight_begin(self):
if self._twilight_end.day != self._last_check_time.day:
self.update()
return self._twilight_begin
@property
def twilight_end(self):
if self._twilight_end.day != self._last_check_time.day:
self.update()
return self._twilight_end
@property
def web_content(self):
self.update()
        now = datetime.now()
        # night when the current time falls outside the twilight window
        state = "day"
        if now < self.twilight_begin or now > self.twilight_end:
            state = "night"
        icon = self._icon_dict[state]
# Get time and update graphic
out = []
out.append('<div class="well"><div class="row">')
# Icon
out.append('<div class="col-xs-6 col-s-3">')
out.append('<img class="img-responsive" src="{}" />'.format(icon))
out.append('</div>')
# Text
out.append('<div class="col-xs-6 col-s-9">')
out.append("Stuff about other stuff.")
out.append('</div>')
out.append('</div></div>')
return "".join(out)
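# Minimal usage sketch (coordinates are arbitrary; requires network access):
#   daylight = DaylightServer(latitude=45.5, longitude=-122.6, twilight="civil")
#   print(daylight.sunrise, daylight.sunset)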
| 2.984375
| 3
|
pwtools/test/test_extend_array.py
|
elcorto/pwtools
| 41
|
12776174
|
import numpy as np
import os
from pwtools import num, common
rand = np.random.rand
def equal(a,b):
assert (a == b).all()
def test_extend_array():
arr = rand(3,3)
nrep = 5
a0 = num.extend_array(arr, nrep, axis=0)
a1 = num.extend_array(arr, nrep, axis=1)
a2 = num.extend_array(arr, nrep, axis=2)
am1 = num.extend_array(arr, nrep, axis=-1)
assert a0.shape == (nrep,3,3)
assert a1.shape == (3,nrep,3)
assert a2.shape == (3,3,nrep)
assert am1.shape == (3,3,nrep)
equal(a2, am1)
for axis, aa in enumerate([a0, a1, a2]):
for ii in range(nrep):
# slicetake(a0, 3, 0) -> a0[3,:,:]
equal(arr, num.slicetake(aa, ii, axis=axis))
| 2.953125
| 3
|
docs/GNIB.py
|
naitao/SEProject_Group18
| 0
|
12776175
|
<gh_stars>0
#!/usr/bin/python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import multiprocessing, time, random, datetime
from selenium.webdriver.firefox.options import Options
from selenium.common.exceptions import WebDriverException
import datetime
from subprocess import call
import time, json
class GNIB:
def __init__(self):
self.today = datetime.datetime.now().day
self.__url = "https://burghquayregistrationoffice.inis.gov.ie/Website/AMSREG/AMSRegWeb.nsf/AppSelect?OpenForm"
profile = webdriver.FirefoxProfile()
#profile.set_preference("font.size.variable.x-western", 10)
#profile.set_preference("font.size.variable.x-unicode", 10)
profile.set_preference("browser.fullscreen.autohide", True)
profile.set_preference("layout.css.devPixelsPerPx", "0.8")
options = Options()
options.add_argument("--headless")
self.driver = webdriver.Firefox(firefox_options=options, firefox_profile=profile)
self.driver.maximize_window()
self.__data = {}
'''
options = webdriver.ChromeOptions()
self.driver = webdriver.Chrome(chrome_options=options)
'''
def checkMonth(self):
elements = self.driver.find_elements_by_xpath('//td[contains(@class,"day")]')
available_days = []
for i in range(len(elements)):
try:
text = elements[i].text
                # Click the particular date to check if it is clickable
elements[i].click()
time.sleep(0.5)
                # If the date is clickable, click the button to go back to the previous schedule tab
self.driver.find_element_by_xpath('//input[@id="Appdate"]').click()
time.sleep(0.5)
#print("Checking {}...".format(text))
if self.driver.find_element_by_xpath('//td[contains(@class, "active day")]').text == text:
print("{} is available!".format(text))
available_days.append(text)
elements = self.driver.find_elements_by_xpath('//td[contains(@class,"day")]')
except WebDriverException:
#print("{} Element is not clickable".format(text))
continue
#print("current id: {}, total: {}".format(i, len(elements)))
return list(set(available_days))
def checkSlot(self):
self.driver.get(self.__url)
self.driver.execute_script("document.body.style.zoom='50%'")
time.sleep(1)
# Find the element on Category selection
category_element = self.driver.find_element_by_id('Category')
category_element.click()
time.sleep(0.5)
# Choose study category
study_element = self.driver.find_element_by_xpath('//option[@value="Study"]')
study_element.click()
time.sleep(0.5)
# Find the element on Subcategory selection
sub_category_element = self.driver.find_element_by_id('SubCategory')
sub_category_element.click()
time.sleep(0.5)
# Choose Master subcategory
master_sub_category_element = self.driver.find_element_by_xpath('//select[@id="SubCategory"]//option[@value="Masters"]')
master_sub_category_element.click()
time.sleep(1)
# Date of Brith
self.driver.find_element_by_id('DOB').click()
time.sleep(1.5)
table_element = self.driver.find_element_by_xpath('//table[@class="table-condensed"]')
self.driver.find_element_by_xpath('//span[contains(text(), "2009")]').click()
time.sleep(0.5)
self.driver.find_element_by_xpath('//span[contains(text(), "Jan")]').click()
time.sleep(0.5)
self.driver.find_element_by_xpath('//td[contains(text(), "28")]').click()
time.sleep(0.5)
# Confirmation on GNIB card
self.driver.find_element_by_id('ConfirmGNIB').click()
time.sleep(0.5)
self.driver.find_element_by_xpath('//option[@value="Renewal"]').click()
# Input GNIB number
gnib_number_form_element = self.driver.find_element_by_id('GNIBNo')
gnib_number_form_element.send_keys("I78193432")
time.sleep(0.5)
# Input expiry date
expiry_form_element = self.driver.find_element_by_id('GNIBExDT')
expiry_form_element.click()
        print("Found the expiry date form!")
# click year
#self.driver.find_element_by_xpath('//div[@class="datepicker-years"]/*/span[contains(text(), "2018")]').click()
self.driver.find_element_by_xpath('//span[contains(text(), "2018")]').click()
time.sleep(0.5)
# click month
self.driver.find_element_by_xpath('//span[contains(text(), "Dec")]').click()
time.sleep(0.5)
# click day
self.driver.find_element_by_xpath('//td[contains(text(), "30")]').click()
print("Filled expiry form!")
time.sleep(0.5)
# Confirm all above
self.driver.find_element_by_id('UsrDeclaration').click()
time.sleep(0.5)
# Given name
self.driver.find_element_by_id('GivenName').send_keys("Peng")
# Surname
self.driver.find_element_by_id('SurName').send_keys("Ye")
# Nationality
self.driver.find_element_by_id('Nationality').click()
self.driver.find_element_by_xpath('//option[contains(text(), "China")]').click()
# Email
self.driver.find_element_by_id('Email').send_keys("<EMAIL>")
self.driver.find_element_by_id('EmailConfirm').send_keys("<EMAIL>")
        # Family
element = self.driver.find_element_by_id('FamAppYN')
element.click()
time.sleep(0.5)
        print("clicked Family!")
self.driver.find_element_by_xpath('//select[@id="FamAppYN"]/option[contains(text(), "No")]').click()
# passport
self.driver.find_element_by_id('PPNoYN').click()
self.driver.find_element_by_xpath('//select[@id="PPNoYN"]/option[contains(text(), "Yes")]').click()
self.driver.find_element_by_id('PPNo').send_keys("E93312971")
# Look for appointment
self.driver.find_element_by_id('btLook4App').click()
# Search for appointments by
self.driver.find_element_by_id('AppSelectChoice').click()
self.driver.find_element_by_xpath('//select[@id="AppSelectChoice"]/option[contains(text(), "specific date")]').click()
self.driver.find_element_by_id('Appdate').click()
date_element = self.driver.find_element_by_xpath('//th[@class="datepicker-switch"]')
print("Available days on {}".format(date_element.text))
text_file = "monitor.log"
currentTime = datetime.datetime.now()
days = self.checkMonth()
message = "{}: {}".format(date_element.text, days)
self.__data[date_element.text] = [currentTime, days]
if len(days) > 0:
            for day in days:
                print(day)
with open(text_file, "a") as f:
f.write("{} | {}\n".format(currentTime, message))
f.close()
time.sleep(1)
# Click next page
while True:
try:
self.driver.find_element_by_xpath('//div[@class="datepicker-days"]//th[@class="next"]').click()
date_element = self.driver.find_element_by_xpath('//th[@class="datepicker-switch"]')
print("Available days on {}".format(date_element.text))
days = self.checkMonth()
message = "{}: {}".format(date_element.text, days)
self.__data[date_element.text] = [currentTime, days]
with open(text_file, "a") as f:
f.write("{} | {}\n".format(currentTime, message))
f.close()
time.sleep(1)
except Exception:
break
'''
# Check "I am not robot
self.driver.find_element_by_xpath('//div[@class="rc-anchor-center-container"]/label[@id="recaptcha-anchor-label"]').click()
'''
self.driver.quit()
with open("/tmp/gnib.json", 'w', encoding='utf-8') as f:
            # default=str lets json serialize the datetime stamps stored in self.__data
            f.write(json.dumps(self.__data, ensure_ascii=False, default=str))
if __name__ == '__main__':
    # clean up firefox processes first
call(["pkill", "firefox"])
    print("firefox processes were cleaned up!")
time.sleep(1)
mygnib = GNIB()
mygnib.checkSlot()
| 2.421875
| 2
|
courses/python/cursoemvideo/exercicios/ex046.py
|
bdpcampos/public
| 3
|
12776176
|
import time
for i in range(10, -1, -1):
print(i)
time.sleep(1)
print(r'FOGOSSSSSSSSS!!!!!!!!! \o/ \o/ \o/ \o/ ')
| 2.9375
| 3
|
main/urls.py
|
rajeshgupta14/pathscriptfinal
| 0
|
12776177
|
<gh_stars>0
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.conf.urls.static import static
#from django.conf.urls import patterns
from django.conf import settings
from django.contrib import admin
from myapp import views as myappviews
from consultantform import views as myviews
from django.views.generic.base import TemplateView
urlpatterns = [
url(r'^admin/',admin.site.urls),#fwdslash
url(r'^home/$',myappviews.home,name="home"),
url(r'^$',myappviews.auth_view,name="auth_view"),#addedslash
url(r'^login/$',myappviews.auth_view,name="auth_view"),
url(r'^logout/$',myappviews.getout,name="getout"),
url(r'^invalid/$',myappviews.invalid_login,name="invalid_login"),
url(r'^clients/$',myappviews.clients,name="clients"),
url(r'^ram/(?P<consultantform_id>\d+)$',myviews.ram,name="ram"),
url(r'^ram1/(?P<consultantform_id>\d+)$',myviews.ram1,name="ram1"),
url(r'^ram2/(?P<consultantform_id>\d+)$',myviews.ram2,name="ram2"),
url(r'^ram3/(?P<consultantform_id>\d+)$',myviews.ram3,name="ram3"),
url(r'^ramm/(?P<checklistform_id>\d+)$',myviews.ramm,name="ramm"),
url(r'^rammm/(?P<projectform_id>\d+)$',myviews.rammm,name="rammm"),
url(r'^consultantformss/(?P<a_id>\d+)/$',myviews.consultantformss,name='consultantformss'),
url(r'^projects/(?P<a_id>\d+)$',myviews.projects,name='projects'),
url(r'^finalformss/(?P<a_id>\d+)/$',myviews.finalformss,name='finalformss'),
url(r'^consultantformss/(?P<a_id>\d+)/get/(?P<consultantform_id>\d+)/$',myviews.consultantform,name='consultantform'),
url(r'^consultantformss/(?P<a_id>\d+)/getbr/(?P<consultantform_id>\d+)/$',myviews.branchform,name='branchform'),
url(r'^consultantformss/(?P<a_id>\d+)/getsu/(?P<consultantform_id>\d+)/$',myviews.subsidiaryform,name='subsidiaryform'),
url(r'^consultantformss/(?P<a_id>\d+)/getre/(?P<consultantform_id>\d+)/$',myviews.relatedform,name='relatedform'),
url(r'^consultantformss/(?P<a_id>\d+)/getpr/(?P<consultantform_id>\d+)/$',myviews.productform,name='productform'),
url(r'^consultantformss/(?P<a_id>\d+)/gets/(?P<checklistform_id>\d+)/$',myviews.checklistform,name='checklistform'),
url(r'^consultantformss/(?P<a_id>\d+)/getp/(?P<projectform_id>\d+)/$',myviews.projectform,name='projectform'),
url(r'^consultantformss/(?P<a_id>\d+)/getf/(?P<serviceform_id>\d+)/$',myviews.serviceforms,name='serviceforms'),
url(r'^consultantformss/(?P<a_id>\d+)/get/(?P<consultantform_id>\d+)/$',myviews.consultantform,name='consultantform'),
url(r'^consultantformss/(?P<a_id>\d+)/gett/(?P<form_id>\d+)/$',myviews.finalform,name='finalform'),
url(r'^consultantformss/(?P<a_id>\d+)/getd/(?P<project_id>\d+)/(?P<dueform_id>\d+)/$',myviews.sam,name='sam'),
url(r'^consultantformss/(?P<a_id>\d+)/getsc/(?P<project_id>\d+)/(?P<scriptform_id>\d+)/$',myviews.samm,name='samm'),
url(r'^consultantformss/(?P<a_id>\d+)/getst/(?P<project_id>\d+)/(?P<strategyform_id>\d+)/$',myviews.sammm,name='sammm'),
url(r'^consultantformss/(?P<a_id>\d+)/getps/(?P<project_id>\d+)/(?P<psform_id>\d+)/$',myviews.dam,name='dam'),
url(r'^consultantformss/(?P<a_id>\d+)/getdi/(?P<project_id>\d+)/(?P<digiform_id>\d+)/$',myviews.damm,name='damm'),
url(r'^consultantformss/(?P<a_id>\d+)/getm/(?P<project_id>\d+)/(?P<miomform_id>\d+)/$',myviews.dammm,name='dammm'),
url(r'^consultantformss/(?P<a_id>\d+)/getpd/(?P<project_id>\d+)/(?P<dueform_id>\d+)/$',myviews.samp,name='samp'),
url(r'^consultantformss/(?P<a_id>\d+)/getpsc/(?P<project_id>\d+)/(?P<scriptform_id>\d+)/$',myviews.sammp,name='sammp'),
url(r'^consultantformss/(?P<a_id>\d+)/getpst/(?P<project_id>\d+)/(?P<strategyform_id>\d+)/$',myviews.sammmp,name='sammmp'),
url(r'^consultantformss/(?P<a_id>\d+)/getpps/(?P<project_id>\d+)/(?P<psform_id>\d+)/$',myviews.damp,name='damp'),
url(r'^consultantformss/(?P<a_id>\d+)/getpdi/(?P<project_id>\d+)/(?P<digiform_id>\d+)/$',myviews.dammp,name='dammp'),
url(r'^consultantformss/(?P<a_id>\d+)/getpm/(?P<project_id>\d+)/(?P<miomform_id>\d+)/$',myviews.dammmp,name='dammmp'),
url(r'^consultantformss/(?P<a_id>\d+)/create/$',myviews.create,name='create'),
url(r'^consultantformss/(?P<a_id>\d+)/create1/$',myviews.create1,name='create1'),
url(r'^consultantformss/(?P<a_id>\d+)/create2/$',myviews.create2,name='create2'),
url(r'^consultantformss/(?P<a_id>\d+)/create3/$',myviews.create3,name='create3'),
url(r'^consultantformss/(?P<a_id>\d+)/checklist/$',myviews.checklist,name='checklist'),
url(r'^consultantformss/(?P<a_id>\d+)/getf/(?P<project_id>\d+)/created$',myviews.created,name='created'),
url(r'^consultantformss/(?P<a_id>\d+)/getf/(?P<project_id>\d+)/createsc$',myviews.createsc,name='createsc'),
url(r'^consultantformss/(?P<a_id>\d+)/getf/(?P<project_id>\d+)/createst$',myviews.createst,name='createst'),
url(r'^consultantformss/(?P<a_id>\d+)/getf/(?P<project_id>\d+)/createps$',myviews.createps,name='createps'),
url(r'^consultantformss/(?P<a_id>\d+)/getf/(?P<project_id>\d+)/createdigi$',myviews.createdigi,name='createdigi'),
url(r'^consultantformss/(?P<a_id>\d+)/getf/(?P<project_id>\d+)/createmiom$',myviews.createmiom,name='createmiom'),
url(r'^500/$', TemplateView.as_view(template_name="404.html")),
url(r'^404/$', TemplateView.as_view(template_name="404.html")),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
#if settings.DEBUG:
| 2.625
| 3
|
Day-6/problem1.py
|
sanjusci/10-Days-of-Statistics
| 1
|
12776178
|
# Day 6: The Central Limit Theorem I
# Enter your code here. Read input from STDIN. Print output to STDOUT
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2019"
import math
class Day6(object):
    e = 2.71828  # Euler's number to five decimal places (not used below)
def __init__(self):
pass
# Define functions
def the_central_limit_theorem_i(self, mean, std, value):
return 0.5 * (1 + math.erf((value - mean) / (std * (2 ** 0.5))))
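    # Sanity check with sample values (not part of this file): mean=205, std=15,
    # n=49 and max_weight=9800 give new_mean=10045 and new_std=105, so
    # the_central_limit_theorem_i(10045, 105, 9800) = Phi(-245/105) ~= 0.0098.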
if __name__ == '__main__':
d6 = Day6()
# Set data
max_weight = float(input())
n = float(input())
mean = float(input())
std = float(input())
new_mean = mean * n
new_std = math.sqrt(n) * std
# Gets the result and show on the screen
result = d6.the_central_limit_theorem_i(new_mean, new_std, max_weight)
print('{:.4f}'.format(result))
| 3.71875
| 4
|
keras_text_cls/embedding/word2vec_embedder.py
|
titicaca/keras-text-cls
| 3
|
12776179
|
import logging
import numpy as np
from keras_text_cls.embedding.base_embedder import BaseEmbedder
from keras_text_cls.vocab import Vocabulary, SYMBOL_PADDING, SYMBOL_UNKNOWN
from gensim.models.word2vec import Word2Vec
class Word2vecEmbedder(BaseEmbedder):
"""
Word2vec Embedder is a wrapper of gensim word2vec model
Reference to: https://radimrehurek.com/gensim/models/word2vec.html
Attributes
----------
dim: int
embedding vector dimension, default 300
seed: int
random seed, default is None
"""
def __init__(self, dim=300, seed=None):
super().__init__(dim, seed)
def transform(self, word):
if not self.is_fitted:
raise ValueError("model needs to be fitted first")
if word in self._model.wv.vocab:
return self._model.wv[word]
elif word == SYMBOL_PADDING:
return self._PADDING_VEC
else:
return self._UNKNOWN_VEC
def fit_on_words(self, words, sg=0, window=5, min_count=5, workers=4, iter=5, negative=5, **kwargs):
"""
fit word2vec model on words, vector size is assigned internally by default (equal to model._dim)
parameters are the same as gensim word2vec model
:param words: 2-dim list of words
:param sg: int {1, 0}
Defines the training algorithm. If 1, skip-gram is employed; otherwise, CBOW is used.
:param window: int
The maximum distance between the current and predicted word within a sentence.
:param min_count: int
            Ignores all words with total frequency lower than this.
:param workers: int
Use these many worker threads to train the model (=faster training with multicore machines).
:param iter: int
Number of iterations (epochs) over the corpus.
:param negative: int
If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
should be drawn (usually between 5-20).
If set to 0, no negative sampling is used.
:param kwargs: more arguments can assigned by referring to gensim word2vec model
:return: fitted model
"""
if self.is_fitted:
raise ValueError("model is already fitted")
sentences = words
vector_size = self._dim
word2vec_model = Word2Vec(sentences, size=vector_size, sg=sg, window=window, min_count=min_count,
workers=workers, iter=iter, negative=negative, **kwargs)
self._model = word2vec_model
        logging.info("word2vec model is fitted successfully")
self.is_fitted = True
return self
def get_vocabs(self):
if not self.is_fitted:
raise ValueError("model needs to be fitted first")
vocabs_set = set(self._predefined_vocabs)
vocabs = self._predefined_vocabs.copy()
for w in self._model.wv.vocab:
if w not in vocabs_set:
vocabs_set.add(w)
vocabs.append(w)
return vocabs
def get_dim(self):
if self.is_fitted:
return self._model.vector_size
else:
return self._dim
def save_model(self, path):
if not self.is_fitted:
raise ValueError("model needs to be fitted first")
self._model.save(path)
logging.info("saving model into: " + path)
@staticmethod
def load_model(path):
w2v = Word2vecEmbedder()
w2v._model = Word2Vec.load(path)
logging.info("loaded word2vec model from: " + path)
w2v.is_fitted = True
return w2v
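# Illustrative usage sketch (corpus and dimension are made up):
#   emb = Word2vecEmbedder(dim=50)
#   emb.fit_on_words([["deep", "learning"], ["text", "classification"]], min_count=1)
#   vec = emb.transform("text")  # length-50 numpy vector
#   emb.save_model("w2v.model"); restored = Word2vecEmbedder.load_model("w2v.model")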
| 3.171875
| 3
|
net/pacemaker1/files/extra-patch-cts_remote.py
|
egypcio/freebsd-ports
| 7
|
12776180
|
--- cts/remote.py.orig 2020-02-07 14:06:22 UTC
+++ cts/remote.py
@@ -125,7 +125,7 @@ class RemoteExec:
'''
def __init__(self, rsh, silent=False):
- self.async = []
+ self.async_calls = []
self.rsh = rsh
self.silent = silent
self.logger = LogFactory()
| 1.570313
| 2
|
bridges/data_src_dependent/song.py
|
krs-world/bridges-python
| 1
|
12776181
|
class Song:
"""
@brief A Song object, used along with the Songs data source.
This is a convenience class provided for users who wish to use this
data source as part of their application. It provides an API that makes
it easy to access the attributes of this data set.
    This object is generally not created by the user; to see how it is created, check
    out bridges::data_src_dependent::data_source::get_song()
For an example, check out https://bridgesuncc.github.io/tutorials/Data_Song_Lyrics.html
@author <NAME>, <NAME>
@date 2018, 12/29/20
"""
def __init__(self, artist: str = "", song: str = "", album: str = "", lyrics: str = "", release_date: str = ""):
"""
@brief Constructor
Args:
artist: song artist
song: song title
album: song album
lyrics: lyrics of song
release_date: release date of song
"""
self._artist = artist
self._song = song
self._lyrics = lyrics
self._album = album
self._release_date = release_date
@property
def artist(self):
"""
@brief return artist of song
Returns:
artist name of song
"""
return self._artist
@artist.setter
def artist(self, a):
"""
@brief Set artist of song
Args:
a: artist name to set
"""
self._artist = a
@property
def song_title(self):
"""
@brief return title of song
Returns:
song title
"""
return self._song
@song_title.setter
def song_title(self, s):
"""
@brief Set the song title
Args:
            s: song title to set
"""
self._song = s
@property
def album_title(self):
"""
@brief return album title
Returns:
album title of song
"""
return self._album
@album_title.setter
def album_title(self, a):
"""
        @brief Set album title of song
Args:
a: album title to set
"""
self._album = a
@property
def lyrics(self):
"""
@brief return lyrics of song
Returns:
lyrics of song
"""
return self._lyrics
@lyrics.setter
def lyrics(self, l):
"""
        @brief Set lyrics of song
Args:
l: lyrics data to set
"""
self._lyrics = l
@property
def release_date(self):
"""
@brief return release date of song
Returns:
release date of song
"""
return self._release_date
@release_date.setter
def release_date(self, r):
"""
@brief Set release date of song
Args:
r: release date to set
"""
self._release_date = r
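# Illustrative only (field values are invented):
#   s = Song(artist="Example Artist", song="Example Title")
#   s.album_title = "Example Album"
#   print(s.song_title, s.album_title)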
| 3.4375
| 3
|
references/nlp_dicts.py
|
derpyninja/nlp4cciwr
| 0
|
12776182
|
<gh_stars>0
# -*- coding: utf-8 -*-
from collections import OrderedDict
# set: unordered collections of unique elements (no duplicates possible)
stopwords_bbc_monitoring = {
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"w",
"x",
"y",
"z",
"january",
"march",
"april",
"may",
"june",
"juli",
"august",
"september",
"october",
"november",
"december",
"jan",
"feb",
"mar",
"apr",
"jun",
"jul",
"aug",
"sep",
"oct",
"nov",
"dev",
"hyperlink",
"url",
"bbc",
"monitoring",
"al",
"gmt",
"news",
"tv",
"television",
"gen",
"rig",
"ha",
"report",
"reports",
"statement",
"top",
"web",
"site",
"af1",
"af",
"mon",
"announcer",
"agency",
"min",
"maj",
"gen",
"page",
"said",
"says",
"say",
"saying",
"cite",
"cited",
"citing",
"source",
"year",
"time",
"day",
"season",
"top",
"media",
"nf",
"pk",
"sources",
"page",
"website",
"websites",
"press",
"british broadcasting corporation",
"bbc monitoring research",
"bbc mon nf newsfile",
"number",
"british",
"broadcasting",
"corporation",
"video",
"show",
"agency",
"english",
"europol",
"gb",
"eu1",
"newspaper",
"reported",
"report",
"excerpt",
"excerpts",
"time",
"internet",
"press",
"highlights",
"web",
"sites",
"site",
"spanish",
"la",
"la1",
"translated",
"translate",
"online",
"summary",
"headline",
"headlines",
"radio",
"television",
"district",
"region",
"report",
"daily",
"official",
"officials",
"today",
"newspaper",
"cent",
"video show",
"new",
"people",
"district",
"afp",
"quote",
"paper",
"area",
"areas",
"write",
"writes",
"written",
"writing",
"village",
"town",
"yesterday",
"head",
"helicopter",
"weekend",
"promotion",
"sport",
"significant",
"special",
"interview",
"senior",
"introduce",
"presenter",
"correspondent",
"text",
"section",
"quotes",
"article",
"read",
"comment",
"publish",
"publishes",
"published",
"publishing",
"want",
"come",
"tell",
"dollar",
"dollars",
"take",
"local",
"regional",
"country",
"state",
"showing",
"shown",
"shows",
"showed",
"words",
"word",
"wording",
"carry",
"carries",
"carrying",
"carried",
"satellite",
"think",
"thinks",
"comment",
"commented",
"commenting",
"notes",
"noted",
"know",
"knowing",
"known",
"main",
"great",
"greatly",
"review",
"reviews",
"briefing",
"summaries",
"programme",
"min",
"mins",
"column",
"listed",
"commercial",
"commercials",
"newsfile",
}
if __name__ == "__main__":
import pandas as pd
    # convert the set to a list first (pandas cannot construct a DataFrame from an unordered set)
    df = pd.DataFrame(list(stopwords_bbc_monitoring), columns=["stopword"])
df = df.sort_values(by="stopword")
df.to_csv("stopwords_bbc_monitoring_data.csv", index=False)
print(df.shape)
| 2.171875
| 2
|
app/ui/files_list_dialog.py
|
Deteriorator/GUI-YouGet
| 85
|
12776183
|
<filename>app/ui/files_list_dialog.py<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from PyQt5.QtGui import QIcon
from app.ui.ui_files_list_dialog import Ui_FilesListDialog
from app import mlog, mconfig
from app.util import status
from app.util.download_thread import DownloadThread
from PyQt5.QtWidgets import QDialog, QMessageBox, QProgressDialog
from PyQt5.QtCore import QThread, Qt
from app.ui.icon_rc import *
__author__ = 'InG_byr'
class FilesListDialog(Ui_FilesListDialog):
def __init__(self):
super().__init__()
self.files_list_dialog = QDialog()
self.setupUi(self.files_list_dialog)
self.files_list_dialog.setFixedSize(self.files_list_dialog.width(), self.files_list_dialog.height())
self.files_list_dialog.show()
self.msg = QMessageBox()
self.set_slot()
self.set_combo_box()
def set_slot(self):
self.push_button_confirm.clicked.connect(self.start_download_files)
self.push_button_cancel.clicked.connect(self.files_list_dialog.close)
def set_combo_box(self):
options = mconfig.get_streams()
if options:
self.combo_box_options.addItems(options)
else:
self.combo_box_options.addItem('default')
def update_files_list(self, files_list):
self.text_files_list.setHtml(files_list)
def start_download_files(self):
status.set_default()
self.push_button_confirm.setEnabled(False)
option = self.combo_box_options.currentText()
mconfig.set_file_itag(option)
mlog.debug('option is ' + option)
self.download_thread = DownloadThread(mconfig.get_urls(), **mconfig.kwargs)
self.download_thread.finish_signal.connect(self.finish_download)
self.download_thread.start()
self.show_progress_bar()
def show_msg(self, icon, title, text):
self.msg.setWindowTitle(title)
self.msg.setWindowIcon(QIcon(':/res/favicon.ico'))
self.msg.setIcon(icon)
self.msg.setText(text)
self.msg.setStandardButtons(QMessageBox.Ok)
self.msg.show()
def finish_download(self, is_succeed):
self.push_button_confirm.setEnabled(True)
if is_succeed:
if self.result:
self.show_msg(QMessageBox.Information, 'Tip',
self.result + '\n\nFiles path: ' + mconfig.get_file_path())
else:
self.show_msg(QMessageBox.Information, 'Completed',
'Download completed (ง •̀_•́)ง\n\nFiles path: ' + mconfig.get_file_path())
else:
self.show_msg(QMessageBox.Critical, 'Failed', 'Download failed (╯°Д°)╯︵ ┻━┻')
def show_progress_bar(self):
percent = 0
is_exits = False
self.result = None
progressDialog = QProgressDialog(self.files_list_dialog)
progressDialog.setAutoReset(True)
progressDialog.setWindowModality(Qt.WindowModal)
progressDialog.setMinimumDuration(5)
progressDialog.setWindowTitle('Downloading')
progressDialog.setWindowIcon(QIcon(':/res/favicon.ico'))
progressDialog.setLabelText('Current speed: ')
progressDialog.setCancelButtonText('Cancel')
progressDialog.setRange(0, 100)
progressDialog.setValue(0)
progressDialog.show()
while percent < 100 and not is_exits:
percent = status.get_percent()
is_exits = status.get_exist()
if is_exits:
self.result = 'Files already exists (..•˘_˘•..)'
progressDialog.close()
break
progressDialog.setValue(percent)
progressDialog.setLabelText('Current speed: ' + str(status.get_speed()))
QThread.msleep(100)
if progressDialog.wasCanceled():
status.set_stop_thread(True)
self.download_thread.wait()
mlog.debug('stop the download thread')
mlog.debug('download_thread.isRunning ' + str(self.download_thread.isRunning()))
progressDialog.close()
self.result = 'Paused Σ(っ °Д °;)っ'
break
| 2.28125
| 2
|
titanic/titanic.py
|
cjporteo/kaggle-competitions
| 0
|
12776184
|
<filename>titanic/titanic.py
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
pd.set_option('display.max_rows', 5000)
pd.set_option('display.max_columns', 5000)
from collections import Counter, defaultdict
from scipy.stats import skew
from scipy.special import boxcox1p
from scipy.stats import boxcox_normmax
from sklearn.preprocessing import LabelEncoder, RobustScaler
from sklearn.model_selection import GridSearchCV, ShuffleSplit
#Common Model Algorithms
from sklearn import svm, ensemble
from xgboost import XGBClassifier
X_train = pd.read_csv('../input/titanic/train.csv', index_col='PassengerId')
X_test = pd.read_csv('../input/titanic/test.csv', index_col='PassengerId')
X_train.dropna(axis=0, subset=['Survived'], inplace=True)
# This dictionary will store all the family members and their survival outcomes for each family.
# Two passengers are said to be in the same "family" if they have the same last name and family size.
# Consider, for instance, the scenario where there are multiple Brown families on board, but one is
# a family of 6 and the other is a lone bachelor.
# Our system will be able to discriminate between these two groups and not associate them.
#
# { key = (last name, family size), type=tuple : value = list of tuples for all family members in training set (full name, outcome) }
family_survival_details = defaultdict(list)
# This dictionary will function similarly to the family dictionary, but instead just looks for matching tickets and isn't concerned with group size.
# { key = ticket : value = list of tuples for all ticket group members in training set (full name, outcome) }
ticket_survival_details = defaultdict(list)
# This set stores all the passengers who do not have any other family members in the training set.
# More formally, this set stores all passengers satisfying len(family_survival_details[lastname, familysize]) = 1
unknown_passengers_by_family = set([])
# Again, this functions similarly to the "unknown by family" set.
# This set stores all passengers satisfying len(ticket_survival_details[ticket]) = 1
unknown_passengers_by_ticket = set([])
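# Illustrative example of the shapes involved (hypothetical entries, not actual
# rows from the training set):
#   family_survival_details[("Brown", 6)] -> [("Brown, Mr. John", 0), ("Brown, Mrs. Mary", 1), ...]
#   ticket_survival_details["347082"]     -> [("Andersson, Mr. Anders", 0), ...]
#   unknown_passengers_by_family          -> {"Doe, Mr. Lone Traveller", ...}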
# Trims the cardinality of the Title feature that we are about to engineer from the Name field
def adjust_title(title):
if title in ['Mr', 'Miss', 'Mrs', 'Master']:
return title
elif title in ['Mlle', 'Ms']:
return 'Miss'
elif title in ['Mme', 'Dona']:
return 'Mrs'
return 'Other'
# Builds the FamilyName, FamilySize and Title features
for df in [X_train, X_test]:
df['FamilyName'] = df['Name'].map(lambda x : x.split(',')[0])
df['FamilySize'] = df['SibSp'] + df['Parch'] + 1
df['Title'] = df['Name'].str.extract(' ([A-Za-z]+)\.', expand=False)
df['Title'] = df['Title'].map(adjust_title)
# This function populates our family survival details dictionary
def fill_fsd(row):
last_name = row['FamilyName']
size = row['FamilySize']
full_name = row['Name']
survived = row['Survived']
family_survival_details[(last_name, size)].append((full_name, survived))
# This function populates our ticket survival details dictionary
def fill_tsd(row):
ticket = row['Ticket']
full_name = row['Name']
survived = row['Survived']
ticket_survival_details[ticket].append((full_name, survived))
# Call the above two functions
X_train.apply(fill_fsd, axis=1)
X_train.apply(fill_tsd, axis=1)
# Establish a base survival rates dictionary based on class, gender, and title.
# When we lack any family or ticket survival rate information about a passenger, we will use this base rate.
# Even when we have family data, this base rate will still be useful. See below for details.
base_survival_rates = dict(X_train.groupby(by=['Pclass', 'Sex', 'Title'])['Survived'].mean())
# This will give us what we are looking for, our crucial feature: family specific survival rates.
# For each passenger in the training set and the testing set, we inquire about the known outcomes for
# all OTHER seen members of that family group.
# It is very important that we don't include the outcome for the passenger in question in this calculation,
# since this would lead to data leakage and would tarnish the usefulness of this predictor.
#
# Comments have been added to this function for clarity.
def get_fsr(row):
last_name = row['FamilyName']
size = row['FamilySize']
full_name = row['Name']
# Where we are storing the known outcomes for other passengers in the family.
outcomes = []
for passenger, outcome in family_survival_details[(last_name, size)]:
if passenger != full_name:
# We only care about the outcome for OTHER passengers in the family.
# Ex: When building the family survival rate for <NAME>, we don't
# care about whether he survived or not, only his family. (Sorry John)
outcomes.append(outcome)
if not outcomes:
# If we don't have any known outcomes for other family members, add this passenger
# to the unknown set and return 0 as a survival rate (we will adjust this later)
unknown_passengers_by_family.add(full_name)
return 0
    # Return the average of all the outcomes to get a probability estimate of survival.
return np.mean(outcomes)
# This is simply the ticket counterpart to the above function. The inner workings are very similar.
def get_tsr(row):
ticket = row['Ticket']
full_name = row['Name']
outcomes = []
for passenger, outcome in ticket_survival_details[ticket]:
if passenger != full_name:
outcomes.append(outcome)
if not outcomes:
unknown_passengers_by_ticket.add(full_name)
return 0
return np.mean(outcomes)
for df in [X_train, X_test]:
df['FamilySurvival'] = df.apply(get_fsr, axis=1)
df['TicketSurvival'] = df.apply(get_tsr, axis=1)
for df in [X_train, X_test]:
df['KnownFamily?'] = df['Name'].map(lambda x: 0 if x in unknown_passengers_by_family else 1)
df['KnownTicket?'] = df['Name'].map(lambda x: 0 if x in unknown_passengers_by_ticket else 1)
unknown_passengers = set([])
# This function amalgamates every result we have so far involving base, family, and ticket survival rate.
# The resulting overall survival rate will be a weighted average of these three rates.
def get_osr(row):
base_rate = base_survival_rates[(row['Pclass'], row['Sex'], row['Title'])]
if row['KnownFamily?'] and row['KnownTicket?']:
# The passenger can be identified by family and ticket group.
return 0.25*row['FamilySurvival'] + 0.25*row['TicketSurvival'] + 0.5*base_rate
elif row['KnownFamily?']:
# The passenger can be identified by family group only.
return 0.5*row['FamilySurvival'] + 0.5*base_rate
elif row['KnownTicket?']:
# The passenger can be identified by ticket group only.
return 0.5*row['TicketSurvival'] + 0.5*base_rate
else:
# The passenger can't be identified by family or ticket group.
unknown_passengers.add(row['Name'])
return base_rate
for df in [X_train, X_test]:
df['GroupRate'] = df.apply(get_osr, axis=1)
for df in [X_train, X_test]:
df['KnownGroup?'] = df['Name'].map(lambda x: 0 if x in unknown_passengers else 1)
df.drop(['FamilySurvival', 'TicketSurvival', 'KnownFamily?', 'KnownTicket?'], axis=1, inplace=True)
y = X_train['Survived']
X_train.drop(['Survived'], axis=1, inplace=True)
train_size = len(X_train)
'''
# Which features have missing values, and what is their datatype?
def investigate_missing(df):
for col in df:
missing = df[col].isnull().sum()
if missing > 0:
print("{}: {} missing --- type: {}".format(col, missing, df[col].dtype))
investigate_missing(pd.concat([X_train, X_test]))
'''
def featureProcessing(df):
# Change class from numerical to categorical, since class is ordinal.
df['Pclass'] = df['Pclass'].astype(str)
# Impute missing fares by the average fare across corresponding class, gender, and title.
df['Fare'] = df.groupby(by=['Pclass', 'Sex', 'Title'])['Fare'].transform(lambda x : x.fillna(x.median()))
# Impute missing ages by the average age across corresponding class, gender, and title.
df['Age'] = df.groupby(by=['Pclass', 'Sex', 'Title'])['Age'].transform(lambda x : x.fillna(x.median()))
# Fill missing embarking locations with S, the mode.
df['Embarked'].fillna('S', inplace=True)
# Passengers travelling together might not have matching last names (friends?) but could have matching tickets.
# Create a feature to represent the size of the "ticket group".
df['TicketGroupSize'] = df.groupby('Ticket')['Ticket'].transform('count')
# Condense family size and ticket group size into the maximum between the two.
# Split this feature into five bins.
df['GroupSize'] = df[["FamilySize", "TicketGroupSize"]].max(axis=1)
group_bins = [0, 1, 2, 4, 6, df['GroupSize'].max()]
group_labels = ['Alone', 'Small', 'Medium', 'Large', 'XLarge']
df['GroupSize'] = pd.cut(df['GroupSize'], group_bins, labels=group_labels).astype(str)
# "Women and children only"
df['Female?'] = np.where(df['Sex'] == 'female', 1, 0)
adults = ['Mr', 'Mrs', 'Other']
df['Parent?'] = np.where((df['Title'].isin(adults)) & (df['Parch'] > 0), 1, 0)
    # Cabin has a lot of missing values, but of these missing values, the vast majority lie in 2nd and 3rd class.
    # By doing some simple research on the deck layout of the Titanic and the likely cabin assignments, we can make
    # relatively safe assumptions to categorize passengers into their respective deck categories.
# The three deck categories I will use here are ABC, DE, and FG.
# These very strongly correlate to 1st, 2nd and 3rd class, respectively, but there is enough variation to warrant
# the inclusion of this feature.
#
# Ex: a 3rd class passenger whose cabin happens to be located in D or E deck will stand a better chance of survival
# than a 3rd class passenger in F or G
def get_deck(row):
if pd.isnull(row['Cabin']):
if row['Pclass'] == '1':
return 'ABC'
elif row['Pclass'] == '2':
return 'DE'
return 'FG'
deck = row['Cabin'][0]
if deck in 'ABCT':
return 'ABC'
elif deck in 'DE':
return 'DE'
return 'FG'
df['DeckClass'] = df.apply(get_deck, axis=1)
df.drop(['Ticket', 'Cabin', 'Name', 'Parch', 'SibSp', 'Sex', 'FamilyName', 'TicketGroupSize', 'FamilySize'], axis=1, inplace=True)
# Our numerical features
numericals = ['Fare', 'Age']
# Our categorical features
categoricals = df.select_dtypes(include='object').columns.values
# Process the numerical features
skewness = df[numericals].apply(lambda x: skew(x))
skew_index = skewness[abs(skewness) >= 0.5].index
# Get features with high skew
for col in skew_index:
# Apply boxcox transformation to attempt to reduce their skewness
df[col] = boxcox1p(df[col], boxcox_normmax(df[col] + 1))
for feat in numericals:
# Reshape using RobustScaler
df[feat] = RobustScaler().fit_transform(df[feat].apply(float).values.reshape(-1,1))
# One-hot encode the categorical features
for feat in categoricals:
dummies = pd.get_dummies(df[feat])
dummies.columns = [feat + ": " + col for col in dummies.columns.values]
df.drop(feat, axis=1, inplace=True)
df = df.join(dummies)
return df
X_full = featureProcessing(pd.concat([X_train, X_test]))
X_train = X_full[:train_size]
X_test = X_full[train_size:]
# We will use five different models, tune them, and then hold a vote.
vote_est = [
('for', ensemble.RandomForestClassifier()),
('svc', svm.SVC(probability=True)),
('xgb', XGBClassifier()),
('ada', ensemble.AdaBoostClassifier()),
('gb', ensemble.GradientBoostingClassifier())
]
# The tuned hyperparameters for each of the models. Obtained using GridSearchCV to cross-validate
# on five shuffle splits for each hyperparameter combination with train = 0.6, test = 0.3, drop = 0.1.
tuned_parameters=[
{'criterion': 'entropy', 'max_depth': 12, 'n_estimators': 550, 'oob_score': True, 'random_state': 0},
{'C': 10, 'decision_function_shape': 'ovo', 'gamma': 0.01, 'kernel': 'rbf', 'probability': True, 'random_state': 0},
{'learning_rate': 0.1, 'max_depth': 4, 'n_estimators': 100, 'random_state': 0},
{'algorithm': 'SAMME', 'learning_rate': 0.15, 'n_estimators': 250, 'random_state': 0},
{'learning_rate': 0.01, 'loss': 'deviance', 'max_depth': 4, 'n_estimators': 100, 'random_state': 0}
]
#tuned_parameters = []
#The hyperparameter search space for our models.
grid_n_estimator = [100, 250, 400, 550, 700, 850, 1000]
grid_ratio = [.1, .25, .5, .75, 0.9]
grid_learn = [0.01, 0.025, 0.05, 0.075, 0.1, 0.15]
grid_max_depth = [2, 4, 6, 8, 10, 12, 15, 20, None]
grid_criterion = ['gini', 'entropy']
grid_bool = [True, False]
grid_seed = [0]
grid_param = [
[{
#RandomForestClassifier
'n_estimators': grid_n_estimator,
'criterion': grid_criterion,
'max_depth': grid_max_depth,
'oob_score': [True],
'random_state': grid_seed
}],
[{
#SVC
'kernel': ['rbf'],
'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
'gamma': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
'decision_function_shape': ['ovo', 'ovr'],
'probability': [True],
'random_state': grid_seed
}],
[{
#XGBClassifier
'learning_rate': grid_learn, #default: .3
'max_depth': [3, 4, 5, 6, 8, 10], #default 2
'n_estimators': grid_n_estimator,
'random_state': grid_seed
}],
[{
#AdaBoostClassifier
'algorithm': ['SAMME','SAMME.R'],
'n_estimators' : grid_n_estimator,
'learning_rate': grid_learn,
'random_state': grid_seed
}],
[{
#GradientBoostingClassifier
'loss' : ['deviance'],
'n_estimators' : grid_n_estimator,
'learning_rate': grid_learn,
'max_depth': [3, 4, 5, 6, 8, 10],
'random_state': grid_seed
}]
]
cv_split = ShuffleSplit(n_splits = 5, test_size = .3, train_size = .6, random_state = 0)
# This code is only executed if the list of tuned parameters is empty. It will find the optimal hyperparameters.
if not tuned_parameters:
for clf, param in zip(vote_est, grid_param):
print(clf[0])
best_search = GridSearchCV(estimator = clf[1], param_grid = param, cv = cv_split, scoring = 'accuracy', n_jobs=-1, verbose=True)
best_search.fit(X_train, y)
print("Best Parameters:\n{}".format(best_search.best_params_))
print("Best Score: {}\n".format(best_search.best_score_))
best_param = best_search.best_params_
clf[1].set_params(**best_param)
else:
# Set the model parameters to the tuned values.
for clf, tuned_param in zip(vote_est, tuned_parameters):
clf[1].set_params(**tuned_param)
# Conduct the hard vote.
vote_hard_tuned = ensemble.VotingClassifier(estimators = vote_est, voting = 'hard')
vote_hard_tuned.fit(X_train, y)
preds = vote_hard_tuned.predict(X_test)
output = pd.DataFrame({'PassengerId' : X_test.index,
'Survived' : preds})
output.to_csv('submission.csv', index=False)
| 3.296875
| 3
|
examples/10-customer-payment-history.py
|
bryanwills/mollie-api-python
| 95
|
12776185
|
<filename>examples/10-customer-payment-history.py<gh_stars>10-100
# Example: Retrieving the payment history for a customer
#
import os
import flask
from mollie.api.client import Client
from mollie.api.error import Error
def main():
try:
#
# Initialize the Mollie API library with your API key.
#
# See: https://www.mollie.com/dashboard/settings/profiles
#
api_key = os.environ.get("MOLLIE_API_KEY", "test_test")
mollie_client = Client()
mollie_client.set_api_key(api_key)
body = ""
customer_id = flask.request.args.get("customer_id")
# If no customer ID was provided in the URL, we grab the first customer
if customer_id is None:
customers = mollie_client.customers.list()
body += "<p>No customer ID specified. Attempting to retrieve the first page of customers "
body += "and grabbing the first.</p>"
if not len(customers):
body += "<p>You have no customers. You can create one from the examples.</p>"
return body
customer = next(customers)
else:
customer = mollie_client.customers.get(customer_id)
amount_of_payments_to_retrieve = 20
#
# Retrieve the latest payments for the customer
#
# See: https://www.mollie.com/nl/docs/reference/customers/list-payments
#
params = {
"limit": amount_of_payments_to_retrieve,
}
payments = mollie_client.customer_payments.with_parent_id(customer.id).list(**params)
body += f'<p>Showing the last {len(payments)} payments for customer "{customer.id}"</p>'
for payment in payments:
body += f'<p>Payment {payment.id} ({payment.amount["value"]}) {payment.amount["currency"]}</p>'
return body
except Error as err:
return f"API call failed: {err}"
if __name__ == "__main__":
print(main())
| 3.046875
| 3
|
src/final_work/image_converter.py
|
mi-sts/spbu_python_homeworks
| 0
|
12776186
|
import typing
import torch
import torchvision
import numpy as np
from PIL import Image
from torch.autograd import Variable
from src.final_work.transformer import Transformer
from enum import Enum
class ModelType(Enum):
HOSODA = "hosoda_mamoru"
KON = "kon_satoshi"
MIYAZAKI = "miyazaki_hayao"
SHINKAI = "shinkai_makoto"
class Device(Enum):
CPU = "cpu"
GPU = torch.device("cuda")
class ImageConverter:
MODELS_DIRECTORY = "models"
def __init__(self):
self.device = self._define_device()
self._init_models()
def _get_model(self, model_type: ModelType) -> Transformer:
return self.models[model_type.value]
@staticmethod
def _define_device() -> Device:
_is_gpu_enable = torch.cuda.is_available()
if _is_gpu_enable:
return Device.GPU
else:
return Device.CPU
def _init_models(self):
self.models = dict()
for model_type in ModelType:
self.models[model_type.value] = self._create_model(model_type)
def _load_model_parameters(self, model: ModelType):
return torch.load(f"{self.MODELS_DIRECTORY}/{model.value}.pth", self.device.value)
def _create_model(self, model_type: ModelType) -> Transformer:
new_model = Transformer()
new_model_parameters = self._load_model_parameters(model_type)
new_model.load_state_dict(new_model_parameters)
if self.device == Device.GPU:
new_model.to(self.device.value)
new_model.eval()
return new_model
def convert_image(self, image: Image, model_type: ModelType) -> Image:
image = image.convert("RGB")
image = np.asarray(image)
image = image[:, :, [2, 1, 0]]
image = torchvision.transforms.ToTensor()(image).unsqueeze(0)
image = -1 + 2 * image
if self.device == Device.GPU:
image = Variable(image).to(self.device.value)
else:
image = Variable(image).float()
model = self._get_model(model_type)
converted_image = model(image)
converted_image = converted_image[0]
converted_image = converted_image[[2, 1, 0], :, :]
converted_image = converted_image.data.cpu().float() * 0.5 + 0.5
return torchvision.transforms.ToPILImage()(converted_image)
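# Hypothetical usage sketch (assumes the pretrained .pth files are present under the
# "models" directory and that "input.jpg" exists; both paths are placeholders).
if __name__ == "__main__":
    converter = ImageConverter()
    source = Image.open("input.jpg")
    stylized = converter.convert_image(source, ModelType.MIYAZAKI)
    stylized.save("output_miyazaki.jpg")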
| 2.578125
| 3
|
midastools/misc/create_mip.py
|
lab-midas/toolbox
| 3
|
12776187
|
<gh_stars>1-10
import SimpleITK as sitk
import argparse
from pathlib import Path
import matplotlib.pyplot as plt
def main():
print("NIFI-image information")
parser = argparse.ArgumentParser()
parser.add_argument('--nii', help='image .nii file')
args = parser.parse_args()
nii_file = vars(args)['nii']
print(nii_file)
if not nii_file:
        print('please specify .nii file')
return 1
nii_file = Path(nii_file)
if not nii_file.exists():
        print(str(nii_file), ' does not exist.')
        return 1
reader = sitk.ImageFileReader()
reader.SetImageIO('NiftiImageIO')
reader.SetFileName(str(nii_file))
img = reader.Execute()
mip_img = sitk.MaximumProjection(img, 1)
print(mip_img.GetSize())
plt.imshow(sitk.GetArrayFromImage(mip_img)[:, 0, :])
plt.show()
if __name__ == '__main__':
main()
| 2.65625
| 3
|
save/MacroFuegoRunsList.py
|
tbird20d/fserver
| 0
|
12776188
|
<reponame>tbird20d/fserver
"""
MacroFuegoRunList - show a list of test runs
"""
import os
def main(req, args=""):
full_dirlist = os.listdir(req.config.files_dir+os.sep+"runs")
full_dirlist.sort()
    # each run is its own directory
dirlist = full_dirlist
# FIXTHIS - look for test run json file
if not dirlist:
return req.html_error("No runs found.")
runs_url = req.config.files_url_base + os.sep + "runs/"
html = "<ul>"
for item in dirlist:
html += '<li><a href="'+runs_url+item+'">' + item + '</a></li>\n'
html += "</ul>"
return html
| 2.46875
| 2
|
alphamind/portfolio/meanvariancebuilder.py
|
rongliang-tech/alpha-mind
| 186
|
12776189
|
# -*- coding: utf-8 -*-
"""
Created on 2017-6-27
@author: cheng.li
"""
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
from alphamind.portfolio.optimizers import (
QuadraticOptimizer,
TargetVolOptimizer
)
from alphamind.exceptions.exceptions import PortfolioBuilderException
def _create_bounds(lbound,
ubound,
bm,
risk_exposure,
risk_target):
if lbound is not None:
lbound = lbound - bm
if ubound is not None:
ubound = ubound - bm
if risk_exposure is not None:
cons_mat = risk_exposure.T
bm_risk = cons_mat @ bm
clbound = (risk_target[0] - bm_risk).reshape((-1, 1))
cubound = (risk_target[1] - bm_risk).reshape((-1, 1))
else:
cons_mat = None
clbound = None
cubound = None
return lbound, ubound, cons_mat, clbound, cubound
def _create_result(optimizer, bm):
if optimizer.status() == "optimal" or optimizer.status() == "optimal_inaccurate":
return optimizer.status(), optimizer.feval(), optimizer.x_value() + bm
else:
raise PortfolioBuilderException(optimizer.status())
def mean_variance_builder(er: np.ndarray,
risk_model: Dict[str, Union[None, np.ndarray]],
bm: np.ndarray,
lbound: Union[np.ndarray, float, None],
ubound: Union[np.ndarray, float, None],
risk_exposure: Optional[np.ndarray],
risk_target: Optional[Tuple[np.ndarray, np.ndarray]],
lam: float = 1.,
linear_solver: str = 'deprecated') -> Tuple[str, float, np.ndarray]:
lbound, ubound, cons_mat, clbound, cubound = _create_bounds(lbound, ubound, bm, risk_exposure,
risk_target)
if cons_mat is not None:
cons_matrix = np.concatenate([cons_mat, clbound, cubound], axis=1)
else:
cons_matrix = None
cov = risk_model['cov']
special_risk = risk_model['idsync']
risk_cov = risk_model['factor_cov']
risk_exposure = risk_model['factor_loading']
prob = QuadraticOptimizer(objective=-er,
cons_matrix=cons_matrix,
lbound=lbound,
ubound=ubound,
penalty=lam,
cov=cov,
factor_cov=risk_cov,
factor_load=risk_exposure,
factor_special=special_risk)
if prob.status() == "optimal" or prob.status() == 'optimal_inaccurate':
return prob.status(), prob.feval(), prob.x_value() + bm
else:
raise PortfolioBuilderException(prob.status())
def target_vol_builder(er: np.ndarray,
risk_model: Dict[str, Union[None, np.ndarray]],
bm: np.ndarray,
lbound: Union[np.ndarray, float],
ubound: Union[np.ndarray, float],
risk_exposure: Optional[np.ndarray],
risk_target: Optional[Tuple[np.ndarray, np.ndarray]],
vol_target: float = 1.,
linear_solver: str = 'ma27') -> Tuple[str, float, np.ndarray]:
lbound, ubound, cons_mat, clbound, cubound = _create_bounds(lbound, ubound, bm, risk_exposure,
risk_target)
if cons_mat is not None:
cons_matrix = np.concatenate([cons_mat, clbound, cubound], axis=1)
else:
cons_matrix = None
cov = risk_model['cov']
special_risk = risk_model['idsync']
risk_cov = risk_model['factor_cov']
risk_exposure = risk_model['factor_loading']
prob = TargetVolOptimizer(objective=-er,
cons_matrix=cons_matrix,
lbound=lbound,
ubound=ubound,
target_vol=vol_target,
factor_cov=risk_cov,
factor_load=risk_exposure,
factor_special=special_risk,
cov=cov)
if prob.status() == "optimal" or prob.status() == 'optimal_inaccurate':
return prob.status(), prob.feval(), prob.x_value() + bm
else:
raise PortfolioBuilderException(prob.status())
| 2.375
| 2
|
run.py
|
icdlvru2021/project1-ai
| 0
|
12776190
|
import pacman
import autograder
"""
run.py runs things that look like command-line arguments
for Berkeley Python. Leave the 'python pacman.py' part
at the beginning, just like running from the command line.
You should comment out all lines in the file except the one
you want to run!
"""
#pacman.main('python pacman.py --layout tinyMaze --pacman GoWestAgent')
#pacman.main('python pacman.py -l tinyMaze -p SearchAgent -a fn=tinyMazeSearch')
pacman.main('python pacman.py -l mediumMaze -p SearchAgent -a fn=dfs')
# autograder
#autograder.run('python autograder.py')
#autograder.run('python autograder -q q1')
#autograder.run('python autograder.py -t test_cases/q2/graph_bfs_vs_dfs')
| 2.8125
| 3
|
src/dataset/vctk_dataset.py
|
antic11d/neural-compression
| 0
|
12776191
|
<reponame>antic11d/neural-compression<gh_stars>0
from torch.utils.data import Dataset
import pickle
import os
import numpy as np
class VCTKFeaturesDataset(Dataset):
def __init__(
self, vctk_path, subdirectory, normalizer=None, features_path="features"
):
self._vctk_path = vctk_path
self._subdirectory = subdirectory
features_path = self._vctk_path + os.sep + features_path
self._sub_features_path = features_path + os.sep + self._subdirectory
self._files_number = len(os.listdir(self._sub_features_path))
self._normalizer = normalizer
def __getitem__(self, index):
dic = None
path = self._sub_features_path + os.sep + str(index) + ".pickle"
if not os.path.isfile(path):
raise OSError("No such file '{}'".format(path))
if os.path.getsize(path) == 0:
raise OSError("Empty file '{}'".format(path))
with open(path, "rb") as file:
dic = pickle.load(file)
if self._normalizer:
dic["input_features"] = (
dic["input_features"] - self._normalizer["train_mean"]
) / self._normalizer["train_std"]
dic["output_features"] = (
dic["output_features"] - self._normalizer["train_mean"]
) / self._normalizer["train_std"]
dic["quantized"] = (
np.array([]) if dic["quantized"] is None else dic["quantized"]
)
dic["one_hot"] = np.array([]) if dic["one_hot"] is None else dic["one_hot"]
dic["index"] = index
return dic
def __len__(self):
return self._files_number
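# Hypothetical usage sketch (the dataset path and subdirectory are placeholders, not
# taken from the original repository; pickled feature files are assumed to exist).
if __name__ == "__main__":
    dataset = VCTKFeaturesDataset("/path/to/vctk", "train")
    sample = dataset[0]
    print(len(dataset), sorted(sample.keys()))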
| 2.359375
| 2
|
tests/unit/test_ikonos_image.py
|
DigitalGlobe/gbdxtools
| 81
|
12776192
|
<gh_stars>10-100
'''
Authors: <NAME>, <NAME>
Contact: <EMAIL>
Unit tests for the gbdxtools.IkonosImage class
'''
from gbdxtools import IkonosImage, CatalogImage
import vcr
import unittest
from helpers import mockable_interface, gbdx_vcr
class GE01ImageTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.gbdx = mockable_interface()
@gbdx_vcr.use_cassette('tests/unit/cassettes/test_ikonos_image.yaml', filter_headers=['authorization'])
def test_ikonos_image(self):
_id = '2001110218231680000010116110'
img = self.gbdx.catalog_image(_id, bbox=[-111.96084176146779, 36.627666371883, -111.91772282506821, 36.670785308282575])
self.assertTrue(isinstance(img, IkonosImage))
assert img.shape == (4, 1500, 1500)
assert img.proj == 'EPSG:4326'
@gbdx_vcr.use_cassette('tests/unit/cassettes/test_ikonos_image_pansharpen.yaml', filter_headers=['authorization'])
def test_ikonos_image_pansharpen(self):
_id = '2013052717574940000011621174'
img = self.gbdx.catalog_image(_id, pansharpen=True, bbox=[-104.89514954388144, 39.59212288111695, -104.87939038753511, 39.60903377365575])
self.assertTrue(isinstance(img, IkonosImage))
assert img.shape == (4, 2353, 2193)
assert img.proj == 'EPSG:4326'
@gbdx_vcr.use_cassette('tests/unit/cassettes/test_ikonos_image_proj.yaml', filter_headers=['authorization'])
def test_ikonos_image_proj(self):
_id = '2001110218231680000010116110'
img = self.gbdx.catalog_image(_id, proj="EPSG:3857")
self.assertTrue(isinstance(img, IkonosImage))
assert img.shape == (4, 33624, 4870)
assert img.proj == 'EPSG:3857'
| 1.9375
| 2
|
deep-insight/deepinsight/topo.py
|
opennetworkinglab/sdfabric-utils
| 2
|
12776193
|
# SPDX-FileCopyrightText: Copyright 2021-present Open Networking Foundation.
# SPDX-License-Identifier: Apache-2.0
import ipaddress
import logging
import re
from collections import Counter, defaultdict
import kubernetes as k8s
import requests
from netaddr import IPAddress
log = logging.getLogger("DeepInsightTopoUtility")
INT_HOST_REPORTER_TOPO_API="http://{}:4048/api/v1/topology"
PORT_MAPPINGS = {}
def parse_port_id(port_string):
# Port string can be "[port/channel](id)" or just "id".
# Only return the ID of port.
match = re.match(r"\[([0-9]+/[0-9]+)\]\(([0-9]+)\)", port_string)
if not match:
return int(port_string)
else:
return int(match.group(2))
def parse_port_name(port_string):
# Port string can be "[port/channel](id)" or just "id".
# Return the "port/channel" string, if exists.
# Otherwise, return the ID of port.
match = re.match(r"\[([0-9]+/[0-9]+)\]\(([0-9]+)\)", port_string)
if not match:
return port_string
elif match.group(1):
return match.group(1)
else:
return match.group(2)
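# For reference, both helpers accept either form of the ONOS port string
# (hypothetical values shown):
#   parse_port_id("[32/0](260)") -> 260    parse_port_name("[32/0](260)") -> "32/0"
#   parse_port_id("260")         -> 260    parse_port_name("260")         -> "260"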
def gen_topo(
onos_url="http://localhost:8181/onos/v1",
onos_user="onos",
onos_pass="<PASSWORD>",
with_end_host=False,
k8s_clusters=[],
):
"""
Generate topology based on ONOS and K8s cluster topology.
:param onos_url: The ONOS URL, default is http://localhost:8181/onos/v1
:param onos_user: The ONOS user, default is onos
:param onos_pass: The ONOS password, default is <PASSWORD>
:param with_end_host: Include end hosts(k8s nodes), default is False
:param k8s_clusters: [For end host] The list of K8s cluster info, default is empty
"""
log.info(
"Using ONOS REST APIs at %s (user:%s, password:%s)",
onos_url,
onos_user,
onos_pass,
)
auth = requests.auth.HTTPBasicAuth(onos_user, onos_pass)
netcfg = requests.get(onos_url + "/network/configuration", auth=auth)
if not netcfg.ok:
log.fatal("Unable to retrieve netcfg\n%s", netcfg.text)
netcfg = netcfg.json()
topo = {"switches": [], "links": [], "subnets": [], "hosts": []}
for key, value in netcfg["devices"].items():
topo["switches"].append(
{
"switchId": value["segmentrouting"]["ipv4NodeSid"],
"name": key,
"deviceType": "legacy",
"ip": value["segmentrouting"]["ipv4Loopback"],
}
)
devices = requests.get(onos_url + "/devices", auth=auth)
if not devices.ok:
log.fatal("Unable to retrieve devices\n%s", devices.text)
devices = devices.json()["devices"]
for device in devices:
device_ports = requests.get(onos_url + "/devices/" + device['id'] + "/ports", auth=auth)
if not device_ports.ok:
log.fatal("Unable to retrieve ports of device\n%s", device_ports.text)
for elem in device_ports.json()['ports']:
port_name = parse_port_name(elem['port'])
port_id = parse_port_id(elem['port'])
if not device['id'] in PORT_MAPPINGS:
PORT_MAPPINGS[device['id']] = {}
PORT_MAPPINGS[device['id']][port_id] = port_name
print(PORT_MAPPINGS)
subnets = defaultdict(lambda: {})
for key, value in netcfg["ports"].items():
if "interfaces" not in value:
continue
ifaces = value["interfaces"]
for iface in ifaces:
for ip in iface["ips"]:
ip = ipaddress.ip_interface(ip)
subnets[str(ip.network)][key] = True
subnet_id = 1
for subnet, ports in subnets.items():
topo["subnets"].append(
{"ip_subnet": subnet, "name": subnet, "subnet_id": subnet_id}
)
for port in ports:
switch_id, port_num = port.split("/")
topo["links"].append(
{
"node1": switch_id,
"port1": PORT_MAPPINGS[switch_id][int(port_num)],
"node2": subnet,
"port2": "-1",
"switchPort1": int(port_num),
}
)
subnet_id = subnet_id + 1
hosts = requests.get(onos_url + "/hosts", auth=auth)
if not hosts.ok:
log.fatal("Unable to retrieve hosts\n%s", hosts.text)
hosts = hosts.json()["hosts"]
    # A dictionary that maps each host IP to its locations.
    # Later we will use this dictionary to find the location of the next hop for each route.
host_ip_to_locations = {}
    # Host names in ONOS are not unique; in case of duplicates, append a count
    # suffix (e.g., myhost_1, myhost_2). Similarly, we use different names for hosts with
# multiple IP addresses.
name_ctr = Counter()
for host in hosts:
try:
name = host["annotations"]["name"]
except KeyError:
name = host["id"]
for ip in host["ipAddresses"]:
name_ctr.update([name])
unique_name = "%s_%s" % (name, name_ctr[name])
topo["hosts"].append(
{
"ip": ip,
"name": unique_name,
}
)
for location in host["locations"]:
port_num = parse_port_id(location["port"])
topo["links"].append(
{
"node1": location["elementId"],
"port1": PORT_MAPPINGS[location["elementId"]][int(port_num)],
"node2": unique_name,
"port2": "-1",
"switchPort1": port_num,
}
)
host_ip_to_locations[ip] = host["locations"]
links = requests.get(onos_url + "/links", auth=auth)
if not links.ok:
log.fatal("Unable to retrieve hosts\n%s", links.text)
links = links.json()["links"]
for app, value in netcfg["apps"].items():
if app == "org.omecproject.up4":
if "up4" not in value:
continue
up4 = value["up4"]
if "devices" in up4:
up4_switch_ids = up4["devices"]
else:
# TODO: For backward compatibility
                # remove this when we no longer need it.
up4_switch_ids = [up4["deviceId"]]
s1uaddr = up4["s1uAddr"]
s1uaddr = ipaddress.ip_address(s1uaddr)
uepools = set([str(ipaddress.ip_network(n)) for n in up4["uePools"]])
for uepool in uepools:
topo["subnets"].append(
{"ip_subnet": uepool, "name": uepool, "subnet_id": subnet_id}
)
subnet_id = subnet_id + 1
subnets_with_ue = []
for s in subnets:
if s1uaddr in ipaddress.ip_network(s):
subnets_with_ue.append(s)
if len(subnets_with_ue) == 0:
log.warning("Unable to map UP4 S1U address to switch port: %s", s1uaddr)
continue
for s in subnets_with_ue:
for port in subnets[s]:
switch_id, port_num = port.split("/")
if switch_id in up4_switch_ids:
for uepool in uepools:
topo["links"].append(
{
"node1": switch_id,
"port1": PORT_MAPPINGS[switch_id][int(port_num)],
"node2": uepool,
"port2": "-1",
"switchPort1": int(port_num),
}
)
elif app == "org.onosproject.route-service":
if "routes" not in value:
continue
for route in value["routes"]:
if "prefix" not in route or "nextHop" not in route:
continue
prefix = route["prefix"]
next_hop = route["nextHop"]
topo["subnets"].append(
{"ip_subnet": prefix, "name": prefix, "subnet_id": subnet_id}
)
subnet_id = subnet_id + 1
route_locations = host_ip_to_locations.get(next_hop, [])
for route_location in route_locations:
port_num = parse_port_id(route_location["port"])
topo["links"].append(
{
"node1": route_location["elementId"],
"port1": PORT_MAPPINGS[route_location["elementId"]][int(port_num)],
"node2": prefix,
"port2": "-1",
"switchPort1": port_num,
}
)
# ONOS returns an entry for each direction of a bidirectional link, but
# DeepInsight expects only one entry for both directions.
bidi_links = {}
for link in links:
key = [str(link["src"]), str(link["dst"])]
key.sort()
key = tuple(key)
port1_num = parse_port_id(link["src"]["port"])
port2_num = parse_port_id(link["dst"]["port"])
bidi_links[key] = {
"node1": link["src"]["device"],
"port1": PORT_MAPPINGS[link["src"]["device"]][int(port1_num)],
"node2": link["dst"]["device"],
"port2": PORT_MAPPINGS[link["dst"]["device"]][int(port2_num)],
"switchPort1": port1_num,
"switchPort2": port2_num,
}
topo["links"].extend(bidi_links.values())
if not with_end_host:
return topo
# End hosts topology config
for idx, cluster in enumerate(k8s_clusters):
        if 'subnet' not in cluster:
log.error("Missing 'subnet' in K8s cluster info [argument index=%d]: %s, skipping to add K8s cluster to topology file.", idx, cluster)
continue
k8s_config = cluster['config'] if 'config' in cluster else None
k8s_cluster_subnet = cluster['subnet']
k8s.config.load_kube_config(config_file=k8s_config)
k8s_node_ips = []
for node in k8s.client.CoreV1Api().list_node().items:
k8s_node_ips += [
item.address for item in node.status.addresses if item.type == "InternalIP"
]
for subnet in topo["subnets"]:
if subnet["ip_subnet"] == k8s_cluster_subnet:
k8s_subnet = subnet
subnet_id = subnet["subnet_id"]
break
else:
k8s_subnet = {
"name": k8s_cluster_subnet,
"ip_subnet": k8s_cluster_subnet,
"subnet_id": subnet_id,
}
subnet_id += 1
k8s_node_cidrs = []
ipam_blocks = k8s.client.CustomObjectsApi().list_cluster_custom_object(
group="crd.projectcalico.org", version="v1", plural="ipamblocks"
)
for item in ipam_blocks["items"]:
cidr = item["spec"]["cidr"]
k8s_node_cidrs.append(
{"name": str(cidr), "ip_subnet": str(cidr), "subnet_id": subnet_id}
)
subnet_id += 1
vswitch_links = dict()
vswitches = []
for node_id, node_ip in enumerate(k8s_node_ips):
url = INT_HOST_REPORTER_TOPO_API.format(node_ip)
host_topology = requests.get(url)
if not host_topology.ok:
log.fatal("Unable to access Topology API from K8s node %s\n%s", node_ip, host_topology.text)
for link in host_topology.json()["links"]:
if link["is-node-iface"]:
node_iface = link["id"]
vswitch_ip = link["ip-addresses"][0]
hostname = [host["name"] for host in topo["hosts"] if host["ip"] == vswitch_ip]
hostname = hostname[0] if len(hostname) != 0 else ""
name = "device:vswitch" + str(node_id)
vswitches.append(
{
"name": name,
"ip": vswitch_ip,
"default-intf": str(node_iface),
"deviceType": "legacy",
"switchId": int(IPAddress(node_ip)),
"hostname": hostname,
}
)
vswitch_links[name] = host_topology.json()["links"]
topo['switches'].extend(vswitches)
all_host_subnets = k8s_node_cidrs + [k8s_subnet]
# Overrides links in the topology config.
# Connects the physical switch to the host vswitch
for link in topo["links"]:
for sw in vswitches:
# find IP of an attached host
host_ip = [host["ip"] for host in topo["hosts"] if host["name"] == link["node2"]]
host_ip = host_ip[0] if len(host_ip) != 0 else ""
if host_ip == sw["ip"]:
link["port2"] = sw["default-intf"]
link["node2"] = sw["name"]
link["switchPort2"] = int(sw["default-intf"])
# Connect vswitch to all possible subnets with all possible ports.
for sw in vswitches:
for host_subnet in all_host_subnets:
for link in vswitch_links[sw["name"]]:
if link["is-node-iface"]:
# skip data interfaces
continue
topo["links"].append(
{
"node1": sw["name"],
"node2": host_subnet["name"],
"port1": str(link["id"]),
"port2": "-1",
"switchPort1": int(link["id"]),
}
)
# Overrides subnets in the topology config.
if k8s_subnet not in topo["subnets"]:
topo["subnets"].append(k8s_subnet)
topo["subnets"] += k8s_node_cidrs
return topo
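# Hypothetical usage sketch (assumes a reachable ONOS instance at the default URL;
# this block is not part of the original module).
if __name__ == "__main__":
    import json
    # All arguments are left at their defaults here; pass onos_url/onos_user/onos_pass
    # explicitly for a non-default ONOS deployment.
    topology = gen_topo()
    print(json.dumps(topology, indent=2))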
| 2.40625
| 2
|
ressources/constant.py
|
Jouca/Horse-Game
| 0
|
12776194
|
<gh_stars>0
COLOR = {
'ORANGE': (255, 121, 0),
'LIGHT_YELLOW': (255, 230, 130),
'YELLOW': (255, 204, 0),
'LIGHT_BLUE': (148, 228, 228),
'BLUE': (51, 204, 204),
'LIGHT_RED': (255, 136, 106),
'RED': (255, 51, 0),
'LIGHT_GREEN': (206, 255, 60),
'GREEN': (153, 204, 0),
'CYAN': (0, 159, 218),
'BLUE_PAUL': (0, 59, 111),
'PURPLE': (161, 6, 132),
'WHITE': (255, 255, 255),
'BLACK': (0, 0, 0),
'GOLD': (245, 189, 2),
'SILVER': (187, 194, 204),
'BRONZE':(205, 127, 50),
}
PLAYER_COLORS = ["YELLOW", "BLUE", "RED", "GREEN"]
PLATEAU_PLAYERS_COORDINATES = [(0, 0, 0), (308, 0, -90), (352, 308, 180), (0, 352, -270)]
FONT_HEIGHT = [19, 20, 22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 37,
38, 40, 41, 43, 44, 46, 47, 49, 50, 52, 53, 55, 56,
58, 59, 61, 62, 64, 65, 67, 68, 70, 71, 73, 74, 76,
77, 79, 80, 82, 83, 85, 86, 88, 89, 91, 92, 94, 95,
97, 98, 100, 101, 103, 104, 106, 107, 109, 110, 112,
113, 115, 116, 118, 119, 121, 122, 124, 125, 127, 128,
130, 131, 133, 134, 136, 137, 139, 140, 142, 144, 145,
147, 148, 150, 151, 153, 154, 156, 157, 159, 160, 162,
163, 165, 166, 168, 169, 171, 172, 174, 175, 177, 178,
180, 181, 183, 184, 186, 187, 189, 190, 192, 193, 195,
196, 198, 199, 201, 202, 204, 205, 207, 208, 210, 211,
213, 214, 216, 217, 219, 220, 222, 223, 225, 226, 228,
229, 231, 232, 234, 235, 237, 238, 240, 241, 243, 244,
246, 247, 249, 250, 252, 253, 255, 256, 258, 259, 261,
262, 264, 265, 267, 268, 270, 271, 273, 274, 276, 277,
279, 280, 282, 284, 285, 287, 288, 290, 291, 293, 294,
296, 297, 299, 300]
| 1.8125
| 2
|
dash_echarts/examples/line_race.py
|
Covarians/dash-echarts
| 28
|
12776195
|
import json
import datetime, time
from os import path
import dash
import dash_echarts
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
basepath = path.dirname(__file__)
filepath = path.abspath(path.join(basepath+'/static', 'life-expectancy-table.json'))
with open(filepath) as json_file:
raw_data = json.load(json_file)
def get_countries():
return list(set([e[3] for e in raw_data[1:]]))
dataset_with_filters = [
{
"id": f"dataset_{country}",
"fromDatasetId": "dataset_raw",
"transform": {
"type": "filter",
"config": {
"and": [
{"dimension": "Year", "gte": 1950},
{"dimension": "Country", "=": country},
]
},
},
}
for country in get_countries()
]
series_list = [
{
"type": "line",
"datasetId": f"dataset_{country}",
"showSymbol": False,
"name": country,
"endLabel": {
"show": True,
"formatter": "line_race_formatter"
},
"labelLayout": {"moveOverlap": "shiftY"},
"emphasis": {"focus": "series"},
"encode": {
"x": "Year",
"y": "Income",
"label": ["Country", "Income"],
"itemName": "Year",
"tooltip": ["Income"],
},
}
for country in get_countries()
]
option = {
"animationDuration": 10000,
"animation": True,
"dataset": [{"id": "dataset_raw", "source": raw_data}] + dataset_with_filters,
"title": {"text": "Income since 1950"},
"tooltip": {"order": "valueDesc", "trigger": "axis"},
"xAxis": {"type": "category", "nameLocation": "middle"},
"yAxis": {"name": "Income"},
"grid": {"right": 140},
"series": series_list,
}
layout = html.Div([
dash_echarts.DashECharts(
option = option,
id='echarts',
style={
"width": '100%',
"height": '100vh',
},
funs={
"line_race_formatter":
'''
function(params){
return params.value[3] + ': ' + params.value[0];
}
'''
},
fun_values=['line_race_formatter']
),
dbc.Button('restart', color='success',
id='line-race-button',
style={
'position': 'absolute',
'height': 50, 'width': '5%',
'top': '25%', 'right': '15%',
'opacity': 0.8
}
),
])
def main():
app = dash.Dash(
external_stylesheets=[dbc.themes.BOOTSTRAP],
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1"}
],
suppress_callback_exceptions=True,
)
app.layout = layout
@app.callback(
Output('echarts', 'reset_id'),
[Input("line-race-button", "n_clicks")],
)
def update_line_race(n_clicks):
triggered = dash.callback_context.triggered
# value = triggered[0]['value']
prop_id, event = triggered[0]['prop_id'].split('.')
if n_clicks:
if 'line-race-button' in prop_id:
dtime = datetime.datetime.now()
int_time = int(time.mktime(dtime.timetuple()))
return int_time
raise PreventUpdate
app.run_server(debug=True)
if __name__ == '__main__':
main()
| 2.453125
| 2
|
src/promotion/register_model.py
|
AHaryanto/azure-automl-mlops
| 16
|
12776196
|
import os
import sys
from azureml.core.model import Model
sys.path.append(os.getcwd())
import config as f # noqa: E402
model_name = f.params["registered_model_name"]
if f.params['remote_run'] is True:
model_path = os.environ['MODEL_PATH']
elif f.params['remote_run'] is False:
model_path = os.path.join('models', model_name, 'best_model_data')
else:
raise Exception('remote_run unknown value. The value was: ' +
f.params['remote_run'])
print(f'Registering {model_name} from {model_path}')
model = Model.register(
model_path=model_path,
model_name=model_name,
tags={
'industry': 'retail',
'type': 'regression'
},
description="Retail AutoML regression model.",
workspace=f.ws)
print(f'{model.name} successfully registered to {f.ws.name}')
| 2.28125
| 2
|
test/unit/test_resource_manager_v2.py
|
gmzcarlos/platform-services-python-sdk
| 0
|
12776197
|
<reponame>gmzcarlos/platform-services-python-sdk<filename>test/unit/test_resource_manager_v2.py<gh_stars>0
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timezone
from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
import inspect
import json
import pytest
import requests
import responses
from ibm_platform_services.resource_manager_v2 import *
service = ResourceManagerV2(
authenticator=NoAuthAuthenticator()
)
base_url = 'https://resource-controller.cloud.ibm.com/v2'
service.set_service_url(base_url)
##############################################################################
# Start of Service: ResourceGroup
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for list_resource_groups
#-----------------------------------------------------------------------------
class TestListResourceGroups():
#--------------------------------------------------------
# list_resource_groups()
#--------------------------------------------------------
@responses.activate
def test_list_resource_groups_all_params(self):
# Set up mock
url = base_url + '/resource_groups'
mock_response = '{"resources": [{"id": "id", "crn": "crn", "account_id": "account_id", "name": "name", "state": "state", "default": false, "quota_id": "quota_id", "quota_url": "quota_url", "payment_methods_url": "payment_methods_url", "resource_linkages": [{"anyKey": "anyValue"}], "teams_url": "teams_url", "created_at": "2019-01-01T12:00:00", "updated_at": "2019-01-01T12:00:00"}]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
account_id = 'testString'
date = 'testString'
# Invoke method
response = service.list_resource_groups(
account_id=account_id,
date=date,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# Validate query params
query_string = responses.calls[0].request.url.split('?',1)[1]
query_string = requests.utils.unquote(query_string)
assert 'account_id={}'.format(account_id) in query_string
assert 'date={}'.format(date) in query_string
#--------------------------------------------------------
# test_list_resource_groups_required_params()
#--------------------------------------------------------
@responses.activate
def test_list_resource_groups_required_params(self):
# Set up mock
url = base_url + '/resource_groups'
mock_response = '{"resources": [{"id": "id", "crn": "crn", "account_id": "account_id", "name": "name", "state": "state", "default": false, "quota_id": "quota_id", "quota_url": "quota_url", "payment_methods_url": "payment_methods_url", "resource_linkages": [{"anyKey": "anyValue"}], "teams_url": "teams_url", "created_at": "2019-01-01T12:00:00", "updated_at": "2019-01-01T12:00:00"}]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.list_resource_groups()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for create_resource_group
#-----------------------------------------------------------------------------
class TestCreateResourceGroup():
#--------------------------------------------------------
# create_resource_group()
#--------------------------------------------------------
@responses.activate
def test_create_resource_group_all_params(self):
# Set up mock
url = base_url + '/resource_groups'
mock_response = '{"id": "id", "crn": "crn"}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=201)
# Set up parameter values
name = 'test1'
account_id = '25eba2a9-beef-450b-82cf-f5ad5e36c6dd'
# Invoke method
response = service.create_resource_group(
name=name,
account_id=account_id,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 201
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['name'] == 'test1'
assert req_body['account_id'] == '25eba2a9-beef-450b-82cf-f5ad5e36c6dd'
#--------------------------------------------------------
# test_create_resource_group_required_params()
#--------------------------------------------------------
@responses.activate
def test_create_resource_group_required_params(self):
# Set up mock
url = base_url + '/resource_groups'
mock_response = '{"id": "id", "crn": "crn"}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=201)
# Invoke method
response = service.create_resource_group()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 201
#-----------------------------------------------------------------------------
# Test Class for get_resource_group
#-----------------------------------------------------------------------------
class TestGetResourceGroup():
#--------------------------------------------------------
# get_resource_group()
#--------------------------------------------------------
@responses.activate
def test_get_resource_group_all_params(self):
# Set up mock
url = base_url + '/resource_groups/testString'
mock_response = '{"id": "id", "crn": "crn", "account_id": "account_id", "name": "name", "state": "state", "default": false, "quota_id": "quota_id", "quota_url": "quota_url", "payment_methods_url": "payment_methods_url", "resource_linkages": [{"anyKey": "anyValue"}], "teams_url": "teams_url", "created_at": "2019-01-01T12:00:00", "updated_at": "2019-01-01T12:00:00"}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
id = 'testString'
# Invoke method
response = service.get_resource_group(
id,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_get_resource_group_value_error()
#--------------------------------------------------------
@responses.activate
def test_get_resource_group_value_error(self):
# Set up mock
url = base_url + '/resource_groups/testString'
mock_response = '{"id": "id", "crn": "crn", "account_id": "account_id", "name": "name", "state": "state", "default": false, "quota_id": "quota_id", "quota_url": "quota_url", "payment_methods_url": "payment_methods_url", "resource_linkages": [{"anyKey": "anyValue"}], "teams_url": "teams_url", "created_at": "2019-01-01T12:00:00", "updated_at": "2019-01-01T12:00:00"}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
id = 'testString'
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"id": id,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.get_resource_group(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for update_resource_group
#-----------------------------------------------------------------------------
class TestUpdateResourceGroup():
#--------------------------------------------------------
# update_resource_group()
#--------------------------------------------------------
@responses.activate
def test_update_resource_group_all_params(self):
# Set up mock
url = base_url + '/resource_groups/testString'
mock_response = '{"id": "id", "crn": "crn", "account_id": "account_id", "name": "name", "state": "state", "default": false, "quota_id": "quota_id", "quota_url": "quota_url", "payment_methods_url": "payment_methods_url", "resource_linkages": [{"anyKey": "anyValue"}], "teams_url": "teams_url", "created_at": "2019-01-01T12:00:00", "updated_at": "2019-01-01T12:00:00"}'
responses.add(responses.PATCH,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
id = 'testString'
name = 'testString'
state = 'testString'
# Invoke method
response = service.update_resource_group(
id,
name=name,
state=state,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['name'] == 'testString'
assert req_body['state'] == 'testString'
#--------------------------------------------------------
# test_update_resource_group_required_params()
#--------------------------------------------------------
@responses.activate
def test_update_resource_group_required_params(self):
# Set up mock
url = base_url + '/resource_groups/testString'
mock_response = '{"id": "id", "crn": "crn", "account_id": "account_id", "name": "name", "state": "state", "default": false, "quota_id": "quota_id", "quota_url": "quota_url", "payment_methods_url": "payment_methods_url", "resource_linkages": [{"anyKey": "anyValue"}], "teams_url": "teams_url", "created_at": "2019-01-01T12:00:00", "updated_at": "2019-01-01T12:00:00"}'
responses.add(responses.PATCH,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
id = 'testString'
# Invoke method
response = service.update_resource_group(
id,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_update_resource_group_value_error()
#--------------------------------------------------------
@responses.activate
def test_update_resource_group_value_error(self):
# Set up mock
url = base_url + '/resource_groups/testString'
mock_response = '{"id": "id", "crn": "crn", "account_id": "account_id", "name": "name", "state": "state", "default": false, "quota_id": "quota_id", "quota_url": "quota_url", "payment_methods_url": "payment_methods_url", "resource_linkages": [{"anyKey": "anyValue"}], "teams_url": "teams_url", "created_at": "2019-01-01T12:00:00", "updated_at": "2019-01-01T12:00:00"}'
responses.add(responses.PATCH,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
id = 'testString'
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"id": id,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.update_resource_group(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for delete_resource_group
#-----------------------------------------------------------------------------
class TestDeleteResourceGroup():
#--------------------------------------------------------
# delete_resource_group()
#--------------------------------------------------------
@responses.activate
def test_delete_resource_group_all_params(self):
# Set up mock
url = base_url + '/resource_groups/testString'
responses.add(responses.DELETE,
url,
status=204)
# Set up parameter values
id = 'testString'
# Invoke method
response = service.delete_resource_group(
id,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 204
#--------------------------------------------------------
# test_delete_resource_group_value_error()
#--------------------------------------------------------
@responses.activate
def test_delete_resource_group_value_error(self):
# Set up mock
url = base_url + '/resource_groups/testString'
responses.add(responses.DELETE,
url,
status=204)
# Set up parameter values
id = 'testString'
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"id": id,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.delete_resource_group(**req_copy)
# endregion
##############################################################################
# End of Service: ResourceGroup
##############################################################################
##############################################################################
# Start of Service: QuotaDefinition
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for list_quota_definitions
#-----------------------------------------------------------------------------
class TestListQuotaDefinitions():
#--------------------------------------------------------
# list_quota_definitions()
#--------------------------------------------------------
@responses.activate
def test_list_quota_definitions_all_params(self):
# Set up mock
url = base_url + '/quota_definitions'
mock_response = '{"resources": [{"id": "id", "name": "name", "type": "type", "number_of_apps": 14, "number_of_service_instances": 27, "default_number_of_instances_per_lite_plan": 41, "instances_per_app": 17, "instance_memory": "instance_memory", "total_app_memory": "total_app_memory", "vsi_limit": 9, "resource_quotas": [{"_id": "id", "resource_id": "resource_id", "crn": "crn", "limit": 5}], "created_at": "2019-01-01T12:00:00", "updated_at": "2019-01-01T12:00:00"}]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.list_quota_definitions()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for get_quota_definition
#-----------------------------------------------------------------------------
class TestGetQuotaDefinition():
#--------------------------------------------------------
# get_quota_definition()
#--------------------------------------------------------
@responses.activate
def test_get_quota_definition_all_params(self):
# Set up mock
url = base_url + '/quota_definitions/testString'
mock_response = '{"id": "id", "name": "name", "type": "type", "number_of_apps": 14, "number_of_service_instances": 27, "default_number_of_instances_per_lite_plan": 41, "instances_per_app": 17, "instance_memory": "instance_memory", "total_app_memory": "total_app_memory", "vsi_limit": 9, "resource_quotas": [{"_id": "id", "resource_id": "resource_id", "crn": "crn", "limit": 5}], "created_at": "2019-01-01T12:00:00", "updated_at": "2019-01-01T12:00:00"}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
id = 'testString'
# Invoke method
response = service.get_quota_definition(
id,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_get_quota_definition_value_error()
#--------------------------------------------------------
@responses.activate
def test_get_quota_definition_value_error(self):
# Set up mock
url = base_url + '/quota_definitions/testString'
mock_response = '{"id": "id", "name": "name", "type": "type", "number_of_apps": 14, "number_of_service_instances": 27, "default_number_of_instances_per_lite_plan": 41, "instances_per_app": 17, "instance_memory": "instance_memory", "total_app_memory": "total_app_memory", "vsi_limit": 9, "resource_quotas": [{"_id": "id", "resource_id": "resource_id", "crn": "crn", "limit": 5}], "created_at": "2019-01-01T12:00:00", "updated_at": "2019-01-01T12:00:00"}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
id = 'testString'
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"id": id,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.get_quota_definition(**req_copy)
# endregion
##############################################################################
# End of Service: QuotaDefinition
##############################################################################
##############################################################################
# Start of Model Tests
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for QuotaDefinition
#-----------------------------------------------------------------------------
class TestQuotaDefinition():
#--------------------------------------------------------
# Test serialization/deserialization for QuotaDefinition
#--------------------------------------------------------
def test_quota_definition_serialization(self):
# Construct dict forms of any model objects needed in order to build this model.
resource_quota_model = {} # ResourceQuota
resource_quota_model['_id'] = 'testString'
resource_quota_model['resource_id'] = 'testString'
resource_quota_model['crn'] = 'testString'
resource_quota_model['limit'] = 36.0
# Construct a json representation of a QuotaDefinition model
quota_definition_model_json = {}
quota_definition_model_json['id'] = 'testString'
quota_definition_model_json['name'] = 'testString'
quota_definition_model_json['type'] = 'testString'
quota_definition_model_json['number_of_apps'] = 36.0
quota_definition_model_json['number_of_service_instances'] = 36.0
quota_definition_model_json['default_number_of_instances_per_lite_plan'] = 36.0
quota_definition_model_json['instances_per_app'] = 36.0
quota_definition_model_json['instance_memory'] = 'testString'
quota_definition_model_json['total_app_memory'] = 'testString'
quota_definition_model_json['vsi_limit'] = 36.0
quota_definition_model_json['resource_quotas'] = [resource_quota_model]
quota_definition_model_json['created_at'] = '2020-01-28T18:40:40.123456Z'
quota_definition_model_json['updated_at'] = '2020-01-28T18:40:40.123456Z'
# Construct a model instance of QuotaDefinition by calling from_dict on the json representation
quota_definition_model = QuotaDefinition.from_dict(quota_definition_model_json)
assert quota_definition_model != False
# Construct a model instance of QuotaDefinition by calling from_dict on the json representation
quota_definition_model_dict = QuotaDefinition.from_dict(quota_definition_model_json).__dict__
quota_definition_model2 = QuotaDefinition(**quota_definition_model_dict)
# Verify the model instances are equivalent
assert quota_definition_model == quota_definition_model2
# Convert model instance back to dict and verify no loss of data
quota_definition_model_json2 = quota_definition_model.to_dict()
assert quota_definition_model_json2 == quota_definition_model_json
#-----------------------------------------------------------------------------
# Test Class for QuotaDefinitionList
#-----------------------------------------------------------------------------
class TestQuotaDefinitionList():
#--------------------------------------------------------
# Test serialization/deserialization for QuotaDefinitionList
#--------------------------------------------------------
def test_quota_definition_list_serialization(self):
# Construct dict forms of any model objects needed in order to build this model.
resource_quota_model = {} # ResourceQuota
resource_quota_model['_id'] = 'testString'
resource_quota_model['resource_id'] = 'testString'
resource_quota_model['crn'] = 'testString'
resource_quota_model['limit'] = 36.0
quota_definition_model = {} # QuotaDefinition
quota_definition_model['id'] = 'testString'
quota_definition_model['name'] = 'testString'
quota_definition_model['type'] = 'testString'
quota_definition_model['number_of_apps'] = 36.0
quota_definition_model['number_of_service_instances'] = 36.0
quota_definition_model['default_number_of_instances_per_lite_plan'] = 36.0
quota_definition_model['instances_per_app'] = 36.0
quota_definition_model['instance_memory'] = 'testString'
quota_definition_model['total_app_memory'] = 'testString'
quota_definition_model['vsi_limit'] = 36.0
quota_definition_model['resource_quotas'] = [resource_quota_model]
quota_definition_model['created_at'] = '2020-01-28T18:40:40.123456Z'
quota_definition_model['updated_at'] = '2020-01-28T18:40:40.123456Z'
# Construct a json representation of a QuotaDefinitionList model
quota_definition_list_model_json = {}
quota_definition_list_model_json['resources'] = [quota_definition_model]
# Construct a model instance of QuotaDefinitionList by calling from_dict on the json representation
quota_definition_list_model = QuotaDefinitionList.from_dict(quota_definition_list_model_json)
assert quota_definition_list_model != False
# Construct a model instance of QuotaDefinitionList by calling from_dict on the json representation
quota_definition_list_model_dict = QuotaDefinitionList.from_dict(quota_definition_list_model_json).__dict__
quota_definition_list_model2 = QuotaDefinitionList(**quota_definition_list_model_dict)
# Verify the model instances are equivalent
assert quota_definition_list_model == quota_definition_list_model2
# Convert model instance back to dict and verify no loss of data
quota_definition_list_model_json2 = quota_definition_list_model.to_dict()
assert quota_definition_list_model_json2 == quota_definition_list_model_json
#-----------------------------------------------------------------------------
# Test Class for ResCreateResourceGroup
#-----------------------------------------------------------------------------
class TestResCreateResourceGroup():
#--------------------------------------------------------
# Test serialization/deserialization for ResCreateResourceGroup
#--------------------------------------------------------
def test_res_create_resource_group_serialization(self):
# Construct a json representation of a ResCreateResourceGroup model
res_create_resource_group_model_json = {}
res_create_resource_group_model_json['id'] = 'testString'
res_create_resource_group_model_json['crn'] = 'testString'
# Construct a model instance of ResCreateResourceGroup by calling from_dict on the json representation
res_create_resource_group_model = ResCreateResourceGroup.from_dict(res_create_resource_group_model_json)
assert res_create_resource_group_model != False
# Construct a model instance of ResCreateResourceGroup by calling from_dict on the json representation
res_create_resource_group_model_dict = ResCreateResourceGroup.from_dict(res_create_resource_group_model_json).__dict__
res_create_resource_group_model2 = ResCreateResourceGroup(**res_create_resource_group_model_dict)
# Verify the model instances are equivalent
assert res_create_resource_group_model == res_create_resource_group_model2
# Convert model instance back to dict and verify no loss of data
res_create_resource_group_model_json2 = res_create_resource_group_model.to_dict()
assert res_create_resource_group_model_json2 == res_create_resource_group_model_json
#-----------------------------------------------------------------------------
# Test Class for ResourceGroup
#-----------------------------------------------------------------------------
class TestResourceGroup():
#--------------------------------------------------------
# Test serialization/deserialization for ResourceGroup
#--------------------------------------------------------
def test_resource_group_serialization(self):
# Construct a json representation of a ResourceGroup model
resource_group_model_json = {}
resource_group_model_json['id'] = 'testString'
resource_group_model_json['crn'] = 'testString'
resource_group_model_json['account_id'] = 'testString'
resource_group_model_json['name'] = 'testString'
resource_group_model_json['state'] = 'testString'
resource_group_model_json['default'] = True
resource_group_model_json['quota_id'] = 'testString'
resource_group_model_json['quota_url'] = 'testString'
resource_group_model_json['payment_methods_url'] = 'testString'
resource_group_model_json['resource_linkages'] = [{ 'foo': 'bar' }]
resource_group_model_json['teams_url'] = 'testString'
resource_group_model_json['created_at'] = '2020-01-28T18:40:40.123456Z'
resource_group_model_json['updated_at'] = '2020-01-28T18:40:40.123456Z'
# Construct a model instance of ResourceGroup by calling from_dict on the json representation
resource_group_model = ResourceGroup.from_dict(resource_group_model_json)
assert resource_group_model != False
# Construct a model instance of ResourceGroup by calling from_dict on the json representation
resource_group_model_dict = ResourceGroup.from_dict(resource_group_model_json).__dict__
resource_group_model2 = ResourceGroup(**resource_group_model_dict)
# Verify the model instances are equivalent
assert resource_group_model == resource_group_model2
# Convert model instance back to dict and verify no loss of data
resource_group_model_json2 = resource_group_model.to_dict()
assert resource_group_model_json2 == resource_group_model_json
#-----------------------------------------------------------------------------
# Test Class for ResourceGroupList
#-----------------------------------------------------------------------------
class TestResourceGroupList():
#--------------------------------------------------------
# Test serialization/deserialization for ResourceGroupList
#--------------------------------------------------------
def test_resource_group_list_serialization(self):
# Construct dict forms of any model objects needed in order to build this model.
resource_group_model = {} # ResourceGroup
resource_group_model['id'] = 'testString'
resource_group_model['crn'] = 'testString'
resource_group_model['account_id'] = 'testString'
resource_group_model['name'] = 'testString'
resource_group_model['state'] = 'testString'
resource_group_model['default'] = True
resource_group_model['quota_id'] = 'testString'
resource_group_model['quota_url'] = 'testString'
resource_group_model['payment_methods_url'] = 'testString'
resource_group_model['resource_linkages'] = [{ 'foo': 'bar' }]
resource_group_model['teams_url'] = 'testString'
resource_group_model['created_at'] = '2020-01-28T18:40:40.123456Z'
resource_group_model['updated_at'] = '2020-01-28T18:40:40.123456Z'
# Construct a json representation of a ResourceGroupList model
resource_group_list_model_json = {}
resource_group_list_model_json['resources'] = [resource_group_model]
# Construct a model instance of ResourceGroupList by calling from_dict on the json representation
resource_group_list_model = ResourceGroupList.from_dict(resource_group_list_model_json)
assert resource_group_list_model != False
# Construct a model instance of ResourceGroupList by calling from_dict on the json representation
resource_group_list_model_dict = ResourceGroupList.from_dict(resource_group_list_model_json).__dict__
resource_group_list_model2 = ResourceGroupList(**resource_group_list_model_dict)
# Verify the model instances are equivalent
assert resource_group_list_model == resource_group_list_model2
# Convert model instance back to dict and verify no loss of data
resource_group_list_model_json2 = resource_group_list_model.to_dict()
assert resource_group_list_model_json2 == resource_group_list_model_json
#-----------------------------------------------------------------------------
# Test Class for ResourceQuota
#-----------------------------------------------------------------------------
class TestResourceQuota():
#--------------------------------------------------------
# Test serialization/deserialization for ResourceQuota
#--------------------------------------------------------
def test_resource_quota_serialization(self):
# Construct a json representation of a ResourceQuota model
resource_quota_model_json = {}
resource_quota_model_json['_id'] = 'testString'
resource_quota_model_json['resource_id'] = 'testString'
resource_quota_model_json['crn'] = 'testString'
resource_quota_model_json['limit'] = 36.0
# Construct a model instance of ResourceQuota by calling from_dict on the json representation
resource_quota_model = ResourceQuota.from_dict(resource_quota_model_json)
assert resource_quota_model != False
# Construct a model instance of ResourceQuota by calling from_dict on the json representation
resource_quota_model_dict = ResourceQuota.from_dict(resource_quota_model_json).__dict__
resource_quota_model2 = ResourceQuota(**resource_quota_model_dict)
# Verify the model instances are equivalent
assert resource_quota_model == resource_quota_model2
# Convert model instance back to dict and verify no loss of data
resource_quota_model_json2 = resource_quota_model.to_dict()
assert resource_quota_model_json2 == resource_quota_model_json
# endregion
##############################################################################
# End of Model Tests
##############################################################################
| 2.09375
| 2
|
code_sender/rstudio/__init__.py
|
fredcallaway/SendCode
| 177
|
12776198
|
import sublime
import os
from ..clipboard import clipboard
plat = sublime.platform()
if plat == "osx":
from ..applescript import osascript
RSTUDIOAPPLESCRIPT = os.path.join(os.path.dirname(__file__), "rstudio.applescript")
def send_to_rstudio(cmd):
osascript(RSTUDIOAPPLESCRIPT, cmd)
elif plat == "windows":
from .. import winauto
def send_to_rstudio(cmd, from_view):
rid = winauto.find_rstudio()
clipboard.set_clipboard(cmd)
winauto.paste_to_rstudio(rid, from_view=from_view)
clipboard.reset_clipboard()
elif plat == "linux":
from ..xdotool import xdotool
def send_to_rstudio(cmd):
wid = xdotool("search", "--onlyvisible", "--class", "rstudio")
if wid:
wid = wid.decode("utf-8").strip().split("\n")[-1]
clipboard.set_clipboard(cmd)
xdotool("key", "--window", wid, "ctrl+v")
xdotool("key", "--window", wid, "--clearmodifiers", "Return")
clipboard.reset_clipboard()
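# Usage sketch (illustrative only): on macOS and Linux the platform-specific
# function above is called as, e.g., send_to_rstudio('summary(mtcars)');
# the Windows variant additionally expects a `from_view` argument supplied by
# the calling command, so the same call does not apply there unchanged.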
| 2.40625
| 2
|
webvep/webvep_api/urls.py
|
IanVermes/vep_api
| 0
|
12776199
|
from django.urls import path, include
from rest_framework.urlpatterns import format_suffix_patterns
from webvep_api.views import ping_view, vcf_view, vep_view
urlpatterns = [path("ping/", ping_view), path("vcf/", vcf_view), path("vep/", vep_view)]
urlpatterns = format_suffix_patterns(urlpatterns)
| 1.890625
| 2
|
leetcode/0944_delete_columns_to_make_sorted.py
|
jacquerie/leetcode
| 3
|
12776200
|
# -*- coding: utf-8 -*-
class Solution:
def minDeletionSize(self, A):
return len([col for col in zip(*A) if col != tuple(sorted(col))])
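# How the one-liner works: zip(*A) transposes the equal-length strings into
# columns; a column must be deleted exactly when its characters are not already
# in non-decreasing order, i.e. when the tuple differs from its sorted copy.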
if __name__ == '__main__':
solution = Solution()
assert 1 == solution.minDeletionSize(['cba', 'daf', 'ghi'])
assert 0 == solution.minDeletionSize(['a', 'b'])
assert 3 == solution.minDeletionSize(['zyx', 'wvu', 'tsr'])
| 2.9375
| 3
|
Experiments/partialopt.py
|
robot0321/learnit_exp
| 0
|
12776201
|
<filename>Experiments/partialopt.py
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '5'
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE'] = 'false'
checkpoint_dir = 'checkpoints/checkpoints_shapene/'
from functools import partial
import jax # jax==0.1.67 / jaxlib==0.1.55
from jax import random, grad, jit, vmap, flatten_util, nn
from jax.experimental import optix
from jax.config import config
import jax.numpy as np
import haiku as hk
from livelossplot import PlotLosses
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm as tqdm
import os
import cv2
import imageio
import json
import pickle
rng = jax.random.PRNGKey(42)
SPLITS_FILE = '/mnt/disk2/JYChung/dataset/metaNeRF/shapenet/car_splits.json' # Update
DATA_PATH = '/mnt/disk2/JYChung/dataset/metaNeRF/shapenet/cars/02958343' # Update
def load_train_ex(train_ex_dir, key=None):
with open(os.path.join('/', train_ex_dir, 'transforms.json'), 'r') as fp:
meta = json.load(fp)
camera_angle_x = float(meta['camera_angle_x'])
imgs = []
bkgds = []
poses = []
for idx in np.arange(len(meta['frames'])):
frame = meta['frames'][idx]
fname = os.path.join(train_ex_dir, os.path.basename(frame['file_path']) + '.png')
imgs.append(imageio.imread(fname))
poses.append(np.array(frame['transform_matrix']))
H, W = imgs[0].shape[:2]
focal = .5 * W / np.tan(.5 * camera_angle_x)
imgs = (np.array(imgs) / 255.).astype(np.float32)
imgs = imgs[...,:3] * imgs[...,-1:] + 1-imgs[...,-1:]
poses = np.array(poses).astype(np.float32)
return imgs, poses, [H,W,focal]
with open(SPLITS_FILE, "r") as read_file:
splits = json.load(read_file)
train_exs = [os.path.join(DATA_PATH, d) for d in sorted(splits['train'])]
test_exs =[os.path.join(DATA_PATH, d) for d in sorted(splits['test'])]
val_exs = [os.path.join(DATA_PATH, d) for d in sorted(splits['val'])]
def get_rays(H, W, focal, c2w):
i, j = np.meshgrid(np.arange(W), np.arange(H), indexing='xy')
dirs = np.stack([(i-W*.5)/focal, -(j-H*.5)/focal, -np.ones_like(i)], -1)
rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3,:3], -1)
rays_o = np.broadcast_to(c2w[:3,-1], rays_d.shape)
return np.stack([rays_o, rays_d], 0)
get_rays = jit(get_rays, static_argnums=(0, 1))
get_ray_batch = jax.vmap(get_rays, in_axes=[None, None, None, 0], out_axes=1)
def render_rays(rnd_input, model, params, rays, N_samples, rand=False, allret=False):
rays_o, rays_d = rays
near = 2
far = 6
# Compute 3D query points
z_vals = np.linspace(near, far, N_samples)
if rand:
z_vals += random.uniform(rnd_input, shape=list(rays_o.shape[:-1]) + [N_samples]) * (far-near)/N_samples
pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None]
# Run network
pts_flat = np.reshape(pts, [-1,3])
raw = model.apply(params, pts_flat)
raw = np.reshape(raw, list(pts.shape[:-1]) + [4])
# Compute opacities and colors
rgb, sigma_a = raw[...,:3], raw[...,3]
sigma_a = jax.nn.relu(sigma_a)
rgb = jax.nn.sigmoid(rgb)
# Do volume rendering
dists = np.concatenate([z_vals[..., 1:] - z_vals[..., :-1], np.broadcast_to([1e-3], z_vals[...,:1].shape)], -1)
alpha = 1. - np.exp(-sigma_a * dists)
trans = np.minimum(1., 1. - alpha + 1e-10)
trans = np.concatenate([np.ones_like(trans[...,:1]), trans[...,:-1]], -1)
weights = alpha * np.cumprod(trans, -1)
rgb_map = np.sum(weights[...,None] * rgb, -2)
acc_map = np.sum(weights, -1)
rgb_map = rgb_map + (1.-acc_map[...,None]) # white background
if not allret:
return rgb_map
depth_map = np.sum(weights * z_vals, -1)
return rgb_map, depth_map, acc_map
def render_fn_inner(rnd_input, model, params, rays, rand, allret, N_samples):
return render_rays(rnd_input, model, params, rays, N_samples=N_samples, rand=rand, allret=allret)
render_fn_inner = jit(render_fn_inner, static_argnums=(1, 4, 5, 6))
def render_fn(rnd_input, model, params, rays, N_samples, rand):
chunk = 5
for i in range(0, rays.shape[1], chunk):
out = render_fn_inner(rnd_input, model, params, rays[:,i:i+chunk], rand, True, N_samples)
if i==0:
rets = out
else:
rets = [np.concatenate([a, b], 0) for a, b in zip(rets, out)]
return rets
class Model(hk.Module):
def __init__(self):
super().__init__()
self.width = 256
self.depth = 6
self.use_viewdirs = False
def __call__(self, coords, view_dirs=None):
sh = coords.shape
if self.use_viewdirs:
viewdirs = view_dirs  # assumed fix: use the view directions passed in (this branch is inactive while use_viewdirs is False)
viewdirs = np.repeat(viewdirs[...,None,:], coords.shape[-2], axis=-2)
viewdirs /= np.linalg.norm(viewdirs, axis=-1, keepdims=True)
viewdirs = np.reshape(viewdirs, (-1,3))
viewdirs = hk.Linear(output_size=self.width//2)(viewdirs)
viewdirs = jax.nn.relu(viewdirs)
coords = np.reshape(coords, [-1,3])
x = np.concatenate([np.concatenate([np.sin(coords*(2**i)), np.cos(coords*(2**i))], axis=-1) for i in np.linspace(0,8,20)], axis=-1)
for _ in range(self.depth-1):
x = hk.Linear(output_size=self.width)(x)
x = jax.nn.relu(x)
if self.use_viewdirs:
density = hk.Linear(output_size=1)(x)
x = np.concatenate([x,viewdirs], axis=-1)
x = hk.Linear(output_size=self.width)(x)
x = jax.nn.relu(x)
rgb = hk.Linear(output_size=3)(x)
out = np.concatenate([density, rgb], axis=-1)
else:
out = hk.Linear(output_size=4)(x)
out = np.reshape(out, list(sh[:-1]) + [4])
return out
plt_groups = {'Train PSNR':[], 'Test PSNR':[]}
plotlosses_model = PlotLosses(groups=plt_groups)
exp_name = 'chairs'
max_iters = 100001
batch_size = 128
num_views = 25
test_time_views = 1
inner_update_steps = 32
inner_step_size = .5
lr = 5e-4
N_samples = 128
model = hk.without_apply_rng(hk.transform(lambda x, y=None: Model()(x, y)))
params = model.init(rng, np.ones((1,3)))
test_inner_steps = 128*128 * 4 // batch_size
mse_fn = jit(lambda x, y: np.mean((x - y)**2))
psnr_fn = jit(lambda x, y: -10 * np.log10(mse_fn(x, y)))
# update inner model network weights with one step of SGD
@jit
def single_step(rng, images, rays, params, idx):
def sgd(param, update):
return param - inner_step_size * update
def loss_fn(params, rng_inputs):
idx = random.randint(rng_inputs[0], shape=(batch_size,), minval=0, maxval=images.shape[0])
image_sub = images[idx,:]
rays_sub = rays[:,idx,:]
g = render_rays(rng_inputs[1], model, params, rays_sub, N_samples, rand=True)
return mse_fn(g, image_sub)
rng, *rng_inputs = jax.random.split(rng, 3)
loss, grad = jax.value_and_grad(loss_fn)(params, rng_inputs)
params = jax.tree_multimap(sgd, params, grad)
return rng, params, loss
# update inner model network weights inner_update_steps number of times
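# (Note: only the 'model/linear_5' entry, which corresponds to the network's
# final output layer under the configuration above, is copied back from each
# inner step, so the inner loop adapts just that layer while the remaining
# meta parameters stay fixed.)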
def update_network_weights(rng, images, rays, params):
params_mutsave = hk.data_structures.to_mutable_dict(params)
for _ in range(inner_update_steps):
idx = random.randint(rng, shape=(batch_size,), minval=0, maxval=images.shape[0])
rng, new_params, loss = single_step(rng, images, rays, params, idx)
new_params = hk.data_structures.to_mutable_dict(new_params)
params_mutsave['model/linear_5'] = new_params['model/linear_5']
params = hk.data_structures.to_immutable_dict(params_mutsave)
return rng, params, loss
# update meta model weights based on trained inner model weights
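# (This is a Reptile-style meta-update: the difference between the current meta
# parameters and the inner-loop-adapted parameters is used as a pseudo-gradient
# and handed to the optix Adam optimizer, opt, created later in the script.)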
def update_model(rng, params, opt_state, images, rays):
rng, new_params, model_loss = update_network_weights(rng, images, rays, params)
def calc_grad(params, new_params):
return params - new_params
model_grad = jax.tree_multimap(calc_grad, params, new_params)
updates, opt_state = opt.update(model_grad, opt_state)
params = optix.apply_updates(params, updates)
return rng, params, opt_state, model_loss
# train meta model inner_steps number of times to evaluate meta parameters
def update_network_weights_test(rng, images, rays, params, inner_steps):
lowest_loss = 1e7
params_mutsave = hk.data_structures.to_mutable_dict(params)
for _ in range(inner_steps):
idx = random.randint(rng, shape=(batch_size,), minval=0, maxval=images.shape[0])
rng, new_params, loss = single_step(rng, images, rays, params, idx)
new_params = hk.data_structures.to_mutable_dict(new_params)
params_mutsave['model/linear_5'] = new_params['model/linear_5']
params = hk.data_structures.to_immutable_dict(params_mutsave)
if loss < lowest_loss:
lowest_loss = loss
return rng, params, lowest_loss
opt = optix.adam(lr)
opt_state = opt.init(params)
plt_groups['Train PSNR'].append(exp_name+f'_train')
plt_groups['Test PSNR'].append(exp_name+f'_test')
step = 0
train_psnrs = []
rng = jax.random.PRNGKey(0)
train_size = len(train_exs)
test_psnrs = []
for step in range(max_iters):
try:
rng, rng_input = jax.random.split(rng)
img_idx = random.randint(rng, shape=(1,), minval=0, maxval=train_size)
images, poses, hwf = load_train_ex(train_exs[img_idx[0]], rng)
images = images[:num_views]
poses = poses[:num_views]
except:
continue
images = np.reshape(images, (-1,3))
rays = get_ray_batch(hwf[0], hwf[1], hwf[2], poses)
rays = np.reshape(rays, (2,-1,3))
rng, params, opt_state, loss = update_model(rng, params, opt_state, images, rays)
train_psnrs.append(-10 * np.log10(loss))
if step == 0:
plotlosses_model.update({exp_name+'_train':np.mean(np.array(train_psnrs))}, current_step=step)
train_psnrs = []
if step % 1000 == 0 and step != 0:
plotlosses_model.update({exp_name+'_train':np.mean(np.array(train_psnrs))}, current_step=step)
train_psnr = float(np.mean(np.array(train_psnrs)))
train_psnrs = []
test_psnr = []
for i in range(5):
images, poses, hwf = load_train_ex(val_exs[0], rng)
test_holdout_images, test_images = np.split(images, [25], axis=0)
test_holdout_poses, test_poses = np.split(poses, [25], axis=0)
test_rays = get_ray_batch(hwf[0], hwf[1], hwf[2], test_poses)
test_images = test_images[:test_time_views]
test_rays = test_rays[:test_time_views]
test_images = np.reshape(test_images, (-1,3))
test_rays = np.reshape(test_rays, (2,-1,3))
rng, test_params, test_inner_loss = update_network_weights_test(rng, test_images, test_rays, params, test_inner_steps)
test_holdout_rays = get_rays(hwf[0], hwf[1], hwf[2], test_holdout_poses[0])
test_result = np.clip(render_fn(rng, model, test_params, test_holdout_rays, N_samples, rand=False)[0], 0, 1)
test_psnr.append(psnr_fn(test_holdout_images[0], test_result))
test_psnr = np.mean(np.array(test_psnr))
test_psnrs.append(test_psnr)
plotlosses_model.update({exp_name+'_test':test_psnr}, current_step=step)
plotlosses_model.send()
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.imshow(test_holdout_images[0])
plt.subplot(1, 2, 2)
plt.imshow(test_result)
plt.show()
if step % 10000 == 0 and step != 0:
pickle.dump(params, open(f"{checkpoint_dir}/{exp_name}_{step}.pkl", 'wb'))
| 1.78125
| 2
|
opensdraw/lcad_language/lexerParser.py
|
HazenBabcock/openldraw
| 9
|
12776202
|
<gh_stars>1-10
#!/usr/bin/env python
#
# Lexer, Parser and abstract syntax tree model for lcad. Much of the
# inspiration for this comes from the lexer / parser in the hy
# project:
#
# https://github.com/hylang/hy/tree/master/hy/lex
#
# Hazen 07/14
#
from functools import wraps
# Lexer.
from rply import LexerGenerator
lg = LexerGenerator()
lg.add('LPAREN', r'\(')
lg.add('RPAREN', r'\)')
#lg.add('STRING', r'(".*?"|\'.*?\')')
lg.add('STRING', r'("[^"]*"|\'[^\']*\')')
lg.add('IDENTIFIER', r'[^()\[\]{}\'"\s;]+')
lg.ignore(r';.*(?=\r|\n|$)')
lg.ignore(r'\s+')
lexer = lg.build()
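# For example, an input such as (print "hello" 1 2.5) lexes to the token stream
# LPAREN, IDENTIFIER, STRING, IDENTIFIER, IDENTIFIER, RPAREN; numeric literals
# are emitted as IDENTIFIER tokens and only converted to LCadInteger/LCadFloat
# by the parser's identifier production further down.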
# Model.
class LCadObject(object):
pass
class LCadConstant(LCadObject):
pass
class LCadExpression(LCadObject):
def __init__(self, expression):
self.initialized = False
self.lenv = None
self.simple_type_name = "Expression"
self.value = expression
class LCadFloat(LCadConstant):
def __init__(self, value):
self.simple_type_name = "Float"
self.value = float(value)
class LCadInteger(LCadConstant):
def __init__(self, value):
self.simple_type_name = "Integer"
self.value = int(value)
class LCadString(LCadConstant):
def __init__(self, value):
self.simple_type_name = "String"
self.value = str(value[1:-1])
class LCadSymbol(LCadObject):
def __init__(self, value):
self.lenv = None
self.simple_type_name = "Symbol"
self.value = str(value)
# Parser.
from rply import ParserGenerator
def set_boundaries(fun):
@wraps(fun)
def wrapped(state, p):
start = p[0].source_pos
end = p[-1].source_pos
ret = fun(state, p)
ret.filename = state.filename
ret.start_line = start.lineno
ret.start_column = start.colno
if start is not end:
ret.end_line = end.lineno
ret.end_column = end.colno
else:
ret.end_line = start.lineno
ret.end_column = start.colno + len(p[0].value)
return ret
return wrapped
pg = ParserGenerator(
[rule.name for rule in lexer.rules],
cache_id="lcad_parser"
)
@pg.production('main : list')
def main(state, p):
return p[0]
@pg.production('list : term list')
def list(state, p):
return [p[0]] + p[1]
@pg.production('list : term')
def single_list(state, p):
return [p[0]]
@pg.production('term : string')
@pg.production('term : identifier')
@pg.production('term : parens')
def term(state, p):
return p[0]
@pg.production("string : STRING")
@set_boundaries
def string(state, p):
return LCadString(p[0].getstr())
@pg.production("identifier : IDENTIFIER")
@set_boundaries
def identifier(state, p):
text = p[0].getstr()
try:
return LCadInteger(text)
except ValueError:
pass
try:
return LCadFloat(text)
except ValueError:
pass
return LCadSymbol(text)
@pg.production('parens : LPAREN list RPAREN')
@set_boundaries
def parens(state, p):
return LCadExpression(p[1])
@pg.production('parens : LPAREN RPAREN')
@set_boundaries
def empty_parens(state, p):
return LCadExpression([])
@pg.error
def error_handler(state, token):
if (token.gettokentype() == '$end'):
raise Exception("Unexpected EOF. Empty file? Unbalanced Parenthesis?")
raise ValueError("Ran into a {!s} where it wasn't expected at row {!s} column {!s}".format(token.gettokentype(),
token.source_pos.lineno,
token.source_pos.colno))
parser = pg.build()
class ParserState(object):
def __init__(self, filename):
self.filename = filename
def parse(string, filename = "na"):
return parser.parse(lexer.lex(string),
state = ParserState(filename))
# For testing purposes.
if (__name__ == '__main__'):
import sys
if (len(sys.argv) != 2):
print("usage: <file to parse>")
exit()
with open(sys.argv[1]) as fp:
print(parse(fp.read(), sys.argv[1]))
#
# The MIT License
#
# Copyright (c) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| 2.71875
| 3
|
plugins/tests/papermilltests/test_spark_notebook.py
|
slai/flytekit
| 1
|
12776203
|
import os
from flytekitplugins.papermill import NotebookTask
from flytekitplugins.spark import Spark
from flytekit import kwtypes
from flytekit.types.schema import FlyteSchema
def _get_nb_path(name: str, suffix: str = "", abs: bool = True, ext: str = ".ipynb") -> str:
"""
Creates a correct path no matter where the test is run from
"""
_local_path = os.path.dirname(__file__)
path = f"{_local_path}/testdata/{name}{suffix}{ext}"
return os.path.abspath(path) if abs else path
def test_notebook_task_simple():
nb_name = "nb-spark"
nb = NotebookTask(
name="test",
notebook_path=_get_nb_path(nb_name, abs=False),
outputs=kwtypes(df=FlyteSchema[kwtypes(name=str, age=int)]),
task_config=Spark(spark_conf={"x": "y"}),
)
n, out, render = nb.execute()
assert nb.python_interface.outputs.keys() == {"df", "out_nb", "out_rendered_nb"}
assert nb.output_notebook_path == out == _get_nb_path(nb_name, suffix="-out")
assert nb.rendered_output_path == render == _get_nb_path(nb_name, suffix="-out", ext=".html")
| 2
| 2
|
docker/api/swarm.py
|
rsumner31/docker-py
| 0
|
12776204
|
import logging
from six.moves import http_client
from .. import utils
log = logging.getLogger(__name__)
class SwarmApiMixin(object):
def create_swarm_spec(self, *args, **kwargs):
return utils.SwarmSpec(*args, **kwargs)
@utils.minimum_version('1.24')
def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
force_new_cluster=False, swarm_spec=None):
url = self._url('/swarm/init')
if swarm_spec is not None and not isinstance(swarm_spec, dict):
raise TypeError('swarm_spec must be a dictionary')
data = {
'AdvertiseAddr': advertise_addr,
'ListenAddr': listen_addr,
'ForceNewCluster': force_new_cluster,
'Spec': swarm_spec,
}
response = self._post_json(url, data=data)
self._raise_for_status(response)
return True
@utils.minimum_version('1.24')
def inspect_swarm(self):
url = self._url('/swarm')
return self._result(self._get(url), True)
@utils.check_resource
@utils.minimum_version('1.24')
def inspect_node(self, node_id):
url = self._url('/nodes/{0}', node_id)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
def join_swarm(self, remote_addrs, join_token, listen_addr=None,
advertise_addr=None):
data = {
"RemoteAddrs": remote_addrs,
"ListenAddr": listen_addr,
"JoinToken": join_token,
"AdvertiseAddr": advertise_addr,
}
url = self._url('/swarm/join')
response = self._post_json(url, data=data)
self._raise_for_status(response)
return True
@utils.minimum_version('1.24')
def leave_swarm(self, force=False):
url = self._url('/swarm/leave')
response = self._post(url, params={'force': force})
# Ignore "this node is not part of a swarm" error
if force and response.status_code == http_client.NOT_ACCEPTABLE:
return True
self._raise_for_status(response)
return True
@utils.minimum_version('1.24')
def nodes(self, filters=None):
url = self._url('/nodes')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.24')
def update_node(self, node_id, version, node_spec=None):
url = self._url('/nodes/{0}/update?version={1}', node_id, str(version))
res = self._post_json(url, data=node_spec)
self._raise_for_status(res)
return True
@utils.minimum_version('1.24')
def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False,
rotate_manager_token=False):
url = self._url('/swarm/update')
response = self._post_json(url, data=swarm_spec, params={
'rotateWorkerToken': rotate_worker_token,
'rotateManagerToken': rotate_manager_token,
'version': version
})
self._raise_for_status(response)
return True
| 2.046875
| 2
|
rel-eng/custom/custom.py
|
ehelms/foreman-packaging
| 0
|
12776205
|
<reponame>ehelms/foreman-packaging
# Copyright (c) 2008-2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
from datetime import datetime
import json
import os
import os.path
import re
import shutil
import urllib
from zipfile import ZipFile
from tito.builder.fetch import SourceStrategy
from tito.common import error_out, debug, run_command
class JenkinsSourceStrategy(SourceStrategy):
"""
Downloads the source files from Jenkins, from a job that produces them as
artifacts. It follows the version number present in the filename and
adds a timestamp & SHA if available from the Jenkins job start reasons.
Designed to be used for nightly or pull request builds only.
It first copies source files from git, then downloads the Jenkins
artifacts over the top, so they're merged (patches etc can then be stored
in git).
Takes the following arguments:
jenkins_url: base URL of Jenkins ("http://ci.theforeman.org")
jenkins_job: name of job ("test_develop")
jenkins_job_id: job number or alias ("123", "lastSuccessfulBuild")
"""
def fetch(self):
url_base = self.builder.args['jenkins_url']
job_name = self.builder.args['jenkins_job']
if 'jenkins_job_id' in self.builder.args:
job_id = self.builder.args['jenkins_job_id']
else:
job_id = "lastSuccessfulBuild"
job_url_base = "%s/job/%s/%s" % (url_base, job_name, job_id)
json_url = "%s/api/json" % job_url_base
job_info = json.loads(urllib.urlopen(json_url).read())
if "number" in job_info:
job_id = job_info["number"]
if "runs" in job_info:
run_idx = 0
for idx, run in enumerate(job_info["runs"]):
if run["number"] == job_id:
run_idx = idx
break
job_url_base = job_info["runs"][run_idx]["url"]
elif "url" in job_info:
job_url_base = job_info["url"]
url = "%s/artifact/*zip*/archive.zip" % job_url_base
debug("Fetching from %s" % url)
(zip_path, zip_headers) = urllib.urlretrieve(url)
zip_file = ZipFile(zip_path, 'r')
try:
zip_file.extractall(self.builder.rpmbuild_sourcedir)
finally:
zip_file.close()
# Copy the live spec from our starting location. Unlike most builders,
# we are not using a copy from a past git commit.
self.spec_file = os.path.join(self.builder.rpmbuild_sourcedir,
'%s.spec' % self.builder.project_name)
shutil.copyfile(
os.path.join(self.builder.start_dir, '%s.spec' %
self.builder.project_name),
self.spec_file)
for s in os.listdir(self.builder.start_dir):
if os.path.exists(os.path.join(self.builder.start_dir, s)):
shutil.copyfile(
os.path.join(self.builder.start_dir, s),
os.path.join(self.builder.rpmbuild_sourcedir, os.path.basename(s)))
print(" %s.spec" % self.builder.project_name)
i = 0
replacements = []
src_files = run_command("find %s -type f" %
os.path.join(self.builder.rpmbuild_sourcedir, 'archive')).split("\n")
for s in src_files:
base_name = os.path.basename(s)
debug("Downloaded file %s" % base_name)
if ".tar" not in base_name and ".gem" not in base_name:
debug("Skipping %s as it isn't a source archive" % base_name)
continue
dest_filepath = os.path.join(self.builder.rpmbuild_sourcedir,
base_name)
shutil.move(s, dest_filepath)
self.sources.append(dest_filepath)
# Add a line to replace in the spec for each source:
source_regex = re.compile("^(source%s:\s*)(.+)$" % i, re.IGNORECASE)
new_line = "Source%s: %s\n" % (i, base_name)
replacements.append((source_regex, new_line))
i += 1
# Replace version in spec:
version_regex = re.compile("^(version:\s*)(.+)$", re.IGNORECASE)
self.version = self._get_version()
print("Building version: %s" % self.version)
replacements.append((version_regex, "Version: %s\n" % self.version))
self.replace_in_spec(replacements)
rel_date = datetime.utcnow().strftime("%Y%m%d%H%M")
gitrev = ""
for action in job_info["actions"]:
if "lastBuiltRevision" in action:
gitrev = "git%s" % action["lastBuiltRevision"]["SHA1"][0:7]
self.release = rel_date + gitrev
print("Building release: %s" % self.release)
run_command("sed -i '/^Release:/ s/%%/.%s%%/' %s" % (self.release, self.spec_file))
def _get_version(self):
"""
Get the version from the builder.
Sources are configured at this point.
"""
# Assuming source0 is a tar.gz we can extract a version from:
base_name = os.path.basename(self.sources[0])
debug("Extracting version from: %s" % base_name)
# Example filename: tito-0.4.18.tar.gz:
simple_version_re = re.compile(".*-(.*).(tar.gz|tgz|zip|tar.bz2|gem)")
match = re.search(simple_version_re, base_name)
if match:
version = match.group(1)
else:
error_out("Unable to determine version from file: %s" % base_name)
return version
def replace_in_spec(self, replacements):
"""
Replace lines in the spec file using the given replacements.
Replacements are a tuple of a regex to look for, and a new line to
substitute in when the regex matches.
Replaces all lines with one pass through the file.
"""
in_f = open(self.spec_file, 'r')
out_f = open(self.spec_file + ".new", 'w')
for line in in_f.readlines():
for line_regex, new_line in replacements:
match = re.match(line_regex, line)
if match:
line = new_line
out_f.write(line)
in_f.close()
out_f.close()
shutil.move(self.spec_file + ".new", self.spec_file)
| 2.109375
| 2
|
scripts/launch_image.py
|
rf972/lisa-qemu
| 2
|
12776206
|
<reponame>rf972/lisa-qemu<filename>scripts/launch_image.py
#
# Copyright 2020 Linaro
#
# Launches an image created via build_image.py.
#
import build_image
if __name__ == "__main__":
inst_obj = build_image.BuildImage(ssh=True)
inst_obj.run()
| 1.546875
| 2
|
custom_auth/custom_login/my_user_manager.py
|
farhadmpr/DjangoMobileLogin
| 0
|
12776207
|
<reponame>farhadmpr/DjangoMobileLogin<filename>custom_auth/custom_login/my_user_manager.py
from django.contrib.auth.base_user import BaseUserManager
class MyUserManager(BaseUserManager):
def create_user(self, mobile, password=None, **other_fields):  # password default of None is assumed here
if not mobile:
raise ValueError('mobile is required')
user = self.model(mobile=mobile, **other_fields)
user.set_password(password)
user.save()
return user
def create_superuser(self, mobile, password=None, **other_fields):  # password default of None is assumed here
other_fields.setdefault('is_staff', True)
other_fields.setdefault('is_superuser', True)
other_fields.setdefault('is_active', True)
if other_fields.get('is_staff') is not True:
raise ValueError('superuser must have is_staff')
if other_fields.get('is_superuser') is not True:
raise ValueError('superuser must have is_superuser')
return self.create_user(mobile, password, **other_fields)
| 2.421875
| 2
|
test/test_products.py
|
sdtaylor/pyUpackQA
| 2
|
12776208
|
import pytest
import numpy as np
from unpackqa import (unpack_to_array,
unpack_to_dict,
list_products,
list_qa_flags,
list_sensors,
)
from unpackqa.tools.validation import (product_info_has_required_entries,
flag_info_is_non_empty_dict,
flag_info_bit_list_non_empty,
flag_info_bits_non_neg_ints,
flag_info_flag_is_str,
bits_are_only_used_once,
bits_are_reasonable,
bits_do_not_exceed_bit_size,
max_value_matches_num_bits,
bits_are_ordered,
)
from unpackqa.tools.validation import InvalidProductSpec
from unpackqa.product_loader import all_products
"""
Validating product definitions.
"""
qa_array = np.array([[8,8,8],
[16,16,16],
[255,255,255]])
all_product_identifiers = list_products(sensor='all')
def test_product_ids_are_unique():
"""No duplicate product identifers"""
assert len(all_product_identifiers) == len(set(all_product_identifiers))
def test_list_sensors():
"""Should have some output"""
assert len(list_sensors()) > 0
def test_list_products():
"""Each sensor should have some products"""
sensors = list_sensors()
sensor_has_products = [len(list_products(sensor=s))>0 for s in sensors]
assert all(sensor_has_products)
def test_list_products_invalid_sensor():
"""Should raise error with unknown sensor"""
with pytest.raises(ValueError):
list_products(sensor='asdf')
def test_list_flags_invalid_product():
"""Should raise error with unknown product ID"""
with pytest.raises(ValueError):
list_qa_flags(product = 'asdf')
@pytest.mark.parametrize('product', all_product_identifiers)
def test_qa_flag_list(product):
"""Lists of flags should be available for each product"""
flags = list_qa_flags(product = product)
assert len(flags) > 0
@pytest.mark.parametrize('product', all_product_identifiers)
def test_product_info_is_dict(product):
"""product_info entry should be dictonary"""
product_info = all_products[product]
assert isinstance(product_info, dict)
"""
Several tests for all products configured within the package.
Within product_info the 'flag_info' entry
should be a dictionary with key value pairs:
'flag name':[bit0,bit1,..]
Where flag name is a str, with a value of a list. List entries
are non-negative ints.
These same tests are used to validate user-passed custom specifications, so
instead of essentially writing a new test function for each, just iterate
over them and create some informative output if 1 or more fails.
"""
test_list = [('product info does not have required entries',product_info_has_required_entries),
('flag_info is not dictionary, or is empty', flag_info_is_non_empty_dict),
('flag_info has empty lists',flag_info_bit_list_non_empty),
('flag_info has negative and/or non-int values',flag_info_bits_non_neg_ints),
('flag_info keys are not strings',flag_info_flag_is_str),
('duplicate bits detected',bits_are_only_used_once),
('bits are larger than needed for even a 32 bit int', bits_are_reasonable),
('largest bit is greater than num_bits',bits_do_not_exceed_bit_size),
('max_value is >= 2**num_bits',max_value_matches_num_bits),
('bits are out of order',bits_are_ordered),
]
@pytest.mark.parametrize('product', all_product_identifiers)
def test_product_info(product):
product_info = all_products[product]
failed_tests = []
tests_failed = False
for test_message, test_function in test_list:
try:
test_function(product_info)
except InvalidProductSpec:
tests_failed = True
failed_tests.append(test_message)
if tests_failed:
error_message = '{} failed tests for {}\n'.format(len(failed_tests), product)
error_message = error_message + '\n'.join(['{}. {}'.format(i+1,m) for i,m in enumerate(failed_tests)])
assert False, error_message
| 2.578125
| 3
|
DESAFIO-012.py
|
Lukones/Evolution-Projetos-Python
| 0
|
12776209
|
# A program that computes the hypotenuse #
from math import hypot
co = float(input('Length of the opposite leg: '))
ca = float(input('Length of the adjacent leg: '))
print(f'The hypotenuse will measure: {hypot(co, ca):.2f}')
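# For example, with legs 3.0 and 4.0 the program prints a hypotenuse of 5.00,
# since hypot(3.0, 4.0) == 5.0.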
| 3.703125
| 4
|
cryptography/the_var/__init__.py
|
JASTYN/pythonmaster
| 3
|
12776210
|
def the_var(var):
n = [ord(i) - 96 for i in var.split("+")]
return sum(n)
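# For example, the_var("a+b+c") returns 6: the string is split on "+" and each
# lowercase letter is mapped to its alphabet position via ord(letter) - 96
# (1 + 2 + 3).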
| 2.65625
| 3
|
view/web.py
|
jvpersuhn/Certo
| 0
|
12776211
|
import sys
sys.path.append("C:/Users/900143/Desktop/Certo")
from controller.squad_controller import BackController, FrontController,SGBDController , SquadController, BackEnd, FrontEnd, SGBD, Squad
from flask import Flask, render_template, request, redirect
app = Flask(__name__)
bc = BackController()
fc = FrontController()
sc = SGBDController()
sqc = SquadController()
@app.route('/')
def inicio():
return render_template('index.html')
@app.route('/listar')
def listar():
l = sqc.select_all()
return render_template('listar.html', lista = l)
@app.route('/excluir')
def excluir():
id = int(request.args['id'])
sqc.delete(sqc.select_byId(id))
return redirect('/listar')
@app.route('/cadastrar')
def cadastrar():
if 'id' in request.args:
squad = sqc.select_byId(request.args['id'])
else:
squad = Squad(0,'','','')
return render_template('cadastrar.html', squad = squad)
@app.route('/salvar')
def salvar():
id = int(request.args['id'])
nome = request.args['nome']
desc = request.args['desc']
qtdPessoas = request.args['numPessoas']
squad = Squad(id,nome,desc,qtdPessoas)
idF = request.args['idF']
if idF:
idF = int(idF)
else:
idF = 0
nomeF = request.args['nomeF']
descF = request.args['descF']
versaoF = request.args['versaoF']
front = FrontEnd(idF,nomeF,descF,versaoF)
idB = request.args['idB']
if idB:
idB = int(idB)
else:
idB = 0
nomeB = request.args['nomeB']
descB = request.args['descB']
versaoB = request.args['versaoB']
back = BackEnd(idB, nomeB, descB, versaoB)
idS = request.args['idS']
if idS:
idS = int(idS)
else:
idS = 0
nomeS = request.args['nomeS']
descS = request.args['descS']
versaoS = request.args['versaoS']
sgbd = SGBD(idS,nomeS,descS,versaoS)
if id == 0:
squad.id_linguagemFront = fc.insert(front)
squad.id_linguagemBack = bc.insert(back)
squad.id_sgbd = sc.insert(sgbd)
sqc.insert(squad)
else:
squad.linguagemFront = front
squad.linguagemBack = back
squad.sgbd = sgbd
squad.id_linguagemFront = idF
squad.id_linguagemBack = idB
squad.id_sgbd = idS
sqc.update(squad)
return redirect('/listar')
app.run(debug=True)
| 2.28125
| 2
|
numerical/splines/__init__.py
|
shaxov/scikit-numerical
| 3
|
12776212
|
<gh_stars>1-10
from .definitions import (
linear,
schoenberg,
)
__all__ = ['linear', 'schoenberg']
| 1.0625
| 1
|
collipa/controllers/reply.py
|
ywmmmw/collipa
| 99
|
12776213
|
<reponame>ywmmmw/collipa
# coding: utf-8
import tornado.web
from ._base import BaseHandler
from pony import orm
from .user import EmailMixin
from collipa.models import Topic, Reply
from collipa.forms import ReplyForm
from collipa.libs.decorators import require_permission
from collipa import config
class HomeHandler(BaseHandler, EmailMixin):
@orm.db_session
def get(self, reply_id):
reply_id = int(reply_id)
reply = Reply.get(id=reply_id)
if not reply:
raise tornado.web.HTTPError(404)
return self.render("reply/index.html", reply=reply)
@orm.db_session
@tornado.web.authenticated
def put(self, reply_id):
reply_id = int(reply_id)
reply = Reply.get(id=reply_id)
action = self.get_argument('action', None)
user = self.current_user
if not reply:
raise tornado.web.HTTPError(404)
result = {}
if not action:
result = {'status': 'info', 'message':
'Missing action parameter'}
if action == 'up':
if reply.user_id != user.id:
result = user.up(reply_id=reply.id)
else:
result = {'status': 'info', 'message':
'You cannot vote on your own reply'}
if action == 'down':
if reply.user_id != user.id:
result = user.down(reply_id=reply.id)
else:
result = {'status': 'info', 'message':
'You cannot vote on your own reply'}
if action == 'collect':
result = user.collect(reply_id=reply.id)
if action == 'thank':
result = user.thank(reply_id=reply.id)
if action == 'report':
result = user.report(reply_id=reply.id)
return self.send_result(result)
@orm.db_session
@tornado.web.authenticated
def delete(self, reply_id):
if not self.current_user.is_admin:
return self.redirect_next_url()
reply = Reply.get(id=reply_id)
if not reply:
return self.redirect_next_url()
subject = "评论删除通知 - " + config.site_name
template = (
'<p>Dear <strong>{nickname}</strong>,</p>'
'<p>Your reply under the topic <strong><a href="{topic_url}">"{topic_title}"</a></strong> '
'was removed for violating the community guidelines. We have backed it up for you by email; the backup is below:</p>'
'<div class="content">{content}</div>'
)
content = template.format(
nickname=reply.author.nickname,
topic_url=config.site_url + reply.topic.url,
topic_title=reply.topic.title,
content=reply.content
)
self.send_email(self, reply.author.email, subject, content)
reply.delete()
result = {'status': 'success', 'message': 'Deleted successfully'}
return self.send_result(result)
class CreateHandler(BaseHandler):
@orm.db_session
@tornado.web.authenticated
@require_permission
def post(self):
page = int(self.get_argument('page', 1))
category = self.get_argument('category', 'index')
topic_id = int(self.get_argument('topic_id', 0))
topic = Topic.get(id=topic_id)
if not topic_id:
result = {'status': 'error', 'message': 'Topic not found; cannot create a reply'}
if self.is_ajax:
return self.write(result)
else:
self.flash_message(**result)
return self.redirect_next_url()
user = self.current_user
form = ReplyForm(self.request.arguments)
if form.validate():
reply = form.save(user=user, topic=topic)
reply.put_notifier()
result = {'status': 'success', 'message': 'Reply created successfully',
'content': reply.content, 'name': reply.author.name,
'nickname': reply.author.nickname, 'author_avatar':
reply.author.get_avatar(size=48), 'author_url':
reply.author.url, 'author_name': reply.author.name,
'author_nickname': reply.author.nickname,
'reply_url': reply.url, 'created': reply.created,
'id': reply.id, 'floor': reply.floor}
if self.is_ajax:
return self.write(result)
self.flash_message(**result)
return self.redirect(topic.url)
reply_count = topic.reply_count
page_count = (reply_count + config.reply_paged - 1) // config.reply_paged
replies = topic.get_replies(page=page, category=category)
data = dict(form=form, topic=topic, replies=replies, category=category, page=page, page_count=page_count,
url=topic.url)
return self.send_result_and_render(form.result, "topic/index.html", data)
class EditHandler(BaseHandler):
@orm.db_session
@tornado.web.authenticated
@require_permission
def get(self, reply_id):
reply = Reply.get(id=reply_id)
if not reply or (reply.author != self.current_user and not self.current_user.is_admin):
return self.redirect_next_url()
form = ReplyForm(content=reply.content)
return self.render("reply/edit.html", form=form, reply=reply)
@orm.db_session
@tornado.web.authenticated
@require_permission
def post(self, reply_id):
reply = Reply.get(id=reply_id)
if not reply or (reply.author != self.current_user and not self.current_user.is_admin):
return self.redirect_next_url()
user = self.current_user
form = ReplyForm(self.request.arguments)
if form.validate():
reply = form.save(user=user, topic=reply.topic, reply=reply)
reply.put_notifier()
result = {'status': 'success', 'message': '评论修改成功',
'reply_url': reply.url}
return self.send_result(result, reply.url)
data = dict(form=form, reply=reply)
return self.send_result_and_render(form.result, "reply/edit.html", data)
class HistoryHandler(BaseHandler):
@orm.db_session
def get(self, reply_id):
reply = Reply.get(id=reply_id)
if not reply:
return self.redirect_next_url()
if not reply.histories:
return self.redirect(reply.topic.url)
return self.render("reply/history.html", reply=reply, histories=reply.histories)
| 2
| 2
|
src/main/python/monocyte/handler/rds2.py
|
claytonbrown/aws-monocyte
| 20
|
12776214
|
<gh_stars>10-100
# Monocyte - Search and Destroy unwanted AWS Resources relentlessly.
# Copyright 2015 Immobilien Scout GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from boto import rds2
from monocyte.handler import Resource, Handler
SKIPPING_CREATION_STATEMENT = "Currently in creation. Skipping."
SKIPPING_AUTOGENERATED_STATEMENT = "Not a manually created Snapshot. Skipping."
SKIPPING_DELETION_STATEMENT = "Deletion already in progress. Skipping."
DELETION_STATEMENT = "Initiating deletion sequence for %s."
CREATION_STATUS = "creating"
AUTOMATED_STATUS = "automated"
DELETION_STATUS = "deleting"
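# The SKIPPING_* statements above are emitted as warnings when a resource is
# not eligible for deletion; the *_STATUS constants are compared against the
# status/type fields of the RDS API responses in the delete() methods below.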
class Instance(Handler):
def fetch_region_names(self):
return [region.name for region in rds2.regions()]
def fetch_unwanted_resources(self):
for region_name in self.region_names:
connection = rds2.connect_to_region(region_name)
resources = connection.describe_db_instances() or []
for resource in resources["DescribeDBInstancesResponse"]["DescribeDBInstancesResult"]["DBInstances"]:
resource_wrapper = Resource(resource=resource,
resource_type=self.resource_type,
resource_id=resource["DBInstanceIdentifier"],
creation_date=resource["InstanceCreateTime"],
region=region_name)
if resource['DBInstanceIdentifier'] in self.ignored_resources:
self.logger.info('IGNORE ' + self.to_string(resource_wrapper))
continue
yield resource_wrapper
def to_string(self, resource):
return "Database Instance found in {region}, ".format(**vars(resource)) + \
"with name {DBInstanceIdentifier}, with status {DBInstanceStatus}".format(**resource.wrapped)
def delete(self, resource):
if self.dry_run:
return
if resource.wrapped["DBInstanceStatus"] == DELETION_STATUS:
warnings.warn(Warning(SKIPPING_DELETION_STATEMENT))
self.logger.info(DELETION_STATEMENT % resource.wrapped["DBInstanceIdentifier"])
connection = rds2.connect_to_region(resource.region)
connection.delete_db_instance(resource.wrapped["DBInstanceIdentifier"], skip_final_snapshot=True)
class Snapshot(Handler):
def fetch_region_names(self):
return [region.name for region in rds2.regions()]
def fetch_unwanted_resources(self):
for region_name in self.region_names:
connection = rds2.connect_to_region(region_name)
resources = connection.describe_db_snapshots() or []
for resource in resources["DescribeDBSnapshotsResponse"]["DescribeDBSnapshotsResult"]["DBSnapshots"]:
resource_wrapper = Resource(resource=resource,
resource_type=self.resource_type,
resource_id=resource["DBSnapshotIdentifier"],
creation_date=resource["SnapshotCreateTime"],
region=region_name)
if resource['DBSnapshotIdentifier'] in self.ignored_resources:
self.logger.info('IGNORE ' + self.to_string(resource_wrapper))
continue
yield resource_wrapper
def to_string(self, resource):
return "Database Snapshot found in {region}, ".format(**vars(resource)) + \
"with name {DBSnapshotIdentifier}, with status {Status}".format(**resource.wrapped)
def delete(self, resource):
if self.dry_run:
return
if resource.wrapped["Status"] == DELETION_STATUS:
warnings.warn(Warning(SKIPPING_DELETION_STATEMENT))
if resource.wrapped["Status"] == CREATION_STATUS:
warnings.warn(Warning(SKIPPING_CREATION_STATEMENT))
if resource.wrapped["SnapshotType"] == AUTOMATED_STATUS:
warnings.warn(Warning(SKIPPING_AUTOGENERATED_STATEMENT))
self.logger.info(DELETION_STATEMENT % resource.wrapped["DBSnapshotIdentifier"])
connection = rds2.connect_to_region(resource.region)
connection.delete_db_snapshot(resource.wrapped["DBSnapshotIdentifier"])
| 2.015625
| 2
|
server/main.py
|
gtluu/timsconvert
| 3
|
12776215
|
# main.py
from app import app
import views
if __name__ == '__main__':
    app.run(host='0.0.0.0', port='5000')
| 1.578125
| 2
|
lino/core/site.py
|
khchine5/lino
| 1
|
12776216
|
# -*- coding: UTF-8 -*-
# Copyright 2009-2018 <NAME>.
# License: BSD, see LICENSE for more details.
# doctest lino/core/site.py
"""
Defines the :class:`Site` class. For an overview see
:doc:`/dev/site` and :doc:`/dev/plugins`.
.. doctest init:
>>> import lino
>>> lino.startup('lino.projects.std.settings_test')
"""
from __future__ import unicode_literals, print_function
from builtins import map
from builtins import str
import six
import os
import sys
from os.path import normpath, dirname, join, isdir, relpath, exists
import inspect
import datetime
import warnings
import collections
from importlib import import_module
from six.moves.urllib.parse import urlencode
from unipath import Path
from atelier.utils import AttrDict, date_offset, tuple_py2
from atelier import rstgen
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import get_language
# from django.core.exceptions import ImproperlyConfigured
from lino.core.plugin import Plugin
from lino import assert_django_code, DJANGO_DEFAULT_LANGUAGE
from etgen.html import E
from lino.core.utils import simplify_name, get_models
# from lino.utils.html2text import html2text
# from html2text import html2text
from lino.core.exceptions import ChangedAPI
# from .roles import SiteUser
from html2text import HTML2Text
# _INSTANCES = []
def html2text(html):
text_maker = HTML2Text()
text_maker.unicode_snob = True
return text_maker.handle(html)
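# Rough example of the wrapper above (exact output may differ slightly between
# html2text versions):
#   html2text("<p>Hello <b>world</b></p>")  ->  "Hello **world**\n\n"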
PRINT_EMAIL = """send email
Sender: {sender}
To: {recipients}
Subject: {subject}
{body}
"""
LanguageInfo = collections.namedtuple(
'LanguageInfo', ('django_code', 'name', 'index', 'suffix'))
"""
A named tuple with four fields:
- `django_code` -- how Django calls this language
- `name` -- how Lino calls it
- `index` -- the position in the :attr:`Site.languages` tuple
- `suffix` -- the suffix to append to babel fields for this language
"""
def to_locale(language):
"""Simplified copy of `django.utils.translation.to_locale`, but we
need it while the `settings` module is being loaded, i.e. we
cannot yet import django.utils.translation. Also we don't need
the to_lower argument.
"""
p = language.find('-')
if p >= 0:
# Get correct locale for sr-latn
if len(language[p + 1:]) > 2:
return language[:p].lower() + '_' \
+ language[p + 1].upper() + language[p + 2:].lower()
return language[:p].lower() + '_' + language[p + 1:].upper()
else:
return language.lower()
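# Examples of the function above (derived directly from its code):
#   to_locale('de')      -> 'de'
#   to_locale('pt-br')   -> 'pt_BR'
#   to_locale('sr-latn') -> 'sr_Latn'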
def class2str(cl):
return cl.__module__ + '.' + cl.__name__
gettext_noop = lambda s: s
PLUGIN_CONFIGS = {}
def configure_plugin(app_label, **kwargs):
"""
Set one or several configuration settings of the given plugin
*before* the :setting:`SITE` has been instantiated. See
:doc:`/dev/plugins`.
"""
# if PLUGIN_CONFIGS is None:
# raise ImproperlyConfigured(
# "Tried to call configure_plugin after Site instantiation")
cfg = PLUGIN_CONFIGS.setdefault(app_label, {})
cfg.update(kwargs)
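# Usage sketch for configure_plugin() (the plugin label and setting name below
# are hypothetical and ``MySite`` stands for your Site subclass; only the
# calling pattern is grounded in the code above). This must run in your
# settings.py *before* the Site is instantiated:
#
#   from lino.core.site import configure_plugin
#   configure_plugin('myplugin', some_setting='some value')
#   SITE = MySite(globals())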
# from django.db.models.fields import NOT_PROVIDED
class NOT_PROVIDED(object):
pass
class Site(object):
"""
The base class for a Lino application. This class is designed to
be overridden by both application developers and local site
administrators. Your :setting:`SITE` setting is expected to
contain an instance of a subclass of this.
.. attribute:: plugins
An :class:`AttrDict <atelier.utils.AttrDict>` with one entry
for each installed plugin, mapping the `app_label` of every
plugin to the corresponding :class:`lino.core.plugin.Plugin`
instance.
This attribute is automatically filled by Lino and available as
:attr:`dd.plugins <lino.api.dd>` already before Django starts to
import :xfile:`models.py` modules.
.. attribute:: modules
Old name for :attr:`models`. Deprecated.
.. attribute:: models
An :class:`AttrDict <atelier.utils.AttrDict>` which maps every
installed `app_label` to the corresponding :xfile:`models.py`
module object.
This is also available as the shortcut :attr:`rt.models
<lino.api.rt.models>`.
See :doc:`/dev/plugins`
.. attribute:: LANGUAGE_CHOICES
A tuple in the format expected by Django's `choices
<https://docs.djangoproject.com/en/1.11/ref/models/fields/#choices>`__
attribute, used e.g. by :class:`LanguageField
    <lino.utils.mldbc.fields.LanguageField>`. Its content is
    automatically populated from :attr:`languages` and application
    code should not change its value.
"""
auto_fit_column_widths = True
"""
The default value for the :attr:`auto_fit_column_widths
<lino.core.tables.AbstractTable.auto_fit_column_widths>` of tables
in this application.
"""
confdirs = None
"""
This attribute is available only after site startup. See
:mod:`lino.utils.config`.
"""
kernel = None
"""
This attribute is available only after :meth:`startup`.
See :mod:`lino.core.kernel`.
"""
# ui = None
# """
# Deprecated alias for :attr:`kernel`.
# """
readonly = False
"""Setting this to `True` turns this site in a readonly site. This
means that :setting:`DATABASES` must point to the
:setting:`DATABASES` of some other (non-readonly) site, and that
:manage:`initdb` will do nothing.
"""
history_aware_logging = False
"""Whether to log a message :message:`Started %s (using %s) --> PID
%s` at process startup (and a message :message:`Done PID %s` at
termination).
These two messages are interesting e.g. when a system
administrator wants to know which processes have been running on a
given production site, but they are usually disturbing during
development.
    TODO: Replace this setting by an approach using a second logger
`lino.archive`. Also tidy up usage of
:mod:`lino.utils.dblogger`. To be meditated.
See also :ref:`lino.logging`.
"""
the_demo_date = None
"""A hard-coded constant date to be used as reference by :meth:`today`
and :meth:`demo_date`. For example many demo databases have this
set because certain tests rely on a constant reference date.
"""
title = None
"""The title of this web site to appear in the browser window. If
this is None, Lino will use :attr:`verbose_name` as default value.
"""
hoster_status_url = "http://bugs.saffre-rumma.net/"
"""This is mentioned in :xfile:`500.html`.
"""
verbose_name = "yet another Lino application"
"""The name of this application, to be displayed to end-users at
different places.
Note the difference between :attr:`title` and
:attr:`verbose_name`:
- :attr:`title` may be None, :attr:`verbose_name` not.
- :attr:`title` is used by the
:srcref:`index.html <lino/modlib/extjs/config/extjs/index.html>` for
:mod:`lino.modlib.extjs`.
- :attr:`title` and :attr:`verbose_name` are used by
:xfile:`admin_main.html` to generate the fragments "Welcome to the
**title** site" and "We are running **verbose_name** version
**x.y**" (the latter only if :attr:`version` is set).
- :meth:`site_version` uses :attr:`verbose_name` (not :attr:`title`)
IOW, the :attr:`title` is rather for usage by local system
administrators, while the :attr:`verbose_name` is rather for usage
by application developers.
"""
version = None
"The version number."
url = None
"""
The URL of the website that describes this application.
Used e.g. in a :menuselection:`Site --> About` dialog box.
"""
# server_url = None
server_url = "http://127.0.0.1:8000/"
"""The "official" URL used by "normal" users when accessing this Lino
site. This is used by templates such as the email sent by
:class:`lino.modlib.notify.Message`
"""
device_type = 'desktop'
"""
The default device type used on this server. Should be one of
``'desktop'``, ``'tablet'`` or ``'mobile'``.
This is used by :class:`DeviceTypeMiddleware
<lino.core.auth.middleware.DeviceTypeMiddleware>`.
"""
obj2text_template = "*{0}*"
"""The format template to use when rendering a ForeignKey as plain
text.
Note: reSTructuredText uses *italic* and **bold**. Changing this
can cause lots of trivial failures in test suites. It is also
used by :mod:`lino.modlib.notify` when generating the mail body.
"""
make_missing_dirs = True
"""Set this to `False` if you don't want Lino to automatically create
missing directories when needed. If this is False, Lino will
raise an exception in these cases, asking you to create it
yourself.
"""
userdocs_prefix = ''
project_name = None
"""A nickname for this project. This is used to set :attr:`cache_dir`
and therefore should be unique for all Lino projects in a given
development environment.
If this is None, Lino will find a default value by splitting
:attr:`project_dir` and taking the last part (or the second-last
    if the last part is 'settings').
"""
cache_dir = None
"""The directory where Lino will create temporary data for this
project, including the :xfile:`media` directory and the
:xfile:`default.db` file.
This is either the same as :attr:`project_dir` or (if
:envvar:`LINO_CACHE_ROOT` is set), will be set to
:envvar:`LINO_CACHE_ROOT` + :attr:`project_name`.
"""
project_dir = None
"""Full path to your local project directory.
Lino automatically sets this to the directory of the
:xfile:`settings.py` file (or however your
:envvar:`DJANGO_SETTINGS_MODULE` is named).
It is recommended to not override this variable.
Note that when using a *settings package*, :attr:`project_dir`
points to the :file:`settings` subdir of what we would intuitively
consider the project directory.
If the :attr:`project_dir` contains a :xfile:`config` directory,
this will be added to the config search path.
"""
languages = None
"""The language distribution used on this site. It has its own
chapter :doc:`/dev/languages` in the Developers Guide.
"""
not_found_msg = '(not installed)'
django_settings = None
"""This is a reference to the `globals()` dictionary of your
:xfile:`settings.py` file (the one you provided when instantiating
the Site object).
"""
startup_time = None
"""
The time when this Site has been instantiated,
in other words the startup time of this Django process.
Don't modify this.
"""
plugins = None
models = None
top_level_menus = [
("master", _("Master")),
("main", None),
("reports", _("Reports")),
("config", _("Configure")),
("explorer", _("Explorer")),
("site", _("Site")),
]
"The list of top-level menu items. See :meth:`setup_menu`."
# is_local_project_dir = False
# """Contains `True` if this is a "local" project. For local projects,
# Lino checks for local fixtures and config directories and adds
# them to the default settings.
# This is automatically set when a :class:`Site` is instantiated.
# """
ignore_model_errors = False
"""Not yet sure whether this is needed. Maybe when generating
documentation.
"""
loading_from_dump = False
"""Whether the process is currently loading data from a Python dump.
When loading from a python dump, application code should not
generate certain automatic data because that data is also part of
the dump.
This is normally `False`, but a Python dump created with
:manage:`dump2py` explicitly calls :meth:`install_migrations`
which sets this to `True`.
Application code should not change this setting except for certain
special test cases.
"""
# see docs/settings.rst
migration_class = None
"""
If you maintain a data migrator module for your application,
specify its name here.
See :ref:`datamig` and/or :func:`lino.utils.dpy.install_migrations`.
TODO: rename this to `migrator_class`
"""
migration_module = None
"""The full Python path of a module to use for all migrations.
"""
hidden_languages = None
"""A string with a space-separated list of django codes of languages
that should be hidden.
:ref:`welfare` uses this because the demo database has 4
    languages, but `nl` is currently hidden by default.
"""
BABEL_LANGS = tuple()
partners_app_label = 'contacts'
"""
Temporary setting, see :ref:`polymorphism`.
"""
# three constants used by lino_xl.lib.workflows:
max_state_value_length = 20
max_action_name_length = 50
max_actor_name_length = 100
trusted_templates = False
"""
Set this to True if you are sure that the users of your site won't try to
misuse Jinja's capabilities.
"""
allow_duplicate_cities = False
"""In a default configuration (when :attr:`allow_duplicate_cities` is
False), Lino declares a UNIQUE clause for :class:`Places
<lino_xl.lib.countries.models.Places>` to make sure that your
    database never contains duplicate cities. This behaviour might
    be disturbing, e.g. when importing legacy data that did not have this
restriction. Set it to True to remove the UNIQUE clause.
Changing this setting might affect your database structure and
thus require a :doc:`/topics/datamig` if your application uses
:mod:`lino_xl.lib.countries`.
"""
uid = 'myuid'
"""A universal identifier for this Site. This is needed when
synchronizing with CalDAV server. Locally created calendar
components in remote calendars will get a UID based on this
parameter, using ``"%s@%s" % (self.pk, settings.SITE.kernel)``.
The default value is ``'myuid'``, and you should certainly
override this on a production server that uses remote calendars.
"""
project_model = None
"""
Optionally set this to the full name of a model used as "central
project" in your application. Models which inherit from
:class:`ProjectRelated <lino.mixins.ProjectRelated>` then have an
additional ForeignKey to this model.
"""
user_model = None
"""
If :mod:`lino.modlib.users` is installed, this holds a reference to
the model class which represents a user of the system. Default
value is `None`, meaning that this application has no user
management. See also :meth:`set_user_model`
"""
social_auth_backends = None
"""
A list of backends for `Python Social Auth
<https://github.com/python-social-auth>`__ (PSA).
Having this at a value different from `None` means that this site
uses authentication via third-party providers.
Sites which use this must also install PSA into their
environment::
$ pip install social-auth-app-django
Depending on the backend you must also add credentials in your
local :xfile:`settings.py` file, e.g.::
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = \
'1234567890-a1b2c3d4e5.apps.googleusercontent.com'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'SH6da...'
A working example is in the :mod:`lino_book.projects.team` demo
project.
"""
use_security_features = False
"""
Set this to `True` in order to activate a selection of security
features to protect against miscellaneous attacks. You can do
this only if your application is being served via HTTPS. The idea
    is to provide reasonable security out of the box.
This will activate some middleware and set some security-related
settings. This is a new feature and not much tested. As a hoster
you may prefer adding security manually using your established
standards (regarding security Lino does not add anything to plain
Django). See also :doc:`/admin/security`.
"""
use_ipdict = False
"""
Whether this site uses :mod:`lino.modlib.ipdict`.
Note that :mod:`lino.modlib.ipdict` unlike normal plugins should
not be installed by adding it to your :meth:`get_installed_apps`
method but by setting this attribute. This approach has the
advantage of also setting :setting:`MIDDLEWARE_CLASSES`
automatically.
"""
# use_auth = True
# """Whether this site uses authentication. If this is set to `False`,
# all requests are anonymous (as if :attr:`user_model` was `None`).
# This is ignored when :attr:`user_model` is `None`.
# """
auth_middleware = None
"""
Override used Authorisation middlewares with supplied tuple of
middleware class names.
If None, use logic described in :doc:`/topics/auth`
"""
workflows_module = None
"""The full Python path of the **workflows module** to be used on
this site.
"""
user_types_module = None
"""The name of the **user types module** to be used on this site.
Default value is `None`, meaning that permission control is
inactive: everything is permitted. But note that
:meth:`set_user_model` sets it to :mod:`lino.core.user_types`.
This must be set if you want to enable permission control based on
user roles defined in :attr:`Permittable.required_roles
<lino.core.permissions.Permittable.required_roles>` and
:attr:`UserType.role
<lino.modlib.users.choicelists.UserType.role>`.
If set, Lino will import the named module during site startup. It
is expected to define application-specific user roles (if
necessary) and to fill the :class:`UserTypes
<lino.modlib.users.choicelists.UserTypes>` choicelist.
For example::
class Site(Site):
user_types_module = 'myapp.user_types'
Examples of such user types modules are
:mod:`lino.core.user_types` and
:mod:`lino_noi.lib.noi.user_types`.
"""
custom_layouts_module = None
"""The full Python path of the **custom layouts module** used on this
site.
"""
legacy_data_path = None
"""
Used by custom fixtures that import data from some legacy
database.
"""
propvalue_max_length = 200
"""
Used by :mod:`lino_xl.lib.properties`.
"""
show_internal_field_names = True
"""Whether the internal field names should be visible. ExtUI
implements this by prepending them to the tooltip, which means
that :attr:`use_quicktips` must also be `True`. Default is
`True`.
"""
never_build_site_cache = False
"""Set this to `True` if you want that Lino never (re)builds the site
cache, even when asked. This can be useful on a development
server when you are debugging directly on the generated
:xfile:`lino*.js`. Or for certain unit test cases.
"""
build_js_cache_on_startup = False
"""Whether the Javascript cache files should be built on startup for
all user profiles and languages.
On a production server this should be `True` for best performance,
but often this is not necessary, so default value is `False`,
which means that each file is built upon need (when a first
request comes in).
You can also set it to `None`, which means that Lino decides
automatically during startup: it becomes `False` if either
:func:`lino.core.utils.is_devserver` returns True or
    :setting:`DEBUG` is set.
.. envvar:: LINO_BUILD_CACHE_ON_STARTUP
If a variable of that name is set, then Lino will override the
code value and set :attr:`build_js_cache_on_startup` to True.
"""
keep_erroneous_cache_files = False
"""When some exception occurs during
:meth:`lino.core.kernel.Kernel.make_cache_file`, Lino usually
removes the partly generated file to make sure that it will try to
generate it again (and report the same error message) for every
    subsequent request.
Set this to `True` if you need to see the partly generated cache
file. **Don't forget to remove this** when you have inspected the
file and fixed the reason of the exception, because if this is
    `True` and the next exception occurs (which will happen sooner or
    later), then all subsequent requests will usually leave the
    user with a blank screen and (if they notice it) a message
:message:`TypeError: Lino.main_menu is undefined` in their
Javascript console.
"""
use_websockets = False
"""Set this to `True` in order to activate use of websockets and
channels.
This setting is currently used only by :mod:`lino.modlib.notify`,
so its setting is ignored if your application doesn't use that
plugin.
If you use :mod:`lino.modlib.notify` and change this setting to
True, then you need to install `django-channels`::
pip install channels
"""
use_java = True
"""
A site-wide option to disable everything that needs Java. Note
that it is up to the plugins which include Java applications to
respect this setting. Usage example is :mod:`lino_xl.lib.beid`.
"""
use_silk_icons = False
"""
If this is `True`, certain Lino plugins use the deprecated `silk
icons library <http://www.famfamfam.com/lab/icons/silk/>`__ for
representing workflows.
The recommended but not yet fully implemented "modern" style is to
use unicode symbols instead of icons.
"""
use_new_unicode_symbols = False
"""Whether to use "new" unicode symbols (e.g. from the `Miscellaneous
Symbols and Pictographs
<https://en.wikipedia.org/wiki/Miscellaneous_Symbols_and_Pictographs>`__
block) which are not yet implemented in all fonts.
Currently used by :mod:`lino_noi.lib.noi.workflows`
"""
use_experimental_features = False
"""Whether to include "experimental features". Deprecated.
lino_xl.lib.inspect
"""
site_config_defaults = {}
"""
Default values to be used when creating the :attr:`site_config`.
Usage example::
site_config_defaults = dict(default_build_method='appypdf')
"""
# default_build_method = "appypdf"
# default_build_method = "appyodt"
# default_build_method = "wkhtmltopdf"
default_build_method = None
"""The default build method to use when rendering printable documents.
This is the last default value, used only when
:attr:`default_build_method
<lino.modlib.system.models.SiteConfig.default_build_method>` in
:class:`SiteConfig <lino.modlib.system.models.SiteConfig>` is
empty.
"""
is_demo_site = True
"""When this is `True`, then this site runs in "demo" mode. "Demo
mode" means:
- the welcome text for anonymous users says "This demo site has X
users, they all have "<PASSWORD>" as password", followed by a list of
available usernames.
Default value is `True`. On a production site you will of course
set this to `False`.
See also :attr:`demo_fixtures` and :attr:`the_demo_date`.
"""
demo_email = '<EMAIL>'
# demo_fixtures = ['std', 'demo', 'demo2']
demo_fixtures = []
"""
The list of fixtures to be loaded by the :manage:`prep`
command.
"""
use_spinner = False # doesn't work. leave this to False
#~ django_admin_prefix = '/django'
django_admin_prefix = None
"""
The prefix to use for Django admin URLs.
Leave this unchanged as long as :srcref:`docs/tickets/70` is not solved.
"""
time_format_extjs = 'H:i'
"""
Format (in ExtJS syntax) to use for displaying dates to the user.
If you change this setting, you also need to override :meth:`parse_time`.
"""
alt_time_formats_extjs = "g:ia|g:iA|g:i a|g:i A|h:i|g:i|H:i|ga|ha|gA|h a|g a|g A|gi|hi" \
"|gia|hia|g|H|gi a|hi a|giA|hiA|gi A|hi A" \
"|Hi|g.ia|g.iA|g.i a|g.i A|h.i|g.i|H.i"
"""Alternative time entry formats accepted by ExtJS time widgets.
ExtJS default is:
"g:ia|g:iA|g:i a|g:i A|h:i|g:i|H:i|ga|ha|gA|h a|g a|g A|gi|hi|gia|hia|g|H|gi a|hi a|giA|hiA|gi A|hi A"
Lino's extended default also includes:
"Hi" (1900) and "g.ia|g.iA|g.i a|g.i A|h.i|g.i|H.i" (Using . in replacement of ":")
"""
date_format_extjs = 'd.m.Y'
"""Format (in ExtJS syntax) to use for displaying dates to the user.
If you change this setting, you also need to override :meth:`parse_date`.
"""
alt_date_formats_extjs = 'd/m/Y|Y-m-d'
"""Alternative date entry formats accepted by ExtJS Date widgets.
"""
default_number_format_extjs = '0,000.00/i'
# default_number_format_extjs = '0,00/i'
uppercase_last_name = False
"""
Whether last name of persons should (by default) be printed with
uppercase letters. See :mod:`lino.test_apps.human`
"""
jasmine_root = None
"""Path to the Jasmine root directory. Only used on a development
server if the `media` directory has no symbolic link to the
Jasmine root directory and only if :attr:`use_jasmine` is True.
"""
default_user = None
"""Username of the user to be used for all incoming requests. Setting
this to a nonempty value will disable authentication on this site.
The special value `'anonymous'` will cause anonymous requests
(whose `user` attribute is the :class:`AnonymousUser
<lino.core.auth.utils.AnonymousUser>` singleton).
See also :meth:`get_auth_method`.
This setting should be `None` when :attr:`user_model` is `None`.
"""
remote_user_header = None
"""The name of the header (set by the web server) that Lino should
consult for finding the user of a request. The default value
`None` means that http authentication is not used. Apache's
default value is ``"REMOTE_USER"``.
"""
ldap_auth_server = None
"""
This should be a string with the domain name and DNS (separated by a
space) of the LDAP server to be used for authentication.
Example::
ldap_auth_server = 'DOMAIN_NAME SERVER_DNS'
"""
use_gridfilters = True
use_eid_applet = False
"""
Whether to include functionality to read Belgian id cards using the
official `eid-applet <http://code.google.com/p/eid-applet>`_.
This option is experimental and doesn't yet work. See
`/blog/2012/1105`.
"""
use_esteid = False
"""
Whether to include functionality to read Estonian id cards. This
option is experimental and doesn't yet work.
"""
use_filterRow = not use_gridfilters
"""
See `/blog/2011/0630`.
This option was experimental and doesn't yet work (and maybe never will).
"""
use_awesome_uploader = False
"""
Whether to use AwesomeUploader.
This option was experimental and doesn't yet work (and maybe never will).
"""
use_tinymce = True
"""Replaced by :mod:`lino.modlib.tinymce`.
"""
use_jasmine = False
"""Whether to use the `Jasmine <https://github.com/pivotal/jasmine>`_
testing library.
"""
use_quicktips = True
"""Whether to make use of `Ext.QuickTips
<http://docs.sencha.com/ext-js/3-4/#!/api/Ext.QuickTips>`_ for
displaying :ref:`help_texts` and internal field names (if
:attr:`show_internal_field_names`).
"""
use_css_tooltips = False
"""
Whether to make use of CSS tooltips
when displaying help texts defined in :class:`lino.models.HelpText`.
"""
use_vinylfox = False
"""
Whether to use VinylFox extensions for HtmlEditor.
This feature was experimental and doesn't yet work (and maybe never will).
See `/blog/2011/0523`.
"""
webdav_root = None
"""
The path on server to store webdav files.
    Default is :attr:`cache_dir` + '/media/webdav'.
"""
webdav_url = None
"""
The URL prefix for webdav files. In a normal production
configuration you should leave this to `None`, Lino will set a
default value "/media/webdav/", supposing that your Apache is
configured as described in :doc:`/admin/webdav`.
This may be used to simulate a :term:`WebDAV` location on a
development server. For example on a Windows machine, you may set
it to ``w:\``, and before invoking :manage:`runserver`, you issue in
a command prompt::
subst w: <dev_project_path>\media\webdav
"""
webdav_protocol = None
"""
Set this to a string like e.g. 'wdav' in order to use a custom
protocol for opening editable printable documents. In this case
Lino expects the browser to be configured to understand the given
protocol.
If this is non-empty, Lino ignores whether
:mod:`lino.modlib.davlink` is installed or not.
When an *editable* printable document has been generated, Lino
does not open a new browser window on that document but invokes
the client's Office application. That application accesses the
document either via a WebDAV link (on a production server) or a
``file://`` link (on a development server).
"""
beid_protocol = None
"""
Set this to a string like e.g. 'beid' in order to use a custom
    protocol for reading eid cards.
"""
sidebar_width = 0
"""
Used by :mod:`lino.modlib.plain`.
Width of the sidebar in 1/12 of total screen width.
Meaningful values are 0 (no sidebar), 2 or 3.
"""
config_id = 1
"""
The primary key of the one and only :class:`SiteConfig
<lino.modlib.system.models.SiteConfig>` instance of this
:class:`Site`. Default value is 1.
This is Lino's equivalent of Django's :setting:`SITE_ID` setting.
Lino applications don't need ``django.contrib.sites`` (`The
"sites" framework
<https://docs.djangoproject.com/en/dev/ref/contrib/sites/>`_)
because an analog functionality is provided by
:mod:`lino.modlib.system`.
"""
preview_limit = 15
"""
Default value for the :attr:`preview_limit
<lino.core.tables.AbstractTable.preview_limit>` parameter of all
tables who don't specify their own one. Default value is 15.
"""
# default_ui = 'lino_extjs6.extjs6'
default_ui = 'lino.modlib.extjs'
"""
The full Python name of the plugin which is to be used as default
user interface on this :class:`Site`.
Default value is :mod:`lino.modlib.extjs`. Other candidates are
:mod:`lino.modlib.bootstrap3`, :mod:`lino_xl.lib.pages` and
    :mod:`lino_extjs6.extjs6`.
Another possibility is to set it to `None`. In that case you will
probably also set :attr:`root_urlconf` to a custom URL dispatcher.
    For a usage example, see :mod:`lino.projects.cms`.
"""
admin_ui = None
mobile_view = False
"""
When this is `False` (the default), then Lino uses an attribute
named :attr:`main <lino.core.layouts.BaseLayout.main>` as the main
element of a detail window and :attr:`column_names
<lino.core.tables.AbstractTable.column_names>` as the table's
column layout.
When this is `True`, then Lino uses :attr:`main_m
<lino.core.layouts.BaseLayout.main_m>` and :attr:`column_names_m
<lino.core.tables.AbstractTable.column_names_m>` respectively.
"""
detail_main_name = 'main'
# detail_main_name = 'main_m'
design_name = 'desktop'
"""
The name of the design to use. The default value is
``'desktop'``. The value should be one of ``'desktop'`` or
``'mobile'``.
For every plugin, Lino will try to import its "design module".
For example if :attr:`design_name` is ``'desktop'``, then the
design module for a plugin ``'foo.bar'`` is ``'foo.bar.desktop'``.
If such a module exists, Lino imports it and adds it to
:attr:`models.bar`. The result is the same as if there were a
``from .desktop import *`` statement at the end of the
:xfile:`models.py` module.
"""
root_urlconf = 'lino.core.urls'
"""
    The value to be assigned to :setting:`ROOT_URLCONF` when this
    :class:`Site` is instantiated.
The default value is :mod:`lino.core.urls`.
"""
textfield_format = 'plain'
"""
The default format for text fields. Valid choices are currently
'plain' and 'html'.
Text fields are either Django's `models.TextField` or
:class:`lino.core.fields.RichTextField`.
You'll probably better leave the global option as 'plain',
and specify explicitly the fields you want as html by declaring
them::
foo = fields.RichTextField(...,format='html')
We even recommend that you declare your *plain* text fields also
using `fields.RichTextField` and not `models.TextField`::
foo = fields.RichTextField()
Because that gives subclasses of your application the possibility to
make that specific field html-formatted::
resolve_field('Bar.foo').set_format('html')
"""
log_each_action_request = False
"""
Whether Lino should log every incoming request for non
:attr:`readonly <lino.core.actions.Action.readonly>` actions.
This is experimental. Theoretically it is useless to ask Lino for
logging every request since Apache does this. OTOH Lino can
produce more readable logs.
Note also that there is no warranty that actually *each* request
    is being logged. It currently works only for requests that are
being processed by the kernel's :meth:`run_action
<lino.core.kernel.Kernel.run_action>` or
:meth:`run_callback
<lino.core.kernel.Kernel.run_callback>` methods.
"""
verbose_client_info_message = False
"""
Set this to True if actions should send debug messages to the client.
These will be shown in the client's Javascript console only.
"""
help_url = "http://www.lino-framework.org"
help_email = "<EMAIL>"
"""
An e-mail address where users can get help. This is included in
:xfile:`admin_main.html`.
"""
catch_layout_exceptions = True
"""
Lino usually catches any exception during startup (in
:func:`create_layout_element
<lino.core.layouts.create_layout_element>`) to report errors of
style "Unknown element "postings.PostingsByController
('postings')" referred in layout <PageDetail on pages.Pages>."
Setting this to `False` is useful when there's some problem
*within* the framework.
"""
strict_dependencies = True
"""
This should be True unless this site is being used just for autodoc
or similar applications.
"""
strict_choicelist_values = True
"""
Whether invalid values in a ChoiceList should raise an exception.
This should be `True` except for exceptional situations.
"""
csv_params = dict()
"""
Site-wide default parameters for CSV generation. This must be a
dictionary that will be used as keyword parameters to Python
`csv.writer()
<http://docs.python.org/library/csv.html#csv.writer>`_
Possible keys include:
- encoding :
the charset to use when responding to a CSV request.
See
http://docs.python.org/library/codecs.html#standard-encodings
for a list of available values.
- many more allowed keys are explained in
`Dialects and Formatting Parameters
<http://docs.python.org/library/csv.html#csv-fmt-params>`_.
"""
logger_filename = 'lino.log'
"""
The name of Lino's main log file, created in :meth:`setup_logging`.
See also :ref:`lino.logging`.
"""
auto_configure_logger_names = 'schedule atelier django lino radicale'
"""
A string with a space-separated list of logger names to be
automatically configured. See :meth:`setup_logging`.
"""
# appy_params = dict(ooPort=8100)
appy_params = dict(
ooPort=8100, pythonWithUnoPath='/usr/bin/python3',
raiseOnError=True)
"""
Used by :class:`lino_xl.lib.appypod.choicelist.AppyBuildMethod`.
    Allowed keyword arguments for `appy.pod.renderer.Renderer` are::
pythonWithUnoPath=None,
ooPort=2002
stylesMapping={}
forceOoCall=False,
finalizeFunction=None
overwriteExisting=False
raiseOnError=False
imageResolver=None
See `the source code
<http://bazaar.launchpad.net/~appy-dev/appy/trunk/view/head:/pod/renderer.py>`_
for details.
See also :doc:`/admin/oood`
"""
#~ decimal_separator = '.'
decimal_separator = ','
"""
    Set this to either ``'.'`` or ``','`` to define whether to use comma
or dot as decimal point separator when entering a `DecimalField`.
"""
# decimal_group_separator = ','
# decimal_group_separator = ' '
# decimal_group_separator = '.'
decimal_group_separator = u"\u00A0"
"""
Decimal group separator for :meth:`decfmt`.
"""
time_format_strftime = '%H:%M'
"""
Format (in strftime syntax) to use for displaying dates to the user.
If you change this setting, you also need to override :meth:`parse_time`.
"""
date_format_strftime = '%d.%m.%Y'
"""
Format (in strftime syntax) to use for displaying dates to the user.
If you change this setting, you also need to override :meth:`parse_date`.
"""
date_format_regex = "/^[0123]?\d\.[01]?\d\.-?\d+$/"
"""
Format (in Javascript regex syntax) to use for displaying dates to
the user. If you change this setting, you also need to override
:meth:`parse_date`.
"""
datetime_format_strftime = '%Y-%m-%dT%H:%M:%S'
"""
Format (in strftime syntax) to use for formatting timestamps in
AJAX responses. If you change this setting, you also need to
override :meth:`parse_datetime`.
"""
datetime_format_extjs = 'Y-m-d\TH:i:s'
"""
Format (in ExtJS syntax) to use for formatting timestamps in AJAX
calls. If you change this setting, you also need to override
:meth:`parse_datetime`.
"""
# for internal use:
_site_config = None
_logger = None
_starting_up = False
override_modlib_models = None
"""
A dictionary which maps model class names to the plugin which
overrides them.
This is automatically filled at startup. You can inspect it, but
you should not modify it. Needed for :meth:`is_abstract_model`.
The challenge is that we want to know exactly where every model's
concrete class will be defined *before* actually starting to
import the :xfile:`models.py` modules. That's why we need
:attr:`extends_models <lino.core.plugin.Plugin.extends_models>`.
This can be tricky, see e.g. 20160205.
"""
installed_plugin_modules = None
"""
Used internally by :meth:`is_abstract_model`. Don't modify.
A set of the full Python paths of all imported plugin modules. Not
just the plugin modules themselves but also those they inherit
from.
"""
def __init__(self, settings_globals=None, local_apps=[], **kwargs):
"""Every Lino application calls this once in it's
:file:`settings.py` file.
See :doc:`/usage`.
`settings_globals` is the `globals()` dictionary of your
:xfile:`settings.py`.
"""
if hasattr(self, 'setup_choicelists'):
raise ChangedAPI("setup_choicelists is no longer supported")
if hasattr(self, 'setup_workflows'):
raise ChangedAPI("setup_workflows is no longer supported")
# if len(_INSTANCES):
# raise Exception("20161219")
# # happens e.g. during sphinx-build
# _INSTANCES.append(self)
# self.logger.info("20140226 Site.__init__() a %s", self)
#~ print "20130404 ok?"
if 'no_local' in kwargs:
kwargs.pop('no_local')
# For the moment we just silently ignore it, but soon:
# if False:
raise ChangedAPI("The no_local argument is no longer needed.")
self._welcome_handlers = []
self._help_texts = dict()
self.plugins = AttrDict()
self.models = AttrDict()
self.modules = self.models # backwards compat
# self.actors = self.models # backwards compat
# self.actors = AttrDict()
if settings_globals is None:
settings_globals = {}
self.init_before_local(settings_globals, local_apps)
self.setup_logging()
self.run_lino_site_module()
self.override_settings(**kwargs)
self.load_plugins()
for p in self.installed_plugins:
p.on_plugins_loaded(self)
if self.migration_module is not None:
self.django_settings.update(
MIGRATION_MODULES={
p.app_label:self.migration_module
for p in self.installed_plugins})
self.setup_plugins()
self.install_settings()
from lino.utils.config import ConfigDirCache
self.confdirs = ConfigDirCache(self)
for k in ('ignore_dates_before', 'ignore_dates_after'):
if hasattr(self, k):
msg = "{0} is no longer a site attribute"
msg += " but a plugin attribute on lino_xl.lib.cal."
msg = msg.format(k)
raise ChangedAPI(msg)
self.load_help_texts()
def init_before_local(self, settings_globals, local_apps):
"""If your :attr:`project_dir` contains no :xfile:`models.py`, but
*does* contain a `fixtures` subdir, then Lino automatically adds this
as a local fixtures directory to Django's :setting:`FIXTURE_DIRS`.
But only once: if your application defines its own local
fixtures directory, then this directory "overrides" those of
parent applications. E.g. lino_noi.projects.care does not want
to load the application-specific fixtures of
lino_noi.projects.team.
"""
if not isinstance(settings_globals, dict):
raise Exception("""
The first argument when instantiating a %s
must be your settings.py file's `globals()`
and not %r
""" % (self.__class__.__name__, settings_globals))
if isinstance(local_apps, six.string_types):
local_apps = [local_apps]
self.local_apps = local_apps
self.django_settings = settings_globals
project_file = settings_globals.get('__file__', '.')
self.project_dir = Path(dirname(project_file)).absolute().resolve()
# inherit `project_name` from parent?
# if self.__dict__.get('project_name') is None:
if self.project_name is None:
parts = reversed(self.project_dir.split(os.sep))
# print(20150129, list(parts))
for part in parts:
if part != 'settings':
self.project_name = part
break
cache_root = os.environ.get('LINO_CACHE_ROOT', None)
if cache_root:
cr = Path(cache_root).absolute()
if not cr.exists():
msg = "LINO_CACHE_ROOT ({0}) does not exist!".format(cr)
raise Exception(msg)
self.cache_dir = cr.child(self.project_name).resolve()
self.setup_cache_directory()
else:
self.cache_dir = Path(self.project_dir).absolute()
self._startup_done = False
self.startup_time = datetime.datetime.now()
db = self.get_database_settings()
if db is not None:
self.django_settings.update(DATABASES=db)
self.update_settings(SERIALIZATION_MODULES={
"py": "lino.utils.dpy",
})
if self.site_prefix != '/':
if not self.site_prefix.endswith('/'):
raise Exception("`site_prefix` must end with a '/'!")
if not self.site_prefix.startswith('/'):
raise Exception("`site_prefix` must start with a '/'!")
self.update_settings(
SESSION_COOKIE_PATH=self.site_prefix[:-1])
# self.update_settings(SESSION_COOKIE_NAME='ssid')
# ## Local project directory
# modname = self.__module__
# i = modname.rfind('.')
# if i != -1:
# modname = modname[:i]
# self.is_local_project_dir = modname not in self.local_apps
self.VIRTUAL_FIELDS = []
def setup_logging(self):
"""Modifies the :data:`DEFAULT_LOGGING
<django.utils.log.DEFAULT_LOGGING>` dictionary *before* Django
passes it to the `logging.config.dictConfig
<https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig>`__
function.
Note that this is called *before* any plugins are loaded.
It is designed to work with the :setting:`LOGGING` and
:setting:`LOGGER_CONFIG` settings unmodified.
It does the following modifications:
- (does not) configure the console handler to write to stdout
instead of Django's default stderr (as explained `here
<http://codeinthehole.com/writing/console-logging-to-stdout-in-django/>`__)
because that breaks testing.
- Define a *default logger configuration* which is initially
the same as the one used by Django::
{
'handlers': ['console', 'mail_admins'],
'level': 'INFO',
}
- If the :attr:`project_dir` has a subdirectory named ``log``,
and if :attr:`logger_filename` is not empty, add a handler
named ``file`` and a formatter named ``verbose``, and add
that handler to the default logger configuration.
- Apply the default logger configuration to every logger name
in :attr:`auto_configure_logger_names`.
It does nothing at all if :attr:`auto_configure_logger_names`
is set to `None` or empty.
See also :ref:`lino.logging`.
See also Django's doc about `Logging
<https://docs.djangoproject.com/en/1.11/topics/logging/>`__.
"""
if not self.auto_configure_logger_names:
return
from django.utils.log import DEFAULT_LOGGING
d = DEFAULT_LOGGING
level = os.environ.get('LINO_LOGLEVEL') or 'INFO'
file_level = os.environ.get('LINO_FILE_LOGLEVEL') or 'INFO'
loggercfg = {
'handlers': ['console', 'mail_admins'],
'level': level,
}
handlers = d.setdefault('handlers', {})
if True:
# We override Django's default config: write to stdout (not
# stderr) and remove the 'require_debug_true' filter.
console = handlers.setdefault('console', {})
console['stream'] = sys.stdout
console['filters'] = []
console['level'] = level
if self.logger_filename and 'file' not in handlers:
logdir = self.project_dir.child('log')
if logdir.isdir():
# if self.history_aware_logging is None:
# self.history_aware_logging = True
formatters = d.setdefault('formatters', {})
formatters.setdefault('verbose', dict(
format='%(asctime)s %(levelname)s '
'[%(module)s %(process)d %(thread)d] : %(message)s',
                datefmt='%Y-%m-%d %H:%M:%S'))
handlers['file'] = {
'level': file_level,
'class': 'logging.FileHandler',
'filename': logdir.child(self.logger_filename),
'encoding': 'UTF-8',
'formatter': 'verbose',
}
loggercfg['handlers'].append('file')
for name in self.auto_configure_logger_names.split():
# if name not in d['loggers']:
d['loggers'][name] = loggercfg
# set schedule logger level to WARNING
# TODO: find a more elegant way to do this.
if 'schedule' in d['loggers']:
d['loggers']['schedule'] = {
'handlers': loggercfg['handlers'],
'level': 'WARNING',
}
dblogger = d['loggers'].setdefault('django.db.backends', {})
dblogger['propagate'] = False
dblogger['level'] = os.environ.get('LINO_SQL_LOGLEVEL', 'WARNING')
dblogger['handlers'] = loggercfg['handlers']
# self.update_settings(LOGGING=d)
# from pprint import pprint
# pprint(d)
# print("20161126 Site %s " % d['loggers'].keys())
# import yaml
# print(yaml.dump(d))
def get_database_settings(self):
"""Return a dict to be set as the :setting:`DATABASE` setting.
        The default behaviour uses SQLite on a file named
:xfile:`default.db` in the :attr:`cache_dir`, and in
``:memory:`` when :attr:`cache_dir` is `None`.
        An alternative might be for example::
def get_database_settings(self):
return {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'test_' + self.project_name,
'USER': 'django',
'PASSWORD': os.environ['MYSQL_PASSWORD'],
'HOST': 'localhost',
'PORT': 3306,
'OPTIONS': {
"init_command": "SET storage_engine=MyISAM",
}
}
}
"""
if self.cache_dir is None:
pass # raise Exception("20160516 No cache_dir")
else:
dbname = self.cache_dir.child('default.db')
return {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': dbname
}
}
def run_lino_site_module(self):
"""See :ref:`lino.site_module`.
"""
site_module = os.environ.get('LINO_SITE_MODULE', None)
if site_module:
mod = import_module(site_module)
func = getattr(mod, 'setup_site', None)
if func:
func(self)
# try:
# from djangosite_local import setup_site
# except ImportError:
# pass
# else:
# setup_site(self)
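    # Sketch of such a site module (the module path and the attributes set are
    # examples; only the ``setup_site(site)`` hook is grounded in the code above):
    #
    #   # e.g. a module exported via the LINO_SITE_MODULE environment variable
    #   def setup_site(site):
    #       site.title = "My production site"
    #       site.server_url = "https://example.com/"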
def override_settings(self, **kwargs):
# Called internally during `__init__` method.
# Also called from :mod:`lino.utils.djangotest`
#~ logger.info("20130404 lino.site.Site.override_defaults")
for k, v in kwargs.items():
if not hasattr(self, k):
raise Exception("%s has no attribute %s" % (self.__class__, k))
setattr(self, k, v)
self.apply_languages()
def load_plugins(self):
"""Load all plugins and build the :setting:`INSTALLED_APPS` setting
for Django.
This includes a call to :meth:`get_apps_modifiers` and
:meth:`get_installed_apps`.
"""
# Called internally during `__init__` method.
requested_apps = []
apps_modifiers = self.get_apps_modifiers()
if hasattr(self, 'hidden_apps'):
raise ChangedAPI("Replace hidden_apps by get_apps_modifiers()")
def add(x):
if isinstance(x, six.string_types):
app_label = x.split('.')[-1]
x = apps_modifiers.pop(app_label, x)
if x:
requested_apps.append(x)
else:
# if it's not a string, then it's an iterable of strings
for xi in x:
add(xi)
for x in self.get_installed_apps():
add(x)
for x in self.local_apps:
add(x)
# actual_apps = []
plugins = []
disabled_plugins = set()
def install_plugin(app_name, needed_by=None):
# print("20170505 install_plugin({})".format(app_name))
# Django does not accept newstr, and we don't want to see
# ``u'applabel'`` in doctests.
app_name = six.text_type(app_name)
# print("20160524 install_plugin(%r)" % app_name)
app_mod = import_module(app_name)
# print "Loading plugin", app_name
k = app_name.rsplit('.')[-1]
x = apps_modifiers.pop(k, 42)
if x is None:
return
elif x == 42:
pass
else:
raise Exception("20160712")
if k in self.plugins:
other = self.plugins[k]
if other.app_name == app_name:
# If a plugin is installed more than once, only
# the first one counts and all others are ignored
# silently. Happens e.g. in Lino Noi where
# lino_noi.lib.noi is both a required plugin and
# the default_ui.
return
raise Exception("Tried to install {} where {} "
"is already installed.".format(
app_name, other))
# Can an `__init__.py` file explicitly set ``Plugin =
# None``? Is that feature being used?
app_class = getattr(app_mod, 'Plugin', None)
if app_class is None:
app_class = Plugin
p = app_class(self, k, app_name, app_mod, needed_by)
cfg = PLUGIN_CONFIGS.pop(k, None)
if cfg:
p.configure(**cfg)
needed_by = p
while needed_by.needed_by is not None:
needed_by = needed_by.needed_by
for dep in p.needs_plugins:
k2 = dep.rsplit('.')[-1]
if k2 not in self.plugins:
install_plugin(dep, needed_by=needed_by)
# plugins.append(dep)
# actual_apps.append(app_name)
plugins.append(p)
self.plugins.define(k, p)
for dp in p.disables_plugins:
disabled_plugins.add(dp)
# lino_startup is always the first plugin:
# install_plugin(str('lino.modlib.lino_startup'))
install_plugin(str('lino'))
for app_name in requested_apps:
install_plugin(app_name)
if apps_modifiers:
raise Exception(
"Invalid app_label '{0}' in your get_apps_modifiers!".format(
list(apps_modifiers.keys())[0]))
# The return value of get_auth_method() may depend on a
# plugin, so if needed we must add the django.contrib.sessions
# afterwards.
# if self.get_auth_method() == 'session':
if self.user_model:
k = str('django.contrib.sessions')
if not k in self.plugins:
install_plugin(k)
for p in plugins:
if p.app_label in disabled_plugins \
or p.app_name in disabled_plugins:
plugins.remove(p)
del self.plugins[p.app_label]
# self.update_settings(INSTALLED_APPS=tuple(actual_apps))
self.update_settings(
INSTALLED_APPS=tuple([p.app_name for p in plugins]))
self.installed_plugins = tuple(plugins)
if self.override_modlib_models is not None:
raise ChangedAPI("override_modlib_models no longer allowed")
self.override_modlib_models = dict()
# def reg(p, pp, m):
# name = pp.__module__ + '.' + m
# self.override_modlib_models[name] = p
def plugin_parents(pc):
for pp in pc.__mro__:
if issubclass(pp, Plugin):
# if pp not in (Plugin, p.__class__):
if pp is not Plugin:
yield pp
def reg(pc):
# If plugin p extends some models, then tell all parent
# plugins to make their definition of each model abstract.
extends_models = pc.__dict__.get('extends_models')
if extends_models is not None:
for m in extends_models:
if "." in m:
raise Exception(
"extends_models in %s still uses '.'" % pc)
for pp in plugin_parents(pc):
if pp is pc:
continue
name = pp.__module__ + '.' + m
self.override_modlib_models[name] = pc
# if m == "Company":
# print("20160524 tell %s that %s extends %s" % (
# pp, p.app_name, m))
for pp in plugin_parents(pc):
if pp is pc:
continue
reg(pp)
# msg = "{0} declares to extend_models {1}, but " \
# "cannot find parent plugin".format(p, m)
# raise Exception(msg)
for p in self.installed_plugins:
reg(p.__class__)
# for pp in plugin_parents(p.__class__):
# if p.app_label == 'contacts':
# print("20160524c %s" % pp)
# reg(p.__class__)
# for m, p in self.override_modlib_models.items():
# print("20160524 %s : %s" % (m, p))
self.installed_plugin_modules = set()
for p in self.installed_plugins:
self.installed_plugin_modules.add(p.app_module.__name__)
for pp in plugin_parents(p.__class__):
self.installed_plugin_modules.add(pp.__module__)
# print("20160524 %s", self.installed_plugin_modules)
# raise Exception("20140825 %s", self.override_modlib_models)
# Tried to prevent accidental calls to configure_plugin()
# *after* Site initialization.
# global PLUGIN_CONFIGS
# PLUGIN_CONFIGS = None
def load_help_texts(self):
"""Collect :xfile:`help_texts.py` modules"""
for p in self.installed_plugins:
mn = p.app_name + '.help_texts'
try:
m = import_module(mn)
# print("20160725 Loading help texts from", mn)
self._help_texts.update(m.help_texts)
except ImportError:
pass
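    # A help_texts.py module is expected to define a dict named ``help_texts``
    # (see the update() call above).  Sketch of what such a module might
    # contain (key and text are made up; the key style is what
    # install_help_text() below looks up):
    #
    #   help_texts = {
    #       'users.User.username': "The name used to sign in.",
    #   }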
def load_actors(self):
"""Collect :xfile:`desktop.py` modules.
Note the situation when a :xfile:`desktop.py` module exists
but causes itself an ImportError because it contains a
programming mistake. In that case we want the traceback to
occur, not to silently do as if no :xfile:`desktop.py` module
existed.
"""
for p in self.installed_plugins:
mn = p.app_name + '.' + self.design_name
fn = join(
dirname(p.app_module.__file__), self.design_name + '.py')
if exists(fn):
# self.actors[p.app_label] = import_module(mn)
m = import_module(mn)
self.models[p.app_label].__dict__.update(m.__dict__)
# try:
# # print("20160725 Loading actors from", mn)
# self.actors[p.app_label] = import_module(mn)
# except ImportError:
# pass
def install_help_text(self, fld, cls=None, attrname=None):
"""Install a `help_text` from collected :xfile:`help_texts.py` for
this field.
"""
if cls is None:
cls = fld
debug = False
# if attrname.startswith('mun'):
# debug = True
# from lino.core.actions import Action
# if isinstance(fld, Action) and fld.__class__.__name__ == 'ChangePassword':
# debug = True
# if isinstance(fld, type) and fld.__name__ == 'ChangePassword':
# # if isinstance(fld, Action) and fld.__class__.__name__ == 'ChangePassword':
# debug = True
if not hasattr(fld, 'help_text'): # e.g. virtual fields don't
# have a help_text attribute
if debug:
print("20170824 {!r} has no help_text".format(fld))
return
# if fld.help_text:
# # if debug:
# # print("20170824 {} on {} has already a help_text {}".format(
# # attrname, cls, repr(fld.help_text)))
# return
# if debug:
# print(20160829, cls)
# if isinstance(fld, type):
# cls = fld
# else:
# cls = fld.model
for m in cls.mro():
# useless = ['lino.core', 'lino.mixins']
# if m.__module__.startswith(useless):
# continue
# if m in self.unhelpful_classes:
# continue
k = m.__module__ + '.' + m.__name__
k = simplify_name(k)
# debug = k.startswith('users')
if attrname:
k += '.' + attrname
txt = self._help_texts.get(k, None)
# if attrname == "nationality":
# print("20180313 {} {}".format(k, txt))
if txt is None:
if debug:
print("20170824 {}.{} : no help_text using {!r}".format(
cls, attrname, k))
if fld.help_text:
# coded help text gets overridden only if docs
# provide a more specific help text.
return
else:
if debug:
print("20170824 {}.{}.help_text found using {}".format(
cls, attrname, k))
fld.help_text = txt
return
if debug:
print("20170824 {}.{} : no help_text".format(
cls, attrname))
def setup_plugins(self):
"""This method is called exactly once during site startup, after
:meth:`load_plugins` but before populating the models
registry.
See :ref:`dev.plugins`.
"""
pass
def install_settings(self):
assert not self.help_url.endswith('/')
# import django
# django.setup()
if self.cache_dir is not None:
if self.webdav_url is None:
self.webdav_url = self.site_prefix + 'media/webdav/'
if self.webdav_root is None:
self.webdav_root = join(self.cache_dir, 'media', 'webdav')
self.django_settings.update(
MEDIA_ROOT=join(self.cache_dir, 'media'))
self.update_settings(ROOT_URLCONF=self.root_urlconf)
self.update_settings(MEDIA_URL='/media/')
if not self.django_settings.get('STATIC_ROOT', False):
cache_root = os.environ.get('LINO_CACHE_ROOT', None)
if cache_root:
self.django_settings.update(
STATIC_ROOT=Path(cache_root).child('collectstatic'))
else:
self.django_settings.update(
STATIC_ROOT=self.cache_dir.child('static'))
if not self.django_settings.get('STATIC_URL', False):
self.update_settings(STATIC_URL='/static/')
# loaders = [
# 'lino.modlib.jinja.loader.Loader',
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# #~ 'django.template.loaders.eggs.Loader',
# ]
tcp = []
tcp += [
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
]
# self.update_settings(TEMPLATE_LOADERS=tuple(loaders))
# self.update_settings(TEMPLATE_CONTEXT_PROCESSORS=tuple(tcp))
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': tcp,
# 'loaders': loaders
},
},
]
TEMPLATES.append(
{
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'DIRS': [],
'OPTIONS': {
'environment': 'lino.modlib.jinja.get_environment'
},
})
self.update_settings(TEMPLATES=TEMPLATES)
if self.user_model:
self.update_settings(AUTH_USER_MODEL='users.User')
if self.use_security_features:
self.update_settings(
CSRF_USE_SESSIONS=True,
SESSION_COOKIE_SECURE=True,
CSRF_COOKIE_SECURE=True)
# self.define_settings(AUTH_USER_MODEL=self.user_model)
self.define_settings(
MIDDLEWARE_CLASSES=tuple(self.get_middleware_classes()))
# if self.get_auth_method() == 'session':
# self.define_settings(AUTHENTICATION_BACKENDS=[
# 'django.contrib.auth.backends.RemoteUserBackend'
# ])
backends = []
# if self.use_ipdict:
# backends.append('lino.modlib.ipdict.backends.Backend')
if self.get_auth_method() == 'remote':
backends.append('lino.core.auth.backends.RemoteUserBackend')
else:
backends.append('lino.core.auth.backends.ModelBackend')
if self.social_auth_backends is not None:
backends += self.social_auth_backends
self.define_settings(AUTHENTICATION_BACKENDS=backends)
self.update_settings(
LOGIN_URL='/accounts/login/',
LOGIN_REDIRECT_URL = '/',
# LOGIN_REDIRECT_URL = '/accounts/profile/',
LOGOUT_REDIRECT_URL = None)
def collect_settings_subdirs(lst, name, max_count=None):
def add(p):
p = p.replace(os.sep, "/")
if p not in lst:
lst.append(p)
for p in self.get_settings_subdirs(name):
# if the parent of a settings subdir has a
# `models.py`, then it is a plugin and we must not add
# the subdir because Django does that.
if exists(join(p, '..', 'models.py')):
self.logger.debug(
"Not loading %s %s because Django does that",
p, name)
else:
add(p)
if (max_count is not None) and len(lst) >= max_count:
break
# local_dir = self.cache_dir.child(name)
# if local_dir.exists():
# print "20150427 adding local directory %s" % local_dir
# add(local_dir)
# The STATICFILES_DIRS setting should not contain the
# STATIC_ROOT setting
if False:
# If a plugin has no "fixtures" ("config") directory
# of its own, inherit it from parents. That would be
# nice and it even works, but with a catch: these
# fixtures will be loaded at the end.
for ip in self.installed_plugins:
if not ip.get_subdir(name):
pc = ip.extends_from()
while pc and issubclass(pc, Plugin):
p = pc.get_subdir(name)
if p:
add(p)
pc = pc.extends_from()
fixture_dirs = list(self.django_settings.get('FIXTURE_DIRS', []))
locale_paths = list(self.django_settings.get('LOCALE_PATHS', []))
sfd = list(self.django_settings.get('STATICFILES_DIRS', []))
# sfd.append(self.cache_dir.child('genjs'))
collect_settings_subdirs(fixture_dirs, 'fixtures', 1)
collect_settings_subdirs(locale_paths, 'locale')
collect_settings_subdirs(sfd, 'static')
self.update_settings(FIXTURE_DIRS=tuple(fixture_dirs))
self.update_settings(LOCALE_PATHS=tuple(locale_paths))
self.update_settings(STATICFILES_DIRS=tuple(sfd))
# print(20150331, self.django_settings['FIXTURE_DIRS'])
def setup_cache_directory(self):
"""When :envvar:`LINO_CACHE_ROOT` is set, Lino adds a stamp file
called :xfile:`lino_cache.txt` to every project's cache
directory in order to avoid duplicate use of same cache
directory.
.. xfile:: lino_cache.txt
A small text file with one line of text which contains the
path of the project which uses this cache directory.
"""
stamp = self.cache_dir.child('lino_cache.txt')
this = class2str(self.__class__)
if stamp.exists():
other = stamp.read_file()
if other == this:
ok = True
else:
ok = False
for parent in self.__class__.__mro__:
if other == class2str(parent):
ok = True
break
if not ok:
# Can happen e.g. when `python -m lino.hello` is
# called in certain conditions.
msg = ("Cannot use {cache_dir} for {this} "
"because it is used for {other}. (Settings {settings})")
msg = msg.format(
cache_dir=self.cache_dir,
this=this,
settings=self.django_settings.get('SETTINGS_MODULE'),
other=other)
if True:
raise Exception(msg)
else:
# print(msg)
self.cache_dir = None
else:
self.makedirs_if_missing(self.cache_dir)
stamp.write_file(this)
def set_user_model(self, spec):
"""This can be called during the :meth:`on_init
<lino.core.plugin.Plugin.on_init>` of plugins which provide
user management (the only plugin which does this is currently
:mod:`lino.modlib.users`).
"""
# if self.user_model is not None:
# msg = "Site.user_model was already set!"
# Theoretically this should raise an exception. But in a
# transitional phase after 20150116 we just ignore it. A
# warning would be nice, but we cannot use the logger here
# since it is not yet configured.
# self.logger.warning(msg)
# raise Exception(msg)
self.user_model = spec
if self.user_types_module is None:
self.user_types_module = 'lino.core.user_types'
def get_auth_method(self):
"""Returns the authentication method used on this site. This is one of
`None`, `'remote'` or `'session'`.
It depends on the values in
:attr:`user_model`,
:attr:`default_user` and
:attr:`remote_user_header`.
It influences the results of
:meth:`get_middleware_classes` and
:meth:`get_installed_apps`, and the content of
:setting:`AUTHENTICATION_BACKENDS`.
"""
if self.user_model is None:
return None
if self.default_user is not None:
return None
if self.remote_user_header is None:
return 'session' # model backend
return 'remote' # remote user backend
def get_apps_modifiers(self, **kw):
"""Override or hide individual plugins of an existing application.
For example, if your site inherits from
:mod:`lino.projects.min2`::
def get_apps_modifiers(self, **kw):
kw.update(sales=None)
kw.update(courses='my.modlib.courses')
return kw
The default implementation returns an empty dict.
This method adds an additional level of customization because
it lets you remove or replace individual plugins from
:setting:`INSTALLED_APPS` without rewriting your own
:meth:`get_installed_apps`.
This will be called during Site instantiation and is expected to
return a dict of `app_label` to `full_python_path`
mappings which you want to override in the list of plugins
returned by :meth:`get_installed_apps`.
Mapping an `app_label` to `None` will remove that plugin from
:setting:`INSTALLED_APPS`.
It is theoretically possible but not recommended to replace an
existing `app_label` by an app with a different
`app_label`. For example, the following might work but is not
recommended::
kw.update(courses='my.modlib.mycourses')
"""
return kw
def is_hidden_app(self, app_label):
"""
Return True if the app is known, but has been disabled using
:meth:`get_apps_modifiers`.
"""
am = self.get_apps_modifiers()
if am.get(app_label, 1) is None:
return True
def update_settings(self, **kw):
"""This may be called from within a :xfile:`lino_local.py`.
"""
self.django_settings.update(**kw)
def define_settings(self, **kwargs):
"""Same as :meth:`update_settings`, but raises an exception if a
setting already exists.
TODO: Currently this exception is deactivated. Because it
doesn't work as expected. For some reason (maybe because
settings is being imported twice on a devserver) it raises a
false exception when :meth:`override_defaults` tries to use it
on :setting:`MIDDLEWARE_CLASSES`...
"""
if False:
for name in list(kwargs.keys()):
if name in self.django_settings:
raise Exception(
"Tried to define existing Django setting %s" % name)
self.django_settings.update(kwargs)
def startup(self):
"""Start up this Site.
You probably don't want to override this method as it might be
called several times, e.g. under mod_wsgi when another thread has
started but not yet finished `startup()`.
If you want to run custom code on
site startup, override :meth:`do_site_startup`.
"""
from lino.core.kernel import site_startup
site_startup(self)
self.clear_site_config()
def do_site_startup(self):
"""This method is called exactly once during site startup,
just between the pre_startup and the post_startup signals.
A hook for subclasses.
TODO: rename this to `on_startup`?
If you override it, don't forget to call the super method.
"""
# self.logger.info("20160526 %s do_site_startup() a", self.__class__)
# self.logger.info("20160526 %s do_site_startup() b", self.__class__)
@property
def logger(self):
"""This must not be used before Django has done it logging config. For
example don't use it in a :xfile:`settings.py` module.
"""
if self._logger is None:
import logging
self._logger = logging.getLogger(__name__)
return self._logger
def get_settings_subdirs(self, subdir_name):
"""Yield all (existing) directories named `subdir_name` of this Site's
project directory and its inherited project directories.
"""
# if local settings.py doesn't subclass Site:
if self.project_dir != normpath(dirname(
inspect.getfile(self.__class__))):
pth = join(self.project_dir, subdir_name)
if isdir(pth):
yield pth
for cl in self.__class__.__mro__:
#~ logger.info("20130109 inspecting class %s",cl)
if cl is not object and not inspect.isbuiltin(cl):
pth = join(dirname(inspect.getfile(cl)), subdir_name)
if isdir(pth):
yield pth
def makedirs_if_missing(self, dirname):
"""Make missing directories if they don't exist and if
:attr:`make_missing_dirs` is `True`.
"""
if dirname and not isdir(dirname):
if self.make_missing_dirs:
os.makedirs(dirname)
else:
raise Exception("Please create the directory %s yourself" %
dirname)
def is_abstract_model(self, module_name, model_name):
"""
Return True if the named model is declared as being extended by
:attr:`lino.core.plugin.Plugin.extends_models`.
Typical usage::
class MyModel(dd.Model):
class Meta:
abstract = dd.is_abstract_model(__name__, 'MyModel')
See :doc:`/dev/plugin_inheritance`.
"""
app_name = '.'.join(module_name.split('.')[:-1])
model_name = app_name + '.' + model_name
# if 'avanti' in model_name:
# print("20170120", model_name,
# self.override_modlib_models,
# [m for m in self.installed_plugin_modules])
rv = model_name in self.override_modlib_models
if not rv:
if app_name not in self.installed_plugin_modules:
return True
# if model_name.endswith('Company'):
# self.logger.info(
# "20160524 is_abstract_model(%s) -> %s", model_name, rv)
# self.logger.info(
# "20160524 is_abstract_model(%s) -> %s (%s, %s)",
# model_name, rv, self.override_modlib_models.keys(),
# os.getenv('DJANGO_SETTINGS_MODULE'))
return rv
def is_installed_model_spec(self, model_spec):
"""
Deprecated. This feature was a bit too automagic and caused bugs
to pass silently. See e.g. :blogref:`20131025`.
"""
if False: # mod_wsgi interprets them as error
warnings.warn("is_installed_model_spec is deprecated.",
category=DeprecationWarning)
if model_spec == 'self':
return True
app_label, model_name = model_spec.split(".")
return self.is_installed(app_label)
def is_installed(self, app_label):
"""
Return `True` if :setting:`INSTALLED_APPS` contains an item
which ends with the specified `app_label`.
"""
return app_label in self.plugins
def setup_model_spec(self, obj, name):
"""
If the value of the named attribute of `obj` is a string, replace
it by the model specified by that string.
Example usage::
# library code:
class ThingBase(object):
the_model = None
def __init__(self):
settings.SITE.setup_model_spec(self, 'the_model')
# user code:
class MyThing(ThingBase):
the_model = "contacts.Partner"
"""
spec = getattr(obj, name)
if spec and isinstance(spec, six.string_types):
if not self.is_installed_model_spec(spec):
setattr(obj, name, None)
return
from lino.core.utils import resolve_model
msg = "Unresolved model '%s' in {0}.".format(name)
msg += " ({})".format(str(self.installed_plugins))
setattr(obj, name, resolve_model(spec, strict=msg))
def on_each_app(self, methname, *args):
"""
Call the named method on the :xfile:`models.py` module of each
installed app.
Note that this mechanism is deprecated. It is still used (on
names like ``setup_workflows`` and ``setup_site``) for
historical reasons but will disappear one day.
"""
from django.apps import apps
apps = [a.models_module for a in apps.get_app_configs()]
for mod in apps:
meth = getattr(mod, methname, None)
if meth is not None:
if False: # 20150925 once we will do it for good...
raise ChangedAPI("{0} still has a function {1}".format(
mod, methname))
meth(self, *args)
def for_each_app(self, func, *args, **kw):
"""
Call the given function on each installed plugin. Successor of
:meth:`on_each_app`.
This also loops over plugins that don't have a models module
and the base plugins of plugins which extend some plugin.
"""
from importlib import import_module
done = set()
for p in self.installed_plugins:
for b in p.__class__.__mro__:
if b not in (object, Plugin):
if b.__module__ not in done:
done.add(b.__module__)
parent = import_module(b.__module__)
func(b.__module__, parent, *args, **kw)
if p.app_name not in done:
func(p.app_name, p.app_module, *args, **kw)
def demo_date(self, *args, **kwargs):
"""
Deprecated. Should be replaced by :meth:`today`. Compute a date
using :func:`atelier.utils.date_offset` based on the process
startup time (or :attr:`the_demo_date` if this is set).
Used in Python fixtures and unit tests.
"""
base = self.the_demo_date or self.startup_time.date()
return date_offset(base, *args, **kwargs)
def today(self, *args, **kwargs):
"""
Almost the same as :func:`datetime.date.today`.
One difference is that the system's *today* is replaced by
:attr:`the_demo_date` if that attribute is set.
Another difference is that arguments can be passed to add some
offset. See :func:`atelier.utils.date_offset`.
This feature is being used in many test cases where e.g. the
age of people would otherwise change.
"""
if self.site_config is not None:
base = self.site_config.simulate_today \
or self.the_demo_date or datetime.date.today()
else:
base = self.the_demo_date or datetime.date.today()
return date_offset(base, *args, **kwargs)
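# A quick sketch of the intended use (the offsets are illustrative):
#
#   site.today()     # the demo/simulated date if set, else the real today
#   site.today(-30)  # the same date shifted 30 days into the past,
#                    # as computed by atelier.utils.date_offset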
def welcome_text(self):
"""
Returns the text to display in a console window when this
application starts.
"""
return "This is %s using %s." % (
self.site_version(), self.using_text())
def using_text(self):
"""
Text to display in a console window when Lino starts.
"""
return ', '.join([u"%s %s" % (n, v)
for n, v, u in self.get_used_libs()])
def site_version(self):
"""
Used in footnote or header of certain printed documents.
"""
if self.version:
return self.verbose_name + ' ' + self.version
return self.verbose_name
def configure_plugin(self, app_label, **kw):
raise Exception("Replace SITE.configure_plugin by ad.configure_plugin")
def install_migrations(self, *args):
"""
See :func:`lino.utils.dpy.install_migrations`.
"""
from lino.utils.dpy import install_migrations
install_migrations(self, *args)
def parse_date(self, s):
"""
Convert a string formatted using :attr:`date_format_strftime` or
:attr:`date_format_extjs` into a `(y,m,d)` tuple (not a
`datetime.date` instance). See `/blog/2010/1130`.
"""
ymd = tuple(reversed(list(map(int, s.split('.')))))
assert len(ymd) == 3
return ymd
#~ return datetime.date(*ymd)
def parse_time(self, s):
"""
Convert a string formatted using :attr:`time_format_strftime` or
:attr:`time_format_extjs` into a `datetime.time` instance.
"""
hms = list(map(int, s.split(':')))
return datetime.time(*hms)
def parse_datetime(self, s):
"""
Convert a string formatted using :attr:`datetime_format_strftime`
or :attr:`datetime_format_extjs` into a `datetime.datetime`
instance.
"""
#~ print "20110701 parse_datetime(%r)" % s
#~ s2 = s.split()
s2 = s.split('T')
if len(s2) != 2:
raise Exception("Invalid datetime string %r" % s)
ymd = list(map(int, s2[0].split('-')))
hms = list(map(int, s2[1].split(':')))
return datetime.datetime(*(ymd + hms))
#~ d = datetime.date(*self.parse_date(s[0]))
#~ return datetime.combine(d,t)
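# Illustrative round trips for the three parsers above (the input strings
# are examples; the accepted formats are those named in the docstrings):
#
#   site.parse_date("26.08.2013")               # -> (2013, 8, 26)
#   site.parse_time("12:30:05")                 # -> datetime.time(12, 30, 5)
#   site.parse_datetime("2013-08-26T12:30:05")  # -> datetime.datetime(2013, 8, 26, 12, 30, 5)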
def strftime(self, t):
if t is None:
return ''
return t.strftime(self.time_format_strftime)
def resolve_virtual_fields(self):
for vf in self.VIRTUAL_FIELDS:
vf.lino_resolve_type()
self.VIRTUAL_FIELDS = []
def register_virtual_field(self, vf):
self.VIRTUAL_FIELDS.append(vf)
def find_config_file(self, *args, **kwargs):
return self.confdirs.find_config_file(*args, **kwargs)
def find_template_config_files(self, *args, **kwargs):
return self.confdirs.find_template_config_files(*args, **kwargs)
def setup_actions(self):
"""
Hook for subclasses to add or modify actions.
"""
from lino.core.merge import MergeAction
for m in get_models():
if m.allow_merge_action:
m.define_action(merge_row=MergeAction(m))
def setup_layouts(self):
'''
Hook for subclasses to add or modify layouts.
Usage example::
def setup_layouts(self):
super(Site, self).setup_layouts()
self.models.system.SiteConfigs.set_detail_layout("""
site_company next_partner_id:10
default_build_method
clients_account sales_account
suppliers_account purchases_account
""")
self.models.accounts.Accounts.set_detail_layout("""
ref:10 name id:5
seqno group type clearable
ledger.MovementsByAccount
""")
'''
pass
def add_user_field(self, name, fld):
if self.user_model:
from lino.api import dd
dd.inject_field(self.user_model, name, fld)
def get_used_libs(self, html=None):
"""
Yield a list of (name, version, url) tuples describing the
third-party software used on this site.
This function is used by :meth:`using_text` and
:meth:`welcome_html`.
"""
import lino
yield ("Lino", lino.SETUP_INFO['version'], lino.SETUP_INFO['url'])
try:
import mod_wsgi
version = "{0}.{1}".format(*mod_wsgi.version)
yield ("mod_wsgi", version, "http://www.modwsgi.org/")
except ImportError:
pass
import django
yield ("Django", django.get_version(), "http://www.djangoproject.com")
import sys
version = "%d.%d.%d" % sys.version_info[:3]
yield ("Python", version, "http://www.python.org/")
import babel
yield ("Babel", babel.__version__, "http://babel.edgewall.org/")
#~ import tidylib
#~ version = getattr(tidylib,'__version__','')
#~ yield ("tidylib",version,"http://countergram.com/open-source/pytidylib")
#~ import pyPdf
#~ version = getattr(pyPdf,'__version__','')
#~ yield ("pyPdf",version,"http://countergram.com/open-source/pytidylib")
import jinja2
version = getattr(jinja2, '__version__', '')
yield ("Jinja", version, "http://jinja.pocoo.org/")
# import sphinx
# version = getattr(sphinx, '__version__', '')
# yield ("Sphinx", version, "http://sphinx-doc.org/")
import dateutil
version = getattr(dateutil, '__version__', '')
yield ("python-dateutil", version, "http://labix.org/python-dateutil")
#~ try:
#~ import Cheetah
#~ version = Cheetah.Version
#~ yield ("Cheetah",version ,"http://cheetahtemplate.org/")
#~ except ImportError:
#~ pass
# try:
# from odf import opendocument
# version = opendocument.__version__
# except ImportError:
# version = self.not_found_msg
# yield ("OdfPy", version, "http://pypi.python.org/pypi/odfpy")
# try:
# import docutils
# version = docutils.__version__
# except ImportError:
# version = self.not_found_msg
# yield ("docutils", version, "http://docutils.sourceforge.net/")
# import yaml
# version = getattr(yaml, '__version__', '')
# yield ("PyYaml", version, "http://pyyaml.org/")
if self.social_auth_backends is not None:
try:
import social_django
version = social_django.__version__
except ImportError:
version = self.not_found_msg
name = "social-django"
yield (name, version, "https://github.com/python-social-auth")
for p in self.installed_plugins:
for u in p.get_used_libs(html):
yield u
def get_social_auth_links(self):
# print("20171207 site.py")
# elems = []
if self.social_auth_backends is None:
return
from social_core.backends.utils import load_backends
# from collections import OrderedDict
# from django.conf import settings
# from social_core.backends.base import BaseAuth
# backend = module_member(auth_backend)
# if issubclass(backend, BaseAuth):
for b in load_backends(
self.social_auth_backends).values():
yield E.a(b.name, href="/oauth/login/"+b.name)
# print("20171207 a", elems)
# return E.div(*elems)
def apply_languages(self):
"""This function is called when a Site object gets instantiated,
i.e. while Django is still loading the settings. It analyzes
the :attr:`languages` attribute and converts it to a tuple of
:data:`LanguageInfo` objects.
"""
if isinstance(self.languages, tuple) \
and isinstance(self.languages[0], LanguageInfo):
# e.g. override_defaults() has been called explicitly, without
# specifying a languages keyword.
return
self.language_dict = dict() # maps simple_code -> LanguageInfo
self.LANGUAGE_CHOICES = []
self.LANGUAGE_DICT = dict() # used in lino.modlib.users
must_set_language_code = False
#~ self.AVAILABLE_LANGUAGES = (to_locale(self.DEFAULT_LANGUAGE),)
if self.languages is None:
self.languages = [DJANGO_DEFAULT_LANGUAGE]
#~ self.update_settings(USE_L10N = False)
#~ info = LanguageInfo(DJANGO_DEFAULT_LANGUAGE,to_locale(DJANGO_DEFAULT_LANGUAGE),0,'')
#~ self.DEFAULT_LANGUAGE = info
#~ self.languages = (info,)
#~ self.language_dict[info.name] = info
else:
if isinstance(self.languages, six.string_types):
self.languages = str(self.languages).split()
#~ lc = [x for x in self.django_settings.get('LANGUAGES' if x[0] in languages]
#~ lc = language_choices(*self.languages)
#~ self.update_settings(LANGUAGES = lc)
#~ self.update_settings(LANGUAGE_CODE = lc[0][0])
#~ self.update_settings(LANGUAGE_CODE = self.languages[0])
self.update_settings(USE_L10N=True)
must_set_language_code = True
languages = []
for i, django_code in enumerate(self.languages):
assert_django_code(django_code)
name = str(to_locale(django_code))
if name in self.language_dict:
raise Exception("Duplicate name %s for language code %r"
% (name, django_code))
if i == 0:
suffix = ''
else:
suffix = '_' + name
info = LanguageInfo(str(django_code), str(name), i, str(suffix))
self.language_dict[name] = info
languages.append(info)
new_languages = languages
for info in tuple(new_languages):
if '-' in info.django_code:
base, loc = info.django_code.split('-')
if not base in self.language_dict:
self.language_dict[base] = info
# replace the complicated info by a simplified one
#~ newinfo = LanguageInfo(info.django_code,base,info.index,info.suffix)
#~ new_languages[info.index] = newinfo
#~ del self.language_dict[info.name]
#~ self.language_dict[newinfo.name] = newinfo
#~ for base,lst in simple_codes.items():
#~ if len(lst) == 1 and and not base in self.language_dict:
#~ self.language_dict[base] = lst[0]
self.languages = tuple(new_languages)
self.DEFAULT_LANGUAGE = self.languages[0]
self.BABEL_LANGS = tuple(self.languages[1:])
if must_set_language_code:
self.update_settings(LANGUAGE_CODE=self.languages[0].django_code)
# Note: LANGUAGE_CODE is what *Django* believes to be the
# default language. This should be some variant of
# English ('en' or 'en-us') if you use
# `django.contrib.humanize`
# https://code.djangoproject.com/ticket/20059
self.setup_languages()
def setup_languages(self):
"""
Reduce Django's :setting:`LANGUAGES` to my `languages`.
Note that lng.name are not yet translated, we take these
from `django.conf.global_settings`.
"""
from django.conf.global_settings import LANGUAGES
def langtext(code):
for k, v in LANGUAGES:
if k == code:
return v
# returns None if not found
def _add_language(code, lazy_text):
self.LANGUAGE_DICT[code] = lazy_text
self.LANGUAGE_CHOICES.append((code, lazy_text))
if self.languages is None:
_add_language(DJANGO_DEFAULT_LANGUAGE, _("English"))
else:
for lang in self.languages:
code = lang.django_code
text = langtext(code)
if text is None:
# Django doesn't know these
if code == 'de-be':
text = gettext_noop("German (Belgium)")
elif code == 'de-ch':
text = gettext_noop("German (Swiss)")
elif code == 'de-at':
text = gettext_noop("German (Austria)")
elif code == 'en-us':
text = gettext_noop("American English")
else:
raise Exception(
"Unknown language code %r (must be one of %s)" % (
lang.django_code,
[x[0] for x in LANGUAGES]))
text = _(text)
_add_language(lang.django_code, text)
"""
Cannot activate the site's default language
because some test cases in django.contrib.humanize
rely on en-us as default language
"""
#~ set_language(self.get_default_language())
"""
reduce Django's LANGUAGES to my babel languages:
"""
self.update_settings(
LANGUAGES=[x for x in LANGUAGES
if x[0] in self.LANGUAGE_DICT])
def get_language_info(self, code):
"""Use this in Python fixtures or tests to test whether a Site
instance supports a given language. `code` must be a
Django-style language code.
On a site with only one locale of a language (and optionally
some other languages), you can use just the bare language code to
get its :data:`LanguageInfo` tuple.
>>> from lino.core.site import TestSite as Site
>>> Site(languages="en-us fr de-be de").get_language_info('en')
LanguageInfo(django_code='en-us', name='en_US', index=0, suffix='')
On a site with two locales of a same language (e.g. 'en-us'
and 'en-gb'), the simple code 'en' yields that first variant:
>>> site = Site(languages="en-us en-gb")
>>> print(site.get_language_info('en'))
LanguageInfo(django_code='en-us', name='en_US', index=0, suffix='')
"""
return self.language_dict.get(code, None)
def resolve_languages(self, languages):
"""
This is used by `UserType`.
Examples:
>>> from lino.core.site import TestSite as Site
>>> lst = Site(languages="en fr de nl et pt").resolve_languages('en fr')
>>> [i.name for i in lst]
['en', 'fr']
You may not specify languages which don't exist on this site:
>>> Site(languages="en fr de").resolve_languages('en nl')
Traceback (most recent call last):
...
Exception: Unknown language code 'nl' (must be one of ['en', 'fr', 'de'])
"""
rv = []
if isinstance(languages, six.string_types):
languages = str(languages).split()
for k in languages:
if isinstance(k, six.string_types):
li = self.get_language_info(k)
if li is None:
raise Exception(
"Unknown language code %r (must be one of %s)" % (
str(k), [i.name for i in self.languages]))
rv.append(li)
else:
assert k in self.languages
rv.append(k)
return tuple(rv)
def language_choices(self, language, choices):
l = choices.get(language, None)
if l is None:
l = choices.get(self.DEFAULT_LANGUAGE)
return l
def get_default_language(self):
"""
The django code of the default language to use in every
:class:`dd.LanguageField`.
"""
return self.DEFAULT_LANGUAGE.django_code
def str2kw(self, name, txt, **kw):
"""
Return a dictionary which maps the internal field names for
babelfield `name` to their respective translation of the given
lazy translatable string `text`.
>>> from django.utils.translation import ugettext_lazy as _
>>> from lino.core.site import TestSite as Site
>>> site = Site(languages='de fr es')
>>> site.str2kw('name', _("January")) == {'name_fr': 'janvier', 'name': 'Januar', 'name_es': 'Enero'}
True
>>> site = Site(languages='fr de es')
>>> site.str2kw('name', _("January")) == {'name_de': 'Januar', 'name': 'janvier', 'name_es': 'Enero'}
True
"""
from django.utils import translation
for simple, info in self.language_dict.items():
with translation.override(simple):
kw[name + info.suffix] = six.text_type(txt)
return kw
def babelkw(self, name, **kw):
"""
Return a dict with appropriate resolved field names for a
BabelField `name` and a set of hard-coded values.
You have some hard-coded multilingual content in a fixture:
>>> from lino.core.site import TestSite as Site
>>> kw = dict(de="Hallo", en="Hello", fr="Salut")
The field names where this info gets stored depends on the
Site's `languages` distribution.
>>> Site(languages="de-be en").babelkw('name',**kw) == {'name_en': 'Hello', 'name': 'Hallo'}
True
>>> Site(languages="en de-be").babelkw('name',**kw) == {'name_de_BE': 'Hallo', 'name': 'Hello'}
True
>>> Site(languages="en-gb de").babelkw('name',**kw) == {'name_de': 'Hallo', 'name': 'Hello'}
True
>>> Site(languages="en").babelkw('name',**kw) == {'name': 'Hello'}
True
>>> Site(languages="de-be en").babelkw('name',de="Hallo",en="Hello") == {'name_en': 'Hello', 'name': 'Hallo'}
True
In the following example `babelkw` attributes the
keyword `de` to the *first* language variant:
>>> Site(languages="de-ch de-be").babelkw('name',**kw) == {'name': 'Hallo'}
True
"""
d = dict()
for simple, info in self.language_dict.items():
v = kw.get(simple, None)
if v is not None:
d[name + info.suffix] = six.text_type(v)
return d
def args2kw(self, name, *args):
"""
Takes the basename of a BabelField and the values for each language.
Returns a `dict` mapping the actual fieldnames to their values.
"""
assert len(args) == len(self.languages)
kw = {name: args[0]}
for i, lang in enumerate(self.BABEL_LANGS):
kw[name + lang.suffix] = args[i + 1]  # lang is a LanguageInfo; its suffix already starts with '_'
return kw
def field2kw(self, obj, name, **known_values):
"""Return a `dict` with all values of the BabelField `name` in the
given object `obj`. The dict will have one key for each
:attr:`languages`.
Examples:
>>> from lino.core.site import TestSite as Site
>>> from atelier.utils import AttrDict
>>> def testit(site_languages):
... site = Site(languages=site_languages)
... obj = AttrDict(site.babelkw(
... 'name', de="Hallo", en="Hello", fr="Salut"))
... return site,obj
>>> site, obj = testit('de en')
>>> site.field2kw(obj, 'name') == {'de': 'Hallo', 'en': 'Hello'}
True
>>> site, obj = testit('fr et')
>>> site.field2kw(obj, 'name') == {'fr': 'Salut'}
True
"""
# d = { self.DEFAULT_LANGUAGE.name : getattr(obj,name) }
for lng in self.languages:
v = getattr(obj, name + lng.suffix, None)
if v:
known_values[lng.name] = v
return known_values
def field2args(self, obj, name):
"""
Return a list of the babel values of this field in the order of
this Site's :attr:`Site.languages` attribute.
"""
return [str(getattr(obj, name + li.suffix)) for li in self.languages]
#~ l = [ getattr(obj,name) ]
#~ for lang in self.BABEL_LANGS:
#~ l.append(getattr(obj,name+'_'+lang))
#~ return l
def babelitem(self, *args, **values):
"""
Given a dictionary with babel values, return the
value corresponding to the current language.
This is available in templates as a function `tr`.
>>> kw = dict(de="Hallo", en="Hello", fr="Salut")
>>> from lino.core.site import TestSite as Site
>>> from django.utils import translation
A Site with default language "de":
>>> site = Site(languages="de en")
>>> tr = site.babelitem
>>> with translation.override('de'):
... print(tr(**kw))
Hallo
>>> with translation.override('en'):
... print(tr(**kw))
Hello
If the current language is not found in the specified `values`,
then it returns the site's default language:
>>> with translation.override('jp'):
... print(tr(en="Hello", de="Hallo", fr="Salut"))
Hallo
Testing detail: default language should be "de" in our example, but
we are playing here with more than one Site instance while Django
knows only one "default language" which is the one specified in
`lino.projects.docs.settings`.
Another way is to specify an explicit default value using a
positional argument. In that case the site's default language
doesn't matter:
>>> with translation.override('jp'):
... print(tr("Tere", de="Hallo", fr="Salut"))
Tere
>>> with translation.override('de'):
... print(tr("Tere", de="Hallo", fr="Salut"))
Hallo
You may not specify more than one default value:
>>> tr("Hello", "Hallo")
Traceback (most recent call last):
...
ValueError: ('Hello', 'Hallo') is more than 1 default value.
"""
if len(args) == 0:
info = self.language_dict.get(
get_language(), self.DEFAULT_LANGUAGE)
default_value = None
if info == self.DEFAULT_LANGUAGE:
return values.get(info.name)
x = values.get(info.name, None)
if x is None:
return values.get(self.DEFAULT_LANGUAGE.name)
return x
elif len(args) == 1:
info = self.language_dict.get(get_language(), None)
if info is None:
return args[0]
default_value = args[0]
return values.get(info.name, default_value)
args = tuple_py2(args)
# print(type(args))
raise ValueError("%(values)s is more than 1 default value." %
dict(values=args))
# babel_get(v) = babelitem(**v)
def babeldict_getitem(self, d, k):
v = d.get(k, None)
if v is not None:
assert type(v) is dict
return self.babelitem(**v)
def babelattr(self, obj, attrname, default=NOT_PROVIDED, language=None):
"""
Return the value of the specified babel field `attrname` of `obj`
in the current language.
This is to be used in multilingual document templates. For
example in a document template of a Contract you may use the
following expression::
babelattr(self.type, 'name')
This will return the correct value for the current language.
Examples:
>>> from __future__ import unicode_literals
>>> from django.utils import translation
>>> from lino.core.site import TestSite as Site
>>> from atelier.utils import AttrDict
>>> def testit(site_languages):
... site = Site(languages=site_languages)
... obj = AttrDict(site.babelkw(
... 'name', de="Hallo", en="Hello", fr="Salut"))
... return site, obj
>>> site,obj = testit('de en')
>>> with translation.override('de'):
... print(site.babelattr(obj,'name'))
Hallo
>>> with translation.override('en'):
... print(site.babelattr(obj,'name'))
Hello
If the object has no translation for a given language, return
the site's default language. Two possible cases:
The language exists on the site, but the object has no
translation for it:
>>> site,obj = testit('en es')
>>> with translation.override('es'):
... print(site.babelattr(obj, 'name'))
Hello
Or a language has been activated which doesn't exist on the site:
>>> with translation.override('fr'):
... print(site.babelattr(obj, 'name'))
Hello
"""
if language is None:
language = get_language()
info = self.language_dict.get(language, self.DEFAULT_LANGUAGE)
if info.index != 0:
v = getattr(obj, attrname + info.suffix, None)
if v:
return v
if default is NOT_PROVIDED:
return getattr(obj, attrname)
else:
return getattr(obj, attrname, default)
#~ if lang is not None and lang != self.DEFAULT_LANGUAGE:
#~ v = getattr(obj,attrname+"_"+lang,None)
#~ if v:
#~ return v
#~ return getattr(obj,attrname,*args)
def diagnostic_report_rst(self, *args):
"""Returns a string with a diagnostic report about this
site. :manage:`diag` is a command-line shortcut to this.
"""
s = ''
s += rstgen.header(1, "Plugins")
for n, kp in enumerate(self.plugins.items()):
s += "%d. " % (n + 1)
s += "%s : %s\n" % kp
# s += "config_dirs: %s\n" % repr(self.confdirs.config_dirs)
s += "\n"
s += rstgen.header(1, "Config directories")
for n, cd in enumerate(self.confdirs.config_dirs):
s += "%d. " % (n + 1)
ln = relpath(cd.name)
if cd.writeable:
ln += " [writeable]"
s += ln + '\n'
# for arg in args:
# p = self.plugins[arg]
return s
# def get_db_overview_rst(self):
# from lino.utils.diag import analyzer
# analyzer.show_db_overview()
def override_defaults(self, **kwargs):
self.override_settings(**kwargs)
self.install_settings()
def is_imported_partner(self, obj):
"""
Return whether the specified
:class:`Partner <ml.contacts.Partner>` instance
`obj` is to be considered as imported from some legacy database.
"""
#~ return obj.id is not None and (obj.id < 200000 or obj.id > 299999)
return False
#~ return obj.id is not None and (obj.id > 10 and obj.id < 21)
def site_header(self):
"""Used in footnote or header of certain printed documents.
The convention is to call it as follows from an appy.pod template
(use the `html` function, not `xhtml`)
::
do text
from html(settings.SITE.site_header())
Note that this is expected to return a unicode string possibly
containing valid HTML (not XHTML) tags for formatting.
"""
if self.is_installed('contacts'):
if self.site_config.site_company:
return self.site_config.site_company.get_address('<br/>')
#~ s = unicode(self.site_config.site_company) + " / " + s
#~ return ''
# def setup_main_menu(self):
# """
# To be implemented by applications.
# """
# pass
def get_dashboard_items(self, user):
"""Expected to yield a sequence of items to be rendered on the
dashboard (:xfile:`admin_main.html`).
The default implementation calls :meth:`get_dashboard_items
<lino.core.plugin.Plugin.get_dashboard_items>` on every
installed plugin and yields all items.
The items will be rendered in that order, except if
:mod:`lino.modlib.dashboard` is installed to enable per-user
customized dashboard.
"""
if user:
for p in self.installed_plugins:
for i in p.get_dashboard_items(user):
yield i
@property
def site_config(self):
"""
This property holds a cached version of the one and only
:class:`SiteConfig <lino.modlib.system.models.SiteConfig>` row
that holds site-wide database-stored and web-editable Site
configuration parameters.
If no instance exists (which happens in a virgin database), we
create it using default values from
:attr:`site_config_defaults`.
This is always `None` when :mod:`lino.modlib.system` is not
installed.
"""
if 'system' not in self.models:
return None
if not self._startup_done:
return None
if self._site_config is None:
#~ raise Exception(20130301)
#~ print '20130320 create _site_config'
#~ from lino.core.utils import resolve_model
from lino.core.utils import obj2str
SiteConfig = self.models.system.SiteConfig
#~ from django.db.utils import DatabaseError
try:
self._site_config = SiteConfig.real_objects.get(
id=self.config_id)
# print("20180502 loaded SiteConfig {}",
# obj2str(self._site_config, True))
#~ except (SiteConfig.DoesNotExist,DatabaseError):
except SiteConfig.DoesNotExist:
#~ except Exception,e:
kw = dict(id=self.config_id)
#~ kw.update(settings.SITE.site_config_defaults)
kw.update(self.site_config_defaults)
self._site_config = SiteConfig(**kw)
# print("20180502 Created SiteConfig {}".format(
# obj2str(self._site_config, True)))
# 20120725
# polls_tutorial menu selection `Config --> Site Parameters`
# said "SiteConfig 1 does not exist"
# cannot save the instance here because the db table possibly doesn't exist yet.
#~ self._site_config.save()
return self._site_config
#~ site_config = property(get_site_config)
#~ def shutdown(self):
#~ self.clear_site_config()
#~ return super(Site,self).shutdown()
def clear_site_config(self):
"""
Clear the cached SiteConfig instance.
This is needed e.g. when the test runner has created a new
test database.
"""
from lino.core.utils import obj2str
# print("20180502 clear_site_config {}".format(
# obj2str(self._site_config, True)))
self._site_config = None
def get_quicklinks(self, user):
from lino.core import menus
m = menus.Toolbar(user.user_type, 'quicklinks')
self.setup_quicklinks(user, m)
return m
def setup_quicklinks(self, user, m):
"""Override this in application-specific (or even local)
:xfile:`settings.py` files to define a series of *quick links*
to appear below the main menu bar.
"""
self.on_each_app('setup_quicklinks', user, m)
def get_site_menu(self, ui, user_type):
"""
Return this site's main menu for the given UserType.
Must be a :class:`lino.core.menus.Toolbar` instance.
Applications usually should not need to override this.
"""
from lino.core import menus
main = menus.Toolbar(user_type, 'main')
self.setup_menu(user_type, main)
main.compress()
return main
def setup_menu(self, user_type, main):
"""Set up the application's menu structure.
The default implementation uses a system of *predefined
top-level items* that are filled by the different installed
plugins.
- `setup_master_menu`
- `setup_main_menu`
- `setup_reports_menu`
- `setup_config_menu`
- `setup_explorer_menu`
- `setup_site_menu`
These predefined top-level items ("Master", "Main", "Reports",
"Configuration", ...) are themselves configurable in
:attr:`top_level_menus`.
"""
from django.apps import apps
apps = [a.models_module for a in apps.get_app_configs()]
for k, label in self.top_level_menus:
methname = "setup_{0}_menu".format(k)
for mod in apps:
if hasattr(mod, methname):
msg = "{0} still has a function {1}(). \
Please convert to Plugin method".format(mod, methname)
raise ChangedAPI(msg)
if label is None:
menu = main
else:
menu = main.add_menu(k, label)
for p in self.installed_plugins:
meth = getattr(p, methname, None)
if meth is not None:
meth(self, user_type, menu)
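# A plugin fills one of these predefined menus by defining the
# corresponding hook.  Minimal sketch (the menu entries are illustrative):
#
#   class Plugin(ad.Plugin):
#       def setup_main_menu(self, site, user_type, m):
#           m = m.add_menu(self.app_label, self.verbose_name)
#           m.add_action('contacts.Persons')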
def get_middleware_classes(self):
"""Yields the strings to be stored in
the :setting:`MIDDLEWARE_CLASSES` setting.
In case you don't want to use this method for defining
:setting:`MIDDLEWARE_CLASSES`, you can simply set
:setting:`MIDDLEWARE_CLASSES` in your :xfile:`settings.py`
after the :class:`Site` has been instantiated.
`Django and standard HTTP authentication
<http://stackoverflow.com/questions/152248/can-i-use-http-basic-authentication-with-django>`_
"""
yield 'django.middleware.common.CommonMiddleware'
if self.languages and len(self.languages) > 1:
yield 'django.middleware.locale.LocaleMiddleware'
if self.user_model:
yield 'django.contrib.sessions.middleware.SessionMiddleware'
# yield 'django.contrib.auth.middleware.AuthenticationMiddleware'
yield 'lino.core.auth.middleware.AuthenticationMiddleware'
yield 'lino.core.auth.middleware.WithUserMiddleware'
yield 'lino.core.auth.middleware.DeviceTypeMiddleware'
else:
yield 'lino.core.auth.middleware.NoUserMiddleware'
if self.get_auth_method() == 'remote':
# yield 'django.contrib.auth.middleware.RemoteUserMiddleware'
yield 'lino.core.auth.middleware.RemoteUserMiddleware'
if self.use_ipdict:
yield 'lino.modlib.ipdict.middleware.Middleware'
if self.social_auth_backends:
yield 'social_django.middleware.SocialAuthExceptionMiddleware'
if True:
yield 'lino.utils.ajax.AjaxExceptionResponse'
if self.use_security_features:
yield 'django.middleware.security.SecurityMiddleware'
yield 'django.middleware.clickjacking.XFrameOptionsMiddleware'
# yield 'django.middleware.csrf.CsrfViewMiddleware'
if False:
#~ yield 'lino.utils.sqllog.ShortSQLLogToConsoleMiddleware'
yield 'lino.utils.sqllog.SQLLogToConsoleMiddleware'
#~ yield 'lino.utils.sqllog.SQLLogMiddleware'
# def get_main_action(self, user_type):
# """No longer used.
# Return the action to show as top-level "index.html".
# The default implementation returns `None`, which means
# that Lino will call :meth:`get_main_html`.
# """
# return None
def __deepcopy__(self):
raise Exception("Who is copying me?!")
def __copy__(self):
raise Exception("Who is copying me?!")
def get_main_html(self, request, **context):
"""Return a chunk of html to be displayed in the main area of the
admin index. This is being called only if
:meth:`get_main_action` returns `None`. The default
implementation renders the :xfile:`admin_main.html` template.
"""
return self.plugins.jinja.render_from_request(
request, 'admin_main.html', **context)
def get_welcome_messages(self, ar):
"""
Yields a list of "welcome messages" (see
:meth:`lino.core.actors.Actor.get_welcome_messages`) of all
actors. This is being called from :xfile:`admin_main.html`.
"""
for h in self._welcome_handlers:
for msg in h(ar):
yield msg
# for a in self._welcome_actors:
# for msg in a.get_welcome_messages(ar):
# yield msg
def add_welcome_handler(self, func, actor=None, msg=None):
"""Add the given callable as a "welcome handler". Lino will call
every welcome handler for every incoming request, passing them
a :class:`BaseRequest <lino.core.requests.BaseRequest>`
instance representing this request as positional argument.
The callable is expected to yield a series of messages
(usually either 0 or 1). Each message must be either a string
or a :class:`E.span <etgen.html.E>` element.
"""
# print(
# "20161219 add_welcome_handler {} {} {}".format(
# actor, msg, func))
self._welcome_handlers.append(func)
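# Minimal sketch of a welcome handler (the message text is illustrative):
#
#   def my_handler(ar):
#       if ar.get_user().username == 'robin':
#           yield "Don't forget to back up the database."
#
#   settings.SITE.add_welcome_handler(my_handler)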
def get_installed_apps(self):
"""Yield the list of apps to be installed on this site. Each item
must be either a string (unicode being converted to str) or a
*generator* which will be iterated recursively (again
expecting either strings or generators of strings).
Lino will call this method exactly once when the :class:`Site`
instantiates. The resulting list of names will then possibly
altered by the :meth:`get_apps_modifiers` method before being
assigned to the :setting:`INSTALLED_APPS` setting.
"""
if self.django_admin_prefix:
yield 'django.contrib.admin' # not tested
yield 'django.contrib.staticfiles'
yield 'lino.modlib.about'
if self.use_ipdict:
yield 'lino.modlib.ipdict'
if self.social_auth_backends:
yield 'social_django'
yield self.default_ui
if self.admin_ui is not None:
if self.admin_ui == self.default_ui:
raise Exception(
"admin_ui (if specified) must be different "
"from default_ui")
yield self.admin_ui
# if self.default_ui == "extjs":
# yield 'lino.modlib.extjs'
# yield 'lino.modlib.bootstrap3'
# elif self.default_ui == "bootstrap3":
# yield 'lino.modlib.bootstrap3'
# yield "lino.modlib.lino_startup"
site_prefix = '/'
"""The string to prefix to every URL of the Lino web interface.
This must *start and end* with a *slash*. The default value is
``'/'``.
This must be set if your project is not being served at the "root"
URL of your server.
If this is different from the default value, Lino also sets
:setting:`SESSION_COOKIE_PATH`.
When this Site is running under something else than a development
server, this setting must correspond to your web server's
configuration. For example if you have::
WSGIScriptAlias /foo /home/luc/mypy/lino_sites/foo/wsgi.py
Then your :xfile:`settings.py` should specify::
site_prefix = '/foo/'
See also :ref:`mass_hosting`.
"""
def buildurl(self, *args, **kw):
url = self.site_prefix + ("/".join(args))
if len(kw):
url += "?" + urlencode(kw, True)
return url
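# For example, with the default site_prefix of '/' (argument values are
# illustrative):
#
#   site.buildurl('api', 'contacts', 'Person', id=5)
#   # -> '/api/contacts/Person?id=5'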
def build_media_url(self, *args, **kw):
from django.conf import settings
url = settings.MEDIA_URL + ("/".join(args))
if len(kw):
url += "?" + urlencode(kw, True)
return url
def build_static_url(self, *args, **kw):
from django.conf import settings
url = settings.STATIC_URL + ("/".join(args))
if len(kw):
url += "?" + urlencode(kw, True)
return url
def send_email(self, subject, sender, body, recipients):
"""Send an email message with the specified arguments (the same
signature as `django.core.mail.EmailMessage`).
`recipients` is an iterator over a list of strings with email
addresses. Any address containing '@example.com' will be
removed. Does nothing if the resulting list of recipients is
empty.
If `body` starts with "<", then it is considered to be HTML.
"""
if '@example.com' in sender:
self.logger.debug(
"Ignoring email '%s' because sender is %s", subject, sender)
print(PRINT_EMAIL.format(
subject=subject, sender=sender, body=body,
recipients=u', '.join(recipients)).encode(
'ascii', 'replace').decode())
return
recipients = [a for a in recipients if '@example.com' not in a]
if not len(recipients):
self.logger.info(
"Ignoring email '%s' because there is no recipient", subject)
return
self.logger.info(
"Send email '%s' from %s to %s", subject, sender, recipients)
from django.core.mail import send_mail
kw = {}
if body.startswith('<'):
kw['html_message'] = body
body = html2text(body)
# self.logger.info("20161008b %r %r %r %r", subject, sender, recipients, body)
try:
send_mail(subject, body, sender, recipients, **kw)
except Exception as e:
self.logger.warning("send_mail() failed : %s", e)
# msg = EmailMessage(subject=subject,
# from_email=sender, body=body, to=recipients)
# from django.core.mail import EmailMessage
# msg = EmailMessage(subject=subject,
# from_email=sender, body=body, to=recipients)
# self.logger.info(
# "Send email '%s' from %s to %s", subject, sender, recipients)
# msg.send()
def welcome_html(self, ui=None):
"""
Return an HTML version of the "This is APPLICATION
version VERSION using ..." text, to be displayed in the
About dialog, in the plain html footer, and maybe at other
places.
"""
from django.utils.translation import ugettext as _
p = []
sep = ''
if self.verbose_name:
p.append(_("This is "))
if self.url:
p.append(
E.a(str(self.verbose_name), href=self.url, target='_blank'))
else:
p.append(E.b(str(self.verbose_name)))
if self.version:
p.append(' ')
p.append(self.version)
sep = _(' using ')
for name, version, url in self.get_used_libs(html=E):
p.append(sep)
p.append(E.a(name, href=url, target='_blank'))
p.append(' ')
p.append(version)
sep = ', '
return E.span(*p)
def login(self, username=None, **kw):
"""Open a session as the user with the given `username`.
For usage from a shell or a tested document. Does not require
any password because when somebody has command-line access we
trust that she has already authenticated.
It returns a
:class:`BaseRequest <lino.core.requests.BaseRequest>` object.
"""
from lino.core import requests
self.startup()
User = self.user_model
if User and username:
try:
kw.update(user=User.objects.get(username=username))
except User.DoesNotExist:
raise Exception("'{0}' : no such user".format(username))
# if not 'renderer' in kw:
# kw.update(renderer=self.ui.text_renderer)
# import lino.core.urls # hack: trigger ui instantiation
return requests.BaseRequest(**kw)
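# Typical use from a Django shell or a tested document (the username is
# illustrative):
#
#   ses = settings.SITE.login('robin')
#   ses.user  # the User instance that was looked up by username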
def get_letter_date_text(self, today=None):
"""
Returns a string like "Eupen, den 26. August 2013".
"""
sc = self.site_config.site_company
if today is None:
today = self.today()
from lino.utils.format_date import fdl
if sc and sc.city:
return _("%(place)s, %(date)s") % dict(
place=str(sc.city.name), date=fdl(today))
return fdl(today)
def decfmt(self, v, places=2, **kw):
"""
Format a Decimal value using :func:`lino.utils.moneyfmt`, but
applying the site settings
:attr:`lino.Lino.decimal_group_separator` and
:attr:`lino.Lino.decimal_separator`.
>>> from lino.core.site import TestSite as Site
>>> from decimal import Decimal
>>> self = Site()
>>> print(self.decimal_group_separator)
\xa0
>>> print(self.decimal_separator)
,
>>> x = Decimal(1234)
>>> print(self.decfmt(x))
1\xa0234,00
>>> print(self.decfmt(x, sep="."))
1.234,00
>>> self.decimal_group_separator = '.'
>>> print(self.decfmt(x))
1.234,00
>>> self.decimal_group_separator = "oops"
>>> print(self.decfmt(x))
1oops234,00
"""
kw.setdefault('sep', self.decimal_group_separator)
kw.setdefault('dp', self.decimal_separator)
from lino.utils import moneyfmt
return moneyfmt(v, places=places, **kw)
LOOKUP_OP = '__iexact'
def lookup_filter(self, fieldname, value, **kw):
"""
Return a `models.Q` to be used if you want to search for a given
string in any of the languages for the given babel field.
"""
from django.db.models import Q
kw[fieldname + self.LOOKUP_OP] = value
#~ kw[fieldname] = value
flt = Q(**kw)
del kw[fieldname + self.LOOKUP_OP]
for lng in self.BABEL_LANGS:
kw[fieldname + lng.suffix + self.LOOKUP_OP] = value
flt = flt | Q(**kw)
del kw[fieldname + lng.suffix + self.LOOKUP_OP]
return flt
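# On a site with languages "en de", lookup_filter('name', 'foo') builds
# roughly the following query (sketch):
#
#   Q(name__iexact='foo') | Q(name_de__iexact='foo')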
# def relpath(self, p):
# """Used by :class:`lino.mixins.printable.EditTemplate` in order to
# write a testable message...
# """
# if p.startswith(self.project_dir):
# p = "$(PRJ)" + p[len(self.project_dir):]
# return p
class TestSite(Site):
"""Used to simplify doctest strings because it inserts default values
for the two first arguments that are mandatory but not used in our
examples.
Example::
>> from lino.core.site import Site
>> Site(globals(), ...)
>> from lino.core.site import TestSite as Site
>> Site(...)
"""
def __init__(self, *args, **kwargs):
# kwargs.update(no_local=True)
g = dict(__file__=__file__)
g.update(SECRET_KEY="20227") # see :djangoticket:`20227`
super(TestSite, self).__init__(g, *args, **kwargs)
# 20140913 Hack needed for doctests in :mod:`ad`.
from django.utils import translation
translation._default = None
def _test():
# we want to raise an Exception if there is a failure, but
# doctest's raise_on_error=True option is not useful because it
# does not report the traceback if some test fails.
import doctest
res = doctest.testmod()
if res.failed > 0:
raise Exception("{0} (see earlier output)".format(res))
if __name__ == "__main__":
_test()
| 2.046875
| 2
|
tools/generate-readme.py
|
adamsqi/python-scripts
| 1
|
12776217
|
__author__ = '[<NAME>](https://github.com/adamsqi)'
__date__ = '2020.06.21'
"""
This is a script for automatic generation of the README.md content. It parses all .py script files within the repository and creates a README.md file.
Inspired by: [<NAME>](https://github.com/bamos/python-scripts/blob/master/README.md)
"""
import ast
import os
import re
from typing import List, Set
UNPARSABLE_FILES = ['.git', '.gitignore', 'README.md', 'generate_readme.py', 'LICENSE']
README_TEMPLATE = """<h1 align="center">Python scripts</h1>
<div align="center">

[](https://opensource.org/licenses/MIT)
</div>
This is a collection of short Python scripts I use as utility tools or just for testing of various features.
{content}
"""
def main():
generator = ReadmeGenerator()
generator.generate()
class ReadmeGenerator():
def __init__(self):
pass
def generate(self):
content = self._prepare_content()
parent_dir = self._get_parent_dir_path()
with open(os.path.join(parent_dir, 'README.md'), mode='w') as f:
ready_content = README_TEMPLATE.format(content=content)
f.write(ready_content)
def _prepare_content(self) -> str:
scripts_finder = ValidScriptsFinder()
valid_script_names = scripts_finder.find()
content = self._get_all_content_from_scripts(script_names=valid_script_names)
return content
def _get_all_content_from_scripts(self, script_names: List[str]) -> str:
content = ''
script_names = sorted(script_names)
for name in script_names:
script_link = self._generate_script_link(script_name=name)
meta_text = self._parse_single_file(name=name)
content += '### ' + script_link + '\n\n' + meta_text + '\n\n\n'
return content
def _generate_script_link(self, script_name: str) -> str:
url_base = 'https://github.com/adamsqi/python-scripts/blob/master/scripts/'
url = url_base + script_name
return f'[{script_name}]({url})'
def _parse_single_file(self, name: str) -> str:
content = self._read_file(file_path=name)
meta_text = self._extract_doc_string(content)
return meta_text
def _read_file(self, file_path: str) -> str:
parent_dir = self._get_parent_dir_path()
with open(os.path.join(parent_dir, 'scripts', file_path), mode='r') as f:
return f.read()
def _get_parent_dir_path(self) -> str:
dir_path = os.path.dirname(os.getcwd())
return dir_path
def _extract_doc_string(self, content: str) -> str:
ast_module = ast.parse(content)
ast_f = ast.literal_eval
author, date, doc_string = [ast_f(m.value) for m in ast_module.body[0:3]]
return f"""
+ Author: {author}
+ Created at: {date}
#### Description: {doc_string}
"""
class ValidScriptsFinder():
def __init__(self):
pass
def find(self) -> List[str]:
script_names = self._get_valid_script_names_within_cwd()
return script_names
def _get_valid_script_names_within_cwd(self) -> List[str]:
all_file_names = self._get_all_files_within_parent_dir()
file_names = set(all_file_names) - set(UNPARSABLE_FILES)
valid_file_names = self._exclude_files_with_ignored_extensions(file_names)
return valid_file_names
def _get_all_files_within_parent_dir(self) -> List[str]:
files = [file for file in os.listdir(os.path.join(self._get_parent_dir_path(), 'scripts'))]
return files
def _get_parent_dir_path(self) -> str:
dir_path = os.path.dirname(os.getcwd())
return dir_path
def _exclude_files_with_ignored_extensions(self, file_names: Set[str]) -> List[str]:
ignored_extensions = self._read_git_ignore()
result = [name for name in file_names if not any(sub in name for sub in ignored_extensions)] # only files without ignored extensions
return result
def _read_git_ignore(self) -> List[str]:
parent_dir = self._get_parent_dir_path()
with open(os.path.join(parent_dir,'.gitignore'), mode='r') as f:
data = f.read()
data = data.split('\n')
data = [el.replace('*', '') for el in data if el.strip()]  # skip empty lines, which would otherwise match every file name
return data
if __name__ == '__main__':
main()
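# Illustrative sketch of the generated output (added for clarity, not part of the original script):
# each script found under scripts/ is rendered roughly as
#   ### [example.py](https://github.com/adamsqi/python-scripts/blob/master/scripts/example.py)
#   + Author: <value of the script's first top-level statement>
#   + Created at: <value of the second top-level statement>
#   #### Description: <module docstring>
# where example.py is a hypothetical file name used only for this sketch.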
| 2.203125
| 2
|
ch06/ch0602_recurrent_neural_network.py
|
zhuyuanxiang/deep-learning-with-python-notebooks
| 6
|
12776218
|
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : <EMAIL>
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : deep-learning-with-python-notebooks
@File : ch0602_recurrent_neural_network.py
@Version : v0.1
@Time : 2019-11-24 16:00
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : "Deep Learning with Python", Francois Chollet, Sec0602, P162
@Desc : Deep learning for text and sequences; understanding recurrent neural networks (a plain RNN is not well suited to this sentiment-analysis task; see ch0603 for a better treatment of RNNs)
"""
import os
import sys
import matplotlib.pyplot as plt
import numpy as np  # pip install "numpy<1.17"; versions below 1.17 do not raise the error
import winsound
from keras.activations import relu, sigmoid
from keras.datasets import imdb
from keras.layers import Dense
from keras.layers import Embedding, LSTM, SimpleRNN
from keras.losses import binary_crossentropy
from keras.metrics import binary_accuracy
from keras.models import Sequential
from keras.optimizers import rmsprop
from keras.preprocessing.sequence import pad_sequences
# Suppress warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
from tools import plot_classes_results
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Set the printed output precision to 3 decimal places
np.set_printoptions(precision = 3, suppress = True, threshold = np.inf, linewidth = 200)
# to make this notebook's output stable across runs
seed = 42
np.random.seed(seed)
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# numpy 1.16.4 or 1.16.5 is required
assert np.__version__ in ["1.16.5", "1.16.4"]
# ----------------------------------------------------------------------
# Listing 6-21: NumPy implementation of a simple RNN
def simple_rnn_use_numpy():
timesteps = 100  # number of timesteps in the input sequence
input_features = 32  # dimensionality of the input feature space
output_features = 64  # dimensionality of the output feature space
# Input data: random noise, used only as an example
inputs = np.random.random((timesteps, input_features))
state_t = np.zeros((output_features,))  # initial state: an all-zero vector
# Create random weight matrices
W = np.random.random((output_features, input_features)) / 10
U = np.random.random((output_features, output_features)) / 10
b = np.random.random((output_features,)) / 10
successive_outputs = []
for input_t in inputs:
# current output combines the current input with the previous state
output_t = np.tanh(np.dot(W, input_t) + np.dot(U, state_t) + b)
successive_outputs.append(output_t)  # store the output in a list
# update the network state for use at the next timestep
state_t = output_t
pass
# The final output is a 2D tensor of shape (timesteps, output_features)
# np.stack() converts the list of arrays into a single 2D array
final_output_sequence = np.stack(successive_outputs, axis = 0)
return final_output_sequence
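# Illustrative check (added, not part of the original listing): the NumPy RNN above
# returns one output vector per timestep, e.g.
#   outputs = simple_rnn_use_numpy()
#   print(outputs.shape)  # expected: (100, 64) == (timesteps, output_features)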
# ----------------------------------------------------------------------
# Keras implementation of a simple RNN
def keras_simplernn():
model = Sequential(name = "full state sequence")
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32, return_sequences = True))
model.summary()
model = Sequential(name = "output of the last time step")
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32))
model.summary()
model = Sequential(name = "stacking multiple recurrent layers")
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32, return_sequences = True))
model.add(SimpleRNN(32, return_sequences = True))
model.add(SimpleRNN(32, return_sequences = True))
model.add(SimpleRNN(32))
model.summary()
pass
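# Note (added for clarity): return_sequences=True makes SimpleRNN return the full
# output sequence of shape (batch_size, timesteps, 32), which is required when
# stacking another recurrent layer on top; the default (False) returns only the
# last timestep's output of shape (batch_size, 32).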
# Apply RNN and LSTM models to the IMDB movie-review classification problem
max_features = 10000
max_len = 500
batch_size = 128
epochs = 10
# See ch0304 for a detailed description of the dataset
print("Listing 6.22: loading the dataset...")
(train_data, y_train), (test_data, y_test) = imdb.load_data(num_words = max_features)
x_train = pad_sequences(train_data, maxlen = max_len)
x_test = pad_sequences(test_data, maxlen = max_len)
def train_model(model, data, labels):
return model.fit(data, labels, epochs = epochs, batch_size = batch_size,
validation_split = 0.2, verbose = 2, use_multiprocessing = True)
# ----------------------------------------------------------------------
def definite_rnn():
title = "将 SimpleRNN 应用于 IMDB "
model = Sequential(name = title)
model.add(Embedding(max_features, 64))
model.add(SimpleRNN(64))
model.add(Dense(1, activation = sigmoid))
model.summary()
model.compile(optimizer = rmsprop(lr = 0.001), loss = binary_crossentropy, metrics = [binary_accuracy])
history = train_model(model, x_train, y_train)
plot_classes_results(history, title, epochs)
print(title + "评估测试集", model.evaluate(x_test, y_test, verbose = 2, use_multiprocessing = True))
pass
# ----------------------------------------------------------------------
def definite_lstm():
title = "将 LSTM 应用于 IMDB"
model = Sequential(name = title)
model.add(Embedding(max_features, 64))
model.add(LSTM(64))
model.add(Dense(1, activation = sigmoid))
model.summary()
model.compile(optimizer = rmsprop(lr = 0.001), loss = binary_crossentropy, metrics = [binary_accuracy])
history = train_model(model, x_train, y_train)
plot_classes_results(history, title, epochs)
print(title + " - evaluate on the test set", model.evaluate(x_test, y_test, verbose = 2, use_multiprocessing = True))
pass
# ----------------------------------------------------------------------
# Rework the binary classification problem from ch0304
def vectorize_sequences(sequences, dimension = 10000):
results = np.zeros((len(sequences), dimension))
for i, sequence in enumerate(sequences):
results[i, sequence] = 1.
pass
return results
vector_train_data = vectorize_sequences(train_data, max_features)
vector_test_data = vectorize_sequences(test_data, max_features)
vector_train_labels = np.asarray(y_train)
vector_test_labels = np.asarray(y_test)
# With One-Hot encoded data, accuracy is even better than with RNN and LSTM (ch0304 confirmed that dense layers do perform very well)
def definite_dense_for_one_hot():
title = "将 Dense+One-Hot 应用于 IMDB"
model = Sequential(name = title)
model.add(Dense(16, activation = relu, input_shape = (10000,)))
model.add(Dense(16, activation = relu))
model.add(Dense(1, activation = sigmoid))
model.summary()
model.compile(optimizer = rmsprop(lr = 0.001), loss = binary_crossentropy, metrics = [binary_accuracy])
history = train_model(model, vector_train_data, vector_train_labels)
plot_classes_results(history, title, epochs)
print(title + " - evaluate on the test set",
model.evaluate(vector_test_data, vector_test_labels, verbose = 2, use_multiprocessing = True))
pass
# Without One-Hot encoding, accuracy drops sharply
def definite_dense():
title = "将 Dense 应用于 IMDB"
model = Sequential(name = title)
model.add(Dense(16, activation = relu, input_shape = (500,)))
model.add(Dense(16, activation = relu))
model.add(Dense(1, activation = sigmoid))
model.summary()
model.compile(optimizer = rmsprop(lr = 0.001), loss = binary_crossentropy, metrics = [binary_accuracy])
history = train_model(model, x_train, y_train)
plot_classes_results(history, title, epochs)
print(title + " - evaluate on the test set", model.evaluate(x_test, y_test, verbose = 2, use_multiprocessing = True))
pass
# ----------------------------------------------------------------------
definite_rnn()
definite_lstm()
definite_dense_for_one_hot()
definite_dense()
# Beep to signal that the run has finished
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
plt.show()
pass
| 2.21875
| 2
|
Problems/String/71. Simplify Path.py
|
BYJRK/LeetCode-Solutions
| 0
|
12776219
|
# https://leetcode.com/problems/simplify-path/
class Solution:
def simplifyPath(self, path: str) -> str:
stack = []
for d in path.split('/'):
if d == '..':
if len(stack) > 0:
stack.pop()
elif d == '.' or d == '':
continue
else:
stack.append(d)
return '/' + '/'.join(stack)
s = Solution()
tests = [
'/home/',
'/../',
'/home//foo/',
'/a/./b/../../c/'
]
for test in tests:
print(s.simplifyPath(test))
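# Expected output for the tests above (added for clarity):
#   /home
#   /
#   /home/foo
#   /c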
| 3.671875
| 4
|
autodrp/utils.py
|
rcj0003/django-autodrp
| 1
|
12776220
|
<filename>autodrp/utils.py
from django.db.models.signals import class_prepared
from django.dispatch import receiver
ALWAYS_TRUE = lambda *args, **kwargs: True
class CheckPermissions:
def __init__(self, *checks):
self.checks = checks
def __call__(self, request):
for permission in self.checks:
if permission.has_permission(request):
return True
return False
class CheckObjectPermissions:
def __init__(self, *checks):
self.checks = checks
def __call__(self, obj, request):
for permission in self.checks:
if permission.has_object_permission(request, obj):
return True
return False
class Filter:
def __init__(self, *checks):
self.checks = checks
def __call__(self, request, queryset):
for filter in self.checks:
if hasattr(filter, 'has_permission') and not filter.has_permission(request):
continue
queryset, filtered = filter.filter(request, queryset)
if filtered:
break
return queryset
def _bake_global_permissions(sender, permission_data):
for actions, check_data in permission_data.items():
if not hasattr(check_data, '__iter__'):
check_data = [check_data]
checks = [check for check in check_data if hasattr(check, 'has_permission')]
actions = [actions] if isinstance(actions, str) else actions
if len(checks) > 0:
permission_function = staticmethod(CheckPermissions(*checks))
else:
permission_function = staticmethod(ALWAYS_TRUE)
for action in actions:
setattr(sender, f'has_{action}_permission', permission_function)
def _bake_object_permissions(sender, permission_data):
for actions, check_data in permission_data.items():
if not hasattr(check_data, '__iter__') and not isinstance(check_data, str):
check_data = [check_data]
checks = [check for check in check_data if hasattr(check, 'has_object_permission')]
filters = [check for check in check_data if hasattr(check, 'filter')]
actions = [actions] if isinstance(actions, str) else actions
if len(checks) > 0:
permission_function = CheckObjectPermissions(*checks)
else:
permission_function = ALWAYS_TRUE
for action in actions:
setattr(sender, f'has_object_{action}_permission', permission_function)
if len(filters) > 0:
filter_function = staticmethod(Filter(*filters))
for action in actions:
setattr(sender, f'filter_for_{action}', filter_function)
def bake_permissions(sender):
if hasattr(sender, '_bake_permission_data'):
_bake_global_permissions(sender, sender._bake_permission_data())
elif hasattr(sender, 'DRY_GLOBAL_PERMISSIONS'):
_bake_global_permissions(sender, sender.DRY_GLOBAL_PERMISSIONS)
if hasattr(sender, '_bake_object_permission_data'):
_bake_object_permissions(sender, sender._bake_object_permission_data())
elif hasattr(sender, 'DRY_OBJECT_PERMISSIONS'):
_bake_object_permissions(sender, sender.DRY_OBJECT_PERMISSIONS)
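# Illustrative usage sketch (hypothetical model and check class, not part of this module):
#
#   class IsStaff:
#       def has_permission(self, request):
#           return request.user.is_staff
#
#   class Report(models.Model):
#       DRY_GLOBAL_PERMISSIONS = {('read', 'list'): IsStaff()}
#
#   bake_permissions(Report)  # attaches has_read_permission / has_list_permission to Report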
| 2.234375
| 2
|
Classes/QAData.py
|
usgsdsm/qrevpy
| 0
|
12776221
|
import numpy as np
from Classes.Uncertainty import Uncertainty
from Classes.QComp import QComp
class QAData(object):
"""Evaluates and stores quality assurance characteristics and messages.
Attributes
----------
q_run_threshold_caution: int
Caution threshold for interpolated discharge for a run of invalid ensembles, in percent.
q_run_threshold_warning: int
Warning threshold for interpolated discharge for a run of invalid ensembles, in percent.
q_total_threshold_caution: int
Caution threshold for total interpolated discharge for invalid ensembles, in percent.
q_total_threshold_warning: int
Warning threshold for total interpolated discharge for invalid ensembles, in percent.
transects: dict
Dictionary of quality assurance checks for transects
system_tst: dict
Dictionary of quality assurance checks on the system test(s)
compass: dict
Dictionary of quality assurance checks on compass calibration and evaluations
temperature: dict
Dictionary of quality assurance checks on temperature comparisons and variation
moving_bed: dict
Dictionary of quality assurance checks on moving-bed tests
user: dict
Dictionary of quality assurance checks on user input data
bt_vel: dict
Dictionary of quality assurance checks on bottom track velocities
gga_vel: dict
Dictionary of quality assurance checks on gga boat velocities
vtg_vel: dict
Dictionary of quality assurance checks on vtg boat velocities
w_vel: dict
Dictionary of quality assurance checks on water track velocities
extrapolation: dict
Dictionary of quality assurance checks on extrapolations
edges: dict
Dictionary of quality assurance checks on edges
"""
def __init__(self, meas):
"""Checks the measurement for all quality assurance issues.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Set default thresholds
self.q_run_threshold_caution = 3
self.q_run_threshold_warning = 5
self.q_total_threshold_caution = 10
self.q_total_threshold_warning = 25
# Initialize instance variables
self.transects = dict()
self.system_tst = dict()
self.compass = dict()
self.temperature = dict()
self.moving_bed = dict()
self.user = dict()
self.depths = dict()
self.bt_vel = dict()
self.gga_vel = dict()
self.vtg_vel = dict()
self.w_vel = dict()
self.extrapolation = dict()
self.edges = dict()
# Apply QA checks
self.transects_qa(meas)
self.system_tst_qa(meas)
self.compass_qa(meas)
self.temperature_qa(meas)
self.moving_bed_qa(meas)
self.user_qa(meas)
self.depths_qa(meas)
self.boat_qa(meas)
self.water_qa(meas)
self.extrapolation_qa(meas)
self.edges_qa(meas)
def transects_qa(self, meas):
"""Apply quality checks to transects
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Assume good results
self.transects['status'] = 'good'
# Initialize keys
self.transects['messages'] = []
self.transects['recip'] = 0
self.transects['sign'] = 0
self.transects['duration'] = 0
self.transects['number'] = 0
self.transects['uncertainty'] = 0
checked = []
discharges = []
start_edge = []
for n in range(len(meas.transects)):
checked.append(meas.transects[n].checked)
if meas.transects[n].checked:
discharges.append(meas.discharge[n])
start_edge.append(meas.transects[n].start_edge)
num_checked = np.nansum(np.asarray(checked))
# Check duration
total_duration = 0
if num_checked >= 1:
for transect in meas.transects:
if transect.checked:
total_duration += transect.date_time.transect_duration_sec
# Check duration against USGS policy
if total_duration < 720:
self.transects['status'] = 'caution'
self.transects['messages'].append(
['Transects: Duration of selected transects is less than 720 seconds;', 2, 0])
self.transects['duration'] = 1
# Check transects for missing ensembles
for transect in meas.transects:
if transect.checked:
# Determine number of missing ensembles
if transect.adcp.manufacturer == 'SonTek':
# Determine number of missing ensembles for SonTek data
idx_missing = np.where(transect.date_time.ens_duration_sec > 1.5)[0]
if len(idx_missing) > 0:
average_ensemble_duration = ((np.nansum(transect.date_time.ens_duration_sec)
- np.nansum(transect.date_time.ens_duration_sec[idx_missing]))
/ (len(transect.date_time.ens_duration_sec) - len(idx_missing)))
num_missing = np.round(np.nansum(transect.date_time.ens_duration_sec[idx_missing])
/ average_ensemble_duration) - len(idx_missing)
else:
num_missing = 0
else:
# Determine number of lost ensembles for TRDI data
idx_missing = np.where(np.isnan(transect.date_time.ens_duration_sec) == True)[0]
num_missing = len(idx_missing) - 1
# Save caution message
if num_missing > 0:
self.transects['messages'].append(['Transects: ' + str(transect.file_name) + ' is missing '
+ str(int(num_missing)) + ' ensembles;', 2, 0])
self.transects['status'] = 'caution'
# Check number of transects checked
if num_checked == 0:
# No transects selected
self.transects['status'] = 'warning'
self.transects['messages'].append(['TRANSECTS: No transects selected;', 1, 0])
self.transects['number'] = 2
elif num_checked == 1:
# Only one transect selected
self.transects['status'] = 'caution'
self.transects['messages'].append(['Transects: Only one transect selected;', 2, 0])
self.transects['number'] = 2
else:
self.transects['number'] = num_checked
if num_checked == 2:
# Only 2 transects selected
cov, _ = Uncertainty.uncertainty_q_random(discharges, 'total')
# Check uncertainty
if cov > 2:
self.transects['status'] = 'caution'
self.transects['messages'].append(
['Transects: Uncertainty would be reduced by additional transects;', 2, 0])
# Check for consistent sign
q_positive = []
for q in discharges:
if q.total >= 0:
q_positive.append(True)
else:
q_positive.append(False)
if len(np.unique(q_positive)) > 1:
self.transects['status'] = 'warning'
self.transects['messages'].append(
['Transects: Sign of total Q is not consistent. One or more start banks may be incorrect;', 1, 0])
# Check for reciprocal transects
num_left = start_edge.count('Left')
num_right = start_edge.count('Right')
if not num_left == num_right:
self.transects['status'] = 'warning'
self.transects['messages'].append(['Transects: Transects selected are not reciprocal transects;', 1, 0])
# Check for zero discharge transects
q_zero = False
for q in discharges:
if q.total == 0:
q_zero = True
if q_zero:
self.transects['status'] = 'warning'
self.transects['messages'].append(['TRANSECTS: One or more transects have zero Q;', 1, 0])
def system_tst_qa(self, meas):
"""Apply QA checks to system test.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.system_tst['messages'] = []
self.system_tst['status'] = 'good'
# Determine if a system test was recorded
if not meas.system_test:
# No system test data recorded
self.system_tst['status'] = 'warning'
self.system_tst['messages'].append(['SYSTEM TEST: No system test;', 1, 3])
else:
pt3_fail = False
num_tests_with_failure = 0
for test in meas.system_test:
if hasattr(test, 'result'):
if 'pt3' in test.result and test.result['pt3'] is not None:
# Check hard_limit, high gain, wide bandwidth
if 'hard_limit' in test.result['pt3']:
if 'high_wide' in test.result['pt3']['hard_limit']:
corr_table = test.result['pt3']['hard_limit']['high_wide']['corr_table']
if len(corr_table) > 0:
# All lags past lag 2 should be less than 50% of lag 0
qa_threshold = corr_table[0, :] * 0.5
all_lag_check = np.greater(corr_table[3::, :], qa_threshold)
# Lag 7 should be less than 25% of lag 0
lag_7_check = np.greater(corr_table[7, :], corr_table[0, :] * 0.25)
# If either condition is met for any beam the test fails
if np.sum(np.sum(all_lag_check)) + np.sum(lag_7_check) > 1:
pt3_fail = True
if test.result['n_failed'] is not None and test.result['n_failed'] > 0:
num_tests_with_failure += 1
if pt3_fail:
self.system_tst['status'] = 'caution'
self.system_tst['messages'].append(
['System Test: One or more PT3 tests in the system test indicate potential EMI;', 2, 3])
# Check for failed tests
if num_tests_with_failure == len(meas.system_test):
# All tests had a failure
self.system_tst['status'] = 'warning'
self.system_tst['messages'].append(
['SYSTEM TEST: All system test sets have at least one test that failed;', 1, 3])
elif num_tests_with_failure > 0:
self.system_tst['status'] = 'caution'
self.system_tst['messages'].append(
['System Test: One or more system test sets have at least one test that failed;', 2, 3])
def compass_qa(self, meas):
"""Apply QA checks to compass calibration and evaluation.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.compass['messages'] = []
checked = []
for transect in meas.transects:
checked.append(transect.checked)
if np.any(checked):
heading = np.unique(meas.transects[checked.index(1)].sensors.heading_deg.internal.data)
else:
heading = np.array([0])
# Initialize variables as if the ADCP has no compass
self.compass['status'] = 'inactive'
self.compass['status1'] = 'good'
self.compass['status2'] = 'good'
self.compass['magvar'] = 0
self.compass['magvar_idx'] = 0
if len(heading) > 1 and np.any(np.not_equal(heading, 0)):
# ADCP has a compass
# A compass calibration is required if a loop test or GPS data are used
# Check for loop test
loop = False
for test in meas.mb_tests:
if test.type == 'Loop':
loop = True
# Check for GPS data
gps = False
if meas.transects[checked.index(True)].boat_vel.gga_vel is not None or \
meas.transects[checked.index(True)].boat_vel.vtg_vel is not None:
gps = True
if gps or loop:
# Compass calibration is required
# Determine the ADCP manufacturer
if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek':
# SonTek ADCP
if not meas.compass_cal:
# No compass calibration
self.compass['status1'] = 'warning'
self.compass['messages'].append(['COMPASS: No compass calibration;', 1, 4])
elif meas.compass_cal[-1].result['compass']['error'] == 'N/A':
# If the error cannot be decoded from the calibration assume the calibration is good
self.compass['status1'] = 'good'
else:
if meas.compass_cal[-1].result['compass']['error'] <= 0.2:
self.compass['status1'] = 'good'
else:
self.compass['status1'] = 'caution'
self.compass['messages'].append(['COMPASS: Calibration result > 0.2 deg;', 2, 4])
elif meas.transects[checked.index(True)].adcp.manufacturer == 'TRDI':
# TRDI ADCP
if not meas.compass_cal:
# No compass calibration
if not meas.compass_eval:
# No calibration or evaluation
self.compass['status1'] = 'warning'
self.compass['messages'].append(['COMPASS: No compass calibration or evaluation;', 1, 4])
else:
# No calibration but an evaluation was completed
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: No compass calibration;', 2, 4])
else:
# Compass was calibrated
if not meas.compass_eval:
# No compass evaluation
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: No compass evaluation;', 2, 4])
else:
# Check results of evaluation
try:
if float(meas.compass_eval[-1].result['compass']['error']) <= 1:
self.compass['status1'] = 'good'
else:
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: Evaluation result > 1 deg;', 2, 4])
except ValueError:
self.compass['status1'] = 'good'
else:
# Compass not required
if (not meas.compass_cal) and (not meas.compass_eval):
# No compass calibration or evaluation
self.compass['status1'] = 'default'
else:
# Compass was calibrated and evaluated
self.compass['status1'] = 'good'
# Check for consistent magvar
magvar = []
mag_error_exceeded = []
pitch_mean = []
pitch_std = []
pitch_exceeded = []
roll_mean = []
roll_std = []
roll_exceeded = []
for transect in meas.transects:
if transect.checked:
heading_source_selected = getattr(
transect.sensors.heading_deg, transect.sensors.heading_deg.selected)
pitch_source_selected = getattr(transect.sensors.pitch_deg, transect.sensors.pitch_deg.selected)
roll_source_selected = getattr(transect.sensors.roll_deg, transect.sensors.roll_deg.selected)
magvar.append(heading_source_selected.mag_var_deg)
pitch_mean.append(np.nanmean(pitch_source_selected.data))
pitch_std.append(np.nanstd(pitch_source_selected.data))
roll_mean.append(np.nanmean(roll_source_selected.data))
roll_std.append(np.nanstd(roll_source_selected.data))
# SonTek G3 compass provides pitch, roll, and magnetic error parameters that can be checked
if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek':
if heading_source_selected.pitch_limit is not None:
# Work around a bug in SonTek data where pitch and roll are n x 3; use only the first column (n x 1)
if len(pitch_source_selected.data.shape) == 1:
pitch_data = pitch_source_selected.data
else:
pitch_data = pitch_source_selected.data[:, 0]
idx_max = np.where(pitch_data > heading_source_selected.pitch_limit[0])[0]
idx_min = np.where(pitch_data < heading_source_selected.pitch_limit[1])[0]
if len(idx_max) > 0 or len(idx_min) > 0:
pitch_exceeded.append(True)
else:
pitch_exceeded.append(False)
if heading_source_selected.roll_limit is not None:
if len(roll_source_selected.data.shape) == 1:
roll_data = roll_source_selected.data
else:
roll_data = roll_source_selected.data[:, 0]
idx_max = np.where(roll_data > heading_source_selected.roll_limit[0])[0]
idx_min = np.where(roll_data < heading_source_selected.roll_limit[1])[0]
if len(idx_max) > 0 or len(idx_min) > 0:
roll_exceeded.append(True)
else:
roll_exceeded.append(False)
if heading_source_selected.mag_error is not None:
idx_max = np.where(heading_source_selected.mag_error > 2)[0]
if len(idx_max) > 0:
mag_error_exceeded.append(True)
else:
mag_error_exceeded.append(False)
if len(np.unique(magvar)) > 1:
self.compass['status2'] = 'caution'
self.compass['messages'].append(
['Compass: Magnetic variation is not consistent among transects;', 2, 4])
self.compass['magvar'] = 1
# Check that magvar was set if GPS data are available
if gps:
if 0 in magvar:
self.compass['status2'] = 'warning'
self.compass['messages'].append(
['COMPASS: Magnetic variation is 0 and GPS data are present;', 1, 4])
self.compass['magvar'] = 2
self.compass['magvar_idx'] = magvar.index(0)
# Check pitch mean
if np.any(np.asarray(pitch_mean) > 8):
self.compass['status2'] = 'warning'
self.compass['messages'].append(['PITCH: One or more transects have a mean pitch > 8 deg;', 1, 4])
elif np.any(np.asarray(pitch_mean) > 4):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(['Pitch: One or more transects have a mean pitch > 4 deg;', 2, 4])
# Check roll mean
if np.any(np.asarray(roll_mean) > 8):
self.compass['status2'] = 'warning'
self.compass['messages'].append(['ROLL: One or more transects have a mean roll > 8 deg;', 1, 4])
elif np.any(np.asarray(roll_mean) > 4):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(['Roll: One or more transects have a mean roll > 4 deg;', 2, 4])
# Check pitch standard deviation
if np.any(np.asarray(pitch_std) > 5):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(['Pitch: One or more transects have a pitch std dev > 5 deg;', 2, 4])
# Check roll standard deviation
if np.any(np.asarray(roll_std) > 5):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(['Roll: One or more transects have a roll std dev > 5 deg;', 2, 4])
# Additional checks for SonTek G3 compass
if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek':
# Check if pitch limits were exceeded
if any(pitch_exceeded):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(
['Compass: One or more transects have pitch exceeding calibration limits;', 2, 4])
# Check if roll limits were exceeded
if any(roll_exceeded):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(
['Compass: One or more transects have roll exceeding calibration limits;', 2, 4])
# Check if magnetic error was exceeded
if any(mag_error_exceeded):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(
['Compass: One or more transects have a change in mag field exceeding 2%;', 2, 4])
if self.compass['status1'] == 'warning' or self.compass['status2'] == 'warning':
self.compass['status'] = 'warning'
elif self.compass['status1'] == 'caution' or self.compass['status2'] == 'caution':
self.compass['status'] = 'caution'
else:
self.compass['status'] = 'good'
def temperature_qa(self, meas):
"""Apply QA checks to temperature.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.temperature['messages'] = []
check = [0, 0]
# Create array of all temperatures
temp = np.array([])
checked = []
for transect in meas.transects:
if transect.checked:
checked.append(transect.checked)
temp_selected = getattr(transect.sensors.temperature_deg_c, transect.sensors.temperature_deg_c.selected)
if len(temp) == 0:
temp = temp_selected.data
else:
temp = np.hstack((temp, temp_selected.data))
# Check temperature range
if np.any(checked):
temp_range = np.nanmax(temp) - np.nanmin(temp)
else:
temp_range = 0
if temp_range > 2:
check[0] = 3
self.temperature['messages'].append(['TEMPERATURE: Temperature range is '
+ '%3.1f' % temp_range
+ ' degrees C, which is greater than 2 degrees;', 1, 5])
elif temp_range > 1:
check[0] = 2
self.temperature['messages'].append(['Temperature: Temperature range is '
+ '%3.1f' % temp_range
+ ' degrees C, which is greater than 1 degree;', 2, 5])
else:
check[0] = 1
# Check for independent temperature reading
if 'user' in meas.ext_temp_chk:
try:
user = float(meas.ext_temp_chk['user'])
except (ValueError, TypeError) as e:
user = None
if user is None:
# No independent temperature reading
check[1] = 2
self.temperature['messages'].append(['Temperature: No independent temperature reading;', 2, 5])
elif meas.ext_temp_chk['adcp']:
# Compare user to manually entered ADCP temperature
diff = np.abs(user - meas.ext_temp_chk['adcp'])
if diff < 2:
check[1] = 1
else:
check[1] = 3
self.temperature['messages'].append(
['TEMP.: The difference between ADCP and reference is > 2: ' + '%3.1f' % diff + ' C;', 1, 5])
else:
# Compare user to mean of all temperature data
diff = np.abs(user - np.nanmean(temp))
if diff < 2:
check[1] = 1
else:
check[1] = 3
self.temperature['messages'].append(
['TEMP.: The difference between ADCP and reference is > 2: ' + '%3.1f' % diff + ' C;', 1, 5])
# Assign temperature status
max_check = max(check)
if max_check == 1:
self.temperature['status'] = 'good'
elif max_check == 2:
self.temperature['status'] = 'caution'
elif max_check == 3:
self.temperature['status'] = 'warning'
def moving_bed_qa(self, meas):
"""Applies quality checks to moving-bed tests.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.moving_bed['messages'] = []
self.moving_bed['code'] = 0
# Are there moving-bed tests?
if len(meas.mb_tests) < 1:
# No moving-bed test
self.moving_bed['messages'].append(['MOVING-BED TEST: No moving bed test;', 1, 6])
self.moving_bed['status'] = 'warning'
self.moving_bed['code'] = 3
else:
# Moving-bed tests available
mb_data = meas.mb_tests
# Are tests valid according to the user
user_valid_test = []
file_names = []
idx_selected = []
test_quality = []
mb_tests = []
mb = []
mb_test_type = []
loop = []
for n, test in enumerate(mb_data):
if test.user_valid:
user_valid_test.append(True)
file_names.append(test.transect.file_name)
if test.type == 'Loop' and not test.test_quality == 'Errors':
loop.append(test.moving_bed)
# Selected test
if test.selected:
idx_selected.append(n)
test_quality.append(test.test_quality)
mb_tests.append(test)
mb.append(test.moving_bed)
mb_test_type.append(test.type)
else:
user_valid_test.append(False)
if not any(user_valid_test):
# No valid test according to user
self.moving_bed['messages'].append(['MOVING-BED TEST: No valid moving-bed test based on user input;',
1, 6])
self.moving_bed['status'] = 'warning'
self.moving_bed['code'] = 3
else:
# Check for duplicate valid moving-bed tests
if len(np.unique(file_names)) < len(file_names):
self.moving_bed['messages'].append([
'MOVING-BED TEST: Duplicate moving-bed test files marked valid;', 1, 6])
self.moving_bed['status'] = 'warning'
self.moving_bed['code'] = 3
if self.moving_bed['code'] == 0:
# Check test quality
if len(test_quality) > 0 and sum(np.array(test_quality) == 'Good') > 0:
self.moving_bed['status'] = 'good'
self.moving_bed['code'] = 1
# Check if there is a moving-bed
if any(mb):
# Moving-bed present
self.moving_bed['messages'].append(
['Moving-Bed Test: A moving-bed is present, use GPS or moving-bed correction;', 2, 6])
self.moving_bed['code'] = 2
self.moving_bed['status'] = 'caution'
# Check for test type
if sum(np.array(mb_test_type) == 'Stationary'):
# Check for GPS or 3 stationary tests
if len(mb_tests) < 3:
gps = []
for transect in meas.transects:
if transect.checked:
if transect.gps is None:
gps.append(False)
else:
gps.append(True)
if not all(gps):
# GPS not available for all selected transects
self.moving_bed['messages'].append([
'Moving-Bed Test: '
+ 'Less than 3 stationary tests available for moving-bed correction;',
2, 6])
elif len(test_quality) > 0 and sum(np.array(test_quality) == 'Warnings') > 0:
# Quality check has warnings
self.moving_bed['messages'].append(['Moving-Bed Test: The moving-bed test(s) has warnings, '
+ 'please review tests to determine validity;', 2, 6])
self.moving_bed['status'] = 'caution'
self.moving_bed['code'] = 2
elif len(test_quality) > 0 and sum(np.array(test_quality) == 'Manual') > 0:
# Manual override used
self.moving_bed['messages'].append(['MOVING-BED TEST: '
+ 'The user has manually forced the use of some tests;', 1, 6])
self.moving_bed['status'] = 'warning'
self.moving_bed['code'] = 3
else:
# Test has critical errors
self.moving_bed['messages'].append(['MOVING-BED TEST: The moving-bed test(s) have critical errors '
+ 'and will not be used;', 1, 6])
self.moving_bed['status'] = 'warning'
self.moving_bed['code'] = 3
# Check multiple loops for consistency
if len(np.unique(loop)) > 1:
self.moving_bed['messages'].append(['Moving-Bed Test: Results of valid loops are not consistent, '
+ 'review moving-bed tests;', 2, 6])
if self.moving_bed['code'] < 3:
self.moving_bed['code'] = 2
self.moving_bed['status'] = 'caution'
def user_qa(self, meas):
"""Apply quality checks to user input data.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.user['messages'] = []
self.user['status'] = 'good'
# Check for Station Name
self.user['sta_name'] = False
if meas.station_name is None:
self.user['messages'].append(['Site Info: Station name not entered;', 2, 2])
self.user['status'] = 'caution'
self.user['sta_name'] = True
# Check for Station Number
self.user['sta_number'] = False
if meas.station_number is None:
self.user['messages'].append(['Site Info: Station number not entered;', 2, 2])
self.user['status'] = 'caution'
self.user['sta_number'] = True
def depths_qa(self, meas):
"""Apply quality checks to depth data.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Initialize variables
n_transects = len(meas.transects)
self.depths['q_total'] = np.tile(np.nan, n_transects)
self.depths['q_max_run'] = np.tile(np.nan, n_transects)
self.depths['q_total_caution'] = np.tile(False, n_transects)
self.depths['q_run_caution'] = np.tile(False, n_transects)
self.depths['q_total_warning'] = np.tile(False, n_transects)
self.depths['q_run_warning'] = np.tile(False, n_transects)
self.depths['all_invalid'] = np.tile(False, n_transects)
self.depths['messages'] = []
self.depths['status'] = 'good'
self.depths['draft'] = 0
checked = []
drafts = []
for n, transect in enumerate(meas.transects):
checked.append(transect.checked)
if transect.checked:
in_transect_idx = transect.in_transect_idx
depths_selected = getattr(transect.depths, transect.depths.selected)
drafts.append(depths_selected.draft_use_m)
# Determine valid measured depths
if transect.depths.composite:
depth_na = depths_selected.depth_source_ens[in_transect_idx] != 'NA'
depth_in = depths_selected.depth_source_ens[in_transect_idx] != 'IN'
depth_valid = np.all(np.vstack((depth_na, depth_in)), 0)
else:
depth_valid_temp = depths_selected.valid_data[in_transect_idx]
depth_nan = np.logical_not(np.isnan(depths_selected.depth_processed_m[in_transect_idx]))
depth_valid = np.all(np.vstack((depth_nan, depth_valid_temp)), 0)
if not np.any(depth_valid):
self.depths['all_invalid'][n] = True
# Compute QA characteristics
q_total, q_max_run, number_invalid_ensembles = QAData.invalid_qa(depth_valid, meas.discharge[n])
self.depths['q_total'][n] = q_total
self.depths['q_max_run'][n] = q_max_run
# Compute percentage compared to total
q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100)
q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100)
# Apply total interpolated discharge threshold
if q_total_percent > self.q_total_threshold_warning:
self.depths['q_total_warning'][n] = True
elif q_total_percent > self.q_total_threshold_caution:
self.depths['q_total_caution'][n] = True
# Apply interpolated discharge run thresholds
if q_max_run_percent > self.q_run_threshold_warning:
self.depths['q_run_warning'][n] = True
elif q_max_run_percent > self.q_run_threshold_caution:
self.depths['q_run_caution'][n] = True
if any(checked):
# Create array of all unique draft values
draft_check = np.unique(np.round(drafts, 3))
# Check draft consistency
if len(draft_check) > 1:
self.depths['status'] = 'caution'
self.depths['draft'] = 1
self.depths['messages'].append(['Depth: Transducer depth is not consistent among transects;', 2, 10])
# Check for zero draft
if np.any(np.less(draft_check, 0.01)):
self.depths['status'] = 'warning'
self.depths['draft'] = 2
self.depths['messages'].append(['DEPTH: Transducer depth is too shallow, likely 0;', 1, 10])
# Check consecutive interpolated discharge criteria
if np.any(self.depths['q_run_warning']):
self.depths['messages'].append(['DEPTH: Int. Q for consecutive invalid ensembles exceeds '
+ '%2.0f' % self.q_run_threshold_warning + '%;', 1, 10])
self.depths['status'] = 'warning'
elif np.any(self.depths['q_run_caution']):
self.depths['messages'].append(['Depth: Int. Q for consecutive invalid ensembles exceeds '
+ '%2.0f' % self.q_run_threshold_caution + '%;', 2, 10])
self.depths['status'] = 'caution'
# Check total interpolated discharge criteria
if np.any(self.depths['q_total_warning']):
self.depths['messages'].append(['DEPTH: Int. Q for invalid ensembles in a transect exceeds '
+ '%2.0f' % self.q_total_threshold_warning + '%;', 1, 10])
self.depths['status'] = 'warning'
elif np.any(self.depths['q_total_caution']):
self.depths['messages'].append(['Depth: Int. Q for invalid ensembles in a transect exceeds '
+ '%2.0f' % self.q_total_threshold_caution + '%;', 2, 10])
self.depths['status'] = 'caution'
# Check if all depths are invalid
if np.any(self.depths['all_invalid']):
self.depths['messages'].append(['DEPTH: There are no valid depths for one or more transects.', 2, 10])
self.depths['status'] = 'warning'
else:
self.depths['status'] = 'inactive'
def boat_qa(self, meas):
"""Apply quality checks to boat data.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Initialize variables
n_transects = len(meas.transects)
data_type = {'BT': {'class': 'bt_vel', 'warning': 'BT-', 'caution': 'bt-',
'filter': [('All: ', 0), ('Original: ', 1), ('ErrorVel: ', 2),
('VertVel: ', 3), ('Other: ', 4), ('3Beams: ', 5)]},
'GGA': {'class': 'gga_vel', 'warning': 'GGA-', 'caution': 'gga-',
'filter': [('All: ', 0), ('Original: ', 1), ('DGPS: ', 2),
('Altitude: ', 3), ('Other: ', 4), ('HDOP: ', 5)]},
'VTG': {'class': 'vtg_vel', 'warning': 'VTG-', 'caution': 'vtg-',
'filter': [('All: ', 0), ('Original: ', 1), ('HDOP: ', 5)]}}
for dt_key, dt_value in data_type.items():
boat = getattr(self, dt_value['class'])
# Initialize dictionaries for each data type
boat['q_total_caution'] = np.tile(False, (n_transects, 6))
boat['q_max_run_caution'] = np.tile(False, (n_transects, 6))
boat['q_total_warning'] = np.tile(False, (n_transects, 6))
boat['q_max_run_warning'] = np.tile(False, (n_transects, 6))
boat['all_invalid'] = np.tile(False, n_transects)
boat['q_total'] = np.tile(np.nan, (n_transects, 6))
boat['q_max_run'] = np.tile(np.nan, (n_transects, 6))
boat['messages'] = []
status_switch = 0
avg_speed_check = 0
# Check the results of each filter
for dt_filter in dt_value['filter']:
boat['status'] = 'inactive'
# Quality check each transect
for n, transect in enumerate(meas.transects):
# Evaluate on transects used in the discharge computation
if transect.checked:
in_transect_idx = transect.in_transect_idx
# Check to see if data are available for the data_type
if getattr(transect.boat_vel, dt_value['class']) is not None:
boat['status'] = 'good'
# Compute quality characteristics
valid = getattr(transect.boat_vel, dt_value['class']).valid_data[dt_filter[1],
in_transect_idx]
q_total, q_max_run, number_invalid_ens = QAData.invalid_qa(valid, meas.discharge[n])
boat['q_total'][n, dt_filter[1]] = q_total
boat['q_max_run'][n, dt_filter[1]] = q_max_run
# Compute percentage compared to total
q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100)
q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100)
# Check if all invalid
if dt_filter[1] == 0 and not np.any(valid):
boat['all_invalid'][n] = True
# Apply total interpolated discharge threshold
if q_total_percent > self.q_total_threshold_warning:
boat['q_total_warning'][n, dt_filter[1]] = True
elif q_total_percent > self.q_total_threshold_caution:
boat['q_total_caution'][n, dt_filter[1]] = True
# Apply interpolated discharge run thresholds
if q_max_run_percent > self.q_run_threshold_warning:
boat['q_max_run_warning'][n, dt_filter[1]] = True
elif q_max_run_percent > self.q_run_threshold_caution:
boat['q_max_run_caution'][n, dt_filter[1]] = True
# Check boat velocity for vtg data
if dt_key == 'VTG' and transect.boat_vel.selected == 'vtg_vel' and avg_speed_check == 0:
avg_speed = np.nanmean((transect.boat_vel.vtg_vel.u_mps**2
+ transect.boat_vel.vtg_vel.v_mps**2)**0.5)
if avg_speed < 0.24:
boat['q_total_caution'][n, dt_filter[1]] = True
boat['messages'].append(
['vtg-AvgSpeed: VTG data may not be accurate for average boat speed less than '
+ '0.24 m/s (0.8 ft/s);', 2, 8])
avg_speed_check = 1
# Create message for consecutive invalid discharge
if boat['q_max_run_warning'][:, dt_filter[1]].any():
if dt_key == 'BT':
module_code = 7
else:
module_code = 8
boat['messages'].append(
[dt_value['warning'] + dt_filter[0] +
'Int. Q for consecutive invalid ensembles exceeds ' +
'%3.1f' % self.q_run_threshold_warning + '%;', 1, module_code])
status_switch = 2
elif boat['q_max_run_caution'][:, dt_filter[1]].any():
if dt_key == 'BT':
module_code = 7
else:
module_code = 8
boat['messages'].append(
[dt_value['caution'] + dt_filter[0] +
'Int. Q for consecutive invalid ensembles exceeds ' +
'%3.1f' % self.q_run_threshold_caution + '%;', 2, module_code])
if status_switch < 1:
status_switch = 1
# Create message for total invalid discharge
if boat['q_total_warning'][:, dt_filter[1]].any():
if dt_key == 'BT':
module_code = 7
else:
module_code = 8
boat['messages'].append(
[dt_value['warning'] + dt_filter[0] +
'Int. Q for invalid ensembles in a transect exceeds ' +
'%3.1f' % self.q_total_threshold_warning + '%;', 1, module_code])
status_switch = 2
elif boat['q_total_caution'][:, dt_filter[1]].any():
if dt_key == 'BT':
module_code = 7
else:
module_code = 8
boat['messages'].append(
[dt_value['caution'] + dt_filter[0] +
'Int. Q for invalid ensembles in a transect exceeds ' +
'%3.1f' % self.q_total_threshold_caution + '%;', 2, module_code])
if status_switch < 1:
status_switch = 1
# Create message for all data invalid
if boat['all_invalid'].any():
boat['status'] = 'warning'
if dt_key == 'BT':
module_code = 7
else:
module_code = 8
boat['messages'].append(
[dt_value['warning'] + dt_value['filter'][0][0] +
'There are no valid data for one or more transects;', 1, module_code])
# Set status
if status_switch == 2:
boat['status'] = 'warning'
elif status_switch == 1:
boat['status'] = 'caution'
setattr(self, dt_value['class'], boat)
def water_qa(self, meas):
"""Apply quality checks to water data.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Initialize filter labels and indices
prefix = ['All: ', 'Original: ', 'ErrorVel: ', 'VertVel: ', 'Other: ', '3Beams: ', 'SNR:']
if meas.transects[0].adcp.manufacturer == 'TRDI':
filter_index = [0, 1, 2, 3, 4, 5]
else:
filter_index = [0, 1, 2, 3, 4, 5, 7]
n_transects = len(meas.transects)
n_filters = len(filter_index) + 1
# Initialize dictionaries for each data type
self.w_vel['q_total_caution'] = np.tile(False, (n_transects, n_filters))
self.w_vel['q_max_run_caution'] = np.tile(False, (n_transects, n_filters))
self.w_vel['q_total_warning'] = np.tile(False, (n_transects, n_filters))
self.w_vel['q_max_run_warning'] = np.tile(False, (n_transects, n_filters))
self.w_vel['all_invalid'] = np.tile(False, n_transects)
self.w_vel['q_total'] = np.tile(np.nan, (n_transects, n_filters))
self.w_vel['q_max_run'] = np.tile(np.nan, (n_transects, n_filters))
self.w_vel['messages'] = []
status_switch = 0
# TODO if meas had a property checked as list it would save creating that list multiple times
checked = []
for transect in meas.transects:
checked.append(transect.checked)
# At least one transect is being used to compute discharge
if any(checked):
# Loop through filters
for prefix_idx, filter_idx in enumerate(filter_index):
# Loop through transects
for n, transect in enumerate(meas.transects):
if transect.checked:
valid_original = np.any(transect.w_vel.valid_data[1, :, transect.in_transect_idx].T, 0)
# Determine what data each filter have marked invalid. Original invalid data are excluded
valid = np.any(transect.w_vel.valid_data[filter_idx, :, transect.in_transect_idx].T, 0)
if filter_idx > 1:
valid_int = valid.astype(int) - valid_original.astype(int)
valid = valid_int != -1
# Check if all data are invalid
if filter_idx == 0:
if np.nansum(valid.astype(int)) < 1:
self.w_vel['all_invalid'][n] = True
# TODO seems like the rest of this should be under else of all invalid or multiple messages
# generated.
# Compute characteristics
q_total, q_max_run, number_invalid_ens = QAData.invalid_qa(valid, meas.discharge[n])
self.w_vel['q_total'][n, filter_idx] = q_total
self.w_vel['q_max_run'][n, filter_idx] = q_max_run
# Compute percentage compared to total
q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100)
q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100)
# Check total invalid discharge in ensembles for warning
if q_total_percent > self.q_total_threshold_warning:
self.w_vel['q_total_warning'][n, filter_idx] = True
# Apply run or cluster thresholds
if q_max_run_percent > self.q_run_threshold_warning:
self.w_vel['q_max_run_warning'][n, filter_idx] = True
elif q_max_run_percent > self.q_run_threshold_caution:
self.w_vel['q_max_run_caution'][n, filter_idx] = True
# Compute percent discharge interpolated for both cells and ensembles
# This approach doesn't exclude original data
valid_cells = transect.w_vel.valid_data[filter_idx, :, transect.in_transect_idx].T
q_invalid_total = np.nansum(meas.discharge[n].middle_cells[np.logical_not(valid_cells)]) \
+ np.nansum(meas.discharge[n].top_ens[np.logical_not(valid)]) \
+ np.nansum(meas.discharge[n].bottom_ens[np.logical_not(valid)])
q_invalid_total_percent = (q_invalid_total / meas.discharge[n].total) * 100
if q_invalid_total_percent > self.q_total_threshold_caution:
self.w_vel['q_total_caution'][n, filter_idx] = True
# Generate messages for ensemble run or clusters
if np.any(self.w_vel['q_max_run_warning'][:, filter_idx]):
self.w_vel['messages'].append(['WT-' + prefix[prefix_idx]
+ 'Int. Q for consecutive invalid ensembles exceeds '
+ '%3.0f' % self.q_run_threshold_warning
+ '%;', 1, 11])
status_switch = 2
elif np.any(self.w_vel['q_max_run_caution'][:, filter_idx]):
self.w_vel['messages'].append(['wt-' + prefix[prefix_idx]
+ 'Int. Q for consecutive invalid ensembles exceeds '
+ '%3.0f' % self.q_run_threshold_caution
+ '%;', 2, 11])
if status_switch < 1:
status_switch = 1
# Generate message for total_invalid Q
if np.any(self.w_vel['q_total_warning'][:, filter_idx]):
self.w_vel['messages'].append(['WT-' + prefix[prefix_idx]
+ 'Int. Q for invalid ensembles in a transect exceeds '
+ '%3.0f' % self.q_total_threshold_warning
+ '%;', 1, 11])
status_switch = 2
elif np.any(self.w_vel['q_total_caution'][:, filter_idx]):
self.w_vel['messages'].append(['wt-' + prefix[prefix_idx]
+ 'Int. Q for invalid cells and ensembles in a transect exceeds '
+ '%3.0f' % self.q_total_threshold_caution
+ '%;', 2, 11])
if status_switch < 1:
status_switch = 1
# Generate message for all invalid
if np.any(self.w_vel['all_invalid']):
self.w_vel['messages'].append(['WT-' + prefix[0] + 'There are no valid data for one or more transects;',
1, 11])
status_switch = 2
# Set status
self.w_vel['status'] = 'good'
if status_switch == 2:
self.w_vel['status'] = 'warning'
elif status_switch == 1:
self.w_vel['status'] = 'caution'
else:
self.w_vel['status'] = 'inactive'
def extrapolation_qa(self, meas):
"""Apply quality checks to extrapolation methods
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.extrapolation['messages'] = []
checked = []
discharges = []
for n, transect in enumerate(meas.transects):
checked.append(transect.checked)
if transect.checked:
discharges.append(meas.discharge[n])
if any(checked):
self.extrapolation['status'] = 'good'
extrap_uncertainty = Uncertainty.uncertainty_extrapolation(meas, discharges)
if np.abs(extrap_uncertainty) > 2:
self.extrapolation['messages'].append(['Extrapolation: The extrapolation uncertainty is more than '
+ '2 percent;', 2, 12])
self.extrapolation['messages'].append([' Carefully review the extrapolation;', 2, 12])
self.extrapolation['status'] = 'caution'
else:
self.extrapolation['status'] = 'inactive'
def edges_qa(self, meas):
"""Apply quality checks to edge estimates
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Initialize variables
self.edges['messages'] = []
checked = []
left_q = []
right_q = []
total_q = []
edge_dist_left = []
edge_dist_right = []
dist_moved_left = []
dist_moved_right = []
dist_made_good = []
left_type = []
right_type = []
for n, transect in enumerate(meas.transects):
checked.append(transect.checked)
if transect.checked:
left_q.append(meas.discharge[n].left)
right_q.append(meas.discharge[n].right)
total_q.append(meas.discharge[n].total)
dmr, dml, dmg = QAData.edge_distance_moved(transect)
dist_moved_right.append(dmr)
dist_moved_left.append(dml)
dist_made_good.append(dmg)
edge_dist_left.append(transect.edges.left.distance_m)
edge_dist_right.append(transect.edges.right.distance_m)
left_type.append(transect.edges.left.type)
right_type.append(transect.edges.right.type)
if any(checked):
# Set default status to good
self.edges['status'] = 'good'
# Check left edge q > 5%
self.edges['left_q'] = 0
left_q_percent = (np.nanmean(left_q) / np.nanmean(total_q)) * 100
if np.abs(left_q_percent) > 5:
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Left edge Q is greater than 5%;', 2, 13])
self.edges['left_q'] = 1
# Check right edge q > 5%
self.edges['right_q'] = 0
right_q_percent = (np.nanmean(right_q) / np.nanmean(total_q)) * 100
if np.abs(right_q_percent) > 5:
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Right edge Q is greater than 5%;', 2, 13])
self.edges['right_q'] = 1
# Check for consistent sign
q_positive = []
self.edges['left_sign'] = 0
for q in left_q:
if q >= 0:
q_positive.append(True)
else:
q_positive.append(False)
if len(np.unique(q_positive)) > 1 and left_q_percent > 0.5:
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Sign of left edge Q is not consistent;', 2, 13])
self.edges['left_sign'] = 1
q_positive = []
self.edges['right_sign'] = 0
for q in right_q:
if q >= 0:
q_positive.append(True)
else:
q_positive.append(False)
if len(np.unique(q_positive)) > 1 and right_q_percent > 0.5:
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Sign of right edge Q is not consistent;', 2, 13])
self.edges['right_sign'] = 1
# Check distance moved
dmg_5_percent = 0.05 * np.nanmean(dist_made_good)
avg_right_edge_dist = np.nanmean(edge_dist_right)
right_threshold = np.nanmin([dmg_5_percent, avg_right_edge_dist])
self.edges['right_dist_moved_idx'] = np.where(dist_moved_right > right_threshold)[0]
if np.any(self.edges['right_dist_moved_idx']):
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Excessive boat movement in right edge ensembles;', 2, 13])
avg_left_edge_dist = np.nanmean(edge_dist_left)
left_threshold = np.nanmin([dmg_5_percent, avg_left_edge_dist])
self.edges['left_dist_moved_idx'] = np.where(dist_moved_left > left_threshold)[0]
if np.any(self.edges['left_dist_moved_idx']):
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Excessive boat movement in left edge ensembles;', 2, 13])
# Check for edge ensembles marked invalid due to excluded distance
for transect in meas.transects:
if transect.checked:
ens_sum_excluded_data = np.nansum(transect.w_vel.valid_data[6, :, :], 0)
cells_above_sl = np.nansum(transect.w_vel.cells_above_sl, 0)
ens_excluded_data = np.not_equal(ens_sum_excluded_data, cells_above_sl)
if any(ens_excluded_data):
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: The excluded distance caused invalid ensembles '
+ 'in an edge, check edge distance;', 2, 13])
break
# Check edges for zero discharge
self.edges['left_zero'] = 0
left_zero_idx = np.where(np.asarray(left_q) == 0)[0]
if len(left_zero_idx) > 0:
self.edges['status'] = 'warning'
self.edges['messages'].append(['EDGES: Left edge has zero Q;', 1, 13])
self.edges['left_zero'] = 2
self.edges['right_zero'] = 0
right_zero_idx = np.where(np.asarray(right_q) == 0)[0]
if len(right_zero_idx) > 0:
self.edges['status'] = 'warning'
self.edges['messages'].append(['EDGES: Right edge has zero Q;', 1, 13])
self.edges['right_zero'] = 2
# Check consistent edge type
self.edges['left_type'] = 0
if len(np.unique(left_type)) > 1:
self.edges['status'] = 'warning'
self.edges['messages'].append(['EDGES: Left edge type is not consistent;', 1, 13])
self.edges['left_type'] = 2
self.edges['right_type'] = 0
if len(np.unique(right_type)) > 1:
self.edges['status'] = 'warning'
self.edges['messages'].append(['EDGES: Right edge type is not consistent;', 1, 13])
self.edges['right_type'] = 2
else:
self.edges['status'] = 'inactive'
@staticmethod
def invalid_qa(valid, discharge):
"""Computes the total invalid discharge in ensembles that have invalid data. The function also computes
the maximum run or cluster of ensembles with the maximum interpolated discharge.
Parameters
----------
valid: np.array(bool)
Array identifying valid and invalid ensembles.
discharge: QComp
Object of class QComp
Returns
-------
q_invalid_total: float
Total interpolated discharge in invalid ensembles
q_invalid_max_run: float
Maximum interpolated discharge in a run or cluster of invalid ensembles
ens_invalid: int
Total number of invalid ensembles
"""
# Create bool for invalid data
invalid = np.logical_not(valid)
q_invalid_total = np.nansum(discharge.middle_ens[invalid]) + np.nansum(discharge.top_ens[invalid]) \
+ np.nansum(discharge.bottom_ens[invalid])
# Compute total number of invalid ensembles
ens_invalid = np.sum(invalid)
# Compute the indices of where changes occur
valid_int = np.insert(valid.astype(int), 0, -1)
valid_int = np.append(valid_int, -1)
valid_run = np.where(np.diff(valid_int) != 0)[0]
run_length = np.diff(valid_run)
run_length0 = run_length[(valid[0] == 1)::2]
n_runs = len(run_length0)
if valid[0]:
n_start = 1
else:
n_start = 0
n_end = len(valid_run)-1
if n_runs > 1:
m = 0
q_invalid_run = []
for n in range(n_start, n_end, 2):
m += 1
idx_start = valid_run[n]
idx_end = valid_run[n+1]
q_invalid_run.append(np.nansum(discharge.middle_ens[idx_start:idx_end])
+ np.nansum(discharge.top_ens[idx_start:idx_end])
+ np.nansum(discharge.bottom_ens[idx_start:idx_end]))
# Determine the maximum discharge in a single run
q_invalid_max_run = np.nanmax(np.abs(q_invalid_run))
else:
q_invalid_max_run = 0
return q_invalid_total, q_invalid_max_run, ens_invalid
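# Illustrative example (added, not part of the original method): for
# valid = np.array([True, False, False, True, False, True]) the invalid runs are
# ensembles 1-2 and ensemble 4; q_invalid_total sums the interpolated discharge
# over all three invalid ensembles, while q_invalid_max_run keeps the larger of
# the two per-run magnitudes.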
@staticmethod
def edge_distance_moved(transect):
"""Computes the boat movement during edge ensemble collection.
Parameters
----------
transect: Transect
Object of class Transect
Returns
-------
right_dist_moved: float
Distance in m moved during collection of right edge samples
left_dist_moved: float
Distance in m moved during collection of left edge samples
dmg: float
Distance made good for the entire transect
"""
boat_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
ens_duration = transect.date_time.ens_duration_sec
# Get boat velocities
if boat_selected is not None:
u_processed = boat_selected.u_processed_mps
v_processed = boat_selected.v_processed_mps
else:
u_processed = np.tile(np.nan, transect.boat_vel.bt_vel.u_processed_mps.shape)
v_processed = np.tile(np.nan, transect.boat_vel.bt_vel.v_processed_mps.shape)
# Compute boat coordinates
x_processed = np.nancumsum(u_processed * ens_duration)
y_processed = np.nancumsum(v_processed * ens_duration)
dmg = (x_processed[-1]**2 + y_processed[-1]**2)**0.5
# Compute left distance moved
# TODO should be a dist moved function
left_edge_idx = QComp.edge_ensembles('left', transect)
if len(left_edge_idx) > 0:
boat_x = x_processed[left_edge_idx[-1]] - x_processed[left_edge_idx[0]]
boat_y = y_processed[left_edge_idx[-1]] - y_processed[left_edge_idx[0]]
left_dist_moved = (boat_x**2 + boat_y**2)**0.5
else:
left_dist_moved = np.nan
# Compute right distance moved
right_edge_idx = QComp.edge_ensembles('right', transect)
if len(right_edge_idx) > 0:
boat_x = x_processed[right_edge_idx[-1]] - x_processed[right_edge_idx[0]]
boat_y = y_processed[right_edge_idx[-1]] - y_processed[right_edge_idx[0]]
right_dist_moved = (boat_x ** 2 + boat_y ** 2) ** 0.5
else:
right_dist_moved = np.nan
return right_dist_moved, left_dist_moved, dmg
| 2.75
| 3
|
readimc/_txt_file.py
|
BodenmillerGroup/readimc
| 0
|
12776222
|
<filename>readimc/_txt_file.py
import numpy as np
import pandas as pd
import re
from os import PathLike
from typing import BinaryIO, List, Optional, Sequence, Tuple, Union
from ._imc_file import IMCFile
from .data import AcquisitionBase
class TXTFile(IMCFile, AcquisitionBase):
_CHANNEL_REGEX = re.compile(
r"^(?P<label>.*)\((?P<metal>[a-zA-Z]+)(?P<mass>[0-9]+)[^0-9]*\)$"
)
def __init__(self, path: Union[str, PathLike]) -> None:
"""A class for reading IMC .txt files
:param path: path to the IMC .txt file
"""
super(TXTFile, self).__init__(path)
self._fh: Optional[BinaryIO] = None
self._num_channels: Optional[int] = None
self._channel_metals: Optional[List[str]] = None
self._channel_masses: Optional[List[int]] = None
self._channel_labels: Optional[List[str]] = None
@property
def num_channels(self) -> int:
if self._num_channels is None:
raise IOError(f"TXT file '{self.path.name}' has not been opened")
return self._num_channels
@property
def channel_metals(self) -> Sequence[str]:
if self._channel_metals is None:
raise IOError(f"TXT file '{self.path.name}' has not been opened")
return self._channel_metals
@property
def channel_masses(self) -> Sequence[int]:
if self._channel_masses is None:
raise IOError(f"TXT file '{self.path.name}' has not been opened")
return self._channel_masses
@property
def channel_labels(self) -> Sequence[str]:
if self._channel_labels is None:
raise IOError(f"TXT file '{self.path.name}' has not been opened")
return self._channel_labels
def __enter__(self) -> "TXTFile":
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
self.close()
def open(self) -> None:
"""Opens the IMC .txt file for reading.
It is good practice to use context managers whenever possible:
.. code-block:: python
with TXTFile("/path/to/file.txt") as f:
pass
"""
if self._fh is not None:
self._fh.close()
self._fh = open(self._path, mode="r")
(
self._num_channels,
self._channel_metals,
self._channel_masses,
self._channel_labels,
) = self._read_channels()
def close(self) -> None:
"""Closes the IMC .txt file.
It is good practice to use context managers whenever possible:
.. code-block:: python
with TXTFile("/path/to/file.txt") as f:
pass
"""
if self._fh is not None:
self._fh.close()
self._fh = None
def read_acquisition(self, *args) -> np.ndarray:
"""Reads IMC acquisition data as numpy array.
.. note::
This function takes a variable number of arguments for
compatibility with ``MCDFile``.
:return: the acquisition data as 32-bit floating point array,
shape: (c, y, x)
"""
self._fh.seek(0)
df = pd.read_table(self._fh, dtype=np.float32)
if tuple(df.columns[:3]) != (
"Start_push",
"End_push",
"Pushes_duration",
):
raise IOError(
f"TXT file '{self.path.name}' corrupted: "
"push columns not found in tabular data"
)
if tuple(df.columns[3:6]) != ("X", "Y", "Z"):
raise IOError(
f"TXT file '{self.path.name}' corrupted: "
"XYZ channels not found in tabular data"
)
width, height = df[["X", "Y"]].add(1).max(axis=0).astype(int)
if width * height != len(df.index):
raise IOError(
f"TXT file '{self.path.name}' corrupted: "
"inconsistent acquisition image data size"
)
img = np.zeros((height, width, self.num_channels), dtype=np.float32)
img[df["Y"].astype(int), df["X"].astype(int), :] = df.values[:, 6:]
return np.moveaxis(img, -1, 0)
def _read_channels(self) -> Tuple[int, List[str], List[int], List[str]]:
self._fh.seek(0)
columns = self._fh.readline().split("\t")
if tuple(columns[:3]) != ("Start_push", "End_push", "Pushes_duration"):
raise IOError(
f"TXT file '{self.path.name}' corrupted: "
"push columns not found in tabular data"
)
if tuple(columns[3:6]) != ("X", "Y", "Z"):
raise IOError(
f"TXT file '{self.path.name}' corrupted: "
"XYZ channels not found in tabular data"
)
channel_metals: List[str] = []
channel_masses: List[int] = []
channel_labels: List[str] = []
for column in columns[6:]:
m = re.match(self._CHANNEL_REGEX, column)
if m is None:
raise IOError(
f"TXT file '{self.path.name}' corrupted: "
f"cannot extract channel information from text '{column}'"
)
channel_metals.append(m.group("metal"))
channel_masses.append(int(m.group("mass")))
channel_labels.append(m.group("label"))
return len(columns[6:]), channel_metals, channel_masses, channel_labels
def __repr__(self) -> str:
return str(self._path)
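# A minimal usage sketch (not part of the original module; the path below is a
# placeholder and must point at a real IMC .txt file):
if __name__ == "__main__":
    with TXTFile("/path/to/acquisition.txt") as f:
        print(f.num_channels, f.channel_metals, f.channel_masses, f.channel_labels)
        img = f.read_acquisition()  # 32-bit float array of shape (c, y, x)
        print(img.shape)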
| 2.640625
| 3
|
src/video_player_test.py
|
Joao-Nogueira-gh/video-compressin
| 0
|
12776223
|
## @brief
# Module for testing of the VideoPlayer class
#
from VideoPlayer import *
import sys
if __name__ == "__main__":
if len(sys.argv)!=2:
        print('\nUsage: python3 video_player_test.py <frameNumber>\n\n'
              'frameNumber->Number of video frames to play OR \'all\' for all frames in video\n\n'
              'Warning: Higher number of frames will take longer to complete!')
exit(0)
else:
fn=sys.argv[1]
if fn=='all':
fn=None
else:
fn=int(fn)
v1="../res/ducks_take_off_444_720p50.y4m"
v2="../res/ducks_take_off_422_720p50.y4m"
v3="../res/ducks_take_off_420_720p50.y4m"
ans=input('There are 3 videos in our repository,\n1)ducks_take_off_444\n2)ducks_take_off_422\n3)ducks_take_off_420\nEach in the respective format, choose the desired one (1,2,3) : ')
if ans=='1':
video=v1
elif ans=='2':
video=v2
elif ans=='3':
video=v3
else:
print('Invalid answer')
exit(0)
v=VideoPlayer(video)
v.play_video(frameNumber=fn)
| 3.25
| 3
|
migrations/sqlite_versions/2020-07-10_b7fc1ab24c92_add_checkconstraints_for_non_nullable_.py
|
debrief/pepys-import
| 4
|
12776224
|
"""Add CheckConstraints for non-nullable string cols
Revision ID: b7fc1ab24c92
Revises: d5d740c76aa3
Create Date: 2020-07-10 13:24:56.007611
"""
from datetime import datetime
from uuid import uuid4
from alembic import op
from geoalchemy2 import Geometry
from sqlalchemy import DATE, Column, DateTime, ForeignKey, Integer, MetaData, String, Text
from sqlalchemy.dialects.sqlite import REAL, TIMESTAMP
from sqlalchemy.orm import declarative_base, deferred
from sqlalchemy.sql.schema import CheckConstraint, UniqueConstraint
from pepys_import.core.store import constants
from pepys_import.core.store.common_db import (
CommentMixin,
ElevationPropertyMixin,
LocationPropertyMixin,
LogMixin,
MediaMixin,
ReferenceRepr,
SynonymMixin,
)
from pepys_import.core.store.db_base import sqlite_naming_convention
from pepys_import.core.store.db_status import TableTypes
from pepys_import.utils.sqlalchemy_utils import UUIDType
# revision identifiers, used by Alembic.
revision = "b7fc1ab24c92"
down_revision = "d5d740c76aa3"
branch_labels = None
depends_on = None
def upgrade():
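    # SQLite cannot add a CHECK constraint with ALTER TABLE, so each constraint is
    # added through Alembic batch mode, which recreates the table with the
    # constraint in place.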
with op.batch_alter_table("Tasks", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Tasks_name", condition="name <> ''")
with op.batch_alter_table("Synonyms", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Synonyms_table", condition="\"table\" <> ''")
batch_op.create_check_constraint("ck_Synonyms_synonym", condition="synonym <> ''")
with op.batch_alter_table("Changes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Changes_user", condition="user <> ''")
batch_op.create_check_constraint("ck_Changes_reason", condition="reason <> ''")
with op.batch_alter_table("Logs", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Logs_table", condition="\"table\" <> ''")
with op.batch_alter_table("PlatformTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_PlatformTypes_name", condition="name <> ''")
with op.batch_alter_table("Nationalities", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Nationalities_name", condition="name <> ''")
with op.batch_alter_table("GeometryTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_GeometryTypes_name", condition="name <> ''")
with op.batch_alter_table("GeometrySubTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_GeometrySubTypes_name", condition="name <> ''")
with op.batch_alter_table("Users", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Users_name", condition="name <> ''")
with op.batch_alter_table("UnitTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_UnitTypes_name", condition="name <> ''")
with op.batch_alter_table("ClassificationTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_ClassificationTypes_name", condition="name <> ''")
with op.batch_alter_table("ContactTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_ContactTypes_name", condition="name <> ''")
with op.batch_alter_table("SensorTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_SensorTypes_name", condition="name <> ''")
with op.batch_alter_table("Privacies", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Privacies_name", condition="name <> ''")
with op.batch_alter_table("DatafileTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_DatafileTypes_name", condition="name <> ''")
with op.batch_alter_table("MediaTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_MediaTypes_name", condition="name <> ''")
with op.batch_alter_table("CommentTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_CommentTypes_name", condition="name <> ''")
with op.batch_alter_table("CommodityTypes", schema=None) as batch_op:
batch_op.create_check_constraint("ck_CommodityTypes_name", condition="name <> ''")
with op.batch_alter_table("ConfidenceLevels", schema=None) as batch_op:
batch_op.create_check_constraint("ck_ConfidenceLevels_name", condition="name <> ''")
with op.batch_alter_table("Comments", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Comments_content", condition="content <> ''")
with op.batch_alter_table("Media", schema=None) as batch_op:
batch_op.create_check_constraint("ck_Media_url", condition="url <> ''")
def downgrade():
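    # SQLite also cannot drop constraints in place, so the current model classes are
    # redeclared below and handed to batch_alter_table via copy_from; Alembic then
    # rebuilds each table from that definition minus the dropped CheckConstraint.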
Metadata = MetaData(naming_convention=sqlite_naming_convention)
BaseSpatiaLite = declarative_base(metadata=Metadata)
class Task(BaseSpatiaLite):
__tablename__ = constants.TASK
table_type = TableTypes.METADATA
table_type_id = 4
task_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False)
parent_id = Column(
UUIDType, ForeignKey("Tasks.task_id", onupdate="cascade"), nullable=False
)
start = Column(TIMESTAMP, nullable=False)
end = Column(TIMESTAMP, nullable=False)
environment = deferred(Column(String(150)))
location = deferred(Column(String(150)))
privacy_id = Column(
UUIDType, ForeignKey("Privacies.privacy_id", onupdate="cascade"), nullable=False
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_Tasks_name"),)
class Synonym(BaseSpatiaLite, SynonymMixin):
__tablename__ = constants.SYNONYM
table_type = TableTypes.METADATA
table_type_id = 7
synonym_id = Column(UUIDType, primary_key=True, default=uuid4)
table = Column(String(150), nullable=False)
entity = Column(UUIDType, nullable=False)
synonym = Column(String(150), nullable=False)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (
CheckConstraint("\"table\" <> ''", name="ck_Synonyms_table"),
CheckConstraint("synonym <> ''", name="ck_Synonyms_synonym"),
)
class Change(BaseSpatiaLite):
__tablename__ = constants.CHANGE
table_type = TableTypes.METADATA
table_type_id = 8
change_id = Column(UUIDType, primary_key=True, default=uuid4)
user = Column(String(150), nullable=False)
modified = Column(DATE, nullable=False)
reason = Column(String(500), nullable=False)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (
CheckConstraint("user <> ''", name="ck_Changes_user"),
CheckConstraint("reason <> ''", name="ck_Changes_reason"),
)
class Log(BaseSpatiaLite, LogMixin):
__tablename__ = constants.LOG
table_type = TableTypes.METADATA
table_type_id = 9
log_id = Column(UUIDType, primary_key=True, default=uuid4)
table = Column(String(150), nullable=False)
id = Column(UUIDType, nullable=False)
field = Column(String(150))
new_value = Column(String(150))
change_id = Column(
UUIDType, ForeignKey("Changes.change_id", onupdate="cascade"), nullable=False
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("\"table\" <> ''", name="ck_Logs_table"),)
class PlatformType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.PLATFORM_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 13
platform_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_PlatformTypes_name"),)
class Nationality(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.NATIONALITY
table_type = TableTypes.REFERENCE
table_type_id = 14
nationality_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
priority = Column(Integer)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_Nationalities_name"),)
class GeometryType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.GEOMETRY_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 15
geo_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_GeometryTypes_name"),)
class GeometrySubType(BaseSpatiaLite):
__tablename__ = constants.GEOMETRY_SUBTYPE
table_type = TableTypes.REFERENCE
table_type_id = 16
geo_sub_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(String(150), nullable=False)
parent = Column(
UUIDType, ForeignKey("GeometryTypes.geo_type_id", onupdate="cascade"), nullable=False
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (
UniqueConstraint("name", "parent", name="uq_GeometrySubTypes_name_parent"),
CheckConstraint("name <> ''", name="ck_GeometrySubTypes_name"),
)
class User(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.USER
table_type = TableTypes.REFERENCE
table_type_id = 17
user_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_Users_name"),)
class UnitType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.UNIT_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 18
unit_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_UnitTypes_name"),)
class ClassificationType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.CLASSIFICATION_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 19
class_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_ClassificationTypes_name"),)
class ContactType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.CONTACT_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 20
contact_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_ContactTypes_name"),)
class SensorType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.SENSOR_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 21
sensor_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_SensorTypes_name"),)
class Privacy(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.PRIVACY
table_type = TableTypes.REFERENCE
table_type_id = 22
privacy_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
level = Column(Integer, nullable=False)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_Privacies_name"),)
class DatafileType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.DATAFILE_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 23
datafile_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_DatafileTypes_name"),)
class MediaType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.MEDIA_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 24
media_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_MediaTypes_name"),)
class CommentType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.COMMENT_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 25
comment_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_CommentTypes_name"),)
class CommodityType(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.COMMODITY_TYPE
table_type = TableTypes.REFERENCE
table_type_id = 26
commodity_type_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_CommodityTypes_name"),)
class ConfidenceLevel(BaseSpatiaLite, ReferenceRepr):
__tablename__ = constants.CONFIDENCE_LEVEL
table_type = TableTypes.REFERENCE
table_type_id = 27
confidence_level_id = Column(UUIDType, primary_key=True, default=uuid4)
name = Column(
String(150),
nullable=False,
unique=True,
)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("name <> ''", name="ck_ConfidenceLevels_name"),)
class Comment(BaseSpatiaLite, CommentMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.sensor_name = "N/A"
__tablename__ = constants.COMMENT
table_type = TableTypes.MEASUREMENT
table_type_id = 32
comment_id = Column(UUIDType, primary_key=True, default=uuid4)
platform_id = Column(UUIDType, ForeignKey("Platforms.platform_id", onupdate="cascade"))
time = Column(TIMESTAMP, nullable=False)
comment_type_id = Column(
UUIDType, ForeignKey("CommentTypes.comment_type_id", onupdate="cascade"), nullable=False
)
content = Column(Text, nullable=False)
source_id = Column(
UUIDType, ForeignKey("Datafiles.datafile_id", onupdate="cascade"), nullable=False
)
privacy_id = Column(UUIDType, ForeignKey("Privacies.privacy_id", onupdate="cascade"))
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("content <> ''", name="ck_Comments_content"),)
class Media(BaseSpatiaLite, MediaMixin, ElevationPropertyMixin, LocationPropertyMixin):
__tablename__ = constants.MEDIA
table_type = TableTypes.MEASUREMENT
table_type_id = 34
media_id = Column(UUIDType, primary_key=True, default=uuid4)
platform_id = Column(UUIDType, ForeignKey("Platforms.platform_id", onupdate="cascade"))
subject_id = Column(UUIDType, ForeignKey("Platforms.platform_id", onupdate="cascade"))
sensor_id = Column(UUIDType, ForeignKey("Sensors.sensor_id", onupdate="cascade"))
_location = deferred(
Column(
"location",
Geometry(geometry_type="POINT", srid=4326, management=True, spatial_index=False),
)
)
_elevation = deferred(Column("elevation", REAL))
time = Column(TIMESTAMP)
media_type_id = Column(
UUIDType, ForeignKey("MediaTypes.media_type_id", onupdate="cascade"), nullable=False
)
url = deferred(Column(String(150), nullable=False))
source_id = Column(
UUIDType, ForeignKey("Datafiles.datafile_id", onupdate="cascade"), nullable=False
)
privacy_id = Column(UUIDType, ForeignKey("Privacies.privacy_id", onupdate="cascade"))
remarks = Column(Text)
created_date = Column(DateTime, default=datetime.utcnow)
__table_args__ = (CheckConstraint("url <> ''", name="ck_Media_url"),)
with op.batch_alter_table("Tasks", schema=None, copy_from=Task.__table__) as batch_op:
batch_op.drop_constraint("ck_Tasks_name", type_="check")
with op.batch_alter_table("Synonyms", schema=None, copy_from=Synonym.__table__) as batch_op:
batch_op.drop_constraint("ck_Synonyms_table", type_="check")
batch_op.drop_constraint("ck_Synonyms_synonym", type_="check")
with op.batch_alter_table("Changes", schema=None, copy_from=Change.__table__) as batch_op:
batch_op.drop_constraint("ck_Changes_user", type_="check")
batch_op.drop_constraint("ck_Changes_reason", type_="check")
with op.batch_alter_table("Logs", schema=None, copy_from=Log.__table__) as batch_op:
batch_op.drop_constraint("ck_Logs_table", type_="check")
with op.batch_alter_table(
"PlatformTypes", schema=None, copy_from=PlatformType.__table__
) as batch_op:
batch_op.drop_constraint("ck_PlatformTypes_name", type_="check")
with op.batch_alter_table(
"Nationalities", schema=None, copy_from=Nationality.__table__
) as batch_op:
batch_op.drop_constraint("ck_Nationalities_name", type_="check")
with op.batch_alter_table(
"GeometryTypes", schema=None, copy_from=GeometryType.__table__
) as batch_op:
batch_op.drop_constraint("ck_GeometryTypes_name", type_="check")
with op.batch_alter_table(
"GeometrySubTypes", schema=None, copy_from=GeometrySubType.__table__
) as batch_op:
batch_op.drop_constraint("ck_GeometrySubTypes_name", type_="check")
with op.batch_alter_table("Users", schema=None, copy_from=User.__table__) as batch_op:
batch_op.drop_constraint("ck_Users_name", type_="check")
with op.batch_alter_table("UnitTypes", schema=None, copy_from=UnitType.__table__) as batch_op:
batch_op.drop_constraint("ck_UnitTypes_name", type_="check")
with op.batch_alter_table(
"ClassificationTypes", schema=None, copy_from=ClassificationType.__table__
) as batch_op:
batch_op.drop_constraint("ck_ClassificationTypes_name", type_="check")
with op.batch_alter_table(
"ContactTypes", schema=None, copy_from=ContactType.__table__
) as batch_op:
batch_op.drop_constraint("ck_ContactTypes_name", type_="check")
with op.batch_alter_table(
"SensorTypes", schema=None, copy_from=SensorType.__table__
) as batch_op:
batch_op.drop_constraint("ck_SensorTypes_name", type_="check")
with op.batch_alter_table("Privacies", schema=None, copy_from=Privacy.__table__) as batch_op:
batch_op.drop_constraint("ck_Privacies_name", type_="check")
with op.batch_alter_table(
"DatafileTypes", schema=None, copy_from=DatafileType.__table__
) as batch_op:
batch_op.drop_constraint("ck_DatafileTypes_name", type_="check")
with op.batch_alter_table("MediaTypes", schema=None, copy_from=MediaType.__table__) as batch_op:
batch_op.drop_constraint("ck_MediaTypes_name", type_="check")
with op.batch_alter_table(
"CommentTypes", schema=None, copy_from=CommentType.__table__
) as batch_op:
batch_op.drop_constraint("ck_CommentTypes_name", type_="check")
with op.batch_alter_table(
"CommodityTypes", schema=None, copy_from=CommodityType.__table__
) as batch_op:
batch_op.drop_constraint("ck_CommodityTypes_name", type_="check")
with op.batch_alter_table(
"ConfidenceLevels", schema=None, copy_from=ConfidenceLevel.__table__
) as batch_op:
batch_op.drop_constraint("ck_ConfidenceLevels_name", type_="check")
with op.batch_alter_table("Comments", schema=None, copy_from=Comment.__table__) as batch_op:
batch_op.drop_constraint("ck_Comments_content", type_="check")
with op.batch_alter_table("Media", schema=None, copy_from=Media.__table__) as batch_op:
batch_op.drop_constraint("ck_Media_url", type_="check")
| 1.867188
| 2
|
castellan/common/objects/key.py
|
vakwetu/castellan
| 0
|
12776225
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base Key Class
This module defines the Key class. The Key class is the base class to
represent all encryption keys. The basis for this class was copied
from Java.
"""
from castellan.common.objects import managed_object
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Key(managed_object.ManagedObject):
"""Base class to represent all keys."""
@abc.abstractproperty
def algorithm(self):
"""Returns the key's algorithm.
Returns the key's algorithm. For example, "DSA" indicates that this key
is a DSA key and "AES" indicates that this key is an AES key.
"""
pass
@abc.abstractproperty
def bit_length(self):
"""Returns the key's bit length.
Returns the key's bit length. For example, for AES symmetric keys,
this refers to the length of the key, and for RSA keys, this refers to
the length of the modulus.
"""
pass
| 3.015625
| 3
|
play.py
|
davidschulte/alpha-thesis
| 0
|
12776226
|
from utils import *
from chinese_checkers.TinyChineseCheckersGame import ChineseCheckersGame
from chinese_checkers.tensorflow.ResNet import NNetWrapper as nn
from chinese_checkers.Evaluator import Evaluator
from MCTS import MCTS
from chinese_checkers.InitializeAgent import InitializeAgent
from chinese_checkers.GreedyAgent import GreedyAgent
from chinese_checkers.TinyGUI import GUI
import numpy as np
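# MCTS configuration: numMCTSSims is the number of tree-search simulations per move,
# cpuct the exploration constant, max_steps a cap on game length, and
# load_folder_file the (folder, iteration) of the checkpoint to load.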
args = dotdict({
'numMCTSSims': 2,
'cpuct': 15,
'max_steps': 600,
'load_folder_file': ('checkpoint', 41),
})
args2 = dotdict({
'numMCTSSims': 200,
'cpuct': 15,
'max_steps': 600,
'load_folder_file': ('checkpoint', 12),
})
game = ChineseCheckersGame()
gui = GUI(1)
nn1 = nn(game)
nn1.load_first_checkpoint(args.load_folder_file[0], args.load_folder_file[1])
mcts1 = MCTS(game, nn1, args)
# nn2 = nn(game)
# nn2.load_first_checkpoint(args2.load_folder_file[0], args2.load_folder_file[1])
# mcts2 = MCTS(game, nn2, args2)
actor = InitializeAgent(game)
forward = GreedyAgent(game)
evaluator = Evaluator(None, mcts1, mcts1, game, gui, True)
scores_all = np.zeros((3, 3))
steps_all = 0
wrong_win_all = 0
for _ in range(20):
scores, steps, wrong_win = evaluator.play_game(1, 1)
for p in range(3):
if scores[p] == 3:
scores_all[p,0] += 1
elif scores[p] == 1:
scores_all[p,1] += 1
else:
scores_all[p,2] += 1
steps_all += steps
wrong_win_all += wrong_win
print(scores_all)
| 2
| 2
|
utils/load_custom_datasets.py
|
chansoopark98/MobileNet-SSD
| 2
|
12776227
|
from tensorflow.keras.utils import Sequence
import os
import pandas as pd
import random
import numpy as np
class DataGenerator(Sequence):
def __init__(self,
path_args,
batch_size: int,
shuffle: bool,
mode: str):
self.x_img_path = './train/'
self.x_label_path = './label/'
self.mode = mode
# train
self.x_img = os.listdir(self.x_img_path)
self.x_label = os.listdir(self.x_label_path)
# TODO validation and test dataset
self.x_list = []
self.y_list = []
self.load_dataset()
self.batch_size = batch_size
self.shuffle = shuffle
self.on_epoch_end()
    def load_dataset(self):
        # `input_data`/`result_data` were undefined in the original; as a minimal
        # runnable placeholder, pair image and label file paths (decoding is still TODO).
        for img_name, label_name in zip(sorted(self.x_img), sorted(self.x_label)):
            input_data = os.path.join(self.x_img_path, img_name)
            result_data = os.path.join(self.x_label_path, label_name)
            self.x_list.append(input_data)
            self.y_list.append(result_data)
def get_data_len(self):
return len(self.x_list), len(self.y_list)
def __len__(self):
return int(np.floor(len(self.x_list) / self.batch_size))
def on_epoch_end(self):
self.indexes = np.arange(len(self.x_list))
        if self.shuffle:
np.random.shuffle(self.indexes)
def get_input(self, index):
return self.x_list[index * self.batch_size:(index + 1) * self.batch_size]
def get_target(self, index):
return self.y_list[index * self.batch_size:(index + 1) * self.batch_size]
def __getitem__(self, i):
# collect batch data
start = i * self.batch_size
stop = (i + 1) * self.batch_size
data = []
y_data = []
for j in range(start, stop):
data.append(self.x_list[j])
y_data.append(self.y_list[j])
# transpose list of lists
batch = [np.stack(samples, axis=0) for samples in zip(*data)]
y_batch = [np.stack(samples, axis=0) for samples in zip(*y_data)]
# newer version of tf/keras want batch to be in tuple rather than list
return tuple(batch), tuple(y_batch)
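# A minimal usage sketch (illustrative; the original constructor ignores `path_args`
# and expects images under ./train/ with matching labels under ./label/):
# gen = DataGenerator(path_args=None, batch_size=8, shuffle=True, mode="train")
# x_batch, y_batch = gen[0]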
| 2.796875
| 3
|
evennia_extensions/character_extensions/storage_wrappers.py
|
dvoraen/arxcode
| 42
|
12776228
|
from evennia_extensions.object_extensions.storage_wrappers import StorageWrapper
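# Each wrapper below routes attribute storage to a related character model: get_storage
# returns the existing related row, while create_new_storage either builds one on
# demand or (for RosterEntry) raises because it cannot be created from here.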
class RosterEntryWrapper(StorageWrapper):
def get_storage(self, instance):
return instance.obj.roster
def create_new_storage(self, instance):
raise AttributeError("This object does not have a RosterEntry to store that.")
class CharacterSheetWrapper(StorageWrapper):
def get_storage(self, instance):
return instance.obj.charactersheet
def create_new_storage(self, instance):
from evennia_extensions.character_extensions.models import CharacterSheet
return CharacterSheet.objects.create(objectdb=instance.obj)
class CombatSettingsWrapper(StorageWrapper):
def get_storage(self, instance):
return instance.obj.charactercombatsettings
def create_new_storage(self, instance):
from evennia_extensions.character_extensions.models import (
CharacterCombatSettings,
)
return CharacterCombatSettings.objects.create(objectdb=instance.obj)
class MessengerSettingsWrapper(StorageWrapper):
def get_storage(self, instance):
return instance.obj.charactermessengersettings
def create_new_storage(self, instance):
from evennia_extensions.character_extensions.models import (
CharacterMessengerSettings,
)
return CharacterMessengerSettings.objects.create(objectdb=instance.obj)
| 2.6875
| 3
|
tests/unit/test_translator.py
|
flatironhealth/aws-remediation-framework
| 22
|
12776229
|
import sys
import os
from importlib import reload
from nose.tools import assert_equal, assert_true, assert_false
from unittest import TestCase, mock
from unittest.mock import MagicMock
from resources.event_translator.main import translate_event
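# Each test below feeds a raw CloudTrail event (as delivered through EventBridge) into
# translate_event and expects a list of normalized resource dicts containing the
# "account", "id", "region", and "type" keys, plus an optional "delay".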
class TestTranslator(TestCase):
def test_sqs_SetQueueAttributes_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:<EMAIL>",
"arn": "arn:aws:sts::123456789012:assumed-role/role-name/abc@xyz.com",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"attributes": {
"mfaAuthenticated": "true",
"creationDate": "2019-12-16T16:31:30Z",
},
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/role-name",
"accountId": "123456789012",
"userName": "role-name",
},
},
},
"eventTime": "2019-12-16T17:29:19Z",
"eventSource": "sqs.amazonaws.com",
"eventName": "SetQueueAttributes",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "console.amazonaws.com",
"requestParameters": {
"attributes": {
"Policy": '{"Version":"2012-10-17", "Id":"sqspolicy", "Statement":[{"Sid":"Sid1576516421540", "Effect":"Allow", "Principal":"*", "Action":"SQS:SendMessage", "Resource":"arn:aws:sqs:us-east-1:123456789012:misconfiguration_maker-bad"}]}'
},
"queueUrl": "https://sqs.us-east-1.amazonaws.com/123456789012/misconfiguration_maker-bad",
},
"responseElements": None,
"requestID": "8ab5b156-9e4f-5a82-91d4-e7275f60cd55",
"eventID": "9b5808a9-7a05-429e-be70-6256b9a745af",
"eventType": "AwsApiCall",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "https://sqs.us-east-1.amazonaws.com/123456789012/misconfiguration_maker-bad",
"region": "us-east-1",
"type": "sqs",
}
],
)
def test_sqs_CreateQueue_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:1576513574938242000",
"arn": "arn:aws:sts::123456789012:assumed-role/role-name/1576513574938242000",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"attributes": {
"mfaAuthenticated": "true",
"creationDate": "2019-12-16T16:26:14Z",
},
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/role-name",
"accountId": "123456789012",
"userName": "role-name",
},
},
},
"eventTime": "2019-12-16T16:47:50Z",
"eventSource": "sqs.amazonaws.com",
"eventName": "CreateQueue",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "aws-sdk-go/1.25.36 (go1.13.3; darwin; amd64) APN/1.0 HashiCorp/1.0 Terraform/0.12.16 (+https://www.terraform.io)",
"requestParameters": {
"queueName": "remediator-resource-queue",
"tags": {
"App": "remediator",
"system": "aws-remediation",
"team": "security",
},
"attribute": {
"ReceiveMessageWaitTimeSeconds": "10",
"DelaySeconds": "90",
"MessageRetentionPeriod": "86400",
"MaximumMessageSize": "2048",
"VisibilityTimeout": "30",
},
},
"responseElements": {
"queueUrl": "https://sqs.us-east-1.amazonaws.com/123456789012/remediator-resource-queue"
},
"requestID": "80c37425-0ef7-587d-ab6e-2254b981ad47",
"eventID": "13c8a1f5-4b23-470b-9dfb-51479473287c",
"eventType": "AwsApiCall",
"recipientAccountId": "123456789012",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "https://sqs.us-east-1.amazonaws.com/123456789012/remediator-resource-queue",
"region": "us-east-1",
"type": "sqs",
}
],
)
def test_sqs_AddPermission_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:1576522229953466000",
"arn": "arn:aws:sts::123456789012:assumed-role/role-name/1576522229953466000",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"attributes": {
"mfaAuthenticated": "true",
"creationDate": "2019-12-16T18:50:30Z",
},
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/role-name",
"accountId": "123456789012",
"userName": "role-name",
},
},
},
"eventTime": "2019-12-16T18:51:15Z",
"eventSource": "sqs.amazonaws.com",
"eventName": "AddPermission",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "aws-cli/1.16.286 Python/3.7.5 Darwin/18.7.0 botocore/1.13.25",
"requestParameters": {
"actions": ["SendMessage"],
"aWSAccountIds": ["123456789012"],
"label": "0",
"queueUrl": "https://sqs.us-east-1.amazonaws.com/123456789012/misconfiguration_maker-bad",
},
"responseElements": None,
"requestID": "e1ede60f-0cd7-52f2-9520-b07ed8b4b30c",
"eventID": "a8bdb7fb-66b0-45ed-a5e4-d13f1e91d26b",
"eventType": "AwsApiCall",
"recipientAccountId": "123456789012",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "https://sqs.us-east-1.amazonaws.com/123456789012/misconfiguration_maker-bad",
"region": "us-east-1",
"type": "sqs",
}
],
)
def test_s3_PutBucketPolicy_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:1575572476274064000",
"arn": "arn:aws:sts::123456789012:assumed-role/role-name/1575572476274064000",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"attributes": {
"mfaAuthenticated": "true",
"creationDate": "2019-12-05T19:01:16Z",
},
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/role-name",
"accountId": "123456789012",
"userName": "role-name",
},
},
},
"eventTime": "2019-12-05T19:02:08Z",
"eventSource": "s3.amazonaws.com",
"eventName": "PutBucketPolicy",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "[aws-sdk-go/1.25.38 (go1.13.4; darwin; amd64) APN/1.0 HashiCorp/1.0 Terraform/0.12.16 (+https://www.terraform.io)]",
"requestParameters": {
"bucketName": "wkykdxpwwr67imj4-misconfig-maker",
"bucketPolicy": {
"Version": "2012-10-17",
"Statement": [
{
"Principal": "*",
"Action": ["s3:GetObjectAcl"],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::wkykdxpwwr67imj4-misconfig-maker/*"
],
}
],
},
"host": ["wkykdxpwwr67imj4-misconfig-maker.s3.amazonaws.com"],
"policy": [""],
},
"responseElements": None,
"additionalEventData": {
"SignatureVersion": "SigV4",
"CipherSuite": "ECDHE-RSA-AES128-GCM-SHA256",
"AuthenticationMethod": "AuthHeader",
},
"requestID": "0E1152F10A561447",
"eventID": "bc81ed34-b1d0-4a6c-987f-59f584db2173",
"eventType": "AwsApiCall",
"recipientAccountId": "123456789012",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "wkykdxpwwr67imj4-misconfig-maker",
"region": "us-east-1",
"type": "s3_bucket",
}
],
)
def test_s3_PutBucketAcl_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:user-a",
"arn": "arn:aws:sts::123456789012:assumed-role/role-name/user-a",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"attributes": {
"mfaAuthenticated": "false",
"creationDate": "2019-12-09T16:18:23Z",
},
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/role-name",
"accountId": "123456789012",
"userName": "role-name",
},
},
},
"eventTime": "2019-12-09T16:26:47Z",
"eventSource": "s3.amazonaws.com",
"eventName": "PutBucketAcl",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "[S3Console/0.4, aws-internal/3 aws-sdk-java/1.11.666 Linux/4.9.184-0.1.ac.235.83.329.metal1.x86_64 OpenJDK_64-Bit_Server_VM/25.232-b09 java/1.8.0_232 vendor/Oracle_Corporation]",
"requestParameters": {
"bucketName": "bucket-name",
"AccessControlPolicy": {
"AccessControlList": {
"Grant": [
{
"Grantee": {
"xsi:type": "CanonicalUser",
"DisplayName": "aws+account-name",
"xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
"ID": "abcdefghijklmnop1234567890",
},
"Permission": "FULL_CONTROL",
},
{
"Grantee": {
"xsi:type": "Group",
"xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
},
"Permission": "READ_ACP",
},
{
"Grantee": {
"xsi:type": "Group",
"xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
},
"Permission": "WRITE",
},
]
},
"xmlns": "http://s3.amazonaws.com/doc/2006-03-01/",
"Owner": {
"DisplayName": "aws+account-name",
"ID": "abcdefghijklmnop1234567890",
},
},
"host": ["s3.amazonaws.com"],
"acl": [""],
},
"responseElements": None,
"additionalEventData": {
"SignatureVersion": "SigV4",
"CipherSuite": "ECDHE-RSA-AES128-SHA",
"AuthenticationMethod": "AuthHeader",
"vpcEndpointId": "vpce-example",
},
"requestID": "9C9E4CB44162228C",
"eventID": "0655c8f0-0130-4659-acc0-e29a65aca2e5",
"eventType": "AwsApiCall",
"recipientAccountId": "123456789012",
"vpcEndpointId": "vpce-example",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "bucket-name",
"region": "us-east-1",
"type": "s3_bucket",
}
],
)
def test_s3_CreateBucket_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:1577212321281084000",
"arn": "arn:aws:sts::123456789012:assumed-role/role-name/1577212321281084000",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"attributes": {
"mfaAuthenticated": "true",
"creationDate": "2019-12-24T18:32:01Z",
},
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/role-name",
"accountId": "123456789012",
"userName": "role-name",
},
},
},
"eventTime": "2019-12-24T18:32:23Z",
"eventSource": "s3.amazonaws.com",
"eventName": "CreateBucket",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "[aws-sdk-go/1.25.38 (go1.13.4; darwin; amd64) APN/1.0 HashiCorp/1.0 Terraform/0.12.16 (+https://www.terraform.io)]",
"requestParameters": {
"x-amz-acl": ["private"],
"host": ["o3hxotcqp2u9mbtl-misconfig-maker.s3.amazonaws.com"],
"bucketName": "o3hxotcqp2u9mbtl-misconfig-maker",
},
"responseElements": None,
"additionalEventData": {
"SignatureVersion": "SigV4",
"CipherSuite": "ECDHE-RSA-AES128-GCM-SHA256",
"AuthenticationMethod": "AuthHeader",
},
"requestID": "85C95550E930607C",
"eventID": "5cc82958-1953-4945-aab5-bc114e920c33",
"eventType": "AwsApiCall",
"recipientAccountId": "123456789012",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "o3hxotcqp2u9mbtl-misconfig-maker",
"region": "us-east-1",
"type": "s3_bucket",
}
],
)
def test_ec2_ModifyImageAttribute_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:1234567890123456789",
"arn": "arn:aws:sts::123456789012:assumed-role/role-name/1234567890123456789",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/role-name",
"accountId": "123456789012",
"userName": "role-name",
},
"webIdFederationData": {},
"attributes": {
"mfaAuthenticated": "true",
"creationDate": "2019-12-05T23:00:31Z",
},
},
},
"eventTime": "2019-12-05T23:05:15Z",
"eventSource": "ec2.amazonaws.com",
"eventName": "ModifyImageAttribute",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "aws-sdk-go/1.25.38 (go1.13.4; darwin; amd64) APN/1.0 HashiCorp/1.0 Terraform/0.12.16 (+https://www.terraform.io)",
"requestParameters": {
"imageId": "ami-1234a",
"launchPermission": {
"remove": {"items": [{"userId": "000000000000"}]}
},
"attributeType": "launchPermission",
},
"responseElements": {"_return": True},
"requestID": "88cf05ef-c844-4f75-8456-391c388cade0",
"eventID": "b4469d10-a068-41eb-8d3f-ca36e5e8d06b",
"eventType": "AwsApiCall",
"recipientAccountId": "123456789012",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "ami-1234a",
"region": "us-east-1",
"type": "ami",
}
],
)
def test_ec2_ModifySnapshotAttribute_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:1234567890123456789",
"arn": "arn:aws:sts::123456789012:assumed-role/role-name/1234567890123456789",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/role-name",
"accountId": "123456789012",
"userName": "role-name",
},
"webIdFederationData": {},
"attributes": {
"mfaAuthenticated": "true",
"creationDate": "2019-12-05T23:00:31Z",
},
},
},
"eventTime": "2019-12-05T23:05:15Z",
"eventSource": "ec2.amazonaws.com",
"eventName": "ModifySnapshotAttribute",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "aws-sdk-go/1.25.38 (go1.13.4; darwin; amd64) APN/1.0 HashiCorp/1.0 Terraform/0.12.16 (+https://www.terraform.io)",
"requestParameters": {
"snapshotId": "snap-abc123",
"createVolumePermission": {
"remove": {"items": [{"userId": "000000000000"}]}
},
"attributeType": "CREATE_VOLUME_PERMISSION",
},
"responseElements": {
"requestId": "8009c9ff-d63e-4e0d-9d71-cd23c35cb649",
"_return": True,
},
"requestID": "8009c9ff-d63e-4e0d-9d71-cd23c35cb649",
"eventID": "19e65b1e-2667-42da-82ff-84e8e032c41a",
"eventType": "AwsApiCall",
"recipientAccountId": "123456789012",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "snap-abc123",
"region": "us-east-1",
"type": "ebs_snapshot",
}
],
)
def test_rds_ModifyDBSnapshotAttribute_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:<EMAIL>",
"arn": "arn:aws:sts::123456789012:assumed-role/role-name/<EMAIL>",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"attributes": {
"mfaAuthenticated": "true",
"creationDate": "2019-12-04T21:52:47Z",
},
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/role-name",
"accountId": "123456789012",
"userName": "role-name",
},
},
},
"eventTime": "2019-12-04T22:27:29Z",
"eventSource": "rds.amazonaws.com",
"eventName": "ModifyDBSnapshotAttribute",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "console.amazonaws.com",
"requestParameters": {
"dBSnapshotIdentifier": "misconfiguration-maker",
"attributeName": "restore",
"valuesToAdd": ["000000000000"],
},
"responseElements": {
"dBSnapshotIdentifier": "misconfiguration-maker",
"dBSnapshotAttributes": [
{
"attributeName": "restore",
"attributeValues": ["000000000000"],
}
],
},
"requestID": "4e2915f3-c8c9-47d1-a0c5-ac1d3d709753",
"eventID": "19f5a1b0-c17e-4d1e-af69-bc42628dcde9",
"eventType": "AwsApiCall",
"recipientAccountId": "123456789012",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "misconfiguration-maker",
"region": "us-east-1",
"type": "rds_snapshot",
}
],
)
def test_rds_ModifyDBInstance_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:role-name",
"arn": "arn:aws:sts::123456789012:assumed-role/role-name/role-name",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"attributes": {
"mfaAuthenticated": "false",
"creationDate": "2019-11-14T04:59:24Z",
},
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/role-name",
"accountId": "123456789012",
"userName": "role-name",
},
},
},
"eventTime": "2019-11-14T04:59:26Z",
"eventSource": "rds.amazonaws.com",
"eventName": "ModifyDBInstance",
"awsRegion": "us-east-1",
"sourceIPAddress": "172.16.58.3",
"userAgent": "Boto3/1.9.221 Python/3.6.9 Linux/4.14.138-99.102.amzn2.x86_64 exec-env/AWS_Lambda_python3.6 Botocore/1.12.221",
"requestParameters": {
"allowMajorVersionUpgrade": False,
"applyImmediately": True,
"publiclyAccessible": False,
"dBInstanceIdentifier": "dbname",
},
"responseElements": {
"dBInstanceArn": "arn:aws:rds:us-east-1:123456789012:db:dbname",
"storageEncrypted": False,
"preferredBackupWindow": "04:27-04:57",
"preferredMaintenanceWindow": "sat:09:34-sat:10:04",
"backupRetentionPeriod": 7,
"allocatedStorage": 20,
"storageType": "gp2",
"engineVersion": "5.7.22",
"dbInstancePort": 0,
"associatedRoles": [],
"optionGroupMemberships": [
{"status": "in-sync", "optionGroupName": "default:mysql-5-7"}
],
"dBParameterGroups": [
{
"dBParameterGroupName": "default.mysql5.7",
"parameterApplyStatus": "in-sync",
}
],
"instanceCreateTime": "Jul 17, 2019 7:31:39 PM",
"maxAllocatedStorage": 1000,
"monitoringInterval": 0,
"dBInstanceClass": "db.t2.micro",
"readReplicaDBInstanceIdentifiers": [],
"dBSubnetGroup": {
"dBSubnetGroupName": "default",
"dBSubnetGroupDescription": "default",
"subnets": [
{
"subnetAvailabilityZone": {"name": "us-east-1b"},
"subnetIdentifier": "subnet-abcd",
"subnetStatus": "Active",
},
{
"subnetAvailabilityZone": {"name": "us-east-1a"},
"subnetIdentifier": "subnet-abcd",
"subnetStatus": "Active",
},
{
"subnetAvailabilityZone": {"name": "us-east-1c"},
"subnetIdentifier": "subnet-abcd",
"subnetStatus": "Active",
},
{
"subnetAvailabilityZone": {"name": "us-east-1f"},
"subnetIdentifier": "subnet-abcd",
"subnetStatus": "Active",
},
{
"subnetAvailabilityZone": {"name": "us-east-1d"},
"subnetIdentifier": "subnet-abcd",
"subnetStatus": "Active",
},
{
"subnetAvailabilityZone": {"name": "us-east-1e"},
"subnetIdentifier": "subnet-abcd",
"subnetStatus": "Active",
},
],
"vpcId": "vpc-abcd",
"subnetGroupStatus": "Complete",
},
"masterUsername": "admin",
"multiAZ": False,
"autoMinorVersionUpgrade": True,
"latestRestorableTime": "Nov 14, 2019 4:55:00 AM",
"engine": "mysql",
"httpEndpointEnabled": False,
"cACertificateIdentifier": "rds-ca-2015",
"dbiResourceId": "db-abcd",
"deletionProtection": False,
"endpoint": {
"address": "dbname.xyz.us-east-1.rds.amazonaws.com",
"port": 3306,
"hostedZoneId": "ABCDEFGHIJKLMN",
},
"dBSecurityGroups": [],
"pendingModifiedValues": {},
"dBInstanceStatus": "available",
"publiclyAccessible": True,
"domainMemberships": [],
"copyTagsToSnapshot": True,
"dBInstanceIdentifier": "dbname",
"licenseModel": "general-public-license",
"iAMDatabaseAuthenticationEnabled": False,
"performanceInsightsEnabled": False,
"vpcSecurityGroups": [
{"status": "active", "vpcSecurityGroupId": "sg-abcd"}
],
"dbname.xyz.us-east-1.rds.amazonaws.com"dbname",
"availabilityZone": "us-east-1d",
},
"requestID": "3ae00dda-20a9-45c7-b464-bf5b46ac97e7",
"eventID": "f15b2ee4-6a74-425a-8629-e91f9e6fcc3a",
"eventType": "AwsApiCall",
"recipientAccountId": "123456789012",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "dbname",
"region": "us-east-1",
"type": "rds",
}
],
)
def test_rds_CreateDBInstance_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:1575491847268719000",
"arn": "arn:aws:sts::123456789012:assumed-role/role-name/1575491847268719000",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"attributes": {
"mfaAuthenticated": "true",
"creationDate": "2019-12-04T20:37:27Z",
},
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/role-name",
"accountId": "123456789012",
"userName": "role-name",
},
},
},
"eventTime": "2019-12-04T21:14:11Z",
"eventSource": "rds.amazonaws.com",
"eventName": "CreateDBInstance",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "aws-sdk-go/1.25.38 (go1.13.4; darwin; amd64) APN/1.0 HashiCorp/1.0 Terraform/0.12.16 (+https://www.terraform.io)",
"requestParameters": {
"dbname.xyz.us-east-1.rds.amazonaws.com",
"dBInstanceIdentifier": "dbname",
"allocatedStorage": 5,
"dBInstanceClass": "db.t2.micro",
"engine": "mysql",
"masterUsername": "foo",
"masterUserPassword": "****",
"dBParameterGroupName": "default.mysql5.7",
"backupRetentionPeriod": 0,
"engineVersion": "5.7",
"autoMinorVersionUpgrade": True,
"publiclyAccessible": False,
"tags": [
{"key": "team", "value": "security"},
{"key": "Name", "value": "misconfiguration_maker"},
{"key": "App", "value": "misconfiguration_maker"},
{"key": "system", "value": "aws-remediation"},
],
"storageEncrypted": False,
"copyTagsToSnapshot": False,
"deletionProtection": False,
},
"responseElements": {
"dBInstanceIdentifier": "dbname",
"dBInstanceClass": "db.t2.micro",
"engine": "mysql",
"dBInstanceStatus": "creating",
"masterUsername": "foo",
"dbname.xyz.us-east-1.rds.amazonaws.com",
"allocatedStorage": 5,
"preferredBackupWindow": "04:37-05:07",
"backupRetentionPeriod": 0,
"dBSecurityGroups": [],
"vpcSecurityGroups": [
{"vpcSecurityGroupId": "sg-abcd", "status": "active"}
],
"dBParameterGroups": [
{
"dBParameterGroupName": "default.mysql5.7",
"parameterApplyStatus": "in-sync",
}
],
"dBSubnetGroup": {
"dBSubnetGroupName": "default",
"dBSubnetGroupDescription": "default",
"vpcId": "vpc-abcd",
"subnetGroupStatus": "Complete",
"subnets": [
{
"subnetIdentifier": "subnet-abcd",
"subnetAvailabilityZone": {"name": "us-east-1b"},
"subnetOutpost": {},
"subnetStatus": "Active",
},
{
"subnetIdentifier": "subnet-abcd",
"subnetAvailabilityZone": {"name": "us-east-1a"},
"subnetOutpost": {},
"subnetStatus": "Active",
},
{
"subnetIdentifier": "subnet-abcd",
"subnetAvailabilityZone": {"name": "us-east-1c"},
"subnetOutpost": {},
"subnetStatus": "Active",
},
{
"subnetIdentifier": "subnet-abcd",
"subnetAvailabilityZone": {"name": "us-east-1f"},
"subnetOutpost": {},
"subnetStatus": "Active",
},
{
"subnetIdentifier": "subnet-abcd",
"subnetAvailabilityZone": {"name": "us-east-1d"},
"subnetOutpost": {},
"subnetStatus": "Active",
},
{
"subnetIdentifier": "subnet-abcd",
"subnetAvailabilityZone": {"name": "us-east-1e"},
"subnetOutpost": {},
"subnetStatus": "Active",
},
],
},
"preferredMaintenanceWindow": "mon:08:13-mon:08:43",
"pendingModifiedValues": {"masterUserPassword": "****"},
"multiAZ": False,
"engineVersion": "5.7.22",
"autoMinorVersionUpgrade": True,
"readReplicaDBInstanceIdentifiers": [],
"licenseModel": "general-public-license",
"optionGroupMemberships": [
{"optionGroupName": "default:mysql-5-7", "status": "in-sync"}
],
"publiclyAccessible": False,
"storageType": "gp2",
"dbInstancePort": 0,
"storageEncrypted": False,
"dbiResourceId": "db-abcd",
"cACertificateIdentifier": "rds-ca-2015",
"domainMemberships": [],
"copyTagsToSnapshot": False,
"monitoringInterval": 0,
"dBInstanceArn": "arn:aws:rds:us-east-1:123456789012:db:dbname",
"iAMDatabaseAuthenticationEnabled": False,
"performanceInsightsEnabled": False,
"deletionProtection": False,
"associatedRoles": [],
"httpEndpointEnabled": False,
},
"requestID": "5243cd91-8200-4f75-bfdf-73b0838cfe70",
"eventID": "9899c3f4-5f18-4e91-8bbf-6748f1f0be67",
"eventType": "AwsApiCall",
"recipientAccountId": "123456789012",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "dbname",
"delay": 900,
"region": "us-east-1",
"type": "rds",
}
],
)
def test_redshift_ModifyCluster_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:role-name-123456789012",
"arn": "arn:aws:sts::123456789012:assumed-role/local_remediator/role-name-123456789012",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"attributes": {
"mfaAuthenticated": "true",
"creationDate": "2019-12-05T20:46:09Z",
},
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/local_remediator",
"accountId": "123456789012",
"userName": "local_remediator",
},
},
},
"eventTime": "2019-12-05T20:46:13Z",
"eventSource": "redshift.amazonaws.com",
"eventName": "ModifyCluster",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "Boto3/1.10.25 Python/3.7.5 Darwin/18.7.0 Botocore/1.13.25",
"requestParameters": {
"clusterIdentifier": "misconfig-maker",
"masterUserPassword": "****",
"encrypted": True,
},
"responseElements": None,
"requestID": "469e7fef-17a0-11ea-9532-f3c8251db4e6",
"eventID": "754f30bf-a02c-49ab-9523-14c40d872979",
"eventType": "AwsApiCall",
"recipientAccountId": "123456789012",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "misconfig-maker",
"delay": 900,
"region": "us-east-1",
"type": "redshift",
}
],
)
def test_redshift_CreateCluster_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:1577208946667274000",
"arn": "arn:aws:sts::123456789012:assumed-role/role-name/1577208946667274000",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"attributes": {
"mfaAuthenticated": "true",
"creationDate": "2019-12-24T17:35:47Z",
},
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/role-name",
"accountId": "123456789012",
"userName": "role-<PASSWORD>",
},
},
},
"eventTime": "2019-12-24T17:36:59Z",
"eventSource": "redshift.amazonaws.com",
"eventName": "CreateCluster",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "aws-sdk-go/1.25.38 (go1.13.4; darwin; amd64) APN/1.0 HashiCorp/1.0 Terraform/0.12.16 (+https://www.terraform.io)",
"requestParameters": {
"clusterIdentifier": "misconfig-maker",
"allowVersionUpgrade": True,
"clusterVersion": "1.0",
"tags": [
{"value": "misconfiguration_maker", "key": "App"},
{"value": "security", "key": "team"},
{"value": "misconfiguration_maker", "key": "Name"},
{"value": "aws-remediation", "key": "system"},
],
"masterUsername": "foo",
"masterUserPassword": "****",
"automatedSnapshotRetentionPeriod": 1,
"port": 5439,
"dbname.xyz.us-east-1.rds.amazonaws.com",
"clusterType": "single-node",
"nodeType": "dc1.large",
"publiclyAccessible": True,
},
"responseElements": {
"nextMaintenanceWindowStartTime": "Dec 28, 2019 8:00:00 AM",
"nodeType": "dc1.large",
"clusterAvailabilityStatus": "Modifying",
"preferredMaintenanceWindow": "sat:08:00-sat:08:30",
"manualSnapshotRetentionPeriod": -1,
"clusterStatus": "creating",
"deferredMaintenanceWindows": [],
"vpcId": "vpc-abcd",
"enhancedVpcRouting": False,
"masterUsername": "foo",
"clusterSecurityGroups": [],
"pendingModifiedValues": {"masterUserPassword": "****"},
"maintenanceTrackName": "current",
"dbname.xyz.us-east-1.rds.amazonaws.com",
"clusterVersion": "1.0",
"encrypted": False,
"publiclyAccessible": True,
"tags": [
{"value": "misconfiguration_maker", "key": "App"},
{"value": "aws-remediation", "key": "system"},
{"value": "security", "key": "team"},
{"value": "misconfiguration_maker", "key": "Name"},
],
"clusterParameterGroups": [
{
"parameterGroupName": "default.redshift-1.0",
"parameterApplyStatus": "in-sync",
}
],
"allowVersionUpgrade": True,
"automatedSnapshotRetentionPeriod": 1,
"numberOfNodes": 1,
"vpcSecurityGroups": [
{"status": "active", "vpcSecurityGroupId": "sg-abcd"}
],
"iamRoles": [],
"clusterIdentifier": "misconfig-maker",
"clusterSubnetGroupName": "default",
},
"requestID": "fc2e9ff2-2673-11ea-9caa-c956bec1ce87",
"eventID": "8357728a-3df4-4604-aa5f-6ec57ead3371",
"eventType": "AwsApiCall",
"recipientAccountId": "123456789012",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "misconfig-maker",
"delay": 900,
"region": "us-east-1",
"type": "redshift",
}
],
)
def test_ec2_RunInstances_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:1234567890123456789",
"arn": "arn:aws:sts::123456789012:assumed-role/role-name/1234567890123456789",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/role-name",
"accountId": "123456789012",
"userName": "role-name",
},
"webIdFederationData": {},
"attributes": {
"mfaAuthenticated": "true",
"creationDate": "2019-12-19T15:22:58Z",
},
},
},
"eventTime": "2019-12-19T15:37:58Z",
"eventSource": "ec2.amazonaws.com",
"eventName": "RunInstances",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "aws-sdk-go/1.25.38 (go1.13.4; darwin; amd64) APN/1.0 HashiCorp/1.0 Terraform/0.12.16 (+https://www.terraform.io)",
"requestParameters": {
"instancesSet": {
"items": [
{
"imageId": "ami-1234",
"minCount": 1,
"maxCount": 1,
}
]
},
"instanceType": "t2.micro",
"blockDeviceMapping": {},
"monitoring": {"enabled": False},
"disableApiTermination": False,
"iamInstanceProfile": {},
"ebsOptimized": False,
"tagSpecificationSet": {
"items": [
{
"resourceType": "instance",
"tags": [
{"key": "App", "value": "misconfiguration_maker"},
{"key": "Name", "value": "misconfiguration_maker"},
{"key": "system", "value": "aws-remediation"},
{"key": "team", "value": "security"},
],
}
]
},
},
"responseElements": {
"requestId": "da2549b5-1cfa-4312-a9c5-2343cf561dd9",
"reservationId": "r-abcd",
"ownerId": "123456789012",
"groupSet": {},
"instancesSet": {
"items": [
{
"instanceId": "i-abcd",
"imageId": "ami-1234",
"instanceState": {"code": 0, "name": "pending"},
"privateDnsName": "ip-192-168-1-1.ec2.internal",
"amiLaunchIndex": 0,
"productCodes": {},
"instanceType": "t2.micro",
"launchTime": 1576769878000,
"placement": {
"availabilityZone": "us-east-1d",
"tenancy": "default",
},
"monitoring": {"state": "disabled"},
"subnetId": "subnet-abcd",
"vpcId": "vpc-abcd",
"privateIpAddress": "172.16.58.3",
"stateReason": {
"code": "pending",
"message": "pending",
},
"architecture": "x86_64",
"rootDeviceType": "ebs",
"rootDeviceName": "/dev/sda1",
"blockDeviceMapping": {},
"virtualizationType": "hvm",
"hypervisor": "xen",
"tagSet": {
"items": [
{"key": "team", "value": "security"},
{
"key": "Name",
"value": "misconfiguration_maker",
},
{"key": "system", "value": "aws-remediation"},
{
"key": "App",
"value": "misconfiguration_maker",
},
]
},
"groupSet": {
"items": [
{
"groupId": "sg-abcd",
"groupName": "default",
}
]
},
"sourceDestCheck": True,
"networkInterfaceSet": {
"items": [
{
"networkInterfaceId": "eni-abcd",
"subnetId": "subnet-abcd",
"vpcId": "vpc-abcd",
"ownerId": "123456789012",
"status": "in-use",
"macAddress": "0e:89:82:29:40:d9",
"privateIpAddress": "172.16.58.3",
"privateDnsName": "ip-192-168-1-1.ec2.internal",
"sourceDestCheck": True,
"interfaceType": "interface",
"groupSet": {
"items": [
{
"groupId": "sg-abcd",
"groupName": "default",
}
]
},
"attachment": {
"attachmentId": "eni-abcd",
"deviceIndex": 0,
"status": "attaching",
"attachTime": 1576769878000,
"deleteOnTermination": True,
},
"privateIpAddressesSet": {
"item": [
{
"privateIpAddress": "172.16.58.3",
"privateDnsName": "ip-192-168-1-1.ec2.internal",
"primary": True,
}
]
},
"ipv6AddressesSet": {},
"tagSet": {},
}
]
},
"ebsOptimized": False,
"cpuOptions": {"coreCount": 1, "threadsPerCore": 1},
"capacityReservationSpecification": {
"capacityReservationPreference": "open"
},
"enclaveOptions": {"enabled": False},
"metadataOptions": {
"state": "pending",
"httpTokens": "optional",
"httpPutResponseHopLimit": 1,
"httpEndpoint": "enabled",
},
}
]
},
},
"requestID": "da2549b5-1cfa-4312-a9c5-2343cf561dd9",
"eventID": "4fb65f01-3889-4ecf-8a79-65fda2efe348",
"eventType": "AwsApiCall",
"recipientAccountId": "123456789012",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "i-abcd",
"region": "us-east-1",
"type": "ec2",
}
],
)
def test_ec2_ModifyInstanceMetadataOptions_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:role-name",
"arn": "arn:aws:sts::123456789012:assumed-role/local_remediator/role-name",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/local_remediator",
"accountId": "123456789012",
"userName": "local_remediator",
},
"webIdFederationData": {},
"attributes": {
"mfaAuthenticated": "true",
"creationDate": "2019-12-19T18:17:30Z",
},
},
},
"eventTime": "2019-12-19T18:17:30Z",
"eventSource": "ec2.amazonaws.com",
"eventName": "ModifyInstanceMetadataOptions",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "Boto3/1.10.25 Python/3.7.5 Darwin/18.7.0 Botocore/1.13.25",
"errorCode": "Client.InvalidParameterValue",
"errorMessage": "The HttpEndpoint is not set to 'enabled' in this request. To specify a value for HttpTokens, set HttpEndpoint to 'enabled'.",
"requestParameters": {
"ModifyInstanceMetadataOptionsRequest": {
"HttpTokens": "required",
"InstanceId": "i-abcd",
}
},
"responseElements": None,
"requestID": "9ed413bf-ff0d-485a-a245-a7a5f5b86627",
"eventID": "c844317a-ba80-465e-aec9-65246f16062a",
"eventType": "AwsApiCall",
"recipientAccountId": "123456789012",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "i-abcd",
"region": "us-east-1",
"type": "ec2",
}
],
)
def test_ec2_CreateSecurityGroup_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:1577212321281084000",
"arn": "arn:aws:sts::123456789012:assumed-role/role-name/1577212321281084000",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/role-name",
"accountId": "123456789012",
"userName": "role-name",
},
"webIdFederationData": {},
"attributes": {
"mfaAuthenticated": "true",
"creationDate": "2019-12-24T18:32:01Z",
},
},
},
"eventTime": "2019-12-24T19:12:56Z",
"eventSource": "ec2.amazonaws.com",
"eventName": "CreateSecurityGroup",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "aws-sdk-go/1.25.38 (go1.13.4; darwin; amd64) APN/1.0 HashiCorp/1.0 Terraform/0.12.16 (+https://www.terraform.io)",
"requestParameters": {
"groupName": "misconfiguration_maker",
"groupDescription": "Created by misconfiguration maker for testing",
},
"responseElements": {
"requestId": "04b3590c-9bd0-438d-ac08-fc051183fc7a",
"_return": True,
"groupId": "sg-abcd",
},
"requestID": "04b3590c-9bd0-438d-ac08-fc051183fc7a",
"eventID": "fb84b07f-d850-47bd-8845-416d70417cc5",
"eventType": "AwsApiCall",
"recipientAccountId": "123456789012",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "sg-abcd",
"region": "us-east-1",
"type": "security_group",
}
],
)
def test_ec2_AuthorizeSecurityGroupIngress_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:1577212321281084000",
"arn": "arn:aws:sts::123456789012:assumed-role/role-name/1577212321281084000",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/role-name",
"accountId": "123456789012",
"userName": "role-name",
},
"webIdFederationData": {},
"attributes": {
"mfaAuthenticated": "true",
"creationDate": "2019-12-24T18:32:01Z",
},
},
},
"eventTime": "2019-12-24T19:12:59Z",
"eventSource": "ec2.amazonaws.com",
"eventName": "AuthorizeSecurityGroupIngress",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "aws-sdk-go/1.25.38 (go1.13.4; darwin; amd64) APN/1.0 HashiCorp/1.0 Terraform/0.12.16 (+https://www.terraform.io)",
"requestParameters": {
"groupId": "sg-abcd",
"ipPermissions": {
"items": [
{
"ipProtocol": "tcp",
"fromPort": 443,
"toPort": 443,
"groups": {},
"ipRanges": {"items": [{"cidrIp": "0.0.0.0/0"}]},
"ipv6Ranges": {},
"prefixListIds": {},
}
]
},
},
"responseElements": {
"requestId": "3deb0532-605a-4b45-89cc-03ffa8362e41",
"_return": True,
},
"requestID": "3deb0532-605a-4b45-89cc-03ffa8362e41",
"eventID": "717b1b7e-e94f-4283-adff-e822c1b2e153",
"eventType": "AwsApiCall",
"recipientAccountId": "123456789012",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "sg-abcd",
"region": "us-east-1",
"type": "security_group",
}
],
)
def test_elb_RegisterInstancesWithLoadBalancer_translation(self):
event = {
"version": "0",
"id": "c2a207fb-cb90-7068-d4a9-6240d2cdd127",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.sqs",
"account": "123456789012",
"time": "2019-12-16T17:29:19Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "ABCDEFGHIJKLMNOPQRSTU:1577386646470489000",
"arn": "arn:aws:sts::123456789012:assumed-role/role-name/1577386646470489000",
"accountId": "123456789012",
"accessKeyId": "<KEY>",
"sessionContext": {
"sessionIssuer": {
"type": "Role",
"principalId": "ABCDEFGHIJKLMNOPQRSTU",
"arn": "arn:aws:iam::123456789012:role/role-name",
"accountId": "123456789012",
"userName": "role-name",
},
"webIdFederationData": {},
"attributes": {
"mfaAuthenticated": "true",
"creationDate": "2019-12-26T18:57:26Z",
},
},
},
"eventTime": "2019-12-26T19:45:07Z",
"eventSource": "elasticloadbalancing.amazonaws.com",
"eventName": "RegisterInstancesWithLoadBalancer",
"awsRegion": "us-east-1",
"sourceIPAddress": "1.1.1.1",
"userAgent": "aws-sdk-go/1.25.38 (go1.13.4; darwin; amd64) APN/1.0 HashiCorp/1.0 Terraform/0.12.16 (+https://www.terraform.io)",
"requestParameters": {
"loadBalancerName": "misconfiguration-maker",
"instances": [{"instanceId": "i-abcd"}],
},
"responseElements": {
"instances": [{"instanceId": "i-abcd"}]
},
"requestID": "a896a473-55d0-4ece-b002-bfd4d60854e6",
"eventID": "403f2d60-bc4f-4748-8738-1874f1d5d4a9",
"eventType": "AwsApiCall",
"apiVersion": "2012-06-01",
"recipientAccountId": "123456789012",
},
}
assert_equal(
translate_event(event),
[
{
"account": "123456789012",
"id": "misconfiguration-maker",
"region": "us-east-1",
"type": "elb",
}
],
)
| 2.28125
| 2
|
blog/views/user.py
|
maxis1314/pyutils
| 2
|
12776230
|
# coding: utf-8
from flask import Flask,request,session,g,redirect,url_for,Blueprint
from flask import abort,render_template,flash
from helpers import getAvatar
import config
#from .base import BaseHandler
import base
config = config.rec()
user = Blueprint('user', __name__)
#class LoginHandler(BaseHandler):
@user.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
if base.isAdmin():
return redirect("/")
else:
return render_template("login.html",getAvatar=getAvatar)
username = request.form['username']
password = request.form['password']
if base.userAuth(username, password):
base.currentUserSet(username)
return redirect("/")
else:
return redirect("/login")
#class LogoutHandler(BaseHandler):
@user.route('/logout')
def logout():
session.pop('user',None)
return redirect('/login')
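# Hedged wiring sketch (not part of the original module): how this blueprint
# might be registered on an application; the throwaway Flask app is an assumption.
if __name__ == "__main__":
    demo_app = Flask(__name__)
    demo_app.register_blueprint(user)
    print(demo_app.url_map)  # should list the /login and /logout rules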
| 2.3125
| 2
|
lesson16n3_projects/wcsc/auto_gen/code/states1/reply_agree.py
|
muzudho/py-state-machine-practice
| 0
|
12776231
|
class ReplyAgreeState():
def update(self, req):
        # TODO: read the input message
msg = ""
        # branch on the message content
if msg == 'start':
return ['Game']
else:
raise ValueError("Unexpected condition")
| 2.609375
| 3
|
display-patterns/Hierarchies/Pruebas/A62Tree_Map_Matplotlib.py
|
cimat/data-visualization-patterns
| 9
|
12776232
|
<reponame>cimat/data-visualization-patterns<gh_stars>1-10
import pylab
import random
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from functools import reduce  # reduce is not a builtin in Python 3
class Treemap:
def __init__(self, tree, iter_method, size_method, color_method):
self.ax = pylab.subplot(111,aspect='equal')
pylab.subplots_adjust(left=0, right=1, top=1, bottom=0)
self.ax.set_xticks([])
self.ax.set_yticks([])
self.size_method = size_method
self.iter_method = iter_method
self.color_method = color_method
self.addnode(tree)
def addnode(self, node, lower=[0,0], upper=[1,1], axis=0):
axis = axis % 2
self.draw_rectangle(lower, upper, node)
width = upper[axis] - lower[axis]
try:
for child in self.iter_method(node):
upper[axis] = lower[axis] + (width * float(size(child))) / size(node)
self.addnode(child, list(lower), list(upper), axis + 1)
lower[axis] = upper[axis]
except TypeError:
pass
def draw_rectangle(self, lower, upper, node):
r = Rectangle( lower, upper[0]-lower[0], upper[1] - lower[1],
edgecolor='k',
facecolor= self.color_method(node))
self.ax.add_patch(r)
if __name__ == '__main__':
size_cache = {}
def size(thing):
if isinstance(thing, int):
return thing
if thing in size_cache:
return size_cache[thing]
else:
size_cache[thing] = reduce(int.__add__, [size(x) for x in thing])
return size_cache[thing]
def random_color(thing):
return (random.random(),random.random(),random.random())
tree=((2,12),((4,(1,2)),(8,(1,2))))
Treemap(tree, iter, size, random_color)
plt.show()
| 3.03125
| 3
|
service2/application/routes.py
|
K1610174/Multi-service-QA-SFIA2
| 0
|
12776233
|
<reponame>K1610174/Multi-service-QA-SFIA2
from flask import redirect, url_for, Response, request
from application import app
import requests
import random
@app.route('/')
@app.route('/color', methods=['GET'])
def color():
color_list=["red","orange","yellow","green","blue","indigo","violet","ivory","gray","black","pink"]
color = random.choice(color_list)
return Response(color, mimetype='text/plain')
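# Hedged test sketch (not part of the original service): exercise the /color
# route with Flask's built-in test client; no running server is needed.
if __name__ == "__main__":
    with app.test_client() as client:
        response = client.get('/color')
        assert response.status_code == 200
        print(response.get_data(as_text=True))  # one of the eleven colour names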
| 2.796875
| 3
|
Python/ad63.py
|
AungWinnHtut/CStutorial
| 0
|
12776234
|
# Guess password and output the score
chocolate = 2
playerlives = 1
playername = "Aung"
# this loop clears the screen
for i in range(1, 35):
print()
bonus = 0
numbercorrect = 0
# the player must try to guess the password
print("Now you must ener each letter that you remember ")
print("You will be given 3 times")
# add code here for a while loop using that counts from 1 to 3, so player has 3 guesses:
i = 0
while i < 3:
i = i + 1
letter = input("Try number " + str(i)+" : ")
if letter == "A" or letter == "R" or letter == "T":
numbercorrect = numbercorrect + 1
print("CORRECT - welldone")
else:
print("Wrong - sorry")
guess = input(
"NOW try and guess the password **** - the clue is in this line four times. Use the letters you were gives to help : ")
if guess == "star":
print("You are a star - you have opened the treasure chest of sweets and earned 1000 points")
bonus = 1000
score = (chocolate * 50) + (playerlives * 60) + bonus
# add code here to output playername
# add code here to output the number of bars of chocolate the player has
# add code here to output number of lives he player has
# add code here to output number of bonus points the player has
# add code here to output the player's score
print("Player Name : " + playername)
print("Total Chocolate Bar = " + str(chocolate))
print("Playerlives = " + str(playerlives))
print("Bonus point = " + str(bonus))
print("Player's Score = " + str(score))
# end
| 4.125
| 4
|
startleft/config/paths.py
|
iriusrisk/startleft
| 9
|
12776235
|
import os
default_cf_mapping_files = [os.path.dirname(__file__) + '/default-cloudformation-mapping.yaml']
| 1.304688
| 1
|
2_writeups/3_robot_exploitation/tutorial7/example5.py
|
araujorayza/robot_hacking_manual
| 141
|
12776236
|
#!/usr/bin/env python
from pwn import *
import os
# Exploiting vulnerable code narnia1.c:
#
# #include <stdio.h>
#
# int main(){
# int (*ret)();
#
# if(getenv("EGG")==NULL){
# printf("Give me something to execute at the env-variable EGG\n");
# exit(1);
# }
#
# printf("Trying to execute EGG!\n");
# ret = getenv("EGG");
# ret();
#
# return 0;
# }
# Define the context of the working machine
context(arch='amd64', os='linux')
# Compile the binary
log.info("Compiling the binary narnia1_local")
os.system('gcc narnia1.c -g -o narnia1_local -fno-stack-protector -z execstack')
# os.system('gcc narnia1.c -g -m32 -o narnia1_local -fno-stack-protector -z execstack')
# Get a simple shellcode
log.info("Putting together simple shellcode")
shellcode = asm(shellcraft.amd64.sh(), arch='amd64')
# print(shellcode)
log.info("Introduce shellcode in EGG env. variable")
os.environ["EGG"] = shellcode
log.info("Launching narnia1_local")
sh = process('narnia1_local')
sh.interactive()
| 2.84375
| 3
|
utils/georeferencer.py
|
DominikSpiljak/gis-backend
| 0
|
12776237
|
from geopy.geocoders import ArcGIS
import pyproj
class Georeferencer:
def __init__(self, crs):
self.georeferencer = ArcGIS()
self.transformer = pyproj.Transformer.from_crs("EPSG:4326", f"EPSG:{crs}")
def georeference(self, addresses):
result = {}
for address in addresses:
location = self.georeferencer.geocode(address)
x, y = self.transformer.transform(location.latitude, location.longitude)
result[address] = {"x": x, "y": y}
return result
if __name__ == "__main__":
    refer = Georeferencer(crs=3765)  # crs is required; 3765 (HTRS96/TM) is an assumed example value
print(
refer.georeference(
[
"Špičkovina",
"Trg <NAME>",
]
)
)
| 3.171875
| 3
|
metaflow/plugins/aws/aws_utils.py
|
Netflix/metaflow
| 5,821
|
12776238
|
<gh_stars>1000+
import re
def get_docker_registry(image_uri):
"""
Explanation:
(.+?(?:[:.].+?)\/)? - [GROUP 0] REGISTRY
.+? - A registry must start with at least one character
(?:[:.].+?)\/ - A registry must have ":" or "." and end with "/"
? - Make a registry optional
(.*?) - [GROUP 1] REPOSITORY
.*? - Get repository name until separator
(?:[@:])? - SEPARATOR
?: - Don't capture separator
[@:] - The separator must be either "@" or ":"
? - The separator is optional
((?<=[@:]).*)? - [GROUP 2] TAG / DIGEST
    (?<=[@:]) - A tag / digest must be preceded by "@" or ":"
.* - Capture rest of tag / digest
? - A tag / digest is optional
Examples:
image
- None
- image
- None
example/image
- None
- example/image
- None
example/image:tag
- None
- example/image
- tag
example.domain.com/example/image:tag
- example.domain.com/
- example/image
- tag
192.168.127.12:123/example/image:tag
- 192.168.127.12:123/
- example/image
- tag
example.domain.com/example/image@sha256:45b23dee0
- example.domain.com/
- example/image
- sha256:45b23dee0
"""
pattern = re.compile(r"^(.+?(?:[:.].+?)\/)?(.*?)(?:[@:])?((?<=[@:]).*)?$")
registry, repository, tag = pattern.match(image_uri).groups()
if registry is not None:
registry = registry.rstrip("/")
return registry
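# Illustrative check (not part of the original module), mirroring the URIs in
# the docstring above; note that only the registry component is returned here.
if __name__ == "__main__":
    assert get_docker_registry("example.domain.com/example/image:tag") == "example.domain.com"
    assert get_docker_registry("192.168.127.12:123/example/image:tag") == "192.168.127.12:123"
    assert get_docker_registry("example/image:tag") is None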
| 2.90625
| 3
|
tests/test_hashlib_scrypt.py
|
isabella232/pynacl
| 0
|
12776239
|
# Copyright 2013 <NAME> and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from binascii import unhexlify
import pytest
import nacl.bindings
import nacl.encoding
import nacl.hashlib
from nacl.exceptions import UnavailableError
# Test vectors from rfc 7914, Page 13
# scrypt (P="", S="",
# N=16, r=1, p=1, dklen=64) =
# 77 d6 57 62 38 65 7b 20 3b 19 ca 42 c1 8a 04 97
# f1 6b 48 44 e3 07 4a e8 df df fa 3f ed e2 14 42
# fc d0 06 9d ed 09 48 f8 32 6a 75 3a 0f c8 1f 17
# e8 d3 e0 fb 2e 0d 36 28 cf 35 e2 0c 38 d1 89 06
#
# scrypt (P="password", S="NaCl",
# N=1024, r=8, p=16, dkLen=64) =
# fd ba be 1c 9d 34 72 00 78 56 e7 19 0d 01 e9 fe
# 7c 6a d7 cb c8 23 78 30 e7 73 76 63 4b 37 31 62
# 2e af 30 d9 2e 22 a3 88 6f f1 09 27 9d 98 30 da
# c7 27 af b9 4a 83 ee 6d 83 60 cb df a2 cc 06 40
#
# scrypt (P="pleaseletmein", S="SodiumChloride",
# N=16384, r=8, p=1, dkLen=64) =
# 70 23 bd cb 3a fd 73 48 46 1c 06 cd 81 fd 38 eb
# fd a8 fb ba 90 4f 8e 3e a9 b5 43 f6 54 5d a1 f2
# d5 43 29 55 61 3f 0f cf 62 d4 97 05 24 2a 9a f9
# e6 1e 85 dc 0d 65 1e 40 df cf 01 7b 45 57 58 87
#
# scrypt (P="pleaseletmein", S="SodiumChloride",
# N=1048576, r=8, p=1, dkLen=64) =
# 21 01 cb 9b 6a 51 1a ae ad db be 09 cf 70 f8 81
# ec 56 8d 57 4a 2f fd 4d ab e5 ee 98 20 ad aa 47
# 8e 56 fd 8f 4b a5 d0 9f fa 1c 6d 92 7c 40 f4 c3
# 37 30 40 49 e8 a9 52 fb cb f4 5c 6f a7 7a 41 a4
RFC_7914_VECTORS = [
(
b"",
b"",
16,
1,
1,
64,
(
b"77 d6 57 62 38 65 7b 20 3b 19 ca 42 c1 8a 04 97"
b"f1 6b 48 44 e3 07 4a e8 df df fa 3f ed e2 14 42"
b"fc d0 06 9d ed 09 48 f8 32 6a 75 3a 0f c8 1f 17"
b"e8 d3 e0 fb 2e 0d 36 28 cf 35 e2 0c 38 d1 89 06"
),
),
(
b"password",
b"NaCl",
1024,
8,
16,
64,
(
b"fd ba be 1c 9d 34 72 00 78 56 e7 19 0d 01 e9 fe"
b"7c 6a d7 cb c8 23 78 30 e7 73 76 63 4b 37 31 62"
b"2e af 30 d9 2e 22 a3 88 6f f1 09 27 9d 98 30 da"
b"c7 27 af b9 4a 83 ee 6d 83 60 cb df a2 cc 06 40"
),
),
(
b"pleaseletmein",
b"SodiumChloride",
16384,
8,
1,
64,
(
b"70 23 bd cb 3a fd 73 48 46 1c 06 cd 81 fd 38 eb"
b"fd a8 fb ba 90 4f 8e 3e a9 b5 43 f6 54 5d a1 f2"
b"d5 43 29 55 61 3f 0f cf 62 d4 97 05 24 2a 9a f9"
b"e6 1e 85 dc 0d 65 1e 40 df cf 01 7b 45 57 58 87"
),
),
(
b"pleaseletmein",
b"SodiumChloride",
1048576,
8,
1,
64,
(
b"21 01 cb 9b 6a 51 1a ae ad db be 09 cf 70 f8 81"
b"ec 56 8d 57 4a 2f fd 4d ab e5 ee 98 20 ad aa 47"
b"8e 56 fd 8f 4b a5 d0 9f fa 1c 6d 92 7c 40 f4 c3"
b"37 30 40 49 e8 a9 52 fb cb f4 5c 6f a7 7a 41 a4"
),
),
]
@pytest.mark.skipif(
not nacl.hashlib.SCRYPT_AVAILABLE,
reason="Requires full build of libsodium",
)
@pytest.mark.parametrize(
("password", "salt", "n", "r", "p", "dklen", "expected"), RFC_7914_VECTORS
)
def test_hashlib_scrypt_api(
password: bytes,
salt: bytes,
n: int,
r: int,
p: int,
dklen: int,
expected: bytes,
):
_exp = unhexlify(expected.replace(b" ", b""))
dgst = nacl.hashlib.scrypt(
        password, salt=salt, n=n, r=r, p=p, dklen=dklen, maxmem=2 * (1024**3)
)
assert _exp == dgst
@pytest.mark.skipif(
nacl.hashlib.SCRYPT_AVAILABLE, reason="Requires minimal build of libsodium"
)
def test_hashlib_scrypt_unavailable():
with pytest.raises(UnavailableError):
nacl.hashlib.scrypt(b"")
| 1.625
| 2
|
textx/scoping/__init__.py
|
stanislaw/textX
| 346
|
12776240
|
<reponame>stanislaw/textX
#######################################################################
# Name: scoping.__init__.py
# Purpose: Meta-model / scope providers.
# Author: <NAME>
# License: MIT License
#######################################################################
import glob
import os
import errno
from os.path import join, exists, abspath
def metamodel_for_file_or_default_metamodel(filename, the_metamodel):
from textx import metamodel_for_file
from textx.exceptions import TextXRegistrationError
try:
return metamodel_for_file(filename)
except TextXRegistrationError:
return the_metamodel
# -----------------------------------------------------------------------------
# Scope helper classes:
# -----------------------------------------------------------------------------
class Postponed(object):
"""
Return an object of this class to postpone a reference resolution.
If you get circular dependencies in resolution logic, an error
is raised.
"""
class ModelRepository(object):
"""
    This class is responsible for holding a set of (model-identifier, model)
    pairs as a dictionary.
    For some scope providers the model identifier is the absolute filename of
    the model.
"""
def __init__(self):
self.name_idx = 1
self.filename_to_model = {}
def has_model(self, filename):
return abspath(filename) in self.filename_to_model
def add_model(self, model):
if model._tx_filename:
filename = abspath(model._tx_filename)
else:
filename = 'builtin_model_{}'.format(self.name_idx)
self.name_idx += 1
self.filename_to_model[filename] = model
def remove_model(self, model):
filename = None
for f, m in self.filename_to_model.items():
if m == model:
filename = f
if filename:
# print("*** delete {}".format(filename))
del self.filename_to_model[filename]
def __contains__(self, filename):
return self.has_model(filename)
def __iter__(self):
return iter(self.filename_to_model.values())
def __len__(self):
return len(self.filename_to_model)
def __getitem__(self, filename):
return self.filename_to_model[filename]
def __setitem__(self, filename, model):
self.filename_to_model[filename] = model
class GlobalModelRepository(object):
"""
This class has the responsibility to hold two ModelRepository objects:
- one for model-local visible models
- one for all models (globally, starting from some root model).
    The second `ModelRepository`, `all_models`, caches already loaded models
    and prevents loading a model twice.
    The class allows loading local models visible to the current model. The
    current model is the model which references this `GlobalModelRepository` as
    its attribute `_tx_model_repository`.
When loading a new local model, the current `GlobalModelRepository`
forwards the embedded `ModelRepository` `all_models` to the new
`GlobalModelRepository` of the next model. This is done using the
`pre_ref_resolution_callback` to set the necessary information before
resolving the references in the new loaded model.
"""
def __init__(self, all_models=None):
"""
Create a new repo for a model.
Args:
all_models: models to be added to this new repository.
"""
self.local_models = ModelRepository() # used for current model
if all_models is not None:
self.all_models = all_models # used to reuse already loaded models
else:
self.all_models = ModelRepository()
def remove_model(self, model):
self.all_models.remove_model(model)
self.local_models.remove_model(model)
def remove_models(self, models):
for m in models:
self.remove_model(m)
def load_models_using_filepattern(
self, filename_pattern, model, glob_args, is_main_model=False,
encoding='utf-8', add_to_local_models=True, model_params=None):
"""
Add a new model to all relevant objects.
Args:
filename_pattern: models to be loaded
model: model holding the loaded models in its _tx_model_repository
field (may be None).
glob_args: arguments passed to the glob.glob function.
Returns:
the list of loaded models
"""
from textx import get_metamodel
if model is not None:
self.update_model_in_repo_based_on_filename(model)
the_metamodel = get_metamodel(model) # default metamodel
else:
the_metamodel = None
filenames = glob.glob(filename_pattern, **glob_args)
if len(filenames) == 0:
raise IOError(
errno.ENOENT, os.strerror(errno.ENOENT), filename_pattern)
loaded_models = []
for filename in filenames:
the_metamodel = metamodel_for_file_or_default_metamodel(
filename, the_metamodel)
loaded_models.append(
self.load_model(the_metamodel, filename, is_main_model,
encoding=encoding,
add_to_local_models=add_to_local_models,
model_params=model_params))
return loaded_models
def load_model_using_search_path(
self, filename, model, search_path, is_main_model=False,
encoding='utf8', add_to_local_models=True, model_params=None):
"""
Add a new model to all relevant objects
Args:
filename: models to be loaded
model: model holding the loaded models in its _tx_model_repository
field (may be None).
search_path: list of search directories.
Returns:
the loaded model
"""
from textx import get_metamodel
if model:
self.update_model_in_repo_based_on_filename(model)
for the_path in search_path:
full_filename = join(the_path, filename)
# print(full_filename)
if exists(full_filename):
if model is not None:
the_metamodel = get_metamodel(model)
else:
the_metamodel = None
the_metamodel = metamodel_for_file_or_default_metamodel(
filename, the_metamodel)
return self.load_model(the_metamodel,
full_filename,
is_main_model,
encoding=encoding,
add_to_local_models=add_to_local_models,
model_params=model_params)
raise IOError(
errno.ENOENT, os.strerror(errno.ENOENT), filename)
def load_model(
self, the_metamodel, filename, is_main_model, encoding='utf-8',
add_to_local_models=True, model_params=None):
"""
Load a single model
Args:
the_metamodel: the metamodel used to load the model
filename: the model to be loaded (if not cached)
Returns:
the loaded/cached model
"""
assert model_params is not None,\
"model_params needs to be specified"
filename = abspath(filename)
if not self.local_models.has_model(filename):
if self.all_models.has_model(filename):
# print("CACHED {}".format(filename))
new_model = self.all_models[filename]
else:
# print("LOADING {}".format(filename))
# all models loaded here get their references resolved from the
# root model
new_model = the_metamodel.internal_model_from_file(
filename, pre_ref_resolution_callback=lambda
other_model: self.pre_ref_resolution_callback(other_model),
is_main_model=is_main_model, encoding=encoding,
model_params=model_params)
self.all_models[filename] = new_model
# print("ADDING {}".format(filename))
if add_to_local_models:
self.local_models[filename] = new_model
else:
# print("LOCALLY CACHED {}".format(filename))
pass
assert filename in self.all_models # to be sure...
return self.all_models[filename]
def _add_model(self, model):
filename = self.update_model_in_repo_based_on_filename(model)
# print("ADDED {}".format(filename))
self.local_models[filename] = model
def update_model_in_repo_based_on_filename(self, model):
"""
Adds a model to the repo (not initially visible)
Args:
model: the model to be added. If the model
has no filename, a name is invented
Returns:
the filename of the model added to the repo
"""
if model._tx_filename is None:
for fn in self.all_models.filename_to_model:
if self.all_models.filename_to_model[fn] == model:
# print("UPDATED/CACHED {}".format(fn))
return fn
i = 0
while self.all_models.has_model("anonymous{}".format(i)):
i += 1
myfilename = "anonymous{}".format(i)
self.all_models[myfilename] = model
else:
myfilename = abspath(model._tx_filename)
if (not self.all_models.has_model(myfilename)):
self.all_models[myfilename] = model
# print("UPDATED/ADDED/CACHED {}".format(myfilename))
return myfilename
def pre_ref_resolution_callback(self, other_model):
"""
internal: used to store a model after parsing into the repository
Args:
other_model: the parsed model
Returns:
nothing
"""
filename = other_model._tx_filename
# print("PRE-CALLBACK -> {}".format(filename))
assert (filename)
filename = abspath(filename)
other_model._tx_model_repository = \
GlobalModelRepository(self.all_models)
self.all_models[filename] = other_model
class ModelLoader(object):
"""
This class is an interface to mark a scope provider as an additional model
loader.
"""
def __init__(self):
pass
def load_models(self, model):
pass
def get_all_models_including_attached_models(model):
"""
get a list of all models stored within a model
(including the owning model).
@deprecated (BIC): use model_object.get_included_models()
Args:
model: the owning model
Returns:
a list of all models
"""
return get_included_models(model)
def get_included_models(model):
"""
get a list of all models stored within a model
(including the owning model).
Args:
model: the owning model
Returns:
a list of all models
"""
if (hasattr(model, "_tx_model_repository")):
models = list(model._tx_model_repository.all_models)
if model not in models:
models.append(model)
else:
models = [model]
return models
def is_file_included(filename, model):
"""
Determines if a file is included by a model. Also checks
for indirect inclusions (files included by included files).
Args:
filename: the file to be checked (filename is normalized)
model: the owning model
Returns:
True if the file is included, else False
(Note: if no _tx_model_repository is present,
the function always returns False)
"""
if (hasattr(model, "_tx_model_repository")):
all_entries = model._tx_model_repository.all_models
return all_entries.has_model(filename)
else:
return False
def remove_models_from_repositories(models,
models_to_be_removed):
"""
Remove models from all relevant repositories (_tx_model_repository
of models and related metamodel(s), if applicable).
Args:
models: the list of models from
which the models_to_be_removed have to be removed.
models_to_be_removed: models to be removed
Returns:
None
"""
assert isinstance(models, list)
for model in models:
if hasattr(model._tx_metamodel, "_tx_model_repository"):
model._tx_metamodel. \
_tx_model_repository.remove_models(models_to_be_removed)
if hasattr(model, "_tx_model_repository"):
model._tx_model_repository.remove_models(models_to_be_removed)
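# Hedged usage sketch (assumes a textX grammar `my_grammar.tx` and a model file
# `my_model.dsl` exist on disk; both names are placeholders): list every file
# pulled in by a loaded model via the helpers defined above.
if __name__ == "__main__":
    from textx import metamodel_from_file
    mm = metamodel_from_file('my_grammar.tx')
    main_model = mm.model_from_file('my_model.dsl')
    for included in get_included_models(main_model):
        print(included._tx_filename)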
| 2.3125
| 2
|
python/runtime/pai/cluster_conf.py
|
lhw362950217/sqlflow
| 2
|
12776241
|
<gh_stars>1-10
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from runtime.diagnostics import SQLFlowDiagnostic
def get_cluster_config(attrs):
"""Get PAI cluster config from attrs
Args:
attrs: input config
Returns:
The merged config by attrs and default
"""
default_map = {
"train.num_ps": 0,
"train.num_workers": 1,
"train.worker_cpu": 400,
"train.worker_gpu": 0,
"train.ps_cpu": 200,
"train.ps_gpu": 0,
"train.num_evaluator": 0,
"train.evaluator_cpu": 200,
"train.evaluator_gpu": 0,
}
update = dict([(k, v) for (k, v) in attrs.items() if k in default_map])
if not all(isinstance(v, int) for v in update.values()):
raise SQLFlowDiagnostic("value for cluster config should be int")
default_map.update(attrs)
ps = {
"count": default_map["train.num_ps"],
"cpu": default_map["train.ps_cpu"],
"gpu": default_map["train.ps_gpu"],
}
worker = {
"count": default_map["train.num_workers"],
"cpu": default_map["train.worker_cpu"],
"gpu": default_map["train.worker_gpu"],
}
# FIXME(weiguoz): adhoc for running distributed xgboost train on pai
if worker["count"] > 1 and ps["count"] < 1:
ps["count"] = 1
if default_map["train.num_evaluator"] == 0:
evaluator = None
elif default_map["train.num_evaluator"] == 1:
evaluator = {
"count": default_map["train.num_evaluator"],
"cpu": default_map["train.evaluator_cpu"],
"gpu": default_map["train.evaluator_gpu"],
}
else:
raise SQLFlowDiagnostic("train.num_evaluator should only be 1 or 0")
conf = {"ps": ps, "worker": worker}
if evaluator is not None:
conf["evaluator"] = evaluator
return conf
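# Minimal usage sketch (invocation assumed, not part of the original module):
# requesting two workers triggers the adhoc rule above that forces a single PS.
if __name__ == "__main__":
    conf = get_cluster_config({"train.num_workers": 2, "train.worker_cpu": 800})
    assert conf["worker"] == {"count": 2, "cpu": 800, "gpu": 0}
    assert conf["ps"]["count"] == 1
    assert "evaluator" not in conf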
| 2.015625
| 2
|
tests/integration/client_test.py
|
superdosh/betterreads
| 0
|
12776242
|
"""Client is the primary interface for interacting with the Goodreads API. This integration test makes live API
calls and asserts that the correct objects are being returned. For more comprehensive checks that each of the interface
objects is created and functions properly when given the correct inputs, see the unit test suite."""
import os
import pytest
from betterreads.author import GoodreadsAuthor
from betterreads.book import GoodreadsBook
from betterreads.client import GoodreadsClient, GoodreadsClientException
from betterreads.comment import GoodreadsComment
from betterreads.event import GoodreadsEvent
from betterreads.group import GoodreadsGroup
from betterreads.review import GoodreadsReview
class TestClient:
@pytest.fixture
def test_client_fixture(self):
return GoodreadsClient(
os.environ.get("GOODREADS_KEY"), os.environ.get("GOODREADS_SECRET")
)
def test_auth_user_no_session(self, test_client_fixture):
with pytest.raises(GoodreadsClientException):
test_client_fixture.auth_user()
def test_author_by_id(self, test_client_fixture):
author = test_client_fixture.author(8566992)
assert isinstance(author, GoodreadsAuthor)
def test_author_by_name(self, test_client_fixture):
author = test_client_fixture.find_author("<NAME>")
assert isinstance(author, GoodreadsAuthor)
def test_book_by_id(self, test_client_fixture):
book = test_client_fixture.book(123455)
assert isinstance(book, GoodreadsBook)
def test_search_books(self, test_client_fixture):
books = test_client_fixture.search_books(
q="<NAME>", search_field="author"
)
assert len(books) > 1
assert all(isinstance(book, GoodreadsBook) for book in books)
def test_book_no_options_given(self, test_client_fixture):
with pytest.raises(GoodreadsClientException):
test_client_fixture.book(None, None)
def test_search_books_with_one_book(self, test_client_fixture):
books = test_client_fixture.search_books(
"Childhood, Boyhood, Truth: From an African Youth to the Selfish Gene"
)
assert len(books) == 1
assert all(isinstance(book, GoodreadsBook) for book in books)
def test_group_by_id(self, test_client_fixture):
group = test_client_fixture.group(8095)
assert isinstance(group, GoodreadsGroup)
def test_find_groups(self, test_client_fixture):
groups = test_client_fixture.find_groups("Goodreads Developers")
assert len(groups) > 1
assert all(isinstance(group, GoodreadsGroup) for group in groups)
def test_list_events(self, test_client_fixture):
events = test_client_fixture.list_events(80126)
assert len(events) > 0
assert all(isinstance(event, GoodreadsEvent) for event in events)
def test_search_books_total_pages(self, test_client_fixture):
num_pages = test_client_fixture.search_books_total_pages(
q="<NAME>", search_field="author"
)
assert isinstance(num_pages, int)
def test_search_books_all_pages(self, test_client_fixture):
books = test_client_fixture.search_books_all_pages(
q="<NAME>", search_field="author"
)
assert len(books) > 10
assert all(isinstance(book, GoodreadsBook) for book in books)
def test_get_review(self, test_client_fixture):
review = test_client_fixture.review(12345)
assert isinstance(review, GoodreadsReview)
def test_list_comments_review(self, test_client_fixture):
comments = test_client_fixture.list_comments("review", 1618778364)
assert all(isinstance(comment, GoodreadsComment) for comment in comments)
def test_get_recent_reviews(self, test_client_fixture):
reviews = test_client_fixture.recent_reviews()
assert all(isinstance(review, GoodreadsReview) for review in reviews)
| 2.625
| 3
|
src/services/middleware/workers_io.py
|
www439198341/V2RayCloudSpider
| 0
|
12776243
|
# -*- coding: utf-8 -*-
# Time : 2021/12/22 16:15
# Author : QIN2DIM
# Github : https://github.com/QIN2DIM
# Description:
import ast
from datetime import timedelta, datetime
from typing import List, Optional, Union
from redis.exceptions import ConnectionError, ResponseError
from services.middleware.stream_io import RedisClient
from services.settings import TIME_ZONE_CN, POOL_CAP
class EntropyHeap(RedisClient):
def __init__(self):
super().__init__()
def update(self, local_entropy: List[dict]):
self.db.lpush(self.PREFIX_ENTROPY, str(local_entropy))
def sync(self) -> List[dict]:
try:
response = self.db.lrange(self.PREFIX_ENTROPY, 0, 1)
if response:
remote_entropy = ast.literal_eval(
self.db.lrange(self.PREFIX_ENTROPY, 0, 1)[0]
)
return remote_entropy
return []
except ConnectionError:
return []
def set_new_cap(self, new_cap: int):
"""
        Set a new unified queue capacity.
:param new_cap:
:return:
"""
self.db.set(name=self.PREFIX_CAPACITY, value=new_cap)
def get_unified_cap(self) -> int:
"""
        Return the unified queue capacity; if none has been set, fall back to the value from the settings (POOL_CAP).
:return:
"""
_unified_cap = self.db.get(self.PREFIX_CAPACITY)
return int(_unified_cap) if _unified_cap else POOL_CAP
def is_empty(self) -> bool:
return not bool(self.db.llen(self.PREFIX_ENTROPY))
class MessageQueue(RedisClient):
def __init__(self):
super().__init__()
self.group_name = "tasks_group"
self.consumer_name = "hexo"
self.max_queue_size = 5600
self.SYNERGY_PROTOCOL = "SYNERGY"
self.automated()
def is_exists_group(self, group_name: str) -> bool:
try:
groups = self.db.xinfo_groups(self.PREFIX_STREAM)
for group in groups:
if group.get("name", "") == group_name:
return True
return False
except ResponseError:
return False
def automated(self) -> None:
if not self.is_exists_group(self.group_name):
self.db.xgroup_create(
self.PREFIX_STREAM, self.group_name, id="0", mkstream=True
)
def ack(self, message_id: str) -> None:
self.db.xack(self.PREFIX_STREAM, self.group_name, message_id)
def broadcast_synergy_context(self, context: Union[dict, str]) -> None:
context = str(context) if isinstance(context, dict) else context
synergy_context = {self.SYNERGY_PROTOCOL: context}
self.db.xadd(
name=self.PREFIX_STREAM,
fields=synergy_context,
maxlen=self.max_queue_size,
approximate=True,
)
def listen(self, count: Optional[int] = None, block: Optional[int] = None):
while True:
try:
task_queue = self.db.xreadgroup(
groupname=self.group_name,
consumername=self.consumer_name,
streams={self.PREFIX_STREAM: ">"},
count=count,
block=block,
)
except ConnectionError:
yield None
else:
if task_queue:
_, message = task_queue[0]
yield message
class AccessControl(RedisClient):
def __init__(self, token: Optional[str] = None):
super().__init__()
self.PREFIX_ACCESS_USER = "v2rss:access:user"
self.PREFIX_ACCESS_LIMIT = "v2rss:access:limit"
if token:
self.init_tracer(token)
def init_tracer(self, token: str) -> None:
self.PREFIX_ACCESS_USER += f":{token}"
self.PREFIX_ACCESS_LIMIT += f":{token}"
        # register automatically
self._register()
def _register(self) -> None:
self.db.setnx(self.PREFIX_ACCESS_USER, 0)
def update(self) -> None:
self.db.setnx(self.PREFIX_ACCESS_LIMIT, 0)
self.db.incr(self.PREFIX_ACCESS_LIMIT)
self.db.incr(self.PREFIX_ACCESS_USER)
def _capture_access_trace(self):
_lifecycle = 10
self.db.setex(
name=self.PREFIX_ACCESS_LIMIT,
time=timedelta(seconds=_lifecycle),
value=str(datetime.now(TIME_ZONE_CN) + timedelta(seconds=_lifecycle)),
)
def is_user(self) -> bool:
return bool(self.db.exists(self.PREFIX_ACCESS_USER))
def is_repeat(self) -> bool:
return bool(self.db.exists(self.PREFIX_ACCESS_LIMIT))
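# Hedged usage sketch (assumes a reachable Redis instance configured for
# RedisClient; the entropy payload below is purely illustrative):
if __name__ == "__main__":
    heap = EntropyHeap()
    heap.update([{"name": "demo-entry", "weight": 1}])
    print(heap.get_unified_cap(), heap.sync())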
| 2.359375
| 2
|
datahub/core/test/test_reversion.py
|
Staberinde/data-hub-api
| 6
|
12776244
|
<filename>datahub/core/test/test_reversion.py<gh_stars>1-10
from unittest import mock
import pytest
from datahub.core.reversion import EXCLUDED_BASE_MODEL_FIELDS, register_base_model
class TestRegisterBaseModel:
"""Tests for the `register_base_model` decorator."""
@mock.patch('datahub.core.reversion.reversion')
def test_without_args(self, mocked_reversion):
"""Test that the default exclude is used if no argument is passed in."""
register_base_model()
assert mocked_reversion.register.call_args_list == [
mock.call(exclude=EXCLUDED_BASE_MODEL_FIELDS),
]
@mock.patch('datahub.core.reversion.reversion')
def test_with_extra_exclude(self, mocked_reversion):
"""Test that if extra_exclude is passed in, it is appended to the default exclude list."""
register_base_model(extra_exclude=('other',))
assert mocked_reversion.register.call_args_list == [
mock.call(exclude=(*EXCLUDED_BASE_MODEL_FIELDS, 'other')),
]
@mock.patch('datahub.core.reversion.reversion')
def test_with_explicit_exclude(self, mocked_reversion):
"""Test that if exclude is passed in, it overrides the default one."""
register_base_model(exclude=('other',))
assert mocked_reversion.register.call_args_list == [
mock.call(exclude=('other',)),
]
@mock.patch('datahub.core.reversion.reversion')
def test_fails_with_extra_exclude_and_exclude(self, mocked_reversion):
"""Test that extra_exclude and exclude cannot be passed in at the same time."""
with pytest.raises(AssertionError):
register_base_model(exclude=('other',), extra_exclude=('other',))
@mock.patch('datahub.core.reversion.reversion')
def test_with_other_args(self, mocked_reversion):
"""Test passing any other argument forwards it untouched."""
register_base_model(ignore_duplicates=False)
assert mocked_reversion.register.call_args_list == [
mock.call(
exclude=EXCLUDED_BASE_MODEL_FIELDS,
ignore_duplicates=False,
),
]
| 2.75
| 3
|
backend/base/urls/order_urls.py
|
sasan-sohrabi/proshop-DjangoReact
| 0
|
12776245
|
from django.urls import path
from base.views import order_views as views
urlpatterns = [
]
| 1.179688
| 1
|
knap_spo_relax.py
|
Patyrn/Divide-and-Learn
| 0
|
12776246
|
from Experiments import test_knapsack_SPO_unit, test_knapsack_SPO
"""
Example SPO-Relax experiments for knapsack benchmarks.
Dependencies:
gcc/8.3.0
openmpi/3.1.4
python/3.7.4
scikit-learn/0.23.1-python-3.7.4
gurobi/9.0.0
numpy/1.17.3-python-3.7.4
matplotlib/3.2.1-python-3.7.4
"""
capacities = [12,24,48,72,96,120,144,172,196,220]
kfolds = [0,1,2,3,4]
dest_folder = 'Tests/Knapsack/weighted/spo'
test_knapsack_SPO(capacities=capacities, is_shuffle=False, NUMBER_OF_RANDOM_TESTS=1, kfolds=kfolds, n_iter=5,
dest_folder=dest_folder, noise_level=0)
dest_folder = 'Tests/Knapsack/unit/spo'
capacities = [5,10,15,20,25,30,35,40]
test_knapsack_SPO_unit(capacities=capacities, is_shuffle=False, NUMBER_OF_RANDOM_TESTS=1, kfolds=kfolds, n_iter=5,
dest_folder=dest_folder, noise_level=0)
| 2.109375
| 2
|
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/NV/transform_feedback.py
|
JE-Chen/je_old_repo
| 0
|
12776247
|
<gh_stars>0
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_NV_transform_feedback'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_NV_transform_feedback',error_checker=_errors._error_checker)
GL_ACTIVE_VARYINGS_NV=_C('GL_ACTIVE_VARYINGS_NV',0x8C81)
GL_ACTIVE_VARYING_MAX_LENGTH_NV=_C('GL_ACTIVE_VARYING_MAX_LENGTH_NV',0x8C82)
GL_BACK_PRIMARY_COLOR_NV=_C('GL_BACK_PRIMARY_COLOR_NV',0x8C77)
GL_BACK_SECONDARY_COLOR_NV=_C('GL_BACK_SECONDARY_COLOR_NV',0x8C78)
GL_CLIP_DISTANCE_NV=_C('GL_CLIP_DISTANCE_NV',0x8C7A)
GL_GENERIC_ATTRIB_NV=_C('GL_GENERIC_ATTRIB_NV',0x8C7D)
GL_INTERLEAVED_ATTRIBS_NV=_C('GL_INTERLEAVED_ATTRIBS_NV',0x8C8C)
GL_LAYER_NV=_C('GL_LAYER_NV',0x8DAA)
GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_NV=_C('GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_NV',0x8C8A)
GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS_NV=_C('GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS_NV',0x8C8B)
GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_NV=_C('GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_NV',0x8C80)
GL_NEXT_BUFFER_NV=_C('GL_NEXT_BUFFER_NV',-2)
GL_PRIMITIVES_GENERATED_NV=_C('GL_PRIMITIVES_GENERATED_NV',0x8C87)
GL_PRIMITIVE_ID_NV=_C('GL_PRIMITIVE_ID_NV',0x8C7C)
GL_RASTERIZER_DISCARD_NV=_C('GL_RASTERIZER_DISCARD_NV',0x8C89)
GL_SEPARATE_ATTRIBS_NV=_C('GL_SEPARATE_ATTRIBS_NV',0x8C8D)
GL_SKIP_COMPONENTS1_NV=_C('GL_SKIP_COMPONENTS1_NV',-6)
GL_SKIP_COMPONENTS2_NV=_C('GL_SKIP_COMPONENTS2_NV',-5)
GL_SKIP_COMPONENTS3_NV=_C('GL_SKIP_COMPONENTS3_NV',-4)
GL_SKIP_COMPONENTS4_NV=_C('GL_SKIP_COMPONENTS4_NV',-3)
GL_TEXTURE_COORD_NV=_C('GL_TEXTURE_COORD_NV',0x8C79)
GL_TRANSFORM_FEEDBACK_ATTRIBS_NV=_C('GL_TRANSFORM_FEEDBACK_ATTRIBS_NV',0x8C7E)
GL_TRANSFORM_FEEDBACK_BUFFER_BINDING_NV=_C('GL_TRANSFORM_FEEDBACK_BUFFER_BINDING_NV',0x8C8F)
GL_TRANSFORM_FEEDBACK_BUFFER_MODE_NV=_C('GL_TRANSFORM_FEEDBACK_BUFFER_MODE_NV',0x8C7F)
GL_TRANSFORM_FEEDBACK_BUFFER_NV=_C('GL_TRANSFORM_FEEDBACK_BUFFER_NV',0x8C8E)
GL_TRANSFORM_FEEDBACK_BUFFER_SIZE_NV=_C('GL_TRANSFORM_FEEDBACK_BUFFER_SIZE_NV',0x8C85)
GL_TRANSFORM_FEEDBACK_BUFFER_START_NV=_C('GL_TRANSFORM_FEEDBACK_BUFFER_START_NV',0x8C84)
GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN_NV=_C('GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN_NV',0x8C88)
GL_TRANSFORM_FEEDBACK_RECORD_NV=_C('GL_TRANSFORM_FEEDBACK_RECORD_NV',0x8C86)
GL_TRANSFORM_FEEDBACK_VARYINGS_NV=_C('GL_TRANSFORM_FEEDBACK_VARYINGS_NV',0x8C83)
GL_VERTEX_ID_NV=_C('GL_VERTEX_ID_NV',0x8C7B)
@_f
@_p.types(None,_cs.GLuint,arrays.GLcharArray)
def glActiveVaryingNV(program,name):pass
@_f
@_p.types(None,_cs.GLenum)
def glBeginTransformFeedbackNV(primitiveMode):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint)
def glBindBufferBaseNV(target,index,buffer):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLintptr)
def glBindBufferOffsetNV(target,index,buffer,offset):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLintptr,_cs.GLsizeiptr)
def glBindBufferRangeNV(target,index,buffer,offset,size):pass
@_f
@_p.types(None,)
def glEndTransformFeedbackNV():pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLsizeiArray,arrays.GLuintArray,arrays.GLcharArray)
def glGetActiveVaryingNV(program,index,bufSize,length,size,type,name):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,arrays.GLintArray)
def glGetTransformFeedbackVaryingNV(program,index,location):pass
@_f
@_p.types(_cs.GLint,_cs.GLuint,arrays.GLcharArray)
def glGetVaryingLocationNV(program,name):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLintArray,_cs.GLenum)
def glTransformFeedbackAttribsNV(count,attribs,bufferMode):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLintArray,_cs.GLsizei,arrays.GLintArray,_cs.GLenum)
def glTransformFeedbackStreamAttribsNV(count,attribs,nbuffers,bufstreams,bufferMode):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLintArray,_cs.GLenum)
def glTransformFeedbackVaryingsNV(program,count,locations,bufferMode):pass
| 1.398438
| 1
|
tests/conftest.py
|
f213/richtypo.py
| 9
|
12776248
|
<gh_stars>1-10
def pytest_generate_tests(metafunc):
"""
Generate tests for ruledefs in yaml files according to their specs defined in-place
"""
if 'rule_name' in metafunc.fixturenames:
from richtypo.rules import load_from_file
rules = []
for ruledef in ['generic', 'ru', 'en']:
rules += [(name, rule) for name, rule in load_from_file(ruledef)]
metafunc.parametrize('rule_name, rule', rules)
| 2.34375
| 2
|
datasets/__init__.py
|
jonasvj/TFDE
| 0
|
12776249
|
root = 'data/'
import numpy as np
from ffjord.datasets.power import POWER
from ffjord.datasets.gas import GAS
from ffjord.datasets.hepmass import HEPMASS
from ffjord.datasets.miniboone import MINIBOONE
from ffjord.datasets.bsds300 import BSDS300
from .synthetic import EightGaussians
from .synthetic import Checkerboard
from .synthetic import TwoSpirals
from .mnist import MNIST_4x4, MNIST_7x7, MNIST_8x8, MNIST_16x16, MNIST_28x28
from utils import order_variables_partial_correlation
all_datasets = [
'power', 'gas', 'hepmass', 'miniboone', 'bsds300', '8gaussians',
'checkerboard', '2spirals', 'mnist_4x4', 'mnist_7x7', 'mnist_8x8',
'mnist_16x16', 'mnist_28x28']
def subsample_train_data(data, subsample_size):
rng = np.random.RandomState(seed=42)
rng.shuffle(data.trn.x)
data.trn.x = data.trn.x[:subsample_size]
def do_optimal_ordering(data, tr=False):
ordering = order_variables_partial_correlation(data.trn.x, tr=tr)
data.trn.x = data.trn.x[:, ordering]
data.val.x = data.val.x[:, ordering]
data.tst.x = data.tst.x[:, ordering]
def load_data(name, optimal_order=False, subsample_size=None, tr=False):
if name == 'power':
data = POWER()
if subsample_size is not None:
subsample_train_data(data, subsample_size)
if optimal_order:
do_optimal_ordering(data, tr=tr)
return data
elif name == 'gas':
data = GAS()
if subsample_size is not None:
subsample_train_data(data, subsample_size)
if optimal_order:
do_optimal_ordering(data, tr=tr)
return data
elif name == 'hepmass':
data = HEPMASS()
if subsample_size is not None:
subsample_train_data(data, subsample_size)
if optimal_order:
do_optimal_ordering(data, tr=tr)
return data
elif name == 'miniboone':
data = MINIBOONE()
if subsample_size is not None:
subsample_train_data(data, subsample_size)
if optimal_order:
do_optimal_ordering(data, tr=tr)
return data
elif name == 'bsds300':
data = BSDS300()
if subsample_size is not None:
subsample_train_data(data, subsample_size)
if optimal_order:
do_optimal_ordering(data, tr=tr)
return data
elif name == '8gaussians':
return EightGaussians()
elif name == 'checkerboard':
return Checkerboard()
elif name == '2spirals':
return TwoSpirals()
elif name == 'mnist_4x4':
return MNIST_4x4(optimal_order)
elif name == 'mnist_7x7':
return MNIST_7x7(optimal_order)
elif name == 'mnist_8x8':
return MNIST_8x8(optimal_order)
elif name == 'mnist_16x16':
return MNIST_16x16(optimal_order)
elif name == 'mnist_28x28':
return MNIST_28x28(optimal_order)
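# Hedged usage sketch (assumes the ffjord GAS files are available under data/):
# subsample the training split and reorder variables by partial correlation.
if __name__ == "__main__":
    gas = load_data('gas', optimal_order=True, subsample_size=10000)
    print(gas.trn.x.shape, gas.val.x.shape, gas.tst.x.shape)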
| 2.109375
| 2
|
museos/webapp/migrations/0001_initial.py
|
LopezAlonsoVictor/X-Serv-Practica-Museos
| 0
|
12776250
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.utils.timezone import utc
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comentario',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('comentario', models.TextField()),
('fecha', models.DateTimeField(default=datetime.datetime(2018, 4, 30, 15, 35, 58, 398297, tzinfo=utc))),
],
),
migrations.CreateModel(
name='Museo',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('nombre', models.CharField(max_length=32)),
('direccion', models.CharField(max_length=64)),
('descripcion', models.TextField()),
('barrio', models.CharField(max_length=32)),
('distrito', models.CharField(max_length=32)),
('accesibilidad', models.IntegerField()),
('telefono', models.BigIntegerField()),
('fax', models.BigIntegerField()),
('email', models.CharField(max_length=64)),
],
),
migrations.CreateModel(
name='Seleccion',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('museo', models.ForeignKey(to='webapp.Museo')),
],
),
migrations.CreateModel(
name='Usuario',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('contraseña', models.CharField(max_length=16)),
('color', models.CharField(max_length=16)),
('tamaño', models.IntegerField()),
('fondo', models.CharField(max_length=16)),
('titulo', models.CharField(max_length=16)),
('nombre', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='seleccion',
name='usuario',
field=models.ForeignKey(to='webapp.Usuario'),
),
migrations.AddField(
model_name='comentario',
name='museo',
field=models.ForeignKey(to='webapp.Museo'),
),
migrations.AddField(
model_name='comentario',
name='usuario',
field=models.ForeignKey(to='webapp.Usuario'),
),
]
| 1.757813
| 2
|