hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
872444167ede318f2615a3163f3e50610c67b32d | 18,222 | py | Python | passglm/data.py | bccho/pass-glm | 4ad14dd043532419dc413c509672b54d1eb8d516 | [
"MIT"
] | null | null | null | passglm/data.py | bccho/pass-glm | 4ad14dd043532419dc413c509672b54d1eb8d516 | [
"MIT"
] | null | null | null | passglm/data.py | bccho/pass-glm | 4ad14dd043532419dc413c509672b54d1eb8d516 | [
"MIT"
] | null | null | null | # Authors: Jonathan Huggins <jhuggins@mit.edu>
# Trevor Campbell <tdjc@mit.edu>
from __future__ import absolute_import, print_function
import sys
import csv
import hashlib
import pickle
from warnings import warn
import numpy as np
import numpy.random as npr
import scipy.sparse as sp
import sklearn.datasets as skl_ds
from sklearn import preprocessing
from .distributions import logistic_likelihood
from .utils import ensure_dimension_matches
import h5py
# based on: http://stackoverflow.com/questions/8955448/
def save_sparse_Xy(filename, X, y):
"""Save sparse X and array-like y as an npz file.
Parameters
----------
filename : string
X : sparse matrix, shape=(n_samples, n_features)
y : array-like, shape=(n_samples,)
"""
np.savez(filename, data=X.data, indices=X.indices, indptr=X.indptr,
shape=X.shape, y=y)
def save_Xy(filename, X, y):
"""Save X, y as an npz file.
Parameters
----------
filename : string
X : matrix-like, shape=(n_samples, n_features)
y : array-like, shape=(n_samples,)
"""
if sp.issparse(X):
save_sparse_Xy(filename, X, y)
else:
np.savez(filename, X=X, y=y)
def load_data(path, file_type, max_data=0, max_dim=0,
preprocess=True, include_offset=False, target_dim=None,
pos_label=None):
"""Load data from a variety of file types.
Parameters
----------
path : string
Data file path.
file_type : string
Supported file types are: 'svmlight', 'npy' (with the labels y in the
rightmost col), 'npz', 'hdf5' (with datasets 'x' and 'y'), and 'csv'
(with the labels y in the rightmost col)
max_data : int
If positive, maximum number of data points to use. If zero or negative,
all data is used. Default is 0.
max_dim : int
If positive, maximum number of features to use. If zero or negative,
all features are used. Default is 0.
preprocess : boolean or Transformer, optional
Flag indicating whether the data should be preprocessed. For sparse
data, the features are scaled to [-1, 1]. For dense data, the features
are scaled to have mean zero and variance one. Default is True.
include_offset : boolean, optional
Flag indicating that an offset feature should be added. Default is
False.
target_dim : int, optional
When given, ensure X initially has this many features. Projection will
be done after X is resized. Default is None.
Returns
-------
X : array-like matrix, shape=(n_samples, n_features)
y : int ndarray, shape=(n_samples,)
Each entry indicates whether each example is negative (-1 value) or
positive (+1 value)
pp_obj : None or Transformer
Transformer object used on data, or None if ``preprocess=False``
"""
if not isinstance(path, str):
raise ValueError("'path' must be a string")
if file_type in ["svmlight", "svm"]:
X, y = _load_svmlight_data(path)
elif file_type == "npy":
X, y = _load_npy_data(path)
elif file_type == "npz":
X, y = _load_npz_data(path)
elif file_type == "hdf5":
X, y = _load_hdf5_data(path)
elif file_type == "csv":
X, y = _load_csv_data(path)
else:
raise ValueError("unsupported file type, %s" % file_type)
if pos_label is None:
y_vals = set(y)
if len(y_vals) != 2:
raise ValueError('Only expected y to take on two values, but instead'
'takes on the values ' + ', '.join(y_vals))
if 1.0 not in y_vals:
raise ValueError('y does not take on 1.0 as one on of its values, but '
'instead takes on the values ' + ', '.join(y_vals))
if -1.0 not in y_vals:
y_vals.remove(1.0)
print('converting y values of %s to -1.0' % y_vals.pop())
y[y != 1.0] = -1.0
else:
y[y != pos_label] = -1.0
y[y == pos_label] = 1.0
if preprocess is False:
pp_obj = None
else:
if preprocess is True:
if sp.issparse(X):
pp_obj = preprocessing.MaxAbsScaler(copy=False)
else:
pp_obj = preprocessing.StandardScaler(copy=False)
else:
pp_obj = preprocess
if target_dim is not None and target_dim != pp_obj.scale_.shape[0]:
raise ValueError('target dim does not match pp_obj')
target_dim = pp_obj.scale_.shape[0]
if target_dim is not None:
X_dim = X.shape[1]
if X_dim < target_dim:
print('expanding X')
extra_shape = (X.shape[0], target_dim - X_dim)
if sp.issparse(X):
stack_fun = sp.hstack
extra = sp.csr_matrix(extra_shape)
else:
stack_fun = np.hstack
extra = np.zeros(extra_shape)
X = stack_fun([X, extra])
elif X_dim > target_dim:
print('shrinking X')
X = X[:,:target_dim]
if preprocess is True:
pp_obj.fit(X)
X = pp_obj.transform(X)
if include_offset:
X = preprocessing.add_dummy_feature(X)
if sp.issparse(X) and (X.nnz > np.prod(X.shape) / 10 or X.shape[1] <= 20):
print("X is either low-dimensional or not very sparse, so converting "
"to a numpy array")
X = X.toarray()
if isinstance(max_data, int) and max_data > 0 and max_data < X.shape[0]:
X = X[:max_data,:]
y = y[:max_data]
if isinstance(max_dim, int) and max_dim > 0 and max_dim < X.shape[1]:
X = X[:,:max_dim]
return X, y, pp_obj
def generate_gaussian_synthetic(num_samples, mean, covar, theta,
fname=None, include_offset=False):
"""Generate classification data with covariates from Gaussian distribution.
Generate `num_samples` data points with `X[i,:] ~ N(mean, covar)`, then use
a logistic likelihood model with parameter `theta` to generate `y[i]`.
If `include_offset = True`, then `X[i,-1] = 1`. Thus,
`total_features = n_features` if `include_offset = False` and
`n_features + 1` otherwise.
Parameters
----------
num_samples : int
mean : array-like, shape=(n_features,)
covar : matrix-like, shape=(n_features, n_features)
theta : array-like, shape=(total_features,)
fname : string, optional
If provided, save data to the provided filename
include_offset : boolean, optional
Default is False.
Returns
-------
X : ndarray with shape (num_samples, total_features)
y : ndarray with shape (num_samples,)
"""
_ensure_means_covar_match(mean, covar)
X = npr.multivariate_normal(mean, covar, num_samples)
if include_offset:
X = np.hstack((X, np.ones((num_samples, 1))))
return _generate_and_save_from_X(X, theta, fname)
def generate_gaussian_mixture(num_samples, weights, means, covar, theta,
fname=None, include_offset=False):
"""Generate classification data with covariates from Gaussian mixture.
Generate `num_samples` data points with `X[i,:] ~ N(means[j,:], covar)`
with probability `weights[j]`, then use a logistic likelihood model with
parameter `theta` to generate `y[i]`. If `include_offset = True`,
then `X[i,-1] = 1`. Thus, `total_features = n_features` if
`include_offset = False` and `n_features + 1` otherwise.
Parameters
----------
num_samples : int
weights : array-like, shape=(n_components,)
means : array-like, shape=(n_components, n_features)
covar : matrix-like, shape=(n_features, n_features)
theta : array-like, shape=(total_features,)
fname : string, optional
If provided, save data to the provided filename
include_offset : boolean, optional
Default is False.
Returns
-------
X : ndarray with shape (num_samples, total_features)
y : ndarray with shape (num_samples,)
"""
_ensure_means_covar_match(means, covar)
if means.shape[0] != weights.shape[0]:
raise ValueError("'means' and 'weights' shapes do not match")
components = npr.choice(weights.shape[0], num_samples, p=weights)
z = np.zeros(means.shape[1])
X = means[components, :] + npr.multivariate_normal(z, covar, num_samples)
if include_offset:
X = np.hstack((X, np.ones((num_samples, 1))))
return _generate_and_save_from_X(X, theta, fname)
def generate_reverse_mixture(num_samples, pos_prob, means, covar, fname=None):
"""Generate classification data class first, then Gaussian covariates.
Generate `num_samples` data points with `Pr[y[i] = 1] = pos_prob` and
`X[i,:] ~ N(means[y[i],:], covar)`.
Parameters
----------
num_samples : int
pos_prob : float
means : array-like, shape=(2, n_features)
covar : matrix-like, shape=(n_features, n_features)
fname : string, optional
If provided, save data to the provided filename
Returns
-------
X : ndarray with shape (num_samples, n_features)
y : ndarray with shape (num_samples,)
"""
_ensure_means_covar_match(means, covar)
if means.shape[0] != 2:
raise ValueError("'means' must have exactly two means")
y = npr.rand(num_samples)
y[y <= pos_prob] = 1
y[y != 1] = -1
components = np.zeros(num_samples, dtype=np.int)
components[y == 1] = 1
z = np.zeros(means.shape[1])
X = means[components, :] + npr.multivariate_normal(z, covar, num_samples)
if fname is not None:
np.save(fname, np.hstack((X, y[:, np.newaxis])))
return X, y
def generate_binary_data(num_samples, probs, theta,
fname=None, include_offset=False, ):
"""Generate classification data with binary covariates.
Generate `num_samples` data points with `Pr[X[i,j] = 1] = probs[j]` and
a logistic likelihood model with parameter `theta` to generate `y[i]`.
If `include_offset = True`, then `X[i,-1] = 1`. Thus,
`total_features = n_features` if `include_offset = False` and
`n_features + 1` otherwise.
Parameters
----------
num_samples : int
probs : array-like, shape=(n_features)
theta : array-like, shape=(total_features,)
fname : string, optional
If provided, save data to the provided filename
include_offset : boolean, optional
Default is False.
Returns
-------
X : csr_matrix with shape (num_samples, total_features)
y : ndarray with shape (num_samples,)
"""
probs = probs[np.newaxis, :]
X = npr.rand(num_samples, probs.shape[1])
X[X <= probs] = 1
X[X != 1] = 0
X = sp.csr_matrix(X, dtype=np.int32)
if include_offset:
X = sp.hstack((X, np.ones((num_samples, 1), dtype=np.int32)),
format='csr')
return _generate_and_save_from_X(X, theta, fname)
def convert_categorical_data_to_svmlight(path, filetype, out_path, column_info,
positive_labels,
ignore_first_line=False,
delimeter=',',
init=None,
no_new_features=False):
"""Convert categorical data into svmlight format.
Column info is a space-separated list of information about each column.
The options for each column are:
* 'cat' - categorical data (induces multiple features)
* 'bin' - binary data (induces single feature)
* 'lab' - output label (can only be assigned to one column)
* 'num' - numeric data
* 'ign' - ignore column
Parameters
----------
path : string
file_type : string
Supported file types are: 'csv'
out_path : string
column_info : string
positive_labels : list of strings
ignore_first_line : boolean, optional
Default is False.
delimeter : string, optional
Default is ','.
init : tuple, optional
Output from previous execution of the function. Used to maintain
consistency across multiple conversions.
no_new_features : boolean, optional
If init is provided, then don't create any new features.
Returns
-------
next_index : int
data : object
"""
info = column_info.split(' ')
if info.count('lab') != 1:
raise ValueError('column_info must specify exactly one label column')
label_index = info.index('lab')
if init is not None:
next_index, data, label_map, next_label_id = init
if no_new_features:
next_index = -next_index
else:
next_index = 1
data = [dict() for i in range(len(info))]
next_label_id = 1
label_map = {}
if filetype == 'csv':
with open(path, 'rb') as csv_file, open(out_path, 'wb') as out_file:
reader = csv.reader(csv_file, delimiter=delimeter)
try:
if ignore_first_line:
reader.next()
for row in reader:
if len(info) != len(row):
raise ValueError('row %d had an unexpected number of '
'columns (expected %d, got %d)' %
(reader.line_num, len(info), len(row)))
if positive_labels is None:
# hex_h = hashlib.md5(row[label_index]).hexdigest()
# h = int(hex_h, 16) % 49979687
# out_file.write('%d ' % h)
if row[label_index] not in label_map:
label_map[row[label_index]] = next_label_id
next_label_id += 1
out_file.write('%d ' % label_map[row[label_index]])
elif row[label_index] in positive_labels:
out_file.write('1 ')
else:
out_file.write('-1 ')
entry_list = []
for i, val in enumerate(row):
entry, next_index = _process_row_entry(val, info[i],
data[i],
next_index)
if entry is not None:
entry_list.append(entry)
entry_list.sort(cmp=lambda x,y: cmp(x[0], y[0]))
out_file.write(' '.join(['%s:%s' % e for e in entry_list]))
out_file.write('\n')
except csv.Error as e:
sys.exit('file %s, line %d: %s' % (path, reader.line_num, e))
if len(label_map) > 0:
with open(out_path + '.label_map', 'w') as f:
pickle.dump(label_map, f)
return abs(next_index), data
else:
raise ValueError("unsupported file type, %s" % file_type)
| 32.539286 | 83 | 0.573922 | # Authors: Jonathan Huggins <jhuggins@mit.edu>
# Trevor Campbell <tdjc@mit.edu>
from __future__ import absolute_import, print_function
import sys
import csv
import hashlib
import pickle
from warnings import warn
import numpy as np
import numpy.random as npr
import scipy.sparse as sp
import sklearn.datasets as skl_ds
from sklearn import preprocessing
from .distributions import logistic_likelihood
from .utils import ensure_dimension_matches
import h5py
# based on: http://stackoverflow.com/questions/8955448/
def save_sparse_Xy(filename, X, y):
"""Save sparse X and array-like y as an npz file.
Parameters
----------
filename : string
X : sparse matrix, shape=(n_samples, n_features)
y : array-like, shape=(n_samples,)
"""
np.savez(filename, data=X.data, indices=X.indices, indptr=X.indptr,
shape=X.shape, y=y)
def save_Xy(filename, X, y):
"""Save X, y as an npz file.
Parameters
----------
filename : string
X : matrix-like, shape=(n_samples, n_features)
y : array-like, shape=(n_samples,)
"""
if sp.issparse(X):
save_sparse_Xy(filename, X, y)
else:
np.savez(filename, X=X, y=y)
def _load_svmlight_data(path):
X, y = skl_ds.load_svmlight_file(path)
return X, y
def _load_npy_data(path):
xy = np.load(path)
X = xy[:, :-1]
y = xy[:, -1]
return X, y
def _load_npz_data(path):
loader = np.load(path)
if 'X' in loader:
X = loader['X']
else:
X = sp.csr_matrix((loader['data'], loader['indices'], loader['indptr']),
shape=loader['shape'])
y = loader['y']
return X, y
def _load_hdf5_data(path):
f = h5py.File(path, 'r')
X = f['x']
y = f['y']
f.close()
return X, y
def _load_csv_data(path):
xy = np.genfromtxt(path, delimiter=',')
X = xy[:, :-1]
y = xy[:, -1]
return X, y
def load_data(path, file_type, max_data=0, max_dim=0,
preprocess=True, include_offset=False, target_dim=None,
pos_label=None):
"""Load data from a variety of file types.
Parameters
----------
path : string
Data file path.
file_type : string
Supported file types are: 'svmlight', 'npy' (with the labels y in the
rightmost col), 'npz', 'hdf5' (with datasets 'x' and 'y'), and 'csv'
(with the labels y in the rightmost col)
max_data : int
If positive, maximum number of data points to use. If zero or negative,
all data is used. Default is 0.
max_dim : int
If positive, maximum number of features to use. If zero or negative,
all features are used. Default is 0.
preprocess : boolean or Transformer, optional
Flag indicating whether the data should be preprocessed. For sparse
data, the features are scaled to [-1, 1]. For dense data, the features
are scaled to have mean zero and variance one. Default is True.
include_offset : boolean, optional
Flag indicating that an offset feature should be added. Default is
False.
target_dim : int, optional
When given, ensure X initially has this many features. Projection will
be done after X is resized. Default is None.
Returns
-------
X : array-like matrix, shape=(n_samples, n_features)
y : int ndarray, shape=(n_samples,)
Each entry indicates whether each example is negative (-1 value) or
positive (+1 value)
pp_obj : None or Transformer
Transformer object used on data, or None if ``preprocess=False``
"""
if not isinstance(path, str):
raise ValueError("'path' must be a string")
if file_type in ["svmlight", "svm"]:
X, y = _load_svmlight_data(path)
elif file_type == "npy":
X, y = _load_npy_data(path)
elif file_type == "npz":
X, y = _load_npz_data(path)
elif file_type == "hdf5":
X, y = _load_hdf5_data(path)
elif file_type == "csv":
X, y = _load_csv_data(path)
else:
raise ValueError("unsupported file type, %s" % file_type)
if pos_label is None:
y_vals = set(y)
if len(y_vals) != 2:
raise ValueError('Only expected y to take on two values, but instead'
'takes on the values ' + ', '.join(y_vals))
if 1.0 not in y_vals:
raise ValueError('y does not take on 1.0 as one on of its values, but '
'instead takes on the values ' + ', '.join(y_vals))
if -1.0 not in y_vals:
y_vals.remove(1.0)
print('converting y values of %s to -1.0' % y_vals.pop())
y[y != 1.0] = -1.0
else:
y[y != pos_label] = -1.0
y[y == pos_label] = 1.0
if preprocess is False:
pp_obj = None
else:
if preprocess is True:
if sp.issparse(X):
pp_obj = preprocessing.MaxAbsScaler(copy=False)
else:
pp_obj = preprocessing.StandardScaler(copy=False)
else:
pp_obj = preprocess
if target_dim is not None and target_dim != pp_obj.scale_.shape[0]:
raise ValueError('target dim does not match pp_obj')
target_dim = pp_obj.scale_.shape[0]
if target_dim is not None:
X_dim = X.shape[1]
if X_dim < target_dim:
print('expanding X')
extra_shape = (X.shape[0], target_dim - X_dim)
if sp.issparse(X):
stack_fun = sp.hstack
extra = sp.csr_matrix(extra_shape)
else:
stack_fun = np.hstack
extra = np.zeros(extra_shape)
X = stack_fun([X, extra])
elif X_dim > target_dim:
print('shrinking X')
X = X[:,:target_dim]
if preprocess is True:
pp_obj.fit(X)
X = pp_obj.transform(X)
if include_offset:
X = preprocessing.add_dummy_feature(X)
if sp.issparse(X) and (X.nnz > np.prod(X.shape) / 10 or X.shape[1] <= 20):
print("X is either low-dimensional or not very sparse, so converting "
"to a numpy array")
X = X.toarray()
if isinstance(max_data, int) and max_data > 0 and max_data < X.shape[0]:
X = X[:max_data,:]
y = y[:max_data]
if isinstance(max_dim, int) and max_dim > 0 and max_dim < X.shape[1]:
X = X[:,:max_dim]
return X, y, pp_obj
def _generate_and_save_from_X(X, theta, fname):
lp = logistic_likelihood(theta, X, sum_result=False)
ln = logistic_likelihood(theta, -X, sum_result=False)
lmax = np.maximum(lp, ln)
lp -= lmax
ln -= lmax
p = np.exp(lp) / (np.exp(lp) + np.exp(ln))
y = npr.rand(X.shape[0])
y[y <= p] = 1
y[y != 1] = -1
if fname is not None:
if sp.issparse(X):
save_sparse_Xy(fname, X, y)
else:
np.save(fname, np.hstack((X, y[:, np.newaxis])))
return X, y
def _ensure_means_covar_match(means, covar):
if len(means.shape) == 1:
n_features = means.shape[0]
else:
n_features = means.shape[1]
if len(covar.shape) != 2 or covar.shape[0] != covar.shape[1]:
raise ValueError('invalid covariance matrix shape')
if n_features != covar.shape[0]:
raise ValueError('mean and covariance shapes do not match')
def generate_gaussian_synthetic(num_samples, mean, covar, theta,
fname=None, include_offset=False):
"""Generate classification data with covariates from Gaussian distribution.
Generate `num_samples` data points with `X[i,:] ~ N(mean, covar)`, then use
a logistic likelihood model with parameter `theta` to generate `y[i]`.
If `include_offset = True`, then `X[i,-1] = 1`. Thus,
`total_features = n_features` if `include_offset = False` and
`n_features + 1` otherwise.
Parameters
----------
num_samples : int
mean : array-like, shape=(n_features,)
covar : matrix-like, shape=(n_features, n_features)
theta : array-like, shape=(total_features,)
fname : string, optional
If provided, save data to the provided filename
include_offset : boolean, optional
Default is False.
Returns
-------
X : ndarray with shape (num_samples, total_features)
y : ndarray with shape (num_samples,)
"""
_ensure_means_covar_match(mean, covar)
X = npr.multivariate_normal(mean, covar, num_samples)
if include_offset:
X = np.hstack((X, np.ones((num_samples, 1))))
return _generate_and_save_from_X(X, theta, fname)
def generate_gaussian_mixture(num_samples, weights, means, covar, theta,
fname=None, include_offset=False):
"""Generate classification data with covariates from Gaussian mixture.
Generate `num_samples` data points with `X[i,:] ~ N(means[j,:], covar)`
with probability `weights[j]`, then use a logistic likelihood model with
parameter `theta` to generate `y[i]`. If `include_offset = True`,
then `X[i,-1] = 1`. Thus, `total_features = n_features` if
`include_offset = False` and `n_features + 1` otherwise.
Parameters
----------
num_samples : int
weights : array-like, shape=(n_components,)
means : array-like, shape=(n_components, n_features)
covar : matrix-like, shape=(n_features, n_features)
theta : array-like, shape=(total_features,)
fname : string, optional
If provided, save data to the provided filename
include_offset : boolean, optional
Default is False.
Returns
-------
X : ndarray with shape (num_samples, total_features)
y : ndarray with shape (num_samples,)
"""
_ensure_means_covar_match(means, covar)
if means.shape[0] != weights.shape[0]:
raise ValueError("'means' and 'weights' shapes do not match")
components = npr.choice(weights.shape[0], num_samples, p=weights)
z = np.zeros(means.shape[1])
X = means[components, :] + npr.multivariate_normal(z, covar, num_samples)
if include_offset:
X = np.hstack((X, np.ones((num_samples, 1))))
return _generate_and_save_from_X(X, theta, fname)
def generate_reverse_mixture(num_samples, pos_prob, means, covar, fname=None):
"""Generate classification data class first, then Gaussian covariates.
Generate `num_samples` data points with `Pr[y[i] = 1] = pos_prob` and
`X[i,:] ~ N(means[y[i],:], covar)`.
Parameters
----------
num_samples : int
pos_prob : float
means : array-like, shape=(2, n_features)
covar : matrix-like, shape=(n_features, n_features)
fname : string, optional
If provided, save data to the provided filename
Returns
-------
X : ndarray with shape (num_samples, n_features)
y : ndarray with shape (num_samples,)
"""
_ensure_means_covar_match(means, covar)
if means.shape[0] != 2:
raise ValueError("'means' must have exactly two means")
y = npr.rand(num_samples)
y[y <= pos_prob] = 1
y[y != 1] = -1
components = np.zeros(num_samples, dtype=np.int)
components[y == 1] = 1
z = np.zeros(means.shape[1])
X = means[components, :] + npr.multivariate_normal(z, covar, num_samples)
if fname is not None:
np.save(fname, np.hstack((X, y[:, np.newaxis])))
return X, y
def generate_binary_data(num_samples, probs, theta,
fname=None, include_offset=False, ):
"""Generate classification data with binary covariates.
Generate `num_samples` data points with `Pr[X[i,j] = 1] = probs[j]` and
a logistic likelihood model with parameter `theta` to generate `y[i]`.
If `include_offset = True`, then `X[i,-1] = 1`. Thus,
`total_features = n_features` if `include_offset = False` and
`n_features + 1` otherwise.
Parameters
----------
num_samples : int
probs : array-like, shape=(n_features)
theta : array-like, shape=(total_features,)
fname : string, optional
If provided, save data to the provided filename
include_offset : boolean, optional
Default is False.
Returns
-------
X : csr_matrix with shape (num_samples, total_features)
y : ndarray with shape (num_samples,)
"""
probs = probs[np.newaxis, :]
X = npr.rand(num_samples, probs.shape[1])
X[X <= probs] = 1
X[X != 1] = 0
X = sp.csr_matrix(X, dtype=np.int32)
if include_offset:
X = sp.hstack((X, np.ones((num_samples, 1), dtype=np.int32)),
format='csr')
return _generate_and_save_from_X(X, theta, fname)
def _process_row_entry(value, col_info, data, next_index):
if col_info in ['lab', 'ign'] or value == '':
return None, next_index
elif col_info == 'num':
if float(value) == 0:
return None, next_index
else:
if '__index__' not in data:
if next_index < 0:
return None, next_index
data['__index__'] = next_index
next_index += 1
return (data['__index__'], value), next_index
elif col_info == 'bin':
if value not in data:
if len(data) >= 2:
raise ValueError('binary data column has more than two values')
if len(data) == 0 or next_index < 0:
data[value] = None
else:
data[value] = next_index
next_index += 1
if data[value] is None:
return None, next_index
else:
return (data[value], "1"), next_index
elif col_info == 'cat':
if value not in data:
if next_index < 0:
data[value] = None
else:
data[value] = next_index
next_index += 1
if data[value] is None:
return None, next_index
else:
return (data[value], "1"), next_index
else:
raise ValueError('invalid column info "%s"' % col_info)
def convert_categorical_data_to_svmlight(path, filetype, out_path, column_info,
positive_labels,
ignore_first_line=False,
delimeter=',',
init=None,
no_new_features=False):
"""Convert categorical data into svmlight format.
Column info is a space-separated list of information about each column.
The options for each column are:
* 'cat' - categorical data (induces multiple features)
* 'bin' - binary data (induces single feature)
* 'lab' - output label (can only be assigned to one column)
* 'num' - numeric data
* 'ign' - ignore column
Parameters
----------
path : string
file_type : string
Supported file types are: 'csv'
out_path : string
column_info : string
positive_labels : list of strings
ignore_first_line : boolean, optional
Default is False.
delimeter : string, optional
Default is ','.
init : tuple, optional
Output from previous execution of the function. Used to maintain
consistency across multiple conversions.
no_new_features : boolean, optional
If init is provided, then don't create any new features.
Returns
-------
next_index : int
data : object
"""
info = column_info.split(' ')
if info.count('lab') != 1:
raise ValueError('column_info must specify exactly one label column')
label_index = info.index('lab')
if init is not None:
next_index, data, label_map, next_label_id = init
if no_new_features:
next_index = -next_index
else:
next_index = 1
data = [dict() for i in range(len(info))]
next_label_id = 1
label_map = {}
if filetype == 'csv':
with open(path, 'rb') as csv_file, open(out_path, 'wb') as out_file:
reader = csv.reader(csv_file, delimiter=delimeter)
try:
if ignore_first_line:
reader.next()
for row in reader:
if len(info) != len(row):
raise ValueError('row %d had an unexpected number of '
'columns (expected %d, got %d)' %
(reader.line_num, len(info), len(row)))
if positive_labels is None:
# hex_h = hashlib.md5(row[label_index]).hexdigest()
# h = int(hex_h, 16) % 49979687
# out_file.write('%d ' % h)
if row[label_index] not in label_map:
label_map[row[label_index]] = next_label_id
next_label_id += 1
out_file.write('%d ' % label_map[row[label_index]])
elif row[label_index] in positive_labels:
out_file.write('1 ')
else:
out_file.write('-1 ')
entry_list = []
for i, val in enumerate(row):
entry, next_index = _process_row_entry(val, info[i],
data[i],
next_index)
if entry is not None:
entry_list.append(entry)
entry_list.sort(cmp=lambda x,y: cmp(x[0], y[0]))
out_file.write(' '.join(['%s:%s' % e for e in entry_list]))
out_file.write('\n')
except csv.Error as e:
sys.exit('file %s, line %d: %s' % (path, reader.line_num, e))
if len(label_map) > 0:
with open(out_path + '.label_map', 'w') as f:
pickle.dump(label_map, f)
return abs(next_index), data
else:
raise ValueError("unsupported file type, %s" % file_type)
| 2,840 | 0 | 184 |
d99dd7e522294f090a9fa1d9c4391cc83cffcaf6 | 2,776 | py | Python | test_settings.py | holvi/django-waffle | 46442854e7eb76959c684037e92c4a4fb925cf0c | [
"BSD-3-Clause"
] | null | null | null | test_settings.py | holvi/django-waffle | 46442854e7eb76959c684037e92c4a4fb925cf0c | [
"BSD-3-Clause"
] | 4 | 2016-06-17T06:24:55.000Z | 2022-01-04T14:50:00.000Z | test_settings.py | holvi/django-waffle | 46442854e7eb76959c684037e92c4a4fb925cf0c | [
"BSD-3-Clause"
] | 2 | 2016-03-08T07:51:34.000Z | 2019-06-19T12:52:22.000Z | import os
import django
from distutils.version import StrictVersion
DJANGO_VERSION = StrictVersion(django.get_version())
# Make filepaths relative to settings.
ROOT = os.path.dirname(os.path.abspath(__file__))
path = lambda *a: os.path.join(ROOT, *a)
DEBUG = True
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
JINJA_CONFIG = {}
SITE_ID = 1
USE_I18N = False
SECRET_KEY = 'foobar'
DATABASES = {
'default': {
'NAME': 'test.db',
'ENGINE': 'django.db.backends.sqlite3',
},
# Provide a readonly DB for testing DB replication scenarios.
'readonly': {
'NAME': 'test.readonly.db',
'ENGINE': 'django.db.backends.sqlite3',
}
}
if 'DATABASE_URL' in os.environ:
try:
import dj_database_url
import psycopg2
DATABASES['default'] = dj_database_url.config()
except ImportError:
raise ImportError('Using the DATABASE_URL variable requires '
'dj-database-url and psycopg2. Try:\n\npip install '
'-r travis.txt')
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'waffle',
'test_app',
)
_MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'waffle.middleware.WaffleMiddleware',
)
if DJANGO_VERSION < StrictVersion('1.10.0'):
MIDDLEWARE_CLASSES = _MIDDLEWARE_CLASSES
else:
MIDDLEWARE = _MIDDLEWARE_CLASSES
ROOT_URLCONF = 'test_app.urls'
CUSTOM_USER_MODEL = 'auth.User'
_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.request',
)
TEMPLATES = [
{
'BACKEND': 'django_jinja.backend.Jinja2',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'match_regex': r'jinja.*',
'match_extension': '',
'newstyle_gettext': True,
'context_processors': _CONTEXT_PROCESSORS,
'undefined': 'jinja2.Undefined',
'extensions': [
'jinja2.ext.i18n',
'jinja2.ext.autoescape',
'waffle.jinja.WaffleExtension',
],
}
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': _CONTEXT_PROCESSORS,
}
},
]
WAFFLE_FLAG_DEFAULT = False
WAFFLE_SWITCH_DEFAULT = False
WAFFLE_SAMPLE_DEFAULT = False
WAFFLE_READ_FROM_WRITE_DB = False
WAFFLE_OVERRIDE = False
WAFFLE_CACHE_PREFIX = 'test:'
| 25.009009 | 78 | 0.631484 | import os
import django
from distutils.version import StrictVersion
DJANGO_VERSION = StrictVersion(django.get_version())
# Make filepaths relative to settings.
ROOT = os.path.dirname(os.path.abspath(__file__))
path = lambda *a: os.path.join(ROOT, *a)
DEBUG = True
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
JINJA_CONFIG = {}
SITE_ID = 1
USE_I18N = False
SECRET_KEY = 'foobar'
DATABASES = {
'default': {
'NAME': 'test.db',
'ENGINE': 'django.db.backends.sqlite3',
},
# Provide a readonly DB for testing DB replication scenarios.
'readonly': {
'NAME': 'test.readonly.db',
'ENGINE': 'django.db.backends.sqlite3',
}
}
if 'DATABASE_URL' in os.environ:
try:
import dj_database_url
import psycopg2
DATABASES['default'] = dj_database_url.config()
except ImportError:
raise ImportError('Using the DATABASE_URL variable requires '
'dj-database-url and psycopg2. Try:\n\npip install '
'-r travis.txt')
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'waffle',
'test_app',
)
_MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'waffle.middleware.WaffleMiddleware',
)
if DJANGO_VERSION < StrictVersion('1.10.0'):
MIDDLEWARE_CLASSES = _MIDDLEWARE_CLASSES
else:
MIDDLEWARE = _MIDDLEWARE_CLASSES
ROOT_URLCONF = 'test_app.urls'
CUSTOM_USER_MODEL = 'auth.User'
_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.request',
)
TEMPLATES = [
{
'BACKEND': 'django_jinja.backend.Jinja2',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'match_regex': r'jinja.*',
'match_extension': '',
'newstyle_gettext': True,
'context_processors': _CONTEXT_PROCESSORS,
'undefined': 'jinja2.Undefined',
'extensions': [
'jinja2.ext.i18n',
'jinja2.ext.autoescape',
'waffle.jinja.WaffleExtension',
],
}
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': _CONTEXT_PROCESSORS,
}
},
]
WAFFLE_FLAG_DEFAULT = False
WAFFLE_SWITCH_DEFAULT = False
WAFFLE_SAMPLE_DEFAULT = False
WAFFLE_READ_FROM_WRITE_DB = False
WAFFLE_OVERRIDE = False
WAFFLE_CACHE_PREFIX = 'test:'
| 0 | 0 | 0 |
edac1439062883480f7c1b6aaac384f6716d8bc2 | 2,443 | py | Python | apphub/image_classification/lenet_mnist/tensorflow/lenet_mnist.py | vbvg2008/fastestimator-future | dbf7d597d1f97140f837345f6b06f1773d4fa299 | [
"Apache-1.1"
] | null | null | null | apphub/image_classification/lenet_mnist/tensorflow/lenet_mnist.py | vbvg2008/fastestimator-future | dbf7d597d1f97140f837345f6b06f1773d4fa299 | [
"Apache-1.1"
] | null | null | null | apphub/image_classification/lenet_mnist/tensorflow/lenet_mnist.py | vbvg2008/fastestimator-future | dbf7d597d1f97140f837345f6b06f1773d4fa299 | [
"Apache-1.1"
] | null | null | null | """This example showcase FastEstimator usage for tensorflow users. In this file, we use tf.dataset as data input.
"""
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import Sequential, layers
import fastestimator as fe
from fastestimator.op.tensorop.loss import CrossEntropy
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
from fastestimator.trace.metric import Accuracy
if __name__ == "__main__":
est = get_estimator()
est.fit() | 35.405797 | 115 | 0.639787 | """This example showcase FastEstimator usage for tensorflow users. In this file, we use tf.dataset as data input.
"""
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import Sequential, layers
import fastestimator as fe
from fastestimator.op.tensorop.loss import CrossEntropy
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
from fastestimator.trace.metric import Accuracy
def Scale(dataset):
dataset["x"] = tf.cast(dataset["x"], tf.float32)
dataset["y"] = tf.cast(dataset["y"], tf.int32)
dataset["x"] = dataset["x"] / 255.0
return dataset
def get_tensorflow_dataset(x, y, shuffle=True):
data = {"x": x, "y": y}
ds = tf.data.Dataset.from_tensor_slices(data)
if shuffle:
data_length = x.shape[0]
ds = ds.shuffle(data_length)
ds = ds.map(Scale, num_parallel_calls=4)
ds = ds.batch(32)
ds = ds.prefetch(1)
return ds
def LeNet(input_shape=(28, 28, 1), classes=10):
model = Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(classes, activation='softmax'))
return model
def get_estimator():
#step 1
(x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.mnist.load_data()
pipeline = fe.Pipeline(train_data=get_tensorflow_dataset(x=np.expand_dims(x_train, -1), y=y_train),
eval_data=get_tensorflow_dataset(x=np.expand_dims(x_eval, -1), y=y_eval, shuffle=False),
batch_size=32)
#step 2
model = fe.build(model=LeNet(), optimizer="adam")
network = fe.Network(ops=[
ModelOp(model=model, inputs="x", outputs="y_pred"),
CrossEntropy(inputs=("y_pred", "y"), outputs="ce"),
UpdateOp(model=model, loss_name="ce")
])
#step 3
estimator = fe.Estimator(pipeline=pipeline,
network=network,
epochs=2,
steps_per_epoch=1875,
traces=Accuracy(true_key="y", pred_key="y_pred"))
return estimator
if __name__ == "__main__":
est = get_estimator()
est.fit() | 1,875 | 0 | 92 |
8264cd116ef797f808fc0878529023592ab0d01d | 1,267 | py | Python | CalliopeAPI/decode_labels.py | RoZvEr/Calliope | 4ef5a71f0d2788508d6fef00ee98932e27b85105 | [
"MIT"
] | 1 | 2020-06-11T11:02:03.000Z | 2020-06-11T11:02:03.000Z | CalliopeAPI/decode_labels.py | RoZvEr/Calliope | 4ef5a71f0d2788508d6fef00ee98932e27b85105 | [
"MIT"
] | null | null | null | CalliopeAPI/decode_labels.py | RoZvEr/Calliope | 4ef5a71f0d2788508d6fef00ee98932e27b85105 | [
"MIT"
] | 1 | 2020-10-12T06:25:42.000Z | 2020-10-12T06:25:42.000Z | from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.applications.imagenet_utils import preprocess_input, decode_predictions
import numpy as np
from tensorflow import keras
from matplotlib import pyplot as plt
from tensorflow.keras.preprocessing import image
from tensorflow.keras import Input
from skimage import transform as st
import cv2
import tensorflow as tf
images = np.load('dataset/LLD_icon_numpy/dataset1.npy')
model = InceptionV3(include_top=True, weights='imagenet')
start_index = 0
end_index = 2000
count = 0
while count<50:
images_batch = images[start_index:end_index]
image_list = []
for img in images_batch:
img = cv2.resize(img, dsize=(299, 299), interpolation=cv2.INTER_CUBIC)
image_list.append(img)
images_list = np.array(image_list)
predictions = model.predict(images_list)
decoded_predictions = decode_predictions(predictions, top=3)
decoded_predictions = np.array(decoded_predictions)
np.save('predictions/decoded_predictions_'+str(count)+'.npy', decoded_predictions)
start_index = start_index + 2000
end_index = end_index + 2000
count = count + 1
| 28.795455 | 93 | 0.764799 | from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.applications.imagenet_utils import preprocess_input, decode_predictions
import numpy as np
from tensorflow import keras
from matplotlib import pyplot as plt
from tensorflow.keras.preprocessing import image
from tensorflow.keras import Input
from skimage import transform as st
import cv2
import tensorflow as tf
def get_predictions(images, model):
predictions = model.predict(images)
return predictions
images = np.load('dataset/LLD_icon_numpy/dataset1.npy')
model = InceptionV3(include_top=True, weights='imagenet')
start_index = 0
end_index = 2000
count = 0
while count<50:
images_batch = images[start_index:end_index]
image_list = []
for img in images_batch:
img = cv2.resize(img, dsize=(299, 299), interpolation=cv2.INTER_CUBIC)
image_list.append(img)
images_list = np.array(image_list)
predictions = model.predict(images_list)
decoded_predictions = decode_predictions(predictions, top=3)
decoded_predictions = np.array(decoded_predictions)
np.save('predictions/decoded_predictions_'+str(count)+'.npy', decoded_predictions)
start_index = start_index + 2000
end_index = end_index + 2000
count = count + 1
| 77 | 0 | 23 |
a6e51f3033941c14b71a08068bf7b30507da8fc4 | 695 | py | Python | docker/random_secret.py | davewalker5/NatureRecorderPy | 53b3c2a589986f91bc78999f40a6dc55f61afe36 | [
"MIT"
] | null | null | null | docker/random_secret.py | davewalker5/NatureRecorderPy | 53b3c2a589986f91bc78999f40a6dc55f61afe36 | [
"MIT"
] | null | null | null | docker/random_secret.py | davewalker5/NatureRecorderPy | 53b3c2a589986f91bc78999f40a6dc55f61afe36 | [
"MIT"
] | null | null | null | import os
import sys
def get_project_path():
"""
Return the path to the project root folder
:return: The path to the project root folder
"""
return os.path.dirname(os.path.dirname(__file__))
def set_web_app_secret():
"""
Replace the secret key in the web application Python source file
"""
file = os.path.join(get_project_path(), "data", ".env")
with open(file, mode="wt", encoding="utf-8") as f:
f.writelines([
f"SECRET_KEY={os.urandom(32).hex()}\n"
])
if __name__ == "__main__":
try:
set_web_app_secret()
sys.exit(0)
except BaseException as e:
print(f"Error: {e}")
sys.exit(1)
| 21.060606 | 68 | 0.6 | import os
import sys
def get_project_path():
"""
Return the path to the project root folder
:return: The path to the project root folder
"""
return os.path.dirname(os.path.dirname(__file__))
def set_web_app_secret():
"""
Replace the secret key in the web application Python source file
"""
file = os.path.join(get_project_path(), "data", ".env")
with open(file, mode="wt", encoding="utf-8") as f:
f.writelines([
f"SECRET_KEY={os.urandom(32).hex()}\n"
])
if __name__ == "__main__":
try:
set_web_app_secret()
sys.exit(0)
except BaseException as e:
print(f"Error: {e}")
sys.exit(1)
| 0 | 0 | 0 |
6229ff03ea610788af9d49dae4964428e3f65918 | 2,978 | py | Python | resources/lib/service.py | fatshotty/kodi.simple.vocal.service | a4676b8df403034481c7d02ee927b64e63d0ea42 | [
"Apache-2.0"
] | null | null | null | resources/lib/service.py | fatshotty/kodi.simple.vocal.service | a4676b8df403034481c7d02ee927b64e63d0ea42 | [
"Apache-2.0"
] | null | null | null | resources/lib/service.py | fatshotty/kodi.simple.vocal.service | a4676b8df403034481c7d02ee927b64e63d0ea42 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from resources.lib import kodiutils
from resources.lib import kodilogging
from resources.lib.messenger.socketclient import SockClient as SocketClient
import logging
import time
import xbmc
import xbmcaddon
from resources.lib.actions import execute
ADDON = xbmcaddon.Addon()
logger = logging.getLogger(ADDON.getAddonInfo('id'))
| 30.387755 | 157 | 0.658496 | # -*- coding: utf-8 -*-
from resources.lib import kodiutils
from resources.lib import kodilogging
from resources.lib.messenger.socketclient import SockClient as SocketClient
import logging
import time
import xbmc
import xbmcaddon
from resources.lib.actions import execute
ADDON = xbmcaddon.Addon()
logger = logging.getLogger(ADDON.getAddonInfo('id'))
def onMessage(data):
logger.info('received remote action: {}', data)
result = execute(data)
if result:
header = kodiutils.get_string(32000)
kodiutils.notification(header, data['responseText'], sound=(data['action'] == 'input_unknown'))
else:
logger.warn('Cannot execute action')
def onConnect(sio):
header = kodiutils.get_string(32000)
message = kodiutils.get_string(32003)
kodiutils.notification(header, message, sound=False)
def onDisconnect(sio):
header = kodiutils.get_string(32000)
message = kodiutils.get_string(32004)
kodiutils.notification(header, message, sound=False)
def onLog(str):
logger.info( str )
def generate_uuid():
import random, string
# TODO: generate a new UUID for connecting to a server channel
ts = str( time.time() )
last_ts = ts[-3:]
code = "{}{}{}-{}{}{}{}-{}".format(
random.choice(string.ascii_letters).upper(),
random.choice(string.ascii_letters).upper(),
random.choice(string.ascii_letters).upper(),
random.choice(string.ascii_letters).upper(),
random.choice(string.ascii_letters).upper(),
random.choice(string.ascii_letters).upper(),
random.choice(string.ascii_letters).upper(),
last_ts
)
return code
def run():
monitor = xbmc.Monitor()
socket_client = None
logger.info("Starting! %s" % time.time() )
client_id = kodiutils.get_setting('client_id')
serverhost = kodiutils.get_setting("server_host")
if not client_id:
logger.info("No client_id found, generate a new one!" )
# generate a new client_id
client_id = generate_uuid()
kodiutils.set_setting('client_id', client_id)
logger.info("New client_id is ".format(client_id) )
logger.info("clientid {} and host {}".format(client_id, serverhost) )
logger.info("Try to connect..." )
socket_client = SocketClient( serverhost, onMessage, client_id = '/{}'.format(client_id), onLog = onLog, onConnect=onConnect, onDisconnect=onDisconnect)
socket_client.connect()
logger.info("connection established" )
# TODO: show notification for connection
while not monitor.abortRequested():
# Sleep/wait for abort for 5 seconds
if monitor.waitForAbort(5):
# Abort was requested while waiting. We should exit
break
logger.info("...going to disconnect and exit" )
socket_client.remove_event_listeners()
socket_client.disconnect()
| 2,448 | 0 | 150 |
cdf2573b00d17bc3a744d21d10c588b5f6b6c208 | 3,999 | py | Python | player.py | Ming-desu/POKEMING | 2def3b47e7c08b71885f14944bffe105a63cc12a | [
"MIT"
] | null | null | null | player.py | Ming-desu/POKEMING | 2def3b47e7c08b71885f14944bffe105a63cc12a | [
"MIT"
] | null | null | null | player.py | Ming-desu/POKEMING | 2def3b47e7c08b71885f14944bffe105a63cc12a | [
"MIT"
] | null | null | null | import random
from inventory import Inventory
from skill import Skill
from utility import Utility
# POKEMING - GON'NA CATCH 'EM ALL
# -- A simple hack 'n slash game in console
# -- This class is handles all player related things
# Default Constructor
# This prints the player's current stats
# This allows the player to attack an enemy
# <enemy> Enemy Object - The enemy to be attacked
# This function check if there is enough mana to cast the spell
# <mana_cost> Integer - The mana cost to cast the spell
# return boolean
# The function that allows the player to level up and gain some improvements
# This allows the player to take damage from an attack
# <value> Integer - The damage that the player will take
# This checks if the player is dead or not
# return boolean
# This allows the player to gain experience points and some coins after killing an enemy
# <enemy> Enemy Object - The enemy that was killed
# This allows the player to reflenish its mana in exchange with in-game coins
| 41.226804 | 132 | 0.615904 | import random
from inventory import Inventory
from skill import Skill
from utility import Utility
# POKEMING - GON'NA CATCH 'EM ALL
# -- A simple hack 'n slash game in console
# -- This class is handles all player related things
class Player:
# Default Constructor
def __init__(self):
# Public properties
self.name = input('Enter your character name: ')
self.attack_damage = random.randint(8, 12)
self.heal_amount = random.randint(8, 12)
self.max_health = 100
self.health = self.max_health
self.max_mana = 50
self.mana = self.max_mana
self.level = 1
self.experience_point = 0
self.max_experience_point = 100
self.inventory = Inventory()
self.skill = Skill(self)
# This prints the player's current stats
def stats(self):
print(f'Player name: {self.name} | LVL: {self.level} | Coins: {self.inventory.coins}')
print(f'HP: {self.health}/{self.max_health} | MP: {self.mana}/{self.max_mana}')
print(f'Exp: {self.experience_point}/{self.max_experience_point}')
# This allows the player to attack an enemy
# <enemy> Enemy Object - The enemy to be attacked
def attack(self, enemy):
# 5% chance for the player to miss the attack
if (miss_change := random.randint(0, 20)) == 1:
Utility.pause('Your attack missed!')
return
# 10% chance for the player to land a critical hit
if (x := random.randint(0, 10)) == 1:
enemy.damage(self.attack_damage + random.randint(0, 10))
Utility.pause('You landed a critical hit!')
return
# Damage the enemy
enemy.damage(self.attack_damage)
# This function check if there is enough mana to cast the spell
# <mana_cost> Integer - The mana cost to cast the spell
# return boolean
def has_enough_mana(self, mana_cost):
if self.mana > mana_cost:
self.mana -= mana_cost
return True
else:
Utility.pause('Not enough mana.')
return False
# The function that allows the player to level up and gain some improvements
def level_up(self):
# Check for the exp if it exceeds the needed exp points
if self.experience_point >= self.max_experience_point:
# Improve the health, mana, exp, attack damage of the player
self.level += 1
self.max_health += 10 + (random.randint(0, 10) * self.level)
self.health = self.max_health
self.max_mana += 10 + (random.randint(0, 10) * self.level)
self.mana = self.max_mana
self.experience_point -= self.max_experience_point
self.max_experience_point = round(self.max_experience_point * 1.5)
self.attack_damage += random.randint(0, 5)
# This allows the player to take damage from an attack
# <value> Integer - The damage that the player will take
def damage(self, value):
self.health -= round(value)
# This checks if the player is dead or not
# return boolean
def is_dead(self):
return True if self.health <= 0 else False
# This allows the player to gain experience points and some coins after killing an enemy
# <enemy> Enemy Object - The enemy that was killed
def killed_an_enemy(self, enemy):
self.experience_point += (random.randint(19, 40) * self.level)
self.inventory.coins += random.randint(0, 5)
self.level_up()
# This allows the player to reflenish its mana in exchange with in-game coins
def pots(self):
if self.inventory.coins - 3 <= 0:
Utility.pause('Not enough money')
return
self.inventory.coins -= 3
self.mana = self.mana + round(self.max_mana / 3) if self.mana + round(self.max_mana / 3) <= self.max_mana else self.max_mana | 2,653 | -8 | 265 |
f67bf9264ba8ea478956a636d641726ab2253966 | 1,932 | py | Python | client demo/python/client.py | ConanYu/CrawlService | 35af986f7d7360f2e7a6bdc02c2836e3c7cb4094 | [
"MIT"
] | 1 | 2021-10-19T01:14:19.000Z | 2021-10-19T01:14:19.000Z | client demo/python/client.py | ConanYu/CrawlService | 35af986f7d7360f2e7a6bdc02c2836e3c7cb4094 | [
"MIT"
] | 1 | 2022-03-04T04:17:13.000Z | 2022-03-04T04:17:13.000Z | client demo/python/client.py | ConanYu/CrawlService | 35af986f7d7360f2e7a6bdc02c2836e3c7cb4094 | [
"MIT"
] | 1 | 2021-06-05T09:17:10.000Z | 2021-06-05T09:17:10.000Z | import time
import grpc
import threading
from crawl_service import crawl_service_pb2
from crawl_service import crawl_service_pb2_grpc
if __name__ == '__main__':
start_new_thread(f, 'codeforces', 'ConanYu')
start_new_thread(f, 'codeforces', 'ConanYu')
start_new_thread(f, 'codeforces', 'ConanYu')
start_new_thread(f, 'codeforces', '????????')
start_new_thread(g, 'vjudge', 'ConanYu')
start_new_thread(f, 'atcoder', 'ConanYu')
start_new_thread(g, 'codeforces', 'ConanYu')
start_new_thread(h, 'nowcoder')
start_new_thread(h, 'leetcode')
start_new_thread(h, 'atcoder')
start_new_thread(h, 'codeforces')
time.sleep(5.0)
| 30.666667 | 86 | 0.659938 | import time
import grpc
import threading
from crawl_service import crawl_service_pb2
from crawl_service import crawl_service_pb2_grpc
def start_new_thread(func, *args, **kwargs):
class MyThead(threading.Thread):
def __init__(self):
super().__init__()
self.setDaemon(False)
def run(self) -> None:
try:
ans = func(*args, **kwargs)
print(ans)
except:
pass
MyThead().start()
def f(platform: str, handle: str):
with grpc.insecure_channel('localhost:9851') as channel:
stub = crawl_service_pb2_grpc.CrawlServiceStub(channel)
return stub.GetUserContestRecord(crawl_service_pb2.GetUserSubmitRecordRequest(
platform=platform,
handle=handle,
))
def g(platform: str, handle: str):
with grpc.insecure_channel('localhost:9851') as channel:
stub = crawl_service_pb2_grpc.CrawlServiceStub(channel)
return stub.GetUserSubmitRecord(crawl_service_pb2.GetUserSubmitRecordRequest(
platform=platform,
handle=handle,
))
def h(platform: str):
with grpc.insecure_channel('localhost:9851') as channel:
stub = crawl_service_pb2_grpc.CrawlServiceStub(channel)
return stub.GetRecentContest(crawl_service_pb2.GetRecentContestRequest(
platform=platform,
))
if __name__ == '__main__':
start_new_thread(f, 'codeforces', 'ConanYu')
start_new_thread(f, 'codeforces', 'ConanYu')
start_new_thread(f, 'codeforces', 'ConanYu')
start_new_thread(f, 'codeforces', '????????')
start_new_thread(g, 'vjudge', 'ConanYu')
start_new_thread(f, 'atcoder', 'ConanYu')
start_new_thread(g, 'codeforces', 'ConanYu')
start_new_thread(h, 'nowcoder')
start_new_thread(h, 'leetcode')
start_new_thread(h, 'atcoder')
start_new_thread(h, 'codeforces')
time.sleep(5.0)
| 1,171 | 0 | 92 |
71f51f6bfca2916922e19760b83e5bc05244f4b1 | 465 | py | Python | dftparse/wien2k/scf2_parser.py | CitrineInformatics/dft-parser | 539fbdafe4d6c6c6aa2434fdf93206d012f75dfc | [
"Apache-2.0"
] | 1 | 2021-04-10T13:09:30.000Z | 2021-04-10T13:09:30.000Z | dftparse/wien2k/scf2_parser.py | CitrineInformatics/dft-parser | 539fbdafe4d6c6c6aa2434fdf93206d012f75dfc | [
"Apache-2.0"
] | 1 | 2021-01-16T01:05:30.000Z | 2021-01-16T01:05:30.000Z | dftparse/wien2k/scf2_parser.py | CitrineInformatics/dft-parser | 539fbdafe4d6c6c6aa2434fdf93206d012f75dfc | [
"Apache-2.0"
] | null | null | null | from ..core import BlockParser
base_rules = [
(lambda x: ":GAP (global)" in x, _parse_bandgap)
]
class Scf2Parser(BlockParser):
"""Parser for Wien2k's .scf2 file"""
| 19.375 | 52 | 0.608602 | from ..core import BlockParser
def _parse_bandgap(line, lines):
bandgap = float(line.split()[6])
return {
'band gap': bandgap,
'band gap units': "eV"
}
base_rules = [
(lambda x: ":GAP (global)" in x, _parse_bandgap)
]
class Scf2Parser(BlockParser):
"""Parser for Wien2k's .scf2 file"""
def __init__(self, rules=base_rules):
BlockParser.__init__(self)
for rule in rules:
self.add_rule(rule)
| 237 | 0 | 50 |
6956e46dc35096b15353d8f7265d6a463913c523 | 157 | py | Python | pdtimeout/__init__.py | hugo-quantmetry/pandas-timeout | bc7e94afa1e533b7cd1472f455587436fea30855 | [
"Apache-2.0"
] | 2 | 2019-11-11T17:51:59.000Z | 2021-03-26T10:00:43.000Z | pdtimeout/__init__.py | hugo-quantmetry/pandas-timeout | bc7e94afa1e533b7cd1472f455587436fea30855 | [
"Apache-2.0"
] | null | null | null | pdtimeout/__init__.py | hugo-quantmetry/pandas-timeout | bc7e94afa1e533b7cd1472f455587436fea30855 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for pdtimeout."""
__author__ = """Hugo Perrier"""
__email__ = 'hperrier@quantmetry.com'
__version__ = '0.1.0'
| 19.625 | 38 | 0.649682 | # -*- coding: utf-8 -*-
"""Top-level package for pdtimeout."""
__author__ = """Hugo Perrier"""
__email__ = 'hperrier@quantmetry.com'
__version__ = '0.1.0'
| 0 | 0 | 0 |
2c1c949ef37005780640391e2f7ed9a5ed3c4b38 | 6,015 | py | Python | lib/modules/SQLMapInjector/__init__.py | mukeran/dinlas | bab6149fd69f3c12fd67ad03281b59182054fad6 | [
"MIT"
] | 2 | 2019-11-28T09:59:34.000Z | 2019-12-10T05:29:00.000Z | lib/modules/SQLMapInjector/__init__.py | mukeran/dinlas | bab6149fd69f3c12fd67ad03281b59182054fad6 | [
"MIT"
] | null | null | null | lib/modules/SQLMapInjector/__init__.py | mukeran/dinlas | bab6149fd69f3c12fd67ad03281b59182054fad6 | [
"MIT"
] | null | null | null | # coding:utf-8
import time
from urllib import parse
import logging
from .SQLMap import SQLMap, CONTENT_STATUS, CONTENT_TYPE
DEFAULT_LEVEL = 1
| 38.557692 | 117 | 0.510557 | # coding:utf-8
import time
from urllib import parse
import logging
from .SQLMap import SQLMap, CONTENT_STATUS, CONTENT_TYPE
DEFAULT_LEVEL = 1
class SQLMapInjector:
def __init__(self, results, reports, **kwargs):
report = {'title': 'SQL Injection',
'overview': 'SQL injection vulnerabilities allow an attacker to'
' alter the queries executed on the backend database. '
'An attacker may then be able to extract or modify informations stored in the database'
' or even escalate his privileges on the system.',
'entries': [], 'header': ['URL', 'method', 'Parameter', 'Type', 'Payload']}
self.sql_report = report
self.args = kwargs
self.reports = reports
self.results = results
self.sqlmap = SQLMap()
self.scanList = {}
self.running = []
self.vulnerable = []
@staticmethod
def meta():
return {
'name': 'SQL Injector for all',
'version': '1.0'
}
def launch(self):
self.sqlmap.launch()
def result(self):
"""
check all results of running scanning
:return:
"""
for task in self.running:
res = self.sqlmap.task("status", task["taskid"])
if res['status'] != 'running':
logging.info("[scan status: {}]: ID: {}".
format(res["status"], task["taskid"]))
task['status'] = res
task['log'] = self.sqlmap.task("log", task["taskid"])
task['data'] = self.sqlmap.task("data", task["taskid"])
logging.debug(task["url"])
self.parse_task(task)
# logging.debug(res)
task['log'] = res
self.running.remove(task)
def wait_result(self):
while self.running:
self.result()
logging.info("{} task still running".format(len(self.running)))
# logging.debug(self.running)
time.sleep(3)
self.sql_report['overview'] = 'Found {} Injections. <br>'.format(len(self.sql_report['entries'])) + \
self.sql_report['overview']
self.reports.append(self.sql_report)
logging.info("SQLMap Tasks are all finished !")
def parse_task(self, task):
if task['data']['data']:
self.vulnerable.append(task)
logging.critical('SQL injection found!')
logging.info(task['data'])
data = task['data']['data']
entries = []
for ent in data:
if ent['type'] == CONTENT_TYPE.TARGET:
continue
elif ent['type'] == CONTENT_TYPE.TECHNIQUES:
value = ent['value']
for vuln in value:
url = task['url']
method = task['option']['method']
parameter = vuln['parameter']
for d in vuln['data'].values():
title = d['title']
payload = d['payload']
entries.append([url, method, parameter, title, payload])
self.sql_report['entries'] += entries
rep = "%s" % task["log"]
if 'CRITICAL' in rep:
if 'all tested parameters do not appear to be injectable.' in rep:
# The detection process was error-free but didn't found a SQLi
logging.info('Not appear to be injectable.')
def flush_all(self):
self.sqlmap.admin("flush")
logging.debug("SQLMap tasks flushed.")
def add(self, form):
options = self.parse_form(form)
respond, taskid = self.sqlmap.scan(options)
url = form['url']
task = {"option": options, "taskid": taskid, "url": url}
self.scanList[url] = task
self.running.append(task)
def parse_form(self, form):
# option = {"url": url, "cookie": "PHPSESSID=muihhepaqno9bn31mhfrgstk00; security=low"}
option = {}
def value(field):
if 'default' in field and field['default']:
return field['default']
if field['type'] in ('text', 'username', 'password', 'hidden'):
return 'abc'
if field['type'] in ('email',):
return '123abc@abc.com'
if field['type'] in ('number', 'range'):
return '1'
if field['type'] in ('select',):
return field['values'][0]
if field['type'] in ('checkbox',):
if field['required']:
return 'on'
logging.error('Unknown input type {}'.format(field))
option['randomAgent'] = True
option['level'] = DEFAULT_LEVEL
option['url'] = form['url']
option['method'] = form['method']
if 'cookie' in self.args:
logging.debug('Cookie Set {}'.format(self.args['cookie']))
option['cookies'] = self.args['cookie']
if 'https' in option['url']:
option['forceSSL'] = True
if form['content-type'] == 'application/x-www-form-urlencoded':
data = {field['name']: value(field) for field in form['fields'].values() if value(field)}
if option['method'] == 'GET':
option['url'] = option['url'].split('?')[0]
option['url'] += '?' + parse.urlencode(data)
else:
option['data'] = parse.urlencode(data)
else:
logging.error("Unimplemented form encoding {} in url {}".format(form['content-type'], option['url']))
return option
def exec(self):
self.launch()
# logging.debug('requests: {}'.format(self.results))
for i in self.results['requests']:
self.add(i)
self.wait_result()
# logging.debug(report)
| 4,840 | 1,006 | 23 |
8725538a4a9284d4995693fbf51f7fae28d0ee58 | 1,920 | py | Python | models/hofm.py | nusdbsystem/ARM-Net | cafda2654b67ec51aeb8834d1995711f918db811 | [
"Apache-2.0"
] | 42 | 2021-06-30T12:36:43.000Z | 2022-03-22T17:14:05.000Z | models/hofm.py | nusdbsystem/ARM-Net | cafda2654b67ec51aeb8834d1995711f918db811 | [
"Apache-2.0"
] | null | null | null | models/hofm.py | nusdbsystem/ARM-Net | cafda2654b67ec51aeb8834d1995711f918db811 | [
"Apache-2.0"
] | 9 | 2021-07-01T07:40:50.000Z | 2022-03-21T09:18:10.000Z | import torch
from models.layers import Embedding, Linear, FactorizationMachine
class HOFMModel(torch.nn.Module):
"""
Model: Higher-Order Factorization Machines
Ref: M Blondel, et al. Higher-Order Factorization Machines, 2016.
"""
def forward(self, x):
"""
:param x: {'ids': LongTensor B*F, 'vals': FloatTensor B*F}
:return: y of size B, Regression and Classification (+sigmoid)
"""
x_emb = self.embedding(x) # B*F*Ex(order-1)
y = self.linear(x) + self.fm(x_emb[:, :, :self.nemb]) # B
for i in range(self.order-2):
emb = x_emb[:, :, (i+1)*self.nemb: (i+2)*self.nemb] # B*F*E
y += self.kernels[i](emb) # B
return y | 35.555556 | 82 | 0.526563 | import torch
from models.layers import Embedding, Linear, FactorizationMachine
class AnovaKernel(torch.nn.Module):
def __init__(self, order):
super().__init__()
self.order = order
def forward(self, x):
"""
:param x: FloatTensor B*F*E
"""
bsz, nfiled, nemb = x.shape
a_prev = torch.ones((bsz, nfiled+1, nemb), dtype=torch.float).to(x.device)
for order in range(self.order):
a = torch.zeros((bsz, nfiled+1, nemb), dtype=torch.float).to(x.device)
a[:, order+1:, :] += x[:, order:, :] * a_prev[:, order:-1, :]
a = torch.cumsum(a, dim=1)
a_prev = a
return torch.sum(a[:, -1, :], dim=-1) # B
class HOFMModel(torch.nn.Module):
"""
Model: Higher-Order Factorization Machines
Ref: M Blondel, et al. Higher-Order Factorization Machines, 2016.
"""
def __init__(self, nfeat, nemb, order):
super().__init__()
assert order >= 2, 'invalid order'
self.order = int(order)
self.nemb = nemb
self.embedding = Embedding(nfeat, nemb*(order-1))
self.linear = Linear(nfeat)
self.fm = FactorizationMachine(reduce_dim=True)
if order >= 3:
self.kernels = torch.nn.ModuleList([
AnovaKernel(order=i) for i in range(3, order+1)
])
def forward(self, x):
"""
:param x: {'ids': LongTensor B*F, 'vals': FloatTensor B*F}
:return: y of size B, Regression and Classification (+sigmoid)
"""
x_emb = self.embedding(x) # B*F*Ex(order-1)
y = self.linear(x) + self.fm(x_emb[:, :, :self.nemb]) # B
for i in range(self.order-2):
emb = x_emb[:, :, (i+1)*self.nemb: (i+2)*self.nemb] # B*F*E
y += self.kernels[i](emb) # B
return y | 506 | 575 | 50 |
97f49bfaccfe12bd9aa576beff581a747854924c | 544 | py | Python | 1348A Phoenix and Balance.py | YasirAhmad-EccentriX/CodeForces | d004b4d1b52a360ac6c06870e0a237345771e32c | [
"MIT"
] | 1 | 2021-01-29T16:30:09.000Z | 2021-01-29T16:30:09.000Z | 1348A Phoenix and Balance.py | YasirAhmad-EccentriX/CodeForces | d004b4d1b52a360ac6c06870e0a237345771e32c | [
"MIT"
] | null | null | null | 1348A Phoenix and Balance.py | YasirAhmad-EccentriX/CodeForces | d004b4d1b52a360ac6c06870e0a237345771e32c | [
"MIT"
] | null | null | null | for _ in range(int(input())):
n=int(input())
L=[2**(i+1) for i in range(n)] #I want an extra 1 @l[0] to make it less
w=[0,0]
L=[2**(i+1) for i in range(n)]
L[n//2 -1],L[-1]=L[-1],L[n//2 -1]
for i in range(n):
if i<(n//2):
w[0]+=L[i]
else:
w[1]+=L[i]
print(w[0]-w[1])
#Algorithm is based on following idea of rearranging
'''
print(l)
for i in range(0,n//2-1):
l[i],l[i+(n//2)]=l[i+(n//2)],l[i]
print(l)
print(l)
'''
| 25.904762 | 76 | 0.420956 | for _ in range(int(input())):
n=int(input())
L=[2**(i+1) for i in range(n)] #I want an extra 1 @l[0] to make it less
w=[0,0]
L=[2**(i+1) for i in range(n)]
L[n//2 -1],L[-1]=L[-1],L[n//2 -1]
for i in range(n):
if i<(n//2):
w[0]+=L[i]
else:
w[1]+=L[i]
print(w[0]-w[1])
#Algorithm is based on following idea of rearranging
'''
print(l)
for i in range(0,n//2-1):
l[i],l[i+(n//2)]=l[i+(n//2)],l[i]
print(l)
print(l)
'''
| 0 | 0 | 0 |
27ab4f6c00384f70c7dbad3e059681c17765dd37 | 6,649 | py | Python | util/angular_generator.py | mohrobati/react2angular | 2d73b0a6582f5bf8eef7b887c92e13a471eef125 | [
"MIT"
] | null | null | null | util/angular_generator.py | mohrobati/react2angular | 2d73b0a6582f5bf8eef7b887c92e13a471eef125 | [
"MIT"
] | null | null | null | util/angular_generator.py | mohrobati/react2angular | 2d73b0a6582f5bf8eef7b887c92e13a471eef125 | [
"MIT"
] | null | null | null | import esprima
import glob
import os
from copy import deepcopy
| 48.532847 | 140 | 0.575425 | import esprima
import glob
import os
from copy import deepcopy
class AngularGenerator:
def __init__(self):
self.appModuleHeader = """import { NgModule } from '@angular/core';\nimport { BrowserModule } from '@angular/platform-browser';\n"""
self.appModuleFooter = """
],
imports: [
BrowserModule,
],
providers: [],
bootstrap: [AppComponent]
})
export class AppModule { }
"""
self.allReactComponents = []
self.variables = {}
self.inputs = []
self.angularHTML = ""
def generateAppModule(self, allFiles):
components = [c.replace('angular', '.') for c in list(filter(self.isComponent, allFiles))]
componentNames = self.getComponentNames(components)
string = ""
for i in range(len(components)):
string += "import { " + componentNames[i] + " } from '" + components[i].replace(".ts", "") + "';\n"
string += """@NgModule({\n\tdeclarations: [\n\t\t"""
string += ",\n\t\t".join(componentNames)
f = open('angular/app.module.ts', "w")
f.write(self.appModuleHeader + string + self.appModuleFooter)
def getComponentNames(self, components):
componentNames = []
for component in components:
parts = component.split("/")
componentName = parts[len(parts) - 1].split(".")[0]
self.allReactComponents.append(componentName)
componentNames.append(componentName + "Component")
return componentNames
def isComponent(self, string):
return string.find('component.ts') > 0
def generateAngularComponent(self, parsedReactComponent, component):
currReactComponent = ""
angularComponent = "import { Component, Input } from '@angular/core';\n\n"
for element in parsedReactComponent.body:
if element.type == "VariableDeclaration":
self.variables[element.declarations[0].id.name] = element.declarations[0].init.value
elif element.type == "FunctionDeclaration" and element.id.name in self.allReactComponents:
currReactComponent = element.id.name
self.generateAngularComponent(element.body, False)
elif element.type == "ReturnStatement" and element.argument.type == "JSXElement":
self.parseJSX(element.argument)
if component:
angularComponent += self.getComponentArgs(currReactComponent)
angularComponent += self.generateAngularComponentSyntax(currReactComponent)
angularHTML = deepcopy(self.angularHTML)
self.angularHTML = ""
self.variables = {}
self.inputs = []
return angularComponent, angularHTML
def getComponentArgs(self, currReactComponent):
componentArgs = {}
componentArgs['templateUrl'] = './' + currReactComponent + '.component.html'
componentArgs['styleUrls'] = ['./' + currReactComponent + '.component.css']
if currReactComponent == 'App':
componentArgs['selector'] = 'app-root'
else:
componentArgs['selector'] = 'app-' + currReactComponent
return "@Component(" + str(componentArgs) + ")\n"
def generateAngularComponentSyntax(self, currReactComponent):
angularComponentSyntax = "export class " + currReactComponent + "Component {\n"
for input in self.inputs:
angularComponentSyntax += "@Input() " + input + ": string;\n"
angularComponentSyntax += "constructor() {\n"
for input in self.inputs:
if input in self.variables:
angularComponentSyntax += "\tthis." + input + "= '" + self.variables[input] + "';\n"
else:
angularComponentSyntax += "\tthis." + input + "= '';\n"
angularComponentSyntax += "}\n"
return angularComponentSyntax + "}"
def parseJSX(self, jsxElement):
if jsxElement.type == "JSXElement":
elementName = jsxElement.openingElement.name.name
if elementName in self.allReactComponents:
self.angularHTML += '<app-' + elementName
else:
self.angularHTML += '<' + elementName
for attribute in jsxElement.openingElement.attributes:
if attribute.value.expression:
if attribute.name.name == "style":
self.angularHTML += ' [' + attribute.name.name.replace("style", "ngStyle") + ']="'
else:
self.angularHTML += ' ' + attribute.name.name + '={{'
if attribute.value.expression.properties:
self.angularHTML += "{"
for p in attribute.value.expression.properties:
self.angularHTML += "'" + p.key.name + "'" + ": " + p.value.property.name + ' '
self.inputs.append(p.value.property.name)
self.angularHTML += '}"'
elif attribute.value.expression.object:
self.angularHTML += attribute.value.expression.property.name + '}} '
self.inputs.append(attribute.value.expression.property.name)
else:
self.angularHTML += attribute.value.expression.name + '}} '
self.inputs.append(attribute.value.expression.name)
else:
self.angularHTML += ' ' + attribute.name.name.replace("className",
"class") + '="' + attribute.value.value + '"'
if elementName in self.allReactComponents:
self.angularHTML += '></app-' + elementName
if jsxElement.openingElement.selfClosing and elementName not in self.allReactComponents:
self.angularHTML += '/>\n'
else:
self.angularHTML += '>\n'
for child in jsxElement.children:
if child.type == "JSXElement" or child.type == "JSXExpressionContainer" or child.type == "JSXText":
self.parseJSX(child)
if jsxElement.closingElement:
self.angularHTML += "</" + jsxElement.closingElement.name.name + ">\n"
elif jsxElement.type == "JSXExpressionContainer":
self.angularHTML += "{{" + jsxElement.expression.property.name + "}}"
self.inputs.append(jsxElement.expression.property.name)
elif jsxElement.type == "JSXText":
self.angularHTML += jsxElement.value.replace("\n", "").replace(" ", "")
| 6,344 | 2 | 239 |
bb37d42cef6fca003899ed07db9ebc425625699d | 849 | py | Python | arekit/common/folding/nofold.py | nicolay-r/AREk | 19c39ec0dc9a17464cade03b9c4da0c6d1d21191 | [
"MIT"
] | 18 | 2019-12-14T18:43:11.000Z | 2022-03-21T05:55:36.000Z | arekit/common/folding/nofold.py | nicolay-r/AREk | 19c39ec0dc9a17464cade03b9c4da0c6d1d21191 | [
"MIT"
] | 284 | 2020-08-08T20:52:44.000Z | 2022-03-31T05:26:20.000Z | arekit/common/folding/nofold.py | nicolay-r/AREk | 19c39ec0dc9a17464cade03b9c4da0c6d1d21191 | [
"MIT"
] | 1 | 2021-08-07T13:17:43.000Z | 2021-08-07T13:17:43.000Z | from arekit.common.folding.base import BaseDataFolding
class NoFolding(BaseDataFolding):
    """ The case of absent folding in experiment.
    """

    # NOTE(review): this appears to be a reduced copy of the full NoFolding
    # class; here get_current_state is exposed as a property, so callers
    # access it without parentheses — confirm which variant is canonical.
    @property
    def get_current_state(self):
        """ Returns in order to be compatible with cv-based experiment format.
        """
        return "0"
| 30.321429 | 102 | 0.652532 | from arekit.common.folding.base import BaseDataFolding
class NoFolding(BaseDataFolding):
    """Degenerate folding strategy: no train/test splitting is performed.

    Every known document id is assigned to the single supported data type.
    """

    def __init__(self, doc_ids_to_fold, supported_data_types):
        # Only one data type makes sense when nothing is actually folded.
        if len(supported_data_types) > 1:
            raise NotImplementedError("Experiments with such amount of data-types are not supported!")
        super(NoFolding, self).__init__(doc_ids_to_fold=doc_ids_to_fold,
                                        supported_data_types=supported_data_types)

    @property
    def Name(self):
        # Short identifier used when naming the experiment.
        return "na"

    def fold_doc_ids_set(self):
        # Single-entry mapping: the one data type gets every document id.
        only_type = self._supported_data_types[0]
        return {only_type: list(self._doc_ids_to_fold_set)}

    def get_current_state(self):
        """Fixed state tag, kept for compatibility with the cv-based
        experiment format.
        """
        return "0"
| 461 | 0 | 80 |
17a8f2bfba5059763f22ff7512e75cf2117b306d | 1,176 | py | Python | pushpy_examples/client/web/c_module.py | briangu/push-examples | 3acf00d9f63523010ee3b70f3117d1be686c3335 | [
"MIT"
] | null | null | null | pushpy_examples/client/web/c_module.py | briangu/push-examples | 3acf00d9f63523010ee3b70f3117d1be686c3335 | [
"MIT"
] | null | null | null | pushpy_examples/client/web/c_module.py | briangu/push-examples | 3acf00d9f63523010ee3b70f3117d1be686c3335 | [
"MIT"
] | null | null | null | import tornado.web
from pushpy_examples.client.ex_push_manager import ExamplePushManager
from client.simple_interpreter import Interpreter, Adder, Multiplier
m = ExamplePushManager()
m.connect()
repl_code_store = m.repl_code_store()
repl_code_store.update({
"interpreter.Interpreter": Interpreter,
"interpreter.math.Adder": Adder,
"interpreter.math.Multiplier": Multiplier
}, sync=True)
# curl -X POST -d'["add", "add", 1, 2, "mul", 3, 4]' -H 'Content-Type: application/json' localhost:11000/math
repl_code_store.set("/web/math", MathHandler, sync=True)
| 30.153846 | 109 | 0.684524 | import tornado.web
from pushpy_examples.client.ex_push_manager import ExamplePushManager
from client.simple_interpreter import Interpreter, Adder, Multiplier
m = ExamplePushManager()
m.connect()
repl_code_store = m.repl_code_store()
repl_code_store.update({
"interpreter.Interpreter": Interpreter,
"interpreter.math.Adder": Adder,
"interpreter.math.Multiplier": Multiplier
}, sync=True)
# curl -X POST -d'["add", "add", 1, 2, "mul", 3, 4]' -H 'Content-Type: application/json' localhost:11000/math
class MathHandler(tornado.web.RequestHandler):
    """Tornado handler that evaluates a JSON-encoded list of math ops.

    The POST body is a JSON list such as ["add", "add", 1, 2, "mul", 3, 4].
    It is evaluated twice and both results are written back: once through
    the distributed task manager, once by importing the interpreter
    directly from the replicated code store.
    """

    def post(self):
        # Imports are deliberately local: boot_common/repl_code_store are
        # only importable once the push runtime is up.
        import json
        from boot_common import local_tasks
        ops = json.loads(self.request.body.decode("utf-8"))
        # execute via the task manager
        r = local_tasks.apply("interpreter.Interpreter", ops)[0]
        self.write(f"task: {r}")
        self.write("\n")
        # execute via importing directly from the code store
        from repl_code_store.interpreter import Interpreter
        r = Interpreter().apply(ops=ops)[0]
        self.write(f"import: {r}")
        self.write("\n")
        self.finish()
repl_code_store.set("/web/math", MathHandler, sync=True)
| 529 | 25 | 48 |
c01980284e1fbdf9e095dced754b4ee3129e556e | 6,427 | py | Python | scheduler-bot.py | alexdiem/odp-scheduler | 440ce83d5b2416722218aff265df1e49aba3a714 | [
"MIT"
] | null | null | null | scheduler-bot.py | alexdiem/odp-scheduler | 440ce83d5b2416722218aff265df1e49aba3a714 | [
"MIT"
] | null | null | null | scheduler-bot.py | alexdiem/odp-scheduler | 440ce83d5b2416722218aff265df1e49aba3a714 | [
"MIT"
] | null | null | null | import json
import logger
import os
import random
from datetime import date
from dotenv import load_dotenv
from sys import exit
import discord
from discord.ext import commands, tasks
from discord.utils import get
log = logger.setup_applevel_logger(file_name = 'app_debug.log')
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
CHANNEL = os.getenv('SCHEDULER_CHANNEL')
CAPTAINS = os.getenv('CAPTAINS')
with open('POLL_OPTIONS', 'r') as f:
POLL_OPTIONS = eval(f.read())
RIDES = list(POLL_OPTIONS.values())
SCHEDULE = dict((ride, []) for ride in RIDES)
CAPTAINS_PER_RIDE = zip(RIDES, [1, 1, 2, 1, 1])
SCHEDULER_MSG = "@channel I'm a level 1 naive scheduling bot, and I make mistakes. " +\
"<@!766548029116907570> will help me fix it.\n"
# Instantiate bot
bot = commands.Bot(command_prefix='!')
async def manage_schedule(channel, msg_id):
"""Read reactions on last poll message
"""
log.debug('Running {}.'.format(manage_schedule.__name__))
await bot.wait_until_ready()
try:
msg = await channel.fetch_message(msg_id)
except discord.errors.NotFound:
log.error("Discord error: Message ID {} not found.".format(msg_id))
await bot.close()
exit()
log.debug("Got message with ID {}".format(msg_id))
reactions = msg.reactions
log.debug('Calling {} on channel.'.format(read_reactions.__name__))
avail = await read_reactions(reactions)
for ride, n_cap in CAPTAINS_PER_RIDE:
log.debug('Calling {} on channel for {}.'.
format(create_ride_schedule.__name__, ride))
captains = await create_ride_schedule(ride, n_cap, avail)
SCHEDULE[ride] = captains
log.debug('Calling {}.'.format(update_logs.__name__))
update_logs()
log.debug('Calling {} on channel.'.format(post_schedule.__name__))
await post_schedule(channel)
async def read_reactions(reactions):
"""Read reactions from scheduler poll
"""
log.debug('Running {}.'.format(read_reactions.__name__))
await bot.wait_until_ready()
log.debug('Getting availability from poll.')
emojis = list(POLL_OPTIONS.keys())
avail = dict((ride, []) for ride in RIDES)
for reaction in reactions:
ride_index = ''
try:
ride_index = emojis.index(reaction.emoji)
except ValueError:
log.error("Invalid reaction found: " + reaction.emoji)
continue
users = await reaction.users().flatten()
for user in users:
if not user.bot:
avail[RIDES[ride_index]].append(user)
log.debug('Availability is: {}'.format(
"\n".join(f'{k}: {users_to_names(v)}' for k,v in avail.items())
))
return avail
async def create_ride_schedule(ride, n_cap, avail):
"""Create road captain schedule
"""
log.debug('Running {}.'.format(create_ride_schedule.__name__))
await bot.wait_until_ready()
log.debug('Choosing road captains for {}'.format(ride))
captains = []
# choose randomly for now
for i in range(n_cap):
#captain = prioritise_absence(ride, avail)
try:
captain = random.choice(avail[ride])
except IndexError:
captain = None
captains.append(captain)
# don't pick same captain twice for one ride
if captain in avail[ride]:
avail[ride].remove(captain)
for s in avail.keys():
if captain in avail[s] and len(avail[s]) > 2:
log.debug('Scheduled {} on {}. Removing them from {}'.
format(captain.display_name, ride, s))
avail[s].remove(captain)
log.debug(
"Road captains for {} are {}".format(ride, users_to_names(captains))
)
return captains
def prioritise_absence(ride, avail):
"""Prioritise captains that have been absent longer than others
"""
log.debug('Running {}.'.format(prioritise_absence.__name__))
with open('schedule') as f:
schedule = [json.loads(line) for line in f]
print(schedule)
exit()
return None
def update_logs():
"""Update log files.
"""
log.debug('Running {}.'.format(update_logs.__name__))
# log schedule to file
log.debug('Saving schedule to log.')
printable_schedule = SCHEDULE.copy()
for ride in printable_schedule:
printable_schedule[ride] = users_to_names(printable_schedule[ride])
printable_schedule['date'] = str(date.today())
json_schedule = json.dumps(printable_schedule)
with open("schedule", 'a') as f:
f.write('\n' + json_schedule)
log.debug(json_schedule)
async def post_schedule(channel):
"""Post schedule to channel.
"""
log.debug('Running {}.'.format(create_ride_schedule.__name__))
await bot.wait_until_ready()
msg = SCHEDULER_MSG + "\nRoad captains for this week are"
schedule_post = SCHEDULE.copy()
for ride in schedule_post:
schedule_post[ride] = users_to_tags(schedule_post[ride])
for k, v in schedule_post.items():
msg += f"\n\n**{k}**\n" +\
"\n".join(f'{c}' for c in v)
log.debug('Send message to channel: \n{}'.format(msg))
m = await channel.send(msg)
def users_to_names(users):
"""Convert a list of Users to a list of user names (str).
"""
return [u.display_name if u is not None else '' for u in users]
def users_to_tags(users):
"""Convert a list of Users to a list of tags (str).
"""
return ["<@!{}>".format(u.id) if u is not None else '' for u in users]
def user_to_tag(user):
"""Convert a list of Users to a list of tags (str).
"""
return "<@!{}>".format(user.id) if user is not None else ''
@bot.event
async def on_ready():
"""Set up variables and logging
"""
log.debug('Running {}'.format(on_ready.__name__))
await bot.wait_until_ready()
log.debug('Logged in as {}'.format(bot.user.name))
channel = bot.get_channel(int(CHANNEL))
log.debug('Channel is {}'.format(channel))
with open("MESSAGE_ID", 'r') as f:
msg_id = int(f.readline())
log.debug('Read poll message with ID {}.'.format(msg_id))
log.debug('Calling {} on channel.'.format(manage_schedule.__name__))
await manage_schedule(channel, msg_id)
log.debug('Shutdown poll bot.')
await bot.close()
log.debug('Starting scheduler bot.')
bot.run(TOKEN) | 27.702586 | 87 | 0.641046 | import json
import logger
import os
import random
from datetime import date
from dotenv import load_dotenv
from sys import exit
import discord
from discord.ext import commands, tasks
from discord.utils import get
log = logger.setup_applevel_logger(file_name = 'app_debug.log')
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
CHANNEL = os.getenv('SCHEDULER_CHANNEL')
CAPTAINS = os.getenv('CAPTAINS')
with open('POLL_OPTIONS', 'r') as f:
POLL_OPTIONS = eval(f.read())
RIDES = list(POLL_OPTIONS.values())
SCHEDULE = dict((ride, []) for ride in RIDES)
CAPTAINS_PER_RIDE = zip(RIDES, [1, 1, 2, 1, 1])
SCHEDULER_MSG = "@channel I'm a level 1 naive scheduling bot, and I make mistakes. " +\
"<@!766548029116907570> will help me fix it.\n"
# Instantiate bot
bot = commands.Bot(command_prefix='!')
async def manage_schedule(channel, msg_id):
    """Read reactions on last poll message and build/post the week's schedule.

    Fetches the poll message ``msg_id`` from ``channel``, tallies
    availability from its reactions, picks captains per ride, persists the
    result via update_logs() and posts it back to the channel.  If the
    message is gone the bot shuts down and the process exits.
    """
    log.debug('Running {}.'.format(manage_schedule.__name__))
    await bot.wait_until_ready()
    try:
        msg = await channel.fetch_message(msg_id)
    except discord.errors.NotFound:
        log.error("Discord error: Message ID {} not found.".format(msg_id))
        await bot.close()
        exit()
    log.debug("Got message with ID {}".format(msg_id))
    reactions = msg.reactions
    log.debug('Calling {} on channel.'.format(read_reactions.__name__))
    avail = await read_reactions(reactions)
    # NOTE(review): CAPTAINS_PER_RIDE is a module-level zip (single-use
    # iterator) — a second call in the same process would see it empty.
    for ride, n_cap in CAPTAINS_PER_RIDE:
        log.debug('Calling {} on channel for {}.'.
                  format(create_ride_schedule.__name__, ride))
        captains = await create_ride_schedule(ride, n_cap, avail)
        SCHEDULE[ride] = captains
    log.debug('Calling {}.'.format(update_logs.__name__))
    update_logs()
    log.debug('Calling {} on channel.'.format(post_schedule.__name__))
    await post_schedule(channel)
async def read_reactions(reactions):
    """Read reactions from scheduler poll.

    Maps each reaction emoji to its ride via POLL_OPTIONS and collects the
    non-bot users who reacted.  Returns a dict ride-name -> list of Users;
    unknown emojis are logged and skipped.
    """
    log.debug('Running {}.'.format(read_reactions.__name__))
    await bot.wait_until_ready()
    log.debug('Getting availability from poll.')
    emojis = list(POLL_OPTIONS.keys())
    avail = dict((ride, []) for ride in RIDES)
    for reaction in reactions:
        ride_index = ''
        try:
            # Position of the emoji determines which ride it stands for.
            ride_index = emojis.index(reaction.emoji)
        except ValueError:
            log.error("Invalid reaction found: " + reaction.emoji)
            continue
        users = await reaction.users().flatten()
        for user in users:
            # Ignore the bot's own seed reactions.
            if not user.bot:
                avail[RIDES[ride_index]].append(user)
    log.debug('Availability is: {}'.format(
        "\n".join(f'{k}: {users_to_names(v)}' for k,v in avail.items())
    ))
    return avail
async def create_ride_schedule(ride, n_cap, avail):
    """Create road captain schedule for one ride.

    Picks ``n_cap`` captains at random from ``avail[ride]`` (None when no
    one is available).  Mutates ``avail``: a chosen captain is removed from
    this ride's pool, and also from other rides' pools that still have more
    than two candidates, to spread the load.
    """
    log.debug('Running {}.'.format(create_ride_schedule.__name__))
    await bot.wait_until_ready()
    log.debug('Choosing road captains for {}'.format(ride))
    captains = []
    # choose randomly for now
    for i in range(n_cap):
        #captain = prioritise_absence(ride, avail)
        try:
            captain = random.choice(avail[ride])
        except IndexError:
            # Empty pool: record an unfilled slot.
            captain = None
        captains.append(captain)
        # don't pick same captain twice for one ride
        if captain in avail[ride]:
            avail[ride].remove(captain)
        for s in avail.keys():
            # Keep at least two candidates available elsewhere.
            if captain in avail[s] and len(avail[s]) > 2:
                log.debug('Scheduled {} on {}. Removing them from {}'.
                          format(captain.display_name, ride, s))
                avail[s].remove(captain)
    log.debug(
        "Road captains for {} are {}".format(ride, users_to_names(captains))
    )
    return captains
def prioritise_absence(ride, avail):
    """Prioritise captains that have been absent longer than others.

    NOTE(review): unfinished stub — it loads the 'schedule' log, prints it
    and then calls exit() unconditionally, terminating the process.  Its
    only call site in create_ride_schedule is commented out; do not enable
    it until the selection logic is implemented.
    """
    log.debug('Running {}.'.format(prioritise_absence.__name__))
    with open('schedule') as f:
        # One JSON object (past week's schedule) per line.
        schedule = [json.loads(line) for line in f]
    print(schedule)
    exit()
    return None
def update_logs():
    """Update log files.

    Serializes the global SCHEDULE (captains replaced by display names,
    plus today's date) as one JSON line appended to the 'schedule' file —
    the same format prioritise_absence reads back.
    """
    log.debug('Running {}.'.format(update_logs.__name__))
    # log schedule to file
    log.debug('Saving schedule to log.')
    # Shallow copy is enough: values are replaced, not mutated in place.
    printable_schedule = SCHEDULE.copy()
    for ride in printable_schedule:
        printable_schedule[ride] = users_to_names(printable_schedule[ride])
    printable_schedule['date'] = str(date.today())
    json_schedule = json.dumps(printable_schedule)
    with open("schedule", 'a') as f:
        f.write('\n' + json_schedule)
    log.debug(json_schedule)
async def post_schedule(channel):
    """Post schedule to channel.

    Renders the global SCHEDULE as a Discord message (one bold section per
    ride, captains as @-mention tags) and sends it to ``channel``.
    """
    # Fix: previously logged create_ride_schedule.__name__ (copy-paste).
    log.debug('Running {}.'.format(post_schedule.__name__))
    await bot.wait_until_ready()
    msg = SCHEDULER_MSG + "\nRoad captains for this week are"
    # Shallow copy is enough: values are replaced, not mutated in place.
    schedule_post = SCHEDULE.copy()
    for ride in schedule_post:
        schedule_post[ride] = users_to_tags(schedule_post[ride])
    for k, v in schedule_post.items():
        msg += f"\n\n**{k}**\n" +\
               "\n".join(f'{c}' for c in v)
    log.debug('Send message to channel: \n{}'.format(msg))
    m = await channel.send(msg)
def users_to_names(users):
    """Map each discord User to its display name; ``None`` entries
    become the empty string.
    """
    names = []
    for user in users:
        names.append('' if user is None else user.display_name)
    return names
def users_to_tags(users):
    """Render each discord User as a mention tag string ("<@!id>");
    ``None`` entries become the empty string.
    """
    tags = []
    for user in users:
        tags.append('' if user is None else "<@!{}>".format(user.id))
    return tags
def user_to_tag(user):
    """Convert a single User to a mention tag (str).

    Fix: the previous docstring wrongly described a list conversion.

    Args:
        user: a discord User/Member exposing ``id``, or None.

    Returns:
        "<@!<id>>" for a real user, or '' when ``user`` is None.
    """
    return "<@!{}>".format(user.id) if user is not None else ''
@bot.event
async def on_ready():
    """Set up variables and logging.

    One-shot entry point: on login, read the poll message id persisted in
    the MESSAGE_ID file, run the full scheduling pass against the
    configured channel, then shut the bot down.
    """
    log.debug('Running {}'.format(on_ready.__name__))
    await bot.wait_until_ready()
    log.debug('Logged in as {}'.format(bot.user.name))
    channel = bot.get_channel(int(CHANNEL))
    log.debug('Channel is {}'.format(channel))
    # The poll bot wrote the message id of the last poll to this file.
    with open("MESSAGE_ID", 'r') as f:
        msg_id = int(f.readline())
    log.debug('Read poll message with ID {}.'.format(msg_id))
    log.debug('Calling {} on channel.'.format(manage_schedule.__name__))
    await manage_schedule(channel, msg_id)
    log.debug('Shutdown poll bot.')
    await bot.close()
log.debug('Starting scheduler bot.')
bot.run(TOKEN) | 0 | 0 | 0 |
6a9980ccddb6716529146982c0896ecad8ea967a | 4,833 | py | Python | aperturedb/ImageDownloader.py | aperture-data/aperturedb-python | 186ae09a474df8e2d90ecdc7ba81e81879cef3ea | [
"Apache-2.0"
] | 1 | 2022-01-12T17:46:20.000Z | 2022-01-12T17:46:20.000Z | aperturedb/ImageDownloader.py | aperture-data/aperturedb-python | 186ae09a474df8e2d90ecdc7ba81e81879cef3ea | [
"Apache-2.0"
] | 11 | 2021-07-14T16:54:05.000Z | 2022-03-30T14:34:34.000Z | aperturedb/ImageDownloader.py | aperture-data/aperturedb-python | 186ae09a474df8e2d90ecdc7ba81e81879cef3ea | [
"Apache-2.0"
] | null | null | null | import time
import requests
import os
from os import path
import cv2
import numpy as np
from aperturedb import ParallelLoader
from aperturedb import CSVParser
from aperturedb import ProgressBar
HEADER_PATH = "filename"
HEADER_URL = "url"
class ImageDownloaderCSV(CSVParser.CSVParser):
'''
ApertureDB Image Downloader.
Expects a csv file with AT LEAST a "url" column, and
optionally a "filename" field.
If "filename" is not present, it is taken from the url.
'''
| 27.617143 | 93 | 0.564039 | import time
import requests
import os
from os import path
import cv2
import numpy as np
from aperturedb import ParallelLoader
from aperturedb import CSVParser
from aperturedb import ProgressBar
HEADER_PATH = "filename"
HEADER_URL = "url"
class ImageDownloaderCSV(CSVParser.CSVParser):
    """CSV spec for the ApertureDB image downloader.

    The file must contain at least a "url" column.  An optional
    "filename" column overrides the destination path; otherwise the
    destination is derived from the url's last path segment.
    """

    def __init__(self, filename):
        # Flipped by validate() once a "filename" column is detected.
        self.has_filename = False
        super().__init__(filename)

    def __getitem__(self, idx):
        # Yield (source url, destination path) for one CSV row.
        row_url = self.df.loc[idx, HEADER_URL]
        if self.has_filename:
            destination = self.df.loc[idx, HEADER_PATH]
        else:
            destination = self.url_to_filename(row_url)
        return row_url, destination

    def url_to_filename(self, url):
        # Default destination: /tmp/images/<last path segment of url>.
        basename = url.split("/")[-1]
        return "/tmp/images/" + basename

    def validate(self):
        self.header = list(self.df.columns.values)
        if HEADER_URL not in self.header:
            raise Exception("Error with CSV file field: url. Must be a field")
        if HEADER_PATH in self.header:
            self.has_filename = True
class ImageDownloader(ParallelLoader.ParallelLoader):
    """Parallel downloader that fetches images listed by a generator
    (url, filename) and validates each download by decoding it with
    OpenCV.  Bad downloads are deleted and counted in error_counter.
    """

    def __init__(self, db=None, dry_run=False, n_download_retries=0, check_if_present=False):
        super().__init__(db, dry_run=dry_run)
        self.type = "image"
        # When True, skip urls whose file already exists and decodes.
        self.check_img = check_if_present
        self.images_already_downloaded = 0
        self.n_download_retries = n_download_retries

    def check_if_image_is_ok(self, filename, url):
        """Return True when ``filename`` exists and decodes as an image."""
        if not os.path.exists(filename):
            return False
        try:
            a = cv2.imread(filename)
            # cv2.imread returns None on failure; a.size then raises and
            # is handled below.
            if a.size <= 0:
                print("Image present but error reading it:", url)
                return False
        except Exception:  # fix: was a bare except (trapped SystemExit too)
            print("Image present but error decoding:", url)
            return False
        return True

    def download_image(self, url, filename):
        """Download one url to ``filename``, retrying and validating.

        Timing for the element (including a skipped one) is appended to
        times_arr; failures increment error_counter and remove the file.
        """
        start = time.time()
        if self.check_img and self.check_if_image_is_ok(filename, url):
            self.images_already_downloaded += 1
            self.times_arr.append(time.time() - start)
            return
        folder = os.path.dirname(filename)
        # fix: guard against filenames with no directory component —
        # os.makedirs('') raises.
        if folder and not os.path.exists(folder):
            os.makedirs(folder, exist_ok=True)
        retries = 0
        while True:
            imgdata = requests.get(url)
            if imgdata.ok:
                break
            else:
                if retries >= self.n_download_retries:
                    break
                print("WARNING: Retrying object:", url)
                retries += 1
                time.sleep(2)
        if imgdata.ok:
            # fix: use a context manager so the handle is closed on error.
            with open(filename, "wb") as fd:
                fd.write(imgdata.content)
            try:
                a = cv2.imread(filename)
                if a.size <= 0:
                    print("Downloaded image size error:", url)
                    os.remove(filename)
                    self.error_counter += 1
            except Exception:  # fix: was a bare except
                print("Downloaded image cannot be decoded:", url)
                os.remove(filename)
                self.error_counter += 1
        else:
            print("URL not found:", url)
            self.error_counter += 1
        self.times_arr.append(time.time() - start)

    def worker(self, thid, generator, start, end):
        """Thread body: download elements [start, end) of ``generator``.

        Only thread 0 drives the progress bar (when stats are enabled).
        """
        if thid == 0 and self.stats:
            pb = ProgressBar.ProgressBar()
        for i in range(start, end):
            url, filename = generator[i]
            self.download_image(url, filename)
            if thid == 0 and self.stats:
                pb.update((i - start) / (end - start))
        if thid == 0 and self.stats:
            pb.update(1)

    def print_stats(self):
        """Print per-image timing, throughput and error statistics."""
        print("====== ApertureDB ImageDownloader Stats ======")
        times = np.array(self.times_arr)
        if len(times) <= 0:
            print("Error: No downloads.")
            return
        if self.images_already_downloaded > 0:
            print("Images already present:", self.images_already_downloaded)
        print("Images downloaded:", len(times) - self.images_already_downloaded)
        print("Avg image time(s):", np.mean(times))
        print("Image time std:", np.std(times))
        print("Throughput (images/s)):",
              1 / np.mean(times) * self.numthreads)
        print("Total time(s):", self.ingestion_time)
        print("Overall throughput (img/s):",
              self.total_elements / self.ingestion_time)
        if self.error_counter > 0:
            print("Errors encountered:", self.error_counter)
        print("=============================================")
| 4,026 | 32 | 266 |
5e2b0bf1820fcac8822e91fbca28bbd5f5522477 | 2,541 | py | Python | tests/test_spawner/test_env_vars.py | gzcf/polyaxon | 77ac8838c6444a36541e6c28aba7ae42de392fee | [
"MIT"
] | null | null | null | tests/test_spawner/test_env_vars.py | gzcf/polyaxon | 77ac8838c6444a36541e6c28aba7ae42de392fee | [
"MIT"
] | null | null | null | tests/test_spawner/test_env_vars.py | gzcf/polyaxon | 77ac8838c6444a36541e6c28aba7ae42de392fee | [
"MIT"
] | null | null | null | from unittest import TestCase
from unittest.mock import MagicMock
import pytest
from kubernetes import client
from libs.api import API_KEY_NAME
from scheduler.spawners.templates.env_vars import (
get_env_var,
get_from_app_secret,
get_resources_env_vars,
get_service_env_vars
)
@pytest.mark.spawner_mark
| 36.826087 | 81 | 0.653286 | from unittest import TestCase
from unittest.mock import MagicMock
import pytest
from kubernetes import client
from libs.api import API_KEY_NAME
from scheduler.spawners.templates.env_vars import (
get_env_var,
get_from_app_secret,
get_resources_env_vars,
get_service_env_vars
)
@pytest.mark.spawner_mark
class TestEnvVars(TestCase):
    """Unit tests for the spawner env-var template helpers."""

    def test_env_vars(self):
        """get_env_var stringifies non-str values (ints via str, dicts as JSON)."""
        # String value
        env_var = get_env_var(name='foo', value='bar')
        assert env_var.name == 'foo'
        assert env_var.value == 'bar'

        # Int value
        env_var = get_env_var(name='foo', value=1)
        assert env_var.name == 'foo'
        assert env_var.value == '1'

        # Dict value
        env_var = get_env_var(name='foo', value={'moo': 'bar'})
        assert env_var.name == 'foo'
        assert env_var.value == '{"moo": "bar"}'

    def test_get_from_app_secret(self):
        """Secret-backed vars reference the secret through a V1EnvVarSource."""
        env_var = get_from_app_secret(key_name='foo',
                                      secret_key_name='secret_key',
                                      secret_ref_name='secret_ref')
        assert env_var.name == 'foo'
        assert isinstance(env_var.value_from, client.V1EnvVarSource)
        assert env_var.value_from.secret_key_ref.name == 'secret_ref'
        assert env_var.value_from.secret_key_ref.key == 'secret_key'

    def test_get_service_env_vars(self):
        """Service env vars carry namespace, secrets, broker/db passwords, API key."""
        env_vars = get_service_env_vars()
        assert len(env_vars) == 6
        env_var_names = [env_var.name for env_var in env_vars]
        assert 'POLYAXON_K8S_NAMESPACE' in env_var_names
        assert 'POLYAXON_SECRET_KEY' in env_var_names
        assert 'POLYAXON_INTERNAL_SECRET_TOKEN' in env_var_names
        assert 'POLYAXON_RABBITMQ_PASSWORD' in env_var_names
        assert 'POLYAXON_DB_PASSWORD' in env_var_names
        assert API_KEY_NAME in env_var_names

    def test_get_resources_env_vars(self):
        """With no GPU limits, NVIDIA_VISIBLE_DEVICES must be set to 'none'."""
        env_vars = get_resources_env_vars(None)
        assert any(item.name == 'NVIDIA_VISIBLE_DEVICES' and item.value == 'none'
                   for item in env_vars)

        resources = MagicMock()
        resources.gpu = None
        env_vars = get_resources_env_vars(resources)
        assert any(item.name == 'NVIDIA_VISIBLE_DEVICES' and item.value == 'none'
                   for item in env_vars)

        resources = MagicMock()
        resources.gpu.limits = '0'
        env_vars = get_resources_env_vars(resources)
        assert any(item.name == 'NVIDIA_VISIBLE_DEVICES' and item.value == 'none'
                   for item in env_vars)
| 2,081 | 7 | 129 |
21747df07c549e19382c22288039d178658f37f2 | 656 | py | Python | artssat/utils/plots.py | simonpf/pARTS | b4d9f4c2ceac594273c5589e44fe6a3a4f8d7028 | [
"MIT"
] | 3 | 2020-09-02T08:20:42.000Z | 2020-12-18T17:19:38.000Z | artssat/utils/plots.py | simonpf/pARTS | b4d9f4c2ceac594273c5589e44fe6a3a4f8d7028 | [
"MIT"
] | null | null | null | artssat/utils/plots.py | simonpf/pARTS | b4d9f4c2ceac594273c5589e44fe6a3a4f8d7028 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.reset_orig()
| 25.230769 | 66 | 0.53811 | import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.reset_orig()
def plot_psds(x, y, z, dz = None, chs = 0, ax = None):
    """Draw a stacked (ridgeline-style) plot of PSD curves.

    Each column ``y[:, i]`` is drawn as a filled curve vertically offset to
    ``z[i]``; the curve height is ``log10(y)`` clamped at 0 and scaled by
    ``dz``.  The x axis is set to log scale.

    Parameters:
        x: abscissa values; assumes len(x) == y.shape[0] — TODO confirm.
        y: 2-D array of curves, one per column.
        z: vertical offsets (one per column of y).
        dz: height of one log10 decade; defaults to mean z spacing / 8.
        chs: hue start passed to seaborn's cubehelix palette.
        ax: target matplotlib axes; defaults to the current axes.
    """
    if ax is None:
        ax = plt.gca()
    n = y.shape[1]
    pal = sns.cubehelix_palette(n, start = chs)
    if dz is None:
        dz = np.diff(z).mean() / 8.0
    # Draw back-to-front so lower curves overlap higher ones.
    # NOTE(review): range stops at 1, so column 0 is never drawn —
    # confirm this is intentional.
    for i in range(n - 1, 0, -1):
        y_0 = z[i]
        y_1 = np.maximum(np.log10(y[:, i]), 0) * dz + y_0
        # White outline, filled body, and a baseline in the palette color.
        ax.plot(x, y_1, c = "white", zorder = -i)
        ax.fill_between(x, y_1, y_0, color = pal[i], zorder = -i)
        ax.plot(x, y_0 * np.ones(x.size), c = pal[i], zorder = -i)
    ax.set_xscale("log")
    ax.grid(False)
| 543 | 0 | 23 |
12d931fb7f1b8dc76f52075e42fed7c7e04ddcf6 | 1,601 | py | Python | custom_components/xplora_watch/entity.py | Ludy87/xplora_watch | 85050d1bec061d7e6d11dd63e36fbddb296a0c12 | [
"MIT"
] | 7 | 2022-01-17T10:07:44.000Z | 2022-03-29T17:33:23.000Z | custom_components/xplora_watch/entity.py | Ludy87/xplora_watch | 85050d1bec061d7e6d11dd63e36fbddb296a0c12 | [
"MIT"
] | 17 | 2022-01-14T13:22:06.000Z | 2022-03-31T15:43:23.000Z | custom_components/xplora_watch/entity.py | Ludy87/xplora_watch | 85050d1bec061d7e6d11dd63e36fbddb296a0c12 | [
"MIT"
] | 4 | 2022-01-16T08:46:42.000Z | 2022-02-04T12:29:07.000Z | """Entity Xplora® Watch."""
from __future__ import annotations
import logging
from datetime import timedelta
from typing import Any, Dict
from homeassistant.components.switch import SwitchEntity
from homeassistant.helpers.restore_state import RestoreEntity
from .helper import XploraUpdateTime
from pyxplora_api import pyxplora_api_async as PXA
_LOGGER = logging.getLogger(__name__)
| 29.648148 | 72 | 0.637726 | """Entity Xplora® Watch."""
from __future__ import annotations
import logging
from datetime import timedelta
from typing import Any, Dict
from homeassistant.components.switch import SwitchEntity
from homeassistant.helpers.restore_state import RestoreEntity
from .helper import XploraUpdateTime
from pyxplora_api import pyxplora_api_async as PXA
_LOGGER = logging.getLogger(__name__)
class XploraSwitchEntity(XploraUpdateTime, SwitchEntity, RestoreEntity):
    """Home Assistant switch wrapping one raw Xplora® watch switch dict.

    Exposes the API switch's status as on/off plus its weekday-repeat
    flags as an extra state attribute.
    """

    def __init__(
        self,
        switch: Dict[str, Any],
        controller: PXA.PyXploraApi,
        scan_interval: timedelta,
        start_time: float,
        name: str,
        func_name: str,
        icon: str,
    ) -> None:
        super().__init__(scan_interval, start_time)
        _LOGGER.debug(f"init switch {func_name} {name}")
        self._controller: PXA.PyXploraApi = controller
        self._switch = switch
        self._attr_icon = icon
        self._attr_is_on = self._state(self._switch["status"])
        self._attr_name = name
        self._attr_unique_id = switch["id"]

    def _state(self, status: str) -> bool:
        """Map the API status string to a bool: only "DISABLE" means off."""
        # fix: collapse the if/return pair into the equivalent comparison.
        return status != "DISABLE"

    @property
    def extra_state_attributes(self) -> dict[str, Any]:
        """Return supported attributes."""
        # "weekRepeat" is a string of '0'/'1' flags; index 0 maps to "So".
        days = ["So", "Mo", "Di", "Mi", "Do", "Fr", "Sa"]
        weekRepeat = self._switch["weekRepeat"]
        # fix: zip instead of index loop — also tolerates flag strings
        # longer than 7 (previously an IndexError).
        weekDays = [day for day, flag in zip(days, weekRepeat) if flag == "1"]
        return {"Day(s)": ", ".join(weekDays)}
| 670 | 518 | 23 |
d4b4eb82d35cd281ad2c2012dc1cb465e3c69183 | 14,389 | py | Python | tests/apptrace/events_test.py | gaocegege/treadmill | 04325d319c0ee912c066f07b88b674e84485f154 | [
"Apache-2.0"
] | 2 | 2017-03-20T07:13:33.000Z | 2017-05-03T03:39:53.000Z | tests/apptrace/events_test.py | gaocegege/treadmill | 04325d319c0ee912c066f07b88b674e84485f154 | [
"Apache-2.0"
] | 12 | 2017-07-10T07:04:06.000Z | 2017-07-26T09:32:54.000Z | tests/apptrace/events_test.py | gaocegege/treadmill | 04325d319c0ee912c066f07b88b674e84485f154 | [
"Apache-2.0"
] | 2 | 2017-05-04T11:25:32.000Z | 2017-07-11T09:10:01.000Z | """
Unit test for Treadmill apptrace events module.
"""
import unittest
import mock
from treadmill.apptrace import events
class AppTraceEventsTest(unittest.TestCase):
    """Test all event classes operations.
    """

    # Fixture values shared by every per-event test below.
    _SOURCE = 'tests'
    _INSTANCEID = 'proid.foo#123'

    def _verify_event(self, event_cls, event_type, event_data, fields,
                      timestamp=1, from_data=None):
        """Round-trip one event class through to_dict/to_data/from_data.

        ``fields`` holds the event-specific constructor kwargs, which are
        also expected verbatim in the to_dict() result.  ``event_data`` is
        the serialized form expected from to_data(); ``from_data``
        overrides the serialized form fed back into from_data() when the
        two differ (e.g. DeletedTraceEvent ignores event_data).
        """
        payload = {'foo': 'bar'}
        common = dict(
            timestamp=timestamp,
            source=self._SOURCE,
            instanceid=self._INSTANCEID,
            payload=payload,
        )
        event = event_cls(**dict(common, **fields))

        self.assertEqual(
            event.to_dict(),
            dict(common, event_type=event_type, **fields),
        )
        self.assertEqual(
            event.to_data(),
            (timestamp, self._SOURCE, self._INSTANCEID,
             event_type, event_data, payload),
        )
        self.assertEqual(
            event,
            event_cls.from_data(
                event_type=event_type,
                event_data=event_data if from_data is None else from_data,
                **common,
            ),
        )

    @mock.patch('treadmill.apptrace.events.AbortedTraceEvent.from_data',
                mock.Mock(set_spec=True))
    @mock.patch('treadmill.apptrace.events.ConfiguredTraceEvent.from_data',
                mock.Mock(set_spec=True))
    @mock.patch('treadmill.apptrace.events.DeletedTraceEvent.from_data',
                mock.Mock(set_spec=True))
    @mock.patch('treadmill.apptrace.events.FinishedTraceEvent.from_data',
                mock.Mock(set_spec=True))
    @mock.patch('treadmill.apptrace.events.KilledTraceEvent.from_data',
                mock.Mock(set_spec=True))
    @mock.patch('treadmill.apptrace.events.PendingTraceEvent.from_data',
                mock.Mock(set_spec=True))
    @mock.patch('treadmill.apptrace.events.ScheduledTraceEvent.from_data',
                mock.Mock(set_spec=True))
    @mock.patch(('treadmill.apptrace.events'
                 '.ServiceExitedTraceEvent.from_data'),
                mock.Mock(set_spec=True))
    @mock.patch(('treadmill.apptrace.events'
                 '.ServiceRunningTraceEvent.from_data'),
                mock.Mock(set_spec=True))
    def test_factory(self):
        """Test class factory operations.
        """
        cases = (
            (events.ScheduledTraceEvent,
             dict(timestamp=1, source='tests', instanceid='proid.foo#123',
                  event_type='scheduled', event_data='here',
                  payload={'foo': 'bar'})),
            (events.PendingTraceEvent,
             dict(timestamp=2, source='tests', instanceid='proid.foo#123',
                  event_type='pending', event_data=None,
                  payload={'foo': 'bar'})),
        )
        for event_cls, kwargs in cases:
            # The factory must dispatch to the per-type from_data.
            events.AppTraceEvent.from_data(**kwargs)
            event_cls.from_data.assert_called_with(**kwargs)

    def test_factory_bad_event(self):
        """Tests that failure to parse the event returns None.
        """
        # Unknown type, and a known type with unparsable (None) event_data.
        for bad_type in ('does_not_exists', 'service_running'):
            self.assertIsNone(events.AppTraceEvent.from_data(
                timestamp=2,
                source=self._SOURCE,
                instanceid=self._INSTANCEID,
                event_type=bad_type,
                event_data=None,
                payload={'foo': 'bar'},
            ))

    def test_scheduled(self):
        """Scheduled event operations.
        """
        self._verify_event(
            events.ScheduledTraceEvent, 'scheduled', 'here:because',
            {'where': 'here', 'why': 'because'})

    def test_pending(self):
        """Pending event operations.
        """
        self._verify_event(
            events.PendingTraceEvent, 'pending', 'created',
            {'why': 'created'}, timestamp=2)

    def test_configured(self):
        """Configured event operations.
        """
        self._verify_event(
            events.ConfiguredTraceEvent, 'configured', 'AAAA',
            {'uniqueid': 'AAAA'})

    def test_deleted(self):
        """Deleted event operations.
        """
        # to_data() serializes to an empty string, while from_data()
        # ignores the event_data value entirely.
        self._verify_event(
            events.DeletedTraceEvent, 'deleted', '',
            {}, from_data='not used')

    def test_finished(self):
        """Finished event operations.
        """
        self._verify_event(
            events.FinishedTraceEvent, 'finished', '1.2',
            {'rc': 1, 'signal': 2})

    def test_aborted(self):
        """Aborted event operations.
        """
        self._verify_event(
            events.AbortedTraceEvent, 'aborted', 'reason',
            {'why': 'reason'})

    def test_killed(self):
        """Killed event operations.
        """
        self._verify_event(
            events.KilledTraceEvent, 'killed', 'oom',
            {'is_oom': True})

    def test_service_running(self):
        """ServiceRunning event operations.
        """
        self._verify_event(
            events.ServiceRunningTraceEvent, 'service_running',
            'AAAA.web.web', {'uniqueid': 'AAAA', 'service': 'web.web'})

    def test_service_exited(self):
        """ServiceExited event operations.
        """
        self._verify_event(
            events.ServiceExitedTraceEvent, 'service_exited',
            'AAAA.web.x.1.2',
            {'uniqueid': 'AAAA', 'service': 'web.x', 'rc': 1, 'signal': 2})


if __name__ == '__main__':
    unittest.main()
"""
Unit test for Treadmill apptrace events module.
"""
import unittest
import mock
from treadmill.apptrace import events
class AppTraceEventsTest(unittest.TestCase):
    """Test all event classes operations.
    """

    # Fixture values shared by every per-event test below.
    _SOURCE = 'tests'
    _INSTANCEID = 'proid.foo#123'

    def _verify_event(self, event_cls, event_type, event_data, fields,
                      timestamp=1, from_data=None):
        """Round-trip one event class through to_dict/to_data/from_data.

        ``fields`` holds the event-specific constructor kwargs, which are
        also expected verbatim in the to_dict() result.  ``event_data`` is
        the serialized form expected from to_data(); ``from_data``
        overrides the serialized form fed back into from_data() when the
        two differ (e.g. DeletedTraceEvent ignores event_data).
        """
        payload = {'foo': 'bar'}
        common = dict(
            timestamp=timestamp,
            source=self._SOURCE,
            instanceid=self._INSTANCEID,
            payload=payload,
        )
        event = event_cls(**dict(common, **fields))

        self.assertEqual(
            event.to_dict(),
            dict(common, event_type=event_type, **fields),
        )
        self.assertEqual(
            event.to_data(),
            (timestamp, self._SOURCE, self._INSTANCEID,
             event_type, event_data, payload),
        )
        self.assertEqual(
            event,
            event_cls.from_data(
                event_type=event_type,
                event_data=event_data if from_data is None else from_data,
                **common,
            ),
        )

    @mock.patch('treadmill.apptrace.events.AbortedTraceEvent.from_data',
                mock.Mock(set_spec=True))
    @mock.patch('treadmill.apptrace.events.ConfiguredTraceEvent.from_data',
                mock.Mock(set_spec=True))
    @mock.patch('treadmill.apptrace.events.DeletedTraceEvent.from_data',
                mock.Mock(set_spec=True))
    @mock.patch('treadmill.apptrace.events.FinishedTraceEvent.from_data',
                mock.Mock(set_spec=True))
    @mock.patch('treadmill.apptrace.events.KilledTraceEvent.from_data',
                mock.Mock(set_spec=True))
    @mock.patch('treadmill.apptrace.events.PendingTraceEvent.from_data',
                mock.Mock(set_spec=True))
    @mock.patch('treadmill.apptrace.events.ScheduledTraceEvent.from_data',
                mock.Mock(set_spec=True))
    @mock.patch(('treadmill.apptrace.events'
                 '.ServiceExitedTraceEvent.from_data'),
                mock.Mock(set_spec=True))
    @mock.patch(('treadmill.apptrace.events'
                 '.ServiceRunningTraceEvent.from_data'),
                mock.Mock(set_spec=True))
    def test_factory(self):
        """Test class factory operations.
        """
        cases = (
            (events.ScheduledTraceEvent,
             dict(timestamp=1, source='tests', instanceid='proid.foo#123',
                  event_type='scheduled', event_data='here',
                  payload={'foo': 'bar'})),
            (events.PendingTraceEvent,
             dict(timestamp=2, source='tests', instanceid='proid.foo#123',
                  event_type='pending', event_data=None,
                  payload={'foo': 'bar'})),
        )
        for event_cls, kwargs in cases:
            # The factory must dispatch to the per-type from_data.
            events.AppTraceEvent.from_data(**kwargs)
            event_cls.from_data.assert_called_with(**kwargs)

    def test_factory_bad_event(self):
        """Tests that failure to parse the event returns None.
        """
        # Unknown type, and a known type with unparsable (None) event_data.
        for bad_type in ('does_not_exists', 'service_running'):
            self.assertIsNone(events.AppTraceEvent.from_data(
                timestamp=2,
                source=self._SOURCE,
                instanceid=self._INSTANCEID,
                event_type=bad_type,
                event_data=None,
                payload={'foo': 'bar'},
            ))

    def test_scheduled(self):
        """Scheduled event operations.
        """
        self._verify_event(
            events.ScheduledTraceEvent, 'scheduled', 'here:because',
            {'where': 'here', 'why': 'because'})

    def test_pending(self):
        """Pending event operations.
        """
        self._verify_event(
            events.PendingTraceEvent, 'pending', 'created',
            {'why': 'created'}, timestamp=2)

    def test_configured(self):
        """Configured event operations.
        """
        self._verify_event(
            events.ConfiguredTraceEvent, 'configured', 'AAAA',
            {'uniqueid': 'AAAA'})

    def test_deleted(self):
        """Deleted event operations.
        """
        # to_data() serializes to an empty string, while from_data()
        # ignores the event_data value entirely.
        self._verify_event(
            events.DeletedTraceEvent, 'deleted', '',
            {}, from_data='not used')

    def test_finished(self):
        """Finished event operations.
        """
        self._verify_event(
            events.FinishedTraceEvent, 'finished', '1.2',
            {'rc': 1, 'signal': 2})

    def test_aborted(self):
        """Aborted event operations.
        """
        self._verify_event(
            events.AbortedTraceEvent, 'aborted', 'reason',
            {'why': 'reason'})

    def test_killed(self):
        """Killed event operations.
        """
        self._verify_event(
            events.KilledTraceEvent, 'killed', 'oom',
            {'is_oom': True})

    def test_service_running(self):
        """ServiceRunning event operations.
        """
        self._verify_event(
            events.ServiceRunningTraceEvent, 'service_running',
            'AAAA.web.web', {'uniqueid': 'AAAA', 'service': 'web.web'})

    def test_service_exited(self):
        """ServiceExited event operations.
        """
        self._verify_event(
            events.ServiceExitedTraceEvent, 'service_exited',
            'AAAA.web.x.1.2',
            {'uniqueid': 'AAAA', 'service': 'web.x', 'rc': 1, 'signal': 2})


if __name__ == '__main__':
    unittest.main()
| 0 | 0 | 0 |
8afd7dfb3a0429d776c6f9286411c593352a5aeb | 1,382 | py | Python | texext/tests/test_custom_plotdirective.py | ddeka2910/texext | 2e99f513b188468c9ec356a0ea1fe0622c5caab2 | [
"BSD-2-Clause"
] | 4 | 2015-10-10T05:12:58.000Z | 2020-06-26T11:54:34.000Z | texext/tests/test_custom_plotdirective.py | ddeka2910/texext | 2e99f513b188468c9ec356a0ea1fe0622c5caab2 | [
"BSD-2-Clause"
] | 10 | 2018-04-16T23:06:04.000Z | 2021-02-19T23:53:56.000Z | texext/tests/test_custom_plotdirective.py | ddeka2910/texext | 2e99f513b188468c9ec356a0ea1fe0622c5caab2 | [
"BSD-2-Clause"
] | 5 | 2019-10-23T21:14:31.000Z | 2020-12-11T16:11:16.000Z | """ Tests for plotdirective build using sphinx extensions
Test ability to combine plot_directive with mathcode
"""
from os.path import dirname, join as pjoin
import re
import sphinx
SPHINX_1p8 = sphinx.version_info[:2] >= (1, 8)
from sphinxtesters import PageBuilder
from texext.tests.test_plotdirective import EXP_PLOT_AND_MATH
PAGES = pjoin(dirname(__file__), 'plotdirective')
""" Tests for plotdirective build using sphinx extensions
Test ability to combine plot_directive with mathcode
"""
from os.path import dirname, join as pjoin
import re
import sphinx
SPHINX_1p8 = sphinx.version_info[:2] >= (1, 8)
from sphinxtesters import PageBuilder
from texext.tests.test_plotdirective import EXP_PLOT_AND_MATH
PAGES = pjoin(dirname(__file__), 'plotdirective')
class TestCustomPlotDirective(PageBuilder):
    """Test build and output of custom_plotdirective project."""

    page_source_template = PAGES

    @classmethod
    def modify_pages(cls):
        """Rewrite conf.py to use the project-local plot_directive copy.

        Swaps the matplotlib plot_directive extension for the local
        ``plot_directive`` module and appends the setup code that wires it
        into mathcode before the pages are built.
        """
        conf_fname = pjoin(cls.page_source, 'conf.py')
        with open(conf_fname, 'rt') as fobj:
            contents = fobj.read()
        contents = contents.replace(
            "'matplotlib.sphinxext.plot_directive'",
            '"plot_directive"')
        # BUG FIX: the appended snippet carried stray "< " diff-marker
        # prefixes, which would have made the generated conf.py invalid
        # Python and broken the build.
        contents += """
# Use custom plot_directive
sys.path.insert(0, abspath(pjoin('.')))
import plot_directive
mathcode_plot_directive = plot_directive
"""
        with open(conf_fname, 'wt') as fobj:
            fobj.write(contents)

    def test_plot_and_math(self):
        """The built page must contain the expected plot + math doctree."""
        doctree = self.get_doctree('plot_and_math')
        assert len(doctree.document) == 1
        tree_str = self.doctree2str(doctree)
        # Sphinx by 1.3 adds "highlight_args={}", Sphinx at 1.1.3 does not
        assert re.compile(EXP_PLOT_AND_MATH).search(tree_str)
| 787 | 186 | 23 |
8d2a862b1850a0c56f325abe1d1cac2277162762 | 800 | py | Python | src/sprites/grouping.py | mgear2/undervoid | 6c91a5786d29d766223831190952fd90ddc6a1e8 | [
"MIT"
] | 1 | 2020-08-29T06:41:03.000Z | 2020-08-29T06:41:03.000Z | src/sprites/grouping.py | mgear2/undervoid | 6c91a5786d29d766223831190952fd90ddc6a1e8 | [
"MIT"
] | 10 | 2019-07-15T05:15:38.000Z | 2020-11-25T03:14:03.000Z | src/sprites/grouping.py | mgear2/undervoid | 6c91a5786d29d766223831190952fd90ddc6a1e8 | [
"MIT"
] | 1 | 2020-11-22T08:25:26.000Z | 2020-11-22T08:25:26.000Z | # Copyright (c) 2020
# [This program is licensed under the "MIT License"]
# Please see the file LICENSE in the source
# distribution of this software for license terms.
import pygame as pg
# Copyright (c) 2020
# [This program is licensed under the "MIT License"]
# Please see the file LICENSE in the source
# distribution of this software for license terms.
import pygame as pg
class Grouping:
    """Bundle of all pygame sprite groups used by the game."""

    def __init__(self):
        """Create the layered draw group plus every logical sprite group."""
        # Layered group used for drawing everything in z-order.
        self.all_sprites = pg.sprite.LayeredUpdates()
        # Plain groups, created in the same order as before.
        for group_name in (
            "legs_sprite", "walls", "rifts", "stops_bullets", "mobs",
            "bullets", "graves", "items", "player_sprite", "cursor_sprite",
            "weaponvfx_sprite", "spawners",
        ):
            setattr(self, group_name, pg.sprite.Group())
| 566 | -6 | 49 |
99b51af7c93df2be5b309b548a2ceb77eedf5aaa | 2,090 | py | Python | aroma/tests/test_features.py | matthewcarlucci/aroma | 51664dcc01caef42124e11bc3c334078fed749a3 | [
"Apache-2.0"
] | 5 | 2021-08-31T17:36:59.000Z | 2021-12-13T04:51:56.000Z | aroma/tests/test_features.py | matthewcarlucci/aroma | 51664dcc01caef42124e11bc3c334078fed749a3 | [
"Apache-2.0"
] | 33 | 2020-11-10T15:50:41.000Z | 2021-01-15T17:10:46.000Z | aroma/tests/test_features.py | matthewcarlucci/aroma | 51664dcc01caef42124e11bc3c334078fed749a3 | [
"Apache-2.0"
] | 6 | 2021-02-09T22:39:43.000Z | 2021-12-08T15:13:17.000Z | """Tests for the features module."""
import numpy as np
from aroma import features
def test_feature_time_series(mel_mix, mc, max_correls):
    """Test the feature_time_series feature against pre-calculated values."""
    np.random.seed(1)
    # Melodic mixing matrix loaded from the fixture path.
    mixing = np.loadtxt(mel_mix)

    # Plain run: result must match the stored reference values.
    computed, _ = features.feature_time_series(mixing, mc)
    reference = np.load(max_correls)
    assert np.allclose(reference, computed, atol=1e-2)

    # Run again with metric metadata and check the metric is registered.
    computed, updated_metadata = features.feature_time_series(
        mixing,
        mc,
        metric_metadata={},
    )
    assert "max_RP_corr" in updated_metadata.keys()
def test_feature_frequency(mel_FT_mix, HFC):
    """Test the feature_frequency feature against pre-calculated values."""
    np.random.seed(1)
    # Fourier-transformed mixing matrix loaded from the fixture path.
    ft_mixing = np.loadtxt(mel_FT_mix)

    # Plain run: result must match the stored reference values.
    computed, _ = features.feature_frequency(ft_mixing, TR=2)
    reference = np.load(HFC)
    assert np.allclose(reference, computed)

    # Run again with metric metadata and check the metric is registered.
    computed, updated_metadata = features.feature_frequency(
        ft_mixing,
        TR=2,
        metric_metadata={},
    )
    assert "HFC" in updated_metadata.keys()
def test_feature_spatial(mel_IC, edgeFract, csfFract):
    """Test the feature_spatial features against pre-calculated values."""
    np.random.seed(1)

    # Plain run: both fractions must match the stored reference values.
    edge_calc, csf_calc, _ = features.feature_spatial(mel_IC)
    assert np.allclose(np.load(edgeFract), edge_calc)
    assert np.allclose(np.load(csfFract), csf_calc)

    # Run again with metric metadata and check both metrics are registered.
    edge_calc, csf_calc, updated_metadata = features.feature_spatial(mel_IC, {})
    assert "edge_fract" in updated_metadata.keys()
    assert "csf_fract" in updated_metadata.keys()
"""Tests for the features module."""
import numpy as np
from aroma import features
def test_feature_time_series(mel_mix, mc, max_correls):
    """Test the feature_time_series feature against pre-calculated values."""
    np.random.seed(1)
    # Melodic mixing matrix loaded from the fixture path.
    mixing = np.loadtxt(mel_mix)

    # Plain run: result must match the stored reference values.
    computed, _ = features.feature_time_series(mixing, mc)
    reference = np.load(max_correls)
    assert np.allclose(reference, computed, atol=1e-2)

    # Run again with metric metadata and check the metric is registered.
    computed, updated_metadata = features.feature_time_series(
        mixing,
        mc,
        metric_metadata={},
    )
    assert "max_RP_corr" in updated_metadata.keys()
def test_feature_frequency(mel_FT_mix, HFC):
    """Test the feature_frequency feature against pre-calculated values."""
    np.random.seed(1)
    # Fourier-transformed mixing matrix loaded from the fixture path.
    ft_mixing = np.loadtxt(mel_FT_mix)

    # Plain run: result must match the stored reference values.
    computed, _ = features.feature_frequency(ft_mixing, TR=2)
    reference = np.load(HFC)
    assert np.allclose(reference, computed)

    # Run again with metric metadata and check the metric is registered.
    computed, updated_metadata = features.feature_frequency(
        ft_mixing,
        TR=2,
        metric_metadata={},
    )
    assert "HFC" in updated_metadata.keys()
def test_feature_spatial(mel_IC, edgeFract, csfFract):
    """Test the feature_spatial features against pre-calculated values."""
    np.random.seed(1)

    # Plain run: both fractions must match the stored reference values.
    edge_calc, csf_calc, _ = features.feature_spatial(mel_IC)
    assert np.allclose(np.load(edgeFract), edge_calc)
    assert np.allclose(np.load(csfFract), csf_calc)

    # Run again with metric metadata and check both metrics are registered.
    edge_calc, csf_calc, updated_metadata = features.feature_spatial(mel_IC, {})
    assert "edge_fract" in updated_metadata.keys()
    assert "csf_fract" in updated_metadata.keys()
| 0 | 0 | 0 |
10267abea5abb4d14f2499ec05f13ead7e1b752b | 8,263 | py | Python | tests/animations/test_spinners.py | jhonatan-lopes/alive-progress | 3c5cc00e8b29e1813480292a2f5bb2d6414f2932 | [
"MIT"
] | 1 | 2022-02-09T08:20:50.000Z | 2022-02-09T08:20:50.000Z | tests/animations/test_spinners.py | DanteRARA/alive-progress | 8a9d246787a8c65b2bf1722a5ab30eddfb5c8d33 | [
"MIT"
] | null | null | null | tests/animations/test_spinners.py | DanteRARA/alive-progress | 8a9d246787a8c65b2bf1722a5ab30eddfb5c8d33 | [
"MIT"
] | null | null | null | import pytest
from alive_progress.animations.spinners import alongside_spinner_factory, \
bouncing_spinner_factory, delayed_spinner_factory, frame_spinner_factory, \
scrolling_spinner_factory, sequential_spinner_factory
from alive_progress.utils.cells import join_cells
@pytest.mark.parametrize('frames, expected', [
('a\nb', (('a', ' ', 'b'),)),
('abc', (('a', 'b', 'c'),)),
(('a\nb', '\nc '), (('a b', ' c '),)),
(('a ', ' b ', ' c'), (('a ', ' b ', ' c'),)),
(('a', '(a)', ' (*) '), (('aaaaa', '(a)(a', ' (*) '),)),
(('ok', '😺😺'), (('okok', '😺😺'),)),
])
@pytest.mark.parametrize('length, block, background, right, hide, expected', [
(None, None, ' ', True, True, ((' ', 'c ', 'bc ', 'abc', ' ab', ' a'),)),
(None, None, ' ', False, True, ((' ', ' a', ' ab', 'abc', 'bc ', 'c '),)),
(None, None, ' ', True, False, (('abc', 'cab', 'bca'),)),
(None, None, ' ', False, False, (('abc', 'bca', 'cab'),)),
(2, None, '~', True, True, (('~~', 'c~', 'bc', 'ab', '~a'),)),
(2, None, '~', True, False, (('bc', 'ab', 'ca'),)),
(2, None, '~', False, True, (('~~', '~a', 'ab', 'bc', 'c~'),)),
(2, None, '~', False, False, (('ab', 'bc', 'ca'),)),
(3, None, '~', True, True, (('~~~', 'c~~', 'bc~', 'abc', '~ab', '~~a'),)),
(3, None, '~', True, False, (('abc', 'cab', 'bca'),)),
(3, None, '~', False, True, (('~~~', '~~a', '~ab', 'abc', 'bc~', 'c~~'),)),
(3, None, '~', False, False, (('abc', 'bca', 'cab'),)),
(4, None, ' ', True, True, ((' ', 'c ', 'bc ', 'abc ', ' abc', ' ab', ' a'),)),
(4, None, ' ', True, False, (('abc ', ' abc', 'c ab', 'bc a'),)),
(4, None, ' ', False, True, ((' ', ' a', ' ab', ' abc', 'abc ', 'bc ', 'c '),)),
(4, None, ' ', False, False, ((' abc', 'abc ', 'bc a', 'c ab'),)),
(4, 1, '_', True, True, (('____', 'a___', '_a__', '__a_', '___a'),
('____', 'b___', '_b__', '__b_', '___b'),
('____', 'c___', '_c__', '__c_', '___c'))),
(4, 2, '_', True, False, (('aa__', '_aa_', '__aa', 'b__a'),
('bb__', '_bb_', '__bb', 'c__b'),
('cc__', '_cc_', '__cc', 'a__c'))),
])
@pytest.mark.parametrize('length, block, background, hide, expected', [
(None, None, None, True, ((' ', 'c ', 'bc ', 'abc', ' ab', ' a'),
(' ', ' d', ' de', 'def', 'ef ', 'f '),)),
(None, None, None, False, (('abc',), ('def',),)),
(2, None, '~', True, (('~~', 'c~', 'bc', 'ab', '~a'), ('~~', '~d', 'de', 'ef', 'f~'),)),
(2, None, '~', False, (('bc', 'ab'), ('de', 'ef'),)),
(3, None, '+', True, (('+++', 'c++', 'bc+', 'abc', '+ab', '++a'),
('+++', '++d', '+de', 'def', 'ef+', 'f++'),)),
(3, None, '+', False, (('abc',), ('def',),)),
(4, None, ' ', True, ((' ', 'c ', 'bc ', 'abc ', ' abc', ' ab', ' a'),
(' ', ' d', ' de', ' def', 'def ', 'ef ', 'f '),)),
(4, None, ' ', False, (('abc ', ' abc'), (' def', 'def '),)),
(3, 1, '_', True, (('___', 'a__', '_a_', '__a'),
('___', '__d', '_d_', 'd__'),
('___', 'b__', '_b_', '__b'),
('___', '__e', '_e_', 'e__'),
('___', 'c__', '_c_', '__c'),
('___', '__f', '_f_', 'f__'))),
(5, 2, '_', False, (('aa___', '_aa__', '__aa_', '___aa'),
('___dd', '__dd_', '_dd__', 'dd___'),
('bb___', '_bb__', '__bb_', '___bb'),
('___ee', '__ee_', '_ee__', 'ee___'),
('cc___', '_cc__', '__cc_', '___cc'),
('___ff', '__ff_', '_ff__', 'ff___'))),
])
@pytest.mark.parametrize('inputs, expected', [
(('123', 'abc'), (('1a', '2b', '3c'),)),
(('12', 'abc'), (('1a', '2b', '1c', '2a', '1b', '2c'),)),
((('12', '34', '56'), 'ab'), (('12a', '34b', '56a', '12b', '34a', '56b'),)),
])
@pytest.mark.parametrize('inputs, expected', [
(('123', 'abc'), (('1a', '2b', '3c'),)),
(('12', 'abc'), (('1a', '2b'), ('1c', '2a'), ('1b', '2c'))),
((('12', '34', '56'), 'ab'), (('12a', '34b', '56a'), ('12b', '34a', '56b'))),
])
@pytest.mark.parametrize('inputs, expected', [
(('123', 'abc'), (('11a', '22b', '33c'),)),
(('12', 'abc'), (('11a', '22b', '11c', '22a', '11b', '22c'),)),
((('12', '34', '56'), 'ab'), (('12a', '34b', '56a', '12b', '34a', '56b'),)),
])
@pytest.mark.parametrize('inputs, expected', [
(('123', 'abc'), (('1',), ('a',), ('2',), ('b',), ('3',), ('c',))),
(('12', 'abc'), (('1',), ('a',), ('2',), ('b',), ('1',), ('c',),
('2',), ('a',), ('1',), ('b',), ('2',), ('c',))),
((('12', '34', '56'), 'ab'), (('1', '2'), ('a',), ('3', '4'), ('b',), ('5', '6'), ('a',),
('1', '2'), ('b',), ('3', '4'), ('a',), ('5', '6'), ('b',))),
])
@pytest.mark.parametrize('inputs, expected', [
(('123', 'abc'), (('1',), ('2',), ('3',), ('a',), ('b',), ('c',))),
(('12', 'abc'), (('1',), ('2',), ('a',), ('b',), ('c',))),
((('12', '34', '56'), 'ab'), (('1', '2'), ('3', '4'), ('5', '6'), ('a',), ('b',))),
])
@pytest.mark.parametrize('copies, offset, expected', [
(3, 1, (('123', '234', '345', '451', '512'),)),
(4, 2, (('1352', '2413', '3524', '4135', '5241'),)),
])
import pytest
from alive_progress.animations.spinners import alongside_spinner_factory, \
bouncing_spinner_factory, delayed_spinner_factory, frame_spinner_factory, \
scrolling_spinner_factory, sequential_spinner_factory
from alive_progress.utils.cells import join_cells
@pytest.mark.parametrize('frames, expected', [
('a\nb', (('a', ' ', 'b'),)),
('abc', (('a', 'b', 'c'),)),
(('a\nb', '\nc '), (('a b', ' c '),)),
(('a ', ' b ', ' c'), (('a ', ' b ', ' c'),)),
(('a', '(a)', ' (*) '), (('aaaaa', '(a)(a', ' (*) '),)),
(('ok', '😺😺'), (('okok', '😺😺'),)),
])
def test_frame_spinner(frames, expected):
    """Render every cycle at the natural spinner size and compare."""
    spinner = frame_spinner_factory(frames)()  # natural spinner size.
    rendered = tuple(
        tuple(join_cells(frame) for frame in spinner()) for _ in expected
    )
    assert rendered == expected
# Fixture columns: length, block, background, right (direction), hide -> expected frame cycles.
@pytest.mark.parametrize('length, block, background, right, hide, expected', [
    (None, None, ' ', True, True, ((' ', 'c ', 'bc ', 'abc', ' ab', ' a'),)),
    (None, None, ' ', False, True, ((' ', ' a', ' ab', 'abc', 'bc ', 'c '),)),
    (None, None, ' ', True, False, (('abc', 'cab', 'bca'),)),
    (None, None, ' ', False, False, (('abc', 'bca', 'cab'),)),
    (2, None, '~', True, True, (('~~', 'c~', 'bc', 'ab', '~a'),)),
    (2, None, '~', True, False, (('bc', 'ab', 'ca'),)),
    (2, None, '~', False, True, (('~~', '~a', 'ab', 'bc', 'c~'),)),
    (2, None, '~', False, False, (('ab', 'bc', 'ca'),)),
    (3, None, '~', True, True, (('~~~', 'c~~', 'bc~', 'abc', '~ab', '~~a'),)),
    (3, None, '~', True, False, (('abc', 'cab', 'bca'),)),
    (3, None, '~', False, True, (('~~~', '~~a', '~ab', 'abc', 'bc~', 'c~~'),)),
    (3, None, '~', False, False, (('abc', 'bca', 'cab'),)),
    (4, None, ' ', True, True, ((' ', 'c ', 'bc ', 'abc ', ' abc', ' ab', ' a'),)),
    (4, None, ' ', True, False, (('abc ', ' abc', 'c ab', 'bc a'),)),
    (4, None, ' ', False, True, ((' ', ' a', ' ab', ' abc', 'abc ', 'bc ', 'c '),)),
    (4, None, ' ', False, False, ((' abc', 'abc ', 'bc a', 'c ab'),)),
    (4, 1, '_', True, True, (('____', 'a___', '_a__', '__a_', '___a'),
                             ('____', 'b___', '_b__', '__b_', '___b'),
                             ('____', 'c___', '_c__', '__c_', '___c'))),
    (4, 2, '_', True, False, (('aa__', '_aa_', '__aa', 'b__a'),
                              ('bb__', '_bb_', '__bb', 'c__b'),
                              ('cc__', '_cc_', '__cc', 'a__c'))),
])
def test_scrolling_spinner(length, block, background, right, hide, expected):
    """Scrolling 'abc' produces the expected frames for every
    length/block/background/direction/hide combination in the fixtures."""
    spinner_factory = scrolling_spinner_factory('abc', length, block, background,
                                                right=right, hide=hide)
    spinner = spinner_factory()  # natural spinner size.
    # Render exactly as many cycles as the fixture expects and compare whole.
    assert tuple(tuple(join_cells(f) for f in spinner()) for _ in expected) == expected
# Fixture columns: length, block, background, hide -> expected frame cycles
# ('abc' scrolls one way, 'def' bounces back the other way).
@pytest.mark.parametrize('length, block, background, hide, expected', [
    (None, None, None, True, ((' ', 'c ', 'bc ', 'abc', ' ab', ' a'),
                              (' ', ' d', ' de', 'def', 'ef ', 'f '),)),
    (None, None, None, False, (('abc',), ('def',),)),
    (2, None, '~', True, (('~~', 'c~', 'bc', 'ab', '~a'), ('~~', '~d', 'de', 'ef', 'f~'),)),
    (2, None, '~', False, (('bc', 'ab'), ('de', 'ef'),)),
    (3, None, '+', True, (('+++', 'c++', 'bc+', 'abc', '+ab', '++a'),
                          ('+++', '++d', '+de', 'def', 'ef+', 'f++'),)),
    (3, None, '+', False, (('abc',), ('def',),)),
    (4, None, ' ', True, ((' ', 'c ', 'bc ', 'abc ', ' abc', ' ab', ' a'),
                          (' ', ' d', ' de', ' def', 'def ', 'ef ', 'f '),)),
    (4, None, ' ', False, (('abc ', ' abc'), (' def', 'def '),)),
    (3, 1, '_', True, (('___', 'a__', '_a_', '__a'),
                       ('___', '__d', '_d_', 'd__'),
                       ('___', 'b__', '_b_', '__b'),
                       ('___', '__e', '_e_', 'e__'),
                       ('___', 'c__', '_c_', '__c'),
                       ('___', '__f', '_f_', 'f__'))),
    (5, 2, '_', False, (('aa___', '_aa__', '__aa_', '___aa'),
                        ('___dd', '__dd_', '_dd__', 'dd___'),
                        ('bb___', '_bb__', '__bb_', '___bb'),
                        ('___ee', '__ee_', '_ee__', 'ee___'),
                        ('cc___', '_cc__', '__cc_', '___cc'),
                        ('___ff', '__ff_', '_ff__', 'ff___'))),
])
def test_bouncing_spinner(length, block, background, hide, expected):
    """Bouncing spinner alternates the two frame sets in opposite directions,
    matching the fixture cycles for each length/block/background/hide combo."""
    spinner_factory = bouncing_spinner_factory(('abc', 'def'), length, block, background,
                                               right=True, hide=hide)
    spinner = spinner_factory()  # natural spinner size.
    # Render exactly as many cycles as the fixture expects and compare whole.
    assert tuple(tuple(join_cells(f) for f in spinner()) for _ in expected) == expected
@pytest.mark.parametrize('inputs, expected', [
    (('123', 'abc'), (('1a', '2b', '3c'),)),
    (('12', 'abc'), (('1a', '2b', '1c', '2a', '1b', '2c'),)),
    ((('12', '34', '56'), 'ab'), (('12a', '34b', '56a', '12b', '34a', '56b'),)),
])
def test_alongside_spinner(inputs, expected, spinner_test):
    """Component spinners render side by side into the expected cycles."""
    factories = [spinner_test(data) for data in inputs]
    spin = alongside_spinner_factory(*factories)()  # natural spinner size.
    outcome = tuple(tuple(join_cells(frame) for frame in spin()) for _ in expected)
    assert outcome == expected
@pytest.mark.parametrize('inputs, expected', [
    (('123', 'abc'), (('1a', '2b', '3c'),)),
    (('12', 'abc'), (('1a', '2b'), ('1c', '2a'), ('1b', '2c'))),
    ((('12', '34', '56'), 'ab'), (('12a', '34b', '56a'), ('12b', '34a', '56b'))),
])
def test_alongside_spinner_with_pivot(inputs, expected, spinner_test):
    """Same composition as test_alongside_spinner, but with pivot=0 the
    output is split into multiple shorter cycles (see the fixtures)."""
    spinner_factory = alongside_spinner_factory(*(spinner_test(x) for x in inputs), pivot=0)
    spinner = spinner_factory()  # natural spinner size.
    assert tuple(tuple(join_cells(f) for f in spinner()) for _ in expected) == expected
@pytest.mark.parametrize('inputs, expected', [
    (('123', 'abc'), (('11a', '22b', '33c'),)),
    (('12', 'abc'), (('11a', '22b', '11c', '22a', '11b', '22c'),)),
    ((('12', '34', '56'), 'ab'), (('12a', '34b', '56a', '12b', '34a', '56b'),)),
])
def test_alongside_spinner_custom(inputs, expected, spinner_test):
    """With an explicit spinner size of 3, narrower components appear
    widened in the output (e.g. '1' -> '11' in the fixtures)."""
    spinner_factory = alongside_spinner_factory(*(spinner_test(x) for x in inputs))
    spinner = spinner_factory(3)  # custom spinner size.
    assert tuple(tuple(join_cells(f) for f in spinner()) for _ in expected) == expected
@pytest.mark.parametrize('inputs, expected', [
    (('123', 'abc'), (('1',), ('a',), ('2',), ('b',), ('3',), ('c',))),
    (('12', 'abc'), (('1',), ('a',), ('2',), ('b',), ('1',), ('c',),
                     ('2',), ('a',), ('1',), ('b',), ('2',), ('c',))),
    ((('12', '34', '56'), 'ab'), (('1', '2'), ('a',), ('3', '4'), ('b',), ('5', '6'), ('a',),
                                  ('1', '2'), ('b',), ('3', '4'), ('a',), ('5', '6'), ('b',))),
])
def test_sequential_spinner(inputs, expected, spinner_test):
    """Default (intermixed) sequential composition: component spinners take
    turns one cycle at a time, per the fixtures."""
    spinner_factory = sequential_spinner_factory(*(spinner_test(*x) for x in inputs))
    spinner = spinner_factory()  # natural spinner size.
    assert tuple(tuple(join_cells(f) for f in spinner()) for _ in expected) == expected
@pytest.mark.parametrize('inputs, expected', [
    (('123', 'abc'), (('1',), ('2',), ('3',), ('a',), ('b',), ('c',))),
    (('12', 'abc'), (('1',), ('2',), ('a',), ('b',), ('c',))),
    ((('12', '34', '56'), 'ab'), (('1', '2'), ('3', '4'), ('5', '6'), ('a',), ('b',))),
])
def test_sequential_spinner_no_intermix(inputs, expected, spinner_test):
    """With intermix=False each component plays all its cycles before
    the next component starts, per the fixtures."""
    spinner_factory = sequential_spinner_factory(*(spinner_test(*x) for x in inputs),
                                                 intermix=False)
    spinner = spinner_factory()  # natural spinner size.
    assert tuple(tuple(join_cells(f) for f in spinner()) for _ in expected) == expected
@pytest.mark.parametrize('copies, offset, expected', [
    (3, 1, (('123', '234', '345', '451', '512'),)),
    (4, 2, (('1352', '2413', '3524', '4135', '5241'),)),
])
def test_delayed_spinner(copies, offset, expected, spinner_test):
    """Multiple copies of one spinner run offset from each other."""
    delayed_factory = delayed_spinner_factory(spinner_test('12345'), copies, offset)
    spin = delayed_factory()  # no explicit length: natural spinner size.
    produced = tuple(tuple(join_cells(cell) for cell in spin()) for _ in expected)
    assert produced == expected
| 2,644 | 0 | 198 |
9501241ca2ec8a93143bf2872026829d759bcb38 | 4,938 | py | Python | static/jmeter/libs/zpjUtils.py | huqiliang/yapi | 775205457c5ef92531f3b3fd7480ad62a1746f52 | [
"Apache-2.0"
] | null | null | null | static/jmeter/libs/zpjUtils.py | huqiliang/yapi | 775205457c5ef92531f3b3fd7480ad62a1746f52 | [
"Apache-2.0"
] | null | null | null | static/jmeter/libs/zpjUtils.py | huqiliang/yapi | 775205457c5ef92531f3b3fd7480ad62a1746f52 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import logging
import datetime
import smtplib
import os
import subprocess
class ZpjUtils(object):
'''
一个工具类
'''
def __init__(self):
'''
Constructor
'''
begin_work_time = datetime.datetime.now()
smtp = None
'''
cmdstr:命令行
is_logger_out:是否输出日志,默认为True
返回值:0为正常,其他为异常
'''
'''
project_path:获取项目后存放的目录
project_name:项目名
git_url:git地址
'''
'''
pom_file: pom.xml
'''
'''
project_path:项目目录
'''
| 33.821918 | 133 | 0.518631 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import logging
import datetime
import smtplib
import os
import subprocess
class ZpjUtils(object):
    """Build-automation helpers: logging setup, elapsed-time reporting,
    SMTP configuration, shell-command execution, git checkout, Maven
    packaging and SonarQube scanning.

    (Docstrings translated to English; runtime log messages kept verbatim.)
    """
    def __init__(self):
        """Record the start time and prepare the (lazy) SMTP handle."""
        # Bug fix: the original assigned plain locals here, so later
        # references to self.begin_work_time / self.smtp raised AttributeError.
        self.begin_work_time = datetime.datetime.now()
        self.smtp = None
    def begin_log(self, filename):
        """Route DEBUG+ records to *filename* and mirror INFO+ to the console."""
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                            datefmt='%a, %d %b %Y %H:%M:%S',
                            filename=filename,
                            filemode='w')
        # Attach a console handler for INFO and above.
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('%(levelname) -8s: %(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)
        logging.info(u"现在时间是 " + self.begin_work_time.strftime("%Y-%m-%d %H:%M:%S"))
    def end_log(self):
        """Log the elapsed time since this object was constructed."""
        self.calc_time_diff(self.begin_work_time)
    def calc_time_diff(self, begin_time):
        """Log *begin_time*, the current time, and the elapsed duration
        broken down into days/hours/minutes/seconds."""
        logging.info(u"---- 开始时间:" + begin_time.strftime("%Y-%m-%d %H:%M:%S") + " ----")
        logging.info(u"---- 结束时间:" + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " ----")
        a = datetime.datetime.now() - begin_time
        # NOTE(review): .seconds discards whole days of the delta;
        # .total_seconds() may have been intended — kept as-is.
        b = a.seconds
        d = int(b / 86400)
        h = int(b % 86400 / 3600)
        m = int(b % 3600 / 60)
        s = b % 60
        logging.info(u"---- 耗时:" + str(d) + u"天" + str(h) + u"小时" + str(m) + u"分钟" + str(s) + u"秒 ----")
    def smtp_config(self, mail_host="smtp.ym.163.com", mail_user="postmaster@ipms.cn", mail_pass="deviskaifa"):
        """Connect and log in to the SMTP server, storing the session on self.smtp."""
        try:
            self.smtp = smtplib.SMTP()
            self.smtp.connect(mail_host, 25)
            self.smtp.login(mail_user, mail_pass)
        except smtplib.SMTPException:
            # Bug fix: the original caught self.smtp.SMTPException, which does
            # not exist on SMTP instances and itself raised AttributeError.
            logging.error("Error: 无法登录邮箱")
    def exec_cmd(self, cmdstr, is_logger_out=True):
        """Run *cmdstr* in a shell.

        cmdstr: the command line to run.
        is_logger_out: when True, stream the command's output to the log;
            when False, run via os.system without capturing output.
        Returns the exit status (0 means success).
        """
        logging.info(cmdstr)
        if not is_logger_out:
            ret = os.system(cmdstr)
            if ret == 0:
                logging.info(cmdstr + "命令执行完成")
            else:
                logging.error(cmdstr + "命令执行出错")
        else:
            p = subprocess.Popen(cmdstr, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            while True:
                buff = p.stdout.readline()
                # EOF and process finished -> stop reading.
                if len(buff) == 0 and p.poll() is not None:
                    break
                buff = buff.strip()
                if len(buff) != 0:
                    logging.info(buff)
            ret = p.returncode
            if ret == 0:
                logging.info(cmdstr + "命令执行完成")
            else:
                logging.error(cmdstr + "命令执行出错")
        return ret
    def git_checkout(self, project_path, project_name, git_url):
        """Clone *git_url* when *project_path* is absent, otherwise clean and pull.

        project_path: directory the project is checked out into.
        project_name: project directory name (cd target for updates).
        git_url: git remote URL.
        """
        if not os.path.exists(project_path):
            self.exec_cmd("git clone " + git_url)
        else:
            current_path = os.getcwd()
            os.chdir(project_name)
            try:
                self.exec_cmd("git checkout && git clean -xdf")
                self.exec_cmd("git pull")
            except Exception:
                # Narrowed from a bare `except: pass`; still best-effort, but logged.
                logging.exception("git update failed")
            finally:
                os.chdir(current_path)
    def mvn_package(self, pom_file, need_test=False):
        """Run `mvn clean install` on *pom_file*; tests are skipped unless need_test.

        pom_file: path to the pom.xml to build.
        Returns the maven exit status, or 0 if pom_file is missing.
        """
        ret = 0
        if os.path.exists(pom_file):
            # maven.test.skip=True skips tests; need_test=True disables the skip.
            test_flag = "False" if need_test else "True"
            ret = self.exec_cmd("mvn clean install -Dmaven.test.skip=" + test_flag + " -f " + pom_file)
        else:
            logging.debug(pom_file + "找不到")
        return ret
    def sonar(self, project_path, project_key):
        """Run sonar-scanner inside *project_path*.

        Uses the project's own sonar-project.properties when present,
        otherwise passes the key/sources/binaries on the command line.
        Returns the scanner's exit status.
        """
        current_path = os.getcwd()
        sonar_properties_file = os.path.join(project_path, "sonar-project.properties")
        if os.path.exists(sonar_properties_file):
            logging.info(u"项目自带 sonar-project.properties")
            cmdstr = "sonar-scanner"
        else:
            cmdstr = "sonar-scanner -Dsonar.projectKey=" + project_key + " -Dsonar.sources=. -Dsonar.java.binaries=./target/classes"
        os.chdir(project_path)
        # sonar-scanner needs the binaries directory to exist.
        class_path = os.path.join(project_path, "target/classes")
        if not os.path.exists(class_path):
            os.makedirs(class_path)
        ret = self.exec_cmd(cmdstr)
        os.chdir(current_path)
        return ret
| 4,285 | 0 | 228 |
04223732d716dc0152494f443b0133702feca9d3 | 1,133 | py | Python | scriptable/ast/expression/concat_expression.py | c7nw3r/scriptable | b285d865da41774c8321ebf51f5a468ef1c92402 | [
"Apache-2.0"
] | null | null | null | scriptable/ast/expression/concat_expression.py | c7nw3r/scriptable | b285d865da41774c8321ebf51f5a468ef1c92402 | [
"Apache-2.0"
] | null | null | null | scriptable/ast/expression/concat_expression.py | c7nw3r/scriptable | b285d865da41774c8321ebf51f5a468ef1c92402 | [
"Apache-2.0"
] | null | null | null | from typing import Union, List
from scriptable.api import AST
from scriptable.api.ast_binding import ASTBinding
DataType = Union[int, float]
| 29.815789 | 86 | 0.630185 | from typing import Union, List
from scriptable.api import AST
from scriptable.api.ast_binding import ASTBinding
DataType = Union[int, float]
class ConcatExpression(AST[str]):
    """AST node that concatenates its evaluated operand branches into one string."""

    def __init__(self, branch: List[AST]):
        # Even positions of `branch` are operands, odd positions are operators.
        self.operand_stack = [node for idx, node in enumerate(branch) if idx % 2 == 0]
        self.operator_stack = [node for idx, node in enumerate(branch) if idx % 2 == 1]

    def execute(self, binding: ASTBinding) -> str:
        from copy import deepcopy

        def resolve(node: AST):
            # Evaluate repeatedly until a non-AST value remains; each
            # evaluation gets its own deep copy of the binding.
            value = node
            while isinstance(value, AST):
                value = value.execute(deepcopy(binding))
            return value

        pieces = [resolve(node) for node in self.operand_stack]
        return "".join(str(piece) for piece in pieces)

    @staticmethod
    def parse(branch: List[AST]):
        return ConcatExpression(branch)

    def __repr__(self):
        # Interleave operands and operators, ending with the last operand.
        interleaved = []
        for operand, operator in zip(self.operand_stack, self.operator_stack):
            interleaved.append(operand)
            interleaved.append(operator)
        interleaved.append(self.operand_stack[-1])
        return " ".join(map(str, interleaved))
| 829 | 137 | 23 |
f3e306054f7264645b0dd28832dc9b6f41c24dd9 | 1,604 | py | Python | goal_tracker/__init__.py | kaloyan-marinov/goal-tracker | 91dbdfab44912c3690f71d0fac197864c56bcce2 | [
"MIT"
] | null | null | null | goal_tracker/__init__.py | kaloyan-marinov/goal-tracker | 91dbdfab44912c3690f71d0fac197864c56bcce2 | [
"MIT"
] | null | null | null | goal_tracker/__init__.py | kaloyan-marinov/goal-tracker | 91dbdfab44912c3690f71d0fac197864c56bcce2 | [
"MIT"
] | null | null | null | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from config import config
# Create instances of necessary Flask extensions.
db = SQLAlchemy()
migrate = Migrate()
# Import the models so that they are registered with SQLAlchemy.
from goal_tracker import models
| 30.846154 | 93 | 0.743142 | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from config import config
# Create instances of necessary Flask extensions.
db = SQLAlchemy()
migrate = Migrate()
# Import the models so that they are registered with SQLAlchemy.
from goal_tracker import models
def create_app(config_name=None):
    """Application factory: build and configure a Flask app instance.

    config_name selects an entry in ``config``; when None it falls back to
    the GOAL_TRACKER_CONFIG environment variable, then to "development".
    """
    if config_name is None:
        config_name = os.environ.get("GOAL_TRACKER_CONFIG", "development")
    print(f"goal_tracker/__init__.py - config_name={config_name}")

    app = Flask(__name__)
    app.config.from_object(config[config_name])

    # Bind the module-level extension instances to this app.
    db.init_app(app)
    migrate.init_app(app, db)

    # Attach a token serializer directly to the application instance so that
    # anything with access to `current_app` can sign/verify tokens
    # (tokens expire after one hour).
    app.token_serializer = Serializer(app.config["SECRET_KEY"], expires_in=3600)

    # Register the API blueprint; blueprints are inert until registered.
    from goal_tracker.api import api_bp

    app.register_blueprint(api_bp, url_prefix="/api/v1.0")

    return app
| 1,184 | 0 | 23 |
a5eee884f0ad6b513aab63d677e2f3fb20324a62 | 3,576 | py | Python | testing/components/functions/function_evaluation_test.py | JeremiasKnoblauch/MXFusion | af6223e9636b055d029d136dd7ae023b210b4560 | [
"Apache-2.0"
] | 2 | 2019-05-31T09:50:47.000Z | 2021-03-06T09:38:47.000Z | testing/components/functions/function_evaluation_test.py | JeremiasKnoblauch/MXFusion | af6223e9636b055d029d136dd7ae023b210b4560 | [
"Apache-2.0"
] | null | null | null | testing/components/functions/function_evaluation_test.py | JeremiasKnoblauch/MXFusion | af6223e9636b055d029d136dd7ae023b210b4560 | [
"Apache-2.0"
] | 1 | 2019-05-30T09:39:46.000Z | 2019-05-30T09:39:46.000Z | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
import pytest
import mxnet as mx
import numpy as np
from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples
@pytest.mark.usefixtures("set_seed")
| 44.148148 | 99 | 0.590884 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
import pytest
import mxnet as mx
import numpy as np
from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples
@pytest.mark.usefixtures("set_seed")
class TestFunctionEvaluation(object):
    """Tests for FunctionEvaluation using a matrix-product (gemm2) function."""
    def _make_test_function_evaluation(self, broadcastable):
        # Builds a FunctionEvaluation subclass computing A (3x4) dot B (4x5).
        from mxfusion.components.functions.function_evaluation import FunctionEvaluation
        from mxfusion.components import Variable
        class DotFuncEval(FunctionEvaluation):
            def __init__(self):
                inputs = [('A', Variable(shape=(3, 4))), ('B', Variable(shape=(4, 5)))]
                outputs = [('output', Variable(shape=(3, 5)))]
                input_names = ['A', 'B']
                output_names = ['output']
                super(DotFuncEval, self).__init__(inputs=inputs, outputs=outputs,
                                                  input_names=input_names,
                                                  output_names=output_names,
                                                  broadcastable=broadcastable)
            def eval_impl(self, F, A, B):
                # Matrix multiplication via MXNet's generalized matrix multiply.
                return F.linalg.gemm2(A, B)
        return DotFuncEval()
    # Fixture columns: dtype, A array, A has sample axis, B array,
    # B has sample axis, num_samples, broadcastable flag.
    @pytest.mark.parametrize("dtype, A, A_isSamples, B, B_isSamples, num_samples, broadcastable", [
        (np.float64, np.random.rand(2,3,4), True, np.random.rand(4,5), False, 2, True),
        (np.float64, np.random.rand(2,3,4), True, np.random.rand(2,4,5), True, 2, True),
        (np.float64, np.random.rand(3,4), False, np.random.rand(4,5), False, 0, True),
        (np.float64, np.random.rand(2,3,4), True, np.random.rand(4,5), False, 2, False),
        (np.float64, np.random.rand(2,3,4), True, np.random.rand(2,4,5), True, 2, False),
        (np.float64, np.random.rand(3,4), False, np.random.rand(4,5), False, 0, False)
        ])
    def test_eval(self, dtype, A, A_isSamples, B, B_isSamples, num_samples,
                  broadcastable):
        """Compare the MXNet evaluation against a NumPy reference, both with
        and without a leading sample axis on the operands."""
        np_isSamples = A_isSamples or B_isSamples
        if np_isSamples:
            # Give both operands a leading sample axis, then batch-multiply.
            if not A_isSamples:
                A_np = np.expand_dims(A, axis=0)
            else:
                A_np = A
            if not B_isSamples:
                B_np = np.expand_dims(B, axis=0)
            else:
                B_np = B
            res_np = np.einsum('ijk, ikh -> ijh', A_np, B_np)
        else:
            res_np = A.dot(B)
        eval = self._make_test_function_evaluation(broadcastable)
        # Wrap the NumPy inputs as MXNet arrays, adding the sample axis
        # where the fixture did not already include one.
        A_mx = mx.nd.array(A, dtype=dtype)
        if not A_isSamples:
            A_mx = add_sample_dimension(mx.nd, A_mx)
        B_mx = mx.nd.array(B, dtype=dtype)
        if not B_isSamples:
            B_mx = add_sample_dimension(mx.nd, B_mx)
        variables = {eval.A.uuid: A_mx, eval.B.uuid: B_mx}
        res_rt = eval.eval(F=mx.nd, variables=variables)
        # The result carries a sample axis iff any input did.
        assert np_isSamples == array_has_samples(mx.nd, res_rt)
        assert np.allclose(res_np, res_rt.asnumpy())
a2817df3a138e0f2a7e0d54aff30a42f28c91d0a | 42,649 | py | Python | synthetic-data-generator/hello/data_generator.py | skyu0221/660-iot | d31f973c93871bfa8122f1b83364d0147d402e9e | [
"Apache-2.0"
] | null | null | null | synthetic-data-generator/hello/data_generator.py | skyu0221/660-iot | d31f973c93871bfa8122f1b83364d0147d402e9e | [
"Apache-2.0"
] | 8 | 2021-03-19T01:36:06.000Z | 2022-03-12T00:22:43.000Z | synthetic-data-generator/hello/data_generator.py | skyu0221/660-iot | d31f973c93871bfa8122f1b83364d0147d402e9e | [
"Apache-2.0"
] | null | null | null | import numpy as np
from datetime import datetime, timedelta
import json
import os
start_work = 9 * 60 * 60 # Work start from 9:00. unit: second
end_work = 17 * 60 * 60 # Work end at 17:00. unit: second
daily_report = 16 * 60 * 60 # Daily progress report at 16:00, in meeting room
daily_report_mean = 15 * 60 # Daily progress report average length 15 min
daily_report_std = 1 * 60 # Daily progress report std.dev 1 min
come_leave_flex_coef = 30 * 60 # Tend to come 8:30, average arrive at 9:00. Leave is similar. Exponential distribution
call_for_absence = 0.01 # Possibility of not come to the office
lunch_start_time = 12 * 60 * 60 # Lunch serve start time 12:00. unit: second
lunch_end_time = 13 * 60 * 60 # Lunch serve end time 13:00. unit: second
eat_time_a = 10 # average time for each person to eat lunch. Beta distribution
eat_time_b = 50 # average time for each person to eat lunch. Beta distribution
cut_off_time = 14 * 60 * 60 # After this time, the person won't come to work
day_cut_off = 24 * 60 * 60
start_synthetic_data = datetime(2020, 3, 25) # start date
end_synthetic_data = datetime(2020, 3, 27) # end date
report_interval = timedelta(seconds=1) # Time interval between two consecutive package
guest_lambda = 3 # Poisson arrival for unknown customers. unit: person per day
visit_colleague = 3 # How many times a worker goes to a colleague's office
average_stay_in_colleague_office = 30 * 60
std_stay_in_colleague_office = 4 * 60
average_stay_customer = 30 * 60
std_stay_customer = 5 * 60
sensor_type = ["Room_Outlet_Controller",
"Room_Motion_Sensor",
"Room_Temperature_Sensor",
"Room_Lock_Controller",
"Room_Door_Camera"]
# value = (np.random.beta(eat_time_a, eat_time_b, 10000) + 0.1) * 100
possible_locations = ["home", "Room_1_1_140", "Room_1_1_141", "Room_1_1_142", "Room_1_1_143", "Room_1_1_144",
"Room_1_1_150", "Room_1_1_184", "busy"]
walking_time = {"Room_1_1_140": {"Room_1_1_140": 0,
"Room_1_1_141": 2,
"Room_1_1_142": 2,
"Room_1_1_143": 3,
"Room_1_1_144": 4,
"Room_1_1_150": 1,
"Room_1_1_184": 2},
"Room_1_1_141": {"Room_1_1_140": 2,
"Room_1_1_141": 0,
"Room_1_1_142": 3,
"Room_1_1_143": 4,
"Room_1_1_144": 5,
"Room_1_1_150": 1,
"Room_1_1_184": 2},
"Room_1_1_142": {"Room_1_1_140": 2,
"Room_1_1_141": 3,
"Room_1_1_142": 0,
"Room_1_1_143": 2,
"Room_1_1_144": 2,
"Room_1_1_150": 1,
"Room_1_1_184": 3},
"Room_1_1_143": {"Room_1_1_140": 3,
"Room_1_1_141": 4,
"Room_1_1_142": 2,
"Room_1_1_143": 0,
"Room_1_1_144": 2,
"Room_1_1_150": 1,
"Room_1_1_184": 4},
"Room_1_1_144": {"Room_1_1_140": 4,
"Room_1_1_141": 5,
"Room_1_1_142": 2,
"Room_1_1_143": 2,
"Room_1_1_144": 0,
"Room_1_1_150": 1,
"Room_1_1_184": 5},
"Room_1_1_150": {"Room_1_1_140": 1,
"Room_1_1_141": 1,
"Room_1_1_142": 1,
"Room_1_1_143": 1,
"Room_1_1_144": 1,
"Room_1_1_150": 0,
"Room_1_1_184": 1},
"Room_1_1_184": {"Room_1_1_140": 2,
"Room_1_1_141": 4,
"Room_1_1_142": 4,
"Room_1_1_143": 5,
"Room_1_1_144": 5,
"Room_1_1_150": 1,
"Room_1_1_184": 0}}
lock_setting = {"Room_1_1_140": "Room_1_1_150",
"Room_1_1_141": "Room_1_1_150",
"Room_1_1_142": "Room_1_1_150",
"Room_1_1_143": "Room_1_1_150",
"Room_1_1_144": "Room_1_1_150",
"Room_1_1_150": "Room_1_1_184",
"Room_1_1_184": "home",
"home": "Room_1_1_184"}
worker_assign = [Person("Employee 1", "employee1@company.com", "Room_1_1_140"),
Person("Employee 2", "employee2@company.com", "Room_1_1_142"),
Person("Employee 3", "employee3@company.com", "Room_1_1_144"),
Person("Employee 4", "employee4@company.com", "Room_1_1_143")]
sensors = list()
# label, uuid, brick_name, room
with open("sensor_config.json", 'r') as json_file:
sensor_config = json.load(json_file)
for room in sensor_config:
for brick in sensor_config[room]:
uuid = sensor_config[room][brick]["UUID"]
label = sensor_config[room][brick]["Type"]
sensors.append(Sensor(label, uuid, brick, room))
sensors.append(Sensor("SmartThings v3 Hub", "ede0ef78-70b7-4756-9dd6-db82d33fc9eb", None, None))
# print(len(np.nonzero(np.array([False, False, True]))[0]))
# value = (np.random.beta(eat_time_a, eat_time_b, 10000) + 0.1) * 100
# print(24 * 60 * 60)
# plt.hist(value)
# plt.show()
| 58.184175 | 137 | 0.445778 | import numpy as np
from datetime import datetime, timedelta
import json
import os
start_work = 9 * 60 * 60 # Work start from 9:00. unit: second
end_work = 17 * 60 * 60 # Work end at 17:00. unit: second
daily_report = 16 * 60 * 60 # Daily progress report at 16:00, in meeting room
daily_report_mean = 15 * 60 # Daily progress report average length 15 min
daily_report_std = 1 * 60 # Daily progress report std.dev 1 min
come_leave_flex_coef = 30 * 60 # Tend to come 8:30, average arrive at 9:00. Leave is similar. Exponential distribution
call_for_absence = 0.01 # Possibility of not come to the office
lunch_start_time = 12 * 60 * 60 # Lunch serve start time 12:00. unit: second
lunch_end_time = 13 * 60 * 60 # Lunch serve end time 13:00. unit: second
eat_time_a = 10 # average time for each person to eat lunch. Beta distribution
eat_time_b = 50 # average time for each person to eat lunch. Beta distribution
cut_off_time = 14 * 60 * 60 # After this time, the person won't come to work
day_cut_off = 24 * 60 * 60
start_synthetic_data = datetime(2020, 3, 25) # start date
end_synthetic_data = datetime(2020, 3, 27) # end date
report_interval = timedelta(seconds=1) # Time interval between two consecutive package
guest_lambda = 3 # Poisson arrival for unknown customers. unit: person per day
visit_colleague = 3 # How many times a worker goes to a colleague's office
average_stay_in_colleague_office = 30 * 60
std_stay_in_colleague_office = 4 * 60
average_stay_customer = 30 * 60
std_stay_customer = 5 * 60
sensor_type = ["Room_Outlet_Controller",
"Room_Motion_Sensor",
"Room_Temperature_Sensor",
"Room_Lock_Controller",
"Room_Door_Camera"]
# value = (np.random.beta(eat_time_a, eat_time_b, 10000) + 0.1) * 100
possible_locations = ["home", "Room_1_1_140", "Room_1_1_141", "Room_1_1_142", "Room_1_1_143", "Room_1_1_144",
"Room_1_1_150", "Room_1_1_184", "busy"]
walking_time = {"Room_1_1_140": {"Room_1_1_140": 0,
"Room_1_1_141": 2,
"Room_1_1_142": 2,
"Room_1_1_143": 3,
"Room_1_1_144": 4,
"Room_1_1_150": 1,
"Room_1_1_184": 2},
"Room_1_1_141": {"Room_1_1_140": 2,
"Room_1_1_141": 0,
"Room_1_1_142": 3,
"Room_1_1_143": 4,
"Room_1_1_144": 5,
"Room_1_1_150": 1,
"Room_1_1_184": 2},
"Room_1_1_142": {"Room_1_1_140": 2,
"Room_1_1_141": 3,
"Room_1_1_142": 0,
"Room_1_1_143": 2,
"Room_1_1_144": 2,
"Room_1_1_150": 1,
"Room_1_1_184": 3},
"Room_1_1_143": {"Room_1_1_140": 3,
"Room_1_1_141": 4,
"Room_1_1_142": 2,
"Room_1_1_143": 0,
"Room_1_1_144": 2,
"Room_1_1_150": 1,
"Room_1_1_184": 4},
"Room_1_1_144": {"Room_1_1_140": 4,
"Room_1_1_141": 5,
"Room_1_1_142": 2,
"Room_1_1_143": 2,
"Room_1_1_144": 0,
"Room_1_1_150": 1,
"Room_1_1_184": 5},
"Room_1_1_150": {"Room_1_1_140": 1,
"Room_1_1_141": 1,
"Room_1_1_142": 1,
"Room_1_1_143": 1,
"Room_1_1_144": 1,
"Room_1_1_150": 0,
"Room_1_1_184": 1},
"Room_1_1_184": {"Room_1_1_140": 2,
"Room_1_1_141": 4,
"Room_1_1_142": 4,
"Room_1_1_143": 5,
"Room_1_1_144": 5,
"Room_1_1_150": 1,
"Room_1_1_184": 0}}
lock_setting = {"Room_1_1_140": "Room_1_1_150",
"Room_1_1_141": "Room_1_1_150",
"Room_1_1_142": "Room_1_1_150",
"Room_1_1_143": "Room_1_1_150",
"Room_1_1_144": "Room_1_1_150",
"Room_1_1_150": "Room_1_1_184",
"Room_1_1_184": "home",
"home": "Room_1_1_184"}
class Person:
def __init__(self, name, email, office=None):
self.name = name
self.office = office
self.email = email
self.position = np.zeros(day_cut_off)
def customer_come(self, start_time, end_time, dest):
start_time = start_time - int(np.random.exponential(5 * 60)) # Come eariler than expected
# decide the time takes from Room_1_1_150 door to the meeting room
arrive_office = start_time - 3 + get_white_bias(1)
# Decide the time takes to get to the building, average 3 second to the Room_1_1_150 door
arrive_door = arrive_office - 3 + get_white_bias(1)
# decide the time takes from meeting room to Room_1_1_150 door
leave_office = end_time + 3 + get_white_bias(1)
# Decide the time takes to leave to the building, average 3 second to the Room_1_1_150 door
leave_door = end_time + 3 + get_white_bias(1)
# Apply to the daily route
self.position[start_time:arrive_door] = possible_locations.index("Room_1_1_184")
self.position[arrive_door:arrive_office] = possible_locations.index("Room_1_1_150")
self.position[arrive_office:leave_office] = possible_locations.index(dest)
self.position[leave_office:leave_door] = possible_locations.index("Room_1_1_150")
self.position[leave_door:end_time] = possible_locations.index("Room_1_1_184")
def decide_come(self):
"""
Each person need to decide if he/she will come to work today, when exactly they come, and when exactly
they leave. Each person target to come at 8:30 am and leave at 5 pm, with a exponential distribution where
mu = 30 minute. Use exponential distribution because people tend to come/leave on time with a very small
chance of being late. In the meantime, assume that people directly goes into their own office right away.
They require some time to walk to their office.
:return: True if come to work, False otherwise
"""
self.position = np.zeros(day_cut_off)
# Decide absence
if np.random.random() < call_for_absence:
return False
else:
# Decide when come to office
arrival_time = (start_work - come_leave_flex_coef) + int(np.random.exponential(come_leave_flex_coef))
if arrival_time > cut_off_time:
return False
else:
# Decide when go back home
leave_time = end_work + int(np.random.exponential(come_leave_flex_coef))
if leave_time >= day_cut_off:
leave_time = day_cut_off - 1
# Decide the time takes to get to the building, average 3 second to the Room_1_1_150 door
arrive_door = arrival_time + 3 + get_white_bias(1)
# decide the time takes from Room_1_1_150 door to the office
arrive_office = arrive_door + int(self.office[-3:]) - 138 + get_white_bias(1)
# Decide the time takes to leave to the building, average 3 second to the Room_1_1_150 door
leave_door = leave_time - 3 + get_white_bias(1)
# decide the time takes from office to Room_1_1_150 door
leave_office = leave_door - int(self.office[-3:]) + 138 + get_white_bias(1)
# Apply to the daily route
self.position[arrival_time:arrive_door] = possible_locations.index("Room_1_1_184")
self.position[arrive_door:arrive_office] = possible_locations.index("Room_1_1_150")
self.position[arrive_office:leave_office] = possible_locations.index(self.office)
self.position[leave_office:leave_door] = possible_locations.index("Room_1_1_150")
self.position[leave_door:leave_time] = possible_locations.index("Room_1_1_184")
return True
def generate_lunch(self):
# Usually go for lunch immediately, with average delay of 5 minute
lunch_delay = int(np.random.exponential(5 * 60))
lunch_delay = max(lunch_delay, 20 * 60)
time_in_corridor_go = lunch_start_time + lunch_delay + walking_time[self.office][
"Room_1_1_184"] + get_white_bias(1)
lunch_finish_time = time_in_corridor_go + int((np.random.beta(eat_time_a, eat_time_b) + 0.1) * 6000)
time_in_corridor_back = lunch_finish_time + walking_time["Room_1_1_184"][self.office] + get_white_bias(1)
# Apply to the daily route
self.position[lunch_start_time:time_in_corridor_go] = possible_locations.index("Room_1_1_150")
self.position[time_in_corridor_go:lunch_finish_time] = possible_locations.index("Room_1_1_184")
self.position[lunch_finish_time:time_in_corridor_back] = possible_locations.index("Room_1_1_150")
def generate_daily_meeting(self):
# Arrive maximum 3 min early, 2 min late
meeting_attend = int(np.random.exponential(3 * 60))
meeting_attend = daily_report - max(meeting_attend, 5 * 60)
time_in_corridor_go = meeting_attend - walking_time[self.office]["Room_1_1_141"] + get_white_bias(1)
meeting_end = daily_report + int(np.random.normal(daily_report_mean, daily_report_std))
time_in_corridor_back = meeting_end + walking_time["Room_1_1_141"][self.office] + get_white_bias(1)
# Apply to the daily route
self.position[time_in_corridor_go:meeting_attend] = possible_locations.index("Room_1_1_150")
self.position[meeting_attend:meeting_end] = possible_locations.index("Room_1_1_141")
self.position[meeting_end:time_in_corridor_back] = possible_locations.index("Room_1_1_150")
def check_in_office(self, start, end):
return np.sum(self.position[start:end] == possible_locations.index(self.office)) == (end - start)
def get_in_office_range(self):
in_office = np.concatenate(([0],
np.equal(self.position, possible_locations.index(self.office)).view(np.int8),
[0]))
absdiff = np.abs(np.diff(in_office))
# Runs start and end where absdiff is 1.
ranges = np.where(absdiff == 1)[0].reshape(-1, 2)
return ranges
def handle_customer(self, num_customer):
# Set-up meeting time
in_office_range = self.get_in_office_range()
visit_length = int(np.random.normal(average_stay_customer, std_stay_customer))
in_office_duration = in_office_range[:, 1] - in_office_range[:, 0]
in_office_idx = np.nonzero(in_office_duration > visit_length)[0]
if len(in_office_idx) == 0:
visit_length = np.max(in_office_duration)
in_office_idx = np.nonzero(in_office_duration == visit_length)[0]
idx = np.random.choice(in_office_idx)
start_time = np.random.randint(in_office_range[idx, 0], in_office_range[idx, 1] - visit_length + 1)
end_time = start_time + visit_length
in_room = start_time + walking_time[self.office]["Room_1_1_141"] + get_white_bias(1)
out_room = end_time - walking_time["Room_1_1_141"][self.office] + get_white_bias(1)
# Decide meeting location
if num_customer > 1:
# Go meet in meeting room
room_name = "Room_1_1_141"
self.position[start_time:in_room] = possible_locations.index("Room_1_1_150")
self.position[in_room:out_room] = possible_locations.index("Room_1_1_141")
self.position[out_room:end_time] = possible_locations.index("Room_1_1_150")
else:
self.position[in_room:out_room] = possible_locations.index("busy")
room_name = self.office
return in_room, out_room, room_name
    def generate_go_other_office(self):
        """Insert a Poisson-distributed number of colleague visits into the day.

        For each visit a free in-office run is chosen, and the first coworker
        in `worker_assign` who is in their own office for the whole slot hosts
        the visit (the host is marked 'busy' for its duration).
        NOTE(review): `worker_assign` includes this worker too, so a worker may
        match themselves if they are in-office for the slot -- confirm intended.
        """
        for _ in range(np.random.poisson(visit_colleague)):
            # Find available time for current person to meet some colleague
            in_office_range = self.get_in_office_range()
            visit_length = int(np.random.normal(average_stay_in_colleague_office, std_stay_in_colleague_office))
            in_office_idx = np.nonzero((in_office_range[:, 1] - in_office_range[:, 0]) > visit_length)[0]
            if len(in_office_idx) == 0:
                continue
            idx = np.random.choice(in_office_idx)
            start_time = np.random.randint(in_office_range[idx, 0], in_office_range[idx, 1] - visit_length + 1)
            end_time = start_time + visit_length
            # Find available colleague
            for coworker in worker_assign:
                if coworker.check_in_office(start_time, end_time):
                    # Go meet the colleague: corridor, colleague's office, corridor.
                    in_colleague = start_time + walking_time[self.office][coworker.office] + get_white_bias(1)
                    out_colleague = end_time - walking_time[coworker.office][self.office] + get_white_bias(1)
                    self.position[start_time:in_colleague] = possible_locations.index("Room_1_1_150")
                    self.position[in_colleague:out_colleague] = possible_locations.index(coworker.office)
                    self.position[out_colleague:end_time] = possible_locations.index("Room_1_1_150")
                    coworker.position[in_colleague:out_colleague] = possible_locations.index("busy")
                    break
def generate_daily_route(self, customer_list):
time_list = list()
self.generate_lunch()
self.generate_daily_meeting()
for num_customer in customer_list:
time_list.append(self.handle_customer(num_customer))
self.generate_go_other_office()
return time_list
def get_position(self, sec):
if self.position[sec] == possible_locations.index("busy"):
return self.office
return possible_locations[int(self.position[sec])]
    def get_trigger(self):
        """Placeholder hook; intentionally does nothing (not yet implemented)."""
        pass
def get_white_bias(second):
    """Return a uniform random jitter in [-second, +second] whole seconds."""
    draw = np.random.randint(second * 2 + 1)
    return draw - second
# Module-level roster of the four office workers (Person is defined earlier in
# this file); each entry is (display name, email, own-office room id).
worker_assign = [Person("Employee 1", "employee1@company.com", "Room_1_1_140"),
                 Person("Employee 2", "employee2@company.com", "Room_1_1_142"),
                 Person("Employee 3", "employee3@company.com", "Room_1_1_144"),
                 Person("Employee 4", "employee4@company.com", "Room_1_1_143")]
def generate_daily_data():
    """Simulate one working day: attendance, guest parties, and daily routes.

    Returns a TUPLE (all_people, date_string): all_people holds the attending
    workers plus one Person per individual guest, and date_string is today's
    date as 'YYYY-MM-DD'. Callers must unpack the tuple.
    """
    available_worker = list()
    for i, worker in enumerate(worker_assign):
        if worker.decide_come():
            available_worker.append(i)
    # print(available_worker)
    # Draw today's number of guest parties and assign each party to a random
    # attending worker (guest_lambda is a module-level constant).
    guests = np.random.poisson(guest_lambda)
    guest_assign = np.random.choice(available_worker, size=guests)
    all_people = list()
    guest_counter = 0
    for i in available_worker:
        worker = worker_assign[i]
        all_people.append(worker)
        # Party sizes of 1-3 visitors for every party assigned to this worker.
        guest_list = np.random.randint(1, 4, size=np.sum(guest_assign == i))
        appointments = worker.generate_daily_route(guest_list)
        for j, appointment in enumerate(appointments):
            for _ in range(guest_list[j]):
                # Guests share the appointment's (arrive, leave, room) slot.
                new_guest = Person(f"Guest {guest_counter}", "")
                guest_counter += 1
                new_guest.customer_come(*appointment)
                all_people.append(new_guest)
    return all_people, str(datetime.now())[:10]
class Sensor:
    """A synthetic SmartThings-style device.

    `self.data` is the static device-discovery record and `self.response` is a
    mutable capability-status template. `generate_json` fills the template
    from the simulated occupants and serializes it. Any label other than the
    five Room_* types is treated as the hub itself, which has no status
    payload (`self.response is None`).

    Fixes vs. the original: `generate_json` previously ran two duplicated
    if/elif dispatch chains (one computing values, one writing them); they are
    merged. `get_lock`/`get_face` re-evaluated the loop-invariant "is this
    person in the room now?" test on every delay iteration; it is hoisted.
    All payload contents are unchanged.
    """

    def __init__(self, label, uuid, brick_name, room):
        """Build metadata (`data`) and status template (`response`) for `label`.

        label      -- device type; one of the Room_* names handled below,
                      anything else becomes the hub record.
        uuid       -- value reported as deviceId.
        brick_name -- value reported as deviceTypeId.
        room       -- value reported as roomId (unused for the hub).
        """
        if label == "Room_Temperature_Sensor":
            self.data = {
                'deviceId': uuid,
                'name': label,
                'label': label,
                'locationId': '76bf589a-a8d9-471b-9db6-d4838e9eea6f',
                'roomId': room,
                'deviceTypeId': brick_name,
                'deviceTypeName': 'SmartSense Button',
                'deviceNetworkType': 'ZIGBEE',
                'components': [{'id': 'main',
                                'capabilities': [{'id': 'temperatureMeasurement', 'version': 1},
                                                 {'id': 'battery', 'version': 1},
                                                 {'id': 'configuration', 'version': 1},
                                                 {'id': 'refresh', 'version': 1},
                                                 {'id': 'button', 'version': 1},
                                                 {'id': 'sensor', 'version': 1},
                                                 {'id': 'healthCheck', 'version': 1},
                                                 {'id': 'holdableButton', 'version': 1}]}],
                'dth': {'deviceTypeId': brick_name,
                        'deviceTypeName': 'SmartSense Button',
                        'deviceNetworkType': 'ZIGBEE',
                        'completedSetup': True,
                        'networkSecurityLevel': 'UNKNOWN',
                        'hubId': 'ede0ef78-70b7-4756-9dd6-db82d33fc9eb'},
                'type': 'DTH'}
            self.response = {'components': {'main': {
                'battery': {'battery': {'unit': '%', 'value': 95}},
                'button': {'button': {'value': 'pushed'},
                           'numberOfButtons': {'value': 1},
                           'supportedButtonValues': {'value': ['pushed', 'held', 'double']}},
                'configuration': {},
                'healthCheck': {'DeviceWatch-DeviceStatus': {'value': None},
                                'DeviceWatch-Enroll': {'value': {'checkInterval': 7260,
                                                                 'hubHardwareId': '0035',
                                                                 'lowBatteryThresholds': [15, 7, 3],
                                                                 'offlinePingable': '1',
                                                                 'protocol': 'zigbee',
                                                                 'scheme': 'TRACKED'}},
                                'checkInterval': {'data': {'hubHardwareId': '0035',
                                                           'offlinePingable': '1',
                                                           'protocol': 'zigbee'},
                                                  'unit': 's',
                                                  'value': 720},
                                'healthStatus': {'value': None}},
                'holdableButton': {'button': {'value': 'pushed'},
                                   'numberOfButtons': {'value': 1}},
                'refresh': {},
                'sensor': {},
                'temperatureMeasurement': {'temperature': {'unit': 'C', 'value': 26}}}}}
        elif label == "Room_Outlet_Controller":
            self.data = {
                'deviceId': uuid,
                'name': label,
                'label': label,
                'locationId': '76bf589a-a8d9-471b-9db6-d4838e9eea6f',
                'roomId': room,
                'deviceTypeId': brick_name,
                'deviceTypeName': 'SmartPower Outlet',
                'deviceNetworkType': 'ZIGBEE',
                'components': [{'id': 'main',
                                'capabilities': [{'id': 'switch', 'version': 1},
                                                 {'id': 'configuration', 'version': 1},
                                                 {'id': 'refresh', 'version': 1},
                                                 {'id': 'powerMeter', 'version': 1},
                                                 {'id': 'sensor', 'version': 1},
                                                 {'id': 'actuator', 'version': 1},
                                                 {'id': 'healthCheck', 'version': 1},
                                                 {'id': 'outlet', 'version': 1}]}],
                'dth': {'deviceTypeId': brick_name,
                        'deviceTypeName': 'SmartPower Outlet',
                        'deviceNetworkType': 'ZIGBEE',
                        'completedSetup': True,
                        'networkSecurityLevel': 'UNKNOWN',
                        'hubId': 'ede0ef78-70b7-4756-9dd6-db82d33fc9eb'},
                'type': 'DTH'}
            self.response = {'components': {'main': {
                'actuator': {},
                'configuration': {},
                'healthCheck': {'DeviceWatch-DeviceStatus': {'value': None},
                                'DeviceWatch-Enroll': {'value': None},
                                'checkInterval': {'data': {'hubHardwareId': '0035',
                                                           'protocol': 'zigbee'},
                                                  'unit': 's',
                                                  'value': 720},
                                'healthStatus': {'value': None}},
                'outlet': {'switch': {'value': 'on'}},
                'powerMeter': {'power': {'unit': 'W', 'value': 0.0}},
                'refresh': {},
                'sensor': {},
                'switch': {'switch': {'value': 'on'}}}}}
        elif label == "Room_Lock_Controller":
            self.data = {
                'deviceId': uuid,
                'name': label,
                'label': label,
                'deviceManufacturerCode': '033F-0001-0001',
                'locationId': '76bf589a-a8d9-471b-9db6-d4838e9eea6f',
                'roomId': room,
                'deviceTypeId': brick_name,
                'deviceTypeName': 'Z-Wave Lock Without Codes',
                'deviceNetworkType': 'ZWAVE',
                'components': [{'id': 'main',
                                'capabilities': [{'id': 'battery', 'version': 1},
                                                 {'id': 'configuration', 'version': 1},
                                                 {'id': 'lock', 'version': 1},
                                                 {'id': 'refresh', 'version': 1},
                                                 {'id': 'sensor', 'version': 1},
                                                 {'id': 'actuator', 'version': 1},
                                                 {'id': 'healthCheck', 'version': 1}]}],
                'dth': {'deviceTypeId': brick_name,
                        'deviceTypeName': 'Z-Wave Lock Without Codes',
                        'deviceNetworkType': 'ZWAVE',
                        'completedSetup': True,
                        'networkSecurityLevel': 'ZWAVE_S0_DOWNGRADE',
                        'hubId': 'ede0ef78-70b7-4756-9dd6-db82d33fc9eb'},
                'type': 'DTH'}
            self.response = {'components': {'main': {
                'actuator': {},
                'battery': {'battery': {'unit': '%', 'value': 100}},
                'configuration': {},
                'healthCheck': {'DeviceWatch-DeviceStatus': {'data': {}, 'value': None},
                                'DeviceWatch-Enroll': {'value': None},
                                'checkInterval': {'data': {'hubHardwareId': '0035',
                                                           'offlinePingable': '1',
                                                           'protocol': 'zwave'},
                                                  'unit': 's',
                                                  'value': 3600},
                                'healthStatus': {'data': {}, 'value': None}},
                'lock': {'lock': {'data': {}, 'value': 'locked'}},
                'refresh': {},
                'sensor': {}}}}
        elif label == "Room_Motion_Sensor":
            self.data = {
                'deviceId': uuid,
                'name': label,
                'label': label,
                'locationId': '76bf589a-a8d9-471b-9db6-d4838e9eea6f',
                'roomId': room,
                'deviceTypeId': brick_name,
                'deviceTypeName': 'SmartSense Motion Sensor',
                'deviceNetworkType': 'ZIGBEE',
                'components': [{'id': 'main',
                                'capabilities': [{'id': 'temperatureMeasurement', 'version': 1},
                                                 {'id': 'battery', 'version': 1},
                                                 {'id': 'motionSensor', 'version': 1},
                                                 {'id': 'configuration', 'version': 1},
                                                 {'id': 'refresh', 'version': 1},
                                                 {'id': 'sensor', 'version': 1},
                                                 {'id': 'healthCheck', 'version': 1}]}],
                'dth': {'deviceTypeId': brick_name,
                        'deviceTypeName': 'SmartSense Motion Sensor',
                        'deviceNetworkType': 'ZIGBEE',
                        'completedSetup': True,
                        'networkSecurityLevel': 'UNKNOWN',
                        'hubId': 'ede0ef78-70b7-4756-9dd6-db82d33fc9eb'},
                'type': 'DTH'}
            self.response = {'components': {'main': {
                'battery': {'battery': {'unit': '%', 'value': 100}},
                'configuration': {},
                'healthCheck': {'DeviceWatch-DeviceStatus': {'value': None},
                                'DeviceWatch-Enroll': {'value': {'checkInterval': 7260,
                                                                 'hubHardwareId': '0035',
                                                                 'lowBatteryThresholds': [15, 7, 3],
                                                                 'offlinePingable': '1',
                                                                 'protocol': 'zigbee',
                                                                 'scheme': 'TRACKED'}},
                                'checkInterval': {'data': {'hubHardwareId': '0035',
                                                           'offlinePingable': '1',
                                                           'protocol': 'zigbee'},
                                                  'unit': 's',
                                                  'value': 720},
                                'healthStatus': {'data': {}, 'value': None}},
                'motionSensor': {'motion': {'value': 'inactive'}},
                'refresh': {},
                'sensor': {},
                'temperatureMeasurement': {'temperature': {'unit': 'C', 'value': 27}}}}}
        elif label == "Room_Door_Camera":
            self.data = {
                'deviceId': uuid,
                'name': label,
                'label': label,
                'deviceManufacturerCode': 'Synthetic',
                'locationId': '76bf589a-a8d9-471b-9db6-d4838e9eea6f',
                'roomId': room,
                'deviceTypeId': brick_name,
                'deviceTypeName': 'Z-Wave Door Camera',
                'deviceNetworkType': 'ZWAVE',
                'components': [{'id': 'main',
                                'capabilities': [{'id': 'configuration', 'version': 1},
                                                 {'id': 'face', 'version': 1},
                                                 {'id': 'refresh', 'version': 1},
                                                 {'id': 'actuator', 'version': 1},
                                                 {'id': 'healthCheck', 'version': 1}]}],
                'dth': {'deviceTypeId': brick_name,
                        'deviceTypeName': 'Z-Wave Door Camera',
                        'deviceNetworkType': 'ZWAVE',
                        'completedSetup': True,
                        'networkSecurityLevel': 'UNKNOWN',
                        'hubId': 'ede0ef78-70b7-4756-9dd6-db82d33fc9eb'},
                'type': 'DTH'}
            self.response = {'components': {'main': {
                'actuator': {},
                'configuration': {},
                'healthCheck': {'DeviceWatch-DeviceStatus': {'value': None},
                                'DeviceWatch-Enroll': {'value': None},
                                'checkInterval': {'data': {'hubHardwareId': '0035',
                                                           'protocol': 'zigbee'},
                                                  'unit': 's',
                                                  'value': 3600},
                                'healthStatus': {'value': None}},
                'face': {'face': {'name': ['Alice', 'Bob', 'Charlie'], 'email': ['123@gmail.com']}},
                'refresh': {},
                'sensor': {}}}}
        else:
            # Any other label (e.g. "SmartThings v3 Hub") is the hub itself:
            # metadata only, no per-tick status payload.
            self.data = {
                'deviceId': uuid,
                'name': label,
                'label': label,
                'locationId': '76bf589a-a8d9-471b-9db6-d4838e9eea6f',
                'components': [{'id': 'main',
                                'capabilities': [{'id': 'bridge', 'version': 1}]}],
                'childDevices': [{'deviceId': '709459f7-dd70-43b7-b4d7-c4a4b4ab885a'}],
                'profile': {'id': '3e28a82b-ba46-3f7e-80ea-432e23069551'},
                'type': 'HUB'}
            self.response = None

    def generate_json(self, current, all_person):
        """Refresh this device's status from the simulation and serialize it.

        current    -- datetime-like object (hour/minute/second are read).
        all_person -- iterable of Person-like objects exposing get_position(sec).
        Returns the JSON-encoded response; the hub returns an empty object.
        """
        if self.response is None:
            return json.dumps({})
        sec = current.second + 60 * current.minute + 60 * 60 * current.hour
        name = self.data["name"]
        main = self.response["components"]["main"]
        # Single dispatch chain (the original computed values in one if/elif
        # chain and wrote them in a second, duplicated one).
        if name == "Room_Temperature_Sensor":
            main["temperatureMeasurement"]["temperature"]["value"] = self.get_temp(sec, all_person)
        elif name == "Room_Outlet_Controller":
            outlet, power = self.get_outlet()
            main["outlet"]["switch"]["value"] = outlet
            main["switch"]["switch"]["value"] = outlet
            # An outlet that is off draws no power.
            main["powerMeter"]["power"]["value"] = 0.0 if outlet == "off" else power
        elif name == "Room_Lock_Controller":
            main["lock"]["lock"]["value"] = self.get_lock(sec, all_person)
        elif name == "Room_Motion_Sensor":
            main["motionSensor"]["motion"]["value"] = self.get_motion(sec, all_person)
        elif name == "Room_Door_Camera":
            names, emails = self.get_face(sec, all_person)
            main["face"]["face"]["name"] = names
            main["face"]["face"]["email"] = emails
        return json.dumps(self.response)

    def get_temp(self, sec, all_person):
        """Room temperature: 26 baseline plus 0.5 per occupant currently in this room."""
        temp = 26
        for person in all_person:
            if person.get_position(sec) == self.data["roomId"]:
                temp += 0.5
        return temp

    def get_outlet(self):
        """Random outlet state ('on' with probability 0.85) and a 1.0-6.0 W draw."""
        state = np.random.choice(["on", "off"], p=[0.85, 0.15])
        power = np.random.randint(10, 61) / 10
        return state, power

    def get_lock(self, sec, all_person):
        """'unlocked' iff an occupant of this room was at its doorway within +/-2 s.

        lock_setting (module-level, defined earlier in this file) maps a room
        to its doorway location. NOTE(review): as in the original, when
        sec < delay_sec the exit check for that delay is skipped too; this
        only matters in the first seconds after midnight.
        """
        room = self.data["roomId"]
        for person in all_person:
            # Hoisted: the occupancy test does not depend on delay_sec.
            if person.get_position(sec) != room:
                continue
            for delay_sec in range(1, 3):
                # Get into the room
                if sec - delay_sec < 0:
                    continue
                if person.get_position(sec - delay_sec) == lock_setting[room]:
                    return "unlocked"
                # Get out of the room
                if sec + delay_sec >= 86400:
                    continue
                if person.get_position(sec + delay_sec) == lock_setting[room]:
                    return "unlocked"
        return "locked"

    def get_motion(self, sec, all_person):
        """'active' iff any occupant is in this room at second `sec`."""
        for person in all_person:
            if person.get_position(sec) == self.data["roomId"]:
                return "active"
        return "inactive"

    def get_face(self, sec, all_person):
        """Names/emails of occupants seen passing this room's doorway within +/-4 s.

        Each matching person is recorded at most once (first matching delay
        wins). Same midnight caveat as get_lock.
        """
        name = list()
        email = list()
        room = self.data["roomId"]
        for person in all_person:
            # Hoisted: the occupancy test does not depend on delay_sec.
            if person.get_position(sec) != room:
                continue
            for delay_sec in range(1, 5):
                # Get into the room
                if sec - delay_sec < 0:
                    continue
                if person.get_position(sec - delay_sec) == lock_setting[room]:
                    name.append(person.name)
                    email.append(person.email)
                    break
                # Get out of the room
                if sec + delay_sec >= 86400:
                    continue
                if person.get_position(sec + delay_sec) == lock_setting[room]:
                    name.append(person.name)
                    email.append(person.email)
                    break
        return name, email
def get_sensor_data(uuid, current, all_people):
    """Return the JSON reading of the registered sensor with deviceId `uuid`.

    Falls back to a JSON error payload when no sensor matches.
    """
    match = next((s for s in sensors if s.data["deviceId"] == uuid), None)
    if match is None:
        return json.dumps({"Error": "deviceId Invalid"})
    return match.generate_json(current, all_people)
def get_all_sensor_data(current, all_people):
    """Collect every registered sensor's decoded reading, keyed by deviceId."""
    readings = {s.data["deviceId"]: json.loads(s.generate_json(current, all_people))
                for s in sensors}
    return {"time": str(current), "data": readings}
def get_sensor_setting():
    """Return a device-discovery payload listing every registered sensor's metadata."""
    return {"items": [s.data for s in sensors], "_links": {}}
def main():
    """Run the synthetic-data pipeline.

    Builds the sensor fleet from sensor_config.json, writes the discovery
    report to scanned_devices.json, then steps the simulated clock from
    start_synthetic_data to end_synthetic_data in report_interval ticks,
    regenerating the day's people at each midnight and flushing one JSON file
    of readings per day into output/.
    """
    sensors = list()
    # label, uuid, brick_name, room
    with open("sensor_config.json", 'r') as json_file:
        sensor_config = json.load(json_file)
        for room in sensor_config:
            for brick in sensor_config[room]:
                uuid = sensor_config[room][brick]["UUID"]
                label = sensor_config[room][brick]["Type"]
                sensors.append(Sensor(label, uuid, brick, room))
    sensors.append(Sensor("SmartThings v3 Hub", "ede0ef78-70b7-4756-9dd6-db82d33fc9eb", None, None))
    # Generate sensor report
    with open("scanned_devices.json", 'w') as json_file:
        scanned_devices = {"items": [sensor.data for sensor in sensors], "_links": {}}
        json.dump(scanned_devices, json_file, indent=4)
    current = start_synthetic_data
    all_people = list()
    results = dict()
    for _ in range(int((end_synthetic_data - start_synthetic_data) / report_interval)):
        time_key = str(current)[-8:].replace(':', '_')
        results[time_key] = dict()
        if current.hour + current.minute + current.second == 0:
            # Generate a whole day of occupancy data.
            # BUG FIX: generate_daily_data() returns (people, date_string); the
            # original bound the whole tuple to all_people, which would crash as
            # soon as a sensor iterated it expecting Person objects.
            all_people, _day = generate_daily_data()
        # BUG FIX: generate_json(current, all_person) takes two arguments; the
        # original passed `results` as a third (TypeError) and never stored the
        # readings. Store each reading under its tick and deviceId instead.
        for sensor in sensors:
            results[time_key][sensor.data["deviceId"]] = json.loads(sensor.generate_json(current, all_people))
        current += report_interval
        if current.hour + current.minute + current.second == 0:
            # Midnight rollover: flush the finished day to disk and reset.
            time_str = str(current)[:10].replace(' ', '_').replace('-', '_')
            with open(f"output/{time_str}", 'w') as json_out:
                json.dump(results, json_out)
            results = dict()
# Module-level sensor registry built at import time from sensor_config.json;
# get_sensor_data / get_all_sensor_data / get_sensor_setting read this list.
# (main() rebuilds its own local copy with the same logic.)
sensors = list()
# label, uuid, brick_name, room
with open("sensor_config.json", 'r') as json_file:
    sensor_config = json.load(json_file)
for room in sensor_config:
    for brick in sensor_config[room]:
        uuid = sensor_config[room][brick]["UUID"]
        label = sensor_config[room][brick]["Type"]
        sensors.append(Sensor(label, uuid, brick, room))
# The hub itself is registered as a metadata-only device (response is None).
sensors.append(Sensor("SmartThings v3 Hub", "ede0ef78-70b7-4756-9dd6-db82d33fc9eb", None, None))
# print(len(np.nonzero(np.array([False, False, True]))[0]))
# value = (np.random.beta(eat_time_a, eat_time_b, 10000) + 0.1) * 100
# print(24 * 60 * 60)
# plt.hist(value)
# plt.show()
| 33,625 | 2,776 | 372 |
51925404b642feba98f6462a0b576d07f825f428 | 5,994 | py | Python | analysis/evalHyperWords.py | tlranda/FULL-W2V | 66189559387ec1d94e1f489942159874785c4c05 | [
"Apache-2.0"
] | null | null | null | analysis/evalHyperWords.py | tlranda/FULL-W2V | 66189559387ec1d94e1f489942159874785c4c05 | [
"Apache-2.0"
] | null | null | null | analysis/evalHyperWords.py | tlranda/FULL-W2V | 66189559387ec1d94e1f489942159874785c4c05 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Automates evaluating a word embedding with the HyperWords toolkit:
# word-similarity (WS353, SimLex999) and Google-analogy benchmarks.
# NOTE: shells out to `python2` scripts under ./hyperwords.
from os import path
import sys, subprocess, argparse
parser = argparse.ArgumentParser(description='Automates the process of evaluating a word embedding.')
parser.add_argument('file', type=str, help='Word embedding file.')
# NOTE: store_false, so args.skip_regen defaults to True and True means
# "DO regenerate" -- the flag name is inverted relative to its value.
parser.add_argument('-skip-regen', '--skip-regen', action='store_false', help="Skip regenerating the numpy and .py cache files (default always regenerates)")
parser.add_argument('-skip-similarity', '--skip-similarity', action='store_true', help="Skip similarity analyses (default does not skip)")
parser.add_argument('-skip-analogy', '--skip-analogy', action='store_true', help="Skip analogy analyses (default does not skip)")
parser.add_argument('-preserve-base-embedding', '--preserve-base-embedding', action='store_true', help="Don't delete the base embedding after creating .npy and .vocab caches")
parser.add_argument('-vocab', '--vocab', type=str, default=None, help='Vocabulary file to recount from')
parser.add_argument('-verbose', '--verbose', action='store_true', help='Output bonus info for analysis')
parser.add_argument('-simlex-bin', '--simlex-bin', action='store_true', help='Set output to only simlex bin')
parser.add_argument('-cutoff', '--cutoff', type=int, default=200, help='Cutoff for evaluation')
args = parser.parse_args()
# simlex-bin mode silences all chatter but implies verbose evaluator output.
if args.simlex_bin:
    args.verbose=True
script_home = path.dirname(__file__)
hyperdir = path.join(script_home, "hyperwords")
# Hyperwords requires embedding to be *.words
base_embedding = path.relpath(args.file)
words_ready = base_embedding.endswith(".words")
if words_ready:
    embedding = base_embedding
    base_embedding = base_embedding[:-6]
    # Attempting to skip regen but it doesn't exist--soft default back to not skipping
    if not args.skip_regen and not (path.exists(embedding+".npy") and path.exists(embedding+".vocab")):
        args.skip_regen = True
    elif not args.skip_regen and not args.simlex_bin:
        print("Using existing HyperWords adjusted embedding based on {0} and {1}".format(embedding+".npy", embedding+".vocab"))
else:
    # Create suitable name
    embedding = path.relpath(base_embedding+".words")
    if not args.simlex_bin:
        print("{0} is not a *.words file (HyperWords requirement), copying to {1}...".format(base_embedding, embedding))
    if path.exists(embedding): # Potentially unsafe to copy
        print("{0} already exists. Overwrite? y/n: ".format(embedding), end='')
        choice = input()
        while choice.lower() not in ['y', 'n']:
            print("Unrecognized input ({0})! {1} already exists. Overwrite? y/n: ".format(choice, embedding), end='')
            choice = input()
        if choice.lower() == 'y':
            args.skip_regen = True # This will be a new file, it must have embeddings regenerated
            import shutil
            shutil.copyfile(base_embedding, embedding)
            del shutil
        else:
            args.skip_regen = False
    else: # No collision, safe to copy
        import shutil
        shutil.copyfile(base_embedding, embedding)
        del shutil
# Create embedding for hyperwords (remember: skip_regen True == regenerate)
if args.skip_regen:
    # Special case: Base embedding does not exist
    if not path.exists(embedding):
        if not args.simlex_bin:
            print("No base embedding found to regenerate!!")
        if path.exists(embedding+'.npy') and path.exists(embedding+'.vocab'):
            if not args.simlex_bin:
                print("Continuing with cached materials {0}.npy and {0}.vocab".format(embedding))
        else:
            print("No cached {0}.npy or {0}.vocab to use, please ensure the correct file path was specified.".format(embedding))
            exit()
    else:
        # Apply numpy fixup for Hyperwords
        if not args.simlex_bin:
            print("Adjusting embedding for HyperWords Use...")
        completed_proc = subprocess.run(['python2', path.relpath(hyperdir+'/hyperwords/text2numpy.py'), embedding])
        if completed_proc.returncode != 0:
            print("FAILURE! Aborting.")
            exit()
        # Preserve disk space after cache by removing the original ascii file
        if not args.preserve_base_embedding:
            import os
            os.remove(embedding)
# Perform hyperwords evaluations
if not args.skip_similarity:
    extension = ['--vocab', args.vocab] if args.vocab is not None else []
    if args.verbose:
        extension.extend(['--verbose', '1'])
    if not args.simlex_bin:
        print("Similarity Results (WS353, SimLex999)\n-------------------------------------")
    # WS353 word-similarity benchmark.
    cmd = ['python2', path.relpath(hyperdir+'/hyperwords/ws_eval.py'), 'embedding', base_embedding, path.relpath(hyperdir+'/testsets/ws/ws353.txt')]
    cmd.extend(extension)
    completed_proc = subprocess.run(cmd)
    if completed_proc.returncode != 0:
        if completed_proc.stdout is not None:
            print(f'stdout ws353: {completed_proc.stdout}')
        if completed_proc.stderr is not None:
            print(f'stderr ws353: {completed_proc.stderr}')
        print("FAILURE! Aborting.")
        exit()
    print()
    # SimLex999 word-similarity benchmark (honors --cutoff when positive).
    cmd = ['python2', path.relpath(hyperdir+'/hyperwords/ws_eval.py'), 'embedding', base_embedding, path.relpath(hyperdir+'/testsets/ws/SimLex999.txt')]
    cmd.extend(extension)
    if args.cutoff > 0:
        cmd.extend(['--cutoff', str(args.cutoff)])
    completed_proc = subprocess.run(cmd)
    if completed_proc.returncode != 0:
        if completed_proc.stdout is not None:
            print(f'stdout simlex999: {completed_proc.stdout}')
        if completed_proc.stderr is not None:
            print(f'stderr simlex999: {completed_proc.stderr}')
        print("FAILURE! Aborting.")
        exit()
    if not args.simlex_bin:
        print()
if not args.skip_analogy and not args.simlex_bin:
    print("Google Analogy Results\n----------------------")
    completed_proc = subprocess.run(['python2', path.relpath(hyperdir+'/hyperwords/analogy_eval.py'), 'embedding', base_embedding, path.relpath(hyperdir+'/testsets/analogy/google.txt')])
    if completed_proc.returncode != 0:
        if completed_proc.stdout is not None:
            print(completed_proc.stdout)
        if completed_proc.stderr is not None:
            print(completed_proc.stderr)
        print("FAILURE! Aborting.")
        exit()
    print()
| 47.19685 | 184 | 0.702703 | #!/usr/bin/env python3
from os import path
import sys, subprocess, argparse
parser = argparse.ArgumentParser(description='Automates the process of evaluating a word embedding.')
parser.add_argument('file', type=str, help='Word embedding file.')
parser.add_argument('-skip-regen', '--skip-regen', action='store_false', help="Skip regenerating the numpy and .py cache files (default always regenerates)")
parser.add_argument('-skip-similarity', '--skip-similarity', action='store_true', help="Skip similarity analyses (default does not skip)")
parser.add_argument('-skip-analogy', '--skip-analogy', action='store_true', help="Skip analogy analyses (default does not skip)")
parser.add_argument('-preserve-base-embedding', '--preserve-base-embedding', action='store_true', help="Don't delete the base embedding after creating .npy and .vocab caches")
parser.add_argument('-vocab', '--vocab', type=str, default=None, help='Vocabulary file to recount from')
parser.add_argument('-verbose', '--verbose', action='store_true', help='Output bonus info for analysis')
parser.add_argument('-simlex-bin', '--simlex-bin', action='store_true', help='Set output to only simlex bin')
parser.add_argument('-cutoff', '--cutoff', type=int, default=200, help='Cutoff for evaluation')
args = parser.parse_args()
if args.simlex_bin:
args.verbose=True
script_home = path.dirname(__file__)
hyperdir = path.join(script_home, "hyperwords")
# Hyperwords requires embedding to be *.words
base_embedding = path.relpath(args.file)
words_ready = base_embedding.endswith(".words")
if words_ready:
embedding = base_embedding
base_embedding = base_embedding[:-6]
# Attempting to skip regen but it doesn't exist--soft default back to not skipping
if not args.skip_regen and not (path.exists(embedding+".npy") and path.exists(embedding+".vocab")):
args.skip_regen = True
elif not args.skip_regen and not args.simlex_bin:
print("Using existing HyperWords adjusted embedding based on {0} and {1}".format(embedding+".npy", embedding+".vocab"))
else:
# Create suitable name
embedding = path.relpath(base_embedding+".words")
if not args.simlex_bin:
print("{0} is not a *.words file (HyperWords requirement), copying to {1}...".format(base_embedding, embedding))
if path.exists(embedding): # Potentially unsafe to copy
print("{0} already exists. Overwrite? y/n: ".format(embedding), end='')
choice = input()
while choice.lower() not in ['y', 'n']:
print("Unrecognized input ({0})! {1} already exists. Overwrite? y/n: ".format(choice, embedding), end='')
choice = input()
if choice.lower() == 'y':
args.skip_regen = True # This will be a new file, it must have embeddings regenerated
import shutil
shutil.copyfile(base_embedding, embedding)
del shutil
else:
args.skip_regen = False
else: # No collision, safe to copy
import shutil
shutil.copyfile(base_embedding, embedding)
del shutil
# Create embedding for hyperwords
if args.skip_regen:
# Special case: Base embedding does not exist
if not path.exists(embedding):
if not args.simlex_bin:
print("No base embedding found to regenerate!!")
if path.exists(embedding+'.npy') and path.exists(embedding+'.vocab'):
if not args.simlex_bin:
print("Continuing with cached materials {0}.npy and {0}.vocab".format(embedding))
else:
print("No cached {0}.npy or {0}.vocab to use, please ensure the correct file path was specified.".format(embedding))
exit()
else:
# Apply numpy fixup for Hyperwords
if not args.simlex_bin:
print("Adjusting embedding for HyperWords Use...")
completed_proc = subprocess.run(['python2', path.relpath(hyperdir+'/hyperwords/text2numpy.py'), embedding])
if completed_proc.returncode != 0:
print("FAILURE! Aborting.")
exit()
# Preserve disk space after cache by removing the original ascii file
if not args.preserve_base_embedding:
import os
os.remove(embedding)
# Perform hyperwords evaluations
if not args.skip_similarity:
extension = ['--vocab', args.vocab] if args.vocab is not None else []
if args.verbose:
extension.extend(['--verbose', '1'])
if not args.simlex_bin:
print("Similarity Results (WS353, SimLex999)\n-------------------------------------")
cmd = ['python2', path.relpath(hyperdir+'/hyperwords/ws_eval.py'), 'embedding', base_embedding, path.relpath(hyperdir+'/testsets/ws/ws353.txt')]
cmd.extend(extension)
completed_proc = subprocess.run(cmd)
if completed_proc.returncode != 0:
if completed_proc.stdout is not None:
print(f'stdout ws353: {completed_proc.stdout}')
if completed_proc.stderr is not None:
print(f'stderr ws353: {completed_proc.stderr}')
print("FAILURE! Aborting.")
exit()
print()
cmd = ['python2', path.relpath(hyperdir+'/hyperwords/ws_eval.py'), 'embedding', base_embedding, path.relpath(hyperdir+'/testsets/ws/SimLex999.txt')]
cmd.extend(extension)
if args.cutoff > 0:
cmd.extend(['--cutoff', str(args.cutoff)])
completed_proc = subprocess.run(cmd)
if completed_proc.returncode != 0:
if completed_proc.stdout is not None:
print(f'stdout simlex999: {completed_proc.stdout}')
if completed_proc.stderr is not None:
print(f'stderr simlex999: {completed_proc.stderr}')
print("FAILURE! Aborting.")
exit()
if not args.simlex_bin:
print()
if not args.skip_analogy and not args.simlex_bin:
print("Google Analogy Results\n----------------------")
completed_proc = subprocess.run(['python2', path.relpath(hyperdir+'/hyperwords/analogy_eval.py'), 'embedding', base_embedding, path.relpath(hyperdir+'/testsets/analogy/google.txt')])
if completed_proc.returncode != 0:
if completed_proc.stdout is not None:
print(completed_proc.stdout)
if completed_proc.stderr is not None:
print(completed_proc.stderr)
print("FAILURE! Aborting.")
exit()
print()
| 0 | 0 | 0 |
d097bc591f4b95b5d080c39a0bdd791b5d42748d | 6,713 | py | Python | module1-introduction-to-sql/rpg_queries1.py | bs3537/DS-Unit-3-Sprint-2-SQL-and-Databases | 7020d2a957bf7fcb150228ce9926dc9d59e252f0 | [
"MIT"
] | null | null | null | module1-introduction-to-sql/rpg_queries1.py | bs3537/DS-Unit-3-Sprint-2-SQL-and-Databases | 7020d2a957bf7fcb150228ce9926dc9d59e252f0 | [
"MIT"
] | null | null | null | module1-introduction-to-sql/rpg_queries1.py | bs3537/DS-Unit-3-Sprint-2-SQL-and-Databases | 7020d2a957bf7fcb150228ce9926dc9d59e252f0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""rpg_queries.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ufwd5h-epfNkHGiNJTkFTDQsbupk24Ld
"""
#How many total characters are there?
import sqlite3
conn = sqlite3.connect('rpg_db.sqlite3')
curs = conn.cursor()
#How many total characters are there?
query1 = "SELECT count(name) as character_count FROM charactercreator_character"
results1 = curs.execute(query1).fetchall()
print("Total number of characters=", results1)
#How many in each sublass?
#cleric subclass
query2= "SELECT count(character_ptr_id) as character_count FROM charactercreator_cleric"
results2 = curs.execute(query2).fetchall()
print("Total number of characters in cleric sublass=", results2)
#fighter sublass
query3 = "SELECT count(character_ptr_id) as character_count FROM charactercreator_fighter"
results3 = curs.execute(query3).fetchall()
print("Total number of characters in fighter sublass=", results3)
#mage sublass
query4 = "SELECT count(character_ptr_id) as character_count FROM charactercreator_mage"
results4 = curs.execute(query4).fetchall()
print("Total number of characters in mage sublass=", results4)
#thief sublass
query5 = "SELECT count(character_ptr_id) as character_count FROM charactercreator_thief"
results5 = curs.execute(query5).fetchall()
print("Total number of characters in thief sublass=", results5)
#necromancer is a sublass of mage, so we don't need to count it separately
#The sum of each individual class from the above is = 302, total number of characters
#extrating table with each character and its class
query6="""
SELECT
ch.character_id
,ch.name
--,cl.*
--,f.*
--,m.*
--,th.*
-- ,if(cl.character_ptr_id is not null, "cleric", "todo") as char_type
,CASE
WHEN cl.character_ptr_id is not null THEN "cleric"
WHEN f.character_ptr_id is not null THEN "fighter"
WHEN n.mage_ptr_id is not null THEN "mage-necro"
WHEN m.character_ptr_id is not null THEN "mage"
WHEN th.character_ptr_id is not null THEN "thief"
ELSE "todo"
END as char_type
from charactercreator_character as ch
left join charactercreator_cleric as cl on ch.character_id = cl.character_ptr_id
left join charactercreator_fighter as f on ch.character_id = f.character_ptr_id
left join charactercreator_mage as m on ch.character_id = m.character_ptr_id
left join charactercreator_thief as th on ch.character_id = th.character_ptr_id
-- left join charactercreator_necromancer as n on ch.character_id = n.character_ptr_id
left join charactercreator_necromancer as n on m.character_ptr_id = n.mage_ptr_id
"""
curs.execute(query6).fetchall()
#Number of characters in each class
query7= """
select
subquery1.char_type
,count(distinct subquery1.character_id) as char_count
from (
-- row per character (302 total)
select
ch.character_id
,ch.name
--,cl.*
--,f.*
--,m.*
--,th.*
-- ,if(cl.character_ptr_id is not null, "cleric", "todo") as char_type
,CASE
WHEN cl.character_ptr_id is not null THEN "cleric"
WHEN f.character_ptr_id is not null THEN "fighter"
WHEN n.mage_ptr_id is not null THEN "mage-necro"
WHEN m.character_ptr_id is not null THEN "mage"
WHEN th.character_ptr_id is not null THEN "thief"
ELSE "todo"
END as char_type
from charactercreator_character as ch
left join charactercreator_cleric as cl on ch.character_id = cl.character_ptr_id
left join charactercreator_fighter as f on ch.character_id = f.character_ptr_id
left join charactercreator_mage as m on ch.character_id = m.character_ptr_id
left join charactercreator_thief as th on ch.character_id = th.character_ptr_id
-- left join charactercreator_necromancer as n on ch.character_id = n.character_ptr_id
left join charactercreator_necromancer as n on m.character_ptr_id = n.mage_ptr_id
) subquery1
group by subquery1.char_type
"""
curs.execute(query7).fetchall()
#Another way to find number of characters in each class
query8= """
-- row per character (302 total)
select
CASE
WHEN cl.character_ptr_id is not null THEN "cleric"
WHEN f.character_ptr_id is not null THEN "fighter"
WHEN n.mage_ptr_id is not null THEN "mage-necro"
WHEN m.character_ptr_id is not null THEN "mage"
WHEN th.character_ptr_id is not null THEN "thief"
ELSE "todo"
END as char_type
,count(distinct ch.character_id) as char_count
from charactercreator_character as ch
left join charactercreator_cleric as cl on ch.character_id = cl.character_ptr_id
left join charactercreator_fighter as f on ch.character_id = f.character_ptr_id
left join charactercreator_mage as m on ch.character_id = m.character_ptr_id
left join charactercreator_thief as th on ch.character_id = th.character_ptr_id
-- left join charactercreator_necromancer as n on ch.character_id = n.character_ptr_id
left join charactercreator_necromancer as n on m.character_ptr_id = n.mage_ptr_id
group by char_type
"""
curs.execute(query8).fetchall()
#How many total items?
query9= """
select count(name) as item_count
from armory_item
"""
results6 = curs.execute(query9).fetchall()
print("Total number of items=", results6)
#How many items each character has?
query10="""
SELECT a.character_id, ch.name, count(a.item_id)
FROM charactercreator_character_inventory as a
left join charactercreator_character as ch on ch.character_id = a.character_id
GROUP BY a.character_id
LIMIT 20
"""
print("Character ID, character name, number of items")
curs.execute(query10).fetchall()
#How many weapons each character has?
#left join statement should come after FROM statement followed by WHERE, GROUP BY, etc.
query11= """
SELECT a.character_id, d.name, count(a.item_id)
FROM charactercreator_character_inventory as a, armory_item as b, armory_weapon as c
left join charactercreator_character as d on d.character_id = a.character_id
WHERE a.item_id = b.item_id AND b.item_id = c.item_ptr_id
GROUP BY a.character_id
LIMIT 20
"""
print("Character ID, Chacter name, number of weapons")
curs.execute(query11).fetchall()
#On average, how many items each Character has?
query12="""
SELECT COUNT(inventory.item_id) * 1.0/ COUNT(DISTINCT inventory.character_id)
FROM charactercreator_character_inventory AS inventory
"""
results6 = curs.execute(query12).fetchall()
print("Average number of items per character=", results6)
#On average how many weapons each character has?
query13="""
SELECT COUNT(a.item_id) *1.0 / COUNT(DISTINCT a.character_id)
FROM charactercreator_character_inventory AS a, armory_weapon AS b
WHERE a.item_id = b.item_ptr_id
"""
results7 = curs.execute(query13).fetchall()
print("Average number of weapons per character=", results7) | 34.425641 | 90 | 0.768807 | # -*- coding: utf-8 -*-
"""rpg_queries.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ufwd5h-epfNkHGiNJTkFTDQsbupk24Ld
"""
#How many total characters are there?
import sqlite3
conn = sqlite3.connect('rpg_db.sqlite3')
curs = conn.cursor()
#How many total characters are there?
query1 = "SELECT count(name) as character_count FROM charactercreator_character"
results1 = curs.execute(query1).fetchall()
print("Total number of characters=", results1)
#How many in each sublass?
#cleric subclass
query2= "SELECT count(character_ptr_id) as character_count FROM charactercreator_cleric"
results2 = curs.execute(query2).fetchall()
print("Total number of characters in cleric sublass=", results2)
#fighter sublass
query3 = "SELECT count(character_ptr_id) as character_count FROM charactercreator_fighter"
results3 = curs.execute(query3).fetchall()
print("Total number of characters in fighter sublass=", results3)
#mage sublass
query4 = "SELECT count(character_ptr_id) as character_count FROM charactercreator_mage"
results4 = curs.execute(query4).fetchall()
print("Total number of characters in mage sublass=", results4)
#thief sublass
query5 = "SELECT count(character_ptr_id) as character_count FROM charactercreator_thief"
results5 = curs.execute(query5).fetchall()
print("Total number of characters in thief sublass=", results5)
#necromancer is a sublass of mage, so we don't need to count it separately
#The sum of each individual class from the above is = 302, total number of characters
#extrating table with each character and its class
query6="""
SELECT
ch.character_id
,ch.name
--,cl.*
--,f.*
--,m.*
--,th.*
-- ,if(cl.character_ptr_id is not null, "cleric", "todo") as char_type
,CASE
WHEN cl.character_ptr_id is not null THEN "cleric"
WHEN f.character_ptr_id is not null THEN "fighter"
WHEN n.mage_ptr_id is not null THEN "mage-necro"
WHEN m.character_ptr_id is not null THEN "mage"
WHEN th.character_ptr_id is not null THEN "thief"
ELSE "todo"
END as char_type
from charactercreator_character as ch
left join charactercreator_cleric as cl on ch.character_id = cl.character_ptr_id
left join charactercreator_fighter as f on ch.character_id = f.character_ptr_id
left join charactercreator_mage as m on ch.character_id = m.character_ptr_id
left join charactercreator_thief as th on ch.character_id = th.character_ptr_id
-- left join charactercreator_necromancer as n on ch.character_id = n.character_ptr_id
left join charactercreator_necromancer as n on m.character_ptr_id = n.mage_ptr_id
"""
curs.execute(query6).fetchall()
#Number of characters in each class
query7= """
select
subquery1.char_type
,count(distinct subquery1.character_id) as char_count
from (
-- row per character (302 total)
select
ch.character_id
,ch.name
--,cl.*
--,f.*
--,m.*
--,th.*
-- ,if(cl.character_ptr_id is not null, "cleric", "todo") as char_type
,CASE
WHEN cl.character_ptr_id is not null THEN "cleric"
WHEN f.character_ptr_id is not null THEN "fighter"
WHEN n.mage_ptr_id is not null THEN "mage-necro"
WHEN m.character_ptr_id is not null THEN "mage"
WHEN th.character_ptr_id is not null THEN "thief"
ELSE "todo"
END as char_type
from charactercreator_character as ch
left join charactercreator_cleric as cl on ch.character_id = cl.character_ptr_id
left join charactercreator_fighter as f on ch.character_id = f.character_ptr_id
left join charactercreator_mage as m on ch.character_id = m.character_ptr_id
left join charactercreator_thief as th on ch.character_id = th.character_ptr_id
-- left join charactercreator_necromancer as n on ch.character_id = n.character_ptr_id
left join charactercreator_necromancer as n on m.character_ptr_id = n.mage_ptr_id
) subquery1
group by subquery1.char_type
"""
curs.execute(query7).fetchall()
#Another way to find number of characters in each class
query8= """
-- row per character (302 total)
select
CASE
WHEN cl.character_ptr_id is not null THEN "cleric"
WHEN f.character_ptr_id is not null THEN "fighter"
WHEN n.mage_ptr_id is not null THEN "mage-necro"
WHEN m.character_ptr_id is not null THEN "mage"
WHEN th.character_ptr_id is not null THEN "thief"
ELSE "todo"
END as char_type
,count(distinct ch.character_id) as char_count
from charactercreator_character as ch
left join charactercreator_cleric as cl on ch.character_id = cl.character_ptr_id
left join charactercreator_fighter as f on ch.character_id = f.character_ptr_id
left join charactercreator_mage as m on ch.character_id = m.character_ptr_id
left join charactercreator_thief as th on ch.character_id = th.character_ptr_id
-- left join charactercreator_necromancer as n on ch.character_id = n.character_ptr_id
left join charactercreator_necromancer as n on m.character_ptr_id = n.mage_ptr_id
group by char_type
"""
curs.execute(query8).fetchall()
#How many total items?
query9= """
select count(name) as item_count
from armory_item
"""
results6 = curs.execute(query9).fetchall()
print("Total number of items=", results6)
#How many items each character has?
query10="""
SELECT a.character_id, ch.name, count(a.item_id)
FROM charactercreator_character_inventory as a
left join charactercreator_character as ch on ch.character_id = a.character_id
GROUP BY a.character_id
LIMIT 20
"""
print("Character ID, character name, number of items")
curs.execute(query10).fetchall()
#How many weapons each character has?
#left join statement should come after FROM statement followed by WHERE, GROUP BY, etc.
query11= """
SELECT a.character_id, d.name, count(a.item_id)
FROM charactercreator_character_inventory as a, armory_item as b, armory_weapon as c
left join charactercreator_character as d on d.character_id = a.character_id
WHERE a.item_id = b.item_id AND b.item_id = c.item_ptr_id
GROUP BY a.character_id
LIMIT 20
"""
print("Character ID, Chacter name, number of weapons")
curs.execute(query11).fetchall()
#On average, how many items each Character has?
query12="""
SELECT COUNT(inventory.item_id) * 1.0/ COUNT(DISTINCT inventory.character_id)
FROM charactercreator_character_inventory AS inventory
"""
results6 = curs.execute(query12).fetchall()
print("Average number of items per character=", results6)
#On average how many weapons each character has?
query13="""
SELECT COUNT(a.item_id) *1.0 / COUNT(DISTINCT a.character_id)
FROM charactercreator_character_inventory AS a, armory_weapon AS b
WHERE a.item_id = b.item_ptr_id
"""
results7 = curs.execute(query13).fetchall()
print("Average number of weapons per character=", results7) | 0 | 0 | 0 |
7fefb6f60e2cc185f3cede33e1e7a8d888d88ccb | 6,011 | py | Python | pmdarima/arima/approx.py | Juanlu001/pmdarima | e504247e9a8900e848e7cbf38ef67d179881a644 | [
"MIT"
] | null | null | null | pmdarima/arima/approx.py | Juanlu001/pmdarima | e504247e9a8900e848e7cbf38ef67d179881a644 | [
"MIT"
] | null | null | null | pmdarima/arima/approx.py | Juanlu001/pmdarima | e504247e9a8900e848e7cbf38ef67d179881a644 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Author: Taylor Smith <taylor.smith@alkaline-ml.com>
#
# R approx function
from __future__ import absolute_import
from sklearn.utils.validation import check_array, column_or_1d
import numpy as np
from ..utils.array import c
from ..utils import get_callable
from ..compat.numpy import DTYPE
# since the C import relies on the C code having been built with Cython,
# and since the platform might name the .so file something funky (like
# _arima.cpython-35m-darwin.so), import this absolutely and not relatively.
from pmdarima.arima._arima import C_Approx
# Public API of this module.
__all__ = [
    'approx'
]
# the ints get passed to C code
# (maps the interpolation method name to the integer code C_Approx expects)
VALID_APPROX = {
    'constant': 2,
    'linear': 1
}
# get the valid tie funcs
# (maps the ``ties`` argument to the aggregation applied to duplicate x values)
VALID_TIES = {
    'ordered': None,  # never really used...
    'mean': np.average
}
# identity function defined once to avoid multiple lambda calls
# littered throughout
_identity = (lambda t: t)
def _regularize(x, y, ties):
    """Regularize the values, make them ordered and remove duplicates.
    If the ``ties`` parameter is explicitly set to 'ordered' then order
    is already assumed. Otherwise, the removal process will happen.
    Parameters
    ----------
    x : array-like, shape=(n_samples,)
        The x vector.
    y : array-like, shape=(n_samples,)
        The y vector.
    ties : str
        One of {'ordered', 'mean'}, handles the ties.
    """
    x, y = [
        column_or_1d(check_array(arr, ensure_2d=False,
                                 force_all_finite=False,
                                 dtype=DTYPE))
        for arr in (x, y)
    ]
    nx = x.shape[0]
    if nx != y.shape[0]:
        raise ValueError('array dim mismatch: %i != %i' % (nx, y.shape[0]))
    # manipulate x if needed. if ties is 'ordered' we assume that x is
    # already ordered and everything has been handled already...
    if ties != 'ordered':
        o = np.argsort(x)
        # keep ordered with one another
        x = x[o]
        y = y[o]
        # what if any are the same?
        ux = np.unique(x)
        if ux.shape[0] < nx:
            # Do we want to warn for this?
            # warnings.warn('collapsing to unique "x" values')
            # vectorize this function to apply to each "cell" in the array.
            # FIX: this inner function was missing in this copy, which made
            # the np.vectorize call below fail with a NameError whenever
            # duplicate x values had to be collapsed.
            def tie_apply(f, u_val):
                vals = y[x == u_val]  # mask y where x == the unique value
                return f(vals)
            # replace the duplicates in the y array with the "tie" func
            func = VALID_TIES.get(ties, _identity)
            # maybe expensive to vectorize on the fly? Not sure; would need
            # to do some benchmarking. However, we need to in order to keep y
            # and x in scope...
            y = np.vectorize(tie_apply)(func, ux)
            # does ux need ordering? hmm..
            x = ux
    return x, y
def approx(x, y, xout, method='linear', rule=1, f=0, yleft=None,
           yright=None, ties='mean'):
    """Linearly interpolate points.

    Return the points which (linearly or stepwise) interpolate the given
    data at the locations ``xout``.

    Parameters
    ----------
    x : array-like, shape=(n_samples,)
        Coordinates of the points to be interpolated.
    y : array-like, shape=(n_samples,)
        Coordinates of the points to be interpolated.
    xout : int, float or iterable
        Scalar or iterable of positions at which to interpolate.
    method : str, optional (default='linear')
        Interpolation method, either "linear" or "constant".
    rule : int, optional (default=1)
        Extrapolation behavior outside ``[min(x), max(x)]``: with 1,
        np.nan is returned for such points; with 2, the value at the
        closest data extreme is used.
    f : int, optional (default=0)
        For ``method="constant"``, a value in [0, 1] blending the left
        and right step values as ``y0*(1-f) + y1*f``; right-continuous
        for f == 0 and left-continuous for f == 1.
    yleft : float, optional (default=None)
        Value returned for inputs below ``min(x)``; the default depends
        on ``rule``.
    yright : float, optional (default=None)
        Value returned for inputs above ``max(x)``; the default depends
        on ``rule``.
    ties : str, optional (default='mean')
        Handling of tied ``x`` values, "mean" or "ordered".
    """
    if method not in VALID_APPROX:
        raise ValueError('method must be one of %r' % VALID_APPROX)

    # coerce xout into a double-precision vector
    xout = c(xout).astype(np.float64)

    # resolve the method name to the integer code the C routine expects
    method_key = method
    method = get_callable(method_key, VALID_APPROX)

    # order the vectors and collapse any duplicate x values
    x, y = _regularize(x, y, ties)
    nx = x.shape[0]

    # a single point cannot be linearly interpolated (size 0 was already
    # rejected inside _regularize)
    if nx == 1 and method_key == 'linear':
        raise ValueError('need at least two points to '
                         'linearly interpolate')

    # rule-dependent defaults for the out-of-range boundary values
    if yleft is None:
        yleft = np.nan if rule == 1 else y[0]
    if yright is None:
        yright = np.nan if rule == 1 else y[-1]

    # delegate the actual interpolation to the C subroutine
    yout = C_Approx(x, y, xout, method, f, yleft, yright)
    return xout, yout
| 32.317204 | 79 | 0.618865 | # -*- coding: utf-8 -*-
#
# Author: Taylor Smith <taylor.smith@alkaline-ml.com>
#
# R approx function
from __future__ import absolute_import
from sklearn.utils.validation import check_array, column_or_1d
import numpy as np
from ..utils.array import c
from ..utils import get_callable
from ..compat.numpy import DTYPE
# since the C import relies on the C code having been built with Cython,
# and since the platform might name the .so file something funky (like
# _arima.cpython-35m-darwin.so), import this absolutely and not relatively.
from pmdarima.arima._arima import C_Approx
# Public API of this module.
__all__ = [
    'approx'
]
# the ints get passed to C code
# (maps the interpolation method name to the integer code C_Approx expects)
VALID_APPROX = {
    'constant': 2,
    'linear': 1
}
# get the valid tie funcs
# (maps the ``ties`` argument to the aggregation applied to duplicate x values)
VALID_TIES = {
    'ordered': None,  # never really used...
    'mean': np.average
}
# identity function defined once to avoid multiple lambda calls
# littered throughout
_identity = (lambda t: t)
def _regularize(x, y, ties):
    """Sort ``x``/``y`` together and collapse duplicate ``x`` values.

    When ``ties`` is 'ordered' the inputs are assumed to be pre-sorted
    and are returned after validation only. Otherwise both vectors are
    ordered by ``x`` and duplicate ``x`` entries are merged, aggregating
    the matching ``y`` values with the function named by ``ties``.

    Parameters
    ----------
    x : array-like, shape=(n_samples,)
        The x vector.
    y : array-like, shape=(n_samples,)
        The y vector.
    ties : str
        One of {'ordered', 'mean'}, handles the ties.
    """
    x = column_or_1d(check_array(x, ensure_2d=False,
                                 force_all_finite=False,
                                 dtype=DTYPE))
    y = column_or_1d(check_array(y, ensure_2d=False,
                                 force_all_finite=False,
                                 dtype=DTYPE))
    nx = x.shape[0]
    if nx != y.shape[0]:
        raise ValueError('array dim mismatch: %i != %i' % (nx, y.shape[0]))

    if ties == 'ordered':
        # caller guarantees ordering; nothing further to do
        return x, y

    # sort both vectors by x, keeping them aligned
    order = np.argsort(x)
    x, y = x[order], y[order]

    ux = np.unique(x)
    if ux.shape[0] < nx:
        # duplicate x values present: aggregate their y values with the
        # tie function ('mean' -> np.average, otherwise identity)
        agg = VALID_TIES.get(ties, _identity)

        def collapse(fn, u_val):
            # gather the y values belonging to one unique x and reduce them
            return fn(y[x == u_val])

        y = np.vectorize(collapse)(agg, ux)
        x = ux
    return x, y
def approx(x, y, xout, method='linear', rule=1, f=0, yleft=None,
           yright=None, ties='mean'):
    """Linearly interpolate points.

    Compute a list of points which (linearly or stepwise) interpolate
    the given data at the locations ``xout``.

    Parameters
    ----------
    x : array-like, shape=(n_samples,)
        Coordinates of the points to be interpolated.
    y : array-like, shape=(n_samples,)
        Coordinates of the points to be interpolated.
    xout : int, float or iterable
        Scalar or iterable of positions where interpolation takes place.
    method : str, optional (default='linear')
        Interpolation method, "linear" or "constant".
    rule : int, optional (default=1)
        Behavior outside ``[min(x), max(x)]``: rule 1 yields np.nan for
        such points, rule 2 uses the value at the closest data extreme.
    f : int, optional (default=0)
        For ``method="constant"``, a number in [0, 1] blending left and
        right step values as ``y0*(1-f) + y1*f`` (right-continuous at
        f == 0, left-continuous at f == 1).
    yleft : float, optional (default=None)
        Value for inputs below ``min(x)``; default determined by ``rule``.
    yright : float, optional (default=None)
        Value for inputs above ``max(x)``; default determined by ``rule``.
    ties : str, optional (default='mean')
        Handling of tied ``x`` values, "mean" or "ordered".
    """
    if method not in VALID_APPROX:
        raise ValueError('method must be one of %r' % VALID_APPROX)

    # ensure xout is a double-precision vector
    xout = c(xout).astype(np.float64)

    # map the method name onto the integer understood by the C routine
    method_key = method
    method = get_callable(method_key, VALID_APPROX)

    # order/deduplicate the input vectors (also validates their sizes)
    x, y = _regularize(x, y, ties)
    nx = x.shape[0]

    if nx == 1:
        # a lone point can only be "interpolated" by the constant method
        if method_key == 'linear':
            raise ValueError('need at least two points to '
                             'linearly interpolate')

    # boundary defaults depend on the extrapolation rule
    if yleft is None:
        yleft = np.nan if rule == 1 else y[0]
    if yright is None:
        yright = np.nan if rule == 1 else y[-1]

    # hand off to the compiled C subroutine
    return xout, C_Approx(x, y, xout, method, f, yleft, yright)
| 109 | 0 | 34 |
2c7fe0e587563e12e640e93436de71b423c3ba01 | 337 | py | Python | model/model.py | madtyn/mvcPython | 7377bc5e4eec058ff098b850159971f4830c0488 | [
"MIT"
] | null | null | null | model/model.py | madtyn/mvcPython | 7377bc5e4eec058ff098b850159971f4830c0488 | [
"MIT"
] | null | null | null | model/model.py | madtyn/mvcPython | 7377bc5e4eec058ff098b850159971f4830c0488 | [
"MIT"
] | null | null | null | from pathlib import Path
from common.observer import Observable
# Manual smoke test: collect metrics over the user's home directory and
# write them to ``test.json``.
if __name__ == '__main__':
    # NOTE(review): ``Model`` is not defined or imported in this file as
    # shown -- confirm where it comes from before running this script.
    m = Model()
    test_path = Path.home().resolve()
    m.collect_metrics(str(test_path), [str(Path('.').resolve())], 'test.json')
| 22.466667 | 78 | 0.655786 | from pathlib import Path
from common.observer import Observable
class Model(Observable):
    """Model component of the MVC triad; builds on common.observer.Observable."""
    def __init__(self, *args, **kwargs):
        # No model-specific state yet; forward everything to Observable.
        super().__init__(*args, **kwargs)
# Manual smoke test: collect metrics over the user's home directory and
# write them to ``test.json``.
if __name__ == '__main__':
    m = Model()
    test_path = Path.home().resolve()
    # collect_metrics is inherited/defined outside this chunk -- presumably
    # on Observable in common.observer; verify there.
    m.collect_metrics(str(test_path), [str(Path('.').resolve())], 'test.json')
| 57 | 3 | 49 |
497b14452dfa29906a0b0faa661efbca15094cbc | 859 | py | Python | {{cookiecutter.service_name}}/{{cookiecutter.service_slug}}/schemas.py | zeel-dev/zebuker-cookiecutter | 31d25321b6dca8e3207f60da907e5a7996860308 | [
"BSD-2-Clause"
] | null | null | null | {{cookiecutter.service_name}}/{{cookiecutter.service_slug}}/schemas.py | zeel-dev/zebuker-cookiecutter | 31d25321b6dca8e3207f60da907e5a7996860308 | [
"BSD-2-Clause"
] | null | null | null | {{cookiecutter.service_name}}/{{cookiecutter.service_slug}}/schemas.py | zeel-dev/zebuker-cookiecutter | 31d25321b6dca8e3207f60da907e5a7996860308 | [
"BSD-2-Clause"
] | null | null | null | """Schemas linked to the database models"""
import re
from marshmallow import post_dump
from marshmallow_sqlalchemy import ModelSchema
class BaseSchema(ModelSchema):
    """
    Custom base schema class that serializes datetime strings without the
    timezone offset.
    """
    @post_dump
    def strip_timezone_offset(self, data):
        """Strips timezone offset from ISO8601/RFC3339 strings"""
        for key in data:
            # Only truthy string values can be serialized timestamps.
            if data[key] and isinstance(data[key], str):
                # Full ISO8601/RFC3339 timestamp ending in a numeric
                # "+HH:MM"/"-HH:MM" offset, optionally with microseconds.
                matches = re.match(
                    r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d{6})?[+-]\d\d:\d\d$',
                    data[key]
                )
                if matches:
                    # Drop the trailing 6-character offset ("+HH:MM").
                    data[key] = data[key][:-6]
return data | 30.678571 | 79 | 0.575087 | """Schemas linked to the database models"""
import re
from marshmallow import post_dump
from marshmallow_sqlalchemy import ModelSchema
class BaseSchema(ModelSchema):
    """
    Custom base schema class that serializes datetime strings without the
    timezone offset.
    """
    def __init__(self, strict=True, **kwargs):
        # Default to strict mode so validation errors raise instead of
        # being collected silently.
        super(BaseSchema, self).__init__(strict=strict, **kwargs)
    @post_dump
    def strip_timezone_offset(self, data):
        """Strips timezone offset from ISO8601/RFC3339 strings"""
        for key in data:
            # Only truthy string values can be serialized timestamps.
            if data[key] and isinstance(data[key], str):
                # Full ISO8601/RFC3339 timestamp ending in a numeric
                # "+HH:MM"/"-HH:MM" offset, optionally with microseconds.
                matches = re.match(
                    r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d{6})?[+-]\d\d:\d\d$',
                    data[key]
                )
                if matches:
                    # Drop the trailing 6-character offset ("+HH:MM").
                    data[key] = data[key][:-6]
return data | 87 | 0 | 27 |
69957d6ab0eb1f7ead9b6b0190af524dec2bfe3a | 53 | py | Python | data/__init__.py | SparkJiao/pytorch-transformers-template | c64d55f346534b204ac35e814db03221a0856d84 | [
"MIT"
] | 1 | 2022-03-04T08:27:52.000Z | 2022-03-04T08:27:52.000Z | data/__init__.py | SparkJiao/pytorch-transformers-template | c64d55f346534b204ac35e814db03221a0856d84 | [
"MIT"
] | null | null | null | data/__init__.py | SparkJiao/pytorch-transformers-template | c64d55f346534b204ac35e814db03221a0856d84 | [
"MIT"
] | null | null | null | """
Write your own datasets under this directory.
""" | 17.666667 | 45 | 0.716981 | """
Write your own datasets under this directory.
""" | 0 | 0 | 0 |
03ff9706fddfab3e358c77794e9b1dc5caf1ec05 | 9,470 | py | Python | python/mpopt/qap/solver.py | vislearn/libmpopt | 11c9e99bedc7fb5dd2e11bff69c60d4ce974f525 | [
"MIT"
] | 1 | 2021-03-23T06:45:42.000Z | 2021-03-23T06:45:42.000Z | python/mpopt/qap/solver.py | vislearn/libmpopt | 11c9e99bedc7fb5dd2e11bff69c60d4ce974f525 | [
"MIT"
] | 1 | 2022-01-18T03:17:09.000Z | 2022-01-18T03:17:09.000Z | python/mpopt/qap/solver.py | vislearn/libmpopt | 11c9e99bedc7fb5dd2e11bff69c60d4ce974f525 | [
"MIT"
] | 2 | 2021-03-03T14:01:52.000Z | 2022-01-18T02:45:55.000Z | from ..common.solver import BaseSolver, DEFAULT_BATCH_SIZE, DEFAULT_MAX_BATCHES
from . import libmpopt_qap as lib
from . primals import Primals
import numpy
# Default number of greedy-primal generations passed to the solver run.
DEFAULT_GREEDY_GENERATIONS = 10
# Large finite cost used to forbid assignments (stands in for infinity).
INFINITY_COST = 1e99
| 41.902655 | 139 | 0.67434 | from ..common.solver import BaseSolver, DEFAULT_BATCH_SIZE, DEFAULT_MAX_BATCHES
from . import libmpopt_qap as lib
from . primals import Primals
import numpy
# Default number of greedy-primal generations passed to the solver run.
DEFAULT_GREEDY_GENERATIONS = 10
# Large finite cost used to forbid assignments (stands in for infinity).
INFINITY_COST = 1e99
class ModelDecomposition:
    """Precomputed bookkeeping for decomposing a graph matching model."""
    def __init__(self, model, with_uniqueness, unary_side='left'):
        """A helper for gathering information useful for a Lagrange-dual based
        decomposition of a graph matching problem.
        The argument `unary_side` (either `left` or `right`) determines which
        point set of the underlying model is used as a unary node set. The
        other one is used as label universe for all unary nodes. When a unary
        node is created only labels for which a possible assignment is
        available will be added to the node's label space. A bidirectional
        mapping is remembered.
        The argument `with_uniqueness` determines whether uniqueness constraints
        will be inserted. If uniqueness constraints are inserted then no
        additional infinity edges will be included in the decomposition, i.e.
        the call to `add_infinity_edge_arcs` will not add new edges.
        Without uniqueness constraints we have to insert additional infinity
        edges to disallow impossible configurations that would have been
        otherwise forbidden by the uniqueness constraints.
        Note that currently `with_uniqueness` has no further impact. The
        downstream code must check `with_uniqueness` to see if it should insert
        uniqueness constraints into the solver or not.
        """
        assert unary_side in ('left', 'right')
        self.model = model
        self.with_uniqueness = with_uniqueness
        self.unary_side = unary_side
        self.label_side = 'right' if unary_side == 'left' else 'left'
        self.unary_to_nodeside = []  # unary solver index -> graph matching node index (left or right)
        self.nodeside_to_unary = {}  # same as above, only the other way around
        # Only nodes that occur in at least one assignment become unaries;
        # the two mappings translate between the dense solver indexing and
        # the (possibly sparse) model indexing.
        for node in range(self.number_of_nodes):
            if node in self.unary_set:
                self.nodeside_to_unary[node] = len(self.unary_to_nodeside)
                self.unary_to_nodeside.append(node)
        # (idx1, idx2) -> cost matrix; counters track edge degrees per node.
        self.pairwise = {}
        self.no_forward = [0] * self.number_of_nodes
        self.no_backward = [0] * self.number_of_nodes
        for edge in self.model.edges:
            self._insert_pairwise(*edge)
        self._add_infinity_edge_arcs()
    @property
    def unary_set(self):
        """Returns the unary node set of the underlying graph matching model.
        The result is a dict of lists of assignment-ids, detailing for each
        unary in which assignments it is involved.
        The unaries are indexed as in the original model, indices not occuring
        in any assignment do also not occur in the dict.
        """
        return getattr(self.model, self.unary_side)
    @property
    def number_of_nodes(self):
        """Returns the number of nodes of the underlying graph matching model.
        The result is either the number of left or right points depending on
        the side where we build the model.
        """
        return getattr(self.model, 'no_' + self.unary_side)
    @property
    def label_set(self):
        """Returns the label set of the underlying graph matching model.
        The result is a dict of lists of assignment-ids, detailing for each
        label in which assignments it is involved.
        The labels are indexed as in the original model, indices not occuring
        in any assignment do also not occur in the dict.
        """
        return getattr(self.model, self.label_side)
    def _insert_pairwise(self, id_assignment1, id_assignment2, cost, create_new_edges=True):
        """Inserts a pairwise cost between the two assignments.
        This method finds the corresponding unary nodes and creates an empty
        edge if necessary. Afterwards the cost on the corresponding arc of the
        edge are set to the cost.
        New edges are only inserted if `create_new_edges` is set to `True`. If this
        method creates a new edge, the forward and backward edge counter is updated.
        Note that depending on `self.unary_side` the quadratic term will be
        inserted between two left points or two right points of the underlying
        graph matching model.
        """
        node1 = getattr(self.model.assignments[id_assignment1], self.unary_side)
        node2 = getattr(self.model.assignments[id_assignment2], self.unary_side)
        # position of each assignment inside its node's label space
        pos_in_node1 = self.unary_set[node1].index(id_assignment1)
        pos_in_node2 = self.unary_set[node2].index(id_assignment2)
        # canonical order so each undirected edge is stored exactly once
        idx1, idx2, pos1, pos2 = sort_ids(node1, node2, pos_in_node1, pos_in_node2)
        if (idx1, idx2) not in self.pairwise and create_new_edges:
            self.no_forward[idx1] += 1
            self.no_backward[idx2] += 1
            # The node set does not contain the dummy label. The pairwise edges
            # will insert later need to have space for the dummy label. Hence
            # we add +1 here for both dimensions.
            shape = (len(self.unary_set[idx1]) + 1, len(self.unary_set[idx2]) + 1)
            self.pairwise[idx1, idx2] = numpy.zeros(shape)
        if (idx1, idx2) in self.pairwise:
            # each arc may be written at most once
            assert self.pairwise[idx1, idx2][pos1, pos2] == 0.0
            self.pairwise[idx1, idx2][pos1, pos2] = cost
    def _add_infinity_edge_arcs(self):
        """Set pairwise edge cost to infinity for prohibiting assignment constraints.
        If `self.with_uniqueness` is set to `True`, only existing edges will
        get updated. Otherwise, non-existing edges will get created (costs are
        initialized to zero). The later is needed for building a purely pairwise
        graphical model.
        """
        # every pair of assignments sharing a label is mutually exclusive
        for label in self.label_set:
            assigned_in = self.label_set[label]
            for i in range(len(assigned_in) - 1):
                for j in range(i+1, len(assigned_in)):
                    ass1 = assigned_in[i]
                    ass2 = assigned_in[j]
                    self._insert_pairwise(ass1, ass2, INFINITY_COST, create_new_edges=not self.with_uniqueness)
class Solver(BaseSolver):
    """Thin BaseSolver subclass bound to the QAP C extension (libmpopt_qap)."""
    def __init__(self):
        super().__init__(lib)
    def run(self, batch_size=DEFAULT_BATCH_SIZE, max_batches=DEFAULT_MAX_BATCHES, greedy_generations=DEFAULT_GREEDY_GENERATIONS):
        # Delegate the dual optimization loop to the C library.
        return self.lib.solver_run(self.solver, batch_size, max_batches, greedy_generations)
    def compute_greedy_assignment(self):
        # Ask the C library for a greedy primal assignment.
        return self.lib.solver_compute_greedy_assignment(self.solver)
def construct_gm_model(deco):
    """Build a purely pairwise graphical model from a ModelDecomposition.

    Parameters
    ----------
    deco : ModelDecomposition
        Decomposition holding unary/pairwise costs of a graph matching model.

    Returns
    -------
    GmModel with one unary factor per decomposition node (plus a zero-cost
    dummy label) and one pairwise factor per edge.
    """
    from ..gm.model import Model as GmModel
    gm_model = GmModel()
    # One unary factor per node; the trailing 0.0 is the dummy label.
    # (Removed an unused local ``edges`` and the unused enumerate counter.)
    for idx in deco.unary_to_nodeside:
        costs = [deco.model.assignments[ass_id].cost for ass_id in deco.unary_set[idx]]
        costs.append(0.0)
        gm_model.add_unary(costs)
    # One pairwise factor per edge with its full (dummy-padded) cost matrix.
    for (idx1, idx2), costs in deco.pairwise.items():
        gm_model.add_pairwise(idx1, idx2, costs)
    return gm_model
def construct_solver(deco):
    """Build and finalize a C-backed QAP Solver from a ModelDecomposition."""
    s = Solver()
    g = lib.solver_get_graph(s.solver)
    # insert unary factors
    for u, idx in enumerate(deco.unary_to_nodeside):
        # +1 label slot reserves room for the dummy (non-assignment) label
        f = lib.graph_add_unary(g, u, len(deco.unary_set[idx]) + 1, deco.no_forward[idx], deco.no_backward[idx])
        for i, ass_id in enumerate(deco.unary_set[idx]):
            lib.unary_set_cost(f, i, deco.model.assignments[ass_id].cost)
        # dummy label slot gets zero cost (i is the last real index here);
        # NOTE(review): source rendering left this line's nesting ambiguous --
        # either placement yields the same final costs, confirm upstream.
        lib.unary_set_cost(f, i+1, 0.0)
    # insert uniqueness factors
    if deco.with_uniqueness:
        for idx_uniqueness, (label_idx, assigned_in) in enumerate(deco.label_set.items()):
            f = lib.graph_add_uniqueness(g, idx_uniqueness, len(assigned_in))
            for slot, assignment_idx in enumerate(assigned_in):
                assignment = deco.model.assignments[assignment_idx]
                assert getattr(assignment, deco.label_side) == label_idx
                label = deco.unary_set[getattr(assignment, deco.unary_side)].index(assignment_idx)  # FIXME: O(n) is best avoided.
                lib.graph_add_uniqueness_link(g, deco.nodeside_to_unary[getattr(assignment, deco.unary_side)], label, idx_uniqueness, slot)
    # insert pairwise factors
    for i, ((idx1, idx2), cost) in enumerate(deco.pairwise.items()):  # use items()
        f = lib.graph_add_pairwise(g, i, cost.shape[0], cost.shape[1])
        lib.graph_add_pairwise_link(g, deco.nodeside_to_unary[idx1], deco.nodeside_to_unary[idx2], i)
        # copy the full (dummy-padded) cost matrix arc by arc
        for l_u in range(len(deco.unary_set[idx1]) + 1):
            for l_v in range(len(deco.unary_set[idx2]) + 1):
                lib.pairwise_set_cost(f, l_u, l_v, cost[l_u, l_v])
    lib.solver_finalize(s.solver)
    return s
def extract_primals(deco, solver):
primals = Primals(deco.model)
g = lib.solver_get_graph(solver.solver)
for u, idx in enumerate(deco.unary_to_nodeside):
lib_primal = lib.unary_get_primal(lib.graph_get_unary(g, u))
if lib_primal < len(deco.unary_set[idx]):
assignment_idx = deco.unary_set[idx][lib_primal]
assignment = deco.model.assignments[assignment_idx]
primals[idx] = getattr(assignment, deco.label_side)
else:
assert lib_primal == len(deco.unary_set[idx])
return primals
def sort_ids(id1, id2, pos1, pos2):
if id1 < id2:
return id1, id2, pos1, pos2
else:
return id2, id1, pos2, pos1
| 3,053 | 5,979 | 219 |
460f16294e073ca7bce6d7e13f69f59127b2bdb9 | 6,969 | py | Python | kitsune/notifications/api.py | AndrewDVXI/kitsune | 84bd4fa60346681c3fc5a03b0b1540fd1335cee2 | [
"BSD-3-Clause"
] | 929 | 2015-01-04T08:08:51.000Z | 2022-03-31T06:20:44.000Z | kitsune/notifications/api.py | hafixo/kitsune | d7756872e16590eea1c6adaeb5bc78f83414d753 | [
"BSD-3-Clause"
] | 1,751 | 2015-01-02T00:04:37.000Z | 2022-03-31T10:24:30.000Z | kitsune/notifications/api.py | hixio-mh/kitsune | d7756872e16590eea1c6adaeb5bc78f83414d753 | [
"BSD-3-Clause"
] | 605 | 2015-01-01T14:08:36.000Z | 2022-03-28T15:39:45.000Z | from django import forms
from django.db.models import Q
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
import django_filters
from actstream.models import Action
from rest_framework import serializers, viewsets, permissions, mixins, status
from rest_framework.decorators import action
from rest_framework.response import Response
from kitsune.notifications.models import (
PushNotificationRegistration,
Notification,
RealtimeRegistration,
)
from kitsune.sumo.api_utils import OnlyCreatorEdits, DateTimeUTCField, GenericRelatedField
class OnlyOwner(permissions.BasePermission):
"""
Only allow objects to affected by their owner.
TODO: This should be tied to user and object permissions better, but
for now this is a bandaid.
"""
| 31.391892 | 94 | 0.667384 | from django import forms
from django.db.models import Q
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
import django_filters
from actstream.models import Action
from rest_framework import serializers, viewsets, permissions, mixins, status
from rest_framework.decorators import action
from rest_framework.response import Response
from kitsune.notifications.models import (
PushNotificationRegistration,
Notification,
RealtimeRegistration,
)
from kitsune.sumo.api_utils import OnlyCreatorEdits, DateTimeUTCField, GenericRelatedField
class OnlyOwner(permissions.BasePermission):
"""
Only allow objects to affected by their owner.
TODO: This should be tied to user and object permissions better, but
for now this is a bandaid.
"""
def has_object_permission(self, request, view, obj):
user = getattr(request, "user", None)
owner = getattr(obj, "owner", None)
# Only the creator can modify things.
return user == owner
class NotificationSerializer(serializers.ModelSerializer):
is_read = serializers.ReadOnlyField()
timestamp = DateTimeUTCField("%Y-%m-%dT%H:%M:%SZ", source="action.timestamp")
actor = GenericRelatedField(source="action.actor")
verb = serializers.CharField(source="action.verb")
action_object = GenericRelatedField(source="action.action_object")
target = GenericRelatedField(source="action.target")
class Meta:
model = PushNotificationRegistration
fields = (
"action_object",
"actor",
"id",
"is_read",
"target",
"timestamp",
"verb",
)
class NotificationFilter(django_filters.FilterSet):
is_read = django_filters.BooleanFilter(method="filter_is_read", widget=forms.TextInput)
class Meta(object):
model = Notification
fields = [
"is_read",
]
def filter_is_read(self, queryset, name, value):
if value:
return queryset.exclude(read_at=None)
return queryset.filter(read_at=None)
class NotificationViewSet(
mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet
):
queryset = Notification.objects.all()
serializer_class = NotificationSerializer
permission_classes = [
permissions.IsAuthenticated,
OnlyOwner,
]
filterset_class = NotificationFilter
pagination_class = None
def get_queryset(self, *args, **kwargs):
qs = super(NotificationViewSet, self).get_queryset(*args, **kwargs)
return qs.filter(owner=self.request.user)
@action(detail=True, methods=["post"])
def mark_read(self, request, pk=None):
"""Mark the notification as read."""
notification = self.get_object()
notification.is_read = True
notification.save()
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=True, methods=["post"])
def mark_unread(self, request, pk=None):
"""Mark the notification as unread."""
notification = self.get_object()
notification.is_read = False
notification.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class PushNotificationRegistrationSerializer(serializers.ModelSerializer):
# Use usernames to reference users.
creator = serializers.SlugRelatedField(
slug_field="username", required=False, queryset=User.objects.all()
)
class Meta:
model = PushNotificationRegistration
fields = (
"creator",
"id",
"push_url",
)
def validate(self, data):
authed_user = getattr(self.context.get("request"), "user")
creator = data.get("creator")
if creator is None:
data["creator"] = authed_user
elif creator != authed_user:
raise serializers.ValidationError(
{"creator": "Can't register push notifications for another user."}
)
return data
class PushNotificationRegistrationViewSet(
mixins.CreateModelMixin, mixins.DestroyModelMixin, viewsets.GenericViewSet
):
queryset = PushNotificationRegistration.objects.all()
serializer_class = PushNotificationRegistrationSerializer
permission_classes = [
permissions.IsAuthenticated,
OnlyCreatorEdits,
]
class RealtimeRegistrationSerializer(serializers.ModelSerializer):
endpoint = serializers.CharField(write_only=True)
creator = serializers.SlugRelatedField(
slug_field="username", required=False, queryset=User.objects.all()
)
content_type = serializers.SlugRelatedField(
slug_field="model", queryset=ContentType.objects.all()
)
class Meta:
model = RealtimeRegistration
fields = [
"id",
"creator",
"created",
"endpoint",
"content_type",
"object_id",
]
def validate(self, data):
data = super(RealtimeRegistrationSerializer, self).validate(data)
authed_user = getattr(self.context.get("request"), "user")
creator = data.get("creator")
if creator is None:
data["creator"] = authed_user
elif creator != authed_user:
raise serializers.ValidationError(
"Can't register push notifications for another user."
)
return data
class RealtimeActionSerializer(serializers.ModelSerializer):
action_object = GenericRelatedField(serializer_type="full")
actor = GenericRelatedField(serializer_type="full")
target = GenericRelatedField(serializer_type="full")
verb = serializers.CharField()
timestamp = DateTimeUTCField()
class Meta:
model = PushNotificationRegistration
fields = (
"action_object",
"actor",
"id",
"target",
"timestamp",
"verb",
)
class RealtimeRegistrationViewSet(
mixins.CreateModelMixin, mixins.DestroyModelMixin, viewsets.GenericViewSet
):
queryset = RealtimeRegistration.objects.all()
serializer_class = RealtimeRegistrationSerializer
permission_classes = [
permissions.IsAuthenticated,
OnlyCreatorEdits,
]
@action(detail=True, methods=["get"])
def updates(self, request, pk=None):
"""Get all the actions that correspond to this registration."""
reg = self.get_object()
query = Q(actor_content_type=reg.content_type, actor_object_id=reg.object_id)
query |= Q(target_content_type=reg.content_type, target_object_id=reg.object_id)
query |= Q(
action_object_content_type=reg.content_type, action_object_object_id=reg.object_id
)
actions = Action.objects.filter(query)
serializer = RealtimeActionSerializer(actions, many=True)
return Response(serializer.data)
| 1,306 | 4,624 | 211 |
212c2bff91064b6f0d6c65cfcbed0e0e93656577 | 2,016 | py | Python | homeassistant/util/async_.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 23 | 2017-11-15T21:03:53.000Z | 2021-03-29T21:33:48.000Z | homeassistant/util/async_.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 9 | 2022-01-27T06:32:10.000Z | 2022-03-31T07:07:51.000Z | homeassistant/util/async_.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 10 | 2018-01-01T00:12:51.000Z | 2021-12-21T23:08:05.000Z | """Asyncio backports for Python 3.6 compatibility."""
from asyncio import coroutines, ensure_future
from asyncio.events import AbstractEventLoop
import concurrent.futures
import logging
import threading
from typing import Any, Callable, Coroutine
_LOGGER = logging.getLogger(__name__)
def fire_coroutine_threadsafe(coro: Coroutine, loop: AbstractEventLoop) -> None:
"""Submit a coroutine object to a given event loop.
This method does not provide a way to retrieve the result and
is intended for fire-and-forget use. This reduces the
work involved to fire the function on the loop.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError("Cannot be called from within the event loop")
if not coroutines.iscoroutine(coro):
raise TypeError("A coroutine object is required: %s" % coro)
def callback() -> None:
"""Handle the firing of a coroutine."""
ensure_future(coro, loop=loop)
loop.call_soon_threadsafe(callback)
def run_callback_threadsafe(
loop: AbstractEventLoop, callback: Callable, *args: Any
) -> concurrent.futures.Future:
"""Submit a callback object to a given event loop.
Return a concurrent.futures.Future to access the result.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError("Cannot be called from within the event loop")
future: concurrent.futures.Future = concurrent.futures.Future()
def run_callback() -> None:
"""Run callback and store result."""
try:
future.set_result(callback(*args))
except Exception as exc: # pylint: disable=broad-except
if future.set_running_or_notify_cancel():
future.set_exception(exc)
else:
_LOGGER.warning("Exception on lost future: ", exc_info=True)
loop.call_soon_threadsafe(run_callback)
return future
| 34.758621 | 80 | 0.699405 | """Asyncio backports for Python 3.6 compatibility."""
from asyncio import coroutines, ensure_future
from asyncio.events import AbstractEventLoop
import concurrent.futures
import logging
import threading
from typing import Any, Callable, Coroutine
_LOGGER = logging.getLogger(__name__)
def fire_coroutine_threadsafe(coro: Coroutine, loop: AbstractEventLoop) -> None:
"""Submit a coroutine object to a given event loop.
This method does not provide a way to retrieve the result and
is intended for fire-and-forget use. This reduces the
work involved to fire the function on the loop.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError("Cannot be called from within the event loop")
if not coroutines.iscoroutine(coro):
raise TypeError("A coroutine object is required: %s" % coro)
def callback() -> None:
"""Handle the firing of a coroutine."""
ensure_future(coro, loop=loop)
loop.call_soon_threadsafe(callback)
def run_callback_threadsafe(
loop: AbstractEventLoop, callback: Callable, *args: Any
) -> concurrent.futures.Future:
"""Submit a callback object to a given event loop.
Return a concurrent.futures.Future to access the result.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError("Cannot be called from within the event loop")
future: concurrent.futures.Future = concurrent.futures.Future()
def run_callback() -> None:
"""Run callback and store result."""
try:
future.set_result(callback(*args))
except Exception as exc: # pylint: disable=broad-except
if future.set_running_or_notify_cancel():
future.set_exception(exc)
else:
_LOGGER.warning("Exception on lost future: ", exc_info=True)
loop.call_soon_threadsafe(run_callback)
return future
| 0 | 0 | 0 |
e1dd7ede6c94f72ccac6e9c9d45c7d3d3ef460e1 | 376 | py | Python | tests/test_hangman.py | daryavasilyeva/hangman | d0f1eaca4764c70ee9f5977bc4bdeca3c0ed67c2 | [
"MIT"
] | null | null | null | tests/test_hangman.py | daryavasilyeva/hangman | d0f1eaca4764c70ee9f5977bc4bdeca3c0ed67c2 | [
"MIT"
] | null | null | null | tests/test_hangman.py | daryavasilyeva/hangman | d0f1eaca4764c70ee9f5977bc4bdeca3c0ed67c2 | [
"MIT"
] | null | null | null | """Tests"""
from hangman import check_letter, reveal_letter, print_result
def test_check_letter():
"""check_letter"""
assert check_letter("world", "a") == 0
def test_reveal_letter():
"""reveal_letter"""
assert reveal_letter("te", ['*', 'e'], "t") == ['t', 'e']
def test_print_result():
"""test print result"""
assert print_result(5, "word") == 0
| 20.888889 | 61 | 0.62234 | """Tests"""
from hangman import check_letter, reveal_letter, print_result
def test_check_letter():
"""check_letter"""
assert check_letter("world", "a") == 0
def test_reveal_letter():
"""reveal_letter"""
assert reveal_letter("te", ['*', 'e'], "t") == ['t', 'e']
def test_print_result():
"""test print result"""
assert print_result(5, "word") == 0
| 0 | 0 | 0 |
2498fa119bb7a0d039236056c6f620bb91e0ce55 | 6,570 | py | Python | code/deepdetect/py/train_hard_negative.py | omidroshani/DeepDIA | 2af96056a62a49ff6ff10b6e176f0fba3f495843 | [
"BSD-3-Clause"
] | 25 | 2020-01-09T18:30:57.000Z | 2021-12-25T20:09:40.000Z | code/deepdetect/py/train_hard_negative.py | omidroshani/DeepDIA | 2af96056a62a49ff6ff10b6e176f0fba3f495843 | [
"BSD-3-Clause"
] | 2 | 2020-10-19T14:28:41.000Z | 2021-11-15T18:46:15.000Z | code/deepdetect/py/train_hard_negative.py | omidroshani/DeepDIA | 2af96056a62a49ff6ff10b6e176f0fba3f495843 | [
"BSD-3-Clause"
] | 14 | 2020-01-24T14:44:46.000Z | 2022-02-24T12:37:33.000Z | # coding: utf-8
from __future__ import print_function
import numpy as np
import pandas as pd
import os
import json
import re
from pep_detectability import PeptideDetectabilityTrainer
from pep_detectability import PeptideDetectabilityPredictor
working_dir = '.'
filenames_positive = [
f for f in os.listdir(working_dir)
if f.endswith(".detectability.csv") and \
not f.endswith("negative.detectability.csv")
]
data_positive = pd.concat([load_data(os.path.join(working_dir, f)) for f in filenames_positive])
filenames_negative = [
f for f in os.listdir(working_dir)
if f.endswith("negative.detectability.csv")
]
data_negative = pd.concat([load_data(os.path.join(working_dir, f)) for f in filenames_negative])
print('Positive {n}, negative {m}'.format(
n=len(data_positive), m=len(data_negative)
))
seed = 0
max_rounds = 10
positive_threshold = 0.5
fpr_threshold = 0.01
result_summary = {
'seed': seed,
'files': {
'positive': filenames_positive,
'negative': filenames_negative
}
}
save_data_json(result_summary, os.path.join(working_dir, 'training.json'))
results = []
for i in range(0, max_rounds + 1):
if i == 0:
model_path=None
indexes_negative = get_initial_negative_subset(
data_positive, data_negative, seed=seed
)
else:
predict_dir = '{working_dir}/training_{i}'.format(working_dir=working_dir, i=i - 1)
model_path = model_path = [
os.path.join(predict_dir, 'models', f)
for f in os.listdir(os.path.join(predict_dir, 'models'))
if re.match(r'^epoch_[0-9]+\.hdf5$', f) is not None
][-1]
indexes_negative, tpr, fpr, precision = get_hard_negative_subset(
data_positive, data_negative,
model_path=model_path,
positive_threshold=positive_threshold,
min_ratio=1
)
print('TPR: {tpr}, FPR: {fpr}, Precision: {precision}'.format(
tpr=tpr, fpr=fpr, precision=precision
))
if (len(results) > 0):
results[-1]['evaluate'] = {
'tpr': tpr,
'fpr': fpr,
'precision': precision
}
if (fpr <= fpr_threshold):
print('Early stopping')
break
if (i >= max_rounds):
break
print('Round {i}: train on {n} positive samples, {m} negative samples'.format(
i=i, n=len(data_positive), m=len(indexes_negative)
))
train_dir = '{working_dir}/training_{i}'.format(working_dir=working_dir, i=i)
os.makedirs(train_dir, exist_ok=True)
os.makedirs(os.path.join(train_dir, 'models'), exist_ok=True)
data1 = data_positive
data2 = data_negative.iloc[indexes_negative]
trainer = PeptideDetectabilityTrainer(
model_path=model_path,
save_path=os.path.join(train_dir, 'models', 'epoch_{epoch:03d}.hdf5'),
log_path=os.path.join(train_dir, 'training.log')
)
result = trainer.train(data1, data2, seed=seed)
trainer.save_model(os.path.join(train_dir, 'models', 'last_epoch.hdf5'))
result['split'] = {
'seed': seed,
'round': i,
'train': {
'positive': result['split']['train']['positive'],
'negative': indexes_negative \
[np.array(result['split']['train']['negative'])].tolist()
},
'validate': {
'positive': result['split']['validate']['positive'],
'negative': indexes_negative \
[np.array(result['split']['validate']['negative'])].tolist()
}
}
result['files'] = filenames_positive + filenames_negative
trainer.save_model(os.path.join(train_dir, 'models', 'last_epoch.hdf5'))
save_data_json(result, os.path.join(train_dir, 'training.json'))
results.append(result)
result_summary['rounds'] = results
save_data_json(result_summary, os.path.join(working_dir, 'training.json'))
| 34.397906 | 96 | 0.624201 | # coding: utf-8
from __future__ import print_function
import numpy as np
import pandas as pd
import os
import json
import re
from pep_detectability import PeptideDetectabilityTrainer
from pep_detectability import PeptideDetectabilityPredictor
def load_data(file):
with open(file, 'r') as f:
data = pd.read_csv(file)
return data
def save_data_json(data, file):
with open(file, 'w') as f:
json.dump(data, f)
def get_initial_negative_subset(data1, data2, ratio=1, seed=None):
if (data1.shape[0] * ratio >= data2.shape[0]):
return np.arange(data2.shape[0])
else:
np.random.seed(seed)
indexes = np.random.permutation(data2.shape[0])
return indexes[:(data1.shape[0] * ratio)]
def evaluate_model(data1, data2, model_path=None, model=None, positive_threshold=0.5):
predictor = PeptideDetectabilityPredictor(model_path=model_path, model=model)
prediction1 = predictor.predict(data1)
prediction2 = predictor.predict(data2)
tp = np.where(prediction1["detectability"].values >= positive_threshold)[0]
fn = np.where(prediction1["detectability"].values < positive_threshold)[0]
fp = np.where(prediction2["detectability"].values >= positive_threshold)[0]
tn = np.where(prediction2["detectability"].values < positive_threshold)[0]
tpr = len(tp) / (len(tp) + len(fn))
fpr = len(fp) / (len(fp) + len(tn))
precision = len(tp) / (len(tp) + len(fp))
return tpr, fpr, precision, \
tp, fn, fp, tn, \
prediction1[["detectability"]].values, prediction2[["detectability"]].values
def get_hard_negative_subset(data1, data2, model_path=None, model=None, \
positive_threshold=0.5, min_ratio=None, max_ratio=None):
tpr, fpr, precision, \
tp, fn, fp, tn, \
prediction1, prediction2 = \
evaluate_model(
data1, data2,
model_path=model_path, model=model,
positive_threshold=positive_threshold
)
indexes = fp
if min_ratio is not None and min_ratio > 0:
min_count = data1.shape[0] * min_ratio
else:
min_count = None
if max_ratio is not None and max_ratio > 0:
max_count = data1.shape[0] * max_ratio
else:
max_count = None
if min_count is not None and \
max_count is not None and \
min_count > max_count:
raise ValueError('min_count > max_count')
if min_count is not None and min_count > len(indexes):
indexes = np.concatenate(
(indexes, tn[np.argsort(-prediction2[tn], axis=None)])
)[:min_count]
elif max_count < len(indexes):
indexes = indexes[np.argsort(-prediction2[indexes], axis=None)[:max_count]]
return indexes, tpr, fpr, precision
working_dir = '.'
filenames_positive = [
f for f in os.listdir(working_dir)
if f.endswith(".detectability.csv") and \
not f.endswith("negative.detectability.csv")
]
data_positive = pd.concat([load_data(os.path.join(working_dir, f)) for f in filenames_positive])
filenames_negative = [
f for f in os.listdir(working_dir)
if f.endswith("negative.detectability.csv")
]
data_negative = pd.concat([load_data(os.path.join(working_dir, f)) for f in filenames_negative])
print('Positive {n}, negative {m}'.format(
n=len(data_positive), m=len(data_negative)
))
seed = 0
max_rounds = 10
positive_threshold = 0.5
fpr_threshold = 0.01
result_summary = {
'seed': seed,
'files': {
'positive': filenames_positive,
'negative': filenames_negative
}
}
save_data_json(result_summary, os.path.join(working_dir, 'training.json'))
results = []
for i in range(0, max_rounds + 1):
if i == 0:
model_path=None
indexes_negative = get_initial_negative_subset(
data_positive, data_negative, seed=seed
)
else:
predict_dir = '{working_dir}/training_{i}'.format(working_dir=working_dir, i=i - 1)
model_path = model_path = [
os.path.join(predict_dir, 'models', f)
for f in os.listdir(os.path.join(predict_dir, 'models'))
if re.match(r'^epoch_[0-9]+\.hdf5$', f) is not None
][-1]
indexes_negative, tpr, fpr, precision = get_hard_negative_subset(
data_positive, data_negative,
model_path=model_path,
positive_threshold=positive_threshold,
min_ratio=1
)
print('TPR: {tpr}, FPR: {fpr}, Precision: {precision}'.format(
tpr=tpr, fpr=fpr, precision=precision
))
if (len(results) > 0):
results[-1]['evaluate'] = {
'tpr': tpr,
'fpr': fpr,
'precision': precision
}
if (fpr <= fpr_threshold):
print('Early stopping')
break
if (i >= max_rounds):
break
print('Round {i}: train on {n} positive samples, {m} negative samples'.format(
i=i, n=len(data_positive), m=len(indexes_negative)
))
train_dir = '{working_dir}/training_{i}'.format(working_dir=working_dir, i=i)
os.makedirs(train_dir, exist_ok=True)
os.makedirs(os.path.join(train_dir, 'models'), exist_ok=True)
data1 = data_positive
data2 = data_negative.iloc[indexes_negative]
trainer = PeptideDetectabilityTrainer(
model_path=model_path,
save_path=os.path.join(train_dir, 'models', 'epoch_{epoch:03d}.hdf5'),
log_path=os.path.join(train_dir, 'training.log')
)
result = trainer.train(data1, data2, seed=seed)
trainer.save_model(os.path.join(train_dir, 'models', 'last_epoch.hdf5'))
result['split'] = {
'seed': seed,
'round': i,
'train': {
'positive': result['split']['train']['positive'],
'negative': indexes_negative \
[np.array(result['split']['train']['negative'])].tolist()
},
'validate': {
'positive': result['split']['validate']['positive'],
'negative': indexes_negative \
[np.array(result['split']['validate']['negative'])].tolist()
}
}
result['files'] = filenames_positive + filenames_negative
trainer.save_model(os.path.join(train_dir, 'models', 'last_epoch.hdf5'))
save_data_json(result, os.path.join(train_dir, 'training.json'))
results.append(result)
result_summary['rounds'] = results
save_data_json(result_summary, os.path.join(working_dir, 'training.json'))
| 2,410 | 0 | 115 |
9e0c31f3ec48056ceb415af9faf04e77f34f0cd0 | 48 | py | Python | tests/data_loader/__init__.py | usert5432/lstm_ee | 342ed4f5245311924d6a06b38c4f28eac77778e9 | [
"MIT"
] | 3 | 2020-06-04T17:00:26.000Z | 2022-02-22T16:56:09.000Z | tests/data_loader/__init__.py | usert5432/lstm_ee | 342ed4f5245311924d6a06b38c4f28eac77778e9 | [
"MIT"
] | null | null | null | tests/data_loader/__init__.py | usert5432/lstm_ee | 342ed4f5245311924d6a06b38c4f28eac77778e9 | [
"MIT"
] | 1 | 2021-01-17T04:19:41.000Z | 2021-01-17T04:19:41.000Z | """Various `lstm_ee.data.data_loader` tests"""
| 16 | 46 | 0.708333 | """Various `lstm_ee.data.data_loader` tests"""
| 0 | 0 | 0 |
5d203c9d71c48c60eb3ec646a3694c4aec573963 | 1,218 | py | Python | companies/migrations/0004_companynamechange.py | Valuehorizon/valuehorizon-companies | 5366e230da69ee30fcdc1bf4beddc99310f6b767 | [
"MIT"
] | 1 | 2015-09-28T17:11:12.000Z | 2015-09-28T17:11:12.000Z | companies/migrations/0004_companynamechange.py | Valuehorizon/valuehorizon-companies | 5366e230da69ee30fcdc1bf4beddc99310f6b767 | [
"MIT"
] | 4 | 2020-02-11T22:59:54.000Z | 2021-06-10T17:55:15.000Z | companies/migrations/0004_companynamechange.py | Valuehorizon/valuehorizon-companies | 5366e230da69ee30fcdc1bf4beddc99310f6b767 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| 36.909091 | 146 | 0.559934 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('companies', '0003_auto_20150526_1255'),
]
operations = [
migrations.CreateModel(
name='CompanyNameChange',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateField()),
('name_before', models.CharField(max_length=255)),
('name_after', models.CharField(max_length=255)),
('short_description', models.CharField(max_length=85)),
('long_description', models.CharField(max_length=255)),
('status', models.CharField(default=b'CO', max_length=2, choices=[(b'CO', 'Completed'), (b'FA', 'Failed'), (b'UP', 'Upcoming')])),
('company', models.ForeignKey(to='companies.Company')),
],
options={
'ordering': ['date'],
'verbose_name': 'Company Name Change',
'verbose_name_plural': 'Company Name Changes',
},
),
]
| 0 | 1,088 | 23 |
29fa2e9e8a9f4bead992c5e3ca0253fe77c94c60 | 566 | py | Python | python/0104.py | garywei944/LeetCode | 77453b6e3329f3309ad61fe578cb7b608317ba1b | [
"MIT"
] | null | null | null | python/0104.py | garywei944/LeetCode | 77453b6e3329f3309ad61fe578cb7b608317ba1b | [
"MIT"
] | null | null | null | python/0104.py | garywei944/LeetCode | 77453b6e3329f3309ad61fe578cb7b608317ba1b | [
"MIT"
] | null | null | null | from leetcode_tester import Tester
from src.binary_tree import TreeNode, null
from typing import Optional, List
if __name__ == '__main__':
solution = Solution()
test = Tester(solution.maxDepth)
test.addTest(
TreeNode.from_list([3, 9, 20, null, null, 15, 7]), 3
)
test.addTest(
TreeNode.from_list([1, null, 2]), 2
)
test.doTest()
| 22.64 | 67 | 0.628975 | from leetcode_tester import Tester
from src.binary_tree import TreeNode, null
from typing import Optional, List
class Solution:
def maxDepth(self, root: Optional[TreeNode]) -> int:
if root is None:
return 0
return max(map(self.maxDepth, (root.left, root.right))) + 1
if __name__ == '__main__':
solution = Solution()
test = Tester(solution.maxDepth)
test.addTest(
TreeNode.from_list([3, 9, 20, null, null, 15, 7]), 3
)
test.addTest(
TreeNode.from_list([1, null, 2]), 2
)
test.doTest()
| 145 | -6 | 49 |
b06e5af1cd883a14debdabe2995244bde009957c | 279 | py | Python | seshat/reviewer/views/reviewers.py | deadlylaid/seshat | 7bc98187c6a8bbc6884edee56e605257526406af | [
"MIT"
] | 2 | 2019-11-20T04:21:00.000Z | 2021-06-11T04:02:37.000Z | seshat/reviewer/views/reviewers.py | deadlylaid/seshat | 7bc98187c6a8bbc6884edee56e605257526406af | [
"MIT"
] | 1 | 2019-10-03T06:03:49.000Z | 2019-10-03T06:03:49.000Z | seshat/reviewer/views/reviewers.py | deadlylaid/seshat | 7bc98187c6a8bbc6884edee56e605257526406af | [
"MIT"
] | null | null | null | from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView
from reviewer.models import Reviewer
| 27.9 | 57 | 0.795699 | from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView
from reviewer.models import Reviewer
class ReviewerListView(LoginRequiredMixin, ListView):
login_url = 'login'
model = Reviewer
template_name = 'reviewer/list.html'
| 0 | 118 | 23 |
6c023fb1f27e6d7a5b2543f14e1f14e2952ae907 | 34,866 | py | Python | src/CKANData.py | bdolor/bcdc2bcdc | 7802e75d6595f89a80765b331b06f59122e5a866 | [
"Apache-2.0"
] | null | null | null | src/CKANData.py | bdolor/bcdc2bcdc | 7802e75d6595f89a80765b331b06f59122e5a866 | [
"Apache-2.0"
] | null | null | null | src/CKANData.py | bdolor/bcdc2bcdc | 7802e75d6595f89a80765b331b06f59122e5a866 | [
"Apache-2.0"
] | null | null | null | """CKAN module is a wrapper around API calls. All of these methods will return
raw json objects. The JSON that is returned can be used to construct CKANData
objects.
CKANData objects can be compared with one another. They will use the
CKANTransform methods to identify fields that should and should not be
used to compare two objects.
CKANTransform will also be used to transform on CKANData object to a new
Schema allowing like for like comparisons.
CKANTransform module will work directly with CKAN Data objects.
"""
# pylint: disable=logging-format-interpolation
import logging
import constants
import CKANTransform
import deepdiff
import pprint
LOGGER = logging.getLogger(__name__)
def validateTypeIsComparable(dataObj1, dataObj2):
    """Verify that two objects are of comparable types.

    An object's effective type is its ``dataType`` attribute when that
    attribute exists, otherwise its Python type.  The function returns
    ``None`` when the effective types match.

    :param dataObj1: The first data object that is to be used in a comparison
    :param dataObj2: The second data object that is to be used in a comparison
    :raises IncompatibleTypesException: raised when the effective types of
        the two objects differ
    """
    # fall back to the Python type only when no explicit dataType is defined
    dataType1 = getattr(dataObj1, 'dataType', type(dataObj1))
    dataType2 = getattr(dataObj2, 'dataType', type(dataObj2))

    if dataType2 != dataType1:
        msg = 'You are attempting to compare two different types of objects ' + \
            f'that are not comparable. dataObj1 is type: {dataType1} and ' + \
            f'dataObj2 is type: {dataType2}'
        raise IncompatibleTypesException(msg)
# ------------- Data Record defs -------------
class DataCell:
    """Wraps a single value from a CKAN data structure together with
    change-tracking metadata: links to the enclosing (parent) cell, the
    ignore configuration in effect, and whether the value should be
    included when two datasets are compared.
    """

    def deleteIndexes(self, positions):
        """Remove the entries of ``self.struct`` found at the given positions.

        :param positions: a list of index positions for the self.struct list
            that are to be removed.
        :type positions: list of ints
        """
        LOGGER.debug(f"remove positions: {positions}")
        kept = []
        for idx, element in enumerate(self.struct):
            if idx in positions:
                LOGGER.debug(f"removing: {idx} {element}")
            else:
                kept.append(element)
        LOGGER.debug(f"old struct: {self.struct}")
        LOGGER.debug(f"new struct: {kept}")
        self.struct = kept
        # transfer changes to the parent

    def generateNewCell(self, key, transConf):
        """Create a child :class:`DataCell` for the value stored under ``key``
        of the current (dict) cell.

        The child inherits the ignore configuration from this cell unless
        ``key`` names an embedded transform type (users, groups, ...), in
        which case the configuration for that type is loaded from
        ``transConf``.  When the child's value matches an entry in the
        ignore list, this cell is flagged as excluded from comparison.

        :param key: a key of struct property
        :type key: str
        :param transConf: transformation configuration used to look up the
            ignore list / unique field for embedded types
        :return: the newly created child cell
        :rtype: DataCell
        """
        child = DataCell(self.struct[key])
        child.parent = self
        child.parentKey = key

        # child starts out with the parent's comparison settings
        child.include = self.include
        child.ignoreList = self.ignoreList
        child.ignoreFld = self.ignoreFld
        child.parentType = self.parentType

        # embedded types (users, groups, etc...) carry their own config
        if key in constants.VALID_TRANSFORM_TYPES:
            child.ignoreList = transConf.getIgnoreList(key)
            child.ignoreFld = transConf.getUniqueField(key)
            child.parentType = key

        # example: the key is 'name', the ignore field is 'name', and the
        # value sits in the ignore list -> the enclosing object (self)
        # must be excluded from the comparison.
        if (child.parentType is not None
                and child.ignoreFld
                and key == child.ignoreFld
                and child.struct in child.ignoreList):
            self.include = False
        return child
# -------------------- DATASET DELTA ------------------
class CKANDataSetDeltas:
"""Class used to represent differences between two objects of the same
type. Includes all the information necessary to proceed with the update.
:ivar adds: A list of dicts containing the user defined properties that need
to be populated to create an equivalent version of the src data
in dest.
:ivar deletes: A list of the names or ids on the dest side of objects that
should be deleted.
:ivar updates: Same structure as 'adds'. Only difference between these and
adds is these will get added to dest using an update method vs a create
method.
:ivar srcCKANDataset: CKANDataset object, maintain a reference to this
object so that can request CKAN records in the dataset with only
user generated fields included.
"""
def setAddDataset(self, addDataObj):
"""Adds a object to the list of objects that are identified as adds
Adds are objects that exist in the source but not the destination
:param addDataObj: data that is to be added
:type addDataObj: dict
:raises TypeError: raised if the input data is not type dict
"""
if not isinstance(addDataObj, dict):
msg = "addDataObj parameter needs to be type dict. You passed " + \
f"{type(addDataObj)}"
raise TypeError(msg)
self.adds.append(addDataObj)
def setAddDatasets(self, addList, replace=True):
"""adds a list of data to the adds property. The adds property
gets populated with data that should be added to the destination
ckan instance
:param addList: input list of data that should be added to the dest
instance
:type addList: struct
:param replace: if set to true, will replace any data that may already
exist in the adds property if set to false then will append to the
end of the struct, defaults to True
:type replace: bool, optional
"""
if replace:
LOGGER.info(f"populate add list with {len(addList)} items")
self.adds = addList
else:
LOGGER.info(f"adding {len(addList)} items to the add list")
self.adds.extend(addList)
def setDeleteDataset(self, deleteName):
"""Adds an object to the list of data that has been identified as a
Delete.
Deletes are records that exist in the destination but not the source.
:param deleteName: [description]
:type deleteName: [type]
:raises TypeError: [description]
"""
if not isinstance(deleteName, str):
msg = "deleteName parameter needs to be type str. You passed " + \
f"{type(deleteName)}"
raise TypeError(msg)
self.deletes.append(deleteName)
def setDeleteDatasets(self, deleteList, replace=True):
"""adds a list of data to the deletes property. The deletes property
gets populated with unique ids that should be removed from the destination
ckan instance
:param deleteList: input list of data that should be deleted from the dest
ckan instance
:type addList: struct
:param replace: if set to true, will replace any data that may already
exist in the deletes property, if set to false then will append to the
end of the struct, defaults to True
:type replace: bool, optional
"""
if replace:
LOGGER.info(f"populate delete list with {len(deleteList)} items")
self.deletes = deleteList
else:
LOGGER.info(f"adding {len(deleteList)} items to the delete list")
self.deletes.extend(deleteList)
def setUpdateDatasets(self, updateList):
"""Gets a list of data that should be used to update objects in the ckan
destination instance and adds the data to this object
:param updateList: list of data to be used to update the object
:type updateList: list
"""
LOGGER.info(f"adding {len(updateList)} records to update")
for updateData in updateList:
self.setUpdateDataSet(updateData)
def setUpdateDataSet(self, updateObj):
"""Adds a new dataset that is to be updated. When comparison of two
objects identifies that there is a difference, the object that passed to
this method is the src object with the data that should be copied to dest.
Updates are datasets that exist in source and destination however not all
the data is the same between them.
:param updateObj: the data that is to be updated
:type updateObj: dict
:raises TypeError: object must be type 'dict', raise if it is not.
:raises ValueError: object must have a 'name' property
"""
if not isinstance(updateObj, dict):
msg = "updateObj parameter needs to be type dict. You passed " + \
f"{type(updateObj)}"
raise TypeError(msg)
if 'name' not in updateObj:
msg = 'Update object MUST contain a property \'name\'. Object ' + \
f'provided: {updateObj}'
raise ValueError(msg)
LOGGER.debug(f"adding update for {updateObj['name']}")
self.updates[updateObj['name']] = updateObj
def filterNonUserGeneratedFields(self, ckanDataSet):
"""
Receives either a dict or list:
* dict: key is the unique id for the dataset
* list: a list of dicts describing a list of data
objects.
Iterates over all the data in the ckanDataSet struct, removing non
user generated fields and returns a json struct (dict) with only
fields that are user defined
:param ckanDataSet: a ckan data set
:type ckanDataSet: CKANDataSet or an object that subclasses it
"""
# get the unique id for this dataset type
uniqueIdentifier = self.srcCKANDataset.transConf.getUniqueField(
self.srcCKANDataset.dataType)
# if generating a dataset to be used to update a dataset, then check to
# see if there are machine generated fields that should be included in the
# update
LOGGER.debug(f"uniqueIdentifier: {uniqueIdentifier}")
if isinstance(ckanDataSet, dict):
filteredData = {}
uniqueIds = ckanDataSet.keys()
elif isinstance(ckanDataSet, list):
filteredData = []
# below is wrong as it returns all unique ids, we only want the
# unique ids provided in the struct ckanDataSet
#uniqueIds = self.srcCKANDataset.getUniqueIdentifiers()
uniqueIds = []
for record in ckanDataSet:
uniqueIds.append(record[uniqueIdentifier])
else:
msg = f'type received is {type(ckanDataSet)}, expecting list or dict'
raise IncompatibleTypesException(msg)
for uniqueId in uniqueIds:
#LOGGER.debug(f"uniqueId: {uniqueId}")
ckanRec = self.srcCKANDataset.getRecordByUniqueId(uniqueId)
compStruct = ckanRec.getComparableStruct()
if isinstance(ckanDataSet, dict):
filteredData[uniqueId] = compStruct
elif isinstance(ckanDataSet, list):
filteredData.append(compStruct)
return filteredData
def getUpdateData(self):
""" creates and returns a structure that can be used to update the object
in question.
:return: a dictionary where the key values are the unique identifiers
and the values are the actual struct that should be used to update
the destination ckan instance.
:rtype: dict
"""
# should return only fields that are user generated
updates = self.filterNonUserGeneratedFields(self.updates)
#LOGGER.debug(f'updates: {updates}')
updateFields = self.transConf.getFieldsToIncludeOnUpdate(self.destCKANDataset.dataType)
if updateFields:
# need to add these onto each record from the destination
# instances data
updates = self.addUpdateAutoGenFields(updates, updateFields)
return updates
def addUpdateAutoGenFields(self, dataDict, autoGenFieldList):
"""dataDict contains the data that is to be used for the update that
originates from the source ckan instance. autoGenFieldList is a list of
field names that should be added to the struct from the destination
ckan instance
:param dataDict: The update data struct which is a dictionary where the
keys are the unique identifier, in most cases the keys are the name
property. The values in this struct are the values from the source
ckan instance.
:type dataDict: dict
:param autoGenFieldList: a list of field names that should be added to
the struct from the destination ckan instance
:type autoGenFieldList: list
:return: The values in the dataDict with the destination instance fields
defined in autoGenFieldList appended to the dictionary
:rtype: dict
"""
for uniqueId in dataDict:
record = self.destCKANDataset.getRecordByUniqueId(uniqueId)
for field2Add in autoGenFieldList:
fieldValue = record.getFieldValue(field2Add)
dataDict[uniqueId][field2Add] = fieldValue
LOGGER.debug(f"adding: {field2Add}:{fieldValue} to {uniqueId}")
return dataDict
# -------------------- DATASETS --------------------
class CKANDataSet:
"""This class wraps a collection of datasets. Includes an iterator that
will return a CKANRecord object.
:raises IncompatibleTypesException: This method is raised when comparing two
incompatible types.
"""
def reset(self):
"""reset the iterator
"""
self.iterCnt = 0
def getUniqueIdentifiers(self):
"""Iterates through the records in the dataset extracting the values from
the unique identifier field as defined in the config file.
:return: list of values found in the datasets unique constrained field.
:rtype: list
"""
self.reset()
uniqueIds = []
for record in self:
uniqueIds.append(record.getUniqueIdentifier())
return uniqueIds
def getRecordByUniqueId(self, uniqueValueToRetrieve):
"""Gets the record that aligns with this unique id.
"""
retVal = None
if not self.uniqueidRecordLookup:
self.reset()
for record in self:
recordID = record.getUniqueIdentifier()
self.uniqueidRecordLookup[recordID] = record
if uniqueValueToRetrieve == recordID:
retVal = record
else:
if uniqueValueToRetrieve in self.uniqueidRecordLookup:
retVal = self.uniqueidRecordLookup[uniqueValueToRetrieve]
return retVal
def getDeleteList(self, destUniqueIdSet, srcUniqueIdSet):
"""gets a set of unique ids from the source and destination ckan instances,
compares the two lists and generates a list of ids that should be deleted
from the destination instance. Excludes any ids that are identified in
the ignore list defined in the transformation configuration file.
:param destUniqueIdSet: a set of unique ids found the destination ckan
instance
:type destUniqueIdSet: set
:param srcUniqueIdSet: a set of the unique ids in the source ckan instance
:type srcUniqueIdSet: set
"""
ignoreList = self.transConf.getIgnoreList(self.dataType)
deleteSet = destUniqueIdSet.difference(srcUniqueIdSet)
deleteList = []
for deleteUniqueName in deleteSet:
#Check to see if the user is in the ignore list, only add if it is not
if deleteUniqueName not in ignoreList:
deleteList.append(deleteUniqueName)
return deleteList
def getAddList(self, destUniqueIdSet, srcUniqueIdSet):
"""Gets a two sets of unique ids, one for the data on the source ckan
instance and another for the destination ckan instance. Using this
information returns a list of unique ids that should be added to the
destination instance
:param destUniqueIdSet: a set of unique ids from the destination ckan
instance.
:type destUniqueIdSet: set
:param srcUniqueIdSet: a set of unique ids from the source ckan instance
:type srcUniqueIdSet: set
:return: a list of unique ids that should be added to the destination
ckan instance. Will exclude any unique ids identified in the
transformation configuration ignore list.
:rtype: list
"""
# in source but not in dest, ie adds
addSet = srcUniqueIdSet.difference(destUniqueIdSet)
ignoreList = self.transConf.getIgnoreList(self.dataType)
addList = []
for addRecordUniqueName in addSet:
#LOGGER.debug(f"addRecord: {addRecordUniqueName}")
if addRecordUniqueName not in ignoreList:
addDataSet = self.getRecordByUniqueId(addRecordUniqueName)
addDataStruct = addDataSet.getComparableStruct()
addList.append(addDataStruct)
return addList
def getDelta(self, destDataSet):
"""Compares this dataset with the provided 'ckanDataSet' dataset and
returns a CKANDatasetDelta object that identifies
* additions
* deletions
* updates
Assumption is that __this__ object is the source dataset and the object
in the parameter destDataSet is the destination dataset, or the dataset
that is to be updated
:param destDataSet: the dataset that is going to be updated so it
matches the contents of the source dataset
:type ckanDataSet: CKANDataSet
"""
deltaObj = CKANDataSetDeltas(self, destDataSet)
dstUniqueIds = set(destDataSet.getUniqueIdentifiers())
srcUniqueids = set(self.getUniqueIdentifiers())
deleteList = self.getDeleteList(dstUniqueIds, srcUniqueids)
deltaObj.setDeleteDatasets(deleteList)
addList = self.getAddList(dstUniqueIds, srcUniqueids)
deltaObj.setAddDatasets(addList)
updateList = self.getUpdatesList(dstUniqueIds, srcUniqueids, destDataSet)
deltaObj.setUpdateDatasets(updateList)
return deltaObj
def __eq__(self, ckanDataSet):
""" Identifies if the input dataset is the same as this dataset
:param ckanDataSet: The input CKANDataset
:type ckanDataSet: either CKANDataSet, or a subclass of it
"""
LOGGER.debug("DATASET EQ")
retVal = True
# TODO: rework this, should be used to compare a collection
validateTypeIsComparable(self, ckanDataSet)
# get the unique identifiers and verify that input has all the
# unique identifiers as this object
inputUniqueIds = ckanDataSet.getUniqueIdentifiers()
thisUniqueIds = self.getUniqueIdentifiers()
LOGGER.debug(f"inputUniqueIds: {inputUniqueIds}")
LOGGER.debug(f"thisUniqueIds: {thisUniqueIds}")
if set(inputUniqueIds) == set(thisUniqueIds):
# has all the unique ids, now need to look at the differences
# in the data
LOGGER.debug(f"iterate ckanDataSet: {ckanDataSet}")
LOGGER.debug(f"ckanDataSet record count: {len(ckanDataSet)}")
for inputRecord in ckanDataSet:
LOGGER.debug(f"iterating: {inputRecord}")
recordUniqueId = inputRecord.getUniqueIdentifier()
compareRecord = self.getRecordByUniqueId(recordUniqueId)
LOGGER.debug(f"type 1 and 2... {type(inputRecord)} {type(compareRecord)}")
if inputRecord != compareRecord:
LOGGER.debug(f"---------{recordUniqueId} doesn't have equal")
retVal = False
break
else:
LOGGER.debug(f"unique ids don't align")
retVal = False
return retVal
class CKANUsersDataSet(CKANDataSet):
    """A :class:`CKANDataSet` specialisation representing a collection of
    CKAN user records.

    In this rendition the class body adds no members; all behaviour is
    inherited from ``CKANDataSet``.
    """
# ----------------- EXCEPTIONS
class UserDefinedFieldDefinitionError(Exception):
    """Raised when the transformation configuration contains an unexpected
    value or type in its user defined field definitions.
    """
| 41.507143 | 117 | 0.64246 | """CKAN module is a wrapper around API calls. All of these methods will return
raw json objects. The JSON that is returned can be used to construct CKANData
objects.
CKANData objects can be compared with one another. They will use the
CKANTransform methods to identify fields that should and should not be
used to compare two objects.
CKANTransform will also be used to transform on CKANData object to a new
Schema allowing like for like comparisons.
CKANTransform module will work directly with CKAN Data objects.
"""
# pylint: disable=logging-format-interpolation
import logging
import constants
import CKANTransform
import deepdiff
import pprint
LOGGER = logging.getLogger(__name__)
def validateTypeIsComparable(dataObj1, dataObj2):
    """A generic function that can be used to ensure two objects are comparable.

    Objects are comparable when they carry the same ``dataType`` attribute,
    or, when that attribute is absent, the same python type.

    :param dataObj1: The first data object that is to be used in a comparison
    :param dataObj2: The second data object that is to be used in a comparison
    :raises IncompatibleTypesException: raised when the two objects are not
        of the same (data) type
    """
    dataType1 = type(dataObj1)
    dataType2 = type(dataObj2)
    # prefer the CKAN specific dataType attribute when the object defines one
    if hasattr(dataObj1, 'dataType'):
        dataType1 = dataObj1.dataType
    if hasattr(dataObj2, 'dataType'):
        dataType2 = dataObj2.dataType
    if dataType2 != dataType1:
        # fixed: the original message was garbled
        # ("...dataObj2 is type: with an object of type, {dataType2}")
        msg = 'You are attempting to compare two different types of objects ' + \
            f'that are not comparable. dataObj1 is type: {dataType1} and ' + \
            f'dataObj2 is type: {dataType2}'
        raise IncompatibleTypesException(msg)
# ------------- Data Record defs -------------
class CKANRecord:
    """Wraps the json struct returned by a CKAN endpoint for a single object
    (user, group, organization, ...).  Provides equality / diff operations
    that only consider the user populated fields defined in the
    transformation configuration.
    """

    def __init__(self, jsonData, dataType):
        """
        :param jsonData: raw json struct for a single CKAN record
        :type jsonData: dict
        :param dataType: the type of CKAN data, ex: 'users', 'groups', ...
        :type dataType: str
        """
        self.jsonData = jsonData
        self.dataType = dataType
        self.transConf = CKANTransform.TransformationConfig()
        self.userPopulatedFields = self.transConf.getUserPopulatedProperties(
            self.dataType)

    def getFieldValue(self, fieldName):
        """Returns the raw value stored in the record for the given field.

        :param fieldName: name of the field whose value should be returned
        :type fieldName: str
        :raises KeyError: if the field does not exist in the record
        """
        return self.jsonData[fieldName]

    def getUniqueIdentifier(self):
        """returns the value in the field described in the transformation
        configuration file as unique.

        :return: value of unique field
        :rtype: any
        """
        # the config identifies which field acts as the unique id for this
        # data type; pull that field's value out of the record
        uniqueFieldName = self.transConf.getUniqueField(self.dataType)
        return self.jsonData[uniqueFieldName]

    def getComparableStruct(self, struct=None, flds2Include=None):
        """Receives the data returned by one of the CKAN end points, recursively
        iterates over it returning a new data structure that contains only the
        fields that are user populated. (removing auto generated fields).

        Field definitions are retrieved from the transformation configuration
        file.

        :param struct: The input CKAN data structure
        :type struct: list, dict
        :param flds2Include: Used internally during recursion to ensure the
            userfields line up with the current level of recursion, defaults to None
        :type flds2Include: list / dict, optional
        :return: The new data structure with only user generated fields
        :rtype: dict or list
        """
        # TODO: ckan objects can have embedded objects (ex: orgs contain
        # users); embedded ignore handling is done later by
        # removeEmbeddedIgnores.
        if struct is None and flds2Include is None:
            # top level call: start from the record itself
            struct = self.jsonData
            flds2Include = self.userPopulatedFields
        newStruct = None
        if isinstance(flds2Include, list):
            # assumption: a single dict in flds2Include describes what to do
            # with every element of the corresponding list in struct
            newStruct = []
            if isinstance(flds2Include[0], dict):
                for structElem in struct:
                    dataValue = self.getComparableStruct(structElem,
                                                         flds2Include[0])
                    newStruct.append(dataValue)
                return newStruct
        elif isinstance(flds2Include, dict):
            # only keys named in the config make it into the output
            newStruct = {}
            for key in flds2Include:
                newStruct[key] = self.getComparableStruct(struct[key],
                                                          flds2Include[key])
            return newStruct
        elif isinstance(flds2Include, bool):
            # leaf: a bool in the config means "include this value as is"
            return struct
        return newStruct

    def removeEmbeddedIgnores(self, dataCell):
        """many data structs in CKAN can contain embedded data types. Example
        of data types in CKAN: users, groups, organizations, packages, resources

        An example of an embedded type... users are embedded in organizations.
        Any data type can be configured with an ignore_list that identifies the
        unique ids of records that should be ignored for that type.  For
        embedded data we do not want differences on ignored records to count,
        so this method recursively:

        * identifies if a property is an embedded type
        * removes any children that match the ignore_list defined for the
          type that is being embedded

        The input cell is mutated in place and also returned.

        :param dataCell: the cell wrapping the struct to be cleaned
        :type dataCell: DataCell
        :return: the same cell with ignored embedded records removed
        :rtype: DataCell
        """
        if isinstance(dataCell.struct, dict):
            for objProperty in dataCell.struct:
                newCell = dataCell.generateNewCell(objProperty, self.transConf)
                newCell = self.removeEmbeddedIgnores(newCell)
                dataCell.copyChanges(newCell)
        elif isinstance(dataCell.struct, list):
            positions2Remove = []
            for listPos in range(0, len(dataCell.struct)):
                newCell = dataCell.generateNewCell(listPos, self.transConf)
                newCell = self.removeEmbeddedIgnores(newCell)
                if not newCell.include:
                    positions2Remove.append(listPos)
            LOGGER.debug(f"removing positions: {positions2Remove}")
            dataCell.deleteIndexes(positions2Remove)
        return dataCell

    def __eq__(self, inputRecord):
        """records compare equal when a diff of their comparable (user
        populated) structs comes back empty"""
        LOGGER.debug("_________ EQ CALLED")
        diff = self.getDiff(inputRecord)
        return not diff

    def isIgnore(self, inputRecord):
        """evaluates the input record to determine if it is defined in the
        transformation config as one that should be ignored

        :param inputRecord: the record to evaluate
        :type inputRecord: CKANRecord
        :return: True when the record's unique field value is in the ignore list
        :rtype: bool
        """
        retVal = False
        ignoreField = self.transConf.getUniqueField(self.dataType)
        ignoreList = self.transConf.getIgnoreList(self.dataType)
        if ignoreField in inputRecord.jsonData:
            if inputRecord.jsonData[ignoreField] in ignoreList:
                retVal = True
        return retVal

    def getDiff(self, inputRecord):
        """Calculates the differences between this record and inputRecord,
        considering only user generated fields and skipping embedded records
        that appear in an ignore list.

        :param inputRecord: the record to compare against
        :type inputRecord: CKANRecord
        :return: a deepdiff.DeepDiff of the two comparable structs, or None
            when the input record is itself configured to be ignored
        """
        diff = None
        # short circuit: no diff when the record's unique id (usually 'name')
        # is in the ignore list
        if not self.isIgnore(inputRecord):
            thisComparable = self.getComparableStruct()
            thisCell = DataCell(thisComparable)
            thisCellNoIgnores = self.removeEmbeddedIgnores(thisCell)
            thisComparable = thisCellNoIgnores.struct
            # do the same thing for the input data structure
            # (fixed: previously read dataCell.struct, silently relying on
            # removeEmbeddedIgnores mutating its argument in place)
            inputCell = DataCell(inputRecord.getComparableStruct())
            inputCellNoIgnores = self.removeEmbeddedIgnores(inputCell)
            inputComparable = inputCellNoIgnores.struct
            diff = deepdiff.DeepDiff(thisComparable,
                                     inputComparable,
                                     ignore_order=True)
            if diff:
                pp = pprint.PrettyPrinter(indent=4)
                LOGGER.debug("inputComparable: %s", pp.pformat(inputComparable))
                LOGGER.debug('thisComparable: %s', pp.pformat(thisComparable))
        return diff

    def __ne__(self, inputRecord):
        """inverse of __eq__; logged for traceability of sync decisions"""
        LOGGER.debug(f"__________ NE record CALLED: {type(inputRecord)}, {type(self)}")
        retVal = not self.__eq__(inputRecord)
        LOGGER.debug(f"retval from __ne__: {retVal}")
        return retVal
class DataCell:
    """an object that can be used to wrap a data value and other meta data
    about it from the perspective of a change
    """

    def __init__(self, struct, include=True):
        """
        :param struct: the raw data (dict / list / scalar) wrapped by this cell
        :param include: whether this cell should be kept when differences are
            calculated, defaults to True
        :type include: bool, optional
        """
        self.struct = struct
        self.include = include
        # ignore configuration, inherited from the parent cell or looked up
        # when this cell wraps an embedded data type
        self.ignoreList = None
        self.ignoreFld = None
        # links back to the enclosing cell
        self.parent = None
        self.parentType = None
        self.parentKey = None

    def copyChanges(self, childDataCell):
        """writes a child cell's (possibly modified) struct back into this
        cell's struct at the key the child was generated from.

        :param childDataCell: a child cell previously created by
            generateNewCell
        :type childDataCell: DataCell
        """
        self.struct[childDataCell.parentKey] = childDataCell.struct

    def deleteIndexes(self, positions):
        """gets a list of the positions that are to be trimmed from the struct

        :param positions: a list of index positions for the self.struct list
            that are to be removed.
        :type positions: list of ints
        """
        LOGGER.debug(f"remove positions: {positions}")
        # use a set for O(1) membership tests instead of scanning the
        # positions list once per element
        doomed = set(positions)
        newStruct = [value for index, value in enumerate(self.struct)
                     if index not in doomed]
        LOGGER.debug(f"old struct: {self.struct}")
        LOGGER.debug(f"new struct: {newStruct}")
        self.struct = newStruct

    def generateNewCell(self, key, transConf):
        """The current cell is a dict, generates a new cell for the position
        associated with the input key.

        :param key: a key of struct property
        :type key: str
        :param transConf: transformation configuration, used to look up the
            ignore list and unique field for embedded data types
        :type transConf: CKANTransform.TransformationConfig
        :return: the child cell wrapping ``self.struct[key]``
        :rtype: DataCell
        """
        newCell = DataCell(self.struct[key])
        newCell.parent = self
        newCell.parentKey = key
        # copy the attributes from parent to child
        newCell.include = self.include
        newCell.ignoreList = self.ignoreList
        newCell.ignoreFld = self.ignoreFld
        newCell.parentType = self.parentType
        # if the key is an embedded type, users, groups, etc... switch to
        # that type's own ignore configuration
        if key in constants.VALID_TRANSFORM_TYPES:
            newCell.ignoreList = transConf.getIgnoreList(key)
            newCell.ignoreFld = transConf.getUniqueField(key)
            newCell.parentType = key
        if newCell.parentType is not None:
            # when the key is the ignore field itself (ex: key 'name' and
            # ignore field 'name') and its value is in the ignore list,
            # mark the enclosing object (self) for exclusion
            if (newCell.ignoreFld) and key == newCell.ignoreFld:
                if newCell.struct in newCell.ignoreList:
                    self.include = False
        return newCell
class CKANUserRecord(CKANRecord):
    """A CKANRecord for a single CKAN user."""

    def __init__(self, jsonData):
        # defer to the base class with the canonical users type name
        super().__init__(jsonData, constants.TRANSFORM_TYPE_USERS)
class CKANGroupRecord(CKANRecord):
    """A CKANRecord for a single CKAN group."""

    def __init__(self, jsonData):
        # defer to the base class with the canonical groups type name
        super().__init__(jsonData, constants.TRANSFORM_TYPE_GROUPS)
class CKANOrganizationRecord(CKANRecord):
    """A CKANRecord for a single CKAN organization."""

    def __init__(self, jsonData):
        # defer to the base class with the canonical orgs type name
        super().__init__(jsonData, constants.TRANSFORM_TYPE_ORGS)
# -------------------- DATASET DELTA ------------------
class CKANDataSetDeltas:
    """Class used to represent differences between two objects of the same
    type. Includes all the information necessary to proceed with the update.

    :ivar adds: A list of dicts containing the user defined properties that need
        to be populated to create an equivalent version of the src data
        in dest.
    :ivar deletes: A list of the names or ids on the dest side of objects that
        should be deleted.
    :ivar updates: Same structure as 'adds'. Only difference between these and
        adds is these will get added to dest using an update method vs a create
        method.
    :ivar srcCKANDataset: CKANDataset object, maintain a reference to this
        object so that can request CKAN records in the dataset with only
        user generated fields included.
    """

    def __init__(self, srcCKANDataset, destCKANDataset):
        """
        :param srcCKANDataset: the dataset from the source ckan instance
        :type srcCKANDataset: CKANDataSet
        :param destCKANDataset: the dataset from the destination ckan instance
        :type destCKANDataset: CKANDataSet
        """
        self.adds = []
        self.deletes = []
        self.updates = {}
        self.srcCKANDataset = srcCKANDataset
        self.destCKANDataset = destCKANDataset
        # share the source dataset's transformation configuration
        self.transConf = self.srcCKANDataset.transConf

    def setAddDataset(self, addDataObj):
        """Adds a object to the list of objects that are identified as adds

        Adds are objects that exist in the source but not the destination

        :param addDataObj: data that is to be added
        :type addDataObj: dict
        :raises TypeError: raised if the input data is not type dict
        """
        if not isinstance(addDataObj, dict):
            msg = "addDataObj parameter needs to be type dict. You passed " + \
                f"{type(addDataObj)}"
            raise TypeError(msg)
        self.adds.append(addDataObj)

    def setAddDatasets(self, addList, replace=True):
        """adds a list of data to the adds property. The adds property
        gets populated with data that should be added to the destination
        ckan instance

        :param addList: input list of data that should be added to the dest
            instance
        :type addList: struct
        :param replace: if set to true, will replace any data that may already
            exist in the adds property if set to false then will append to the
            end of the struct, defaults to True
        :type replace: bool, optional
        """
        if replace:
            LOGGER.info(f"populate add list with {len(addList)} items")
            self.adds = addList
        else:
            LOGGER.info(f"adding {len(addList)} items to the add list")
            self.adds.extend(addList)

    def setDeleteDataset(self, deleteName):
        """Adds an object to the list of data that has been identified as a
        Delete.

        Deletes are records that exist in the destination but not the source.

        :param deleteName: unique identifier of the record to delete
        :type deleteName: str
        :raises TypeError: raised if the input is not a str
        """
        if not isinstance(deleteName, str):
            msg = "deleteName parameter needs to be type str. You passed " + \
                f"{type(deleteName)}"
            raise TypeError(msg)
        self.deletes.append(deleteName)

    def setDeleteDatasets(self, deleteList, replace=True):
        """adds a list of data to the deletes property. The deletes property
        gets populated with unique ids that should be removed from the destination
        ckan instance

        :param deleteList: input list of data that should be deleted from the dest
            ckan instance
        :type deleteList: struct
        :param replace: if set to true, will replace any data that may already
            exist in the deletes property, if set to false then will append to the
            end of the struct, defaults to True
        :type replace: bool, optional
        """
        if replace:
            LOGGER.info(f"populate delete list with {len(deleteList)} items")
            self.deletes = deleteList
        else:
            LOGGER.info(f"adding {len(deleteList)} items to the delete list")
            self.deletes.extend(deleteList)

    def setUpdateDatasets(self, updateList):
        """Gets a list of data that should be used to update objects in the ckan
        destination instance and adds the data to this object

        :param updateList: list of data to be used to update the object
        :type updateList: list
        """
        LOGGER.info(f"adding {len(updateList)} records to update")
        for updateData in updateList:
            self.setUpdateDataSet(updateData)

    def setUpdateDataSet(self, updateObj):
        """Adds a new dataset that is to be updated. When comparison of two
        objects identifies that there is a difference, the object that passed to
        this method is the src object with the data that should be copied to dest.

        Updates are datasets that exist in source and destination however not all
        the data is the same between them.

        :param updateObj: the data that is to be updated
        :type updateObj: dict
        :raises TypeError: object must be type 'dict', raise if it is not.
        :raises ValueError: object must have a 'name' property
        """
        if not isinstance(updateObj, dict):
            msg = "updateObj parameter needs to be type dict. You passed " + \
                f"{type(updateObj)}"
            raise TypeError(msg)
        if 'name' not in updateObj:
            msg = 'Update object MUST contain a property \'name\'. Object ' + \
                f'provided: {updateObj}'
            raise ValueError(msg)
        LOGGER.debug(f"adding update for {updateObj['name']}")
        # updates are keyed on the record's name
        self.updates[updateObj['name']] = updateObj

    def filterNonUserGeneratedFields(self, ckanDataSet):
        """
        Receives either a dict or list:

        * dict: key is the unique id for the dataset
        * list: a list of dicts describing a list of data
          objects.

        Iterates over all the data in the ckanDataSet struct, removing non
        user generated fields and returns a json struct (dict) with only
        fields that are user defined

        :param ckanDataSet: a ckan data set
        :type ckanDataSet: CKANDataSet or an object that subclasses it
        :raises IncompatibleTypesException: when the input is neither a dict
            nor a list
        :return: same container shape as the input with only user generated
            fields retained on each record
        """
        # get the unique id for this dataset type
        uniqueIdentifier = self.srcCKANDataset.transConf.getUniqueField(
            self.srcCKANDataset.dataType)
        # if generating a dataset to be used to update a dataset, then check to
        # see if there are machine generated fields that should be included in the
        # update
        LOGGER.debug(f"uniqueIdentifier: {uniqueIdentifier}")
        if isinstance(ckanDataSet, dict):
            filteredData = {}
            uniqueIds = ckanDataSet.keys()
        elif isinstance(ckanDataSet, list):
            filteredData = []
            # only use the unique ids present in the input struct, not every
            # id known to the source dataset
            uniqueIds = []
            for record in ckanDataSet:
                uniqueIds.append(record[uniqueIdentifier])
        else:
            msg = f'type received is {type(ckanDataSet)}, expecting list or dict'
            raise IncompatibleTypesException(msg)
        for uniqueId in uniqueIds:
            ckanRec = self.srcCKANDataset.getRecordByUniqueId(uniqueId)
            compStruct = ckanRec.getComparableStruct()
            if isinstance(ckanDataSet, dict):
                filteredData[uniqueId] = compStruct
            elif isinstance(ckanDataSet, list):
                filteredData.append(compStruct)
        return filteredData

    def getAddData(self):
        """returns the add records reduced to only user generated fields.

        :return: the data structs to use to create records in the destination
            ckan instance
        :rtype: list
        """
        LOGGER.debug(f'add data: {type(self.adds)} {len(self.adds)}')
        adds = self.filterNonUserGeneratedFields(self.adds)
        return adds

    def getDeleteData(self):
        """returns the unique identifiers of the records that should be
        removed from the destination ckan instance.

        :rtype: list
        """
        return self.deletes

    def getUpdateData(self):
        """ creates and returns a structure that can be used to update the object
        in question.

        :return: a dictionary where the key values are the unique identifiers
            and the values are the actual struct that should be used to update
            the destination ckan instance.
        :rtype: dict
        """
        # should return only fields that are user generated
        updates = self.filterNonUserGeneratedFields(self.updates)
        updateFields = self.transConf.getFieldsToIncludeOnUpdate(self.destCKANDataset.dataType)
        if updateFields:
            # need to add these onto each record from the destination
            # instances data
            updates = self.addUpdateAutoGenFields(updates, updateFields)
        return updates

    def addUpdateAutoGenFields(self, dataDict, autoGenFieldList):
        """dataDict contains the data that is to be used for the update that
        originates from the source ckan instance. autoGenFieldList is a list of
        field names that should be added to the struct from the destination
        ckan instance

        :param dataDict: The update data struct which is a dictionary where the
            keys are the unique identifier, in most cases the keys are the name
            property. The values in this struct are the values from the source
            ckan instance.
        :type dataDict: dict
        :param autoGenFieldList: a list of field names that should be added to
            the struct from the destination ckan instance
        :type autoGenFieldList: list
        :return: The values in the dataDict with the destination instance fields
            defined in autoGenFieldList appended to the dictionary
        :rtype: dict
        """
        for uniqueId in dataDict:
            record = self.destCKANDataset.getRecordByUniqueId(uniqueId)
            for field2Add in autoGenFieldList:
                fieldValue = record.getFieldValue(field2Add)
                dataDict[uniqueId][field2Add] = fieldValue
                LOGGER.debug(f"adding: {field2Add}:{fieldValue} to {uniqueId}")
        return dataDict

    def __str__(self):
        """:return: a one line summary of the record counts in this delta"""
        msg = f"add datasets: {len(self.adds)}, deletes: {len(self.deletes)} " + \
            f"updates: {len(self.updates)}"
        return msg
# -------------------- DATASETS --------------------
class CKANDataSet:
    """Wraps a collection of CKAN records of a single data type.

    Provides an iterator that yields CKANRecord objects, set style
    comparison between a source and a destination instance, and helpers
    that compute the add / delete / update lists required to synchronize
    the two instances.

    :raises IncompatibleTypesException: This exception is raised when
        comparing two incompatible types.
    """

    def __init__(self, jsonData, dataType):
        """
        :param jsonData: raw records as returned by the CKAN API
        :type jsonData: list
        :param dataType: one of the transformation config data types
            (users / groups / orgs ...)
        :type dataType: str
        """
        self.jsonData = jsonData
        self.dataType = dataType
        self.transConf = CKANTransform.TransformationConfig()
        self.userPopulatedFields = self.transConf.getUserPopulatedProperties(self.dataType)
        self.iterCnt = 0
        # subclasses override this with their concrete record type
        self.recordConstructor = CKANRecord
        # an index to help find records faster; constructed lazily the
        # first time a record is requested by unique id
        self.uniqueidRecordLookup = {}

    def reset(self):
        """Reset the iterator to the start of the dataset."""
        self.iterCnt = 0

    def getUniqueIdentifiers(self):
        """Iterates through the records in the dataset extracting the values from
        the unique identifier field as defined in the config file.

        :return: list of values found in the datasets unique constrained field.
        :rtype: list
        """
        self.reset()
        uniqueIds = []
        for record in self:
            uniqueIds.append(record.getUniqueIdentifier())
        return uniqueIds

    def getRecordByUniqueId(self, uniqueValueToRetrieve):
        """Gets the record that aligns with this unique id.

        On first use this scans every record and populates the
        ``uniqueidRecordLookup`` index; subsequent calls are plain
        dictionary lookups.  Returns None when no record matches.
        """
        retVal = None
        if not self.uniqueidRecordLookup:
            self.reset()
            for record in self:
                recordID = record.getUniqueIdentifier()
                self.uniqueidRecordLookup[recordID] = record
                if uniqueValueToRetrieve == recordID:
                    retVal = record
        else:
            if uniqueValueToRetrieve in self.uniqueidRecordLookup:
                retVal = self.uniqueidRecordLookup[uniqueValueToRetrieve]
        return retVal

    def getDeleteList(self, destUniqueIdSet, srcUniqueIdSet):
        """gets a set of unique ids from the source and destination ckan instances,
        compares the two lists and generates a list of ids that should be deleted
        from the destination instance. Excludes any ids that are identified in
        the ignore list defined in the transformation configuration file.

        :param destUniqueIdSet: a set of unique ids found the destination ckan
            instance
        :type destUniqueIdSet: set
        :param srcUniqueIdSet: a set of the unique ids in the source ckan instance
        :type srcUniqueIdSet: set
        :return: list of unique ids to delete, minus the ignore list
        :rtype: list
        """
        ignoreList = self.transConf.getIgnoreList(self.dataType)
        # in destination but not in source, ie deletes
        deleteSet = destUniqueIdSet.difference(srcUniqueIdSet)
        deleteList = []
        for deleteUniqueName in deleteSet:
            # Check to see if the record is in the ignore list, only add if it is not
            if deleteUniqueName not in ignoreList:
                deleteList.append(deleteUniqueName)
        return deleteList

    def getAddList(self, destUniqueIdSet, srcUniqueIdSet):
        """Gets a two sets of unique ids, one for the data on the source ckan
        instance and another for the destination ckan instance. Using this
        information returns a list of unique ids that should be added to the
        destination instance

        :param destUniqueIdSet: a set of unique ids from the destination ckan
            instance.
        :type destUniqueIdSet: set
        :param srcUniqueIdSet: a set of unique ids from the source ckan instance
        :type srcUniqueIdSet: set
        :return: a list of comparable record structs that should be added to
            the destination ckan instance.  Excludes any unique ids identified
            in the transformation configuration ignore list.
        :rtype: list
        """
        # in source but not in dest, ie adds
        addSet = srcUniqueIdSet.difference(destUniqueIdSet)
        ignoreList = self.transConf.getIgnoreList(self.dataType)
        addList = []
        for addRecordUniqueName in addSet:
            if addRecordUniqueName not in ignoreList:
                addDataSet = self.getRecordByUniqueId(addRecordUniqueName)
                addDataStruct = addDataSet.getComparableStruct()
                addList.append(addDataStruct)
        return addList

    def getUpdatesList(self, destUniqueIdSet, srcUniqueIdSet, destDataSet):
        """Identify records that exist on both instances but differ in content.

        :param destUniqueIdSet: unique ids present on the destination instance
        :type destUniqueIdSet: set
        :param srcUniqueIdSet: unique ids present on the source instance
        :type srcUniqueIdSet: set
        :param destDataSet: the destination dataset, used to retrieve the
            destination side of each record comparison
        :type destDataSet: CKANDataSet
        :return: list of source record json structs that require an update
        :rtype: list
        """
        ignoreList = self.transConf.getIgnoreList(self.dataType)
        chkForUpdateIds = srcUniqueIdSet.intersection(destUniqueIdSet)
        updateDataList = []
        for chkForUpdateId in chkForUpdateIds:
            # bug fix: the original tested "chkForUpdateIds" (the whole set)
            # against the ignore list, so ignored records were still updated
            if chkForUpdateId not in ignoreList:
                srcRecordForUpdate = self.getRecordByUniqueId(chkForUpdateId)
                destRecordForUpdate = destDataSet.getRecordByUniqueId(chkForUpdateId)
                # if they are different then identify as an update. The __eq__
                # method for the record is getting called here. __eq__ will
                # consider ignore lists. If a record is in the ignore list it
                # will compare as equal.
                if srcRecordForUpdate != destRecordForUpdate:
                    updateDataList.append(srcRecordForUpdate.jsonData)
        return updateDataList

    def getDelta(self, destDataSet):
        """Compares this dataset with the provided 'destDataSet' dataset and
        returns a CKANDataSetDeltas object that identifies
        * additions
        * deletions
        * updates

        Assumption is that __this__ object is the source dataset and the object
        in the parameter destDataSet is the destination dataset, or the dataset
        that is to be updated

        :param destDataSet: the dataset that is going to be updated so it
            matches the contents of the source dataset
        :type destDataSet: CKANDataSet
        """
        deltaObj = CKANDataSetDeltas(self, destDataSet)
        dstUniqueIds = set(destDataSet.getUniqueIdentifiers())
        srcUniqueids = set(self.getUniqueIdentifiers())

        deleteList = self.getDeleteList(dstUniqueIds, srcUniqueids)
        deltaObj.setDeleteDatasets(deleteList)

        addList = self.getAddList(dstUniqueIds, srcUniqueids)
        deltaObj.setAddDatasets(addList)

        updateList = self.getUpdatesList(dstUniqueIds, srcUniqueids, destDataSet)
        deltaObj.setUpdateDatasets(updateList)
        return deltaObj

    def __eq__(self, ckanDataSet):
        """ Identifies if the input dataset is the same as this dataset

        :param ckanDataSet: The input CKANDataset
        :type ckanDataSet: either CKANDataSet, or a subclass of it
        """
        LOGGER.debug("DATASET EQ")
        retVal = True
        # TODO: rework this, should be used to compare a collection
        validateTypeIsComparable(self, ckanDataSet)

        # get the unique identifiers and verify that input has all the
        # unique identifiers as this object
        inputUniqueIds = ckanDataSet.getUniqueIdentifiers()
        thisUniqueIds = self.getUniqueIdentifiers()
        LOGGER.debug(f"inputUniqueIds: {inputUniqueIds}")
        LOGGER.debug(f"thisUniqueIds: {thisUniqueIds}")

        if set(inputUniqueIds) == set(thisUniqueIds):
            # has all the unique ids, now need to look at the differences
            # in the data
            LOGGER.debug(f"iterate ckanDataSet: {ckanDataSet}")
            LOGGER.debug(f"ckanDataSet record count: {len(ckanDataSet)}")
            for inputRecord in ckanDataSet:
                LOGGER.debug(f"iterating: {inputRecord}")
                recordUniqueId = inputRecord.getUniqueIdentifier()
                compareRecord = self.getRecordByUniqueId(recordUniqueId)
                LOGGER.debug(f"type 1 and 2... {type(inputRecord)} {type(compareRecord)}")
                if inputRecord != compareRecord:
                    LOGGER.debug(f"---------{recordUniqueId} doesn't have equal")
                    retVal = False
                    break
        else:
            LOGGER.debug("unique ids don't align")
            retVal = False
        return retVal

    def next(self):
        # python 2 style iterator alias
        return self.__next__()

    def __next__(self):
        if self.iterCnt >= len(self.jsonData):
            self.iterCnt = 0
            raise StopIteration
        ckanRecord = None
        # if the record constructor is a CKANRecord then use the two parameter
        # constructor, otherwise the type is already defined in subclass of the
        # CKANRecord
        if self.recordConstructor == CKANRecord:
            ckanRecord = self.recordConstructor(self.jsonData[self.iterCnt], self.dataType)
        else:
            ckanRecord = self.recordConstructor(self.jsonData[self.iterCnt])  # pylint: disable=no-value-for-parameter
        self.iterCnt += 1
        return ckanRecord

    def __iter__(self):
        return self

    def __len__(self):
        return len(self.jsonData)
class CKANUsersDataSet(CKANDataSet):
    """Used to represent a collection of CKAN user data.

    :param jsonData: raw user records as returned by the CKAN API
    :type jsonData: list
    """

    def __init__(self, jsonData):
        # pin the data type to "users" so the transformation config resolves
        # the user specific field rules, and use the user record type
        CKANDataSet.__init__(self, jsonData, constants.TRANSFORM_TYPE_USERS)
        self.recordConstructor = CKANUserRecord
class CKANGroupDataSet(CKANDataSet):
    """Used to represent a collection of CKAN group data."""

    def __init__(self, jsonData):
        # pin the data type to "groups" and use the group record type
        CKANDataSet.__init__(self, jsonData, constants.TRANSFORM_TYPE_GROUPS)
        self.recordConstructor = CKANGroupRecord
class CKANOrganizationDataSet(CKANDataSet):
    """Used to represent a collection of CKAN organization data."""

    def __init__(self, jsonData):
        CKANDataSet.__init__(self, jsonData, constants.TRANSFORM_TYPE_ORGS)
        # NOTE(review): record type is CKANGroupRecord although the data type
        # is orgs — looks like a copy/paste from CKANGroupDataSet; confirm
        # whether a dedicated organization record class exists.
        self.recordConstructor = CKANGroupRecord
# ----------------- EXCEPTIONS
class UserDefinedFieldDefinitionError(Exception):
    """Raised when the transformation configuration encounters an unexpected
    value or type
    """

    def __init__(self, message):
        LOGGER.debug(f"error message: {message}")
        # forward the message to Exception so str(exc) and tracebacks show
        # it (previously only self.message was populated)
        super().__init__(message)
        self.message = message
class IncompatibleTypesException(Exception):
    """Raised when two objects of incompatible types are compared."""

    def __init__(self, message):
        LOGGER.debug(f"error message: {message}")
        # forward the message to Exception so str(exc) and tracebacks show
        # it (previously only self.message was populated)
        super().__init__(message)
        self.message = message
| 6,169 | 7,180 | 693 |
a716982e7f05cf8e2d5c8a7553f150a6cba86c7d | 969 | py | Python | devicedata/management/commands/provider_find.py | MPIB/Lagerregal | 3c950dffcf4fa164008c5a304c4839bc282a3388 | [
"BSD-3-Clause"
] | 24 | 2017-03-19T16:17:37.000Z | 2021-11-07T15:35:33.000Z | devicedata/management/commands/provider_find.py | MPIB/Lagerregal | 3c950dffcf4fa164008c5a304c4839bc282a3388 | [
"BSD-3-Clause"
] | 117 | 2016-04-19T12:35:10.000Z | 2022-02-22T13:19:05.000Z | devicedata/management/commands/provider_find.py | MPIB/Lagerregal | 3c950dffcf4fa164008c5a304c4839bc282a3388 | [
"BSD-3-Clause"
] | 11 | 2017-08-08T12:11:39.000Z | 2021-12-08T05:34:06.000Z | from django.core.management import BaseCommand
from devicedata.generic import _get_provider
from devices.models import Device
| 35.888889 | 97 | 0.639835 | from django.core.management import BaseCommand
from devicedata.generic import _get_provider
from devices.models import Device
class Command(BaseCommand):
    """Management command that detects and stores a data provider for devices.

    With explicit ids it processes exactly those devices; otherwise it
    processes every device that has an ip address but no provider yet.
    """

    def add_arguments(self, parser):
        # zero or more device primary keys; an empty list means "auto-select"
        parser.add_argument('device_ids', nargs='*', type=int)

    def handle(self, *args, **options):
        if "device_ids" in options and len(options["device_ids"]) > 0:
            devices = Device.objects.filter(pk__in=options["device_ids"])
        else:
            # default scope: devices with an ip but no provider assigned yet
            devices = Device.objects.filter(data_provider="", ipaddress__isnull=False)
        if len(devices) == 0:
            # NOTE(review): message text says "with data provider" although the
            # default query selects devices *without* one — possibly misleading
            self.stdout.write("Could not find any devices with data provider.")
            return
        for device in devices:
            provider = _get_provider(device)
            if provider is not None:
                device.data_provider = provider.name
                device.save()
                self.stdout.write("Processed: {0} with {1}".format(device, device.data_provider))
| 758 | 6 | 77 |
3b63cd4e1e16948b5cafa357994d2747e7cf0d6c | 2,458 | py | Python | packages/mccomponents/python/mccomponents/sample/phonon/obsolete/register_LinearlyInterpolatedDOS.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 5 | 2017-01-16T03:59:47.000Z | 2020-06-23T02:54:19.000Z | packages/mccomponents/python/mccomponents/sample/phonon/obsolete/register_LinearlyInterpolatedDOS.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 293 | 2015-10-29T17:45:52.000Z | 2022-01-07T16:31:09.000Z | packages/mccomponents/python/mccomponents/sample/phonon/obsolete/register_LinearlyInterpolatedDOS.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 1 | 2019-05-25T00:53:31.000Z | 2019-05-25T00:53:31.000Z | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#factory method that wraps boost python binding
def linearlyinterpolateddos_bp(
        e0, de, n, Z):
    '''create boost python object of LinearlyInterpolatedDOS

    e0: minimum phonon energy. float
    de: phonon energy step. float
    n: number of points.
    Z: values of DOS at the energy points defined by (e0, de, n)
    '''
    # local import: the boost-python extension module is only needed here
    import mccomponents.mccomponentsbp as b
    # copy Z element by element into the binding's vector type
    Z1 = b.vector_double( n )
    for i in range(n): Z1[i] = Z[i]
    return b.LinearlyInterpolatedDOS_dbl( e0, de, n, Z1 )
#python class to represent LinearlyInterpolatedDOS
from AbstractDOS import AbstractDOS as base
#register new type
# 2. the handler of engine renderer
# 3. the handler to call python bindings
import mccomponents.homogeneous_scatterer as hs
# 4. register the new class and handlers
hs.register (
LinearlyInterpolatedDOS, onLinearlyInterpolatedDOS,
{'BoostPythonBinding':linearlyinterpolateddos_bp_handler} )
# version
__id__ = "$Id$"
# End of file
| 26.430108 | 81 | 0.616355 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#factory method that wraps boost python binding
def linearlyinterpolateddos_bp(
        e0, de, n, Z):
    '''create boost python object of LinearlyInterpolatedDOS

    e0: minimum phonon energy. float
    de: phonon energy step. float
    n: number of points.
    Z: values of DOS at the energy points defined by (e0, de, n)
    '''
    # local import: the boost-python extension module is only needed here
    import mccomponents.mccomponentsbp as b
    # copy Z element by element into the binding's vector type
    Z1 = b.vector_double( n )
    for i in range(n): Z1[i] = Z[i]
    return b.LinearlyInterpolatedDOS_dbl( e0, de, n, Z1 )
#python class to represent LinearlyInterpolatedDOS
from AbstractDOS import AbstractDOS as base
class LinearlyInterpolatedDOS(base):
    """Python-side wrapper of a phonon density-of-states histogram.

    Visitor pattern: ``identify`` dispatches to the visitor's
    ``onLinearlyInterpolatedDOS`` handler.
    """
    def __init__(self, doshist):
        '''doshist: a histogram instance'''
        self.doshist = doshist
        return
    def identify(self, visitor): return visitor.onLinearlyInterpolatedDOS( self )
    pass # end of LinearlyInterpolatedDOS
#register new type
# 2. the handler of engine renderer
def onLinearlyInterpolatedDOS(self, linearlyinterpolateddos):
    """Renderer handler: convert a LinearlyInterpolatedDOS wrapper into the
    engine's DOS object.

    Extracts a uniform energy grid (e0, de, n) from the histogram's 'energy'
    axis and forwards the DOS values to the engine factory.  Bound onto the
    renderer class via hs.register, hence the explicit ``self`` parameter.
    """
    doshist = linearlyinterpolateddos.doshist
    eaxis = doshist.axisFromName('energy')
    if eaxis.size() < 3 :
        raise RuntimeError("energy axis has too few bins: %s" % (
            eaxis, ))
    energies = eaxis.binCenters()
    e0 = energies[0]
    de = energies[1] - energies[0]
    assert de>0, "energy bin should be incremental"
    dearr = energies[1:] - energies[:-1]
    #make sure bin sizes are all the same (uniform grid is required for
    #linear interpolation with a single step size)
    import numpy
    assert numpy.all( numpy.abs( dearr-de ) < 1e-7*de )
    n = eaxis.size()
    Z = doshist.data().storage().asNumarray()
    return self.factory.linearlyinterpolateddos(
        e0, de, n, Z )
# 3. the handler to call python bindings
def linearlyinterpolateddos_bp_handler(self, e0, de, n, Z):
    # thin adapter so the renderer can delegate to the module-level
    # boost-python factory; bound as a method via hs.register below
    return linearlyinterpolateddos_bp(e0, de, n, Z)
import mccomponents.homogeneous_scatterer as hs
# 4. register the new class and handlers
hs.register (
LinearlyInterpolatedDOS, onLinearlyInterpolatedDOS,
{'BoostPythonBinding':linearlyinterpolateddos_bp_handler} )
# version
__id__ = "$Id$"
# End of file
| 848 | 202 | 66 |
7fccc3ed20ff70caf1f3b1b885d87d0a061db7f8 | 549 | py | Python | python/stringio_user.py | seckcoder/lang-learn | 1e0d6f412bbd7f89b1af00293fd907ddb3c1b571 | [
"Unlicense"
] | 1 | 2017-10-14T04:23:45.000Z | 2017-10-14T04:23:45.000Z | python/stringio_user.py | seckcoder/lang-learn | 1e0d6f412bbd7f89b1af00293fd907ddb3c1b571 | [
"Unlicense"
] | null | null | null | python/stringio_user.py | seckcoder/lang-learn | 1e0d6f412bbd7f89b1af00293fd907ddb3c1b571 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
#-*- coding=utf-8 -*-
#
# Copyright 2012 Jike Inc. All Rights Reserved.
# Author: liwei@jike.com
#
# StringIO demo (Python 2): copies /etc/passwd into an in-memory buffer,
# then exercises seek/tell/read/readline on it.
from StringIO import StringIO

# NOTE: "file" shadows the Python 2 builtin of the same name
file = '/etc/passwd'
lines = open(file, 'r').readlines()  # file handles are never closed (demo code)
text = open(file, 'r').read()
f = StringIO()
# write all but the last two lines one at a time, then the tail in one call
for line in lines[:-2]:
    f.writelines(line)
f.writelines(lines[-2:])
# overwrite the second line in place: seek past line 1, rewrite line 2
f.seek(len(lines[0]))
f.write(lines[1])
f.seek(0)
print 'First line = ', f.readline()
print 'Position = ', f.tell()
line = f.readline()
print 'Second line = ', line
# rewind by the length of the line just read (relative seek, whence=1)
f.seek(-len(line), 1)
line2 = f.read(len(line))
print line2
| 21.96 | 47 | 0.655738 | #!/usr/bin/env python
#-*- coding=utf-8 -*-
#
# Copyright 2012 Jike Inc. All Rights Reserved.
# Author: liwei@jike.com
#
# StringIO demo (Python 2): copies /etc/passwd into an in-memory buffer,
# then exercises seek/tell/read/readline on it.
from StringIO import StringIO

# NOTE: "file" shadows the Python 2 builtin of the same name
file = '/etc/passwd'
lines = open(file, 'r').readlines()  # file handles are never closed (demo code)
text = open(file, 'r').read()
f = StringIO()
# write all but the last two lines one at a time, then the tail in one call
for line in lines[:-2]:
    f.writelines(line)
f.writelines(lines[-2:])
# overwrite the second line in place: seek past line 1, rewrite line 2
f.seek(len(lines[0]))
f.write(lines[1])
f.seek(0)
print 'First line = ', f.readline()
print 'Position = ', f.tell()
line = f.readline()
print 'Second line = ', line
# rewind by the length of the line just read (relative seek, whence=1)
f.seek(-len(line), 1)
line2 = f.read(len(line))
print line2
| 0 | 0 | 0 |
17ca753f935c10af5ae4ce9a89cd6e2d15fc1486 | 441 | py | Python | stockserver.py | cnu/zeromq-talk | b090ed8be7e7ea6a1c71664e052e2ba91587f9ef | [
"MIT"
] | 2 | 2016-02-09T17:41:55.000Z | 2020-11-02T15:37:01.000Z | stockserver.py | cnu/zeromq-talk | b090ed8be7e7ea6a1c71664e052e2ba91587f9ef | [
"MIT"
] | null | null | null | stockserver.py | cnu/zeromq-talk | b090ed8be7e7ea6a1c71664e052e2ba91587f9ef | [
"MIT"
] | null | null | null | #
# Stock Ticker server
# Binds PUB socket to tcp://*:5556
# Publishes random stock updates
#
import zmq
import time
import random

# one ZeroMQ context per process; a PUB socket broadcasts to all subscribers
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind("tcp://*:5556")

scrips = ['AAPL', 'GOOG', 'MSFT', 'AMZN']
while True:
    # publish a random ticker at a random integer price, twice per second
    scrip = random.choice(scrips)
    price = random.randrange(20,700)
    msg = "%s: %d" % (scrip, price)
    print msg  # Python 2 print statement
    socket.send(msg)
time.sleep(0.5) | 19.173913 | 41 | 0.641723 | #
# Stock Ticker server
# Binds PUB socket to tcp://*:5556
# Publishes random stock updates
#
import zmq
import time
import random

# one ZeroMQ context per process; a PUB socket broadcasts to all subscribers
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind("tcp://*:5556")

scrips = ['AAPL', 'GOOG', 'MSFT', 'AMZN']
while True:
    # publish a random ticker at a random integer price, twice per second
    scrip = random.choice(scrips)
    price = random.randrange(20,700)
    msg = "%s: %d" % (scrip, price)
    print msg  # Python 2 print statement
    socket.send(msg)
time.sleep(0.5) | 0 | 0 | 0 |
4251f1c6adc55b7041994a2cbbc15c3c8fc6f74d | 22,510 | py | Python | wampnado/messages.py | rexlunae/tornwamp | 881538c6ae7909e06a15a838a0d84ebb94a2aed2 | [
"Apache-2.0"
] | null | null | null | wampnado/messages.py | rexlunae/tornwamp | 881538c6ae7909e06a15a838a0d84ebb94a2aed2 | [
"Apache-2.0"
] | null | null | null | wampnado/messages.py | rexlunae/tornwamp | 881538c6ae7909e06a15a838a0d84ebb94a2aed2 | [
"Apache-2.0"
] | null | null | null | """
WAMP messages definitions and serializers.
Compatible with WAMP Document Revision: RC3, 2014/08/25, available at:
https://github.com/tavendo/WAMP/blob/master/spec/basic.md
"""
import json
import msgpack
import uuid
from copy import deepcopy
from enum import IntEnum, Enum
from io import BytesIO
from base64 import b64encode, standard_b64decode
from wampnado.identifier import create_global_id
from wampnado.features import server_features, Options
PUBLISHER_NODE_ID = uuid.uuid4()
def decode_b64(s):
    """
    Recursively walk *s* and decode any WAMP base64-marked strings.

    Per the WAMP JSON serialization rules, binary payloads are carried as
    base64 strings prefixed with a NUL character ("\\0").  This reverses
    that encoding: dicts and lists/tuples are rebuilt (tuples become
    lists), any string starting with "\\0" is base64-decoded back to
    bytes, and every other value is returned unchanged.
    """
    if isinstance(s, dict):
        return {k: decode_b64(v) for k, v in s.items()}
    elif isinstance(s, list) or isinstance(s, tuple):
        return [decode_b64(v) for v in s]
    elif isinstance(s, str) and s.startswith('\0'):
        # bug fix: str has no "beginswith" method; the original raised
        # AttributeError for every string it inspected
        return standard_b64decode(s[1:])
    else:
        return s
def encode_bin_as_b64(s):
    """
    Recursively walk *s* and replace binary values with WAMP base64 strings.

    Per the WAMP JSON serialization rules, ``bytes`` values become their
    base64 encoding prefixed with a NUL character ("\\0").  Containers are
    rebuilt (tuples become lists), Enum members are reduced to their
    ``.value``, and anything else passes through untouched.
    """
    if isinstance(s, dict):
        return {key: encode_bin_as_b64(val) for key, val in s.items()}
    if isinstance(s, (list, tuple)):
        return [encode_bin_as_b64(item) for item in s]
    if isinstance(s, bytes):
        return '\0{}'.format(b64encode(s).decode('ascii'))
    if isinstance(s, Enum):
        return encode_bin_as_b64(s.value)
    return s
class Code(IntEnum):
    """
    Enum which represents currently supported WAMP messages.

    Values are the message type codes from the WAMP basic profile;
    commented-out members are part of the spec but not supported here.
    """
    # -- session lifecycle --
    HELLO = 1
    WELCOME = 2
    ABORT = 3
    # CHALLENGE = 4
    # AUTHENTICATE = 5
    GOODBYE = 6
    # HEARTBEAT = 7
    ERROR = 8
    # -- publish / subscribe --
    PUBLISH = 16
    PUBLISHED = 17
    SUBSCRIBE = 32
    SUBSCRIBED = 33
    UNSUBSCRIBE = 34
    UNSUBSCRIBED = 35
    EVENT = 36
    # -- remote procedure calls --
    CALL = 48
    # CANCEL = 49
    RESULT = 50
    REGISTER = 64
    REGISTERED = 65
    # UNREGISTER = 66
    # UNREGISTERED = 67
    INVOCATION = 68
    INTERRUPT = 69
    YIELD = 70
class BroadcastMessage(object):
    """
    This is a message that a procedure may want delivered.

    This class is composed of an EventMessage and a uri name
    """
    # NOTE(review): this copy of the class is incomplete — __init__ and the
    # bodies of two properties are missing, leaving the dangling decorators
    # below stacked onto from_text.  A complete definition of the same class
    # appears later in the file.
    @property
    @property
    @classmethod
    def from_text(cls, text):
        """
        Make a BroadcastMessage from text in a json struct
        """
        raw = json.loads(text)
        event_msg = EventMessage.from_text(raw["event_message"])
        msg = cls(
            uri_name=raw["uri_name"],
            event_message=event_msg,
            publisher_connection_id=raw["publisher_connection_id"]
        )
        # preserve the originating node id rather than this node's
        msg.publisher_node_id = raw["publisher_node_id"]
        return msg
    @classmethod
    def from_bin(cls, bin):
        """
        Make a BroadcastMessage from a binary blob
        """
        raw = msgpack.unpackb(bin, raw=False)
        # NOTE(review): the msgpack serializer stores the EventMessage's
        # msgpack form, yet this decodes it with from_text — confirm which
        # representation is actually transmitted
        event_msg = EventMessage.from_text(raw["event_message"])
        msg = cls(
            uri_name=raw["uri_name"],
            event_message=event_msg,
            publisher_connection_id=raw["publisher_connection_id"]
        )
        msg.publisher_node_id = raw["publisher_node_id"]
        return msg
class Message(object):
    """
    Represent any WAMP message.

    The wire form is a list whose first element is a Code member; the
    remaining elements depend on the message type.
    """
    # NOTE(review): this copy of the class is missing its __init__ (which is
    # expected to set self.code / self.value / self.args / self.kwargs).
    # NOTE(review): "details" is a mutable class attribute — error() mutates
    # it, so the dict is shared by every instance; confirm this is intended.
    details = {}
    @property
    def id(self):
        """
        For all kinds of messages (except ERROR) that have [Request|id], it is
        in the second position of the array.

        Returns -1 when no integer request id is present.
        """
        if (len(self.value) > 1) and isinstance(self.value[1], int):
            return self.value[1]
        return -1
    @property
    def json(self):
        """
        Create a JSON representation of this message.
        """
        # deepcopy so the base64 conversion never mutates self.value
        message_value = deepcopy(self.value)
        return json.dumps(encode_bin_as_b64(message_value))
    @property
    def msgpack(self):
        """
        Create a MSGPack representation for this message.
        """
        message_value = deepcopy(self.value)
        # msgpack cannot serialize Code members directly; use raw ints
        for index, item in enumerate(message_value):
            if isinstance(item, Code):
                message_value[index] = item.value
        return msgpack.packb(message_value, use_bin_type=True)
    def error(self, text, info=None):
        """
        Add error description and additional information.

        This is useful for ABORT and ERROR messages.
        """
        self.details["message"] = text
        if info:
            self.details["details"] = info
    @classmethod
    def from_text(cls, text):
        """
        Decode text to JSON and return a Message object accordingly.
        """
        raw = decode_b64(json.loads(text))
        raw[0] = Code(raw[0])  # make it an object of type Code
        return cls(*raw)
    @classmethod
    def from_bin(cls, bin):
        """
        Decode binary blob to a message and return a Message object accordingly.
        """
        # NOTE(review): stray debug print left in — prints every raw blob
        print(bin)
        raw = msgpack.unpackb(bin, raw=False)
        raw[0] = Code(raw[0])  # make it an object of type Code
        return cls(*raw)
    def _update_args_and_kargs(self):
        """
        Append args and kwargs to the message value, according to their
        existence or not.

        WAMP requires Arguments to be present whenever ArgumentsKw is.
        """
        if self.kwargs:
            self.value.append(self.args)
            self.value.append(self.kwargs)
        else:
            if self.args:
                self.value.append(self.args)
# NOTE(review): the constructors of the per-type message classes below are
# missing from this copy of the file (stripped by the dataset filter); only
# the class docstrings remain.
class HelloMessage(Message):
    """
    Sent by a Client to initiate opening of a WAMP session:
    [HELLO, Realm|uri, Details|dict]

    https://github.com/tavendo/WAMP/blob/master/spec/basic.md#hello
    """
class AbortMessage(Message):
    """
    Both the Router and the Client may abort the opening of a WAMP session
    [ABORT, Details|dict, Reason|uri]

    https://github.com/tavendo/WAMP/blob/master/spec/basic.md#abort
    """
class WelcomeMessage(Message):
    """
    Sent from the server side to open a WAMP session.
    The WELCOME is a reply message to the Client's HELLO.
    [WELCOME, Session|id, Details|dict]

    https://github.com/tavendo/WAMP/blob/master/spec/basic.md#welcome
    """
class GoodbyeMessage(Message):
    """
    Both the Server and the Client may abort the opening of a WAMP session
    [ABORT, Details|dict, Reason|uri]
    """
class ResultMessage(Message):
    """
    Result of a call as returned by Dealer to Caller.

    [RESULT, CALL.Request|id, Details|dict]
    [RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]
    [RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]
    """
class CallMessage(Message):
    """
    Call as originally issued by the Caller to the Dealer.

    [CALL, Request|id, Options|dict, Procedure|uri]
    [CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]
    [CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]
    """
class InterruptMessage(Message):
    """
    Stop a progressive result before it's finished.

    [INTERRUPT, INVOCATION.Request|id, Options|dict]
    """
class InvocationMessage(Message):
    """
    Used by the dealer to request an RPC from a client. The client should respond with a YIELD message if successful.

    [INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]
    [INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]
    [INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]
    """
class YieldMessage(Message):
    """
    Used by the dealer to deliver the result of an RPC to the requesting client. The client should respond with a YIELD message if successful.

    [YIELD, INVOCATION.Request|id, Options|dict]
    [YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]
    [YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]
    """
class ErrorMessage(Message):
    """
    Error reply sent by a Peer as an error response to different kinds of
    requests.

    [ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]
    [ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri,
        Arguments|list]
    [ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri,
        Arguments|list, ArgumentsKw|dict]
    """
class SubscribeMessage(Message):
    """
    A Subscriber communicates its interest in a uri to the Server by sending
    a SUBSCRIBE message:

    [SUBSCRIBE, Request|id, Options|dict, uri|uri]
    """
class SubscribedMessage(Message):
    """
    If the Broker is able to fulfill and allow the subscription, it answers by
    sending a SUBSCRIBED message to the Subscriber:

    [SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]
    """
class RPCRegisterMessage(Message):
    """
    A Registerer communicates its interest in a uri to the Server by sending
    a REGISTER message:

    [REGISTER, Request|id, Options|dict, uri|uri]
    """
class RPCRegisteredMessage(Message):
    """
    If the Broker is able to fulfill and allow the registration, it answers by
    sending a REGISTERED message to the Registerer:

    [REGISTERED, REGISTER.Request|id, Registration|id]
    """
class PublishMessage(Message):
    """
    Sent by a Publisher to a Broker to publish an event.

    [PUBLISH, Request|id, Options|dict, uri|uri]
    [PUBLISH, Request|id, Options|dict, uri|uri, Arguments|list]
    [PUBLISH, Request|id, Options|dict, uri|uri, Arguments|list, ArgumentsKw|dict]
    """
class PublishedMessage(Message):
    """
    Acknowledge sent by a Broker to a Publisher for acknowledged publications.

    [PUBLISHED, PUBLISH.Request|id, Publication|id]
    """
class EventMessage(Message):
    """
    Event dispatched by Broker to Subscribers for subscription the event was matching.

    [EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]
    [EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]
    [EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]

    When transmitting an EventMessage between redis pubsubs the
    subscription_id will be omitted (it can only be resolved in the
    subscriber.)
    """
    # NOTE(review): the subscription_id property/setter bodies were stripped
    # by the dataset filter, leaving these decorators dangling (syntactically
    # broken in this copy of the file).
    @property
    @subscription_id.setter
class UnsubscribeMessage(Message):
    """
    Unsubscribe request sent by a Subscriber to a Broker to unsubscribe a subscription.

    [UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]
    """
class UnsubscribedMessage(Message):
    """
    Acknowledge sent by a Broker to a Subscriber to acknowledge unsubscription.

    [UNSUBSCRIBED, UNSUBSCRIBE.Request|id]
    """
# Dispatch table from WAMP message type code to the concrete Message
# subclass used to parse it; commented entries are unsupported codes.
CODE_TO_CLASS = {
    Code.HELLO: HelloMessage,
    Code.WELCOME: WelcomeMessage,
    Code.ABORT: AbortMessage,
    # CHALLENGE = 4
    # AUTHENTICATE = 5
    Code.GOODBYE: GoodbyeMessage,
    # HEARTBEAT = 7
    Code.ERROR: ErrorMessage,
    Code.PUBLISH: PublishMessage,
    Code.PUBLISHED: PublishedMessage,
    Code.SUBSCRIBE: SubscribeMessage,
    Code.SUBSCRIBED: SubscribedMessage,
    Code.UNSUBSCRIBE: UnsubscribeMessage,
    Code.UNSUBSCRIBED: UnsubscribedMessage,
    Code.EVENT: EventMessage,
    Code.CALL: CallMessage,
    # CANCEL = 49
    Code.RESULT: ResultMessage,
    Code.REGISTER: RPCRegisterMessage,  # 64
    Code.REGISTERED: RPCRegisteredMessage,  # 65
    # UNREGISTER = 66
    # UNREGISTERED = 67
    Code.INVOCATION: InvocationMessage,  # 68
    Code.INTERRUPT: InterruptMessage,
    # INTERRUPT = 69
    Code.YIELD: YieldMessage,  # 70
}
# Request codes whose failure is answered with an ERROR message
ERROR_PRONE_CODES = [Code.CALL, Code.SUBSCRIBE, Code.UNSUBSCRIBE, Code.PUBLISH]
def build_error_message(in_message, uri, description):
    """
    Return ErrorMessage instance (*) provided:
    - incoming message which generated error
    - error uri
    - error description

    (*) If the incoming message is not prone to an ERROR message response,
    return None (implicitly).
    """
    # first parse just far enough to learn the message type
    msg = Message.from_text(in_message)
    if msg.code in ERROR_PRONE_CODES:
        # re-parse with the concrete subclass for that code
        MsgClass = CODE_TO_CLASS[msg.code]
        msg = MsgClass.from_text(in_message)
        # NOTE(review): the base Message shown here exposes .id, not
        # .request_id — presumably the per-type subclasses set request_id in
        # their constructors; confirm.
        answer = ErrorMessage(
            request_code=msg.code,
            request_id=msg.request_id,
            uri=uri
        )
        answer.error(description)
        return answer
| 33.447251 | 143 | 0.649445 | """
WAMP messages definitions and serializers.
Compatible with WAMP Document Revision: RC3, 2014/08/25, available at:
https://github.com/tavendo/WAMP/blob/master/spec/basic.md
"""
import json
import msgpack
import uuid
from copy import deepcopy
from enum import IntEnum, Enum
from io import BytesIO
from base64 import b64encode, standard_b64decode
from wampnado.identifier import create_global_id
from wampnado.features import server_features, Options
PUBLISHER_NODE_ID = uuid.uuid4()
def decode_b64(s):
"""
Finds all the binary objects in the struct, and recursively converts them to base64 prepended by \0, per the WAMP standard.
"""
if isinstance(s, dict):
ret = {}
for k,v in s.items():
ret[k] = decode_b64(v)
return ret
elif isinstance(s, list) or isinstance(s, tuple):
ret = []
for v in s:
ret.append(decode_b64(v))
return ret
elif isinstance(s, str) and s.beginswith('\0'):
return standard_b64decode(s[1:])
else:
return s
def encode_bin_as_b64(s):
"""
Finds all the binary objects in the struct, and recursively converts them to base64 prepended by \0, per the WAMP standard.
"""
if isinstance(s, dict):
ret = {}
for k,v in s.items():
ret[k] = encode_bin_as_b64(v)
return ret
elif isinstance(s, list) or isinstance(s, tuple):
ret = []
for v in s:
ret.append(encode_bin_as_b64(v))
return ret
elif isinstance(s, bytes):
return '\0{}'.format(b64encode(s).decode('ascii'))
elif isinstance(s, Enum):
return encode_bin_as_b64(s.value)
else:
return s
class Code(IntEnum):
"""
Enum which represents currently supported WAMP messages.
"""
HELLO = 1
WELCOME = 2
ABORT = 3
# CHALLENGE = 4
# AUTHENTICATE = 5
GOODBYE = 6
# HEARTBEAT = 7
ERROR = 8
PUBLISH = 16
PUBLISHED = 17
SUBSCRIBE = 32
SUBSCRIBED = 33
UNSUBSCRIBE = 34
UNSUBSCRIBED = 35
EVENT = 36
CALL = 48
# CANCEL = 49
RESULT = 50
REGISTER = 64
REGISTERED = 65
# UNREGISTER = 66
# UNREGISTERED = 67
INVOCATION = 68
INTERRUPT = 69
YIELD = 70
class BroadcastMessage(object):
    """
    This is a message that a procedure may want delivered.

    This class is composed of an EventMessage and a uri name
    """
    def __init__(self, uri_name, event_message, publisher_connection_id):
        assert isinstance(event_message, EventMessage), "only event messages are supported"
        self.uri_name = uri_name
        self.event_message = event_message
        self.publisher_connection_id = publisher_connection_id
        # identifies which node published, so a node can skip its own echoes
        self.publisher_node_id = PUBLISHER_NODE_ID.hex
    @property
    def json(self):
        # the nested event message is serialized to its own json string
        info_struct = {
            "publisher_node_id": self.publisher_node_id,
            "publisher_connection_id": self.publisher_connection_id,
            "uri_name": self.uri_name,
            "event_message": self.event_message.json,
        }
        return json.dumps(encode_bin_as_b64(info_struct))
    @property
    def msgpack(self):
        # NOTE(review): stores the event message in msgpack form, but
        # from_bin below decodes it with from_text — confirm consistency
        info_struct = {
            "publisher_node_id": self.publisher_node_id,
            "publisher_connection_id": self.publisher_connection_id,
            "uri_name": self.uri_name,
            "event_message": self.event_message.msgpack,
        }
        return msgpack.packb(info_struct, use_bin_type=True)
    @classmethod
    def from_text(cls, text):
        """
        Make a BroadcastMessage from text in a json struct
        """
        raw = json.loads(text)
        event_msg = EventMessage.from_text(raw["event_message"])
        msg = cls(
            uri_name=raw["uri_name"],
            event_message=event_msg,
            publisher_connection_id=raw["publisher_connection_id"]
        )
        # preserve the originating node id rather than this node's
        msg.publisher_node_id = raw["publisher_node_id"]
        return msg
    @classmethod
    def from_bin(cls, bin):
        """
        Make a BroadcastMessage from a binary blob
        """
        raw = msgpack.unpackb(bin, raw=False)
        event_msg = EventMessage.from_text(raw["event_message"])
        msg = cls(
            uri_name=raw["uri_name"],
            event_message=event_msg,
            publisher_connection_id=raw["publisher_connection_id"]
        )
        msg.publisher_node_id = raw["publisher_node_id"]
        return msg
class Message(object):
    """
    Represent any WAMP message.

    ``value`` holds the wire representation: a list whose first element is
    the message ``Code`` followed by the message payload.
    """

    # Class-level default kept for backward compatibility with code that
    # reads Message.details; instances get their own dict in __init__.
    details = {}

    def __init__(self, code, *data, **kdata):
        self.code = code
        self.value = [code] + list(data)
        self.args = kdata.get("args", [])
        self.kwargs = kdata.get("kwargs", {})
        # Bug fix: error() used to mutate the *class-level* ``details``
        # dict, leaking error text across every Message instance.  Give
        # each instance its own dict instead.
        self.details = {}

    @property
    def id(self):
        """
        For all kinds of messages (except ERROR) that have [Request|id], it
        is in the second position of the array.  Returns -1 when absent.
        """
        if (len(self.value) > 1) and isinstance(self.value[1], int):
            return self.value[1]
        return -1

    @property
    def json(self):
        """Create a JSON representation of this message."""
        message_value = deepcopy(self.value)
        return json.dumps(encode_bin_as_b64(message_value))

    @property
    def msgpack(self):
        """Create a MSGPack representation for this message."""
        message_value = deepcopy(self.value)
        # msgpack cannot serialize Code members; replace them with the
        # underlying integer value.
        for index, item in enumerate(message_value):
            if isinstance(item, Code):
                message_value[index] = item.value
        return msgpack.packb(message_value, use_bin_type=True)

    def error(self, text, info=None):
        """
        Add error description and additional information.
        This is useful for ABORT and ERROR messages.
        """
        self.details["message"] = text
        if info:
            self.details["details"] = info

    @classmethod
    def from_text(cls, text):
        """Decode JSON text and return a Message object accordingly."""
        raw = decode_b64(json.loads(text))
        raw[0] = Code(raw[0])  # make it an object of type Code
        return cls(*raw)

    @classmethod
    def from_bin(cls, bin):
        """Decode a msgpack binary blob and return a Message object."""
        # (stray debug print of the raw blob removed)
        raw = msgpack.unpackb(bin, raw=False)
        raw[0] = Code(raw[0])  # make it an object of type Code
        return cls(*raw)

    def _update_args_and_kargs(self):
        """
        Append args and kwargs to the wire value, according to their
        existence.  WAMP requires args to be present (possibly empty)
        whenever kwargs are sent.
        """
        if self.kwargs:
            self.value.append(self.args)
            self.value.append(self.kwargs)
        else:
            if self.args:
                self.value.append(self.args)
class HelloMessage(Message):
    """
    Client -> Router message that initiates opening of a WAMP session:
    [HELLO, Realm|uri, Details|dict]

    https://github.com/tavendo/WAMP/blob/master/spec/basic.md#hello
    """

    def __init__(self, code=Code.HELLO, realm="", details=None):
        details = details or {}
        self.code = code
        self.realm = realm
        self.details = details
        self.value = [code, realm, details]
class AbortMessage(Message):
    """
    Sent by either peer to abort the opening of a WAMP session:
    [ABORT, Details|dict, Reason|uri]

    https://github.com/tavendo/WAMP/blob/master/spec/basic.md#abort
    """

    def __init__(self, code=Code.ABORT, details=None, reason=None):
        assert reason is not None, "AbortMessage must have a reason"
        details = details or {}
        self.code = code
        self.details = details
        self.reason = reason
        self.value = [code, details, reason]
class WelcomeMessage(Message):
    """
    Router -> Client reply to HELLO that opens the WAMP session:
    [WELCOME, Session|id, Details|dict]

    https://github.com/tavendo/WAMP/blob/master/spec/basic.md#welcome
    """

    def __init__(self, code=Code.WELCOME, session_id=None, details=None):
        # A fresh global session id is generated when none is supplied;
        # details default to the router's advertised feature set.
        sid = session_id or create_global_id()
        feats = details or server_features
        self.code = code
        self.session_id = sid
        self.details = feats
        self.value = [code, sid, feats]
class GoodbyeMessage(Message):
    """
    Sent by either peer to close an established WAMP session:
    [GOODBYE, Details|dict, Reason|uri]
    """

    def __init__(self, code=Code.GOODBYE, details=None, reason=None):
        details = details or {}
        reason = reason or ""
        self.code = code
        self.details = details
        self.reason = reason
        self.value = [code, details, reason]
class ResultMessage(Message):
    """
    Result of a call as returned by Dealer to Caller.
    [RESULT, CALL.Request|id, Details|dict]
    [RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list]
    [RESULT, CALL.Request|id, Details|dict, YIELD.Arguments|list, YIELD.ArgumentsKw|dict]
    """

    def __init__(self, code=Code.RESULT, request_id=None, details=None, args=None, kwargs=None):
        assert request_id is not None, "ResultMessage must have request_id"
        det = details or {}
        self.code = code
        self.request_id = request_id
        self.details = det
        self.args = args or []
        self.kwargs = kwargs or {}
        self.value = [code, request_id, det]
        # Appends args/kwargs to the wire value only when present.
        self._update_args_and_kargs()
class CallMessage(Message):
    """
    Call as originally issued by the Caller to the Dealer.
    [CALL, Request|id, Options|dict, Procedure|uri]
    [CALL, Request|id, Options|dict, Procedure|uri, Arguments|list]
    [CALL, Request|id, Options|dict, Procedure|uri, Arguments|list, ArgumentsKw|dict]
    """

    def __init__(self, code=Code.CALL, request_id=None, options=None,
                 procedure=None, args=None, kwargs=None):
        assert request_id is not None, "CallMessage must have request_id"
        assert procedure is not None, "CallMessage must have procedure"
        self.code = code
        self.request_id = request_id
        self.procedure = procedure
        # Bug fix: options/args/kwargs previously used shared mutable
        # default arguments ({} and [] evaluated once at definition time),
        # so mutations through one message leaked into later ones.
        self.options = Options(**(options or {}))
        self.args = args if args is not None else []
        self.kwargs = kwargs if kwargs is not None else {}
        self.value = [
            self.code,
            self.request_id,
            self.options,
            self.procedure,
        ]
        self._update_args_and_kargs()
class InterruptMessage(Message):
    """
    Stop a progressive result before it's finished.
    [INTERRUPT, INVOCATION.Request|id, Options|dict]
    """

    def __init__(self, code=Code.INTERRUPT, request_id=None, options=None):
        assert request_id is not None, "InterruptMessage must have request_id"
        # Bug fix: self.code was never assigned, so building self.value
        # below raised AttributeError (Message defines no class-level
        # 'code' attribute).  Also replaced the shared mutable default
        # options={} with a None sentinel.
        self.code = code
        self.request_id = request_id
        self.options = Options(**(options or {}))
        self.value = [
            self.code,
            self.request_id,
            self.options,
        ]
class InvocationMessage(Message):
    """
    Used by the dealer to request an RPC from a client. The client should respond with a YIELD message if successful.
    [INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict]
    [INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list]
    [INVOCATION, Request|id, REGISTERED.Registration|id, Details|dict, CALL.Arguments|list, CALL.ArgumentsKw|dict]
    """

    def __init__(self, code=Code.INVOCATION, request_id=None, registration_id=None,
                 details=None, args=None, kwargs=None):
        # A fresh request id is generated when the caller supplies none.
        if request_id is None:
            request_id = create_global_id()
        assert registration_id is not None, "InvocationMessage must have registration_id"
        self.code = code
        self.request_id = request_id
        # NOTE(review): a falsy (e.g. 0) registration_id silently becomes
        # {}; behavior preserved from the original -- confirm intended.
        self.registration_id = registration_id or {}
        # Bug fix: 'details' previously defaulted to a shared mutable dict
        # ({} evaluated once at definition time); use a None sentinel.
        self.details = details if details is not None else {}
        self.args = args
        self.kwargs = kwargs
        self.value = [
            self.code,
            self.request_id,
            self.registration_id,
            self.details,
        ]
        self._update_args_and_kargs()
class YieldMessage(Message):
    """
    Used by the client to deliver the result of an RPC back to the dealer.
    [YIELD, INVOCATION.Request|id, Options|dict]
    [YIELD, INVOCATION.Request|id, Options|dict, Arguments|list]
    [YIELD, INVOCATION.Request|id, Options|dict, Arguments|list, ArgumentsKw|dict]
    """

    def __init__(self, code=Code.YIELD, request_id=None, options=None, args=None, kwargs=None):
        assert request_id is not None, "YieldMessage must have request_id"
        self.code = code
        # Bug fix: the default options=None crashed in Options(**options)
        # ("argument of type 'NoneType' is not iterable"); substitute an
        # empty dict when no options are supplied.
        self.options = Options(**(options or {}))
        self.request_id = request_id
        self.args = args
        self.kwargs = kwargs
        self.value = [
            self.code,
            self.request_id,
            self.options,
        ]
        self._update_args_and_kargs()
class ErrorMessage(Message):
    """
    Error reply sent by a Peer as an error response to different kinds of
    requests.
    [ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]
    [ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri,
        Arguments|list]
    [ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri,
        Arguments|list, ArgumentsKw|dict]
    """

    def __init__(self, code=Code.ERROR, request_code=None, request_id=None,
                 details=None, uri=None, args=None, kwargs=None):
        assert request_code is not None, "ErrorMessage must have request_code"
        assert request_id is not None, "ErrorMessage must have request_id"
        assert uri is not None, "ErrorMessage must have uri"
        det = details or {}
        self.code = code
        self.request_code = request_code
        self.request_id = request_id
        self.details = det
        self.uri = uri
        self.args = args or []
        self.kwargs = kwargs or {}
        self.value = [code, request_code, request_id, det, uri]
        self._update_args_and_kargs()
class SubscribeMessage(Message):
    """
    A Subscriber communicates its interest in a uri to the Server by sending
    a SUBSCRIBE message:
    [SUBSCRIBE, Request|id, Options|dict, uri|uri]
    """

    def __init__(self, code=Code.SUBSCRIBE, request_id=None, options=None, uri=None):
        assert request_id is not None, "SubscribeMessage must have request_id"
        assert uri is not None, "SubscribeMessage must have uri"
        self.code = code
        self.request_id = request_id
        # Bug fix: the default options=None crashed in Options(**options);
        # substitute an empty dict when no options are supplied.
        self.options = Options(**(options or {}))
        self.uri = uri
        self.value = [self.code, self.request_id, self.options, self.uri]
class SubscribedMessage(Message):
    """
    Broker -> Subscriber acknowledgement that a subscription was accepted:
    [SUBSCRIBED, SUBSCRIBE.Request|id, Subscription|id]
    """

    def __init__(self, code=Code.SUBSCRIBED, request_id=None, subscription_id=None):
        assert request_id is not None, "SubscribedMessage must have request_id"
        assert subscription_id is not None, "SubscribedMessage must have subscription_id"
        self.code = code
        self.request_id = request_id
        self.subscription_id = subscription_id
        self.value = [code, request_id, subscription_id]
class RPCRegisterMessage(Message):
    """
    A Registerer communicates its interest in a uri to the Server by sending
    a REGISTER message:
    [REGISTER, Request|id, Options|dict, uri|uri]
    """

    def __init__(self, code=Code.REGISTER, request_id=None, options=None, uri=None):
        assert request_id is not None, "RegisterMessage must have request_id"
        assert uri is not None, "RegisterMessage must have uri"
        self.code = code
        self.request_id = request_id
        # Bug fix: the default options=None crashed in Options(**options);
        # substitute an empty dict when no options are supplied.
        self.options = Options(**(options or {}))
        self.uri = uri
        self.value = [self.code, self.request_id, self.options, self.uri]
class RPCRegisteredMessage(Message):
    """
    Broker -> Registerer acknowledgement that the registration was accepted:
    [REGISTERED, REGISTER.Request|id, Registration|id]
    """

    def __init__(self, code=Code.REGISTERED, request_id=None, registration_id=None):
        assert request_id is not None, "RegisteredMessage must have request_id"
        # A fresh global registration id is generated when none is supplied.
        if registration_id is None:
            registration_id = create_global_id()
        self.code = code
        self.request_id = request_id
        self.registration_id = registration_id
        self.value = [code, request_id, registration_id]
class PublishMessage(Message):
    """
    Sent by a Publisher to a Broker to publish an event.
    [PUBLISH, Request|id, Options|dict, uri|uri]
    [PUBLISH, Request|id, Options|dict, uri|uri, Arguments|list]
    [PUBLISH, Request|id, Options|dict, uri|uri, Arguments|list, ArgumentsKw|dict]
    """

    def __init__(self, code=Code.PUBLISH, request_id=None, options=None,
                 uri_name=None, args=None, kwargs=None):
        assert request_id is not None, "PublishMessage must have request_id"
        assert uri_name is not None, "PublishMessage must have uri"
        self.code = code
        self.request_id = request_id
        # Bug fix: 'options' previously used a shared mutable default dict
        # ({} evaluated once at definition time); use a None sentinel.
        self.options = Options(**(options or {}))
        self.uri_name = uri_name
        self.args = args or []
        self.kwargs = kwargs or {}
        self.value = [self.code, self.request_id, self.options, self.uri_name]
        self._update_args_and_kargs()
class PublishedMessage(Message):
    """
    Broker -> Publisher acknowledgement of an acknowledged publication:
    [PUBLISHED, PUBLISH.Request|id, Publication|id]
    """

    def __init__(self, code=Code.PUBLISHED, request_id=None, publication_id=None):
        assert request_id is not None, "PublishedMessage must have request_id"
        assert publication_id is not None, "PublishedMessage must have publication_id"
        self.code = code
        self.request_id = request_id
        self.publication_id = publication_id
        self.value = [code, request_id, publication_id]
class EventMessage(Message):
    """
    Event dispatched by Broker to Subscribers for subscription the event was matching.
    [EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict]
    [EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list]
    [EVENT, SUBSCRIBED.Subscription|id, PUBLISHED.Publication|id, Details|dict, PUBLISH.Arguments|list, PUBLISH.ArgumentsKw|dict]
    When transmitting an EventMessage between redis pubsubs the
    subscription_id will be omitted (it can only be resolved in the
    subscriber.)
    """
    def __init__(self, code=Code.EVENT, subscription_id=None, publication_id=None, details=None, args=None, kwargs=None):
        assert publication_id is not None, "EventMessage must have publication_id"
        self.code = code
        # Stored privately so the subscription_id setter below can keep the
        # wire value (self.value[1]) in sync when it is filled in later.
        self._subscription_id = subscription_id
        self.publication_id = publication_id
        self.details = details or {}
        self.args = args or []
        self.kwargs = kwargs or {}
        self.value = [self.code, self.subscription_id, self.publication_id, self.details]
        self._update_args_and_kargs()
    @property
    def subscription_id(self):
        # Read-through to the private backing attribute.
        return self._subscription_id
    @subscription_id.setter
    def subscription_id(self, id_):
        # Keep the wire representation consistent with the attribute.
        self._subscription_id = id_
        self.value[1] = id_
class UnsubscribeMessage(Message):
    """
    Subscriber -> Broker request to drop an existing subscription:
    [UNSUBSCRIBE, Request|id, SUBSCRIBED.Subscription|id]
    """

    def __init__(self, code=Code.UNSUBSCRIBE, request_id=None, subscription_id=None):
        assert request_id is not None, "UnsubscribeMessage must have request_id"
        assert subscription_id is not None, "UnsubscribeMessage must have subscription_id"
        self.code = code
        self.request_id = request_id
        self.subscription_id = subscription_id
        self.value = [code, request_id, subscription_id]
class UnsubscribedMessage(Message):
    """
    Broker -> Subscriber acknowledgement of an unsubscription:
    [UNSUBSCRIBED, UNSUBSCRIBE.Request|id]
    """

    def __init__(self, code=Code.UNSUBSCRIBED, request_id=None):
        assert request_id is not None, "UnsubscribedMessage must have request_id"
        self.code = code
        self.request_id = request_id
        self.value = [code, request_id]
# Dispatch table mapping each WAMP message code to the concrete Message
# subclass used to parse it.  Commented entries are codes this router does
# not implement.
CODE_TO_CLASS = {
    Code.HELLO: HelloMessage,
    Code.WELCOME: WelcomeMessage,
    Code.ABORT: AbortMessage,
    # CHALLENGE = 4
    # AUTHENTICATE = 5
    Code.GOODBYE: GoodbyeMessage,
    # HEARTBEAT = 7
    Code.ERROR: ErrorMessage,
    Code.PUBLISH: PublishMessage,
    Code.PUBLISHED: PublishedMessage,
    Code.SUBSCRIBE: SubscribeMessage,
    Code.SUBSCRIBED: SubscribedMessage,
    Code.UNSUBSCRIBE: UnsubscribeMessage,
    Code.UNSUBSCRIBED: UnsubscribedMessage,
    Code.EVENT: EventMessage,
    Code.CALL: CallMessage,
    # CANCEL = 49
    Code.RESULT: ResultMessage,
    Code.REGISTER: RPCRegisterMessage, # 64
    Code.REGISTERED: RPCRegisteredMessage, # 65
    # UNREGISTER = 66
    # UNREGISTERED = 67
    Code.INVOCATION: InvocationMessage, # 68
    Code.INTERRUPT: InterruptMessage,
    # INTERRUPT = 69
    Code.YIELD: YieldMessage, # 70
}
# Message types that receive an ERROR reply when the request fails.
ERROR_PRONE_CODES = [Code.CALL, Code.SUBSCRIBE, Code.UNSUBSCRIBE, Code.PUBLISH]
def build_error_message(in_message, uri, description):
    """
    Build an ErrorMessage replying to ``in_message`` (raw JSON text),
    given the error uri and a human-readable description.

    Returns None when the incoming message type is not one that takes an
    ERROR response.
    """
    generic = Message.from_text(in_message)
    if generic.code not in ERROR_PRONE_CODES:
        return None
    # Re-parse with the concrete class so request_id is available.
    specific = CODE_TO_CLASS[generic.code].from_text(in_message)
    answer = ErrorMessage(
        request_code=specific.code,
        request_id=specific.request_id,
        uri=uri
    )
    answer.error(description)
    return answer
| 9,304 | 0 | 662 |
b981440ef6dd9840fe7b1f1f885870c5756d942a | 6,285 | py | Python | instagram/bind.py | bearprada/instagram-tag-monitor | 3cad22d9abd3b44c9c8714bb1c3cb41a3c876c5f | [
"MIT"
] | 2 | 2015-12-19T08:04:28.000Z | 2017-07-28T10:25:12.000Z | instagram/bind.py | bearprada/instagram-tag-monitor | 3cad22d9abd3b44c9c8714bb1c3cb41a3c876c5f | [
"MIT"
] | null | null | null | instagram/bind.py | bearprada/instagram-tag-monitor | 3cad22d9abd3b44c9c8714bb1c3cb41a3c876c5f | [
"MIT"
] | null | null | null | import urllib
from oauth2 import OAuth2Request
import re
from json_import import simplejson
re_path_template = re.compile('{\w+}')
| 40.811688 | 128 | 0.555449 | import urllib
from oauth2 import OAuth2Request
import re
from json_import import simplejson
re_path_template = re.compile('{\w+}')
def encode_string(value):
    # Python 2 helper: UTF-8-encode unicode values, stringify anything else.
    return value.encode('utf-8') \
        if isinstance(value, unicode) else str(value)
class InstagramClientError(Exception):
    """Client-side failure (bad arguments, unparseable response, etc.)."""
    def __init__(self, error_message):
        self.error_message = error_message
    def __str__(self):
        return self.error_message
class InstagramAPIError(Exception):
    """Error reported by the Instagram API itself (non-200 meta code)."""
    # NOTE: extra *args/**kwargs are accepted but ignored.
    def __init__(self, status_code, error_type, error_message, *args, **kwargs):
        self.status_code = status_code
        self.error_type = error_type
        self.error_message = error_message
    def __str__(self):
        return "(%s) %s-%s" % (self.status_code, self.error_type, self.error_message)
def bind_method(**config):
    """
    Factory producing an API method bound to the given endpoint config
    (path template, HTTP method, accepted parameters, response handling).
    Returns a callable of the form f(api, *args, **kwargs).
    Python 2 code (iteritems, urllib.quote, unicode handling).
    """
    class InstagramAPIMethod(object):
        # Endpoint configuration captured from the factory's kwargs.
        path = config['path']
        method = config.get('method', 'GET')
        accepts_parameters = config.get("accepts_parameters", [])
        requires_target_user = config.get('requires_target_user', False)
        paginates = config.get('paginates', False)
        root_class = config.get('root_class', None)
        response_type = config.get("response_type", "list")
        include_secret = config.get("include_secret", False)
        objectify_response = config.get("objectify_response", True)
        def __init__(self, api, *args, **kwargs):
            self.api = api
            # Request-level options are popped so they are not sent as
            # API parameters.
            self.as_generator = kwargs.pop("as_generator", False)
            self.return_json = kwargs.pop("return_json", False)
            self.max_pages = kwargs.pop("max_pages", 3)
            self.parameters = {}
            self._build_parameters(args, kwargs)
            self._build_path()
        def _build_parameters(self, args, kwargs):
            """Map positional args to declared parameter names, merge kwargs."""
            # via tweepy https://github.com/joshthecoder/tweepy/
            for index, value in enumerate(args):
                if value is None:
                    continue
                try:
                    self.parameters[self.accepts_parameters[index]] = encode_string(value)
                except IndexError:
                    raise InstagramClientError("Too many arguments supplied")
            for key, value in kwargs.iteritems():
                if value is None:
                    continue
                if key in self.parameters:
                    raise InstagramClientError("Parameter %s already supplied" % key)
                self.parameters[key] = encode_string(value)
            # Default user_id to the authenticated user ('self') when the
            # endpoint takes one and none was supplied.
            if 'user_id' in self.accepts_parameters and not 'user_id' in self.parameters \
               and not self.requires_target_user:
                self.parameters['user_id'] = 'self'
        def _build_path(self):
            """Substitute {placeholders} in the path template and append format."""
            for variable in re_path_template.findall(self.path):
                name = variable.strip('{}')
                try:
                    value = urllib.quote(self.parameters[name])
                except KeyError:
                    raise Exception('No parameter value found for path variable: %s' % name)
                # Path variables are consumed; they must not also be sent
                # as query/body parameters.
                del self.parameters[name]
                self.path = self.path.replace(variable, value)
            self.path = self.path + '.%s' % self.api.format
        def _do_api_request(self, url, method="GET", body=None, headers=None):
            """Perform one HTTP request; return (responses, next_page_url)."""
            headers = headers or {}
            response, content = OAuth2Request(self.api).make_request(url, method=method, body=body, headers=headers)
            if response['status'] == '503':
                raise InstagramAPIError(response['status'], "Rate limited", "Your client is making too many request per second")
            try:
                content_obj = simplejson.loads(content)
            except ValueError:
                raise InstagramClientError('Unable to parse response, not valid JSON.')
            api_responses = []
            status_code = content_obj['meta']['code']
            if status_code == 200:
                if not self.objectify_response:
                    return content_obj, None
                # Shape of the result depends on the endpoint's declared
                # response_type: a list of objects, a single entry, or empty.
                if self.response_type == 'list':
                    for entry in content_obj['data']:
                        if self.return_json:
                            api_responses.append(entry)
                        else:
                            obj = self.root_class.object_from_dictionary(entry)
                            api_responses.append(obj)
                elif self.response_type == 'entry':
                    data = content_obj['data']
                    if self.return_json:
                        api_responses = data
                    else:
                        api_responses = self.root_class.object_from_dictionary(data)
                elif self.response_type == 'empty':
                    pass
                return api_responses, content_obj.get('pagination', {}).get('next_url')
            else:
                raise InstagramAPIError(status_code, content_obj['meta']['error_type'], content_obj['meta']['error_message'])
        def _paginator_with_url(self, url, method="GET", body=None, headers=None):
            """Generator following pagination links up to max_pages pages."""
            headers = headers or {}
            pages_read = 0
            while url and pages_read < self.max_pages:
                api_responses, url = self._do_api_request(url, method, body, headers)
                pages_read += 1
                yield api_responses, url
            return
        def execute(self):
            """Run the request; returns data, (data, next) or a generator."""
            url, method, body, headers = OAuth2Request(self.api).prepare_request(self.method,
                                                                                 self.path,
                                                                                 self.parameters,
                                                                                 include_secret=self.include_secret)
            if self.as_generator:
                return self._paginator_with_url(url, method, body, headers)
            else:
                # NOTE: 'next' shadows the builtin; kept for byte-compat.
                content, next = self._do_api_request(url, method, body, headers)
                if self.paginates:
                    return content, next
                else:
                    return content
    def _call(api, *args, **kwargs):
        method = InstagramAPIMethod(api, *args, **kwargs)
        return method.execute()
    return _call
| 5,919 | 31 | 199 |
07c776590fbaaa6dfaf168c37f9d45306a336f43 | 1,142 | py | Python | cleaning_ex/cleaner.py | cltrudeau/codingnomads | 2a13f36186a5d0fc85ea54690e621b7ba9e2041e | [
"MIT"
] | 1 | 2020-05-06T17:34:33.000Z | 2020-05-06T17:34:33.000Z | cleaning_ex/cleaner.py | cltrudeau/codingnomads | 2a13f36186a5d0fc85ea54690e621b7ba9e2041e | [
"MIT"
] | 5 | 2021-03-31T19:46:09.000Z | 2021-09-22T19:34:02.000Z | cleaning_ex/cleaner.py | cltrudeau/codingnomads | 2a13f36186a5d0fc85ea54690e621b7ba9e2041e | [
"MIT"
] | 1 | 2020-06-04T20:58:21.000Z | 2020-06-04T20:58:21.000Z | #!/usr/bin/env python
from pathlib import Path
import unicodedata
output = []
filename = Path(__file__).parent.resolve() / "data/books.tsv"
with open(filename) as handle:
    lines = handle.readlines()
# lines will contain a row for each line in the file
# Loop through lines and do data scrubbing, put each result in output
for line in lines:
    # Rows are tab-separated: number, "Last, First (note)", title, ...
    fields = line.split('\t')
    # Start with known blank values
    num = fields[0]
    last_name = ''
    first_name = ''
    title = ''
    date = ''
    subjects = ''
    if len(fields) > 1:
        # Has a "name" field
        parts = fields[1].split(',')
        first_name = ''
        last_name = parts[0]
        if len(parts) > 1:
            first_name = parts[1]
            if "(" in first_name:
                # Drop any parenthesized note trailing the first name.
                first_name, _ = first_name.split('(')
    if len(fields) > 2:
        title = fields[2]
        # Normalize accented characters to ASCII equivalents, dropping
        # anything with no ASCII form.
        nfkd_form = unicodedata.normalize('NFKD', title)
        title = nfkd_form.encode('ASCII', 'ignore')
        title = title.decode('utf-8')
    # Re-emit the scrubbed row as tab-separated text.
    out_line = f'{num}\t{last_name}\t{first_name}\t{title}'
    output.append(out_line)
for line in output:
    print(line)
| 22.84 | 69 | 0.590193 | #!/usr/bin/env python
from pathlib import Path
import unicodedata
output = []
filename = Path(__file__).parent.resolve() / "data/books.tsv"
with open(filename) as handle:
lines = handle.readlines()
# lines will contain a row for each line in the file
# Loop through lines and do data scrubbing, put each result in output
for line in lines:
fields = line.split('\t')
# Start with known blank values
num = fields[0]
last_name = ''
first_name = ''
title = ''
date = ''
subjects = ''
if len(fields) > 1:
# Has a "name" field
parts = fields[1].split(',')
first_name = ''
last_name = parts[0]
if len(parts) > 1:
first_name = parts[1]
if "(" in first_name:
first_name, _ = first_name.split('(')
if len(fields) > 2:
title = fields[2]
nfkd_form = unicodedata.normalize('NFKD', title)
title = nfkd_form.encode('ASCII', 'ignore')
title = title.decode('utf-8')
out_line = f'{num}\t{last_name}\t{first_name}\t{title}'
output.append(out_line)
for line in output:
print(line)
| 0 | 0 | 0 |
0b2cf5747c8d26ddf32a96fbcf1bcecd26618ae5 | 417 | py | Python | reference_solutions/upload_bin.py | yahoo/hardware-hackme-1 | d7acbf36854b088a34419f9697f64aaf0e3be83c | [
"MIT"
] | 4 | 2020-04-17T20:22:15.000Z | 2020-05-02T15:04:41.000Z | reference_solutions/upload_bin.py | yahoo/hardware-hackme-1 | d7acbf36854b088a34419f9697f64aaf0e3be83c | [
"MIT"
] | null | null | null | reference_solutions/upload_bin.py | yahoo/hardware-hackme-1 | d7acbf36854b088a34419f9697f64aaf0e3be83c | [
"MIT"
] | 1 | 2020-05-04T01:32:05.000Z | 2020-05-04T01:32:05.000Z | #!/usr/bin/env python3
# Copyright 2020, Verizon Media
# Licensed under the terms of the MIT license. See LICENSE file in project root for terms.
import binascii
import serial
import subprocess
import sys
# Usage: upload_bin.py <serial-port> <binary-file>
in_serport = sys.argv[1]
in_fn = sys.argv[2]
with open(in_fn, 'rb') as f:
    payload = f.read()
ser = serial.Serial(in_serport, 115200, timeout=1)
# Write one byte at a time, flushing after every byte -- presumably to
# pace the receiver so it does not drop data on bulk writes; TODO confirm.
for b in payload:
    ser.write(bytes([b]))
    ser.flush()
| 18.954545 | 90 | 0.721823 | #!/usr/bin/env python3
# Copyright 2020, Verizon Media
# Licensed under the terms of the MIT license. See LICENSE file in project root for terms.
import binascii
import serial
import subprocess
import sys
in_serport = sys.argv[1]
in_fn = sys.argv[2]
with open(in_fn, 'rb') as f:
payload = f.read()
ser = serial.Serial(in_serport, 115200, timeout=1)
# WTF?!
for b in payload:
ser.write(bytes([b]))
ser.flush()
| 0 | 0 | 0 |
87ed6e9506b6c62eb792dd5aa392831ff1405747 | 172 | py | Python | main.py | ArKits/Tuberc | 2506f6141c68e24ac2f2371362faef87876e8a53 | [
"MIT"
] | 1 | 2018-02-11T08:41:04.000Z | 2018-02-11T08:41:04.000Z | main.py | ArKits/tuberc | 2506f6141c68e24ac2f2371362faef87876e8a53 | [
"MIT"
] | null | null | null | main.py | ArKits/tuberc | 2506f6141c68e24ac2f2371362faef87876e8a53 | [
"MIT"
] | null | null | null | from app import app, db
from app.models import User, Channel
@app.shell_context_processor | 28.666667 | 55 | 0.738372 | from app import app, db
from app.models import User, Channel
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'User': User, 'Channel': Channel} | 60 | 0 | 22 |
e7ecca1e60e9ec1066dc078308487bb237b3ca10 | 7,846 | py | Python | skills_ml/job_postings/corpora/__init__.py | bhagyaramgpo/skills-ml | be520fc2a2f88bff756d25e57c3378a465a1dcb2 | [
"MIT"
] | 147 | 2016-12-05T19:45:05.000Z | 2022-02-17T03:03:28.000Z | skills_ml/job_postings/corpora/__init__.py | bhagyaramgpo/skills-ml | be520fc2a2f88bff756d25e57c3378a465a1dcb2 | [
"MIT"
] | 390 | 2016-12-02T03:11:13.000Z | 2022-03-28T22:08:20.000Z | skills_ml/job_postings/corpora/__init__.py | bhagyaramgpo/skills-ml | be520fc2a2f88bff756d25e57c3378a465a1dcb2 | [
"MIT"
] | 66 | 2017-12-14T16:33:24.000Z | 2022-02-17T03:03:31.000Z | from random import randint
from skills_ml.algorithms.nlp import clean_html, clean_str, lowercase_strip_punc, word_tokenize, sentence_tokenize, section_extract, strip_bullets_from_line
from gensim.models.doc2vec import TaggedDocument
from skills_utils.common import safe_get
class CorpusCreator(object):
"""
A base class for objects that convert common schema
job listings into a corpus in documnet level suitable for use by
machine learning algorithms or specific tasks.
Example:
```python
from skills_ml.job_postings.common_schema import JobPostingCollectionSample
from skills_ml.job_postings.corpora.basic import CorpusCreator
job_postings_generator = JobPostingCollectionSample()
# Default will include all the cleaned job postings
corpus = CorpusCreator(job_postings_generator)
# For getting a the raw job postings without any cleaning
corpus = CorpusCreator(job_postings_generator, raw=True)
```
Attributes:
job_posting_generator (generator): an iterable that generates JSON strings.
Each string is expected to represent a job listing
conforming to the common schema
See sample_job_listing.json for an example of this schema
document_schema_fields (list): an list of schema fields to be included
raw (bool): a flag whether to return the raw documents or transformed documents
Yield:
(dict): a dictinary only with selected fields as keys and corresponding raw/cleaned value
"""
@property
class SimpleCorpusCreator(CorpusCreator):
"""
An object that transforms job listing documents by picking
important schema fields and returns them as one large lowercased string
"""
class Doc2VecGensimCorpusCreator(CorpusCreator):
"""Corpus for training Gensim Doc2Vec
An object that transforms job listing documents by picking
important schema fields and yields them as one large cleaned array of words
Example:
```python
from skills_ml.job_postings.common_schema import JobPostingCollectionSample
from skills_ml.job_postings.corpora.basic import Doc2VecGensimCorpusCreator
job_postings_generator = JobPostingCollectionSample()
corpus = Doc2VecGensimCorpusCreator(job_postings_generator)
Attributes:
job_posting_generator (generator): a job posting generator
document_schema_fields (list): an list of schema fields to be included
"""
class Word2VecGensimCorpusCreator(CorpusCreator):
"""
An object that transforms job listing documents by picking
important schema fields and yields them as one large cleaned array of words
"""
class JobCategoryCorpusCreator(CorpusCreator):
"""
An object that extract the label of each job listing document which could be onet soc code or
occupationalCategory and yields them as a lowercased string
"""
document_schema_fields = [
'occupationalCategory']
class SectionExtractWord2VecCorpusCreator(Word2VecGensimCorpusCreator):
"""Only return the contents of the configured section headers.
Heavily utilizes skills_ml.algorithms.nlp.section_extract.
For more detail on how to define 'sections', refer to its docstring.
"""
class RawCorpusCreator(CorpusCreator):
"""
An object that yields the joined raw string of job posting
"""
| 39.034826 | 156 | 0.686974 | from random import randint
from skills_ml.algorithms.nlp import clean_html, clean_str, lowercase_strip_punc, word_tokenize, sentence_tokenize, section_extract, strip_bullets_from_line
from gensim.models.doc2vec import TaggedDocument
from skills_utils.common import safe_get
class CorpusCreator(object):
    """
    A base class for objects that convert common schema
    job listings into a corpus in document level suitable for use by
    machine learning algorithms or specific tasks.

    Example:
    ```python
    from skills_ml.job_postings.common_schema import JobPostingCollectionSample
    from skills_ml.job_postings.corpora.basic import CorpusCreator

    job_postings_generator = JobPostingCollectionSample()

    # Default will include all the cleaned job postings
    corpus = CorpusCreator(job_postings_generator)

    # For getting the raw job postings without any cleaning
    corpus = CorpusCreator(job_postings_generator, raw=True)
    ```

    Attributes:
        job_posting_generator (generator): an iterable that generates JSON strings.
            Each string is expected to represent a job listing
            conforming to the common schema
        document_schema_fields (list): a list of schema fields to be included
        raw (bool): a flag whether to return the raw documents or transformed documents

    Yield:
        (dict): a dictionary only with selected fields as keys and corresponding raw/cleaned value
    """

    def __init__(self, job_posting_generator=None, document_schema_fields=None,
                 raw=False):
        self.job_posting_generator = job_posting_generator
        self.raw = raw
        # Bug fix: the default field list used to be a shared mutable
        # default argument, so mutating it on one instance leaked into all
        # other instances; build a fresh list per instance instead.
        self.document_schema_fields = (
            ['description', 'experienceRequirements', 'qualifications', 'skills']
            if document_schema_fields is None else document_schema_fields
        )
        self.join_spaces = ' '.join
        self.key = ['onet_soc_code']

    @property
    def metadata(self):
        """Provenance metadata: this class's dotted path plus the generator's metadata."""
        meta_dict = {'corpus_creator': ".".join([self.__module__, self.__class__.__name__])}
        if self.job_posting_generator:
            meta_dict.update(self.job_posting_generator.metadata)
        return meta_dict

    def _clean(self, document):
        """Strip HTML and collapse whitespace in each selected field, in place."""
        for f in self.document_schema_fields:
            try:
                cleaned = clean_html(document[f]).replace('\n', '')
                cleaned = " ".join(cleaned.split())
                document[f] = cleaned
            except KeyError:
                # Field absent from this posting; leave the document as-is.
                pass
        return document

    def _transform(self, document):
        """Return the raw joined string when raw=True, else the cleaned document."""
        if self.raw:
            return self._join(document)
        else:
            return self._clean(document)

    def _join(self, document):
        """Join the selected fields (missing ones as '') into one string."""
        return self.join_spaces([
            document.get(field, '') for field in self.document_schema_fields
        ])

    def __iter__(self):
        for document in self.job_posting_generator:
            # Keep only the selected schema fields before transforming.
            document = {key: document[key] for key in self.document_schema_fields}
            yield self._transform(document)
class SimpleCorpusCreator(CorpusCreator):
    """
    Corpus creator that picks the important schema fields from each job
    listing and returns them as one large lowercased string.
    """
    def _clean(self, document):
        # Lowercase/strip punctuation from every selected field (absent
        # fields contribute an empty string), then join with single spaces.
        pieces = []
        for field in self.document_schema_fields:
            pieces.append(lowercase_strip_punc(document.get(field, '')))
        return self.join_spaces(pieces)
class Doc2VecGensimCorpusCreator(CorpusCreator):
    """Corpus for training Gensim Doc2Vec

    An object that transforms job listing documents by picking
    important schema fields and yields them as one large cleaned array of words

    Example:
    ```python
    from skills_ml.job_postings.common_schema import JobPostingCollectionSample
    from skills_ml.job_postings.corpora.basic import Doc2VecGensimCorpusCreator

    job_postings_generator = JobPostingCollectionSample()
    corpus = Doc2VecGensimCorpusCreator(job_postings_generator)
    ```

    Attributes:
        job_posting_generator (generator): a job posting generator
        document_schema_fields (list): a list of schema fields to be included
        lookup (dict): maps each integer document tag to its onet_soc_code
    """
    def __init__(self, job_posting_generator, document_schema_fields=None, *args, **kwargs):
        # Build the default per-instance to avoid a shared mutable default.
        if document_schema_fields is None:
            document_schema_fields = ['description', 'experienceRequirements',
                                      'qualifications', 'skills']
        super().__init__(job_posting_generator, document_schema_fields, *args, **kwargs)
        self.lookup = {}
        # self.lookup was just created empty, so the first tag is always 0;
        # the original conditional on a non-empty lookup was dead code.
        self.k = 0

    def _clean(self, document):
        """Clean each selected field; missing fields contribute an empty string."""
        return self.join_spaces([
            clean_str(document.get(field, ''))
            for field in self.document_schema_fields
        ])

    def _transform(self, document):
        """Wrap the cleaned, tokenized document in a gensim TaggedDocument."""
        words = self._clean(document).split()
        tag = [self.k]
        return TaggedDocument(words, tag)

    def __iter__(self):
        for document in self.job_posting_generator:
            # Remember which occupation code belongs to this tag so trained
            # document vectors can be mapped back after training.
            self.lookup[self.k] = safe_get(document, *self.key)
            yield self._transform(document)
            self.k += 1
class Word2VecGensimCorpusCreator(CorpusCreator):
    """
    An object that transforms job listing documents by picking important
    schema fields and yields them, sentence by sentence, as cleaned arrays
    of words suitable for training Gensim Word2Vec.
    """
    def __init__(self, job_posting_generator, document_schema_fields=None, *args, **kwargs):
        # Build the default per-instance to avoid a shared mutable default.
        if document_schema_fields is None:
            document_schema_fields = ['description', 'experienceRequirements',
                                      'qualifications', 'skills']
        super().__init__(job_posting_generator, document_schema_fields, *args, **kwargs)

    def _clean(self, document):
        """Clean each selected field; missing fields contribute an empty string."""
        return self.join_spaces([
            clean_str(document.get(field, ''))
            for field in self.document_schema_fields
        ])

    def _transform(self, document):
        """Return a list of tokenized sentences from the raw or cleaned text."""
        text = self._join(document) if self.raw else self._clean(document)
        return [word_tokenize(s) for s in sentence_tokenize(text)]

    def __iter__(self):
        for document in self.job_posting_generator:
            # Only keep fields actually present: _clean/_join tolerate absence,
            # so a missing field should not raise KeyError here.
            document = {key: document[key]
                        for key in self.document_schema_fields
                        if key in document}
            sentences = self._transform(document)
            for sentence in sentences:
                yield sentence
class JobCategoryCorpusCreator(CorpusCreator):
    """
    An object that extract the label of each job listing document which could be onet soc code or
    occupationalCategory and yields them as a lowercased string
    """
    # NOTE(review): this class attribute is shadowed by the instance attribute
    # assigned in CorpusCreator.__init__ (whose default is the description
    # fields), so it only takes effect if the caller explicitly passes
    # document_schema_fields=['occupationalCategory'] -- confirm intent.
    document_schema_fields = [
        'occupationalCategory']
    def _transform(self, document):
        # Lowercase and strip punctuation from each label field, joined by spaces.
        return self.join_spaces([
            lowercase_strip_punc(document[field])
            for field in self.document_schema_fields
        ])
class SectionExtractWord2VecCorpusCreator(Word2VecGensimCorpusCreator):
    """Only return the contents of the configured section headers.

    Heavily utilizes skills_ml.algorithms.nlp.section_extract.
    For more detail on how to define 'sections', refer to its docstring.
    """
    def __init__(self, section_regex, *args, **kwargs):
        """
        Args:
            section_regex: pattern describing the section headers whose
                contents should be kept (passed to section_extract)
        """
        super().__init__(*args, **kwargs)
        self.section_regex = section_regex
    def _transform(self, document):
        # Keep only the lines found under matching sections of the description,
        # strip their leading bullets, then clean and tokenize each line.
        lines_from_section = section_extract(self.section_regex, document['description'])
        return [word_tokenize(clean_str(strip_bullets_from_line(line.text))) for line in lines_from_section]
class RawCorpusCreator(CorpusCreator):
    """
    Yields each job posting's selected fields joined into one raw string.
    """
    def __init__(self, job_posting_generator,
                 document_schema_fields=['description','experienceRequirements', 'qualifications', 'skills']):
        super().__init__(job_posting_generator, document_schema_fields)

    def _transform(self, document):
        # No cleaning at all: concatenate the field values as-is.
        parts = [document[field] for field in self.document_schema_fields]
        return self.join_spaces(parts)
| 3,845 | 0 | 533 |
52777aa0752ed891ab623cd6d0d931391f0f9e8a | 9,901 | py | Python | gym_bot_app/models.py | talorabr/RashatzimBot | 3d56ecd77c8cc8cf78310e4675f68a4ca3b1616e | [
"Apache-2.0"
] | null | null | null | gym_bot_app/models.py | talorabr/RashatzimBot | 3d56ecd77c8cc8cf78310e4675f68a4ca3b1616e | [
"Apache-2.0"
] | null | null | null | gym_bot_app/models.py | talorabr/RashatzimBot | 3d56ecd77c8cc8cf78310e4675f68a4ca3b1616e | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
from __future__ import unicode_literals
from datetime import datetime, timedelta
from mongoengine import (Document,
ListField,
StringField,
BooleanField,
DateTimeField,
EmbeddedDocument,
LazyReferenceField,
CachedReferenceField,
EmbeddedDocumentListField,
)
from gym_bot_app import DAYS_NAME
from gym_bot_app.query_sets import ExtendedQuerySet
| 36.003636 | 125 | 0.6061 | # encoding: utf-8
from __future__ import unicode_literals
from datetime import datetime, timedelta
from mongoengine import (Document,
ListField,
StringField,
BooleanField,
DateTimeField,
EmbeddedDocument,
LazyReferenceField,
CachedReferenceField,
EmbeddedDocumentListField,
)
from gym_bot_app import DAYS_NAME
from gym_bot_app.query_sets import ExtendedQuerySet
class Day(EmbeddedDocument):
    """A single weekday plus a flag marking whether it is selected for training."""
    name = StringField(required=True, max_length=64)
    selected = BooleanField(default=False)

    @classmethod
    def get_week_days(cls):
        """Return one unselected Day instance per weekday name."""
        days = []
        for day_name in DAYS_NAME:
            days.append(cls(day_name))
        return days

    def __repr__(self):
        state = 'selected' if self.selected else 'not selected'
        return '<Day {day_name} {selected}>'.format(day_name=self.name, selected=state)

    def __str__(self):
        return repr(self)

    def __unicode__(self):
        return repr(self)
class Trainee(Document):
    """A Telegram user tracked by the bot, with weekly training-day selections."""
    id = StringField(required=True, primary_key=True)
    first_name = StringField(required=True)
    training_days = EmbeddedDocumentListField(Day)
    class TraineeQuerySet(ExtendedQuerySet):
        """Custom queryset so Trainee.objects.create seeds a full week of days."""
        def create(self, id, first_name):
            # New trainees start with one unselected Day per weekday.
            training_days = Day.get_week_days()
            # NOTE(review): unicode() implies this targets Python 2 -- confirm.
            return super(Trainee.TraineeQuerySet, self).create(id=unicode(id),
                                                               first_name=unicode(first_name),
                                                               training_days=training_days)
    meta = {
        'queryset_class': TraineeQuerySet,
    }
    def unselect_all_days(self):
        """Clear every selected training day and persist the change."""
        for day in self.training_days:
            day.selected = False
        self.save()
    def is_training_today(self):
        """Whether the trainee selected the current weekday as a training day."""
        today = datetime.now().strftime('%A')
        return self.is_training_in_day(day_name=today)
    def is_training_in_day(self, day_name):
        """Whether the weekday with the given name is selected for training."""
        return self.training_days.get(name=day_name).selected
    def add_training_info(self, training_date, trained):
        """Add training info to trainee.
        Args:
            training_date(datetime.date | datetime.datetime): date of the training info.
            trained(bool): whether trainee trained or not.
        Returns:
            TrainingDayInfo. instance of the created training day info.
        Raises:
            RuntimeError. in case trainee already have training day info in the given date.
        """
        if self.get_training_info(training_date=training_date):
            # NOTE(review): message says 'today' but the guard applies to any date.
            raise RuntimeError('Already created training day info for today.')
        return TrainingDayInfo.objects.create(trainee=self.pk,
                                              date=training_date,
                                              trained=trained)
    def get_training_info(self, training_date):
        """Check trainee training info of given date.
        Args:
            training_date(datetime.date | datetime.datetime): date of requested training date.
        Returns:
            list. all TrainingDayInfo of requested training date.
        """
        # Half-open range [training_date, training_date + 1 day) captures every
        # record stamped anywhere within that calendar day.
        next_day = training_date + timedelta(days=1)
        return TrainingDayInfo.objects.filter(trainee=self.pk,
                                              date__gte=training_date,
                                              date__lt=next_day)
    @staticmethod
    def _calculate_training_percentage(num_of_trained_days, num_missed_training_days):
        """Calculate the training percentage based on the number of trained days.
        Args:
            num_of_trained_days(int): number of days the trainee trained.
            num_missed_training_days(int): number of days trainee commit to workout but did not.
        Returns:
            int. training percentage.
        """
        if (num_of_trained_days + num_missed_training_days) > 0:
            training_percentage = (100.0 / (num_of_trained_days + num_missed_training_days) * num_of_trained_days)
            return int(round(training_percentage))
        else:
            # No recorded days at all -> 0% rather than dividing by zero.
            return 0
    @staticmethod
    def _calculate_average_training_days_per_week(trained_days):
        """Calculate the average training days per week.
        Args:
            trained_days(list<TrainingDayInfo>): all training days info when trainee did workout.
        Returns:
            float. average training days per week.
        """
        # The current (partial) week is excluded so it cannot drag the average down.
        today = datetime.today()
        first_day_of_current_week = today - timedelta(days=today.weekday())
        trained_days_without_last_week = trained_days.filter(date__lt=first_day_of_current_week)
        first_trained_day = trained_days_without_last_week.first()
        if first_trained_day:
            first_day_of_week = first_trained_day.date - timedelta(days=first_trained_day.date.weekday())
            num_of_trained_days_per_week = [0]
            for trained_day in trained_days_without_last_week:
                if (trained_day.date - first_day_of_week).days >= 7: # Start new week.
                    first_day_of_week = trained_day.date - timedelta(days=trained_day.date.weekday())
                    num_of_trained_days_per_week.append(0)
                # Increase number of trained days of the current week.
                num_of_trained_days_per_week[-1] += 1
            num_of_weeks_since_started_to_train = float((first_day_of_current_week - first_trained_day.date).days) / 7
            # NOTE(review): if the first workout lies fewer than ~4 days before the
            # current week starts, round() yields 0 here and this divides by zero --
            # confirm whether that case can occur in practice.
            return float(sum(num_of_trained_days_per_week)) / round(num_of_weeks_since_started_to_train)
        else:
            return 0.
    def get_training_statistics(self):
        """Trainee training statistics.
        Calculate training statistics based on the TrainingDaysInfo of the trainee.
        Returns.
            int. number of days trainee went to the gym.
            int. number of days trainee did not go to the gym although it marked as training day.
            int. percentage of actually going to the gym vs missing days.
            float. average number of training days per week.
        """
        training_days_info = TrainingDayInfo.objects.filter(trainee=self.pk).order_by('date')
        trained_days = training_days_info.filter(trained=True)
        trained_days_count = trained_days.count()
        missed_training_days_count = training_days_info.filter(trained=False).count()
        training_percentage = self._calculate_training_percentage(num_of_trained_days=trained_days_count,
                                                                  num_missed_training_days=missed_training_days_count)
        average_training_days_per_week = self._calculate_average_training_days_per_week(trained_days=trained_days)
        return (trained_days_count,
                missed_training_days_count,
                training_percentage,
                average_training_days_per_week)
    @property
    def groups(self):
        """All groups that currently contain this trainee."""
        return Group.objects.filter(trainees__contains=self)
    def __repr__(self):
        return '<Trainee {id} {first_name}>'.format(id=self.id,
                                                    first_name=self.first_name)
    def __str__(self):
        return repr(self)
    def __unicode__(self):
        return repr(self)
class TrainingDayInfo(Document):
    """Record of whether a trainee worked out on a given date."""
    trainee = LazyReferenceField(document_type=Trainee)
    date = DateTimeField(default=datetime.now)
    trained = BooleanField()

    meta = {
        'indexes': [('trainee', '-date')],
        'index_background': True,
    }

    def __repr__(self):
        status = 'trained' if self.trained else 'did not train'
        when = self.date.strftime('%d-%m-%Y %H:%M:%S')
        return '<TrainingDayInfo trainee {trainee_pk} {trained} {date}>'.format(
            trainee_pk=self.trainee.pk, trained=status, date=when)

    def __str__(self):
        return repr(self)

    def __unicode__(self):
        return repr(self)
class Group(Document):
    """A chat group together with the trainees registered in it."""
    id = StringField(required=True, primary_key=True)
    trainees = ListField(CachedReferenceField(Trainee, auto_sync=True))

    class GroupQuerySet(ExtendedQuerySet):
        def create(self, id, trainees=[]):
            return super(Group.GroupQuerySet, self).create(id=unicode(id),
                                                           trainees=trainees)

    meta = {
        'queryset_class': GroupQuerySet,
    }

    def add_trainee(self, new_trainee):
        """Append a trainee to the group; returns the group for chaining."""
        self.update(push__trainees=new_trainee)
        return self

    def get_trainees_of_today(self):
        """Trainees who committed to train on the current weekday."""
        return self.get_trainees_in_day(datetime.now().strftime('%A'))

    def get_trainees_in_day(self, day_name):
        """Trainees who committed to train on the given weekday."""
        result = []
        for trainee in self.trainees:
            if trainee.is_training_in_day(day_name):
                result.append(trainee)
        return result

    def __repr__(self):
        return '<Group {id}>'.format(id=self.id)

    def __str__(self):
        return repr(self)

    def __unicode__(self):
        return repr(self)

    def __iter__(self):
        return iter(self.trainees)
class Admin(Document):
    """A user with bot-admin privileges, stored by user id."""
    id = StringField(required=True, primary_key=True)
    class AdminQuerySet(ExtendedQuerySet):
        def create(self, id):
            # Ids are normalized to unicode strings before persisting.
            return super(Admin.AdminQuerySet, self).create(id=unicode(id))
        def is_admin(self, id):
            # NOTE(review): relies on ExtendedQuerySet.get returning a falsy
            # value (rather than raising) when no admin matches -- confirm.
            return bool(self.get(id=id))
    meta = {
        'queryset_class': AdminQuerySet,
    }
    def __repr__(self):
        return '<Admin {id}>'.format(id=self.id)
    def __str__(self):
        return repr(self)
    def __unicode__(self):
        return repr(self)
| 2,530 | 6,661 | 115 |
7416069b1655abcb7b4ce4fa39b10a99ec5fc400 | 1,244 | py | Python | src/tictactoe/gui/View.py | dawidcdv/tictactoe | d42016ce7765bba8d1852e82de4e08648ce4d62a | [
"MIT"
] | null | null | null | src/tictactoe/gui/View.py | dawidcdv/tictactoe | d42016ce7765bba8d1852e82de4e08648ce4d62a | [
"MIT"
] | null | null | null | src/tictactoe/gui/View.py | dawidcdv/tictactoe | d42016ce7765bba8d1852e82de4e08648ce4d62a | [
"MIT"
] | null | null | null | from tkinter import Button, messagebox
from abc import ABCMeta, abstractmethod
from src.tictactoe.game.Board import Board
| 31.897436 | 87 | 0.607717 | from tkinter import Button, messagebox
from abc import ABCMeta, abstractmethod
from src.tictactoe.game.Board import Board
class View:
    """Tkinter view for the tic-tac-toe board: draws buttons and dialogs."""

    class Controller:
        """Interface through which the view reports a selected board position."""
        __metaclass__ = ABCMeta

        @abstractmethod
        def select(self, position : int): raise NotImplementedError

    def __init__(self, tk, controller: Controller):
        self.root = tk
        self.controller = controller
        self.buttons = [None] * 9

    def createBoard(self):
        # One button per cell, laid out on a 3x3 grid; each click reports
        # its own cell index to the controller.
        for index in range(9):
            button = Button(self.root, text="", height=4, width=8,
                            command=lambda pos=index: self.controller.select(pos))
            button.grid(row=index // 3, column=index % 3)
            self.buttons[index] = button

    def showWinner(self, mark : Board.Mark):
        if mark is None:
            message = "Remis"
        elif mark is Board.Mark.CROSS:
            message = "Wygral krzyzyk"
        else:
            message = "Wygralo kolko"
        messagebox.showinfo(title="Zwyciezca", message=message)

    def showError(self, errorText):
        messagebox.showerror(title="Blad", message=errorText)

    def checkOnBoard(self, pos, mark : Board.Mark):
        cell_text = "X" if mark == Board.Mark.CROSS else "O"
        self.buttons[pos]["text"] = cell_text
1484137076d74041a15c64db005d71834958165e | 404 | py | Python | more_than_basics/builtins/builtins.py | xames3/python_tuts_v2 | 666d23743d617a33de9782dadcd06a97494b3823 | [
"Apache-2.0"
] | null | null | null | more_than_basics/builtins/builtins.py | xames3/python_tuts_v2 | 666d23743d617a33de9782dadcd06a97494b3823 | [
"Apache-2.0"
] | null | null | null | more_than_basics/builtins/builtins.py | xames3/python_tuts_v2 | 666d23743d617a33de9782dadcd06a97494b3823 | [
"Apache-2.0"
] | null | null | null | # List of Builtins referred from here: https://docs.python.org/3/library/functions.html
# xa = oct(10)
import os
import glob
# print(xa, (type(xa)), sep="\n")
xa = ["xa", "pranali", "srushti", "shailesh", "nimesh"]
xa = [idx.upper() for idx in xa]
# ya = xa.copy()
# za = []
# for idx in xa:
# za.append(idx.upper())
print(xa)
# print(za)
# 1. Write brackets
# 2. Write for loop as is BUT w/o :
| 17.565217 | 87 | 0.616337 | # List of Builtins referred from here: https://docs.python.org/3/library/functions.html
# xa = oct(10)
import os
import glob
# print(xa, (type(xa)), sep="\n")
xa = ["xa", "pranali", "srushti", "shailesh", "nimesh"]
xa = [idx.upper() for idx in xa]
# ya = xa.copy()
# za = []
# for idx in xa:
# za.append(idx.upper())
print(xa)
# print(za)
# 1. Write brackets
# 2. Write for loop as is BUT w/o :
| 0 | 0 | 0 |
65c9d0d6f453e2e5695efe48fee0cf0b1d9c2adc | 7,188 | py | Python | cogs/Offices.py | seanrowland101/Ayesha-2.0 | 4f3c2ca9fff37559fa07d1138d3df5c9a33a838b | [
"MIT"
] | null | null | null | cogs/Offices.py | seanrowland101/Ayesha-2.0 | 4f3c2ca9fff37559fa07d1138d3df5c9a33a838b | [
"MIT"
] | null | null | null | cogs/Offices.py | seanrowland101/Ayesha-2.0 | 4f3c2ca9fff37559fa07d1138d3df5c9a33a838b | [
"MIT"
] | null | null | null | import discord
from discord.commands.commands import Option, OptionChoice
from discord.commands.context import ApplicationContext
from discord.ext import commands, pages
from discord.ext.commands import BucketType, cooldown
import asyncio
import random
import schedule
from Utilities import AssociationObject, Checks, Finances, ItemObject, PlayerObject, Vars
class Offices(commands.Cog):
"""Offices Text"""
# EVENTS
@commands.Cog.listener()
# COMMANDS
@commands.slash_command(guild_ids=[762118688567984151])
async def offices(self, ctx):
"""View the map, tax rate, and this week's elected officeholders."""
async with self.bot.db.acquire() as conn:
comptroller_rec = await PlayerObject.get_comptroller(conn)
mayor_rec = await PlayerObject.get_mayor(conn)
comptroller = await self.bot.fetch_user(
comptroller_rec['officeholder'])
mayor = await self.bot.fetch_user(mayor_rec['officeholder'])
tax_info = await Finances.get_tax_info(conn)
embed = discord.Embed(
title=f"This Week's Officeholders!",
color=Vars.ABLUE)
embed.add_field(name="Mayor",
value=f"{mayor.mention}: **{mayor_rec['user_name']}**")
embed.add_field(name="Comptroller",
value=f"{comptroller.mention}: **{comptroller_rec['user_name']}**")
embed.add_field(
name=f"Current Tax Rate: `{tax_info['tax_rate']}`%",
value=(
f"The mayor has collected `{tax_info['Collected']}` gold "
f"so far this term."),
inline=False)
embed.set_image(url="https://i.imgur.com/jpLztYK.jpg")
await ctx.respond(embed=embed)
@commands.slash_command(guild_ids=[762118688567984151])
@commands.check(Checks.is_mayor)
@cooldown(1, 43200, BucketType.user)
async def tax(self, ctx, tax_rate : Option(float,
description="The new tax rate as a percentage (0-9.99)",
min_value=0,
max_value=9.99)):
"""Set the tax rate over Aramythia, earning you a small percentage."""
tax_rate = round(tax_rate, 2)
async with self.bot.db.acquire() as conn:
await Finances.set_tax_rate(conn, tax_rate, ctx.author.id)
await ctx.respond("You have changed the tax rate.")
await self.bot.announcement_channel.send(
f"Mayor {ctx.author.mention} has set the tax rate to `{tax_rate}%`."
)
@commands.slash_command(guild_ids=[762118688567984151])
async def territories(self, ctx):
"""See which brotherhoods control the outlying areas of the map."""
async with self.bot.db.acquire() as conn:
# Tuple with area and the accompanying owner Association Object
te_list = [
(area, await AssociationObject.get_territory_controller(
conn, area))
for area in Vars.TERRITORIES]
embed = discord.Embed(
title="Territories Controlled by a Brotherhood",
description=(
"Brotherhoods in control of a territory get a 50% bonus "
"to rewards from `/work` in that territory."),
color=Vars.ABLUE)
embed.set_image(url="https://i.imgur.com/jpLztYK.jpg")
for assc in te_list:
text = assc[1].name
if not assc[1].is_empty:
text += f" (ID: `{assc[1].id}`)"
embed.add_field(name=assc[0], value=text)
await ctx.respond(embed=embed)
| 43.041916 | 90 | 0.536867 | import discord
from discord.commands.commands import Option, OptionChoice
from discord.commands.context import ApplicationContext
from discord.ext import commands, pages
from discord.ext.commands import BucketType, cooldown
import asyncio
import random
import schedule
from Utilities import AssociationObject, Checks, Finances, ItemObject, PlayerObject, Vars
class Offices(commands.Cog):
    """Offices Text"""
    def __init__(self, bot):
        self.bot = bot
        async def update_offices():
            """Pay out the outgoing officeholders, then install next week's pair."""
            # Give payout to current mayor and comptroller
            async with self.bot.db.acquire() as conn:
                comp_rec = await PlayerObject.get_comptroller(conn)
                comptroller = await PlayerObject.get_player_by_id(
                    conn, comp_rec['officeholder'])
                mayor_rec = await PlayerObject.get_mayor(conn)
                mayor = await PlayerObject.get_player_by_id(
                    conn, mayor_rec['officeholder'])
                tax_info = await Finances.get_tax_info(conn)
                # Each officeholder receives 1% of the taxes collected this term.
                payout = int(tax_info['Collected'] / 100)
                await comptroller.give_gold(conn, payout)
                await mayor.give_gold(conn, payout)
            # NOTE(review): the statements below still use `conn` although the
            # `async with` block above has already released it back to the
            # pool -- they look like they should live inside that block; confirm.
            # The college member with the most gravitas becomes mayor.
            psql1 = """
            WITH gravitas_leader AS (
                SELECT user_id
                FROM players
                WHERE assc IN (
                    SELECT assc_id
                    FROM associations
                    WHERE assc_type = 'College'
                )
                ORDER BY gravitas DESC
                LIMIT 1
            )
            INSERT INTO officeholders (officeholder, office)
            VALUES ((SELECT user_id FROM gravitas_leader), 'Mayor')
            RETURNING officeholder;
            """
            # The guild member with the most gold becomes comptroller.
            psql2 = """
            WITH gold_leader AS (
                SELECT user_id
                FROM players
                WHERE assc IN (
                    SELECT assc_id
                    FROM associations
                    WHERE assc_type = 'Guild'
                )
                ORDER BY gold DESC
                LIMIT 1
            )
            INSERT INTO officeholders (officeholder, office)
            VALUES ((SELECT user_id FROM gold_leader), 'Comptroller')
            RETURNING officeholder;
            """
            new_mayor_id = await conn.fetchval(psql1)
            new_comp_id = await conn.fetchval(psql2)
            new_mayor = await self.bot.fetch_user(new_mayor_id)
            new_comp = await self.bot.fetch_user(new_comp_id)
            await self.bot.announcement_channel.send(
                f"Congratulations to our new mayor {new_mayor.mention} and "
                f"comptroller {new_comp.mention}, who will be serving "
                f"Aramithea for this week!")
        def run_offices_func():
            """Bridge for `schedule`: submit the coroutine onto the bot's loop."""
            asyncio.run_coroutine_threadsafe(update_offices(), self.bot.loop)
        async def schedule_office_updates():
            """Run a private scheduler that rotates offices every Wednesday at noon."""
            office_scheduler = schedule.Scheduler()
            office_scheduler.every().wednesday.at("12:00").do(run_offices_func)
            while True:
                office_scheduler.run_pending()
                # Sleep until the next scheduled job instead of busy-polling.
                await asyncio.sleep(office_scheduler.idle_seconds)
        asyncio.ensure_future(schedule_office_updates())
    # EVENTS
    @commands.Cog.listener()
    async def on_ready(self):
        print("Offices is ready.")
    # COMMANDS
    @commands.slash_command(guild_ids=[762118688567984151])
    async def offices(self, ctx):
        """View the map, tax rate, and this week's elected officeholders."""
        async with self.bot.db.acquire() as conn:
            comptroller_rec = await PlayerObject.get_comptroller(conn)
            mayor_rec = await PlayerObject.get_mayor(conn)
            comptroller = await self.bot.fetch_user(
                comptroller_rec['officeholder'])
            mayor = await self.bot.fetch_user(mayor_rec['officeholder'])
            tax_info = await Finances.get_tax_info(conn)
        embed = discord.Embed(
            title=f"This Week's Officeholders!",
            color=Vars.ABLUE)
        embed.add_field(name="Mayor",
            value=f"{mayor.mention}: **{mayor_rec['user_name']}**")
        embed.add_field(name="Comptroller",
            value=f"{comptroller.mention}: **{comptroller_rec['user_name']}**")
        embed.add_field(
            name=f"Current Tax Rate: `{tax_info['tax_rate']}`%",
            value=(
                f"The mayor has collected `{tax_info['Collected']}` gold "
                f"so far this term."),
            inline=False)
        embed.set_image(url="https://i.imgur.com/jpLztYK.jpg")
        await ctx.respond(embed=embed)
    @commands.slash_command(guild_ids=[762118688567984151])
    @commands.check(Checks.is_mayor)
    @cooldown(1, 43200, BucketType.user)
    async def tax(self, ctx, tax_rate : Option(float,
            description="The new tax rate as a percentage (0-9.99)",
            min_value=0,
            max_value=9.99)):
        """Set the tax rate over Aramythia, earning you a small percentage."""
        # Tax rates are stored with at most two decimal places.
        tax_rate = round(tax_rate, 2)
        async with self.bot.db.acquire() as conn:
            await Finances.set_tax_rate(conn, tax_rate, ctx.author.id)
        await ctx.respond("You have changed the tax rate.")
        await self.bot.announcement_channel.send(
            f"Mayor {ctx.author.mention} has set the tax rate to `{tax_rate}%`."
        )
    @commands.slash_command(guild_ids=[762118688567984151])
    async def territories(self, ctx):
        """See which brotherhoods control the outlying areas of the map."""
        async with self.bot.db.acquire() as conn:
            # Tuple with area and the accompanying owner Association Object
            te_list = [
                (area, await AssociationObject.get_territory_controller(
                    conn, area))
                for area in Vars.TERRITORIES]
        embed = discord.Embed(
            title="Territories Controlled by a Brotherhood",
            description=(
                "Brotherhoods in control of a territory get a 50% bonus "
                "to rewards from `/work` in that territory."),
            color=Vars.ABLUE)
        embed.set_image(url="https://i.imgur.com/jpLztYK.jpg")
        for assc in te_list:
            text = assc[1].name
            if not assc[1].is_empty:
                # Empty associations have no id worth displaying.
                text += f" (ID: `{assc[1].id}`)"
            embed.add_field(name=assc[0], value=text)
        await ctx.respond(embed=embed)
def setup(bot):
    """Entry point used by discord.py extension loading to register this cog."""
    bot.add_cog(Offices(bot))
44c2fe335e43cc6f717f5b780437fc4c47a6dd18 | 3,700 | py | Python | tushare_trader/__init__.py | gorf/tushare-trader | 7502e11acb509f481041445e69e2f6960b3fba0d | [
"BSD-3-Clause"
] | 1 | 2020-07-12T02:40:11.000Z | 2020-07-12T02:40:11.000Z | tushare_trader/__init__.py | gorf/tushare-trader | 7502e11acb509f481041445e69e2f6960b3fba0d | [
"BSD-3-Clause"
] | 3 | 2020-07-12T01:13:17.000Z | 2021-03-31T19:16:04.000Z | tushare_trader/__init__.py | gorf/tushare-trader | 7502e11acb509f481041445e69e2f6960b3fba0d | [
"BSD-3-Clause"
] | null | null | null | __version__ = '0.7.7'
__author__ = 'Jimmy Liu'
"""
for trading data
"""
from tushare_trader.stock.trading import (get_hist_data, get_tick_data,
get_today_all, get_realtime_quotes,
get_h_data, get_today_ticks,
get_index, get_hists,
get_k_data,
get_sina_dd)
"""
for trading data
"""
from tushare_trader.stock.fundamental import (get_stock_basics, get_report_data,
get_profit_data,
get_operation_data, get_growth_data,
get_debtpaying_data, get_cashflow_data,
get_balance_sheet, get_profit_statement, get_cash_flow)
"""
for macro data
"""
from tushare_trader.stock.macro import (get_gdp_year, get_gdp_quarter,
get_gdp_for, get_gdp_pull,
get_gdp_contrib, get_cpi,
get_ppi, get_deposit_rate,
get_loan_rate, get_rrr,
get_money_supply, get_money_supply_bal,
get_gold_and_foreign_reserves)
"""
for classifying data
"""
from tushare_trader.stock.classifying import (get_industry_classified, get_concept_classified,
get_area_classified, get_gem_classified,
get_sme_classified, get_st_classified,
get_hs300s, get_sz50s, get_zz500s,
get_terminated, get_suspended)
"""
for macro data
"""
from tushare_trader.stock.newsevent import (get_latest_news, latest_content,
get_notices, notice_content,
guba_sina)
"""
for reference
"""
from tushare_trader.stock.reference import (profit_data, forecast_data,
xsg_data, fund_holdings,
new_stocks, sh_margins,
sh_margin_details,
sz_margins, sz_margin_details,
top10_holders)
"""
for shibor
"""
from tushare_trader.stock.shibor import (shibor_data, shibor_quote_data,
shibor_ma_data, lpr_data,
lpr_ma_data)
"""
for LHB
"""
from tushare_trader.stock.billboard import (top_list, cap_tops, broker_tops,
inst_tops, inst_detail)
"""
for utils
"""
from tushare_trader.util.dateu import (trade_cal, is_holiday)
"""
for DataYes Token
"""
from tushare_trader.util.upass import (set_token, get_token, get_broker,
set_broker, remove_broker)
from tushare_trader.datayes.api import *
from tushare_trader.internet.boxoffice import (realtime_boxoffice, day_boxoffice,
day_cinema, month_boxoffice)
"""
for fund data
"""
from tushare_trader.fund.nav import (get_nav_open, get_nav_close, get_nav_grading,
get_nav_history, get_fund_info)
"""
for trader API
"""
from tushare_trader.trader.trader import TraderAPI
"""
for futures API
"""
from tushare_trader.futures.intlfutures import (get_intlfuture)
from tushare_trader.stock.globals import (global_realtime)
from tushare_trader.util.mailmerge import (MailMerge)
| 33.035714 | 95 | 0.536216 | __version__ = '0.7.7'
__author__ = 'Jimmy Liu'
"""
for trading data
"""
from tushare_trader.stock.trading import (get_hist_data, get_tick_data,
get_today_all, get_realtime_quotes,
get_h_data, get_today_ticks,
get_index, get_hists,
get_k_data,
get_sina_dd)
"""
for trading data
"""
from tushare_trader.stock.fundamental import (get_stock_basics, get_report_data,
get_profit_data,
get_operation_data, get_growth_data,
get_debtpaying_data, get_cashflow_data,
get_balance_sheet, get_profit_statement, get_cash_flow)
"""
for macro data
"""
from tushare_trader.stock.macro import (get_gdp_year, get_gdp_quarter,
get_gdp_for, get_gdp_pull,
get_gdp_contrib, get_cpi,
get_ppi, get_deposit_rate,
get_loan_rate, get_rrr,
get_money_supply, get_money_supply_bal,
get_gold_and_foreign_reserves)
"""
for classifying data
"""
from tushare_trader.stock.classifying import (get_industry_classified, get_concept_classified,
get_area_classified, get_gem_classified,
get_sme_classified, get_st_classified,
get_hs300s, get_sz50s, get_zz500s,
get_terminated, get_suspended)
"""
for macro data
"""
from tushare_trader.stock.newsevent import (get_latest_news, latest_content,
get_notices, notice_content,
guba_sina)
"""
for reference
"""
from tushare_trader.stock.reference import (profit_data, forecast_data,
xsg_data, fund_holdings,
new_stocks, sh_margins,
sh_margin_details,
sz_margins, sz_margin_details,
top10_holders)
"""
for shibor
"""
from tushare_trader.stock.shibor import (shibor_data, shibor_quote_data,
shibor_ma_data, lpr_data,
lpr_ma_data)
"""
for LHB
"""
from tushare_trader.stock.billboard import (top_list, cap_tops, broker_tops,
inst_tops, inst_detail)
"""
for utils
"""
from tushare_trader.util.dateu import (trade_cal, is_holiday)
"""
for DataYes Token
"""
from tushare_trader.util.upass import (set_token, get_token, get_broker,
set_broker, remove_broker)
from tushare_trader.datayes.api import *
from tushare_trader.internet.boxoffice import (realtime_boxoffice, day_boxoffice,
day_cinema, month_boxoffice)
"""
for fund data
"""
from tushare_trader.fund.nav import (get_nav_open, get_nav_close, get_nav_grading,
get_nav_history, get_fund_info)
"""
for trader API
"""
from tushare_trader.trader.trader import TraderAPI
"""
for futures API
"""
from tushare_trader.futures.intlfutures import (get_intlfuture)
from tushare_trader.stock.globals import (global_realtime)
from tushare_trader.util.mailmerge import (MailMerge)
| 0 | 0 | 0 |
ad79a8d3eee9a3a57784a6474509d0a1b53512b5 | 222 | py | Python | menus/admin.py | hbolzan/django-sql-to-rest | 008f2d726fc7b758e45d24eb2e32ae605f194947 | [
"BSD-2-Clause"
] | 1 | 2019-03-06T19:46:18.000Z | 2019-03-06T19:46:18.000Z | menus/admin.py | hbolzan/django-sql-to-rest | 008f2d726fc7b758e45d24eb2e32ae605f194947 | [
"BSD-2-Clause"
] | 2 | 2020-06-05T19:41:06.000Z | 2021-06-10T21:05:53.000Z | menus/admin.py | hbolzan/django-sql-to-rest | 008f2d726fc7b758e45d24eb2e32ae605f194947 | [
"BSD-2-Clause"
] | null | null | null | from django.contrib import admin
from django_mptt_admin.admin import DjangoMpttAdmin
from menus.models import SystemMenu
admin.site.register(SystemMenu, SystemMenuAdmin)
| 22.2 | 51 | 0.842342 | from django.contrib import admin
from django_mptt_admin.admin import DjangoMpttAdmin
from menus.models import SystemMenu
class SystemMenuAdmin(DjangoMpttAdmin):
    """Tree-style Django admin (via django-mptt-admin) for SystemMenu entries."""
    pass
admin.site.register(SystemMenu, SystemMenuAdmin)
| 0 | 27 | 23 |
7da8a3e5f21fcbab4d59bff10ca9fcf193fdd4ce | 591 | py | Python | 21312312.py | zhangbo2008/chineseOCR-latest | 8eeb7d4bdf1bf460a3b8190d21507639db4c77f3 | [
"MIT"
] | 8 | 2019-10-29T07:56:04.000Z | 2020-11-27T07:44:25.000Z | 21312312.py | zhangbo2008/chineseOCR-latest | 8eeb7d4bdf1bf460a3b8190d21507639db4c77f3 | [
"MIT"
] | null | null | null | 21312312.py | zhangbo2008/chineseOCR-latest | 8eeb7d4bdf1bf460a3b8190d21507639db4c77f3 | [
"MIT"
] | 1 | 2020-11-03T12:45:27.000Z | 2020-11-03T12:45:27.000Z | ##
# Download a sample image over HTTP and save it to tmp.jpg.
import requests
tmp2='http://pics5.baidu.com/feed/38dbb6fd5266d0164f8e49cdd24aa40234fa3522.jpeg?token=c2cbcc6f929b40eb8881ceb4746c0e8c&s=1994CB1452725B94340248850300F0AB'
print(tmp2)
# Bound the request so an unresponsive server cannot hang the script, and
# fail loudly on HTTP errors instead of silently writing an error page to disk.
r = requests.get(tmp2, timeout=30)
r.raise_for_status()
with open('tmp.jpg', 'wb') as f:
    f.write(r.content)
##
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
##
| 22.730769 | 154 | 0.763113 | ##
import requests
tmp2='http://pics5.baidu.com/feed/38dbb6fd5266d0164f8e49cdd24aa40234fa3522.jpeg?token=c2cbcc6f929b40eb8881ceb4746c0e8c&s=1994CB1452725B94340248850300F0AB'
print(tmp2)
r = requests.get(tmp2)
with open('tmp.jpg', 'wb') as f:
f.write(r.content)
##
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
print("1111111111")
##
| 0 | 0 | 0 |
c683ff8f817eb532dbd2635547cbcb925c679a26 | 37,972 | py | Python | watex/utils/tricks.py | WEgeophysics/watex | 21616ce35372a095c3dd624f82a5282b15cb2c91 | [
"MIT"
] | 3 | 2021-06-19T02:16:46.000Z | 2021-07-16T15:56:49.000Z | watex/utils/tricks.py | WEgeophysics/watex | 21616ce35372a095c3dd624f82a5282b15cb2c91 | [
"MIT"
] | null | null | null | watex/utils/tricks.py | WEgeophysics/watex | 21616ce35372a095c3dd624f82a5282b15cb2c91 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
===============================================================================
Copyright (c) 2021 Kouadio K. Laurent
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
===============================================================================
.. synopsis:: 'watex.utils.wmathandtricks'
Module for computing
Created on Mon Jun 21 14:43:25 2021
.. _electrical-resistivity-profile::`erp`
.. _vertical-electrical-sounding::`ves`
.. _station-position::`pk`
.._anomaly-boundaries:`anBounds`
@author: @Daniel03
"""
import os
import warnings
import numpy as np
import pandas as pd
from scipy.signal import argrelextrema
from scipy.interpolate import interp1d as sp1d
from ..utils._watexlog import watexlog
from ..utils.decorator import deprecated
import watex.utils.exceptions as Wex
_logger =watexlog.get_watex_logger(__name__)
def compute_lower_anomaly(erp_array, station_position=None,
                          step=None, **kws):
    """
    Find the (at most three) lowest-resistivity anomalies on an ERP line
    and delimit each one with its boundaries.

    :param erp_array: array of apparent resistivity profile
    :type erp_array: array_like

    :param station_position: array of station positions (survey); if not
        given and `step` is known, `station_position` is computed
        automatically from `step`.
    :type station_position: array_like

    :param step: the distance between measurements in meters. If given,
        `station_position` is recomputed from it.

    :returns: * `bestSelectedDICT`: dict mapping '<rank>_pk<position>' keys
                to the anomaly resistivity ranges.
              * `anpks`: main positions of the best selected anomalies
                (or the string ``'?'`` if positions are unknown).
              * `collectanlyBounds`: list of arrays of selected anomaly values
              * `min_pks`: list of tuples (min rho value, index in erp_array)
    :rtype: tuple

    :Example:

        >>> from watex.utils.wmathandtricks import compute_lower_anomaly
        >>> import pandas as pd
        >>> path_to_= 'data/l10_gbalo.xlsx'
        >>> dataRes=pd.read_excel(erp_data).to_numpy()[:,-1]
        >>> anomaly, *_ =  compute_lower_anomaly(erp_array=data, step =10)
        >>> anomaly
    """
    # NOTE(review): the keyword is (historically) misspelled 'diplay_infos';
    # callers must use that exact spelling.
    display_infos= kws.pop('diplay_infos', False)
    # got minumum of erp data
    collectanlyBounds=[]
    if step is not None:
        station_position = np.arange(0, step * len(erp_array), step)

    min_pks= get_minVal(erp_array) # three min anomaly values
    # delimit each minimum: walk away from it while resistivity rises
    # (see drawn_anomaly_boundaries)
    for ii, (rho, index) in enumerate(min_pks) :
        _, _, anlyBounds= drawn_anomaly_boundaries(erp_data = erp_array,
                                 appRes = rho, index=index)
        collectanlyBounds.append(anlyBounds)

    if station_position is None :
        pks =np.array(['?' for ii in range(len(erp_array))])
    else : pks =station_position

    # numeric positions -> report actual station locations; otherwise '?'
    # (np.dtype equality against the strings 'int'/'float' matches the
    # platform default int64/float64 dtypes)
    if pks.dtype in ['int', 'float']:
        anpks =np.array([pks[skanIndex ] for
                         (_, skanIndex) in min_pks ])
    else : anpks ='?'

    # keys encode rank and station position: '<rank>_pk<position>'
    bestSelectedDICT={}
    for ii, (pk, anb) in enumerate(zip(anpks, collectanlyBounds)):
        bestSelectedDICT['{0}_pk{1}'.format(ii+1, pk)] = anb

    if display_infos:
        print('{0:+^100}'.format(
            ' *Best Conductive anomaly points (BCPts)* '))
        fmtAnText(anFeatures=bestSelectedDICT)

    return bestSelectedDICT, anpks, collectanlyBounds, min_pks
def get_minVal(array):
    """
    Find (at most) the three minimum values of an array together with
    their corresponding indexes.

    :param array: array of values (scalars are promoted to 1-element arrays)
    :type array: array_like

    :returns: list of up to three (value, index) tuples, ordered by
        increasing resistivity value.
    :rtype: list
    """

    holdList =[]
    # promote scalars / scalar-convertibles to a 1-element ndarray
    if not isinstance(array, (list, tuple, np.ndarray)):
        if isinstance(array, float):
            array=np.array([array])
        else :
            try :
                array =np.array([float(array)])
            except:
                # NOTE(review): the '%s' placeholder is never filled in
                raise Wex.WATexError_float('Could not convert %s to float!')
    try :
        # first option:find minimals locals values
        minlocals = argrelextrema(array, np.less)[0]
        temp_array =np.array([array[int(index)] for index in minlocals])
        if len(minlocals) ==0:
            # no strict local minima (e.g. monotonic array): fall back to
            # the global minimum
            ix = np.where(array ==array.min())
            if len(ix)>1:
                ix =ix[0]
            temp_array = array[int(ix)]

    except :
        # second option: use archaic computation.
        temp_array =np.sort(array)
    else :
        temp_array= np.sort(temp_array)

    # `ss` counts collected candidates; NOTE(review): in the single-match
    # branch it accumulates the loop index `ii` rather than 1 -- looks like
    # a quirk kept for historical behavior, TODO confirm before changing.
    ss=0

    for ii, tem_ar in enumerate(temp_array) :
        if ss >=3 :
            holdList=holdList[:3]
            break
        min_index = np.where(array==tem_ar)[0]

        if len(min_index)==1 :
            holdList.append((array[int(min_index)],
                             int(min_index)))
            ss +=ii
        elif len(min_index) > 1 :
            # loop the index array and find the min for consistency
            for jj, indx in enumerate(min_index):
                holdList.append((array[int(indx)],
                                 int(indx)))
            ss =len(holdList)

    # for consistency keep the 3 best min values
    if len(holdList)>3 :
        holdList = holdList[:3]

    return holdList
def drawn_anomaly_boundaries(erp_data, appRes, index):
    """
    Delimit an anomaly inside an ERP profile and return it with its
    boundaries.

    :param erp_data: erp profile
    :type erp_data: array_like or list

    :param appRes: resistivity value of the anomaly minimum (pk)
    :type appRes: float

    :param index: index of the anomaly minimum in `erp_data`
    :type index: int

    :return: (appRes, index, anomaly-with-boundaries array)
    :rtype: tuple
    """
    # side flag: 0 -> both sides, 1 -> right side only (minimum at start),
    # 2 -> left side only (minimum at the end of the line)
    side = 0
    if index == 0:
        side = 1
    elif appRes == erp_data[-1]:
        side = 2

    def _climb(branch):
        """Walk away from the minimum while resistivity keeps rising.

        :param branch: left or right branch of the anomaly.
        :type branch: array
        :return: the rising side bar.
        :rtype: array_like
        """
        picked = []
        best_gap = 0
        for rho in branch:
            gap = rho - appRes
            if gap > best_gap:
                best_gap = gap
                picked.append(rho)
            elif gap < best_gap:
                break
        return np.array(picked)

    if side in (0, 2):
        # the left branch is walked backwards, then flipped back in place
        left_limit = _climb(erp_data[:index][::-1])[::-1]
    if side in (0, 1):
        right_limit = _climb(erp_data[index:])

    # stitch the branches back around the minimum
    if side == 2:
        bounds = np.append(left_limit, appRes)
    elif side == 1:
        bounds = np.array([appRes] + right_limit.tolist())
    else:
        bounds = np.concatenate((np.append(left_limit, appRes), right_limit))

    return appRes, index, bounds
def defineAnomaly(erp_data, station_position=None, pks=None,
                  dipole_length=10., **kwargs):
    """
    Select the different anomalies. If `pks` is not given, the best three
    anomalies on the survey line are computed automatically.

    :param erp_data: Electrical resistivity profiling
    :type erp_data: array_like

    :param pks: station positions of anomaly boundaries (pk_begin, pk_end).
                If more than one anomaly is selected, set `pks` to a dict
                where keys number the anomalies and values are the bounds.
    :type pks: list or dict

    :param dipole_length: distance between two measurements in meters;
        recomputed from `station_position` when the latter is given.
    :type dipole_length: float

    :param station_position: station position array
    :type station_position: array_like

    :return: dict mapping '<rank>_pk<position>' keys to the arrays of
        anomaly resistivity bounds.
    """
    selectedPk = kwargs.pop('selectedPk', None)
    bestSelectedDICT = {}
    if station_position is not None:
        # BUGFIX: the parenthesis was misplaced -- `len(station_position -1)`
        # subtracted 1 elementwise (leaving the length unchanged) instead of
        # dividing by the number of intervals `len(station_position) - 1`.
        dipole_length = (station_position.max()
                         - station_position.min()) / (len(station_position) - 1)
    if station_position is None:
        station_position = np.arange(0, dipole_length * len(erp_data),
                                     dipole_length)

    def getBound(pksbounds):
        """
        Get the anomaly resistivity range delimited by the given bounds.

        :param pksbounds: anomaly boundaries (station positions)
        :type pksbounds: array_like

        :returns: anomBounds - array of appRes values of the anomaly
        :rtype: array_like
        """
        # check if bound is on station positions
        # NOTE(review): this loop compares each bound against the bounds'
        # own min/max, so it can never fail; it was probably meant to check
        # against `station_position` -- TODO confirm before changing.
        for spk in pksbounds:
            if not pksbounds.min() <= spk <= pksbounds.max():
                # BUGFIX: the message placeholders were previously never
                # filled in (missing `.format`).
                raise Wex.WATexError_AnomalyBounds(
                    'Bound <{0}> provided is out of range !'
                    'Dipole length is set to = {1} m.'
                    ' Please set a new bounds.'.format(spk, dipole_length))

        pkinf = np.where(station_position == pksbounds.min())[0]
        pksup = np.where(station_position == pksbounds.max())[0]
        anomBounds = erp_data[int(pkinf):int(pksup) + 1]
        return anomBounds

    if pks is None:
        # no bounds supplied: fall back to the automatic three-best search
        bestSelectedDICT, *_ = compute_lower_anomaly(
            erp_array=erp_data, step=dipole_length,
            station_position=station_position)

    elif isinstance(pks, list):
        pks = np.array(sorted(pks))
        collectanlyBounds = getBound(pksbounds=pks)
        # get the length of selected anomalies and compute the station
        # location which composed the bounds (Xbegin and Xend)
        pkb, *_ = find_pk_from_selectedAn(
            an_res_range=collectanlyBounds, pos=pks,
            selectedPk=selectedPk)
        bestSelectedDICT = {'1_{}'.format(pkb): collectanlyBounds}

    elif isinstance(pks, dict):
        for ii, (keys, values) in enumerate(pks.items()):
            if isinstance(values, list):
                values = np.array(values)
            collectanlyBounds = getBound(pksbounds=values)
            pkb, *_ = find_pk_from_selectedAn(
                an_res_range=collectanlyBounds, pos=pks,
                selectedPk=selectedPk)
            bestSelectedDICT['{0}_{1}'.format(ii + 1, pkb)] = collectanlyBounds

    return bestSelectedDICT
def find_pk_from_selectedAn(an_res_range, pos=None, selectedPk=None):
    """
    Select the main :ref:`pk` from both :ref:`anBounds`.

    :param an_res_range: anomaly resistivity range on the :ref:`erp` line.
    :type an_res_range: array_like

    :param pos: position of anomaly boundaries (inf and sup)::

                anBounds = [90, 130]
                # 130 is max boundary and 90 the min boundary

    :type pos: list

    :param selectedPk:
        User-chosen position of the right anomaly. Be sure the value
        provided is a valid position. When `pos` is ``None`` the value
        is accepted as-is without recomputation.

    :return: anomaly station position as ``'pk{position}'`` plus the
        resistivity range.
    :rtype: tuple(str, array_like)

    :Example:

        >>> from watex.utils.wmathandtricks import find_pk_from_selectedAn
        >>> resan = np.array([168,130, 93,146,145])
        >>> pk= find_pk_from_selectedAn(
        ...    resan, pos=[90, 13], selectedPk= 'str20')
        >>> pk
    """
    #compute dipole length from pos
    if pos is not None :
        if isinstance(pos, list):
            pos =np.array(pos)
    if pos is None and selectedPk is None :
        raise Wex.WATexError_parameter_number(
            'Give at least the anomaly boundaries'
            ' before computing the selected anomaly position.')

    if selectedPk is not None :  # mean is given
        # extract an integer position from the user-supplied value:
        # plain digits, alphanumeric mixes ('pk110'), or int-convertibles
        if isinstance(selectedPk, str):
            if selectedPk.isdigit() :
                sPk= int(selectedPk)
            elif selectedPk.isalnum():
                oss = ''.join([s for s in selectedPk
                    if s.isdigit()])
                sPk =int(oss)
        else :
            try :
                sPk = int(selectedPk)

            except : pass

        if pos is not None : # then compare the sPk and ps value
            try :
                if not pos.min()<= sPk<=pos.max():
                    warnings.warn('Wrong position given <{}>.'
                                  ' Should compute new positions.'.
                                  format(selectedPk))
                    _logger.debug('Wrong position given <{}>.'
                                  'Should compute new positions.'.
                                  format(selectedPk))

            # `sPk` is deliberately left unset for non-alphanumeric strings
            # above; this catch lets the flow fall through to the automatic
            # computation below in that case.
            except UnboundLocalError:
                print("local variable 'sPk' referenced before assignment")
            else :

                return 'pk{}'.format(sPk ), an_res_range

        else :
            selectedPk='pk{}'.format(sPk )

            return selectedPk , an_res_range


    # automatic path: locate the minimum of the anomaly range and map its
    # index back to a station position between pos.min() and pos.max()
    if isinstance(pos, list):
        pos =np.array(pos)
    if isinstance(an_res_range, list):
        an_res_range =np.array(an_res_range)
    dipole_length = (pos.max()-pos.min())/(len(an_res_range)-1)

    tem_loc = np.arange(pos.min(), pos.max()+dipole_length, dipole_length)

    # find min value of  collected anomalies values
    locmin = np.where (an_res_range==an_res_range.min())[0]
    if len(locmin) >1 : locmin =locmin[0]
    pk_= int(tem_loc[int(locmin)]) # find the min pk

    selectedPk='pk{}'.format(pk_)

    return selectedPk , an_res_range
def fmtAnText(anFeatures=None,
              # BUGFIX: the default used to be a mutable list (shared across
              # calls); an immutable tuple is the safe equivalent and remains
              # backward-compatible with callers passing lists.
              title=('Ranking', 'rho(Ω.m)', 'position pk(m)',
                     'rho range(Ω.m)'),
              **kwargs):
    """
    Pretty-print anomaly features as an ASCII table.

    :param anFeatures: anomaly features; either a dict whose keys look like
        '<rank>_pk<position>' mapped to resistivity ranges, or a single
        row given as a list ``[rank, rho, position, rho_range]``.
    :type anFeatures: list or dict

    :param title: table header labels (last label gets a wider column)
    :type title: sequence

    :keyword inline: character used to draw horizontal rules (default '-')
    :keyword mlabels: rule width in characters (default 100)

    :Example:

        >>> from watex.utils.wmathandtricks import fmtAnText
        >>> fmtAnText(anFeatures =[1,130, 93,(146,145, 125)])
    """
    inline = kwargs.pop('inline', '-')
    mlabel = kwargs.pop('mlabels', 100)
    line = inline * int(mlabel)
    # -------------------- header --------------------
    print(line)
    tem_head = '|'.join(['{:^15}'.format(i) for i in title[:-1]])
    tem_head += '|{:^45}'.format(title[-1])
    print(tem_head)
    print(line)
    # ------------------ end header ------------------
    newF = []
    if isinstance(anFeatures, dict):
        # keys encode '<rank>_pk<position>'; values hold the rho range
        for keys, items in anFeatures.items():
            rrpos = keys.replace('_pk', '')
            rank = rrpos[0]
            pos = rrpos[1:]
            newF.append([rank, min(items), pos, items])
    elif isinstance(anFeatures, list):
        newF = [anFeatures]

    for anFeatures in newF:
        strfeatures = '|'.join(['{:^15}'.format(str(i))
                                for i in anFeatures[:-1]])
        try:
            iter(anFeatures[-1])
        # BUGFIX: narrowed from a bare `except`; iter() raises TypeError on
        # non-iterables, nothing else should be swallowed here.
        except TypeError:
            strfeatures += '|{:^45}'.format(str(anFeatures[-1]))
        else:
            strfeatures += '|{:^45}'.format(
                ''.join(['{} '.format(str(i)) for i in anFeatures[-1]]))
        print(strfeatures)
    print(line)
def select_anomaly ( rhoa_array, pos_array=None, auto=True,
                    dipole_length =10., **kws ) :
    """
    Select the anomaly value from `rhoa_array` and find its boundaries if
    ``auto`` is set to ``True``. If `auto` is ``False``, it's usefull to
    provide the anomaly boundaries from station position. Change the argument
    `dipole_length` i.e. the distance between measurement electrode is not
    equal to ``10``m else give the `pos_array`. If the `pos_array` is given,
    the `dipole_length` will be recomputed.

    :note:  If the `auto` param is ``True``, the automatic computation will
            give at most three best animalies ranking according
            to the resitivity value.

    :param rhoa_array: The apparent resistivity value of :ref:`erp`
    :type rho_array: array_like

    :param pos_array: The array of station position in meters
    :type pos_array: array_like

    :param auto:
        Automaticaly of manual computation to select the best anomaly point.
        Be sure if `auto` is set to ``False`` to provide the anomaly boundary
        by setting `pos_bounds` :

            pos_bounds=(90, 130)

       where :math:`90` is the `pk_min` and :math:`130` is the `pk_max`
       If `pos_bounds` is not given an station error will probably occurs
       from :class:`~utils.exceptions.WATexError_station`.

    :param dipole_length:
        Is the distance between two closest measurement. If the value is known
        it's better to provide it and don't need to provied a `pos_array`
        value.
    :type dipole_length: float

    :param pos_bounds:
        Is the tuple value of anomaly boundaries composed of `pk_min` and
        `pk_max`. Please refer to :doc:`compute_power`. When provided
        the `pos_bounds` value, please set `the dipole_length` to accurate
        the computation of :func:`compute_power`.

    :return:
        - *rhoa*: The app. resistivity value of the selected anomaly
        - `pk_min` and the `pk_max`: refer to :doc:`compute_power`.
        - `rhoa_max` and `rhoa_min`: refer to :doc:`compute_magnitude`
    """
    pos_bounds =kws.pop("pos_bounds", (None, None))
    anom_pos = kws.pop('pos_anomaly', None)
    display_infos =kws.pop('display', False)

    # --- manual mode: caller supplies the anomaly bounds ---
    if auto is False :
        if None in pos_bounds or pos_bounds is None :
            # NOTE(review): message typo 'Plase' and missing space between
            # the concatenated fragments are historical; kept as-is.
            raise Wex.WATexError_site('One position is missed'
                                      'Plase provided it!')

        pos_bounds = np.array(pos_bounds)
        pos_min, pos_max  = pos_bounds.min(), pos_bounds.max()

        # get the res from array
        dl_station_loc = np.arange(0, dipole_length * len(rhoa_array),
                                   dipole_length)
        # then select rho range
        # (assumes pos_min/pos_max fall exactly on station locations --
        # np.where would return an empty match otherwise; TODO confirm)
        ind_pk_min = int(np.where(dl_station_loc==pos_min)[0])
        ind_pk_max = int(np.where(dl_station_loc==pos_max)[0])
        rhoa_range = rhoa_array [ind_pk_min:ind_pk_max +1]
        pk, res= find_pk_from_selectedAn(an_res_range=rhoa_range,
                                         pos=pos_bounds,
                                         selectedPk= anom_pos)
        pk = int(pk.replace('pk', ''))
        rhoa = rhoa_array[int(np.where(dl_station_loc == pk )[0])]
        rhoa_min = rhoa_array[int(np.where(dl_station_loc == pos_min )[0])]
        rhoa_max = rhoa_array[int(np.where(dl_station_loc == pos_max)[0])]

        rhoa_bounds = (rhoa_min, rhoa_max)

        return {'1_pk{}'.format(pk):
                (pk, rhoa, pos_bounds, rhoa_bounds, res)}

    # --- automatic mode: rank the three lowest-resistivity anomalies ---
    if auto:
        bestSelectedDICT, anpks, \
            collectanlyBounds, min_pks = compute_lower_anomaly(
                erp_array= rhoa_array,
                station_position= pos_array, step= dipole_length,
                display_infos=display_infos )

        # one entry per ranked anomaly; values come from find_pkfeatures
        return {key: find_pkfeatures (anom_infos= bestSelectedDICT,
                                      anom_rank= ii+1, pks_rhoa_index=min_pks,
                                      dl=dipole_length)
                for ii, (key , rho_r) in enumerate(bestSelectedDICT.items())
                }
def find_pkfeatures(anom_infos, anom_rank, pks_rhoa_index, dl):
    """
    Recover the station (pk) features of one ranked anomaly.

    :param anom_infos:
        Dictionary of best anomaly points computed from
        :func:`compute_lower_anomaly` when `pk_bounds` is not given;
        keys look like '<rank>_pk<position>'.

    :param anom_rank: automatic rank of the anomaly to extract (1-based).

    :param pks_rhoa_index:
        List of (resistivity value, index) tuples of the selected anomalies
        in the whole :ref:`erp` line, e.g. ``(80., 17)``.

    :param dl:
        Distance between two measurements (`dipole_length`); provide it
        if the *default* value is not right.

    :returns: (pk, rhoa, pos_bounds, rhoa_bounds, resistivity range) --
        see :doc:`select_anomaly`.
    """
    prefix = '{}_pk'.format(anom_rank)
    # locate the dict entry carrying the requested rank
    for key in anom_infos:
        if prefix in key:
            pk = float(key.replace(prefix, ''))
            rhoa = list(pks_rhoa_index[anom_rank - 1])[0]
            codec = key
            break

    res_range = anom_infos[codec]
    hits = np.where(res_range == rhoa)[0]
    if len(hits) == 0:
        hits = 0
    n_left = len(res_range[:int(hits)])
    n_right = len(res_range[int(hits):])
    # project the counts of samples on each side back to positions
    pk_min = pk - n_left * dl
    pk_max = pk + (n_right - 1) * dl
    return (pk, rhoa, (pk_min, pk_max),
            (res_range[0], res_range[-1]), res_range)
def find_pkBounds(pk, rhoa, rhoa_range, dl=10.):
    """
    Find the station-position boundaries of an anomaly on the :ref:`erp`
    line. Useful to get the boundary indexes for :ref:`erp` normalisation
    when computing `anr` or else.

    :param pk: selected anomaly station value (or a '<rank>_pk<pos>' string)
    :type pk: float

    :param rhoa: selected anomaly value in ohm.m
    :type rhoa: float

    :param rhoa_range: selected anomaly values from `pk_min` to `pk_max`
    :type rhoa_range: array_like

    :param dl: dipole length -- see :doc:`find_pkfeatures`

    :returns: (pk_min, pk_max) station-position boundaries

    :Example:

        >>> from watex.utils.wmathandtricks import find_pkBounds
        >>> find_pkBounds(pk=110, rhoa=137,
                          rhoa_range=np.array([175,132,137,139,170]))
    """
    if isinstance(pk, str):
        # strings like '1_pk110': drop the rank digit and the '_pk' tag
        pk = float(pk.replace(pk[0], '').replace('_pk', ''))

    hits = np.where(rhoa_range == rhoa)[0]
    ix = int(hits) if len(hits) else 0
    # samples to the left/right of the anomaly value, projected to meters
    pk_min = pk - len(rhoa_range[:ix]) * dl
    pk_max = pk + (len(rhoa_range[ix:]) - 1) * dl
    return pk_min, pk_max
def wrap_infos(phrase, value='', underline='-', unit='',
               site_number='', **kws):
    """Pretty-print one anomaly-detail line framed by horizontal rules.

    :param phrase: description text printed first
    :param value: value associated with the phrase
    :param underline: character used to draw the rules
    :param unit: unit appended after `value`
    :param site_number: trailing site identifier
    :keyword repeat: rule width in characters (default 77)
    :keyword inter+: extra text inserted before the site number
    :keyword begin_phrase: leading marker (default '--|>')
    :keyword on: nothing is printed unless set to ``True``
    :returns: ``''`` when display is off, otherwise ``None``
    """
    repeat = kws.pop('repeat', 77)
    intermediate = kws.pop('inter+', '')
    begin_phrase_mark = kws.pop('begin_phrase', '--|>')
    on = kws.pop('on', False)
    if not on:
        # display disabled: keep the historical '' sentinel return
        return ''
    rule = underline * repeat
    print(rule)
    print('{0} {1:<50}'.format(begin_phrase_mark, phrase),
          '{0:<10} {1}'.format(value, unit),
          '{0}'.format(intermediate), "{}".format(site_number))
    print(rule)
def drawn_anomaly_boundaries2(erp_data, appRes, index):
    """
    Delimit an anomaly inside an ERP profile and return it together with
    its boundaries.

    :param erp_data: erp profile
    :type erp_data: array_like or list

    :param appRes: resistivity value of the anomaly minimum (pk)
    :type appRes: float

    :param index: index of the anomaly minimum in `erp_data`
    :type index: int

    :return: (appRes, index, anomaly-with-boundaries 1-D array)
    :rtype: tuple
    """
    # side flag: 0 -> both sides, 1 -> right side only (minimum at start),
    # 2 -> left side only (minimum at the end of the line)
    f = 0
    if index == 0:
        f = 1
    elif appRes == erp_data[-1]:
        f = 2

    def loop_sideBound(term):
        """
        Walk away from the anomaly minimum while resistivity keeps rising.

        :param term: left or right branch of the anomaly.
        :type term: array
        :return: the rising side bar.
        :rtype: array_like
        """
        tem_drawn = []
        maxT = 0
        for tem_rho in term:
            diffRes_betw_2pts = tem_rho - appRes
            if diffRes_betw_2pts > maxT:
                maxT = diffRes_betw_2pts
                tem_drawn.append(tem_rho)
            elif diffRes_betw_2pts < maxT:
                break
        return np.array(tem_drawn)

    # BUGFIX: the left branch must also be computed when f == 0 (interior
    # anomaly); the previous version only handled f == 2, leaving
    # `left_limit` undefined (NameError) for interior anomalies. A dead
    # backward-counting loop, whose result was immediately overwritten,
    # was removed as well.
    if f == 0 or f == 2:
        left_term = erp_data[:index][::-1]  # flip left term for looping
        # flip again to keep the original order
        left_limit = loop_sideBound(term=left_term)[::-1]
    if f == 0 or f == 1:
        right_term = erp_data[index:]
        right_limit = loop_sideBound(right_term)

    # concat right and left to get the complete anomaly
    if f == 2:
        anomalyBounds = np.append(left_limit, appRes)
    elif f == 1:
        # BUGFIX: return a 1-D array; the previous extra list nesting
        # produced a (1, n) 2-D array, unlike drawn_anomaly_boundaries.
        anomalyBounds = np.array([appRes] + right_limit.tolist())
    else:
        left_limit = np.append(left_limit, appRes)
        anomalyBounds = np.concatenate((left_limit, right_limit))

    return appRes, index, anomalyBounds
def getdfAndFindAnomalyBoundaries(df):
    """
    Define anomaly boundary `upper bound` and `lower bound` from
    :ref:`ves` location.

    :param df: pandas dataframe containing at least the columns
        'pk', 'x', 'y', 'rho'; extra columns may hold the sounding
        location and the anomaly-bound markers.

    :returns: tuple (autoOption, shape, type_, ves_loc, posMinMax, data_df)

        - `autoOption`: triggers the automatic option when nothing is
          specified in the excelsheet.
        - `shape`: anomaly shape marker ('V','W','U','H','M','C','K')
          when present in the extra columns, else ``None``.
        - `type_`: anomaly type marker ('EC','NC','CP','CB2P') when
          present, else ``None``.
        - `ves_loc`: sounding curve location at pk (or ``None``).
        - `posMinMax`: anomaly boundaries composed of ``lower`` and
          ``upper`` bounds (or ``None``).
        - `data_df`: the first four columns ('pk','x','y','rho').

        Specific names can be used to define lower and upper bounds::

            `lower`: 'lower', 'inf', 'min', '1' or 'low'
            `upper`: 'upper', 'sup', 'maj', 'max', '2' or 'up'

        To define the sounding location, use::

            `ves`: 'ves', 'se', 'sond', 'vs', 'loc', '0' or 'dl'
    """
    shape_ = ['V', 'W', 'U', 'H', 'M', 'C', 'K']
    type__ = ['EC', 'NC', 'CP', 'CB2P']
    # - ``EC`` for Extensive conductive.
    # - ``NC`` for narrow conductive.
    # - ``CP`` for conductive plane.
    # - ``CB2P`` for contact between two planes.
    shape = None
    type_ = None

    def recoverShapeOrTypefromSheet(listOfAddedArray, param):
        """ Loop the arrays and detect whether a marker from `param` is given.

        :param listOfAddedArray: all added array values except
            'pk', 'x', 'y', 'rho' (list of column arrays).
        :param param: the candidate markers, either `shape_` or `type__`.

        :returns:
            - the matched marker ('V', ..., 'K' or 'EC', ..., 'CB2P'),
              or ``None`` when absent
            - listOfAddedArray with the matched cell blanked to np.nan
        """
        param_ = None
        for jj, colarray in enumerate(listOfAddedArray[::-1]):
            tem_ = [str(ss).upper().strip() for ss in list(colarray)]
            for ix, elem in enumerate(tem_):
                for param_elm in param:
                    if elem == param_elm:
                        # blank the matched cell in place (the reversed list
                        # holds references to the same arrays)
                        listOfAddedArray[::-1][jj][ix] = np.nan
                        return param_elm, listOfAddedArray
        return param_, listOfAddedArray

    def mergeToOne(listOfColumns, _df):
        """ Merge the data of several extra columns into one array.

        :param listOfColumns: columns names
        :param _df: dataframe from which the data are retrieved
        """
        new_array = np.full((_df.shape[0],), np.nan)
        listOfColumnData = [_df[name].to_numpy() for name in listOfColumns]
        # loop from backward so we keep the most important to the first row
        # close to the main df composed of `pk`, `x`, `y`, and `rho`.
        # find the shape and type markers first
        shape, listOfColumnData = recoverShapeOrTypefromSheet(
            listOfColumnData, param=shape_)
        type_, listOfColumnData = recoverShapeOrTypefromSheet(
            listOfColumnData, param=type__)

        for colarray in listOfColumnData[::-1]:
            for ix, val in enumerate(colarray):
                try:
                    if not np.isnan(val):
                        new_array[ix] = val
                except (TypeError, ValueError):
                    # np.isnan chokes on non-numeric cells (e.g. strings);
                    # such cells are skipped, as before.
                    pass
        return shape, type_, new_array

    def retrieve_ix_val(array):
        """ Retrieve the marker indexes used to build `posMinMax`.

        :param array: array of the main column containing the anomaly
            definitions or a sounding curve location like::

                sloc = [NaN, 'low', NaN, NaN, NaN, 'ves', NaN,
                        NaN, 'up', NaN, NaN, NaN]

            `low`, `ves` and `up` are the lower boundary, the electric
            sounding and the upper boundary of the selected anomaly
            respectively.
            For instance, if dipole_length is `10` m, the location (`pk`)
            of `low`, `ves` and `up` are 10, 50 and 80 m respectively.
            `posMinMax` = (10, 80)
        """
        lower_ix = None
        upper_ix = None
        ves_ix = None

        array = array.reshape((array.shape[0],))
        for ix, val in enumerate(array):
            for low, up, vloc in zip(
                    ['lower', 'inf', 'min', 'min', '1', 'low'],
                    ['upper', 'sup', 'maj', 'max', '2', 'up'],
                    ['ves', 'se', 'sond', 'vs', 'loc', '0', 'dl']):
                try:
                    # BUGFIX: `np.float` was removed in NumPy >= 1.24; the
                    # builtin `float` is the documented replacement.
                    floatNaNor123 = float(val)
                except (TypeError, ValueError):
                    # non-numeric cell: match the textual markers
                    if val.lower().find(low) >= 0:
                        lower_ix = ix
                        break
                    elif val.lower().find(up) >= 0:
                        upper_ix = ix
                        break
                    elif val.lower().find(vloc) >= 0:
                        ves_ix = ix
                        break
                else:
                    # numeric markers: 1 -> lower, 2 -> upper, 0 -> ves
                    if floatNaNor123 == 1:
                        lower_ix = ix
                        break
                    elif floatNaNor123 == 2:
                        upper_ix = ix
                        break
                    elif floatNaNor123 == 0:
                        ves_ix = ix
                        break
        return lower_ix, ves_ix, upper_ix

    # consider np.inf as NaN. The option was deprecated in pandas 2.1 and
    # removed in pandas 3.0, so guard the assignment for forward
    # compatibility (pd.errors.OptionError derives from these two).
    try:
        pd.options.mode.use_inf_as_na = True
    except (AttributeError, KeyError):
        pass

    _autoOption = False  # set automatic to False; turned on when neither
    # posMinMax nor the anomaly location `ves` is found.
    posMinMax = None
    # locate the station-position column among the first columns
    for sl in ['pk', 'sta', 'loc']:
        for val in df.columns:
            if val.lower() == sl:
                pk_series = df[val].to_numpy()
                break

    listOfAddedColumns = df.iloc[:, 4:].columns

    if len(listOfAddedColumns) == 0:
        return True, shape, type_, None, posMinMax, df

    df_ = df.iloc[:, 4:]
    # check whether all remaining dataframe values are `NaN` values
    if len(list(df_.columns[df_.isna().all()])) == len(listOfAddedColumns):
        # if yes, trigger the auto option
        return True, shape, type_, None, posMinMax, df.iloc[:, :4]

    # get the column names holding any NaN value
    sloc_column = list(df_.columns[df_.isna().any()])
    # if a column contains some np.nan, the sloc column is found
    sloc_values = df_[sloc_column].to_numpy()

    if len(sloc_column) > 1:
        # merge the extra columns into a single marker array
        shape, type_, sloc_values = mergeToOne(sloc_column, df_)

    lower_ix, ves_ix, upper_ix = retrieve_ix_val(sloc_values)

    # if `lower` and `upper` bounds are not found, then start or end limits
    # of the selected anomaly default to the sounding-curve position (pk).
    if lower_ix is None:
        lower_ix = ves_ix
    if upper_ix is None:
        upper_ix = ves_ix

    if (lower_ix and upper_ix) is None:
        posMinMax = None
    if posMinMax is None and ves_ix is None:
        _autoOption = True
    else:
        posMinMax = (pk_series[lower_ix], pk_series[upper_ix])

    if ves_ix is None:
        ves_loc = None
    else:
        ves_loc = pk_series[ves_ix]

    return _autoOption, shape, type_, ves_loc, posMinMax, df.iloc[:, :4]
@deprecated('Deprecated function to `:func:`watex.core.erp.get_type`'
            ' more efficient using median and index computation. It will '
            'probably deprecate soon for neural network pattern recognition.')
def get_type (erp_array, posMinMax, pk, pos_array, dl):
    """
    Classify an anomaly from the apparent resistivity distribution
    around it.

    :param erp_array: app. resistivity values of the whole `erp` line
    :type erp_array: array_like

    :param posMinMax: selected anomaly start/end positions
    :type posMinMax: list or tuple or nd.array(1,2)

    :param pk: position of the selected anomaly in meters
    :type pk: float or int

    :param pos_array: station locations (measurement positions)
    :type pos_array: array_like

    :param dl: distance between two receiver electrodes (dipole length, m)

    :returns: one of ``'EC'`` (extensive conductive), ``'NC'`` (narrow
        conductive), ``'CP'`` (conductive plane) or ``'CB2P'`` (contact
        between two planes).
    """
    # index of the anomaly station on the line
    station_ix = int(np.where(pos_array == pk)[0])
    med_res = np.median(erp_array)
    left_mean = erp_array[:station_ix + 1].mean()
    right_mean = erp_array[station_ix:].mean()

    # both flanks below the line median -> contact between two planes
    if left_mean < med_res and right_mean < med_res:
        return 'CB2P'
    # both flanks at or above the median: anomaly width decides
    # narrow vs extensive conductive
    if left_mean >= med_res and right_mean >= med_res:
        width = max(posMinMax) - min(posMinMax)
        if dl <= width <= 5 * dl:
            return 'NC'
        if width > 5 * dl:
            return 'EC'
        return 'CP'
    # mixed flanks: default classification
    return 'CP'
if __name__=='__main__':
    # Ad-hoc developer harness: the successive `path` assignments are
    # leftover experiments; only the last one is effective.
    path = 'data/erp/l10_gbalo.xlsx' # ztepogovogo_0
    path= r'F:\repositories\watex\data\Bag.main&rawds\ert_copy\nt\b1_5.xlsx'
    path = 'data/erp/test_anomaly.xlsx'
    # last sheet column holds the apparent resistivity values
    data = pd.read_excel(path).to_numpy()[:, -1]
    df = pd.read_excel(path)

    # autotrig, shape ,type_, indexanom , posMinMax, newdf = getdfAndFindAnomalyBoundaries(df)
    # print(autotrig, shape,type_, indexanom , posMinMax, newdf)
| 34.804766 | 95 | 0.559939 | # -*- coding: utf-8 -*-
"""
===============================================================================
Copyright (c) 2021 Kouadio K. Laurent
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
===============================================================================
.. synopsis:: 'watex.utils.wmathandtricks'
Module for computing
Created on Mon Jun 21 14:43:25 2021
.. _electrical-resistivity-profile::`erp`
.. _vertical-electrical-sounding::`ves`
.. _station-position::`pk`
.._anomaly-boundaries:`anBounds`
@author: @Daniel03
"""
import os
import warnings
import numpy as np
import pandas as pd
from scipy.signal import argrelextrema
from scipy.interpolate import interp1d as sp1d
from ..utils._watexlog import watexlog
from ..utils.decorator import deprecated
import watex.utils.exceptions as Wex
_logger =watexlog.get_watex_logger(__name__)
def compute_lower_anomaly(erp_array, station_position=None,
                          step=None, **kws):
    """
    Rank the (up to three) lowest-resistivity anomalies of an ERP line and
    delimit each one on the profile.

    :param erp_array: array of apparent resistivity profile
    :type erp_array: array_like

    :param station_position: array of station position (survey); if not given
                    and `step` is known, the `station_position` is computed
                    automatically from `step`
    :type station_position: array_like

    :param step: The distance between measurements in meters. If given, the
        `station_position` is recomputed from it.

    :returns: * `bestSelectedDICT`: dict containing the best anomalies
                with the anomaly resistivity ranges.
              * `anpks`: Main positions of the best selected anomalies
              * `collectanlyBounds`: list of arrays of selected anomaly values
              * `min_pks`: list of tuples (minVal, index) of the best
                anomaly points
    :rtype: tuple

    :Example:

        >>> from watex.utils.wmathandtricks import compute_lower_anomaly
        >>> import pandas as pd
        >>> path_to_= 'data/l10_gbalo.xlsx'
        >>> dataRes=pd.read_excel(erp_data).to_numpy()[:,-1]
        >>> anomaly, *_ =  compute_lower_anomaly(erp_array=data, step =10)
        >>> anomaly
    """
    # NOTE(review): the accepted keyword really is the misspelled
    # 'diplay_infos' -- a caller passing 'display_infos' is silently ignored.
    display_infos= kws.pop('diplay_infos', False)
    # got minumum of erp data
    collectanlyBounds=[]
    if step is not None:
        station_position = np.arange(0, step * len(erp_array), step)
    # up to three (value, index) minima of the profile
    min_pks= get_minVal(erp_array) # three min anomaly values
    # compute new_pjk
    # find differents anomlies boundaries
    for ii, (rho, index) in enumerate(min_pks) :
        _, _, anlyBounds= drawn_anomaly_boundaries(erp_data = erp_array,
                                 appRes = rho, index=index)
        collectanlyBounds.append(anlyBounds)
    # when positions are unknown, use '?' placeholders instead of meters
    if station_position is None :
        pks =np.array(['?' for ii in range(len(erp_array))])
    else : pks =station_position
    # NOTE(review): relies on numpy dtype/string comparison; only numeric
    # position arrays produce real pk values here.
    if pks.dtype in ['int', 'float']:
        anpks =np.array([pks[skanIndex ] for
                         (_, skanIndex) in min_pks ])
    else : anpks ='?'
    bestSelectedDICT={}
    # label each anomaly '<rank>_pk<position>'
    for ii, (pk, anb) in enumerate(zip(anpks, collectanlyBounds)):
        bestSelectedDICT['{0}_pk{1}'.format(ii+1, pk)] = anb
    if display_infos:
        print('{0:+^100}'.format(
            ' *Best Conductive anomaly points (BCPts)* '))
        fmtAnText(anFeatures=bestSelectedDICT)
    return bestSelectedDICT, anpks, collectanlyBounds, min_pks
def get_minVal(array):
    """
    Return up to the three smallest values of *array* together with their
    indexes, as a list of ``(value, index)`` tuples.

    :param array: array of values
    :type array: array_like

    :returns: at most three (minimum value, index) pairs
    :rtype: list
    """
    selected = []
    # Coerce scalars and plain values into a one-element numpy array.
    if not isinstance(array, (list, tuple, np.ndarray)):
        if isinstance(array, float):
            array = np.array([array])
        else:
            try:
                array = np.array([float(array)])
            except:
                raise Wex.WATexError_float('Could not convert %s to float!')
    try:
        # Preferred option: strictly-local minima via scipy.
        local_min_idx = argrelextrema(array, np.less)[0]
        candidates = np.array([array[int(pos)] for pos in local_min_idx])
        if len(local_min_idx) == 0:
            # No local minimum (e.g. monotonic profile): fall back to the
            # global minimum of the array.
            loc = np.where(array == array.min())
            if len(loc) > 1:
                loc = loc[0]
            candidates = array[int(loc)]
    except:
        # Fallback (archaic computation): sort the whole profile.
        candidates = np.sort(array)
    else:
        candidates = np.sort(candidates)

    picked = 0
    for rank, value in enumerate(candidates):
        if picked >= 3:
            selected = selected[:3]
            break
        matches = np.where(array == value)[0]
        if len(matches) == 1:
            selected.append((array[int(matches)],
                             int(matches)))
            picked += rank
        elif len(matches) > 1:
            # Duplicated minimum value: keep every occurrence for consistency.
            for pos in matches:
                selected.append((array[int(pos)],
                                 int(pos)))
            picked = len(selected)
    # For consistency keep only the three best minima.
    if len(selected) > 3:
        selected = selected[:3]
    return selected
def drawn_anomaly_boundaries(erp_data, appRes, index):
    """
    Delimit the anomaly centred at *index* and return it with its
    left/right boundaries.

    :param erp_data: erp profile
    :type erp_data: array_like or list

    :param appRes: resistivity value of minimum pk anomaly
    :type appRes: float

    :param index: index of minimum pk anomaly
    :type index: int

    :return: (appRes, index, anomaly values including boundaries)
    :rtype: tuple
    """
    # side_flag selects which side(s) of the profile must be scanned:
    # 0 -> both sides, 1 -> right side only, 2 -> left side only.
    side_flag = 0
    if index == 0:
        side_flag = 1
    elif appRes == erp_data[-1]:
        side_flag = 2

    def _climb(side_values):
        """Walk away from the anomaly while resistivity keeps rising."""
        collected = []
        best_gap = 0
        for value in side_values:
            gap = value - appRes
            if gap > best_gap:
                best_gap = gap
                collected.append(value)
            elif gap < best_gap:
                # resistivity dropped again: boundary reached
                break
        return np.array(collected)

    # Break the erp profile apart around the anomaly.
    if side_flag in (0, 2):
        # scan leftwards: reverse, climb, then restore original order
        left_part = _climb(erp_data[:index][::-1])[::-1]
    if side_flag in (0, 1):
        right_part = _climb(erp_data[index:])
    # Concatenate left and right parts to rebuild the complete anomaly.
    if side_flag == 2:
        bounds = np.append(left_part, appRes)
    elif side_flag == 1:
        bounds = np.array([appRes] + right_part.tolist())
    else:
        bounds = np.concatenate((np.append(left_part, appRes), right_part))
    return appRes, index, bounds
def defineAnomaly(erp_data, station_position=None, pks=None,
                  dipole_length=10., **kwargs):
    """
    Select the different anomalies. If `pks` is not given, the best three
    anomalies on the survey line are computed automatically.

    :param erp_data: Electrical resistivity profiling
    :type erp_data: array_like

    :param pks: station positions anomaly boundaries (pk_begin, pk_end).
                If more than one anomaly is selected, set `pks` as a dict
                where keys are the anomaly numbers and values the bounds
    :type pks: list or dict

    :param dipole_length: Distance between two measurements in meters;
                recomputed from `station_position` when the latter is given
    :type dipole_length: float

    :param station_position: station position array
    :type station_position: array_like

    :keyword selectedPk: user-forced anomaly position, forwarded to
        :func:`find_pk_from_selectedAn`

    :return: dict mapping '<rank>_pk<position>' to the anomaly bounds
    """
    selectedPk = kwargs.pop('selectedPk', None)
    bestSelectedDICT = {}
    if station_position is not None:
        # BUGFIX: the original computed ``len(station_position - 1)`` which
        # subtracts 1 element-wise and hence divided by the *number of
        # stations* instead of the number of intervals (n - 1).
        dipole_length = ((station_position.max() - station_position.min())
                         / (len(station_position) - 1))
    if station_position is None:
        station_position = np.arange(0, dipole_length * len(erp_data),
                                     dipole_length)

    def getBound(pksbounds):
        """
        Extract the apparent-resistivity values lying inside *pksbounds*.

        :param pksbounds: Anomaly boundaries
        :type pksbounds: list of array_like

        :returns: array of appRes values of the anomaly
        :rtype: array_like
        """
        # BUGFIX: the original compared each bound against pksbounds itself
        # (a tautology, so the check never fired) and never formatted the
        # error message. Validate against the survey line instead.
        for spk in pksbounds:
            if not station_position.min() <= spk <= station_position.max():
                raise Wex.WATexError_AnomalyBounds(
                    'Bound <{0}> provided is out of range !'
                    'Dipole length is set to = {1} m.'
                    ' Please set a new bounds.'.format(spk, dipole_length))
        pkinf = np.where(station_position == pksbounds.min())[0]
        pksup = np.where(station_position == pksbounds.max())[0]
        anomBounds = erp_data[int(pkinf):int(pksup) + 1]
        return anomBounds

    if pks is None:
        # Automatic mode: rank the (up to three) best conductive zones.
        bestSelectedDICT, *_ = compute_lower_anomaly(
            erp_array=erp_data, step=dipole_length,
            station_position=station_position)
    elif isinstance(pks, list):
        pks = np.array(sorted(pks))
        collectanlyBounds = getBound(pksbounds=pks)
        # get the length of selected anomalies and compute the station
        # locations which compose the bounds (Xbegin and Xend)
        pkb, *_ = find_pk_from_selectedAn(
            an_res_range=collectanlyBounds, pos=pks,
            selectedPk=selectedPk)
        bestSelectedDICT = {'1_{}'.format(pkb): collectanlyBounds}
    elif isinstance(pks, dict):
        for ii, (keys, values) in enumerate(pks.items()):
            if isinstance(values, list):
                values = np.array(values)
            collectanlyBounds = getBound(pksbounds=values)
            pkb, *_ = find_pk_from_selectedAn(
                an_res_range=collectanlyBounds, pos=pks,
                selectedPk=selectedPk)
            bestSelectedDICT['{0}_{1}'.format(ii + 1, pkb)] = collectanlyBounds
    return bestSelectedDICT
def find_pk_from_selectedAn(an_res_range, pos=None, selectedPk=None):
    """
    Select the main :ref:`pk` from both :ref:`anBounds`.

    :param an_res_range: anomaly resistivity range on :ref:`erp` line.
    :type an_res_range: array_like

    :param pos: position of anomaly boundaries (inf and sup):
                anBounds = [90, 130]
                - 130 is max boundary and 90 the min boundary
    :type pos: list

    :param selectedPk:
        User can set its own position of the right anomaly. Be sure that
        the value provided is a valid position. It is not recomputed
        provided that `pos` is not `None`.

    :return: anomaly station position, e.g. 'pk{position}', and the
        resistivity range.
    :rtype: tuple

    :Example:

        >>> from watex.utils.wmathandtricks import find_pk_from_selectedAn
        >>> resan = np.array([168,130, 93,146,145])
        >>> pk= find_pk_from_selectedAn(
        ...    resan, pos=[90, 13], selectedPk= 'str20')
        >>> pk
    """
    #compute dipole length from pos
    if pos is not None :
        if isinstance(pos, list):
            pos =np.array(pos)
    if pos is None and selectedPk is None :
        raise Wex.WATexError_parameter_number(
            'Give at least the anomaly boundaries'
            ' before computing the selected anomaly position.')

    if selectedPk is not None :  # mean is given
        # Parse `selectedPk` to an integer station position `sPk`:
        # '110' -> 110, 'pk110' -> 110 (digits are extracted).
        if isinstance(selectedPk, str):
            if selectedPk.isdigit() :
                sPk= int(selectedPk)
            elif selectedPk.isalnum():
                oss = ''.join([s for s in selectedPk
                    if s.isdigit()])
                sPk =int(oss)
            else :
                # Non-alphanumeric string (e.g. '-20'): last-chance cast.
                # NOTE(review): on failure `sPk` stays unbound; that case is
                # deliberately caught below as UnboundLocalError.
                try :
                    sPk = int(selectedPk)
                except : pass
        if pos is not None : # then compare the sPk and ps value
            try :
                if not pos.min()<= sPk<=pos.max():
                    warnings.warn('Wrong position given <{}>.'
                                  ' Should compute new positions.'.
                                  format(selectedPk))
                    _logger.debug('Wrong position given <{}>.'
                                  'Should compute new positions.'.
                                  format(selectedPk))
            except UnboundLocalError:
                print("local variable 'sPk' referenced before assignment")
            else :
                # NOTE(review): even when the out-of-range warning fired,
                # the user-supplied position is returned as-is.
                return 'pk{}'.format(sPk ), an_res_range
        else :
            selectedPk='pk{}'.format(sPk )
            return selectedPk , an_res_range

    # Automatic mode: locate the minimum resistivity inside the bounds.
    if isinstance(pos, list):
        pos =np.array(pos)
    if isinstance(an_res_range, list):
        an_res_range =np.array(an_res_range)
    dipole_length = (pos.max()-pos.min())/(len(an_res_range)-1)
    tem_loc = np.arange(pos.min(), pos.max()+dipole_length, dipole_length)
    # find min value of  collected anomalies values
    locmin = np.where (an_res_range==an_res_range.min())[0]
    if len(locmin) >1 : locmin =locmin[0]
    pk_= int(tem_loc[int(locmin)]) # find the min pk
    selectedPk='pk{}'.format(pk_)
    return selectedPk , an_res_range
def fmtAnText(anFeatures=None, title=('Ranking', 'rho(Ω.m)',
                                      'position pk(m)',
                                      'rho range(Ω.m)'),
              **kwargs):
    """
    Print a formatted table of anomaly features.

    :param anFeatures: Anomaly features; either a dict mapping
        '<rank>_pk<position>' labels to resistivity ranges, or a single
        row given as a list ``[rank, rho, position, rho_range]``
    :type anFeatures: list or dict

    :param title: head lines of the table
    :type title: sequence

    :keyword inline: character used to draw separator lines (default '-')
    :keyword mlabels: width of the separator lines (default 100)

    :returns: None (the table is printed to stdout)

    :Example:

        >>> from watex.utils.wmathandtricks import fmtAnText
        >>> fmtAnText(anFeatures =[1,130, 93,(146,145, 125)])
    """
    # BUGFIX: `title` was a mutable (list) default argument; a tuple keeps
    # the same indexing/slicing behaviour without the shared-state hazard.
    inline = kwargs.pop('inline', '-')
    mlabel = kwargs.pop('mlabels', 100)
    line = inline * int(mlabel)
    # -------------------- header ----------------------------------------
    print(line)
    tem_head = '|'.join(['{:^15}'.format(i) for i in title[:-1]])
    tem_head += '|{:^45}'.format(title[-1])
    print(tem_head)
    print(line)
    # ----------------------- end header ---------------------------------
    newF = []
    if isinstance(anFeatures, dict):
        for keys, items in anFeatures.items():
            # split a '<rank>_pk<position>' label into rank and position
            # NOTE(review): only single-digit ranks parse correctly here
            # ('10_pk..' would yield rank '1') -- kept for compatibility.
            rrpos = keys.replace('_pk', '')
            rank = rrpos[0]
            pos = rrpos[1:]
            newF.append([rank, min(items), pos, items])
    elif isinstance(anFeatures, list):
        newF = [anFeatures]

    for anFeatures in newF:
        strfeatures = '|'.join(['{:^15}'.format(str(i))
                                for i in anFeatures[:-1]])
        try:
            # the last cell may be a scalar or an iterable range
            iter(anFeatures[-1])
        except TypeError:
            strfeatures += '|{:^45}'.format(str(anFeatures[-1]))
        else:
            strfeatures += '|{:^45}'.format(
                ''.join(['{} '.format(str(i)) for i in anFeatures[-1]]))
        print(strfeatures)
        print(line)
def select_anomaly ( rhoa_array, pos_array=None, auto=True,
                    dipole_length =10., **kws ) :
    """
    Select the anomaly value from `rhoa_array` and find its boundaries if
    ``auto`` is set to ``True``. If `auto` is ``False``, it's useful to
    provide the anomaly boundaries from station position. Change the argument
    `dipole_length` if the distance between measurement electrodes is not
    equal to ``10`` m, else give the `pos_array`. If the `pos_array` is given,
    the `dipole_length` will be recomputed.

    :note:  If the `auto` param is ``True``, the automatic computation will
            give at most three best anomalies ranked according
            to the resistivity value.

    :param rhoa_array: The apparent resistivity value of :ref:`erp`
    :type rhoa_array: array_like

    :param pos_array: The array of station position in meters
    :type pos_array: array_like

    :param auto:
        Automatic or manual computation to select the best anomaly point.
        Be sure if `auto` is set to ``False`` to provide the anomaly boundary
        by setting `pos_bounds` :

            pos_bounds=(90, 130)

        where :math:`90` is the `pk_min` and :math:`130` is the `pk_max`
        If `pos_bounds` is not given a station error will probably occur
        from :class:`~utils.exceptions.WATexError_station`.

    :param dipole_length:
        Is the distance between two closest measurements. If the value is
        known it's better to provide it, so there is no need to provide a
        `pos_array` value.
    :type dipole_length: float

    :param pos_bounds:
        Is the tuple value of anomaly boundaries composed of `pk_min` and
        `pk_max`. Please refer to :doc:`compute_power`. When providing
        the `pos_bounds` value, please set the `dipole_length` so the
        computation of :func:`compute_power` is accurate.

    :return:
        - *rhoa*: The app. resistivity value of the selected anomaly
        - `pk_min` and the `pk_max`: refer to :doc:`compute_power`.
        - `rhoa_max` and `rhoa_min`: refer to :doc:`compute_magnitude`
    """
    pos_bounds =kws.pop("pos_bounds", (None, None))
    anom_pos = kws.pop('pos_anomaly', None)
    display_infos =kws.pop('display', False)
    if auto is False :
        # Manual mode: the user must supply both anomaly boundaries.
        if None in pos_bounds or pos_bounds is None :
            raise Wex.WATexError_site('One position is missed'
                                      'Plase provided it!')
        pos_bounds = np.array(pos_bounds)
        pos_min, pos_max  = pos_bounds.min(), pos_bounds.max()
        # get the res from array
        dl_station_loc = np.arange(0, dipole_length * len(rhoa_array),
                                   dipole_length)
        # then select rho range
        # NOTE(review): raises TypeError when a bound does not fall exactly
        # on a computed station location (np.where returns an empty array).
        ind_pk_min = int(np.where(dl_station_loc==pos_min)[0])
        ind_pk_max = int(np.where(dl_station_loc==pos_max)[0])
        rhoa_range = rhoa_array [ind_pk_min:ind_pk_max +1]
        pk, res= find_pk_from_selectedAn(an_res_range=rhoa_range,
                                         pos=pos_bounds,
                                         selectedPk= anom_pos)
        pk = int(pk.replace('pk', ''))
        rhoa = rhoa_array[int(np.where(dl_station_loc == pk )[0])]
        rhoa_min = rhoa_array[int(np.where(dl_station_loc == pos_min )[0])]
        rhoa_max = rhoa_array[int(np.where(dl_station_loc == pos_max)[0])]
        rhoa_bounds = (rhoa_min, rhoa_max)
        return {'1_pk{}'.format(pk):
                (pk, rhoa, pos_bounds, rhoa_bounds, res)}
    if auto:
        # Automatic mode: rank (at most) the three best conductive zones.
        # NOTE(review): compute_lower_anomaly actually reads the misspelled
        # 'diplay_infos' key, so this display flag is silently dropped.
        bestSelectedDICT, anpks, \
            collectanlyBounds, min_pks = compute_lower_anomaly(
                erp_array= rhoa_array,
                station_position= pos_array, step= dipole_length,
                display_infos=display_infos )
        # Enrich each ranked anomaly with its pk features.
        return {key: find_pkfeatures (anom_infos= bestSelectedDICT,
                                      anom_rank= ii+1, pks_rhoa_index=min_pks,
                                      dl=dipole_length)
                for ii, (key , rho_r) in enumerate(bestSelectedDICT.items())
                }
def find_pkfeatures(anom_infos, anom_rank, pks_rhoa_index, dl):
    """
    Get the pk bounds of one ranked anomaly from the dictionary of best
    points.

    :param anom_infos:
        dictionary of best anomaly points computed from
        :func:`compute_lower_anomaly` when `pk_bounds` is not given;
        keys look like '<rank>_pk<position>'.

    :param anom_rank: rank of the anomaly to extract (1-based).

    :param pks_rhoa_index:
        list of (resistivity value, index) tuples of the selected
        anomalies on the whole :ref:`erp` line.

    :param dl:
        distance between two measurements (`dipole_length`).

    :returns: (pk, rhoa, (pk_min, pk_max), (rhoa_first, rhoa_last),
        anomaly values) -- see :func:`select_anomaly`.
    """
    # NOTE(review): assumes a key matching '<anom_rank>_pk' exists in
    # `anom_infos`; otherwise `pk`/`codec` would stay unbound.
    marker = '{}_pk'.format(anom_rank)
    for label in anom_infos.keys():
        if marker in label:
            pk = float(label.replace(marker, ''))
            rhoa = list(pks_rhoa_index[anom_rank - 1])[0]
            codec = label
            break
    rhoa_pos = np.where(anom_infos[codec] == rhoa)[0]
    if len(rhoa_pos) == 0:
        rhoa_pos = 0
    # Positions are reconstructed from the number of samples on each side
    # of the anomaly value, stepping by the dipole length.
    left_count = len(anom_infos[codec][:int(rhoa_pos)])
    pk_min = pk - left_count * dl
    right_count = len(anom_infos[codec][int(rhoa_pos):])
    pk_max = pk + (right_count - 1) * dl
    pos_bounds = (pk_min, pk_max)
    rhoa_bounds = (anom_infos[codec][0], anom_infos[codec][-1])
    return pk, rhoa, pos_bounds, rhoa_bounds, anom_infos[codec]
def find_pkBounds( pk , rhoa, rhoa_range, dl=10.):
    """
    Find station position boundaries indexed on the :ref:`erp` line. Useful
    to get the boundary indexes `pk_bound_indexes` for :ref:`erp`
    normalisation when computing `anr` or else.

    :param pk: Selected anomaly station value
    :type pk: float

    :param rhoa: Selected anomaly value in ohm.m
    :type rhoa: float

    :param rhoa_range: Selected anomaly values from `pk_min` to `pk_max`
    :type rhoa_range: array_like

    :param dl: dipole length, see :func:`find_pkfeatures`

    :returns: (pk_min, pk_max) boundary positions in meters

    :Example:

        >>> from watex.utils.wmathandtricks import find_pkBounds
        >>> find_pkBounds(pk=110, rhoa=137,
                          rhoa_range=np.array([175,132,137,139,170]))
    """
    if isinstance(pk, str):
        # NOTE(review): this strips the first character everywhere in the
        # string before removing '_pk'; it only behaves as intended for
        # labels like '1_pk110' when the rank digit does not reappear in
        # the position value -- kept as-is for backward compatibility.
        pk = float(pk.replace(pk[0], '').replace('_pk', ''))

    index_rhoa = np.where(rhoa_range == rhoa)[0]
    if len(index_rhoa) == 0:
        index_rhoa = 0
    elif len(index_rhoa) > 1:
        # BUGFIX: with several occurrences of `rhoa`, ``int()`` on a
        # multi-element array raised TypeError; anchor on the first match.
        index_rhoa = index_rhoa[0]

    leftlen = len(rhoa_range[:int(index_rhoa)])
    rightlen = len(rhoa_range[int(index_rhoa):])
    pk_min = pk - leftlen * dl
    pk_max = pk + (rightlen - 1) * dl
    return pk_min, pk_max
def wrap_infos(phrase, value='', underline='-', unit='',
               site_number='', **kws):
    """Pretty-print a framed information line about an anomaly.

    Returns '' (and prints nothing) unless the keyword `on` is truthy.
    """
    width = kws.pop('repeat', 77)
    extra = kws.pop('inter+', '')
    prefix = kws.pop('begin_phrase', '--|>')
    enabled = kws.pop('on', False)
    if not enabled:
        return ''
    print(underline * width)
    print('{0} {1:<50}'.format(prefix, phrase),
          '{0:<10} {1}'.format(value, unit),
          '{0}'.format(extra), "{}".format(site_number))
    print(underline * width)
def drawn_anomaly_boundaries2(erp_data, appRes, index):
    """
    Alternative implementation of :func:`drawn_anomaly_boundaries`:
    delimit the anomaly and return it with its boundaries.

    .. warning:: NOTE(review): when both sides must be computed (index > 0
       and appRes != erp_data[-1]), `left_limit` is never assigned and the
       final ``else`` branch raises NameError. Also unlike
       :func:`drawn_anomaly_boundaries`, the right-only case returns a
       nested (1, n) array. Kept unchanged pending clarification.

    :param erp_data: erp profile
    :type erp_data: array_like or list

    :param appRes: resistivity value of minimum pk anomaly
    :type appRes: float

    :param index: index of minimum pk anomaly
    :type index: int

    :return: anomaly boundary
    :rtype: list of array_like
    """
    f = 0 # flag to mention which part must be calculated
    if index ==0 :
        f = 1 # compute only right part
    elif appRes ==erp_data[-1]:
        f=2 # compute left part
    def loop_sideBound(term):
        """
        Loop one side of the anomaly and collect values while resistivity
        keeps rising away from `appRes`.

        :param term: array of the left or right side of the anomaly.
        :type term: array

        :return: side bar
        :type: array_like
        """
        tem_drawn =[]
        maxT=0
        for ii, tem_rho in enumerate(term) :
            diffRes_betw_2pts= tem_rho - appRes
            if diffRes_betw_2pts > maxT :
                maxT = diffRes_betw_2pts
                tem_drawn.append(tem_rho)
            elif diffRes_betw_2pts < maxT :
                # rho_limit = tem_rho
                break
        # print(tem_drawn)
        return np.array(tem_drawn)
    # first broke erp profile from the anomalies
    if f==2 : # compute the left part
        # flip array and start backward counting
        temp_erp_data = erp_data [::-1]
        sbeg = appRes   # initialize value
        for ii, valan in enumerate(temp_erp_data):
            if valan >= sbeg:
                sbeg = valan
            elif valan < sbeg:
                left_term = erp_data[ii:]
                break
        # NOTE(review): the loop result above is immediately overwritten by
        # the next assignment -- the backward scan is dead code.
        left_term = erp_data[:index][::-1] # flip left term  for looping
        # flip again to keep the order
        left_limit = loop_sideBound(term=left_term)[::-1]
    if f==0 or f ==1 :
        right_term= erp_data[index :]
        right_limit=loop_sideBound(right_term)
    # concat right and left to get the complete anomaly
    if f==2:
        anomalyBounds = np.append(left_limit,appRes)
    elif f ==1 :
        # NOTE(review): extra brackets produce a (1, n) array here, unlike
        # drawn_anomaly_boundaries which returns a flat array.
        anomalyBounds = np.array([[appRes]+ right_limit.tolist()])
    else:
        # NOTE(review): `left_limit` is unbound on this path (see warning
        # in the docstring).
        left_limit = np.append(left_limit, appRes)
        anomalyBounds = np.concatenate((left_limit, right_limit))
    return appRes, index, anomalyBounds
def getdfAndFindAnomalyBoundaries(df):
    """
    Define anomaly boundary `upper bound` and `lower bound` from
    :ref:`ves` location.

    :param df: pandas dataframe containing at least the columns
               'pk', 'x', 'y', 'rho'; extra columns may carry the sounding
               location, the anomaly bounds, and the anomaly shape/type.

    :returns: tuple of
        - `autoOption`: triggers the automatic option when nothing is
          specified in the excel sheet.
        - `shape`: anomaly shape ('V','W','U','H','M','C','K') if found.
        - `type_`: anomaly type ('EC','NC','CP','CB2P') if found.
        - `ves_loc`: sounding curve location at pk.
        - `posMinMax`: anomaly boundaries composed of ``lower`` and
          ``upper`` bounds.
        - the dataframe restricted to its four main columns.

        Specific names can be used to define lower and upper bounds::

            `lower`: 'lower', 'inf', 'min', '1' or 'low'
            `upper`: 'upper', 'sup', 'maj', 'max', '2' or 'up'

        To define the sounding location, one can use::

            `ves`: 'ves', 'se', 'sond', 'vs', 'loc', '0' or 'dl'
    """
    shape_ = ['V', 'W', 'U', 'H', 'M', 'C', 'K']
    type__ = ['EC', 'NC', 'CP', 'CB2P']
    # - ``EC`` for Extensive conductive.
    # - ``NC`` for narrow conductive.
    # - ``CP`` for conductive PLANE
    # - ``CB2P`` for contact between two planes.
    shape = None
    type_ = None

    def recoverShapeOrTypefromSheet(listOfAddedArray, param):
        """ Loop the added arrays and grab an anomaly shape/type marker.

        :param listOfAddedArray: all added array values except
            'pk', 'x', 'y', 'rho'.
        :param param: candidate descriptions, either `shape_` or `type__`.
        :returns: the marker found (or None) and the list of added arrays
            with the marker cell blanked to NaN.
        """
        param_ = None
        for jj, colarray in enumerate(listOfAddedArray[::-1]):
            tem_ = [str(ss).upper().strip() for ss in list(colarray)]
            for ix, elem in enumerate(tem_):
                for param_elm in param:
                    if elem == param_elm:
                        # retrieve the marker and replace it by NaN so the
                        # cell is not mistaken for a boundary flag later
                        listOfAddedArray[::-1][jj][ix] = np.nan
                        return param_elm, listOfAddedArray
        return param_, listOfAddedArray

    def mergeToOne(listOfColumns, _df):
        """ Merge the extra columns into a single 1-D numeric array.

        :param listOfColumns: names of the extra columns
        :param _df: dataframe holding those columns
        :returns: (shape, type_, merged array)
        """
        new_array = np.full((_df.shape[0],), np.nan)
        listOfColumnData = [_df[name].to_numpy() for name in listOfColumns]
        # strip the shape and the type markers first
        shape, listOfColumnData = recoverShapeOrTypefromSheet(
            listOfColumnData, param=shape_)
        type_, listOfColumnData = recoverShapeOrTypefromSheet(
            listOfColumnData, param=type__)
        # loop backward so the column closest to the main df
        # ('pk','x','y','rho') wins when several cells overlap
        for colarray in listOfColumnData[::-1]:
            for ix, val in enumerate(colarray):
                try:
                    if not np.isnan(val):
                        new_array[ix] = val
                except (TypeError, ValueError):
                    # non-numeric cell (e.g. a textual flag): skip it; the
                    # single-column textual case is handled by
                    # retrieve_ix_val on the raw column instead
                    pass
        return shape, type_, new_array

    def retrieve_ix_val(array):
        """ Retrieve the indexes of the lower bound, sounding location and
        upper bound, and build `posMinMax` boundaries from them.

        :param array: main column containing the anomaly definitions or a
            sounding curve location like::

                sloc = [NaN, 'low', NaN, NaN, NaN, 'ves', NaN,
                        NaN, 'up', NaN, NaN, NaN]

            `low`, `ves` and `up` are the lower boundary, the electric
            sounding and the upper boundary of the selected anomaly
            respectively. For instance, if dipole_length is `10` m, the
            location (`pk`) of `low`, `ves` and `up` are 10, 50 and 80 m
            respectively, hence `posMinMax` = (10, 80).
        """
        lower_ix = None
        upper_ix = None
        ves_ix = None
        array = array.reshape((array.shape[0],))
        for ix, val in enumerate(array):
            for low, up, vloc in zip(
                    ['lower', 'inf', 'min', 'min', '1', 'low'],
                    ['upper', 'sup', 'maj', 'max', '2', 'up'],
                    ['ves', 'se', 'sond', 'vs', 'loc', '0', 'dl']
                    ):
                try:
                    # BUGFIX: ``np.float`` was removed in NumPy >= 1.24;
                    # the builtin ``float`` is the documented replacement.
                    floatNaNor123 = float(val)
                except (TypeError, ValueError):
                    # textual marker: match it against the aliases
                    if val.lower().find(low) >= 0:
                        lower_ix = ix
                        break
                    elif val.lower().find(up) >= 0:
                        upper_ix = ix
                        break
                    elif val.lower().find(vloc) >= 0:
                        ves_ix = ix
                        break
                else:
                    # numeric flags: 1 -> lower, 2 -> upper, 0 -> ves
                    if floatNaNor123 == 1:
                        lower_ix = ix
                        break
                    elif floatNaNor123 == 2:
                        upper_ix = ix
                        break
                    elif floatNaNor123 == 0:
                        ves_ix = ix
                        break
        return lower_ix, ves_ix, upper_ix

    # ``use_inf_as_na`` makes pandas treat np.inf as NaN; the option was
    # deprecated and later removed, so set it on a best-effort basis only.
    try:
        pd.options.mode.use_inf_as_na = True
    except Exception:
        pass
    # unecesseray to specify the colum of sounding location.
    # dl =['drill', 'dl', 'loc', 'dh', 'choi']
    _autoOption = False  # set automatic to False; flipped on when neither
    # posMinMax nor the anomaly location `ves` can be found
    posMinMax = None
    # grab the station-position column
    # NOTE(review): if several of 'pk'/'sta'/'loc' columns exist, the last
    # match wins -- behaviour kept from the original implementation.
    for sl in ['pk', 'sta', 'loc']:
        for val in df.columns:
            if val.lower() == sl:
                pk_series = df[val].to_numpy()
                break
    listOfAddedColumns = df.iloc[:, 4:].columns
    if len(listOfAddedColumns) == 0:
        return True, shape, type_, None, posMinMax, df
    df_ = df.iloc[:, 4:]
    # check whether all remaining dataframe values are `NaN`
    if len(list(df_.columns[df_.isna().all()])) == len(listOfAddedColumns):
        # if yes, trigger the auto option
        return True, shape, type_, None, posMinMax, df.iloc[:, :4]
    # get the column names holding any NaN value: the sounding-location
    # column is expected to be mostly NaN with a few markers
    sloc_column = list(df_.columns[df_.isna().any()])
    sloc_values = df_[sloc_column].to_numpy()
    if len(sloc_column) > 1:
        # several candidate columns: merge them into a single array
        shape, type_, sloc_values = mergeToOne(sloc_column, df_)
    lower_ix, ves_ix, upper_ix = retrieve_ix_val(sloc_values)
    # when `lower`/`upper` bounds are missing, start or end the selected
    # anomaly at the position (pk) of the sounding curve
    if lower_ix is None:
        lower_ix = ves_ix
    if upper_ix is None:
        upper_ix = ves_ix
    # NOTE(review): this only tests `upper_ix` because of how ``and``
    # evaluates -- kept unchanged to preserve the original decision flow.
    if (lower_ix and upper_ix) is None:
        posMinMax = None
    if posMinMax is None and ves_ix is None:
        _autoOption = True
    else:
        posMinMax = (pk_series[lower_ix], pk_series[upper_ix])
    if ves_ix is None:
        ves_loc = None
    else:
        ves_loc = pk_series[ves_ix]
    return _autoOption, shape, type_, ves_loc, posMinMax, df.iloc[:, :4]
@deprecated('Deprecated function to `:func:`watex.core.erp.get_type`'
            ' more efficient using median and index computation. It will '
            'probably deprecate soon for neural network pattern recognition.')
def get_type(erp_array, posMinMax, pk, pos_array, dl):
    """
    Classify an anomaly from the apparent-resistivity values and the
    selected-anomaly position.

    :param erp_array: App.resistivty values of all `erp` lines
    :type erp_array: array_like

    :param posMinMax: Selected anomaly positions from startpoint and endpoint
    :type posMinMax: list or tuple or nd.array(1,2)

    :param pk: Position of selected anomaly in meters
    :type pk: float or int

    :param pos_array: Stations locations or measurement positions
    :type pos_array: array_like

    :param dl: Distance between two receiver electrodes (dipole length), in
        meters.

    :returns: one of
        - ``EC`` for Extensive conductive,
        - ``NC`` for narrow conductive,
        - ``CP`` for conductive plane (default),
        - ``CB2P`` for contact between two planes.

    :Example:

        >>> from watex.core.erp import get_type
        >>> x = [60, 61, 62, 63, 68, 65, 80, 90, 100, 80, 100, 80]
        >>> pos= np.arange(0, len(x)*10, 10)
        >>> ano_type= get_type(erp_array= np.array(x),
        ...            posMinMax=(10,90), pk=50, pos_array=pos, dl=10)
        >>> ano_type
        ...CB2P
    """
    anomaly_kind = 'CP'
    # index of the selected anomaly on the survey line
    pk_index = int(np.where(pos_array == pk)[0])
    profile_median = np.median(erp_array)
    left_mean = erp_array[:pk_index + 1].mean()
    right_mean = erp_array[pk_index:].mean()
    anomaly_width = max(posMinMax) - min(posMinMax)
    if left_mean < profile_median and right_mean < profile_median:
        # both halves conductive relative to the whole line
        anomaly_kind = 'CB2P'
    elif left_mean >= profile_median and right_mean >= profile_median:
        # resistive surroundings: the width discriminates NC vs EC
        if dl <= anomaly_width <= 5 * dl:
            anomaly_kind = 'NC'
        elif anomaly_width > 5 * dl:
            anomaly_kind = 'EC'
    return anomaly_kind
if __name__=='__main__':
    # Ad-hoc manual test harness: only the LAST `path` assignment takes
    # effect; the earlier ones are leftovers from previous debug sessions.
    path = 'data/erp/l10_gbalo.xlsx' # ztepogovogo_0
    path= r'F:\repositories\watex\data\Bag.main&rawds\ert_copy\nt\b1_5.xlsx'
    path = 'data/erp/test_anomaly.xlsx'
    # last sheet column holds the apparent-resistivity values
    data = pd.read_excel(path).to_numpy()[:, -1]
    df = pd.read_excel(path)
    # autotrig, shape ,type_, indexanom , posMinMax, newdf = getdfAndFindAnomalyBoundaries(df)
    # print(autotrig, shape,type_, indexanom , posMinMax, newdf)
ce8d96eab0af3942faea6af37906f7c4d05ffc12 | 278 | py | Python | test/sources/base.py | snallapa/mod_updater | 783ddf09da9d3909ce3d466f148218dbc51e29c9 | [
"MIT"
] | null | null | null | test/sources/base.py | snallapa/mod_updater | 783ddf09da9d3909ce3d466f148218dbc51e29c9 | [
"MIT"
] | 1 | 2021-05-15T20:08:33.000Z | 2021-05-22T21:19:41.000Z | test/sources/base.py | snallapa/mod_updater | 783ddf09da9d3909ce3d466f148218dbc51e29c9 | [
"MIT"
] | null | null | null | import unittest
import os
import shutil
| 25.272727 | 90 | 0.73741 | import unittest
import os
import shutil
class SourceTest(unittest.TestCase):
    """Common base class for source tests.

    Provides the scratch mods directory name and removes that directory
    after each test so individual cases start from a clean slate.
    """
    # Directory that individual tests may populate with downloaded mods.
    TEST_MODS_FOLDER = "test_mods"

    def tearDown(self):
        """Delete the scratch mods directory, if a test created it."""
        scratch = self.TEST_MODS_FOLDER
        if os.path.isdir(scratch):
            shutil.rmtree(scratch)
| 138 | 77 | 23 |
26b2299fc736855cc6f266946d8b6050b00ab180 | 23,051 | py | Python | QConnectBase/connection_base.py | test-fullautomation/robotframework-qconnect-base | b8a6b5bc89907f2a7f6cd69460f11bde496526bd | [
"Apache-2.0"
] | 1 | 2021-12-22T01:50:48.000Z | 2021-12-22T01:50:48.000Z | QConnectBase/connection_base.py | test-fullautomation/robotframework-qconnect-base | b8a6b5bc89907f2a7f6cd69460f11bde496526bd | [
"Apache-2.0"
] | 4 | 2022-02-08T12:24:42.000Z | 2022-03-28T13:07:43.000Z | QConnectBase/connection_base.py | test-fullautomation/robotframework-qconnect-base | b8a6b5bc89907f2a7f6cd69460f11bde496526bd | [
"Apache-2.0"
] | 1 | 2022-02-15T05:38:30.000Z | 2022-02-15T05:38:30.000Z | # Copyright 2020-2022 Robert Bosch Car Multimedia GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# *******************************************************************************
#
# File: connection_base.py
#
# Initially created by Cuong Nguyen (RBVH/ECM11) / May 2021.
# Based on \lib\TCP\CTCPMultiQueued.py in TML Framework.
#
# Description:
# Provide the infrastructure for sending commands and getting traces from connection continuously.
#
# History:
#
# 12.05.2021 / V 0.1 / Cuong Nguyen
# - Initialize
#
# *******************************************************************************
from abc import ABCMeta
from inspect import currentframe
from collections import deque
from robot.libraries.BuiltIn import BuiltIn
from QConnectBase.qlogger import QLogger
import QConnectBase.constants as constants
import queue
import abc
import time
import platform
import threading
import re
_platform = platform.system().lower()
class ConnectionBase(object):
"""
Base class for all connection classes.
"""
__metaclass__ = ABCMeta
_SUPPORTED_PLATFORM_LIST = [constants.OS_WINDOWS_STR,
constants.OS_LINUX_STR]
_CONNECTION_TYPE = "NotSupported"
_ERROR_INSTRUCTION = constants.UNKNOWN_STR
MAX_LEN_BACKTRACE = 500 # Lines
RECV_MSGS_POLLING_INTERVAL = 0.005
_call_thrd_obj = None
_call_thrd_init = threading.Event()
_call_thrd_term = threading.Event()
_recv_thrd_obj = None
# _recv_thrd_term = threading.Event()
_recv_thrd_term = None
_force_seq_lock = threading.RLock()
_start_dlt_lock = threading.RLock()
_traceq_handle = 0
_traceq_obj = {}
_traceq_lock = threading.Lock()
supported_devices = []
# # for continuous processing
# _msgq_c_handle = 0
# _msgq_c_obj = {}
# _msgq_c_lock = threading.Lock()
_is_precondition_valid = True
_should_check_timeout = False
_logger = None
_logger_handler = None
config = None
   def __new__(cls, *args, **kwargs):
      """
      Override creating instance method to check for conditions.

      Args:
         args: Non-Keyword Arguments.

         kwargs: Keyword Arguments.

      Returns:
         ConnectionBase instance if passing the conditions.

         None if failing the conditions.
      """
      # NOTE(review): returning None from __new__ makes ``Cls(...)``
      # evaluate to None on unsupported platforms instead of raising --
      # callers must check the result before use.
      if (not cls.is_supported_platform()) or (not cls.is_precondition_pass()):
         return None
      return super(ConnectionBase, cls).__new__(cls)
# region GENERAL METHODS
   @classmethod
   def is_supported_platform(cls):
      """
      Check if current platform is supported.

      Returns:
         True if platform is supported.

         False if platform is not supported.
      """
      # ``_platform`` is the lower-cased ``platform.system()`` captured at
      # module import time.
      return _platform in cls._SUPPORTED_PLATFORM_LIST
   @classmethod
   def is_precondition_pass(cls):
      """
      Check for precondition.

      Returns:
         True if passing the precondition.

         False if failing the precondition.
      """
      # Subclasses may set ``_is_precondition_valid`` to False to veto
      # instantiation (checked in ``__new__``).
      return cls._is_precondition_valid
   def error_instruction(self):
      """
      Get the error instruction.

      Returns:
         Error instruction string (class-level hint shown when this
         connection type fails).
      """
      return self._ERROR_INSTRUCTION
# endregion
# region MUST BE OVERRIDE METHODS
   @abc.abstractmethod
   def quit(self, is_disconnect_all=True):
      """
      >> This method MUST be overridden in derived class <<

      Abstract method for quiting the connection.

      Args:
         is_disconnect_all: Determine if it's necessary to disconnect all connections.

      Returns:
         None.
      """
      # Base implementation only detaches the logging handler; subclasses
      # are expected to call it after releasing their own resources.
      self._logger.removeHandler(self._logger_handler)
@abc.abstractmethod
def connect(self, device, files=None, test_connection=False):
    """
    >> This method MUST be overridden in derived class <<

    Abstract method for establishing a connection.

    Args:
        device: Name of the device to connect to.
        files: Optional files used for the connection
               (NOTE(review): exact semantics are subclass-defined — confirm).
        test_connection: Presumably, when True, only verify that the
               connection can be established — confirm against subclasses.

    Returns:
        None.
    """
    pass
@abc.abstractmethod
def disconnect(self, device):
    """
    >> This method MUST be overridden in derived class <<

    Abstract method for disconnecting a single connection.

    Args:
        device: Device name identifying the connection to drop.

    Returns:
        None.
    """
    pass
# endregion
# region RECEIVER THREAD METHODS
def _init_thrd_llrecv(self, n_thrd_id):
    """
    Spawn the daemon thread that continuously receives low-level data.

    Args:
        n_thrd_id: Numeric suffix used to build the thread name.

    Returns:
        None
    """
    _mident = '%s.%s()' % (self.__class__.__name__, currentframe().f_code.co_name)
    worker = threading.Thread(target=self._thrd_llrecv_from_connection_interface)
    # Daemon thread: it must never keep the interpreter alive on shutdown.
    worker.setDaemon(True)
    worker.name = "%s-%s" % (self._CONNECTION_TYPE, n_thrd_id)
    self._llrecv_thrd_obj = worker
    BuiltIn().log("%s: starting low-level receiver thread '%s'" % (_mident, self._llrecv_thrd_obj.name), constants.LOG_LEVEL_DEBUG)
    worker.start()
def _thrd_llrecv_from_connection_interface(self):
    """
    >> This method will be override in derived class <<

    Body of the low-level receiver thread started by _init_thrd_llrecv().
    The base implementation is a deliberate no-op.

    Returns:
        None
    """
    pass
def _init_thread_receiver(self, thread_id, mode=None, sync_with_start=False):
    """
    Set up this connection's logger and start its receiver thread.

    Args:
        thread_id: Thread ID number appended to the connection/mode name.
        mode: Optional mode name; when given it replaces the connection
              type in the thread/logger name.
        sync_with_start: When True the receiver thread blocks until the
              start event is set before reading.

    Returns:
        None
    """
    _mident = '%s.%s()' % (self.__class__.__name__, currentframe().f_code.co_name)
    base_name = self._CONNECTION_TYPE if mode is None else mode
    conn_id_name = "%s%s" % (base_name, thread_id)
    # One logger + handler per connection instance; the handler is removed
    # again in quit().
    self._logger = QLogger().get_logger(conn_id_name)
    self._logger_handler = QLogger().set_handler(self.config)
    self._recv_thrd_term = threading.Event()
    receiver = threading.Thread(
        target=self._thread_receive_from_connection,
        kwargs={'sync_with_start': sync_with_start})
    receiver.setDaemon(True)
    receiver.name = conn_id_name
    self._recv_thrd_obj = receiver
    BuiltIn().log("%s: starting receiver thread '%s'" % (_mident, self._recv_thrd_obj.name))
    receiver.start()
def _thread_receive_from_connection(self, sync_with_start=False):
    """
    Receiver-thread body: read messages from the connection in a loop,
    log them, and fan matching messages out to the registered trace queues.

    Args:
        sync_with_start: When True, spin until self._recv_thrd_start is set
            before entering the read loop.
            NOTE(review): _recv_thrd_start and _broken_conn are not defined
            in this class — presumably provided by subclasses; confirm.

    Returns:
        None
    """
    _mident = '%s.%s()' % (self.__class__.__name__, currentframe().f_code.co_name)
    if sync_with_start is True:
        BuiltIn().log("%s: receiver thread is waiting to start." % _mident, constants.LOG_LEVEL_DEBUG)
        # Busy-wait (with polling sleep) for the external start signal.
        while not self._recv_thrd_start.isSet():
            time.sleep(self.__class__.RECV_MSGS_POLLING_INTERVAL)
    BuiltIn().log("%s: receiver thread started." % _mident, constants.LOG_LEVEL_DEBUG)
    # Main loop: runs until _recv_thrd_term is set (or the connection breaks).
    while not self._recv_thrd_term.isSet():
        try:
            msg = self.read_obj()
            if self._should_check_timeout:
                self.check_timeout(msg)
            if msg is not None:
                self._should_check_timeout = False
                self.pre_msg_check(msg)
                BuiltIn().log(msg, constants.LOG_LEVEL_INFO)
                if self._logger:
                    self._logger.info(msg)
                # with self._msgq_c_lock:
                #    now = time.time()
                #    for q in self._msgq_c_obj.values():
                #       q.put((now, msg), False)
                # Dispatch the message to every registered trace queue while
                # holding the class-wide lock (activate/deactivate also take it).
                with self.__class__._traceq_lock:
                    if self.__class__._traceq_obj:
                        for (regex_filter, msg_queue, back_trace_queue, use_fetch_block, regex_end_block_pattern, regex_line_filter) in self.__class__._traceq_obj.values():
                            is_hit = False
                            result_obj = None
                            if use_fetch_block is True:
                                # Block mode: accumulate lines matching the
                                # line filter, then test for the end-of-block
                                # pattern to decide when the block is complete.
                                matchObj = regex_line_filter.search(msg)
                                if matchObj is not None:
                                    back_trace_queue.append(msg)
                                    (is_hit, result_obj) = self._filter_msg(regex_end_block_pattern, msg)
                            else:
                                (is_hit, result_obj) = self._filter_msg(regex_filter, msg)
                            if is_hit:
                                now = time.time()
                                if use_fetch_block is True:
                                    # Re-run the main filter over the whole
                                    # accumulated block, then reset it.
                                    result_obj = regex_filter.search("\r\n".join(back_trace_queue))
                                    back_trace_queue.clear()
                                msg_queue.put((now, result_obj), False)
                self.post_msg_check(msg)
        except BrokenConnError as reason:
            # Fatal: flag the broken connection and leave the loop.
            BuiltIn().log("%s: %s" % (_mident, reason), constants.LOG_LEVEL_DEBUG)
            self._broken_conn.set()
            break
        except Exception as reason:
            # Non-fatal errors are logged and the loop keeps polling.
            BuiltIn().log("%s: %s" % (_mident, reason), constants.LOG_LEVEL_WARNING)
        time.sleep(self.__class__.RECV_MSGS_POLLING_INTERVAL)
    self._recv_thrd_term.clear()
    BuiltIn().log("%s: receiver thread terminated." % _mident, constants.LOG_LEVEL_DEBUG)
def send_obj(self, obj, cr=True):
    """
    Send a message over the connection, if it is currently connected.

    Args:
        obj: Data to be sent.
        cr: When True, the underlying _send appends a newline to the command.

    Returns:
        None
    """
    _mident = '%s.%s()' % (self.__class__.__name__, currentframe().f_code.co_name)
    BuiltIn().log('%s' % _mident, constants.LOG_LEVEL_DEBUG)
    msg = obj
    # Silently skip sending when the connection is already down.
    if not self._is_connected:
        return
    # noinspection PyBroadException
    try:
        BuiltIn().log("%s: sending: '%s'" % (_mident, msg), constants.LOG_LEVEL_DEBUG)
        self._send(msg, cr)
    except:
        # Any send failure marks the connection as broken.
        self._is_connected = False
def read_obj(self):
    """
    Read one message from the connection, if connected.

    Returns:
        The message read, or None when not connected or on a
        non-fatal read error.

    Raises:
        BrokenConnError: When the underlying read reports a broken
            connection; the connected flag is cleared before re-raising.
    """
    _mident = '%s.%s()' % (self.__class__.__name__, currentframe().f_code.co_name)
    BuiltIn().log('%s' % _mident, constants.LOG_LEVEL_DEBUG)
    msg = None
    if not self._is_connected:
        return msg
    try:
        BuiltIn().log("%s: reading..." % _mident, constants.LOG_LEVEL_DEBUG)
        msg = self._read()
        BuiltIn().log("%s: read: '%s'" % (_mident, msg), constants.LOG_LEVEL_DEBUG)
    except BrokenConnError as reason:
        # Fatal for the receiver loop: flag the connection and propagate.
        BuiltIn().log("%s: %s" % (_mident, reason), constants.LOG_LEVEL_ERROR)
        self._is_connected = False
        raise reason
    except Exception as reason:
        # Transient errors are only logged; None is returned to the caller.
        BuiltIn().log("%s: %s" % (_mident, reason), constants.LOG_LEVEL_WARNING)
    return msg
# endregion
# region TRACE INFRASTRUCTURE METHODS
def wait_4_trace(self, search_obj, timeout=0, use_fetch_block=False, end_of_block_pattern=".*", filter_pattern=".*", *fct_args):
    """
    Suspend the control flow until a trace message is received which matches
    a specified regular expression, optionally sending a command first.

    Args:
        search_obj : Regular expression all received trace messages are compared to. \
                     Can be passed either as a string or a regular expression object.
        timeout : Optional timeout as a floating point number in seconds.
        use_fetch_block : Determine if the 'fetch block' feature is used.
        end_of_block_pattern : The end-of-block pattern (fetch-block mode only).
        filter_pattern : Regular expression used to filter messages line by line.
        fct_args : Optional arguments forwarded to send_obj() before waiting.

    Returns:
        None : If no trace message matched and a timeout occurred.
        <match> : The re match object of the matching trace message. The full
                  message is available via its 'string' attribute; use group()
                  for sub-group access.
    """
    _mident = '%s.%s()' % (self.__class__.__name__, currentframe().f_code.co_name)
    BuiltIn().log('Execute %s' % _mident, constants.LOG_LEVEL_DEBUG)
    search_regex = re.compile(search_obj, re.M | re.S | re.U)
    regex_obj_filter = re.compile(filter_pattern)
    # Register a fresh, temporary trace queue for this one wait.
    trq_handle, trace_queue = self.create_and_activate_trace_queue(search_regex, use_fetch_block, end_of_block_pattern, regex_obj_filter)
    try:
        self.send_obj(*fct_args)
    except Exception as err_msg:  # pylint: disable=W0703
        # Sending failed; log and still wait — the trace may arrive anyway.
        BuiltIn().log('%s: An Exception occurred executing function object: %s' % (_mident, repr(self.send_obj)), 'ERROR')
        BuiltIn().log('Function Arguments: %s' % repr(fct_args), 'ERROR')
        BuiltIn().log('Error Message: %s' % repr(err_msg), 'ERROR')
    success = True
    match = None
    try:
        # Block until the receiver thread puts a (timestamp, match) tuple.
        (dummy, match) = trace_queue.get(True, timeout)
    except queue.Empty:
        success = False
    finally:
        # Always unregister the temporary queue, matched or not.
        self.deactivate_and_delete_trace_queue(trq_handle, trace_queue)
    BuiltIn().log('Completed %s' % _mident, constants.LOG_LEVEL_DEBUG)
    if success:
        return match
    else:
        return None
def wait_4_trace_continuously(self, trace_queue, timeout=0, *fct_args):
    """
    Wait for the next matching trace on an already-registered trace queue
    (no queue is created or deleted here), optionally sending a command first.

    Args:
        trace_queue: Previously activated queue the receiver thread feeds.
        timeout: Timeout in seconds for waiting on a matched message.
        fct_args: Optional arguments forwarded to send_obj() before waiting.

    Returns:
        None : If no trace message matched and a timeout occurred
               (or trace_queue is None).
        match object : The re match object of the matching trace message.
    """
    _mident = '%s.%s()' % (self.__class__.__name__, currentframe().f_code.co_name)
    BuiltIn().log('Execute %s' % _mident, constants.LOG_LEVEL_DEBUG)
    try:
        self.send_obj(*fct_args)
    except Exception as err_msg:  # pylint: disable=W0703
        BuiltIn().log('%s: An Exception occurred executing function object: %s' % (_mident, repr(self.send_obj)), 'ERROR')
        BuiltIn().log('Function Arguments: %s' % repr(fct_args), 'ERROR')
        BuiltIn().log('Error Message: %s' % repr(err_msg), 'ERROR')
    success = True
    match = None
    try:
        if trace_queue is not None:
            # Receiver thread puts (timestamp, match) tuples on the queue.
            (dummy, match) = trace_queue.get(True, timeout)
    except queue.Empty:
        success = False
    BuiltIn().log('Completed %s' % _mident, constants.LOG_LEVEL_DEBUG)
    if success:
        return match
    else:
        return None
@classmethod
def create_and_activate_trace_queue(cls, search_element, use_fetch_block=False, end_of_block_pattern='.*', regex_line_filter_pattern=None):
    """
    Create a fresh queue and register it as a trace filter.

    Args:
        search_element : Regular expression (string or compiled) the received
            trace messages are compared to.
        use_fetch_block : Determine if the 'fetch block' feature is used.
        end_of_block_pattern : The end-of-block pattern.
        regex_line_filter_pattern : Regular expression object used to filter
            messages line by line.

    Returns:
        Tuple (trq_handle, trace_queue): the registration handle and the
        newly created queue.
    """
    new_queue = queue.Queue()
    handle = cls.activate_trace_queue(search_element,
                                      new_queue,
                                      use_fetch_block,
                                      end_of_block_pattern,
                                      regex_line_filter_pattern)
    return handle, new_queue
@classmethod
def deactivate_and_delete_trace_queue(cls, trq_handle, trace_queue):
    """
    Unregister a trace queue and drop the local reference to it.

    Args:
        trq_handle: Handle returned by activate_trace_queue().
        trace_queue: The queue object that was registered.

    Returns:
        None.
    """
    cls.deactivate_trace_queue(trq_handle)
    # 'del' removes only this local name; the queue object is reclaimed
    # once no other reference (e.g. in _traceq_obj) remains.
    del trace_queue
@classmethod
def activate_trace_queue(cls, search_obj, trace_queue, use_fetch_block=False, end_of_block_pattern='.*', line_filter_pattern=None):
    """
    Register a trace message filter; matching messages are put on the
    supplied queue by the receiver thread.

    Args:
        search_obj : Regular expression (string or compiled) the received
            trace messages are compared to.
        trace_queue : Queue object the matches are put in. The caller must
            ensure the queue is emptied or deleted.
        use_fetch_block : Determine if the 'fetch block' feature is used.
        end_of_block_pattern : The end-of-block pattern.
        line_filter_pattern : Regular expression object used to filter
            messages line by line.

    Returns:
        <int> : Handle to pass to deactivate_trace_queue().
    """
    _mident = '%s.%s()' % (cls.__class__.__name__, currentframe().f_code.co_name)
    BuiltIn().log('Execute %s' % _mident, constants.LOG_LEVEL_DEBUG)
    # Registration shares _traceq_lock with the receiver thread's dispatch.
    with cls._traceq_lock:
        cls._traceq_handle += 1
        handle_id = cls._traceq_handle
        entry = (re.compile(search_obj),
                 trace_queue,
                 deque(maxlen=cls.MAX_LEN_BACKTRACE),  # back-trace buffer for fetch-block mode
                 use_fetch_block,
                 re.compile(end_of_block_pattern, re.M | re.S | re.U),
                 line_filter_pattern)
        cls._traceq_obj[handle_id] = entry
    BuiltIn().log('Completed %s' % _mident, constants.LOG_LEVEL_DEBUG)
    return handle_id
@classmethod
def deactivate_trace_queue(cls, handle):
    """
    Deactivate a trace message filter previously registered by
    activate_trace_queue().

    Args:
        handle : Integer handle returned by activate_trace_queue().

    Returns:
        True : Filter successfully removed.
        False : No filter is registered under the given handle.
    """
    _mident = '%s.%s()' % (cls.__class__.__name__, currentframe().f_code.co_name)
    BuiltIn().log('Execute %s' % _mident, constants.LOG_LEVEL_DEBUG)
    with cls._traceq_lock:
        is_success = handle in cls._traceq_obj
        if is_success:
            del cls._traceq_obj[handle]
    BuiltIn().log('Completed %s' % _mident, constants.LOG_LEVEL_DEBUG)
    return is_success
def check_timeout(self, msg):
    """
    >> This method will be override in derived class <<

    Hook called by the receiver thread to check whether a response arrived
    in time. The base implementation is a deliberate no-op.

    Args:
        msg: Responded message for checking (may be None when nothing was read).

    Returns:
        None.
    """
    pass
def pre_msg_check(self, msg):
    """
    >> This method will be override in derived class <<

    Hook invoked on every received message before trace-queue dispatch.
    The base implementation is a deliberate no-op.

    Args:
        msg: Received message to be checked.

    Returns:
        None.
    """
    pass
def post_msg_check(self, msg):
    """
    >> This method will be override in derived class <<

    Hook invoked on every received message after trace-queue dispatch.
    The base implementation is a deliberate no-op.

    Args:
        msg: Received message to be checked.

    Returns:
        None.
    """
    pass
# endregion
# region UTILITIES METHODS
def _filter_msg(self, regex_filter_obj, msg):
    """
    Filter a message with a compiled regular expression.

    FIX: removed the two stray ``@staticmethod`` decorators. This method
    declares ``self`` and is invoked bound (``self._filter_msg(...)``) by
    the receiver thread; as a staticmethod the arguments would shift by
    one (``self`` would receive the regex object), breaking every call.

    Args:
        regex_filter_obj: Compiled regular expression object.
        msg: Message string to match against.

    Returns:
        is_hit: True when the pattern matched the message.
        matched_obj: The re match object, or None on no match or error.
    """
    _mident = '%s.%s()' % (self.__class__.__name__, currentframe().f_code.co_name)
    BuiltIn().log(_mident, constants.LOG_LEVEL_DEBUG)
    matched_obj = None
    try:
        BuiltIn().log("%s: regex_filter_obj '%s'" % (_mident, repr(regex_filter_obj)), constants.LOG_LEVEL_DEBUG)
        BuiltIn().log("%s: msg '%s'" % (_mident, repr(msg)), constants.LOG_LEVEL_DEBUG)
        matched_obj = regex_filter_obj.search(msg)
    except Exception as reason:
        # e.g. msg is None or not a string; treat as "no match".
        BuiltIn().log("%s: %s" % (_mident, reason), constants.LOG_LEVEL_ERROR)
    is_hit = matched_obj is not None
    return is_hit, matched_obj
# endregion
| 35.463077 | 169 | 0.623747 | # Copyright 2020-2022 Robert Bosch Car Multimedia GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# *******************************************************************************
#
# File: connection_base.py
#
# Initially created by Cuong Nguyen (RBVH/ECM11) / May 2021.
# Based on \lib\TCP\CTCPMultiQueued.py in TML Framework.
#
# Description:
# Provide the infrastructure for sending commands and getting traces from connection continuously.
#
# History:
#
# 12.05.2021 / V 0.1 / Cuong Nguyen
# - Initialize
#
# *******************************************************************************
from abc import ABCMeta
from inspect import currentframe
from collections import deque
from robot.libraries.BuiltIn import BuiltIn
from QConnectBase.qlogger import QLogger
import QConnectBase.constants as constants
import queue
import abc
import time
import platform
import threading
import re
_platform = platform.system().lower()
class BrokenConnError(Exception):
pass
class ConnectionBase(object):
"""
Base class for all connection classes.
"""
__metaclass__ = ABCMeta
_SUPPORTED_PLATFORM_LIST = [constants.OS_WINDOWS_STR,
constants.OS_LINUX_STR]
_CONNECTION_TYPE = "NotSupported"
_ERROR_INSTRUCTION = constants.UNKNOWN_STR
MAX_LEN_BACKTRACE = 500 # Lines
RECV_MSGS_POLLING_INTERVAL = 0.005
_call_thrd_obj = None
_call_thrd_init = threading.Event()
_call_thrd_term = threading.Event()
_recv_thrd_obj = None
# _recv_thrd_term = threading.Event()
_recv_thrd_term = None
_force_seq_lock = threading.RLock()
_start_dlt_lock = threading.RLock()
_traceq_handle = 0
_traceq_obj = {}
_traceq_lock = threading.Lock()
supported_devices = []
# # for continuous processing
# _msgq_c_handle = 0
# _msgq_c_obj = {}
# _msgq_c_lock = threading.Lock()
_is_precondition_valid = True
_should_check_timeout = False
_logger = None
_logger_handler = None
config = None
def __new__(cls, *args, **kwargs):
"""
Override creating instance method to check for conditions.
Args:
args: Non-Keyword Arguments.
kwargs: Keyword Arguments.
Returns:
ConnectionBase instance if passing the conditions.
None if failing the conditions.
"""
if (not cls.is_supported_platform()) or (not cls.is_precondition_pass()):
return None
return super(ConnectionBase, cls).__new__(cls)
# region GENERAL METHODS
@classmethod
def is_supported_platform(cls):
"""
Check if current platform is supported.
Returns:
True if platform is supported.
False if platform is not supported.
"""
return _platform in cls._SUPPORTED_PLATFORM_LIST
@classmethod
def is_precondition_pass(cls):
"""
Check for precondition.
Returns:
True if passing the precondition.
False if failing the precondition.
"""
return cls._is_precondition_valid
def error_instruction(self):
"""
Get the error instruction.
Returns:
Error instruction string.
"""
return self._ERROR_INSTRUCTION
# endregion
# region MUST BE OVERRIDE METHODS
@abc.abstractmethod
def quit(self, is_disconnect_all=True):
"""
>> This method MUST be overridden in derived class <<
Abstract method for quiting the connection.
Args:
is_disconnect_all: Determine if it's necessary to disconnect all connections.
Returns:
None.
"""
self._logger.removeHandler(self._logger_handler)
@abc.abstractmethod
def connect(self, device, files=None, test_connection=False):
"""
>> This method MUST be overridden in derived class <<
Abstract method for quiting the connection.
Args:
device: Determine if it's necessary to disconnect all connections.
files: Determine if it's necessary to disconnect all connections.
test_connection: Determine if it's necessary to disconnect all connections.
Returns:
None.
"""
pass
@abc.abstractmethod
def disconnect(self, device):
"""
>> This method MUST be overridden in derived class <<
Abstract method for disconnecting connection.
Args:
device: Device name.
Returns:
None.
"""
pass
# endregion
# region RECEIVER THREAD METHODS
def _init_thrd_llrecv(self, n_thrd_id):
"""
Start a thread which receive message from connection continuously.
Args:
n_thrd_id: thread id.
Returns:
None
"""
_mident = '%s.%s()' % (self.__class__.__name__, currentframe().f_code.co_name)
self._llrecv_thrd_obj = threading.Thread(target=self._thrd_llrecv_from_connection_interface)
self._llrecv_thrd_obj.setDaemon(True)
self._llrecv_thrd_obj.name = str(self._CONNECTION_TYPE) + "-" + str(n_thrd_id)
BuiltIn().log("%s: starting low-level receiver thread '%s'" % (_mident, self._llrecv_thrd_obj.name), constants.LOG_LEVEL_DEBUG)
self._llrecv_thrd_obj.start()
def _thrd_llrecv_from_connection_interface(self):
"""
>> This method will be override in derived class <<
The thread which receive message from connection continuously.
Returns:
None
"""
pass
def _init_thread_receiver(self, thread_id, mode=None, sync_with_start=False):
"""
Initialize a thread for receiving data from connection.
Args:
thread_id: Thread ID number.
mode: Connection's mode.
sync_with_start: Determine if receiving thread needs to wait for start event.
Returns:
None
"""
_mident = '%s.%s()' % (self.__class__.__name__, currentframe().f_code.co_name)
thread_name = self._CONNECTION_TYPE
if mode is not None:
thread_name = mode
conn_id_name = str(thread_name) + str(thread_id)
self._logger = QLogger().get_logger(conn_id_name)
self._logger_handler = QLogger().set_handler(self.config)
self._recv_thrd_term = threading.Event()
self._recv_thrd_obj = threading.Thread(target=self._thread_receive_from_connection, kwargs=dict(sync_with_start=sync_with_start))
self._recv_thrd_obj.setDaemon(True)
self._recv_thrd_obj.name = conn_id_name
BuiltIn().log("%s: starting receiver thread '%s'" % (_mident, self._recv_thrd_obj.name))
self._recv_thrd_obj.start()
def _thread_receive_from_connection(self, sync_with_start=False):
"""
Thread to receive data from connection continuously.
Args:
sync_with_start: determine if thread needs to wait for start event.
Returns:
None
"""
_mident = '%s.%s()' % (self.__class__.__name__, currentframe().f_code.co_name)
if sync_with_start is True:
BuiltIn().log("%s: receiver thread is waiting to start." % _mident, constants.LOG_LEVEL_DEBUG)
while not self._recv_thrd_start.isSet():
time.sleep(self.__class__.RECV_MSGS_POLLING_INTERVAL)
BuiltIn().log("%s: receiver thread started." % _mident, constants.LOG_LEVEL_DEBUG)
while not self._recv_thrd_term.isSet():
try:
msg = self.read_obj()
if self._should_check_timeout:
self.check_timeout(msg)
if msg is not None:
self._should_check_timeout = False
self.pre_msg_check(msg)
BuiltIn().log(msg, constants.LOG_LEVEL_INFO)
if self._logger:
self._logger.info(msg)
# with self._msgq_c_lock:
# now = time.time()
# for q in self._msgq_c_obj.values():
# q.put((now, msg), False)
with self.__class__._traceq_lock:
if self.__class__._traceq_obj:
for (regex_filter, msg_queue, back_trace_queue, use_fetch_block, regex_end_block_pattern, regex_line_filter) in self.__class__._traceq_obj.values():
is_hit = False
result_obj = None
if use_fetch_block is True:
matchObj = regex_line_filter.search(msg)
if matchObj is not None:
back_trace_queue.append(msg)
(is_hit, result_obj) = self._filter_msg(regex_end_block_pattern, msg)
else:
(is_hit, result_obj) = self._filter_msg(regex_filter, msg)
if is_hit:
now = time.time()
if use_fetch_block is True:
result_obj = regex_filter.search("\r\n".join(back_trace_queue))
back_trace_queue.clear()
msg_queue.put((now, result_obj), False)
self.post_msg_check(msg)
except BrokenConnError as reason:
BuiltIn().log("%s: %s" % (_mident, reason), constants.LOG_LEVEL_DEBUG)
self._broken_conn.set()
break
except Exception as reason:
BuiltIn().log("%s: %s" % (_mident, reason), constants.LOG_LEVEL_WARNING)
time.sleep(self.__class__.RECV_MSGS_POLLING_INTERVAL)
self._recv_thrd_term.clear()
BuiltIn().log("%s: receiver thread terminated." % _mident, constants.LOG_LEVEL_DEBUG)
def send_obj(self, obj, cr=True):
"""
Wrapper method to send message to a tcp connection.
Args:
obj: Data to be sent.
cr: Determine if it's necessary to add newline character at the end of command.
Returns:
None
"""
_mident = '%s.%s()' % (self.__class__.__name__, currentframe().f_code.co_name)
BuiltIn().log('%s' % _mident, constants.LOG_LEVEL_DEBUG)
msg = obj
if self._is_connected:
# noinspection PyBroadException
try:
BuiltIn().log("%s: sending: '%s'" % (_mident, msg), constants.LOG_LEVEL_DEBUG)
self._send(msg, cr)
except:
self._is_connected = False
def read_obj(self):
"""
Wrapper method to get the response from connection.
Returns:
Responded message.
"""
_mident = '%s.%s()' % (self.__class__.__name__, currentframe().f_code.co_name)
BuiltIn().log('%s' % _mident, constants.LOG_LEVEL_DEBUG)
msg = None
if self._is_connected:
try:
BuiltIn().log("%s: reading..." % _mident, constants.LOG_LEVEL_DEBUG)
msg = self._read()
BuiltIn().log("%s: read: '%s'" % (_mident, msg), constants.LOG_LEVEL_DEBUG)
except BrokenConnError as reason:
BuiltIn().log("%s: %s" % (_mident, reason), constants.LOG_LEVEL_ERROR)
self._is_connected = False
raise reason
except Exception as reason:
BuiltIn().log("%s: %s" % (_mident, reason), constants.LOG_LEVEL_WARNING)
return msg
# endregion
# region TRACE INFRASTRUCTURE METHODS
def wait_4_trace(self, search_obj, timeout=0, use_fetch_block=False, end_of_block_pattern=".*", filter_pattern=".*", *fct_args):
"""
Suspend the control flow until a Trace message is received which matches to a specified regular expression.
Args:
search_obj : Regular expression all received trace messages are compare to. \
Can be passed either as a string or a regular expression object. Refer to Python documentation for module 're'.
use_fetch_block : Determine if 'fetch block' feature is used.
end_of_block_pattern : The end of block pattern.
filter_pattern : Regular expression object to filter message line by line.
timeout : Optional timeout parameter specified as a floating point number in the unit 'seconds'.
fct_args: Optional list of function arguments passed to be sent.
Returns:
None : If no trace message matched to the specified regular expression and a timeout occurred.
<match> : If a trace message has matched to the specified regular expression, a match object is returned as the result.\
The complete trace message can be accessed by the 'string' attribute of the match object.\
For access to groups within the regular expression, use the group() method.\
For more information, refer to Python documentation for module 're'.\
"""
_mident = '%s.%s()' % (self.__class__.__name__, currentframe().f_code.co_name)
BuiltIn().log('Execute %s' % _mident, constants.LOG_LEVEL_DEBUG)
search_regex = re.compile(search_obj, re.M | re.S | re.U)
regex_obj_filter = re.compile(filter_pattern)
trq_handle, trace_queue = self.create_and_activate_trace_queue(search_regex, use_fetch_block, end_of_block_pattern, regex_obj_filter)
try:
self.send_obj(*fct_args)
except Exception as err_msg: # pylint: disable=W0703
BuiltIn().log('%s: An Exception occurred executing function object: %s' % (_mident, repr(self.send_obj)), 'ERROR')
BuiltIn().log('Function Arguments: %s' % repr(fct_args), 'ERROR')
BuiltIn().log('Error Message: %s' % repr(err_msg), 'ERROR')
success = True
match = None
try:
(dummy, match) = trace_queue.get(True, timeout)
except queue.Empty:
success = False
finally:
self.deactivate_and_delete_trace_queue(trq_handle, trace_queue)
BuiltIn().log('Completed %s' % _mident, constants.LOG_LEVEL_DEBUG)
if success:
return match
else:
return None
def wait_4_trace_continuously(self, trace_queue, timeout=0, *fct_args):
"""
Getting trace log continuously without creating a new trace queue.
Args:
trace_queue: Queue to store the traces.
timeout: Timeout for waiting a matched log.
fct_args: Arguments to be sent to connection.
Returns:
None : If no trace message matched to the specified regular expression and a timeout occurred.
match object : If a trace message has matched to the specified regular expression, a match object is returned as the result. \
The complete trace message can be accessed by the 'string' attribute of the match object. \
For access to groups within the regular expression, use the group() method. \
For more information, refer to Python documentation for module 're'. \
"""
_mident = '%s.%s()' % (self.__class__.__name__, currentframe().f_code.co_name)
BuiltIn().log('Execute %s' % _mident, constants.LOG_LEVEL_DEBUG)
try:
self.send_obj(*fct_args)
except Exception as err_msg: # pylint: disable=W0703
BuiltIn().log('%s: An Exception occurred executing function object: %s' % (_mident, repr(self.send_obj)), 'ERROR')
BuiltIn().log('Function Arguments: %s' % repr(fct_args), 'ERROR')
BuiltIn().log('Error Message: %s' % repr(err_msg), 'ERROR')
success = True
match = None
try:
if trace_queue is not None:
(dummy, match) = trace_queue.get(True, timeout)
except queue.Empty:
success = False
BuiltIn().log('Completed %s' % _mident, constants.LOG_LEVEL_DEBUG)
if success:
return match
else:
return None
@classmethod
def create_and_activate_trace_queue(cls, search_element, use_fetch_block=False, end_of_block_pattern='.*', regex_line_filter_pattern=None):
"""
Create Queue and assign it to _trace_queue object and activate the queue with the search element.
Args:
search_element : Regular expression all received trace messages are compare to. \
Can be passed either as a string or a regular expression object. Refer to Python documentation for module 're'.#
use_fetch_block : Determine if 'fetch block' feature is used.
end_of_block_pattern : The end of block pattern.
regex_line_filter_pattern : Regular expression object to filter message line by line.
Returns:
trq_handle, trace_queue: the handle and search object
"""
trace_queue = queue.Queue()
trq_handle = cls.activate_trace_queue(search_element, trace_queue, use_fetch_block, end_of_block_pattern, regex_line_filter_pattern)
return trq_handle, trace_queue
@classmethod
def deactivate_and_delete_trace_queue(cls, trq_handle, trace_queue):
"""
Deactivate trace queue and delete.
Args:
trq_handle: Trace queue handle.
trace_queue: Trace queue object.
Returns:
None.
"""
cls.deactivate_trace_queue(trq_handle)
del trace_queue
@classmethod
def activate_trace_queue(cls, search_obj, trace_queue, use_fetch_block=False, end_of_block_pattern='.*', line_filter_pattern=None):
"""
Activates a trace message filter specified as a regular expression. All matching trace messages are put in the specified queue object.
Args:
search_obj : Regular expression all received trace messages are compare to. \
Can be passed either as a string or a regular expression object. Refer to Python documentation for module 're'.#
trace_queue : A queue object all trace message which matches the regular expression are put in. \
The using application must assure, that the queue is emptied or deleted.
use_fetch_block : Determine if 'fetch block' feature is used
end_of_block_pattern : The end of block pattern
line_filter_pattern : Regular expression object to filter message line by line.
Returns:
<int> : Handle to deactivate the message filter.
"""
_mident = '%s.%s()' % (cls.__class__.__name__, currentframe().f_code.co_name)
BuiltIn().log('Execute %s' % _mident, constants.LOG_LEVEL_DEBUG)
with cls._traceq_lock:
cls._traceq_handle += 1
back_trace_queue = deque(maxlen=cls.MAX_LEN_BACKTRACE)
search_regex_obj = re.compile(search_obj)
cls._traceq_obj[cls._traceq_handle] = (search_regex_obj,
trace_queue,
back_trace_queue,
use_fetch_block,
re.compile(end_of_block_pattern, re.M | re.S | re.U),
line_filter_pattern)
handle_id = cls._traceq_handle
BuiltIn().log('Completed %s' % _mident, constants.LOG_LEVEL_DEBUG)
return handle_id
@classmethod
def deactivate_trace_queue(cls, handle):
"""
Deactivates a trace message filter previously activated by ActivateTraceQ() method.
Args:
handle : Integer object returned by ActivateTraceQ() method.
Returns:
False : No trace message filter active with the specified handle (i.e. handle is not in use).
True : Trace message filter successfully deleted.
"""
_mident = '%s.%s()' % (cls.__class__.__name__, currentframe().f_code.co_name)
BuiltIn().log('Execute %s' % _mident, constants.LOG_LEVEL_DEBUG)
with cls._traceq_lock:
if handle in cls._traceq_obj:
del cls._traceq_obj[handle]
is_success = True
else:
is_success = False
BuiltIn().log('Completed %s' % _mident, constants.LOG_LEVEL_DEBUG)
return is_success
def check_timeout(self, msg):
"""
>> This method will be override in derived class <<
Check if responded message come in cls._RESPOND_TIMEOUT or we will raise a timeout event.
Args:
msg: Responded message for checking.
Returns:
None.
"""
pass
def pre_msg_check(self, msg):
"""
>> This method will be override in derived class <<
Pre-checking message when receiving it from connection.
Args:
msg: received message to be checked.
Returns:
None.
"""
pass
def post_msg_check(self, msg):
"""
>> This method will be override in derived class <<
Post-checking message when receiving it from connection.
Args:
msg: received message to be checked.
Returns:
None.
"""
pass
# endregion
# region UTILITIES METHODS
@staticmethod
def _rm_q_dollar(input_):
# noinspection PyBroadException
try:
output = input_.replace("\$(", "$(")
output = output.replace("\${", "${")
except:
# in case of any issue return input as it is
output = input_
return output
@staticmethod
def _q_dollar(input_):
# noinspection PyBroadException
try:
output = input_.replace("$(", "\$(")
output = output.replace("${", "\${")
except:
# in case of any issue return input as it is
output = input_
return output
def _filter_msg(self, regex_filter_obj, msg):
"""
Filter message by regular expression object.
Args:
regex_filter_obj: regular expression object.
msg: message string.
Returns:
is_hit: Determine if there is any matched.
matched_obj: Matched object.
"""
_mident = '%s.%s()' % (self.__class__.__name__, currentframe().f_code.co_name)
BuiltIn().log(_mident, constants.LOG_LEVEL_DEBUG)
matched_obj = None
try:
BuiltIn().log("%s: regex_filter_obj '%s'" % (_mident, repr(regex_filter_obj)), constants.LOG_LEVEL_DEBUG)
BuiltIn().log("%s: msg '%s'" % (_mident, repr(msg)), constants.LOG_LEVEL_DEBUG)
matched_obj = regex_filter_obj.search(msg)
except Exception as reason:
BuiltIn().log("%s: %s" % (_mident, reason), constants.LOG_LEVEL_ERROR)
is_hit = False
if matched_obj:
is_hit = True
return is_hit, matched_obj
# endregion
| 515 | 20 | 73 |
34a9ec4fa2a33c1935b23d1582d87f9965cee308 | 40 | py | Python | app/commands/__init__.py | accordeiro/flask-skeleton | 71e2b6849a8dd95235bea8ffca274f844c069510 | [
"MIT"
] | 1 | 2015-06-24T14:04:40.000Z | 2015-06-24T14:04:40.000Z | app/commands/__init__.py | accordeiro/flask-skeleton | 71e2b6849a8dd95235bea8ffca274f844c069510 | [
"MIT"
] | null | null | null | app/commands/__init__.py | accordeiro/flask-skeleton | 71e2b6849a8dd95235bea8ffca274f844c069510 | [
"MIT"
] | null | null | null | from app.commands.create_admin import *
| 20 | 39 | 0.825 | from app.commands.create_admin import *
| 0 | 0 | 0 |
eca95506898e351c5ddca551279a45512721dfa1 | 715 | py | Python | pyconde/events/models.py | EuroPython/djep | afcccbdda483e5f6962ac97f0dc4c4c5ea67fd21 | [
"BSD-3-Clause"
] | 5 | 2015-01-02T14:33:14.000Z | 2021-08-03T10:19:07.000Z | pyconde/events/models.py | EuroPython/djep | afcccbdda483e5f6962ac97f0dc4c4c5ea67fd21 | [
"BSD-3-Clause"
] | null | null | null | pyconde/events/models.py | EuroPython/djep | afcccbdda483e5f6962ac97f0dc4c4c5ea67fd21 | [
"BSD-3-Clause"
] | 3 | 2015-08-30T09:45:03.000Z | 2017-04-08T12:15:22.000Z | from django.db import models
from django.utils.translation import ugettext_lazy as _
from pyconde.conference.models import Conference, CurrentConferenceManager
| 34.047619 | 76 | 0.714685 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from pyconde.conference.models import Conference, CurrentConferenceManager
class Event(models.Model):
    """A calendar event attached to a conference.

    ``objects`` is the plain default manager; ``current_conference`` is a
    CurrentConferenceManager — presumably it narrows the queryset to the
    currently active conference (TODO confirm against its definition).
    """
    # Conference this event belongs to.
    conference = models.ForeignKey(Conference, verbose_name=_("Conference"))
    # Display title of the event.
    title = models.CharField(_("Title"), max_length=255)
    # Start date/time of the event; end_date is optional (open-ended events).
    date = models.DateTimeField(_("Date"))
    end_date = models.DateTimeField(_("End date"), blank=True, null=True)
    # Optional external URL with more information about the event.
    link = models.URLField(_("Link"), blank=True, null=True)
    objects = models.Manager()
    current_conference = CurrentConferenceManager()
    class Meta(object):
        verbose_name = _('event')
        verbose_name_plural = _('events')
        # Default ordering: chronological by start date.
        ordering = ['date']
6cfa6cf4a8e279382f2f497f00619a53aa3c0534 | 2,527 | py | Python | xcode_build/xcode_build.py | 12star9/Python-Tools | dd5f1564670240f4b053355418669b274028e1a7 | [
"MIT"
] | 19 | 2018-08-24T08:03:12.000Z | 2021-05-17T08:12:29.000Z | xcode_build/xcode_build.py | 12star9/Python-Tools | dd5f1564670240f4b053355418669b274028e1a7 | [
"MIT"
] | 3 | 2021-03-19T01:47:04.000Z | 2022-01-13T01:23:02.000Z | xcode_build/xcode_build.py | 12star9/Python-Tools | dd5f1564670240f4b053355418669b274028e1a7 | [
"MIT"
] | 6 | 2019-07-07T16:51:12.000Z | 2021-05-20T06:31:27.000Z | # -*- coding: utf-8 -*-
import optparse
import os
import sys
import getpass
import json
import hashlib
import smtplib
import commands
import subprocess
import shutil
# import appicon_generate
from xcode_build_module import XCodeBuild
#主函数
current_work_path=os.getcwd()
projecttest_path=current_work_path+"/project_test"
projectPathList=[projecttest_path]
for index in range(0,4):
resultPath=projecttest_path+"/../backup%s"%(index)
try:
shutil.rmtree(resultPath)
except BaseException:
pass
# print 'error!'
pass
for index in range(0,4):
resultPath=projecttest_path+"/../backup%s"%(index)
try:
shutil.copytree(projecttest_path, resultPath)
except BaseException:
pass
# print 'error.'
finally:
projectPathList.append(resultPath)
pass
test(projectPathList)
# ImgManager.sharedinstance().handle_icon_images()
# generateAppIcons()
| 30.445783 | 224 | 0.67313 | # -*- coding: utf-8 -*-
import optparse
import os
import sys
import getpass
import json
import hashlib
import smtplib
import commands
import subprocess
import shutil
# import appicon_generate
from xcode_build_module import XCodeBuild
#主函数
def main():
    """Entry point for the batch iOS packaging script (Python 2 era code).

    NOTE(review): ``test()`` is invoked here without arguments although
    ``test(projectPathList)`` requires one, so calling main() would raise a
    TypeError. The module-level bootstrap code in this file calls
    test(projectPathList) directly instead — confirm whether main() is
    dead code.
    """
    # print buildArchivePath('project_test')
    # return
    # print "sys.path[0]:",sys.path[0]
    # print "sys.path[1]:",sys.path[1]
    # print "sys.argv[0]:scriptPath:", sys.argv[0]
    # print "sys.argv[1]:argv[1]:", sys.argv[1]
    # print "sys.argv[2]:argv[2]:", sys.argv[2]
    # print "len(sys.argv):",len(sys.argv)
    # setProjectAppDisplayName('firstBuild121414')
    # start packaging (was: 开始打包)
    test()
def test(projectPathList):
    """Build every project copy in *projectPathList* with XCodeBuild.

    Copy 0 is built unmodified; copies 1-4 each get one ad SDK
    (Adcolony / Adview / Facebook / Youmi) embedded before building.
    Indices above 4 are also built unmodified.

    Args:
        projectPathList: list of project directory paths to build.
    """
    # app-icon image storage path (was: appicon图片存储路径)
    # appicon_path=mainPath+"/project_test/Assets.xcassets/AppIcon.appiconset"
    current_work_path=os.getcwd()
    for index in range(0,len(projectPathList)):
        projectPath=projectPathList[index]
        xcode_build=XCodeBuild(projectPath, projectPath+"/project_test/Info.plist",
            True,"project_test","Release","iPhone Distribution: nanxing liao (73889W623Z)",current_work_path+"/provisioning_profile/project_test_dis_provisioning_profile.mobileprovision",current_work_path+"/exportOptions.plist")
        xcode_build.checkWorkSpace()
        xcode_build.allowFinder()
        xcode_build.allowKeychain()
        xcode_build.clearPbxproj()
        xcode_build.cleanPro()
        # index 0 (and any index > 4) builds without an extra SDK.
        if index==0 or index>4:
            pass
        else:
            # Dispatch table: copy number -> SDK embedding call; the lambda
            # is invoked immediately in the same iteration, so late binding
            # of xcode_build is not an issue here.
            sdk_functions={
                1:lambda :xcode_build.embedAssignSDK('Adcolony'),
                2:lambda :xcode_build.embedAssignSDK('Adview'),
                3:lambda :xcode_build.embedAssignSDK('Facebook'),
                4:lambda :xcode_build.embedAssignSDK('Youmi')
            }
            func=sdk_functions[index]
            func()
        xcode_build.buildApp()
    return
# ---- module-level bootstrap: runs when the script is executed ----
current_work_path=os.getcwd()
projecttest_path=current_work_path+"/project_test"
projectPathList=[projecttest_path]
# Best-effort removal of stale backup copies from a previous run.
for index in range(0,4):
    resultPath=projecttest_path+"/../backup%s"%(index)
    try:
        shutil.rmtree(resultPath)
    except BaseException:
        pass
        # print 'error!'
    pass
# Create four fresh copies of the project; note the finally clause —
# every copy path is registered for building even if copytree failed.
for index in range(0,4):
    resultPath=projecttest_path+"/../backup%s"%(index)
    try:
        shutil.copytree(projecttest_path, resultPath)
    except BaseException:
        pass
        # print 'error.'
    finally:
        projectPathList.append(resultPath)
    pass
# Build the original project plus the four backup copies.
test(projectPathList)
# ImgManager.sharedinstance().handle_icon_images()
# generateAppIcons()
| 1,580 | 0 | 45 |
0bf8ac14dd00049d22a2e0dc026034c8c1c67517 | 19,107 | py | Python | src/flags.py | kor-solidarity/screeps_python | 18479c00c2284eeddc583ad55e3ae9f9fc3e09ff | [
"MIT"
] | null | null | null | src/flags.py | kor-solidarity/screeps_python | 18479c00c2284eeddc583ad55e3ae9f9fc3e09ff | [
"MIT"
] | null | null | null | src/flags.py | kor-solidarity/screeps_python | 18479c00c2284eeddc583ad55e3ae9f9fc3e09ff | [
"MIT"
] | null | null | null | from defs import *
import random
import miscellaneous
import pathfinding
from _custom_constants import *
from structure_display import *
__pragma__('noalias', 'name')
__pragma__('noalias', 'undefined')
__pragma__('noalias', 'Infinity')
__pragma__('noalias', 'keys')
__pragma__('noalias', 'get')
__pragma__('noalias', 'set')
__pragma__('noalias', 'type')
__pragma__('noalias', 'update')
# todo 정리 후 원래 스폰에 있던거 전부 제거요망
# REMOTE---------------------------------------------------------------------------
# ALL remotes.
"""
완성될 시 절차:
- 깃발을 다 둘러본다.
- 자기소속 깃발이 있을 경우 (W1E2-rm) 옵션에 넣는다.
+ 각종 기본값을 설정한다.
+ 넣을때 기본값으로 주둔시킬 병사 수를 지정한다. 디폴트 0
+ 도로를 또 깔것인가? 길따라 깐다. 디폴트 0
+ 모든 컨트롤러 있는 방 루프돌려서 이미 소속된 다른방이 있으면 그거 지운다.
+ 넣고나서 깃발 지운다.
- 추후 특정 이름이 들어간 깃발은 명령어대로 하고 삭제한다.
"""
| 43.425 | 115 | 0.479615 | from defs import *
import random
import miscellaneous
import pathfinding
from _custom_constants import *
from structure_display import *
__pragma__('noalias', 'name')
__pragma__('noalias', 'undefined')
__pragma__('noalias', 'Infinity')
__pragma__('noalias', 'keys')
__pragma__('noalias', 'get')
__pragma__('noalias', 'set')
__pragma__('noalias', 'type')
__pragma__('noalias', 'update')
# todo 정리 후 원래 스폰에 있던거 전부 제거요망
# REMOTE---------------------------------------------------------------------------
# ALL remotes.
"""
완성될 시 절차:
- 깃발을 다 둘러본다.
- 자기소속 깃발이 있을 경우 (W1E2-rm) 옵션에 넣는다.
+ 각종 기본값을 설정한다.
+ 넣을때 기본값으로 주둔시킬 병사 수를 지정한다. 디폴트 0
+ 도로를 또 깔것인가? 길따라 깐다. 디폴트 0
+ 모든 컨트롤러 있는 방 루프돌려서 이미 소속된 다른방이 있으면 그거 지운다.
+ 넣고나서 깃발 지운다.
- 추후 특정 이름이 들어간 깃발은 명령어대로 하고 삭제한다.
"""
def run_flags():
flags = Game.flags
# 메모리화 절차
for flag_name in Object.keys(flags):
# 해당 플래그 오브젝트. flag_name 은 말그대로 이름뿐임.
flag_obj = flags[flag_name]
# 해당 깃발이 내 소속 방에 있는건지 확인
controlled = False
if flag_obj.room and flag_obj.room.controller and flag_obj.room.controller.my:
controlled = True
# 깃발 명령어 쪼개는데 필요함.
name_list = flag_name.split()
# 포문 끝나고 깃발 삭제할지 확인...
delete_flag = False
# 깃발이 있는 방이름.
flag_room_name = flag_obj.pos.roomName
# 건물 건설 지정.
if flag_name.includes(STRUCTURE_LINK) or flag_name.includes(STRUCTURE_CONTAINER) \
or flag_name.includes(STRUCTURE_SPAWN) or flag_name.includes(STRUCTURE_EXTENSION) \
or flag_name.includes(STRUCTURE_ROAD) or flag_name.includes(STRUCTURE_STORAGE) \
or flag_name.includes(STRUCTURE_RAMPART) or flag_name.includes(STRUCTURE_EXTRACTOR):
# todo 미완성임. -del 하고 섞일 수 있음.
bld_type = name_list[0]
# 링크용일 경우.
if bld_type == STRUCTURE_LINK:
bld_plan = flag_obj.room.createConstructionSite(flag_obj.pos, STRUCTURE_LINK)
# 컨테이너
elif bld_type == STRUCTURE_CONTAINER:
bld_plan = flag_obj.room.createConstructionSite(flag_obj.pos, STRUCTURE_LINK)
# 스폰
elif bld_type == STRUCTURE_SPAWN:
bld_plan = flag_obj.room.createConstructionSite(flag_obj.pos, STRUCTURE_SPAWN)
# 익스텐션
elif bld_type == STRUCTURE_EXTENSION:
bld_plan = flag_obj.room.createConstructionSite(flag_obj.pos, STRUCTURE_EXTENSION)
# storage
elif bld_type == STRUCTURE_STORAGE:
bld_plan = flag_obj.room.createConstructionSite(flag_obj.pos, STRUCTURE_STORAGE)
# todo 도로랑 램파트는 한번에 쭉 연결하는게 가능함. 그걸 확인해보자.
print(bld_plan, bld_type)
# 건설할 건물이 레벨부족 또는 한도초과로 못놓는 경우.
if bld_plan == ERR_RCL_NOT_ENOUGH or bld_plan == ERR_FULL:
# 건설용 메모리 초기화
if not chambro.memory.bld_plan:
chambro.memory.bld_plan = []
# 내 방이 아닌 경우 그냥 삭제.
# todo 멀티방이면 어찌할거임?
if bld_plan == ERR_RCL_NOT_ENOUGH and not controlled:
print('the {} cannot be built in {} - not controlled.'.format(bld_type, flag_obj.pos.roomName))
else:
print('added bld')
# json to put into the bld_plan memory
blds = {'type': bld_type, 'pos': flag_obj.pos}
chambro.memory.bld_plan.append(blds)
# 건설이 불가한 경우.
elif bld_plan == ERR_INVALID_TARGET or bld_plan == ERR_INVALID_ARGS:
print('building plan at {}x{}y is wrong: {}'.format(flag_obj.pos.x, flag_obj.pos.y, bld_plan))
delete_flag = True
# 방이름/방향 + -rm + 아무글자(없어도됨) << 방을 등록한다.
if flag_name.includes(spawn.room.name) and flag_name.includes("-rm"):
# 방이름 외 그냥 바로 위라던지 정도의 확인절차
# wasd 시스템(?) 사용
rm_loc = name_list.index('-rm')
target_room = name_list[rm_loc - 1]
# todo 방향 아직 안찍음
# 여기에 안뜨면 당연 방이름이 아니라 상대적 위치를 찍은거.
# if not Game.rooms[target_room]:
print('includes("-rm")')
# init. remote
if not Memory.rooms[spawn.room.name].options.remotes:
Memory.rooms[spawn.room.name].options.remotes = {}
# 혹시 다른방에 이 방이 이미 소속돼있는지도 확인한다. 있으면 없앤다.
for i in Object.keys(Memory.rooms):
# 같은방은 건들면 안됨...
if i == spawn.room.name:
continue
found_and_deleted = False
if Memory.rooms[i].options:
if Memory.rooms[i].options.remotes:
# for_num = 0
for r in Object.keys(Memory.rooms[i].options.remotes):
if r == flag_obj.pos.roomName:
del Memory.rooms[i].options.remotes[r]
# print('del')
found_and_deleted = True
break
# for_num += 1
if found_and_deleted:
break
# 방이 추가됐는지에 대한 불리언.
room_added = False
# 이미 방이 있는지 확인한다.
for r in Object.keys(Memory.rooms[spawn.room.name].options.remotes):
# 있으면 굳이 또 추가할 필요가 없음..
if r.roomName == flag_obj.pos.roomName:
room_added = True
break
print('room added?', room_added)
# 추가가 안된 상태면 초기화를 진행
if not room_added:
print('what??')
# init = {'roomName': Game.flag_obj.pos.roomName, 'defenders': 1, 'initRoad': 0,
# 'display': {'x': Game.flag_obj.pos.x, 'y': Game.flag_obj.pos.y}}
init = {'defenders': 1, 'initRoad': 0,
'display': {'x': flag_obj.pos.x,
'y': flag_obj.pos.y}}
Memory.rooms[spawn.room.name][options][remotes][flag_obj.pos.roomName] = init
# Memory.rooms[spawn.room.name][options][remotes].update({flag_obj.pos.roomName: init})
print('Memory.rooms[{}][options][remotes][{}]'.format(spawn.room.name,
flag_obj.pos.roomName),
JSON.stringify(Memory.rooms[spawn.room.name][options][remotes][flag_obj
.pos.roomName]))
delete_flag = True
# 주둔할 병사 수 재정의
if flag_name.includes('-def'):
print("includes('-def')")
number_added = False
included = name_list.index('-def')
# 초기화
number = 0
# 트라이에 걸린다는건 숫자 빼먹었거나 숫자가 아니라는거.
try:
number = int(name_list[included + 1])
number_added = True
except:
print("error for flag {}: no number for -def".format(flag_name))
if number_added:
# 방을 돌린다.
for i in Object.keys(Memory.rooms):
found = False
# 같은방을 찾으면 병사정보를 수정한다.
if Memory.rooms[i].options and Memory.rooms[i].options.remotes:
for r in Object.keys(Memory.rooms[i].options.remotes):
if r == flag_room_name:
Memory.rooms[i].options.remotes[r][defenders] = number
found = True
if found:
break
delete_flag = True
# 방의 수리단계 설정.
if flag_name.includes('-rp'):
print("includes('-rp')")
# 내 방이 아니면 이걸 돌리는 이유가없음....
if controlled:
included = name_list.index('-rp')
# 트라이에 걸린다는건 숫자 빼먹었거나 숫자가 아니라는거.
try:
number = name_list[included + 1]
number = int(number)
print('repair', number)
except:
print("error for flag {}: no number for -rp".format(flag_name))
# 설정 끝.
flag_obj.room.memory.options.repair = number
delete_flag = True
# 방의 운송크립수 설정.
if flag_name.includes('-hl'):
# 내 방이 아니면 이걸 돌리는 이유가없음....
if controlled:
included = name_list.index('-hl')
# 트라이에 걸린다는건 숫자 빼먹었거나 숫자가 아니라는거.
try:
number = name_list[included + 1]
number = int(number)
except:
print("error for flag {}: no number for -hl".format(flag_name))
# 설정 끝.
flag_obj.room.memory.options.haulers = number
delete_flag = True
# 방 안에 미네랄 채취 시작
if flag_name.includes('-mine'):
print('-mine')
# todo 키퍼방일 경우 추가요망. 현재는 내방만.
if controlled:
mineral_loc = flag_obj.room.find(FIND_MINERALS)[0]
# 엑스트랙터 생성
mineral_loc.pos.createConstructionSite(STRUCTURE_EXTRACTOR)
road_to_spawn = mineral_loc.pos.findPathTo(spawn, {'ignoreCreeps': True})
road_len = len(road_to_spawn)
counter = 0
# 줄따라 놓기
for s in road_to_spawn:
if counter == 0 or counter == road_len:
pass
elif counter == 1:
posi = __new__(RoomPosition(s.x, s.y, flag_obj.room.name))
posi.createConstructionSite(STRUCTURE_CONTAINER)
else:
posi = __new__(RoomPosition(s.x, s.y, flag_obj.room.name))
posi.createConstructionSite(STRUCTURE_ROAD)
counter += 1
delete_flag = True
# 방내 설정값 표기.
if flag_name.includes('-dsp'):
print("includes('-dsp')")
if not controlled:
# 리모트 소속방 찾는다.
for chambra_nomo in Object.keys(Game.rooms):
set_loc = False
if Memory.rooms[chambra_nomo].options:
# counter_num = 0
for r in Object.keys(Memory.rooms[chambra_nomo].options.remotes):
remote_room_name = r
# 방이름 이거랑 똑같은지.
# 안똑같으면 통과
if remote_room_name != flag_obj.pos.roomName:
print('{} != flags[{}].pos.roomName {}'
.format(remote_room_name, flag_name, flag_obj.pos.roomName))
pass
else:
print('Memory.rooms[chambra_nomo].options.remotes[counter_num].display'
, Memory.rooms[chambra_nomo].options.remotes[r].display)
if not Memory.rooms[chambra_nomo].options.remotes[r].display:
Memory.rooms[chambra_nomo].options.remotes[r].display = {}
rx = flag_obj.pos.x
ry = flag_obj.pos.y
Memory.rooms[chambra_nomo].options.remotes[r].display.x = rx
Memory.rooms[chambra_nomo].options.remotes[r].display.y = ry
set_loc = True
# counter_num += 1
if set_loc:
break
# 내 방이 아니면 이걸 돌리는 이유가없음....
if controlled:
# 만일 비어있으면 값 초기화.
if not flag_obj.room.memory.options.display:
flag_obj.room.memory.options.display = {}
# 깃발꽂힌 위치값 등록.
print('flagpos {}, {}'.format(flag_obj.pos.x, flag_obj.pos.y))
flag_obj.room.memory.options.display['x'] = flag_obj.pos.x
flag_obj.room.memory.options.display['y'] = flag_obj.pos.y
print('flags[{}].room.memory.options.display {}'
.format(flag_name, flag_obj.room.memory.options.display))
delete_flag = True
# 방 내 핵채우기 트리거. 예·아니오 토글
if flag_name.includes('-fln'):
delete_flag = True
if controlled:
if flag_obj.room.memory.options.fill_nuke == 1:
flag_obj.room.memory.options.fill_nuke = 0
elif flag_obj.room.memory.options.fill_nuke == 0:
flag_obj.room.memory.options.fill_nuke = 1
else:
flag_obj.room.memory.options.fill_nuke = 0
# 방 내 연구소 채우기 트리거. 예·아니오 토글
if flag_name.includes('-fll'):
delete_flag = True
if controlled:
if flag_obj.room.memory.options.fill_labs == 1:
flag_obj.room.memory.options.fill_labs = 0
elif flag_obj.room.memory.options.fill_labs == 0:
flag_obj.room.memory.options.fill_labs = 1
else:
flag_obj.room.memory.options.fill_labs = 0
# 램파트 토글.
if flag_name.includes('-ram'):
# 내 방이 아니면 이걸 돌리는 이유가없음....
if controlled:
# 램파트가 열렸는가?
if flag_obj.room.memory.options.ramparts_open == 1:
# 그럼 닫는다.
flag_obj.room.memory.options.ramparts = 2
# 그럼 닫힘?
elif flag_obj.room.memory.options.ramparts_open == 0:
# 열어
flag_obj.room.memory.options.ramparts = 1
delete_flag = True
# 타워공격 토글.
if flag_name.includes('-tow'):
# 내 방이 아니면 이걸 돌리는 이유가없음....
if controlled:
if flag_obj.room.memory.options.tow_atk == 1:
flag_obj.room.memory.options.tow_atk = 0
else:
flag_obj.room.memory.options.tow_atk = 1
delete_flag = True
# 디스플레이 제거. 쓸일은 없을듯 솔까.
if flag_name.includes('-dsprm'):
# 내 방이 아니면 이걸 돌리는 이유가없음....
if controlled:
# 깃발꽂힌 위치값 제거.
flag_obj.room.memory.options.display = {}
delete_flag = True
# 방 안 건설장 다 삭제..
if flag_name.includes('-clr'):
print("includes('-clr')")
# cons = Game.flag_obj.room.find(FIND_CONSTRUCTION_SITES)
world_const = Game.constructionSites
for c in Object.keys(world_const):
obj = Game.getObjectById(c)
if obj.pos.roomName == flag_room_name:
obj.remove()
# 원하는거 찾았으면 더 할 이유가 없으니.
if found:
break
delete_flag = True
# remote 배정된 방 삭제조치. 자기 방에서 했을 경우 해당 위치에 배정된 건물을 지운다.
if flag_name.includes('-del'):
print("includes('-del')")
# 자기 방으로 찍었을 경우 찍은 위치에 뭐가 있는지 확인하고 그걸 없앤다.
if flag_obj.room and flag_obj.room.controller \
and flag_obj.room.controller.my:
print('my room at {}'.format(flag_obj.room.name))
# 해당 위치에 건설장 또는 건물이 있으면 없앤다.
if len(flag_obj.pos.lookFor(LOOK_CONSTRUCTION_SITES)):
print(flag_obj.pos.lookFor(LOOK_CONSTRUCTION_SITES), JSON.stringify())
del_res = flag_obj.pos.lookFor(LOOK_CONSTRUCTION_SITES)[0].remove()
elif len(flag_obj.pos.lookFor(LOOK_STRUCTURES)):
del_res = flag_obj.pos.lookFor(LOOK_STRUCTURES)[0].destroy()
# 만약 건물도 건설장도 없으면 해당 위치에 배정된 건설 메모리가 있나 찾아본다
elif chambro.memory.bld_plan:
num = 0
for plan in chambro.memory.bld_plan:
if JSON.stringify(plan.pos) == JSON.stringify(flag_obj.pos):
chambro.memory.bld_plan.splice(num, 1)
print('deleted!')
num += 1
# if its remote room
else:
# 방을 돌린다.
for i in Object.keys(Memory.rooms):
found = False
if Memory.rooms[i].options:
print('Memory.rooms[{}].options.remotes {}'.format(i, JSON.stringify(
Memory.rooms[i].options.remotes)))
print('len(Memory.rooms[{}].options.remotes) {}'.format(i, len(
Memory.rooms[i].options.remotes)))
# 옵션안에 리모트가 없을수도 있음.. 특히 확장 안했을때.
if len(Memory.rooms[i].options.remotes) > 0:
# 리모트 안에 배정된 방이 있는지 확인한다.
# 아래 포문에 씀.
del_number = 0
for r in Object.keys(Memory.rooms[i].options.remotes):
print('r', r, 'flag_room_name', flag_room_name)
# 배정된 방을 찾으면 이제 방정보 싹 다 날린다.
if r == flag_room_name:
# del_number = r # Memory.rooms[i].options.remotes[r]
print('deleting roomInfo Memory.rooms[{}].options.remotes[{}]'
.format(i, r), 'del_number', del_number)
# Memory.rooms[i].options.remotes.splice(del_number, 1)
del Memory.rooms[i].options.remotes[r]
found = True
# 방에 짓고있는것도 다 취소
world_const = Game.constructionSites
for c in Object.keys(world_const):
obj = Game.getObjectById(c)
if obj.pos.roomName == flag_room_name:
obj.remove()
# if Game.flag_obj.room:
# cons = Game.flag_obj.room.find(FIND_CONSTRUCTION_SITES)
# for c in cons:
# c.remove()
break
del_number += 1
# 원하는거 찾았으면 더 할 이유가 없으니.
if found:
break
delete_flag = True
# 방 안에 건물확인 스크립트 초기화 조치
if flag_name.includes('-rset'):
print("resetting")
if controlled:
chambro.memory[options][reset] = 1
else:
print(flag_obj.room.name, '은 내 방이 아님.')
delete_flag = True
if delete_flag:
aa = flag_obj.remove()
print('delete {}: {}'.format(flag_obj, aa))
| 20,307 | 0 | 23 |
0f1ac96711ae26dda8377bc5d76c3872eea1afb7 | 7,311 | py | Python | kgtk/cli/remove_columns.py | shashank73744/kgtk | e8b45e35e97cbe788edeaf0962742e48b24e309c | [
"MIT"
] | null | null | null | kgtk/cli/remove_columns.py | shashank73744/kgtk | e8b45e35e97cbe788edeaf0962742e48b24e309c | [
"MIT"
] | null | null | null | kgtk/cli/remove_columns.py | shashank73744/kgtk | e8b45e35e97cbe788edeaf0962742e48b24e309c | [
"MIT"
] | null | null | null | """
Remove columns from a KGTK file.
"""
from argparse import Namespace, SUPPRESS
import typing
from kgtk.cli_argparse import KGTKArgumentParser, KGTKFiles
def add_arguments_extended(parser: KGTKArgumentParser, parsed_shared_args: Namespace):
"""
Parse arguments
Args:
parser (argparse.ArgumentParser)
"""
from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions, KgtkReaderMode
from kgtk.utils.argparsehelpers import optional_bool
from kgtk.value.kgtkvalueoptions import KgtkValueOptions
_expert: bool = parsed_shared_args._expert
parser.add_input_file(positional=True)
parser.add_output_file()
parser.add_argument('-c', "--columns", action="store", type=str, dest="columns", nargs='+', required=True,
help="Columns to remove as a comma- or space-separated strings, e.g., id,docid or id docid")
parser.add_argument( "--split-on-commas", dest="split_on_commas", help="Parse the list of columns, splitting on commas. (default=%(default)s).",
type=optional_bool, nargs='?', const=True, default=True)
parser.add_argument( "--split-on-spaces", dest="split_on_spaces", help="Parse the list of columns, splitting on spaces. (default=%(default)s).",
type=optional_bool, nargs='?', const=True, default=False)
parser.add_argument( "--strip-spaces", dest="strip_spaces", help="Parse the list of columns, stripping whitespace. (default=%(default)s).",
type=optional_bool, nargs='?', const=True, default=True)
KgtkReader.add_debug_arguments(parser, expert=_expert)
KgtkReaderOptions.add_arguments(parser, mode_options=True, default_mode=KgtkReaderMode.NONE, expert=_expert)
KgtkValueOptions.add_arguments(parser, expert=_expert)
| 41.073034 | 153 | 0.616879 | """
Remove columns from a KGTK file.
"""
from argparse import Namespace, SUPPRESS
import typing
from kgtk.cli_argparse import KGTKArgumentParser, KGTKFiles
def parser():
    """Return the CLI metadata (help and description) for this subcommand."""
    metadata = dict(
        help='Remove columns from a KGTK file.',
        description='Remove specific columns from a KGTK file.',
    )
    return metadata
def add_arguments_extended(parser: KGTKArgumentParser, parsed_shared_args: Namespace):
    """
    Register this subcommand's arguments on the shared KGTK parser.

    Adds the input/output file options, the required -c/--columns list,
    and the tokenization flags, then the common reader/value options.

    Args:
        parser (KGTKArgumentParser): parser to extend.
        parsed_shared_args (Namespace): shared options already parsed;
            only the private ``_expert`` flag is consulted here.
    """
    from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions, KgtkReaderMode
    from kgtk.utils.argparsehelpers import optional_bool
    from kgtk.value.kgtkvalueoptions import KgtkValueOptions
    # Expert mode exposes additional (normally suppressed) options below.
    _expert: bool = parsed_shared_args._expert
    parser.add_input_file(positional=True)
    parser.add_output_file()
    parser.add_argument('-c', "--columns", action="store", type=str, dest="columns", nargs='+', required=True,
                        help="Columns to remove as a comma- or space-separated strings, e.g., id,docid or id docid")
    parser.add_argument(      "--split-on-commas", dest="split_on_commas", help="Parse the list of columns, splitting on commas. (default=%(default)s).",
                              type=optional_bool, nargs='?', const=True, default=True)
    parser.add_argument(      "--split-on-spaces", dest="split_on_spaces", help="Parse the list of columns, splitting on spaces. (default=%(default)s).",
                              type=optional_bool, nargs='?', const=True, default=False)
    parser.add_argument(      "--strip-spaces", dest="strip_spaces", help="Parse the list of columns, stripping whitespace. (default=%(default)s).",
                              type=optional_bool, nargs='?', const=True, default=True)
    KgtkReader.add_debug_arguments(parser, expert=_expert)
    # NONE mode: do not force edge/node file semantics on the input.
    KgtkReaderOptions.add_arguments(parser, mode_options=True, default_mode=KgtkReaderMode.NONE, expert=_expert)
    KgtkValueOptions.add_arguments(parser, expert=_expert)
def run(input_file: KGTKFiles,
        output_file: KGTKFiles,
        columns: typing.Optional[typing.List[str]],
        split_on_commas: bool,
        split_on_spaces: bool,
        strip_spaces: bool,
        errors_to_stdout: bool = False,
        errors_to_stderr: bool = True,
        show_options: bool = False,
        verbose: bool = False,
        very_verbose: bool = False,
        **kwargs # Whatever KgtkFileOptions and KgtkValueOptions want.
        )->int:
    """
    Execute the remove-columns command.

    Tokenizes the requested column names (optionally splitting on commas
    and/or spaces and stripping whitespace), opens the input KGTK file,
    and copies every row to the output file with the named columns dropped.

    Args:
        input_file / output_file: KGTK file specifications.
        columns: raw column-name arguments from the command line.
        split_on_commas / split_on_spaces / strip_spaces: how the raw
            column arguments are tokenized into column names.
        errors_to_stdout / errors_to_stderr: where diagnostics are sent.
        show_options: dump the resolved options before running.
        verbose / very_verbose: progress reporting level.
        **kwargs: consumed by KgtkReaderOptions / KgtkValueOptions.

    Returns:
        0 on success, 1 on error (after kgtk_exception_auto_handler
        has reported the exception).
    """
    # import modules locally
    from pathlib import Path
    import sys
    from kgtk.exceptions import kgtk_exception_auto_handler, KGTKException
    from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions, KgtkReaderMode
    from kgtk.io.kgtkwriter import KgtkWriter
    from kgtk.value.kgtkvalueoptions import KgtkValueOptions
    input_kgtk_file: Path = KGTKArgumentParser.get_input_file(input_file)
    output_kgtk_file: Path = KGTKArgumentParser.get_output_file(output_file)
    # Select where to send error messages, defaulting to stderr.
    error_file: typing.TextIO = sys.stdout if errors_to_stdout else sys.stderr
    # Build the option structures.
    reader_options: KgtkReaderOptions = KgtkReaderOptions.from_dict(kwargs)
    value_options: KgtkValueOptions = KgtkValueOptions.from_dict(kwargs)
    # Show the final option structures for debugging and documentation.
    if show_options:
        print("--input-file=%s" % str(input_kgtk_file), file=error_file)
        print("--output-file=%s" % str(output_kgtk_file), file=error_file)
        if columns is not None:
            print("--columns=%s" % " ".join(columns), file=error_file)
        print("--split-on-commas=%s" % str(split_on_commas), file=error_file)
        print("--split-on-spaces=%s" % str(split_on_spaces), file=error_file)
        print("--strip-spaces=%s" % str(strip_spaces), file=error_file)
        reader_options.show(out=error_file)
        value_options.show(out=error_file)
        print("=======", file=error_file, flush=True)
    try:
        if columns is None:
            columns = [ ] # This simplifies matters.
        if split_on_spaces:
            # We will be very lenient, and allow space-seperated arguments
            # *inside* shell quoting, e.g.
            #
            # kgtk remove_columns -c 'name name2 name3'
            #
            # Do not enable this option if spaces are legal inside your
            # column names.
            columns = " ".join(columns).split()
        # Tokenize the raw arguments into the final list of column names.
        remove_columns: typing.List[str] = [ ]
        arg: str
        column_name: str
        for arg in columns:
            if split_on_commas:
                for column_name in arg.split(","):
                    if strip_spaces:
                        column_name = column_name.strip()
                    if len(column_name) > 0:
                        remove_columns.append(column_name)
            else:
                if strip_spaces:
                    arg = arg.strip()
                if len(arg) > 0:
                    remove_columns.append(arg)
        if verbose:
            print("Removing %d columns: %s" % (len(remove_columns), " ".join(remove_columns)), file=error_file, flush=True)
        if len(remove_columns) == 0:
            raise KGTKException("No columns to remove")
        if verbose:
            print("Opening the input file: %s" % str(input_kgtk_file), file=error_file, flush=True)
        kr: KgtkReader = KgtkReader.open(input_kgtk_file,
                                         options=reader_options,
                                         value_options = value_options,
                                         error_file=error_file,
                                         verbose=verbose,
                                         very_verbose=very_verbose,
        )
        # Output header = input header minus the removed columns; unknown
        # names are collected and reported together before failing.
        output_column_names: typing.List[str] = kr.column_names.copy()
        trouble_column_names: typing.List[str] = [ ]
        for column_name in remove_columns:
            if column_name in output_column_names:
                output_column_names.remove(column_name)
            else:
                print("Error: cannot remove unknown column '%s'." % column_name, file=error_file, flush=True)
                trouble_column_names.append(column_name)
        if len(trouble_column_names) > 0:
            raise KGTKException("Unknown columns %s" % " ".join(trouble_column_names))
        if verbose:
            print("Opening the output file: %s" % str(output_kgtk_file), file=error_file, flush=True)
        kw: KgtkWriter = KgtkWriter.open(output_column_names,
                                         output_kgtk_file,
                                         mode=KgtkWriter.Mode[kr.mode.name],
                                         verbose=verbose,
                                         very_verbose=very_verbose)
        # shuffle_list maps each input column to its output position (or drop).
        shuffle_list: typing.List[int] = kw.build_shuffle_list(kr.column_names)
        input_line_count: int = 0
        row: typing.List[str]
        for row in kr:
            input_line_count += 1
            kw.write(row, shuffle_list=shuffle_list)
        if verbose:
            print("Processed %d rows." % (input_line_count), file=error_file, flush=True)
        kw.close()
        return 0
    except Exception as e:
        kgtk_exception_auto_handler(e)
        return 1
| 5,429 | 0 | 46 |
cf5c88ac7ec5d660e56f6f172e6ee872ace993e3 | 589 | py | Python | init_all.py | Frankkie/Thesis-Project-IF-Game | e44cc2f7ce19bbfa04de3a4fee959651024b276b | [
"MIT"
] | 1 | 2021-06-12T22:54:24.000Z | 2021-06-12T22:54:24.000Z | init_all.py | Frankkie/Thesis-Project-IF-Game | e44cc2f7ce19bbfa04de3a4fee959651024b276b | [
"MIT"
] | null | null | null | init_all.py | Frankkie/Thesis-Project-IF-Game | e44cc2f7ce19bbfa04de3a4fee959651024b276b | [
"MIT"
] | null | null | null | """
"""
from display import Display
from load import Loader
from game import Game
class Initializer:
"""
The class that handles the program before a game is launched.
"""
| 17.848485 | 65 | 0.619694 | """
"""
from display import Display
from load import Loader
from game import Game
class Initializer:
"""
The class that handles the program before a game is launched.
"""
def __init__(self):
self.display = Display(None)
self.loader = None
self.game = None
def load_game(self):
game_name = "Union Colonizer"
self.loader = Loader(game_name, None)
dct, game_args = self.loader.load_game(self.display)
self.game = Game(**dct)
self.display.game = self.game
self.game.boot_game(**game_args)
| 343 | 0 | 54 |
1f57911b5e44b1ab4f8fa09702e065453ebc8590 | 3,666 | py | Python | bobinler.py | enderyilmazz/ElektronikKomponentKayitKontrolSistemi | 0a5219aa8d6d729b7a240176bc02770681eaf6fe | [
"MIT"
] | 1 | 2021-12-02T10:55:41.000Z | 2021-12-02T10:55:41.000Z | bobinler.py | enderyilmazz/ElektronikKomponentKayitKontrolSistemi | 0a5219aa8d6d729b7a240176bc02770681eaf6fe | [
"MIT"
] | null | null | null | bobinler.py | enderyilmazz/ElektronikKomponentKayitKontrolSistemi | 0a5219aa8d6d729b7a240176bc02770681eaf6fe | [
"MIT"
] | null | null | null | #171602012 - Ender Yılmaz
#Bilişim Sistemleri Mühendisliği
#Elektronik Komponent Kayıt Kontrol Sistemi
from PyQt5 import QtCore, QtGui, QtWidgets | 53.130435 | 99 | 0.688762 | #171602012 - Ender Yılmaz
#Bilişim Sistemleri Mühendisliği
#Elektronik Komponent Kayıt Kontrol Sistemi
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_bobinler(object):
def setupUi(self, bobinler):
bobinler.setObjectName("bobinler")
bobinler.resize(293, 190)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("buton_resim/bobin.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
bobinler.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(bobinler)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(20, 20, 111, 16))
self.label.setObjectName("label")
self.mgecirgenlik = QtWidgets.QLineEdit(self.centralwidget)
self.mgecirgenlik.setGeometry(QtCore.QRect(130, 20, 151, 20))
self.mgecirgenlik.setObjectName("mgecirgenlik")
self.sarim = QtWidgets.QLineEdit(self.centralwidget)
self.sarim.setGeometry(QtCore.QRect(130, 50, 151, 20))
self.sarim.setObjectName("sarim")
self.kesitalan = QtWidgets.QLineEdit(self.centralwidget)
self.kesitalan.setGeometry(QtCore.QRect(130, 80, 151, 20))
self.kesitalan.setObjectName("kesitalan")
self.uzunluk = QtWidgets.QLineEdit(self.centralwidget)
self.uzunluk.setGeometry(QtCore.QRect(130, 110, 151, 20))
self.uzunluk.setObjectName("uzunluk")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(20, 50, 111, 16))
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(20, 80, 111, 16))
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(20, 110, 111, 16))
self.label_4.setObjectName("label_4")
self.hesapla_btn = QtWidgets.QPushButton(self.centralwidget)
self.hesapla_btn.setGeometry(QtCore.QRect(230, 140, 51, 23))
self.hesapla_btn.setObjectName("hesapla_btn")
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setGeometry(QtCore.QRect(20, 140, 111, 16))
self.label_5.setObjectName("label_5")
self.enduktans_label = QtWidgets.QLabel(self.centralwidget)
self.enduktans_label.setGeometry(QtCore.QRect(130, 140, 101, 16))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.enduktans_label.setFont(font)
self.enduktans_label.setObjectName("enduktans_label")
bobinler.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(bobinler)
self.statusbar.setObjectName("statusbar")
bobinler.setStatusBar(self.statusbar)
self.retranslateUi(bobinler)
QtCore.QMetaObject.connectSlotsByName(bobinler)
def retranslateUi(self, bobinler):
    """Install the user-visible (translatable) captions on the coil-calculator widgets."""
    _translate = QtCore.QCoreApplication.translate
    bobinler.setWindowTitle(_translate("bobinler", "Bobin Hesaplama"))
    # Widget/caption pairs; every caption is routed through Qt's translation layer.
    captions = (
        (self.label, "Manyetik Geçirgenlik :"),
        (self.label_2, "Sarım Sayısı :"),
        (self.label_3, "Bobin Kesit Alan :"),
        (self.label_4, "Tel Uzunluğu :"),
        (self.hesapla_btn, "Hesapla"),
        (self.label_5, "Endüktans :"),
        (self.enduktans_label, "000 H"),
    )
    for widget, caption in captions:
        widget.setText(_translate("bobinler", caption))
204a60b80807dd098d39cbc5d789117dabc04f40 | 555 | py | Python | src/timeatlas/models/NN/util.py | fredmontet/timeatlas | 9a439a913ef9a8a1ef9833b42e5fb4e988d7e35e | [
"MIT"
] | 10 | 2020-08-25T09:23:02.000Z | 2021-01-12T14:00:35.000Z | src/timeatlas/models/NN/util.py | fredmontet/timeatlas | 9a439a913ef9a8a1ef9833b42e5fb4e988d7e35e | [
"MIT"
] | 140 | 2020-06-30T11:59:47.000Z | 2021-08-23T20:58:43.000Z | src/timeatlas/models/NN/util.py | fredmontet/timeatlas | 9a439a913ef9a8a1ef9833b42e5fb4e988d7e35e | [
"MIT"
] | null | null | null | import numpy as np
def chunkify(tsd, seq_len):
    """
    Splits a TimeSeriesDataset into sliding windows of length seq_len.

    Args:
        tsd: TimeSeriesDataset object (iterable of series exposing
            ``__len__`` and a ``_data["values"]`` sequence)
        seq_len: length of the subsequences to return

    Returns:
        tuple of numpy arrays ``(inputs, targets)`` where each input is a
        window of ``seq_len`` consecutive values and each target is the
        value that immediately follows that window.
    """
    inputs, targets = [], []
    for series in tsd:
        values = series._data["values"]
        # One window per valid start offset; the element right after the
        # window becomes the prediction target.
        for start in range(len(series) - seq_len):
            inputs.append(values[start:start + seq_len])
            targets.append(values[start + seq_len])
    return np.array(inputs), np.array(targets)
| 21.346154 | 60 | 0.569369 | import numpy as np
def chunkify(tsd, seq_len):
    """
    Splits a TimeSeriesDataset into chunks of length seq_len.

    Args:
        tsd: TimeSeriesDataset object
        seq_len: length of the subsequences to return

    Returns:
        tuple of numpy arrays: the chunks of size seq_len and, aligned with
        them, the single value following each chunk.
    """
    windows = []
    labels = []
    for series in tsd:
        n_chunks = len(series) - seq_len
        for offset in range(n_chunks):
            # Window of seq_len values plus its one-step-ahead label.
            windows.append(series._data["values"][offset:offset + seq_len])
            labels.append(series._data["values"][offset + seq_len])
    return np.array(windows), np.array(labels)
| 0 | 0 | 0 |
5adb499c490f3f509aec32ba50dc8a30ee53dbe1 | 861 | py | Python | leon_utils/clime.py | Leonerist/YoloX-Track-Plate_Recognition-MoreModules | fde4a001c0088d4bfe7a027f31040e732ec8b519 | [
"Apache-2.0"
] | 7 | 2022-03-12T09:09:44.000Z | 2022-03-20T05:19:23.000Z | leon_utils/clime.py | Leonerist/YoloX-Track-Plate_Recognition-MoreModules | fde4a001c0088d4bfe7a027f31040e732ec8b519 | [
"Apache-2.0"
] | null | null | null | leon_utils/clime.py | Leonerist/YoloX-Track-Plate_Recognition-MoreModules | fde4a001c0088d4bfe7a027f31040e732ec8b519 | [
"Apache-2.0"
] | null | null | null | import cv2
import uuid
import os
# tar is the last 4 characters of the target licence-plate number
| 25.323529 | 61 | 0.541231 | import cv2
import uuid
import os
def pic_save(tar_num, pic, root):
    """Write *pic* to ``<root>/clime_pics/<tar_num>/<random-uuid>.jpg``.

    Args:
        tar_num: last 4 characters of the plate number, used as the subfolder name.
        pic: image array accepted by ``cv2.imwrite`` — presumably a BGR ndarray,
            confirm with caller.
        root: base output directory.
    """
    target_dir = os.path.join(root, 'clime_pics', tar_num)
    # makedirs also creates the intermediate 'clime_pics' directory; the previous
    # os.mkdir raised FileNotFoundError whenever that parent did not exist yet.
    os.makedirs(target_dir, exist_ok=True)
    pic_name = os.path.join(target_dir, '{}.jpg'.format(uuid.uuid4().hex))
    cv2.imwrite(pic_name, pic)
# tar is the last 4 characters of the target licence-plate number
def get_clime(tars, figures_mes, figures, root):
    """Collect tracker ids of vehicles whose plate suffix matches a wanted plate.

    Args:
        tars: collection of 4-character plate-number suffixes to look for.
        figures_mes: detection records; index 4 holds the tracker id and
            index 6 the recognized plate string (assumed — confirm with caller).
        figures: (image, tracker_id) pairs of cropped vehicle photos.
        root: base directory the matched photos are saved under.

    Returns:
        list of tracker ids that matched; one entry per saved photo.
    """
    id_list = []
    for figure_mes in figures_mes:
        # Target plate found: its last 4 characters are in the wanted set.
        if figure_mes[6][-4:] in tars:
            tar_id = figure_mes[4]
            # Save every stored photo belonging to this tracker id.
            for figure in figures:
                fig_id = figure[1]  # renamed from `id` to stop shadowing the builtin
                if fig_id == tar_id:
                    id_list.append(fig_id)
                    pic_save(figure_mes[6][-4:], figure[0], root)
    return id_list
| 787 | 0 | 45 |
d33b7ffbe95fb1679388cf5f2d1ef3c68a73def9 | 183 | py | Python | xudo/commands/__init__.py | Ostralyan/xudo | d4ab6ee01fef52e8799ff3e6a76234351620447e | [
"MIT"
] | 3 | 2017-04-05T17:30:13.000Z | 2018-09-18T02:14:40.000Z | xudo/commands/__init__.py | Ostralyan/xudo | d4ab6ee01fef52e8799ff3e6a76234351620447e | [
"MIT"
] | null | null | null | xudo/commands/__init__.py | Ostralyan/xudo | d4ab6ee01fef52e8799ff3e6a76234351620447e | [
"MIT"
] | 2 | 2017-05-30T17:42:31.000Z | 2017-12-14T20:14:31.000Z | from .logs import *
from .pull import *
from .build import *
from .clean import *
from .prune import *
from .test import *
from .watch import *
from .jooq import *
from .dbpw import * | 20.333333 | 20 | 0.710383 | from .logs import *
from .pull import *
from .build import *
from .clean import *
from .prune import *
from .test import *
from .watch import *
from .jooq import *
from .dbpw import * | 0 | 0 | 0 |
eafe07d9b54c8a2808bc734ee1b880f491578b49 | 1,861 | py | Python | douban/douban/settings.py | ziyiyizi/crawl_web | 191b0839d9d095adccd8c2ab893d35c54dfc4053 | [
"Apache-2.0"
] | 11 | 2016-08-01T09:28:23.000Z | 2021-11-08T08:45:54.000Z | douban/douban/settings.py | ziyiyizi/crawl_web | 191b0839d9d095adccd8c2ab893d35c54dfc4053 | [
"Apache-2.0"
] | null | null | null | douban/douban/settings.py | ziyiyizi/crawl_web | 191b0839d9d095adccd8c2ab893d35c54dfc4053 | [
"Apache-2.0"
] | 4 | 2016-09-09T04:45:39.000Z | 2019-10-29T06:34:39.000Z | # -*- coding: utf-8 -*-
# Scrapy settings for douban project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'douban'
# Where Scrapy looks for spiders. This is a list, so several locations may be given.
SPIDER_MODULES = ['douban.spiders']
# Where spiders generated with the `scrapy genspider [-t template] <name> <domain>` command are placed.
NEWSPIDER_MODULE = 'douban.spiders'
# Retry many times since proxies often fail
RETRY_TIMES = 10
# Retry on most error codes since proxies fail for different reasons
RETRY_HTTP_CODES = [500, 503, 504, 400, 403, 404, 408]
# If you do not want to crawl pages through proxy IPs, comment out the first
# three components below. The fourth component customizes the request header.
DOWNLOADER_MIDDLEWARES = {
# 'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 90,
# 'douban.randomproxy.RandomProxy': 100,
# 'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 110,
'douban.MyMiddlewares.CustomUserAgentMiddleware':345,
}
# Change this path to your own proxy-list location.
PROXY_LIST = '/home/vincent/crawl_web/douban/proxy_list.txt'
# Configure item pipelines
ITEM_PIPELINES = {
'douban.pipelines.BookInfoPipeline': 300,
'douban.pipelines.IDPipeline': 500,
}
DOWNLOAD_DELAY = 2
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=False
# The initial download delay
#AUTOTHROTTLE_START_DELAY=3
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=12
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
| 33.232143 | 80 | 0.785599 | # -*- coding: utf-8 -*-
# Scrapy settings for douban project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'douban'
# Where Scrapy looks for spiders. This is a list, so several locations may be given.
SPIDER_MODULES = ['douban.spiders']
# Where spiders generated with the `scrapy genspider [-t template] <name> <domain>` command are placed.
NEWSPIDER_MODULE = 'douban.spiders'
# Retry many times since proxies often fail
RETRY_TIMES = 10
# Retry on most error codes since proxies fail for different reasons
RETRY_HTTP_CODES = [500, 503, 504, 400, 403, 404, 408]
# If you do not want to crawl pages through proxy IPs, comment out the first
# three components below. The fourth component customizes the request header.
DOWNLOADER_MIDDLEWARES = {
# 'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 90,
# 'douban.randomproxy.RandomProxy': 100,
# 'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 110,
'douban.MyMiddlewares.CustomUserAgentMiddleware':345,
}
# Change this path to your own proxy-list location.
PROXY_LIST = '/home/vincent/crawl_web/douban/proxy_list.txt'
# Configure item pipelines
ITEM_PIPELINES = {
'douban.pipelines.BookInfoPipeline': 300,
'douban.pipelines.IDPipeline': 500,
}
DOWNLOAD_DELAY = 2
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=False
# The initial download delay
#AUTOTHROTTLE_START_DELAY=3
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=12
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
| 0 | 0 | 0 |
db1ca0e3ea0fb2786dbc481f81b1c276be13cf33 | 613 | py | Python | parrainage/app/management/commands/create_initial_admin_user.py | ronnix/parrainage | cc5a94b1dd7c5654ff35312c60930e06d327103d | [
"MIT"
] | 1 | 2022-02-20T02:37:07.000Z | 2022-02-20T02:37:07.000Z | parrainage/app/management/commands/create_initial_admin_user.py | ronnix/parrainage | cc5a94b1dd7c5654ff35312c60930e06d327103d | [
"MIT"
] | 41 | 2022-02-17T09:50:08.000Z | 2022-03-09T07:53:25.000Z | parrainage/app/management/commands/create_initial_admin_user.py | ronnix/parrainage | cc5a94b1dd7c5654ff35312c60930e06d327103d | [
"MIT"
] | null | null | null | import os
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
| 26.652174 | 88 | 0.683524 | import os
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
class Command(BaseCommand):
    """Create a single initial admin account from environment variables.

    Does nothing when any user already exists, so the command can be run
    unconditionally on every deployment.
    """

    help = "Créer un utilisateur admin initial"

    def handle(self, *args, **options):
        # Bail out as soon as the user table is populated.
        user_model = get_user_model()
        if user_model.objects.exists():
            return
        username = os.environ["ADMIN_USERNAME"]
        user_model.objects.create_superuser(
            username=username,
            password=os.environ["ADMIN_PASSWORD"],
            email=os.environ["ADMIN_EMAIL"],
        )
        self.stdout.write(f'Initial user "{username}" was created')
| 398 | 82 | 23 |
54b127d16e206063b8d9142f1e864c12d75bf507 | 46,092 | py | Python | astroquery/esa/jwst/tests/test_jwsttap.py | mfisherlevine/astroquery | cda88ef18b308563e86ee79bcc78e4ac59874df1 | [
"BSD-3-Clause"
] | null | null | null | astroquery/esa/jwst/tests/test_jwsttap.py | mfisherlevine/astroquery | cda88ef18b308563e86ee79bcc78e4ac59874df1 | [
"BSD-3-Clause"
] | null | null | null | astroquery/esa/jwst/tests/test_jwsttap.py | mfisherlevine/astroquery | cda88ef18b308563e86ee79bcc78e4ac59874df1 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
===============
eJWST TAP tests
===============
European Space Astronomy Centre (ESAC)
European Space Agency (ESA)
"""
import os
import shutil
from unittest.mock import MagicMock
import astropy.units as u
import numpy as np
import pytest
from astropy import units
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy.table import Table
from astropy.units import Quantity
from astroquery.esa.jwst import JwstClass
from astroquery.esa.jwst.tests.DummyTapHandler import DummyTapHandler
from astroquery.ipac.ned import Ned
from astroquery.simbad import Simbad
from astroquery.utils import TableList
from astroquery.utils.tap.conn.tests.DummyConnHandler import DummyConnHandler
from astroquery.utils.tap.conn.tests.DummyResponse import DummyResponse
from astroquery.utils.tap.core import TapPlus
from astroquery.utils.tap.xmlparser import utils
from astroquery.vizier import Vizier
from astroquery.esa.jwst import conf
@pytest.fixture(autouse=True)
@pytest.fixture(autouse=True)
@pytest.fixture(autouse=True)
planeids = "('00000000-0000-0000-879d-ae91fa2f43e2', '00000000-0000-0000-9852-a9fa8c63f7ef')"
| 43.897143 | 115 | 0.595266 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
===============
eJWST TAP tests
===============
European Space Astronomy Centre (ESAC)
European Space Agency (ESA)
"""
import os
import shutil
from unittest.mock import MagicMock
import astropy.units as u
import numpy as np
import pytest
from astropy import units
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy.table import Table
from astropy.units import Quantity
from astroquery.esa.jwst import JwstClass
from astroquery.esa.jwst.tests.DummyTapHandler import DummyTapHandler
from astroquery.ipac.ned import Ned
from astroquery.simbad import Simbad
from astroquery.utils import TableList
from astroquery.utils.tap.conn.tests.DummyConnHandler import DummyConnHandler
from astroquery.utils.tap.conn.tests.DummyResponse import DummyResponse
from astroquery.utils.tap.core import TapPlus
from astroquery.utils.tap.xmlparser import utils
from astroquery.vizier import Vizier
from astroquery.esa.jwst import conf
def data_path(filename):
    """Return the path of *filename* inside this test module's ``data`` folder."""
    return os.path.join(os.path.dirname(__file__), 'data', filename)
def get_plane_id_mock(url, *args, **kwargs):
    """Stand-in for ``JwstClass._get_plane_id``: a fixed plane-id list and the integer 3."""
    plane_ids = ['00000000-0000-0000-879d-ae91fa2f43e2']
    return plane_ids, 3
@pytest.fixture(autouse=True)
def plane_id_request(request):
    """Autouse fixture: replace ``JwstClass._get_plane_id`` with the static mock."""
    monkeypatch = request.getfixturevalue("monkeypatch")
    monkeypatch.setattr(JwstClass, '_get_plane_id', get_plane_id_mock)
    return monkeypatch
def get_associated_planes_mock(url, *args, **kwargs):
    """Stand-in for ``JwstClass._get_associated_planes``.

    Returns a single-plane tuple string when called with ``max_cal_level=2``,
    otherwise the module-level ``planeids`` pair.
    """
    if kwargs.get("max_cal_level") != 2:
        return planeids
    return "('00000000-0000-0000-879d-ae91fa2f43e2')"
@pytest.fixture(autouse=True)
def associated_planes_request(request):
    """Autouse fixture: replace ``JwstClass._get_associated_planes`` with its mock."""
    monkeypatch = request.getfixturevalue("monkeypatch")
    monkeypatch.setattr(JwstClass, '_get_associated_planes', get_associated_planes_mock)
    return monkeypatch
def get_product_mock(params, *args, **kwargs):
    """Stand-in for ``JwstClass._query_get_product``.

    Returns an artifact id for the ``file_name='file_name_id'`` query and a
    fixed FITS file name for everything else.
    """
    # kwargs.get returns None when the key is absent, so the membership
    # test of the original is redundant here.
    if kwargs.get('file_name') == 'file_name_id':
        return "00000000-0000-0000-8740-65e2827c9895"
    return "jw00617023001_02102_00001_nrcb4_uncal.fits"
@pytest.fixture(autouse=True)
def get_product_request(request):
    """Autouse fixture: replace ``JwstClass._query_get_product`` with its mock.

    Tests marked ``noautofixt`` opt out of the patching.
    """
    if 'noautofixt' in request.keywords:
        return
    monkeypatch = request.getfixturevalue("monkeypatch")
    monkeypatch.setattr(JwstClass, '_query_get_product', get_product_mock)
    return monkeypatch
planeids = "('00000000-0000-0000-879d-ae91fa2f43e2', '00000000-0000-0000-9852-a9fa8c63f7ef')"
class TestTap:
def test_load_tables(self):
dummyTapHandler = DummyTapHandler()
tap = JwstClass(tap_plus_handler=dummyTapHandler, show_messages=False)
# default parameters
parameters = {}
parameters['only_names'] = False
parameters['include_shared_tables'] = False
parameters['verbose'] = False
tap.load_tables()
dummyTapHandler.check_call('load_tables', parameters)
# test with parameters
dummyTapHandler.reset()
parameters = {}
parameters['only_names'] = True
parameters['include_shared_tables'] = True
parameters['verbose'] = True
tap.load_tables(only_names=True, include_shared_tables=True, verbose=True)
dummyTapHandler.check_call('load_tables', parameters)
def test_load_table(self):
dummyTapHandler = DummyTapHandler()
tap = JwstClass(tap_plus_handler=dummyTapHandler, show_messages=False)
# default parameters
parameters = {}
parameters['table'] = 'table'
parameters['verbose'] = False
tap.load_table('table')
dummyTapHandler.check_call('load_table', parameters)
# test with parameters
dummyTapHandler.reset()
parameters = {}
parameters['table'] = 'table'
parameters['verbose'] = True
tap.load_table('table', verbose=True)
dummyTapHandler.check_call('load_table', parameters)
def test_launch_sync_job(self):
dummyTapHandler = DummyTapHandler()
tap = JwstClass(tap_plus_handler=dummyTapHandler, show_messages=False)
query = "query"
# default parameters
parameters = {}
parameters['query'] = query
parameters['name'] = None
parameters['output_file'] = None
parameters['output_format'] = 'votable'
parameters['verbose'] = False
parameters['dump_to_file'] = False
parameters['upload_resource'] = None
parameters['upload_table_name'] = None
tap.launch_job(query)
dummyTapHandler.check_call('launch_job', parameters)
# test with parameters
dummyTapHandler.reset()
name = 'name'
output_file = 'output'
output_format = 'format'
verbose = True
dump_to_file = True
upload_resource = 'upload_res'
upload_table_name = 'upload_table'
parameters['query'] = query
parameters['name'] = name
parameters['output_file'] = output_file
parameters['output_format'] = output_format
parameters['verbose'] = verbose
parameters['dump_to_file'] = dump_to_file
parameters['upload_resource'] = upload_resource
parameters['upload_table_name'] = upload_table_name
tap.launch_job(query,
name=name,
output_file=output_file,
output_format=output_format,
verbose=verbose,
dump_to_file=dump_to_file,
upload_resource=upload_resource,
upload_table_name=upload_table_name)
dummyTapHandler.check_call('launch_job', parameters)
def test_launch_async_job(self):
dummyTapHandler = DummyTapHandler()
tap = JwstClass(tap_plus_handler=dummyTapHandler, show_messages=False)
query = "query"
# default parameters
parameters = {}
parameters['query'] = query
parameters['name'] = None
parameters['output_file'] = None
parameters['output_format'] = 'votable'
parameters['verbose'] = False
parameters['dump_to_file'] = False
parameters['background'] = False
parameters['upload_resource'] = None
parameters['upload_table_name'] = None
tap.launch_job(query, async_job=True)
dummyTapHandler.check_call('launch_job_async', parameters)
# test with parameters
dummyTapHandler.reset()
name = 'name'
output_file = 'output'
output_format = 'format'
verbose = True
dump_to_file = True
background = True
upload_resource = 'upload_res'
upload_table_name = 'upload_table'
parameters['query'] = query
parameters['name'] = name
parameters['output_file'] = output_file
parameters['output_format'] = output_format
parameters['verbose'] = verbose
parameters['dump_to_file'] = dump_to_file
parameters['background'] = background
parameters['upload_resource'] = upload_resource
parameters['upload_table_name'] = upload_table_name
tap.launch_job(query,
name=name,
output_file=output_file,
output_format=output_format,
verbose=verbose,
dump_to_file=dump_to_file,
background=background,
upload_resource=upload_resource,
upload_table_name=upload_table_name,
async_job=True)
dummyTapHandler.check_call('launch_job_async', parameters)
def test_list_async_jobs(self):
dummyTapHandler = DummyTapHandler()
tap = JwstClass(tap_plus_handler=dummyTapHandler, show_messages=False)
# default parameters
parameters = {}
parameters['verbose'] = False
tap.list_async_jobs()
dummyTapHandler.check_call('list_async_jobs', parameters)
# test with parameters
dummyTapHandler.reset()
parameters['verbose'] = True
tap.list_async_jobs(verbose=True)
dummyTapHandler.check_call('list_async_jobs', parameters)
def test_query_region(self):
connHandler = DummyConnHandler()
tapplus = TapPlus("http://test:1111/tap", connhandler=connHandler)
tap = JwstClass(tap_plus_handler=tapplus, show_messages=False)
# Launch response: we use default response because the
# query contains decimals
responseLaunchJob = DummyResponse()
responseLaunchJob.set_status_code(200)
responseLaunchJob.set_message("OK")
jobDataFile = data_path('job_1.vot')
jobData = utils.read_file_content(jobDataFile)
responseLaunchJob.set_data(method='POST',
context=None,
body=jobData,
headers=None)
# The query contains decimals: force default response
connHandler.set_default_response(responseLaunchJob)
sc = SkyCoord(ra=29.0, dec=15.0, unit=(u.degree, u.degree),
frame='icrs')
with pytest.raises(ValueError) as err:
tap.query_region(sc)
assert "Missing required argument: 'width'" in err.value.args[0]
width = Quantity(12, u.deg)
height = Quantity(10, u.deg)
with pytest.raises(ValueError) as err:
tap.query_region(sc, width=width)
assert "Missing required argument: 'height'" in err.value.args[0]
assert (isinstance(tap.query_region(sc, width=width, height=height), Table))
# Test observation_id argument
with pytest.raises(ValueError) as err:
tap.query_region(sc, width=width, height=height, observation_id=1)
assert "observation_id must be string" in err.value.args[0]
assert(isinstance(tap.query_region(sc, width=width, height=height, observation_id="observation"), Table))
# raise ValueError
# Test cal_level argument
with pytest.raises(ValueError) as err:
tap.query_region(sc, width=width, height=height, cal_level='a')
assert "cal_level must be either 'Top' or an integer" in err.value.args[0]
assert (isinstance(tap.query_region(sc, width=width, height=height, cal_level='Top'), Table))
assert (isinstance(tap.query_region(sc, width=width, height=height, cal_level=1), Table))
# Test only_public
with pytest.raises(ValueError) as err:
tap.query_region(sc, width=width, height=height, only_public='a')
assert "only_public must be boolean" in err.value.args[0]
assert (isinstance(tap.query_region(sc, width=width, height=height, only_public=True), Table))
# Test dataproduct_type argument
with pytest.raises(ValueError) as err:
tap.query_region(sc, width=width, height=height, prod_type=1)
assert "prod_type must be string" in err.value.args[0]
with pytest.raises(ValueError) as err:
tap.query_region(sc, width=width, height=height, prod_type='a')
assert "prod_type must be one of: " in err.value.args[0]
assert (isinstance(tap.query_region(sc, width=width, height=height, prod_type='image'), Table))
# Test instrument_name argument
with pytest.raises(ValueError) as err:
tap.query_region(sc, width=width, height=height, instrument_name=1)
assert "instrument_name must be string" in err.value.args[0]
assert (isinstance(tap.query_region(sc, width=width, height=height, instrument_name='NIRCAM'), Table))
with pytest.raises(ValueError) as err:
tap.query_region(sc, width=width, height=height,
instrument_name='a')
assert "instrument_name must be one of: " in err.value.args[0]
# Test filter_name argument
with pytest.raises(ValueError) as err:
tap.query_region(sc, width=width, height=height, filter_name=1)
assert "filter_name must be string" in err.value.args[0]
assert (isinstance(tap.query_region(sc, width=width, height=height, filter_name='filter'), Table))
# Test proposal_id argument
with pytest.raises(ValueError) as err:
tap.query_region(sc, width=width, height=height, proposal_id=123)
assert "proposal_id must be string" in err.value.args[0]
assert (isinstance(tap.query_region(sc, width=width, height=height, proposal_id='123'), Table))
table = tap.query_region(sc, width=width, height=height)
assert len(table) == 3, f"Wrong job results (num rows). Expected: {3}, found {len(table)}"
self.__check_results_column(table,
'alpha',
'alpha',
None,
np.float64)
self.__check_results_column(table,
'delta',
'delta',
None,
np.float64)
self.__check_results_column(table,
'source_id',
'source_id',
None,
object)
self.__check_results_column(table,
'table1_oid',
'table1_oid',
None,
np.int32)
# by radius
radius = Quantity(1, u.deg)
table = tap.query_region(sc, radius=radius)
assert len(table) == 3, f"Wrong job results (num rows). Expected: {3}, found {len(table)}"
self.__check_results_column(table,
'alpha',
'alpha',
None,
np.float64)
self.__check_results_column(table,
'delta',
'delta',
None,
np.float64)
self.__check_results_column(table,
'source_id',
'source_id',
None,
object)
self.__check_results_column(table,
'table1_oid',
'table1_oid',
None,
np.int32)
def test_query_region_async(self):
connHandler = DummyConnHandler()
tapplus = TapPlus("http://test:1111/tap", connhandler=connHandler)
tap = JwstClass(tap_plus_handler=tapplus, show_messages=False)
jobid = '12345'
# Launch response
responseLaunchJob = DummyResponse()
responseLaunchJob.set_status_code(303)
responseLaunchJob.set_message("OK")
# list of list (httplib implementation for headers in response)
launchResponseHeaders = [['location', 'http://test:1111/tap/async/' + jobid]]
responseLaunchJob.set_data(method='POST',
context=None,
body=None,
headers=launchResponseHeaders)
connHandler.set_default_response(responseLaunchJob)
# Phase response
responsePhase = DummyResponse()
responsePhase.set_status_code(200)
responsePhase.set_message("OK")
responsePhase.set_data(method='GET',
context=None,
body="COMPLETED",
headers=None)
req = "async/" + jobid + "/phase"
connHandler.set_response(req, responsePhase)
# Results response
responseResultsJob = DummyResponse()
responseResultsJob.set_status_code(200)
responseResultsJob.set_message("OK")
jobDataFile = data_path('job_1.vot')
jobData = utils.read_file_content(jobDataFile)
responseResultsJob.set_data(method='GET',
context=None,
body=jobData,
headers=None)
req = "async/" + jobid + "/results/result"
connHandler.set_response(req, responseResultsJob)
sc = SkyCoord(ra=29.0, dec=15.0, unit=(u.degree, u.degree),
frame='icrs')
width = Quantity(12, u.deg)
height = Quantity(10, u.deg)
table = tap.query_region(sc, width=width, height=height, async_job=True)
assert len(table) == 3, f"Wrong job results (num rows). Expected: {3}, found {len(table)}"
self.__check_results_column(table,
'alpha',
'alpha',
None,
np.float64)
self.__check_results_column(table,
'delta',
'delta',
None,
np.float64)
self.__check_results_column(table,
'source_id',
'source_id',
None,
object)
self.__check_results_column(table,
'table1_oid',
'table1_oid',
None,
np.int32)
# by radius
radius = Quantity(1, u.deg)
table = tap.query_region(sc, radius=radius, async_job=True)
assert len(table) == 3, f"Wrong job results (num rows). Expected: {3}, found {len(table)}"
self.__check_results_column(table,
'alpha',
'alpha',
None,
np.float64)
self.__check_results_column(table,
'delta',
'delta',
None,
np.float64)
self.__check_results_column(table,
'source_id',
'source_id',
None,
object)
self.__check_results_column(table,
'table1_oid',
'table1_oid',
None,
np.int32)
def test_cone_search_sync(self):
connHandler = DummyConnHandler()
tapplus = TapPlus("http://test:1111/tap", connhandler=connHandler)
tap = JwstClass(tap_plus_handler=tapplus, show_messages=False)
# Launch response: we use default response because the
# query contains decimals
responseLaunchJob = DummyResponse()
responseLaunchJob.set_status_code(200)
responseLaunchJob.set_message("OK")
jobDataFile = data_path('job_1.vot')
jobData = utils.read_file_content(jobDataFile)
responseLaunchJob.set_data(method='POST',
context=None,
body=jobData,
headers=None)
ra = 19.0
dec = 20.0
sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')
radius = Quantity(1.0, u.deg)
connHandler.set_default_response(responseLaunchJob)
job = tap.cone_search(sc, radius)
assert job is not None, "Expected a valid job"
assert job.async_ is False, "Expected a synchronous job"
assert job.get_phase() == 'COMPLETED', f"Wrong job phase. Expected: {'COMPLETED'}, found {job.get_phase()}"
assert job.failed is False, "Wrong job status (set Failed = True)"
# results
results = job.get_results()
assert len(results) == 3, f"Wrong job results (num rows). Expected: {3}, found {len(results)}"
self.__check_results_column(results,
'alpha',
'alpha',
None,
np.float64)
self.__check_results_column(results,
'delta',
'delta',
None,
np.float64)
self.__check_results_column(results,
'source_id',
'source_id',
None,
object)
self.__check_results_column(results,
'table1_oid',
'table1_oid',
None,
np.int32)
# Test observation_id argument
with pytest.raises(ValueError) as err:
tap.cone_search(sc, radius, observation_id=1)
assert "observation_id must be string" in err.value.args[0]
# Test cal_level argument
with pytest.raises(ValueError) as err:
tap.cone_search(sc, radius, cal_level='a')
assert "cal_level must be either 'Top' or an integer" in err.value.args[0]
# Test only_public
with pytest.raises(ValueError) as err:
tap.cone_search(sc, radius, only_public='a')
assert "only_public must be boolean" in err.value.args[0]
# Test dataproduct_type argument
with pytest.raises(ValueError) as err:
tap.cone_search(sc, radius, prod_type=1)
assert "prod_type must be string" in err.value.args[0]
with pytest.raises(ValueError) as err:
tap.cone_search(sc, radius, prod_type='a')
assert "prod_type must be one of: " in err.value.args[0]
# Test instrument_name argument
with pytest.raises(ValueError) as err:
tap.cone_search(sc, radius, instrument_name=1)
assert "instrument_name must be string" in err.value.args[0]
with pytest.raises(ValueError) as err:
tap.cone_search(sc, radius, instrument_name='a')
assert "instrument_name must be one of: " in err.value.args[0]
# Test filter_name argument
with pytest.raises(ValueError) as err:
tap.cone_search(sc, radius, filter_name=1)
assert "filter_name must be string" in err.value.args[0]
# Test proposal_id argument
with pytest.raises(ValueError) as err:
tap.cone_search(sc, radius, proposal_id=123)
assert "proposal_id must be string" in err.value.args[0]
def test_cone_search_async(self):
connHandler = DummyConnHandler()
tapplus = TapPlus("http://test:1111/tap", connhandler=connHandler)
tap = JwstClass(tap_plus_handler=tapplus, show_messages=False)
jobid = '12345'
# Launch response
responseLaunchJob = DummyResponse()
responseLaunchJob.set_status_code(303)
responseLaunchJob.set_message("OK")
# list of list (httplib implementation for headers in response)
launchResponseHeaders = [['location', 'http://test:1111/tap/async/' + jobid]]
responseLaunchJob.set_data(method='POST',
context=None,
body=None,
headers=launchResponseHeaders)
ra = 19
dec = 20
sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')
radius = Quantity(1.0, u.deg)
connHandler.set_default_response(responseLaunchJob)
# Phase response
responsePhase = DummyResponse()
responsePhase.set_status_code(200)
responsePhase.set_message("OK")
responsePhase.set_data(method='GET',
context=None,
body="COMPLETED",
headers=None)
req = "async/" + jobid + "/phase"
connHandler.set_response(req, responsePhase)
# Results response
responseResultsJob = DummyResponse()
responseResultsJob.set_status_code(200)
responseResultsJob.set_message("OK")
jobDataFile = data_path('job_1.vot')
jobData = utils.read_file_content(jobDataFile)
responseResultsJob.set_data(method='GET',
context=None,
body=jobData,
headers=None)
req = "async/" + jobid + "/results/result"
connHandler.set_response(req, responseResultsJob)
job = tap.cone_search(sc, radius, async_job=True)
assert job is not None, "Expected a valid job"
assert job.async_ is True, "Expected an asynchronous job"
assert job.get_phase() == 'COMPLETED', f"Wrong job phase. Expected: {'COMPLETED'}, found {job.get_phase()}"
assert job.failed is False, "Wrong job status (set Failed = True)"
# results
results = job.get_results()
assert len(results) == 3, "Wrong job results (num rows). Expected: {3}, found {len(results)}"
self.__check_results_column(results,
'alpha',
'alpha',
None,
np.float64)
self.__check_results_column(results,
'delta',
'delta',
None,
np.float64)
self.__check_results_column(results,
'source_id',
'source_id',
None,
object)
self.__check_results_column(results,
'table1_oid',
'table1_oid',
None,
np.int32)
def test_get_product_by_artifactid(self):
dummyTapHandler = DummyTapHandler()
jwst = JwstClass(tap_plus_handler=dummyTapHandler, data_handler=dummyTapHandler, show_messages=False)
# default parameters
with pytest.raises(ValueError) as err:
jwst.get_product()
assert "Missing required argument: 'artifact_id' or 'file_name'" in err.value.args[0]
# test with parameters
dummyTapHandler.reset()
parameters = {}
parameters['output_file'] = 'jw00617023001_02102_00001_nrcb4_uncal.fits'
parameters['verbose'] = False
param_dict = {}
param_dict['RETRIEVAL_TYPE'] = 'PRODUCT'
param_dict['DATA_RETRIEVAL_ORIGIN'] = 'ASTROQUERY'
param_dict['ARTIFACTID'] = '00000000-0000-0000-8740-65e2827c9895'
parameters['params_dict'] = param_dict
jwst.get_product(artifact_id='00000000-0000-0000-8740-65e2827c9895')
dummyTapHandler.check_call('load_data', parameters)
def test_get_product_by_filename(self):
dummyTapHandler = DummyTapHandler()
jwst = JwstClass(tap_plus_handler=dummyTapHandler, data_handler=dummyTapHandler, show_messages=False)
# default parameters
with pytest.raises(ValueError) as err:
jwst.get_product()
assert "Missing required argument: 'artifact_id' or 'file_name'" in err.value.args[0]
# test with parameters
dummyTapHandler.reset()
parameters = {}
parameters['output_file'] = 'file_name_id'
parameters['verbose'] = False
param_dict = {}
param_dict['RETRIEVAL_TYPE'] = 'PRODUCT'
param_dict['DATA_RETRIEVAL_ORIGIN'] = 'ASTROQUERY'
param_dict['ARTIFACTID'] = '00000000-0000-0000-8740-65e2827c9895'
parameters['params_dict'] = param_dict
jwst.get_product(file_name='file_name_id')
dummyTapHandler.check_call('load_data', parameters)
def test_get_products_list(self):
    """get_product_list must require observation_id and launch the expected
    plane/artifact join query through the dummy TAP handler."""
    dummyTapHandler = DummyTapHandler()
    jwst = JwstClass(tap_plus_handler=dummyTapHandler, data_handler=dummyTapHandler, show_messages=False)
    # default parameters: missing observation_id must raise ValueError
    with pytest.raises(ValueError) as err:
        jwst.get_product_list()
    assert "Missing required argument: 'observation_id'" in err.value.args[0]
    # test with parameters
    dummyTapHandler.reset()
    observation_id = "jw00777011001_02104_00001_nrcblong"
    # NOTE(review): cal_level_condition and prodtype_condition are built but
    # never interpolated into `query`, and `planeids` is not defined anywhere
    # in this method, so evaluating the f-string below raises NameError as
    # written -- confirm the intended query text against get_product_list.
    cal_level_condition = " AND m.calibrationlevel = m.max_cal_level"
    prodtype_condition = ""
    query = (f"select distinct a.uri, a.artifactid, a.filename, "
             f"a.contenttype, a.producttype, p.calibrationlevel, p.public "
             f"FROM {conf.JWST_PLANE_TABLE} p JOIN {conf.JWST_ARTIFACT_TABLE} "
             f"a ON (p.planeid=a.planeid) WHERE a.planeid "
             f"IN {planeids};")
    # Expected arguments of the launch_job call made by get_product_list.
    parameters = {}
    parameters['query'] = query
    parameters['name'] = None
    parameters['output_file'] = None
    parameters['output_format'] = 'votable'
    parameters['verbose'] = False
    parameters['dump_to_file'] = False
    parameters['upload_resource'] = None
    parameters['upload_table_name'] = None
    jwst.get_product_list(observation_id=observation_id)
    dummyTapHandler.check_call('launch_job', parameters)
def test_get_obs_products(self):
    """End-to-end check of get_obs_products.

    Verifies that a call without observation_id raises, then exercises one
    download scenario per supported container type (single-product tar,
    plain file, zip, gzip, and a tar holding three members), checking both
    the load_data call parameters and the files reported/extracted on disk.
    Each scenario builds its own temp directory and removes it in `finally`.
    """
    dummyTapHandler = DummyTapHandler()
    jwst = JwstClass(tap_plus_handler=dummyTapHandler, data_handler=dummyTapHandler, show_messages=False)
    # default parameters: missing observation_id must raise ValueError
    with pytest.raises(ValueError) as err:
        jwst.get_obs_products()
    assert "Missing required argument: 'observation_id'" in err.value.args[0]
    # test with parameters
    dummyTapHandler.reset()
    output_file_full_path_dir = os.getcwd() + os.sep + "temp_test_jwsttap_get_obs_products_1"
    try:
        os.makedirs(output_file_full_path_dir, exist_ok=True)
    except OSError as err:
        print(f"Creation of the directory {output_file_full_path_dir} failed: {err.strerror}")
        raise err
    observation_id = 'jw00777011001_02104_00001_nrcblong'
    # Expected load_data parameters shared by all scenarios below; only
    # 'output_file' changes per scenario.
    parameters = {}
    parameters['verbose'] = False
    param_dict = {}
    param_dict['RETRIEVAL_TYPE'] = 'OBSERVATION'
    param_dict['DATA_RETRIEVAL_ORIGIN'] = 'ASTROQUERY'
    # NOTE(review): `planeids` is not defined in this method (nor visibly in
    # this chunk), so this line raises NameError as written -- confirm where
    # it is meant to come from.
    param_dict['planeid'] = planeids
    param_dict['calibrationlevel'] = 'ALL'
    parameters['params_dict'] = param_dict
    # Test single product tar
    file = data_path('single_product_retrieval.tar')
    output_file_full_path = output_file_full_path_dir + os.sep + os.path.basename(file)
    shutil.copy(file, output_file_full_path)
    parameters['output_file'] = output_file_full_path
    expected_files = []
    extracted_file_1 = output_file_full_path_dir + os.sep + 'single_product_retrieval_1.fits'
    expected_files.append(extracted_file_1)
    try:
        files_returned = (jwst.get_obs_products(
            observation_id=observation_id, cal_level='ALL',
            output_file=output_file_full_path))
        dummyTapHandler.check_call('load_data', parameters)
        self.__check_extracted_files(files_expected=expected_files,
                                     files_returned=files_returned)
    finally:
        shutil.rmtree(output_file_full_path_dir)
    # Test single file (no container: returned path is the copy itself)
    output_file_full_path_dir = os.getcwd() + os.sep +\
        "temp_test_jwsttap_get_obs_products_2"
    try:
        os.makedirs(output_file_full_path_dir, exist_ok=True)
    except OSError as err:
        print(f"Creation of the directory {output_file_full_path_dir} failed: {err.strerror}")
        raise err
    file = data_path('single_product_retrieval_1.fits')
    output_file_full_path = output_file_full_path_dir + os.sep +\
        os.path.basename(file)
    shutil.copy(file, output_file_full_path)
    parameters['output_file'] = output_file_full_path
    expected_files = []
    expected_files.append(output_file_full_path)
    try:
        files_returned = (jwst.get_obs_products(
            observation_id=observation_id,
            output_file=output_file_full_path))
        dummyTapHandler.check_call('load_data', parameters)
        self.__check_extracted_files(files_expected=expected_files,
                                     files_returned=files_returned)
    finally:
        # self.__remove_folder_contents(folder=output_file_full_path_dir)
        shutil.rmtree(output_file_full_path_dir)
    # Test single file zip
    output_file_full_path_dir = os.getcwd() + os.sep + "temp_test_jwsttap_get_obs_products_3"
    try:
        os.makedirs(output_file_full_path_dir, exist_ok=True)
    except OSError as err:
        print(f"Creation of the directory {output_file_full_path_dir} failed: {err.strerror}")
        raise err
    file = data_path('single_product_retrieval_3.fits.zip')
    output_file_full_path = output_file_full_path_dir + os.sep +\
        os.path.basename(file)
    shutil.copy(file, output_file_full_path)
    parameters['output_file'] = output_file_full_path
    expected_files = []
    extracted_file_1 = output_file_full_path_dir + os.sep + 'single_product_retrieval.fits'
    expected_files.append(extracted_file_1)
    try:
        files_returned = (jwst.get_obs_products(
            observation_id=observation_id,
            output_file=output_file_full_path))
        dummyTapHandler.check_call('load_data', parameters)
        self.__check_extracted_files(files_expected=expected_files,
                                     files_returned=files_returned)
    finally:
        # self.__remove_folder_contents(folder=output_file_full_path_dir)
        shutil.rmtree(output_file_full_path_dir)
    # Test single file gzip
    # NOTE(review): here the expected result keeps the .fits.gz name, i.e.
    # gzip members are apparently returned as-is in this scenario -- confirm
    # against the gzip handling in get_obs_products.
    output_file_full_path_dir = (os.getcwd() + os.sep + "temp_test_jwsttap_get_obs_products_4")
    try:
        os.makedirs(output_file_full_path_dir, exist_ok=True)
    except OSError as err:
        print(f"Creation of the directory {output_file_full_path_dir} failed: {err.strerror}")
        raise err
    file = data_path('single_product_retrieval_2.fits.gz')
    output_file_full_path = output_file_full_path_dir + os.sep + os.path.basename(file)
    shutil.copy(file, output_file_full_path)
    parameters['output_file'] = output_file_full_path
    expected_files = []
    extracted_file_1 = output_file_full_path_dir + os.sep + 'single_product_retrieval_2.fits.gz'
    expected_files.append(extracted_file_1)
    try:
        files_returned = (jwst.get_obs_products(
            observation_id=observation_id,
            output_file=output_file_full_path))
        dummyTapHandler.check_call('load_data', parameters)
        self.__check_extracted_files(files_expected=expected_files,
                                     files_returned=files_returned)
    finally:
        # self.__remove_folder_contents(folder=output_file_full_path_dir)
        shutil.rmtree(output_file_full_path_dir)
    # Test tar with 3 files, a normal one, a gzip one and a zip one
    output_file_full_path_dir = (os.getcwd() + os.sep + "temp_test_jwsttap_get_obs_products_5")
    try:
        os.makedirs(output_file_full_path_dir, exist_ok=True)
    except OSError as err:
        print(f"Creation of the directory {output_file_full_path_dir} failed: {err.strerror}")
        raise err
    file = data_path('three_products_retrieval.tar')
    output_file_full_path = output_file_full_path_dir + os.sep + os.path.basename(file)
    shutil.copy(file, output_file_full_path)
    parameters['output_file'] = output_file_full_path
    expected_files = []
    extracted_file_1 = output_file_full_path_dir + os.sep + 'single_product_retrieval_1.fits'
    expected_files.append(extracted_file_1)
    extracted_file_2 = output_file_full_path_dir + os.sep + 'single_product_retrieval_2.fits.gz'
    expected_files.append(extracted_file_2)
    extracted_file_3 = output_file_full_path_dir + os.sep + 'single_product_retrieval_3.fits.zip'
    expected_files.append(extracted_file_3)
    try:
        files_returned = (jwst.get_obs_products(
            observation_id=observation_id,
            output_file=output_file_full_path))
        dummyTapHandler.check_call('load_data', parameters)
        self.__check_extracted_files(files_expected=expected_files,
                                     files_returned=files_returned)
    finally:
        # self.__remove_folder_contents(folder=output_file_full_path_dir)
        shutil.rmtree(output_file_full_path_dir)
def test_gunzip_file(self):
    """gzip_uncompress_and_rename_single_file must unpack a .gz product and
    return the path of the decompressed file (same name without .gz)."""
    work_dir = os.getcwd() + os.sep + "temp_test_jwsttap_gunzip"
    try:
        os.makedirs(work_dir, exist_ok=True)
    except OSError as err:
        print(f"Creation of the directory {work_dir} failed: {err.strerror}")
        raise err
    source = data_path('single_product_retrieval_2.fits.gz')
    copied = work_dir + os.sep + os.path.basename(source)
    shutil.copy(source, copied)
    expected = work_dir + os.sep + "single_product_retrieval_2.fits"
    try:
        result = JwstClass.gzip_uncompress_and_rename_single_file(copied)
        if result != expected:
            raise ValueError(f"Extracted file not found: {expected}")
    finally:
        # Always remove the scratch directory, pass or fail.
        shutil.rmtree(work_dir)
def __check_results_column(self, results, columnName, description, unit,
                           dataType):
    """Assert that the named results column carries the expected
    description, unit and dtype, with a descriptive failure message."""
    column = results[columnName]
    assert column.description == description, \
        f"Wrong description for results column '{columnName}'. Expected: '{description}', "\
        f"found '{column.description}'"
    assert column.unit == unit, \
        f"Wrong unit for results column '{columnName}'. Expected: '{unit}', found '{column.unit}'"
    assert column.dtype == dataType, \
        f"Wrong dataType for results column '{columnName}'. Expected: '{dataType}', found '{column.dtype}'"
def __remove_folder_contents(self, folder):
    """Delete every file and subdirectory found under *folder*
    (the folder itself is kept)."""
    for root, dirnames, filenames in os.walk(folder):
        for name in filenames:
            os.unlink(os.path.join(root, name))
        for name in dirnames:
            shutil.rmtree(os.path.join(root, name))
def __check_extracted_files(self, files_expected, files_returned):
    """Verify extraction results: same count as expected, every expected
    file exists on disk, and every expected file was reported back."""
    if len(files_returned) != len(files_expected):
        raise ValueError(f"Expected files size error. "
                         f"Found {len(files_returned)}, "
                         f"expected {len(files_expected)}")
    for expected in files_expected:
        if not os.path.exists(expected):
            raise ValueError(f"Not found extracted file: "
                             f"{expected}")
        if expected not in files_returned:
            raise ValueError(f"Not found expected file: {expected}")
def test_query_target_error(self):
    """query_target must raise ValueError for an invalid/empty resolver and
    when no resolver can produce valid coordinates for the target name."""
    jwst = JwstClass(show_messages=False)
    simbad = Simbad()
    ned = Ned()
    vizier = Vizier()
    # Testing default parameters
    with pytest.raises(ValueError) as err:
        jwst.query_target(target_name="M1", target_resolver="")
    assert "This target resolver is not allowed" in err.value.args[0]
    with pytest.raises(ValueError) as err:
        jwst.query_target("TEST")
    assert "This target name cannot be determined with this resolver: ALL" in err.value.args[0]
    with pytest.raises(ValueError) as err:
        jwst.query_target(target_name="M1", target_resolver="ALL")
    # Either message is acceptable here: resolution may fail before the
    # missing-width check depending on resolver availability.
    assert err.value.args[0] in [f"This target name cannot be determined "
                                 f"with this resolver: ALL", "Missing "
                                 f"required argument: 'width'"]
    # Testing no valid coordinates from resolvers
    # NOTE(review): the MagicMock patches below are applied to the *local*
    # Simbad/Ned/Vizier instances created above; whether JwstClass actually
    # consults these instances is not visible in this chunk -- confirm the
    # mocks really intercept the resolver calls.
    simbad_file = data_path('test_query_by_target_name_simbad_ned_error.vot')
    simbad_table = Table.read(simbad_file)
    simbad.query_object = MagicMock(return_value=simbad_table)
    ned_file = data_path('test_query_by_target_name_simbad_ned_error.vot')
    ned_table = Table.read(ned_file)
    ned.query_object = MagicMock(return_value=ned_table)
    vizier_file = data_path('test_query_by_target_name_vizier_error.vot')
    vizier_table = Table.read(vizier_file)
    vizier.query_object = MagicMock(return_value=vizier_table)
    # coordinate_error = 'coordinate must be either a string or astropy.coordinates'
    with pytest.raises(ValueError) as err:
        jwst.query_target(target_name="test", target_resolver="SIMBAD",
                          radius=units.Quantity(5, units.deg))
    assert 'This target name cannot be determined with this resolver: SIMBAD' in err.value.args[0]
    with pytest.raises(ValueError) as err:
        jwst.query_target(target_name="test", target_resolver="NED",
                          radius=units.Quantity(5, units.deg))
    assert 'This target name cannot be determined with this resolver: NED' in err.value.args[0]
    with pytest.raises(ValueError) as err:
        jwst.query_target(target_name="test", target_resolver="VIZIER",
                          radius=units.Quantity(5, units.deg))
    assert 'This target name cannot be determined with this resolver: VIZIER' in err.value.args[0]
def test_remove_jobs(self):
    """remove_jobs must forward the job list to the TAP handler."""
    handler_stub = DummyTapHandler()
    client = JwstClass(tap_plus_handler=handler_stub, show_messages=False)
    jobs = ['dummyJob']
    client.remove_jobs(jobs)
    handler_stub.check_call('remove_jobs', {'jobs_list': jobs, 'verbose': False})
def test_save_results(self):
    """save_results must forward the job to the TAP handler."""
    handler_stub = DummyTapHandler()
    client = JwstClass(tap_plus_handler=handler_stub, show_messages=False)
    client.save_results('dummyJob')
    handler_stub.check_call('save_results', {'job': 'dummyJob', 'verbose': False})
def test_login(self):
    """login must pass user/password through to the TAP handler."""
    handler_stub = DummyTapHandler()
    client = JwstClass(tap_plus_handler=handler_stub, show_messages=False)
    expected = {
        'user': 'test_user',
        'password': 'test_password',
        'credentials_file': None,
        'verbose': False,
    }
    client.login(user='test_user', password='test_password')
    handler_stub.check_call('login', expected)
def test_logout(self):
    """logout must delegate to the TAP handler with default verbosity."""
    handler_stub = DummyTapHandler()
    client = JwstClass(tap_plus_handler=handler_stub, show_messages=False)
    client.logout()
    handler_stub.check_call('logout', {'verbose': False})
@pytest.mark.noautofixt
def test_query_get_product(self):
    """_query_get_product must launch the artifact lookup query for either
    a file name or an artifact id."""
    handler_stub = DummyTapHandler()
    client = JwstClass(tap_plus_handler=handler_stub, show_messages=False)
    file = 'test_file'
    expected = {
        'query': f"select * from jwst.artifact a where a.filename = '{file}'",
        'name': None,
        'output_file': None,
        'output_format': 'votable',
        'verbose': False,
        'dump_to_file': False,
        'upload_resource': None,
        'upload_table_name': None,
    }
    client._query_get_product(file_name=file)
    handler_stub.check_call('launch_job', expected)
    # Same expected call shape, but keyed on artifact id instead.
    artifact = 'test_artifact'
    expected['query'] = f"select * from jwst.artifact a where a.artifactid = '{artifact}'"
    client._query_get_product(artifact_id=artifact)
    handler_stub.check_call('launch_job', expected)
def test_get_related_observations(self):
    """get_related_observations must search jwst.main for rows whose
    members column matches the given observation id."""
    handler_stub = DummyTapHandler()
    client = JwstClass(tap_plus_handler=handler_stub, show_messages=False)
    obs = 'dummyObs'
    client.get_related_observations(observation_id=obs)
    expected = {
        'query': f"select * from jwst.main m where m.members like '%{obs}%'",
        'name': None,
        'output_file': None,
        'output_format': 'votable',
        'verbose': False,
        'dump_to_file': False,
        'upload_resource': None,
        'upload_table_name': None,
    }
    handler_stub.check_call('launch_job', expected)
| 44,042 | 669 | 181 |
90b56e9072ff77759975646cfa3e82041694b2d4 | 737 | py | Python | docs/conf.py | ShantamShorewala/aizynthfinder | 6b15d5846558b14c4ce3c353727d9d676af7f6fb | [
"MIT"
] | null | null | null | docs/conf.py | ShantamShorewala/aizynthfinder | 6b15d5846558b14c4ce3c353727d9d676af7f6fb | [
"MIT"
] | null | null | null | docs/conf.py | ShantamShorewala/aizynthfinder | 6b15d5846558b14c4ce3c353727d9d676af7f6fb | [
"MIT"
] | null | null | null | import os
import sys
sys.path.insert(0, os.path.abspath("."))
project = "aizynthfinder"
copyright = "2020-2022, Molecular AI group"
author = "Molecular AI group"
release = "3.3.0"
# This make sure that the cli_help.txt file is properly formated
with open("cli_help.txt", "r") as fileobj:
lines = fileobj.read().splitlines()
with open("cli_help.txt", "w") as fileobj:
fileobj.write(".. code-block::\n\n")
fileobj.write(" " + "\n ".join(lines))
extensions = [
"sphinx.ext.autodoc",
]
autodoc_member_order = "bysource"
autodoc_typehints = "description"
html_theme = "alabaster"
html_theme_options = {
"description": "A fast, robust and flexible software for retrosynthetic planning",
"fixed_sidebar": True,
}
| 25.413793 | 86 | 0.694708 | import os
import sys
sys.path.insert(0, os.path.abspath("."))
project = "aizynthfinder"
copyright = "2020-2022, Molecular AI group"
author = "Molecular AI group"
release = "3.3.0"
# This make sure that the cli_help.txt file is properly formated
with open("cli_help.txt", "r") as fileobj:
lines = fileobj.read().splitlines()
with open("cli_help.txt", "w") as fileobj:
fileobj.write(".. code-block::\n\n")
fileobj.write(" " + "\n ".join(lines))
extensions = [
"sphinx.ext.autodoc",
]
autodoc_member_order = "bysource"
autodoc_typehints = "description"
html_theme = "alabaster"
html_theme_options = {
"description": "A fast, robust and flexible software for retrosynthetic planning",
"fixed_sidebar": True,
}
| 0 | 0 | 0 |
c41bc50ee88e3138d6b8c2fd1482bb7a081971bc | 2,351 | py | Python | main.py | johangenis/day-18-start | 1d479505b0bb3223e6e8032303669d9f9d0c076c | [
"MIT"
] | null | null | null | main.py | johangenis/day-18-start | 1d479505b0bb3223e6e8032303669d9f9d0c076c | [
"MIT"
] | null | null | null | main.py | johangenis/day-18-start | 1d479505b0bb3223e6e8032303669d9f9d0c076c | [
"MIT"
] | null | null | null | import _tkinter
from turtle import Turtle, Screen
import random
tim = Turtle()
tim.shape("turtle")
tim.color("red")
tim.speed("fastest")
# r = random.random()
# b = random.random()
# g = random.random()
# rgb = (random.random(), random.random(), random.random())
draw_spirograph(5)
# for shape_sides_n in range(3, 41):
# draw_shapes(shape_sides_n)
# for walk_steps_n in range(3, 201):
# random_walk(walk_steps_n)
# dashed_line()
# square()
# pentagon()
# hexagon()
# heptagon()
# octagon()
screen = Screen()
screen.exitonclick()
| 20.991071 | 65 | 0.612505 | import _tkinter
from turtle import Turtle, Screen
import random
tim = Turtle()
tim.shape("turtle")
tim.color("red")
tim.speed("fastest")
# r = random.random()
# b = random.random()
# g = random.random()
# rgb = (random.random(), random.random(), random.random())
def draw_shapes(num_sides):
angle = 360/num_sides
rgb = (random.random(), random.random(), random.random())
tim.pencolor(rgb)
for _ in range(num_sides):
tim.forward(30)
tim.right(angle)
def draw_spirograph(size_of_gap):
for _ in range(int(360 / size_of_gap)):
rgb = (random.random(), random.random(), random.random())
tim.pencolor(rgb)
tim.circle(100)
tim.setheading(tim.heading() + size_of_gap)
draw_spirograph(5)
# for shape_sides_n in range(3, 41):
# draw_shapes(shape_sides_n)
def random_walk(num_steps):
rgb = (random.random(), random.random(), random.random())
tim.pencolor(rgb)
tim.pensize(width=10)
tim.speed("fastest")
direction = [0, 90, 180, 270, 360]
for _ in range(num_steps):
tim.forward(30)
tim.setheading(random.choice(direction))
# for walk_steps_n in range(3, 201):
# random_walk(walk_steps_n)
def square():
rgb = (random.random(), random.random(), random.random())
tim.pencolor(rgb)
for _ in range(4):
tim.forward(100)
tim.right(90)
def pentagon():
rgb = (random.random(), random.random(), random.random())
tim.pencolor(rgb)
for _ in range(5):
tim.forward(100)
tim.right(360/5)
def hexagon():
rgb = (random.random(), random.random(), random.random())
tim.pencolor(rgb)
for _ in range(6):
tim.forward(100)
tim.right(360/6)
def heptagon():
rgb = (random.random(), random.random(), random.random())
tim.pencolor(rgb)
for _ in range(7):
tim.forward(100)
tim.right(360/7)
def octagon():
rgb = (random.random(), random.random(), random.random())
tim.pencolor(rgb)
for _ in range(8):
tim.forward(100)
tim.right(360/8)
def dashed_line():
for _ in range(15):
tim.color("red")
tim.forward(10)
tim.penup()
tim.forward(10)
tim.pendown()
# dashed_line()
# square()
# pentagon()
# hexagon()
# heptagon()
# octagon()
screen = Screen()
screen.exitonclick()
| 1,588 | 0 | 207 |
2b1e1131e827c40ddcb55389af6b293ec4f32cb1 | 2,715 | py | Python | ml/jointclassifier/joint_args.py | DavidThe4sian/marvin | 1dddd8cdb8be55d3b553df1b2cf89e8b6b0538b6 | [
"MIT"
] | 4 | 2021-05-14T02:23:52.000Z | 2021-12-28T10:33:18.000Z | ml/jointclassifier/joint_args.py | DavidThe4sian/marvin | 1dddd8cdb8be55d3b553df1b2cf89e8b6b0538b6 | [
"MIT"
] | null | null | null | ml/jointclassifier/joint_args.py | DavidThe4sian/marvin | 1dddd8cdb8be55d3b553df1b2cf89e8b6b0538b6 | [
"MIT"
] | 3 | 2021-05-31T06:38:59.000Z | 2021-09-29T02:44:22.000Z | from dataclasses import dataclass, field
from typing import Optional
from transformers import (
TrainingArguments
)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_nick: Optional[str] = field(
default=None,
metadata={
"help": "The model Nickname"
},
)
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. Leave None if you want to train a model from scratch."
},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
freeze_encoder: Optional[bool] = field(
default=False, metadata={"help" : "Freeze the encoder"}
)
skip_preclassifier: Optional[bool] = field(
default=False, metadata={"help" : "Skip the preclassifier layer"}
)
@dataclass
class TrainingArguments(TrainingArguments):
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
ignore_index: Optional[int] = field(
default=0,
metadata={
"help": "Specifies a target value that is ignored and does not contribute to the input gradient"},
)
dropout_rate: Optional[float] = field(
default=0.1,
metadata={
"help": "Dropout for fully-connected layers"},
)
train_jointly: Optional[bool] = field(
default=True,
metadata={
"help": "Dropout for fully-connected layers"},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
task: Optional[str] = field(
default=None,
metadata={"help": "The name of the task to train"},
)
data_dir: Optional[str] = field(
default='./data',
metadata={"help": "The input data dir"},
)
max_seq_len: Optional[int] = field(
default=50,
metadata={"help": "TBW"},
)
result_dir: Optional[str] = field(
default='./',
metadata={"help": "The result dir"},
)
prediction_dir: Optional[str] = field(
default='./',
metadata={"help": "The prediction dir"},
)
| 27.989691 | 124 | 0.622468 | from dataclasses import dataclass, field
from typing import Optional
from transformers import (
TrainingArguments
)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_nick: Optional[str] = field(
default=None,
metadata={
"help": "The model Nickname"
},
)
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. Leave None if you want to train a model from scratch."
},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
freeze_encoder: Optional[bool] = field(
default=False, metadata={"help" : "Freeze the encoder"}
)
skip_preclassifier: Optional[bool] = field(
default=False, metadata={"help" : "Skip the preclassifier layer"}
)
@dataclass
class TrainingArguments(TrainingArguments):
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
ignore_index: Optional[int] = field(
default=0,
metadata={
"help": "Specifies a target value that is ignored and does not contribute to the input gradient"},
)
dropout_rate: Optional[float] = field(
default=0.1,
metadata={
"help": "Dropout for fully-connected layers"},
)
train_jointly: Optional[bool] = field(
default=True,
metadata={
"help": "Dropout for fully-connected layers"},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
task: Optional[str] = field(
default=None,
metadata={"help": "The name of the task to train"},
)
data_dir: Optional[str] = field(
default='./data',
metadata={"help": "The input data dir"},
)
max_seq_len: Optional[int] = field(
default=50,
metadata={"help": "TBW"},
)
result_dir: Optional[str] = field(
default='./',
metadata={"help": "The result dir"},
)
prediction_dir: Optional[str] = field(
default='./',
metadata={"help": "The prediction dir"},
)
| 0 | 0 | 0 |
89af48b3ab87b7d65ea88b68ffd8b3bf3ae91a67 | 226 | py | Python | example/JKBrickworks/1701_printer/print_space.py | HomeIT4all/micropython-ev3dev | fd59b55eb840d6e83a1e78d76a363af12c6d7f81 | [
"MIT"
] | 6 | 2019-04-16T08:32:38.000Z | 2021-09-27T12:18:46.000Z | example/JKBrickworks/1701_printer/print_space.py | HomeIT4all/micropython-ev3dev | fd59b55eb840d6e83a1e78d76a363af12c6d7f81 | [
"MIT"
] | null | null | null | example/JKBrickworks/1701_printer/print_space.py | HomeIT4all/micropython-ev3dev | fd59b55eb840d6e83a1e78d76a363af12c6d7f81 | [
"MIT"
] | 3 | 2019-12-22T16:13:38.000Z | 2021-09-27T12:19:04.000Z |
from project import motor
from project import variables
# My Blocks
from lower_pen import lower_pen
from lift_pen import lift_pen
| 17.384615 | 39 | 0.756637 |
from project import motor
from project import variables
# My Blocks
from lower_pen import lower_pen
from lift_pen import lift_pen
def print_space(size):
seg4 = variables['Seg4']
motor['A'].on_for_degrees(25, seg4)
| 70 | 0 | 23 |
12811944daf601d9b30ebfd6544d6de22d011f0a | 6,181 | py | Python | adminfind.py | blackXploits/AdminFind2 | 6222de245116b0fb77521ed54449a86a73a3f1d3 | [
"MIT"
] | null | null | null | adminfind.py | blackXploits/AdminFind2 | 6222de245116b0fb77521ed54449a86a73a3f1d3 | [
"MIT"
] | null | null | null | adminfind.py | blackXploits/AdminFind2 | 6222de245116b0fb77521ed54449a86a73a3f1d3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: BlackXploits
# Telegram: @BlackXploits
# Please don't delete this COPYRIGHT
import argparse
import subprocess
import signal
import Queue
import time
from threading import Thread, Lock
from sys import argv, stdout
from os import getpid, kill
w = '\033[0m'
y = '\033[93m'
r = '\033[31m'
g = '\033[32m'
o = '\033[33m'
b = '\033[34m'
parser = argparse.ArgumentParser(prog='adminfinder', usage='adminfinder [options]')
parser.add_argument('-u', "--url", type=str, help='url eg. target.com')
parser.add_argument("-w", "--wordlist", type=str, help="wordlist")
parser.add_argument('-t', "--threads", type=int, help='number of threads')
parser.add_argument('-p', "--proxy", type=str, help='use proxy eg. socks5:127.0.0.1:9050')
parser.add_argument('-f', "--follow", action="store_true", help='follow and resolve redirects')
parser.add_argument('-b', "--forbidden", action="store_true", help='show forbidden pages')
args = parser.parse_args()
print y+' Admin_Finder_v2.0 '
print b+'''
___ __ _ _______ __
/ | ____/ /___ ___ (_)___ / ____(_)___ ____/ /
/ /| |/ __ / __ `__ \/ / __ \/ /_ / / __ \/ __ /
/ ___ / /_/ / / / / / / / / / / __/ / / / / / /_/ /
/_/ |_\__,_/_/ /_/ /_/_/_/ /_/_/ /_/_/ /_/\__,_/
'''
print r+' c0ded by blackXploits '
print w+''
if len(argv) == 1:
parser.print_help()
exit()
if args.proxy:
try:
import socks, socket
except:
print "Error socksipy module not found, apt-get install python-socksipy to use proxies"
exit()
try:
proxytype = args.proxy.split(":")[0]
proxyip = args.proxy.split(":")[1]
proxyport = args.proxy.split(":")[2]
except:
print "Error proxy must be in the form of type:host:port"
parser.print_help()
exit()
if proxytype == "socks4":
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS4, str(proxyip), int(proxyport), True)
elif proxytype == "socks5":
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, str(proxyip), int(proxyport), True)
else:
print "Error Unknown proxy type: " + str(proxytype)
exit()
socket.socket = socks.socksocket
socket.create_connection = create_connection
import httplib, httplib2
domain = args.url
url = str(domain.strip())
wordlist = [line.strip() for line in open("wordlist.txt", 'r')]
signal.signal(signal.SIGINT, killpid)
queueLock = Lock()
workQueue = Queue.Queue(len(wordlist))
found = []
threads = []
exitFlag = 0
threadID = 1
maxthreads = 40
if args.threads:
maxthreads = args.threads
queueLock.acquire()
for word in wordlist:
workQueue.put(word)
queueLock.release()
while threadID <= maxthreads:
tname = str("Thread-") + str(threadID)
thread = myThread(threadID, tname, workQueue)
thread.start()
threads.append(thread)
threadID += 1
with Timer():
while not workQueue.empty():
pass
exitFlag = 1
for t in threads:
t.join()
print "\r\x1b[K\n [*] All threads complete, " + str(len(found)) + " sites found."
| 31.535714 | 184 | 0.626759 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: BlackXploits
# Telegram: @BlackXploits
# Please don't delete this COPYRIGHT
import argparse
import subprocess
import signal
import Queue
import time
from threading import Thread, Lock
from sys import argv, stdout
from os import getpid, kill
w = '\033[0m'
y = '\033[93m'
r = '\033[31m'
g = '\033[32m'
o = '\033[33m'
b = '\033[34m'
class myThread (Thread):
def __init__(self, threadID, name, q):
Thread.__init__(self)
self.threadID = threadID
self.name = name
self.q = q
def run(self):
getresponse(self.name, self.q)
class Timer():
def __enter__(self): self.start = time.time()
def __exit__(self, *args):
taken = time.time() - self.start
seconds = int(time.strftime('%S', time.gmtime(taken)))
minutes = int(time.strftime('%M', time.gmtime(taken)))
hours = int(time.strftime('%H', time.gmtime(taken)))
if minutes > 0:
if hours > 0:
print " [*] Time elapsed " + str(hours) + " hours, " + str(minutes) + " minutes and " + str(seconds) + " seconds at " + str(round(len(wordlist) / taken,2)) + " lookups per second."
else:
print " [*] Time elapsed " + str(minutes) + " minutes and " + str(seconds) + " seconds at " + str(round(len(wordlist) / taken,2)) + " lookups per second."
else:
print " [*] Time elapsed " + str(seconds) + " seconds at " + str(round(len(wordlist) / taken,2)) + " lookups per second."
maked = "rm -rf .cache_httplib"
process = subprocess.Popen(maked.split(), stdout=subprocess.PIPE)
poutput = process.communicate()[0]
class Printer():
def __init__(self,data):
stdout.write("\r\x1b[K"+data.__str__())
stdout.flush()
def getresponse(threadName, q):
while not exitFlag:
if not workQueue.empty():
queueLock.acquire()
data = q.get()
queueLock.release()
checkg = 1
while checkg == 1:
try:
connection = httplib.HTTPConnection(str(url))
connection.request('HEAD', "/" + str(data.strip()))
response = connection.getresponse()
progdone = len(wordlist) - workQueue.qsize()
update = " [>] Checking " + str(progdone) + "/" + str(len(wordlist)) + " " + str(url) + "/" + str(data.strip()) + " \t[" + str(response.status) + "]"
Printer(update)
checkg += 1
reporturl = "\r\x1b[K [*] " + str(url) + "/" + str(data.strip())
if len(reporturl) < 60:
add = 60 - int(len(reporturl))
reporturl = str(reporturl) + str(" ") * int(add)
reportcode = "[" + str(response.status) + "]"
if response.status == 200:
print str(reporturl) + str(reportcode) + " OK"
found.append(response.status)
elif response.status >= 300 and response.status < 400 and args.follow:
reso = httplib2.Http(".cache_httplib")
reso.follow_all_redirects = True
link = "http://" + str(url) + "/" + str(data.strip())
resp = reso.request(link, "HEAD")[0]
finalurl = resp['content-location']
if finalurl[0:5] == "http:":
finalurl = finalurl[11:]
elif finalurl[0:5] == "https":
finalurl = " [HTTPS] " + finalurl[12:]
print str(reporturl) + str(reportcode) + " Redirect >> " + str(finalurl)
elif response.status == 403 and args.forbidden:
print str(reporturl) + str(reportcode) + " Forbidden!"
except:
pass
else:
queueLock.release()
def killpid(signum = 0, frame = 0):
    # SIGINT handler: clear the current terminal line, then hard-kill the
    # whole process (signal 9) so all worker threads die immediately
    # instead of waiting for exitFlag.
    print "\r\x1b[K"
    kill(getpid(), 9)
# ---- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(prog='adminfinder', usage='adminfinder [options]')
parser.add_argument('-u', "--url", type=str, help='url eg. target.com')
parser.add_argument("-w", "--wordlist", type=str, help="wordlist")
parser.add_argument('-t', "--threads", type=int, help='number of threads')
parser.add_argument('-p', "--proxy", type=str, help='use proxy eg. socks5:127.0.0.1:9050')
parser.add_argument('-f', "--follow", action="store_true", help='follow and resolve redirects')
parser.add_argument('-b', "--forbidden", action="store_true", help='show forbidden pages')
args = parser.parse_args()
# Banner.  y/b/r/w are presumably ANSI color codes defined earlier in the
# file (not visible here) -- TODO confirm.
print y+' Admin_Finder_v2.0 '
print b+'''
 ___ __ _ _______ __
 / | ____/ /___ ___ (_)___ / ____(_)___ ____/ /
 / /| |/ __ / __ `__ \/ / __ \/ /_ / / __ \/ __ /
 / ___ / /_/ / / / / / / / / / / __/ / / / / / /_/ /
/_/ |_\__,_/_/ /_/ /_/_/_/ /_/_/ /_/_/ /_/\__,_/
'''
print r+' c0ded by blackXploits '
print w+''
if len(argv) == 1:
    # No arguments at all: show usage and quit.
    parser.print_help()
    exit()
# ---- Optional SOCKS proxy setup --------------------------------------------
if args.proxy:
    try:
        import socks, socket
    except:
        print "Error socksipy module not found, apt-get install python-socksipy to use proxies"
        exit()
    def create_connection(address, timeout=None, source_address=None):
        # Drop-in replacement for socket.create_connection that routes
        # through the configured SOCKS proxy.
        sock = socks.socksocket()
        sock.connect(address)
        return sock
    try:
        # Expected form: type:host:port (e.g. socks5:127.0.0.1:9050).
        proxytype = args.proxy.split(":")[0]
        proxyip = args.proxy.split(":")[1]
        proxyport = args.proxy.split(":")[2]
    except:
        print "Error proxy must be in the form of type:host:port"
        parser.print_help()
        exit()
    if proxytype == "socks4":
        socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS4, str(proxyip), int(proxyport), True)
    elif proxytype == "socks5":
        socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, str(proxyip), int(proxyport), True)
    else:
        print "Error Unknown proxy type: " + str(proxytype)
        exit()
    # Monkey-patch the socket module so httplib transparently uses the proxy.
    socket.socket = socks.socksocket
    socket.create_connection = create_connection
# Imported after the proxy patch so httplib picks up the patched socket.
import httplib, httplib2
domain = args.url
url = str(domain.strip())
# NOTE(review): the -w/--wordlist option is ignored; the path is hard-coded.
wordlist = [line.strip() for line in open("wordlist.txt", 'r')]
signal.signal(signal.SIGINT, killpid)
# ---- Shared state for the worker threads -----------------------------------
queueLock = Lock()
workQueue = Queue.Queue(len(wordlist))
found = []
threads = []
exitFlag = 0
threadID = 1
maxthreads = 40
if args.threads:
    maxthreads = args.threads
# Fill the work queue under the lock before any worker starts.
queueLock.acquire()
for word in wordlist:
    workQueue.put(word)
queueLock.release()
# Spawn the workers.  myThread is defined earlier in the file (not visible
# here); presumably it runs getresponse() -- TODO confirm.
while threadID <= maxthreads:
    tname = str("Thread-") + str(threadID)
    thread = myThread(threadID, tname, workQueue)
    thread.start()
    threads.append(thread)
    threadID += 1
# Busy-wait until the queue drains, timing the run with the Timer context
# manager (defined earlier in the file).
with Timer():
    while not workQueue.empty():
        pass
exitFlag = 1
for t in threads:
    t.join()
print "\r\x1b[K\n [*] All threads complete, " + str(len(found)) + " sites found."
| 2,913 | -9 | 265 |
8ac760cb94d7eff35d38268345a0ee40b72f61bd | 249 | py | Python | Day 2/OutPut.py | Soundarya0/30-Days-of-Code | c7b34826c96eaef97a4e4c2dd9e8a30a9daaf990 | [
"MIT"
] | null | null | null | Day 2/OutPut.py | Soundarya0/30-Days-of-Code | c7b34826c96eaef97a4e4c2dd9e8a30a9daaf990 | [
"MIT"
] | null | null | null | Day 2/OutPut.py | Soundarya0/30-Days-of-Code | c7b34826c96eaef97a4e4c2dd9e8a30a9daaf990 | [
"MIT"
] | null | null | null | Python 3.8.1 (tags/v3.8.1:1b293b6, Dec 18 2019, 23:11:46) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>>=======30 Days of Code/Day 2/Operators.py ======
12.00
20
8
15
>>>
| 24.9 | 95 | 0.606426 | Python 3.8.1 (tags/v3.8.1:1b293b6, Dec 18 2019, 23:11:46) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>>=======30 Days of Code/Day 2/Operators.py ======
12.00
20
8
15
>>>
| 0 | 0 | 0 |
e674cdc55fcf16b081aab4e42eb70a114f2254ff | 61,641 | py | Python | tensorflow_probability/python/internal/distribution_util_test.py | RogerFrigola/probability | cfb507b7ede2c1ba753bffc5ea827b9c97c37bdc | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/internal/distribution_util_test.py | RogerFrigola/probability | cfb507b7ede2c1ba753bffc5ea827b9c97c37bdc | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/internal/distribution_util_test.py | RogerFrigola/probability | cfb507b7ede2c1ba753bffc5ea827b9c97c37bdc | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for distribution_utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import itertools
# Dependency imports
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.distributions import Categorical
from tensorflow_probability.python.distributions import Mixture
from tensorflow_probability.python.distributions import MixtureSameFamily
from tensorflow_probability.python.distributions import MultivariateNormalDiag
from tensorflow_probability.python.distributions import Normal
from tensorflow_probability.python.internal import distribution_util
from tensorflow.python.framework import test_util
special = try_import("scipy.special")
def _matrix_diag(d):
  """Batch version of np.diag.

  Maps an array of shape [..., k] to diagonal matrices of shape
  [..., k, k], one per innermost vector.
  """
  orig_shape = d.shape
  k = d.shape[-1]
  # Collapse all batch dims, diagonalize each row, then restore them.
  flat = np.reshape(d, (int(np.prod(orig_shape[:-1])), k))
  diags = [np.diag(row) for row in flat]
  return np.reshape(diags, orig_shape + (k,))
@test_util.run_all_in_graph_and_eager_modes
# TODO(jvdillon): Merge this test back into:
# tensorflow/python/kernel_tests/softplus_op_test.py
# once TF core is accepting new ops.
@test_util.run_all_in_graph_and_eager_modes
if __name__ == "__main__":
tf.test.main()
| 36.259412 | 80 | 0.633507 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for distribution_utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import itertools
# Dependency imports
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.distributions import Categorical
from tensorflow_probability.python.distributions import Mixture
from tensorflow_probability.python.distributions import MixtureSameFamily
from tensorflow_probability.python.distributions import MultivariateNormalDiag
from tensorflow_probability.python.distributions import Normal
from tensorflow_probability.python.internal import distribution_util
from tensorflow.python.framework import test_util
def try_import(name):  # pylint: disable=invalid-name
  """Import module `name`, returning it, or None (with a warning) on failure."""
  try:
    return importlib.import_module(name)
  except ImportError as e:
    # Optional dependency missing: log and let callers skip dependent tests.
    tf.logging.warning("Could not import %s: %s" % (name, str(e)))
    return None
# SciPy is optional; `special` is None when unavailable -- presumably the
# tests that need it guard on that (guards not visible here).
special = try_import("scipy.special")
def _logit(x):
  """Elementwise log-odds of `x`: log(p) - log1p(-p)."""
  p = np.asarray(x)
  return np.log(p) - np.log1p(-p)
def _powerset(x):
  """Return an iterator over all subsets of `x` as tuples, smallest first."""
  elems = list(x)
  by_size = (itertools.combinations(elems, k)
             for k in range(len(elems) + 1))
  return itertools.chain.from_iterable(by_size)
def _matrix_diag(d):
  """Batch version of np.diag: [..., k] -> [..., k, k] diagonal matrices."""
  orig_shape = d.shape
  last = d.shape[-1]
  rows = np.reshape(d, (int(np.prod(orig_shape[:-1])), last))
  stacked = []
  for i in range(rows.shape[0]):
    stacked.append(np.diag(rows[i]))
  return np.reshape(stacked, orig_shape + (last,))
def _make_tril_scale(
    loc=None,
    scale_tril=None,
    scale_diag=None,
    scale_identity_multiplier=None,
    shape_hint=None):
  """Numpy reference for the expected lower-triangular scale matrix.

  Falls back to the diagonal-only reference when no `scale_tril` is given.
  Returns None when there is not enough shape information.
  """
  if scale_tril is None:
    return _make_diag_scale(
        loc, scale_diag, scale_identity_multiplier, shape_hint)
  tril = np.tril(scale_tril)
  if scale_diag is not None:
    tril += _matrix_diag(np.array(scale_diag, dtype=np.float32))
  if scale_identity_multiplier is not None:
    eye = _matrix_diag(
        np.ones([tril.shape[-1]], dtype=np.float32))
    tril += scale_identity_multiplier * eye
  return tril
def _make_diag_scale(
    loc=None,
    scale_diag=None,
    scale_identity_multiplier=None,
    shape_hint=None):
  """Numpy reference for the expected diagonal scale matrix.

  Returns None when neither `loc` nor `shape_hint` pins down the event size.
  """
  if scale_diag is not None:
    diag = np.asarray(scale_diag)
    if scale_identity_multiplier is not None:
      # In-place add, mirroring the original's aliasing behavior.
      diag += scale_identity_multiplier
    return _matrix_diag(diag)
  if loc is None and shape_hint is None:
    return None
  dim = loc.shape[-1] if shape_hint is None else shape_hint
  mult = 1. if scale_identity_multiplier is None else scale_identity_multiplier
  return mult * np.diag(np.ones(dim))
class MakeTrilScaleTest(tf.test.TestCase):
  """Tests for distribution_util.make_tril_scale against the numpy reference."""

  def _testLegalInputs(
      self, loc=None, shape_hint=None, scale_params=None):
    # Exhaustively tries every subset of `scale_params`; each combination
    # must either match _make_tril_scale or raise when shape info is missing.
    for args in _powerset(scale_params.items()):
      with self.cached_session():
        args = dict(args)
        scale_args = dict({
            "loc": loc,
            "shape_hint": shape_hint}, **args)
        expected_scale = _make_tril_scale(**scale_args)
        if expected_scale is None:
          # Not enough shape information was specified.
          with self.assertRaisesRegexp(ValueError, ("is specified.")):
            scale = distribution_util.make_tril_scale(**scale_args)
            self.evaluate(scale.to_dense())
        else:
          scale = distribution_util.make_tril_scale(**scale_args)
          self.assertAllClose(expected_scale, self.evaluate(scale.to_dense()))

  def testLegalInputs(self):
    self._testLegalInputs(
        loc=np.array([-1., -1.], dtype=np.float32),
        shape_hint=2,
        scale_params={
            "scale_identity_multiplier": 2.,
            "scale_diag": [2., 3.],
            "scale_tril": [[1., 0.],
                           [-3., 3.]],
        })

  def testLegalInputsMultidimensional(self):
    self._testLegalInputs(
        loc=np.array([[[-1., -1., 2.], [-2., -3., 4.]]], dtype=np.float32),
        shape_hint=3,
        scale_params={
            "scale_identity_multiplier": 2.,
            "scale_diag": [[[2., 3., 4.], [3., 4., 5.]]],
            "scale_tril": [[[[1., 0., 0.],
                             [-3., 3., 0.],
                             [1., -2., 1.]],
                            [[2., 1., 0.],
                             [-4., 7., 0.],
                             [1., -1., 1.]]]]
        })

  def testZeroTriU(self):
    # The strictly-upper part of scale_tril must be ignored.
    with self.cached_session():
      scale = distribution_util.make_tril_scale(scale_tril=[[1., 1], [1., 1.]])
      self.assertAllClose([[1., 0], [1., 1.]], self.evaluate(scale.to_dense()))

  def testValidateArgs(self):
    with self.cached_session():
      with self.assertRaisesOpError("diagonal part must be non-zero"):
        scale = distribution_util.make_tril_scale(
            scale_tril=[[0., 1], [1., 1.]], validate_args=True)
        self.evaluate(scale.to_dense())

  def testAssertPositive(self):
    with self.cached_session():
      with self.assertRaisesOpError("diagonal part must be positive"):
        scale = distribution_util.make_tril_scale(
            scale_tril=[[-1., 1], [1., 1.]],
            validate_args=True,
            assert_positive=True)
        self.evaluate(scale.to_dense())
class MakeDiagScaleTest(tf.test.TestCase):
  """Tests for distribution_util.make_diag_scale against the numpy reference."""

  def _testLegalInputs(
      self, loc=None, shape_hint=None, scale_params=None):
    # Same exhaustive subset strategy as MakeTrilScaleTest, using the
    # diagonal-only numpy reference.
    for args in _powerset(scale_params.items()):
      with self.cached_session():
        args = dict(args)
        scale_args = dict({
            "loc": loc,
            "shape_hint": shape_hint}, **args)
        expected_scale = _make_diag_scale(**scale_args)
        if expected_scale is None:
          # Not enough shape information was specified.
          with self.assertRaisesRegexp(ValueError, ("is specified.")):
            scale = distribution_util.make_diag_scale(**scale_args)
            self.evaluate(scale.to_dense())
        else:
          scale = distribution_util.make_diag_scale(**scale_args)
          self.assertAllClose(expected_scale, self.evaluate(scale.to_dense()))

  def testLegalInputs(self):
    self._testLegalInputs(
        loc=np.array([-1., -1.], dtype=np.float32),
        shape_hint=2,
        scale_params={
            "scale_identity_multiplier": 2.,
            "scale_diag": [2., 3.]
        })

  def testLegalInputsMultidimensional(self):
    self._testLegalInputs(
        loc=np.array([[[-1., -1., 2.], [-2., -3., 4.]]], dtype=np.float32),
        shape_hint=3,
        scale_params={
            "scale_identity_multiplier": 2.,
            "scale_diag": [[[2., 3., 4.], [3., 4., 5.]]]
        })

  def testValidateArgs(self):
    with self.cached_session():
      with self.assertRaisesOpError("diagonal part must be non-zero"):
        scale = distribution_util.make_diag_scale(
            scale_diag=[[0., 1], [1., 1.]], validate_args=True)
        self.evaluate(scale.to_dense())

  def testAssertPositive(self):
    with self.cached_session():
      with self.assertRaisesOpError("diagonal part must be positive"):
        scale = distribution_util.make_diag_scale(
            scale_diag=[[-1., 1], [1., 1.]],
            validate_args=True,
            assert_positive=True)
        self.evaluate(scale.to_dense())
class ShapesFromLocAndScaleTest(tf.test.TestCase):
  """Tests distribution_util.shapes_from_loc_and_scale over static/dynamic
  shape combinations of `loc` and a LinearOperator `scale`."""

  def test_static_loc_static_scale_non_matching_event_size_raises(self):
    loc = tf.constant(np.zeros((2, 4)))
    scale = tf.linalg.LinearOperatorDiag(np.ones((5, 1, 3)))
    with self.assertRaisesRegexp(ValueError, "could not be broadcast"):
      distribution_util.shapes_from_loc_and_scale(loc, scale)

  def test_static_loc_static_scale(self):
    loc = tf.constant(np.zeros((2, 3)))
    scale = tf.linalg.LinearOperatorDiag(np.ones((5, 1, 3)))
    batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
        loc, scale)
    self.assertEqual(tf.TensorShape([5, 2]), batch_shape)
    self.assertEqual(tf.TensorShape([3]), event_shape)

  def test_static_loc_dynamic_scale(self):
    loc = tf.constant(np.zeros((2, 3)))
    diag = tf.placeholder(tf.float64)
    scale = tf.linalg.LinearOperatorDiag(diag)
    with self.cached_session() as sess:
      batch_shape, event_shape = sess.run(
          distribution_util.shapes_from_loc_and_scale(loc, scale),
          feed_dict={diag: np.ones((5, 1, 3))})
      self.assertAllEqual([5, 2], batch_shape)
      self.assertAllEqual([3], event_shape)

  def test_dynamic_loc_static_scale(self):
    loc = tf.placeholder(tf.float64)
    diag = tf.constant(np.ones((5, 2, 3)))
    scale = tf.linalg.LinearOperatorDiag(diag)
    with self.cached_session():
      batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
          loc, scale)
      # batch_shape depends on both args, and so is dynamic. Since loc did not
      # have static shape, we inferred event shape entirely from scale, and this
      # is available statically.
      self.assertAllEqual(
          [5, 2], batch_shape.eval(feed_dict={loc: np.zeros((2, 3))}))
      self.assertAllEqual([3], event_shape)

  def test_dynamic_loc_dynamic_scale(self):
    loc = tf.placeholder(tf.float64)
    diag = tf.placeholder(tf.float64)
    scale = tf.linalg.LinearOperatorDiag(diag)
    with self.cached_session() as sess:
      batch_shape, event_shape = sess.run(
          distribution_util.shapes_from_loc_and_scale(loc, scale),
          feed_dict={diag: np.ones((5, 2, 3)), loc: np.zeros((2, 3))})
      self.assertAllEqual([5, 2], batch_shape)
      self.assertAllEqual([3], event_shape)

  def test_none_loc_static_scale(self):
    # loc=None: shapes come entirely from the scale operator.
    loc = None
    scale = tf.linalg.LinearOperatorDiag(np.ones((5, 1, 3)))
    batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
        loc, scale)
    self.assertEqual(tf.TensorShape([5, 1]), batch_shape)
    self.assertEqual(tf.TensorShape([3]), event_shape)

  def test_none_loc_dynamic_scale(self):
    loc = None
    diag = tf.placeholder(tf.float64)
    scale = tf.linalg.LinearOperatorDiag(diag)
    with self.cached_session() as sess:
      batch_shape, event_shape = sess.run(
          distribution_util.shapes_from_loc_and_scale(loc, scale),
          feed_dict={diag: np.ones((5, 1, 3))})
      self.assertAllEqual([5, 1], batch_shape)
      self.assertAllEqual([3], event_shape)
class GetBroadcastShapeTest(tf.test.TestCase):
  """Tests distribution_util.get_broadcast_shape with static and dynamic
  tensor shapes."""

  def test_all_static_shapes_work(self):
    x = tf.ones((2, 1, 3))
    y = tf.ones((1, 5, 3))
    z = tf.ones(())
    self.assertAllEqual([2, 5, 3],
                        distribution_util.get_broadcast_shape(x, y, z))

  def test_with_some_dynamic_shapes_works(self):
    x = tf.ones((2, 1, 3))
    y = tf.placeholder(x.dtype)
    z = tf.ones(())
    with self.cached_session() as sess:
      bcast_shape = sess.run(
          distribution_util.get_broadcast_shape(x, y, z),
          feed_dict={y: np.ones((1, 5, 3)).astype(np.float32)})
      self.assertAllEqual([2, 5, 3], bcast_shape)
class TridiagTest(tf.test.TestCase):
  """Tests distribution_util.tridiag(below, diag, above) assembly."""

  def testWorksCorrectlyNoBatches(self):
    with self.cached_session():
      self.assertAllEqual(
          [[4., 8., 0., 0.],
           [1., 5., 9., 0.],
           [0., 2., 6., 10.],
           [0., 0., 3, 7.]],
          self.evaluate(distribution_util.tridiag(
              [1., 2., 3.],
              [4., 5., 6., 7.],
              [8., 9., 10.])))

  def testWorksCorrectlyBatches(self):
    with self.cached_session():
      self.assertAllClose(
          [[[4., 8., 0., 0.],
            [1., 5., 9., 0.],
            [0., 2., 6., 10.],
            [0., 0., 3, 7.]],
           [[0.7, 0.1, 0.0, 0.0],
            [0.8, 0.6, 0.2, 0.0],
            [0.0, 0.9, 0.5, 0.3],
            [0.0, 0.0, 1.0, 0.4]]],
          self.evaluate(distribution_util.tridiag(
              [[1., 2., 3.],
               [0.8, 0.9, 1.]],
              [[4., 5., 6., 7.],
               [0.7, 0.6, 0.5, 0.4]],
              [[8., 9., 10.],
               [0.1, 0.2, 0.3]])),
          rtol=1e-5, atol=0.)

  def testHandlesNone(self):
    # Omitted bands default to zero: only the main diagonal is filled.
    with self.cached_session():
      self.assertAllClose(
          [[[4., 0., 0., 0.],
            [0., 5., 0., 0.],
            [0., 0., 6., 0.],
            [0., 0., 0, 7.]],
           [[0.7, 0.0, 0.0, 0.0],
            [0.0, 0.6, 0.0, 0.0],
            [0.0, 0.0, 0.5, 0.0],
            [0.0, 0.0, 0.0, 0.4]]],
          self.evaluate(distribution_util.tridiag(
              diag=[[4., 5., 6., 7.],
                    [0.7, 0.6, 0.5, 0.4]])),
          rtol=1e-5, atol=0.)
class MixtureStddevTest(tf.test.TestCase):
  """Tests distribution_util.mixture_stddev against hand-computed values."""

  def test_mixture_dev(self):
    mixture_weights = np.array([
        [1.0/3, 1.0/3, 1.0/3],
        [0.750, 0.250, 0.000]
    ])
    component_means = np.array([
        [1.0, 1.0, 1.0],
        [-5, 0, 1.25]
    ])
    component_devs = np.array([
        [1.0, 1.0, 1.0],
        [0.01, 2.0, 0.1]
    ])

    # The first case should trivially have a standard deviation of 1.0 because
    # all components are identical and have that standard deviation.
    # The second case was computed by hand.
    expected_devs = np.array([
        1.0,
        2.3848637277
    ])

    weights_tf = tf.constant(mixture_weights)
    means_tf = tf.constant(component_means)
    sigmas_tf = tf.constant(component_devs)
    mix_dev = distribution_util.mixture_stddev(weights_tf,
                                               means_tf,
                                               sigmas_tf)

    with self.cached_session() as sess:
      actual_devs = sess.run(mix_dev)

    self.assertAllClose(actual_devs, expected_devs)
class PadMixtureDimensionsTest(tf.test.TestCase):
  """Tests distribution_util.pad_mixture_dimensions for Mixture and
  MixtureSameFamily distributions: padding must only add size-1 dims and
  preserve the flattened values."""

  def test_pad_mixture_dimensions_mixture(self):
    with self.cached_session() as sess:
      gm = Mixture(
          cat=Categorical(probs=[[0.3, 0.7]]),
          components=[
              Normal(loc=[-1.0], scale=[1.0]),
              Normal(loc=[1.0], scale=[0.5])
          ])

      x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
      x_pad = distribution_util.pad_mixture_dimensions(
          x, gm, gm.cat, gm.event_shape.ndims)
      x_out, x_pad_out = sess.run([x, x_pad])

    # Scalar event: shape unchanged, values preserved.
    self.assertAllEqual(x_pad_out.shape, [2, 2])
    self.assertAllEqual(x_out.reshape([-1]), x_pad_out.reshape([-1]))

  def test_pad_mixture_dimensions_mixture_same_family(self):
    with self.cached_session() as sess:
      gm = MixtureSameFamily(
          mixture_distribution=Categorical(probs=[0.3, 0.7]),
          components_distribution=MultivariateNormalDiag(
              loc=[[-1., 1], [1, -1]], scale_identity_multiplier=[1.0, 0.5]))

      x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
      x_pad = distribution_util.pad_mixture_dimensions(
          x, gm, gm.mixture_distribution, gm.event_shape.ndims)
      x_out, x_pad_out = sess.run([x, x_pad])

    # Vector event: one trailing size-1 dim appended, values preserved.
    self.assertAllEqual(x_pad_out.shape, [2, 2, 1])
    self.assertAllEqual(x_out.reshape([-1]), x_pad_out.reshape([-1]))
class _PadTest(object):
  """Shared test bodies for distribution_util.pad.

  Mixed into concrete TestCases below; subclasses supply `is_static_shape`
  to exercise both static- and dynamic-shape code paths.
  """

  def testNegAxisCorrectness(self):
    x_ = np.float32([[1., 2, 3],
                     [4, 5, 6]])
    value_ = np.float32(0.25)
    count_ = np.int32(2)
    with self.cached_session() as sess:
      x = tf.placeholder_with_default(
          x_, shape=x_.shape if self.is_static_shape else None)
      value = (
          tf.constant(value_)
          if self.is_static_shape else tf.placeholder_with_default(
              value_, shape=None))
      count = (
          tf.constant(count_)
          if self.is_static_shape else tf.placeholder_with_default(
              count_, shape=None))

      # Pad along axis -2 (rows): front, back, and both ends.
      x0_front = distribution_util.pad(
          x, axis=-2, value=value, count=count, front=True)
      x0_back = distribution_util.pad(
          x, axis=-2, count=count, back=True)
      x0_both = distribution_util.pad(
          x, axis=-2, value=value, front=True, back=True)

      if self.is_static_shape:
        self.assertAllEqual([4, 3], x0_front.shape)
        self.assertAllEqual([4, 3], x0_back.shape)
        self.assertAllEqual([4, 3], x0_both.shape)

      [x0_front_, x0_back_, x0_both_] = sess.run([
          x0_front, x0_back, x0_both])

      self.assertAllClose(
          np.float32([[value_]*3,
                      [value_]*3,
                      [1, 2, 3],
                      [4, 5, 6]]),
          x0_front_, atol=0., rtol=1e-6)
      self.assertAllClose(
          np.float32([[1, 2, 3],
                      [4, 5, 6],
                      [0.]*3,
                      [0.]*3]),
          x0_back_, atol=0., rtol=1e-6)
      self.assertAllClose(
          np.float32([[value_]*3,
                      [1, 2, 3],
                      [4, 5, 6],
                      [value_]*3]),
          x0_both_, atol=0., rtol=1e-6)

  def testPosAxisCorrectness(self):
    x_ = np.float32([[1., 2, 3],
                     [4, 5, 6]])
    value_ = np.float32(0.25)
    count_ = np.int32(2)
    with self.cached_session() as sess:
      x = tf.placeholder_with_default(
          x_, shape=x_.shape if self.is_static_shape else None)
      value = (
          tf.constant(value_)
          if self.is_static_shape else tf.placeholder_with_default(
              value_, shape=None))
      count = (
          tf.constant(count_)
          if self.is_static_shape else tf.placeholder_with_default(
              count_, shape=None))

      # Pad along axis 1 (columns): front, back, and both ends.
      x1_front = distribution_util.pad(
          x, axis=1, value=value, count=count, front=True)
      x1_back = distribution_util.pad(
          x, axis=1, count=count, back=True)
      x1_both = distribution_util.pad(
          x, axis=1, value=value, front=True, back=True)

      if self.is_static_shape:
        self.assertAllEqual([2, 5], x1_front.shape)
        self.assertAllEqual([2, 5], x1_back.shape)
        self.assertAllEqual([2, 5], x1_both.shape)

      [x1_front_, x1_back_, x1_both_] = sess.run([
          x1_front, x1_back, x1_both])

      self.assertAllClose(
          np.float32([[value_]*2 + [1, 2, 3],
                      [value_]*2 + [4, 5, 6]]),
          x1_front_, atol=0., rtol=1e-6)
      self.assertAllClose(
          np.float32([[1, 2, 3] + [0.]*2,
                      [4, 5, 6] + [0.]*2]),
          x1_back_, atol=0., rtol=1e-6)
      self.assertAllClose(
          np.float32([[value_, 1, 2, 3, value_],
                      [value_, 4, 5, 6, value_]]),
          x1_both_, atol=0., rtol=1e-6)
class PadStaticTest(_PadTest, tf.test.TestCase):
  """Runs the _PadTest bodies with fully static tensor shapes."""

  @property
  def is_static_shape(self):
    return True
class PadDynamicTest(_PadTest, tf.test.TestCase):
  """Runs the _PadTest bodies with dynamic (placeholder) tensor shapes."""

  @property
  def is_static_shape(self):
    return False
class PickScalarConditionTest(tf.test.TestCase):
  """Tests distribution_util.pick_scalar_condition with Python-static,
  TF-static, and dynamic predicates."""

  @test_util.run_in_graph_and_eager_modes
  def test_pick_scalar_condition_static(self):
    pos = np.exp(np.random.randn(3, 2, 4)).astype(np.float32)
    neg = -np.exp(np.random.randn(3, 2, 4)).astype(np.float32)

    # Python static cond
    self.assertAllEqual(
        distribution_util.pick_scalar_condition(True, pos, neg), pos)
    self.assertAllEqual(
        distribution_util.pick_scalar_condition(False, pos, neg), neg)

    # TF static cond
    self.assertAllEqual(distribution_util.pick_scalar_condition(
        tf.constant(True), pos, neg), pos)
    self.assertAllEqual(distribution_util.pick_scalar_condition(
        tf.constant(False), pos, neg), neg)

  # Dynamic tests don't need to (/can't) run in Eager mode.
  def test_pick_scalar_condition_dynamic(self):
    pos = np.exp(np.random.randn(3, 2, 4)).astype(np.float32)
    neg = -np.exp(np.random.randn(3, 2, 4)).astype(np.float32)

    # TF dynamic cond
    dynamic_true = tf.placeholder_with_default(input=True, shape=None)
    dynamic_false = tf.placeholder_with_default(input=False, shape=None)
    pos_ = self.evaluate(distribution_util.pick_scalar_condition(
        dynamic_true, pos, neg))
    neg_ = self.evaluate(distribution_util.pick_scalar_condition(
        dynamic_false, pos, neg))
    self.assertAllEqual(pos_, pos)
    self.assertAllEqual(neg_, neg)

    # TF dynamic everything
    pos_dynamic = tf.placeholder_with_default(input=pos, shape=None)
    neg_dynamic = tf.placeholder_with_default(input=neg, shape=None)
    pos_ = self.evaluate(distribution_util.pick_scalar_condition(
        dynamic_true, pos_dynamic, neg_dynamic))
    neg_ = self.evaluate(distribution_util.pick_scalar_condition(
        dynamic_false, pos_dynamic, neg_dynamic))
    self.assertAllEqual(pos_, pos)
    self.assertAllEqual(neg_, neg)
class TestMoveDimension(tf.test.TestCase):
  """Tests distribution_util.move_dimension over static/dynamic shapes and
  static/dynamic source/destination indices (including negative indices)."""

  @test_util.run_in_graph_and_eager_modes
  def test_move_dimension_static_shape(self):
    x = tf.random_normal(shape=[200, 30, 4, 1, 6])

    # No-op move.
    x_perm = distribution_util.move_dimension(x, 1, 1)
    self.assertAllEqual(x_perm.shape.as_list(), [200, 30, 4, 1, 6])

    x_perm = distribution_util.move_dimension(x, 0, 3)
    self.assertAllEqual(x_perm.shape.as_list(), [30, 4, 1, 200, 6])

    # Negative destination index is equivalent to 3 here.
    x_perm = distribution_util.move_dimension(x, 0, -2)
    self.assertAllEqual(x_perm.shape.as_list(), [30, 4, 1, 200, 6])

    x_perm = distribution_util.move_dimension(x, 4, 2)
    self.assertAllEqual(x_perm.shape.as_list(), [200, 30, 6, 4, 1])

  @test_util.run_in_graph_and_eager_modes
  def test_move_dimension_dynamic_shape(self):

    x_ = tf.random_normal(shape=[200, 30, 4, 1, 6])
    x = tf.placeholder_with_default(input=x_, shape=None)

    x_perm1 = distribution_util.move_dimension(x, 1, 1)
    x_perm2 = distribution_util.move_dimension(x, 0, 3)
    x_perm3 = distribution_util.move_dimension(x, 0, -2)
    x_perm4 = distribution_util.move_dimension(x, 4, 2)
    x_perm5 = distribution_util.move_dimension(x, -1, 2)

    x_perm1_, x_perm2_, x_perm3_, x_perm4_, x_perm5_ = self.evaluate(
        [tf.shape(x_perm1),
         tf.shape(x_perm2),
         tf.shape(x_perm3),
         tf.shape(x_perm4),
         tf.shape(x_perm5)])

    self.assertAllEqual(x_perm1_, [200, 30, 4, 1, 6])

    self.assertAllEqual(x_perm2_, [30, 4, 1, 200, 6])

    self.assertAllEqual(x_perm3_, [30, 4, 1, 200, 6])

    self.assertAllEqual(x_perm4_, [200, 30, 6, 4, 1])

    self.assertAllEqual(x_perm5_, [200, 30, 6, 4, 1])

  @test_util.run_in_graph_and_eager_modes
  def test_move_dimension_dynamic_indices(self):

    x_ = tf.random_normal(shape=[200, 30, 4, 1, 6])
    x = tf.placeholder_with_default(input=x_, shape=None)

    # Same permutations as above, but the axis indices themselves are
    # fed as dynamic scalars.
    x_perm1 = distribution_util.move_dimension(
        x,
        tf.placeholder_with_default(input=1, shape=[]),
        tf.placeholder_with_default(input=1, shape=[]))

    x_perm2 = distribution_util.move_dimension(
        x,
        tf.placeholder_with_default(input=0, shape=[]),
        tf.placeholder_with_default(input=3, shape=[]))

    x_perm3 = distribution_util.move_dimension(
        x,
        tf.placeholder_with_default(input=0, shape=[]),
        tf.placeholder_with_default(input=-2, shape=[]))

    x_perm4 = distribution_util.move_dimension(
        x,
        tf.placeholder_with_default(input=4, shape=[]),
        tf.placeholder_with_default(input=2, shape=[]))

    x_perm5 = distribution_util.move_dimension(
        x,
        tf.placeholder_with_default(input=-1, shape=[]),
        tf.placeholder_with_default(input=2, shape=[]))

    x_perm1_, x_perm2_, x_perm3_, x_perm4_, x_perm5_ = self.evaluate(
        [tf.shape(x_perm1),
         tf.shape(x_perm2),
         tf.shape(x_perm3),
         tf.shape(x_perm4),
         tf.shape(x_perm5)])

    self.assertAllEqual(x_perm1_, [200, 30, 4, 1, 6])

    self.assertAllEqual(x_perm2_, [30, 4, 1, 200, 6])

    self.assertAllEqual(x_perm3_, [30, 4, 1, 200, 6])

    self.assertAllEqual(x_perm4_, [200, 30, 6, 4, 1])

    self.assertAllEqual(x_perm5_, [200, 30, 6, 4, 1])
class AssertCloseTest(tf.test.TestCase):
  """Tests distribution_util.assert_integer_form: values within float
  epsilon of an integer pass; anything else raises."""

  def testAssertIntegerForm(self):
    # This should only be detected as an integer.
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    # First component isn't less than float32.eps = 1e-7
    z = tf.placeholder(tf.float32)
    # This shouldn"t be detected as an integer.
    w = tf.placeholder(tf.float32)
    feed_dict = {x: [1., 5, 10, 15, 20], y: [1.1, 5, 10, 15, 20],
                 z: [1.0001, 5, 10, 15, 20], w: [1e-8, 5, 10, 15, 20]}
    with self.cached_session():
      with tf.control_dependencies([distribution_util.assert_integer_form(x)]):
        tf.identity(x).eval(feed_dict=feed_dict)

      with self.assertRaisesOpError("has non-integer components"):
        with tf.control_dependencies(
            [distribution_util.assert_integer_form(y)]):
          tf.identity(y).eval(feed_dict=feed_dict)

      with self.assertRaisesOpError("has non-integer components"):
        with tf.control_dependencies(
            [distribution_util.assert_integer_form(z)]):
          tf.identity(z).eval(feed_dict=feed_dict)

      with self.assertRaisesOpError("has non-integer components"):
        with tf.control_dependencies(
            [distribution_util.assert_integer_form(w)]):
          tf.identity(w).eval(feed_dict=feed_dict)
class MaybeGetStaticTest(tf.test.TestCase):
  """Tests distribution_util.maybe_get_static_value: static values (ints,
  ndarrays, constants) are recovered, placeholders yield None."""

  @test_util.run_in_graph_and_eager_modes
  def testGetStaticInt(self):
    x = 2
    self.assertEqual(x, distribution_util.maybe_get_static_value(x))
    self.assertAllClose(
        np.array(2.),
        distribution_util.maybe_get_static_value(x, dtype=np.float64))

  @test_util.run_in_graph_and_eager_modes
  def testGetStaticNumpyArray(self):
    x = np.array(2, dtype=np.int32)
    self.assertEqual(x, distribution_util.maybe_get_static_value(x))
    self.assertAllClose(
        np.array(2.),
        distribution_util.maybe_get_static_value(x, dtype=np.float64))

  @test_util.run_in_graph_and_eager_modes
  def testGetStaticConstant(self):
    x = tf.constant(2, dtype=tf.int32)
    self.assertEqual(np.array(2, dtype=np.int32),
                     distribution_util.maybe_get_static_value(x))
    self.assertAllClose(
        np.array(2.),
        distribution_util.maybe_get_static_value(x, dtype=np.float64))

  def testGetStaticPlaceholder(self):
    # Placeholders have no static value in graph mode.
    x = tf.placeholder(dtype=tf.int32, shape=[1])
    self.assertEqual(None, distribution_util.maybe_get_static_value(x))
    self.assertEqual(
        None, distribution_util.maybe_get_static_value(x, dtype=np.float64))
class GetLogitsAndProbsTest(tf.test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testImproperArguments(self):
with self.assertRaises(ValueError):
distribution_util.get_logits_and_probs(logits=None, probs=None)
with self.assertRaises(ValueError):
distribution_util.get_logits_and_probs(logits=[0.1], probs=[0.1])
@test_util.run_in_graph_and_eager_modes
def testLogits(self):
p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
logits = _logit(p)
new_logits, new_p = distribution_util.get_logits_and_probs(
logits=logits, validate_args=True)
self.assertAllClose(p, self.evaluate(new_p), rtol=1e-5, atol=0.)
self.assertAllClose(logits, self.evaluate(new_logits), rtol=1e-5, atol=0.)
@test_util.run_in_graph_and_eager_modes
def testLogitsMultidimensional(self):
p = np.array([0.2, 0.3, 0.5], dtype=np.float32)
logits = np.log(p)
new_logits, new_p = distribution_util.get_logits_and_probs(
logits=logits, multidimensional=True, validate_args=True)
self.assertAllClose(self.evaluate(new_p), p)
self.assertAllClose(self.evaluate(new_logits), logits)
@test_util.run_in_graph_and_eager_modes
def testProbability(self):
p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
new_logits, new_p = distribution_util.get_logits_and_probs(
probs=p, validate_args=True)
self.assertAllClose(_logit(p), self.evaluate(new_logits))
self.assertAllClose(p, self.evaluate(new_p))
@test_util.run_in_graph_and_eager_modes
def testProbabilityMultidimensional(self):
p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
new_logits, new_p = distribution_util.get_logits_and_probs(
probs=p, multidimensional=True, validate_args=True)
self.assertAllClose(np.log(p), self.evaluate(new_logits))
self.assertAllClose(p, self.evaluate(new_p))
@test_util.run_in_graph_and_eager_modes
def testProbabilityValidateArgs(self):
p = [0.01, 0.2, 0.5, 0.7, .99]
# Component less than 0.
p2 = [-1, 0.2, 0.5, 0.3, .2]
# Component greater than 1.
p3 = [2, 0.2, 0.5, 0.3, .2]
_, prob = distribution_util.get_logits_and_probs(
probs=p, validate_args=True)
self.evaluate(prob)
with self.assertRaisesOpError("Condition x >= 0"):
_, prob = distribution_util.get_logits_and_probs(
probs=p2, validate_args=True)
self.evaluate(prob)
_, prob = distribution_util.get_logits_and_probs(
probs=p2, validate_args=False)
self.evaluate(prob)
with self.assertRaisesOpError("probs has components greater than 1"):
_, prob = distribution_util.get_logits_and_probs(
probs=p3, validate_args=True)
self.evaluate(prob)
_, prob = distribution_util.get_logits_and_probs(
probs=p3, validate_args=False)
self.evaluate(prob)
@test_util.run_in_graph_and_eager_modes
def testProbabilityValidateArgsMultidimensional(self):
  # Multidimensional probs additionally require each row to sum to 1;
  # violations raise only when validate_args=True.
  p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
  # Component less than 0. Still sums to 1.
  p2 = np.array([[-.3, 0.4, 0.9], [0.1, 0.5, 0.4]], dtype=np.float32)
  # Component greater than 1. Does not sum to 1.
  p3 = np.array([[1.3, 0.0, 0.0], [0.1, 0.5, 0.4]], dtype=np.float32)
  # Does not sum to 1.
  p4 = np.array([[1.1, 0.3, 0.4], [0.1, 0.5, 0.4]], dtype=np.float32)

  _, prob = distribution_util.get_logits_and_probs(
      probs=p, multidimensional=True)
  self.evaluate(prob)

  with self.assertRaisesOpError("Condition x >= 0"):
    _, prob = distribution_util.get_logits_and_probs(
        probs=p2, multidimensional=True, validate_args=True)
    self.evaluate(prob)

  _, prob = distribution_util.get_logits_and_probs(
      probs=p2, multidimensional=True, validate_args=False)
  self.evaluate(prob)

  with self.assertRaisesOpError(
      "(probs has components greater than 1|probs does not sum to 1)"):
    _, prob = distribution_util.get_logits_and_probs(
        probs=p3, multidimensional=True, validate_args=True)
    self.evaluate(prob)

  _, prob = distribution_util.get_logits_and_probs(
      probs=p3, multidimensional=True, validate_args=False)
  self.evaluate(prob)

  with self.assertRaisesOpError("probs does not sum to 1"):
    _, prob = distribution_util.get_logits_and_probs(
        probs=p4, multidimensional=True, validate_args=True)
    self.evaluate(prob)

  _, prob = distribution_util.get_logits_and_probs(
      probs=p4, multidimensional=True, validate_args=False)
  self.evaluate(prob)
def testProbsMultidimShape(self):
  # More classes than float16 can represent exactly (> 2**11) must be
  # rejected: statically when the shape is known, at run time otherwise.
  with self.cached_session():
    with self.assertRaises(ValueError):
      p = tf.ones([int(2**11+1)], dtype=np.float16)
      distribution_util.get_logits_and_probs(
          probs=p, multidimensional=True, validate_args=True)

    with self.assertRaisesOpError(
        "Number of classes exceeds `dtype` precision"):
      p = tf.placeholder(dtype=tf.float16)
      _, prob = distribution_util.get_logits_and_probs(
          probs=p, multidimensional=True, validate_args=True)
      prob.eval(feed_dict={p: np.ones([int(2**11+1)])})
def testLogitsMultidimShape(self):
  # Same precision check as testProbsMultidimShape, but entering via logits.
  with self.cached_session():
    with self.assertRaises(ValueError):
      l = tf.ones([int(2**11+1)], dtype=np.float16)
      distribution_util.get_logits_and_probs(
          logits=l, multidimensional=True, validate_args=True)

    with self.assertRaisesOpError(
        "Number of classes exceeds `dtype` precision"):
      l = tf.placeholder(dtype=tf.float16)
      logit, _ = distribution_util.get_logits_and_probs(
          logits=l, multidimensional=True, validate_args=True)
      logit.eval(feed_dict={l: np.ones([int(2**11+1)])})
class EmbedCheckCategoricalEventShapeTest(tf.test.TestCase):
  """Tests for distribution_util.embed_check_categorical_event_shape."""

  def testTooSmall(self):
    # Fewer than 2 events is invalid: statically (ValueError) when the shape
    # is known, via an op error when fed at run time.
    with self.cached_session():
      with self.assertRaises(ValueError):
        param = tf.ones([1], dtype=np.float16)
        checked_param = distribution_util.embed_check_categorical_event_shape(
            param)

      with self.assertRaisesOpError(
          "must have at least 2 events"):
        param = tf.placeholder(dtype=tf.float16)
        checked_param = distribution_util.embed_check_categorical_event_shape(
            param)
        checked_param.eval(feed_dict={param: np.ones([1])})

  def testTooLarge(self):
    # More classes than float16 can count exactly (> 2**11) is invalid.
    with self.cached_session():
      with self.assertRaises(ValueError):
        param = tf.ones([int(2**11+1)], dtype=tf.float16)
        checked_param = distribution_util.embed_check_categorical_event_shape(
            param)

      with self.assertRaisesOpError(
          "Number of classes exceeds `dtype` precision"):
        param = tf.placeholder(dtype=tf.float16)
        checked_param = distribution_util.embed_check_categorical_event_shape(
            param)
        checked_param.eval(feed_dict={param: np.ones([int(2**11+1)])})

  @test_util.run_in_graph_and_eager_modes
  def testUnsupportedDtype(self):
    # Quantized dtypes are not valid categorical parameter dtypes.
    param = tf.convert_to_tensor(
        np.ones([2**11 + 1]).astype(tf.qint16.as_numpy_dtype),
        dtype=tf.qint16)
    with self.assertRaises(TypeError):
      distribution_util.embed_check_categorical_event_shape(param)
class EmbedCheckIntegerCastingClosedTest(tf.test.TestCase):
  """Tests for distribution_util.embed_check_integer_casting_closed.

  Each test feeds a value that violates one of the cast-closure conditions
  (non-negative, integer-valued, within the target dtype's range) and expects
  the corresponding op error.
  """

  def testCorrectlyAssertsNonnegative(self):
    with self.cached_session():
      with self.assertRaisesOpError("Elements must be non-negative"):
        x = tf.placeholder(dtype=tf.float16)
        x_checked = distribution_util.embed_check_integer_casting_closed(
            x, target_dtype=tf.int16)
        x_checked.eval(feed_dict={x: np.array([1, -1], dtype=np.float16)})

  def testCorrectlyAssersIntegerForm(self):
    # Fractional values cannot be losslessly cast to an integer dtype.
    with self.cached_session():
      with self.assertRaisesOpError("Elements must be int16-equivalent."):
        x = tf.placeholder(dtype=tf.float16)
        x_checked = distribution_util.embed_check_integer_casting_closed(
            x, target_dtype=tf.int16)
        x_checked.eval(feed_dict={x: np.array([1, 1.5], dtype=np.float16)})

  def testCorrectlyAssertsLargestPossibleInteger(self):
    # 2**15 exceeds int16's maximum of 32767.
    with self.cached_session():
      with self.assertRaisesOpError("Elements cannot exceed 32767."):
        x = tf.placeholder(dtype=tf.int32)
        x_checked = distribution_util.embed_check_integer_casting_closed(
            x, target_dtype=tf.int16)
        x_checked.eval(feed_dict={x: np.array([1, 2**15], dtype=np.int32)})

  def testCorrectlyAssertsSmallestPossibleInteger(self):
    # uint16 cannot hold negatives even with the nonnegativity check off.
    with self.cached_session():
      with self.assertRaisesOpError("Elements cannot be smaller than 0."):
        x = tf.placeholder(dtype=tf.int32)
        x_checked = distribution_util.embed_check_integer_casting_closed(
            x, target_dtype=tf.uint16, assert_nonnegative=False)
        x_checked.eval(feed_dict={x: np.array([1, -1], dtype=np.int32)})
@test_util.run_all_in_graph_and_eager_modes
class LogCombinationsTest(tf.test.TestCase):
  """Tests for distribution_util.log_combinations."""

  def testLogCombinationsBinomial(self):
    n = [2, 5, 12, 15]
    k = [1, 2, 4, 11]
    if not special:
      # scipy is unavailable; cannot compute the reference values.
      return

    log_combs = np.log(special.binom(n, k))

    n = np.array(n, dtype=np.float32)
    # Each row of counts sums to the corresponding n, placing k in one bin.
    counts = [[1., 1], [2., 3], [4., 8], [11, 4]]
    log_binom = distribution_util.log_combinations(n, counts)
    self.assertEqual([4], log_binom.get_shape())
    self.assertAllClose(log_combs, self.evaluate(log_binom))

  def testLogCombinationsShape(self):
    # Shape [2, 2]
    n = [[2, 5], [12, 15]]
    n = np.array(n, dtype=np.float32)
    # Shape [2, 2, 4]
    counts = [[[1., 1, 0, 0], [2., 2, 1, 0]], [[4., 4, 1, 3], [10, 1, 1, 4]]]
    log_binom = distribution_util.log_combinations(n, counts)
    # The trailing counts dimension is reduced away.
    self.assertEqual([2, 2], log_binom.get_shape())
class DynamicShapeTest(tf.test.TestCase):
  """Tests for distribution_util.same_dynamic_shape."""

  def testSameDynamicShape(self):
    # Exercises all pairings of scalar / vector / matrix where at least one
    # operand has a fully or partially unknown static shape.
    with self.cached_session():
      scalar = tf.constant(2.0)
      scalar1 = tf.placeholder(dtype=tf.float32)

      vector = [0.3, 0.4, 0.5]
      vector1 = tf.placeholder(dtype=tf.float32, shape=[None])
      vector2 = tf.placeholder(dtype=tf.float32, shape=[None])

      multidimensional = [[0.3, 0.4], [0.2, 0.6]]
      multidimensional1 = tf.placeholder(
          dtype=tf.float32, shape=[None, None])
      multidimensional2 = tf.placeholder(
          dtype=tf.float32, shape=[None, None])

      # Scalar
      self.assertTrue(
          distribution_util.same_dynamic_shape(scalar, scalar1).eval({
              scalar1: 2.0
          }))

      # Vector
      self.assertTrue(
          distribution_util.same_dynamic_shape(vector, vector1).eval({
              vector1: [2.0, 3.0, 4.0]
          }))
      self.assertTrue(
          distribution_util.same_dynamic_shape(vector1, vector2).eval({
              vector1: [2.0, 3.0, 4.0],
              vector2: [2.0, 3.5, 6.0]
          }))

      # Multidimensional
      self.assertTrue(
          distribution_util.same_dynamic_shape(
              multidimensional, multidimensional1).eval({
                  multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
              }))
      self.assertTrue(
          distribution_util.same_dynamic_shape(
              multidimensional1, multidimensional2).eval({
                  multidimensional1: [[2.0, 3.0], [3.0, 4.0]],
                  multidimensional2: [[1.0, 3.5], [6.3, 2.3]]
              }))

      # Scalar, X
      self.assertFalse(
          distribution_util.same_dynamic_shape(scalar, vector1).eval({
              vector1: [2.0, 3.0, 4.0]
          }))
      self.assertFalse(
          distribution_util.same_dynamic_shape(scalar1, vector1).eval({
              scalar1: 2.0,
              vector1: [2.0, 3.0, 4.0]
          }))
      self.assertFalse(
          distribution_util.same_dynamic_shape(scalar, multidimensional1).eval({
              multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
          }))
      self.assertFalse(
          distribution_util.same_dynamic_shape(scalar1, multidimensional1).eval(
              {
                  scalar1: 2.0,
                  multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
              }))

      # Vector, X
      self.assertFalse(
          distribution_util.same_dynamic_shape(vector, vector1).eval({
              vector1: [2.0, 3.0]
          }))
      self.assertFalse(
          distribution_util.same_dynamic_shape(vector1, vector2).eval({
              vector1: [2.0, 3.0, 4.0],
              vector2: [6.0]
          }))
      self.assertFalse(
          distribution_util.same_dynamic_shape(vector, multidimensional1).eval({
              multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
          }))
      self.assertFalse(
          distribution_util.same_dynamic_shape(vector1, multidimensional1).eval(
              {
                  vector1: [2.0, 3.0, 4.0],
                  multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
              }))

      # Multidimensional, X
      self.assertFalse(
          distribution_util.same_dynamic_shape(
              multidimensional, multidimensional1).eval({
                  multidimensional1: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]
              }))
      self.assertFalse(
          distribution_util.same_dynamic_shape(
              multidimensional1, multidimensional2).eval({
                  multidimensional1: [[2.0, 3.0], [3.0, 4.0]],
                  multidimensional2: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]
              }))
class RotateTransposeTest(tf.test.TestCase):
  """Tests for distribution_util.rotate_transpose."""

  def _np_rotate_transpose(self, x, shift):
    # Numpy reference: cyclically roll the axis order by `shift`.
    if not isinstance(x, np.ndarray):
      x = np.array(x)
    return np.transpose(x, np.roll(np.arange(len(x.shape)), shift))

  @test_util.run_in_graph_and_eager_modes
  def testRollStatic(self):
    # The None-input error message differs between eager and graph modes.
    if tf.executing_eagerly():
      error_message = r"Attempt to convert a value \(None\)"
    else:
      error_message = "None values not supported."
    with self.assertRaisesRegexp(ValueError, error_message):
      distribution_util.rotate_transpose(None, 1)
    for x in (np.ones(1), np.ones((2, 1)), np.ones((3, 2, 1))):
      for shift in np.arange(-5, 5):
        y = distribution_util.rotate_transpose(x, shift)
        self.assertAllEqual(
            self._np_rotate_transpose(x, shift), self.evaluate(y))
        self.assertAllEqual(np.roll(x.shape, shift), y.get_shape().as_list())

  def testRollDynamic(self):
    # Same as testRollStatic, but with both the value and the shift supplied
    # through placeholders so no static shape information is available.
    with self.cached_session() as sess:
      x = tf.placeholder(tf.float32)
      shift = tf.placeholder(tf.int32)
      for x_value in (np.ones(
          1, dtype=x.dtype.as_numpy_dtype()), np.ones(
              (2, 1), dtype=x.dtype.as_numpy_dtype()), np.ones(
                  (3, 2, 1), dtype=x.dtype.as_numpy_dtype())):
        for shift_value in np.arange(-5, 5):
          self.assertAllEqual(
              self._np_rotate_transpose(x_value, shift_value),
              sess.run(distribution_util.rotate_transpose(x, shift),
                       feed_dict={x: x_value,
                                  shift: shift_value}))
class PickVectorTest(tf.test.TestCase):
  """Tests for distribution_util.pick_vector."""

  def testCorrectlyPicksVector(self):
    with self.cached_session():
      x = np.arange(10, 12)
      y = np.arange(15, 18)
      # A tensor-valued predicate requires evaluation...
      self.assertAllEqual(
          x, self.evaluate(distribution_util.pick_vector(tf.less(0, 5), x, y)))
      self.assertAllEqual(
          y, self.evaluate(distribution_util.pick_vector(tf.less(5, 0), x, y)))
      # ...but a constant predicate is resolved statically, so the chosen
      # vector comes back directly without a session run.
      self.assertAllEqual(x,
                          distribution_util.pick_vector(
                              tf.constant(True), x, y))  # No eval.
      self.assertAllEqual(y,
                          distribution_util.pick_vector(
                              tf.constant(False), x, y))  # No eval.
class PreferStaticRankTest(tf.test.TestCase):
  """Tests for distribution_util.prefer_static_rank.

  When the rank is statically known the result is a numpy ndarray; otherwise
  it is a tensor that must be evaluated with a feed.
  """

  def testNonEmptyConstantTensor(self):
    x = tf.zeros((2, 3, 4))
    rank = distribution_util.prefer_static_rank(x)
    self.assertIsInstance(rank, np.ndarray)
    self.assertEqual(3, rank)

  def testEmptyConstantTensor(self):
    x = tf.constant([])
    rank = distribution_util.prefer_static_rank(x)
    self.assertIsInstance(rank, np.ndarray)
    self.assertEqual(1, rank)

  def testScalarTensor(self):
    x = tf.constant(1.)
    rank = distribution_util.prefer_static_rank(x)
    self.assertIsInstance(rank, np.ndarray)
    self.assertEqual(0, rank)

  def testDynamicRankEndsUpBeingNonEmpty(self):
    x = tf.placeholder(np.float64, shape=None)
    rank = distribution_util.prefer_static_rank(x)
    with self.cached_session():
      self.assertAllEqual(2, rank.eval(feed_dict={x: np.zeros((2, 3))}))

  def testDynamicRankEndsUpBeingEmpty(self):
    x = tf.placeholder(np.int32, shape=None)
    rank = distribution_util.prefer_static_rank(x)
    with self.cached_session():
      self.assertAllEqual(1, rank.eval(feed_dict={x: []}))

  def testDynamicRankEndsUpBeingScalar(self):
    x = tf.placeholder(np.int32, shape=None)
    rank = distribution_util.prefer_static_rank(x)
    with self.cached_session():
      self.assertAllEqual(0, rank.eval(feed_dict={x: 1}))
class PreferStaticShapeTest(tf.test.TestCase):
  """Tests for distribution_util.prefer_static_shape.

  Mirrors PreferStaticRankTest: statically-known shapes come back as numpy
  ndarrays, unknown shapes as tensors needing a feed.
  """

  def testNonEmptyConstantTensor(self):
    x = tf.zeros((2, 3, 4))
    shape = distribution_util.prefer_static_shape(x)
    self.assertIsInstance(shape, np.ndarray)
    self.assertAllEqual(np.array([2, 3, 4]), shape)

  def testEmptyConstantTensor(self):
    x = tf.constant([])
    shape = distribution_util.prefer_static_shape(x)
    self.assertIsInstance(shape, np.ndarray)
    self.assertAllEqual(np.array([0]), shape)

  def testScalarTensor(self):
    x = tf.constant(1.)
    shape = distribution_util.prefer_static_shape(x)
    self.assertIsInstance(shape, np.ndarray)
    self.assertAllEqual(np.array([]), shape)

  def testDynamicShapeEndsUpBeingNonEmpty(self):
    x = tf.placeholder(np.float64, shape=None)
    shape = distribution_util.prefer_static_shape(x)
    with self.cached_session():
      self.assertAllEqual((2, 3), shape.eval(feed_dict={x: np.zeros((2, 3))}))

  def testDynamicShapeEndsUpBeingEmpty(self):
    x = tf.placeholder(np.int32, shape=None)
    shape = distribution_util.prefer_static_shape(x)
    with self.cached_session():
      self.assertAllEqual(np.array([0]), shape.eval(feed_dict={x: []}))

  def testDynamicShapeEndsUpBeingScalar(self):
    x = tf.placeholder(np.int32, shape=None)
    shape = distribution_util.prefer_static_shape(x)
    with self.cached_session():
      self.assertAllEqual(np.array([]), shape.eval(feed_dict={x: 1}))
class PreferStaticValueTest(tf.test.TestCase):
  """Tests for distribution_util.prefer_static_value.

  Statically-known values come back as numpy ndarrays; otherwise a tensor is
  returned and must be evaluated with a feed.
  """

  def testNonEmptyConstantTensor(self):
    x = tf.zeros((2, 3, 4))
    value = distribution_util.prefer_static_value(x)
    self.assertIsInstance(value, np.ndarray)
    self.assertAllEqual(np.zeros((2, 3, 4)), value)

  def testEmptyConstantTensor(self):
    x = tf.constant([])
    value = distribution_util.prefer_static_value(x)
    self.assertIsInstance(value, np.ndarray)
    self.assertAllEqual(np.array([]), value)

  def testScalarTensor(self):
    x = tf.constant(1.)
    value = distribution_util.prefer_static_value(x)
    self.assertIsInstance(value, np.ndarray)
    self.assertAllEqual(np.array(1.), value)

  def testDynamicValueEndsUpBeingNonEmpty(self):
    x = tf.placeholder(np.float64, shape=None)
    value = distribution_util.prefer_static_value(x)
    with self.cached_session():
      self.assertAllEqual(np.zeros((2, 3)),
                          value.eval(feed_dict={x: np.zeros((2, 3))}))

  def testDynamicValueEndsUpBeingEmpty(self):
    x = tf.placeholder(np.int32, shape=None)
    value = distribution_util.prefer_static_value(x)
    with self.cached_session():
      self.assertAllEqual(np.array([]), value.eval(feed_dict={x: []}))

  def testDynamicValueEndsUpBeingScalar(self):
    x = tf.placeholder(np.int32, shape=None)
    value = distribution_util.prefer_static_value(x)
    with self.cached_session():
      self.assertAllEqual(np.array(1), value.eval(feed_dict={x: 1}))
class FillTriangularTest(tf.test.TestCase):
  """Tests for distribution_util.fill_triangular.

  Verifies values, shapes, and gradients against a numpy reference, for both
  lower- and upper-triangular output and for statically-unknown input shapes.
  """

  def setUp(self):
    # Fixed seed keeps the randomized inputs reproducible.
    self._rng = np.random.RandomState(42)

  def _fill_triangular(self, x, upper=False):
    """Numpy implementation of `fill_triangular`."""
    x = np.asarray(x)
    # Formula derived by solving for n: m = n(n+1)/2.
    m = np.int32(x.shape[-1])
    n = np.sqrt(0.25 + 2. * m) - 0.5
    if n != np.floor(n):
      raise ValueError("Invalid shape.")
    n = np.int32(n)
    # We can't do: `x[..., -(n**2-m):]` because this doesn't correctly handle
    # `m == n == 1`. Hence, we do absolute indexing.
    x_tail = x[..., (m - (n * n - m)):]
    y = np.concatenate(
        [x, x_tail[..., ::-1]] if upper else [x_tail, x[..., ::-1]],
        axis=-1)
    y = y.reshape(np.concatenate([
        np.int32(x.shape[:-1]),
        np.int32([n, n]),
    ], axis=0))
    return np.triu(y) if upper else np.tril(y)

  def _run_test(self, x_, use_deferred_shape=False, **kwargs):
    # Shared driver: compare fill_triangular's value and gradient against the
    # numpy reference for the given input.
    x_ = np.asarray(x_)
    with self.cached_session() as sess:
      static_shape = None if use_deferred_shape else x_.shape
      x_pl = tf.placeholder_with_default(x_, shape=static_shape)
      # Add `zeros_like(x)` such that x's value and gradient are identical. We
      # do this so we can ensure each gradient value is mapped to the right
      # gradient location. (Not doing this means the gradient wrt `x` is simple
      # `ones_like(x)`.)
      # Note:
      #   zeros_like_x_pl == zeros_like(x_pl)
      #   gradient(zeros_like_x_pl, x_pl) == x_pl - 1
      zeros_like_x_pl = (x_pl * tf.stop_gradient(x_pl - 1.)
                         - tf.stop_gradient(x_pl * (x_pl - 1.)))
      x = x_pl + zeros_like_x_pl
      actual = distribution_util.fill_triangular(x, **kwargs)
      grad_actual = tf.gradients(actual, x_pl)[0]
      [actual_, grad_actual_] = sess.run([actual, grad_actual],
                                         feed_dict={x_pl: x_})
    expected = self._fill_triangular(x_, **kwargs)
    if use_deferred_shape:
      self.assertEqual(None, actual.shape)
    else:
      self.assertAllEqual(expected.shape, actual.shape)
    self.assertAllClose(expected, actual_, rtol=1e-8, atol=1e-9)
    self.assertAllClose(x_, grad_actual_, rtol=1e-8, atol=1e-9)

  def testCorrectlyMakes1x1TriLower(self):
    self._run_test(self._rng.randn(3, int(1*2/2)))

  def testCorrectlyMakesNoBatchTriLower(self):
    self._run_test(self._rng.randn(int(4*5/2)))

  def testCorrectlyMakesBatchTriLower(self):
    self._run_test(self._rng.randn(2, 3, int(3*4/2)))

  def testCorrectlyMakesBatchTriLowerUnknownShape(self):
    self._run_test(self._rng.randn(2, 3, int(3*4/2)), use_deferred_shape=True)

  def testCorrectlyMakesBatch7x7TriLowerUnknownShape(self):
    self._run_test(self._rng.randn(2, 3, int(7*8/2)), use_deferred_shape=True)

  def testCorrectlyMakesBatch7x7TriLower(self):
    self._run_test(self._rng.randn(2, 3, int(7*8/2)))

  def testCorrectlyMakes1x1TriUpper(self):
    self._run_test(self._rng.randn(3, int(1*2/2)), upper=True)

  def testCorrectlyMakesNoBatchTriUpper(self):
    self._run_test(self._rng.randn(int(4*5/2)), upper=True)

  def testCorrectlyMakesBatchTriUpper(self):
    self._run_test(self._rng.randn(2, 2, int(3*4/2)), upper=True)

  def testCorrectlyMakesBatchTriUpperUnknownShape(self):
    self._run_test(self._rng.randn(2, 2, int(3*4/2)),
                   use_deferred_shape=True,
                   upper=True)

  def testCorrectlyMakesBatch7x7TriUpperUnknownShape(self):
    self._run_test(self._rng.randn(2, 3, int(7*8/2)),
                   use_deferred_shape=True,
                   upper=True)

  def testCorrectlyMakesBatch7x7TriUpper(self):
    self._run_test(self._rng.randn(2, 3, int(7*8/2)), upper=True)
class FillTriangularInverseTest(FillTriangularTest):
  """Round-trip tests for distribution_util.fill_triangular_inverse.

  Reuses FillTriangularTest's test methods but overrides the driver to check
  that fill_triangular_inverse(fill_triangular(x)) == x.
  """

  def _run_test(self, x_, use_deferred_shape=False, **kwargs):
    x_ = np.asarray(x_)
    with self.cached_session() as sess:
      static_shape = None if use_deferred_shape else x_.shape
      x_pl = tf.placeholder_with_default(x_, shape=static_shape)
      # Same value-with-gradient trick as in the parent class.
      zeros_like_x_pl = (x_pl * tf.stop_gradient(x_pl - 1.)
                         - tf.stop_gradient(x_pl * (x_pl - 1.)))
      x = x_pl + zeros_like_x_pl
      actual = distribution_util.fill_triangular(x, **kwargs)
      inverse_actual = distribution_util.fill_triangular_inverse(
          actual, **kwargs)

      inverse_actual_ = sess.run(
          inverse_actual,
          feed_dict={x_pl: x_})

    if use_deferred_shape:
      self.assertEqual(None, inverse_actual.shape)
    else:
      self.assertAllEqual(x_.shape, inverse_actual.shape)
    self.assertAllEqual(x_, inverse_actual_)
class ReduceWeightedLogSumExp(tf.test.TestCase):
  """Tests for distribution_util.reduce_weighted_logsumexp."""

  def _reduce_weighted_logsumexp(self, logx, w, axis, keep_dims=False):
    # Numpy reference: log|sum_i w_i exp(logx_i)| and the sign of that sum,
    # computed with the usual max-shift for numerical stability.
    m = np.max(logx, axis=axis, keepdims=True)
    sum_ = np.sum(w * np.exp(logx - m), axis=axis, keepdims=keep_dims)
    sgn = np.sign(sum_)
    if not keep_dims:
      m = np.squeeze(m, axis=axis)
    return m + np.log(sgn * sum_), sgn

  def testNoWeights(self):
    # With implicit unit weights the result must match tf.reduce_logsumexp,
    # in both value and gradient, and all signs must be +1.
    logx_ = np.array([[0., -1, 1000.],
                      [0, 1, -1000.],
                      [-5, 0, 5]])
    with self.cached_session() as sess:
      logx = tf.constant(logx_)
      expected = tf.reduce_logsumexp(logx, axis=-1)
      grad_expected = tf.gradients(expected, logx)[0]
      actual, actual_sgn = distribution_util.reduce_weighted_logsumexp(
          logx, axis=-1, return_sign=True)
      grad_actual = tf.gradients(actual, logx)[0]
      [actual_, actual_sgn_, grad_actual_,
       expected_, grad_expected_] = sess.run([
           actual, actual_sgn, grad_actual,
           expected, grad_expected])
    self.assertAllEqual(expected_, actual_)
    self.assertAllEqual(grad_expected_, grad_actual_)
    self.assertAllEqual([1., 1, 1], actual_sgn_)

  def testNegativeWeights(self):
    logx_ = np.array([[0., -1, 1000.],
                      [0, 1, -1000.],
                      [-5, 0, 5]])
    w_ = np.array([[1., 1, -1],
                   [1, -2, 1],
                   [1, 0, 1]])
    expected, _ = self._reduce_weighted_logsumexp(logx_, w_, axis=-1)
    with self.cached_session() as sess:
      logx = tf.constant(logx_)
      w = tf.constant(w_)
      actual, actual_sgn = distribution_util.reduce_weighted_logsumexp(
          logx, w, axis=-1, return_sign=True)
      [actual_, actual_sgn_] = sess.run([actual, actual_sgn])
    self.assertAllEqual(expected, actual_)
    self.assertAllEqual([-1., -1, 1], actual_sgn_)

  def testKeepDims(self):
    logx_ = np.array([[0., -1, 1000.],
                      [0, 1, -1000.],
                      [-5, 0, 5]])
    w_ = np.array([[1., 1, -1],
                   [1, -2, 1],
                   [1, 0, 1]])
    expected, _ = self._reduce_weighted_logsumexp(
        logx_, w_, axis=-1, keep_dims=True)
    with self.cached_session() as sess:
      logx = tf.constant(logx_)
      w = tf.constant(w_)
      actual, actual_sgn = distribution_util.reduce_weighted_logsumexp(
          logx, w, axis=-1, return_sign=True, keep_dims=True)
      [actual_, actual_sgn_] = sess.run([actual, actual_sgn])
    self.assertAllEqual(expected, actual_)
    self.assertAllEqual([[-1.], [-1], [1]], actual_sgn_)

  def testDocString(self):
    """This test verifies the correctness of the docstring examples."""
    with self.cached_session():
      x = tf.constant([[0., 0, 0],
                       [0, 0, 0]])
      w = tf.constant([[-1., 1, 1],
                       [1, 1, 1]])

      self.assertAllClose(
          np.log(4),
          self.evaluate(distribution_util.reduce_weighted_logsumexp(x, w)))

      # A zero-sum column produces log(0) = -inf in the reference values.
      with np.errstate(divide="ignore"):
        self.assertAllClose(
            np.log([0, 2, 2]),
            self.evaluate(
                distribution_util.reduce_weighted_logsumexp(x, w, axis=0)))

      self.assertAllClose(
          np.log([1, 3]),
          self.evaluate(
              distribution_util.reduce_weighted_logsumexp(x, w, axis=1)))

      self.assertAllClose(
          np.log([[1], [3]]),
          self.evaluate(
              distribution_util.reduce_weighted_logsumexp(
                  x, w, axis=1, keep_dims=True)))

      self.assertAllClose(
          np.log(4),
          self.evaluate(
              distribution_util.reduce_weighted_logsumexp(x, w, axis=[0, 1])))
class GenNewSeedTest(tf.test.TestCase):
  """Tests for distribution_util.gen_new_seed."""

  def testOnlyNoneReturnsNone(self):
    # A concrete seed must yield a concrete new seed; only a None seed maps
    # to None. Uses assertIsNotNone/assertIsNone rather than the original
    # assertFalse/assertTrue(`x is None`) for clearer failure messages.
    self.assertIsNotNone(distribution_util.gen_new_seed(0, "salt"))
    self.assertIsNone(distribution_util.gen_new_seed(None, "salt"))
# TODO(jvdillon): Merge this test back into:
# tensorflow/python/kernel_tests/softplus_op_test.py
# once TF core is accepting new ops.
class SoftplusTest(tf.test.TestCase):
  """Tests for tf.nn.softplus and distribution_util.softplus_inverse."""

  def _npSoftplus(self, np_features):
    # Numpy reference: softplus(x) = log(1 + exp(x)), computed stably via
    # logaddexp(0, x).
    np_features = np.asarray(np_features)
    zero = np.asarray(0).astype(np_features.dtype)
    return np.logaddexp(zero, np_features)

  def _testSoftplus(self, np_features, use_gpu=False):
    # Shared driver: check softplus against the numpy reference, then check
    # that softplus_inverse recovers the original input.
    np_features = np.asarray(np_features)
    np_softplus = self._npSoftplus(np_features)
    with self.cached_session(use_gpu=use_gpu) as sess:
      softplus = tf.nn.softplus(np_features)
      softplus_inverse = distribution_util.softplus_inverse(softplus)
      [tf_softplus, tf_softplus_inverse] = sess.run([
          softplus, softplus_inverse])
    self.assertAllCloseAccordingToType(np_softplus, tf_softplus)

    rtol = {"float16": 0.07, "float32": 0.003, "float64": 0.002}.get(
        str(np_features.dtype), 1e-6)
    # This will test that we correctly computed the inverse by verifying we
    # recovered the original input.
    self.assertAllCloseAccordingToType(
        np_features, tf_softplus_inverse,
        atol=0., rtol=rtol)
    self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),
                        tf_softplus > 0)

    self.assertShapeEqual(np_softplus, softplus)
    self.assertShapeEqual(np_softplus, softplus_inverse)

    self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),
                        np.isfinite(tf_softplus))
    self.assertAllEqual(np.ones_like(tf_softplus_inverse).astype(np.bool),
                        np.isfinite(tf_softplus_inverse))

  def testNumbers(self):
    for t in [np.float16, np.float32, np.float64]:
      lower = {np.float16: -15, np.float32: -50, np.float64: -50}.get(t, -100)
      upper = {np.float16: 50, np.float32: 50, np.float64: 50}.get(t, 100)
      self._testSoftplus(
          np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(
              [2, -1]),
          use_gpu=False)
      self._testSoftplus(
          np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(
              [2, -1]),
          use_gpu=True)
      log_eps = np.log(np.finfo(t).eps)
      one = t(1)
      ten = t(10)
      self._testSoftplus(
          [
              log_eps, log_eps - one, log_eps + one, log_eps - ten,
              log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
              -log_eps - ten, -log_eps + ten
          ],
          use_gpu=False)
      # BUG FIX: a missing comma in the original merged `log_eps + ten` and
      # `-log_eps` into one element (`log_eps + ten - log_eps`), so the GPU
      # run exercised different values than the CPU run above. The list now
      # matches the CPU list exactly.
      self._testSoftplus(
          [
              log_eps, log_eps - one, log_eps + one, log_eps - ten,
              log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
              -log_eps - ten, -log_eps + ten
          ],
          use_gpu=True)

  def testGradient(self):
    with self.cached_session():
      x = tf.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
          shape=[2, 5],
          name="x")
      y = tf.nn.softplus(x, name="softplus")
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = tf.test.compute_gradient_error(
          x, [2, 5], y, [2, 5], x_init_value=x_init)
    # BUG FIX: tf.logging.vlog uses %-style formatting; the original message
    # had no placeholder for `err`, so the value was never rendered.
    tf.logging.vlog(2, "softplus (float) gradient err = %s", err)
    self.assertLess(err, 1e-4)

  def testInverseSoftplusGradientNeverNan(self):
    with self.cached_session():
      # Note that this range contains both zero and inf.
      x = tf.constant(np.logspace(-8, 6).astype(np.float16))
      y = distribution_util.softplus_inverse(x)
      grads = self.evaluate(tf.gradients(y, x)[0])
      # Equivalent to `assertAllFalse` (if it existed).
      self.assertAllEqual(np.zeros_like(grads).astype(np.bool), np.isnan(grads))

  def testInverseSoftplusGradientFinite(self):
    with self.cached_session():
      # This range of x is all finite, and so is 1 / x. So the
      # gradient and its approximations should be finite as well.
      x = tf.constant(np.logspace(-4.8, 4.5).astype(np.float16))
      y = distribution_util.softplus_inverse(x)
      grads = self.evaluate(tf.gradients(y, x)[0])
      # Equivalent to `assertAllTrue` (if it existed).
      self.assertAllEqual(
          np.ones_like(grads).astype(np.bool), np.isfinite(grads))
@test_util.run_all_in_graph_and_eager_modes
class ArgumentsTest(tf.test.TestCase):
  """Tests for distribution_util.parent_frame_arguments.

  Each test defines a local function whose body captures its own call
  arguments; the result must contain exactly the named arguments (including
  keyword extras) and nothing else.
  """

  def testNoArguments(self):
    def foo():
      return distribution_util.parent_frame_arguments()

    self.assertEqual({}, foo())

  def testPositionalArguments(self):
    def foo(a, b, c, d):  # pylint: disable=unused-argument
      return distribution_util.parent_frame_arguments()

    self.assertEqual({"a": 1, "b": 2, "c": 3, "d": 4}, foo(1, 2, 3, 4))

    # Tests that it does not matter where this function is called, and
    # no other local variables are returned back.
    def bar(a, b, c):
      unused_x = a * b
      unused_y = c * 3
      return distribution_util.parent_frame_arguments()

    self.assertEqual({"a": 1, "b": 2, "c": 3}, bar(1, 2, 3))

  def testOverloadedArgumentValues(self):
    # Rebinding a parameter inside the function reports the new value.
    def foo(a, b, c):  # pylint: disable=unused-argument
      a = 42
      b = 31
      c = 42
      return distribution_util.parent_frame_arguments()
    self.assertEqual({"a": 42, "b": 31, "c": 42}, foo(1, 2, 3))

  def testKeywordArguments(self):
    def foo(**kwargs):  # pylint: disable=unused-argument
      return distribution_util.parent_frame_arguments()

    self.assertEqual({"a": 1, "b": 2, "c": 3, "d": 4}, foo(a=1, b=2, c=3, d=4))

  def testPositionalKeywordArgs(self):
    def foo(a, b, c, **kwargs):  # pylint: disable=unused-argument
      return distribution_util.parent_frame_arguments()

    self.assertEqual({"a": 1, "b": 2, "c": 3}, foo(a=1, b=2, c=3))
    self.assertEqual({"a": 1, "b": 2, "c": 3, "unicorn": None},
                     foo(a=1, b=2, c=3, unicorn=None))

  def testNoVarargs(self):
    # *varargs values are intentionally not reported; **kwargs extras are.
    def foo(a, b, c, *varargs, **kwargs):  # pylint: disable=unused-argument
      return distribution_util.parent_frame_arguments()

    self.assertEqual({"a": 1, "b": 2, "c": 3}, foo(a=1, b=2, c=3))
    self.assertEqual({"a": 1, "b": 2, "c": 3}, foo(1, 2, 3, *[1, 2, 3]))
    self.assertEqual({"a": 1, "b": 2, "c": 3, "unicorn": None},
                     foo(1, 2, 3, unicorn=None))
    self.assertEqual({"a": 1, "b": 2, "c": 3, "unicorn": None},
                     foo(1, 2, 3, *[1, 2, 3], unicorn=None))
# Run all test cases in this module.
if __name__ == "__main__":
  tf.test.main()
| 52,649 | 4,422 | 2,527 |
409adb4c2876471e91efadcd006af57a80bddfb1 | 839 | py | Python | jobs/urls.py | mkimartinez/Lyfey | 8a27ef7fea92146f043c68c7ef5ee3672014c3fc | [
"MIT"
] | null | null | null | jobs/urls.py | mkimartinez/Lyfey | 8a27ef7fea92146f043c68c7ef5ee3672014c3fc | [
"MIT"
] | null | null | null | jobs/urls.py | mkimartinez/Lyfey | 8a27ef7fea92146f043c68c7ef5ee3672014c3fc | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.contrib.staticfiles import views as static_views
from django.conf.urls.static import static
from django.conf import settings
import jobs.views as views
from django.views.generic import ListView,DetailView
from jobs.models import Job
from rest_framework.urlpatterns import format_suffix_patterns
# urlpatterns = [
# url(r'^$', views.indexJobs, name='indexJobs'),
app_name ='jobs'
# ]
urlpatterns = [
url(r'^$', views.jobsIndex, name='jobsIndex'),
url(r'^jobslist/$', views.JobList.as_view()),
url(r'^(?P<pk>\d+)$',views.job_detail,name='job_detail'),
url(r'^create/$',views.create_job,name='jobsCreate'),
url(r'^(?P<category_slug>[-\w]+)/$', views.jobsIndex, name='jobs_by_category')
]
urlpatterns = format_suffix_patterns(urlpatterns) | 39.952381 | 79 | 0.705602 | from django.conf.urls import url
from django.contrib.staticfiles import views as static_views
from django.conf.urls.static import static
from django.conf import settings
import jobs.views as views
from django.views.generic import ListView,DetailView
from jobs.models import Job
from rest_framework.urlpatterns import format_suffix_patterns
# urlpatterns = [
# url(r'^$', views.indexJobs, name='indexJobs'),
app_name ='jobs'
# ]
urlpatterns = [
url(r'^$', views.jobsIndex, name='jobsIndex'),
url(r'^jobslist/$', views.JobList.as_view()),
url(r'^(?P<pk>\d+)$',views.job_detail,name='job_detail'),
url(r'^create/$',views.create_job,name='jobsCreate'),
url(r'^(?P<category_slug>[-\w]+)/$', views.jobsIndex, name='jobs_by_category')
]
urlpatterns = format_suffix_patterns(urlpatterns) | 0 | 0 | 0 |
924962304e469d47a501a8bfc37a4a68f7833772 | 3,253 | py | Python | 2018/day15/combat.py | scrubskip/adventofcode2018 | 8149908d1239759597fda575432cf3ec99019dc0 | [
"Apache-2.0"
] | null | null | null | 2018/day15/combat.py | scrubskip/adventofcode2018 | 8149908d1239759597fda575432cf3ec99019dc0 | [
"Apache-2.0"
] | null | null | null | 2018/day15/combat.py | scrubskip/adventofcode2018 | 8149908d1239759597fda575432cf3ec99019dc0 | [
"Apache-2.0"
] | null | null | null |
# Sorts y coordinate first, then x.
if __name__ == "__main__":
main()
| 29.844037 | 89 | 0.57178 |
def main():
    """Entry point placeholder; the simulation is driven via Board/Unit."""
    pass


# Sorts y coordinate first, then x (i.e. grid "reading order").
def POSITION_COMPARATOR(x): return (x[1], x[0])
class Board:
    """Grid state for the combat simulation.

    Rows are stored as strings (top row first); positions are (x, y) tuples
    with y indexing rows. `units` collects the Unit objects discovered while
    rows are added.
    """

    def __init__(self):
        self.state = []   # row strings, in top-to-bottom order
        self.width = -1   # set from the first row added
        self.height = 0   # kept in sync by add_row
        self.units = []   # Units found while parsing rows

    def add_row(self, row):
        """Append a row, validating its width and registering any units in it.

        Raises:
            ValueError: if the row's length differs from earlier rows.
        """
        self.state.append(row)
        if self.width == -1:
            self.width = len(row)
        elif self.width != len(row):
            # BUG FIX: the original concatenated str with int, which raised
            # TypeError instead of the intended ValueError with a message.
            raise ValueError(
                "Invalid row: doesn't match width %d: %d"
                % (self.width, len(row)))
        self.height = len(self.state)
        # Search for units ('G' or 'E') in the new row.
        # `range` replaces Python-2-only `xrange`; works on both versions.
        for index in range(self.width):
            if Unit.is_unit(row[index]):
                self.units.append(Unit(row[index], (index, self.height - 1)))

    def do_tick(self):
        """Advance one round: each unit takes a turn, in reading order."""
        # BUG FIX: POSITION_COMPARATOR expects an (x, y) tuple; the original
        # passed Unit objects, which are not subscriptable.
        self.units.sort(key=lambda unit: POSITION_COMPARATOR(unit.position))
        for unit in self.units:
            unit.update(self)

    def is_empty(self, position):
        """Return True if the (x, y) cell is open floor ('.')."""
        return self.state[position[1]][position[0]] == '.'

    def is_target(self, type, position):
        """Return True if the (x, y) cell holds an enemy of a `type` unit."""
        unit_type = 'G' if type == 'E' else 'E'
        return self.state[position[1]][position[0]] == unit_type

    @staticmethod
    def parse(input_lines):
        """Build a Board from an iterable of row strings."""
        board = Board()
        for line in input_lines:
            board.add_row(line)
        return board
class Unit:
    """A combatant ('G' goblin or 'E' elf) at an (x, y) grid position."""

    def __init__(self, type, position):
        self.hp = 200        # hit points
        self.attack = 3      # attack power
        self.position = position
        self.type = type     # 'G' or 'E'

    def update(self, board):
        """Take this unit's turn: attack an adjacent enemy, else move.

        Movement is not yet implemented beyond computing candidate squares.
        """
        # Return a concrete list (the original returned a lazy `filter`
        # object, on which len()/sort() fail under Python 3).
        targets = [u for u in board.units if u.type != self.type]
        if not targets:
            # No enemies remain; combat is over.
            return
        # Reading order: y first, then x — consistent with the module-level
        # POSITION_COMPARATOR convention (the original sorted x-first).
        targets.sort(key=lambda t: (t.position[1], t.position[0]))
        # If an enemy already occupies an adjacent square, attack it.
        for position in self.get_adjacent_positions():
            if board.is_target(self.type, position):
                # BUG FIX: Python lists have no .find(); locate the adjacent
                # target explicitly instead.
                target = next(
                    t for t in targets if t.position == position)
                self.attack_target(target)
                return
        # OK, at this point need to move. Get the possible move spaces, then
        # BFS towards them (pathfinding not yet implemented).
        possible_move_spaces = self.get_possible_move_spaces(targets, board)

    def get_targets(self, board):
        """Return all units on the board of the opposing type (as a list)."""
        return [u for u in board.units if u.type != self.type]

    def attack_target(self, target):
        """Deal damage to `target`. TODO: not implemented in the original."""
        pass

    def get_possible_move_spaces(self, targets, board):
        """Returns the possible move spaces among the targets.

        Assumes the targets are in reading order and returns the spaces in
        reading order.
        """
        return_list = []
        for target in targets:
            for position in target.get_adjacent_positions():
                if board.is_empty(position):
                    return_list.append(position)
        return_list.sort(key=POSITION_COMPARATOR)
        return return_list

    def get_adjacent_positions(self):
        """The four orthogonal neighbours in reading order: up, left, right, down."""
        x = self.position[0]
        y = self.position[1]
        return [(x, y - 1), (x - 1, y), (x + 1, y), (x, y + 1)]

    def __repr__(self):
        return "{0}({2}): {1}".format(self.type, self.position, self.hp)

    @staticmethod
    def is_unit(str):
        """True for a unit glyph: 'G' or 'E'."""
        return str == "G" or str == "E"
if __name__ == "__main__":
    main()  # main() is defined elsewhere in this module
| 2,204 | 878 | 91 |
dcae4afdf4101782c5dc6dab3ba349f227806355 | 1,564 | py | Python | ironmq-alert-pagerduty.py | OnBeep/ironmq-alert-pagerduty | 13e0fb906d494ec156069051731bf71d5c1ad257 | [
"Apache-2.0"
] | 1 | 2015-01-26T19:41:56.000Z | 2015-01-26T19:41:56.000Z | ironmq-alert-pagerduty.py | OnBeep/ironmq-alert-pagerduty | 13e0fb906d494ec156069051731bf71d5c1ad257 | [
"Apache-2.0"
] | null | null | null | ironmq-alert-pagerduty.py | OnBeep/ironmq-alert-pagerduty | 13e0fb906d494ec156069051731bf71d5c1ad257 | [
"Apache-2.0"
] | null | null | null | import argparse
import json
import pprint
import pygerduty
import time
import yaml

# Command-line interface; only -config and -payload are consumed below.
# (argparse is imported at the top of this file.)
parser = argparse.ArgumentParser(description="Simple argument parser")
parser.add_argument("-config", type=str, required=False,
                    help="The location of a file containing a JSON payload.")
parser.add_argument("-payload", type=str, required=False,
                    help="The location of a file containing a JSON payload.")
parser.add_argument("-d", type=str, required=False,
                    help="Directory")
parser.add_argument("-e", type=str, required=False,
                    help="Environment")
parser.add_argument("-id", type=str, required=False,
                    help="Task id")
args = parser.parse_args()

config = {}
if args.config is not None:
    # BUG FIX: yaml.load() without an explicit Loader can construct arbitrary
    # Python objects from untrusted input and is deprecated since PyYAML 5.1;
    # safe_load is correct for plain config data. The file handle is also
    # closed deterministically now.
    with open(args.config) as config_file:
        config = yaml.safe_load(config_file)

pdapi = config['pagerduty_api_key']
pdsvc = config['pagerduty_service_key']
pdsub = config['pagerduty_subdomain']

payload = {}
if args.payload is not None:
    with open(args.payload) as payload_file:
        payload = json.load(payload_file)

# Alert payloads are expected to look like the following:
# {
#   "alert_direction": "asc",
#   "alert_id": "54c548fc7fae9a32210f5782",
#   "alert_trigger": 1,
#   "alert_type": "fixed",
#   "created_at": "2015-01-25T19:52:39Z",
#   "queue_size": 1,
#   "source_queue": "example_queue_name"
# }
queue_name = payload['source_queue']
queue_size = payload['queue_size']
desc = 'Queue [%s] is at size [%s]' % (queue_name, queue_size)

# Keyed on the queue name so repeated alerts de-duplicate into one incident.
pagerduty = pygerduty.PagerDuty(pdsub, api_token=pdapi)
pagerduty.trigger_incident(
    pdsvc,
    desc,
    incident_key=queue_name,
    details=payload)
| 30.076923 | 70 | 0.702685 | import argparse
import json
import pprint
import pygerduty
import time
import yaml

# Command-line interface; only -config and -payload are consumed below.
# (argparse is imported at the top of this file.)
parser = argparse.ArgumentParser(description="Simple argument parser")
parser.add_argument("-config", type=str, required=False,
                    help="The location of a file containing a JSON payload.")
parser.add_argument("-payload", type=str, required=False,
                    help="The location of a file containing a JSON payload.")
parser.add_argument("-d", type=str, required=False,
                    help="Directory")
parser.add_argument("-e", type=str, required=False,
                    help="Environment")
parser.add_argument("-id", type=str, required=False,
                    help="Task id")
args = parser.parse_args()

config = {}
if args.config is not None:
    # BUG FIX: yaml.load() without an explicit Loader can construct arbitrary
    # Python objects from untrusted input and is deprecated since PyYAML 5.1;
    # safe_load is correct for plain config data. The file handle is also
    # closed deterministically now.
    with open(args.config) as config_file:
        config = yaml.safe_load(config_file)

pdapi = config['pagerduty_api_key']
pdsvc = config['pagerduty_service_key']
pdsub = config['pagerduty_subdomain']

payload = {}
if args.payload is not None:
    with open(args.payload) as payload_file:
        payload = json.load(payload_file)

# Alert payloads are expected to look like the following:
# {
#   "alert_direction": "asc",
#   "alert_id": "54c548fc7fae9a32210f5782",
#   "alert_trigger": 1,
#   "alert_type": "fixed",
#   "created_at": "2015-01-25T19:52:39Z",
#   "queue_size": 1,
#   "source_queue": "example_queue_name"
# }
queue_name = payload['source_queue']
queue_size = payload['queue_size']
desc = 'Queue [%s] is at size [%s]' % (queue_name, queue_size)

# Keyed on the queue name so repeated alerts de-duplicate into one incident.
pagerduty = pygerduty.PagerDuty(pdsub, api_token=pdapi)
pagerduty.trigger_incident(
    pdsvc,
    desc,
    incident_key=queue_name,
    details=payload)
| 0 | 0 | 0 |
3ea59de0c0a271acba84f58574d3a231f8250293 | 776 | py | Python | day02/main.py | Floozutter/aoc-2021-python | 29f670d5026d4fe834efccdfedc362052549896b | [
"Unlicense"
] | null | null | null | day02/main.py | Floozutter/aoc-2021-python | 29f670d5026d4fe834efccdfedc362052549896b | [
"Unlicense"
] | null | null | null | day02/main.py | Floozutter/aoc-2021-python | 29f670d5026d4fe834efccdfedc362052549896b | [
"Unlicense"
] | null | null | null | INPUTPATH = "input.txt"
#INPUTPATH = "input-test.txt"
with open(INPUTPATH) as ifile:
raw = ifile.read()
commands = tuple(map(parse_command, raw.strip().split("\n")))
h = d = 0
for word, n in commands:
match word:
case "forward":
h += n
case "down":
d += n
case "up":
d -= n
case _:
raise AssertionError
print(h * d)
h = d = aim = 0
for word, n in commands:
match word:
case "forward":
h += n
d += n * aim
case "down":
aim += n
case "up":
aim -= n
case _:
raise AssertionError
print(h * d)
| 20.972973 | 61 | 0.483247 | INPUTPATH = "input.txt"
#INPUTPATH = "input-test.txt"
# Read the whole puzzle input; INPUTPATH is assigned just above.
with open(INPUTPATH) as ifile:
    raw = ifile.read()
def parse_command(line: str) -> tuple[str, int]:
    """Split an "<action> <amount>" input line into (action, int(amount))."""
    action, amount = line.split()
    return action, int(amount)
commands = tuple(map(parse_command, raw.strip().split("\n")))
# Part 1: "forward" moves horizontally; "down"/"up" change depth directly.
h = d = 0
for word, n in commands:
    match word:
        case "forward":
            h += n
        case "down":
            d += n
        case "up":
            d -= n
        case _:
            raise AssertionError
print(h * d)
# Part 2: "down"/"up" change the aim; "forward" moves and dives by aim.
h = d = aim = 0
for word, n in commands:
    match word:
        case "forward":
            h += n
            d += n * aim
        case "down":
            aim += n
        case "up":
            aim -= n
        case _:
            raise AssertionError
print(h * d)
| 78 | 0 | 23 |
f0a75dd3ab42a6474f2b5a90b908b179edc5d010 | 971 | py | Python | source/scripts/python/ducktype/source/ducktype.py | lights0123/core | 85b4cf004280f64c144a7111236a03021e39fdf9 | [
"Apache-2.0"
] | 1 | 2021-03-12T11:07:19.000Z | 2021-03-12T11:07:19.000Z | source/scripts/python/ducktype/source/ducktype.py | lights0123/core | 85b4cf004280f64c144a7111236a03021e39fdf9 | [
"Apache-2.0"
] | null | null | null | source/scripts/python/ducktype/source/ducktype.py | lights0123/core | 85b4cf004280f64c144a7111236a03021e39fdf9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
| 22.068182 | 44 | 0.608651 | #!/usr/bin/env python3
def multiply(left, right):
    """Print the operation and return the product of the two operands."""
    product = left * right
    print(left, ' * ', right, ' = ', product)
    return product
def divide(left, right):
    """Print and return left / right; warn and return None when right is 0."""
    # BUG FIX: `result` was only assigned in the non-zero branch, so a zero
    # divisor crashed with UnboundLocalError at `return result`.
    result = None
    if right != 0.0:
        result = left / right
        print(left, ' / ', right, ' = ', result)
    else:
        print('Invalid right operand: ', right)
    return result
def sum(left, right):
    """Print the operation and return the sum (note: shadows the builtin sum)."""
    total = left + right
    print(left, ' + ', right, ' = ', total)
    return total
def hello():
    """Print a greeting; returns None."""
    print('Hello World from Python!!')
def strcat(left, right):
    """Print the operation and return the concatenation of the two operands."""
    joined = left + right
    print(left, ' + ', right, ' = ', joined)
    return joined
def old_style(left: int, right: int) -> int:
    """Print the operation and return the sum (fully annotated signature)."""
    total = left + right
    print(left, ' + ', right, ' = ', total)
    return total
def mixed_style(left, right: int) -> int:
    """Print the operation and return the sum (partially annotated signature)."""
    total = left + right
    print(left, ' + ', right, ' = ', total)
    return total
def mixed_style_noreturn(left, right: int):
    """Print the operation and return the sum (no return annotation)."""
    total = left + right
    print(left, ' + ', right, ' = ', total)
    return total
| 764 | 0 | 184 |
97f22bfa35408db0c6a4b26c331d672aa870f000 | 439 | py | Python | tests/core/usage/audit_usage_test.py | paulo-sampaio/detect-secrets | 73ffbc35a72cb316d9e1842cc131b6098cf3c36a | [
"Apache-2.0"
] | 2,212 | 2018-04-03T20:58:42.000Z | 2022-03-31T17:58:38.000Z | tests/core/usage/audit_usage_test.py | paulo-sampaio/detect-secrets | 73ffbc35a72cb316d9e1842cc131b6098cf3c36a | [
"Apache-2.0"
] | 354 | 2018-04-03T16:29:55.000Z | 2022-03-31T18:26:26.000Z | tests/core/usage/audit_usage_test.py | paulo-sampaio/detect-secrets | 73ffbc35a72cb316d9e1842cc131b6098cf3c36a | [
"Apache-2.0"
] | 298 | 2018-04-02T19:35:15.000Z | 2022-03-28T04:52:14.000Z | import pytest
from detect_secrets.core.usage import ParserBuilder
@pytest.fixture
| 23.105263 | 55 | 0.740319 | import pytest
from detect_secrets.core.usage import ParserBuilder
@pytest.fixture
def parser():
    """Argument parser configured with the console-use (audit) arguments."""
    return ParserBuilder().add_console_use_arguments()
def test_normal_mode_requires_single_file(parser):
    """Plain audit mode takes exactly one file; two must exit with a usage error."""
    with pytest.raises(SystemExit):
        parser.parse_args(['audit', 'fileA', 'fileB'])
def test_diff_mode_requires_two_files(parser):
    """--diff compares two baselines; supplying only one must exit with a usage error."""
    with pytest.raises(SystemExit):
        parser.parse_args(['audit', 'fileA', '--diff'])
| 284 | 0 | 68 |
6c5e94ad965376f9d4defe5b2bf499a9b31d9ca1 | 6,938 | py | Python | extraction.py | davarbri/jpegForensics | 66f20b861655481c4575430858977a80afddff65 | [
"CC0-1.0"
] | null | null | null | extraction.py | davarbri/jpegForensics | 66f20b861655481c4575430858977a80afddff65 | [
"CC0-1.0"
] | null | null | null | extraction.py | davarbri/jpegForensics | 66f20b861655481c4575430858977a80afddff65 | [
"CC0-1.0"
] | null | null | null | # !/usr/bin/python3
# David Arboledas Brihuega
# November 2021
#
# ------------------------------------------------------
# This script is called by extractForensicJPEG to get
# all the forensic data embebed in the jpeg files
# --------------------------------------------------------
# import sys
import binascii
import re
import hashlib
import verifyIMEI
from Crypto.Hash import MD5
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
TRAILER_EOF = "ffd9"  # JPEG End-Of-Image (EOI) marker, as hex text
BIN_TRAILER_NUMBER = binascii.unhexlify(TRAILER_EOF)  # EOI marker as raw bytes
IMEI_LENGTH = 15  # digits in an IMEI
HASH_LENGTH = 32  # hex digits in an MD5 digest
#FORENSIC_DATA_LENGTH = IMEI_LENGTH + HASH_LENGTH + RSA_hex_length()
primaryPics = []  # paths of files whose embedded signature verified
# Reading forensic info from file
| 34.346535 | 79 | 0.53488 | # !/usr/bin/python3
# David Arboledas Brihuega
# November 2021
#
# ------------------------------------------------------
# This script is called by extractForensicJPEG to get
# all the forensic data embebed in the jpeg files
# --------------------------------------------------------
# import sys
import binascii
import re
import hashlib
import verifyIMEI
from Crypto.Hash import MD5
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
TRAILER_EOF = "ffd9"  # JPEG End-Of-Image (EOI) marker, as hex text
BIN_TRAILER_NUMBER = binascii.unhexlify(TRAILER_EOF)  # EOI marker as raw bytes
IMEI_LENGTH = 15  # digits in an IMEI
HASH_LENGTH = 32  # hex digits in an MD5 digest
#FORENSIC_DATA_LENGTH = IMEI_LENGTH + HASH_LENGTH + RSA_hex_length()
primaryPics = []  # paths of files whose embedded signature verified
def RSA_hex_length():
    """Return the RSA signature length in hex digits, or None if public.pem is missing."""
    try:
        with open('public.pem', 'rb') as f:
            key = RSA.importKey(f.read())
        # BUG FIX: the result is a digit count compared against len() values;
        # Python 3 `/` returned a float, so use integer division. The
        # redundant f.close() inside the `with` block was dropped.
        return key.n.bit_length() // 4  # hex digits length
    except FileNotFoundError:
        msg = (
            "Sorry, Public Key file public.pem does not exist."
            "\nIt can't be verified!")
        print(msg)
def readingData(file, mode, fileNumber, totalFile):
    """Read a 'stamped' JPEG, locate its last EOI marker and extract forensic data.

    file: path of the JPEG to inspect; mode/fileNumber/totalFile are passed
    through to extract_forensic_data (mode 0 presumably means batch mode --
    TODO confirm against the caller).
    """
    # Gets a 'stamped' JPEG file
    temporary_image = file
    # and opens it to read it
    print("\n------ File number", fileNumber, '(', file, ')' "---------")
    try:
        with open(temporary_image, 'rb') as temporary_file:
            bin_vector_data = temporary_file.read()
            print("[ ] Searching for jpeg trailer...")
            # call to find the last 0xFFD9 marker offset
            jpeg_trailer_index = find_jpeg_last_trailer_index(bin_vector_data)
            if jpeg_trailer_index > 0:
                print(
                    " [+] Found jpeg trailer 0xFFD9 at offset",
                    format(jpeg_trailer_index, ",d"))
                print("[ ] Searching forensic data...")
                # call to read IMEI + original MDhash + RSA signature
                extract_forensic_data(
                    bin_vector_data,
                    jpeg_trailer_index,
                    file,
                    mode,
                    fileNumber,
                    totalFile)
            else:
                print("[-] Trailer 0xFFD9 not found. Exiting.")
    except FileNotFoundError:
        msg = "Sorry, the file " + temporary_image + " does not exist."
        print(msg)
def find_jpeg_last_trailer_index(
        data: bytes) -> int:
    """Return the offset of the last 0xFFD9 (EOI) marker in *data*, or -1 if absent."""
    # bytes.rfind scans backwards for the marker directly; the original built
    # a list of every match with re.finditer just to pop the last one, which
    # is O(n) extra memory for no benefit. rfind's -1 sentinel matches the
    # original's "not a JPEG file" contract exactly.
    return data.rfind(b'\xFF\xD9')
# Reading forensic info from file
def extract_forensic_data(
        vector: bytes,
        index: int,
        file: str,
        mode: int,
        fileNumber,
        totalFile
):
    """Parse IMEI + MD5 + RSA signature appended after the JPEG EOI marker.

    Prints diagnostics; when the trailing data has the expected length the
    signature is handed to verify_signature(). When processing the last file
    in batch mode (mode == 0) the log of verified files is written out.
    """
    # vector: JPEG image
    # index: last 0xFFD9 offset
    forensic_data = vector[index + len(BIN_TRAILER_NUMBER):].decode()
    # IMEI string recorded
    IMEI = forensic_data[:IMEI_LENGTH]
    # MDstring recorded
    MD5_hash = forensic_data[IMEI_LENGTH:IMEI_LENGTH + HASH_LENGTH]
    # JPEG file MD5 until FFD9
    fileMD5 = hashlib.md5(vector[:index + len(BIN_TRAILER_NUMBER)]).hexdigest()
    # RSA signature saved
    RSA_signature = forensic_data[IMEI_LENGTH + HASH_LENGTH:]
    FORENSIC_DATA_LENGTH = IMEI_LENGTH + HASH_LENGTH + RSA_hex_length()
    if len(forensic_data) != FORENSIC_DATA_LENGTH:
        if len(forensic_data) == 0:  # There are no data
            print(
                " [-] Forensic data not found.\n",
                "    Suspected manipulated file...")
            if fileNumber == totalFile and mode == 0:
                writeLogFile(primaryPics)
        else:
            # Wrong length: the payload was tampered with or truncated.
            print(" [-] Data NOT VALID, VOID SIGNATURE!")
            try:
                IMEI = int(IMEI)
                if verifyIMEI.isValidIMEI(str(IMEI)):
                    print(" [+] Found possible IMEI: ", IMEI, " --> OK")
                else:
                    print(" [+] Found possible IMEI: ", IMEI, " --> VOID")
            except ValueError:
                print("IMEI edited! Foresnsic data manipulated")
            finally:
                # Still flush the log when this was the last file of a batch.
                if fileNumber == totalFile and mode == 0:
                    writeLogFile(primaryPics)
    else:  # Forensic data length is OK
        print(" [+] Found IMEI: ", IMEI)
        print(" [+] Found MD5: ", MD5_hash)
        print(" [+] Calculated hash: ", fileMD5)
        print(" [+] Found RSA signature: ", RSA_signature)
        verify_signature(
            IMEI, fileMD5, RSA_signature, MD5_hash, file,
            mode, fileNumber, totalFile)
def verify_signature(
        IMEI, fileMD5, RSA_signature, MD5_hash,
        file, mode, fileNumber, totalFile):
    """Check the embedded RSA signature over (IMEI + file MD5) with public.pem.

    On a valid signature the file is reported as unedited and, in batch mode
    (mode == 0), appended to primaryPics; the log is written after the last
    file of the batch.
    """
    info = IMEI + fileMD5
    info = info.encode("utf-8")
    # Gets info hash
    hasher = MD5.new(info)
    try:
        with open('public.pem', 'rb') as f:
            key = RSA.importKey(f.read())
            f.close()
        # Hex text -> raw signature bytes.
        RSA_signature = RSA_signature.encode("utf-8")
        RSA_signature = bytes.fromhex(RSA_signature.decode("utf-8"))
        verifier = PKCS1_v1_5.new(key)
        # Now, verify the signature
        if fileNumber <= totalFile:
            if (
                    verifier.verify(hasher, RSA_signature)
                    and MD5_hash == fileMD5):
                if mode == 0:
                    print("File:", fileNumber, "Total", totalFile)
                    primaryPics.append(file)
                    print(
                        "\n\t", file, "SIGNATURE is",
                        "VALID. Image NOT edited")
                    if fileNumber == totalFile:
                        writeLogFile(primaryPics)
                else:  # only one file
                    print(
                        "\n\t", file, "SIGNATURE is",
                        "VALID. Image NOT edited")
            else:  # void signature
                print(
                    "\n\tINVALID signature!",
                    "The image file was probably edited.")
                # NOTE(review): unlike the valid branch, this one never calls
                # writeLogFile for the last batch file -- confirm intent.
                if mode == 0 and fileNumber == totalFile:
                    print("Ultimo fichero", fileNumber, "", totalFile)
    except FileNotFoundError:
        msg = (
            "Sorry, Public Key file public.pem does not exist."
            "\nIt can't be verified!")
        print(msg)
def writeLogFile(primaryPics):
    """Write the list of primary (unedited) JPEG paths to primaryImages.log."""
    # BUG FIX: when open() failed, the old `finally: file_out.close()` raised
    # NameError because file_out was never bound. A `with` block closes the
    # file safely on every path.
    try:
        with open("primaryImages.log", "w") as file_out:
            file_out.write("Primary JPEG pictures on dir\n\n")
            for file in primaryPics:
                file_out.write(file + "\n")
        print("\n [+] File primaryImages.log written...")
    except IOError:
        msg = "Unable to create file primaryImages.log on directory"
        print(msg)
| 6,088 | 0 | 146 |
d967be55e6cb52da3762f78abab30bc03ed2c8b0 | 391 | py | Python | blogs/migrations/0033_emailer_notification_text.py | daaawx/bearblog | 5e01e4443c632ff53b918cf8a0d3b1c648b352fe | [
"MIT"
] | 657 | 2020-05-26T16:16:07.000Z | 2022-03-26T22:35:01.000Z | blogs/migrations/0033_emailer_notification_text.py | daaawx/bearblog | 5e01e4443c632ff53b918cf8a0d3b1c648b352fe | [
"MIT"
] | 107 | 2020-05-26T17:45:04.000Z | 2022-03-17T08:24:00.000Z | blogs/migrations/0033_emailer_notification_text.py | daaawx/bearblog | 5e01e4443c632ff53b918cf8a0d3b1c648b352fe | [
"MIT"
] | 42 | 2020-05-26T23:57:58.000Z | 2022-03-15T04:20:26.000Z | # Generated by Django 3.0.7 on 2021-04-08 09:51
from django.db import migrations, models
| 20.578947 | 47 | 0.603581 | # Generated by Django 3.0.7 on 2021-04-08 09:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the free-text ``notification_text`` field to the Emailer model."""

    dependencies = [
        ('blogs', '0032_auto_20210408_0927'),
    ]

    operations = [
        migrations.AddField(
            model_name='emailer',
            name='notification_text',
            field=models.TextField(blank=True),
        ),
    ]
| 0 | 277 | 23 |
1bd66f574eb18045fc6794bdbca4cfae4cb4f514 | 2,388 | py | Python | src/server.py | maxalex301/toolchains | 0e8a5021e17d9ef36cf82a920f712446f5964a75 | [
"MIT"
] | null | null | null | src/server.py | maxalex301/toolchains | 0e8a5021e17d9ef36cf82a920f712446f5964a75 | [
"MIT"
] | null | null | null | src/server.py | maxalex301/toolchains | 0e8a5021e17d9ef36cf82a920f712446f5964a75 | [
"MIT"
] | null | null | null | import subprocess
from functools import reduce
| 29.481481 | 98 | 0.550251 | import subprocess
from functools import reduce
def escape(args):
    """Return args with every space-containing argument wrapped in single quotes."""
    return ["'{}'".format(arg) if ' ' in arg else arg for arg in args]
class Server:
    """Thin wrapper around ssh/rsync shell commands targeting one remote host."""

    def __init__(self, host, port, user):
        self.user = user
        self.host = host
        self.port = port

    def __host_str(self):
        # user@host string shared by ssh and rsync invocations.
        return self.user + '@' + self.host

    def __port_str(self):
        # ' -p <port>' for non-default ports, '' for port 22.
        if self.port == 22:
            return ''
        return ' -p '+str(self.port)

    def remote_dir(self, dir):
        """Return the rsync-style remote path 'user@host:dir'."""
        return '{host}:{dir}'.format(host=self.__host_str(), dir=dir)

    def sync(self, source, dest, exclude, deleteFolderContent=True):
        """Mirror source into dest with rsync --delete over ssh.

        NOTE(review): deleteFolderContent is accepted but never used; --delete
        is always passed -- confirm intent.
        """
        cmd = (
            'rsync -trvlH'
            ' -e "ssh{port}"'
            ' {exclude}'
            ' --delete'
            ' {src}/ {dst}').format(
            port=self.__port_str(),
            src=source,
            dst=dest,
            exclude=reduce(lambda x, y: x + ' --exclude {}'.format(y), exclude, '')
        )
        print(cmd)
        subprocess.check_call(cmd, shell=True)

    def upload(self, src, dest, exclude):
        """Create dest remotely, then rsync the local src directory into it."""
        self.mkdir(dest)
        self.sync(src, self.remote_dir(dest), exclude)

    def download(self, src, dest, exclude):
        """Rsync the remote src directory into the local dest."""
        self.sync(self.remote_dir(src), dest, exclude)

    def mkdir(self, path):
        """mkdir -p on the remote host."""
        self.cmd('mkdir -p ' + path)

    def rm(self, path):
        """rm -rf on the remote host."""
        self.cmd('rm -rf ' + path)

    def get_command(self, cmd):
        """Wrap cmd in an 'ssh -Aq ... "cmd"' invocation for this host."""
        return 'ssh -Aq{port} {host} "{cmd}"'.format(
            port=self.__port_str(), host=self.__host_str(), cmd=cmd)

    def replace_file_content(self, file, src, dest):
        """In-place sed substitution of src -> dest inside a remote file."""
        self.cmd("sed -i -e 's#{src}#{dest}#' {file}".format(src=src, dest=dest, file=file))

    def getenv(self, var):
        """Return the value of an environment variable on the remote host."""
        return subprocess.check_output(self.get_command('echo \$'+var), shell=True).strip()

    def cmd(self, command):
        """Run command remotely; return its exit status (0 on success)."""
        try:
            print(command)
            subprocess.check_call(self.get_command(command), shell=True)
        except subprocess.CalledProcessError as e:
            return e.returncode
        return 0

    def cmd_in_wd(self, env, wd, command):
        """Run command remotely in working directory wd with env vars prepended."""
        env_vars=['{key}={val}'.format(key=key, val=value) for key, value in env.items()]
        return self.cmd('{env} cd {wd}; {cmd}'.format(env=' '.join(env_vars), wd=wd, cmd=command))
| 1,924 | -8 | 423 |
e375caa94d739899aaba48b2a38229c7a7c23855 | 13,044 | py | Python | src/tenants/models.py | litedesk/litedesk-webserver-provision | 1576b9d3e5e2e64d1136d276767c2710cfb1938f | [
"Apache-2.0"
] | 1 | 2016-01-18T08:19:22.000Z | 2016-01-18T08:19:22.000Z | src/tenants/models.py | litedesk/litedesk-webserver-provision | 1576b9d3e5e2e64d1136d276767c2710cfb1938f | [
"Apache-2.0"
] | null | null | null | src/tenants/models.py | litedesk/litedesk-webserver-provision | 1576b9d3e5e2e64d1136d276767c2710cfb1938f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import importlib
import datetime
from django.conf import settings
from django.db import models
from django.db import transaction
from django.db.models.signals import post_save
from django.core.exceptions import ValidationError
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.template.defaultfilters import slugify
from model_utils import Choices
from model_utils.fields import StatusField
from model_utils.models import TimeStampedModel
from model_utils.managers import InheritanceManager, QueryManager
from litedesk.lib.active_directory.session import Session
from litedesk.lib.active_directory.classes.base import Company, User as ActiveDirectoryUser
from audit.models import Trackable, UntrackableChangeError
from audit.signals import pre_trackable_model_delete
from syncremote.models import Synchronizable
import tasks
log = logging.getLogger(__name__)
# Default the provisionable-services registry so the module imports cleanly
# when the setting is absent.
if not hasattr(settings, 'PROVISIONABLE_SERVICES'):
    settings.PROVISIONABLE_SERVICES = []
# Signals
# Register users with AD-controlling services on creation; deprovision them
# from all services before deletion.
post_save.connect(TenantService.on_user_creation, dispatch_uid='user_add', sender=User)
pre_trackable_model_delete.connect(User.on_removal, dispatch_uid='user_delete', sender=User)
| 33.880519 | 96 | 0.66797 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import importlib
import datetime
from django.conf import settings
from django.db import models
from django.db import transaction
from django.db.models.signals import post_save
from django.core.exceptions import ValidationError
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.template.defaultfilters import slugify
from model_utils import Choices
from model_utils.fields import StatusField
from model_utils.models import TimeStampedModel
from model_utils.managers import InheritanceManager, QueryManager
from litedesk.lib.active_directory.session import Session
from litedesk.lib.active_directory.classes.base import Company, User as ActiveDirectoryUser
from audit.models import Trackable, UntrackableChangeError
from audit.signals import pre_trackable_model_delete
from syncremote.models import Synchronizable
import tasks
log = logging.getLogger(__name__)
# Default the provisionable-services registry so the module imports cleanly
# when the setting is absent.
if not hasattr(settings, 'PROVISIONABLE_SERVICES'):
    settings.PROVISIONABLE_SERVICES = []
class ServiceMeta(object):
    """Resolve a dotted class path into a service model class plus its meta."""

    def __init__(self, class_path_string):
        module_name, class_name = class_path_string.rsplit('.', 1)
        service_class = getattr(importlib.import_module(module_name), class_name)
        self.model_class = service_class
        self.meta = service_class._meta

    @property
    def slug(self):
        return self.model_class.service_slug()

    @property
    def name(self):
        return self.meta.verbose_name

    @property
    def model(self):
        return self.model_class
class ActiveDirectory(models.Model):
    """Connection settings for one tenant's Active Directory server."""

    url = models.CharField(max_length=300)
    domain = models.CharField(max_length=200)
    ou = models.CharField(max_length=200)
    username = models.CharField(max_length=80)
    password = models.CharField(max_length=1000)

    @property
    def full_url(self):
        """LDAPS URL for the configured host."""
        return 'ldaps://%s' % self.url

    @property
    def dn(self):
        """Bind DN for the service account, built from username and host parts."""
        params = ['DC=%s' % component for component in self.url.split('.')]
        params.insert(0, 'cn=Users')
        params.insert(0, 'cn=%s' % self.username)
        return ','.join(params)

    def make_session(self):
        """Open an authenticated LDAP session against this directory."""
        return Session(self.full_url, self.dn, self.password, True)

    def find_company(self):
        """Return the Company matching the configured OU, or None."""
        session = self.make_session()
        try:
            query_results = Company.search(session, query='(ou=%s)' % self.ou)
            return query_results[0]
        except IndexError:
            return None

    def find_user(self, username):
        """Return the company user with the given sAMAccountName, or None."""
        try:
            session = self.make_session()
            query_results = Company.search(session, query='(ou=%s)' % self.ou)
            company = query_results[0]
            return [u for u in company.users if u.s_am_account_name == username].pop()
        except IndexError:
            return None

    def __unicode__(self):
        return '%s/%s (%s)' % (self.full_url, self.domain, self.ou)
class Tenant(TimeStampedModel):
    """An organization using the platform, with its members and AD settings."""

    name = models.CharField(max_length=1000, unique=True, db_index=True)
    active = models.BooleanField(default=True)
    primary_contact = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='tenant')
    members = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='peers', blank=True)
    active_directory = models.OneToOneField(ActiveDirectory, null=True, blank=True)
    email_domain = models.CharField(max_length=300, default='onmicrosoft.com')

    def get_active_directory_session(self):
        """Open an LDAP session against this tenant's Active Directory."""
        return self.active_directory.make_session()

    def get_service(self, service_slug):
        """Return this tenant's service with the given slug, or None."""
        services = self.tenantservice_set.select_subclasses()
        try:
            return [s for s in services if s.service_slug() == service_slug].pop()
        except IndexError:
            return None

    def __unicode__(self):
        return self.name
class TenantItem(models.Model):
    """Generic link between a tenant and any item object (via contenttypes)."""

    tenant = models.ForeignKey(Tenant)
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    item = GenericForeignKey('content_type', 'object_id')

    def __unicode__(self):
        return 'Item %s from %s' % (self.item, self.tenant)
class TenantService(models.Model):
    """Base model for a provisionable third-party service bound to a tenant.

    Concrete services subclass this (one per platform type); queries go
    through the InheritanceManager so results come back as subclass instances.
    """

    PLATFORM_TYPE_CHOICES = Choices('mobile', 'web', 'windows')
    PLATFORM_TYPES = [p[0] for p in PLATFORM_TYPE_CHOICES]
    ACTIVE_DIRECTORY_CONTROLLER = False
    EXPENSE_CATEGORY = 'platform'
    DEACTIVATION_EXCEPTION = Exception
    objects = InheritanceManager()
    active = QueryManager(is_active=True)
    tenant = models.ForeignKey(Tenant)
    is_active = models.BooleanField(default=True)
    api_token = models.CharField(max_length=128)

    @property
    def __subclassed__(self):
        # Re-fetch this row as its concrete subclass instance.
        return TenantService.objects.get_subclass(id=self.id)

    @property
    def service(self):
        return self.__class__.service_slug()

    @property
    def type(self):
        return self.__subclassed__.PLATFORM_TYPE

    @property
    def name(self):
        return self._meta.verbose_name

    @property
    def is_active_directory_controller(self):
        return self.ACTIVE_DIRECTORY_CONTROLLER

    def register(self, user):
        """Create the user on the remote service; must be overridden."""
        raise NotImplementedError

    def activate(self, user, editor=None):
        """Mark the service as active for the user."""
        user.services.add(self)

    def deactivate(self, user, editor=None):
        """Deprovision all of the user's items and deactivate the service user."""
        log.debug('Deactivating user %s on %s' % (user, self))
        service_user = self.get_service_user(user)
        for up in user.userprovisionable_set.filter(service=self):
            up.item.deprovision(self, user, editor=editor)
        if service_user is not None:
            try:
                service_user.deactivate()
            except self.__class__.DEACTIVATION_EXCEPTION:
                log.info('Trying to deactivate user %s, which is not active' % user)
        user.services.remove(self)

    def get_service_user(self, user):
        """Return the remote service's user object; must be overridden."""
        raise NotImplementedError

    def validate_unique(self, exclude=None):
        """Reject a second active service of the same platform type per tenant."""
        tenant_services = self.tenant.tenantservice_set.select_subclasses()
        if self.pk is not None:
            tenant_services = tenant_services.exclude(pk=self.pk)
        active_services = tenant_services.filter(is_active=True)
        if self.is_active and self.type in [s.type for s in active_services]:
            raise ValidationError('Active %s service already exists' % self.type)
        # BUG FIX: the caller's `exclude` list was silently discarded by
        # hard-coding exclude=None when delegating to the parent.
        super(TenantService, self).validate_unique(exclude=exclude)

    def __unicode__(self):
        subclassed = TenantService.objects.get_subclass(id=self.id)
        return '%s service for %s' % (subclassed.name, self.tenant)

    @classmethod
    def get_serializer_data(cls, **data):
        # FIX: first parameter of a classmethod renamed self -> cls.
        # Hook for subclasses: extra constructor kwargs derived from `data`.
        return {}

    @classmethod
    def service_slug(cls):
        """Slugified verbose name, used as the service identifier."""
        return slugify(cls._meta.verbose_name)

    @classmethod
    def make(cls, tenant, api_token, active, *args, **kw):
        """Alternate constructor combining common fields with subclass data."""
        return cls(
            tenant=tenant,
            api_token=api_token,
            is_active=active,
            **cls.get_serializer_data(**kw)
        )

    @staticmethod
    def on_user_creation(*args, **kw):
        """post_save handler: register new users with AD-controlling services."""
        if kw.get('created'):
            user = kw.get('instance')
            for service in user.tenant.tenantservice_set.select_subclasses():
                if service.is_active_directory_controller:
                    tasks.register_user_in_provisioning_service(service, user)

    @staticmethod
    def get_available():
        """Wrap every configured provisionable service path in a ServiceMeta."""
        return [ServiceMeta(s) for s in settings.PROVISIONABLE_SERVICES]
class User(Trackable, Synchronizable):
    """A tenant user mirrored to/from Active Directory.

    Changes to TRACKABLE_ATTRIBUTES are audited; SYNCHRONIZABLE_ATTRIBUTES_MAP
    maps local fields to their Active Directory attribute names.
    """

    TRACKABLE_ATTRIBUTES = ['first_name', 'last_name', 'status', 'email']
    SYNCHRONIZABLE_ATTRIBUTES_MAP = {
        'username': 's_am_account_name',
        'first_name': 'given_name',
        'last_name': 'sn',
        'email': 'mail',
        'display_name': 'display_name',
        'mobile_phone_number': 'telephone_number'
    }
    STATUS = Choices('staged', 'pending', 'active', 'suspended', 'disabled')
    tenant = models.ForeignKey(Tenant)
    first_name = models.CharField(max_length=100, null=True)
    last_name = models.CharField(max_length=100, null=True)
    display_name = models.CharField(max_length=200, null=True)
    mobile_phone_number = models.CharField(max_length=16, null=True, blank=True)
    username = models.CharField(max_length=100, editable=False)
    email = models.EmailField(null=True)
    services = models.ManyToManyField(TenantService)
    status = StatusField()

    @property
    def tenant_email(self):
        """Email address under the tenant's mail domain."""
        return '%s@%s' % (self.username, self.tenant.email_domain)

    @property
    def full_username(self):
        """User principal name under the tenant's Active Directory host."""
        return '%s@%s' % (self.username, self.tenant.active_directory.url)

    def get_remote(self):
        """Fetch the matching Active Directory user, or None."""
        return self.tenant.active_directory.find_user(self.username)

    def push(self):
        """Create or update this user's Active Directory counterpart."""
        log.info('Pushing user %s' % self)
        with transaction.atomic():
            remote_user = self.get_remote()
            session = self.tenant.get_active_directory_session()
            if remote_user is None:
                remote_user = ActiveDirectoryUser(
                    session,
                    parent=self.tenant.active_directory.find_company(),
                    given_name=self.first_name,
                    sn=self.last_name,
                    s_am_account_name=self.username,
                    mail=self.email,
                    display_name=self.display_name or self.get_default_display_name(),
                    user_principal_name=self.full_username,
                    telephone_number=self.mobile_phone_number
                )
            else:
                remote_user.mail = self.email
                remote_user.display_name = self.display_name
                remote_user.given_name = self.first_name
                remote_user.sn = self.last_name
                remote_user.telephone_number = self.mobile_phone_number
            remote_user.save()

    def pull(self):
        """Remote-to-local sync is deliberately a no-op."""
        pass

    def get_default_display_name(self):
        """Fallback display name built from first and last name."""
        return ' '.join([self.first_name, self.last_name])

    def get_provisioned_items(self, item_class=None, service=None):
        """Return the set of items provisioned for this user, optionally filtered."""
        qs = self.userprovisionable_set.all()
        if item_class is not None:
            qs = qs.filter(item_type=ContentType.objects.get_for_model(item_class))
        if service is not None:
            qs = qs.filter(service=service)
        return set([up.item for up in qs])

    def save(self, *args, **kw):
        """Synchronize with Active Directory, then persist locally."""
        with transaction.atomic():
            self.sync(force_push=kw.get('force_insert'))
            super(User, self).save(*args, **kw)

    def __unicode__(self):
        return self.username

    @classmethod
    def load(cls, remote_object, **kw):
        """Create a local User from an Active Directory record (audited)."""
        editor = kw.pop('editor', None)
        if editor is None:
            raise UntrackableChangeError('Can not load new data without tracking editor')
        # BUG FIX: the bare `except:` here also swallowed SystemExit and
        # KeyboardInterrupt; Exception still covers AttributeError and the
        # related object's DoesNotExist for editors without a tenant.
        try:
            editor_tenant = editor.tenant
        except Exception:
            editor_tenant = None
        tenant = kw.pop('tenant', editor_tenant)
        if tenant is None:
            raise ValueError('User %s has no tenant' % remote_object)
        obj = cls(
            username=remote_object.s_am_account_name,
            tenant=tenant,
            first_name=remote_object.given_name,
            last_name=remote_object.sn,
            email=remote_object.mail,
            display_name=remote_object.display_name,
            last_remote_read=datetime.datetime.now()
        )
        # NOTE(review): the new object is saved but not returned -- confirm
        # callers do not expect a return value.
        obj.save(editor=editor)

    @classmethod
    def get_remote_last_modified(cls, remote_object):
        """Parse the AD whenChanged timestamp into a datetime."""
        return datetime.datetime.strptime(remote_object.when_changed, '%Y%m%d%H%M%S.%fZ')

    @staticmethod
    def on_removal(sender, **kw):
        """Signal handler: deactivate all services before a user is deleted."""
        user = kw.get('instance')
        for service in user.services.select_subclasses():
            service.deactivate(user, editor=kw.get('editor'))

    class Meta:
        unique_together = ('tenant', 'username')
class UserGroup(models.Model):
    """A named group of users within one tenant."""

    tenant = models.ForeignKey(Tenant)
    name = models.CharField(max_length=80)
    members = models.ManyToManyField(User, blank=True)

    def __unicode__(self):
        return '%s@%s' % (self.name, self.tenant.name)

    class Meta:
        unique_together = ('name', 'tenant')
# Signals
# Register users with AD-controlling services on creation; deprovision them
# from all services before deletion.
post_save.connect(TenantService.on_user_creation, dispatch_uid='user_add', sender=User)
pre_trackable_model_delete.connect(User.on_removal, dispatch_uid='user_delete', sender=User)
| 6,875 | 4,093 | 161 |
12b095adb60425a8605df3b0b9f6a57257c592e1 | 5,067 | py | Python | seaworthy/containers/rabbitmq.py | praekeltfoundation/seaworthy | 6f10a19b45d4ea1dc3bd0553cc4d0438696c079c | [
"BSD-3-Clause"
] | 31 | 2017-11-21T12:21:31.000Z | 2021-11-16T11:50:42.000Z | seaworthy/containers/rabbitmq.py | praekeltfoundation/seaworthy | 6f10a19b45d4ea1dc3bd0553cc4d0438696c079c | [
"BSD-3-Clause"
] | 91 | 2017-08-16T14:24:52.000Z | 2020-01-16T10:13:52.000Z | seaworthy/containers/rabbitmq.py | praekeltfoundation/seaworthy | 6f10a19b45d4ea1dc3bd0553cc4d0438696c079c | [
"BSD-3-Clause"
] | 3 | 2018-10-13T07:56:02.000Z | 2019-02-14T12:29:53.000Z | """
RabbitMQ container definition.
"""
from seaworthy.definitions import ContainerDefinition
from seaworthy.utils import output_lines
def _parse_rabbitmq_user(user_line):
    """
    Parse one line of ``rabbitmqctl list_users`` output.

    Fix: this module-level helper is referenced by
    :meth:`RabbitMQContainer.list_users` but was missing, which made that
    method raise ``NameError``.

    :param user_line:
        a line of the form ``"<user>\\t[<tag>, <tag>, ...]"``
    :returns:
        a ``(user, tags)`` tuple where ``tags`` is a list of tag strings,
        empty when the user has no tags (``"[]"``)
    """
    user, tags = user_line.split('\t', 1)
    tags = tags.strip('[]')
    # An untagged user prints as "[]"; avoid returning [''] in that case.
    return (user, tags.split(', ') if tags else [])


class RabbitMQContainer(ContainerDefinition):
    """
    RabbitMQ container definition.

    .. todo::
       Write more docs.
    """

    # There seems to be a weird interaction between the erlang runtime and
    # something in docker which results in annoyingly long startup times in
    # some environments. The best we can do to deal with that is to give it a
    # bit more time to get going. :-(
    WAIT_TIMEOUT = 30.0

    DEFAULT_NAME = 'rabbitmq'
    DEFAULT_IMAGE = 'rabbitmq:alpine'
    DEFAULT_WAIT_PATTERNS = (r'Server startup complete',)

    DEFAULT_VHOST = '/vhost'
    DEFAULT_USER = 'user'
    DEFAULT_PASSWORD = 'password'

    def __init__(self,
                 name=DEFAULT_NAME,
                 image=DEFAULT_IMAGE,
                 wait_patterns=DEFAULT_WAIT_PATTERNS,
                 vhost=DEFAULT_VHOST,
                 user=DEFAULT_USER,
                 password=DEFAULT_PASSWORD,
                 **kwargs):
        """
        :param vhost: the name of a vhost to create at startup
        :param user: the name of a user to create at startup
        :param password: the password for the user
        """
        super().__init__(name, image, wait_patterns, **kwargs)
        self.vhost = vhost
        self.user = user
        self.password = password

    def wait_for_start(self):
        """
        Wait for the RabbitMQ process to be come up.
        """
        er = self.exec_rabbitmqctl(
            'wait', ['--pid', '1', '--timeout', str(int(self.wait_timeout))])
        output_lines(er, error_exc=TimeoutError)

    def base_kwargs(self):
        """
        Add a ``tmpfs`` entry for ``/var/lib/rabbitmq`` to avoid unnecessary
        disk I/O and ``environment`` entries for the configured vhost and user
        creds.
        """
        return {
            'environment': {
                'RABBITMQ_DEFAULT_VHOST': self.vhost,
                'RABBITMQ_DEFAULT_USER': self.user,
                'RABBITMQ_DEFAULT_PASS': self.password,
            },
            'tmpfs': {'/var/lib/rabbitmq': 'uid=100,gid=101'},
        }

    def clean(self):
        """
        Remove all data by using ``rabbitmqctl`` to eval
        ``rabbit_mnesia:reset()``.
        """
        reset_erl = 'rabbit:stop(), rabbit_mnesia:reset(), rabbit:start().'
        self.exec_rabbitmqctl('eval', [reset_erl])

    def exec_rabbitmqctl(self, command, args=[], rabbitmqctl_opts=['-q']):
        """
        Execute a ``rabbitmqctl`` command inside a running container.

        :param command: the command to run
        :param args: a list of args for the command
        :param rabbitmqctl_opts:
            a list of extra options to pass to ``rabbitmqctl``
        :returns: a tuple of the command exit code and output
        """
        cmd = ['rabbitmqctl'] + rabbitmqctl_opts + [command] + args
        return self.inner().exec_run(cmd)

    def exec_rabbitmqctl_list(self, resources, args=[],
                              rabbitmq_opts=['-q', '--no-table-headers']):
        """
        Execute a ``rabbitmqctl`` command to list the given resources.

        :param resources: the resources to list, e.g. ``'vhosts'``
        :param args: a list of args for the command
        :param rabbitmq_opts:
            a list of extra options to pass to ``rabbitmqctl``
        :returns: a tuple of the command exit code and output
        """
        command = 'list_{}'.format(resources)
        return self.exec_rabbitmqctl(command, args, rabbitmq_opts)

    def list_vhosts(self):
        """
        Run the ``list_vhosts`` command and return a list of vhost names.
        """
        return output_lines(self.exec_rabbitmqctl_list('vhosts'))

    def list_queues(self):
        """
        Run the ``list_queues`` command (for the default vhost) and return a
        list of tuples describing the queues.

        :return:
            A list of 2-element tuples. The first element is the queue name,
            the second is the current queue size.
        """
        lines = output_lines(
            self.exec_rabbitmqctl_list('queues', ['-p', self.vhost]))
        return [tuple(line.split(None, 1)) for line in lines]

    def list_users(self):
        """
        Run the ``list_users`` command and return a list of tuples describing
        the users.

        :return:
            A list of 2-element tuples. The first element is the username, the
            second a list of tags for the user.
        """
        lines = output_lines(self.exec_rabbitmqctl_list('users'))
        return [_parse_rabbitmq_user(line) for line in lines]

    def broker_url(self):
        """ Returns a "broker URL" for use with Celery. """
        return 'amqp://{}:{}@{}/{}'.format(
            self.user, self.password, self.name, self.vhost)
| 34.006711 | 78 | 0.594632 | """
RabbitMQ container definition.
"""
from seaworthy.definitions import ContainerDefinition
from seaworthy.utils import output_lines
def _parse_rabbitmq_user(user_line):
    """
    Parse one line of ``rabbitmqctl list_users`` output.

    :param user_line:
        a line of the form ``"<user>\\t[<tag>, <tag>, ...]"``
    :returns:
        a ``(user, tags)`` tuple where ``tags`` is a list of tag strings,
        empty when the user has no tags (``"[]"``)
    """
    user, tags = user_line.split('\t', 1)
    tags = tags.strip('[]')
    # Edge-case fix: an untagged user prints as "[]"; splitting the empty
    # string used to yield [''] instead of an empty tag list.
    return (user, tags.split(', ') if tags else [])
class RabbitMQContainer(ContainerDefinition):
    """
    RabbitMQ container definition.

    .. todo::
       Write more docs.
    """

    # Erlang VM startup inside Docker is slow in some environments, so give
    # the broker extra time before declaring the container dead.
    WAIT_TIMEOUT = 30.0

    DEFAULT_NAME = 'rabbitmq'
    DEFAULT_IMAGE = 'rabbitmq:alpine'
    DEFAULT_WAIT_PATTERNS = (r'Server startup complete',)

    DEFAULT_VHOST = '/vhost'
    DEFAULT_USER = 'user'
    DEFAULT_PASSWORD = 'password'

    def __init__(self,
                 name=DEFAULT_NAME,
                 image=DEFAULT_IMAGE,
                 wait_patterns=DEFAULT_WAIT_PATTERNS,
                 vhost=DEFAULT_VHOST,
                 user=DEFAULT_USER,
                 password=DEFAULT_PASSWORD,
                 **kwargs):
        """
        :param vhost: the name of a vhost to create at startup
        :param user: the name of a user to create at startup
        :param password: the password for the user
        """
        super().__init__(name, image, wait_patterns, **kwargs)
        self.password = password
        self.user = user
        self.vhost = vhost

    def wait_for_start(self):
        """
        Block until the RabbitMQ server process inside the container is up.
        """
        timeout_arg = str(int(self.wait_timeout))
        result = self.exec_rabbitmqctl(
            'wait', ['--pid', '1', '--timeout', timeout_arg])
        output_lines(result, error_exc=TimeoutError)

    def base_kwargs(self):
        """
        Extra container-creation kwargs: a ``tmpfs`` mount for
        ``/var/lib/rabbitmq`` (avoids unnecessary disk I/O) plus environment
        variables for the configured vhost and user credentials.
        """
        env = {
            'RABBITMQ_DEFAULT_VHOST': self.vhost,
            'RABBITMQ_DEFAULT_USER': self.user,
            'RABBITMQ_DEFAULT_PASS': self.password,
        }
        return {
            'environment': env,
            'tmpfs': {'/var/lib/rabbitmq': 'uid=100,gid=101'},
        }

    def clean(self):
        """
        Wipe all broker state by eval-ing ``rabbit_mnesia:reset()`` through
        ``rabbitmqctl``.
        """
        erl_snippet = 'rabbit:stop(), rabbit_mnesia:reset(), rabbit:start().'
        self.exec_rabbitmqctl('eval', [erl_snippet])

    def exec_rabbitmqctl(self, command, args=[], rabbitmqctl_opts=['-q']):
        """
        Execute a ``rabbitmqctl`` command inside a running container.

        :param command: the command to run
        :param args: a list of args for the command
        :param rabbitmqctl_opts:
            a list of extra options to pass to ``rabbitmqctl``
        :returns: a tuple of the command exit code and output
        """
        full_cmd = ['rabbitmqctl', *rabbitmqctl_opts, command, *args]
        return self.inner().exec_run(full_cmd)

    def exec_rabbitmqctl_list(self, resources, args=[],
                              rabbitmq_opts=['-q', '--no-table-headers']):
        """
        Execute a ``rabbitmqctl`` command to list the given resources.

        :param resources: the resources to list, e.g. ``'vhosts'``
        :param args: a list of args for the command
        :param rabbitmq_opts:
            a list of extra options to pass to ``rabbitmqctl``
        :returns: a tuple of the command exit code and output
        """
        return self.exec_rabbitmqctl(
            'list_{}'.format(resources), args, rabbitmq_opts)

    def list_vhosts(self):
        """
        Run the ``list_vhosts`` command and return a list of vhost names.
        """
        result = self.exec_rabbitmqctl_list('vhosts')
        return output_lines(result)

    def list_queues(self):
        """
        Run the ``list_queues`` command (for the default vhost) and return a
        list of tuples describing the queues.

        :return:
            A list of 2-element tuples. The first element is the queue name,
            the second is the current queue size.
        """
        result = self.exec_rabbitmqctl_list('queues', ['-p', self.vhost])
        queues = []
        for line in output_lines(result):
            queues.append(tuple(line.split(None, 1)))
        return queues

    def list_users(self):
        """
        Run the ``list_users`` command and return a list of tuples describing
        the users.

        :return:
            A list of 2-element tuples. The first element is the username, the
            second a list of tags for the user.
        """
        result = self.exec_rabbitmqctl_list('users')
        return [_parse_rabbitmq_user(user_line)
                for user_line in output_lines(result)]

    def broker_url(self):
        """ Returns a "broker URL" for use with Celery. """
        return 'amqp://{}:{}@{}/{}'.format(
            self.user, self.password, self.name, self.vhost)
b9fca280646e6e89fc5b84533dc2566e7941b0bd | 9,410 | py | Python | Lib/gds/burp/api.py | mwielgoszewski/jython-burp-api | 002383f7acc5fb237e3804fe5bd2aa2950a0240d | [
"0BSD"
] | 134 | 2015-01-21T14:22:42.000Z | 2021-09-02T10:52:43.000Z | Lib/gds/burp/api.py | d453d2/burp-jython-console | 3cec3200ede2da0f1cdbf935efc340f073c07ea2 | [
"0BSD"
] | 7 | 2015-01-19T16:54:45.000Z | 2018-10-10T15:10:13.000Z | Lib/gds/burp/api.py | d453d2/burp-jython-console | 3cec3200ede2da0f1cdbf935efc340f073c07ea2 | [
"0BSD"
] | 29 | 2015-02-13T14:08:23.000Z | 2021-12-17T03:17:40.000Z | # -*- coding: utf-8 -*-
'''
gds.burp.api
~~~~~~~~~~~~
This module implements the Jython Burp Plugin API.
Plugins written in Jython can implement the interfaces in this
package in order to register for various methods exposed by
Burp Extender.
'''
from .core import Interface
# Names exported via ``from gds.burp.api import *`` — one handler interface
# per Burp tool/event hook defined below.
__all__ = [
    'INewScanIssueHandler',
    'IExtenderRequestHandler',
    'IExtenderResponseHandler',
    'IIntruderRequestHandler',
    'IIntruderResponseHandler',
    'IProxyRequestHandler',
    'IProxyResponseHandler',
    'IRepeaterRequestHandler',
    'IRepeaterResponseHandler',
    'IScannerRequestHandler',
    'IScannerResponseHandler',
    'ISequencerRequestHandler',
    'ISequencerResponseHandler',
    'ISpiderRequestHandler',
    'ISpiderResponseHandler',
    'ITargetRequestHandler',
    'ITargetResponseHandler',
]
class INewScanIssueHandler(Interface):
    '''
    Extension point interface for components to perform actions
    whenever Burp Scanner discovers a new, unique issue.

    Classes that implement this interface must implement the
    :meth:`newScanIssue` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def newScanIssue(issue):
        '''
        This method is invoked whenever Burp Scanner discovers a new,
        unique issue, and can be used to perform customised reporting
        or logging of issues.

        :param issue: An :class:`burp.IScanIssue <IScanIssue>` object.
        '''
class IExtenderRequestHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a request before Burp Extender sends it on the wire.

    Classes that implement this interface must implement the
    :meth:`processRequest` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processRequest(request):
        '''
        This method is invoked before Burp Extender sends a request
        on the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IExtenderResponseHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a response after Burp Extender receives it off the wire.

    Classes that implement this interface must implement the
    :meth:`processResponse` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processResponse(request):
        '''
        This method is invoked after Burp Extender receives a response
        off the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IIntruderRequestHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a request before Burp Intruder sends it on the wire.

    Classes that implement this interface must implement the
    :meth:`processRequest` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processRequest(request):
        '''
        This method is invoked before Burp Intruder sends a request
        on the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IIntruderResponseHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a response after Burp Intruder receives it off the wire.

    Classes that implement this interface must implement the
    :meth:`processResponse` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processResponse(request):
        '''
        This method is invoked after Burp Intruder receives a response
        off the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IProxyRequestHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a request before Burp Proxy sends it on the wire.

    Classes that implement this interface must implement the
    :meth:`processRequest` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processRequest(request):
        '''
        This method is invoked before Burp Proxy sends a request
        on the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IProxyResponseHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a response after Burp Proxy receives it off the wire.

    Classes that implement this interface must implement the
    :meth:`processResponse` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processResponse(request):
        '''
        This method is invoked after Burp Proxy receives a response
        off the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IRepeaterRequestHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a request before Burp Repeater sends it on the wire.

    Classes that implement this interface must implement the
    :meth:`processRequest` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processRequest(request):
        '''
        This method is invoked before Burp Repeater sends a request
        on the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IRepeaterResponseHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a response after Burp Repeater receives it off the wire.

    Classes that implement this interface must implement the
    :meth:`processResponse` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processResponse(request):
        '''
        This method is invoked after Burp Repeater receives a response
        off the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IScannerRequestHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a request before Burp Scanner sends it on the wire.

    Classes that implement this interface must implement the
    :meth:`processRequest` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processRequest(request):
        '''
        This method is invoked before Burp Scanner sends a request
        on the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IScannerResponseHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a response after Burp Scanner receives it off the wire.

    Classes that implement this interface must implement the
    :meth:`processResponse` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processResponse(request):
        '''
        This method is invoked after Burp Scanner receives a response
        off the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class ISequencerRequestHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a request before Burp Sequencer sends it on the wire.

    Classes that implement this interface must implement the
    :meth:`processRequest` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processRequest(request):
        '''
        This method is invoked before Burp Sequencer sends a request
        on the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class ISequencerResponseHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a response after Burp Sequencer receives it off the wire.

    Classes that implement this interface must implement the
    :meth:`processResponse` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processResponse(request):
        '''
        This method is invoked after Burp Sequencer receives a response
        off the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class ISpiderRequestHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a request before Burp Spider sends it on the wire.

    Classes that implement this interface must implement the
    :meth:`processRequest` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processRequest(request):
        '''
        This method is invoked before Burp Spider sends a request
        on the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class ISpiderResponseHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a response after Burp Spider receives it off the wire.

    Classes that implement this interface must implement the
    :meth:`processResponse` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processResponse(request):
        '''
        This method is invoked after Burp Spider receives a response
        off the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class ITargetRequestHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a request before Burp Target sends it on the wire.

    Classes that implement this interface must implement the
    :meth:`processRequest` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processRequest(request):
        '''
        This method is invoked before Burp Target sends a request
        on the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class ITargetResponseHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a response after Burp Target receives it off the wire.

    Classes that implement this interface must implement the
    :meth:`processResponse` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processResponse(request):
        '''
        This method is invoked after Burp Target receives a response
        off the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
| 27.595308 | 71 | 0.679809 | # -*- coding: utf-8 -*-
'''
gds.burp.api
~~~~~~~~~~~~
This module implements the Jython Burp Plugin API.
Plugins written in Jython can implement the interfaces in this
package in order to register for various methods exposed by
Burp Extender.
'''
from .core import Interface
# Names exported via ``from gds.burp.api import *`` — one handler interface
# per Burp tool/event hook defined below.
__all__ = [
    'INewScanIssueHandler',
    'IExtenderRequestHandler',
    'IExtenderResponseHandler',
    'IIntruderRequestHandler',
    'IIntruderResponseHandler',
    'IProxyRequestHandler',
    'IProxyResponseHandler',
    'IRepeaterRequestHandler',
    'IRepeaterResponseHandler',
    'IScannerRequestHandler',
    'IScannerResponseHandler',
    'ISequencerRequestHandler',
    'ISequencerResponseHandler',
    'ISpiderRequestHandler',
    'ISpiderResponseHandler',
    'ITargetRequestHandler',
    'ITargetResponseHandler',
]
class INewScanIssueHandler(Interface):
    '''
    Extension point interface for components to perform actions
    whenever Burp Scanner discovers a new, unique issue.

    Classes that implement this interface must implement the
    :meth:`newScanIssue` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def newScanIssue(issue):
        '''
        This method is invoked whenever Burp Scanner discovers a new,
        unique issue, and can be used to perform customised reporting
        or logging of issues.

        :param issue: An :class:`burp.IScanIssue <IScanIssue>` object.
        '''
class IExtenderRequestHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a request before Burp Extender sends it on the wire.

    Classes that implement this interface must implement the
    :meth:`processRequest` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processRequest(request):
        '''
        This method is invoked before Burp Extender sends a request
        on the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IExtenderResponseHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a response after Burp Extender receives it off the wire.

    Classes that implement this interface must implement the
    :meth:`processResponse` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processResponse(request):
        '''
        This method is invoked after Burp Extender receives a response
        off the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IIntruderRequestHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a request before Burp Intruder sends it on the wire.

    Classes that implement this interface must implement the
    :meth:`processRequest` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processRequest(request):
        '''
        This method is invoked before Burp Intruder sends a request
        on the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IIntruderResponseHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a response after Burp Intruder receives it off the wire.

    Classes that implement this interface must implement the
    :meth:`processResponse` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processResponse(request):
        '''
        This method is invoked after Burp Intruder receives a response
        off the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IProxyRequestHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a request before Burp Proxy sends it on the wire.

    Classes that implement this interface must implement the
    :meth:`processRequest` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processRequest(request):
        '''
        This method is invoked before Burp Proxy sends a request
        on the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IProxyResponseHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a response after Burp Proxy receives it off the wire.

    Classes that implement this interface must implement the
    :meth:`processResponse` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processResponse(request):
        '''
        This method is invoked after Burp Proxy receives a response
        off the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IRepeaterRequestHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a request before Burp Repeater sends it on the wire.

    Classes that implement this interface must implement the
    :meth:`processRequest` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processRequest(request):
        '''
        This method is invoked before Burp Repeater sends a request
        on the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IRepeaterResponseHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a response after Burp Repeater receives it off the wire.

    Classes that implement this interface must implement the
    :meth:`processResponse` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processResponse(request):
        '''
        This method is invoked after Burp Repeater receives a response
        off the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IScannerRequestHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a request before Burp Scanner sends it on the wire.

    Classes that implement this interface must implement the
    :meth:`processRequest` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processRequest(request):
        '''
        This method is invoked before Burp Scanner sends a request
        on the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class IScannerResponseHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a response after Burp Scanner receives it off the wire.

    Classes that implement this interface must implement the
    :meth:`processResponse` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processResponse(request):
        '''
        This method is invoked after Burp Scanner receives a response
        off the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class ISequencerRequestHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a request before Burp Sequencer sends it on the wire.

    Classes that implement this interface must implement the
    :meth:`processRequest` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processRequest(request):
        '''
        This method is invoked before Burp Sequencer sends a request
        on the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class ISequencerResponseHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a response after Burp Sequencer receives it off the wire.

    Classes that implement this interface must implement the
    :meth:`processResponse` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processResponse(request):
        '''
        This method is invoked after Burp Sequencer receives a response
        off the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class ISpiderRequestHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a request before Burp Spider sends it on the wire.

    Classes that implement this interface must implement the
    :meth:`processRequest` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processRequest(request):
        '''
        This method is invoked before Burp Spider sends a request
        on the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class ISpiderResponseHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a response after Burp Spider receives it off the wire.

    Classes that implement this interface must implement the
    :meth:`processResponse` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processResponse(request):
        '''
        This method is invoked after Burp Spider receives a response
        off the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class ITargetRequestHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a request before Burp Target sends it on the wire.

    Classes that implement this interface must implement the
    :meth:`processRequest` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processRequest(request):
        '''
        This method is invoked before Burp Target sends a request
        on the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
class ITargetResponseHandler(Interface):
    '''
    Extension point interface for components to perform actions on
    a response after Burp Target receives it off the wire.

    Classes that implement this interface must implement the
    :meth:`processResponse` method.
    '''

    # Declared without ``self``: interfaces only specify the signature
    # that implementing classes must provide.
    def processResponse(request):
        '''
        This method is invoked after Burp Target receives a response
        off the wire.

        :param request: An :class:`HttpRequest <HttpRequest>` object.
        '''
| 0 | 0 | 0 |
f08ef18d930caa30a2711dbb9e63e39d48e1c32a | 498 | py | Python | SinGAN/tasks/train.py | cbosoft/SinGAN | c3c950ddf59ae57e4685adc555f5cb4723bd7e78 | [
"MIT"
] | null | null | null | SinGAN/tasks/train.py | cbosoft/SinGAN | c3c950ddf59ae57e4685adc555f5cb4723bd7e78 | [
"MIT"
] | null | null | null | SinGAN/tasks/train.py | cbosoft/SinGAN | c3c950ddf59ae57e4685adc555f5cb4723bd7e78 | [
"MIT"
] | null | null | null | from SinGAN.training import train
from SinGAN.util import read_image, adjust_scales_to_image
| 26.210526 | 59 | 0.660643 | from SinGAN.training import train
from SinGAN.util import read_image, adjust_scales_to_image
def run_task_train(cfg):
    """
    Train a SinGAN model on the single training image named in *cfg*.

    :param cfg: configuration object; ``cfg.training.image`` is read as the
        path to the training image (other fields are consumed by the SinGAN
        helpers — see ``SinGAN.training`` / ``SinGAN.util``).
    """
    # Read training image
    real = read_image(cfg.training.image, cfg)
    # Presumably mutates cfg with the scale pyramid derived from the
    # image size — TODO confirm against SinGAN.util.
    adjust_scales_to_image(real, cfg)
    # Data store for training params/weights. ``data`` aliases the same four
    # lists that are also bound individually (Gs, Zs, reals, noise_amp);
    # train() fills them in place.
    data = Gs, Zs, reals, noise_amp = [], [], [], []
    # Train!
    train(*data, cfg)
    # NOTE(review): looks like a debug leftover — confirm whether printing
    # cfg.training.date is intentional.
    print(cfg.training.date)
    # Generate? Check if the trained model works?
    # SinGAN_generate(*data, cfg)
| 377 | 0 | 24 |