hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f0b9a9734fc82602ebc66cd8758b5aacd1abcb0d | 17,679 | py | Python | rbf/linalg.py | aaronbalzac/rbf-neural-network | 69c86ccb4e8041fdfb7ce9eff1e4db98648d6b5f | [
"MIT"
] | 1 | 2021-05-12T06:52:11.000Z | 2021-05-12T06:52:11.000Z | rbf/linalg.py | 0x41head/rbf-neural-network | 69c86ccb4e8041fdfb7ce9eff1e4db98648d6b5f | [
"MIT"
] | null | null | null | rbf/linalg.py | 0x41head/rbf-neural-network | 69c86ccb4e8041fdfb7ce9eff1e4db98648d6b5f | [
"MIT"
] | null | null | null | '''
Module for linear algebra routines.
'''
import logging
import warnings
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla
# low-level LAPACK routines: Cholesky factor/solve (dpotrf/dpotrs),
# triangular solve (dtrtrs), and LU factor/solve (dgetrf/dgetrs)
from scipy.linalg.lapack import (dpotrf, dpotrs, dtrtrs, dgetrf,
                                 dgetrs)
from rbf.sputils import row_norms, divide_rows

LOGGER = logging.getLogger(__name__)

# CHOLMOD (from scikit-sparse) is an optional dependency used for sparse
# Cholesky factorizations; when missing, sparse matrices are densified first.
try:
    from sksparse import cholmod
    HAS_CHOLMOD = True
except ImportError:
    HAS_CHOLMOD = False

# Warning text shown whenever a sparse Cholesky request falls back to dense.
CHOLMOD_MSG = (
    'Could not import CHOLMOD. Sparse matrices will be converted to dense for '
    'all Cholesky decompositions. To install CHOLMOD and its python wrapper, '
    'follow the instructions at https://scikit-sparse.readthedocs.io. '
    'Anaconda users can install CHOLMOD with the command `conda install -c '
    'conda-forge scikit-sparse`')
LOGGER.debug(CHOLMOD_MSG)
## Wrappers for low level LAPACK functions
#####################################################################
def _lu(A):
'''
Computes the LU factorization of `A` using the routine `dgetrf`
Parameters
----------
A : (n, n) float array
Returns
-------
(n, n) float array
LU factorization
(n,) int array
pivots
'''
# handle rank zero matrix
if A.shape == (0, 0):
return (np.zeros((0, 0), dtype=float),
np.zeros((0,), dtype=np.int32))
# get the LU factorization
fac, piv, info = dgetrf(A)
if info < 0:
raise ValueError('the %s-th argument had an illegal value' % -info)
elif info > 0:
raise np.linalg.LinAlgError(
'U(%s, %s) is exactly zero. The factorization has been completed, but '
'the factor U is exactly singular, and division by zero will occur if '
'it is used to solve a system of equations. ' % (info, info))
return fac, piv
def _solve_lu(fac, piv, b):
'''
Solves the system of equations `Ax = b` given the LU factorization of `A`.
Uses the `dgetrs` routine.
Parameters
----------
fac : (n, n) float array
piv : (n,) int array
b : (n, *) float array
Returns
-------
(n, *) float array
'''
# handle the case of an array with zero-length for an axis.
if any(i == 0 for i in b.shape):
return np.zeros(b.shape)
x, info = dgetrs(fac, piv, b)
if info != 0:
raise ValueError('the %s-th argument had an illegal value' % -info)
return x
def _cholesky(A, lower=True):
'''
Computes the Cholesky decomposition of `A` using the routine `dpotrf`.
Parameters
----------
A : (n, n) float array
lower : bool, optional
Returns
-------
(n, n) float array
'''
# handle rank zero matrix
if A.shape == (0, 0):
return np.zeros((0, 0), dtype=float)
L, info = dpotrf(A, lower=lower)
if info > 0:
raise np.linalg.LinAlgError(
'The leading minor of order %s is not positive definite, and the '
'factorization could not be completed. ' % info)
elif info < 0:
raise ValueError('The %s-th argument has an illegal value.' % -info)
return L
def _solve_cholesky(L, b, lower=True):
'''
Solves the system of equations `Ax = b` given the Cholesky decomposition of
`A`. Uses the routine `dpotrs`.
Parameters
----------
L : (n, n) float array
b : (n, *) float array
Returns
-------
(n, *) float array
'''
if any(i == 0 for i in b.shape):
return np.zeros(b.shape)
x, info = dpotrs(L, b, lower=lower)
if info < 0:
raise ValueError('The %s-th argument has an illegal value.' % -info)
return x
def _solve_triangular(L, b, lower=True):
'''
Solve the triangular system of equations `Lx = b` using `dtrtrs`.
Parameters
----------
L : (n, n) float array
b : (n, *) float array
Returns
-------
(n, *) float array
'''
if any(i == 0 for i in b.shape):
return np.zeros(b.shape)
x, info = dtrtrs(L, b, lower=lower)
if info < 0:
raise ValueError('The %s-th argument had an illegal value' % (-info))
elif info > 0:
raise np.linalg.LinAlgError(
'The %s-th diagonal element of A is zero, indicating that the matrix is '
'singular and the solutions X have not been computed.' % info)
return x
#####################################################################
def as_sparse_or_array(A, dtype=None, copy=False):
    '''
    Coerce `A` to a CSC sparse matrix when it is sparse, and to a numpy array
    otherwise. When `A` is already CSC with the requested dtype and `copy` is
    False, no copy is made.
    '''
    if sp.issparse(A):
        return sp.csc_matrix(A, dtype=dtype, copy=copy)
    return np.array(A, dtype=dtype, copy=copy)
def as_array(A, dtype=None, copy=False):
    '''
    Return `A` as a numpy array, densifying it first when it is a scipy
    sparse matrix.
    '''
    dense = A.toarray() if sp.issparse(A) else A
    return np.array(dense, dtype=dtype, copy=copy)
class _SparseSolver(object):
    '''
    Holds the SuperLU factorization of the sparse matrix `A` and solves
    linear systems against it.
    '''
    def __init__(self, A):
        LOGGER.debug(
            'computing the LU decomposition of a %s by %s sparse matrix with %s '
            'nonzeros ' % (A.shape + (A.nnz,)))
        self.factor = spla.splu(A)

    def solve(self, b):
        '''solves `Ax = b` for `x`'''
        return self.factor.solve(b)
class _DenseSolver(object):
    '''
    Holds the LAPACK LU factorization of the dense matrix `A` and solves
    linear systems against it.
    '''
    def __init__(self, A):
        # store the combined factors and the pivot indices from `dgetrf`
        self.fac, self.piv = _lu(A)

    def solve(self, b):
        '''solves `Ax = b` for `x`'''
        return _solve_lu(self.fac, self.piv, b)
class Solver(object):
    '''
    Computes an LU factorization of `A` and provides a method to solve
    `Ax = b` for `x`. `A` can be a scipy sparse matrix or a numpy array; the
    appropriate backend (SuperLU or LAPACK) is picked automatically.

    Parameters
    ----------
    A : (n, n) array or scipy sparse matrix
    '''
    def __init__(self, A):
        A = as_sparse_or_array(A, dtype=float)
        backend = _SparseSolver if sp.issparse(A) else _DenseSolver
        self._solver = backend(A)

    def solve(self, b):
        '''
        solves `Ax = b` for `x`

        Parameters
        ----------
        b : (n, *) array or sparse matrix

        Returns
        -------
        (n, *) array
        '''
        return self._solver.solve(as_array(b, dtype=float))
class _SparsePosDefSolver(object):
    '''
    Factors the sparse positive definite matrix `A` as `LL^T = A` with
    CHOLMOD. Note that `L` is NOT necessarily the lower triangular Cholesky
    factor; CHOLMOD permutes `A` so that `L` is maximally sparse. Requires
    the scikit-sparse package.
    '''
    def __init__(self, A):
        LOGGER.debug(
            'computing the Cholesky decomposition of a %s by %s sparse matrix with '
            '%s nonzeros ' % (A.shape + (A.nnz,)))
        self.factor = cholmod.cholesky(
            A,
            use_long=False,
            ordering_method='default')
        # squared diagonal entries of the factorization
        self.d = self.factor.D()
        # fill-reducing permutation CHOLMOD applied to `A` before factoring
        self.p = self.factor.P()

    def solve(self, b):
        '''solves `Ax = b` for `x`'''
        return self.factor.solve_A(b)

    def solve_L(self, b):
        '''Solves `Lx = b` for `x`'''
        if b.ndim not in (1, 2):
            raise ValueError('`b` must be a one or two dimensional array')
        scale = 1.0/np.sqrt(self.d)
        if b.ndim == 2:
            # add a trailing axis so the scale broadcasts over columns
            scale = scale[:, None]
        # permute `b` to match the ordering CHOLMOD factored under
        return scale*self.factor.solve_L(b[self.p])

    def L(self):
        '''Return the factorization `L`'''
        # undo the fill-reducing permutation on the rows of `L`
        return self.factor.L()[np.argsort(self.p)]

    def log_det(self):
        '''Returns the log determinant of `A`'''
        return np.log(self.d).sum()
class _DensePosDefSolver(object):
    '''
    Holds the LAPACK Cholesky factorization of a dense positive definite
    matrix `A` and solves systems against it.
    '''
    def __init__(self, A):
        # lower triangular factor of `A`
        self.chol = _cholesky(A, lower=True)

    def solve(self, b):
        '''Solves the equation `Ax = b` for `x`'''
        return _solve_cholesky(self.chol, b, lower=True)

    def solve_L(self, b):
        '''
        Solves the equation `Lx = b` for `x`, where `L` is the Cholesky
        factor of `A`.
        '''
        return _solve_triangular(self.chol, b, lower=True)

    def L(self):
        '''Returns the Cholesky decomposition of `A`'''
        return self.chol

    def log_det(self):
        '''Returns the log determinant of `A`'''
        # log(det(A)) = 2 * sum(log(diag(L)))
        return 2.0*np.log(np.diag(self.chol)).sum()
class PosDefSolver(object):
    '''
    Factors the positive definite matrix `A` as `LL^T = A` and provides
    efficient methods to solve `Ax = b` and `Lx = b`, retrieve `L`, and
    compute the log determinant of `A`. `A` can be a scipy sparse matrix or a
    numpy array. Sparse factorizations require CHOLMOD; without it the matrix
    is densified first.

    Parameters
    ----------
    A : (n, n) array or scipy sparse matrix
        Positive definite matrix
    '''
    def __init__(self, A):
        A = as_sparse_or_array(A, dtype=float)
        if sp.issparse(A) and not HAS_CHOLMOD:
            # fall back to a dense factorization when CHOLMOD is missing
            warnings.warn(CHOLMOD_MSG)
            A = A.toarray()
        backend = _SparsePosDefSolver if sp.issparse(A) else _DensePosDefSolver
        self._solver = backend(A)

    def solve(self, b):
        '''
        solves `Ax = b` for `x`

        Parameters
        ----------
        b : (n, *) array or sparse matrix

        Returns
        -------
        (n, *) array
        '''
        return self._solver.solve(as_array(b, dtype=float))

    def solve_L(self, b):
        '''
        solves `Lx = b` for `x`

        Parameters
        ----------
        b : (n, *) array or sparse matrix

        Returns
        -------
        (n, *) array
        '''
        return self._solver.solve_L(as_array(b, dtype=float))

    def L(self):
        '''
        Returns the factorization `L`

        Returns
        -------
        (n, n) array or sparse matrix
        '''
        return self._solver.L()

    def log_det(self):
        '''
        Returns the log determinant of `A`

        Returns
        -------
        float
        '''
        return self._solver.log_det()
def is_positive_definite(A):
    '''
    Tests if `A` is positive definite by checking whether its Cholesky
    decomposition finishes successfully. `A` can be a scipy sparse matrix or
    a numpy array.

    Parameters
    ----------
    A : (n, n) array or scipy sparse matrix

    Returns
    -------
    bool
    '''
    # Only reference cholmod's exception type when the sksparse import
    # succeeded. The previous version named
    # `cholmod.CholmodNotPositiveDefiniteError` unconditionally in the
    # `except` clause, which raised a NameError (instead of returning False)
    # whenever CHOLMOD was not installed and the factorization failed.
    not_posdef_exceptions = (np.linalg.LinAlgError,)
    if HAS_CHOLMOD:
        not_posdef_exceptions += (cholmod.CholmodNotPositiveDefiniteError,)
    try:
        PosDefSolver(A).L()
    except not_posdef_exceptions:
        return False
    return True
class PartitionedSolver(object):
    '''
    Solves the saddle-point block system

        | A   B | |x|   |a|
        | B^T 0 | |y| = |b|

    for `x` and `y` by assembling the full block matrix and LU-factoring it.
    As opposed to `PartitionedPosDefSolver`, `A` is not assumed to be
    positive definite. `A` can be a scipy sparse matrix or a numpy array.
    `B` can also be either, but it is always converted to a numpy array.

    Parameters
    ----------
    A : (n, n) array or sparse matrix
    B : (n, p) array or sparse matrix
    '''
    def __init__(self, A, B):
        # `A` stays sparse if given sparse; `B` is always densified
        A = as_sparse_or_array(A, dtype=float)
        B = as_array(B, dtype=float)
        n, p = B.shape
        # a wide `B` makes the saddle-point matrix rank deficient
        if n < p:
            raise np.linalg.LinAlgError(
                'There are fewer rows than columns in `B`. This makes the block '
                'matrix singular, and its inverse cannot be computed.')
        # assemble [[A, B], [B^T, 0]] in the matching (sparse/dense) format
        if sp.issparse(A):
            corner = sp.csc_matrix((p, p), dtype=float)
            block = sp.vstack((sp.hstack((A, B)), sp.hstack((B.T, corner))))
        else:
            corner = np.zeros((p, p), dtype=float)
            block = np.vstack((np.hstack((A, B)), np.hstack((B.T, corner))))
        self._solver = Solver(block)
        self.n = n

    def solve(self, a, b):
        '''
        Solves for `x` and `y` given `a` and `b`.

        Parameters
        ----------
        a : (n, *) array or sparse matrix
        b : (p, *) array or sparse matrix

        Returns
        -------
        (n, *) array
        (p, *) array
        '''
        rhs = np.concatenate(
            (as_array(a, dtype=float), as_array(b, dtype=float)), axis=0)
        xy = self._solver.solve(rhs)
        # split the stacked solution back into its `x` and `y` parts
        return xy[:self.n], xy[self.n:]
class PartitionedPosDefSolver(object):
    '''
    Solves the block system

        | A   B | |x|   |a|
        | B^T 0 | |y| = |b|

    for `x` and `y`, where `A` is a positive definite matrix. Rather than
    naively building and solving the system, the inverse is partitioned as

        | C   D |
        | D^T E |

    where

        C = A^-1 - (A^-1 B) (B^T A^-1 B)^-1 (A^-1 B)^T
        D = (A^-1 B) (B^T A^-1 B)^-1
        E = -(B^T A^-1 B)^-1

    The inverse of `A` is never formed explicitly; its action is applied via
    a Cholesky factorization. `A` can be a scipy sparse matrix or a numpy
    array. `B` can also be either, but it is always converted to a numpy
    array.

    Parameters
    ----------
    A : (n, n) array or sparse matrix
    B : (n, p) array or sparse matrix

    Note
    ----
    This class stores the factorization of `A`, which may be sparse, the
    dense matrix `A^-1 B`, and the dense factorization of `B^T A^-1 B`. If
    the number of columns in `B` is large then this may take up too much
    memory.
    '''
    def __init__(self, A, B):
        A = as_sparse_or_array(A, dtype=float)
        B = as_array(B, dtype=float)
        n, p = B.shape
        # a wide `B` makes the block matrix singular
        if n < p:
            raise np.linalg.LinAlgError(
                'There are fewer rows than columns in `B`. This makes the block '
                'matrix singular, and its inverse cannot be computed.')
        self._A_solver = PosDefSolver(A)
        # dense n-by-p matrix A^-1 B
        self._AiB = self._A_solver.solve(B)
        # dense factorization of the p-by-p Schur complement B^T A^-1 B
        self._BtAiB_solver = PosDefSolver(B.T.dot(self._AiB))

    def solve(self, a, b):
        '''
        Solves for `x` and `y` given `a` and `b`.

        Parameters
        ----------
        a : (n, *) array or sparse matrix
        b : (p, *) array or sparse matrix

        Returns
        -------
        (n, *) array
        (p, *) array
        '''
        a = as_array(a, dtype=float)
        b = as_array(b, dtype=float)
        # x = C a + D b and y = D^T a + E b, using the partitioned inverse
        Eb = -self._BtAiB_solver.solve(b)
        Db = -self._AiB.dot(Eb)
        Dta = self._BtAiB_solver.solve(self._AiB.T.dot(a))
        Ca = self._A_solver.solve(a) - self._AiB.dot(Dta)
        return Ca + Db, Dta + Eb
class GMRESSolver(object):
    '''
    Iteratively solves `Ax = b` with GMRES, preconditioned by an incomplete
    LU factorization of the row-normalized system.

    Parameters
    ----------
    A : (n, n) CSC sparse matrix
    drop_tol : float, optional
        Passed to `scipy.sparse.linalg.spilu`. This controls the sparsity of
        the ILU decomposition used for the preconditioner. It should be
        between 0 and 1. Smaller values make the decomposition denser but
        better approximate the LU decomposition. If the value is too large
        then you may get a "Factor is exactly singular" error.
    fill_factor : float, optional
        Passed to `scipy.sparse.linalg.spilu`. Controls the memory allocated
        for the ILU decomposition. If this value is too small then memory
        will be allocated dynamically for the decomposition. If this is too
        large then you may get a memory error.
    normalize_inplace : bool
        If True and `A` is a csc matrix, then `A` is normalized in place.
    '''
    def __init__(self,
                 A,
                 drop_tol=0.005,
                 fill_factor=2.0,
                 normalize_inplace=False):
        # spilu and gmres are most efficient with CSC storage; this is a
        # no-op when `A` is already CSC
        A = sp.csc_matrix(A)
        # normalize each row by its norm to improve conditioning
        n = row_norms(A)
        if normalize_inplace:
            divide_rows(A, n, inplace=True)
        else:
            A = divide_rows(A, n, inplace=False).tocsc()

        LOGGER.debug(
            'computing the ILU decomposition of a %s by %s sparse matrix with %s '
            'nonzeros ' % (A.shape + (A.nnz,)))
        ilu = spla.spilu(
            A,
            drop_rule='basic',
            drop_tol=drop_tol,
            fill_factor=fill_factor)
        LOGGER.debug('done')

        self.A = A
        # wrap the ILU solve as a LinearOperator to use as a preconditioner
        self.M = spla.LinearOperator(A.shape, ilu.solve)
        self.n = n

    def solve(self, b, tol=1.0e-10):
        '''
        Solve `Ax = b` for `x`

        Parameters
        ----------
        b : (n,) array
        tol : float, optional

        Returns
        -------
        (n,) array
        '''
        # log the residual at each GMRES iteration
        iteration = [0]

        def callback(res):
            LOGGER.debug(
                'GMRES error on iteration %s: %s'
                % (iteration[0], np.linalg.norm(res)))
            iteration[0] += 1

        LOGGER.debug('solving the system with GMRES')
        # the right-hand side is scaled by the same row norms applied to `A`
        x, info = spla.gmres(
            self.A,
            b/self.n,
            tol=tol,
            M=self.M,
            callback=callback)
        LOGGER.debug('finished GMRES with info %s' % info)
        return x
| 23.666667 | 79 | 0.587307 | 12,446 | 0.703999 | 0 | 0 | 0 | 0 | 0 | 0 | 10,296 | 0.582386 |
f0b9fbfd7322db83b4a975dbad3a4b36576b39d1 | 3,200 | py | Python | week_3/cnn.py | jimichailidis/UVA_AML18 | 620a9aba6651dd4451f5286aa250d7037673f7bf | [
"MIT"
] | null | null | null | week_3/cnn.py | jimichailidis/UVA_AML18 | 620a9aba6651dd4451f5286aa250d7037673f7bf | [
"MIT"
] | null | null | null | week_3/cnn.py | jimichailidis/UVA_AML18 | 620a9aba6651dd4451f5286aa250d7037673f7bf | [
"MIT"
] | null | null | null | #%%
import os
# Log the working directory so that the relative data path used by
# `load_mnist(path='.')` below can be debugged easily.
print(os.getcwd())

# Project-local building blocks: layers, loss, optimizer, and data loading.
from Blocks import ReLU, SequentialNN, Dense, Hinge, SGD
from dataset_utils import load_mnist
import numpy as np
from convolution_layer import ConvLayer
from maxpool_layer import MaxPool2x2
from flatten_layer import FlattenLayer
import sys
def iterate_minibatches(x, y, batch_size=16, verbose=True):
    '''
    Yield `(x, y)` mini-batches of size `batch_size` in a random order.
    A trailing partial batch is dropped.

    Parameters
    ----------
    x, y : array-likes with the same first dimension
    batch_size : int, optional
    verbose : bool, optional
        Print a carriage-return progress counter to stdout.
    '''
    assert len(x) == len(y)
    order = np.arange(len(x))
    np.random.shuffle(order)
    total_batches = len(x) // batch_size
    starts = range(0, len(x) - batch_size + 1, batch_size)
    for batch_num, start in enumerate(starts):
        if verbose:
            print('\rBatch: {}/{}'.format(batch_num + 1, total_batches), end='')
            sys.stdout.flush()
        chosen = order[start:start + batch_size]
        yield x[chosen], y[chosen]
def get_cnn():
    '''
    Build the small binary-classification CNN: two conv/relu/maxpool stages
    followed by a flatten and two dense layers with a single output unit.
    '''
    layers = [
        ConvLayer(1, 2, filter_size=3),   # output: N_obj 2 28 28
        ReLU(),                           # output: N_obj 2 28 28
        MaxPool2x2(),                     # output: N_obj 2 14 14
        ConvLayer(2, 4, filter_size=3),   # output: N_obj 4 14 14
        ReLU(),                           # output: N_obj 4 14 14
        MaxPool2x2(),                     # output: N_obj 4 7 7
        FlattenLayer(),                   # output: N_obj 196
        Dense(4 * 7 * 7, 32),
        ReLU(),
        Dense(32, 1),
    ]
    nn = SequentialNN()
    for layer in layers:
        nn.add(layer)
    return nn
# Build the model, the hinge loss, and the SGD optimizer.
nn = get_cnn()
loss = Hinge()
optimizer = SGD(nn)

# Load the MNIST training split: each item is a (label, image) pair.
train = list(load_mnist(dataset='training', path='.'))
train_images = np.array([im[1] for im in train])
train_targets = np.array([im[0] for im in train])

# We will train a 0 vs. 1 classifier
x_train = train_images[train_targets < 2][:1000]
y_train = train_targets[train_targets < 2][:1000]
# Remap labels {0, 1} -> {-1, +1}, as expected by the hinge loss.
y_train = y_train * 2 - 1
y_train = y_train.reshape((-1, 1))
# Scale pixel values to [0, 1] and add the single-channel axis.
x_train = x_train.astype('float32') / 255.0
x_train = x_train.reshape((-1, 1, 28, 28))

# It will train for about 5 minutes
num_epochs = 3
batch_size = 32

# We will store the results here
history = {'loss': [], 'accuracy': []}

for epoch in range(num_epochs):
    print('Epoch {}/{}'.format(epoch + 1, num_epochs))
    # Iterate over the shuffled mini-batches one at a time.
    for x_batch, y_batch in iterate_minibatches(x_train, y_train, batch_size):
        # Predict the target value
        y_pred = nn.forward(x_batch)
        # Compute the gradient of the loss
        loss_grad = loss.backward(y_pred, y_batch)
        # Perform backwards pass
        nn.backward(x_batch, loss_grad)
        # Update the params
        optimizer.update_params()
        # Save loss and accuracy values
        history['loss'].append(loss.forward(y_pred, y_batch))
        prediction_is_correct = (y_pred > 0) == (y_batch > 0)
        history['accuracy'].append(np.mean(prediction_is_correct))
    # End the '\r' progress line for this epoch.
    print()
#%%
import matplotlib.pyplot as plt

# Let's plot the results to get a better insight: loss on the left axis and
# accuracy on a twinned right axis, both per mini-batch.
plt.figure(figsize=(8, 5))
ax_1 = plt.subplot()
ax_1.plot(history['loss'], c='g', lw=2, label='train loss')
ax_1.set_ylabel('loss', fontsize=16)
ax_1.set_xlabel('#batches', fontsize=16)
ax_2 = plt.twinx(ax_1)
ax_2.plot(history['accuracy'], lw=3, label='train accuracy')
ax_2.set_ylabel('accuracy', fontsize=16)
| 31.372549 | 81 | 0.672188 | 0 | 0 | 463 | 0.144688 | 0 | 0 | 0 | 0 | 817 | 0.255313 |
f0ba87898e06dbbd8420e2f2f2069300ec91a8de | 25,935 | py | Python | methylprep/processing/preprocess.py | WonyoungCho/methylprep | 4e34f62be969158453ba9b05b7629433f9bbba8b | [
"MIT"
] | 5 | 2019-08-28T08:27:16.000Z | 2020-03-11T17:20:01.000Z | methylprep/processing/preprocess.py | WonyoungCho/methylprep | 4e34f62be969158453ba9b05b7629433f9bbba8b | [
"MIT"
] | 16 | 2021-04-08T22:02:58.000Z | 2022-03-18T17:30:50.000Z | methylprep/processing/preprocess.py | WonyoungCho/methylprep | 4e34f62be969158453ba9b05b7629433f9bbba8b | [
"MIT"
] | 3 | 2022-01-26T00:12:19.000Z | 2022-03-09T22:43:22.000Z | # Normal-exponential using out-of-band probes
# normex: negative control probes
# noob: ‘out-of-band’ Infinium I probes
# Lib
import logging
import numpy as np
import pandas as pd
from statsmodels import robust
from scipy.stats import norm, lognorm
# App
from ..models import ControlType, ArrayType
from ..models.sketchy_probes import qualityMask450, qualityMaskEPIC, qualityMaskEPICPLUS, qualityMaskmouse
__all__ = ['preprocess_noob']
LOGGER = logging.getLogger(__name__)
def preprocess_noob(container, offset=15, pval_probes_df=None, quality_mask_df=None, nonlinear_dye_correction=True, debug=False, unit_test_oob=False): # v1.4.5+
    """ NOOB pythonized copy of https://github.com/zwdzwd/sesame/blob/master/R/background_correction.R
    - The function takes a SigSet and returns a modified SigSet with the background subtracted.
    - Background is modelled in a normal distribution and true signal in an exponential distribution.
    - The Norm-Exp deconvolution is parameterized using Out-Of-Band (oob) probes.
    - includes snps, but not control probes yet
    - output should replace the container instead of returning debug dataframes
    - II RED and II GREEN both have data, but manifest doesn't have a way to track this, so function tracks it.
    - keep IlmnID as index for meth/unmeth snps, and convert fg_green

    if nonlinear_dye_correction=True, this uses a sesame method in place of minfi method, in a later step.
    if unit_test_oob==True, returns the intermediate data instead of updating the SigSet/SampleDataContainer.
    """
    if debug:
        print(f"DEBUG NOOB {debug} nonlinear_dye_correction={nonlinear_dye_correction}, pval_probes_df={pval_probes_df.shape if isinstance(pval_probes_df,pd.DataFrame) else 'None'}, quality_mask_df={quality_mask_df.shape if isinstance(quality_mask_df,pd.DataFrame) else 'None'}")

    # Stack in-band Meth and Unmeth intensities into one long `mean_value`
    # column per channel, tagging each row with which measurement it came from.
    ibG = pd.concat([
        container.ibG.reset_index().rename(columns={'Meth': 'mean_value'}).assign(used='M'),
        container.ibG.reset_index().rename(columns={'Unmeth': 'mean_value'}).assign(used='U')
    ])
    ibG = ibG[ ~ibG['mean_value'].isna() ].drop(columns=['Meth','Unmeth'])
    ibR = pd.concat([
        container.ibR.reset_index().rename(columns={'Meth': 'mean_value'}).assign(used='M'),
        container.ibR.reset_index().rename(columns={'Unmeth': 'mean_value'}).assign(used='U')
    ])
    ibR = ibR[ ~ibR['mean_value'].isna() ].drop(columns=['Meth','Unmeth'])

    # out-of-band is Green-Unmeth and Red-Meth
    # Exclude probes that failed poobah (p-value above container.poobah_sig)
    # or were zeroed by the quality mask, when those DataFrames are supplied.
    pval = pval_probes_df.loc[ pval_probes_df['poobah_pval'] > container.poobah_sig ].index if isinstance(pval_probes_df, pd.DataFrame) else []
    qmask = quality_mask_df.loc[ quality_mask_df['quality_mask'] == 0 ].index if isinstance(quality_mask_df, pd.DataFrame) else []
    # the ignored errors here should only be from probes that are both pval failures and qmask failures.
    Rmeth = list(container.oobR['Meth'].drop(index=pval, errors='ignore').drop(index=qmask, errors='ignore'))
    Runmeth = list(container.oobR['Unmeth'].drop(index=pval, errors='ignore').drop(index=qmask, errors='ignore'))
    oobR = pd.DataFrame( Rmeth + Runmeth, columns=['mean_value'])
    Gmeth = list(container.oobG['Meth'].drop(index=pval, errors='ignore').drop(index=qmask, errors='ignore'))
    Gunmeth = list(container.oobG['Unmeth'].drop(index=pval, errors='ignore').drop(index=qmask, errors='ignore'))
    oobG = pd.DataFrame( Gmeth + Gunmeth, columns=['mean_value'])

    # Reference counts from a minfi comparison run (kept for debugging):
    # ref fg_green = 442614 | vs ibG 442672 = 396374 + 46240
    # ref fg_red = 528410 | vs ibR 528482 = 439279 + 89131
    # ref oob_green = 178374
    # ref oob_red = 92578

    # Drop any remaining NaN out-of-band intensities, noting them for debug.
    debug_warnings = ""
    if oobR['mean_value'].isna().sum() > 0:
        # NOTE(review): this message reports oobG counts inside the oobR
        # branch -- probably meant to reference oobR; confirm before relying
        # on the debug output.
        debug_warnings += f" NOOB: oobG had {oobG['mean_value'].isna().sum()} NaNs"
        oobR = oobR.dropna()
    if oobG['mean_value'].isna().sum() > 0:
        debug_warnings += f" NOOB: oobG had {oobG['mean_value'].isna().sum()} NaNs"
        oobG = oobG.dropna()
    # in-band NaNs should have been filtered above; treat leftovers as fatal
    if ibG['mean_value'].isna().sum() > 0 or ibR['mean_value'].isna().sum() > 0:
        raise ValueError("ibG or ibR is missing probe intensities. need to filter them out.")
    if debug:
        print(f"ibG {len(ibG)} ibR {len(ibR)} oobG {len(oobG)} oobR {len(oobR)} | {debug_warnings}")

    # set minimum intensity to 1
    ibG_affected = len(ibG.loc[ ibG['mean_value'] < 1 ].index)
    ibR_affected = len(ibR.loc[ ibR['mean_value'] < 1 ].index)
    ibG.loc[ ibG['mean_value'] < 1, 'mean_value'] = 1
    ibR.loc[ ibR['mean_value'] < 1, 'mean_value'] = 1
    oobG_affected = len(oobG[ oobG['mean_value'] < 1])
    oobR_affected = len(oobR[ oobR['mean_value'] < 1])
    oobG.loc[ oobG.mean_value < 1, 'mean_value'] = 1
    oobR.loc[ oobR.mean_value < 1, 'mean_value'] = 1
    if debug:
        # NOTE(review): the condition tests `ibR_affected` twice; it likely
        # should be `ibR_affected > 0 or ibG_affected > 0` -- green-only
        # clipping is currently not reported.
        if ibR_affected > 0 or ibR_affected > 0:
            print(f"ib: Set {ibR_affected} red and {ibG_affected} green to 1.0 ({len(ibR[ ibR['mean_value'] == 1 ].index)}, {len(ibG[ ibG['mean_value'] == 1 ].index)})")
        if oobG_affected > 0 or oobR_affected > 0:
            print(f"oob: Set {oobR_affected} red and {oobG_affected} green to 1.0 ({len(oobR[ oobR['mean_value'] == 1 ].index)}, {len(oobG[ oobG['mean_value'] == 1 ].index)})")

    # do background correction in each channel; returns "normalized in-band signal"
    ibG_nl, params_green = normexp_bg_corrected(ibG, oobG, offset, sample_name=container.sample.name)
    ibR_nl, params_red = normexp_bg_corrected(ibR, oobR, offset, sample_name=container.sample.name)
    noob_green = ibG_nl.round({'bg_corrected':0})
    noob_red = ibR_nl.round({'bg_corrected':0})

    # return intermediates for unit tests instead of mutating the container
    if unit_test_oob:
        return {
            'oobR': oobR,
            'oobG': oobG,
            'noob_green': noob_green,
            'noob_red': noob_red,
        }

    # by default, this last step is omitted for sesame
    if nonlinear_dye_correction == True:
        # update() expects noob_red/green to have IlmnIDs in index, and contain bg_corrected for ALL probes.
        container.update_probe_means(noob_green, noob_red)
    elif nonlinear_dye_correction == False:
        # this "linear" method may be anologous to the ratio quantile normalization described in Nature: https://www.nature.com/articles/s41598-020-72664-6
        normexp_bg_correct_control(container.ctrl_green, params_green)
        normexp_bg_correct_control(container.ctrl_red, params_red)
        mask_green = container.ctrl_green['Control_Type'].isin(ControlType.normalization_green())
        mask_red = container.ctrl_red['Control_Type'].isin(ControlType.normalization_red())
        avg_green = container.ctrl_green[mask_green]['bg_corrected'].mean()
        avg_red = container.ctrl_red[mask_red]['bg_corrected'].mean()
        # scale red intensities by the green/red normalization-control ratio
        rg_ratios = avg_red / avg_green
        red_factor = 1 / rg_ratios
        container.update_probe_means(noob_green, noob_red, red_factor)
        container._SigSet__minfi_noob = True
    elif nonlinear_dye_correction is None:
        if debug:
            LOGGER.info("skipping linear/nonlinear dye-bias correction step")
        # skips the minfi-linear step and won't trigger the sesame nonlinear dye bias step downstream, if you REALLY want it uncorrected. Mostly for debugging / benchmarking.
        container.update_probe_means(noob_green, noob_red)
class BackgroundCorrectionParams():
    """ Container for the normal-exponential background-model parameters
    consumed by `apply_bg_correction`. """
    __slots__ = (
        'bg_mean',
        'bg_mad',
        'mean_signal',
        'offset',
    )

    def __init__(self, bg_mean, bg_mad, mean_signal, offset):
        # note: default offset was 15. In v1.3.3 (Jan 2020) 15 was kept after
        # finding it made results match sesame's NOOB output exactly when the
        # dye step is omitted; the actual offset is supplied by the caller
        # (see preprocess_noob).
        self.bg_mean = bg_mean          # "mu": background center
        self.bg_mad = bg_mad            # "sigma": background spread
        self.mean_signal = mean_signal  # "alpha": mean of the true signal
        self.offset = offset            # constant added to corrected values
def normexp_bg_corrected(fg_probes, ctrl_probes, offset, sample_name=None):
    """ analogous to sesame's backgroundCorrectionNoobCh1 """
    fg_means = fg_probes['mean_value']
    if fg_means.min() == fg_means.max():
        # a flat intensity distribution cannot be deconvolved; flag the
        # sample and return placeholder values
        LOGGER.error(f"{sample_name}: min and max intensity are same. Sample probably bad.")
        fg_probes['bg_corrected'] = 1.0
        dummy = BackgroundCorrectionParams(bg_mean=1.0, bg_mad=1.0, mean_signal=1.0, offset=15)
        return fg_probes, dummy

    # robust center/spread estimates of foreground and background
    fg_center, _ = huber(fg_means)
    bg_center, bg_spread = huber(ctrl_probes['mean_value'])
    # "alpha" in sesame: estimated mean of the true signal, floored at 10
    alpha = np.maximum(fg_center - bg_center, 10)
    params = BackgroundCorrectionParams(bg_center, bg_spread, alpha, offset)
    fg_probes['bg_corrected'] = apply_bg_correction(fg_means, params).round(1)
    return fg_probes, params
def normexp_bg_correct_control(control_probes, params):
    """Function for getting xcs controls for preprocessNoob"""
    # apply the same normal-exponential correction to the control intensities
    control_probes['bg_corrected'] = apply_bg_correction(
        control_probes['mean_value'], params)
    return control_probes
def apply_bg_correction(mean_values, params):
    """ this function won't work with float16 in practice (underflow). limits use to float32 """
    if not isinstance(params, BackgroundCorrectionParams):
        raise ValueError('params is not a BackgroundCorrectionParams instance')
    # float underflow inside exp() is expected for very dim probes; ignore it
    np.seterr(under='ignore')

    mu = params.bg_mean
    sigma = params.bg_mad
    alpha = params.mean_signal
    mu_sf = mean_values - mu - (sigma ** 2) / alpha
    # conditional expectation of the true signal; mirrors sesame's R code:
    #   signal <- mu.sf + sigma2 * exp(
    #       dnorm(0, mean = mu.sf, sd = sigma, log = TRUE) -
    #       pnorm(0, mean = mu.sf, sd = sigma,
    #             lower.tail = FALSE, log.p = TRUE))
    background = norm(mu_sf, sigma)
    signal = mu_sf + (sigma ** 2) * np.exp(
        background.logpdf(0) - background.logsf(0))
    # sesame: "Limit of numerical accuracy reached with very low intensity or
    # very high background: setting adjusted intensities to small value"
    signal = np.maximum(signal, 1e-6)
    return signal + params.offset
def huber(vector):
    """Huber M-estimator of location with MAD scale. Designed to mirror the
    MASS huber function in R.

    Parameters
    ----------
    vector: list or array
        float values

    Returns
    -------
    local_median: float
        calculated mu value (robust center)
    mad_scale: float
        calculated s value (robust spread)
    """
    num_values = len(vector)
    positive_factor = 1.5
    convergence_tol = 1.0e-6
    vector = np.asarray(vector)
    local_median = np.median(vector)
    # Median absolute deviation scaled for consistency with the normal
    # distribution. norm.ppf(0.75) is exactly the constant statsmodels'
    # robust.mad divides by, so this matches the previous behavior without
    # needing the statsmodels dependency in this function.
    mad_scale = np.median(np.abs(vector - local_median) / norm.ppf(0.75))
    if mad_scale == 0:
        # The scale cannot be estimated from a (near-)constant input.
        # Returning here also fixes an infinite loop: the old guard
        # `not (local_median or mad_scale)` only bailed out when BOTH values
        # were zero, so a nonzero median with zero MAD made the convergence
        # test `abs(diff) < tol * 0` unsatisfiable and the loop never ended.
        return local_median, mad_scale
    while True:
        # winsorize the data at median +/- 1.5 * MAD and re-center on the mean
        winsorized = np.minimum(
            np.maximum(
                local_median - positive_factor * mad_scale,
                vector,
            ),
            local_median + positive_factor * mad_scale,
        )
        new_median = sum(winsorized) / num_values
        if abs(local_median - new_median) < convergence_tol * mad_scale:
            return local_median, mad_scale
        local_median = new_median
def _apply_sesame_quality_mask(data_container):
    """ adapted from sesame's qualityMask function (applied just after poobah)
    to drop probes the sesame authors consider unreliable.

    OUTPUT: a one-column ('quality_mask') DataFrame indexed by probe name:
    1.0 marks probes to retain and 0.0 marks probes to exclude (v1.6+
    convention). Returns None when the array type has no mask data.

    SESAME:
        masked <- sesameDataGet(paste0(sset@platform, '.probeInfo'))$mask
        to use TCGA masking, only applies to HM450
    """
    # Locally packaged probe lists per supported platform. EPIC+ probe names
    # do not match EPIC, so it ships its own (temporarily renamed) list.
    mask_lookup = {
        # ArrayType.ILLUMINA_27K has no quality-mask data
        ArrayType.ILLUMINA_450K: qualityMask450,
        ArrayType.ILLUMINA_EPIC: qualityMaskEPIC,
        ArrayType.ILLUMINA_EPIC_PLUS: qualityMaskEPICPLUS,
        ArrayType.ILLUMINA_MOUSE: qualityMaskmouse,
    }
    array_type = data_container.array_type
    if array_type not in mask_lookup:
        LOGGER.info(f"Quality masking is not supported for {array_type}.")
        return
    probes = mask_lookup[array_type]
    # Start every cg and snp probe at 1.0 (retain), then zero the masked ones.
    cg_flags = pd.DataFrame(
        np.ones((len(data_container.man.index), 1)),
        index=data_container.man.index, columns=['quality_mask'])
    snp_flags = pd.DataFrame(
        np.ones((len(data_container.snp_man.index), 1)),
        index=data_container.snp_man.index, columns=['quality_mask'])
    combined = pd.concat([cg_flags, snp_flags])
    combined.loc[combined.index.isin(probes), 'quality_mask'] = 0
    return combined
""" ##### DEPRECATED (<v1.5.0) #####
def _old_reprocess_noob_sesame_v144(container, offset=15, debug=False):
''' NOOB pythonized copy of https://github.com/zwdzwd/sesame/blob/master/R/background_correction.R
- The function takes a SigSet and returns a modified SigSet with that background subtracted.
- Background is modelled in a normal distribution and true signal in an exponential distribution.
- The Norm-Exp deconvolution is parameterized using Out-Of-Band (oob) probes.
- includes snps, but not control probes yet
- output should replace the container instead of returning debug dataframes
- II RED and II GREEN both have data, but manifest doesn't have a way to track this, so function tracks it.
'''
# get in-band red and green channel probe means
#ibR <- c(IR(sset), II(sset)[,'U']) # in-band red signal = IR_meth + IR_unmeth + II[unmeth]
#ibG <- c(IG(sset), II(sset)[,'M']) # in-band green signal = IG_meth + IG_unmeth + II[meth]
# cols: mean_value, IlmnID, probe_type (I,II); index: illumina_id
#CHECKED: AddressA or AddressB for each probe subtype matches probes.py
raw = container.snp_methylated.data_frame
snp_IR_meth = (raw[(raw['Infinium_Design_Type'] == 'I') & (raw['Color_Channel'] == 'Red')][['mean_value','AddressB_ID']]
.reset_index().rename(columns={'AddressB_ID':'illumina_id'}).set_index('illumina_id'))
snp_IR_meth['Channel'] = 'Red'
snp_IG_meth = (raw[(raw['Infinium_Design_Type'] == 'I') & (raw['Color_Channel'] == 'Grn')][['mean_value','AddressB_ID']]
.reset_index().rename(columns={'AddressB_ID':'illumina_id'}).set_index('illumina_id'))
snp_IG_meth['Channel'] = 'Grn'
snp_II_meth = (raw[(raw['Infinium_Design_Type'] == 'II')][['mean_value','AddressA_ID']]
.reset_index().rename(columns={'AddressA_ID':'illumina_id'}).set_index('illumina_id'))
snp_II_meth['Channel'] = 'Grn'
raw = container.snp_unmethylated.data_frame
snp_IR_unmeth = (raw[(raw['Infinium_Design_Type'] == 'I') & (raw['Color_Channel'] == 'Red')][['mean_value','AddressA_ID']]
.reset_index().rename(columns={'AddressA_ID':'illumina_id'}).set_index('illumina_id'))
snp_IR_unmeth['Channel'] = 'Red'
snp_IG_unmeth = (raw[(raw['Infinium_Design_Type'] == 'I') & (raw['Color_Channel'] == 'Grn')][['mean_value','AddressA_ID']]
.reset_index().rename(columns={'AddressA_ID':'illumina_id'}).set_index('illumina_id'))
snp_IG_unmeth['Channel'] = 'Grn'
snp_II_unmeth = (raw[(raw['Infinium_Design_Type'] == 'II')][['mean_value','AddressA_ID']]
.reset_index().rename(columns={'AddressA_ID':'illumina_id'}).set_index('illumina_id'))
snp_II_unmeth['Channel'] = 'Red'
if debug:
print('snp probes:', snp_IR_meth.shape, snp_IG_unmeth.shape, snp_II_meth.shape, snp_II_unmeth.shape)
#--> copy over snps, but first get snps with illumina_id in index
# swap index on all snps from IlmnID to illumina_id
## note: 350076 II + 89203 IR + 46298 IG = 485577 (including rs probes, but excl controls)
ibG = container.fg_green # --> self.raw_dataset.get_fg_values(self.manifest, Channel.GREEN)
ibG['Channel'] = 'Grn'
ibG.index.name = 'illumina_id'
ibR = container.fg_red # --> self.raw_dataset.get_fg_values(self.manifest, Channel.RED)
ibR['Channel'] = 'Red'
ibR.index.name = 'illumina_id'
# to match sesame, extra probes are IR_unmeth and IG_unmeth in ibR red and ibG green, respectively.
ibG = pd.concat([ibG,
snp_IG_meth,
snp_IG_unmeth,
snp_II_meth
], sort=True).drop('probe_type', axis=1)
# sort=True, because column order varies
ibR = pd.concat([ibR,
snp_IR_meth,
snp_IR_unmeth,
snp_II_unmeth
], sort=True).drop('probe_type', axis=1)
if debug:
print('in-bound Green:', ibG.shape) # green IG is AddressB, (meth) according to PROBE_SUBSETS
print('in-bound Red:', ibR.shape) # red IR is AddressA (unmeth) according to PROBE_SUBSETS
### at this point, ibG ibR probe counts match sesame EXACTLY
# set minimum intensity to 1
ibR_affected = len(ibR.loc[ ibR['mean_value'] < 1 ].index)
ibG_affected = len(ibG.loc[ ibG['mean_value'] < 1 ].index)
ibR.loc[ ibR['mean_value'] < 1, 'mean_value'] = 1
ibG.loc[ ibG['mean_value'] < 1, 'mean_value'] = 1
if debug:
print(f"IB: Set {ibR_affected} red and {ibG_affected} green to 1.0 ({len(ibR[ ibR['mean_value'] == 1 ].index)}, {len(ibG[ ibG['mean_value'] == 1 ].index)})")
red_dupes = len(ibR.index)-len(ibR.drop_duplicates().index)
grn_dupes = len(ibG.index)-len(ibG.drop_duplicates().index)
if debug and (red_dupes or grn_dupes):
print(f"duplicate probes: {red_dupes} red and {grn_dupes} green")
ref = container.manifest.data_frame # [['Infinium_Design_Type','Color_Channel']]
# using a copy .oobG and .oobR here; does not update the idat or other source data probe_means
# adopted from raw_dataset.filter_oob_probes here
oobR = (container.oobR.merge(container.manifest.data_frame[['AddressB_ID']],
how='left',
left_index=True,
right_index=True)
.reset_index()
.rename(columns={'AddressB_ID':'illumina_id', 'Unnamed: 0': 'IlmnID'})
.set_index('illumina_id')
)
oobR = pd.DataFrame(list(oobR['meth']) + list(oobR['unmeth']), columns=['mean_value'])
oobG = (container.oobG.merge(container.manifest.data_frame[['AddressA_ID']],
how='left',
left_index=True,
right_index=True)
.reset_index()
.rename(columns={'AddressA_ID':'illumina_id', 'Unnamed: 0': 'IlmnID'})
.set_index('illumina_id')
)
oobG = pd.DataFrame(list(oobG['meth']) + list(oobG['unmeth']), columns=['mean_value'])
oobG_affected = len(oobG[ oobG['mean_value'] < 1])
oobG.loc[ oobG.mean_value < 1, 'mean_value'] = 1
oobR_affected = len(oobR[ oobR['mean_value'] < 1])
oobR.loc[ oobR.mean_value < 1, 'mean_value'] = 1
# here: do bg_subtract AND normalization step here ...
## do background correction in each channel; returns "normalized in-band signal"
ibR_nl, params_red = normexp_bg_corrected(ibR, oobR, offset, sample_name=container.sample.name)
#<- .backgroundCorrectionNoobCh1(ibR, oobR(sset), ctl(sset)$R, getBackgroundR(sset, bgR), offset=offset)
ibG_nl, params_green = normexp_bg_corrected(ibG, oobG, offset, sample_name=container.sample.name)
# <- .backgroundCorrectionNoobCh1(ibG, oobG(sset), ctl(sset)$G, getBackgroundG(sset, bgG), offset=offset)
ibG_nl = ibG_nl.round({'bg_corrected':0})
ibR_nl = ibR_nl.round({'bg_corrected':0})
#print('ibG_nl', ibG_nl.shape)
#print('ibR_nl', ibR_nl.shape)
noob_green = ibG_nl
noob_red = ibR_nl
if debug:
print(f"OOB: Set {oobR_affected} red and {oobG_affected} green to 1.0; shapes: {oobG.shape}, {oobR.shape}")
print(f"noob_red with Grn: {len(noob_red[noob_red['Channel'] == 'Grn'])} noob_green with Red: {len(noob_green[noob_green['Channel'] == 'Red'])}")
ref_IG = ref[(ref['Color_Channel']=='Grn') & (ref['Infinium_Design_Type']=='I')]
ref_IR = ref[(ref['Color_Channel']=='Red') & (ref['Infinium_Design_Type']=='I')]
ref_II = ref[ref['Infinium_Design_Type']=='II'] # II channel is NaN, but BOTH channels have data
print(f"from manifest: ref_IG {ref_IG.shape} ref_IR {ref_IR.shape} ref_II {ref_II.shape}")
# Combine and return red (IG + IR + II_unmeth) and green (IG + IR + II_meth)
# ibR_nl has IlmnID and illumina_id (index); ref has IlmnID as index
# ref_meth/ref_unmeth from probes.py
ref_meth = pd.concat([
ref[(ref['Color_Channel'].isna()) & (ref['Infinium_Design_Type']=='II')]['AddressA_ID'].reset_index().rename(columns={'AddressA_ID':'illumina_id'}),
ref[(ref['Color_Channel']=='Grn') & (ref['Infinium_Design_Type']== 'I')]['AddressB_ID'].reset_index().rename(columns={'AddressB_ID':'illumina_id'}),
ref[(ref['Color_Channel']=='Red') & (ref['Infinium_Design_Type']== 'I')]['AddressB_ID'].reset_index().rename(columns={'AddressB_ID':'illumina_id'}),
]) #.set_index('illumina_id') # .drop('illumina_id', axis=1)
ref_unmeth = pd.concat([
ref[(ref['Color_Channel'].isna()) & (ref['Infinium_Design_Type']=='II')]['AddressA_ID'].reset_index().rename(columns={'AddressA_ID':'illumina_id'}),
ref[(ref['Color_Channel']=='Grn') & (ref['Infinium_Design_Type']== 'I')]['AddressA_ID'].reset_index().rename(columns={'AddressA_ID':'illumina_id'}),
ref[(ref['Color_Channel']=='Red') & (ref['Infinium_Design_Type']== 'I')]['AddressA_ID'].reset_index().rename(columns={'AddressA_ID':'illumina_id'}),
]) #.set_index('illumina_id') # .drop('illumina_id', axis=1)
noob_meth_G = noob_green[noob_green.index.isin(ref_meth['illumina_id'])]
noob_unmeth_G = noob_green[noob_green.index.isin(ref_unmeth['illumina_id'])]
noob_meth_R = noob_red[noob_red.index.isin(ref_meth['illumina_id'])]
noob_unmeth_R = noob_red[noob_red.index.isin(ref_unmeth['illumina_id'])]
noob_meth_dupes = pd.concat([noob_meth_G, noob_meth_R])
noob_unmeth_dupes = pd.concat([noob_unmeth_G, noob_unmeth_R])
# CONFIRMED: this dedupe method below matches sesame's output exactly for noob_meth
noob_meth = (noob_meth_dupes[~noob_meth_dupes.index.duplicated(keep='first')]
.set_index('IlmnID')
.sort_index()
.rename(columns={'bg_corrected':'meth'})
)
# conveniently, the FIRST value of each duplicate probe appears to be the one we want for both meth/unmeth R/G channels
noob_unmeth = (noob_unmeth_dupes[~noob_unmeth_dupes.index.duplicated(keep='first')]
.set_index('IlmnID')
.sort_index()
.rename(columns={'bg_corrected':'unmeth'})
)
# update II, IG, IR, oobR, oobG, ctrl_red, ctrl_green
# --> --> probes.py subsets concatenate these:
# fg_green
# GREEN + AddressA + II
# GREEN + AddressA + IG
# GREEN + AddressB + IG
# oob_green
# RED + AddressA + IR
# fg_red
# RED + AddressA + II
# RED + AddressA + IR
# RED + AddressB + IR
# oob_red
# GREEN + AddressB + IG
#
# methylated
# GREEN + AddressA + II
# GREEN + AddressB + I
# RED + AddressB + I
# unmethylated
# RED + AddressA + II
# GREEN + AddressA + I
# RED + AddressA + I
# RETROFITTING BELOW -- may not work, as sesame works with noob_meth / noob_unmeth instead
try:
container.methylated.set_bg_corrected(noob_green, noob_red)
container.unmethylated.set_bg_corrected(noob_green, noob_red)
container.methylated.set_noob(1.0)
container.unmethylated.set_noob(1.0)
except ValueError as e:
print(e)
if debug:
LOGGER.warning("could not update container methylated / unmethylated noob values, because preprocess_sesame_noob has already run once.")
# output df should have sample meth or unmeth in a column with sample name and IlmnID as index. 485512 rows
if debug:
return {
'noob_meth': noob_meth,
'noob_unmeth': noob_unmeth,
'oobR': oobR,
'oobG': oobG,
'noob_green': noob_green,
'noob_red': noob_red,
'dupe_meth': noob_meth_dupes,
'dupe_unmeth': noob_unmeth_dupes,
}
return # noob_meth, noob_unmeth
"""
| 51.05315 | 279 | 0.662888 | 594 | 0.0229 | 0 | 0 | 0 | 0 | 0 | 0 | 18,366 | 0.708046 |
f0bac3ba5e39b1d61b8af9133f958f0f2c233ba5 | 5,041 | py | Python | examples/llvm_rl/model/inference_result.py | xtremey/CompilerGym | 2c2ceaebfccae72beb4aaacc74e0c8134c6f2ffe | [
"MIT"
] | null | null | null | examples/llvm_rl/model/inference_result.py | xtremey/CompilerGym | 2c2ceaebfccae72beb4aaacc74e0c8134c6f2ffe | [
"MIT"
] | null | null | null | examples/llvm_rl/model/inference_result.py | xtremey/CompilerGym | 2c2ceaebfccae72beb4aaacc74e0c8134c6f2ffe | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import List
import numpy as np
from pydantic import BaseModel, validator
from ray.rllib.agents.dqn import ApexTrainer, R2D2Trainer # noqa
from ray.rllib.agents.impala import ImpalaTrainer # noqa
from ray.rllib.agents.ppo import PPOTrainer # noqa
from compiler_gym.datasets import BenchmarkUri
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.timer import Timer
logger = logging.getLogger(__name__)
class InferenceResult(BaseModel):
    """Represents the result of running an RL agent on a problem."""
    # The benchmark URI.
    benchmark: str
    # Wall-clock seconds spent in the agent/environment rollout loop only;
    # the runtime measurements below are not included in this timing.
    inference_walltime_seconds: float
    # Commandline produced by env.commandline() for the action sequence.
    commandline: str
    # Number of actions taken before the episode terminated.
    episode_len: int
    # IR instruction counts: unoptimized (-O0), after the agent, and at -Oz.
    instruction_count_init: int
    instruction_count_final: int
    instruction_count_oz: int
    instruction_count_reduction: float
    """The final instruction count, normalized to -Oz."""
    # Object .text sizes in bytes: -O0, after the agent, and at -Oz.
    object_size_init: int
    object_size_final: int
    object_size_oz: int
    object_size_reduction: float
    """The final object size, normalized to -Oz."""
    # Per-run runtimes measured at -O0, after the agent, and at -O3.
    # Left empty when the benchmark is not runnable or measurement fails.
    runtimes_init: List[float]
    runtimes_final: List[float]
    runtimes_o3: List[float]
    runtime_reduction: float
    """The final runtime, normalized to -O3."""
    @classmethod
    def from_agent(
        cls, env: CompilerEnv, agent, runtime: bool = True, runtimes_count: int = 30
    ):
        """Roll out `agent` on `env` to termination and collect result metrics.

        Runtime measurement is attempted only when `runtime` is True and the
        benchmark reports itself runnable; failures there are logged and the
        runtime lists are left (partially) empty rather than raised.
        """
        # We calculate our own reward at the end, no need for incremental
        # rewards during inference.
        env.reward_space = None
        # Run inference on the environment.
        observation, done = env.reset(), False
        with Timer() as inference_timer:
            while not done:
                action = agent.compute_action(observation)
                observation, _, done, _ = env.step(action)
        # Static metrics, queried from the unwrapped env after the rollout.
        instruction_count_init = env.unwrapped.observation["IrInstructionCountO0"]
        instruction_count_final = env.unwrapped.observation["IrInstructionCount"]
        instruction_count_oz = env.unwrapped.observation["IrInstructionCountOz"]
        object_size_init = env.unwrapped.observation["ObjectTextSizeO0"]
        object_size_final = env.unwrapped.observation["ObjectTextSizeBytes"]
        object_size_oz = env.unwrapped.observation["ObjectTextSizeOz"]
        runtimes_init = []
        runtimes_o3 = []
        runtimes_final = []
        try:
            if runtime and env.unwrapped.observation["IsRunnable"]:
                # Measure the agent-optimized program first (current state).
                env.send_param(
                    "llvm.set_runtimes_per_observation_count", str(runtimes_count)
                )
                env.unwrapped.observation["Runtime"] # warmup
                runtimes_final = env.unwrapped.observation["Runtime"].tolist()
                assert (
                    len(runtimes_final) == runtimes_count
                ), f"{len(runtimes_final)} != {runtimes_count}"
                # Reset to the unoptimized (-O0) program for the baseline run.
                env.reset()
                env.send_param(
                    "llvm.set_runtimes_per_observation_count", str(runtimes_count)
                )
                env.unwrapped.observation["Runtime"] # warmup
                runtimes_init = env.unwrapped.observation["Runtime"].tolist()
                assert (
                    len(runtimes_init) == runtimes_count
                ), f"{len(runtimes_init)} != {runtimes_count}"
                # Apply -O3 on top of the reset program and measure again.
                env.send_param("llvm.apply_baseline_optimizations", "-O3")
                env.unwrapped.observation["Runtime"] # warmup
                runtimes_o3 = env.unwrapped.observation["Runtime"].tolist()
                assert (
                    len(runtimes_o3) == runtimes_count
                ), f"{len(runtimes_o3)} != {runtimes_count}"
        except Exception as e: # pylint: disable=broad-except
            # Best effort: runtime support varies by benchmark; keep going.
            logger.warning("Failed to compute runtime: %s", e)
        # Reduction ratios are baseline / final, so larger is better.
        # max(..., 1) guards against division by zero.
        # NOTE(review): for runtime_reduction the max(..., 1) clamp also
        # triggers for sub-1 median runtimes -- confirm this is intended.
        return cls(
            benchmark=env.benchmark.uri,
            inference_walltime_seconds=inference_timer.time,
            commandline=env.commandline(),
            episode_len=len(env.actions),
            instruction_count_init=instruction_count_init,
            instruction_count_final=instruction_count_final,
            instruction_count_oz=instruction_count_oz,
            instruction_count_reduction=instruction_count_oz
            / max(instruction_count_final, 1),
            object_size_init=object_size_init,
            object_size_final=object_size_final,
            object_size_oz=object_size_oz,
            object_size_reduction=object_size_oz / max(object_size_final, 1),
            runtimes_init=runtimes_init,
            runtimes_final=runtimes_final,
            runtimes_o3=runtimes_o3,
            runtime_reduction=np.median(runtimes_o3 or [0])
            / max(np.median(runtimes_final or [0]), 1),
        )
    @validator("benchmark", pre=True)
    def validate_benchmark(cls, value):
        """Coerce BenchmarkUri instances to plain strings before validation."""
        if isinstance(value, BenchmarkUri):
            return str(value)
        return value
| 39.382813 | 84 | 0.649276 | 4,414 | 0.87562 | 0 | 0 | 3,652 | 0.724459 | 0 | 0 | 1,078 | 0.213846 |
f0bcf9adb138c5c68aa47fd5e13ac337ba12d12c | 196 | py | Python | Calculator/Division.py | rn44/statsCalculator | b827edfc49d9251c7490ede3e578b58b49d9bb13 | [
"MIT"
] | null | null | null | Calculator/Division.py | rn44/statsCalculator | b827edfc49d9251c7490ede3e578b58b49d9bb13 | [
"MIT"
] | null | null | null | Calculator/Division.py | rn44/statsCalculator | b827edfc49d9251c7490ede3e578b58b49d9bb13 | [
"MIT"
def division(a, b):
    """Divide a by b and round the quotient to 9 decimal places.

    Both arguments are coerced with float(), so numeric strings are
    accepted. Dividing by zero does not raise: it prints a warning and
    returns 0, preserving the original calculator's behavior.
    """
    divisor = float(b)
    if divisor == 0:
        # Guard clause instead of if/else: keep the legacy "print and
        # return 0" contract rather than raising ZeroDivisionError.
        print('Cannot divide by 0.')
        return 0
    return round(float(a) / divisor, 9)
| 17.818182 | 36 | 0.408163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.107143 |
f0c2b3418e682549772086df9363fdf46943f5bd | 1,322 | py | Python | fabulous/script_gen.py | FPGA-Research-Manchester/nextpnr-fabulous | 665d13010eb6ea6ccf0c6041bb9df5028983a3bf | [
"0BSD"
] | 1 | 2021-10-01T11:05:52.000Z | 2021-10-01T11:05:52.000Z | fabulous/script_gen.py | FPGA-Research-Manchester/nextpnr-fabulous | 665d13010eb6ea6ccf0c6041bb9df5028983a3bf | [
"0BSD"
] | null | null | null | fabulous/script_gen.py | FPGA-Research-Manchester/nextpnr-fabulous | 665d13010eb6ea6ccf0c6041bb9df5028983a3bf | [
"0BSD"
] | 1 | 2021-10-01T11:05:52.000Z | 2021-10-01T11:05:52.000Z | import re
from array import *
import fileinput
import sys, getopt
import csv
def main(argv):
    """Generate a Yosys synthesis script (<top>.ys) for one Verilog file.

    Reads ./template_temp.txt, substitutes the Verilog file name, top
    module name and output JSON name into it, and writes <top>.ys.
    Exits with status 2 on bad options or a missing template.

    Command line:
        -f <verilog file>     source file to synthesize
        -t <top module name>  defaults to the Verilog file name minus '.v'
    """
    vlog = ''
    top = ''
    try:
        opts, _ = getopt.getopt(argv, "hf:t:", ["Vlog=", "Top="])
    except getopt.GetoptError:
        # Fixed usage text: the Verilog-file flag is -f, not -r.
        print('script_gen.py -f <verilog file> -t <top module name>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            # Fixed: previously printed another script's usage message
            # (top_wrapper_generator.py options).
            print('script_gen.py -f <verilog file> -t <top module name>')
            sys.exit()
        elif opt in ("-f", "--Vlog"):
            vlog = arg
        elif opt in ("-t", "--Top"):
            top = arg
    if not top:
        # Default the top module name to the file name without '.v'.
        top = vlog.replace('.v', '')
    print('File        :', vlog)
    print('Top_module  :', top)
    script_str = ''
    try:
        with open("./template_temp.txt", 'r') as file:
            script_str = file.read()
    except IOError:
        print("template_temp.txt not accessible")
        sys.exit(2)
    # Specialize the template for this design.
    script_str = script_str.replace("template.v", vlog)
    script_str = script_str.replace("-top template", '-top ' + top)
    script_str = script_str.replace("template.json", top + '.json')
    if script_str:
        with open(top + '.ys', 'w') as file:
            file.write(script_str)
if __name__ == "__main__":
    main(sys.argv[1:])
| 28.73913 | 141 | 0.559002 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 394 | 0.298033 |
f0c3581446d85c97a243b1adbc5978391a877b8e | 269 | py | Python | src/app/buy_btc.py | simondorfman/hello_cb_pro | cdb96ee1390d22753630e24dac9bfdc5e47e788d | [
"MIT"
] | null | null | null | src/app/buy_btc.py | simondorfman/hello_cb_pro | cdb96ee1390d22753630e24dac9bfdc5e47e788d | [
"MIT"
] | null | null | null | src/app/buy_btc.py | simondorfman/hello_cb_pro | cdb96ee1390d22753630e24dac9bfdc5e47e788d | [
"MIT"
] | null | null | null | import os
from cbt.private_client import PrivateClient
from cbt.auth import get_new_private_connection
if __name__ == "__main__":
    # Script entry point: place a single market buy order for BTC through
    # the project's authenticated PrivateClient.
    usd = os.getenv("USD_BUY") # amount to spend; None when the env var is unset -- NOTE(review): confirm market_buy_btc validates this
    auth = get_new_private_connection() # authenticated connection (credentials resolved inside cbt.auth)
    client = PrivateClient(auth)
    client.market_buy_btc(usd)
| 20.692308 | 47 | 0.754647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.070632 |
f0c366363d8d56f6982ab28b3008beafae215018 | 4,836 | py | Python | sample/tensor_functions.py | TheLampshady/tensor_tutorial | a5665d05ce7fdefa4975081847ac34d44756a3d0 | [
"MIT"
] | 2 | 2018-04-19T19:33:43.000Z | 2019-03-12T05:59:52.000Z | sample/tensor_functions.py | TheLampshady/tensor_tutorial | a5665d05ce7fdefa4975081847ac34d44756a3d0 | [
"MIT"
] | null | null | null | sample/tensor_functions.py | TheLampshady/tensor_tutorial | a5665d05ce7fdefa4975081847ac34d44756a3d0 | [
"MIT"
] | null | null | null | import numpy as np
from os import getcwd, path
import scipy.misc as misc
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
def variable_summaries(var, histogram_name='histogram'):
    """
    Attach a lot of summaries to a Tensor (for TensorBoard visualization).
    :type var: tf.Variable
    :type histogram_name: str
    :rtype: tf.Tensor
    """
    # Scalar summaries for the mean, stddev, max and min of `var`, plus a
    # histogram; all merged into a single summary op for one-call logging.
    mean = tf.reduce_mean(var)
    mean_scalar = tf.summary.scalar('mean', mean)
    # Only the stddev *computation* is grouped under the 'stddev' name scope;
    # its summary op below is created outside the scope.
    with tf.name_scope('stddev'):
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    stddev_scalar = tf.summary.scalar('stddev', stddev)
    max_scalar = tf.summary.scalar('max', tf.reduce_max(var))
    min_scalar = tf.summary.scalar('min', tf.reduce_min(var))
    histogram = tf.summary.histogram(histogram_name, var)
    return tf.summary.merge([
        mean_scalar,
        stddev_scalar,
        max_scalar,
        min_scalar,
        histogram
    ])
def weight_variable(shape, stddev=0.1, enable_summary=True):
    """
    Build a weight Variable initialized from a truncated normal distribution,
    optionally attaching TensorBoard summaries under the 'Weights' scope.
    :type shape: list <float>
    :type stddev: float
    :type enable_summary: bool
    :rtype: tf.Variable
    """
    var_name = 'Weights'
    init_tensor = tf.truncated_normal(shape, stddev=stddev, name="%s_Init" % var_name)
    weights = tf.Variable(init_tensor, name=var_name)
    if enable_summary:
        with tf.name_scope(var_name):
            variable_summaries(weights, var_name)
    return weights
def bias_variable(shape, init=0.1, enable_summary=True):
    """
    Build a bias Variable initialized to a constant value, optionally
    attaching TensorBoard summaries under the 'Biases' scope.
    :type shape: list <float>
    :type init: float
    :type enable_summary: bool
    :rtype: tf.Variable
    """
    var_name = 'Biases'
    init_tensor = tf.constant(init, shape=shape, name="%s_Init" % var_name)
    biases = tf.Variable(init_tensor, name=var_name)
    if enable_summary:
        with tf.name_scope(var_name):
            variable_summaries(biases, var_name)
    return biases
def build_sprite(sprite_images, filename='sprite_1024.png'):
    """Assemble the first 1024 images into a 32x32 sprite and save it.

    `sprite_images` is array-like of shape (>=1024, 784); each row is one
    28x28 image, flattened. Image i*32+j lands at grid row i, column j.
    The assembled grid is inverted (256 - value) before saving.
    """
    # Vectorized tiling: reshape to (row, col, 28, 28) and interleave the
    # axes, instead of the previous 1024 incremental np.concatenate calls
    # (which reallocated the buffer on every tile, i.e. quadratic work).
    tiles = np.asarray(sprite_images)[:1024].reshape(32, 32, 28, 28)
    res = tiles.transpose(0, 2, 1, 3).reshape(32 * 28, 32 * 28)
    misc.toimage(256 - res, channel_axis=0).save(filename)
def build_labels(labels, filename='labels_1024.tsv'):
    """Write the integer class of each one-hot label row to a file.

    Each row of `labels` is expected to contain exactly one entry equal
    to 1; its index is written on its own line.
    """
    # `with` guarantees the file is closed even if a row is malformed
    # (the previous version leaked the handle on exceptions).
    with open(filename, 'w') as label_file:
        for target in labels:
            value = int(np.where(target == 1)[0])  # index of the hot entry
            label_file.write("%d\n" % value)
def build_mnist_embeddings(data_path, mnist):
    """Create the sprite image and label TSV for the first 1024 MNIST test
    samples under `data_path` (relative to the current working directory).
    Returns the (sprite_path, label_path) pair."""
    test_images = mnist.test.images[:1024]
    test_labels = mnist.test.labels[:1024]
    base_dir = path.join(getcwd(), data_path)
    sprite_file = path.join(base_dir, 'sprite_1024.png')
    labels_file = path.join(base_dir, 'labels_1024.tsv')
    build_sprite(test_images, sprite_file)
    build_labels(test_labels, labels_file)
    return sprite_file, labels_file
def embedding_initializer(layer, embedding_batch, writer, image_shape, sprite_path, label_path):
    """Create an embedding variable fed from `layer` and register it with the
    TensorBoard projector (sprite thumbnails + metadata labels).
    Returns the assign op that copies `layer` into the embedding variable."""
    # Embedding
    nodes = int(layer.shape[-1])
    embedding = tf.Variable(tf.zeros([embedding_batch, nodes]), name="Embedding")
    assignment = embedding.assign(layer)
    # Projector config ties the embedding tensor to its label metadata file.
    config = projector.ProjectorConfig()
    embedding_config = config.embeddings.add()
    embedding_config.tensor_name = embedding.name
    embedding_config.metadata_path = label_path
    # Specify the width and height of a single thumbnail.
    embedding_config.sprite.image_path = sprite_path
    embedding_config.sprite.single_image_dim.extend(image_shape)
    projector.visualize_embeddings(writer, config)
    return assignment
def build_text_metadata(index_word_map, logs_path):
    """
    Maps word / index to .tsv (one "word<TAB>index" row per entry, after a
    "Name<TAB>Class" header).
    :type index_word_map: dict
    :type logs_path: basestring
    :rtype: basestring
    :raises TypeError: if any key is not an int
    """
    data_path = path.join(logs_path, 'metadata.tsv')
    # Validate every key (the old code indexed list(keys)[0], which crashed
    # with IndexError on an empty dict and missed bad keys after the first).
    if not all(isinstance(key, int) for key in index_word_map):
        raise TypeError("Keys must be of type `int`")
    meta_file = path.join(getcwd(), data_path)
    with open(meta_file, "w") as metadata:
        metadata.write("Name\tClass\n")
        for k, v in index_word_map.items():
            metadata.write("%s\t%d\n" % (v, k))
    return meta_file
def embedding_text(embeddings, writer, label_path):
    """
    Sets up embeddings and metadata for the TensorBoard projector.
    :type embeddings: tf.Variable
    :type writer: tf.summary.FileWriter
    :type label_path: basestring
    """
    config = projector.ProjectorConfig()
    embedding_config = config.embeddings.add()
    embedding_config.tensor_name = embeddings.name
    # Link embedding to its metadata file
    embedding_config.metadata_path = label_path
    # Registers the config with the writer's log dir for TensorBoard.
    projector.visualize_embeddings(writer, config)
| 30.225 | 96 | 0.680521 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,143 | 0.236352 |
f0c3d53843280be100078a8627ba65bbe0348c16 | 141 | py | Python | test/example_project/main_package/workflow1.py | Tismas/bigflow | 6a4a14616d66beeaf45700ea340c97d797a1f9e5 | [
"Apache-2.0"
] | 63 | 2020-08-15T19:02:06.000Z | 2022-03-29T16:19:00.000Z | test/example_project/main_package/workflow1.py | Tismas/bigflow | 6a4a14616d66beeaf45700ea340c97d797a1f9e5 | [
"Apache-2.0"
] | 133 | 2020-08-18T03:51:05.000Z | 2022-03-05T13:43:22.000Z | test/example_project/main_package/workflow1.py | Tismas/bigflow | 6a4a14616d66beeaf45700ea340c97d797a1f9e5 | [
"Apache-2.0"
] | 10 | 2020-08-25T05:19:31.000Z | 2022-02-03T10:33:41.000Z | import bigflow as bf
from .job import ExampleJob
workflow1 = bf.Workflow(
workflow_id='workflow1',
definition=[ExampleJob('job1')]) | 20.142857 | 36 | 0.730496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.120567 |
f0c4871928dffeb7e7e0aad03825633ae820c35a | 2,076 | py | Python | hello/migrations/0002_auto_20201116_1409.py | chenyuan99/OwlSavesCats | d8135848db5e6092467ee0d31aa46c36599cace1 | [
"MIT"
] | null | null | null | hello/migrations/0002_auto_20201116_1409.py | chenyuan99/OwlSavesCats | d8135848db5e6092467ee0d31aa46c36599cace1 | [
"MIT"
] | null | null | null | hello/migrations/0002_auto_20201116_1409.py | chenyuan99/OwlSavesCats | d8135848db5e6092467ee0d31aa46c36599cace1 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-11-16 19:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('hello', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('realname', models.CharField(max_length=64)),
('phone', models.CharField(max_length=16)),
('email', models.EmailField(max_length=254)),
('sign', models.BooleanField()),
('create_time', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ['-id'],
},
),
migrations.CreateModel(
name='paperclip',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('abstract', models.CharField(max_length=200)),
('publish_time', models.DateTimeField()),
('create_time', models.DateTimeField(auto_now=True)),
('pid', models.CharField(max_length=16)),
],
),
migrations.AlterUniqueTogether(
name='guest',
unique_together=None,
),
migrations.RemoveField(
model_name='guest',
name='event',
),
migrations.DeleteModel(
name='Event',
),
migrations.DeleteModel(
name='Guest',
),
migrations.AddField(
model_name='author',
name='paperclip',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hello.paperclip'),
),
migrations.AlterUniqueTogether(
name='author',
unique_together={('phone', 'paperclip')},
),
]
| 32.952381 | 114 | 0.53131 | 1,950 | 0.939306 | 0 | 0 | 0 | 0 | 0 | 0 | 307 | 0.147881 |
f0c48e7fef50233e0f0dc3c44b24dc530a2c32de | 1,307 | py | Python | Curso Em Video-python/PYTHON (MUNDO 1, MUNDO 2 E MUNDO 3)/pythonAulas/Aula18-listas-dentro-listas.py | AlamoVinicius/code-pratice | 924a3ff782caf3695bbeeac39fa02fb23781cd75 | [
"MIT"
] | null | null | null | Curso Em Video-python/PYTHON (MUNDO 1, MUNDO 2 E MUNDO 3)/pythonAulas/Aula18-listas-dentro-listas.py | AlamoVinicius/code-pratice | 924a3ff782caf3695bbeeac39fa02fb23781cd75 | [
"MIT"
] | null | null | null | Curso Em Video-python/PYTHON (MUNDO 1, MUNDO 2 E MUNDO 3)/pythonAulas/Aula18-listas-dentro-listas.py | AlamoVinicius/code-pratice | 924a3ff782caf3695bbeeac39fa02fb23781cd75 | [
"MIT"
] | null | null | null | """ aula sobre lista 18 repare como as listas podem ser incluidas dentro de uma lista. """
# Lesson 18 tutorial script: lists inside lists. Interactive -- reads three
# name/age pairs from stdin and reports who is of age. Comments translated
# to English; user-facing strings kept in Portuguese.
teste = []
teste.append('Álamo')
teste.append(26)
galera = []
galera.append(teste[:]) # a copy with [:] is needed so 'galera' does not just alias 'teste'
teste[0] = 'Francielli'
teste[1] = 22
galera.append(teste)
print(galera[:])
" another way"
galera2 = [['joão', 19], ['Ana', 33], ['Joaquim', 13], ['Maria', 45]]
print(galera2[0]) # this way only element 0 of the outer list is shown
print(galera2[0][0]) # this way only element 0 of inner list 0 is shown
print('usando o for para mostrar as os dados')
for p in galera2:
    print(p[0])
galera3 = []
dado = list()
totmai = totmen = 0
for c in range(0, 3):
    dado.append(str(input('Nome: ')))
    dado.append(int(input('Idade: ')))
    galera3.append(dado[:]) # do not forget the [:]: it stores a copy; without it the clear() below
    # would wipe both lists
    dado.clear() # empty 'dado' for the next entry
print(galera3)
for dados in galera3: # count people of age
    if dados[1] >= 21: # dados[1] holds the age
        print(f'{dados[0]} é maior de idade. ')
        totmai += 1
    else:
        print(f'{dados[0]} é menor de idade ')
        totmen += 1
print(f'Temos {totmai} maiores e {totmen} menores de idade. ') | 34.394737 | 118 | 0.643458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 696 | 0.528474 |
f0c50e60679a31772ae1850437c877c7672b05e0 | 3,992 | py | Python | src/flups/io.py | spalato/flups | b878920b01a073938b312dccc1d30a64bc9c30e5 | [
"MIT"
] | null | null | null | src/flups/io.py | spalato/flups | b878920b01a073938b312dccc1d30a64bc9c30e5 | [
"MIT"
] | null | null | null | src/flups/io.py | spalato/flups | b878920b01a073938b312dccc1d30a64bc9c30e5 | [
"MIT"
] | null | null | null | # flups.io: tools for reading and writing files
import re
import logging
import numpy as np
from .calib import load_latest, calibration
logger = logging.getLogger(__name__)
def read_asc(fname):
    """
    Read one Andor Solis ASCII (`.asc`) file into a numpy array.
    The data section starts after a triple blank line; rows are
    comma-separated and empty lines are skipped.
    """
    logger.debug("Loading `.asc` file: %s", fname)
    with open(fname) as handle:
        text = handle.read()
    data_start = text.find("\n\n\n")
    rows = (line for line in text[data_start:].splitlines() if line)
    return np.loadtxt(rows, delimiter=",")
def load_asc_series(fnames, calib=None, step=None):
    """
    Load a series of Andor Solis `asc` files. Computes the delays and wl.
    Parameters
    ----------
    fnames: iterable of filenames.
        The list of files to load.
    calib: flups.calib.calibration; array-like of shape (2,) or None
        Wavelength calibration used to convert the pixels to wavelength.
        The parameters can also be passed as an array: `[b0, b1]`, where `b0` is
        the initial value assuming 0-based indexing. If `None` (default), uses
        the latest calibration from `flups.calib`
    step: float or None
        The timestep, in fs. If `None` (default), the timestep will be found
        from the filename as `_sNNN_`.
    Returns
    -------
    delays : (M,) np.ndarray
        Delays, fs. Starts from 0.
    wl : (N,) np.ndarray
        Wavelengths, nm.
    trace : (M,N) np.ndarray
        Signal intensity
    """
    # read the data (column 1 of each file is the signal intensity)
    trace = [read_asc(fn)[:,1] for fn in fnames]
    # TODO: change to proper error.
    assert np.allclose([t.size for t in trace], trace[0].size) # check they all have the same length
    trace = np.array(trace)
    # compute time axis; when `step` is not given, parse it from the first
    # filename. NOTE(review): `step or ...` also triggers for step=0.
    step = step or float(re.search("_s(\d+)_", fnames[0]).group(1))
    n_pix = trace.shape[1]
    delays = np.arange(0, trace.shape[0])*step
    # compute wavelength axis from the pixel indices
    pixels = np.arange(n_pix)
    if calib is None:
        calib = load_latest()
    if isinstance(calib, calibration):
        wl = calib.calibrate(pixels)
    else:
        # bare [b0, b1] coefficients: wl = b0 + b1 * pixel
        b0, b1 = calib
        wl = b0 + b1*pixels
    assert trace.shape == (delays.size, wl.size)
    return delays, wl, trace
def load_npz(fname):
"""
Load data from an npz archive.
Parameters
----------
fname : str
Path to the archive.
Returns
-------
delays : (M,) np.ndarray
Delays, fs. Starts from 0.
wl : (N,) np.ndarray
Wavelengths, nm.
trace : (M,N) np.ndarray
Signal intensity
"""
df = np.load(fname)
delays = df["delays"]
trace = df["trace"]
wl = df["wl"]
return delays, wl, trace
def load_txt(fname):
"""
Load data from a ".txt" file.
The first element is discarded (ie: top left corner), the first column
contains the delays, the first row contains the wavelength, and the rest
contains the signal intensity.
Parameters
----------
fname : str
Path to the archive.
Returns
-------
delays : (M,) np.ndarray
Delays, fs. Starts from 0.
wl : (N,) np.ndarray
Wavelengths, nm.
trace : (M,N) np.ndarray
Signal intensity
"""
cnt = np.loadtxt(fname)
delays = cnt[1:,0]
wl = cnt[0,1:]
trace = cnt[1:,1:]
return delays, wl, trace
def save_txt(fname, delays, wl, trace):
"""
Saves the data in a `.txt` file.
The first element is undefined, the first column contains the delays, the
first row contains the wavelengths and the rest contains the signal
intensity.
Parameters
----------
fname : str
Path to the archive.
delays : (M,) np.ndarray
Delays, fs. Starts from 0.
wl : (N,) np.ndarray
Wavelengths, nm.
trace : (M,N) np.ndarray
Signal intensity
See also
--------
flups.io.load_txt
"""
cnt = np.full([s+1 for s in trace.shape], np.nan)
cnt[1:,0] = delays
cnt[0,1:] = wl
cnt[1:,1:] = trace
np.savetxt(fname, cnt, fmt="%.06g")
| 27.342466 | 100 | 0.596443 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,517 | 0.630511 |
f0c5f63e6ccfe9b9da4d7242b7a76b297f2ab336 | 2,521 | py | Python | tests/test_text_search.py | jdvala/kulhadi | 9031d32070582fa46a74f760203882c213d02605 | [
"MIT"
] | 3 | 2021-10-31T07:02:04.000Z | 2022-01-10T12:55:27.000Z | tests/test_text_search.py | jdvala/kulhadi | 9031d32070582fa46a74f760203882c213d02605 | [
"MIT"
] | 2 | 2021-10-30T11:28:10.000Z | 2021-10-30T11:30:28.000Z | tests/test_text_search.py | jdvala/kawadi | 9031d32070582fa46a74f760203882c213d02605 | [
"MIT"
] | null | null | null | import multiprocessing as mp
import pytest
from kawadi.text_search import SearchInText
@pytest.fixture()
def input_data():
text_to_find = "String distance algorithm"
text_to_search = """SIFT4 is a general purpose string distance algorithm inspired by JaroWinkler and Longest Common Subsequence. It was developed to produce a distance measure that matches as close as possible to the human perception of string distance. Hence it takes into account elements like character substitution, character distance, longest common subsequence etc. It was developed using experimental testing, and without theoretical background."""
return text_to_find, text_to_search
@pytest.fixture()
def output():
return [
{
"sim_score": 1.0,
"searched_text": "string distance algorithm",
"to_find": "string distance algorithm",
"start": 27,
"end": 52,
}
]
def custom_score(**kwargs):
if kwargs["slide_of_text"] == kwargs["text_to_find"]:
return 1.0
else:
return 0.0
def test_search_in_text(input_data, output) -> None:
search_text = SearchInText()
result = search_text.find_in_text(input_data[0], input_data[1])
assert output == result
# test multiprocessing
search_text = SearchInText(multiprocessing=True, max_workers=4)
result = search_text.find_in_text(input_data[0], input_data[1])
assert output == result
# test threshold
search_text = SearchInText(
score_threshold=0.99, multiprocessing=True, max_workers=4
)
result = search_text.find_in_text("something stupid", input_data[1])
assert result == []
# test max_workers
search_text = SearchInText(score_threshold=0.99, multiprocessing=True)
assert search_text.max_workers == mp.cpu_count()
def test_search_in_text_custom_score(input_data, output) -> None:
search_text = SearchInText(custom_score_func=custom_score)
result = search_text.find_in_text(input_data[0], input_data[1])
assert output == result
# test if invalid output
search_text = SearchInText(score_threshold=1, custom_score_func=custom_score)
with pytest.raises(ValueError):
result = search_text.find_in_text(input_data[0], input_data[1])
def test_search_in_text_sliding_window_errors() -> None:
search_text = SearchInText()
with pytest.raises(Exception):
search_text.find_in_text("ABC", "")
with pytest.raises(Exception):
search_text.find_in_text("", "ABC")
| 33.171053 | 459 | 0.714002 | 0 | 0 | 0 | 0 | 841 | 0.333598 | 0 | 0 | 707 | 0.280444 |
f0ca3d609391dc32aa46d1c4b4ec4ee3f9a34e0a | 448 | py | Python | cbandits/core/bayesian_nn.py | AlliedToasters/dev_bandits | 7e3655bd5a91854951a52d0f037ee06aefb2922c | [
"MIT"
] | null | null | null | cbandits/core/bayesian_nn.py | AlliedToasters/dev_bandits | 7e3655bd5a91854951a52d0f037ee06aefb2922c | [
"MIT"
] | null | null | null | cbandits/core/bayesian_nn.py | AlliedToasters/dev_bandits | 7e3655bd5a91854951a52d0f037ee06aefb2922c | [
"MIT"
] | null | null | null | """Define the abstract class for Bayesian Neural Networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class BayesianNN(object):
"""A Bayesian neural network keeps a distribution over neural nets."""
def __init__(self, optimizer):
pass
def build_model(self):
pass
def train(self, data):
pass
def sample(self, steps):
pass | 21.333333 | 74 | 0.694196 | 274 | 0.611607 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.292411 |
f0cb07dafe313489f043d26fe74c8e52bf97cd97 | 99 | py | Python | maidwhite/__init__.py | tihtw/maidwhite-python | 0f7613029bf12118c901273aa26aa89e843bd6ed | [
"Apache-2.0"
] | 1 | 2021-01-12T17:13:46.000Z | 2021-01-12T17:13:46.000Z | maidwhite/__init__.py | tihtw/maidwhite-python | 0f7613029bf12118c901273aa26aa89e843bd6ed | [
"Apache-2.0"
] | null | null | null | maidwhite/__init__.py | tihtw/maidwhite-python | 0f7613029bf12118c901273aa26aa89e843bd6ed | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from .MaidWhite import MaidWhite
name = "MaidWhite"
__all__ = (MaidWhite) | 16.5 | 32 | 0.676768 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.343434 |
f0cb537671d132b5a4770577912b310f7cda8e12 | 832 | py | Python | app/main/config.py | leskeylevy/flaskBackend | 1cada27072517234f37991a3ef655f8c63d13867 | [
"MIT"
] | null | null | null | app/main/config.py | leskeylevy/flaskBackend | 1cada27072517234f37991a3ef655f8c63d13867 | [
"MIT"
] | null | null | null | app/main/config.py | leskeylevy/flaskBackend | 1cada27072517234f37991a3ef655f8c63d13867 | [
"MIT"
] | null | null | null | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
DEBUG = False
class DevelopmentConfig:
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://levy:Dadiesboy12@localhost/ronchezfitness'
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig:
DEBUG = False
SQLALCHEMY_DATABASE_URI = os.environ.get('postgres_uri')
SQLALCHEMY_TRACK_MODIFICATIONS = True
class TestingConfig:
DEBUG = True
TESTING = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'flask_test.db')
PRESERVE_CONTEXT_ON_EXCEPTION = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
config_by_name = dict(
dev=DevelopmentConfig,
test=TestingConfig,
prod=ProductionConfig
)
key = Config.SECRET_KEY
| 21.333333 | 95 | 0.742788 | 626 | 0.752404 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.141827 |
f0cba37d4b9ac55bb211ec75a31599946da59b48 | 1,654 | py | Python | crosstab/mega_analysis/lateralised_intensities.py | karandahele/Epilepsy-Repository | 58f970b25808c0cdcd0dc44ab107cf00d9de74c2 | [
"MIT"
] | 2 | 2019-11-20T11:12:22.000Z | 2019-12-23T21:19:34.000Z | crosstab/mega_analysis/lateralised_intensities.py | karandahele/Epilepsy-Repository | 58f970b25808c0cdcd0dc44ab107cf00d9de74c2 | [
"MIT"
] | 6 | 2020-01-14T17:13:40.000Z | 2020-04-06T09:19:51.000Z | crosstab/mega_analysis/lateralised_intensities.py | karandahele/Epilepsy-Repository | 58f970b25808c0cdcd0dc44ab107cf00d9de74c2 | [
"MIT"
] | 3 | 2020-01-14T17:12:02.000Z | 2020-04-01T13:08:06.000Z | import pandas as pd
from crosstab.mega_analysis.pivot_result_to_pixel_intensities import *
def lateralisation_to_pixel_intensities(all_combined_gifs, df,
semiology_term,
quantiles, method='non-linear', scale_factor=10,
intensity_label='lateralised intensity',
use_semiology_dictionary=False):
"""
runs pivot_result_to_pixel_intensities when the input has already been mapped to gifs as a result of
running QUERY_LATERALISATION.
This is the final step in the query_lateralisation pathway.
Alim-Marvasti Aug 2019
"""
# isn't really a pivot_result but let's use consistent notations:
pivot_result = all_combined_gifs[['pt #s']].T
all_combined_gifs_intensities = pivot_result_to_pixel_intensities(pivot_result, df,
method=method, scale_factor=scale_factor, quantiles=quantiles,
use_main_df_calibration=False)
# now we just need to transpose it and add the other columns back
a2 = all_combined_gifs[['Gif Parcellations']].T
a3 = all_combined_gifs[['Semiology Term']].T
all_combined_gifs_intensities.index = [intensity_label]
all_lateralised_gifs = pd.concat([a3, a2, pivot_result, all_combined_gifs_intensities], sort=False).T
all_lateralised_gifs.loc[0, 'Semiology Term'] = str(semiology_term)
all_lateralised_gifs.loc[1, 'Semiology Term'] = 'use_semiology_dictionary='+str(use_semiology_dictionary)
return all_lateralised_gifs | 50.121212 | 109 | 0.663845 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 511 | 0.308948 |
f0cd195aebcc37de4124760ae9d1f43db09b1611 | 585 | py | Python | requests_test.py | AnakinJiang/PythonDemo | 4e8e8296b098ce541c588fafdf07cf2d6d955d38 | [
"MIT"
] | null | null | null | requests_test.py | AnakinJiang/PythonDemo | 4e8e8296b098ce541c588fafdf07cf2d6d955d38 | [
"MIT"
] | null | null | null | requests_test.py | AnakinJiang/PythonDemo | 4e8e8296b098ce541c588fafdf07cf2d6d955d38 | [
"MIT"
] | null | null | null | '''
@Author: AnakinJiang
@Email: jiangjinpeng319 AT gmail.com
@Descripttion: requests测试demo
@Date: 2019-08-27 15:37:14
@LastEditors: AnakinJiang
@LastEditTime: 2019-08-27 16:55:06
'''
import requests
def get_test():
url1 = 'https://www.douban.com/'
r1 = requests.get(url1)
print(r1.status_code)
print(r1.text)
print(r1.content)
url2 = 'https://www.douban.com/search'
params = {'q': 'python', 'cat': '1001'}
r2 = requests.get(url2,params=params)
print(r2.url)
print(r2.encoding)
print(type(r2.content))
print(r2.headers)
get_test()
| 19.5 | 43 | 0.659829 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 265 | 0.449915 |
f0cfaf531119941e8908019c7d9b113afa90346e | 3,536 | py | Python | allennlp/training/learning_rate_schedulers/polynomial_decay.py | alle-pawols/allennlp | 7d4a67263d7a210aca22d4f2b03e8568d3c34a48 | [
"Apache-2.0"
] | 2 | 2022-01-02T12:15:21.000Z | 2022-01-02T12:15:23.000Z | allennlp/training/learning_rate_schedulers/polynomial_decay.py | alle-pawols/allennlp | 7d4a67263d7a210aca22d4f2b03e8568d3c34a48 | [
"Apache-2.0"
] | 35 | 2021-08-11T13:20:30.000Z | 2022-03-29T13:17:39.000Z | allennlp/training/learning_rate_schedulers/polynomial_decay.py | alle-pawols/allennlp | 7d4a67263d7a210aca22d4f2b03e8568d3c34a48 | [
"Apache-2.0"
] | 1 | 2021-03-31T19:50:08.000Z | 2021-03-31T19:50:08.000Z | from overrides import overrides
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import LearningRateScheduler
@LearningRateScheduler.register("polynomial_decay")
class PolynomialDecay(LearningRateScheduler):
"""
Implements polynomial decay Learning rate scheduling. The learning rate is first
linearly increased for the first `warmup_steps` training steps. Then it is decayed for
`total_steps` - `warmup_steps` from the initial learning rate to `end_learning_rate` using a polynomial
of degree `power`.
Formally,
`lr` = (`initial_lr` - `end_learning_rate`) *
((`total_steps` - `steps`)/(`total_steps` - `warmup_steps`)) ** `power`
# Parameters
total_steps: `int`, required
The total number of steps to adjust the learning rate for.
warmup_steps : `int`, required
The number of steps to linearly increase the learning rate.
power : `float`, optional (default = `1.0`)
The power of the polynomial used for decaying.
end_learning_rate : `float`, optional (default = `0.0`)
Final learning rate to decay towards.
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
num_epochs: int,
num_steps_per_epoch: int,
power=1.0,
warmup_steps=0,
end_learning_rate=0.0,
last_epoch: int = -1,
):
super().__init__(optimizer, last_epoch)
# Sanity check here.
if num_steps_per_epoch is None:
raise ConfigurationError(
"'num_steps_per_epoch' is required for this LR scheduler.\n\n"
"If you know how many batches per epoch for your training data, you can set this value "
"directly in your config. Otherwise you'll need to use compatible settings with your data loader "
"so that it can report an accurate number of batches per epoch. "
"If you're using the MultiProcessDataLoader, "
"this means you either need to set 'batches_per_epoch' "
"or leave 'max_instances_in_memory' as None (if your entire dataset can fit into memory)."
)
self.power = power
self.warmup_steps = warmup_steps
self.total_steps = num_epochs * num_steps_per_epoch
self.end_learning_rate = end_learning_rate
self.steps = 0
self.step_batch(0)
@overrides
def get_values(self):
if self.warmup_steps > 0 and self.steps < self.warmup_steps:
f = self.steps / self.warmup_steps
return [f * lr for lr in self.base_values]
if self.steps >= self.total_steps:
return [self.end_learning_rate for _ in self.base_values]
current_decay_steps = self.total_steps - self.steps
total_decay_steps = self.total_steps - self.warmup_steps
f = (current_decay_steps / total_decay_steps) ** self.power
return [
f * (lr - self.end_learning_rate) + self.end_learning_rate for lr in self.base_values
]
@overrides
def step(self, metric: float = None) -> None:
pass
@overrides
def step_batch(self, batch_num_total: int = None) -> None:
if batch_num_total is None:
self.steps += 1
else:
self.steps = batch_num_total
for param_group, lr in zip(self.optimizer.param_groups, self.get_values()):
param_group[self.param_group_field] = lr
| 37.221053 | 114 | 0.653281 | 3,280 | 0.927602 | 0 | 0 | 3,332 | 0.942308 | 0 | 0 | 1,441 | 0.407523 |
f0cfb602ee2a0f5bc3a7dc61c2685237a493cae5 | 2,983 | py | Python | Scripts/fill_year.py | usuallyunusual/The-Explorer | 1569e8ac01f245cf2e2333070e82ff165abeb118 | [
"MIT"
] | null | null | null | Scripts/fill_year.py | usuallyunusual/The-Explorer | 1569e8ac01f245cf2e2333070e82ff165abeb118 | [
"MIT"
] | 5 | 2021-04-01T09:48:28.000Z | 2021-04-17T09:13:06.000Z | Scripts/fill_year.py | usuallyunusual/The-Explorer | 1569e8ac01f245cf2e2333070e82ff165abeb118 | [
"MIT"
] | 1 | 2020-09-14T11:42:40.000Z | 2020-09-14T11:42:40.000Z |
"""
TO DO:
1. Lot of edge cases not accounted for
2. Could use some unit testing scripts for sanity check
3. What are the bounds for years?
"""
import mysql
from mysql.connector import Error
import re
import numpy as np
def reject_outliers(data, m = 6.):
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d/mdev if mdev else 0.
return data[s<m]
def tag_year():
try:
conn = mysql.connector.connect(host='127.0.0.1',port = 3307,database='explorer_db',user='root',password = '')
if conn.is_connected():
print("Connection successful: ",conn.get_server_info())
cur = conn.cursor(buffered = True)
cur1 = conn.cursor()
cur.execute("SELECT event_key,htext FROM event WHERE htext IS NOT NULL AND event_year IS NULL")
count = 0
cent = {"first":"1st","second":"2nd","third":"3rd","fourth":"4th","fifth":"5th","sixth":"6th","seventh":"7th","eighth":"8th","ninth":"9th","tenth":"10th",
"eleventh":"11th","twelfth":"12th","thirteenth":"13th","fourteenth":"14th","fifteenth":"15th",
"sixteenth":"16th","seventeenth":"17th","eighteenth":"18th","nineteenth":"19th","twentieth":"20th","twentyfirst":"21st"}
mylist = list()
for row in cur:
text = row[1].lower()
pos = text.find("references[edit]")
pos2 = text.find("further reading[edit]")
if pos!=0:
sub1 = text[:pos]
sub2 = text[pos2:]
text = sub1+sub2
#print(text,"\n\n")
if "century" in text:
#print("YES\n\n")
mylist = re.findall("\d+[a-z][a-z]\s*-*century",text)
#print(mylist)
sec_list = re.findall("[f,s,t,e,n][a-z][a-z]+\s*-*century",text)
#print(sec_list)
sec_list = [i.replace(i[:i.find(" century")],cent[i[:i.find(" century")]]) for i in sec_list if (i[:i.find(" century")]) in cent]
mylist = mylist+sec_list
#print(mylist)
mylist = [re.sub(r"[a-z][a-z]\s*-*century","00",i) for i in mylist]
#print(mylist)
years = re.findall('([1][0-9][0-9][0-9])',row[1])
years2 = re.findall('([2][0-1][0-2][0-9])',row[1])
years = years+years2 + mylist
if not years:
allyear = "NULL"
else:
allyear = np.array([int(i) for i in years])
allyear = reject_outliers(allyear)
cur1.execute('''UPDATE event set event_year = %s WHERE event_key = %s''',(str(allyear[0]),row[0]))
#print(allyear)
print(len(allyear),count)
count+=1
conn.commit()
cur.close()
cur1.close()
conn.close()
print(count,"rows")
print("Done check database!")
except Error as e:
print("Error while connecting to MySQL", e)
| 30.131313 | 162 | 0.526316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,052 | 0.352665 |
f0d055f8993a7d6c33e001e9675d7f51424a7021 | 3,505 | py | Python | tests/web/client.py | bigbluedeer/instagram_private_api | e92b6f0b1904d25edf7ad02bc4c270627f461733 | [
"MIT"
] | null | null | null | tests/web/client.py | bigbluedeer/instagram_private_api | e92b6f0b1904d25edf7ad02bc4c270627f461733 | [
"MIT"
] | null | null | null | tests/web/client.py | bigbluedeer/instagram_private_api | e92b6f0b1904d25edf7ad02bc4c270627f461733 | [
"MIT"
] | null | null | null | from ..common import (
WebApiTestBase, WebClientError as ClientError,
WebClientLoginError as ClientLoginError,
WebClient as Client,
compat_mock, compat_urllib_error
)
class ClientTests(WebApiTestBase):
"""Tests for client related functions."""
@staticmethod
def init_all(api):
return [
{
'name': 'test_search',
'test': ClientTests('test_search', api),
},
{
'name': 'test_client_properties',
'test': ClientTests('test_client_properties', api),
'require_auth': True,
},
{
'name': 'test_client_errors',
'test': ClientTests('test_client_errors', api)
},
{
'name': 'test_client_init',
'test': ClientTests('test_client_init', api)
},
{
'name': 'test_login_mock',
'test': ClientTests('test_login_mock', api)
},
{
'name': 'test_unauthed_client',
'test': ClientTests('test_unauthed_client', api)
}
]
@compat_mock.patch('instagram_web_api.Client._make_request')
def test_login_mock(self, make_request):
make_request.side_effect = [
{'status': 'ok', 'authenticated': 'x'},
{'status': 'fail'}
]
self.api.on_login = lambda x: self.assertIsNotNone(x)
self.api.login()
self.api.on_login = None
make_request.assert_called_with(
'https://www.instagram.com/accounts/login/ajax/',
params={
'username': self.api.username,
'password': self.api.password,
'queryParams': '{}'})
with self.assertRaises(ClientLoginError):
self.api.login()
def test_search(self):
results = self.api.search('maru')
self.assertGreaterEqual(len(results['users']), 0)
self.assertGreaterEqual(len(results['hashtags']), 0)
def test_client_properties(self):
self.sleep_interval = 0
self.assertIsNotNone(self.api.csrftoken)
self.assertIsNotNone(self.api.authenticated_user_id)
self.assertTrue(self.api.is_authenticated)
settings = self.api.settings
for k in ('cookie', 'created_ts'):
self.assertIsNotNone(settings.get(k))
self.assertIsNotNone(self.api.cookie_jar.dump())
@compat_mock.patch('instagram_web_api.client.compat_urllib_request.OpenerDirector.open')
def test_client_errors(self, open_mock):
self.sleep_interval = 0
open_mock.side_effect = [
compat_urllib_error.HTTPError('', 404, 'Not Found', None, None),
compat_urllib_error.URLError('No route to host')]
with self.assertRaises(ClientError):
self.api.search('maru')
with self.assertRaises(ClientError):
self.api.search('maru')
@compat_mock.patch('instagram_web_api.Client.csrftoken',
new_callable=compat_mock.PropertyMock, return_value=None)
def test_client_init(self, csrftoken):
with self.assertRaises(ClientError):
self.api.init()
def test_unauthed_client(self):
api = Client()
self.assertFalse(api.is_authenticated)
with self.assertRaises(ClientError):
# Test authenticated method
api.user_following(self.test_user_id)
| 34.362745 | 92 | 0.583452 | 3,319 | 0.946933 | 0 | 0 | 2,364 | 0.674465 | 0 | 0 | 741 | 0.211412 |
f0d05b63ba5129c1c92a989823c1a16fc9dc7cbf | 842 | py | Python | setup.py | dasmanov/pyEcovent | 97efe1382d1a725135f2476b8e45aaf915121b1e | [
"MIT"
] | null | null | null | setup.py | dasmanov/pyEcovent | 97efe1382d1a725135f2476b8e45aaf915121b1e | [
"MIT"
] | null | null | null | setup.py | dasmanov/pyEcovent | 97efe1382d1a725135f2476b8e45aaf915121b1e | [
"MIT"
] | null | null | null | from setuptools import setup
long_description = None
with open("README.md", 'r') as fp:
long_description = fp.read()
setup(
name = 'pyEcovent',
packages = ['ecovent'],
version='0.8.4',
description='Python3 library for single-room energy recovery ventilators from Vents / Blauberg / Flexit',
long_description=long_description,
python_requires='>=3.6.7',
author='Aleksander Lehmann',
author_email='aleksander@flovik.no',
url='https://github.com/aglehmann/pyEcovent',
license="MIT",
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Home Automation',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| 30.071429 | 109 | 0.648456 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 455 | 0.54038 |
f0d08e4db132f744024a09a0a079f7e2cb992ed6 | 5,105 | py | Python | voicepuppet/atvgnet/train_atnet.py | taylorlu/voicepuppet | a0d3ca3296aca15abbfe75663a1bf682fb491efa | [
"MIT"
] | 20 | 2020-10-28T07:32:29.000Z | 2022-03-04T07:15:02.000Z | voicepuppet/atvgnet/train_atnet.py | taylorlu/voicepuppet | a0d3ca3296aca15abbfe75663a1bf682fb491efa | [
"MIT"
] | 4 | 2020-11-24T14:42:44.000Z | 2021-08-05T13:31:16.000Z | voicepuppet/atvgnet/train_atnet.py | taylorlu/voicepuppet | a0d3ca3296aca15abbfe75663a1bf682fb491efa | [
"MIT"
] | 11 | 2020-10-29T07:11:22.000Z | 2022-03-28T07:17:35.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import tensorflow as tf
import numpy as np
import os
from optparse import OptionParser
import logging
from atnet import ATNet
from dataset.generator import ATNetDataGenerator
from plot import *
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def mkdir(dirname):
if not os.path.isdir(dirname):
os.makedirs(dirname)
if (__name__ == '__main__'):
cmd_parser = OptionParser(usage="usage: %prog [options] --config_path <>")
cmd_parser.add_option('--config_path', type="string", dest="config_path",
help='the config yaml file')
opts, argv = cmd_parser.parse_args()
if (opts.config_path is None):
logger.error('Please check your parameters.')
exit(0)
config_path = opts.config_path
if (not os.path.exists(config_path)):
logger.error('config_path not exists')
exit(0)
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
batch_size = 16
### Generator for training setting
train_generator = ATNetDataGenerator(config_path)
params = train_generator.params
params.dataset_path = params.train_dataset_path
params.batch_size = batch_size
train_generator.set_params(params)
train_dataset = train_generator.get_dataset()
### Generator for evaluation setting
eval_generator = ATNetDataGenerator(config_path)
params = eval_generator.params
params.dataset_path = params.eval_dataset_path
params.batch_size = batch_size
eval_generator.set_params(params)
eval_dataset = eval_generator.get_dataset()
sess = tf.Session()
tf.train.start_queue_runners(sess=sess)
train_iter = train_dataset.make_one_shot_iterator()
eval_iter = eval_dataset.make_one_shot_iterator()
### ATNet setting
atnet = ATNet(config_path)
params = atnet.params
epochs = params.training['epochs']
params.add_hparam('max_to_keep', 10)
params.add_hparam('save_dir', 'ckpt_atnet')
params.add_hparam('save_name', 'atnet')
params.add_hparam('save_step', 1000)
params.add_hparam('eval_step', 1000)
params.add_hparam('summary_step', 100)
params.add_hparam('eval_visual_dir', 'log/eval_atnet')
params.add_hparam('summary_dir', 'log/summary_atnet')
params.batch_size = batch_size
atnet.set_params(params)
mean = np.load(params.mean_file)
mkdir(params.save_dir)
mkdir(params.eval_visual_dir)
mkdir(params.summary_dir)
train_nodes = atnet.build_train_op(*train_iter.get_next())
eval_nodes = atnet.build_eval_op(*eval_iter.get_next())
sess.run(tf.global_variables_initializer())
# Restore from save_dir
if ('checkpoint' in os.listdir(params.save_dir)):
tf.train.Saver().restore(sess, tf.train.latest_checkpoint(params.save_dir))
tf.summary.scalar("loss", train_nodes['Loss'])
tf.summary.scalar("lr", train_nodes['Lr'])
grads = train_nodes['Grads']
tvars = train_nodes['Tvars']
# Add histograms for gradients.
for i, grad in enumerate(grads):
if grad is not None:
var = tvars[i]
if ('BatchNorm' not in var.op.name):
tf.summary.histogram(var.op.name + '/gradients', grad)
merge_summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(params.summary_dir, graph=sess.graph)
for i in range(epochs):
### Run training
result = sess.run([train_nodes['Train_op'],
merge_summary_op,
train_nodes['Loss'],
train_nodes['Lr'],
train_nodes['Global_step'],
train_nodes['Mfccs'],
train_nodes['Poses'],
train_nodes['Ears'],
train_nodes['Seq_len'],
train_nodes['Landmark'],
train_nodes['Example_landmark']])
_, summary, loss, lr, global_step, mfccs, poses, ears, seq_len, landmark, example_landmark = result
print('Step {}: Loss= {:.3f}, Lr= {:.2e}'.format(global_step, loss, lr))
if (global_step % params.summary_step == 0):
summary_writer.add_summary(summary, global_step)
### Run evaluation
if (global_step % params.eval_step == 0):
result = sess.run([eval_nodes['Loss'],
eval_nodes['Seq_len'],
eval_nodes['Landmark'],
eval_nodes['LandmarkDecoder']])
loss, seq_len, real_lmk_seq, lmk_seq = result
print('\r\nEvaluation >>> Loss= {:.3f}'.format(loss))
plot_lmk_seq(params.eval_visual_dir, global_step, mean, seq_len, real_lmk_seq, lmk_seq)
### Save checkpoint
if (global_step % params.save_step == 0):
tf.train.Saver(max_to_keep=params.max_to_keep, var_list=tf.global_variables()).save(sess,
os.path.join(params.save_dir,
params.save_name),
global_step=global_step)
| 35.950704 | 121 | 0.641724 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 914 | 0.17904 |
f0d0afc35b9e84aba33193038e8752efcd1d32f7 | 1,695 | py | Python | tests/test_errors.py | althonos/pyhmmer | eb6fe7c0e74557e0ae9d647693711583d2d86b68 | [
"MIT"
] | 26 | 2020-11-10T22:57:49.000Z | 2022-03-24T16:58:55.000Z | tests/test_errors.py | althonos/pyhmmer | eb6fe7c0e74557e0ae9d647693711583d2d86b68 | [
"MIT"
] | 13 | 2020-11-12T11:41:08.000Z | 2022-03-09T18:17:48.000Z | tests/test_errors.py | althonos/pyhmmer | eb6fe7c0e74557e0ae9d647693711583d2d86b68 | [
"MIT"
] | 2 | 2021-04-04T05:13:07.000Z | 2021-11-30T09:11:23.000Z | import unittest
from pyhmmer.easel import Alphabet
from pyhmmer.errors import UnexpectedError, AllocationError, EaselError, AlphabetMismatch
class TestErrors(unittest.TestCase):
def test_unexpected_error(self):
err = UnexpectedError(1, "p7_ReconfigLength")
self.assertEqual(repr(err), "UnexpectedError(1, 'p7_ReconfigLength')")
self.assertEqual(str(err), "Unexpected error occurred in 'p7_ReconfigLength': eslFAIL (status code 1)")
def test_allocation_error(self):
err = AllocationError("ESL_SQ", 16)
self.assertEqual(repr(err), "AllocationError('ESL_SQ', 16)")
self.assertEqual(str(err), "Could not allocate 16 bytes for type ESL_SQ")
err2 = AllocationError("float", 4, 32)
self.assertEqual(repr(err2), "AllocationError('float', 4, 32)")
self.assertEqual(str(err2), "Could not allocate 128 bytes for an array of 32 float")
def test_easel_error(self):
err = EaselError(1, "failure")
self.assertEqual(repr(err), "EaselError(1, 'failure')")
self.assertEqual(str(err), "Error raised from C code: failure, eslFAIL (status code 1)")
def test_alphabet_mismatch(self):
err = AlphabetMismatch(Alphabet.dna(), Alphabet.rna())
self.assertEqual(repr(err), "AlphabetMismatch(Alphabet.dna(), Alphabet.rna())")
self.assertEqual(str(err), "Expected Alphabet.dna(), found Alphabet.rna()")
self.assertNotEqual(err, 1)
err2 = AlphabetMismatch(Alphabet.dna(), Alphabet.rna())
self.assertEqual(err, err)
self.assertEqual(err, err2)
err3 = AlphabetMismatch(Alphabet.dna(), Alphabet.amino())
self.assertNotEqual(err, err3)
| 42.375 | 111 | 0.684366 | 1,550 | 0.914454 | 0 | 0 | 0 | 0 | 0 | 0 | 506 | 0.298525 |
f0d0b9a716fb2240576fa6c1da54c8656e26b395 | 1,196 | py | Python | electrum_gui/common/provider/chains/cfx/sdk/cfx_address/base32.py | BixinKey/electrum | f5de4e74e313b9b569f13ba6ab9142a38bf095f2 | [
"MIT"
] | 12 | 2020-11-12T08:53:05.000Z | 2021-07-06T17:30:39.000Z | electrum_gui/common/provider/chains/cfx/sdk/cfx_address/base32.py | liyanhrxy/electrum | 107608ef201ff1d20d2f6091c257b1ceff9b7362 | [
"MIT"
] | 209 | 2020-09-23T06:58:18.000Z | 2021-11-18T11:25:41.000Z | electrum_gui/common/provider/chains/cfx/sdk/cfx_address/base32.py | liyanhrxy/electrum | 107608ef201ff1d20d2f6091c257b1ceff9b7362 | [
"MIT"
] | 19 | 2020-10-13T11:42:26.000Z | 2022-02-06T01:26:34.000Z | import base64
STANDARD_ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
CUSTOM_ALPHABET = 'abcdefghjkmnprstuvwxyz0123456789'
ENCODE_TRANS = str.maketrans(STANDARD_ALPHABET, CUSTOM_ALPHABET)
DECODE_TRANS = str.maketrans(CUSTOM_ALPHABET, STANDARD_ALPHABET)
PADDING_LETTER = '='
def encode(buffer):
assert type(buffer) == bytes or type(buffer) == bytearray, "please pass an bytes"
b32encoded = base64.b32encode(buffer) # encode bytes
b32str = b32encoded.decode().replace(PADDING_LETTER, "") # translate chars
return b32str.translate(ENCODE_TRANS) # remove padding char
def decode(b32str):
assert type(b32str) == str, "please pass an str"
# pad to 8's multiple with '='
b32len = len(b32str)
if b32len % 8 > 0:
padded_len = b32len + (8 - b32len % 8)
b32str = b32str.ljust(padded_len, PADDING_LETTER)
# translate and decode
return base64.b32decode(b32str.translate(DECODE_TRANS))
def decode_to_words(b32str):
result = bytearray()
for c in b32str:
result.append(CUSTOM_ALPHABET.index(c))
return result
def encode_words(words):
result = ""
for v in words:
result += CUSTOM_ALPHABET[v]
return result
| 29.9 | 85 | 0.70903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 221 | 0.184783 |
f0d117c241c9bb2cd6a66d2e35612733475a8a0e | 437 | py | Python | saefportal/datalakes/models/__init__.py | harry-consulting/SAEF1 | 055d6e492ba76f90e3248b9da2985fdfe0c6b430 | [
"BSD-2-Clause"
] | null | null | null | saefportal/datalakes/models/__init__.py | harry-consulting/SAEF1 | 055d6e492ba76f90e3248b9da2985fdfe0c6b430 | [
"BSD-2-Clause"
] | null | null | null | saefportal/datalakes/models/__init__.py | harry-consulting/SAEF1 | 055d6e492ba76f90e3248b9da2985fdfe0c6b430 | [
"BSD-2-Clause"
] | 1 | 2020-12-16T15:02:52.000Z | 2020-12-16T15:02:52.000Z | from .one_drive_datalake import OneDriveDatalake
from .google_drive_datalake import GoogleDriveDatalake
from .dropbox_datalake import DropboxDatalake
from .google_cloud_storage_datalake import GoogleCloudStorageDatalake
from .azure_blob_storage_datalake import AzureBlobStorageDatalake
from .azure_data_lake_datalake import AzureDataLakeDatalake
from .amazon_s3_datalake import AmazonS3Datalake
from .local_datalake import LocalDatalake
| 48.555556 | 69 | 0.908467 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f0d3f90aee2a25f409650fff2ffe65e97e26ad3b | 4,319 | py | Python | PathPlanning/AStar/a_star.py | Pythocrates/PythonRobotics | e16f070a59ef9edf7200361d2ab0171a6f540f1e | [
"MIT"
] | null | null | null | PathPlanning/AStar/a_star.py | Pythocrates/PythonRobotics | e16f070a59ef9edf7200361d2ab0171a6f540f1e | [
"MIT"
] | null | null | null | PathPlanning/AStar/a_star.py | Pythocrates/PythonRobotics | e16f070a59ef9edf7200361d2ab0171a6f540f1e | [
"MIT"
] | null | null | null | """
A* grid planning
author: Atsushi Sakai(@Atsushi_twi)
Nikos Kanargias (nkana@tee.gr)
See Wikipedia article (https://en.wikipedia.org/wiki/A*_search_algorithm)
"""
import math
from node import Node
from obstacle_map import Position
class AStarPlanner:
    """A* grid search planner operating on an ``obstacle_map`` grid."""

    def __init__(self, obstacle_map):
        """
        Initialize grid map for a star planning.

        obstacle_map: grid map providing world<->grid coordinate conversion;
        per-cell traversability is queried later via ``Node.is_ok``.
        """
        self.obstacle_map = obstacle_map
        self.motion = self.get_motion_model()
        self._handlers = []        # observers notified of search progress
        self._all_nodes = dict()   # memoized Node per grid position

    def _create_node(self, *args, **kwargs):
        # Every node keeps a back-reference to this planner as its parent.
        return Node(*args, parent=self, **kwargs)

    def add_handler(self, handler):
        """Register an observer receiving on_position_update / on_final_path."""
        self._handlers.append(handler)

    def node_at(self, world_position):
        """Return the (cached) node for a world-coordinate position."""
        grid_position = self.obstacle_map.world_to_grid(world_position)
        return self.node_at_grid(grid_position)

    def node_at_grid(self, grid_position):
        """Return the cached node for a grid position, creating it on first use."""
        try:
            node = self._all_nodes[grid_position]
        except KeyError:
            node = self._create_node(grid_position)
            self._all_nodes[grid_position] = node
        return node

    def plan(self, start_position, goal_position):
        """
        A star path search.

        input:
            start_position: start position in world coordinates
            goal_position: goal position in world coordinates

        output:
            list of world positions of the final path (goal first,
            as produced by calc_final_path)
        """
        start_node = self.node_at(start_position)
        goal_node = self.node_at(goal_position)

        open_set = {start_node}
        closed_set = set()

        while open_set:
            # Pick the open node with the lowest f = g + h.
            current = min(
                open_set,
                key=lambda o: o.cost + self.calc_heuristic(goal_node, o)
            )

            # Remove the item from the open set, and add it to the closed set
            open_set.remove(current)
            closed_set.add(current)

            # show graph
            for handler in self._handlers:
                handler.on_position_update(current.world_position)

            if current is goal_node:
                print("Goal found")
                break

            # expand_grid search grid based on motion model
            for motion in self.motion:
                new_cost = current.cost + motion[2]
                node = self.node_at_grid(
                    Position(
                        current.grid_position.x + motion[0],
                        current.grid_position.y + motion[1],
                    )
                )

                # If the node is not safe, do nothing
                if not node.is_ok:
                    continue

                if node in closed_set:
                    continue

                if node not in open_set:
                    open_set.add(node)  # discovered a new node
                    node.update(cost=new_cost, previous=current)
                elif node.cost > new_cost:
                    # This path is the best until now. record it
                    node.update(cost=new_cost, previous=current)

        path = self.calc_final_path(goal_node)
        for handler in self._handlers:
            handler.on_final_path(path)
        return path

    @staticmethod
    def calc_final_path(goal_node):
        # generate final course by walking the `previous` links back
        # from the goal; start node's falsy `previous` terminates the walk.
        result = list()
        node = goal_node
        while True:
            result.append(node.world_position)
            node = node.previous
            if not node:
                return result

    @staticmethod
    def calc_heuristic(node_1, node_2):
        """Euclidean-distance heuristic between two nodes (grid units)."""
        weight = 1.0  # weight of heuristic
        pos_1 = node_1.grid_position
        pos_2 = node_2.grid_position
        return weight * math.hypot(pos_1.x - pos_2.x, pos_1.y - pos_2.y)

    @staticmethod
    def get_motion_model():
        # dx, dy, cost — 8-connected moves; diagonals cost sqrt(2)
        motion = [[1, 0, 1],
                  [0, 1, 1],
                  [-1, 0, 1],
                  [0, -1, 1],
                  [-1, -1, math.sqrt(2)],
                  [-1, 1, math.sqrt(2)],
                  [1, -1, math.sqrt(2)],
                  [1, 1, math.sqrt(2)]]
        return motion
f0d567896a4d3766c40a65ce7fc805db88f92454 | 1,995 | py | Python | Wordle v3.py | favet/wordle | f0390775baec8534cdab564ed79225b530cbd404 | [
"MIT"
] | null | null | null | Wordle v3.py | favet/wordle | f0390775baec8534cdab564ed79225b530cbd404 | [
"MIT"
] | null | null | null | Wordle v3.py | favet/wordle | f0390775baec8534cdab564ed79225b530cbd404 | [
"MIT"
] | null | null | null | wordle = open("Wordle.txt", "r")
wordList = []
for line in wordle:
stripped_line = line.strip()
wordList.append(stripped_line)
mutableList = []
outcomeList = []
def blackLetter(letter, list):
    """Remove, in place, every word in *list* that contains *letter*.

    Rebuilds the list via slice assignment instead of calling ``remove``
    while iterating — the original skipped the element that followed each
    removal, leaving some matching words in the list.
    """
    list[:] = [word for word in list if letter not in word]
def greenLetter(letter, location, greenList):
    """First green-letter filter.

    Appends to the module-level ``mutableList`` every word in *greenList*
    whose character at index *location* equals *letter*; any non-matching
    word already present in ``mutableList`` is removed from it.

    NOTE(review): repeated calls therefore both grow and prune
    ``mutableList`` — presumably intended as the first Wordle "green"
    constraint; confirm against the driver code below.
    """
    for word in greenList:
        if word[location] == letter:
            mutableList.append(word)
            continue
        elif word in mutableList:
            mutableList.remove(word)
            # greenList.remove(word)
def moreGreenLetter(letter, location, greenList):
    """Subsequent green-letter filter.

    Removes from the module-level ``mutableList`` every word in *greenList*
    whose character at index *location* differs from *letter*.

    NOTE(review): the driver below calls this with ``mutableList`` itself as
    *greenList*, so this removes elements from the list being iterated —
    Python may then skip the element after each removal; verify intended.
    """
    for word in greenList:
        if word[location] == letter:
            # mutableList.append(word)
            continue
        elif word[location] != letter:
            mutableList.remove(word)
            # greenList.remove(word)
# Example run: first keep words with "z" at index 4, then additionally
# require "r" at index 1; print the full list and the filtered candidates.
greenLetter("z", 4, wordList)
moreGreenLetter("r", 1, mutableList)
print(wordList)
print(mutableList)
# def checkforyellow(yellowIn, yellowOut, wordCheck=0):
# for word in yellowIn:
# if blackSet.isdisjoint(word):
# for key in yellowKeys:
# if wordCheck == len(yellowKeys):
# print(word)
# yellowOut.append(word)
# break
# elif word[yellowDict[key]] == key:
# break
# elif key not in word:
# break
# elif key in word:
# print(word)
# wordCheck += 1
# else: break
#
#
# def checkforgreen(greenIn, greenOut, wordCheck=0):
# for word in greenIn:
# for key in greenKeys:
# if word[greenDict[key]] != key:
# break
# elif wordCheck < len(greenKeys):
# wordCheck += 1
# continue
# else:
# greenOut.append(word)
# break
| 29.776119 | 56 | 0.507769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,077 | 0.53985 |
f0d5a3b8f7ed6257ef4659a0b6e15b5bbbff0987 | 3,945 | py | Python | tests/resources/test_wallet_transactions.py | andreshndz/cuenca-python | ca9f0f078584f1458e71baeb4cd15fcc55b40397 | [
"MIT"
] | 6 | 2020-11-02T21:03:11.000Z | 2022-01-13T23:12:01.000Z | tests/resources/test_wallet_transactions.py | andreshndz/cuenca-python | ca9f0f078584f1458e71baeb4cd15fcc55b40397 | [
"MIT"
] | 220 | 2020-05-13T19:20:57.000Z | 2022-03-30T22:03:03.000Z | tests/resources/test_wallet_transactions.py | andreshndz/cuenca-python | ca9f0f078584f1458e71baeb4cd15fcc55b40397 | [
"MIT"
] | 14 | 2020-07-15T15:32:03.000Z | 2021-09-17T19:11:14.000Z | import datetime as dt
import pytest
from cuenca_validations.types import (
EntryType,
SavingCategory,
TransactionStatus,
WalletTransactionType,
)
from cuenca import BalanceEntry, Saving, WalletTransaction
@pytest.mark.vcr
def test_create_wallet_transaction():
    """A deposit created against a savings wallet succeeds and is linked to it."""
    savings_id = 'LAvWUDH6OpQk-ber3E_zUEiQ'
    txn = WalletTransaction.create(
        wallet_uri=f'/savings/{savings_id}',
        transaction_type=WalletTransactionType.deposit,
        amount=10000,
    )
    assert txn.id is not None
    assert txn.transaction_type == WalletTransactionType.deposit
    assert txn.status == TransactionStatus.succeeded
    assert txn.wallet.id == savings_id
@pytest.mark.vcr
def test_retrieve_wallet_transaction():
    """Retrieving a known wallet transaction returns the matching record."""
    txn_id = 'LT32GEaFQR03cJRBcqb0p7uI'
    retrieved = WalletTransaction.retrieve(txn_id)
    assert retrieved.id == txn_id
    assert retrieved.status == TransactionStatus.succeeded
@pytest.mark.vcr
def test_query_wallet_transactions():
    """Listing transactions for a wallet yields the expected count."""
    wallet_uri = '/savings/LAGdf-FVVeQeeKrmYpF5NIfA'
    transactions = list(WalletTransaction.all(wallet_uri=wallet_uri))
    assert len(transactions) == 2
@pytest.mark.vcr
def test_complete_flow_wallets():
    """End-to-end wallet flow: create a saving, deposit, withdraw, then
    verify transactions and the balance entries on both the wallet and the
    default ledger. Request order must match the recorded VCR cassette."""
    # create wallet
    saving = Saving.create(
        name='Ahorros',
        category=SavingCategory.travel,
        goal_amount=1000000,
        goal_date=dt.datetime.now() + dt.timedelta(days=365),
    )
    assert saving.balance == 0
    assert saving.wallet_uri == f'/savings/{saving.id}'

    # deposit money in wallet
    deposit = WalletTransaction.create(
        wallet_uri=saving.wallet_uri,
        transaction_type=WalletTransactionType.deposit,
        amount=10000,
    )
    assert deposit.status == TransactionStatus.succeeded
    saving.refresh()
    assert saving.balance == deposit.amount
    deposit_uri = f'/wallet_transactions/{deposit.id}'

    # withdraw money from wallet
    withdrawal = WalletTransaction.create(
        wallet_uri=saving.wallet_uri,
        transaction_type=WalletTransactionType.withdrawal,
        amount=2000,
    )
    assert withdrawal.status == TransactionStatus.succeeded
    saving.refresh()
    assert saving.balance == deposit.amount - withdrawal.amount
    withdrawal_uri = f'/wallet_transactions/{withdrawal.id}'

    # Check all transactions was created
    query = WalletTransaction.all(wallet_uri=saving.wallet_uri)
    transactions_db = [wt.id for wt in query]
    assert deposit.id in transactions_db
    assert withdrawal.id in transactions_db

    # check balance entries created for wallet
    entries = BalanceEntry.all(wallet_id=saving.id)
    wallet_entries = [entry for entry in entries]
    assert len(wallet_entries) == 2

    # default -> deposit -> wallet (credit in wallet)
    credit = [be for be in wallet_entries if be.type == EntryType.credit][0]
    assert credit.related_transaction_uri == deposit_uri
    assert credit.amount == deposit.amount

    # default <- withdrawal <- wallet (debit in wallet)
    debit = [be for be in wallet_entries if be.type == EntryType.debit][0]
    assert debit.amount == withdrawal.amount
    assert debit.related_transaction_uri == withdrawal_uri

    # check balance entries created in default, related with wallet
    entries = BalanceEntry.all(
        wallet_id='default', funding_instrument_uri=saving.wallet_uri
    )
    default_entries = [entry for entry in entries]
    assert len(default_entries) == 2

    # default -> deposit -> wallet (debit in default)
    debit = [be for be in default_entries if be.type == EntryType.debit][0]
    assert debit.related_transaction_uri == deposit_uri
    assert debit.amount == deposit.amount

    # default <- withdrawal <- wallet (credit in default)
    credit = [be for be in default_entries if be.type == EntryType.credit][0]
    assert credit.amount == withdrawal.amount
    assert credit.related_transaction_uri == withdrawal_uri
| 35.223214 | 77 | 0.72218 | 0 | 0 | 0 | 0 | 3,710 | 0.940431 | 0 | 0 | 639 | 0.161977 |
f0d5e0d9829ebe242a427efa144fb271f139091a | 884 | py | Python | xldlib/xlpy/inputs.py | Alexhuszagh/XLDiscoverer | 60937b1f7f2e23af4219eb26519d6b83fb4232d6 | [
"Apache-2.0",
"MIT"
] | null | null | null | xldlib/xlpy/inputs.py | Alexhuszagh/XLDiscoverer | 60937b1f7f2e23af4219eb26519d6b83fb4232d6 | [
"Apache-2.0",
"MIT"
] | null | null | null | xldlib/xlpy/inputs.py | Alexhuszagh/XLDiscoverer | 60937b1f7f2e23af4219eb26519d6b83fb4232d6 | [
"Apache-2.0",
"MIT"
] | null | null | null | '''
XlPy/inputs
___________
Validates input file selection, configurations, and matches file types.
:copyright: (c) 2015 The Regents of the University of California.
:license: GNU GPL, see licenses/GNU GPLv3.txt for more details.
'''
# load modules
import operator as op
from xldlib.onstart.main import APP
from xldlib.utils import logger
from xldlib.xlpy import wrappers
# CHECKER
# -------
@logger.call('xlpy', 'debug')
@wrappers.threadprogress(3, 2, op.attrgetter('quantitative'))
@wrappers.threadmessage("Checking inputs...")
def checkinputs():
'''Validates the processed input files'''
source = APP.discovererthread
# crosslinkers
source.parameters.checkcrosslinkers()
# files
source.files.checkfile()
source.files.unzipfiles()
source.files.matchfile()
if source.quantitative:
source.files.checkengine()
| 22.1 | 75 | 0.709276 | 0 | 0 | 0 | 0 | 463 | 0.523756 | 0 | 0 | 396 | 0.447964 |
f0d6040d9c0d555d327cb87ba85195c3342a52cf | 4,789 | py | Python | ros/src/tl_detector/mocked_tl_detector.py | nyukhalov/CarND-Capstone | 949713ba7557a9765214a2a02329725b84552d13 | [
"MIT"
] | 3 | 2019-02-11T16:42:03.000Z | 2019-02-11T20:52:48.000Z | ros/src/tl_detector/mocked_tl_detector.py | nyukhalov/CarND-Capstone | 949713ba7557a9765214a2a02329725b84552d13 | [
"MIT"
] | 9 | 2019-02-12T18:38:18.000Z | 2019-02-26T22:20:21.000Z | ros/src/tl_detector/mocked_tl_detector.py | nyukhalov/CarND-Capstone | 949713ba7557a9765214a2a02329725b84552d13 | [
"MIT"
] | 2 | 2019-02-27T20:42:19.000Z | 2019-03-06T13:41:23.000Z | #!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from scipy.spatial import KDTree
import cv2
import yaml
import math
import numpy as np
# Consecutive identical detections required before a light state is trusted.
STATE_COUNT_THRESHOLD = 3
# Waypoint-index window (ahead / behind the car) in which a stop line counts.
TL_LOOK_AHEAD = 100
TL_LOOK_BEHIND = 15
class TLDetector(object):
    """ROS node publishing the waypoint index of the next red/yellow light.

    Uses the ground-truth light states from /vehicle/traffic_lights (mocked
    detector — no camera classification) and debounces them before
    publishing to /traffic_waypoint.
    """

    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None             # latest vehicle pose
        self.waypoints = None        # full base waypoint list
        self.waypoints_2d = None     # [x, y] pairs for KD-tree lookup
        self.camera_image = None
        self.lights = []             # latest TrafficLight list
        self.waypoint_tree = None    # KDTree over waypoints_2d
        self.state = TrafficLight.UNKNOWN

        config_string = rospy.get_param("/traffic_light_config")
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; this reads a trusted ROS param — confirm.
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)

        rospy.spin()

    def pose_cb(self, msg):
        # Re-evaluate the traffic-light state on every pose update.
        self.pose = msg
        self.update_traffic_lights()

    def is_stop_tl_state(self, tl_state):
        """Return True for states the car must stop for (red or yellow)."""
        return tl_state == TrafficLight.RED or tl_state == TrafficLight.YELLOW

    def waypoints_cb(self, waypoints):
        # Base waypoints arrive once; build the KD-tree on first receipt.
        self.waypoints = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[w.pose.pose.position.x, w.pose.pose.position.y] for w in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def update_traffic_lights(self):
        '''
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        light_wp, state = self.process_traffic_lights()
        if self.state != state:
            # State changed: restart the debounce counter.
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            # State is stable: publish the stop waypoint (-1 if no stop needed).
            self.last_state = self.state
            light_wp = light_wp if self.is_stop_tl_state(state) else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            # Not yet stable: keep publishing the last confirmed waypoint.
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, pose):
        """Identifies the closest path waypoint to the given position
        https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints
                 (-1 if the waypoint KD-tree is not built yet)
        """
        px = pose.position.x
        py = pose.position.y

        closest_idx = -1
        if self.waypoint_tree is not None:
            closest_idx = self.waypoint_tree.query([px, py], 1)[1]
        return closest_idx

    def get_light_state(self, light):
        """Determines the current color of the traffic light
        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        # Mocked detector: simply return the simulator's ground-truth state.
        return light.state

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
        location and color

        Returns:
            int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        if not self.pose:
            return -1, TrafficLight.UNKNOWN

        stop_line_positions = self.config['stop_line_positions']
        car_position = self.get_closest_waypoint(self.pose.pose)
        for i, light in enumerate(self.lights):
            light_stop_pose = Pose()
            light_stop_pose.position.x = stop_line_positions[i][0]
            light_stop_pose.position.y = stop_line_positions[i][1]
            # get the wp closest to each light_position
            light_stop_wp = self.get_closest_waypoint(light_stop_pose)
            # First light whose stop line falls inside the look window wins.
            if car_position - TL_LOOK_BEHIND <= light_stop_wp and light_stop_wp <= car_position + TL_LOOK_AHEAD:
                state = self.get_light_state(light)
                return light_stop_wp, state
        return -1, TrafficLight.UNKNOWN
# Entry point: start the detector node; rospy.spin() blocks until shutdown.
if __name__ == '__main__':
    try:
        TLDetector()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start traffic node.')
| 33.725352 | 112 | 0.653999 | 4,243 | 0.885989 | 0 | 0 | 0 | 0 | 0 | 0 | 1,275 | 0.266235 |
f0d71db6f553884c2bb1448ebcb992750a840180 | 2,175 | py | Python | reamber/algorithms/generate/sv/generators/svFuncSequencer.py | Bestfast/reamberPy | 91b76ca6adf11fbe8b7cee7c186481776a4d7aaa | [
"MIT"
] | null | null | null | reamber/algorithms/generate/sv/generators/svFuncSequencer.py | Bestfast/reamberPy | 91b76ca6adf11fbe8b7cee7c186481776a4d7aaa | [
"MIT"
] | null | null | null | reamber/algorithms/generate/sv/generators/svFuncSequencer.py | Bestfast/reamberPy | 91b76ca6adf11fbe8b7cee7c186481776a4d7aaa | [
"MIT"
] | null | null | null | from typing import Callable, List, Union
from numpy import arange
from reamber.algorithms.generate.sv.SvPkg import SvPkg
from reamber.algorithms.generate.sv.SvSequence import SvSequence
def svFuncSequencer(funcs: List[Union[float, Callable[[float], float], None]],
                    offsets: Union[List[float], float, None] = None,
                    repeats: int = 1,
                    repeatGap: float = 0,
                    startX: float = 0,
                    endX: float = 1
                    ):
    """ Sets up a sequence using functions.

    :param funcs: Funcs to generate values. \
        If List, values will be used directly. \
        If Callable, values will be called with the X. \
        If None, this will leave a gap in the sequence.
    :param offsets: Offsets to use on functions. \
        If List, offsets will be used to map the funcs. \
        If Float, all funcs are assumed to be separated by {float} ms. Starting from 0. \
        If None, all funcs are assumed to be separated by 1 ms. Starting from 0.
    :param repeats: The amount of repeats. This affects the increment of the X argument passed to the Callables. \
        If 0, only endX will be used.
    :param repeatGap: The gap between the repeats.
    :param startX: The starting X.
    :param endX: The ending X.
    """
    length = len(funcs)
    if offsets is None: offsets = list(range(0, length))
    # We use [:length] because sometimes arange will create too many for some reason (?)
    elif isinstance(offsets, (float, int)): offsets = list(arange(0, length * offsets, offsets))[:length]

    assert length == len(offsets)

    # Build the base sequence: callables get a placeholder value of 0 (filled
    # in by applyNth below); plain numbers are inserted directly; None funcs
    # add nothing, leaving a gap.
    seq = SvSequence()
    for i, (offset, func) in enumerate(zip(offsets, funcs)):
        if isinstance(func, Callable): seq.appendInit([(offset, 0)])
        elif isinstance(func, (float, int)): seq.appendInit([(offset, func)])
        elif func is None: pass

    pkg = SvPkg.repeat(seq=seq, times=repeats, gap=repeatGap)

    # Apply each callable to its sequence slot; `nones` keeps the slot index
    # aligned because None entries produced no element in the sequence.
    nones = 0
    for funcI, func in enumerate(funcs):
        if func is None: nones += 1
        if isinstance(func, Callable):
            pkg.applyNth(func, funcI - nones, startX, endX)

    return pkg
| 38.157895 | 114 | 0.630805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 890 | 0.409195 |
f0d87d7b32c42472be81003f06fe5e9c4bf5e20f | 1,781 | py | Python | e_secretary/migrations/0010_auto_20190329_2219.py | tsitsikas96/e-secretary | bdda95e17093da730af33acf4b15ed03331c7643 | [
"MIT"
] | null | null | null | e_secretary/migrations/0010_auto_20190329_2219.py | tsitsikas96/e-secretary | bdda95e17093da730af33acf4b15ed03331c7643 | [
"MIT"
] | null | null | null | e_secretary/migrations/0010_auto_20190329_2219.py | tsitsikas96/e-secretary | bdda95e17093da730af33acf4b15ed03331c7643 | [
"MIT"
] | 1 | 2020-03-08T16:12:34.000Z | 2020-03-08T16:12:34.000Z | # Generated by Django 2.1.7 on 2019-03-29 20:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration.

    Moves the fname/lname/email fields off Professor and Student and onto
    Profile, orders Professor by title, makes Profile.grammateia a plain
    boolean, and drops the Grammateia model. Do not hand-edit the
    operation order.
    """

    dependencies = [
        ('e_secretary', '0009_profile'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='professor',
            options={'ordering': ['title']},
        ),
        migrations.RemoveField(
            model_name='professor',
            name='email',
        ),
        migrations.RemoveField(
            model_name='professor',
            name='fname',
        ),
        migrations.RemoveField(
            model_name='professor',
            name='lname',
        ),
        migrations.RemoveField(
            model_name='student',
            name='email',
        ),
        migrations.RemoveField(
            model_name='student',
            name='fname',
        ),
        migrations.RemoveField(
            model_name='student',
            name='lname',
        ),
        migrations.AddField(
            model_name='profile',
            name='email',
            field=models.EmailField(default='test@email.com', max_length=254, null=True),
        ),
        migrations.AddField(
            model_name='profile',
            name='fname',
            field=models.CharField(default='First', help_text='First Name', max_length=50),
        ),
        migrations.AddField(
            model_name='profile',
            name='lname',
            field=models.CharField(default='Last', help_text='Last Name', max_length=50),
        ),
        migrations.AlterField(
            model_name='profile',
            name='grammateia',
            field=models.BooleanField(default=False),
        ),
        migrations.DeleteModel(
            name='Grammateia',
        ),
    ]
| 27.4 | 91 | 0.522179 | 1,688 | 0.947782 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.18922 |
f0d9cf2dd06e2e64395354f03a64ed4bfa3145d9 | 28,778 | py | Python | catenets/models/jax/pseudo_outcome_nets.py | AliciaCurth/CATENets | aeeae7625e454e97adff37b66ba2acb527dbd275 | [
"BSD-3-Clause"
] | 33 | 2021-02-25T13:50:11.000Z | 2022-03-11T14:31:40.000Z | catenets/models/jax/pseudo_outcome_nets.py | vanderschaarlab/CATENets | d0bc5316fa784fad78d8801367ed57c37193d2c5 | [
"BSD-3-Clause"
] | 2 | 2022-02-21T16:16:32.000Z | 2022-03-23T17:56:29.000Z | catenets/models/jax/pseudo_outcome_nets.py | vanderschaarlab/CATENets | d0bc5316fa784fad78d8801367ed57c37193d2c5 | [
"BSD-3-Clause"
] | 8 | 2021-02-26T10:20:06.000Z | 2021-12-03T16:34:23.000Z | """
Implements Pseudo-outcome based Two-step Nets, namely the DR-learner, the PW-learner and the
RA-learner.
"""
# Author: Alicia Curth
from typing import Callable, Optional, Tuple
import jax.numpy as jnp
import numpy as onp
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import catenets.logger as log
from catenets.models.constants import (
DEFAULT_AVG_OBJECTIVE,
DEFAULT_BATCH_SIZE,
DEFAULT_CF_FOLDS,
DEFAULT_LAYERS_OUT,
DEFAULT_LAYERS_OUT_T,
DEFAULT_LAYERS_R,
DEFAULT_LAYERS_R_T,
DEFAULT_N_ITER,
DEFAULT_N_ITER_MIN,
DEFAULT_N_ITER_PRINT,
DEFAULT_NONLIN,
DEFAULT_PATIENCE,
DEFAULT_PENALTY_L2,
DEFAULT_SEED,
DEFAULT_STEP_SIZE,
DEFAULT_STEP_SIZE_T,
DEFAULT_UNITS_OUT,
DEFAULT_UNITS_OUT_T,
DEFAULT_UNITS_R,
DEFAULT_UNITS_R_T,
DEFAULT_VAL_SPLIT,
)
from catenets.models.jax.base import BaseCATENet, train_output_net_only
from catenets.models.jax.disentangled_nets import predict_snet3, train_snet3
from catenets.models.jax.flextenet import predict_flextenet, train_flextenet
from catenets.models.jax.model_utils import check_shape_1d_data, check_X_is_np
from catenets.models.jax.offsetnet import predict_offsetnet, train_offsetnet
from catenets.models.jax.representation_nets import (
predict_snet1,
predict_snet2,
train_snet1,
train_snet2,
)
from catenets.models.jax.snet import predict_snet, train_snet
from catenets.models.jax.tnet import predict_t_net, train_tnet
from catenets.models.jax.transformation_utils import (
DR_TRANSFORMATION,
PW_TRANSFORMATION,
RA_TRANSFORMATION,
_get_transformation_function,
)
# Labels for the supported first-stage (nuisance-estimator) strategies;
# each corresponds to one of the train_* functions imported above.
T_STRATEGY = "T"
S1_STRATEGY = "Tar"
S2_STRATEGY = "S2"
S3_STRATEGY = "S3"
S_STRATEGY = "S"
OFFSET_STRATEGY = "Offset"
FLEX_STRATEGY = "Flex"

# All valid values for the `first_stage_strategy` argument.
ALL_STRATEGIES = [
    T_STRATEGY,
    S1_STRATEGY,
    S2_STRATEGY,
    S3_STRATEGY,
    S_STRATEGY,
    FLEX_STRATEGY,
    OFFSET_STRATEGY,
]
class PseudoOutcomeNet(BaseCATENet):
    """
    Class implements TwoStepLearners based on pseudo-outcome regression as discussed in
    Curth &vd Schaar (2021): RA-learner, PW-learner and DR-learner

    Parameters
    ----------
    first_stage_strategy: str, default 't'
        which nuisance estimator to use in first stage
    first_stage_args: dict
        Any additional arguments to pass to first stage training function
    data_split: bool, default False
        Whether to split the data in two folds for estimation
    cross_fit: bool, default False
        Whether to perform cross fitting
    n_cf_folds: int
        Number of crossfitting folds to use
    transformation: str, default 'AIPW'
        pseudo-outcome to use ('AIPW' for DR-learner, 'HT' for PW learner, 'RA' for RA-learner)
    binary_y: bool, default False
        Whether the outcome is binary
    n_layers_out: int
        First stage Number of hypothesis layers (n_layers_out x n_units_out + 1 x Dense layer)
    n_units_out: int
        First stage Number of hidden units in each hypothesis layer
    n_layers_r: int
        First stage Number of representation layers before hypothesis layers (distinction between
        hypothesis layers and representation layers is made to match TARNet & SNets)
    n_units_r: int
        First stage Number of hidden units in each representation layer
    n_layers_out_t: int
        Second stage Number of hypothesis layers (n_layers_out x n_units_out + 1 x Dense layer)
    n_units_out_t: int
        Second stage Number of hidden units in each hypothesis layer
    n_layers_r_t: int
        Second stage Number of representation layers before hypothesis layers (distinction between
        hypothesis layers and representation layers is made to match TARNet & SNets)
    n_units_r_t: int
        Second stage Number of hidden units in each representation layer
    penalty_l2: float
        First stage l2 (ridge) penalty
    penalty_l2_t: float
        Second stage l2 (ridge) penalty
    step_size: float
        First stage learning rate for optimizer
    step_size_t: float
        Second stage learning rate for optimizer
    n_iter: int
        Maximum number of iterations
    batch_size: int
        Batch size
    val_split_prop: float
        Proportion of samples used for validation split (can be 0)
    early_stopping: bool, default True
        Whether to use early stopping
    patience: int
        Number of iterations to wait before early stopping after decrease in validation loss
    n_iter_min: int
        Minimum number of iterations to go through before starting early stopping
    n_iter_print: int
        Number of iterations after which to print updates
    seed: int
        Seed used
    nonlin: string, default 'elu'
        Nonlinearity to use in NN
    """

    def __init__(
        self,
        first_stage_strategy: str = T_STRATEGY,
        first_stage_args: Optional[dict] = None,
        data_split: bool = False,
        cross_fit: bool = False,
        n_cf_folds: int = DEFAULT_CF_FOLDS,
        transformation: str = DR_TRANSFORMATION,
        binary_y: bool = False,
        n_layers_out: int = DEFAULT_LAYERS_OUT,
        n_layers_r: int = DEFAULT_LAYERS_R,
        n_layers_out_t: int = DEFAULT_LAYERS_OUT_T,
        n_layers_r_t: int = DEFAULT_LAYERS_R_T,
        n_units_out: int = DEFAULT_UNITS_OUT,
        n_units_r: int = DEFAULT_UNITS_R,
        n_units_out_t: int = DEFAULT_UNITS_OUT_T,
        n_units_r_t: int = DEFAULT_UNITS_R_T,
        penalty_l2: float = DEFAULT_PENALTY_L2,
        penalty_l2_t: float = DEFAULT_PENALTY_L2,
        step_size: float = DEFAULT_STEP_SIZE,
        step_size_t: float = DEFAULT_STEP_SIZE_T,
        n_iter: int = DEFAULT_N_ITER,
        batch_size: int = DEFAULT_BATCH_SIZE,
        n_iter_min: int = DEFAULT_N_ITER_MIN,
        val_split_prop: float = DEFAULT_VAL_SPLIT,
        early_stopping: bool = True,
        patience: int = DEFAULT_PATIENCE,
        n_iter_print: int = DEFAULT_N_ITER_PRINT,
        seed: int = DEFAULT_SEED,
        rescale_transformation: bool = False,
        nonlin: str = DEFAULT_NONLIN,
    ) -> None:
        # settings
        self.first_stage_strategy = first_stage_strategy
        self.first_stage_args = first_stage_args
        self.binary_y = binary_y
        self.transformation = transformation
        self.data_split = data_split
        self.cross_fit = cross_fit
        self.n_cf_folds = n_cf_folds

        # model architecture hyperparams
        self.n_layers_out = n_layers_out
        self.n_layers_out_t = n_layers_out_t
        self.n_layers_r = n_layers_r
        self.n_layers_r_t = n_layers_r_t
        self.n_units_out = n_units_out
        self.n_units_out_t = n_units_out_t
        self.n_units_r = n_units_r
        self.n_units_r_t = n_units_r_t
        self.nonlin = nonlin

        # other hyperparameters
        self.penalty_l2 = penalty_l2
        self.penalty_l2_t = penalty_l2_t
        self.step_size = step_size
        self.step_size_t = step_size_t
        self.n_iter = n_iter
        self.batch_size = batch_size
        self.n_iter_print = n_iter_print
        self.seed = seed
        self.val_split_prop = val_split_prop
        self.early_stopping = early_stopping
        self.patience = patience
        self.n_iter_min = n_iter_min
        self.rescale_transformation = rescale_transformation

    def _get_train_function(self) -> Callable:
        return train_pseudooutcome_net

    def fit(
        self,
        X: jnp.ndarray,
        y: jnp.ndarray,
        w: jnp.ndarray,
        p: Optional[jnp.ndarray] = None,
    ) -> "PseudoOutcomeNet":
        """Fit the two-step learner on covariates X, outcomes y, treatments w
        and (optionally) known propensities p. Returns self."""
        # overwrite super so we can pass p as extra param
        # some quick input checks
        X = check_X_is_np(X)
        self._check_inputs(w, p)

        train_func = self._get_train_function()
        train_params = self.get_params()

        if "transformation" not in train_params.keys():
            train_params.update({"transformation": self.transformation})

        if self.rescale_transformation:
            # training also returns the scale factor used on the pseudo-outcome
            self._params, self._predict_funs, self._scale_factor = train_func(
                X, y, w, p, **train_params
            )
        else:
            self._params, self._predict_funs = train_func(X, y, w, p, **train_params)
        return self

    def _get_predict_function(self) -> Callable:
        # Two step nets do not need this
        pass

    def predict(
        self, X: jnp.ndarray, return_po: bool = False, return_prop: bool = False
    ) -> jnp.ndarray:
        """Predict CATE for X. Potential-outcome / propensity outputs are not
        available for two-step learners and raise NotImplementedError."""
        # check input
        if return_po:
            raise NotImplementedError(
                "TwoStepNets have no Potential outcome predictors."
            )
        if return_prop:
            raise NotImplementedError("TwoStepNets have no Propensity predictors.")
        if isinstance(X, pd.DataFrame):
            X = X.values
        if self.rescale_transformation:
            # undo the pseudo-outcome rescaling applied during training
            return 1 / self._scale_factor * self._predict_funs(self._params, X)
        else:
            return self._predict_funs(self._params, X)
class DRNet(PseudoOutcomeNet):
    """Wrapper for DR-learner using PseudoOutcomeNet.

    Fixes the pseudo-outcome to the doubly-robust (AIPW) transformation;
    all other parameters are forwarded unchanged to PseudoOutcomeNet.
    """

    def __init__(
        self,
        first_stage_strategy: str = T_STRATEGY,
        data_split: bool = False,
        cross_fit: bool = False,
        n_cf_folds: int = DEFAULT_CF_FOLDS,
        binary_y: bool = False,
        n_layers_out: int = DEFAULT_LAYERS_OUT,
        n_layers_r: int = DEFAULT_LAYERS_R,
        n_layers_out_t: int = DEFAULT_LAYERS_OUT_T,
        n_layers_r_t: int = DEFAULT_LAYERS_R_T,
        n_units_out: int = DEFAULT_UNITS_OUT,
        n_units_r: int = DEFAULT_UNITS_R,
        n_units_out_t: int = DEFAULT_UNITS_OUT_T,
        n_units_r_t: int = DEFAULT_UNITS_R_T,
        penalty_l2: float = DEFAULT_PENALTY_L2,
        penalty_l2_t: float = DEFAULT_PENALTY_L2,
        step_size: float = DEFAULT_STEP_SIZE,
        step_size_t: float = DEFAULT_STEP_SIZE_T,
        n_iter: int = DEFAULT_N_ITER,
        batch_size: int = DEFAULT_BATCH_SIZE,
        n_iter_min: int = DEFAULT_N_ITER_MIN,
        val_split_prop: float = DEFAULT_VAL_SPLIT,
        early_stopping: bool = True,
        patience: int = DEFAULT_PATIENCE,
        n_iter_print: int = DEFAULT_N_ITER_PRINT,
        seed: int = DEFAULT_SEED,
        rescale_transformation: bool = False,
        nonlin: str = DEFAULT_NONLIN,
        first_stage_args: Optional[dict] = None,
    ) -> None:
        super().__init__(
            first_stage_strategy=first_stage_strategy,
            data_split=data_split,
            cross_fit=cross_fit,
            n_cf_folds=n_cf_folds,
            transformation=DR_TRANSFORMATION,
            binary_y=binary_y,
            n_layers_out=n_layers_out,
            n_layers_r=n_layers_r,
            n_layers_out_t=n_layers_out_t,
            n_layers_r_t=n_layers_r_t,
            n_units_out=n_units_out,
            n_units_r=n_units_r,
            n_units_out_t=n_units_out_t,
            n_units_r_t=n_units_r_t,
            penalty_l2=penalty_l2,
            penalty_l2_t=penalty_l2_t,
            step_size=step_size,
            step_size_t=step_size_t,
            n_iter=n_iter,
            batch_size=batch_size,
            n_iter_min=n_iter_min,
            val_split_prop=val_split_prop,
            early_stopping=early_stopping,
            patience=patience,
            n_iter_print=n_iter_print,
            seed=seed,
            nonlin=nonlin,
            rescale_transformation=rescale_transformation,
            first_stage_args=first_stage_args,
        )
class RANet(PseudoOutcomeNet):
    """Wrapper for RA-learner using PseudoOutcomeNet.

    Fixes the pseudo-outcome to the regression-adjustment (RA)
    transformation; all other parameters are forwarded unchanged.
    """

    def __init__(
        self,
        first_stage_strategy: str = T_STRATEGY,
        data_split: bool = False,
        cross_fit: bool = False,
        n_cf_folds: int = DEFAULT_CF_FOLDS,
        binary_y: bool = False,
        n_layers_out: int = DEFAULT_LAYERS_OUT,
        n_layers_r: int = DEFAULT_LAYERS_R,
        n_layers_out_t: int = DEFAULT_LAYERS_OUT_T,
        n_layers_r_t: int = DEFAULT_LAYERS_R_T,
        n_units_out: int = DEFAULT_UNITS_OUT,
        n_units_r: int = DEFAULT_UNITS_R,
        n_units_out_t: int = DEFAULT_UNITS_OUT_T,
        n_units_r_t: int = DEFAULT_UNITS_R_T,
        penalty_l2: float = DEFAULT_PENALTY_L2,
        penalty_l2_t: float = DEFAULT_PENALTY_L2,
        step_size: float = DEFAULT_STEP_SIZE,
        step_size_t: float = DEFAULT_STEP_SIZE_T,
        n_iter: int = DEFAULT_N_ITER,
        batch_size: int = DEFAULT_BATCH_SIZE,
        n_iter_min: int = DEFAULT_N_ITER_MIN,
        val_split_prop: float = DEFAULT_VAL_SPLIT,
        early_stopping: bool = True,
        patience: int = DEFAULT_PATIENCE,
        n_iter_print: int = DEFAULT_N_ITER_PRINT,
        seed: int = DEFAULT_SEED,
        rescale_transformation: bool = False,
        nonlin: str = DEFAULT_NONLIN,
        first_stage_args: Optional[dict] = None,
    ) -> None:
        super().__init__(
            first_stage_strategy=first_stage_strategy,
            data_split=data_split,
            cross_fit=cross_fit,
            n_cf_folds=n_cf_folds,
            transformation=RA_TRANSFORMATION,
            binary_y=binary_y,
            n_layers_out=n_layers_out,
            n_layers_r=n_layers_r,
            n_layers_out_t=n_layers_out_t,
            n_layers_r_t=n_layers_r_t,
            n_units_out=n_units_out,
            n_units_r=n_units_r,
            n_units_out_t=n_units_out_t,
            n_units_r_t=n_units_r_t,
            penalty_l2=penalty_l2,
            penalty_l2_t=penalty_l2_t,
            step_size=step_size,
            step_size_t=step_size_t,
            n_iter=n_iter,
            batch_size=batch_size,
            n_iter_min=n_iter_min,
            val_split_prop=val_split_prop,
            early_stopping=early_stopping,
            patience=patience,
            n_iter_print=n_iter_print,
            seed=seed,
            nonlin=nonlin,
            rescale_transformation=rescale_transformation,
            first_stage_args=first_stage_args,
        )
class PWNet(PseudoOutcomeNet):
    """Wrapper for PW-learner using PseudoOutcomeNet.

    Fixes the pseudo-outcome to the propensity-weighted (HT)
    transformation; all other parameters are forwarded unchanged.
    """

    def __init__(
        self,
        first_stage_strategy: str = T_STRATEGY,
        data_split: bool = False,
        cross_fit: bool = False,
        n_cf_folds: int = DEFAULT_CF_FOLDS,
        binary_y: bool = False,
        n_layers_out: int = DEFAULT_LAYERS_OUT,
        n_layers_r: int = DEFAULT_LAYERS_R,
        n_layers_out_t: int = DEFAULT_LAYERS_OUT_T,
        n_layers_r_t: int = DEFAULT_LAYERS_R_T,
        n_units_out: int = DEFAULT_UNITS_OUT,
        n_units_r: int = DEFAULT_UNITS_R,
        n_units_out_t: int = DEFAULT_UNITS_OUT_T,
        n_units_r_t: int = DEFAULT_UNITS_R_T,
        penalty_l2: float = DEFAULT_PENALTY_L2,
        penalty_l2_t: float = DEFAULT_PENALTY_L2,
        step_size: float = DEFAULT_STEP_SIZE,
        step_size_t: float = DEFAULT_STEP_SIZE_T,
        n_iter: int = DEFAULT_N_ITER,
        batch_size: int = DEFAULT_BATCH_SIZE,
        n_iter_min: int = DEFAULT_N_ITER_MIN,
        val_split_prop: float = DEFAULT_VAL_SPLIT,
        early_stopping: bool = True,
        patience: int = DEFAULT_PATIENCE,
        n_iter_print: int = DEFAULT_N_ITER_PRINT,
        seed: int = DEFAULT_SEED,
        rescale_transformation: bool = False,
        nonlin: str = DEFAULT_NONLIN,
        first_stage_args: Optional[dict] = None,
    ) -> None:
        super().__init__(
            first_stage_strategy=first_stage_strategy,
            data_split=data_split,
            cross_fit=cross_fit,
            n_cf_folds=n_cf_folds,
            transformation=PW_TRANSFORMATION,
            binary_y=binary_y,
            n_layers_out=n_layers_out,
            n_layers_r=n_layers_r,
            n_layers_out_t=n_layers_out_t,
            n_layers_r_t=n_layers_r_t,
            n_units_out=n_units_out,
            n_units_r=n_units_r,
            n_units_out_t=n_units_out_t,
            n_units_r_t=n_units_r_t,
            penalty_l2=penalty_l2,
            penalty_l2_t=penalty_l2_t,
            step_size=step_size,
            step_size_t=step_size_t,
            n_iter=n_iter,
            batch_size=batch_size,
            n_iter_min=n_iter_min,
            val_split_prop=val_split_prop,
            early_stopping=early_stopping,
            patience=patience,
            n_iter_print=n_iter_print,
            seed=seed,
            nonlin=nonlin,
            rescale_transformation=rescale_transformation,
            first_stage_args=first_stage_args,
        )
def train_pseudooutcome_net(
    X: jnp.ndarray,
    y: jnp.ndarray,
    w: jnp.ndarray,
    p: Optional[jnp.ndarray] = None,
    first_stage_strategy: str = T_STRATEGY,
    data_split: bool = False,
    cross_fit: bool = False,
    n_cf_folds: int = DEFAULT_CF_FOLDS,
    transformation: str = DR_TRANSFORMATION,
    binary_y: bool = False,
    n_layers_out: int = DEFAULT_LAYERS_OUT,
    n_layers_r: int = DEFAULT_LAYERS_R,
    n_layers_r_t: int = DEFAULT_LAYERS_R_T,
    n_layers_out_t: int = DEFAULT_LAYERS_OUT_T,
    n_units_out: int = DEFAULT_UNITS_OUT,
    n_units_r: int = DEFAULT_UNITS_R,
    n_units_out_t: int = DEFAULT_UNITS_OUT_T,
    n_units_r_t: int = DEFAULT_UNITS_R_T,
    penalty_l2: float = DEFAULT_PENALTY_L2,
    penalty_l2_t: float = DEFAULT_PENALTY_L2,
    step_size: float = DEFAULT_STEP_SIZE,
    step_size_t: float = DEFAULT_STEP_SIZE_T,
    n_iter: int = DEFAULT_N_ITER,
    batch_size: int = DEFAULT_BATCH_SIZE,
    val_split_prop: float = DEFAULT_VAL_SPLIT,
    early_stopping: bool = True,
    patience: int = DEFAULT_PATIENCE,
    n_iter_min: int = DEFAULT_N_ITER_MIN,
    n_iter_print: int = DEFAULT_N_ITER_PRINT,
    seed: int = DEFAULT_SEED,
    rescale_transformation: bool = False,
    return_val_loss: bool = False,
    nonlin: str = DEFAULT_NONLIN,
    avg_objective: bool = DEFAULT_AVG_OBJECTIVE,
    first_stage_args: Optional[dict] = None,
) -> Tuple:
    """Train a two-stage pseudo-outcome CATE learner.

    Stage one fits plug-in estimators of the potential-outcome regressions
    (``mu_0``, ``mu_1``) and/or the propensity score (``pi_hat``), optionally
    with data splitting or K-fold cross-fitting.  Stage two regresses the
    pseudo-outcome (DR/RA/PW transformation of ``y``, ``w`` and the stage-one
    predictions) on ``X`` using a plain output net.

    Parameters mirror ``PseudoOutcomeNet``; in addition ``p`` is an optional
    vector of *known* propensity scores which, when given, replaces the
    estimated ``pi_hat`` in the transformation.

    Returns
    -------
    ``(params, predict_funs)`` of the second-stage net; when
    ``rescale_transformation`` is set, ``(params, predict_funs, scale_factor)``.
    """
    # get shape of data
    n, d = X.shape

    if p is not None:
        p = check_shape_1d_data(p)

    # get transformation function
    transformation_function = _get_transformation_function(transformation)

    # get strategy name
    if first_stage_strategy not in ALL_STRATEGIES:
        raise ValueError(
            "Parameter first stage should be in "
            "catenets.models.pseudo_outcome_nets.ALL_STRATEGIES. "
            "You passed {}".format(first_stage_strategy)
        )

    # Stage one is needed unless the propensity score is known AND only the
    # propensity score enters the transformation (pure PW case).
    # FIX: compare transformation strings by value (!=), not identity (is not);
    # identity only worked by accident when the module constant itself was passed.
    if p is None or transformation != PW_TRANSFORMATION:
        if not cross_fit:
            if not data_split:
                log.debug("Training first stage with all data (no data splitting)")
                # use all data for both fitting and prediction
                fit_mask = onp.ones(n, dtype=bool)
                pred_mask = onp.ones(n, dtype=bool)
            else:
                log.debug("Training first stage with half of the data (data splitting)")
                # split data in half
                # NOTE(review): onp.random.choice samples WITH replacement here,
                # so the fit half may contain duplicate indices and end up
                # smaller than n/2 -- confirm this is intended.
                fit_idx = onp.random.choice(n, int(onp.round(n / 2)))
                fit_mask = onp.zeros(n, dtype=bool)
                fit_mask[fit_idx] = 1
                pred_mask = ~fit_mask

            mu_0, mu_1, pi_hat = _train_and_predict_first_stage(
                X,
                y,
                w,
                fit_mask,
                pred_mask,
                first_stage_strategy=first_stage_strategy,
                binary_y=binary_y,
                n_layers_out=n_layers_out,
                n_layers_r=n_layers_r,
                n_units_out=n_units_out,
                n_units_r=n_units_r,
                penalty_l2=penalty_l2,
                step_size=step_size,
                n_iter=n_iter,
                batch_size=batch_size,
                val_split_prop=val_split_prop,
                early_stopping=early_stopping,
                patience=patience,
                n_iter_min=n_iter_min,
                n_iter_print=n_iter_print,
                seed=seed,
                nonlin=nonlin,
                avg_objective=avg_objective,
                transformation=transformation,
                first_stage_args=first_stage_args,
            )
            if data_split:
                # keep only prediction data for the second stage
                # NOTE(review): this indexing assumes y, w arrive 2-dimensional;
                # they are only reshaped via check_shape_1d_data below -- verify
                # against the callers.
                X, y, w = X[pred_mask, :], y[pred_mask, :], w[pred_mask, :]

                if p is not None:
                    p = p[pred_mask, :]
        else:
            log.debug(f"Training first stage in {n_cf_folds} folds (cross-fitting)")
            # do cross fitting: each observation's nuisance predictions come from
            # a model fit on the other folds
            mu_0, mu_1, pi_hat = onp.zeros((n, 1)), onp.zeros((n, 1)), onp.zeros((n, 1))
            splitter = StratifiedKFold(
                n_splits=n_cf_folds, shuffle=True, random_state=seed
            )

            fold_count = 1
            for train_idx, test_idx in splitter.split(X, w):
                log.debug(f"Training fold {fold_count}.")
                fold_count = fold_count + 1

                pred_mask = onp.zeros(n, dtype=bool)
                pred_mask[test_idx] = 1
                fit_mask = ~pred_mask

                (
                    mu_0[pred_mask],
                    mu_1[pred_mask],
                    pi_hat[pred_mask],
                ) = _train_and_predict_first_stage(
                    X,
                    y,
                    w,
                    fit_mask,
                    pred_mask,
                    first_stage_strategy=first_stage_strategy,
                    binary_y=binary_y,
                    n_layers_out=n_layers_out,
                    n_layers_r=n_layers_r,
                    n_units_out=n_units_out,
                    n_units_r=n_units_r,
                    penalty_l2=penalty_l2,
                    step_size=step_size,
                    n_iter=n_iter,
                    batch_size=batch_size,
                    val_split_prop=val_split_prop,
                    early_stopping=early_stopping,
                    patience=patience,
                    n_iter_min=n_iter_min,
                    n_iter_print=n_iter_print,
                    seed=seed,
                    nonlin=nonlin,
                    avg_objective=avg_objective,
                    transformation=transformation,
                    first_stage_args=first_stage_args,
                )

    log.debug("Training second stage.")

    if p is not None:
        # use known propensity score instead of the estimated one
        p = check_shape_1d_data(p)
        pi_hat = p

    # second stage
    y, w = check_shape_1d_data(y), check_shape_1d_data(w)

    # transform data and fit on transformed data
    # FIX: value comparison (==) instead of identity (is), as above.
    if transformation == PW_TRANSFORMATION:
        # PW uses only the propensity score; the PO regressions are irrelevant.
        mu_0 = None
        mu_1 = None

    pseudo_outcome = transformation_function(y=y, w=w, p=pi_hat, mu_0=mu_0, mu_1=mu_1)

    if rescale_transformation:
        # Shrink the pseudo-outcome towards the scale of y; never inflate it
        # (scale_factor is capped at 1, in which case nothing is rescaled).
        scale_factor = onp.std(y) / onp.std(pseudo_outcome)
        if scale_factor > 1:
            scale_factor = 1
        else:
            pseudo_outcome = scale_factor * pseudo_outcome
        params, predict_funs = train_output_net_only(
            X,
            pseudo_outcome,
            binary_y=False,
            n_layers_out=n_layers_out_t,
            n_units_out=n_units_out_t,
            n_layers_r=n_layers_r_t,
            n_units_r=n_units_r_t,
            penalty_l2=penalty_l2_t,
            step_size=step_size_t,
            n_iter=n_iter,
            batch_size=batch_size,
            val_split_prop=val_split_prop,
            early_stopping=early_stopping,
            patience=patience,
            n_iter_min=n_iter_min,
            n_iter_print=n_iter_print,
            seed=seed,
            return_val_loss=return_val_loss,
            nonlin=nonlin,
            avg_objective=avg_objective,
        )
        return params, predict_funs, scale_factor
    else:
        return train_output_net_only(
            X,
            pseudo_outcome,
            binary_y=False,
            n_layers_out=n_layers_out_t,
            n_units_out=n_units_out_t,
            n_layers_r=n_layers_r_t,
            n_units_r=n_units_r_t,
            penalty_l2=penalty_l2_t,
            step_size=step_size_t,
            n_iter=n_iter,
            batch_size=batch_size,
            val_split_prop=val_split_prop,
            early_stopping=early_stopping,
            patience=patience,
            n_iter_min=n_iter_min,
            n_iter_print=n_iter_print,
            seed=seed,
            return_val_loss=return_val_loss,
            nonlin=nonlin,
            avg_objective=avg_objective,
        )
def _train_and_predict_first_stage(
    X: jnp.ndarray,
    y: jnp.ndarray,
    w: jnp.ndarray,
    fit_mask: jnp.ndarray,
    pred_mask: jnp.ndarray,
    first_stage_strategy: str,
    binary_y: bool = False,
    n_layers_out: int = DEFAULT_LAYERS_OUT,
    n_layers_r: int = DEFAULT_LAYERS_R,
    n_units_out: int = DEFAULT_UNITS_OUT,
    n_units_r: int = DEFAULT_UNITS_R,
    penalty_l2: float = DEFAULT_PENALTY_L2,
    step_size: float = DEFAULT_STEP_SIZE,
    n_iter: int = DEFAULT_N_ITER,
    batch_size: int = DEFAULT_BATCH_SIZE,
    val_split_prop: float = DEFAULT_VAL_SPLIT,
    early_stopping: bool = True,
    patience: int = DEFAULT_PATIENCE,
    n_iter_min: int = DEFAULT_N_ITER_MIN,
    n_iter_print: int = DEFAULT_N_ITER_PRINT,
    seed: int = DEFAULT_SEED,
    nonlin: str = DEFAULT_NONLIN,
    avg_objective: bool = False,
    transformation: str = DR_TRANSFORMATION,
    first_stage_args: Optional[dict] = None,
) -> Tuple:
    """Fit first-stage nuisance models on ``fit_mask`` rows, predict on ``pred_mask``.

    Trains the plug-in learner selected by ``first_stage_strategy`` and, when the
    transformation needs it, a separate propensity net.  Returns the tuple
    ``(mu_0, mu_1, pi_hat)`` evaluated on the ``pred_mask`` rows; components not
    required by ``transformation`` are returned as ``onp.nan``.
    """
    if len(w.shape) > 1:
        # downstream train functions expect a flat treatment vector
        w = w.reshape((len(w),))

    if first_stage_args is None:
        first_stage_args = {}

    # split the data
    X_fit, y_fit, w_fit = X[fit_mask, :], y[fit_mask], w[fit_mask]
    X_pred = X[pred_mask, :]

    train_fun: Callable
    predict_fun: Callable

    # dispatch on the chosen plug-in strategy
    if first_stage_strategy == T_STRATEGY:
        train_fun, predict_fun = train_tnet, predict_t_net
    elif first_stage_strategy == S_STRATEGY:
        train_fun, predict_fun = train_snet, predict_snet
    elif first_stage_strategy == S1_STRATEGY:
        train_fun, predict_fun = train_snet1, predict_snet1
    elif first_stage_strategy == S2_STRATEGY:
        train_fun, predict_fun = train_snet2, predict_snet2
    elif first_stage_strategy == S3_STRATEGY:
        train_fun, predict_fun = train_snet3, predict_snet3
    elif first_stage_strategy == OFFSET_STRATEGY:
        train_fun, predict_fun = train_offsetnet, predict_offsetnet
    elif first_stage_strategy == FLEX_STRATEGY:
        train_fun, predict_fun = train_flextenet, predict_flextenet
    else:
        raise ValueError(
            "{} is not a valid first stage strategy for a PseudoOutcomeNet".format(
                first_stage_strategy
            )
        )

    log.debug("Training PO estimators")
    trained_params, pred_fun = train_fun(
        X_fit,
        y_fit,
        w_fit,
        binary_y=binary_y,
        n_layers_r=n_layers_r,
        n_units_r=n_units_r,
        n_layers_out=n_layers_out,
        n_units_out=n_units_out,
        penalty_l2=penalty_l2,
        step_size=step_size,
        n_iter=n_iter,
        batch_size=batch_size,
        val_split_prop=val_split_prop,
        early_stopping=early_stopping,
        patience=patience,
        n_iter_min=n_iter_min,
        n_iter_print=n_iter_print,
        seed=seed,
        nonlin=nonlin,
        avg_objective=avg_objective,
        **first_stage_args,
    )

    if first_stage_strategy in [S_STRATEGY, S2_STRATEGY, S3_STRATEGY]:
        # these strategies model the propensity jointly with the POs
        _, mu_0, mu_1, pi_hat = predict_fun(
            X_pred, trained_params, pred_fun, return_po=True, return_prop=True
        )
    else:
        # FIX: compare transformation strings by value (!=), not identity
        # (is not); identity only worked by accident for the module constants.
        if transformation != PW_TRANSFORMATION:
            _, mu_0, mu_1 = predict_fun(
                X_pred, trained_params, pred_fun, return_po=True
            )
        else:
            # PW needs only the propensity score
            mu_0, mu_1 = onp.nan, onp.nan

        if transformation != RA_TRANSFORMATION:
            # RA is the only transformation that does not use the propensity;
            # everything else gets a dedicated propensity net (w as binary target).
            log.debug("Training propensity net")
            params_prop, predict_fun_prop = train_output_net_only(
                X_fit,
                w_fit,
                binary_y=True,
                n_layers_out=n_layers_out,
                n_units_out=n_units_out,
                n_layers_r=n_layers_r,
                n_units_r=n_units_r,
                penalty_l2=penalty_l2,
                step_size=step_size,
                n_iter=n_iter,
                batch_size=batch_size,
                val_split_prop=val_split_prop,
                early_stopping=early_stopping,
                patience=patience,
                n_iter_min=n_iter_min,
                n_iter_print=n_iter_print,
                seed=seed,
                nonlin=nonlin,
                avg_objective=avg_objective,
            )
            pi_hat = predict_fun_prop(params_prop, X_pred)
        else:
            pi_hat = onp.nan

    return mu_0, mu_1, pi_hat
| 35.267157 | 98 | 0.629057 | 14,576 | 0.506498 | 0 | 0 | 0 | 0 | 0 | 0 | 4,153 | 0.144312 |
f0db46fd26b0c7315a9b0cf93b8d1fbaf8362e97 | 2,743 | py | Python | gender_converter/logger_aegender.py | roebel/DeepGC | 03eee63ff9d9f4daa34435ddca530b262f097ea6 | [
"MIT"
] | null | null | null | gender_converter/logger_aegender.py | roebel/DeepGC | 03eee63ff9d9f4daa34435ddca530b262f097ea6 | [
"MIT"
] | 1 | 2021-08-11T06:41:56.000Z | 2021-08-11T06:41:56.000Z | gender_converter/logger_aegender.py | roebel/DeepGC | 03eee63ff9d9f4daa34435ddca530b262f097ea6 | [
"MIT"
] | null | null | null | import random
from plotting_utils import plot_spectrogram_to_numpy, image_for_logger, plot_to_image
import numpy as np
import tensorflow as tf
class GParrotLogger():
def __init__(self, logdir, ali_path='ali'):
# super(ParrotLogger, self).__init__(logdir)
self.writer = tf.summary.create_file_writer(logdir)
def log_training(self, train_loss, loss_list, accuracy_list, grad_norm, learning_rate, duration, iteration):
(speaker_encoder_loss, gender_autoencoder_loss, gender_classification_loss, gender_adv_loss,
gender_autoencoder_destandardized_loss) = loss_list
speaker_encoder_acc, gender_classification_acc = accuracy_list
with self.writer.as_default():
tf.summary.scalar("training.loss", train_loss, iteration)
tf.summary.scalar("training.loss.spenc", speaker_encoder_loss, iteration)
tf.summary.scalar("training.loss.gauto", gender_autoencoder_loss, iteration)
tf.summary.scalar("training.loss.gautotrue", gender_autoencoder_destandardized_loss, iteration)
tf.summary.scalar("training.loss.gcla", gender_classification_loss, iteration)
tf.summary.scalar("training.loss.gadv", gender_adv_loss, iteration)
tf.summary.scalar('training.acc.spenc', speaker_encoder_acc, iteration)
tf.summary.scalar('training.acc.gcla', gender_classification_acc, iteration)
tf.summary.scalar("grad.norm", grad_norm, iteration)
tf.summary.scalar("learning.rate", learning_rate, iteration)
tf.summary.scalar("duration", duration, iteration)
self.writer.flush()
def log_validation(self, val_loss, loss_list, accuracy_list, iteration):
(speaker_encoder_loss, gender_autoencoder_loss, gender_classification_loss, gender_adv_loss,
gender_autoencoder_destandardized_loss) = loss_list
speaker_encoder_acc, gender_classification_acc = accuracy_list
with self.writer.as_default():
tf.summary.scalar("validation.loss", val_loss, iteration)
tf.summary.scalar("validation.loss.spenc", speaker_encoder_loss, iteration)
tf.summary.scalar("validation.loss.gauto", gender_autoencoder_loss, iteration)
tf.summary.scalar("validation.loss.gautotrue", gender_autoencoder_destandardized_loss, iteration)
tf.summary.scalar("validation.loss.gcla", gender_classification_loss, iteration)
tf.summary.scalar("validation.loss.gadv", gender_adv_loss, iteration)
tf.summary.scalar('validation.acc.spenc', speaker_encoder_acc, iteration)
tf.summary.scalar('validation.acc.gcla', gender_classification_acc, iteration)
self.writer.flush()
| 55.979592 | 112 | 0.728035 | 2,597 | 0.946774 | 0 | 0 | 0 | 0 | 0 | 0 | 423 | 0.154211 |
f0db6dccf419614f773337c9ea484f5c0bdce823 | 452 | py | Python | itmo/2015-16/final/weight.py | dluschan/olymp | dfbf4352dbc7f6fd7563e7bd19aff6fd67fb50b7 | [
"MIT"
] | null | null | null | itmo/2015-16/final/weight.py | dluschan/olymp | dfbf4352dbc7f6fd7563e7bd19aff6fd67fb50b7 | [
"MIT"
] | null | null | null | itmo/2015-16/final/weight.py | dluschan/olymp | dfbf4352dbc7f6fd7563e7bd19aff6fd67fb50b7 | [
"MIT"
] | 1 | 2018-09-14T18:50:48.000Z | 2018-09-14T18:50:48.000Z | weight = []
# Balance-scale puzzle: each item sits on pan 1 or pan 2.  Encode pan-2 items
# with a negative sign so the current imbalance is just the running sum.
weight = []
diff = 0
n = int(input())
for _ in range(n):
    q, c = map(int, input().split())
    signed = -q if c == 2 else q
    weight.append(signed)
    diff += signed
# Moving item i (or a pair i, j) to the opposite pan changes the imbalance by
# twice its signed weight; keep the smallest absolute imbalance reachable.
min_diff = abs(diff)
for i in range(n):
    min_diff = min(min_diff, abs(diff - 2 * weight[i]))
    for j in range(i + 1, n):
        min_diff = min(min_diff, abs(diff - 2 * (weight[i] + weight[j])))
print(min_diff)
| 25.111111 | 60 | 0.526549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f0db8dc49a5a35a6b18b47a43dda531b71b817b6 | 534 | py | Python | open_spiel/python/examples/test.py | ThorvaldAagaard/open_spiel | 58f6584b555b1a1a83bf674f0cbe0162c1251eca | [
"Apache-2.0"
] | null | null | null | open_spiel/python/examples/test.py | ThorvaldAagaard/open_spiel | 58f6584b555b1a1a83bf674f0cbe0162c1251eca | [
"Apache-2.0"
] | null | null | null | open_spiel/python/examples/test.py | ThorvaldAagaard/open_spiel | 58f6584b555b1a1a83bf674f0cbe0162c1251eca | [
"Apache-2.0"
] | null | null | null | import pyspiel
# Replay a fixed bridge deal/auction/play sequence and print the final state.
game = pyspiel.load_game('bridge(use_double_dummy_result=false)')
line = '30 32 10 35 50 45 21 7 1 42 39 43 0 16 40 20 36 15 22 44 26 6 4 51 47 46 25 14 29 5 34 11 49 31 37 9 41 13 24 8 28 17 48 23 33 18 3 19 38 2 27 12 56 57 52 63 52 52 52 0 32 48 8 3 51 47 15 44 28 16 4 14 50 2 10 49 5 37 9 36 31 24 20 46 22 12 26 13 25 19 1 43 41 17 27 7 33 45 39 40 23 29 6 11 30 18 21 35 38 42 34'
state = game.new_initial_state()
for token in line.split(' '):
    state.apply_action(int(token))
print(state)
| 59.333333 | 321 | 0.685393 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 356 | 0.666667 |
f0dc6c0f1ad89845b0333162183c190359534d22 | 906 | py | Python | texel/keys.py | Xen0byte/texel | 9dcfba163c66e9da5e9b0757c4e587f297b0cfcb | [
"MIT"
] | 119 | 2022-02-06T21:47:55.000Z | 2022-03-21T23:14:30.000Z | texel/keys.py | Xen0byte/texel | 9dcfba163c66e9da5e9b0757c4e587f297b0cfcb | [
"MIT"
] | 3 | 2022-02-07T08:47:20.000Z | 2022-02-09T09:07:17.000Z | texel/keys.py | Xen0byte/texel | 9dcfba163c66e9da5e9b0757c4e587f297b0cfcb | [
"MIT"
] | 5 | 2022-02-07T08:13:11.000Z | 2022-02-12T22:31:37.000Z | import curses
class Key:
    """Immutable bundle of raw key codes treated as one logical key."""

    def __init__(self, *values):
        self.values = values        # raw key codes, in declaration order
        self._hash = hash(values)   # cached: Key objects are hashed frequently
        self._keyset = set(values)  # fast membership tests on the codes

    def __eq__(self, other):
        # BUG FIX: the original compared cached hashes, which (a) raised
        # AttributeError for non-Key operands and (b) could report two distinct
        # keys as equal on a hash collision.  Compare the actual value tuples
        # and defer to the other operand for foreign types.
        if not isinstance(other, Key):
            return NotImplemented
        return self.values == other.values

    def __hash__(self):
        # Consistent with __eq__: equal value tuples hash identically.
        return self._hash
class Keys:
    """Application key bindings; each action accepts one or more raw codes."""

    ESC = Key(27)
    TAB = Key(ord("\t"), ord("n"))
    SHIFT_TAB = Key(353, ord("N"))
    VISUAL = Key(ord("v"), ord("V"))
    COPY = Key(ord("c"), ord("y"))
    QUIT = Key(ord("q"))
    UP = Key(curses.KEY_UP, ord("k"))
    DOWN = Key(curses.KEY_DOWN, ord("j"))
    LEFT = Key(curses.KEY_LEFT, ord("h"))
    RIGHT = Key(curses.KEY_RIGHT, ord("l"))
    HELP = Key(ord("?"))

    ALL = [ESC, TAB, SHIFT_TAB, VISUAL, COPY, QUIT, UP, DOWN, LEFT, RIGHT, HELP]

    # Reverse index: raw key code -> logical Key binding.
    _id_to_key = dict(
        (code, binding)
        for binding in ALL
        for code in binding.values
    )

    @staticmethod
    def to_key(key: int) -> Key:
        """Resolve a raw curses key code to its binding (None when unbound)."""
        return Keys._id_to_key.get(key)
f0dd70671137f9276770eb4324b9867b67865ca3 | 51,458 | py | Python | tests/test_number_cleaner.py | theseus-automl/gorgona | a7366d54430caa5a038488432fb93702e1cb83b8 | [
"Apache-2.0"
] | 1 | 2021-12-12T10:47:00.000Z | 2021-12-12T10:47:00.000Z | tests/test_number_cleaner.py | theseus-automl/gorgona | a7366d54430caa5a038488432fb93702e1cb83b8 | [
"Apache-2.0"
] | 5 | 2021-12-12T10:45:04.000Z | 2022-01-17T07:51:14.000Z | tests/test_number_cleaner.py | theseus-automl/gorgona | a7366d54430caa5a038488432fb93702e1cb83b8 | [
"Apache-2.0"
] | null | null | null | import pytest
from gorgona.stages.cleaners import NumberCleaner
@pytest.fixture()
def setup_number_cleaner():
    """A NumberCleaner with empty replacement strings, i.e. numbers vanish."""
    return NumberCleaner('', '')
# Positive integers must be removed entirely, including digit groups separated
# by quote / backtick / space and "thousands" written with a Latin "k" or a
# Cyrillic "к".  Test names encode the shape of the number under test (they
# look auto-generated; some name/value pairs do not match literally).
def test_positive_integer_single_digit_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("7") == ""
def test_positive_integer_single_digit_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("3") == ""
def test_positive_integer_single_digit_quote_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("9'5") == ""
def test_positive_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("0'257175") == ""
def test_positive_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("9`9") == ""
def test_positive_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("0`985776") == ""
def test_positive_integer_single_digit_space_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("5 6") == ""
def test_positive_integer_single_digit_space_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("3 839118") == ""
def test_positive_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("4k6") == ""
def test_positive_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("3k504421") == ""
def test_positive_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("4к4") == ""
def test_positive_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("5к117864") == ""
def test_positive_integer_multiple_digits_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("774464") == ""
def test_positive_integer_multiple_digits_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("35655") == ""
def test_positive_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("249910'9") == ""
def test_positive_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("480142'838693") == ""
def test_positive_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("154095`1") == ""
def test_positive_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("85818`184705") == ""
def test_positive_integer_multiple_digits_space_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("306485 3") == ""
def test_positive_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("22721 546337") == ""
def test_positive_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("464830k0") == ""
def test_positive_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("955186k918058") == ""
def test_positive_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("570511к2") == ""
def test_positive_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("564964к869484") == ""
# Same matrix of digit-group separators as the positive cases above, but with a
# leading minus sign: the sign must be removed together with the number.
def test_negative_integer_single_digit_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("-4") == ""
def test_negative_integer_single_digit_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("-5") == ""
def test_negative_integer_single_digit_quote_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("-0'0") == ""
def test_negative_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("-8'803962") == ""
def test_negative_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("-0`5") == ""
def test_negative_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("-7`895475") == ""
def test_negative_integer_single_digit_space_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("-9 8") == ""
def test_negative_integer_single_digit_space_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("-8 551966") == ""
def test_negative_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("-2k5") == ""
def test_negative_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("-3k484318") == ""
def test_negative_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("-2к5") == ""
def test_negative_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("-3к283697") == ""
def test_negative_integer_multiple_digits_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("-138166") == ""
def test_negative_integer_multiple_digits_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("-94352") == ""
def test_negative_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("-473778'5") == ""
def test_negative_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("-787864'453129") == ""
def test_negative_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("-911004`4") == ""
def test_negative_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("-392620`715189") == ""
def test_negative_integer_multiple_digits_space_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("-908466 6") == ""
def test_negative_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("-279418 645330") == ""
def test_negative_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("-591608k5") == ""
def test_negative_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("-997435k133244") == ""
def test_negative_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("-172174к1") == ""
def test_negative_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("-733910к513370") == ""
# "Left text" cases: a word precedes the number.  Only the numeric token must
# be removed; the surrounding text (including the separating space) survives,
# hence the expected output "hello ".
def test_left_text_positive_integer_single_digit_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello 4") == "hello "
def test_left_text_positive_integer_single_digit_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello 7") == "hello "
def test_left_text_positive_integer_single_digit_quote_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello 3'5") == "hello "
def test_left_text_positive_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello 1'414237") == "hello "
def test_left_text_positive_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello 2`5") == "hello "
def test_left_text_positive_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello 6`792669") == "hello "
def test_left_text_positive_integer_single_digit_space_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello 8 6") == "hello "
def test_left_text_positive_integer_single_digit_space_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello 4 732535") == "hello "
def test_left_text_positive_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello 7k2") == "hello "
def test_left_text_positive_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello 9k798422") == "hello "
def test_left_text_positive_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello 0к2") == "hello "
def test_left_text_positive_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello 6к449708") == "hello "
def test_left_text_positive_integer_multiple_digits_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello 84908") == "hello "
def test_left_text_positive_integer_multiple_digits_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello 434178") == "hello "
def test_left_text_positive_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello 580178'5") == "hello "
def test_left_text_positive_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello 403087'446030") == "hello "
def test_left_text_positive_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello 99510`9") == "hello "
def test_left_text_positive_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello 880343`699877") == "hello "
def test_left_text_positive_integer_multiple_digits_space_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello 525007 2") == "hello "
def test_left_text_positive_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello 872947 296824") == "hello "
def test_left_text_positive_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello 450966k4") == "hello "
def test_left_text_positive_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello 993633k963503") == "hello "
def test_left_text_positive_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello 902081к2") == "hello "
def test_left_text_positive_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello 398410к5738") == "hello "
def test_left_text_negative_integer_single_digit_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello -6") == "hello "
def test_left_text_negative_integer_single_digit_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello -6") == "hello "
def test_left_text_negative_integer_single_digit_quote_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello -6'2") == "hello "
def test_left_text_negative_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello -3'759377") == "hello "
def test_left_text_negative_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello -7`1") == "hello "
def test_left_text_negative_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello -1`502604") == "hello "
def test_left_text_negative_integer_single_digit_space_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello -2 3") == "hello "
def test_left_text_negative_integer_single_digit_space_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello -1 393569") == "hello "
def test_left_text_negative_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello -6k3") == "hello "
def test_left_text_negative_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello -1k432422") == "hello "
def test_left_text_negative_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello -5к5") == "hello "
def test_left_text_negative_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello -1к68404") == "hello "
def test_left_text_negative_integer_multiple_digits_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello -518862") == "hello "
def test_left_text_negative_integer_multiple_digits_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello -311825") == "hello "
def test_left_text_negative_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello -13646'6") == "hello "
def test_left_text_negative_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello -155588'658068") == "hello "
def test_left_text_negative_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello -902010`6") == "hello "
def test_left_text_negative_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello -339050`817304") == "hello "
def test_left_text_negative_integer_multiple_digits_space_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello -923620 6") == "hello "
def test_left_text_negative_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello -277075 908827") == "hello "
def test_left_text_negative_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello -770630k5") == "hello "
def test_left_text_negative_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello -543724k219469") == "hello "
def test_left_text_negative_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("hello -118460к2") == "hello "
def test_left_text_negative_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("hello -159072к256757") == "hello "
# "Right text" cases: the number precedes a word.  Only the numeric token is
# removed; the trailing text (with its separating space) survives, hence the
# expected output " hello".
def test_right_text_positive_integer_single_digit_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("2 hello") == " hello"
def test_right_text_positive_integer_single_digit_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("1 hello") == " hello"
def test_right_text_positive_integer_single_digit_quote_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("6'4 hello") == " hello"
def test_right_text_positive_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("3'58431 hello") == " hello"
def test_right_text_positive_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("0`5 hello") == " hello"
def test_right_text_positive_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("5`155738 hello") == " hello"
def test_right_text_positive_integer_single_digit_space_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("5 3 hello") == " hello"
def test_right_text_positive_integer_single_digit_space_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("2 912797 hello") == " hello"
def test_right_text_positive_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("5k3 hello") == " hello"
def test_right_text_positive_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("9k911768 hello") == " hello"
def test_right_text_positive_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("3к3 hello") == " hello"
def test_right_text_positive_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("3к750248 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("42678 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("215188 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("455258'3 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
    assert setup_number_cleaner("806580'611928 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
    assert setup_number_cleaner("479352`5 hello") == " hello"
assert setup_number_cleaner("519252`685635 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("928184 7 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("489262 493403 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("34773k1 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("675960k827611 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("876524к5 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("55243к431074 hello") == " hello"
def test_right_text_negative_integer_single_digit_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-7 hello") == " hello"
def test_right_text_negative_integer_single_digit_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-1 hello") == " hello"
def test_right_text_negative_integer_single_digit_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-5'2 hello") == " hello"
def test_right_text_negative_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-9'814320 hello") == " hello"
def test_right_text_negative_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-0`8 hello") == " hello"
def test_right_text_negative_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-3`877194 hello") == " hello"
def test_right_text_negative_integer_single_digit_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-8 6 hello") == " hello"
def test_right_text_negative_integer_single_digit_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-3 873345 hello") == " hello"
def test_right_text_negative_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-8k9 hello") == " hello"
def test_right_text_negative_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-5k346049 hello") == " hello"
def test_right_text_negative_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-4к6 hello") == " hello"
def test_right_text_negative_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-9к703473 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-190239 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-839965 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-517738'9 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-764801'614671 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-634963`9 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-372948`939025 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-760889 7 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-7831 504330 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-837557k3 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-195729k572621 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-355848к0 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-665426к392704 hello") == " hello"
def test_both_text_positive_integer_single_digit_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 4 world") == "hello world"
def test_both_text_positive_integer_single_digit_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 8 world") == "hello world"
def test_both_text_positive_integer_single_digit_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 6'2 world") == "hello world"
def test_both_text_positive_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 3'622671 world") == "hello world"
def test_both_text_positive_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 6`0 world") == "hello world"
def test_both_text_positive_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 8`757195 world") == "hello world"
def test_both_text_positive_integer_single_digit_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 0 1 world") == "hello world"
def test_both_text_positive_integer_single_digit_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 7 862462 world") == "hello world"
def test_both_text_positive_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 8k5 world") == "hello world"
def test_both_text_positive_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 3k314471 world") == "hello world"
def test_both_text_positive_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 2к5 world") == "hello world"
def test_both_text_positive_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 9к486783 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 805686 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 369355 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 163343'0 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 461408'736785 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 864015`2 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 647078`653487 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 222917 9 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 564211 641276 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 440821k8 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 845780k860446 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 81289к1 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 146234к484167 world") == "hello world"
def test_both_text_negative_integer_single_digit_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -4 world") == "hello world"
def test_both_text_negative_integer_single_digit_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -0 world") == "hello world"
def test_both_text_negative_integer_single_digit_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -4'9 world") == "hello world"
def test_both_text_negative_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -5'387080 world") == "hello world"
def test_both_text_negative_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -3`8 world") == "hello world"
def test_both_text_negative_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -0`385330 world") == "hello world"
def test_both_text_negative_integer_single_digit_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -7 7 world") == "hello world"
def test_both_text_negative_integer_single_digit_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -1 245555 world") == "hello world"
def test_both_text_negative_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -4k4 world") == "hello world"
def test_both_text_negative_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -7k737481 world") == "hello world"
def test_both_text_negative_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -3к8 world") == "hello world"
def test_both_text_negative_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -4к979649 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -579549 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -521868 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -494030'8 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -997018'388418 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -48935`6 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -115491`848265 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -373023 5 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -526547 383697 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -304461k5 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -308120k521264 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -230268к9 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -695525к628100 world") == "hello world"
def test_inside_text_positive_integer_single_digit_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he4llo") == "he4llo"
def test_inside_text_positive_integer_single_digit_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he8llo") == "he8llo"
def test_inside_text_positive_integer_single_digit_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he0'8llo") == "he0'8llo"
def test_inside_text_positive_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he8'503290llo") == "he8'503290llo"
def test_inside_text_positive_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he3`3llo") == "he3`3llo"
def test_inside_text_positive_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he0`179192llo") == "he0`179192llo"
def test_inside_text_positive_integer_single_digit_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he2 4llo") == "he2 4llo"
def test_inside_text_positive_integer_single_digit_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he3 135087llo") == "he3 135087llo"
def test_inside_text_positive_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he8k4llo") == "he8k4llo"
def test_inside_text_positive_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he0k657610llo") == "he0k657610llo"
def test_inside_text_positive_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he9к2llo") == "he9к2llo"
def test_inside_text_positive_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he6к839529llo") == "he6к839529llo"
def test_inside_text_positive_integer_multiple_digits_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he513934llo") == "he513934llo"
def test_inside_text_positive_integer_multiple_digits_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he424141llo") == "he424141llo"
def test_inside_text_positive_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he757949'6llo") == "he757949'6llo"
def test_inside_text_positive_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he650035'989071llo") == "he650035'989071llo"
def test_inside_text_positive_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he849767`6llo") == "he849767`6llo"
def test_inside_text_positive_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he234327`915339llo") == "he234327`915339llo"
def test_inside_text_positive_integer_multiple_digits_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he703293 5llo") == "he703293 5llo"
def test_inside_text_positive_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he409856 70023llo") == "he409856 70023llo"
def test_inside_text_positive_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he744620k6llo") == "he744620k6llo"
def test_inside_text_positive_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he743290k231362llo") == "he743290k231362llo"
def test_inside_text_positive_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he791511к3llo") == "he791511к3llo"
def test_inside_text_positive_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he401092к788202llo") == "he401092к788202llo"
def test_inside_text_negative_integer_single_digit_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-4llo") == "he-4llo"
def test_inside_text_negative_integer_single_digit_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-8llo") == "he-8llo"
def test_inside_text_negative_integer_single_digit_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-3'3llo") == "he-3'3llo"
def test_inside_text_negative_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-4'290601llo") == "he-4'290601llo"
def test_inside_text_negative_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-7`0llo") == "he-7`0llo"
def test_inside_text_negative_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-6`707325llo") == "he-6`707325llo"
def test_inside_text_negative_integer_single_digit_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-9 3llo") == "he-9 3llo"
def test_inside_text_negative_integer_single_digit_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-0 183754llo") == "he-0 183754llo"
def test_inside_text_negative_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-1k4llo") == "he-1k4llo"
def test_inside_text_negative_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-3k878581llo") == "he-3k878581llo"
def test_inside_text_negative_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-0к0llo") == "he-0к0llo"
def test_inside_text_negative_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-6к377555llo") == "he-6к377555llo"
def test_inside_text_negative_integer_multiple_digits_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-598986llo") == "he-598986llo"
def test_inside_text_negative_integer_multiple_digits_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-393398llo") == "he-393398llo"
def test_inside_text_negative_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-890636'7llo") == "he-890636'7llo"
def test_inside_text_negative_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-834451'288314llo") == "he-834451'288314llo"
def test_inside_text_negative_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-347856`8llo") == "he-347856`8llo"
def test_inside_text_negative_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-504475`759252llo") == "he-504475`759252llo"
def test_inside_text_negative_integer_multiple_digits_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-349749 9llo") == "he-349749 9llo"
def test_inside_text_negative_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-184038 68144llo") == "he-184038 68144llo"
def test_inside_text_negative_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-289290k6llo") == "he-289290k6llo"
def test_inside_text_negative_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-964399k733553llo") == "he-964399k733553llo"
def test_inside_text_negative_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-63989к5llo") == "he-63989к5llo"
def test_inside_text_negative_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-403175к774771llo") == "he-403175к774771llo"
def test_positive_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("2.9") == ""
def test_positive_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("8.569333") == ""
def test_positive_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("5,0") == ""
def test_positive_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("1,780518") == ""
def test_positive_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("785313.5") == ""
def test_positive_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("537221.74655") == ""
def test_positive_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("391240,8") == ""
def test_positive_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("181004,460352") == ""
def test_negative_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("-9.6") == ""
def test_negative_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("-8.258030") == ""
def test_negative_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("-7,1") == ""
def test_negative_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("-0,885164") == ""
def test_negative_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("-864605.4") == ""
def test_negative_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("-355839.416791") == ""
def test_negative_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("-578243,4") == ""
def test_negative_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("-98767,817853") == ""
def test_left_text_positive_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 4.6") == "hello "
def test_left_text_positive_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 1.74914") == "hello "
def test_left_text_positive_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 3,5") == "hello "
def test_left_text_positive_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 2,8995") == "hello "
def test_left_text_positive_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 128684.7") == "hello "
def test_left_text_positive_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 832606.932249") == "hello "
def test_left_text_positive_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 377802,4") == "hello "
def test_left_text_positive_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 762367,135153") == "hello "
def test_left_text_negative_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -1.8") == "hello "
def test_left_text_negative_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -5.792708") == "hello "
def test_left_text_negative_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -2,5") == "hello "
def test_left_text_negative_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -5,888953") == "hello "
def test_left_text_negative_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -486940.5") == "hello "
def test_left_text_negative_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -716193.653169") == "hello "
def test_left_text_negative_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -892150,7") == "hello "
def test_left_text_negative_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -825361,420340") == "hello "
def test_right_text_positive_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("9.7 hello") == " hello"
def test_right_text_positive_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("8.668371 hello") == " hello"
def test_right_text_positive_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("6,9 hello") == " hello"
def test_right_text_positive_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("9,934089 hello") == " hello"
def test_right_text_positive_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("243369.1 hello") == " hello"
def test_right_text_positive_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("424756.17786 hello") == " hello"
def test_right_text_positive_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("922173,3 hello") == " hello"
def test_right_text_positive_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("829857,999977 hello") == " hello"
def test_right_text_negative_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("-1.8 hello") == " hello"
def test_right_text_negative_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("-5.743926 hello") == " hello"
def test_right_text_negative_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("-1,9 hello") == " hello"
def test_right_text_negative_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("-3,740022 hello") == " hello"
def test_right_text_negative_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("-746442.5 hello") == " hello"
def test_right_text_negative_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("-796358.785568 hello") == " hello"
def test_right_text_negative_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("-162965,8 hello") == " hello"
def test_right_text_negative_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("-510271,12306 hello") == " hello"
def test_both_text_positive_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 2.6 world") == "hello world"
# Tests for a number-cleaner transformation.  ``setup_number_cleaner`` is
# presumably a pytest fixture defined elsewhere (TODO: confirm in conftest.py)
# that returns a callable stripping numeric tokens from text.  Each case varies
# sign, integer/fraction digit counts and the decimal separator ('.' vs ',').
# Floats delimited by whitespace are removed; floats embedded inside a word
# must be left untouched.  NOTE(review): these hand-written permutations could
# be collapsed with @pytest.mark.parametrize.

# --- float surrounded by text on both sides: the number is removed ---
def test_both_text_positive_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
    assert setup_number_cleaner("hello 6.756683 world") == "hello world"
def test_both_text_positive_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
    assert setup_number_cleaner("hello 6,3 world") == "hello world"
def test_both_text_positive_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
    assert setup_number_cleaner("hello 1,84108 world") == "hello world"
def test_both_text_positive_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
    assert setup_number_cleaner("hello 430035.4 world") == "hello world"
def test_both_text_positive_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
    assert setup_number_cleaner("hello 547739.554345 world") == "hello world"
def test_both_text_positive_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
    assert setup_number_cleaner("hello 26171,1 world") == "hello world"
def test_both_text_positive_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
    assert setup_number_cleaner("hello 666557,952575 world") == "hello world"
def test_both_text_negative_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
    assert setup_number_cleaner("hello -1.0 world") == "hello world"
def test_both_text_negative_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
    assert setup_number_cleaner("hello -1.445504 world") == "hello world"
def test_both_text_negative_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
    assert setup_number_cleaner("hello -7,7 world") == "hello world"
def test_both_text_negative_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
    assert setup_number_cleaner("hello -3,87658 world") == "hello world"
def test_both_text_negative_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
    assert setup_number_cleaner("hello -477476.4 world") == "hello world"
def test_both_text_negative_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
    assert setup_number_cleaner("hello -541300.867811 world") == "hello world"
def test_both_text_negative_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
    assert setup_number_cleaner("hello -708842,4 world") == "hello world"
def test_both_text_negative_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
    assert setup_number_cleaner("hello -741041,952275 world") == "hello world"

# --- float embedded inside a word: the input must be returned unchanged ---
def test_inside_text_positive_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
    assert setup_number_cleaner("he4.9llo") == "he4.9llo"
def test_inside_text_positive_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
    assert setup_number_cleaner("he4.605648llo") == "he4.605648llo"
def test_inside_text_positive_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
    assert setup_number_cleaner("he7,6llo") == "he7,6llo"
def test_inside_text_positive_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
    assert setup_number_cleaner("he1,640808llo") == "he1,640808llo"
def test_inside_text_positive_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
    assert setup_number_cleaner("he311010.5llo") == "he311010.5llo"
def test_inside_text_positive_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
    assert setup_number_cleaner("he593407.960145llo") == "he593407.960145llo"
def test_inside_text_positive_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
    assert setup_number_cleaner("he318574,7llo") == "he318574,7llo"
def test_inside_text_positive_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
    assert setup_number_cleaner("he113354,321762llo") == "he113354,321762llo"
def test_inside_text_negative_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
    assert setup_number_cleaner("he-1.7llo") == "he-1.7llo"
def test_inside_text_negative_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
    assert setup_number_cleaner("he-5.347666llo") == "he-5.347666llo"
def test_inside_text_negative_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
    assert setup_number_cleaner("he-1,5llo") == "he-1,5llo"
def test_inside_text_negative_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
    assert setup_number_cleaner("he-0,785082llo") == "he-0,785082llo"
def test_inside_text_negative_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
    assert setup_number_cleaner("he-19847.2llo") == "he-19847.2llo"
def test_inside_text_negative_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
    assert setup_number_cleaner("he-163691.435539llo") == "he-163691.435539llo"
def test_inside_text_negative_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
    assert setup_number_cleaner("he-416740,2llo") == "he-416740,2llo"
def test_inside_text_negative_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
    assert setup_number_cleaner("he-117470,870470llo") == "he-117470,870470llo"
| 39.766615 | 110 | 0.837324 | 0 | 0 | 0 | 0 | 114 | 0.002213 | 0 | 0 | 8,056 | 0.156409 |
f0dd9ec3c005973e7ab2799270be09efd623558e | 9,700 | py | Python | test/test_digitalbitbox.py | matejcik/HWI | f0021502d470660b9d8a5e79fcc440ac809a610f | [
"MIT"
] | 54 | 2018-03-16T14:50:19.000Z | 2019-01-29T19:19:17.000Z | test/test_digitalbitbox.py | matejcik/HWI | f0021502d470660b9d8a5e79fcc440ac809a610f | [
"MIT"
] | 86 | 2018-01-30T18:40:44.000Z | 2019-01-30T20:48:24.000Z | test/test_digitalbitbox.py | matejcik/HWI | f0021502d470660b9d8a5e79fcc440ac809a610f | [
"MIT"
] | 11 | 2017-09-05T17:19:53.000Z | 2019-01-10T19:11:04.000Z | #! /usr/bin/env python3
import argparse
import atexit
import json
import os
import subprocess
import sys
import time
import unittest
from test_device import (
Bitcoind,
DeviceEmulator,
DeviceTestCase,
TestDeviceConnect,
TestGetKeypool,
TestGetDescriptors,
TestSignTx,
)
from hwilib.devices.digitalbitbox import BitboxSimulator, send_plain, send_encrypt
class BitBox01Emulator(DeviceEmulator):
    """DeviceEmulator wrapper around the Digital BitBox 01 simulator binary.

    Manages the simulator subprocess lifecycle (start/stop), exposes the
    connection parameters and the device's capability flags that the shared
    device test cases read.
    """

    def __init__(self, simulator):
        """Record the simulator binary path and the fixed device metadata.

        :param simulator: filesystem path to the simulator executable
        """
        # Start with a fresh stderr log for this run; ignore if absent.
        try:
            os.unlink('bitbox-emulator.stderr')
        except FileNotFoundError:
            pass
        self.simulator = simulator
        self.bitbox_log = None        # opened in start()
        self.simulator_proc = None    # subprocess handle, set in start()
        self.type = 'digitalbitbox'
        # The simulator listens on UDP; tests connect via this pseudo-path.
        self.path = 'udp:127.0.0.1:35345'
        # Fingerprint/xpub of the fixed test seed restored from test_backup.pdf.
        self.fingerprint = 'a31b978a'
        self.master_xpub = "tpubDCjZ76WbqdyGWi7NaFLuhWL8GX7NK5gCGB7ApynxUHGkgvBVCtpXX1i6Uj88rL9WKM7vimN8QZRjowSX4g2uPxjnuie1Kg7XK8pvNGZznQi"
        self.password = "0000"
        # Capability flags consumed by the generic test suites.
        self.supports_ms_display = False
        self.supports_xpub_ms_display = False
        self.supports_unsorted_ms = False
        self.supports_taproot = False
        self.strict_bip48 = False
        self.include_xpubs = False
        self.supports_device_multiple_multisig = True

    def start(self):
        """Launch the simulator, wait for it to answer, and load the test seed."""
        super().start()
        self.bitbox_log = open('bitbox-emulator.stderr', 'a')
        # Start the Digital Bitbox simulator; it reads SD-card files from the
        # relative ../../tests/sd_files/ directory, so cwd is the binary's dir.
        self.simulator_proc = subprocess.Popen(
            [
                './' + os.path.basename(self.simulator),
                '../../tests/sd_files/'
            ],
            cwd=os.path.dirname(self.simulator),
            stderr=self.bitbox_log
        )
        # Poll until the simulator accepts the password command (i.e. it is up).
        while True:
            try:
                self.dev = BitboxSimulator('127.0.0.1', 35345)
                reply = send_plain(b'{"password":"0000"}', self.dev)
                if 'error' not in reply:
                    break
            except Exception:
                pass
            time.sleep(0.5)
        # Restore the known test wallet from the bundled encrypted backup so
        # every run starts from the same seed/fingerprint.
        send_encrypt(json.dumps({"seed": {"source": "backup", "filename": "test_backup.pdf", "key": "key"}}), '0000', self.dev)
        atexit.register(self.stop)

    def stop(self):
        """Terminate the simulator subprocess and close its log file."""
        super().stop()
        self.simulator_proc.terminate()
        self.simulator_proc.wait()
        self.bitbox_log.close()
        atexit.unregister(self.stop)
# DigitalBitbox specific management command tests
class TestDBBManCommands(DeviceTestCase):
    """Digital Bitbox specific management-command tests.

    Exercises the device-management subcommands (restore, PIN prompts,
    address display, setup/wipe, backup) against the simulator and checks
    the expected error codes/messages for operations the BitBox01 does not
    support from the host.

    Note: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    has been replaced with ``assertEqual`` throughout.
    """

    def test_restore(self):
        """Restoring via software is unsupported and must return code -9."""
        result = self.do_command(self.dev_args + ['-i', 'restore'])
        self.assertIn('error', result)
        self.assertIn('code', result)
        self.assertEqual(result['error'], 'The Digital Bitbox does not support restoring via software')
        self.assertEqual(result['code'], -9)

    def test_pin(self):
        """Both promptpin and sendpin are rejected: the device has no host PIN."""
        result = self.do_command(self.dev_args + ['promptpin'])
        self.assertIn('error', result)
        self.assertIn('code', result)
        self.assertEqual(result['error'], 'The Digital Bitbox does not need a PIN sent from the host')
        self.assertEqual(result['code'], -9)
        result = self.do_command(self.dev_args + ['sendpin', '1234'])
        self.assertIn('error', result)
        self.assertIn('code', result)
        self.assertEqual(result['error'], 'The Digital Bitbox does not need a PIN sent from the host')
        self.assertEqual(result['code'], -9)

    def test_display(self):
        """Address display is rejected: the BitBox01 has no screen."""
        result = self.do_command(self.dev_args + ['displayaddress', '--path', 'm/0h'])
        self.assertIn('error', result)
        self.assertIn('code', result)
        self.assertEqual(result['error'], 'The Digital Bitbox does not have a screen to display addresses on')
        self.assertEqual(result['code'], -9)

    def test_setup_wipe(self):
        """Full setup/wipe cycle, including argument validation, then restore
        the original test seed so later tests see the expected wallet."""
        # Device is initialized, so setup should fail with code -10.
        result = self.do_command(self.dev_args + ['-i', 'setup', '--label', 'setup_test', '--backup_passphrase', 'testpass'])
        self.assertEqual(result['code'], -10)
        self.assertEqual(result['error'], 'Device is already initialized. Use wipe first and try again')
        # Wipe
        result = self.do_command(self.dev_args + ['wipe'])
        self.assertTrue(result['success'])
        # Argument validation: both label and backup passphrase are required.
        result = self.do_command(self.dev_args + ['-i', 'setup', '--label', 'setup_test'])
        self.assertEqual(result['code'], -7)
        self.assertEqual(result['error'], 'The label and backup passphrase for a new Digital Bitbox wallet must be specified and cannot be empty')
        result = self.do_command(self.dev_args + ['-i', 'setup', '--backup_passphrase', 'testpass'])
        self.assertEqual(result['code'], -7)
        self.assertEqual(result['error'], 'The label and backup passphrase for a new Digital Bitbox wallet must be specified and cannot be empty')
        # Setup succeeds with both arguments supplied.
        result = self.do_command(self.dev_args + ['-i', 'setup', '--label', 'setup_test', '--backup_passphrase', 'testpass'])
        self.assertTrue(result['success'])
        # Reset back to the original test seed: wipe, set password, reload backup.
        result = self.do_command(self.dev_args + ['wipe'])
        self.assertTrue(result['success'])
        send_plain(b'{"password":"0000"}', self.emulator.dev)
        send_encrypt(json.dumps({"seed": {"source": "backup", "filename": "test_backup.pdf", "key": "key"}}), '0000', self.emulator.dev)
        # Device is initialized again, so setup must fail as at the start.
        result = self.do_command(self.dev_args + ['-i', 'setup', '--label', 'setup_test', '--backup_passphrase', 'testpass'])
        self.assertEqual(result['code'], -10)
        self.assertEqual(result['error'], 'Device is already initialized. Use wipe first and try again')

    def test_backup(self):
        """Backup argument validation, then a successful backup after setup."""
        # Argument validation: both label and backup passphrase are required.
        result = self.do_command(self.dev_args + ['backup', '--label', 'backup_test'])
        self.assertEqual(result['code'], -7)
        self.assertEqual(result['error'], 'The label and backup passphrase for a Digital Bitbox backup must be specified and cannot be empty')
        result = self.do_command(self.dev_args + ['backup', '--backup_passphrase', 'key'])
        self.assertEqual(result['code'], -7)
        self.assertEqual(result['error'], 'The label and backup passphrase for a Digital Bitbox backup must be specified and cannot be empty')
        # Wipe
        result = self.do_command(self.dev_args + ['wipe'])
        self.assertTrue(result['success'])
        # Setup
        result = self.do_command(self.dev_args + ['-i', 'setup', '--label', 'backup_test', '--backup_passphrase', 'testpass'])
        self.assertTrue(result['success'])
        # Make the backup
        result = self.do_command(self.dev_args + ['backup', '--label', 'backup_test_backup', '--backup_passphrase', 'testpass'])
        self.assertTrue(result['success'])
class TestBitboxGetXpub(DeviceTestCase):
    """Check the --expert getxpub output against known values for the test seed."""

    def test_getxpub(self):
        """Fetch an xpub at a hardened path and verify all derived metadata."""
        # The simulator runs on mainnet, so drop the testnet chain flags.
        for chain_flag in ('--chain', 'test'):
            self.dev_args.remove(chain_flag)
        result = self.do_command(self.dev_args + ['--expert', 'getxpub', 'm/44h/0h/0h/3'])
        expected = {
            'xpub': 'xpub6Du9e5Cz1NZWz3dvsvM21tsj4xEdbAb7AcbysFL42Y3yr8PLMnsaxhetHxurTpX5Rp5RbnFFwP1wct8K3gErCUSwcxFhxThsMBSxdmkhTNf',
            'depth': 4,
            'parent_fingerprint': '31d5e5ea',
            'child_num': 3,
            'chaincode': '7062818c752f878bf96ca668f77630452c3fa033b7415eed3ff568e04ada8104',
            'pubkey': '029078c9ad8421afd958d7bc054a0952874923e2586fc9375604f0479a354ea193',
        }
        for field, value in expected.items():
            self.assertEqual(result[field], value)
        self.assertFalse(result['testnet'])
        self.assertFalse(result['private'])
def digitalbitbox_test_suite(simulator, bitcoind, interface):
    """Assemble and run the device test suite against the BitBox01 emulator.

    :param simulator: path to the simulator binary
    :param bitcoind: running Bitcoind instance the tests talk to
    :param interface: one of 'library', 'cli', 'bindist'
    :return: True when every test passed
    """
    dev_emulator = BitBox01Emulator(simulator)
    signtx_cases = [
        (["legacy"], ["legacy"], True, True),
        (["segwit"], ["segwit"], True, True),
        (["legacy", "segwit"], ["legacy", "segwit"], True, True),
    ]

    # Parameterize every generic device test case against this emulator,
    # preserving the original registration order.
    case_specs = [
        (TestDBBManCommands, {}),
        (TestBitboxGetXpub, {}),
        (TestDeviceConnect, {'detect_type': "digitalbitbox"}),
        (TestDeviceConnect, {'detect_type': "digitalbitbox_01_simulator"}),
        (TestGetDescriptors, {}),
        (TestGetKeypool, {}),
        (TestSignTx, {'signtx_cases': signtx_cases}),
    ]
    suite = unittest.TestSuite()
    for case_class, extra_kwargs in case_specs:
        suite.addTest(DeviceTestCase.parameterize(
            case_class, bitcoind, emulator=dev_emulator, interface=interface,
            **extra_kwargs))

    outcome = unittest.TextTestRunner(stream=sys.stdout, verbosity=2).run(suite)
    return outcome.wasSuccessful()
if __name__ == '__main__':
    # CLI entry point: run the BitBox01 suite against a local simulator and a
    # freshly started bitcoind.  Exits non-zero when any test fails.
    parser = argparse.ArgumentParser(description='Test Digital Bitbox implementation')
    parser.add_argument('simulator', help='Path to simulator binary')
    parser.add_argument('bitcoind', help='Path to bitcoind binary')
    parser.add_argument('--interface', help='Which interface to send commands over', choices=['library', 'cli', 'bindist'], default='library')
    args = parser.parse_args()

    # Start bitcoind
    bitcoind = Bitcoind.create(args.bitcoind)

    # wasSuccessful() is True on success; negate so the exit code is 0 on pass.
    sys.exit(not digitalbitbox_test_suite(args.simulator, bitcoind, args.interface))
| 45.971564 | 161 | 0.668454 | 7,277 | 0.750206 | 0 | 0 | 0 | 0 | 0 | 0 | 3,075 | 0.31701 |
f0de783da190b1f53132dc15989ab3f8990f924d | 560 | py | Python | docs/conf.py | maru/fiubar | be12547ae3f4560765c86ce5c49988931b09b19a | [
"MIT"
] | 5 | 2016-07-27T16:01:41.000Z | 2020-03-10T21:11:31.000Z | docs/conf.py | maru/fiubar | be12547ae3f4560765c86ce5c49988931b09b19a | [
"MIT"
] | 14 | 2015-07-22T16:41:58.000Z | 2019-03-28T20:45:17.000Z | docs/conf.py | maru/fiubar | be12547ae3f4560765c86ce5c49988931b09b19a | [
"MIT"
] | 3 | 2015-07-22T15:14:44.000Z | 2018-04-16T09:49:35.000Z | import os
# Sphinx configuration for the 'fiubar' project documentation.

# Read the Docs sets READTHEDOCS=True in its build environment; locally we
# fall back to the sphinx_rtd_theme package installed on the machine.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
extensions = []
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
master_doc = 'index'
project = u'fiubar'
# NOTE: 'copyright' shadows the builtin, but the name is required by Sphinx.
copyright = u'2008-2018, Maru Berezin'
version = '2.0.0'
release = '2.0.0'
exclude_trees = ['_build']
pygments_style = 'sphinx'
html_static_path = ['_static']
if not on_rtd:
    # RTD injects its own theme; only configure it for local builds.
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Markdown support via recommonmark (source_parsers is a legacy Sphinx API).
source_parsers = {
    '.md': 'recommonmark.parser.CommonMarkParser',
}
| 24.347826 | 62 | 0.707143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.328571 |
f0debfa3d1d8a0bacee277563d8f6b144a1f884d | 663 | py | Python | 2021/day9.py | tobeannouncd/AdventOfCode | b4fe6e9b10a0dc191429a90ab351806df03bca10 | [
"MIT"
] | null | null | null | 2021/day9.py | tobeannouncd/AdventOfCode | b4fe6e9b10a0dc191429a90ab351806df03bca10 | [
"MIT"
] | null | null | null | 2021/day9.py | tobeannouncd/AdventOfCode | b4fe6e9b10a0dc191429a90ab351806df03bca10 | [
"MIT"
] | null | null | null | from io import StringIO
import itertools
import numpy as np
import numpy.ma as ma
inputfile = './input/day9.txt'
# inputfile = StringIO('''2199943210
# 3987894921
# 9856789892
# 8767896789
# 9899965678''')
def neighbors(ar, i, j):
    """Return the set of values in the four cells orthogonally adjacent to (i, j).

    ``ar`` is expected to be a 2-D numpy array (tuple indexing is used) and
    (i, j) must not lie on the outer edge.  Duplicate neighbor values collapse
    because a set is returned.
    """
    offsets = ((-1, 0), (1, 0), (0, -1), (0, 1))
    return {ar[i + di, j + dj] for di, dj in offsets}
# Advent of Code 2021 day 9 part 1: sum of risk levels of all low points.
# Read the height grid as integers, one digit per cell.
a = np.genfromtxt(inputfile, dtype='i', delimiter=1)
nRows, nCols = a.shape
# Pad the border with 10 (higher than any height 0-9) so every interior cell
# has four neighbors and edge cells never win spuriously.
b = np.pad(a, ((1, 1), (1, 1)), constant_values=10)
lowPoints = []
# Scan the original grid region inside the padded array.
for i, j in itertools.product(range(1, nRows+1), range(1, nCols+1)):
    # A low point is strictly lower than all four orthogonal neighbors.
    if all(b[i,j] < foo for foo in neighbors(b,i,j)):
        lowPoints.append(b[i,j])
# Risk level of a low point is its height plus one.
print(sum(1+n for n in lowPoints))
| 22.1 | 68 | 0.616893 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.164404 |
f0df7eb72b3c3a8b625596cf0e32b466f1260149 | 456 | py | Python | ml-env/examples/train.py | RafalSkolasinski/dockerfiles | 8c8609fda42ad2b55960b64aef12774e00e2a4d4 | [
"MIT"
] | null | null | null | ml-env/examples/train.py | RafalSkolasinski/dockerfiles | 8c8609fda42ad2b55960b64aef12774e00e2a4d4 | [
"MIT"
] | 134 | 2021-02-10T14:32:47.000Z | 2022-03-31T02:16:17.000Z | ml-env/examples/train.py | RafalSkolasinski/dockerfiles | 8c8609fda42ad2b55960b64aef12774e00e2a4d4 | [
"MIT"
] | null | null | null | import numpy as np
import joblib
from sklearn import datasets
from sklearn import svm
from sklearn.model_selection import train_test_split
# Destination for the serialized classifier.
filename_p = "model.joblib"

if __name__ == "__main__":
    # Train a small SVM classifier on the scikit-learn digits dataset and
    # persist the fitted model to disk with joblib.
    digits = datasets.load_digits()
    features_train, _features_test, labels_train, _labels_test = train_test_split(
        digits.data, digits.target, random_state=0
    )
    classifier = svm.SVC(gamma=0.001, C=100.0)
    classifier.fit(features_train, labels_train)
    joblib.dump(classifier, filename_p)
| 19 | 56 | 0.717105 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.052632 |
f0dfa56f80d5b88124cdf73fc8c809e7cf604253 | 1,050 | py | Python | vocoder/datasets/__init__.py | ZENGZHEN-TTS/LVCNet | e81e13f3479a4d85f498a02e42338ebe823a8b3d | [
"Apache-2.0"
] | 45 | 2021-02-23T07:43:14.000Z | 2021-06-21T11:33:45.000Z | vocoder/datasets/__init__.py | JackZiLong/LVCNet | e81e13f3479a4d85f498a02e42338ebe823a8b3d | [
"Apache-2.0"
] | 3 | 2021-04-08T05:01:38.000Z | 2021-06-09T17:41:48.000Z | vocoder/datasets/__init__.py | ZENGZHEN-TTS/LVCNet | e81e13f3479a4d85f498a02e42338ebe823a8b3d | [
"Apache-2.0"
] | 11 | 2021-02-25T18:14:12.000Z | 2021-06-21T11:40:41.000Z |
from .audio_mel import PWGAudioMelNoiseDataset, DataLoader
dataset_class_dict = {
"PWGAudioMelNoiseDataset": PWGAudioMelNoiseDataset
}
def create_dataloader(dataset_classname,
dataset_config,
batch_size=1,
collate_fn=None,
shuffle=False,
num_workers=0,
drop_last=False,
) -> DataLoader:
''' create dataloader
Args:
dataset_classname (str) : the classname of dataset.
dataset_config (dict): the config for dataset.
...
Returns:
Dataloader.
'''
dataset = dataset_class_dict[ dataset_classname ]( **dataset_config )
dataloader = DataLoader( dataset,
batch_size=batch_size,
collate_fn=collate_fn,
shuffle=shuffle,
num_workers=num_workers,
drop_last=drop_last)
return dataloader
| 31.818182 | 73 | 0.521905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.218095 |
f0dfa6a70f2b05edad087e65290480ef3841bae8 | 911 | py | Python | src/mdfserver/models.py | UCHIC/iUTAHData | 4ffab29ad6b3313416bb2a8b98acf0b2e02c8cab | [
"Unlicense"
] | 2 | 2015-02-25T01:12:51.000Z | 2017-02-08T22:54:41.000Z | src/mdfserver/models.py | UCHIC/iUTAHData | 4ffab29ad6b3313416bb2a8b98acf0b2e02c8cab | [
"Unlicense"
] | 48 | 2015-01-12T18:01:56.000Z | 2021-06-10T20:05:26.000Z | src/mdfserver/models.py | UCHIC/iUTAHData | 4ffab29ad6b3313416bb2a8b98acf0b2e02c8cab | [
"Unlicense"
] | null | null | null | from django.db import models
# from tinymce import models as tinymce_models
#
# # Create your models here.
#
# class Page(models.Model):
# title = models.CharField(max_length=200)
# url = models.CharField(max_length=200)
# content = models.TextField(max_length=20000) #tinymce_models.HTMLField()#forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30})) #Use the WYSIWYG editor in this field.
# def __unicode__(self):
# return self.title
#
# class Subpage(models.Model):
# title = models.CharField(max_length=200)
# url = models.CharField(max_length=200)
# url_visible = models.BooleanField()
# content = models.TextField(max_length=20000)#tinymce_models.HTMLField() #forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30})) #Use the WYSIWYG editor in this field.
# pages = models.ForeignKey(Page)
# def __unicode__(self):
# return self.title | 45.55 | 181 | 0.70472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 864 | 0.948408 |
f0e0241d9b692b14994b533da81167806bad291d | 2,468 | py | Python | tests/test_connectors_bigquery.py | bbc/foxglove | eb09b23aed51ed615eccf71bf20f2f6b28f4b32f | [
"Apache-2.0"
] | null | null | null | tests/test_connectors_bigquery.py | bbc/foxglove | eb09b23aed51ed615eccf71bf20f2f6b28f4b32f | [
"Apache-2.0"
] | null | null | null | tests/test_connectors_bigquery.py | bbc/foxglove | eb09b23aed51ed615eccf71bf20f2f6b28f4b32f | [
"Apache-2.0"
] | null | null | null | """
Copyright 2019 BBC. Licensed under the terms of the Apache License 2.0.
"""
from unittest.mock import Mock
import pytest
from google.cloud.bigquery import Client
from foxglove.connectors.bigquery import BigQueryConnector
@pytest.fixture
def fake_bq_client():
    """A Mock constrained to the google BigQuery Client API surface.

    NOTE(review): Client(project='test_project') is instantiated just to build
    the spec, so this fixture still touches the google-cloud library.
    """
    return Mock(spec=Client(project='test_project'))
@pytest.mark.integration
def test_valid_bigquery_connector_init():
    """Constructing the connector populates dataset id, table id and client."""
    connector = BigQueryConnector(
        'test_dataset_id',
        'test_table_id',
        'test_role'
    )
    # Truthiness checks only: the attributes exist and are non-empty.
    assert connector.bq_dataset_id
    assert connector.bq_table_id
    assert connector.bq_client
@pytest.mark.integration
def test_write_truncate_ndjson_file(fake_bq_client):
    """write_truncate_ndjson_file forwards the file handle to the BQ client."""
    connector = BigQueryConnector(
        'test_dataset_id',
        'test_table_id',
        'test_role'
    )
    # Swap in the mocked client so no network call is made.
    connector.bq_client = fake_bq_client
    connector.write_truncate_ndjson_file('test_ndjson_fh')
    # The load must target the connector's table with its configured job config.
    fake_bq_client.load_table_from_file.assert_called_with(
        file_obj='test_ndjson_fh',
        destination=connector._bq_table,
        job_config=connector._job_config
    )
@pytest.mark.integration
def test_bq_table(fake_bq_client):
    """_bq_table should resolve the table through the dataset object.

    NOTE(review): here ``_bq_table`` is *called* while the previous test passes
    it as a plain attribute -- confirm whether it is a method or a property in
    the connector implementation.
    """
    connector = BigQueryConnector(
        'test_dataset_id',
        'test_table_id',
        'test_role'
    )
    connector.bq_client = fake_bq_client
    _ = connector._bq_table()
    connector._bq_dataset.table.assert_called_once()
@pytest.mark.integration
def test_bq_dataset(fake_bq_client):
    """_bq_dataset should create the dataset through the (mocked) client."""
    connector = BigQueryConnector(
        'test_dataset_id',
        'test_table_id',
        'test_role'
    )
    connector.bq_client = fake_bq_client
    _ = connector._bq_dataset()
    fake_bq_client.create_dataset.assert_called_once()
def test_bigquery_engine_url_decode():
    """The engine URL components round-trip through _decode_engine_url."""
    engine_url = 'bigquery://projectId=my_project;datasetId=nice_food;tableId=cakes;'
    connector = BigQueryConnector(engine_url=engine_url)
    decoded = connector._decode_engine_url()
    assert decoded == ('my_project', 'nice_food', 'cakes')
@pytest.mark.integration
def test_sql_query_with_params():
    """Parameterized query against the live test dataset returns the known row.

    Requires network access and credentials for the bbc-datalab project.
    """
    engine_url='bigquery://projectId=bbc-datalab;datasetId=foxglove_test;tableId=rms_titles;'
    connector = BigQueryConnector(engine_url=engine_url)
    # check known value in sample data
    sql = "SELECT id FROM `bbc-datalab.foxglove_test.rms_titles` WHERE pid=@my_pid"
    for row in connector.query(sql=sql, sql_params=[("my_pid", "STRING", "b01qw8tz")]):
        assert row.id == 1
| 29.73494 | 93 | 0.731361 | 0 | 0 | 0 | 0 | 1,886 | 0.764182 | 0 | 0 | 608 | 0.246353 |
f0e245d926233f4c636eb409d21b940db19ff2c8 | 1,982 | py | Python | activitfinal.py | AjayBadrinath/Python-Stuff | 34bb0339968b943cc63c5dc31721e3504faea157 | [
"MIT"
] | null | null | null | activitfinal.py | AjayBadrinath/Python-Stuff | 34bb0339968b943cc63c5dc31721e3504faea157 | [
"MIT"
] | null | null | null | activitfinal.py | AjayBadrinath/Python-Stuff | 34bb0339968b943cc63c5dc31721e3504faea157 | [
"MIT"
] | null | null | null | import sys
# Character-class tally script: reads a multi-line string from stdin and
# reports counts of capitals, lowercase letters, digits, whitespace and
# special characters, plus the alphabet total and line count.

# "Special" characters are ASCII 33-47 ('!'..'/') and 58-64 (':'..'@').
splchar = [chr(i) for i in range(33, 48)] + [chr(i) for i in range(58, 65)]

# Category name -> collection of member characters for that category.
letter = {"cap": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
          "alpha": "abcdefghijklmnopqrstuvwxyz",
          "digit": "0123456789",
          "white_space": [" ", "\t", "\n"],
          "splchar": splchar}

print("Enter your string with multiple lines\n")
print("Ctrl+D to Terminate input\n")
a = sys.stdin.read()  # EOF (Ctrl+D) terminates the multi-line input

# Tally every category in a single pass over the input.  This replaces the
# previous exec()-based "switch" that built and executed assignment statements
# from strings at runtime -- identical counts, but without exec().
counts = {name: 0 for name in letter}
for ch in a:
    for name, members in letter.items():
        if ch in members:
            counts[name] += 1

cap = counts["cap"]
small = counts["alpha"]
digit = counts["digit"]
whitespace = counts["white_space"]
spl = counts["splchar"]
lineno = [j for j in a if j == "\n"]  # one entry per newline seen

print("\nThe total number of caps", cap, "\nSmall:", small, "\ndigit:", digit, "\nwhitespaces", whitespace, "\nSpecialCharecters", spl, "\nTotal Alphabets", cap + small,
      "\nTotal lines", len(lineno))
| 48.341463 | 168 | 0.652371 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,427 | 0.71998 |
f0e44d50b65aca97969715923cd5504c6c2fa654 | 28,838 | py | Python | pytests/backup/ibr.py | ramalingam-cb/testrunner | 81cea7a5a493cf0c67fca7f97c667cd3c6ad2142 | [
"Apache-2.0"
] | null | null | null | pytests/backup/ibr.py | ramalingam-cb/testrunner | 81cea7a5a493cf0c67fca7f97c667cd3c6ad2142 | [
"Apache-2.0"
] | null | null | null | pytests/backup/ibr.py | ramalingam-cb/testrunner | 81cea7a5a493cf0c67fca7f97c667cd3c6ad2142 | [
"Apache-2.0"
] | null | null | null | __author__ = 'ashvinder'
import re
import os
import gc
import logger
import time
from TestInput import TestInputSingleton
from backup.backup_base import BackupBaseTest
from remote.remote_util import RemoteMachineShellConnection
from couchbase_helper.documentgenerator import BlobGenerator
from couchbase_helper.documentgenerator import DocumentGenerator
from memcached.helper.kvstore import KVStore
from membase.api.rest_client import RestConnection, Bucket
from couchbase_helper.data_analysis_helper import *
from memcached.helper.data_helper import VBucketAwareMemcached
from view.spatialquerytests import SimpleDataSet
from view.spatialquerytests import SpatialQueryTests
from membase.helper.spatial_helper import SpatialHelper
from couchbase_helper.cluster import Cluster
from membase.helper.bucket_helper import BucketOperationHelper
from couchbase_helper.document import DesignDocument, View
import copy
class IBRTests(BackupBaseTest):
    def setUp(self):
        """Load seed data into every bucket and take an initial full backup.

        After this runs, self.total_backups == 1 and a full backup exists at
        self.backup_location; the test methods below add further backups.
        """
        super(IBRTests, self).setUp()
        self.num_mutate_items = self.input.param("mutate_items", 1000)
        gen_load = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, gen_load, "create", 0, 1, self.item_flag, True, batch_size=20000,
                               pause_secs=5, timeout_secs=180)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        # Take a full backup so incremental/differential runs have a base.
        if not self.command_options:
            self.command_options = []
        options = self.command_options + [' -m full']
        self.total_backups = 1
        self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
    def tearDown(self):
        """Delegate all cleanup to the BackupBaseTest teardown."""
        super(IBRTests, self).tearDown()
    def restoreAndVerify(self, bucket_names, kvs_before, expected_error=None):
        """Restore the backup and verify bucket contents (or an expected error).

        :param bucket_names: bucket names passed to cbrestore
        :param kvs_before: per-bucket KVStore snapshots taken before the data
            was changed; installed as the expected state for verification
        :param expected_error: when given, the restore output must contain this
            string and the buckets are then verified as empty instead
        """
        # Install the pre-change KV snapshots as the expected state.
        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
        # The snapshots can be large; drop the extra reference and collect now.
        del kvs_before
        gc.collect()
        errors, outputs = self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)
        # The expected message may appear on stderr or stdout; search both.
        errors.extend(outputs)
        error_found = False
        if expected_error:
            for line in errors:
                if line.find(expected_error) != -1:
                    error_found = True
                    break
            self.assertTrue(error_found, "Expected error not found: %s" % expected_error)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        if expected_error:
            # A failed restore should leave nothing behind: expect empty stores.
            for bucket in self.buckets:
                bucket.kvs[1] = KVStore()
        self.verify_results(self.master)
        self._verify_stats_all_buckets(self.servers[:self.num_servers])
def verify_dir_structure(self, total_backups, buckets, nodes):
cmd = 'find ' + self.backup_location + ' -type f'
if self.shell.info.type.lower() == 'windows':
cmd = 'cmd.exe /C "dir /s /b C:\\tmp\\backup"'
output, error = self.shell.execute_command(cmd)
self.log.info("output = {0} error = {1}".format(output,error))
if error:
raise Exception('Got error {0}',format(error))
expected_design_json = total_backups * buckets
expected_data_cbb = total_backups * buckets * nodes
expected_meta_json = total_backups * buckets * nodes
expected_failover_json = total_backups * buckets * nodes
timestamp = '\d{4}\-\d{2}\-\d{2}T\d+Z'
pattern_mode = '(full|accu|diff)'
timestamp_backup = timestamp + '\-' + pattern_mode
pattern_bucket = 'bucket-\w+'
pattern_node = 'node\-\d{1,3}\.\d{1,3}\.\d{1,3}.\d{1,3}.+'
pattern_design_json = timestamp + '/|\\\\' + timestamp_backup + \
'/|\\\\' + pattern_bucket
pattern_backup_files = pattern_design_json + '/|\\\\' + pattern_node
data_cbb = 0
failover = 0
meta_json = 0
design_json = 0
for line in output:
if 'data-0000.cbb' in line:
if re.search(pattern_backup_files, line):
data_cbb += 1
if 'failover.json' in line:
if re.search(pattern_backup_files, line):
failover += 1
if self.cb_version[:5] != "4.5.1" and 'meta.json' in line:
if re.search(pattern_backup_files, line):
meta_json += 1
if 'design.json' in line:
if re.search(pattern_design_json, line):
design_json += 1
self.log.info("expected_data_cbb {0} data_cbb {1}"
.format(expected_data_cbb, data_cbb))
self.log.info("expected_failover_json {0} failover {1}"
.format(expected_failover_json, failover))
if self.cb_version[:5] != "4.5.1":
self.log.info("expected_meta_json {0} meta_json {1}"
.format(expected_meta_json, meta_json))
""" add json support later in this test
self.log.info("expected_design_json {0} design_json {1}"
.format(expected_design_json, design_json)) """
if self.cb_version[:5] != "4.5.1":
if data_cbb == expected_data_cbb and failover == expected_failover_json and \
meta_json == expected_meta_json:
# add support later in and design_json == expected_design_json:
return True
else:
if data_cbb == expected_data_cbb and failover == expected_failover_json:
return True
return False
    def testFullBackupDirStructure(self):
        """The initial full backup taken in setUp has the expected file layout."""
        if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
            raise Exception('Backup Directory Verification Failed for Full Backup')
    def testMultipleFullBackupDirStructure(self):
        """Ten further full backups after mutations keep a consistent layout."""
        for count in range(10):
            # Update data
            gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
            self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
                                   pause_secs=5, timeout_secs=180)
            self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
            # Take a full backup (' -m full')
            options = self.command_options + [' -m full']
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
            self.total_backups += 1
            self.sleep(120)
        if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
            raise Exception('Backup Directory Verification Failed for Full Backup')
    def testIncrBackupDirStructure(self):
        """One accumulative (incremental) backup after mutations has the expected layout."""
        # Update data
        gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
                               pause_secs=5, timeout_secs=180)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        # Take an incremental (accumulative) backup
        options = self.command_options + [' -m accu']
        self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
        self.total_backups += 1
        if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
            raise Exception('Backup Directory Verification Failed for Incremental Backup')
    def testMultipleIncrBackupDirStructure(self):
        """Ten accumulative backups after mutations keep a consistent layout."""
        for count in range(10):
            # Update data
            gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
            self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
                                   pause_secs=5, timeout_secs=180)
            self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
            # Take an incremental (accumulative) backup
            options = self.command_options + [' -m accu']
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
            self.total_backups += 1
            self.log.info("sleeping for 30 secs")
            self.sleep(30)
        if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
            raise Exception('Backup Directory Verification Failed for Incremental Backup')
    def testMultipleDiffBackupDirStructure(self):
        """Ten differential backups after mutations keep a consistent layout."""
        for count in range(10):
            # Update data
            gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
            self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
                                   pause_secs=5, timeout_secs=180)
            self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
            # Take a differential backup
            options = self.command_options + [' -m diff']
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
            self.total_backups += 1
            self.sleep(60)
        if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
            raise Exception('Backup Directory Verification Failed for Differential Backup')
    def testMultipleIncrDiffBackupDirStructure(self):
        """Ten rounds of alternating accumulative + differential backups keep a
        consistent layout (two backups are added per round)."""
        for count in range(10):
            # Update data
            gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
            self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
                                   pause_secs=5, timeout_secs=180)
            self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
            # Take an incremental (accumulative) backup
            options = self.command_options + [' -m accu']
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
            self.total_backups += 1
            self.sleep(60)
            # Update data
            gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
            self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
                                   pause_secs=5, timeout_secs=180)
            self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
            # Take a diff backup
            options = self.command_options + [' -m diff']
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
            self.total_backups += 1
            self.sleep(60)
        if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
            raise Exception('Backup Directory Verification Failed for Combo Incr and Diff Backup')
    def testMultipleFullIncrDiffBackupDirStructure(self):
        """Each round: mutate + incremental backup, mutate + differential backup,
        mutate + full backup (kept alongside the others), then verify the
        backup directory layout."""
        for count in range(10):
            # Overwrite the working set so the incremental backup has deltas.
            gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
            self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
                                   pause_secs=5, timeout_secs=180)
            self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
            # Take an incremental (accumulative) backup.
            options = self.command_options + [' -m accu']
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
            self.total_backups += 1
            self.sleep(60)
            # Mutate again so the differential backup also has fresh changes.
            gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
            self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
                                   pause_secs=5, timeout_secs=180)
            self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
            # Take a differential backup.
            options = self.command_options + [' -m diff']
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
            self.total_backups += 1
            self.sleep(60)
            # Mutate once more ahead of the full backup.
            gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
            self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
                                   pause_secs=5, timeout_secs=180)
            self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
            # Full backup; delete_backup=False keeps the earlier backup
            # directories in place so the combined layout can be verified below.
            options = self.command_options + [' -m full']
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options, delete_backup=False)
            self.total_backups += 1
            self.sleep(60)
            if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
                raise Exception('Backup Directory Verification Failed for Combo Full,Incr and Diff Backups')
def testDiffBackupDirStructure(self):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=5)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a diff backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Differential Backup')
def testIncrementalBackup(self):
gen_extra = BlobGenerator('zoom', 'zoom-', self.value_size, end=self.num_items)
self.log.info("Starting Incremental backup")
extra_items_deleted_flag = 0
if(self.doc_ops is not None):
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("update" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("delete" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
extra_items_deleted_flag = 1
if("expire" in self.doc_ops):
if extra_items_deleted_flag == 1:
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
self._load_all_buckets(self.master, gen_extra, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
#Take a incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
def testDifferentialBackup(self):
gen_extra = BlobGenerator('zoom', 'zoom-', self.value_size, end=self.num_items)
self.log.info("Starting Differential backup")
extra_items_deleted_flag = 0
if(self.doc_ops is not None):
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("update" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("delete" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
extra_items_deleted_flag = 1
if("expire" in self.doc_ops):
if extra_items_deleted_flag == 1:
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
self._load_all_buckets(self.master, gen_extra, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a diff backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
def testFullBackup(self):
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
    def testIncrementalBackupConflict(self):
        """Take an incremental backup, then restore into buckets recreated with
        a different LWW ("lww_new") setting and check the expected outcome,
        optionally matching an expected error from the restore."""
        gen_extra = BlobGenerator('zoom', 'zoom-', self.value_size, end=self.num_items)
        self.log.info("Starting Incremental backup")
        extra_items_deleted_flag = 0
        if(self.doc_ops is not None):
            self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("update" in self.doc_ops):
                self._load_all_buckets(self.master, gen_extra, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("delete" in self.doc_ops):
                self._load_all_buckets(self.master, gen_extra, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
                extra_items_deleted_flag = 1
            if("expire" in self.doc_ops):
                # Re-create the deleted docs first so there is something to expire.
                if extra_items_deleted_flag == 1:
                    self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
                self._load_all_buckets(self.master, gen_extra, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        # Take an incremental (accumulative) backup.
        options = self.command_options + [' -m accu']
        self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
        # Snapshot the expected key-value state before destroying the buckets.
        kvs_before = {}
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
        bucket_names = [bucket.name for bucket in self.buckets]
        # Delete all buckets; they are recreated below with the new lww setting.
        self._all_buckets_delete(self.master)
        gc.collect()
        # NOTE(review): this chained assignment also overwrites num_mutate_items
        # with the boolean "lww_new" param — looks like a copy/paste slip; confirm
        # whether num_mutate_items is actually meant to change here.
        self.lww = self.num_mutate_items = self.input.param("lww_new", False)
        self._bucket_creation()
        self.sleep(20)
        expected_error = self.input.param("expected_error", None)
        self.restoreAndVerify(bucket_names, kvs_before, expected_error)
class IBRJsonTests(BackupBaseTest):
    """Incremental backup/restore (IBR) tests over JSON documents, optionally
    creating design docs + views before the baseline full backup taken in setUp."""

    def setUp(self):
        """Load JSON docs, optionally create ddocs/views, then take a full backup."""
        super(IBRJsonTests, self).setUp()
        self.num_mutate_items = self.input.param("mutate_items", 1000)
        template = '{{ "mutated" : 0, "age": {0}, "first_name": "{1}" }}'
        gen_load = DocumentGenerator('load_by_id_test', template, range(5),
                                     ['james', 'john'], start=0, end=self.num_items)
        self._load_all_buckets(self.master, gen_load, "create", 0, 1,
                               self.item_flag, True, batch_size=20000,
                               pause_secs=5, timeout_secs=180)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        if self.test_with_view:
            view_list = []
            bucket = "default"
            if self.dev_view:
                prefix_ddoc = "dev_ddoc"
            else:
                prefix_ddoc = "ddoc"
            ddoc_view_map = self.bucket_ddoc_map.pop(bucket, {})
            # Fix: use range() instead of the Python-2-only xrange() — the rest
            # of this file already uses range().
            for ddoc_count in range(self.num_ddocs):
                design_doc_name = prefix_ddoc + str(ddoc_count)
                view_list = self.make_default_views("views", self.num_views_per_ddoc)
                self.create_views(self.master, design_doc_name, view_list,
                                  bucket, self.wait_timeout * 2)
                ddoc_view_map[design_doc_name] = view_list
            self.bucket_ddoc_map[bucket] = ddoc_view_map
        # Take a full backup as the baseline for the restore tests below.
        if not self.command_options:
            self.command_options = []
        options = self.command_options + [' -m full']
        self.total_backups = 1
        self.shell.execute_cluster_backup(self.couchbase_login_info,
                                          self.backup_location, options)
        self.sleep(2)

    def tearDown(self):
        super(IBRJsonTests, self).tearDown()

    def testFullBackup(self):
        """Recreate all buckets and verify the setUp full backup restores the data."""
        # Snapshot the expected key-value state for post-restore validation.
        kvs_before = {}
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
        bucket_names = [bucket.name for bucket in self.buckets]
        # Delete and recreate all buckets so the restore starts from empty.
        self._all_buckets_delete(self.master)
        gc.collect()
        self._bucket_creation()
        self.sleep(20)
        self.restoreAndVerify(bucket_names, kvs_before)

    def restoreAndVerify(self, bucket_names, kvs_before):
        """Restore the backup into the (recreated) buckets and verify items,
        stats and — when test_with_view is set — that every view answers queries.

        kvs_before maps bucket name -> expected key-value store; it is spliced
        back into self.buckets so verify_results compares against it."""
        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
        del kvs_before
        gc.collect()
        self.shell.restore_backupFile(self.couchbase_login_info,
                                      self.backup_location, bucket_names)
        self.sleep(10)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        self.verify_results(self.master)
        self._verify_stats_all_buckets(self.servers[:self.num_servers])
        # Validate the restored design docs and views, if any were created.
        if self.test_with_view:
            query = {"stale": "false", "full_set": "true",
                     "connection_timeout": 60000}
            for bucket, ddoc_view_map in self.bucket_ddoc_map.items():
                for ddoc_name, view_list in ddoc_view_map.items():
                    for view in view_list:
                        # Fix: reset per view; previously a truthy result from
                        # an earlier view masked failures of later ones.
                        result = False
                        try:
                            result = self.cluster.query_view(self.master,
                                                             ddoc_name, view.name, query,
                                                             self.num_items, timeout=10)
                        except Exception:
                            # A failed/timed-out query leaves result False,
                            # which triggers the fail() below.
                            pass
                        if not result:
                            self.fail("There is no: View: {0} in Design Doc:"
                                      " {1} in bucket: {2}"
                                      .format(view.name, ddoc_name, bucket))
            self.log.info("DDoc Data Validation Successful")

    def testMultipleBackups(self):
        """Take number_of_backups backups (accu/diff/mix per backup_type),
        mutating every document before each one, then recreate the buckets
        and verify the restore."""
        if not self.command_options:
            self.command_options = []
        options = self.command_options
        if self.backup_type is not None:
            if "accu" in self.backup_type:
                options = self.command_options + [' -m accu']
            if "diff" in self.backup_type:
                options = self.command_options + [' -m diff']
        diff_backup = [" -m diff"]
        accu_backup = [" -m accu"]
        current_backup = [" -m diff"]
        for count in range(self.number_of_backups):
            # Fix: guard against backup_type being None before the "in" test
            # (the accu/diff checks above already have this guard); previously
            # a None backup_type raised TypeError here.
            if self.backup_type is not None and "mix" in self.backup_type:
                # Alternate between accumulative and differential backups.
                if current_backup == diff_backup:
                    current_backup = accu_backup
                    options = self.command_options + accu_backup
                elif current_backup == accu_backup:
                    current_backup = diff_backup
                    options = self.command_options + diff_backup
            # Mutate every document so this round's backup has fresh changes.
            template = '{{ "mutated" : {0}, "age": {0}, "first_name": "{1}" }}'
            gen_update = DocumentGenerator('load_by_id_test', template, range(5),
                                           ['james', 'john'], start=0, end=self.num_items)
            self._load_all_buckets(self.master, gen_update, "update", 0, 1,
                                   self.item_flag, True, batch_size=20000,
                                   pause_secs=5, timeout_secs=180)
            self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
            # Take this round's backup.
            self.shell.execute_cluster_backup(self.couchbase_login_info,
                                              self.backup_location, options)
        # Snapshot the expected state, then wipe and recreate the buckets.
        kvs_before = {}
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
        bucket_names = [bucket.name for bucket in self.buckets]
        self._all_buckets_delete(self.master)
        gc.collect()
        self._bucket_creation()
        self.sleep(20)
        self.restoreAndVerify(bucket_names, kvs_before)
class IBRSpatialTests(SpatialQueryTests):
    def setUp(self):
        """Hand-rolled setUp: builds the cluster via SpatialHelper and records
        the backup CLI connection settings. Does not call
        SpatialQueryTests.setUp()."""
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.master = self.servers[0]
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()
        self.cluster = Cluster()
        self.default_bucket = self.input.param("default_bucket", True)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.memcached_buckets = self.input.param("memcached_buckets", 0)
        # NOTE(review): self.servers is reassigned from the helper here —
        # presumably to reflect cluster membership after setup_cluster();
        # confirm against SpatialHelper.
        self.servers = self.helper.servers
        self.shell = RemoteMachineShellConnection(self.master)
        info = self.shell.extract_remote_info()
        self.os = info.type.lower()
        # "user:password" string passed to the cluster backup/restore CLI.
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = self.input.param("backup_location", "/tmp/backup")
        self.command_options = self.input.param("command_options", '')
    def tearDown(self):
        # Only the helper's cluster resources are cleaned up here; the parent
        # class tearDown is not invoked and self.shell is left open —
        # NOTE(review): confirm this is intended.
        self.helper.cleanup_cluster()
def test_backup_with_spatial_data(self):
num_docs = self.helper.input.param("num-docs", 5000)
self.log.info("description : Make limit queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._query_test_init(data_set)
if not self.command_options:
self.command_options = []
options = self.command_options + [' -m full']
self.total_backups = 1
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
time.sleep(2)
self.buckets = RestConnection(self.master).get_buckets()
bucket_names = [bucket.name for bucket in self.buckets]
BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
gc.collect()
self.helper._create_default_bucket()
self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)
SimpleDataSet(self.helper, num_docs)._create_views()
self._query_test_init(data_set)
| 45.485804 | 165 | 0.627055 | 27,921 | 0.968202 | 0 | 0 | 0 | 0 | 0 | 0 | 3,484 | 0.120813 |
f0e68b0a27260d16ee0b208166b5530ca8b0baef | 963 | py | Python | examples/test_feasibility.py | OllieBoyne/sslap | 02c88a05dee1c33d5e0c9b405f947d522fb0b150 | [
"MIT"
] | 7 | 2021-03-12T17:28:18.000Z | 2022-02-14T08:05:23.000Z | examples/test_feasibility.py | OllieBoyne/sslap | 02c88a05dee1c33d5e0c9b405f947d522fb0b150 | [
"MIT"
] | 6 | 2021-03-12T17:38:21.000Z | 2022-02-16T10:37:33.000Z | examples/test_feasibility.py | OllieBoyne/sslap | 02c88a05dee1c33d5e0c9b405f947d522fb0b150 | [
"MIT"
] | 4 | 2021-03-18T14:25:52.000Z | 2021-12-06T14:38:30.000Z | """Check the feasibility of a bipartite graph by using SSLAP's feasibility module"""
import numpy as np
from sslap import hopcroft_solve
# All 3 methods will use the same input bipartite graph:
# i = 0 connects to j = 0, 1
# i = 1 connects to j = 1, 2
# i = 2 connects to j = 1, 4
# i = 3 connects to j = 2
# i = 4 connects to j = 3
# which has a maximum matching of 5
# eg i:j of 0:0, 1:1, 2:4, 3:2, 4:3
def dict_usage():
lookup = {0: [0, 1], 1: [1, 2], 2: [1, 4], 3: [2], 4: [3]}
res = hopcroft_solve(lookup=lookup)
print(res)
def mat_usage():
mat = - np.ones((5, 5)) # all invalid, except
mat[[0, 0, 1, 1, 2, 2, 3, 4], [0, 1, 1, 2, 1, 4, 2, 3]] = 1 # for valid edges
res = hopcroft_solve(mat=mat)
print(res)
def loc_usage():
loc = np.array([[0, 0], [0, 1], [1, 1], [1, 2], [2, 1], [2, 4], [3, 2], [4, 3]]) # (i, j) for each edge
res = hopcroft_solve(loc=loc)
print(res)
if __name__ == "__main__":
dict_usage()
mat_usage()
loc_usage()
| 24.692308 | 105 | 0.5919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 414 | 0.429907 |
f0e6c3af9d104e8e439e1bf9d7509dae91e833dd | 5,933 | py | Python | clot/app.py | elliptical/clot | d1ee8ddc7527b170c0943b567b1cd589d5007b4d | [
"MIT"
] | null | null | null | clot/app.py | elliptical/clot | d1ee8ddc7527b170c0943b567b1cd589d5007b4d | [
"MIT"
] | null | null | null | clot/app.py | elliptical/clot | d1ee8ddc7527b170c0943b567b1cd589d5007b4d | [
"MIT"
] | null | null | null | """A simple command-line interface to the clot.torrent package."""
from argparse import ArgumentParser
from os import path, walk
import shutil
from clot import __version__, torrent
def main():
"""Execute actions according to the command-line arguments."""
args = _parse_command_line()
for source in args.sources:
if path.isdir(source):
traverse_dir(source, args)
elif path.isfile(source):
handle_file(source, args)
else:
if args.verbose:
print(f'Could not locate file or directory "{source}".')
def _parse_command_line():
parser = ArgumentParser(description=__doc__,
prog='python -m clot.app')
parser.add_argument('--version',
action='version',
version=f'{__file__} {__version__}')
subparsers = parser.add_subparsers(title='subcommands',
required=True,
dest='subcommand',
help='perform specific task on each torrent')
_add_load_command_to(subparsers)
_add_dump_command_to(subparsers)
return parser.parse_args()
def _add_load_command_to(subparsers):
parser = subparsers.add_parser('load')
parser.set_defaults(func=_load_torrent)
_add_traversal_arguments_to(parser)
_add_file_arguments_to(parser)
def _add_dump_command_to(subparsers):
parser = subparsers.add_parser('dump')
parser.set_defaults(func=_dump_torrent)
_add_traversal_arguments_to(parser)
_add_file_arguments_to(parser)
_add_dump_arguments_to(parser)
def _add_traversal_arguments_to(parser):
parser.add_argument('-v', '--verbose',
action='store_true',
help='print additional information')
parser.add_argument('-r', '--recurse',
action='store_true',
help='recurse into subdirectories')
parser.add_argument('--follow-links',
action='store_true',
help='walk down into symbolic links that resolve to directories')
parser.add_argument('--ext',
default='.torrent',
help='filter the directories based on filename extension'
' (default: "%(default)s")')
parser.add_argument('sources',
nargs='*',
metavar='PATH',
default='.',
help='torrent file or directory with torrent files'
' (default: current directory)')
def _add_file_arguments_to(parser):
parser.add_argument('-s', '--stash',
metavar='DIR',
help='stash torrents with errors in this directory')
parser.add_argument('--fallback-encoding',
help='use this encoding for strings which were not encoded with UTF-8')
parser.add_argument('--lazy',
action='store_true',
help='do not load metainfo fields (good for examining broken torrents)')
def _add_dump_arguments_to(parser):
group = parser.add_mutually_exclusive_group()
group.add_argument('--indent',
type=int,
default=4,
help='separate items with newlines and use this number of spaces'
' for indentation (default: %(default)s)')
group.add_argument('--tab',
action='store_const',
const='\t',
dest='indent',
help='separate items with newlines and use tabs for indentation')
group.add_argument('--no-indent',
action='store_const',
const=None,
dest='indent',
help='separate items with spaces rather than newlines')
parser.add_argument('--sort-keys',
action='store_true',
help='sort the output of dictionaries alphabetically by key')
parser.add_argument('-f', '--force',
action='store_true',
help='overwrite existing files')
def traverse_dir(dir_path, args):
"""Traverse the directory (flat or recursive) and handle files with the specified extension."""
def onerror(ex):
print(ex)
for root, dirs, files in walk(dir_path, onerror=onerror, followlinks=args.follow_links):
if not args.recurse:
dirs.clear()
for name in files:
if name.endswith(args.ext):
file_path = path.join(root, name)
handle_file(file_path, args)
def handle_file(file_path, args):
"""Handle the specified file based on args."""
if args.verbose:
print(file_path)
try:
obj = torrent.load(file_path, fallback_encoding=args.fallback_encoding, lazy=args.lazy)
args.func(file_path, obj, args)
except (TypeError, ValueError) as ex:
if args.stash:
_stash_file(file_path, args.stash)
if not args.verbose:
print(file_path)
print('\t', repr(ex), sep='')
def _stash_file(file_path, dir_path):
name, ext = path.splitext(path.basename(file_path))
target = path.join(dir_path, name + ext)
suffix = 0
while path.exists(target):
suffix += 1
target = path.join(dir_path, f'{name}-{suffix}{ext}')
shutil.copy2(file_path, target)
def _load_torrent(file_path, obj, args): # pylint: disable=unused-argument
pass
def _dump_torrent(file_path, obj, args):
obj.dump(file_path + '.json',
indent=args.indent,
sort_keys=args.sort_keys,
overwrite=args.force)
if __name__ == '__main__':
main()
| 33.331461 | 99 | 0.572055 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,586 | 0.267318 |
f0e7d6bd1974b95ea4a8abd9c52fb010ef93328b | 475 | py | Python | app/converter/nsl/substance_cv_converter.py | c0d3m0nkey/xml-to-json-converter | 9cf040b591f45031c80dc5bc64d6fbb2c4665d25 | [
"BSD-2-Clause"
] | null | null | null | app/converter/nsl/substance_cv_converter.py | c0d3m0nkey/xml-to-json-converter | 9cf040b591f45031c80dc5bc64d6fbb2c4665d25 | [
"BSD-2-Clause"
] | null | null | null | app/converter/nsl/substance_cv_converter.py | c0d3m0nkey/xml-to-json-converter | 9cf040b591f45031c80dc5bc64d6fbb2c4665d25 | [
"BSD-2-Clause"
] | null | null | null | from lxml import objectify, etree
from operator import itemgetter
from ..xml_converter import XmlConverter
class SubstanceCVConverter(XmlConverter):
def convert(self, xml):
item = {}
item["term_english_equiv"] = str(xml.attrib["term-english-equiv"])
item["term_id"] = str(xml.attrib["term-id"])
item["term_lang"] = str(xml.attrib["term-lang"])
item["term_revision_num"] = str(xml.attrib["term-revision-num"])
return item
| 36.538462 | 74 | 0.671579 | 366 | 0.770526 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.248421 |
f0e89ae2be157a7563b7db3f026034c5b7dec360 | 482 | py | Python | cardiffshop/cardiffshop/urls.py | yigitguler/admin-example | 48a0eacea1d03123fef5e86165749e9c0213e0d8 | [
"MIT"
] | null | null | null | cardiffshop/cardiffshop/urls.py | yigitguler/admin-example | 48a0eacea1d03123fef5e86165749e9c0213e0d8 | [
"MIT"
] | null | null | null | cardiffshop/cardiffshop/urls.py | yigitguler/admin-example | 48a0eacea1d03123fef5e86165749e9c0213e0d8 | [
"MIT"
] | null | null | null | from products.views import ProductDetail
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^products/(?P<slug>[-\w]+)/', ProductDetail.as_view(), name="product-detail"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) \
+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 37.076923 | 88 | 0.761411 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.116183 |
f0e928a38ffedd9b5a5fdacb57e277620b6e15be | 339 | py | Python | headlineVsHeadline/polls/models.py | bacarpenter/headline-vs-headline | 2dc59e75fadbf7d5bb0a5b15a0bf41784712a290 | [
"MIT"
] | null | null | null | headlineVsHeadline/polls/models.py | bacarpenter/headline-vs-headline | 2dc59e75fadbf7d5bb0a5b15a0bf41784712a290 | [
"MIT"
] | 1 | 2020-12-28T01:28:19.000Z | 2020-12-28T01:56:47.000Z | headlineVsHeadline/polls/models.py | bacarpenter/headline-vs-headline | 2dc59e75fadbf7d5bb0a5b15a0bf41784712a290 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class HeadlineListing(models.Model):
headline_text = models.CharField(max_length=500)
accessed = models.DateTimeField()
source_url = models.CharField(max_length=200)
author = models.CharField(default="", max_length=200)
source = models.CharField(max_length=200)
| 33.9 | 57 | 0.755162 | 281 | 0.828909 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.082596 |
f0ea161b0bdb8c1460310c8cf5b767d1b7345d34 | 1,678 | py | Python | plot_result.py | PrimordialOcean/diffusion-plagioclase | f85a9c9a348435ca162de2da3ae9290b56d66807 | [
"MIT"
] | null | null | null | plot_result.py | PrimordialOcean/diffusion-plagioclase | f85a9c9a348435ca162de2da3ae9290b56d66807 | [
"MIT"
] | null | null | null | plot_result.py | PrimordialOcean/diffusion-plagioclase | f85a9c9a348435ca162de2da3ae9290b56d66807 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import pandas as pd
def main():
times = [ 1, 100, 365, 365*20, 365*100, 100000]
df = pd.read_csv('tmp.csv')
df_ini = pd.read_csv("initial_value.csv")
xan_measured = (df_ini["distance"], df_ini["XAn"])
df_m = pd.read_csv('measured_value.csv')
measured_data = (df_m["Distance(um)"],df_m["MgO"])
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
fig = plt.figure()
plt.rcParams["font.size"] = 14
ax = fig.add_subplot(111)
ax2 = ax.twinx()
ax.set_title("$T=1000 ^\circ$C (van Orman, 2014)")
#ax.set_title(str(time_days)+" days in " + str(tempc) + "$^\circ$C")
best_fit = 100000
#ax.plot(*measured_data, "o", color="w", mec="k", label="Measured")
for t in times:
x = df.iloc[:,0]
y = df.iloc[:,t]
plotdata = (x, y)
if t == 1:
ax.plot(*plotdata, "--", color="k", label="Initial")
elif t == best_fit:
ax.plot(*plotdata, "-", color="r", label=str(int(t/365))+"yrs")
else:
if t < 365:
ax.plot(x, y, "-", color="grey", label=str(int(t))+"days")
else:
ax.plot(x, y, "-", color="grey", label=str(int(t/365))+"yrs")
ax2.plot(*xan_measured, "o", color="b", label="XAn")
ax2.set_ylabel("XAn", fontsize=16)
ax2.set_ylim(0.55, 2)
ax.set_ylim(0, 0.7)
ax.set_xlabel("Distance from rim (\u03bcm)", fontsize=16)
ax.set_ylabel("MgO (wt.%)", fontsize=16)
fig.legend(loc=1, fancybox=False, framealpha=1, edgecolor="k", fontsize=10)
fig.savefig('img.jpg', dpi=300, bbox_inches='tight')
if __name__ == "__main__":
main() | 34.244898 | 79 | 0.557211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 406 | 0.241955 |
f0ec35912039f91ed3134032fb990304ca7ef9ce | 2,685 | py | Python | tests/unit/test_core_context.py | nickchen-mitac/fork | 64dab56012da47465b4923f30f26925476c87afc | [
"Apache-2.0"
] | null | null | null | tests/unit/test_core_context.py | nickchen-mitac/fork | 64dab56012da47465b4923f30f26925476c87afc | [
"Apache-2.0"
] | null | null | null | tests/unit/test_core_context.py | nickchen-mitac/fork | 64dab56012da47465b4923f30f26925476c87afc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import pytest
from pydispatch import dispatcher
from ava.core.context import Context, get_core_context
class MockAgent(object):
def __init__(self):
self.greenlets = []
self._dispatcher = dispatcher
def add_child_greenlet(self, child):
self.greenlets.append(child)
def send(self, signal=dispatcher.Any, sender=dispatcher.Anonymous, *args, **kwargs):
if signal is None:
signal = dispatcher.Any
if sender is None:
sender = dispatcher.Anonymous
self._dispatcher.send(signal, sender, *args, **kwargs)
def connect(self, receiver, signal=dispatcher.Any, sender=dispatcher.Any):
if signal is None:
signal = dispatcher.Any
if sender is None:
sender = dispatcher.Anonymous
self._dispatcher.connect(receiver, signal, sender)
def disconnect(self, receiver, signal=dispatcher.Any, sender=dispatcher.Any):
if signal is None:
signal = dispatcher.Any
if sender is None:
sender = dispatcher.Anonymous
self._dispatcher.disconnect(receiver, signal, sender)
@pytest.fixture
def context():
return Context(MockAgent())
class Receiver(object):
def __init__(self):
self.called = False
self.args = None
self.kwargs = None
def __call__(self, *args, **kwargs):
self.called = True
self.args = args
self.kwargs = kwargs
class TestCoreContext(object):
def test_binding_and_lookups(self, context):
context.bind('test', 'value')
value = context.lookup('test')
assert value == 'value'
context.unbind('test')
value2 = context.lookup('test')
assert value2 is None
def test_send_signals(self, context):
receiver = Receiver()
context.connect(receiver, signal='test_event')
context.send(signal='test_event')
assert receiver.called
def test_connect_and_then_disconnect(self, context):
SIGNAL = 'my-second-signal'
receiver = Receiver()
context.connect(receiver)
context.send(SIGNAL, msg="message", title="1234")
assert receiver.called
receiver.called = False
print(receiver.args, receiver.kwargs)
context.disconnect(receiver)
context.send(signal=SIGNAL)
assert not receiver.called
def test_get_core_context(self):
agent = MockAgent()
ctx = get_core_context(agent)
assert ctx._agent is agent
ctx2 = get_core_context()
assert ctx is ctx2
| 26.85 | 88 | 0.642831 | 2,410 | 0.897579 | 0 | 0 | 62 | 0.023091 | 0 | 0 | 118 | 0.043948 |
f0ed814c5e8a404bf3c7bc581d0b6d473446eadd | 5,736 | py | Python | Nitesh-Bhosle-Visualization-for-company-stakeholders/code.py | Niteshnupur/greyatom-python-for-data-science | fc8fd221f9ca1b256740b201dff2a806047644e5 | [
"MIT"
] | 3 | 2020-05-21T13:40:35.000Z | 2020-05-21T15:05:04.000Z | Nitesh-Bhosle-Visualization-for-company-stakeholders/code.py | Niteshnupur/greyatom-python-for-data-science | fc8fd221f9ca1b256740b201dff2a806047644e5 | [
"MIT"
] | null | null | null | Nitesh-Bhosle-Visualization-for-company-stakeholders/code.py | Niteshnupur/greyatom-python-for-data-science | fc8fd221f9ca1b256740b201dff2a806047644e5 | [
"MIT"
] | 1 | 2020-05-14T04:11:22.000Z | 2020-05-14T04:11:22.000Z | # --------------
# Loan Status
# Loan Status
# Let's start with the simple task of visualizing the company's record with respect to loan approvals.
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# The path to the dataset has been stored in a variable path
# Load the dataframe using pd.read_csv() and store the dataframe in a variable called data.
data = pd.read_csv(path)
print(data)
# Save the value counts of Loan_Status in a variable called loan_status using value_counts()
loan_status = data["Loan_Status"].value_counts()
print(loan_status)
# Plot a bar graph of loan_status
loan_status.plot(kind= "bar" , figsize=(10,10))
plt.title('visualizing the companys record with respect to loan approvals')
plt.xlabel('Loan Status of approval')
plt.ylabel('VALUES')
plt.show()
#Code starts here
# --------------
# Everyone needs money
# Everyone needs money
# The company provides financial assistance across the different regions of the country. One interesting statistic that stakeholders want to see is the loan approval distribution across the regions.
#Code starts here
# Group the 'data' dataframe by Property_Area and Loan_Status and store it in a variable called 'property_and_loan'
property_and_loan = data.groupby(["Property_Area" , "Loan_Status"]).size().unstack()
print(property_and_loan)
# Use the .size() method on 'property_and_loan' and then use .unstack() and save it back to 'property_and_loan'
# NOTE: this recomputes the same grouped table as above; kept so the printed
# output matches the original two-step exercise instructions.
property_and_loan = data.groupby(["Property_Area" , "Loan_Status"]).size().unstack()
print(property_and_loan)
# Plot an unstacked bar plot of property_and_loan (It is similar to creating a stacked bar plot except change the parameter 'stacked' to False)
property_and_loan.plot(kind="bar" , stacked = False , figsize=(20,20))
plt.xlabel("Property Area")
plt.ylabel("Loan Status")
plt.xticks(rotation=45)
plt.show()
# --------------
# Expensive Education
# Expensive Education
# Higher education has always been an expensive endeavour for people but it results in better career opportunities and stability in life. But does higher education result in a better guarantee in issuing loans?
#Code starts here
# Group the 'data' dataframe by Education and Loan_Status and store it in a variable called 'education_and_loan'
education_and_loan = data.groupby(["Education" , "Loan_Status"])
print(education_and_loan)
# Use the .size() method on 'education_and_loan' and then use .unstack() and save it back to 'education_and_loan'
education_and_loan = data.groupby(["Education" , "Loan_Status"]).size().unstack()
print(education_and_loan)
# Plot an stacked bar plot of education_and_loan
education_and_loan.plot(kind="bar" , figsize=(20,20))
# Name the x-axis as Education Status
plt.xlabel("Education Status")
# Name the y-axis as Loan Status
plt.ylabel("Loan Status")
# Rotate the labels of x-axis by 45o
plt.xticks(rotation=45)
plt.show()
# --------------
# Smarter and Richer?
# Smarter and Richer?
# After seeing the loan status distribution, let's check whether being graduate or not also leads to different loan amount distribution by plotting an overlapping density plot of two values
#Code starts here
# Create a dataframe called 'graduate' which is a subset of 'data' dataframe with the condition "data['Education'] == 'Graduate'"
graduate = pd.DataFrame(data[data["Education"]=="Graduate"])
print(graduate)
#graduate=data[data['Education']=='Graduate']
#print(graduate)
# Create a dataframe called 'not_graduate' which is a subset of 'data' dataframe with the condition "data['Education'] == 'Not Graduate'"
not_graduate = pd.DataFrame(data[data["Education"]=="Not Graduate"])
print(not_graduate)
# Plot a density plot LoanAmount of 'graduate' dataframe using "Series.plot()" and pass the parameter kind='density' and label='Graduate'
graduate["LoanAmount"].plot(kind="density" , label="Graduate" , figsize=(20,20))
# Do the same for LoanAmount of 'not_graduate' dataframe but with label='Not Graduate'
not_graduate["LoanAmount"].plot(kind="density" , label="Not_graduate" , figsize=(20,20))
#Code ends here
#For automatic legend display
plt.legend()
# --------------
# Income vs Loan
# Income vs Loan
# For any financial institution to be successful in its loan lending system, there has to be a correlation between the borrower's income and loan amount he is lent. Let's see how our company fares in that respect:
#Code starts here
# Create three subplots with (nrows = 3 , ncols = 1) and store it in variable's fig ,(ax_1,ax_2,ax_3)
fig ,(ax_1,ax_2,ax_3) = plt.subplots(nrows = 3 , ncols = 1)
# Since both are continuous variables, plot scatter plot between 'ApplicantIncome' and LoanAmount using ax_1. Set axis title as Applicant Income
# BUG FIX: ax_1 previously plotted CoapplicantIncome (duplicating ax_2);
# the first panel must show ApplicantIncome to match its title.
ax_1.scatter(data["ApplicantIncome"] , data["LoanAmount"])
ax_1.set(title="Applicant Income")
# Plot scatter plot between 'CoapplicantIncome' and LoanAmount using ax_2. Set axis title as Coapplicant Income
ax_2.scatter(data["CoapplicantIncome"] , data["LoanAmount"])
ax_2.set(title="Coapplicant Income")
# Create a new column in the dataframe called 'TotalIncome' which is a sum of the values of columns ApplicantIncome and CoapplicantIncome
data["TotalIncome"] = data["ApplicantIncome"] + data["CoapplicantIncome"]
# Plot scatter plot between 'TotalIncome' and LoanAmount using ax_3. Set axis title as Total Income
ax_3.scatter(data["TotalIncome"] , data["LoanAmount"])
ax_3.set(title="Total Income")
| 25.493333 | 214 | 0.719317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,129 | 0.71984 |
f0ef146504402ffb34754f9067bbfa37ee601926 | 649 | py | Python | tests/migration_tests.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | null | null | null | tests/migration_tests.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | null | null | null | tests/migration_tests.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | 1 | 2021-07-09T16:29:50.000Z | 2021-07-09T16:29:50.000Z | import json
from rabbitai.migrations.versions.fb13d49b72f9_better_filters import (
Slice,
upgrade_slice,
)
from .base_tests import RabbitaiTestCase
class TestMigration(RabbitaiTestCase):
    def test_upgrade_slice(self):
        """A filter_box slice's `metric` key must be migrated into `filter_configs`."""
        original_params = {"metric": "foo", "groupby": ["bar"]}
        slc = Slice(
            slice_name="FOO",
            viz_type="filter_box",
            params=json.dumps(original_params),
        )
        upgrade_slice(slc)
        migrated = json.loads(slc.params)
        # The legacy top-level key is gone; the new structure carries it instead.
        self.assertNotIn("metric", migrated)
        self.assertIn("filter_configs", migrated)
        first_config = migrated["filter_configs"][0]
        self.assertEqual(first_config.get("metric"), "foo")
| 25.96 | 70 | 0.640986 | 488 | 0.751926 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.123267 |
f0f04e94a81beb88cb190deaab3f4d7bf4be0477 | 125 | py | Python | pyaz/mariadb/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | null | null | null | pyaz/mariadb/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | null | null | null | pyaz/mariadb/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | 1 | 2022-02-03T09:12:01.000Z | 2022-02-03T09:12:01.000Z | '''
Manage Azure Database for MariaDB servers.
'''
from .. pyaz_utils import _call_az
from . import db, server, server_logs
| 17.857143 | 42 | 0.744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.4 |
f0f24b597dd5dc011330fa8b6f22b56989bc02be | 19,805 | py | Python | utils/pvacapi/controllers/database.py | mrichters/pVACtools | 1e86411055b3da9f81f966e83578dfadc3883498 | [
"BSD-3-Clause-Clear"
] | 96 | 2017-11-06T18:53:19.000Z | 2022-02-28T07:36:12.000Z | utils/pvacapi/controllers/database.py | mrichters/pVACtools | 1e86411055b3da9f81f966e83578dfadc3883498 | [
"BSD-3-Clause-Clear"
] | 521 | 2017-11-29T16:27:54.000Z | 2022-03-30T13:57:38.000Z | utils/pvacapi/controllers/database.py | mrichters/pVACtools | 1e86411055b3da9f81f966e83578dfadc3883498 | [
"BSD-3-Clause-Clear"
] | 53 | 2017-11-29T19:40:27.000Z | 2022-01-14T16:51:12.000Z | import os
import re
import csv
import sys
import json
import yaml
import time
import socket
import connexion
import postgresql as psql
from flask import current_app
from urllib.parse import urlencode
from hashlib import md5
from bokeh.embed import server_document
from .processes import fetch_process, is_running, process_info
from .utils import column_filter
# Matches a bare decimal literal such as ".5" or "12.75" (no sign, no exponent).
float_pattern = re.compile(r'^\d*\.\d+$')
# Matches an optionally negative integer literal.
int_pattern = re.compile(r'^-?\d+$')
# Matches the literal token "NA" — treated as SQL NULL when loading TSV data.
NA_pattern = re.compile(r'^NA$')
# Parses a client filter expression of the form "<column><op><value>", e.g. "score>=10".
queryfilters = re.compile(r'(.+)(<=?|>=?|!=|==)(.+)')
def init_column_mapping(row, schema):
    """Generate initial estimates of column data types from the first row.

    Returns (converters, type_names): a dict of Python conversion callables
    and a dict of SQL type names, both keyed by filtered column name.
    """
    # Start every column as text, then let the predefined schema override.
    type_names = {column_filter(name): 'text' for name in row}
    type_names.update(
        {name: sqltype for (name, sqltype) in schema.items() if name in type_names}
    )
    # Promote schema-less columns whose first value looks numeric.
    for (name, value) in row.items():
        name = column_filter(name)
        if name in schema:
            continue
        if int_pattern.match(value):
            try:
                int(value)
                print("Assigning int to", name, "based on", value)
                type_names[name] = 'integer'
            except ValueError:
                print("ERROR: Int mismatch:", value)
        elif float_pattern.match(value):
            try:
                float(value)
                print("Assigning float to", name, "based on", value)
                type_names[name] = 'decimal'
            except ValueError:
                print("ERROR: Float mismatch:", value)
    # Build the Python-side converter for each SQL type.
    converters = {}
    for (name, sqltype) in type_names.items():
        if 'int' in sqltype:
            converters[name] = int
        elif sqltype == 'decimal':
            converters[name] = float
        else:
            converters[name] = str
    return (converters, type_names)
def column_mapping(row, mapping, schema):
    """Convert one raw TSV row through *mapping*.

    Text columns whose current value parses as int/float are promoted, and
    the promotions are reported so the caller can ALTER the table.
    Returns (mapping, converted_row, promotions).
    """
    converted = {}
    promotions = {}
    for (name, value) in row.items():
        name = column_filter(name)
        # Missing values and literal "NA" become SQL NULLs.
        if value is None or NA_pattern.match(str(value)):
            converted[name] = None
            continue
        # Only schema-less text columns are candidates for promotion.
        if name not in schema and mapping[name] == str:
            if int_pattern.match(value):
                try:
                    int(value)
                    print("Assigning int to", name, "based on", value)
                    mapping[name] = int
                    promotions[name] = int
                except ValueError:
                    print("ERROR: Int mismatch:", value)
            elif float_pattern.match(value):
                try:
                    float(value)
                    print("Assigning float to", name, "based on", value)
                    mapping[name] = float
                    promotions[name] = float
                except ValueError:
                    print("ERROR: Float mismatch:", value)
        try:
            converted[name] = mapping[name](value)
        except ValueError:
            # Unconvertible value — store NULL rather than failing the load.
            converted[name] = None
    return (mapping, converted, promotions)
def old_file_read(db, CREATE_TABLE, tablekey, column_names, reader, mapping):
    """Fallback loader: INSERT the TSV row-by-row through Python.

    Used when the Postgres account cannot COPY the file server-side
    (non-superuser).  Much slower than the COPY path in table_transaction.
    """
    with db.xact():
        db.execute(CREATE_TABLE)
    # table marked for insertion during original attempt, so don't need to here
    # prepare the insertion query
    insert = db.prepare("INSERT INTO %s (%s) VALUES (%s)" % (
        tablekey,
        ','.join(column_names),
        # one numbered placeholder ($1, $2, ...) per column
        ','.join('$%d' % i for (_, i) in zip(
            column_names, range(1, sys.maxsize)
        ))
    ))
    update = "ALTER TABLE %s " % tablekey
    for row in reader:
        # process each row
        # We format the data in the row and update column data types, if
        # necessary
        (mapping, formatted, changes) = column_mapping(row, mapping, current_app.config['schema'])
        if len(changes):
            #Generate a query to alter the table schema, if any changes are required
            alter_cols = []
            for (k, v) in changes.items():
                # if there were any changes to the data type, update the table
                # since we only ever update a text column to int/decimal, then
                # it's okay to nullify the data
                typ = ''
                if v == int:
                    typ = 'bigint' if k in {'start', 'stop'} else 'integer'
                elif v == float:
                    typ = 'decimal'
                alter_cols.append(
                    "ALTER COLUMN %s SET DATA TYPE %s USING %s::%s" % (
                        k, typ, k, typ
                    )
                )
            # Re-generate the insert statement since the data types changed
            print("Alter:", update + ','.join(alter_cols))
            db.execute(update + ','.join(alter_cols))
            insert = db.prepare("INSERT INTO %s (%s) VALUES (%s)" % (
                tablekey,
                ','.join(column_names),
                ','.join('$%d' % i for (_, i) in zip(
                    column_names, range(1, sys.maxsize)
                ))
            ))
        # insert the row
        insert(*[formatted[column] for column in column_names])
def table_transaction(file_permissions, db, CREATE_TABLE, tablekey, all_tablecolumns, raw_reader, column_names, mapping):
    """Create the table and bulk-load the TSV with a server-side COPY.

    When *file_permissions* is False, the file is first staged in /tmp with
    world-readable permissions so the postgres server process can open it.
    After the load, the column types are adjusted from a sample row.
    """
    with db.xact():
        db.execute(CREATE_TABLE)
        db.prepare("LOCK TABLE %s IN ACCESS EXCLUSIVE MODE" % (tablekey))
        copy_query = "COPY %s (%s) FROM '%s' WITH FREEZE NULL 'NA' DELIMITER E'\t' CSV HEADER" % (tablekey, all_tablecolumns, raw_reader.name)
        #copy_query may result in psql.exceptions.InsufficientPrivilegeError when run; workaround attempted below
        if file_permissions:
            #mark the table for deletion when the server shuts down
            #don't need to mark table for deletion during second attempt
            if 'db-clean' not in current_app.config:
                current_app.config['db-clean'] = [tablekey]
            else:
                current_app.config['db-clean'].append(tablekey)
            #attempt file copy
            db.execute(copy_query)
        else:
            # Stage the file in /tmp so the postgres server process can read it.
            import subprocess
            filedest = "/tmp/"+os.path.basename(raw_reader.name)
            subprocess.run(["mktemp", filedest], stdout=subprocess.DEVNULL)
            subprocess.run(["cp", raw_reader.name, filedest])
            subprocess.run(["chmod", "666", filedest])
            copy_query = "COPY %s (%s) FROM '%s' WITH FREEZE NULL 'NA' DELIMITER E'\t' CSV HEADER" % (tablekey, all_tablecolumns, filedest)
            try:
                db.execute(copy_query)
                print("...Success")
            finally:
                # Always remove the staged copy, even if the COPY failed.
                subprocess.run(["rm", filedest])
    # Sample one non-null value per column to decide whether any text
    # columns should be promoted to numeric types.
    col_val_query = "SELECT "
    for col_name in column_names:
        col_val_query += "(select %s from %s where %s is not null limit 1), "%(col_name, tablekey, col_name)
    col_val_query = col_val_query[:-2]
    col_values = db.prepare(col_val_query)
    values = col_values()[0]
    update = "ALTER TABLE %s " % tablekey
    row = dict(zip(col_values.column_names, values))
    (mapping, formatted, changes) = column_mapping(row, mapping, current_app.config['schema'])
    if len(changes):
        #Generate a query to alter the table schema, if any changes are required
        alter_cols = []
        for (k, v) in changes.items():
            # if there were any changes to the data type, update the table
            # since we only ever update a text column to int/decimal, then
            # it's okay to nullify the data
            typ = ''
            if v == int:
                typ = 'bigint' if k in {'start', 'stop'} else 'integer'
            elif v == float:
                typ = 'decimal'
            alter_cols.append(
                "ALTER COLUMN %s SET DATA TYPE %s USING %s::%s" % (
                    k, typ, k, typ
                )
            )
        print("Alter:", update + ','.join(alter_cols))
        db.execute(update + ','.join(alter_cols))
def create_table(parentID, fileID, data, tablekey, db):
    """Cache the requested result/visualize file in a Postgres table.

    Returns None on success, a (dict, 400) error tuple on bad input, the
    serve_as() payload for non-TSV files, or [] for an empty TSV.
    """
    # Open a reader to cache the file in the database
    if parentID != -1:
        process = fetch_process(parentID, data, current_app.config['storage']['children'])
        if not process[0]:
            return (
                {
                    "code": 400,
                    "message": "The requested process (%d) does not exist" % parentID,
                    "fields": "parentID"
                }, 400
            )
        if is_running(process):
            return (
                {
                    "code": 400,
                    "message": "The requested process (%d) is still running" % parentID,
                    "fields": "parentID"
                }, 400
            )
        if str(fileID) not in process[0]['files']:
            return (
                {
                    "code": 400,
                    "message": "The requested fileID (%s) does not exist for this process (%d)" % (fileID, parentID),
                    "fields": "fileID"
                }, 400
            )
        raw_reader = open(process[0]['files'][fileID]['fullname'])
    else:
        if str(fileID) not in data['visualize']:
            return (
                {
                    "code": 400,
                    "message": "The requested fileID (%s) does not exist in the visualize" % fileID,
                    "fields": "fileID"
                }, 400
            )
        raw_reader = open(data['visualize'][str(fileID)]['fullname'])
    # Non-TSV files cannot be tabulated — return their contents directly.
    if not raw_reader.name.endswith('.tsv'):
        ext = os.path.splitext(raw_reader.name)[1].lower()
        if len(ext) and ext[0] == '.':
            ext = ext[1:]
        return serve_as(raw_reader, ext)
    reader = csv.DictReader(raw_reader, delimiter='\t')
    # A throwaway second reader fetches the first data row without
    # consuming rows from the main reader.
    tmp_reader = open(raw_reader.name)
    tmp = csv.DictReader(tmp_reader, delimiter='\t')
    try:
        init = next(tmp)
    except StopIteration:
        return []
    tmp_reader.close()
    # Get an initial estimate of column datatypes from the first row
    (mapping, column_names) = init_column_mapping(init, current_app.config['schema'])
    tablecolumns = "\n".join( # use the estimated types to create the table
        "%s %s," % (colname, column_names[colname])
        for colname in column_names
    )[:-1]
    CREATE_TABLE = "CREATE TABLE %s (\
    rowid SERIAL PRIMARY KEY NOT NULL,\
    %s\
    )" % (tablekey, tablecolumns)
    all_tablecolumns = ', '.join(column_filter(col) for col in reader.fieldnames)
    try:
        table_transaction(True, db, CREATE_TABLE, tablekey, all_tablecolumns, raw_reader, column_names, mapping)
    except psql.exceptions.UniqueError: #If another transaction already created specified table, pass
        pass
    except psql.exceptions.InsufficientPrivilegeError as e:
        #can occur when postgres user unable to open file due to permissions; specifically for travis-ci tests
        #check if resulting from postgres user permissions
        if e.args[0].startswith("must be superuser"):
            print("WARNING: Postgres user is not a super user; visualization time may be slow")
            old_file_read(db, CREATE_TABLE, tablekey, column_names, reader, mapping) #use inefficient file-read-to-db method
        else:
            #attempt to resolve by copying file to /tmp/, changing its permissions, and accessing it there
            try:
                print("InsufficientPrivilegeError raised in accessing file.\nAttempting workaround...")
                table_transaction(False, db, CREATE_TABLE, tablekey, all_tablecolumns, raw_reader, column_names, mapping)
            except psql.exceptions.InsufficientPrivilegeError:
                print("Postgres could not access file. Check to make sure that both the "
                    "file and your current postgres user has the appropriate permissions.")
                raise
    raw_reader.close()
def filterfile(parentID, fileID, count, page, filters, sort, direction):
    """Gets the file ID belonging to the parent.\
    For result files, the parentID is the process ID that spawned them.\
    For visualize files, the parentID is -1"""
    data = current_app.config['storage']['loader']()
    # first, generate the key
    tablekey = "data_%s_%s" % (
        (parentID if parentID >= 0 else 'visualize'),
        fileID
    )
    # check if the table exists:
    db = psql.open("localhost/pvacseq")
    fileID = str(fileID)
    with db.xact():
        query = db.prepare("SELECT 1 FROM information_schema.tables WHERE table_name = $1")
        response = query(tablekey)
    if not len(response): # table does not exist
        # Lazily load the file into Postgres on first access.
        table_errors = create_table(parentID, fileID, data, tablekey, db)
        if table_errors != None:
            return table_errors
    #with db.synchronizer:
    #    test_query = db.prepare("SELECT 1 FROM information_schema.tables WHERE table_name = $1")
    #    test_response = query(tablekey)
    with db.xact():
        typequery = db.prepare(
            "SELECT column_name, data_type FROM information_schema.columns WHERE table_name = $1"
        )
        column_defs = typequery(tablekey)
    # Map each column's SQL type to the Python converter used for filter values.
    column_maps = {}
    for (col, typ) in column_defs:
        if 'int' in typ:
            column_maps[col] = int
        elif typ == 'numeric'or typ == 'decimal':
            column_maps[col] = float
        else:
            column_maps[col] = str
    # Translate each "<column><op><value>" filter into a SQL predicate.
    # NOTE(review): predicates are assembled by string interpolation with
    # json.dumps-based quoting rather than bound parameters — worth hardening
    # against SQL injection if filters can come from untrusted clients.
    formatted_filters = []
    for i in range(len(filters)):
        f = filters[i].strip()
        if not len(f):
            continue
        result = queryfilters.match(f)
        if not result:
            return ({
                "code": 400,
                "message": "Encountered an invalid filter (%s)" % f,
                "fields": "filters"
            }, 400)
        colname = column_filter(result.group(1))
        if colname not in column_maps:
            return ({
                "code": 400,
                "message": "Unknown column name %s" % result.group(1),
                "fields": "filters"
            }, 400)
        op = result.group(2)
        typ = column_maps[colname]
        val = None
        try:
            val = column_maps[colname](
                result.group(3)
            )
        except ValueError:
            return ({
                "code": 400,
                "message": "Value %s cannot be formatted to match the type of column %s (%s)" % (
                    result.group(3),
                    result.group(1),
                    typ
                )
            }, 400)
        if typ == str and (op in {'==', '!='}):
            # String equality is expressed as (NOT) LIKE on the quoted value.
            formatted_filters.append(
                json.dumps(colname) + (' not ' if '!' in op else ' ') + "LIKE '%s'" % (
                    json.dumps(val)[1:-1]
                )
            )
        else: # type is numerical
            op = op.replace('==', '=')
            formatted_filters.append(
                '%s %s %s' % (
                    json.dumps(colname),
                    op,
                    json.dumps(val)
                )
            )
    # Assemble the final SELECT with optional WHERE / ORDER BY / LIMIT / OFFSET.
    raw_query = "SELECT %s FROM %s" % (
        ','.join([k[0] for k in column_defs]),
        tablekey
    )
    if len(formatted_filters):
        raw_query += " WHERE " + " AND ".join(formatted_filters)
    if sort:
        if column_filter(sort) not in column_maps:
            return ({
                'code': 400,
                'message': 'Invalid column name %s' % sort,
                'fields': 'sort'
            }, 400)
        raw_query += " ORDER BY %s" % (column_filter(sort))
        if direction:
            raw_query += " " + direction
    if count:
        raw_query += " LIMIT %d" % count
    if page:
        raw_query += " OFFSET %d" % (page * count)
    print("Query:", raw_query)
    import decimal
    with db.xact('SERIALIZABLE', 'READ ONLY DEFERRABLE'):
        query = db.prepare(raw_query)
        # Postgres numerics come back as decimal.Decimal; convert to float
        # so the rows are JSON-serializable.
        decimalizer = lambda x: (float(x) if type(x) == decimal.Decimal else x)
        result = [
            {
                colname: decimalizer(value) for (colname, value) in zip(
                    [k[0] for k in column_defs],
                    [val for val in row]
                )
            } for row in query.rows()
        ]
    db.close()
    return result
def fileschema(parentID, fileID):
    """Report the Postgres column-name -> data-type mapping for a cached file table."""
    data = current_app.config['storage']['loader']()
    # Table names follow the same data_<parent>_<file> convention as filterfile.
    tablekey = "data_%s_%s" % (
        (parentID if parentID >= 0 else 'visualize'),
        fileID
    )
    db = psql.open("localhost/pvacseq")
    with db.xact():
        exists_check = db.prepare("SELECT 1 FROM information_schema.tables WHERE table_name = $1")
        if not len(exists_check(tablekey)):
            # The file was never run through filterfile, so there is nothing to describe.
            return ({
                'code': 400,
                'message': "The requested file has not been loaded into the Postgres database",
                'fields': "fileID"
            }, 400)
    typequery = db.prepare("SELECT column_name, data_type FROM information_schema.columns WHERE table_name = $1")
    result = {name: sqltype for (name, sqltype) in typequery(tablekey)}
    db.close()
    return result
def serve_as(reader, filetype):
    """Wrap the contents of *reader* for the client, parsed per *filetype*.

    Supported filetypes: 'json', 'yaml'/'yml', 'log' (list of rstripped
    lines); anything else is returned verbatim under 'raw'.
    """
    if filetype == 'json':
        return {
            'filetype':'json',
            'content':json.load(reader)
        }
    elif filetype == 'yaml' or filetype == 'yml':
        return {
            'filetype':'yaml',
            # Loader must be explicit: bare yaml.load() is a TypeError on
            # PyYAML >= 6 and, on older versions, would allow arbitrary
            # object construction from the served file. SafeLoader handles
            # all standard YAML tags.
            'content':yaml.load(reader.read(), Loader=yaml.SafeLoader)
        }
    elif filetype == 'log':
        return {
            'filetype':'log',
            'content':[line.rstrip() for line in reader.readlines()]
        }
    else:
        return {
            'filetype':'raw',
            'content':reader.read()
        }
def visualize(parentID, fileID):
    """Wrap the bokeh visualization script (or its error text) in a minimal HTML page."""
    vis = visualize_script(parentID, fileID)
    # visualize_script returns either a plain string or an (error, status) tuple;
    # unwrap the tuple so only the message is embedded.
    # BUG FIX: the closing tag was previously emitted as '</html' (missing '>').
    return '<html><head></head><body>%s</body></html>'%(vis if type(vis)!=tuple else vis[0])
def visualize_script(parentID, fileID):
    """Return an HTML document containing the requested table visualization"""
    from .files import results_getcols
    data = current_app.config['storage']['loader']()
    #first call filterfile to load the table if it's not loaded already
    result = filterfile(parentID, fileID, 1, 0, '', 'rowid', 'ASC')
    # filterfile returns a list of rows on success, or an error tuple/dict.
    if type(result) != list:
        return (
            {
                'code':400,
                'message':json.dumps(result),
                'fields':'unknown',
            },
            400
        )
    if len(result) == 0 or type(result) == dict:
        return (
            'Results file contains no data - cannot visualize'
        )
    cols = results_getcols(parentID, fileID)
    if type(cols) != dict:
        return (
            {
                'code':400,
                'message':json.dumps(cols),
                'fields':'unknown'
            },
            400
        )
    # Determine a display name for the sample: prefer the spawning process's
    # parameters, fall back to the visualize file's display name, else a stub.
    proc_data = process_info(parentID)
    if type(proc_data)==dict and 'parameters' in proc_data and 'sample_name' in proc_data['parameters']:
        sample = proc_data['parameters']['sample_name']
    elif parentID == -1:
        sample = data['visualize'][str(fileID)]['display_name'].rsplit(".", 1)[0]
    else:
        sample = 'Unknown Sample'
    # Prefer the proxy address when the API sits behind a reverse proxy.
    if current_app.PROXY_IP_ADDRESS is not None:
        IP = current_app.PROXY_IP_ADDRESS
    else:
        IP = current_app.IP_ADDRESS
    # Embed the bokeh server app; the arguments tell it which table to render.
    return (
        server_document(
            url="http://" + IP + ":5006/visualizations",
            arguments={
                'target-process': parentID,
                'target-file': fileID,
                'cols': json.dumps(cols),
                'samplename': sample
            }
        )
    )
| 38.833333 | 142 | 0.533401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,522 | 0.278818 |
f0f336d7a75970c20e9d6a476b0d5df71d22aa9a | 3,327 | py | Python | BiasedPUlearning/nnPUSB/model.py | XINGXIAOYU/PUlearning | 3401b77ccdd653d39f4f3a6258a42c7938fa9ede | [
"MIT"
] | 24 | 2019-10-06T08:05:29.000Z | 2021-12-20T10:52:17.000Z | BiasedPUlearning/nnPUSB/model.py | XINGXIAOYU/PUlearning | 3401b77ccdd653d39f4f3a6258a42c7938fa9ede | [
"MIT"
] | 2 | 2020-09-29T22:34:25.000Z | 2020-09-29T22:43:01.000Z | BiasedPUlearning/nnPUSB/model.py | MasaKat0/PUlearning | 3401b77ccdd653d39f4f3a6258a42c7938fa9ede | [
"MIT"
] | 13 | 2019-05-20T06:29:52.000Z | 2022-02-03T15:31:20.000Z | import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, Variable
from chainer import optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
class normalNN(Chain):
    """Minimal fully-connected network: dim -> 100 -> 1, ReLU in between."""

    def __init__(self, dim):
        super().__init__(
            l1=L.Linear(dim, 100),
            l2=L.Linear(100, 1),
        )
        self.af = F.relu

    def __call__(self, x):
        hidden = self.af(self.l1(x))
        return self.l2(hidden)
class MultiLayerPerceptron(Chain):
    """Four 300-unit hidden layers (Linear -> BatchNorm -> ReLU) and a linear output."""

    def __init__(self, dim):
        super(MultiLayerPerceptron, self).__init__(
            l1=L.Linear(dim, 300, nobias=True),
            b1=L.BatchNormalization(300),
            l2=L.Linear(300, 300, nobias=True),
            b2=L.BatchNormalization(300),
            l3=L.Linear(300, 300, nobias=True),
            b3=L.BatchNormalization(300),
            l4=L.Linear(300, 300, nobias=True),
            b4=L.BatchNormalization(300),
            l5=L.Linear(300, 1))
        self.af = F.relu

    def __call__(self, x):
        h = x
        # Same linear -> batchnorm -> ReLU order as the hand-unrolled original.
        for linear, norm in ((self.l1, self.b1), (self.l2, self.b2),
                             (self.l3, self.b3), (self.l4, self.b4)):
            h = self.af(norm(linear(h)))
        return self.l5(h)
class CNN(Chain):
    """All-convolutional network: nine conv+batchnorm+ReLU stages, then three FC layers."""

    def __init__(self, dim):
        super(CNN, self).__init__(
            conv1=L.Convolution2D(3, 96, 3, pad=1),
            conv2=L.Convolution2D(96, 96, 3, pad=1),
            conv3=L.Convolution2D(96, 96, 3, pad=1, stride=2),
            conv4=L.Convolution2D(96, 192, 3, pad=1),
            conv5=L.Convolution2D(192, 192, 3, pad=1),
            conv6=L.Convolution2D(192, 192, 3, pad=1, stride=2),
            conv7=L.Convolution2D(192, 192, 3, pad=1),
            conv8=L.Convolution2D(192, 192, 1),
            conv9=L.Convolution2D(192, 10, 1),
            b1=L.BatchNormalization(96),
            b2=L.BatchNormalization(96),
            b3=L.BatchNormalization(96),
            b4=L.BatchNormalization(192),
            b5=L.BatchNormalization(192),
            b6=L.BatchNormalization(192),
            b7=L.BatchNormalization(192),
            b8=L.BatchNormalization(192),
            b9=L.BatchNormalization(10),
            fc1=L.Linear(None, 1000),
            fc2=L.Linear(1000, 1000),
            fc3=L.Linear(1000, 1),
        )
        self.af = F.relu

    def __call__(self, x):
        # conv -> batchnorm -> ReLU for each of the nine stages, in order.
        stages = (
            (self.conv1, self.b1), (self.conv2, self.b2), (self.conv3, self.b3),
            (self.conv4, self.b4), (self.conv5, self.b5), (self.conv6, self.b6),
            (self.conv7, self.b7), (self.conv8, self.b8), (self.conv9, self.b9),
        )
        h = x
        for conv, norm in stages:
            h = self.af(norm(conv(h)))
        h = self.af(self.fc1(h))
        h = self.af(self.fc2(h))
        return self.fc3(h)
| 28.930435 | 64 | 0.502254 | 3,075 | 0.924256 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f0f42be3894dc386d19ebedd7a5a49fac7cfc1f4 | 1,597 | py | Python | scripts/set_examples_to_doc.py | sasmirnov/numba-dppy | 6ec41a5adab3034ddcfba2df312117afd6e2327b | [
"Apache-2.0"
] | null | null | null | scripts/set_examples_to_doc.py | sasmirnov/numba-dppy | 6ec41a5adab3034ddcfba2df312117afd6e2327b | [
"Apache-2.0"
] | null | null | null | scripts/set_examples_to_doc.py | sasmirnov/numba-dppy | 6ec41a5adab3034ddcfba2df312117afd6e2327b | [
"Apache-2.0"
] | null | null | null | """ This script is needed to convert gdb scripts from commands to documentation
"""
import os
def convert_commands_to_docs():
    """Convert the gdb command scripts under numba_dppy/examples/debug/commands
    into documentation files in the sibling docs/ directory.

    Housekeeping/boilerplate lines are dropped; "# Run:" lines become shell
    prompts; other comments become plain text; everything else is prefixed
    with "(gdb) ".
    """
    commands_dir = os.getcwd() + "/numba_dppy/examples/debug/commands"
    examples = os.listdir(commands_dir)
    os.chdir(commands_dir + "/docs")
    for file in examples:
        if file == "docs":
            continue  # skip the output directory itself
        # FIX: use context managers so the file handles are always closed
        # (the original leaked both the reader and the writer).
        with open(commands_dir + "/" + file, "r") as open_file:
            read_lines = open_file.readlines()
        if os.path.exists(file):
            os.remove(file)
        with open(file, "a") as write_file:
            for line in read_lines:
                # Drop lines that are gdb housekeeping, not documentation.
                if (
                    line.startswith("# Expected")
                    or line.startswith("echo Done")
                    or line.startswith("quit")
                    or line.startswith("set trace-commands")
                    or line.startswith("set pagination")
                ):
                    continue
                if line.startswith("# Run: "):
                    # Turn the run command into a shell prompt, stripping the
                    # gdb "-command <script>" arguments.
                    line = line.replace("# Run:", "$")
                    words = line.split()
                    for i in range(len(words)):
                        if words[i] == "-command" or words[i].startswith("commands"):
                            words[i] = ""
                    line = " ".join(words)
                    line = " ".join(line.split()) + "\n"
                elif line.startswith("# "):
                    # Ordinary comments become plain documentation text.
                    line = line.replace("# ", "")
                else:
                    # Everything else is a gdb command; show it at the prompt.
                    line = "(gdb) " + line
                write_file.write(line)


if __name__ == "__main__":
    convert_commands_to_docs()
| 36.295455 | 85 | 0.469004 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 287 | 0.179712 |
f0f438b51d2d508d57dd917ca013391d3e1a093f | 6,672 | py | Python | Phidget22/Devices/Accelerometer.py | keysie/python-fast-data-visualization | ebf392fe6d8d25bd5c178edbf095cf29e0daa4af | [
"MIT"
] | 1 | 2020-02-26T12:43:07.000Z | 2020-02-26T12:43:07.000Z | Phidget22/Devices/Accelerometer.py | keysie/phidget-python-interface | ebf392fe6d8d25bd5c178edbf095cf29e0daa4af | [
"MIT"
] | null | null | null | Phidget22/Devices/Accelerometer.py | keysie/phidget-python-interface | ebf392fe6d8d25bd5c178edbf095cf29e0daa4af | [
"MIT"
] | 1 | 2020-02-26T12:43:12.000Z | 2020-02-26T12:43:12.000Z | import sys
import ctypes
from Phidget22.PhidgetSupport import PhidgetSupport
from Phidget22.PhidgetException import PhidgetException
from Phidget22.Phidget import Phidget
class Accelerometer(Phidget):
def __init__(self):
Phidget.__init__(self)
self.handle = ctypes.c_void_p()
if sys.platform == 'win32':
self._AccelerationChangeFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.POINTER(ctypes.c_double), ctypes.c_double)
else:
self._AccelerationChangeFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.POINTER(ctypes.c_double), ctypes.c_double)
self._AccelerationChange = None
self._onAccelerationChange = None
try:
__func = PhidgetSupport.getDll().PhidgetAccelerometer_create
__func.restype = ctypes.c_int32
res = __func(ctypes.byref(self.handle))
except RuntimeError:
raise
if res > 0:
raise PhidgetException(res)
def __del__(self):
Phidget.__del__(self)
def _localAccelerationChangeEvent(self, handle, userPtr, acceleration, timestamp):
if self._AccelerationChange == None:
return
self._AccelerationChange(self, acceleration, timestamp)
def setOnAccelerationChangeHandler(self, handler):
if handler == None:
self._AccelerationChange = None
self._onAccelerationChange = None
else:
self._AccelerationChange = handler
self._onAccelerationChange = self._AccelerationChangeFactory(self._localAccelerationChangeEvent)
try:
__func = PhidgetSupport.getDll().PhidgetAccelerometer_setOnAccelerationChangeHandler
__func.restype = ctypes.c_int32
res = __func(self.handle, self._onAccelerationChange, None)
except RuntimeError:
self._AccelerationChange = None
self._onAccelerationChange = None
def getAcceleration(self):
_Acceleration = (ctypes.c_double * 3)()
try:
__func = PhidgetSupport.getDll().PhidgetAccelerometer_getAcceleration
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_Acceleration))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
return list(_Acceleration)
def getMinAcceleration(self):
_MinAcceleration = (ctypes.c_double * 3)()
try:
__func = PhidgetSupport.getDll().PhidgetAccelerometer_getMinAcceleration
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_MinAcceleration))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
return list(_MinAcceleration)
def getMaxAcceleration(self):
_MaxAcceleration = (ctypes.c_double * 3)()
try:
__func = PhidgetSupport.getDll().PhidgetAccelerometer_getMaxAcceleration
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_MaxAcceleration))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
return list(_MaxAcceleration)
def getAccelerationChangeTrigger(self):
_AccelerationChangeTrigger = ctypes.c_double()
try:
__func = PhidgetSupport.getDll().PhidgetAccelerometer_getAccelerationChangeTrigger
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_AccelerationChangeTrigger))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
return _AccelerationChangeTrigger.value
def setAccelerationChangeTrigger(self, AccelerationChangeTrigger):
_AccelerationChangeTrigger = ctypes.c_double(AccelerationChangeTrigger)
try:
__func = PhidgetSupport.getDll().PhidgetAccelerometer_setAccelerationChangeTrigger
__func.restype = ctypes.c_int32
result = __func(self.handle, _AccelerationChangeTrigger)
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def getMinAccelerationChangeTrigger(self):
_MinAccelerationChangeTrigger = ctypes.c_double()
try:
__func = PhidgetSupport.getDll().PhidgetAccelerometer_getMinAccelerationChangeTrigger
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_MinAccelerationChangeTrigger))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
return _MinAccelerationChangeTrigger.value
def getMaxAccelerationChangeTrigger(self):
_MaxAccelerationChangeTrigger = ctypes.c_double()
try:
__func = PhidgetSupport.getDll().PhidgetAccelerometer_getMaxAccelerationChangeTrigger
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_MaxAccelerationChangeTrigger))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
return _MaxAccelerationChangeTrigger.value
def getAxisCount(self):
_AxisCount = ctypes.c_int()
try:
__func = PhidgetSupport.getDll().PhidgetAccelerometer_getAxisCount
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_AxisCount))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
return _AxisCount.value
def getDataInterval(self):
_DataInterval = ctypes.c_uint32()
try:
__func = PhidgetSupport.getDll().PhidgetAccelerometer_getDataInterval
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DataInterval))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
return _DataInterval.value
def setDataInterval(self, DataInterval):
_DataInterval = ctypes.c_uint32(DataInterval)
try:
__func = PhidgetSupport.getDll().PhidgetAccelerometer_setDataInterval
__func.restype = ctypes.c_int32
result = __func(self.handle, _DataInterval)
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
def getMinDataInterval(self):
_MinDataInterval = ctypes.c_uint32()
try:
__func = PhidgetSupport.getDll().PhidgetAccelerometer_getMinDataInterval
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_MinDataInterval))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
return _MinDataInterval.value
def getMaxDataInterval(self):
_MaxDataInterval = ctypes.c_uint32()
try:
__func = PhidgetSupport.getDll().PhidgetAccelerometer_getMaxDataInterval
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_MaxDataInterval))
except RuntimeError:
raise
if result > 0:
raise PhidgetException(result)
return _MaxDataInterval.value
	def getTimestamp(self):
		"""Return the channel timestamp (double) via PhidgetAccelerometer_getTimestamp.

		A positive native result code is raised as a PhidgetException.
		"""
		_Timestamp = ctypes.c_double()
		try:
			__func = PhidgetSupport.getDll().PhidgetAccelerometer_getTimestamp
			__func.restype = ctypes.c_int32
			result = __func(self.handle, ctypes.byref(_Timestamp))
		except RuntimeError:
			raise
		if result > 0:
			raise PhidgetException(result)
		return _Timestamp.value
| 27.012146 | 145 | 0.77473 | 6,498 | 0.973921 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.001049 |
f0f5116f620313599917f1b146e0c00251125aed | 728 | py | Python | prickly-pufferfish/python_questions/merge_ranges.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 40 | 2020-08-02T07:38:22.000Z | 2021-07-26T01:46:50.000Z | prickly-pufferfish/python_questions/merge_ranges.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 134 | 2020-07-31T12:15:45.000Z | 2020-12-13T04:42:19.000Z | prickly-pufferfish/python_questions/merge_ranges.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 101 | 2020-07-31T12:00:47.000Z | 2021-11-01T09:06:58.000Z | """
In HiCal, a meeting is stored as tuples of integers (start_time, end_time). /
These integers represent the number of 30-minute blocks past 9:00am. /
For example: /
(2, 3) # meeting from 10:00 - 10:30 am /
(6, 9) # meeting from 12:00 - 1:30 pm /
Write a function merge_ranges() that /
takes a list of meeting time ranges as a parameter /
and returns a list of condensed ranges. /
>>> merge_ranges([(3, 5), (4, 8), (10, 12), (9, 10), (0, 1)]) /
[(0, 1), (3, 8), (9, 12)] /
>>> merge_ranges([(0, 3), (3, 5), (4, 8), (10, 12), (9, 10)]) /
[(0, 8), (9, 12)] /
>>> merge_ranges([(0, 3), (3, 5)]) /
[(0, 5)] /
>>> merge_ranges([(0, 3), (3, 5), (7, 8)]) /
[(0, 5), (7, 8)] /
>>> merge_ranges([(1, 5), (2, 3)]) /
[(1, 5)] /
"""
| 28 | 77 | 0.539835 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 727 | 0.998626 |
f0f5c3172c842de383a43edeab846b0154a88fb9 | 1,209 | py | Python | tests/tokenizers.py | theeluwin/textrankr | ec692be8d952dc0d88c74001e77f5a9be20a12d3 | [
"MIT"
] | 219 | 2016-05-03T08:38:36.000Z | 2022-03-08T04:40:09.000Z | tests/tokenizers.py | theeluwin/textrankr | ec692be8d952dc0d88c74001e77f5a9be20a12d3 | [
"MIT"
] | 11 | 2016-05-03T11:25:03.000Z | 2020-12-18T00:10:31.000Z | tests/tokenizers.py | theeluwin/textrankr | ec692be8d952dc0d88c74001e77f5a9be20a12d3 | [
"MIT"
] | 44 | 2016-07-02T05:18:49.000Z | 2022-03-08T08:14:26.000Z | import json
import requests
from typing import List
from konlpy.tag import Okt
from requests.models import Response
class OktTokenizer:
    """
    Phrase-level tokenizer functor backed by KoNLPy's Okt tagger.

    The `phrases` call usually gives a better result for TextRank than a plain
    POS tokenization.

    Example:
        tokenizer: OktTokenizer = OktTokenizer()
        tokens: List[str] = tokenizer(your_text_here)
    """

    okt: Okt = Okt()

    def __call__(self, text: str) -> List[str]:
        # Okt.phrases already returns a list of phrase strings.
        return self.okt.phrases(text)
class ApiTokenizer:
    """
    Tokenizer functor that POSTs the UTF-8 encoded text to an HTTP endpoint.

    The response body must be a JSON array of string tokens.

    Example:
        tokenizer: ApiTokenizer = ApiTokenizer(endpoint)
        tokens: List[str] = tokenizer(your_text_here)
    """

    def __init__(self, endpoint: str) -> None:
        self.endpoint: str = endpoint

    def __call__(self, text: str) -> List[str]:
        response: Response = requests.post(self.endpoint, data=text.encode('utf-8'))
        return json.loads(response.text)
| 28.116279 | 165 | 0.647643 | 1,085 | 0.897436 | 0 | 0 | 0 | 0 | 0 | 0 | 587 | 0.485525 |
f0f5e1e9645a14c12d5c5d93830c5d22ebcb474f | 823 | py | Python | Module/engine.py | NoahSchiro/physics-engine | c96d96eeaf823583ac1035d58fb69d7d47019f87 | [
"MIT"
] | null | null | null | Module/engine.py | NoahSchiro/physics-engine | c96d96eeaf823583ac1035d58fb69d7d47019f87 | [
"MIT"
] | null | null | null | Module/engine.py | NoahSchiro/physics-engine | c96d96eeaf823583ac1035d58fb69d7d47019f87 | [
"MIT"
] | null | null | null | from free_bodies import *
# This is the actual engine which is apply physics to objects
class physics_engine:
    # NOTE(review): FB is a class-level list, so it is shared by every
    # physics_engine instance — confirm that a single shared world is intended.
    FB = [] # Array holds all of the objects in our simulation. FB = free bodies

    # Add free bodies to the system
    def add_fb(self, fb):
        """Register free body *fb* with the engine."""
        self.FB.append(fb)
# Remove a free body from the sytem
def remove_fb(self, fb):
if fb in self.FB:
self.FB.remove(fb)
# Apply force to an object in the system
def apply_force(self, fb, newtons):
# Make sure our object is in our system, and
# see that we are not trying to apply a negative force
if (fb in self.FB and newtons >= 0):
# Find where the object is in our engine
for bodies in self.FB:
if bodies == fb:
| 28.37931 | 84 | 0.584447 | 711 | 0.863913 | 0 | 0 | 0 | 0 | 0 | 0 | 374 | 0.454435 |
f0f736be57d784d43416f82badd0762190fea39d | 96 | py | Python | venv/lib/python3.8/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/psapi.py | GiulianaPola/select_repeats | 17a0d053d4f874e42cf654dd142168c2ec8fbd11 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/psapi.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/psapi.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/57/99/fd/1d22e7d1fbf9ab07bcdf332318605c4de276c282734bf85d8c6421a6ce | 96 | 96 | 0.895833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f0f901df9fc78d9208f11171a838b27095f77671 | 5,044 | py | Python | tests/test_spi.py | petermankowski510/python-periphery-master | 11e76632d4a58a77db71cb2e35815ef7ebc5e5dc | [
"MIT"
] | null | null | null | tests/test_spi.py | petermankowski510/python-periphery-master | 11e76632d4a58a77db71cb2e35815ef7ebc5e5dc | [
"MIT"
] | null | null | null | tests/test_spi.py | petermankowski510/python-periphery-master | 11e76632d4a58a77db71cb2e35815ef7ebc5e5dc | [
"MIT"
] | null | null | null | import sys
import periphery
from .asserts import AssertRaises
# Python 2/3 compatibility: keep using the Python 2 name raw_input for prompts.
if sys.version_info[0] == 3:
    raw_input = input

# SPI device path under test; filled in from argv by the __main__ block below.
spi_device = None
def test_arguments():
    """Constructor must reject an invalid mode and an invalid bit order."""
    print("Starting arguments test...")

    # Mode 4 is out of range; the second call additionally passes a bogus bit order.
    for extra_kwargs in ({}, {"bit_order": "blah"}):
        with AssertRaises(ValueError):
            periphery.SPI(spi_device, 4, int(1e6), **extra_kwargs)

    print("Arguments test passed.")
def test_open_close():
    """Open a real device, verify the defaults, then exercise the mode and
    max_speed setters."""
    print("Starting open/close test...")

    # Normal open (mode=1, max_speed=100000).
    spi = periphery.SPI(spi_device, 1, 100000)

    # Confirm fd and defaults.
    assert spi.fd > 0
    assert spi.mode == 1
    assert spi.max_speed == 100000
    assert spi.bit_order == "msb"
    assert spi.bits_per_word == 8
    assert spi.extra_flags == 0

    # Bit order and bits-per-word are left alone because not all SPI
    # controllers support changing them.

    # Every SPI mode must round-trip through the setter.
    for mode in (0, 1, 2, 3):
        spi.mode = mode
        assert spi.mode == mode

    # A handful of max speeds (Hz) must round-trip as well.
    for speed in (100000, 500000, 1000000, 2e6):
        spi.max_speed = speed
        assert spi.max_speed == int(speed)

    spi.close()
    print("Open/close test passed.")
def test_loopback():
    """Transfer the same pattern as list, bytearray and bytes across a
    MISO-MOSI loopback wire."""
    print("Starting loopback test...")

    spi = periphery.SPI(spi_device, 0, 100000)

    pattern = list(range(256)) * 4
    # Each supported buffer type must echo back unchanged.
    for buf_in in (pattern, bytearray(pattern), bytes(bytearray(pattern))):
        assert spi.transfer(buf_in) == buf_in

    spi.close()
    print("Loopback test passed.")
def test_interactive():
    """Drive visible transfers so a human can verify timing with a scope or
    logic analyzer."""
    print("Starting interactive test...")

    spi = periphery.SPI(spi_device, 0, 100000)

    print("Starting interactive test. Get out your logic analyzer, buddy!")
    raw_input("Press enter to continue...")

    def transfer_and_confirm(prompt):
        # One visible transfer followed by a manual yes/no confirmation.
        raw_input("Press enter to start transfer...")
        spi.transfer([0x55, 0xaa, 0x0f, 0xf0])
        print("SPI data 0x55, 0xaa, 0x0f, 0xf0")
        assert raw_input(prompt) == "y"

    # One transfer per SPI mode at the default 100KHz clock.
    transfer_and_confirm("SPI transfer speed <= 100KHz, mode 0 occurred? y/n ")
    for mode in (1, 2, 3):
        spi.mode = mode
        transfer_and_confirm("SPI transfer speed <= 100KHz, mode %d occurred? y/n " % mode)
    spi.mode = 0

    # Two higher clock speeds, back in mode 0.
    spi.max_speed = 500000
    transfer_and_confirm("SPI transfer speed <= 500KHz, mode 0 occurred? y/n ")
    spi.max_speed = 1000000
    transfer_and_confirm("SPI transfer speed <= 1MHz, mode 0 occurred? y/n ")

    spi.close()
    print("Interactive test passed.")
if __name__ == "__main__":
    # Without a device argument, print usage plus wiring hints and bail out.
    if len(sys.argv) < 2:
        print("Usage: python -m tests.test_spi <SPI device>")
        print("")
        print("[1/4] Arguments test: No requirements.")
        print("[2/4] Open/close test: SPI device should be real.")
        print("[3/4] Loopback test: SPI MISO and MOSI should be connected with a wire.")
        print("[4/4] Interactive test: SPI MOSI, CLK, CS should be observed with an oscilloscope or logic analyzer.")
        print("")
        print("Hint: for Raspberry Pi 3, enable SPI0 with:")
        print(" $ echo \"dtparam=spi=on\" | sudo tee -a /boot/config.txt")
        print(" $ sudo reboot")
        print("Use pins SPI0 MOSI (header pin 19), SPI0 MISO (header pin 21), SPI0 SCLK (header pin 23),")
        print("connect a loopback between MOSI and MISO, and run this test with:")
        print(" python -m tests.test_spi /dev/spidev0.0")
        print("")
        sys.exit(1)
    # Module-level device path consumed by all four test functions.
    spi_device = sys.argv[1]
    print("Starting SPI tests...")
    # Run the suite in increasing order of hardware requirements.
    test_arguments()
    test_open_close()
    test_loopback()
    test_interactive()
    print("All SPI tests passed.")
| 28.822857 | 117 | 0.634615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,182 | 0.432593 |
f0fb0c535a2388e4de96ced030b5de23be03985e | 12,129 | py | Python | src/shapiro/language.py | roskakori/shapiro | 55c702cb20b85cbfcc33e82964849414817339d0 | [
"MIT"
] | 2 | 2018-07-28T13:00:35.000Z | 2019-01-07T08:57:34.000Z | src/shapiro/language.py | roskakori/shapiro | 55c702cb20b85cbfcc33e82964849414817339d0 | [
"MIT"
] | 26 | 2018-07-28T10:39:36.000Z | 2018-08-05T20:37:20.000Z | src/shapiro/language.py | roskakori/shapiro | 55c702cb20b85cbfcc33e82964849414817339d0 | [
"MIT"
] | 5 | 2018-07-28T10:51:55.000Z | 2018-07-28T13:58:45.000Z | """
Language specific settings
"""
from typing import Dict, Set
from shapiro.common import Rating, ranged_rating
from shapiro.tools import log, signum
from spacy.tokens import Token
_log = log
class LanguageSentiment:
    """Language-specific sentiment lexicon and helpers.

    Holds the diminisher/intensifier/polarity/idiom/negation tables for one
    language; concrete subclasses populate them.
    """

    def __init__(self, language_code: str):
        assert language_code is not None
        assert len(language_code) == 2, 'language code must have exactly 2 characters but is: %r' % language_code
        self.language_code = language_code
        # Empty lexicon tables by default; subclasses fill them in.
        self.diminishers: Set[str] = set()
        self.intensifiers: Set[str] = set()
        self.negatives: Dict[str, Rating] = {}
        self.positives: Dict[str, Rating] = {}
        self.idioms: Dict[str, Rating] = {}
        self.negations: Set[str] = set()
        self.rating_to_localized_text_map: Dict[Rating, str] = {}

    def diminished(self, rating: Rating) -> Rating:
        """Move *rating* one step toward neutral; magnitude-1 ratings are unchanged."""
        value = rating.value
        if abs(value) <= 1:
            return rating
        return ranged_rating(value - signum(value))

    def intensified(self, rating: Rating) -> Rating:
        """Move *rating* one step away from neutral; magnitude-1 ratings are unchanged."""
        value = rating.value
        if abs(value) <= 1:
            return rating
        return ranged_rating(value + signum(value))

    def is_intensifier(self, token: Token) -> bool:
        lemma = token.lemma_.lower()
        return lemma in self.intensifiers

    def is_diminisher(self, token: Token) -> bool:
        lemma = token.lemma_.lower()
        return lemma in self.diminishers

    def is_negation(self, token: Token) -> bool:
        lemma = token.lemma_.lower()
        return lemma in self.negations
class EnglishSentiment(LanguageSentiment):
    """English ('en') sentiment lexicon.

    Fills the LanguageSentiment tables with English diminishers, intensifiers,
    polarity words, idioms and negations.  Localized rating texts are derived
    from the Rating member names.
    """

    def __init__(self):
        super().__init__('en')
        self.diminishers = {
            'a little bit',
            'a little',
            'almost',
            'barely',
            'fairly',
            'hardly',
            'just enough',
            'kind of',
            'kind-of',
            'kinda',
            'kindof',
            'less',
            'marginally',
            'minimally',
            'mostly',
            'occasionally',
            'partly',
            'pretty',
            'scarcely',
            'slightly',
            'somewhat',
            'sort of',
            'sort-of',
            'sorta',
            'sortof',
        }
        self.intensifiers = {
            'absolutely',
            'amazingly',
            'awfully',
            'completely',
            'considerably',
            'decidedly',
            'deeply',
            'dreadfully',
            'effing',
            'enormously',
            'entirely',
            'especially',
            'exceptionally',
            'extremely',
            'fabulously',
            'flippin',
            'flipping',
            'frickin',
            'fricking',
            'friggin',
            'frigging',
            'fucking',
            'fully',
            'greatly',
            'hella',
            'highly',
            'hugely',
            'incredibly',
            'intensely',
            'majorly',
            'more',
            'most',
            'particularly',
            'purely',
            'quite',
            'really',
            'remarkably',
            'so',
            'substantially',
            'terribly',
            'thoroughly',
            'totally',
            'tremendously',
            'uber',
            'unbelievably',
            'unusually',
            'utterly',
            'very',
        }
        self.negatives = {
            'appalling': Rating.VERY_BAD,
            'awful': Rating.VERY_BAD,
            'bad': Rating.BAD,
            'disgusting': Rating.VERY_BAD,
            'dreadful': Rating.VERY_BAD,
            'foul': Rating.VERY_BAD,
            'poor': Rating.BAD,
            'subpar': Rating.BAD,
            'terrible': Rating.VERY_BAD,
            'unusual': Rating.BAD,
        }
        self.positives = {
            'amazing': Rating.VERY_GOOD,
            'awesome': Rating.VERY_GOOD,
            'excellent': Rating.VERY_GOOD,
            'exceptional': Rating.VERY_GOOD,
            'fabulous': Rating.VERY_GOOD,
            'good': Rating.GOOD,
            'great': Rating.VERY_GOOD,
            'incredible': Rating.VERY_GOOD,
            'nice': Rating.GOOD,
            'remarkable': Rating.VERY_GOOD,
            'special': Rating.GOOD,
            'thorough': Rating.VERY_GOOD,
            'tremendous': Rating.VERY_GOOD,
            'wonderful': Rating.VERY_GOOD,
        }
        self.idioms = {
            "don't give up your day job": Rating.VERY_BAD,
            'add insult to injury': Rating.VERY_BAD,
            'back handed': Rating.VERY_BAD,
            'back to the drawing board': Rating.VERY_BAD,
            'barking up the wrong tree': Rating.BAD,
            'benefit of the doubt': Rating.BAD,
            'better late than never': Rating.BAD,
            'bite the bullet': Rating.BAD,
            'blessing in disguise': Rating.GOOD,
            'cooking with gas': Rating.VERY_GOOD,
            'cost an arm and a leg': Rating.VERY_BAD,
            'cut corners': Rating.VERY_BAD,
            'cut the mustard': Rating.GOOD,
            'cutting corners': Rating.VERY_BAD,
            'elephant in the room': Rating.BAD,
            'far cry from': Rating.VERY_BAD,
            'get your act together': Rating.VERY_BAD,
            'hit the nail on the head': Rating.VERY_GOOD,
            'kiss of death': Rating.VERY_BAD,
            'last straw': Rating.VERY_BAD,
            'missed the boat': Rating.VERY_BAD,
            'not rocket science': Rating.BAD,
            'nothing better than': Rating.VERY_GOOD,
            'old fashioned': Rating.BAD,
            'on the ball': Rating.VERY_GOOD,
            'out of hand': Rating.VERY_BAD,
            'pull your socks up': Rating.VERY_BAD,
            'the bomb': Rating.VERY_GOOD,
            'the cold shoulder': Rating.VERY_BAD,
            'under the weather': Rating.BAD,
            'up to par': Rating.GOOD,
            'wild goose chase': Rating.BAD,
            'yeah right': Rating.BAD,
        }
        # Fix: 'not' was previously listed twice; a set literal ignores the
        # duplicate, so only the redundant entry was removed.
        self.negations = {
            'ain\'t',
            'aint',
            'aren\'t',
            'arent',
            'can\'t',
            'cannot',
            'cant',
            'couldn\'t',
            'couldnt',
            'daren\'t',
            'darent',
            'despite',
            'didn\'t',
            'didnt',
            'doesn\'t',
            'doesnt',
            'don\'t',
            'dont',
            'hadn\'t',
            'hadnt',
            'hasn\'t',
            'hasnt',
            'haven\'t',
            'havent',
            'isn\'t',
            'isnt',
            'mightn\'t',
            'mightnt',
            'mustn\'t',
            'mustnt',
            'needn\'t',
            'neednt',
            'neither',
            'never',
            'no',
            'none',
            'nope',
            'nor',
            'not',
            'nothing',
            'nowhere',
            'oughtn\'t',
            'oughtnt',
            'rarely',
            'seldom',
            'shan\'t',
            'shant',
            'shouldn\'t',
            'shouldnt',
            'uh-uh',
            'wasn\'t',
            'wasnt',
            'weren\'t',
            'werent',
            'without',
            'won\'t',
            'wont',
            'wouldn\'t',
            'wouldnt',
        }
        # E.g. Rating.VERY_GOOD -> 'very good'.
        self.rating_to_localized_text_map = {
            rating: rating.name.lower().replace('_', ' ') for rating in Rating
        }
class GermanSentiment(LanguageSentiment):
    """German ('de') sentiment lexicon.

    Fills the LanguageSentiment tables with German diminishers, intensifiers,
    polarity words, idioms, negations and explicit localized rating texts.
    """

    def __init__(self):
        super().__init__('de')
        self.diminishers = {
            'eher',
            'bisschen',
            'ein wenig',
            'einigermaßen',
            'etwas',
        }
        self.intensifiers = {
            'absolut',
            'besonders',
            'extrem',
            'sehr',
            'total',
            'voll',
            'vollkommen',
            'wirklich',
            'ziemlich',
            'zu',
        }
        self.negatives = {
            'ausbaufähig': Rating.BAD,
            'Bedenken': Rating.SOMEWHAT_BAD,
            'beschissen': Rating.VERY_BAD,
            'durchschnittlich': Rating.SOMEWHAT_BAD,
            'furchtbar': Rating.VERY_BAD,
            'ineffizient': Rating.BAD,
            'mau': Rating.SOMEWHAT_BAD,
            'mühsam': Rating.SOMEWHAT_BAD,
            'obwohl': Rating.BAD,
            'schal': Rating.BAD,
            'schlecht': Rating.BAD,
            'uncharmant': Rating.BAD,
            'uneffektiv': Rating.BAD,
            'ungut': Rating.BAD,
            'unschön': Rating.SOMEWHAT_BAD,
            'übel': Rating.BAD,
            'unterdurchschnittlich': Rating.BAD,
            'verbesserungsfähig': Rating.BAD,
            'verbesserungswürdig': Rating.BAD,
            'wünschenswert': Rating.BAD,
            # TODO: Add special logic for modals.
            'können': Rating.SOMEWHAT_BAD,
            'müssen': Rating.SOMEWHAT_BAD,
            'sollen': Rating.SOMEWHAT_BAD,
            'wär': Rating.SOMEWHAT_BAD,
            'wäre': Rating.SOMEWHAT_BAD,
            'wären': Rating.SOMEWHAT_BAD,
            'wärn': Rating.SOMEWHAT_BAD,
        }
        self.positives = {
            'ausgezeichnet': Rating.VERY_GOOD,
            'bestens': Rating.VERY_GOOD,
            'Charme': Rating.GOOD,
            'charmant': Rating.GOOD,
            'cool': Rating.GOOD,
            'entzückend': Rating.VERY_GOOD,
            'effektiv': Rating.GOOD,
            'effizient': Rating.GOOD,
            'exzellent': Rating.VERY_GOOD,
            'fantastisch': Rating.VERY_GOOD,
            'geil': Rating.GOOD,
            'gern': Rating.GOOD,
            'gut': Rating.GOOD,
            'kompetent': Rating.GOOD,
            'lässig': Rating.GOOD,
            'leiwand': Rating.GOOD,
            'nett': Rating.GOOD,
            'ok': Rating.SOMEWHAT_GOOD,
            'okay': Rating.SOMEWHAT_GOOD,
            'passen': Rating.GOOD,
            'perfekt': Rating.VERY_GOOD,
            'prima': Rating.VERY_GOOD,
            'reichhaltig': Rating.GOOD,
            'reizend': Rating.GOOD,
            'super': Rating.VERY_GOOD,
            'toll': Rating.VERY_GOOD,
            'top': Rating.VERY_GOOD,
            'überdurchschnittlich': Rating.SOMEWHAT_GOOD,
            'überzeugend': Rating.SOMEWHAT_GOOD,
            'vorzüglich': Rating.VERY_GOOD,
            'weiterempfehlen': Rating.VERY_GOOD,
            'wunderbar': Rating.VERY_GOOD,
            'zufrieden': Rating.SOMEWHAT_GOOD
        }
        self.idioms = {
            'gerne wieder': Rating.GOOD,
            'Gold wert': Rating.VERY_GOOD,
            'ist spitze': Rating.VERY_GOOD,
            'könnte etwas Liebe vertragen': Rating.SOMEWHAT_BAD,
            'luft nach oben': Rating.SOMEWHAT_GOOD,
            'vom Hocker gerissen': Rating.VERY_GOOD,
            'weiter so!': Rating.GOOD,
            'wenig berauschend': Rating.SOMEWHAT_BAD,
            'würde wieder': Rating.GOOD,
        }
        self.negations = {
            'kein',
            'keine',
            'keiner',
            'keines',
            'nicht',
        }
        # Explicit German texts instead of deriving them from Rating names.
        self.rating_to_localized_text_map = {
            Rating.VERY_GOOD: 'sehr gut',
            Rating.GOOD: 'gut',
            Rating.SOMEWHAT_GOOD: 'eher gut',
            Rating.SOMEWHAT_BAD: 'eher schlecht',
            Rating.BAD: 'schlecht',
            Rating.VERY_BAD: 'sehr schlecht',
        }
def language_sentiment_for(language_code: str) -> LanguageSentiment:
    """Return the sentiment lexicon matching *language_code*.

    The code may carry a region suffix (e.g. 'de_DE'); only the 2-letter base
    is used.  Unknown languages fall back to an empty LanguageSentiment and
    log a warning.

    Raises:
        ValueError: if the base code is not exactly 2 letters long.
    """
    base_code = language_code.split('_')[0]
    if len(base_code) != 2:
        raise ValueError(
            'language base code must be exactly 2 letters but is: %r (derived from %r)'
            % (base_code, language_code))
    if base_code == 'en':
        result = EnglishSentiment()
    elif base_code == 'de':
        result = GermanSentiment()
    else:
        # Bug fix: the %r placeholder previously had no argument, so emitting
        # this record raised a logging formatting error; pass the code lazily.
        _log.warning(
            'cannot find language sentiment for %r, using empty default sentiment',
            language_code)
        result = LanguageSentiment(base_code)
    return result
| 31.260309 | 113 | 0.480419 | 11,355 | 0.934568 | 0 | 0 | 0 | 0 | 0 | 0 | 3,334 | 0.274403 |
f0fe1edc9a050c8494f0a0d7ffeb0068529e480f | 3,053 | py | Python | sources/zmp_feedforwad_control.py | ekorudiawan/ZMP_Preview_Control_WPG | 7a0574e1ff991aafb9324eb82d0b9e66e96737f1 | [
"MIT"
] | 8 | 2020-04-08T19:36:26.000Z | 2022-01-29T18:13:58.000Z | sources/zmp_feedforwad_control.py | ekorudiawan/ZMP_Preview_Control_WPG | 7a0574e1ff991aafb9324eb82d0b9e66e96737f1 | [
"MIT"
] | null | null | null | sources/zmp_feedforwad_control.py | ekorudiawan/ZMP_Preview_Control_WPG | 7a0574e1ff991aafb9324eb82d0b9e66e96737f1 | [
"MIT"
] | 5 | 2020-10-19T12:10:52.000Z | 2022-02-17T03:03:47.000Z | # ZMP preview control simulation with Python
# By : Eko Rudiawan Jamzuri
# Email : eko.rudiawan@gmail.com
# This is an implementation of ZMP preview control with feedforward method
# This Python program will calculating CoM trajectory based on ZMP trajectory input
# The Gain parameter Gi, Gx, and Gd is imported from mat file from previous calculation in Matlab
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
def generate_zmp_trajectory(footstep, dt, t_step):
    """Expand a footstep plan into a sampled ZMP reference.

    Each *footstep* entry is an (x, y, ...) sequence whose x/y are repeated
    for int(t_step / dt) samples (the sample exactly on a step boundary still
    uses the previous step, matching the original behaviour).

    Returns:
        (zmp_x, zmp_y): lists of sampled reference coordinates.
    """
    samples_per_step = int(t_step / dt)
    zmp_x, zmp_y = [], []
    step_index = 0
    for sample in range(len(footstep) * samples_per_step):
        zmp_x.append(footstep[step_index][0])
        zmp_y.append(footstep[step_index][1])
        # Advance to the next footstep at each step boundary (after sampling).
        if sample != 0 and sample % samples_per_step == 0:
            step_index += 1
    return zmp_x, zmp_y
def calc_preview_control(zmp_x, zmp_y, dt, t_preview, t_calc, A_d, B_d, C_d, Gi, Gx, Gd):
    """Track a ZMP reference with a feedforward preview controller.

    Simulates the discrete-time model for int(t_calc / dt) steps, independently
    for the x and y axes.

    Args:
        zmp_x, zmp_y: sampled ZMP reference; must hold at least
            int(t_calc / dt) + int(t_preview / dt) samples.
        dt: sampling period.
        t_preview: preview horizon in seconds.
        t_calc: simulated duration in seconds.
        A_d, B_d, C_d: discrete-time state-space matrices.
        Gi, Gx, Gd: integral, state-feedback and preview gains.

    Returns:
        (com_x, com_y): lists of CoM positions, one per simulated sample.
    """
    x_x = np.array([[0],
                    [0],
                    [0]])
    x_y = np.array([[0],
                    [0],
                    [0]])
    com_x = []
    com_y = []
    preview_steps = int(t_preview / dt)
    for i in range(int(t_calc / dt)):
        # Current ZMP output and tracking error per axis.  np.asscalar() was
        # removed in NumPy 1.23; ndarray.item() is the supported equivalent.
        y_x = C_d.dot(x_x).item()
        y_y = C_d.dot(x_y).item()
        e_x = zmp_x[i] - y_x
        e_y = zmp_y[i] - y_y
        # Feedforward contribution over the upcoming preview window.
        preview_x = 0
        preview_y = 0
        for n, j in enumerate(range(i, i + preview_steps)):
            preview_x += Gd[0, n] * zmp_x[j]
            preview_y += Gd[0, n] * zmp_y[j]
        # Control input and state update.
        u_x = (-Gi * e_x - Gx.dot(x_x) - preview_x).item()
        u_y = (-Gi * e_y - Gx.dot(x_y) - preview_y).item()
        x_x = A_d.dot(x_x) + B_d * u_x
        x_y = A_d.dot(x_y) + B_d * u_y
        com_x.append(x_x[0, 0])
        com_y.append(x_y[0, 0])
    return com_x, com_y
def main():
    """Demo entry point: load gains from wpg_parameter.mat, build a ZMP
    reference from a fixed footstep plan, and plot ZMP vs. CoM trajectories.

    Fixes over the original: the duplicated `A_d = wpg_param['A_d']` assignment
    was removed and the removed `np.asscalar` helper replaced by `.item()`.
    """
    print("ZMP Preview Control Simulation")

    # Discrete model matrices and preview gains precomputed in Matlab.
    wpg_param = scipy.io.loadmat('wpg_parameter.mat')
    zc = wpg_param['zc'].item()  # loaded for reference; unused below
    dt = wpg_param['dt'].item()
    t_preview = wpg_param['t_preview'].item()
    A_d = wpg_param['A_d']
    B_d = wpg_param['B_d']
    C_d = wpg_param['C_d']
    Gi = wpg_param['Gi'].item()
    Gx = wpg_param['Gx']
    Gd = wpg_param['Gd']

    # Footstep plan: one (x, y, theta) entry per step.
    footstep = [[0.0, 0.0, 0.0],
                [0.2, 0.06, 0.0],
                [0.4, -0.06, 0.0],
                [0.6, 0.09, 0.0],
                [0.8, -0.03, 0.0],
                [1.3, 0.09, 0.0],
                [1.7, -0.03, 0.0],
                [1.9, 0.09, 0.0],
                [2.0, -0.03, 0.0]]
    t_step = 0.6

    zmp_x, zmp_y = generate_zmp_trajectory(footstep, dt, t_step)
    t_calc = 4
    com_x, com_y = calc_preview_control(zmp_x, zmp_y, dt, t_preview, t_calc,
                                        A_d, B_d, C_d, Gi, Gx, Gd)

    plt.title("ZMP VS CoM Trajectory")
    plt.plot(zmp_x, zmp_y, color='green')
    plt.plot(com_x, com_y, 'x', color='red')
    plt.show()
if __name__ == "__main__":
main() | 30.838384 | 103 | 0.523092 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 674 | 0.220766 |
f0fe907e32be0b4e9c4019c39f5c30195bfaa907 | 458 | py | Python | algorithms/sorting/counting_sort.py | abhishektyagi2912/python-dsa | 8f51f15a091ee76e00fb34abc232c23cb68440cb | [
"MIT"
] | 1 | 2021-05-02T05:43:34.000Z | 2021-05-02T05:43:34.000Z | algorithms/sorting/counting_sort.py | abhishektyagi2912/python-dsa | 8f51f15a091ee76e00fb34abc232c23cb68440cb | [
"MIT"
] | null | null | null | algorithms/sorting/counting_sort.py | abhishektyagi2912/python-dsa | 8f51f15a091ee76e00fb34abc232c23cb68440cb | [
"MIT"
def counting_sort(arr):
    """Sort a list of integers in place using counting sort.

    Runs in O(n + k) time where k is the value range.  An empty list is a
    no-op (the original crashed on min()/max() of an empty sequence).
    """
    if not arr:
        return
    # Find min and max values to size the counting array.
    min_value = min(arr)
    max_value = max(arr)
    counts = [0] * (max_value - min_value + 1)
    for num in arr:
        counts[num - min_value] += 1
    # Write each value back as many times as it was counted.
    index = 0
    for offset, count in enumerate(counts):
        value = min_value + offset
        for _ in range(count):
            arr[index] = value
            index += 1
# Quick demo: counting_sort() sorts in place, so print the mutated list.
test_array = [3, 3, 2, 6, 4, 7, 9, 7, 8]
counting_sort(test_array)
print(test_array)
| 21.809524 | 46 | 0.591703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.054585 |
0b0149c561d9b693e298cce2051643d984729630 | 154 | py | Python | Python/8 kyu/Grasshopper - Terminal Game Move Function/solution.py | Hsins/CodeWars | 7e7b912fdd0647c0af381d8b566408e383ea5df8 | [
"MIT"
] | 1 | 2020-01-09T21:47:56.000Z | 2020-01-09T21:47:56.000Z | Python/8 kyu/Grasshopper - Terminal Game Move Function/solution.py | Hsins/CodeWars | 7e7b912fdd0647c0af381d8b566408e383ea5df8 | [
"MIT"
] | 1 | 2020-01-20T12:39:03.000Z | 2020-01-20T12:39:03.000Z | Python/8 kyu/Grasshopper - Terminal Game Move Function/solution.py | Hsins/CodeWars | 7e7b912fdd0647c0af381d8b566408e383ea5df8 | [
"MIT"
] | null | null | null | # [8 kyu] Grasshopper - Terminal Game Move Function
#
# Author: Hsins
# Date: 2019/12/20
def move(position, roll):
    """Return the board position reached after a roll: each pip advances two squares."""
    return position + roll * 2
| 17.111111 | 51 | 0.649351 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.590909 |
0b04026c6e3cfd8fd000afa2890bd940e623cf60 | 2,501 | py | Python | server/app/__init__.py | trungkienbkhn/nmt-wizard | dcf1f20d92e0f9788a811e79093ac0d917ae9e4e | [
"MIT"
] | null | null | null | server/app/__init__.py | trungkienbkhn/nmt-wizard | dcf1f20d92e0f9788a811e79093ac0d917ae9e4e | [
"MIT"
] | null | null | null | server/app/__init__.py | trungkienbkhn/nmt-wizard | dcf1f20d92e0f9788a811e79093ac0d917ae9e4e | [
"MIT"
] | null | null | null | import os
import logging.config
import redis
from flask import Flask
from nmtwizard import common
from nmtwizard import configuration as config
from utils.database_utils import DatabaseUtils
from flask_session import Session
# Semantic version of this server; append_version() below can add suffixes.
VERSION = "1.12.0"

app = Flask(__name__)
# NOTE(review): presumably a per-process request counter seed used by the
# request-handling code — confirm usage in app.routes.
app.request_id = 1
def get_log_handler():
    """Create a StreamHandler emitting 'time - logger - level - message' lines."""
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    )
    return handler
def get_other_config_flask(self, keys=None, fallback=None):
    """Walk self.other_config along *keys* and return the nested value.

    Returns a shallow copy of the whole config when *keys* is empty/None,
    and *fallback* when *keys* is not a list or the path does not exist.
    """
    if keys is None:
        keys = []
    if not isinstance(keys, list):
        return fallback
    node = self.other_config.copy()
    for key in keys:
        if key not in node:
            return fallback
        node = node[key]
    return node
def append_version(version):
    """Append *version* to the module-level VERSION string, ':'-separated."""
    global VERSION
    VERSION = "%s:%s" % (VERSION, version)
def get_version():
    """Return the current service version string (see VERSION / append_version)."""
    return VERSION
# ---- Module initialisation: config, databases, logging, Flask wiring ----
system_config = config.get_system_config()
mongo_client = DatabaseUtils.get_mongo_client(system_config)
# Two Redis clients: one decoding responses, one returning raw bytes.
redis_db = DatabaseUtils.get_redis_client(system_config)
redis_db_without_decode = DatabaseUtils.get_redis_client(system_config, decode_response=False)
base_config = config.process_base_config(mongo_client)
# Route both the Flask app and nmtwizard logging through the shared handler.
log_handler = get_log_handler()
app.logger.addHandler(log_handler)
common.add_log_handler(log_handler)
flask_config = system_config["flask"]
redis_uri = DatabaseUtils.get_redis_uri(system_config)
# flask_session stores server-side sessions in Redis.
flask_config['SESSION_REDIS'] = redis.from_url(redis_uri)
app.config.update(flask_config)
app.other_config = system_config
# Attach the config lookup helper as a method on the Flask class itself.
app.__class__.get_other_config = get_other_config_flask
app.logger.setLevel(logging.getLevelName(app.get_other_config(["default", "log_level"], fallback='ERROR')))
sess = Session()
sess.init_app(app)
# taskfile_dir must be configured and must be an existing directory.
assert system_config["default"]["taskfile_dir"], "missing taskfile_dir from settings.yaml"
taskfile_dir = system_config["default"]["taskfile_dir"]
assert os.path.isdir(taskfile_dir), "taskfile_dir (%s) must be a directory" % taskfile_dir
# Optional model-push input directory; relative paths resolve against the
# directory that contains the system configuration file.
input_dir = app.get_other_config(["push_model", "inputDir"], fallback=None)
if input_dir is not None:
    if not os.path.isabs(input_dir):
        input_dir = os.path.join(os.path.dirname(config.system_config_file), input_dir)
        app.other_config['push_model']['inputDir'] = input_dir
    assert os.path.isdir(input_dir), "Invalid input directory used for deploying model: %s" % input_dir
# NOTE(review): imported at the bottom — presumably to avoid a circular import
# with app.routes; confirm before reordering.
from app import routes
| 29.423529 | 107 | 0.754498 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 338 | 0.135146 |
0b04b5673e663cf9c8fc8da92d1c7bd8d879657a | 1,152 | py | Python | Neural_Networks/Multilayer/Neural_Multi_pybrain.py | jeffreire/Deep_Learning | 960142080dc63ea103b326ea3d6d17bd44ae0f51 | [
"MIT"
] | null | null | null | Neural_Networks/Multilayer/Neural_Multi_pybrain.py | jeffreire/Deep_Learning | 960142080dc63ea103b326ea3d6d17bd44ae0f51 | [
"MIT"
] | null | null | null | Neural_Networks/Multilayer/Neural_Multi_pybrain.py | jeffreire/Deep_Learning | 960142080dc63ea103b326ea3d6d17bd44ae0f51 | [
"MIT"
] | null | null | null | # from pybrain.datasets import SupervisedDataSet
# from pybrain.supervised.trainers import BackpropTrainer
# from pybrain.structure.modules import SoftmaxLayer
# from pybrain.structure.modules import SigmoidLayer
# from pybrain.tools.s import buildNetwork
# (x,y,z) = x é a quantidade de camadas de entrada, y é a quantidade de camada oculta, z é a quantidade de camada de saida
# rede = buildNetwork(3,3,1)
# (x,y) sao os previsores, dois atributos e uma class
# base = SupervisedDataSet(2, 1)
# queremos dizer que a entrada sera de (0,0) e queremos obter uma saida de (0, )
# base.addSample((0,0),(0, ))
# base.addSample((0,1),(1, ))
# base.addSample((1,0),(0, ))
# base.addSample((1,1),(0, ))
# print(base['input'])
# treinamos a nossa rede passando por parametro rede, basa, taxa de aprendizagem e momento
# treinamento = BackpropTrainer(rede, dataset = base, learningrate = 0.01,
# momentum = 0.06)
# o for é quantas epocas iremos calcular os nossos pesos
# for i in range(1, 30000):
# erro = treinamento.train()
# if i % 1000 == 0:
# print("Erro: %s" % erro)
| 39.724138 | 123 | 0.664931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,115 | 0.964533 |
0b058e034e1693406e04d820118fb4291f6395b3 | 4,213 | py | Python | aries_cloudagent/protocols/present_proof/dif/tests/test_pres_request.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 4 | 2019-07-01T13:12:50.000Z | 2019-07-02T20:01:37.000Z | aries_cloudagent/protocols/present_proof/dif/tests/test_pres_request.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 51 | 2021-01-12T05:50:50.000Z | 2022-03-25T06:03:13.000Z | aries_cloudagent/protocols/present_proof/dif/tests/test_pres_request.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 12 | 2019-06-24T22:17:44.000Z | 2019-07-02T19:49:31.000Z | from unittest import TestCase
from ..pres_request_schema import DIFProofRequestSchema
class TestPresRequestSchema(TestCase):
    """DIF Presentation Request Test"""

    @staticmethod
    def _request_with_limit_disclosure(limit_disclosure):
        """Build the test DIF proof request, varying only constraints.limit_disclosure."""
        return {
            "options": {
                "challenge": "3fa85f64-5717-4562-b3fc-2c963f66afa7",
                "domain": "4jt78h47fh47",
            },
            "presentation_definition": {
                "id": "32f54163-7166-48f1-93d8-ff217bdb0654",
                "submission_requirements": [
                    {
                        "name": "Citizenship Information",
                        "rule": "pick",
                        "min": 1,
                        "from": "A",
                    }
                ],
                "input_descriptors": [
                    {
                        "id": "citizenship_input_1",
                        "name": "EU Driver's License",
                        "group": ["A"],
                        "schema": [
                            {
                                "uri": "https://www.w3.org/2018/credentials#VerifiableCredential"
                            }
                        ],
                        "constraints": {
                            "limit_disclosure": limit_disclosure,
                            "fields": [
                                {
                                    "path": ["$.credentialSubject.givenName"],
                                    "purpose": "The claim must be from one of the specified issuers",
                                    "filter": {
                                        "type": "string",
                                        "enum": ["JOHN", "CAI"],
                                    },
                                }
                            ],
                        },
                    }
                ],
            },
        }

    def test_limit_disclosure(self):
        """The loaded schema must surface limit_disclosure exactly as supplied."""
        for expected in ("required", "preferred"):
            pres_request = DIFProofRequestSchema().load(
                self._request_with_limit_disclosure(expected)
            )
            limit_disclosure = (
                pres_request.presentation_definition.input_descriptors[0]
                .constraint.limit_disclosure
            )
            assert limit_disclosure == expected
| 38.651376 | 101 | 0.350344 | 4,123 | 0.978638 | 0 | 0 | 0 | 0 | 0 | 0 | 1,193 | 0.283171 |
0b066a126354f8b346cb7cd4adb064a1438b7f2c | 1,190 | py | Python | datadog/api/screenboards.py | gust/datadogpy | 94f7dfeed87849a615916a5f171b25400b7c6cc5 | [
"BSD-3-Clause"
] | null | null | null | datadog/api/screenboards.py | gust/datadogpy | 94f7dfeed87849a615916a5f171b25400b7c6cc5 | [
"BSD-3-Clause"
] | null | null | null | datadog/api/screenboards.py | gust/datadogpy | 94f7dfeed87849a615916a5f171b25400b7c6cc5 | [
"BSD-3-Clause"
] | null | null | null | from datadog.api.resources import GetableAPIResource, CreateableAPIResource, \
UpdatableAPIResource, DeletableAPIResource, ActionAPIResource, ListableAPIResource
class Screenboard(GetableAPIResource, CreateableAPIResource,
                  UpdatableAPIResource, DeletableAPIResource,
                  ActionAPIResource, ListableAPIResource):
    """Wrapper around the Datadog Screenboard HTTP API.

    CRUD and list behaviour comes from the generic resource mixins; this
    class only adds the share/revoke actions specific to screenboards.
    """

    _class_name = 'screen'
    _class_url = '/screen'
    _json_name = 'board'

    @classmethod
    def share(cls, board_id):
        """Enable public sharing of the screenboard with the given id.

        :param board_id: screenboard to share
        :type board_id: id
        :returns: Dictionary representing the API's JSON response
        """
        endpoint = 'screen/share'
        return super(Screenboard, cls)._trigger_action('POST', endpoint, board_id)

    @classmethod
    def revoke(cls, board_id):
        """Revoke public sharing of the screenboard with the given id.

        :param board_id: screenboard to revoke
        :type board_id: id
        :returns: Dictionary representing the API's JSON response
        """
        endpoint = 'screen/share'
        return super(Screenboard, cls)._trigger_action('DELETE', endpoint, board_id)
| 31.315789 | 90 | 0.668908 | 1,021 | 0.857983 | 0 | 0 | 690 | 0.579832 | 0 | 0 | 527 | 0.442857 |
0b07019b4136badacad9043f3524e2c6ae411079 | 10,525 | py | Python | models/TCNet.py | sjenni/LCI | 5313fe9eb43fce8f0352828094369bc153d98eb1 | [
"MIT"
] | 13 | 2020-05-01T12:55:20.000Z | 2021-09-30T14:44:33.000Z | models/TCNet.py | sjenni/LCI | 5313fe9eb43fce8f0352828094369bc153d98eb1 | [
"MIT"
] | 1 | 2020-06-25T23:19:07.000Z | 2020-06-25T23:19:07.000Z | models/TCNet.py | sjenni/LCI | 5313fe9eb43fce8f0352828094369bc153d98eb1 | [
"MIT"
] | 8 | 2020-05-25T19:13:25.000Z | 2021-09-24T05:34:36.000Z | from .alexnet import alexnet_V2
import tensorflow.compat.v1 as tf
import tensorflow.contrib.slim as slim
from utils import montage_tf
from .lci_nets import patch_inpainter, patch_discriminator
import tensorflow.contrib as contrib
# Average pooling params for imagenet linear classifier experiments
AVG_POOL_PARAMS = {'conv_1': (6, 6, 'SAME'), 'conv_2': (4, 4, 'VALID'), 'conv_3': (3, 3, 'SAME'),
'conv_4': (3, 3, 'SAME'), 'conv_5': (2, 2, 'VALID')}
class TRCNet:
    """Self-supervised transformation-recognition network with LCI.

    Bundles the encoder/classifier (``alexnet_V2``), the LCI patch
    inpainter, and the patch discriminator, plus the associated loss
    terms.  The TensorFlow graph is built lazily by the methods below.
    """

    def __init__(self, batch_size, im_shape, n_tr_classes=6, lci_patch_sz=64, lci_crop_sz=80, ae_dim=48, n_layers_lci=5,
                 tag='default', feats_ids=None, feat_pool='AVG', enc_params=None):
        # Avoid the mutable-default pitfall: enc_params defaults to a fresh dict.
        if enc_params is None:
            enc_params = {}
        self.name = 'TRNet_{}'.format(tag)
        self.n_tr_classes = n_tr_classes
        self.batch_size = batch_size
        self.im_shape = im_shape
        # Feature layer ids used for the linear-classifier evaluation
        # (keys of AVG_POOL_PARAMS, e.g. 'conv_1'..'conv_5').
        self.feats_IDs = feats_ids
        self.feat_pool = feat_pool
        self.enc_params = enc_params
        self.lci_patch_sz = lci_patch_sz
        self.lci_crop_sz = lci_crop_sz
        self.num_LCI_layers = n_layers_lci
        self.ae_model = patch_inpainter
        self.class_model = alexnet_V2
        self.disc_model = patch_discriminator
        self.ae_dim = ae_dim

    def lci(self, img, enc_scope, dec_scope):
        """Apply Local Context Inpainting (LCI) to a batch of images.

        Returns (inpainted patch, autoencoded patch, erase mask, full mask,
        original patch, image with inpainted patch pasted back, image with
        autoencoded patch pasted back).
        """
        # Extract random patch
        patch, jit_x, jit_y = random_crop(img, crop_sz=(self.lci_crop_sz, self.lci_crop_sz))
        # Erase the center of the patch
        patch_erased, mask_erase = patch_erase(patch, patch_sz=(self.lci_patch_sz, self.lci_patch_sz))
        tf.summary.image('imgs/patch_erased', montage_tf(patch_erased, 4, 8), max_outputs=1)
        # Perform inpainting/autoencoding
        # One forward pass handles both the intact and the erased patch.
        net_in = tf.concat([patch, patch_erased], 0)
        net_out, _ = self.ae_model(net_in, depth=self.ae_dim, num_layers=self.num_LCI_layers,
                                   encoder_scope=enc_scope, decoder_scope=dec_scope)
        patch_ae, patch_ip = tf.split(net_out, 2)
        # Paste inpainted patches
        pasted_patch_inpaint, patch_mask = paste_crop(img, patch_ip, jit_x, jit_y)
        pasted_patch_ae, _ = paste_crop(img, patch_ae, jit_x, jit_y)
        img_lci = img * (1. - patch_mask) + pasted_patch_inpaint
        img_patchae = img * (1. - patch_mask) + pasted_patch_ae
        return patch_ip, patch_ae, mask_erase, tf.ones_like(mask_erase), patch, img_lci, img_patchae

    def ssl_net(self, net, reuse=None, training=True, scope='encoder'):
        """Run the classifier backbone; returns (predictions, feature dict)."""
        return self.class_model(net, self.n_tr_classes, reuse, training, scope, **self.enc_params)

    def net(self, img, reuse=tf.AUTO_REUSE, training=True):
        """Return only the transformation-class predictions for `img`."""
        preds, _ = self.ssl_net(img, reuse, training, scope='features')
        return preds

    def linear_classifiers(self, img, num_classes, training, reuse=None):
        """Attach one linear classifier per feature layer in `feats_IDs`.

        The backbone is run frozen (training=False); each selected feature
        map is optionally average-pooled before the linear head.
        """
        _, feats = self.ssl_net(img, training=False, scope='features')
        preds_list = []
        with tf.variable_scope('classifier', reuse=reuse):
            for feats_id in self.feats_IDs:
                p = AVG_POOL_PARAMS[feats_id]
                if self.feat_pool == 'AVG':
                    class_in = slim.avg_pool2d(feats[feats_id], p[0], p[1], p[2])
                elif self.feat_pool == 'None':
                    class_in = feats[feats_id]
                # NOTE(review): any other feat_pool value leaves class_in
                # unbound from the previous iteration (or raises on the first).
                print('{} linear classifier input shape: {}'.format(feats_id, class_in.get_shape().as_list()))
                preds = linear_classifier(class_in, num_classes, reuse, training, scope=feats_id, wd=5e-4)
                preds_list.append(preds)
        return preds_list

    def patch_disc(self, input, update_collection, disc_scope):
        """Discriminator on (real, fake) patch pairs stacked channel-wise."""
        in_1, in_2 = tf.split(input, 2)
        input = tf.concat([in_1, in_2], -1)
        model, _ = self.disc_model(input,
                                   update_collection=update_collection,
                                   num_layers=self.num_LCI_layers - 1,
                                   scope=disc_scope)
        return model

    def linear_class_loss(self, scope, preds, labels):
        """Sum of softmax-CE losses over all linear classifiers + weight decay."""
        total_loss = 0.
        for pred, f_id in zip(preds, self.feats_IDs):
            loss = tf.losses.softmax_cross_entropy(labels, pred, scope=scope)
            tf.summary.scalar('losses/SCE_{}'.format(f_id), loss)
            total_loss += loss
            # Compute accuracy
            predictions = tf.argmax(pred, 1)
            tf.summary.scalar('accuracy/train_accuracy_{}'.format(f_id),
                              slim.metrics.accuracy(predictions, tf.argmax(labels, 1)))
        loss_wd = tf.add_n(tf.losses.get_regularization_losses(), name='loss_wd')
        tf.summary.scalar('losses/loss_wd', loss_wd)
        total_loss = total_loss + loss_wd
        return total_loss

    def inpainter_loss(self, preads_fake, imgs, recs_erase, mask_erase, recs_orig, mask_orig):
        """Generator loss: hinge fake term + masked MSE reconstruction terms."""
        loss_fake = -tf.reduce_mean(preads_fake)
        tf.summary.scalar('losses/generator_fake_loss', loss_fake)
        loss_ae_erase = tf.losses.mean_squared_error(imgs, recs_erase, weights=50. * mask_erase)
        loss_ae_orig = tf.losses.mean_squared_error(imgs, recs_orig, weights=50. * mask_orig)
        tf.summary.scalar('losses/loss_ae_erase', loss_ae_erase)
        tf.summary.scalar('losses/loss_ae_orig', loss_ae_orig)
        return loss_fake + loss_ae_erase + loss_ae_orig

    def discriminator_loss(self, preds_fake, preds_real):
        """Standard hinge discriminator loss."""
        loss_real = tf.reduce_mean(tf.nn.relu(1. - preds_real))
        loss_fake = tf.reduce_mean(tf.nn.relu(1. + preds_fake))
        loss = loss_real + loss_fake
        tf.summary.scalar('losses/disc_fake_loss', loss_fake)
        tf.summary.scalar('losses/disc_real_loss', loss_real)
        tf.summary.scalar('losses/disc_total_loss', loss)
        return loss

    def loss_ssl(self, preds, labels):
        """Softmax-CE over transformation classes with per-section accuracies.

        The slicing below assumes the batch is laid out as
        [real_noae | real_ae | lci | rot | warp] in multiples of
        ``self.batch_size`` — TODO confirm against the training pipeline.
        """
        # Define the loss
        loss = tf.losses.softmax_cross_entropy(labels, preds)
        tf.summary.scalar('losses/SCE', loss)
        # Compute accuracy
        predictions = tf.argmax(preds, 1)
        tf.summary.scalar('accuracy/train_accuracy',
                          slim.metrics.accuracy(predictions, tf.argmax(labels, 1)))
        bs = self.batch_size
        tf.summary.scalar('accuracy/train_accuracy_real_noae',
                          slim.metrics.accuracy(predictions[:bs // 2], tf.argmax(labels[:bs // 2], 1)))
        tf.summary.scalar('accuracy/train_accuracy_real_ae',
                          slim.metrics.accuracy(predictions[bs // 2:bs], tf.argmax(labels[bs // 2:bs], 1)))
        tf.summary.scalar('accuracy/train_accuracy_lci',
                          slim.metrics.accuracy(predictions[bs:2 * bs], tf.argmax(labels[bs:2 * bs], 1)))
        tf.summary.scalar('accuracy/train_accuracy_rot',
                          slim.metrics.accuracy(predictions[2 * bs:-bs], tf.argmax(labels[2 * bs:-bs], 1)))
        tf.summary.scalar('accuracy/train_accuracy_warp',
                          slim.metrics.accuracy(predictions[-bs:], tf.argmax(labels[-bs:], 1)))
        return loss

    def loss_lci_adv(self, preds, labels_tf):
        """Adversarial softmax-CE loss for the LCI branch."""
        loss = tf.losses.softmax_cross_entropy(labels_tf, preds)
        return loss
def linear_classifier(net, num_out, reuse=None, training=True, scope='classifier', wd=5e-4):
    """Linear evaluation head: batch-norm (no affine), flatten, single FC layer.

    Returns raw logits of width `num_out`; no activation or normalizer is
    applied on the output layer.
    """
    with tf.variable_scope(scope, reuse=reuse):
        normed = slim.batch_norm(net, decay=0.975, is_training=training, fused=True,
                                 center=False, scale=False)
        flattened = slim.flatten(normed)
        logits = slim.fully_connected(
            flattened, num_out,
            weights_initializer=contrib.layers.variance_scaling_initializer(),
            weights_regularizer=slim.l2_regularizer(wd),
            activation_fn=None, normalizer_fn=None)
    return logits
def patch_erase(img, patch_sz=(16, 16)):
    """Erase a centered `patch_sz` region of `img`, filling it with noise.

    Returns (erased image, erase mask) where the mask is 1 inside the
    erased center region and 0 elsewhere.
    """
    shape = img.get_shape()
    pad_h = shape[1] - patch_sz[0]
    pad_w = shape[2] - patch_sz[1]
    center_mask = tf.ones([shape[0], patch_sz[0], patch_sz[1], shape[3]])
    center_mask = tf.pad(center_mask,
                         [[0, 0], [pad_h // 2, pad_h // 2], [pad_w // 2, pad_w // 2], [0, 0]])
    noise = 0.1 * center_mask * tf.random_normal(shape)
    erased = img * (1. - center_mask) + noise
    return erased, 1. - center_mask
def random_crop(img, crop_sz=(20, 20)):
    """Randomly jitter each image and take a centered `crop_sz` crop.

    Returns (cropped images, jit_x, jit_y) where the jitters are the
    per-image translations (float32, shape [bsz, 8]) later reused by
    `paste_crop` to undo the shift.
    """
    im_shape = img.get_shape().as_list()
    bsz = im_shape[0]
    # Maximum jitter so the crop window stays inside the image.
    dx = (im_shape[1] - crop_sz[0]) // 2
    dy = (im_shape[2] - crop_sz[1]) // 2
    # 8-element rows are the per-image parameter vectors expected by
    # contrib.image.transform; `base` is the identity transform.
    base = tf.constant(
        [1, 0, 0, 0, 1, 0, 0, 0], shape=[1, 8], dtype=tf.float32
    )
    base = tf.tile(base, [bsz, 1])
    # mask_x / mask_y select the translation slots (indices 2 and 5,
    # presumably x- and y-translation of the affine transform — confirm
    # against the contrib.image.transform parameter layout).
    mask_x = tf.constant(
        [0, 0, 1, 0, 0, 0, 0, 0], shape=[1, 8], dtype=tf.float32
    )
    mask_x = tf.tile(mask_x, [bsz, 1])
    mask_y = tf.constant(
        [0, 0, 0, 0, 0, 1, 0, 0], shape=[1, 8], dtype=tf.float32
    )
    mask_y = tf.tile(mask_y, [bsz, 1])
    # Integer jitters drawn in (-d, d), then cast for the float transform.
    jit_x = tf.random_uniform([bsz, 8], minval=-dx + 1, maxval=dx, dtype=tf.int32)
    jit_x = tf.cast(jit_x, tf.float32)
    jit_y = tf.random_uniform([bsz, 8], minval=-dy + 1, maxval=dy, dtype=tf.int32)
    jit_y = tf.cast(jit_y, tf.float32)
    xforms = base + jit_x * mask_x + jit_y * mask_y
    processed_data = contrib.image.transform(
        images=img, transforms=xforms
    )
    # Fixed center crop of the translated image == random crop of the original.
    cropped_data = processed_data[:, dx:dx + crop_sz[0], dy:dy + crop_sz[1], :]
    return cropped_data, jit_x, jit_y
def paste_crop(img, crop, jit_x, jit_y):
    """Paste `crop` back into the full-image frame, undoing the jitter.

    Inverse of `random_crop`: pads the crop to the image size, then applies
    the negated translations.  Returns (pasted crop, paste mask) where the
    mask is 1 where the crop was placed.
    """
    im_shape = tf.shape(img)
    crop_shape = tf.shape(crop)
    bsz = im_shape[0]
    # Asymmetric padding handles odd size differences.
    dx_1 = (im_shape[1] - crop_shape[1]) // 2
    dy_1 = (im_shape[2] - crop_shape[2]) // 2
    dx_2 = im_shape[1] - crop_shape[1] - dx_1
    dy_2 = im_shape[2] - crop_shape[2] - dy_1
    patch_mask = tf.ones_like(crop)
    crop = tf.pad(crop, [[0, 0], [dx_1, dx_2], [dy_1, dy_2], [0, 0]])
    patch_mask = tf.pad(patch_mask, [[0, 0], [dx_1, dx_2], [dy_1, dy_2], [0, 0]])
    # Same 8-parameter transform layout as in random_crop; identity base.
    base = tf.constant(
        [1, 0, 0, 0, 1, 0, 0, 0], shape=[1, 8], dtype=tf.float32
    )
    base = tf.tile(base, [bsz, 1])
    mask_x = tf.constant(
        [0, 0, 1, 0, 0, 0, 0, 0], shape=[1, 8], dtype=tf.float32
    )
    mask_x = tf.tile(mask_x, [bsz, 1])
    mask_y = tf.constant(
        [0, 0, 0, 0, 0, 1, 0, 0], shape=[1, 8], dtype=tf.float32
    )
    mask_y = tf.tile(mask_y, [bsz, 1])
    # Negated jitters: shift the padded crop back to its original location.
    xforms = base - jit_x * mask_x - jit_y * mask_y
    transformed_crop = contrib.image.transform(
        images=crop, transforms=xforms
    )
    transformed_mask = contrib.image.transform(
        images=patch_mask, transforms=xforms
    )
    return transformed_crop, transformed_mask
| 41.113281 | 120 | 0.618622 | 6,678 | 0.634489 | 0 | 0 | 0 | 0 | 0 | 0 | 850 | 0.08076 |
0b07d80b933132232b4df2d891725e4a81593fd0 | 1,486 | py | Python | dbs/dal/Whiteport.py | xinghejd/opencanary_web | 0d7eec70e154ad2d172175ab3bff48ba8999d5d9 | [
"BSD-3-Clause"
] | 633 | 2018-09-04T03:55:10.000Z | 2022-03-18T07:21:01.000Z | dbs/dal/Whiteport.py | fankeji/opencanary_web | 71156ef8d7f86e2bb380a69f584de9603bfba93a | [
"BSD-3-Clause"
] | 51 | 2018-12-27T15:46:33.000Z | 2022-03-03T03:16:16.000Z | dbs/dal/Whiteport.py | fankeji/opencanary_web | 71156ef8d7f86e2bb380a69f584de9603bfba93a | [
"BSD-3-Clause"
] | 233 | 2018-09-04T05:59:50.000Z | 2022-03-10T06:00:30.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Author: pirogue
Purpose: 白名单端口表操作
Site: http://pirogue.org
Created: 2018-08-03 17:32:54
"""
from dbs.initdb import DBSession
from dbs.models.Whiteport import Whiteport
from sqlalchemy import desc,asc
from sqlalchemy.exc import InvalidRequestError
# import sys
# sys.path.append("..")
class WhitePort:
    """Create/read/delete helpers for the white-listed port table."""

    def __init__(self):
        self.session = DBSession

    def select_white_port(self):
        """Return all white-listed destination ports, or None on failure."""
        try:
            return self.session.query(Whiteport.dst_port).all()
        except InvalidRequestError:
            self.session.rollback()
        except Exception as exc:
            print(exc)
        finally:
            self.session.close()

    def insert_white_port(self, dst_port):
        """Add (merge) one destination port into the white list."""
        try:
            record = Whiteport(dst_port=dst_port)
            self.session.merge(record)
            self.session.commit()
        except InvalidRequestError:
            self.session.rollback()
        except Exception as exc:
            print(exc)
        finally:
            self.session.close()

    def delete_white_port(self):
        """Remove every row from the white-listed port table."""
        try:
            self.session.query(Whiteport).delete()
            self.session.commit()
        except InvalidRequestError:
            self.session.rollback()
        except Exception as exc:
            print(exc)
        finally:
            self.session.close()
0b080bc190d330770740d83db0e379029d019b52 | 789 | py | Python | anyconfig/singleton.py | edyan/python-anyconfig | d237909a6e4848737539b80951b710238c72052f | [
"MIT"
] | null | null | null | anyconfig/singleton.py | edyan/python-anyconfig | d237909a6e4848737539b80951b710238c72052f | [
"MIT"
] | null | null | null | anyconfig/singleton.py | edyan/python-anyconfig | d237909a6e4848737539b80951b710238c72052f | [
"MIT"
] | null | null | null | #
# Copyright (C) 2018 Satoru SATOH <ssato @ redhat.com>
# License: MIT
#
r"""Singleton class
.. versionadded:: 0.9.8
- Add to make a kind of manager instancne later to manage plugins.
"""
from __future__ import absolute_import
import threading
class Singleton(object):
    """Thread-safe singleton implemented via ``__new__``.

    .. note:: Inherited classes are equated with the base class: the single
       instance is stored on ``Singleton`` itself (the attribute name is
       mangled to ``_Singleton__instance``), so subclasses share it.
    """
    __instance = None
    __lock = threading.RLock()

    def __new__(cls):
        # Double-checked locking: the fast path skips the lock entirely.
        if cls.__instance is None:
            # BUGFIX: the original acquired the lock manually but only
            # released it inside the creation branch, so a thread that lost
            # the race (inner check False) kept the lock forever.  The
            # `with` statement releases it on every path.
            with cls.__lock:
                if cls.__instance is None:
                    cls.__instance = object.__new__(cls)
        return cls.__instance
# vim:sw=4:ts=4:et:
| 22.542857 | 73 | 0.6109 | 515 | 0.652725 | 0 | 0 | 0 | 0 | 0 | 0 | 336 | 0.425856 |
0b0815c2d6a9cafe4678f5aa35ab346506fdf7c6 | 4,937 | py | Python | geophires_simulation/main.py | tuw-eeg/GEOPHIRES-simulation | 14343cefd7c373f240e610a5a5a1143457a4f8d4 | [
"Apache-2.0"
] | null | null | null | geophires_simulation/main.py | tuw-eeg/GEOPHIRES-simulation | 14343cefd7c373f240e610a5a5a1143457a4f8d4 | [
"Apache-2.0"
] | null | null | null | geophires_simulation/main.py | tuw-eeg/GEOPHIRES-simulation | 14343cefd7c373f240e610a5a5a1143457a4f8d4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 9 10:37:24 2020
@author: fallahnejad
Important Note: all the costs provided in EUR should be multiplied by EUR2USD factor before providing to the GEOPHIRES
"""
import os
import re
import shutil
import pandas as pd
from .cm.geophires.GEOPHIRESv2 import geophires
def annuity(r, n):
    """Return the annuity (capital recovery) factor for rate *r* over *n* periods.

    Computes ``r * q**n / (q**n - 1)`` with ``q = 1 + r``.  For ``r == 0``
    the closed form is undefined (0/0 — the original raised
    ZeroDivisionError); the limit ``1/n`` (straight-line repayment) is
    returned instead.

    :param r: periodic interest rate (e.g. 0.05 for 5%)
    :param n: number of periods
    :return: annuity factor as a float
    """
    if r == 0:
        # Zero-interest limit of the closed-form expression.
        return 1.0 / n
    q = 1 + r
    return r * q ** n / (q ** n - 1)
def payload_eur_to_usd(payload):
    """Convert all monetary entries of *payload* from EUR to USD in place.

    An entry is converted when its key contains "cost" (unless it is an
    "adjustment factor", which is dimensionless) or contains the standalone
    word "ratio".  ``None`` values are left untouched.  Returns the same,
    mutated, payload dict.
    """
    rate = payload['EUR to USD Rate']
    for key, value in payload.items():
        if value is None:
            continue
        lowered = key.lower()
        is_plain_cost = "cost" in lowered and 'adjustment factor' not in lowered
        # Word-level match avoids false positives such as '*Exploration*'.
        is_ratio = any(word == 'ratio' for word in lowered.split(' '))
        if is_plain_cost or is_ratio:
            payload[key] = value * rate
    return payload
def create_input_text(payload, default_input_txt_file, field_specific_parameters, reservoir_outputfactors_csv_file,
                      user_input_txt_file,
                      reservoir_outputfactor_txt_file):
    """Build the GEOPHIRES input file and the reservoir-output profile.

    Merges field-specific parameters (selected by payload['Field Name'])
    into the payload, appends all payload entries to a copy of the default
    input file, and writes a time/temperature profile scaled from the
    thermal-breakthrough factor table.

    :return: starting production temperature [deg C] at reservoir depth
    """
    df = pd.read_csv(field_specific_parameters, index_col='parameter')
    # Keep only the column of the selected field; its rows override/extend payload.
    df = df.loc[:, payload['Field Name']]
    for index in df.index:
        payload[index] = df.loc[index]
    payload['Reservoir Output File Name'] = reservoir_outputfactor_txt_file
    breakthrough_year = payload['Thermal Breakthrough Time']
    reservoir_depth = payload['Reservoir Depth']
    grad1 = payload['Gradient 1']
    grad2 = payload['Gradient 2']
    thickness1 = payload['Thickness 1']
    surface_temperature = payload['Surface Temperature']
    df = pd.read_csv(reservoir_outputfactors_csv_file)
    time_steps = df['time_steps'].values
    # Column choice encodes the breakthrough year, e.g. 'breakthrough_30'.
    factors = df['breakthrough_' + str(breakthrough_year)].values
    shutil.copy(default_input_txt_file, user_input_txt_file)
    with open(user_input_txt_file, "a") as uitf:
        uitf.write('\n')
        # GEOPHIRES expects 'key,value,' lines appended to the defaults.
        for key in payload:
            uitf.write(str(key) + ',' + str(payload[key]) + ',' + '\n')
        uitf.close()  # NOTE(review): redundant inside the `with` block
    if payload['Number of Segments'] == 2:
        # Two-segment gradient: gradient 1 over thickness1, gradient 2 below.
        start_temperature = grad1 * thickness1 + (reservoir_depth - thickness1) * grad2 + surface_temperature
    else:
        start_temperature = grad1 * reservoir_depth + surface_temperature
    res_temp = start_temperature * factors
    with open(reservoir_outputfactor_txt_file, 'w') as f:
        for step, t in zip(time_steps, res_temp):
            f.write(str(step) + '\t\t' + ',' + '\t\t' + str(t) + '\n')
        f.close()  # NOTE(review): redundant inside the `with` block
    return start_temperature
def result2df(fname):
    """Parse a GEOPHIRES result file into ``{parameter: [value]}``.

    Lines of the form ``<name> = <text>`` are scanned and the first number
    found on the right-hand side is stored as a float.  Lines without the
    padded ``' = '`` separator, or whose value contains no number, are
    skipped (the original crashed with ValueError on lines containing a
    bare '=' or multiple ' = ' separators).

    :param fname: path of the result file (latin-1 encoded)
    :return: dict mapping parameter names to single-element value lists
    """
    out_dict = dict()
    with open(fname, encoding='latin1') as file:
        content = file.readlines()
    for line in content:
        # Require the padded separator; maxsplit=1 tolerates ' = ' in values.
        if " = " in line:
            par, val = line.strip().split(" = ", 1)
            par = par.strip()
            numbers = re.findall(r"[-+]?\d*\.\d+|\d+", val)
            if len(numbers) > 0:
                out_dict[par] = [float(numbers[0])]
    return out_dict
def main_func(out_path, payload):
    """Run a full GEOPHIRES simulation and return summary indicators.

    Converts payload costs from EUR to USD, writes the input files into
    `out_path`, runs GEOPHIRES, and packages the key results (with units)
    into a dict.

    :param out_path: directory for generated input/output files
    :param payload: user parameters; must include 'EUR to USD Rate'
    :return: dict of {indicator: {'value': ..., 'unit': ...}}
    """
    main_path = os.path.abspath(os.path.dirname(__file__))
    # define input & output files
    default_input_txt_file = os.path.join(main_path, 'data_warehouse/online_tool_default_input.txt')
    reservoir_outputfactors_csv_file = os.path.join(main_path, 'data_warehouse/reservoir_output_factors.csv')
    field_specific_parameters = os.path.join(main_path, 'data_warehouse/field_specific_parameters.csv')
    user_input_txt_file = os.path.join(out_path, 'user_inputs.txt')
    reservoir_output_path = os.path.join(out_path, 'ReservoirOutput.txt')
    output_file_1 = os.path.join(out_path, 'warnings.txt')
    output_file_2 = os.path.join(out_path, 'outputs.txt')
    # convert EUR values to USD (GEOPHIRES works internally in USD)
    payload = payload_eur_to_usd(payload)
    # create input text file based on user inputs
    start_temperature = create_input_text(payload, default_input_txt_file, field_specific_parameters,
                                          reservoir_outputfactors_csv_file, user_input_txt_file, reservoir_output_path)
    # run geophires
    # average net heat production [MWth], average annual heat production [GWh], LCOH [EUR/MWh]
    eur2usd = payload['EUR to USD Rate']
    cap, prod, lcoh = geophires(user_input_txt_file, output_file_1, output_file_2, eur2usd)
    indicators = {
        "Start production temperature": {'value': start_temperature,
                                         'unit': 'Deg.C'},
        "Heat capacity": {'value': cap,
                          'unit': 'MW'},
        "Heat production": {'value': prod,
                            'unit': 'GWh'},
        "Levelized cost of heat": {'value': lcoh,
                                   'unit': 'EUR/MWh'},
        # GWh * EUR/MWh = kEUR; the 1e-3 factor converts to MEUR.
        "Annual cost (annuity)": {'value': 1e-3 * prod * lcoh,
                                  'unit': 'MEUR'},
    }
    return indicators
| 40.801653 | 119 | 0.625886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,218 | 0.246709 |
0b08ce05031842d1b1da5e73875c7ace68953124 | 6,040 | py | Python | mydb/test_postgres.py | dappsunilabs/DB4SCI | 54bdd03aaa12957e622c921b263e187740a8b2ae | [
"Apache-2.0"
] | 7 | 2018-12-05T19:18:20.000Z | 2020-11-21T07:27:54.000Z | mydb/test_postgres.py | dappsunilabs/DB4SCI | 54bdd03aaa12957e622c921b263e187740a8b2ae | [
"Apache-2.0"
] | 8 | 2018-04-25T06:02:41.000Z | 2020-09-08T21:55:56.000Z | mydb/test_postgres.py | FredHutch/DB4SCI | cc950a36b6b678fe16c1c91925ec402581636fc0 | [
"Apache-2.0"
] | 2 | 2019-11-14T02:09:09.000Z | 2021-12-28T19:05:51.000Z | #!/usr/bin/python
import time
import psycopg2
import argparse
import postgres_util
import container_util
import admin_db
import volumes
from send_mail import send_mail
from config import Config
def full_test(params):
    """End-to-end smoke test: (re)create a Postgres container and verify it.

    Kills any existing container of the same name, creates a fresh one,
    checks the admin-DB bookkeeping entries, and verifies that the admin,
    test and service accounts can authenticate.  All results are printed.

    :param params: container description dict as produced by ``setup()``
    """
    admin_db.init_db()
    con_name = params['dbname']
    dbtype = params['dbtype']
    print('Starting %s Test; Container Name: %s' % (dbtype, con_name))
    if container_util.container_exists(con_name):
        print(' Duplicate container: KILLING')
        result = container_util.kill_con(con_name,
                                         Config.accounts[dbtype]['admin'],
                                         Config.accounts[dbtype]['admin_pass'],
                                         params['username'])
        # Give the container runtime time to tear down before reusing dirs.
        time.sleep(5)
        print(result)
        print(' removing old directories')
        volumes.cleanup_dirs(con_name)
    print(' Create container')
    result = postgres_util.create(params)
    print(' Create result: %s' % result)
    port = params['port']
    #
    # Admin DB checking
    #
    print(' Check Admin DB log for "create"')
    admin_db.display_container_log(limit=1)
    print(' Check Admin DB for State entry')
    info = admin_db.get_container_state(con_name)
    # NOTE(review): trailing commas are the Python-2 "no newline" idiom;
    # under Python 3 they create harmless 1-tuples instead.
    print(' Name: %s ' % info.name),
    print('State: %s ' % info.state),
    print('TS: %s ' % info.ts),
    print('CID: %d' % info.c_id)
    print(' Check Admin DB for Container Info')
    info = admin_db.display_container_info(con_name)
    print('Info: %s' % info)
    print(' Postgres Show All')
    postgres_util.showall(params)
    print("\n=========")
    print(" - Test Accounts\n")
    print("=========")
    admin_user = Config.accounts[dbtype]['admin']
    admin_pass = Config.accounts[dbtype]['admin_pass']
    test_user = Config.accounts['test_user']['admin']
    test_pass = Config.accounts['test_user']['admin_pass']
    # Verify each account (test user, derived service user, admin) can log in.
    for dbuser, dbuserpass in [[test_user, test_pass],
                               ['svc_'+test_user, params['longpass']],
                               [admin_user, admin_pass]]:
        auth = postgres_util.auth_check(dbuser,
                                        dbuserpass,
                                        port)
        if auth:
            print('User %s verified!' % dbuser)
        else:
            print('user account not valid: %s' % dbuser)
    print(" - Test Complete")
def populate(params):
    """Create a test database in the container and fill it with sample data.

    Connects as the admin user, creates table ``t1`` plus one row, creates a
    second database ``testdb``, then creates table ``t2`` with one row there.

    :param params: container description dict as produced by ``setup()``
    :raises psycopg2.Error: if the initial connection fails
    """
    dbTestName = 'testdb'
    dbtype = params['dbtype']
    conn_string = "dbname='%s' " % params['dbname']
    conn_string += "user='%s' " % Config.accounts[dbtype]['admin']
    conn_string += "host='%s' " % Config.container_host
    conn_string += "port='%d' " % params['port']
    conn_string += "password='%s'" % Config.accounts[dbtype]['admin_pass']
    print(' - Populate with test data: ')
    try:
        conn = psycopg2.connect(conn_string)
    except psycopg2.Error:
        # BUGFIX: the original used a bare `except:` with a py2 print
        # statement and then fell through to a NameError on `conn`.
        # Re-raise so the caller sees the real connection failure.
        print("I am unable to connect to the database")
        raise
    conn.set_isolation_level(0)  # autocommit — required for CREATE DATABASE
    cur = conn.cursor()
    print(' - Create DB: ' + dbTestName)
    cur.execute("CREATE TABLE t1 (id serial PRIMARY KEY, num integer, data varchar);")
    cur.execute("INSERT INTO t1 (num, data) VALUES (%s, %s)",
                (100, "table t1 in Primary database"))
    cur.execute("CREATE DATABASE " + dbTestName)
    conn.close()
    # Re-point the connection string at the freshly created database.
    target = "dbname='%s'" % params['dbname']
    testdb = "dbname='%s'" % dbTestName
    conn2 = conn_string.replace(target, testdb)
    print(' - Connect to new DB: ' + conn2)
    conn = psycopg2.connect(conn2)
    cur = conn.cursor()
    print(' - Create Table and Insert ')
    cur.execute("CREATE TABLE t2 (id serial PRIMARY KEY, num integer, data varchar);")
    cur.execute("INSERT INTO t2 (num, data) VALUES (%s, %s)",
                (100, "Important test data in t2"))
    conn.commit()
    cur.close()
    conn.close()  # BUGFIX: the second connection was never closed
    print(' - Populate Success')
def delete_test_container(dbtype, con_name):
    """Kill and remove the named test container using the admin account."""
    banner = "========="
    print("\n" + banner)
    print(" - Removing Container")
    print(banner)
    admin_user = Config.accounts[dbtype]['admin']
    admin_pass = Config.accounts[dbtype]['admin_pass']
    print(container_util.kill_con(con_name, admin_user, admin_pass))
def setup(dbtype, con_name):
    """Assemble the container-creation parameter dict for a test run.

    :param dbtype: database flavour key into Config (e.g. 'Postgres')
    :param con_name: name for the test container
    :return: parameter dict consumed by create/backup/populate
    """
    test_account = Config.accounts['test_user']
    params = dict(
        dbname=con_name,
        dbuser=test_account['admin'],
        dbtype=dbtype,
        dbuserpass=test_account['admin_pass'],
        support='Basic',
        owner=test_account['owner'],
        description='Test the Dev',
        contact=test_account['contact'],
        life='medium',
        backup_type='User',
        backup_freq='Daily',
        backup_life='6',
        backup_window='any',
        pitr='n',
        maintain='standard',
        phi='No',
        username=test_account['admin'],
        image=Config.info[dbtype]['images'][1][1],
        db_vol='/mydb/dbs_data',
    )
    return params
if __name__ == "__main__":
    # Script entry point: run the full test by default, or purge/backup
    # the test container depending on command-line flags.
    dbtype = 'Postgres'
    con_name = 'postgres-test'
    params = setup(dbtype, con_name)
    # paramd['db_vol'] = '/mydb/encrypt',
    parser = argparse.ArgumentParser(prog='test_postgres.py',
                                     description='Test %s routines' % dbtype)
    parser.add_argument('--purge', '-d', action='store_true', default=False,
                        help='Delete test container')
    parser.add_argument('--backup', '-b', action='store_true', default=False,
                        help='backup %s' % params['dbname'])
    args = parser.parse_args()
    if args.purge:
        # --purge: remove the test container and exit.
        delete_test_container(dbtype, con_name)
    elif args.backup:
        # --backup: run a single backup of the test container.
        (cmd, mesg) = postgres_util.backup(params)
        print("Command: %s\nBackup result: %s" % (cmd, mesg))
    else:
        # Default: create/verify the container, populate it, then back it up.
        full_test(params)
        populate(params)
        postgres_util.backup(params)
        print('- Tests Complete!')
0b09670421913d5dc655e11f02bde1b874775f20 | 4,742 | py | Python | perfectextractor/tests/test_extractor.py | UUDigitalHumanitieslab/time-in-translation | 2b1a3ef2d382b725ec87f0266fc726fbeb818e7b | [
"MIT"
] | 3 | 2020-04-22T15:12:58.000Z | 2021-05-31T16:36:23.000Z | perfectextractor/tests/test_extractor.py | UUDigitalHumanitieslab/time-in-translation | 2b1a3ef2d382b725ec87f0266fc726fbeb818e7b | [
"MIT"
] | 13 | 2017-08-17T13:31:40.000Z | 2022-03-11T10:29:22.000Z | perfectextractor/tests/test_extractor.py | UUDigitalHumanitieslab/time-in-translation | 2b1a3ef2d382b725ec87f0266fc726fbeb818e7b | [
"MIT"
] | 1 | 2020-04-16T10:33:05.000Z | 2020-04-16T10:33:05.000Z | # -*- coding: utf-8 -*-
import unittest
from lxml import etree
from perfectextractor.apps.extractor.models import Perfect
from perfectextractor.corpora.opus.perfect import OPUSPerfectExtractor
class TestPerfectExtractor(unittest.TestCase):
    """Tests for auxiliary/participle lexical binding in OPUSPerfectExtractor."""

    @staticmethod
    def _mock_word(lemma_attr, lemma):
        """Return a bare <w> element carrying only the given lemma attribute."""
        w = etree.Element('w')
        w.set(lemma_attr, lemma)
        return w

    def test_is_lexically_bound(self):
        en_ex = OPUSPerfectExtractor('en', ['de', 'es', 'fr', 'nl'])
        nl_ex = OPUSPerfectExtractor('nl', ['de', 'es', 'en', 'fr'])
        fr_ex = OPUSPerfectExtractor('fr', ['de', 'es', 'en', 'nl'])
        lemma_attr = en_ex.config.get('all', 'lemma_attr')

        mock_s = etree.Element('s')
        mock_pp = Perfect(mock_s)
        mock_pp.add_word('is', 'be', 'VERB', 'w1.1.1')

        # Two anonymous preceding words (no lemma/pos set).
        mock_before = [etree.Element('w'), etree.Element('w')]

        # Default case: English (no lexical bounds)
        mock_aux_verb = self._mock_word(lemma_attr, 'have')
        mock_participle = self._mock_word(lemma_attr, 'collided')
        self.assertTrue(en_ex.is_lexically_bound('en', mock_pp, mock_aux_verb, mock_participle, mock_before))

        # Checking Dutch (ik ben gebotst vs. *ik ben gehad)
        mock_aux_verb = self._mock_word(lemma_attr, 'zijn')
        mock_participle = self._mock_word(lemma_attr, 'botsen')
        self.assertTrue(nl_ex.is_lexically_bound('nl', mock_pp, mock_aux_verb, mock_participle, mock_before))

        mock_aux_verb = self._mock_word(lemma_attr, 'zijn')
        mock_participle = self._mock_word(lemma_attr, 'hebben')
        self.assertFalse(nl_ex.is_lexically_bound('nl', mock_pp, mock_aux_verb, mock_participle, mock_before))

        # Checking French (*je suis regardé vs. je suis revenu)
        mock_aux_verb = self._mock_word(lemma_attr, u'être')
        mock_participle = self._mock_word(lemma_attr, 'regarder')
        self.assertFalse(fr_ex.is_lexically_bound('fr', mock_pp, mock_aux_verb, mock_participle, mock_before))

        mock_aux_verb = self._mock_word(lemma_attr, u'être')
        mock_participle = self._mock_word(lemma_attr, 'revenir')
        self.assertTrue(fr_ex.is_lexically_bound('fr', mock_pp, mock_aux_verb, mock_participle, mock_before))

        # Checking reflexive passé composés (je me suis couché)
        mock_pp = Perfect(mock_s)
        mock_pp.add_word('suis', u'être', 'VERB', 'w1.1.1')
        mock_aux_verb = self._mock_word(lemma_attr, u'être')
        mock_participle = self._mock_word(lemma_attr, 'coucher')
        self.assertFalse(fr_ex.is_lexically_bound('fr', mock_pp, mock_aux_verb, mock_participle, mock_before))

        mock_sentence = etree.Element('s')
        me = etree.SubElement(mock_sentence, 'w')
        me.text = 'me'
        me.set(lemma_attr, 'me')
        me.set('pos', 'PRO:PER')
        je = etree.SubElement(mock_sentence, 'w')
        je.text = 'je'
        je.set(lemma_attr, 'je')
        je.set('pos', 'PRO:PER')
        me_je = [me, je]
        # A preceding reflexive pronoun makes the construction reflexive and
        # the pronoun becomes part of the extracted construction.
        self.assertTrue(fr_ex.is_lexically_bound('fr', mock_pp, mock_aux_verb, mock_participle, me_je))
        self.assertEqual(len(mock_pp.words), 2)
        self.assertEqual(mock_pp.construction_to_string(), 'me suis')
        self.assertTrue(mock_pp.is_reflexive)

        # Checking reflexive passé composés (puis nous sommes restés)
        mock_pp = Perfect(mock_s)
        mock_pp.add_word('sommes', u'être', 'VERB', 'w1.1.1')
        mock_aux_verb = self._mock_word(lemma_attr, u'être')
        mock_participle = self._mock_word(lemma_attr, 'rester')
        self.assertTrue(fr_ex.is_lexically_bound('fr', mock_pp, mock_aux_verb, mock_participle, mock_before))

        mock_sentence = etree.Element('s')
        nous = etree.SubElement(mock_sentence, 'w')
        nous.text = 'nous'
        nous.set(lemma_attr, 'nous')
        nous.set('pos', 'PRO:PER')
        puis = etree.SubElement(mock_sentence, 'w')
        puis.text = 'puis'
        puis.set(lemma_attr, 'puis')
        puis.set('pos', 'ADV')
        nous_puis = [nous, puis]
        # This should be lexically bound, but 'nous' should not be part of the passé composé
        self.assertTrue(fr_ex.is_lexically_bound('fr', mock_pp, mock_aux_verb, mock_participle, nous_puis))
        self.assertEqual(mock_pp.construction_to_string(), 'sommes')
        self.assertEqual(len(mock_pp.words), 1)
        self.assertFalse(mock_pp.is_reflexive)
| 43.907407 | 110 | 0.653522 | 4,558 | 0.958167 | 0 | 0 | 0 | 0 | 0 | 0 | 860 | 0.180786 |
0b09bab5310470d71a60ceebe7eea1ff4b9be3ca | 4,303 | py | Python | python/tvm/tensor_graph/core/auto_schedule/utils.py | QinHan-Erin/AMOS | 634bf48edf4015e4a69a8c32d49b96bce2b5f16f | [
"Apache-2.0"
] | 22 | 2022-03-18T07:29:31.000Z | 2022-03-23T14:54:32.000Z | python/tvm/tensor_graph/core/auto_schedule/utils.py | QinHan-Erin/AMOS | 634bf48edf4015e4a69a8c32d49b96bce2b5f16f | [
"Apache-2.0"
] | null | null | null | python/tvm/tensor_graph/core/auto_schedule/utils.py | QinHan-Erin/AMOS | 634bf48edf4015e4a69a8c32d49b96bce2b5f16f | [
"Apache-2.0"
] | 2 | 2022-03-18T08:26:34.000Z | 2022-03-20T06:02:48.000Z | import tvm
from functools import reduce
from ..utils import to_int, to_int_or_None
def get_need_tile(need_tile):
    """Convert a list of 0/1 value-entities into a list of booleans."""
    return [flag.value == 1 for flag in need_tile]
def get_factors(split_factor_entities):
    """Unwrap split-factor entities into nested lists of plain ints."""
    return [[factor.value for factor in entity.factors]
            for entity in split_factor_entities]
def tile_axis(stage, axis, factors, inner_to_outer=False):
    """Split `axis` on `stage` into len(factors) parts.

    With inner_to_outer=False the splits use `nparts` from the outside in;
    otherwise they use `factor` from the inside out.  The returned list is
    always ordered outermost-to-innermost.
    """
    parts = []
    if inner_to_outer:
        # reversed(factors)[:-1] == reversed(factors[1:])
        for f in reversed(factors[1:]):
            axis, inner = stage.split(axis, f)
            parts.append(inner)
        parts.append(axis)
        parts.reverse()
    else:
        for f in factors[:-1]:
            outer, axis = stage.split(axis, nparts=f)
            parts.append(outer)
        parts.append(axis)
    return parts
def tile_axes(sch, op, axes, need_tile, split_factors, inner_to_outer=False):
    """Tile axes according to need_tile and split_factors.

    :param sch: TVM schedule
    :param op: operation whose stage is tiled
    :param axes: iteration axes of the stage
    :param need_tile: per-axis booleans, True if the axis should be split
    :param split_factors: per-axis factor lists passed to `tile_axis`
    :param inner_to_outer: split direction, forwarded to `tile_axis`
    :return: (axis_map {index -> axis or split-axis list},
              list of split-axis lists, list of the factor lists used)
    """
    axis_map = {}
    split_axis_list = []
    split_factor_list = []
    # NOTE: the original loop variable shadowed the `need_tile` parameter
    # (harmless only because zip() had already captured the argument);
    # renamed to `tile_flag` and replaced the manual counter with enumerate.
    for count_axis, (axis, tile_flag, factors) in enumerate(
            zip(axes, need_tile, split_factors)):
        if tile_flag:
            split_axis = tile_axis(sch[op], axis, factors, inner_to_outer=inner_to_outer)
            split_axis_list.append(split_axis)
            split_factor_list.append(factors)
            axis_map[count_axis] = split_axis
        else:
            axis_map[count_axis] = axis
    return axis_map, split_axis_list, split_factor_list
def get_bind_spec(binding_entity):
    """Unwrap binding entities into nested [index, part] int pairs."""
    return [[[pair[0].value, pair[1].value] for pair in group]
            for group in binding_entity]
def bind_axes(sch, op, axis_map, bind, to_bind, already_bind=None, factors=None, extents=None):
    """The bind function will fuse some axes,
    which is dangerous because this is not updated
    to the schedule state. For now it shouldn't be
    a problem because the fusion should only happen
    on blockIdx.z

    Each entry of `bind` is a list of (axis-index, part-index) pairs; a
    negative part-index selects the whole (unsplit) axis from axis_map,
    otherwise the given split part.  Pairs in one entry are reordered,
    fused, and bound to the thread tag `to_bind`.  When `already_bind` is
    given, the fused extent (product of per-axis extents/factors) is
    recorded under its "extent" key.  Returns the list of bound axes.
    """
    ret = []
    for part in bind:
        to_fuse = []
        to_fuse_extent = 1
        for ele in part:
            if ele[1] < 0:
                # Whole axis (was not split).
                axis = axis_map[ele[0]]
                if already_bind is not None:
                    to_fuse_extent *= extents[ele[0]]
            else:
                # A specific part of a split axis.
                axis = axis_map[ele[0]][ele[1]]
                if already_bind is not None:
                    to_fuse_extent *= factors[ele[0]][ele[1]]
            to_fuse.append(axis)
        if len(to_fuse) > 1:
            sch[op].reorder(*to_fuse)
            fused_axis = sch[op].fuse(*to_fuse)
        else:
            fused_axis = to_fuse[0]
        ret.append(fused_axis)
        sch[op].bind(fused_axis, to_bind)
        if already_bind is not None:
            # NOTE(review): overwritten each iteration — only the last
            # entry's extent is kept.
            already_bind["extent"] = to_fuse_extent
    return ret
def get_move_to_inner(move):
    """Unwrap move entities into a list of plain int values."""
    return [entity.value for entity in move]
def reorder_spatial_and_reduce_axes(sch, op, axis_map, split_axis_list, reduce_split_axis_list, extents_info=None):
    """Reorder spatial and reduce axes.

    Untiled axes are partitioned into extent-1 (`ones`) and larger (`pre`)
    groups, split parts are regrouped level by level, and the stage is
    reordered GPU-style: outer spatial levels, then all reduce levels, then
    the innermost spatial level.  Returns (leveled spatial axes, leveled
    reduce axes).

    :param extents_info: optional {axis: extent} fallback for axes whose
        extent cannot be read from the IterVar domain
    :raises RuntimeError: if an axis extent cannot be determined
    """
    pre = []
    ones = []
    for k, v in axis_map.items():
        if not isinstance(v, (list, tuple)):
            if v.dom is None:
                ext = None
            else:
                ext = to_int_or_None(v.dom.extent)
            if ext is None:
                # BUGFIX: guard against extents_info being None (the default)
                # before the membership test.
                if extents_info is not None and v in extents_info:
                    ext = extents_info[v]
                else:
                    # BUGFIX: the original called an undefined name ERROR(...)
                    # here, which would itself raise NameError.
                    raise RuntimeError("Can't decide extent for %s" % (str(v)))
            if ext > 1:
                pre.append(v)
            else:
                ones.append(v)
    # perform local reorder
    num_axis_parts = len(split_axis_list[0]) if len(split_axis_list) > 0 else 0
    num_reduce_axis_parts = len(reduce_split_axis_list[0]) if len(reduce_split_axis_list) > 0 else 0
    leveled_axes = []
    reduce_leveled_axes = []
    local_order = []

    def _inner(axis_list, leveled, nparts):
        # Transpose [per-axis parts] into [per-level axes].
        for i in range(nparts):
            leveled.append([])
        for part in axis_list:
            for i, axis in enumerate(part):
                leveled[i].append(axis)

    _inner(split_axis_list, leveled_axes, num_axis_parts)
    _inner(reduce_split_axis_list, reduce_leveled_axes, num_reduce_axis_parts)

    if len(leveled_axes) >= 1:
        # GPU specific reorder choice
        # put the inner part as inner-most axes
        local_order = list(reduce(lambda x, y: x + y, leveled_axes[:-1], []))
        local_order += list(reduce(lambda x, y: x + y, reduce_leveled_axes, []))
        local_order += leveled_axes[-1]
    else:
        local_order += list(reduce(lambda x, y: x + y, reduce_leveled_axes, []))

    if len(local_order) > 0:
        sch[op].reorder(*ones, *pre, *local_order)

    return leveled_axes, reduce_leveled_axes
| 28.496689 | 115 | 0.660702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 437 | 0.101557 |
0b0b1a7b4c5a81ec7957c24cfd67e9ce4f84e8eb | 7,311 | py | Python | SCALA_library/Lower_level_tools/pyCorner260.py | snfactory/scala | f046d0e458bc4146546e80df16e15259f6019f93 | [
"Apache-2.0"
] | null | null | null | SCALA_library/Lower_level_tools/pyCorner260.py | snfactory/scala | f046d0e458bc4146546e80df16e15259f6019f93 | [
"Apache-2.0"
] | null | null | null | SCALA_library/Lower_level_tools/pyCorner260.py | snfactory/scala | f046d0e458bc4146546e80df16e15259f6019f93 | [
"Apache-2.0"
] | null | null | null | ###########################################################
# LOWER LEVEL LIBRARY THAT TALKS TO THE MONOCHROMATON #
###########################################################
import subprocess
import time
# -- See pyserial
import serial
class CornerStone260:
    """Controller for an Oriel/Newport Cornerstone 260 monochromator.

    The device is driven over a serial line (pyserial).  Every command sent
    is echoed back by the instrument: ``SerialCommand`` returns True when
    the echo matches the command; ``SerialQuery`` discards the echo line and
    returns the second line, which carries the queried value.

    NOTE: this module is written for Python 2 style serial I/O (str writes);
    the single-argument ``print(...)`` calls behave identically on Py2/Py3.
    """

    def __init__(self, port='/dev/ttyUSB1'):
        """Set up connection parameters; no port is opened here."""
        self.serialport = port  # windows: 'COMx', Linux: '/dev/<your_device_file>'
        self.baud = 9600
        self.sendtermchar = "\r\n"   # terminator appended to outgoing commands
        self.rectermchar = "\r\n"    # terminator expected on incoming lines
        self.timeout = 10            # read timeout in seconds

    def SerialCommand(self, command):
        """Send *command* and return True when the device echo matches it.

        Blocks (retrying every 3 s) until the serial port can be opened.
        """
        # setup - if a Serial object can't be created, a SerialException will be raised.
        while True:
            try:
                ser = serial.Serial(self.serialport, self.baud, timeout=self.timeout)
                # break out of while loop when connection is made
                break
            except serial.SerialException:
                print('waiting for device ' + self.serialport + ' to be available')
                self.CS_Sleep(3)
        ser.flushInput()
        ser.write(command + self.sendtermchar)
        answer = ser.readline()
        ser.close()
        # The instrument echoes the command; strip the trailing "\r\n" and
        # compare case-insensitively to detect success.
        return answer.upper()[:-2] == command.upper()

    def SerialQuery(self, command):
        """Send a query *command* and return the device's answer string.

        The first line read back is the command echo (discarded); the second
        line carries the value and is returned without its trailing "\r\n".
        """
        while True:
            try:
                # Bug fix: the timeout was omitted here (unlike SerialCommand),
                # so a query against an unresponsive device would block in
                # readline() forever.  Pass it consistently.
                ser = serial.Serial(self.serialport, self.baud, timeout=self.timeout)
                # break out of while loop when connection is made
                break
            except serial.SerialException:
                print('waiting for device ' + self.serialport + ' to be available')
                self.CS_Sleep(3)
        ser.flushInput()
        ser.write(command + self.sendtermchar)
        answer1 = ser.readline()  # command echo, intentionally discarded
        answer2 = ser.readline()  # the actual answer
        ser.close()
        return answer2[:-2]

    def CS_Sleep(self, timesleep_in_second):
        """Sleep for the given number of seconds."""
        time.sleep(timesleep_in_second)

    def CS_Units_NM(self):
        """Specifies the operational units: nanometer"""
        return self.SerialCommand('UNITS NM')

    def CS_Units_UM(self):
        """Specifies the operational units: micrometer"""
        return self.SerialCommand('UNITS UM')

    def CS_Units_WN(self):
        """Specifies the operational units: wavenumbers (1/cm)"""
        return self.SerialCommand('UNITS WN')

    def CS_GetUnits(self):
        """Returns the operational units: NM, UM, WN"""
        return self.SerialQuery('UNITS?')[0:2]

    def CS_GoWave(self, position):
        """Moves the wavelength drive to the specified position (see units!)"""
        return self.SerialCommand('GOWAVE %f' % (position))

    def CS_GetWave(self):
        """Returns the wavelength drive position (see units!)"""
        return self.SerialQuery('WAVE?')

    def CS_Calibrate(self, cal):
        """Define the current position as the wavelength specified in the numeric parameter"""
        return self.SerialCommand('CALIBRATE %f' % (cal))

    def CS_Abort(self):
        """Stops any wavelength motion immediately"""
        return self.SerialCommand('ABORT')

    def CS_Step(self, n):
        """Moves the wavelength drive by the integer number of n"""
        return self.SerialCommand('STEP %d' % (n))

    def CS_GetStep(self):
        """Returns the wavelength drive position in steps"""
        return self.SerialQuery('STEP?')

    def CS_Grat(self, n):
        """Selects the grating Nr. 'n'"""
        return self.SerialCommand('GRAT %d' % (n))

    def CS_GetGrat(self):
        """Returns the grating parameters"""
        return self.SerialQuery('GRAT?')

    def CS_GratLabel(self, n, label=' '):
        """Defines the label of the grating Nr. 'n' (truncated to 8 chars)"""
        return self.SerialCommand('GRAT%dLABEL %s' % (n, label[:8]))

    def CS_GetLabel(self, n):
        """Returns the label of the grating"""
        return self.SerialQuery('GRAT%dLABEL?' % (n))

    def CS_GratZero(self, n, zero):
        """Defines the zero of the grating Nr. 'n'"""
        return self.SerialCommand('GRAT%dZERO %f' % (n, zero))

    def CS_GetZero(self, n):
        """Returns the zero of the grating"""
        return self.SerialQuery('GRAT%dZERO?' % (n))

    def CS_GratLines(self, n, lines):
        """Defines the lines of the grating Nr. 'n'"""
        return self.SerialCommand('GRAT%dLINES %d' % (n, lines))

    def CS_GetLines(self, n):
        """Returns the lines of the grating"""
        return self.SerialQuery('GRAT%dLINES?' % (n))

    def CS_GratFactor(self, n, factor):
        """Sets the calibration factor of the grating Nr. 'n'"""
        return self.SerialCommand('GRAT%dFACTOR %f' % (n, factor))

    def CS_GetFactor(self, n):
        """Returns the calibration factor of the grating"""
        return self.SerialQuery('GRAT%dFACTOR?' % (n))

    def CS_GratOffset(self, n, offset):
        """Sets the calibration offset of the grating Nr. 'n'"""
        return self.SerialCommand('GRAT%dOFFSET %f' % (n, offset))

    def CS_GetOffset(self, n):
        """Returns the calibration offset of the grating"""
        return self.SerialQuery('GRAT%dOFFSET?' % (n))

    def CS_ShutterOpen(self):
        """Opens the shutter"""
        return self.SerialCommand('SHUTTER O')

    def CS_ShutterClose(self):
        """Closes the shutter"""
        return self.SerialCommand('SHUTTER C')

    def CS_GetShutter(self):
        """Returns the shutter state"""
        return self.SerialQuery('SHUTTER?')

    def CS_Filter(self, n):
        """Moves the filter wheel to the position specified in 'n'"""
        return self.SerialCommand('FILTER %d' % (n))

    def CS_GetFilter(self):
        """Returns the current filter position"""
        return self.SerialQuery('FILTER?')

    def CS_OutPort(self, n):
        """Selects the output port"""
        return self.SerialCommand('OUTPORT %d' % (n))

    def CS_GetOutPort(self):
        """Returns the current out port"""
        return self.SerialQuery('OUTPORT?')

    def CS_FilterLabel(self, n, label):
        """Defines the label of the filter Nr. 'n' (truncated to 8 chars)"""
        return self.SerialCommand('FILTER%dLABEL %s' % (n, label[:8]))

    def CS_GetFilterLabel(self, n):
        """Returns the label of the filter"""
        return self.SerialQuery('FILTER%dLABEL?' % (n))

    def CS_GetInfo(self):
        """Returns the system info"""
        return self.SerialQuery('INFO?')

    def CS_GetStatus(self):
        """Returns the status byte"""
        return self.SerialQuery('STB?')

    def CS_GetError(self):
        """Returns the error code"""
        return self.SerialQuery('ERROR?')

    def CS_GetSerialPort(self):
        """Returns the configured serial device path."""
        return self.serialport

    def CS_SetSerialPort(self, port):
        """Changes the serial device path used for future connections."""
        self.serialport = port
if __name__ == '__main__':
    cs = CornerStone260(port='/dev/ttyUSB1')
    # Bug fix: the class defines CS_GetInfo, not GetInfo -- the original
    # call raised AttributeError before any serial traffic happened.
    print(cs.CS_GetInfo())
| 36.193069 | 95 | 0.562987 | 6,942 | 0.949528 | 0 | 0 | 0 | 0 | 0 | 0 | 2,627 | 0.359322 |
0b0d5b4aaea543cf63af89cd9a03e2708a9fbab7 | 4,845 | py | Python | resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/AutoDockTools/autoflex4Commands.py | J-E-J-S/aaRS-Pipeline | 43f59f28ab06e4b16328c3bc405cdddc6e69ac44 | [
"MIT"
] | 8 | 2021-12-14T21:30:01.000Z | 2022-02-14T11:30:03.000Z | resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/AutoDockTools/autoflex4Commands.py | J-E-J-S/aaRS-Pipeline | 43f59f28ab06e4b16328c3bc405cdddc6e69ac44 | [
"MIT"
] | null | null | null | resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/AutoDockTools/autoflex4Commands.py | J-E-J-S/aaRS-Pipeline | 43f59f28ab06e4b16328c3bc405cdddc6e69ac44 | [
"MIT"
] | null | null | null | #############################################################################
#
# Author: Ruth HUEY, Michel F. SANNER
#
# Copyright: M. Sanner TSRI 2008
#
#############################################################################
# $Header: /opt/cvs/python/packages/share1.5/AutoDockTools/autoflex4Commands.py,v 1.1 2008/06/04 15:37:20 rhuey Exp $
#
# $Id: autoflex4Commands.py,v 1.1 2008/06/04 15:37:20 rhuey Exp $
#
#
#
#
#
"""
This Module facilitates producing a formatted flexible residue file for AutoDock. The steps in this process are:
* Set the macromolecule:
o Read a PDBQT Macromolecule
o Choose Macromol...
* Select which residues are to be flexible in macromolecule using Pmv selection tools:
o ICOM Select
o SelectFromString
o Select Spherical Region
* Set which torsions in the sidechains of those residues are to be flexible interactively
* The results of the previous steps are written to two files:
o one containing the sidechains of the flexible residues with special keywords
o a second containing the rigid portion of the macromolecule
"""
from ViewerFramework.VFCommand import CommandGUI
from AutoDockTools.autoflexCommands import AF_MacroReader,\
AF_MacroChooser, AF_SelectResidues, AF_ProcessResidues,\
AF_ProcessHingeResidues, AF_EditHinge, AF_SetHinge,\
AF_SetBondRotatableFlag, AF_StepBack, AF_FlexFileWriter,\
AF_RigidFileWriter, AF_LigandDirectoryWriter, menuText
# --- GUI descriptors for the AutoFlex4 menu -------------------------------
# Each CommandGUI instance attaches one menu entry under the 'AutoTools4Bar'
# menubar; the visible labels come from the shared `menuText` dict.
AF_MacroReaderGUI=CommandGUI()
AF_MacroReaderGUI.addMenuCommand('AutoTools4Bar', menuText['AutoFlexMB'], \
        menuText['Read Macro'], cascadeName = menuText['InputMB'])
AF_MacroChooserGUI=CommandGUI()
AF_MacroChooserGUI.addMenuCommand('AutoTools4Bar', menuText['AutoFlexMB'],
        menuText['Choose Macro'], cascadeName = menuText['InputMB'])
AF_SelectResiduesGUI = CommandGUI()
AF_SelectResiduesGUI.addMenuCommand('AutoTools4Bar', menuText['AutoFlexMB'],menuText['Set Residues'])
# NOTE(review): these two descriptors never call addMenuCommand and are not
# referenced by the active entries of commandList below -- they appear to be
# kept only for the commented-out commands further down.
AF_ProcessResiduesGUI = CommandGUI()
AF_ProcessHingeResiduesGUI = CommandGUI()
AF_EditHingeGUI = CommandGUI()
AF_EditHingeGUI.addMenuCommand('AutoTools4Bar', menuText['AutoFlexMB'],\
    menuText['Edit Hinge'])
AF_SetHingeGUI = CommandGUI()
AF_SetHingeGUI.addMenuCommand('AutoTools4Bar', menuText['AutoFlexMB'],\
    menuText['Set Hinge'])
AF_StepBackGUI = CommandGUI()
AF_StepBackGUI.addMenuCommand('AutoTools4Bar', menuText['AutoFlexMB'], menuText['Step Back'])
AF_FlexFileWriterGUI = CommandGUI()
AF_FlexFileWriterGUI.addMenuCommand('AutoTools4Bar', menuText['AutoFlexMB'], \
        menuText['writeFlexible'], cascadeName = menuText['WriteMB'])
AF_RigidFileWriterGUI = CommandGUI()
AF_RigidFileWriterGUI.addMenuCommand('AutoTools4Bar', menuText['AutoFlexMB'], \
        menuText['writeRigid'], cascadeName = menuText['WriteMB'])
AF_LigandDirectoryWriterGUI = CommandGUI()
AF_LigandDirectoryWriterGUI.addMenuCommand('AutoTools4Bar', menuText['AutoFlexMB'], \
        menuText['writeDir'], cascadeName = menuText['WriteMB'])
# Commands registered by initModule(); each dict pairs a command instance
# with its viewer-framework name and (optional) GUI descriptor.  The
# commented-out entries are disabled commands kept for reference.
commandList = [
    {'name':'AD4flex_readMacro','cmd':AF_MacroReader(),'gui':AF_MacroReaderGUI},
    {'name':'AD4flex_chooseMacro','cmd':AF_MacroChooser(),'gui':AF_MacroChooserGUI},
    {'name':'AD4flex_setResidues','cmd':AF_SelectResidues(),'gui':AF_SelectResiduesGUI},
    #{'name':'AD4flex_processResidues','cmd':AF_ProcessResidues(),'gui':None},
    #{'name':'AD4flex_processHingeResidues','cmd':AF_ProcessHingeResidues(),'gui':None},
    #{'name':'AD4flex_setBondRotatableFlag','cmd':AF_SetBondRotatableFlag(),'gui':None},
    #{'name':'AD4flex_setHinge','cmd':AF_SetHinge(),'gui':AF_SetHingeGUI},
    #{'name':'AD4flex_editHinge','cmd':AF_EditHinge(),'gui':None},
    {'name':'AD4flex_stepBack','cmd':AF_StepBack(),'gui':AF_StepBackGUI},
    {'name':'AD4flex_writeFlexFile','cmd':AF_FlexFileWriter(),'gui':AF_FlexFileWriterGUI},
    {'name':'AD4flex_writeRigidFile','cmd':AF_RigidFileWriter(),'gui':AF_RigidFileWriterGUI},
    #{'name':'AD4flex_writeFlexDir','cmd':AF_LigandDirectoryWriter(),'gui':AF_LigandDirectoryWriterGUI}
    ]
def initModule(vf):
    """Register the AutoFlex4 commands on the viewer framework *vf*.

    Adds every entry of ``commandList``, registers the two legacy ADflex
    fallbacks if not already present, aliases ``ADflex_setResidues`` to the
    AD4 command, and configures the menubar when the viewer has a GUI.
    """
    # Fix: the original loop variable was named `dict`, shadowing the builtin.
    for command_spec in commandList:
        vf.addCommand(command_spec['cmd'], command_spec['name'], command_spec['gui'])
    if not hasattr(vf, 'ADflex_processResidues'):
        vf.addCommand(AF_ProcessResidues(), 'ADflex_processResidues', None)
    if not hasattr(vf, 'ADflex_setBondRotatableFlag'):
        vf.addCommand(AF_SetBondRotatableFlag(), 'ADflex_setBondRotatableFlag', None)
    # Backward-compatible alias for the pre-AD4 command name.
    vf.ADflex_setResidues = vf.AD4flex_setResidues
    if vf.hasGui:
        vf.GUI.menuBars['AutoTools4Bar'].menubuttons[menuText['AutoFlexMB']].config(bg='tan',underline='-1')
        if not hasattr(vf.GUI, 'adtBar'):
            vf.GUI.adtBar = vf.GUI.menuBars['AutoTools4Bar']
            # Fix: dict views are not indexable on Python 3; list(...) keeps
            # identical behavior on Python 2 (values() was already a list).
            vf.GUI.adtFrame = list(vf.GUI.adtBar.menubuttons.values())[0].master
| 36.984733 | 118 | 0.707327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,416 | 0.498658 |
0b0e2f1e049e622e46932ea64784ee002a5a6564 | 6,801 | py | Python | cohesity_management_sdk/models/net_app_volume_information.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | cohesity_management_sdk/models/net_app_volume_information.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | cohesity_management_sdk/models/net_app_volume_information.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.cifs_share_information
import cohesity_management_sdk.models.volume_security_information
class NetAppVolumeInformation(object):
    """Implementation of the 'NetApp Volume Information.' model.

    Specifies information about a volume in a NetApp cluster.

    Attributes:
        aggregate_name (string): Containing aggregate name of this volume.
        capacity_bytes (long|int): Total capacity in bytes of this volume.
        cifs_shares (list of CIFSShareInformation): Set of CIFS Shares
            exported for this volume.
        creation_time_usecs (long|int): Creation time of the volume in Unix
            epoch time (microseconds).
        data_protocols (list of DataProtocolEnum): Set of data protocols
            supported by this volume ('kNfs', 'kCifs', 'kIscsi', 'kFc',
            'kFcache', 'kHttp', 'kNdmp', 'kManagement').
        export_policy_name (string): Name of the export policy (which
            defines the access permissions for the mount client) assigned
            to this volume.
        junction_path (string): Junction path of this volume; usable to
            mount the volume via protocols such as NFS.
        name (string): Name of the NetApp Vserver this volume belongs to.
        security_info (VolumeSecurityInformation): NetApp volume security
            settings.
        state (StateEnum): State of this volume ('kOnline', 'kRestricted',
            'kOffline', 'kMixed').
        mtype (Type12Enum): NetApp type of this volume ('kReadWrite',
            'kLoadSharing', 'kDataProtection', 'kDataCache', 'kTmp',
            'kUnknownType').
        used_bytes (long|int): Total space (in bytes) used in this volume.
    """

    # Create a mapping from Model property names to API property names
    _names = {
        "aggregate_name":'aggregateName',
        "capacity_bytes":'capacityBytes',
        "cifs_shares":'cifsShares',
        "creation_time_usecs":'creationTimeUsecs',
        "data_protocols":'dataProtocols',
        "export_policy_name":'exportPolicyName',
        "junction_path":'junctionPath',
        "name":'name',
        "security_info":'securityInfo',
        "state":'state',
        "mtype":'type',
        "used_bytes":'usedBytes'
    }

    def __init__(self,
                 aggregate_name=None,
                 capacity_bytes=None,
                 cifs_shares=None,
                 creation_time_usecs=None,
                 data_protocols=None,
                 export_policy_name=None,
                 junction_path=None,
                 name=None,
                 security_info=None,
                 state=None,
                 mtype=None,
                 used_bytes=None):
        """Constructor for the NetAppVolumeInformation class"""

        # Initialize members of the class
        self.aggregate_name = aggregate_name
        self.capacity_bytes = capacity_bytes
        self.cifs_shares = cifs_shares
        self.creation_time_usecs = creation_time_usecs
        self.data_protocols = data_protocols
        self.export_policy_name = export_policy_name
        self.junction_path = junction_path
        self.name = name
        self.security_info = security_info
        self.state = state
        self.mtype = mtype
        self.used_bytes = used_bytes

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class, or None when
            *dictionary* is None.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        aggregate_name = dictionary.get('aggregateName')
        capacity_bytes = dictionary.get('capacityBytes')
        cifs_shares = None
        # Idiom fix: compare to None with `is not None` (was `!= None`);
        # the comprehension replaces the manual append loop.
        if dictionary.get('cifsShares') is not None:
            cifs_shares = [
                cohesity_management_sdk.models.cifs_share_information.CIFSShareInformation.from_dictionary(structure)
                for structure in dictionary.get('cifsShares')
            ]
        creation_time_usecs = dictionary.get('creationTimeUsecs')
        data_protocols = dictionary.get('dataProtocols')
        export_policy_name = dictionary.get('exportPolicyName')
        junction_path = dictionary.get('junctionPath')
        name = dictionary.get('name')
        security_info = cohesity_management_sdk.models.volume_security_information.VolumeSecurityInformation.from_dictionary(dictionary.get('securityInfo')) if dictionary.get('securityInfo') else None
        state = dictionary.get('state')
        mtype = dictionary.get('type')
        used_bytes = dictionary.get('usedBytes')

        # Return an object of this model
        return cls(aggregate_name,
                   capacity_bytes,
                   cifs_shares,
                   creation_time_usecs,
                   data_protocols,
                   export_policy_name,
                   junction_path,
                   name,
                   security_info,
                   state,
                   mtype,
                   used_bytes)
| 43.877419 | 200 | 0.641964 | 6,614 | 0.972504 | 0 | 0 | 2,036 | 0.299368 | 0 | 0 | 4,097 | 0.602411 |
0b0f14dc9edd917b5943eaee8fa4e20472331f44 | 170 | py | Python | src/matlab2cpp/node/__init__.py | neilferg/matlab2cpp | aa26671fc73dad297c977511053b076e05bdd2df | [
"BSD-3-Clause"
] | null | null | null | src/matlab2cpp/node/__init__.py | neilferg/matlab2cpp | aa26671fc73dad297c977511053b076e05bdd2df | [
"BSD-3-Clause"
] | null | null | null | src/matlab2cpp/node/__init__.py | neilferg/matlab2cpp | aa26671fc73dad297c977511053b076e05bdd2df | [
"BSD-3-Clause"
] | null | null | null | """
The module contains the following submodules.
"""
from .frontend import Node
__all__ = ["Node"]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15.454545 | 45 | 0.688235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.405882 |
0b0f2186008ea5990d722ee460df30182fea5006 | 1,088 | py | Python | ipsc/2016/f.py | csferng/competitive-programming | f7ae710392c0fd606f735df6efcfca5e8fbd3501 | [
"MIT"
] | null | null | null | ipsc/2016/f.py | csferng/competitive-programming | f7ae710392c0fd606f735df6efcfca5e8fbd3501 | [
"MIT"
] | null | null | null | ipsc/2016/f.py | csferng/competitive-programming | f7ae710392c0fd606f735df6efcfca5e8fbd3501 | [
"MIT"
] | null | null | null | import collections
import itertools
import re
import sys
read_str = lambda : sys.stdin.readline().strip()
read_str_list = lambda : sys.stdin.readline().strip().split()
read_int = lambda : int(read_str())
read_int_list = lambda : map(int, read_str_list())
read_float = lambda : float(read_str())
read_float_list = lambda : map(float, read_str_list())
def solve_one(N2, fr, to):
    """Return a string of 'L'/'R' moves leading from position ``fr`` to
    ``to`` on a cycle of size ``N2``.

    The while loop doubles the reachable interval [bg, ed] (mod N2) until it
    covers the target offset; the for loop then replays the binary digits of
    that offset, emitting one move per level.  Positions are 0-based.

    NOTE(review): ``N2/2`` relies on Python 2 integer division for an exact
    half; ``xrange`` is also Py2-only -- this file must run under Python 2.
    """
    # print >> sys.stderr, 'Q', N2, fr, to
    bg, ed = fr, fr
    l = 0
    # print >> sys.stderr, (bg, ed)
    # Grow the reachable window until the target offset (to-bg) mod N2
    # fits inside it; `l` counts the number of doubling levels needed.
    while (to-bg)%N2 > ed-bg:
        bg *= 2
        ed = ed*2 + 1
        if bg > N2:
            bg -= N2
            ed -= N2
        l += 1
        # print >> sys.stderr, (bg, ed)
    d = (to-bg) % N2
    c = ""
    # Emit one move per level, most-significant bit of `d` first; the
    # L/R choice is flipped whenever the current position is in the
    # upper half of the cycle (b2).
    for b in xrange(l-1, -1, -1):
        b1 = ((d>>b) & 1)
        b2 = 1 if fr >= N2/2 else 0
        c += 'L' if (b1^b2)==1 else 'R'
        fr = (fr*2+b1) % N2
    return c
def solve(N, A):
    """Concatenate the move strings for each consecutive pair of A (1-based
    positions, converted to 0-based on a cycle of size 2*N)."""
    parts = []
    for prev_pos, next_pos in zip(A, A[1:]):
        parts.append(solve_one(N * 2, prev_pos - 1, next_pos - 1))
    return ''.join(parts)
def main():
    """Read T test cases from stdin and print one '<length>:<moves>' line each."""
    num_cases = read_int()
    for _ in range(num_cases):
        read_str()  # skip the separator line before each case
        N, X, K = read_int_list()
        A = read_int_list()
        ans = solve(N, [X] + A)
        print('%d:%s' % (len(ans), ans))

if __name__ == "__main__":
    main()
| 21.76 | 76 | 0.581801 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.117647 |
0b1003b9ec3ab93ffe28039b6241e37c51da92c6 | 5,958 | py | Python | mcpython/common/data/gen/TextureDataGen.py | mcpython4-coding/core | e4c4f59dab68c90e2028db3add2e5065116bf4a6 | [
"CC0-1.0",
"MIT"
] | 2 | 2019-11-02T05:26:11.000Z | 2019-11-03T08:52:18.000Z | mcpython/common/data/gen/TextureDataGen.py | mcpython4-coding/core | e4c4f59dab68c90e2028db3add2e5065116bf4a6 | [
"CC0-1.0",
"MIT"
] | 25 | 2019-11-02T05:24:29.000Z | 2022-02-09T14:09:08.000Z | mcpython/common/data/gen/TextureDataGen.py | mcpython4-coding/core | e4c4f59dab68c90e2028db3add2e5065116bf4a6 | [
"CC0-1.0",
"MIT"
] | 5 | 2019-11-09T05:36:06.000Z | 2021-11-28T13:07:08.000Z | """
mcpython - a minecraft clone written in python licenced under the MIT-licence
(https://github.com/mcpython4-coding/core)
Contributors: uuk, xkcdjerry (inactive)
Based on the game of fogleman (https://github.com/fogleman/Minecraft), licenced under the MIT-licence
Original game "minecraft" by Mojang Studios (www.minecraft.net), licenced under the EULA
(https://account.mojang.com/documents/minecraft_eula)
Mod loader inspired by "Minecraft Forge" (https://github.com/MinecraftForge/MinecraftForge) and similar
This project is not official by mojang and does not relate to it.
"""
import asyncio
import mcpython.engine.ResourceLoader as ResourceLoader
import mcpython.util.texture
import PIL.Image
from mcpython.common.data.gen.DataGeneratorManager import (
DataGeneratorInstance,
IDataGenerator,
)
from mcpython.engine import logger
class TextureConstructor(IDataGenerator):
"""
generator system for generating textures
"""
def __init__(self, name: str, image_size: tuple = None):
"""
will create an new TextureConstructor-instance
:param name: the name of the texture address as "{group}/{path without .png}"
:param image_size: the size of the image to create
"""
self.name = name
self.image_size = image_size
self.actions = []
def add_image_layer_top(self, location_or_image, position=(0, 0), rescale=(1, 1)):
"""
will alpha-composite an image ontop of all previous actions
:param location_or_image: the image to add
:param position: the position to add on
:param rescale: rescale of the image
"""
try:
self.actions.append(
(
0,
location_or_image
if type(location_or_image) == PIL.Image.Image
else asyncio.get_event_loop().run_until_complete(ResourceLoader.read_image(location_or_image)),
position,
rescale,
)
)
except:
logger.print_exception(
"[ERROR] failed to add image layer from file {}".format(
location_or_image
)
)
return self
def add_coloring_layer(
self, location_or_image, color: tuple, position=(0, 0), rescale=(1, 1)
):
"""
will alpha-composite an image (which is colored before) ontop of all previous actions
:param location_or_image: the image to add
:param color: the color to colorize with
:param position: the position to add on
:param rescale: rescale of the image
"""
try:
if type(location_or_image) != PIL.Image.Image:
location_or_image = asyncio.get_event_loop().run_until_complete(ResourceLoader.read_image(location_or_image))
self.actions.append(
(
1,
location_or_image,
color,
position,
rescale,
)
)
except:
logger.print_exception(
"[ERROR] failed to add colorized layer from file {} with color {}".format(
location_or_image, color
)
)
return self
def scaled(self, scale: tuple):
self.actions.append((3, scale))
return self
def crop(self, region: tuple):
self.actions.append((4, region))
return self
def add_alpha_composite_layer(self, location_or_image, position=(0, 0)):
try:
self.actions.append(
(
2,
location_or_image
if type(location_or_image) == PIL.Image.Image
else asyncio.get_event_loop().run_until_complete(ResourceLoader.read_image(location_or_image)),
position,
)
)
except:
logger.print_exception("failed to add alpha composite layer")
return self
def write(self, generator: "DataGeneratorInstance", name: str):
file = self.get_default_location(generator, name)
if self.image_size is None:
for action, *data in self.actions:
if action == 0:
self.image_size = data[0]
break
else:
logger.println(
"[ERROR] failed to texture-gen as image size could not get loaded for"
" generator named {} to store at {}!".format(self.name, file)
)
return
image = PIL.Image.new("RGBA", self.image_size, (0, 0, 0, 0))
for action, *data in self.actions:
if action == 0:
sx, sy = data[0].size
px, py = data[2]
image.alpha_composite(
data[0]
.resize((sx * px, sy * py), PIL.Image.NEAREST)
.convert("RGBA"),
data[1],
)
elif action == 1:
i = mcpython.util.texture.colorize(data[0], data[1])
sx, sy = i.size
px, py = data[3]
image.alpha_composite(
i.resize((sx * px, sy * py), PIL.Image.NEAREST).convert("RGBA"),
data[2],
)
elif action == 2:
image.alpha_composite(data[0], data[1])
elif action == 3:
size = image.size
scale = data[0]
image = image.resize(tuple([scale[i] * size[i] for i in range(2)]))
elif action == 4:
size = image.size
region = data[0]
image = image.crop(tuple([region[i] * size[i % 2] for i in range(4)]))
image.save(generator.get_full_path(file))
| 35.891566 | 125 | 0.544982 | 5,105 | 0.856831 | 0 | 0 | 0 | 0 | 0 | 0 | 1,686 | 0.282981 |
0b10cab889db1a04677f05bcfb228ad6218dea84 | 9,537 | py | Python | qinling/tests/unit/api/controllers/v1/test_webhook.py | goldyfruit/qinling | 7e1391056cc90c274f063ea25b59bb138fdfef94 | [
"Apache-2.0"
] | null | null | null | qinling/tests/unit/api/controllers/v1/test_webhook.py | goldyfruit/qinling | 7e1391056cc90c274f063ea25b59bb138fdfef94 | [
"Apache-2.0"
] | null | null | null | qinling/tests/unit/api/controllers/v1/test_webhook.py | goldyfruit/qinling | 7e1391056cc90c274f063ea25b59bb138fdfef94 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Catalyst IT Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
from qinling import context
from qinling.db import api as db_api
from qinling.tests.unit.api import base
from qinling.utils import constants
class TestWebhookController(base.APITest):
def setUp(self):
super(TestWebhookController, self).setUp()
db_func = self.create_function()
self.func_id = db_func.id
def test_crud(self):
# Create
body = {
'function_id': self.func_id,
'description': 'webhook test'
}
resp = self.app.post_json('/v1/webhooks', body)
self.assertEqual(201, resp.status_int)
webhook_id = resp.json.get('id')
self.assertIn(self.qinling_endpoint, resp.json.get('webhook_url'))
# Get
resp = self.app.get('/v1/webhooks/%s' % webhook_id)
self.assertEqual(200, resp.status_int)
self._assertDictContainsSubset(resp.json, body)
# List
resp = self.app.get('/v1/webhooks')
self.assertEqual(200, resp.status_int)
actual = self._assert_single_item(
resp.json['webhooks'], id=webhook_id
)
self._assertDictContainsSubset(actual, body)
# Update
resp = self.app.put_json(
'/v1/webhooks/%s' % webhook_id,
{'description': 'webhook test update'}
)
self.assertEqual(200, resp.status_int)
expected = {
'function_id': self.func_id,
'description': 'webhook test update'
}
resp = self.app.get('/v1/webhooks/%s' % webhook_id)
self.assertEqual(200, resp.status_int)
self._assertDictContainsSubset(resp.json, expected)
# Delete
resp = self.app.delete('/v1/webhooks/%s' % webhook_id)
self.assertEqual(204, resp.status_int)
resp = self.app.get('/v1/webhooks/%s' % webhook_id, expect_errors=True)
self.assertEqual(404, resp.status_int)
def test_create_with_version(self):
db_api.increase_function_version(self.func_id, 0)
body = {
'function_id': self.func_id,
'function_version': 1,
'description': 'webhook test'
}
resp = self.app.post_json('/v1/webhooks', body)
self.assertEqual(201, resp.status_int)
self.assertEqual(1, resp.json.get("function_version"))
self.assertIsNone(resp.json.get("function_alias"))
def test_create_with_alias(self):
db_api.increase_function_version(self.func_id, 0)
name = self.rand_name(name="alias", prefix=self.prefix)
body = {
'function_id': self.func_id,
'function_version': 1,
'name': name
}
db_api.create_function_alias(**body)
webhook_body = {
'function_alias': name,
'description': 'webhook test'
}
resp = self.app.post_json('/v1/webhooks', webhook_body)
self.assertEqual(201, resp.status_int)
self.assertEqual(name, resp.json.get('function_alias'))
self.assertIsNone(resp.json.get("function_id"))
self.assertIsNone(resp.json.get("function_version"))
def test_create_with_invalid_alias(self):
body = {
'function_alias': 'fake_alias',
'description': 'webhook test'
}
resp = self.app.post_json('/v1/webhooks', body, expect_errors=True)
self.assertEqual(404, resp.status_int)
def test_create_without_required_params(self):
resp = self.app.post(
'/v1/webhooks',
params={},
expect_errors=True
)
self.assertEqual(400, resp.status_int)
def test_update_with_version(self):
db_api.increase_function_version(self.func_id, 0)
webhook = self.create_webhook(self.func_id)
self.assertIsNone(webhook.function_version)
resp = self.app.put_json(
'/v1/webhooks/%s' % webhook.id,
{'function_version': 1}
)
self.assertEqual(200, resp.status_int)
self.assertEqual(1, resp.json.get("function_version"))
self.assertIsNone(resp.json.get("function_alias"))
def test_update_only_description(self):
db_api.increase_function_version(self.func_id, 0)
webhook = self.create_webhook(self.func_id, function_version=1)
self.assertEqual(1, webhook.function_version)
resp = self.app.put_json(
'/v1/webhooks/%s' % webhook.id,
{'description': 'updated description'}
)
self.assertEqual(200, resp.status_int)
self.assertEqual(1, resp.json.get("function_version"))
self.assertEqual('updated description', resp.json.get("description"))
def test_update_function_alias_1(self):
# Create webhook using function alias
db_api.increase_function_version(self.func_id, 0)
name = self.rand_name(name="alias", prefix=self.prefix)
body = {
'function_id': self.func_id,
'function_version': 1,
'name': name
}
db_api.create_function_alias(**body)
webhook = self.create_webhook(function_alias=name)
db_api.increase_function_version(self.func_id, 1)
new_name = self.rand_name(name="alias", prefix=self.prefix)
body = {
'function_id': self.func_id,
'function_version': 2,
'name': new_name
}
db_api.create_function_alias(**body)
# Update webhook with the new alias
resp = self.app.put_json(
'/v1/webhooks/%s' % webhook.id,
{'function_alias': new_name}
)
self.assertEqual(200, resp.status_int)
self.assertEqual(new_name, resp.json.get("function_alias"))
self.assertIsNone(resp.json.get("function_id"))
self.assertIsNone(resp.json.get("function_version"))
def test_update_function_alias_2(self):
# Create webhook using function id
db_api.increase_function_version(self.func_id, 0)
webhook = self.create_webhook(function_id=self.func_id,
function_version=1)
db_api.increase_function_version(self.func_id, 1)
alias_name = self.rand_name(name="alias", prefix=self.prefix)
body = {
'function_id': self.func_id,
'function_version': 2,
'name': alias_name
}
db_api.create_function_alias(**body)
# Update webhook with function alias
resp = self.app.put_json(
'/v1/webhooks/%s' % webhook.id,
{'function_alias': alias_name}
)
self.assertEqual(200, resp.status_int)
self.assertEqual(alias_name, resp.json.get("function_alias"))
self.assertIsNone(resp.json.get("function_id"))
self.assertIsNone(resp.json.get("function_version"))
@mock.patch("qinling.utils.openstack.keystone.create_trust_context")
@mock.patch("qinling.utils.executions.create_execution")
def test_invoke_with_function_id(self, mock_create_execution,
mock_create_context):
exec_mock = mock_create_execution.return_value
exec_mock.id = "fake_id"
webhook = self.create_webhook(function_id=self.func_id)
resp = self.app.post_json('/v1/webhooks/%s/invoke' % webhook.id, {})
context.set_ctx(self.ctx)
self.assertEqual(202, resp.status_int)
params = {
'function_id': self.func_id,
'function_version': None,
'sync': False,
'input': json.dumps({}),
'description': constants.EXECUTION_BY_WEBHOOK % webhook.id
}
mock_create_execution.assert_called_once_with(mock.ANY, params)
@mock.patch("qinling.utils.openstack.keystone.create_trust_context")
@mock.patch("qinling.utils.executions.create_execution")
def test_invoke_with_function_alias(self, mock_create_execution,
mock_create_context):
exec_mock = mock_create_execution.return_value
exec_mock.id = "fake_id"
db_api.increase_function_version(self.func_id, 0)
alias_name = self.rand_name(name="alias", prefix=self.prefix)
body = {
'function_id': self.func_id,
'function_version': 1,
'name': alias_name
}
db_api.create_function_alias(**body)
webhook = self.create_webhook(function_alias=alias_name)
resp = self.app.post_json('/v1/webhooks/%s/invoke' % webhook.id, {})
context.set_ctx(self.ctx)
self.assertEqual(202, resp.status_int)
params = {
'function_id': self.func_id,
'function_version': 1,
'sync': False,
'input': json.dumps({}),
'description': constants.EXECUTION_BY_WEBHOOK % webhook.id
}
mock_create_execution.assert_called_once_with(mock.ANY, params)
| 35.585821 | 79 | 0.624934 | 8,758 | 0.918318 | 0 | 0 | 2,076 | 0.217679 | 0 | 0 | 2,256 | 0.236552 |
0b112c171eb02964d852f653ff119690933ba7a7 | 314 | py | Python | setup.py | elcolumbio/goofy | f922db2b9662fcc5e26529a2d428feaabede6b42 | [
"Apache-2.0"
] | 1 | 2018-06-29T06:03:17.000Z | 2018-06-29T06:03:17.000Z | setup.py | elcolumbio/goofy | f922db2b9662fcc5e26529a2d428feaabede6b42 | [
"Apache-2.0"
] | null | null | null | setup.py | elcolumbio/goofy | f922db2b9662fcc5e26529a2d428feaabede6b42 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
# Package metadata for the goofy distribution.
setup(
    name='goofy',
    version='0.1',
    description='A goofy ebay bot.',
    url='github.com/elcolumbio/goofy',
    author='Florian Benkö',
    author_email='f.benkoe@innotrade24.de',
    license='Apache License, Version 2.0 (the "License")',
    packages=['goofy'],
)
| 28.545455 | 60 | 0.640127 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.485714 |
0b114a97cf42ee5a0905829c9063503a14dd4b05 | 1,820 | py | Python | nxt/main.py | TeamGalileoRobotics/qLearning | 70333f45373abc72a489e1ac4089f8e3db34c6a8 | [
"MIT"
] | 4 | 2017-09-27T16:16:58.000Z | 2019-11-27T09:26:54.000Z | nxt/main.py | TeamGalileoRobotics/qLearning | 70333f45373abc72a489e1ac4089f8e3db34c6a8 | [
"MIT"
] | null | null | null | nxt/main.py | TeamGalileoRobotics/qLearning | 70333f45373abc72a489e1ac4089f8e3db34c6a8 | [
"MIT"
] | null | null | null | import random
import os
import traceback
import json
from environment import Environment
from environment import Action
# 0 <= GAMMA < 1
# GAMMA = 0 -> only consider immediate rewards
# GAMMA = 1 -> consider future rewards
GAMMA = 0.8
confidence = 0
# initialize "brain"
brain = open("brainFile", "r")
if(os.stat("./brainFile").st_size == 0):
q = {}
else:
q = json.load(brain)
brain.close()
# normalize so highest/lowest value is 100/-100
def normalize(matrix):
flat = []
for li in matrix:
for i in li:
flat.append(abs(i))
max_val = max(flat)
if max_val == 0:
return matrix
for x, li in enumerate(matrix):
for y, val in enumerate(li):
matrix[x][y] = (val / max_val) * 100
return matrix
# initialize a q value array
def initialize_q_value(key):
    """Ensure *key* has an all-zero action-value row in the global Q-table."""
    if key not in q:
        q[key] = [0] * Environment.NUM_ACTIONS
env = Environment()
while env.running:
    old_state = env.state
    # Make sure the current state has a Q-value row before reading it.
    initialize_q_value(old_state)

    # All actions tied for the best Q-value in the current state.
    max_action = [action for action, q_value in enumerate(q[env.state])
                  if q_value == max(q[env.state])]

    # Epsilon-greedy-style selection: exploit with probability
    # `confidence`, otherwise explore a uniformly random action.
    # BUG FIX: the original assigned the whole candidate *list* to
    # `action` on the exploit branch; pick one element instead.
    if confidence > random.random():
        action = random.choice(max_action)
    else:
        action = random.randint(0, env.NUM_ACTIONS - 1)
    # Confidence asymptotically approaches 1 as learning progresses.
    confidence += (1 - confidence) / 10

    try:
        reward = env.move(action)
    except Exception:
        # FIX: use print() (the original used the Python-2-only
        # `print expr` statement, a SyntaxError under Python 3).
        print(traceback.format_exc())
        break

    initialize_q_value(env.state)
    # Q-learning update (implicit learning rate of 1).
    q[old_state][action] = reward + GAMMA * max(q[env.state])

# normalize values
#q = normalize(q)

# Persist the learned Q-table for the next run.
brain = open("brainFile", "w")
json.dump(q, brain)
brain.close()
0b115c280b92cd1887912e7b5b880c8d693ab762 | 987 | py | Python | final_project/machinetranslation/tests.py | eduardomecchia/xzceb-flask_eng_fr | fa2da58ecabd43385a77be42f1eb6cf5401d6757 | [
"Apache-2.0"
] | null | null | null | final_project/machinetranslation/tests.py | eduardomecchia/xzceb-flask_eng_fr | fa2da58ecabd43385a77be42f1eb6cf5401d6757 | [
"Apache-2.0"
] | null | null | null | final_project/machinetranslation/tests.py | eduardomecchia/xzceb-flask_eng_fr | fa2da58ecabd43385a77be42f1eb6cf5401d6757 | [
"Apache-2.0"
] | null | null | null | import unittest
import translator
class TestEnglishToFrench(unittest.TestCase):
    """English -> French translation, including None input handling."""

    def test_null(self):
        self.assertRaises(ValueError, translator.english_to_french, None)

    def test_hello(self):
        self.assertEqual(translator.english_to_french('Hello'), 'Bonjour')

    def test_love(self):
        self.assertEqual(translator.english_to_french('Love'), 'Amour')

    def test_sun(self):
        self.assertEqual(translator.english_to_french('Sun'), 'Soleil')
class TestFrenchToEnglish(unittest.TestCase):
    """French -> English translation, including None input handling."""

    def test_null(self):
        self.assertRaises(ValueError, translator.french_to_english, None)

    def test_hello(self):
        self.assertEqual(translator.french_to_english('Bonjour'), 'Hello')

    def test_love(self):
        self.assertEqual(translator.french_to_english('Amour'), 'Love')

    def test_sun(self):
        self.assertEqual(translator.french_to_english('Soleil'), 'Sun')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
0b12869eee8275c66479c755fba088b954a1e101 | 2,149 | py | Python | dataRT/http.py | thedeltaflyer/dataRT | 9befbad597cfd50aea1ea48271adcd29e29a4d45 | [
"MIT"
] | 2 | 2020-04-26T14:47:04.000Z | 2022-02-25T21:04:00.000Z | dataRT/http.py | thedeltaflyer/dataRT | 9befbad597cfd50aea1ea48271adcd29e29a4d45 | [
"MIT"
] | null | null | null | dataRT/http.py | thedeltaflyer/dataRT | 9befbad597cfd50aea1ea48271adcd29e29a4d45 | [
"MIT"
] | null | null | null | from flask import (Flask, jsonify)
from gevent import (pywsgi, sleep)
from geventwebsocket.handler import WebSocketHandler
from . import __version__
from .logs import logger
class FlaskApp(object):
    """Flask app served through gevent's WSGI server with WebSocket
    support and a cooperative run loop."""

    def __init__(self, host='', port=8080):
        self.app = Flask(__name__)
        self._register_routes()
        self._socket_app = None
        self._host = host
        self._port = port
        self._server = pywsgi.WSGIServer(
            (self._host, self._port), self.app,
            handler_class=WebSocketHandler)
        self._serving = False

    @property
    def socket_app(self):
        return self._socket_app

    @socket_app.setter
    def socket_app(self, socket_app):
        self._socket_app = socket_app

    @property
    def socket_clients(self):
        # Number of connected websocket clients; 0 when no socket app.
        return len(self._socket_app) if self._socket_app is not None else 0

    @property
    def is_serving(self):
        return self._serving

    def _register_routes(self):
        @self.app.route("/", methods=['GET'])
        def root():
            return "200 OK", 200

        # Tesseract requires at least a /status endpoint to verify that
        # the app is running.
        @self.app.route("/status", methods=['GET'])
        def status():
            return jsonify({
                "status": "up",
                "version": __version__,
                "clients": self.socket_clients
            }), 200

    def serve_forever(self):
        """Block and serve until interrupted from the keyboard."""
        logger.debug('Serving Forever!')
        try:
            print(str(self._port))
            print(str(self._host))
            self._server.serve_forever()
        except KeyboardInterrupt:
            print("Keyboard Interrupt, Exiting...")
            exit(0)

    def start(self):
        logger.debug('Starting Server...')
        self._serving = True
        self._server.start()

    def stop(self):
        logger.debug('Stopping Server...')
        self._server.stop()
        self._serving = False

    def run_in_loop(self, actions, *args, **kwargs):
        """Repeatedly call *actions* while serving, yielding to gevent
        between iterations via sleep(0)."""
        if not self._serving:
            self.start()
        while self._serving:
            actions(*args, **kwargs)
            sleep(0)
| 27.551282 | 108 | 0.584458 | 1,971 | 0.917171 | 0 | 0 | 704 | 0.327594 | 0 | 0 | 235 | 0.109353 |
0b1294f67b7fca283b849f80601174b8800cd976 | 9,284 | py | Python | order/views.py | abhijitdalavi/matrix | 29a736e6112994d6d746d9846520dcd532feb46d | [
"MIT"
] | null | null | null | order/views.py | abhijitdalavi/matrix | 29a736e6112994d6d746d9846520dcd532feb46d | [
"MIT"
] | null | null | null | order/views.py | abhijitdalavi/matrix | 29a736e6112994d6d746d9846520dcd532feb46d | [
"MIT"
] | null | null | null | from django.views.generic import ListView, CreateView, UpdateView
from django.utils.decorators import method_decorator
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import get_object_or_404, redirect, reverse
from django.urls import reverse_lazy
from django.contrib import messages
from django.template.loader import render_to_string
from django.http import JsonResponse
from django.db.models import Sum
from django_tables2 import RequestConfig
from .models import Order, OrderItem, CURRENCY
from .forms import OrderCreateForm, OrderEditForm
from product.models import Product, Category
from .tables import ProductTable, OrderItemTable, OrderTable
import datetime
@method_decorator(staff_member_required, name='dispatch')
class HomepageView(ListView):
    """Dashboard: ten most recent orders plus aggregate sales figures."""
    template_name = 'index.html'
    model = Order
    queryset = Order.objects.all()[:10]

    def get_context_data(self, **kwargs):
        # NOTE: the template receives locals(), so every name bound in
        # this method is part of the template contract — do not rename.
        context = super().get_context_data(**kwargs)
        orders = Order.objects.all()
        total_sales = (orders.aggregate(Sum('final_value'))['final_value__sum']
                       if orders.exists() else 0)
        paid_value = (orders.filter(is_paid=True)
                      .aggregate(Sum('final_value'))['final_value__sum']
                      if orders.filter(is_paid=True).exists() else 0)
        remaining = total_sales - paid_value
        # Avoid dividing by zero when there are no sales yet.
        diviner = total_sales if total_sales > 0 else 1
        paid_percent = round((paid_value / diviner) * 100, 1)
        remain_percent = round((remaining / diviner) * 100, 1)
        total_sales = f'{total_sales} {CURRENCY}'
        paid_value = f'{paid_value} {CURRENCY}'
        remaining = f'{remaining} {CURRENCY}'
        orders = OrderTable(orders)
        RequestConfig(self.request).configure(orders)
        context.update(locals())
        return context
@staff_member_required
def auto_create_order_view(request):
    """Create an order with an auto-generated title and jump to its edit page."""
    new_order = Order.objects.create(title='Order 66',
                                     date=datetime.datetime.now())
    # Rename using the freshly assigned primary key.
    new_order.title = f'Order - {new_order.id}'
    new_order.save()
    return redirect(new_order.get_edit_url())
@method_decorator(staff_member_required, name='dispatch')
class OrderListView(ListView):
    """Paginated order list, optionally narrowed by GET filter parameters."""
    template_name = 'list.html'
    model = Order
    paginate_by = 50

    def get_queryset(self):
        qs = Order.objects.all()
        return Order.filter_data(self.request, qs) if self.request.GET else qs

    def get_context_data(self, **kwargs):
        # NOTE: the template receives locals(); keep these names stable.
        context = super().get_context_data(**kwargs)
        orders = OrderTable(self.object_list)
        RequestConfig(self.request).configure(orders)
        context.update(locals())
        return context
@method_decorator(staff_member_required, name='dispatch')
class CreateOrderView(CreateView):
    """Create an order, then redirect to its edit page."""
    template_name = 'form.html'
    form_class = OrderCreateForm
    model = Order

    def get_success_url(self):
        self.new_object.refresh_from_db()
        return reverse('update_order', kwargs={'pk': self.new_object.id})

    def form_valid(self, form):
        # Keep a refreshed handle on the new instance for get_success_url().
        new_instance = form.save()
        new_instance.refresh_from_db()
        self.new_object = new_instance
        return super().form_valid(form)
@method_decorator(staff_member_required, name='dispatch')
class OrderUpdateView(UpdateView):
    """Edit an order: shows its line items plus a short product picker."""
    model = Order
    template_name = 'order_update.html'
    form_class = OrderEditForm

    def get_success_url(self):
        return reverse('update_order', kwargs={'pk': self.object.id})

    def get_context_data(self, **kwargs):
        # NOTE: the template receives locals(); keep these names stable.
        context = super().get_context_data(**kwargs)
        instance = self.object
        qs_p = Product.objects.filter(active=True)[:12]
        products = ProductTable(qs_p)
        order_items = OrderItemTable(instance.order_items.all())
        RequestConfig(self.request).configure(products)
        RequestConfig(self.request).configure(order_items)
        context.update(locals())
        return context
@staff_member_required
def delete_order(request, pk):
    """Delete the order and return to the dashboard with a warning flash."""
    order = get_object_or_404(Order, id=pk)
    order.delete()
    messages.warning(request, 'The order is deleted!')
    return redirect(reverse('homepage'))
@staff_member_required
def done_order_view(request, pk):
    """Mark the order as paid and return to the dashboard."""
    order = get_object_or_404(Order, id=pk)
    order.is_paid = True
    order.save()
    return redirect(reverse('homepage'))
@staff_member_required
def ajax_add_product(request, pk, dk):
    """Add one unit of product *dk* to order *pk* and return the refreshed
    order-items table as an HTML fragment wrapped in JSON."""
    order = get_object_or_404(Order, id=pk)
    product = get_object_or_404(Product, id=dk)
    order_item, created = OrderItem.objects.get_or_create(order=order,
                                                          product=product)
    if created:
        # First unit: snapshot the product's current prices on the line.
        order_item.qty = 1
        order_item.price = product.value
        order_item.discount_price = product.discount_value
    else:
        order_item.qty += 1
    order_item.save()
    product.qty -= 1
    product.save()

    order.refresh_from_db()
    order_items = OrderItemTable(order.order_items.all())
    RequestConfig(request).configure(order_items)
    fragment = render_to_string(template_name='include/order_container.html',
                                request=request,
                                context={'instance': order,
                                         'order_items': order_items})
    return JsonResponse({'result': fragment})
@staff_member_required
def ajax_modify_order_item(request, pk, action):
    """Change an order line ('add' / 'remove' / 'delete') by one unit and
    return the refreshed order-items fragment as JSON.

    Bug fix: 'remove' previously decremented the line quantity, restocked
    the product, and then clamped the quantity back to 1 — so when the
    line was already at qty 1 the product stock was inflated by one on
    every call. Stock is now only restored when the quantity actually
    decreases.
    """
    order_item = get_object_or_404(OrderItem, id=pk)
    product = order_item.product
    instance = order_item.order
    if action == 'remove':
        if order_item.qty > 1:
            order_item.qty -= 1
            product.qty += 1
        # qty stays at its floor of 1; use 'delete' to drop the line.
    if action == 'add':
        order_item.qty += 1
        product.qty -= 1
    product.save()
    order_item.save()
    if action == 'delete':
        order_item.delete()

    instance.refresh_from_db()
    order_items = OrderItemTable(instance.order_items.all())
    RequestConfig(request).configure(order_items)
    data = dict()
    data['result'] = render_to_string(template_name='include/order_container.html',
                                      request=request,
                                      context={
                                          'instance': instance,
                                          'order_items': order_items
                                      })
    return JsonResponse(data)
@staff_member_required
def ajax_search_products(request, pk):
    """Live product search for the order edit page; returns an HTML fragment.

    Shows at most 12 active products, filtered by title prefix when the
    'q' GET parameter is present.
    """
    instance = get_object_or_404(Order, id=pk)
    q = request.GET.get('q', None)
    qs = Product.broswer.active()  # [sic] manager name as defined on Product
    if q:
        qs = qs.filter(title__startswith=q)
    products = ProductTable(qs[:12])
    RequestConfig(request).configure(products)
    fragment = render_to_string(template_name='include/product_container.html',
                                request=request,
                                context={'products': products,
                                         'instance': instance})
    return JsonResponse({'products': fragment})
@staff_member_required
def order_action_view(request, pk, action):
    """Apply a simple action ('is_paid' or 'delete') to an order and
    return to the dashboard."""
    order = get_object_or_404(Order, id=pk)
    if action == 'is_paid':
        order.is_paid = True
        order.save()
    elif action == 'delete':
        order.delete()
    return redirect(reverse('homepage'))
@staff_member_required
def ajax_calculate_results_view(request):
    """Aggregate totals for the filtered order set, rendered to a fragment.

    NOTE: the template receives locals(), so the names bound here
    (orders, total_value, total_paid_value, remaining_value) are part of
    the template contract — do not rename.
    """
    orders = Order.filter_data(request, Order.objects.all())
    total_value, total_paid_value, remaining_value, data = 0, 0, 0, dict()
    if orders.exists():
        total_value = orders.aggregate(Sum('final_value'))['final_value__sum']
        total_paid_value = (orders.filter(is_paid=True)
                            .aggregate(Sum('final_value'))['final_value__sum']
                            if orders.filter(is_paid=True) else 0)
        remaining_value = total_value - total_paid_value
    total_value = f'{total_value} {CURRENCY}'
    total_paid_value = f'{total_paid_value} {CURRENCY}'
    remaining_value = f'{remaining_value} {CURRENCY}'
    data['result'] = render_to_string(template_name='include/result_container.html',
                                      request=request,
                                      context=locals())
    return JsonResponse(data)
@staff_member_required
def ajax_calculate_category_view(request):
    """Per-category quantity/income breakdown for the filtered orders.

    NOTE: the template receives locals(); keep these names stable.
    """
    orders = Order.filter_data(request, Order.objects.all())
    order_items = OrderItem.objects.filter(order__in=orders)
    category_analysis = order_items.values_list('product__category__title').annotate(
        qty=Sum('qty'),
        total_incomes=Sum('total_price'))
    data = dict()
    # Flag + currency symbol read by the shared result template.
    category, currency = True, CURRENCY
    data['result'] = render_to_string(template_name='include/result_container.html',
                                      request=request,
                                      context=locals())
    return JsonResponse(data)
| 37.739837 | 118 | 0.630978 | 2,647 | 0.285114 | 0 | 0 | 8,532 | 0.919 | 0 | 0 | 856 | 0.092202 |
0b13468367f2b17010236ccf549467580e3949c1 | 4,880 | py | Python | example/testify_pytorch_to_caffe_example.py | templeblock/nn_tools | 4389f3e2ca35b0a37f872cbb85f3cadd3f94e4eb | [
"MIT"
] | null | null | null | example/testify_pytorch_to_caffe_example.py | templeblock/nn_tools | 4389f3e2ca35b0a37f872cbb85f3cadd3f94e4eb | [
"MIT"
] | null | null | null | example/testify_pytorch_to_caffe_example.py | templeblock/nn_tools | 4389f3e2ca35b0a37f872cbb85f3cadd3f94e4eb | [
"MIT"
] | null | null | null | import caffe
import torch
import numpy as np
import argparse
from collections import OrderedDict
from torch.autograd import Variable
import torch.nn as nn
def arg_parse():
    """Parse CLI options: model name, comparison precision, GPU flag."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', '-m', default='alexnet')
    parser.add_argument('--decimal', '-d', default=2)
    parser.add_argument('--gpu', '-gpu', action='store_true')
    return parser.parse_args()
def generate_random(shape, gpu=False):
    """Return ([numpy array], [torch Variable]) of uniform random data of
    the given shape, moved to GPU when requested."""
    sample = np.random.rand(np.prod(shape)).reshape(shape)
    tensor = Variable(torch.Tensor(sample))
    return [sample], [tensor.cuda() if gpu else tensor]
def get_input_size(caffe_net):
    """Shape of the net's first input blob (e.g. (N, C, H, W))."""
    first_input = caffe_net.inputs[0]
    return caffe_net.blobs[first_input].data.shape
def forward_torch(net, data):
    """Run *net* on *data*, recording every module's output.

    Returns (blobs, outputs): blobs maps layer name (dots replaced by
    underscores) to its numpy output; outputs is a list of numpy arrays
    for the network's final result(s). In-place ops are disabled so the
    recorded intermediate tensors are not overwritten.

    Bug fix: the original invoked ``net.forward(*data)`` directly, which
    bypasses ``Module.__call__`` and therefore never fires the registered
    forward hooks — ``blobs`` came back empty and every per-layer
    comparison was silently skipped. The net is now called via
    ``net(*data)`` so the hooks run.
    """
    blobs = OrderedDict()
    module2name = {}
    for layer_name, module in net.named_modules():
        module2name[module] = layer_name.replace('.', '_')
        # In-place modules would clobber the tensors we want to record.
        if hasattr(module, 'inplace'):
            module.inplace = False

    def record_output(module, inputs, output):
        blobs[module2name[module]] = output.data.cpu().numpy()

    for module in net.modules():
        module.register_forward_hook(record_output)

    output = net(*data)
    if isinstance(output, tuple):
        outputs = [part.data.cpu().numpy() for part in output]
    else:
        outputs = [output.data.cpu().numpy()]
    return blobs, outputs
def forward_caffe(net, data):
    """Run the caffe *net* on *data* and collect per-layer outputs.

    Returns (blobs, outputs): blobs maps each layer name to the list of
    its top blobs' data arrays; outputs holds the network's final output
    arrays in net.outputs order.
    """
    for input_name, array in zip(net.inputs, data):
        net.blobs[input_name].data[...] = array
    result = net.forward()

    # Map every top blob back to the layer that produced it.
    blob2layer = {}
    for layer_name, tops in net.top_names.items():
        for top in tops:
            blob2layer[top] = layer_name

    blobs = OrderedDict()
    for blob_name, blob in net.blobs.items():
        layer_name = blob2layer[blob_name]
        blobs.setdefault(layer_name, []).append(blob.data)

    outputs = [result[output_name] for output_name in net.outputs]
    return blobs, outputs
def test(net_caffe, net_torch, data_np, data_torch, args):
    """Compare caffe and torch outputs layer-by-layer and at the network
    output, printing PASS/FAIL per layer.

    Layers whose sizes differ and relu layers are skipped (in-place
    handling makes the latter incomparable).
    """
    blobs_caffe, rsts_caffe = forward_caffe(net_caffe, data_np)
    blobs_torch, rsts_torchs = forward_torch(net_torch, data_torch)

    for layer, values in blobs_caffe.items():
        if layer not in blobs_torch:
            continue
        value_torch = blobs_torch[layer]
        value = values[0]
        if value.size != value_torch.size or 'relu' in layer:
            continue
        try:
            np.testing.assert_almost_equal(value, value_torch,
                                           decimal=args.decimal)
            print("TEST layer {}: PASS".format(layer))
        except Exception:
            print("TEST layer {}: FAIL".format(layer))
        # np.testing.assert_almost_equal(np.clip(value, min=0), np.clip(value_torch, min=0))

    print("TEST output")
    for rst_caffe, rst_torch in zip(rsts_caffe, rsts_torchs):
        np.testing.assert_almost_equal(rst_caffe, rst_torch,
                                       decimal=args.decimal)
    print("TEST output: PASS")
if __name__ == '__main__':
    args = arg_parse()

    # Build the torchvision reference model for the requested name.
    # (The three original branches were near-identical; deduplicated.)
    if args.model == 'alexnet':
        from torchvision.models.alexnet import alexnet
        net_torch = alexnet(True).eval()
    elif args.model == 'resnet18':
        from torchvision.models.resnet import resnet18
        net_torch = resnet18(True).eval()
    elif args.model == 'inception_v3':
        from torchvision.models.inception import inception_v3
        net_torch = inception_v3(True, transform_input=False).eval()
    else:
        raise NotImplementedError()

    if args.gpu:
        net_torch.cuda()

    try:
        net_caffe = caffe.Net('%s.prototxt' % args.model,
                              '%s.caffemodel' % args.model, caffe.TEST)
    except Exception:
        # BUG FIX: the original `raise ("...")` raised a plain string,
        # which is itself a TypeError under Python 3.
        raise RuntimeError(
            "Please run %s_pytorch_to_caffe.py first" % args.model)

    shape = get_input_size(net_caffe)
    data_np, data_torch = generate_random(shape, args.gpu)
    test(net_caffe, net_torch, data_np, data_torch, args)
| 33.655172 | 100 | 0.63832 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 596 | 0.122131 |
0b13920e15983fcf25bab57e71b06db2fade7f5d | 1,903 | py | Python | gregsprograms/ohshots.py | gjhartwell/cth-python | 558148a5755fd0bd3b12e1380d365f8bf51efa19 | [
"MIT"
] | null | null | null | gregsprograms/ohshots.py | gjhartwell/cth-python | 558148a5755fd0bd3b12e1380d365f8bf51efa19 | [
"MIT"
] | null | null | null | gregsprograms/ohshots.py | gjhartwell/cth-python | 558148a5755fd0bd3b12e1380d365f8bf51efa19 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue May 26 15:53:33 2020
@author: hartwgj
"""
# --------------------------------------
# OHShots.py
#
# MDSplus Python project
# for CTH data access
#
# OHShots.py --- gets the shots within a given date range that use OH
#
# Parameters:
# startdate
# enddate
#
# Returns:
# list of shots
#
# Example:
# shots=ohshots(190901,200329,'mds')
#
# Also defines:
#
# Greg Hartwell
# 2020 May 26
#----------------------------------------------------------------------------
import cthmds
def ohshots(startdate, enddate, server):
    """Return the CTH shot numbers in [startdate, enddate] whose USESTATE
    word has the OH bit (bit 5) set.

    Dates are YYMMDD integers; shot numbers are date*100 + shot-of-day
    (1..99). Shots that cannot be opened or that lack a usestate node are
    skipped silently.
    """
    # always connect to mds server
    c = cthmds.cthconnect(server)
    shots = []
    for idate in range(startdate, enddate + 1):
        # don't search for dates that can't exist (day > 31, month > 12)
        day = idate % 100
        month = (idate % 10000) // 100
        if day > 31 or month > 12:
            continue
        print(idate)
        for ishot in range(1, 100):
            shotnum = idate * 100 + ishot
            try:
                cthmds.cthopen(c, shotnum)
            except Exception:
                continue
            try:
                usestate = c.get('usestate')
            except Exception:
                continue
            # BUG FIX: the original expanded usestate into a reversed bit
            # list and indexed element 5, which raised IndexError for any
            # value below 32 (fewer than 6 bits). Test the OH bit (bit 5)
            # arithmetically instead.
            if usestate > 0 and (int(usestate) >> 5) & 1:
                shots.append(shotnum)
    return shots
#-----------------------------------------------------------------------------
| 25.716216 | 78 | 0.393589 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 751 | 0.39464 |
0b1418e35fb652db01e8e815e64e3fe3157b954a | 6,271 | py | Python | app.py | plotly/radon-transform-visualisation | 5a2fc53316087a08972400e55b1af54ebfb1b528 | [
"MIT"
] | null | null | null | app.py | plotly/radon-transform-visualisation | 5a2fc53316087a08972400e55b1af54ebfb1b528 | [
"MIT"
] | null | null | null | app.py | plotly/radon-transform-visualisation | 5a2fc53316087a08972400e55b1af54ebfb1b528 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import numpy as np
import cv2
import matplotlib.pyplot as plt
from skimage.transform import radon, rescale, rotate
import plotly.graph_objs as go
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# Input image path (grayscale source for the transform).
args = {
    'input': './assets/00382-s1-neg2.png'
}

# Read the image as grayscale and downscale it for speed.
image = cv2.imread(args["input"], 0)
image = rescale(image, scale=0.3, mode='reflect', multichannel=False)

# Pad the shorter axis so the image becomes square, splitting the
# difference between the two sides: top/bottom padding when the image is
# wider than tall, left/right padding otherwise.
image_width, image_height = image.shape
image_shape_dif = abs(image_width - image_height)
image_shape_dif_first_half = int(image_shape_dif / 2)
image_shape_dif_second_half = image_shape_dif - image_shape_dif_first_half
if image_width < image_height:
    pad_spec = ((image_shape_dif_first_half, image_shape_dif_second_half),
                (0, 0))
else:
    pad_spec = ((0, 0),
                (image_shape_dif_first_half, image_shape_dif_second_half))
padded_image = np.pad(image, pad_spec, 'constant', constant_values=0)

padded_image_file_name = './assets/padded-image.png'
plt.imsave(padded_image_file_name, padded_image, cmap=plt.cm.Greys_r)
padded_image_width, padded_image_height = padded_image.shape

# Radon transform over 0..180 degrees; transpose so rows index angles.
theta = np.linspace(0., 180., 180, endpoint=False)
sinogram = radon(image, theta=theta, circle=False)
sinogram = np.array(sinogram.T)
sinogram_width, sinogram_height = sinogram.shape

# Save the sinogram so the figure can embed it as a background image.
sinogram_file_name = './assets/radon-transform.png'
plt.imsave(sinogram_file_name, sinogram, cmap=plt.cm.Greys_r)

# One (angle index, projection row) pair per angle for the line plots.
x_data = []
y_data = []
for angle_index in range(len(theta)):
    x_data.append(angle_index)
    y_data.append(sinogram[angle_index, :])
# Page layout: title, instructions, angle slider, and the two figures.
app.layout = html.Div([
    html.H1(
        children='Radon Transform Visualisation',
        style={'textAlign': 'center'}
    ),
    html.Div(
        children='A web application for visualising radon transform.',
        style={'textAlign': 'center'}
    ),
    html.Hr(),
    html.Div(
        children='Use the slider to change the view of radon transform at different angle.',
        style={'textAlign': 'left'}
    ),
    html.Hr(),
    dcc.Slider(
        id='radon-slider',
        min=0,
        max=179,
        value=0,
        step=1
    ),
    html.Div(id='slider-output-container'),
    dcc.Graph(id='radon-transform'),
    dcc.Graph(id='radon-transform-angle-view')
])
# Radon-transform view with a horizontal marker line at the chosen angle.
@app.callback(
    Output('radon-transform', 'figure'),
    [Input('radon-slider', 'value')],
)
def update_trace_radon_transform(value):
    """Redraw the sinogram with an annotated line at the selected angle."""
    # The image's y axis runs 0..179 top-down, so invert the slider value.
    marker_y = 179 - value
    traces = [
        go.Scatter(
            x=(0, sinogram_height),
            y=(0, 179),
            mode="markers",
            showlegend=False
        ),
        go.Scatter(
            x=[20],
            y=[marker_y],
            name="Angle:{}".format(value),
            mode="text"
        ),
    ]
    layout = go.Layout(
        xaxis={'title': 'Pixel values', 'showgrid': False, 'zeroline': False},
        yaxis={'title': 'Angle', 'showgrid': False, 'zeroline': False},
        margin=dict(l=40, r=10, t=10, b=40),
        # Embed the pre-rendered sinogram as the plot background.
        images=[dict(
            source=sinogram_file_name,
            xref="x",
            yref="y",
            x=0,
            y=179,
            sizex=sinogram_height,
            sizey=179,
            sizing="stretch",
            opacity=1.0,
            visible=True,
            layer="below")],
        template="plotly_white",
        shapes=[dict(
            type="line",
            xref="x",
            yref="y",
            x0=0,
            y0=marker_y,
            x1=sinogram_height,
            y1=marker_y,
            line=dict(color="LightSeaGreen", width=3))],
    )
    return {'data': traces, 'layout': layout}
# Echo the slider's current angle under the slider.
@app.callback(
    Output('slider-output-container', 'children'),
    [Input('radon-slider', 'value')],
)
def display_value(value):
    """Human-readable label for the selected angle."""
    return 'Angle: ' + str(value)
# Projection profile at the selected angle with the rotated input behind it.
@app.callback(
    Output('radon-transform-angle-view', 'figure'),
    [Input('radon-slider', 'value')],
)
def display_image_and_update_graph(value):
    """Plot the sinogram row for *value* degrees over the correspondingly
    rotated (and freshly saved) input image."""
    rotated_image = rotate(padded_image, -value)
    rotated_image_file_name = "./assets/rotated_image_{}.png".format(value)
    plt.imsave(rotated_image_file_name, rotated_image, cmap=plt.cm.Greys_r)

    profile = y_data[value]
    peak = np.amax(profile)
    # Center the (square) rotated image horizontally under the profile.
    x_min = int(sinogram_height / 2 - padded_image_width / 2)

    layout = go.Layout(
        xaxis={'title': 'Position (pixel)'},
        yaxis={'title': 'Pixel values'},
        margin={'l': 40, 'b': 40, 't': 10, 'r': 10},
        images=[dict(
            source=rotated_image_file_name,
            xref="x",
            yref="y",
            x=x_min,
            y=peak,
            sizex=padded_image_width,
            sizey=peak,
            sizing="stretch",
            opacity=1.0,
            visible=True,
            layer="below")],
        template="plotly_white",
    )
    return {'data': [go.Scatter(y=profile)], 'layout': layout}
# Development server entry point; disable debug for production use.
if __name__ == '__main__':
    app.run_server(debug=True)
| 26.238494 | 92 | 0.545368 | 0 | 0 | 0 | 0 | 3,173 | 0.50598 | 0 | 0 | 1,417 | 0.225961 |
0b14ffda334fedd5c264042439a74b0107e9cb74 | 766 | py | Python | code/chapter_04/listing_04_13.py | guinslym/python_earth_science_book | f4dd0115dbbce140c6713989f630a71238daa72c | [
"MIT"
] | 80 | 2021-04-19T10:03:57.000Z | 2022-03-30T15:34:47.000Z | code/chapter_04/listing_04_13.py | guinslym/python_earth_science_book | f4dd0115dbbce140c6713989f630a71238daa72c | [
"MIT"
] | null | null | null | code/chapter_04/listing_04_13.py | guinslym/python_earth_science_book | f4dd0115dbbce140c6713989f630a71238daa72c | [
"MIT"
] | 23 | 2021-04-25T03:50:07.000Z | 2022-03-22T03:06:19.000Z | # Go to new line using \n
print('-------------------------------------------------------')
print("My name is\nMaurizio Petrelli")
# Inserting characters using octal values
print('-------------------------------------------------------')
print("\100 \136 \137 \077 \176")
# Inserting characters using hex values
print('-------------------------------------------------------')
print("\x23 \x24 \x25 \x26 \x2A")
print('-------------------------------------------------------')
'''Output:
-------------------------------------------------------
My name is
Maurizio Petrelli
-------------------------------------------------------
@ ^ _ ? ~
-------------------------------------------------------
# $ % & *
-------------------------------------------------------
'''
| 31.916667 | 64 | 0.26893 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 703 | 0.917755 |
0b15fe672d85053023d683317e83766d99d50902 | 505 | py | Python | azure/durable_functions/models/RetryOptions.py | gled4er/azure-functions-durable-python | f212d4c32372e093a6d4d607ba40a2f443372f43 | [
"MIT"
] | 9 | 2019-08-16T15:37:51.000Z | 2020-05-12T17:33:50.000Z | azure/durable_functions/models/RetryOptions.py | gled4er/azure-functions-durable-python | f212d4c32372e093a6d4d607ba40a2f443372f43 | [
"MIT"
] | 7 | 2019-07-26T00:24:20.000Z | 2020-01-29T16:30:06.000Z | azure/durable_functions/models/RetryOptions.py | gled4er/azure-functions-durable-python | f212d4c32372e093a6d4d607ba40a2f443372f43 | [
"MIT"
] | 11 | 2019-07-22T17:40:47.000Z | 2020-06-24T14:43:18.000Z | class RetryOptions:
def __init__(self, firstRetry: int, maxNumber: int):
self.backoffCoefficient: int
self.maxRetryIntervalInMilliseconds: int
self.retryTimeoutInMilliseconds: int
self.firstRetryIntervalInMilliseconds: int = firstRetry
self.maxNumberOfAttempts: int = maxNumber
if self.firstRetryIntervalInMilliseconds <= 0:
raise ValueError("firstRetryIntervalInMilliseconds value"
"must be greater than 0.")
| 38.846154 | 69 | 0.683168 | 504 | 0.99802 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.128713 |
0b1794937b8fbf2bc1062698f11f7eb5b8b36fe3 | 14,753 | py | Python | modules/kachelmann_bot.py | WhereIsTheExit/HeinzBot | 1e35f1706d03b47dddfa8b8a04cede6a7c4be301 | [
"Apache-2.0"
] | 6 | 2019-05-12T13:30:48.000Z | 2020-07-30T08:58:10.000Z | modules/kachelmann_bot.py | WhereIsTheExit/HeinzBot | 1e35f1706d03b47dddfa8b8a04cede6a7c4be301 | [
"Apache-2.0"
] | 16 | 2019-05-11T14:07:06.000Z | 2021-11-29T22:13:35.000Z | modules/kachelmann_bot.py | WhereIsTheExit/HeinzBot | 1e35f1706d03b47dddfa8b8a04cede6a7c4be301 | [
"Apache-2.0"
] | 5 | 2019-05-11T13:29:47.000Z | 2020-01-15T12:18:40.000Z | import datetime
import urllib
from urllib.request import urlopen
import time
import logging
import bs4
import telegram
from telegram import Update
from telegram.ext import CommandHandler, CallbackContext
from modules.abstract_module import AbstractModule
from utils.decorators import register_module, register_command, log_errors
from constants.bezirke import BEZIRKE
# selenium for forecast
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.firefox.options import Options
@register_module()
class KachelmannBot(AbstractModule):
    """Telegram bot module that posts weather imagery from kachelmannwetter.com.

    Provides the commands /radar (rain radar), /tracking (storm tracking),
    /wind (gusts / mean wind) and /forecast (full forecast screenshot,
    currently disabled). Region abbreviations are resolved via BEZIRKE.
    """

    def __getClosestTime(self, increment):
        """Return the latest UTC time slot as 'YYYYMMDD-HHMMz'.

        kachelmannwetter image URLs are keyed by time slots of `increment`
        minutes; this rounds the current UTC time down to the previous
        multiple of `increment` minutes.
        """
        now = datetime.datetime.utcnow()
        # round minutes down to the previous multiple of `increment`
        slot = now - datetime.timedelta(minutes=now.minute % increment)
        return slot.strftime("%Y%m%d-%H%Mz")

    def __getKachelmannImage(self, pageURL):
        """Scrape the Open-Graph preview image URL from a kachelmannwetter page.

        The og:image meta tag holds the rendered weather map for the page.
        """
        header = {
            'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64)"
        }
        soup = self.get_soup(pageURL, header)
        return soup.find("meta", property="og:image")["content"]

    def __getRegion(self, region):
        """Resolve a user-supplied region abbreviation via BEZIRKE.

        Returns a (region, errorMessage) tuple. On success errorMessage is ""
        and region is the resolved value; on failure region is returned
        unresolved together with a user-facing error message.
        """
        if not region:
            return (region, "Parameter angeben bitte! Mögliche Regionen:\n" + ", ".join(BEZIRKE.keys()))
        try:
            return (BEZIRKE[region.upper()], "")
        except KeyError:
            return (region, "De Region kenn i ned 🙄")

    def get_soup(self, url, header):
        """Fetch `url` with the given request headers and parse it with BeautifulSoup."""
        req = urllib.request.Request(url, headers=header)
        return bs4.BeautifulSoup(urlopen(req), "html.parser")

    @register_command(command="radar", short_desc="Shows the rain radar of a region. 🌧",
                      long_desc="This command returns an image containing the current "
                                "rain conditions of a given austrian region.\n"
                                "Possible regions are: " + ", ".join(BEZIRKE.keys()),
                      usage=["/radar $region-abbreviation", "/radar FR"])
    def radar(self, update: Update, context: CallbackContext):
        """Send the current rain-radar image for the requested region."""
        queryText = self.get_command_parameter("/radar", update)
        region, errorMessage = self.__getRegion(queryText)
        if errorMessage != "":
            context.bot.send_message(chat_id=update.message.chat_id, reply_to_message_id=update.message.message_id,
                                     text=errorMessage, parse_mode=telegram.ParseMode.MARKDOWN)
            return

        # rain-radar images are published in 5-minute slots
        pageURL = "https://kachelmannwetter.com/at/regenradar/{}/{}.html".format(
            region, self.__getClosestTime(5))
        context.bot.send_photo(chat_id=update.message.chat_id,
                               photo=self.__getKachelmannImage(pageURL))

    @register_command(command="tracking", short_desc="Storm-tracking of a region. ⛈⚡️",
                      long_desc="This command returns an image containing the current "
                                "storm-tracking information of a given austrian region.\n"
                                "Possible regions are: " + ", ".join(BEZIRKE.keys()),
                      usage=["/tracking $region-abbreviation", "/tracking AT"])
    def tracking(self, update: Update, context: CallbackContext):
        """Send the current storm-tracking (lightning + HD radar) image for a region."""
        queryText = self.get_command_parameter("/tracking", update)
        region, errorMessage = self.__getRegion(queryText)
        if errorMessage != "":
            # invalid region
            context.bot.send_message(chat_id=update.message.chat_id, reply_to_message_id=update.message.message_id,
                                     text=errorMessage, parse_mode=telegram.ParseMode.MARKDOWN)
            return

        # storm-tracking images are published in 5-minute slots
        pageURL = "https://kachelmannwetter.com/at/stormtracking/{}/blitze-radarhd/{}.html".format(
            region, self.__getClosestTime(5))
        context.bot.send_photo(chat_id=update.message.chat_id,
                               photo=self.__getKachelmannImage(pageURL))

    @register_command(command="wind", short_desc="Shows the wind gusts of a region. 💨🌬",
                      long_desc="This command returns an image containing the current "
                                "wind direction or wind gust information of a given austrian region.\n"
                                "Possible regions are: " + ", ".join(BEZIRKE.keys()),
                      usage=["/wind (böen|mittel) $region", "/wind böen AT", "/wind mittel WZ"])
    def wind(self, update: Update, context: CallbackContext):
        """Send a wind-gust ('böen') or mean-wind ('mittel') map for a region."""
        queryText = self.get_command_parameter("/wind", update)
        syntaxErrorMessage = "I checks ned ganz, bitte schick ma dein command im Muster:\n`/wind (böen|mittel) <Region>`"
        try:
            # bugfix: maxsplit=1 yields exactly (type, region); the previous
            # maxsplit=2 could produce three parts and reject valid input
            windtype, region = queryText.split(maxsplit=1)
        except (ValueError, AttributeError):
            # wrong number of parameters or no parameters at all
            context.bot.send_message(chat_id=update.message.chat_id, reply_to_message_id=update.message.message_id,
                                     text=syntaxErrorMessage, parse_mode=telegram.ParseMode.MARKDOWN)
            return

        region, errorMessage = self.__getRegion(region)
        if errorMessage != "":
            # bugfix: compare case-insensitively so e.g. "/wind AT Böen" is
            # also recognized as mixed-up parameters
            if region.lower() in ("böen", "böe", "mittel"):
                # mixed up parameters (/wind at böen), send syntax error
                context.bot.send_message(chat_id=update.message.chat_id, reply_to_message_id=update.message.message_id,
                                         text=syntaxErrorMessage, parse_mode=telegram.ParseMode.MARKDOWN)
            else:
                # else send unknown region error
                context.bot.send_message(chat_id=update.message.chat_id, reply_to_message_id=update.message.message_id,
                                         text=errorMessage, parse_mode=telegram.ParseMode.MARKDOWN)
            return

        # map the user-facing type to the URL path segment
        if windtype.lower() in ("böen", "böe"):
            windtype = "windboeen"
        elif windtype.lower() == "mittel":
            windtype = "windrichtung-windmittel"
        else:
            # unknown type, send error
            errorMessage = "Mechadsd du Böen oder Mittelwind? Schick ma ans vo de zwa: 🌬️\n`/wind böen <Region>`\n`/wind mittel <Region>`"
            context.bot.send_message(chat_id=update.message.chat_id, reply_to_message_id=update.message.message_id,
                                     text=errorMessage, parse_mode=telegram.ParseMode.MARKDOWN)
            return

        # wind analysis images are published hourly
        pageURL = "https://kachelmannwetter.com/at/analyse/superhd/{}/{}/{}.html".format(
            region, windtype, self.__getClosestTime(60))
        context.bot.send_photo(chat_id=update.message.chat_id,
                               photo=self.__getKachelmannImage(pageURL))

    @register_command(command="forecast", short_desc="Shows the forecast for the selected location",
                      long_desc="This command returns an image containing the"
                                "forecast for temperature, rainfall, clouds, wind, sunshine and barometric pressure.\n",
                      usage=["/forecast <location>", "/forecast Hagenberg", "/forecast Ellmau"])
    @log_errors(perform_finally_call=True)
    def forecast(self, update: Update, context: CallbackContext):
        """Render the kachelmannwetter forecast page in headless Firefox and
        send a screenshot of the compact forecast for the given location.

        NOTE: the command is currently disabled and returns immediately.
        """
        context.bot.send_message(chat_id=update.message.chat_id, reply_to_message_id=update.message.message_id,
                                 text="Command temporary disabled", parse_mode=telegram.ParseMode.MARKDOWN)
        return

        # ------ everything below is unreachable while the command is disabled ------
        queryText = self.get_command_parameter("/forecast", update)
        syntaxErrorMessage = "I checks ned ganz, bitte schick ma dein command im Muster:\n`/forecast <Ort>`"
        # bugfix: str.split() returns a list — the location must stay a string
        location = queryText.strip() if queryText else ""
        if location == "":
            context.bot.send_message(chat_id=update.message.chat_id, reply_to_message_id=update.message.message_id,
                                     text=syntaxErrorMessage, parse_mode=telegram.ParseMode.MARKDOWN)
            print("Error splitting command")
            return

        options = Options()
        # headless mode: run on systems without a display (docker container)
        options.headless = True
        driver = webdriver.Firefox(options=options, log_path='./log/geckodriver.log')
        # @log_errors(perform_finally_call=True) calls this even on errors;
        # bugfix: quit() ends the whole geckodriver session, close() would
        # only close the window and leak the driver process
        self.finally_call = lambda: driver.quit()

        searchUrl = "https://kachelmannwetter.com/at/vorhersage"
        driver.get(searchUrl)

        # dismiss the cookie consent banner if it shows up
        try:
            print("Trying to find cookie message ...")
            WebDriverWait(driver, 15).until(
                EC.presence_of_element_located((By.CSS_SELECTOR, ".nx2XwXx4"))
            )
            print("Found message, clicking button ...")
            driver.find_element_by_css_selector("button.nx3Fpp8U.nx3gnDVX").click()
        except TimeoutException:
            print("Cookie message not found, skipping ...")

        # search for the location on the forecast search page
        print("Searching for location")
        searchBox = driver.find_element_by_id("forecast-input-0")
        searchBox.clear()
        searchBox.send_keys(location)
        driver.find_element_by_css_selector("span.input-group-addon:nth-child(4)").click()

        if driver.current_url == searchUrl + "/search":
            print("Still on search page")
            # if the URL after the search is still the search URL,
            # there are either multiple or no results for the location.
            searchRes = driver.find_elements_by_id("search-results")[0]
            resultText = searchRes.find_elements_by_tag_name("p")[0].text
            if resultText == 'Wir haben zu Ihrer Sucheingabe leider keine passenden Orte gefunden.':
                # no results found
                errMsg = "Moasd des Loch kenn i? Probier vllt an aundan Ort. 🗺️"
                context.bot.send_message(chat_id=update.message.chat_id,
                                         reply_to_message_id=update.message.message_id,
                                         text=errMsg)
                # bugfix: abort here — the original fell through and tried
                # to screenshot the search page
                return
            elif resultText == 'Wir haben mehrere infrage kommende Orte für Ihre Sucheingabe gefunden.':
                # just take the first search result - otherwise the communication flow
                # will be slowed down for a functionality that can be forced by using a more
                # specific search term in the first place
                driver.find_elements_by_class_name("fcwcity")[0].find_element_by_tag_name("a").click()

        # let the forecast page render
        print("Waiting for page to render")
        WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'visibility_graph')))
        time.sleep(1)  # wait for animation to finish

        # hide header (will jump into forecast otherwise)
        driver.execute_script("document.getElementById('w0').remove()")
        driver.execute_script("document.getElementById('w3').remove()")
        driver.execute_script("document.getElementsByClassName('menue-head')[0].remove()")

        # screenshot only the compact forecast element
        print("Saving image")
        imagePath = "./images/forecast_image.png"
        driver.find_element_by_id("weather-forecast-compact").screenshot(imagePath)

        # caption: location name + district; empty if the elements are missing.
        # bugfix: NoSuchElementException is now imported; find_elements_*[0]
        # raises IndexError on an empty result list, so catch that as well
        locName = ""
        try:
            locName = (driver.find_elements_by_class_name("forecast-h1")[0].text
                       + driver.find_elements_by_class_name("h3-landkreis")[0].text)
        except (NoSuchElementException, IndexError):
            # keep the caption empty
            print("No location name found")

        # send image
        context.bot.send_photo(chat_id=update.message.chat_id, photo=open(imagePath, "rb"), caption=locName)
| 47.590323 | 140 | 0.626856 | 14,019 | 0.947678 | 0 | 0 | 14,038 | 0.948962 | 0 | 0 | 5,109 | 0.345366 |